text stringlengths 11 4.05M |
|---|
package main
import (
"github.com/go-chi/chi"
rest "github.com/ktnyt/go-rest"
)
// register mounts a REST resource under "/<pattern>" on the given chi
// router: collection verbs (browse/delete/create) at the root, and
// item verbs (select/remove/update/modify) under "/{pk}".
func register(pattern string, router chi.Router, iface rest.Interface) {
	router.Route("/"+pattern, func(collection chi.Router) {
		collection.Get("/", iface.Browse)
		collection.Delete("/", iface.Delete)
		collection.Post("/", iface.Create)
		collection.Route("/{pk}", func(item chi.Router) {
			item.Get("/", iface.Select)
			item.Delete("/", iface.Remove)
			item.Put("/", iface.Update)
			item.Patch("/", iface.Modify)
		})
	})
}
|
/*
create a type SQUARE
create a type CIRCLE
attach a method to each that calculates AREA and returns it
circle area= π r 2
square area = L * W
create a type SHAPE that defines an interface as anything that has the AREA method
create a func INFO which takes type shape and then prints the area
create a value of type square
create a value of type circle
use func info to print the area of square
use func info to print the area of circle
*/
package main
import (
"fmt"
"math"
)
// shape abstracts anything that can report its area (per the spec comment
// above, an info(shape) helper should print it).
//
// NOTE(review): as declared, area() returns nothing, so neither Square
// (area() int) nor Circle (area() float64) satisfies this interface, and
// nothing in this file uses it — the spec's info() function was never
// written. Confirm the intended signature (likely `area() float64`).
type shape interface {
	area()
}
// Square models the spec's SQUARE type; its area is l * w, so it is
// really a rectangle with integer sides.
type Square struct {
	l int // length
	w int // width
}
// area returns the square's surface: length times width.
func (s Square) area() int {
	surface := s.l * s.w
	return surface
}
// Circle models the spec's CIRCLE type.
type Circle struct {
	r float64 // radius
}
// area returns the circle's surface, pi * r^2.
func (c Circle) area() float64 {
	rSquared := math.Pow(c.r, 2)
	return math.Pi * rSquared
}
// main builds one Square and one Circle and prints their areas — circle
// first, then square, matching the original output order.
func main() {
	sq := Square{l: 20, w: 30}
	ci := Circle{r: 30}
	fmt.Println(ci.area())
	fmt.Println(sq.area())
}
|
package glog
import (
"time"
log "github.com/mosteknoloji/glog"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// Compile-time check that UnaryLogHandler is a valid interceptor.
var _ grpc.UnaryServerInterceptor = UnaryLogHandler

// UnaryLogHandler is a grpc.UnaryServerInterceptor that logs every unary
// call's full method name and wall-clock latency, and the error when the
// handler returns one.
func UnaryLogHandler(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
	start := time.Now()
	resp, err = handler(ctx, req)
	// time.Since replaces the manual Now()/Sub pair.
	latency := time.Since(start)
	if err != nil {
		// Fix: failures were logged at info severity despite the "[ERR]"
		// tag; use the error level so they surface in filtered logs.
		log.Errorf("[ERR] %s took: %s err: %s", info.FullMethod, latency, err)
	} else {
		log.Infof("[INF] %s took: %s", info.FullMethod, latency)
	}
	return
}
|
package orderbook
import "sort"
// Orderbook holds the resting limit orders: Ask is kept sorted by
// ascending price and Bid by descending price (see the sort.Search
// predicates in OrderdAsk/OrderdBid).
type Orderbook struct {
	Ask []*Order
	Bid []*Order
}
// New returns an empty order book.
func New() *Orderbook {
	return &Orderbook{}
}
// Match routes the incoming order by kind: market orders execute
// immediately (any unfilled remainder is returned as the second value),
// limit orders are matched or rested on the appropriate side.
func (orderbook *Orderbook) Match(order *Order) ([]*Trade, *Order) {
	trades := []*Trade{}
	var remainder *Order
	switch order.Kind.String() {
	case "MARKET":
		orderbook.OrderMarket(order, &trades, &remainder)
	case "LIMIT":
		switch order.Side.String() {
		case "ASK":
			orderbook.OrderdAsk(order, &trades)
		case "BID":
			orderbook.OrderdBid(order, &trades)
		}
	}
	return trades, remainder
}
// OrderdAsk matches a limit ask (sell) against the bid side: every bid
// priced at or above the ask price is crossed. If nothing crossed at all,
// the order is rested in Ask, kept sorted by ascending price.
//
// Fixes: (1) a partial fill — incoming volume smaller than the resting
// bid — previously produced no trade at all (OrderdBid handles this case;
// this now mirrors it); (2) fully-filled/zero-volume entries no longer
// generate empty trades.
func (orderbook *Orderbook) OrderdAsk(order *Order, tr *[]*Trade) {
	flag := false
	for _, bid := range orderbook.Bid {
		if order.Volume == 0 {
			// Incoming order fully filled; stop matching.
			break
		}
		if bid.Volume == 0 {
			// Already-consumed resting order; skip (as OrderMarket does).
			continue
		}
		if order.Price <= bid.Price {
			if order.Volume >= bid.Volume {
				// Resting bid fully consumed.
				newtr := Trade{Volume: bid.Volume, Price: bid.Price}
				*tr = append(*tr, &newtr)
				flag = true
				order.Volume -= bid.Volume
				bid.Volume = 0
			} else {
				// Fix: partial fill — trade the remaining ask volume and
				// shrink the resting bid (mirrors OrderdBid's else branch).
				newtr := Trade{Volume: order.Volume, Price: bid.Price}
				*tr = append(*tr, &newtr)
				flag = true
				bid.Volume -= order.Volume
				order.Volume = 0
			}
		}
	}
	if !flag {
		// No fill: insert into Ask preserving ascending price order.
		index := sort.Search(len(orderbook.Ask), func(i int) bool { return orderbook.Ask[i].Price > order.Price })
		orderbook.Ask = append(orderbook.Ask, order)
		copy(orderbook.Ask[index+1:], orderbook.Ask[index:])
		orderbook.Ask[index] = order
	}
}
// OrderdBid matches a limit bid (buy) against the ask side: every ask
// priced at or below the bid price is crossed. If nothing crossed at all,
// the order is rested in Bid, kept sorted by descending price.
//
// Fix: when the order was only a partial fill of a resting ask, its
// volume was never zeroed, so the same volume was traded again against
// every subsequent crossing ask (double fill). The order is now marked
// filled, consistent with OrderMarket's BID branch.
func (orderbook *Orderbook) OrderdBid(order *Order, tr *[]*Trade) {
	flag := false
	for _, ask := range orderbook.Ask {
		if order.Volume == 0 {
			// Incoming order fully filled; stop matching.
			break
		}
		if ask.Volume == 0 {
			// Already-consumed resting order; skip (as OrderMarket does).
			continue
		}
		if ask.Price <= order.Price {
			if ask.Volume <= order.Volume {
				// Resting ask fully consumed.
				newtr := Trade{Volume: ask.Volume, Price: ask.Price}
				*tr = append(*tr, &newtr)
				flag = true
				order.Volume -= ask.Volume
				ask.Volume = 0
			} else {
				// Partial fill of the resting ask.
				newtr := Trade{Volume: order.Volume, Price: ask.Price}
				*tr = append(*tr, &newtr)
				flag = true
				ask.Volume -= order.Volume
				// Fix: the incoming order is fully filled here.
				order.Volume = 0
			}
		}
	}
	if !flag {
		// No fill: insert into Bid preserving descending price order.
		index := sort.Search(len(orderbook.Bid), func(i int) bool { return orderbook.Bid[i].Price < order.Price })
		orderbook.Bid = append(orderbook.Bid, order)
		copy(orderbook.Bid[index+1:], orderbook.Bid[index:])
		orderbook.Bid[index] = order
	}
}
// OrderMarket executes a market order against the opposite side of the
// book, appending one Trade per (partially) consumed resting order. Any
// unfilled remainder is returned through or.
//
// Fixes: fully consumed resting orders previously kept their volume in
// the book (so later orders matched phantom liquidity), and a partial
// fill on the ASK side never decremented the resting bid at all.
func (orderbook *Orderbook) OrderMarket(order *Order, tr *[]*Trade, or **Order) {
	switch order.Side.String() {
	case "BID":
		for _, ask := range orderbook.Ask {
			if ask.Volume == 0 || order.Volume == 0 {
				continue
			}
			if ask.Volume <= order.Volume {
				newtr := Trade{Volume: ask.Volume, Price: ask.Price}
				*tr = append(*tr, &newtr)
				order.Volume -= ask.Volume
				// Fix: mark the resting ask as fully consumed.
				ask.Volume = 0
			} else {
				newtr := Trade{Volume: order.Volume, Price: ask.Price}
				*tr = append(*tr, &newtr)
				ask.Volume -= order.Volume
				order.Volume = 0
			}
		}
		if order.Volume > 0 {
			*or = order
		}
	case "ASK":
		for _, bid := range orderbook.Bid {
			if bid.Volume == 0 || order.Volume == 0 {
				continue
			}
			if bid.Volume <= order.Volume {
				newtr := Trade{Volume: bid.Volume, Price: bid.Price}
				*tr = append(*tr, &newtr)
				order.Volume -= newtr.Volume
				// Fix: mark the resting bid as fully consumed.
				bid.Volume = 0
			} else {
				newtr := Trade{Volume: order.Volume, Price: bid.Price}
				*tr = append(*tr, &newtr)
				// Fix: the partially consumed bid was never decremented.
				bid.Volume -= newtr.Volume
				order.Volume = 0
			}
		}
		if order.Volume > 0 {
			*or = order
		}
	}
}
|
package netutil_test
import (
"fmt"
"net"
"github.com/AdguardTeam/golibs/netutil"
)
// ExampleIPv4Zero shows that IPv4Zero returns the all-zero IPv4 address.
func ExampleIPv4Zero() {
	fmt.Println(netutil.IPv4Zero())

	// Output:
	//
	// 0.0.0.0
}
// ExampleIPv6Zero shows that IPv6Zero returns the all-zero IPv6 address.
func ExampleIPv6Zero() {
	fmt.Println(netutil.IPv6Zero())

	// Output:
	//
	// ::
}
// ExampleParseIP shows ParseIP on valid IPv4 and IPv6 addresses and the
// error produced for garbage input.
func ExampleParseIP() {
	ip, err := netutil.ParseIP("1.2.3.4")
	fmt.Println(ip, err)

	ip, err = netutil.ParseIP("1234::cdef")
	fmt.Println(ip, err)

	ip, err = netutil.ParseIP("!!!")
	fmt.Println(ip, err)

	// Output:
	//
	// 1.2.3.4 <nil>
	// 1234::cdef <nil>
	// <nil> bad ip address "!!!"
}
// ExampleParseIPv4 shows that ParseIPv4 accepts only IPv4 addresses —
// both an IPv6 address and garbage input produce errors.
func ExampleParseIPv4() {
	ip, err := netutil.ParseIPv4("1.2.3.4")
	fmt.Println(ip, err)

	ip, err = netutil.ParseIPv4("1234::cdef")
	fmt.Println(ip, err)

	ip, err = netutil.ParseIPv4("!!!")
	fmt.Println(ip, err)

	// Output:
	//
	// 1.2.3.4 <nil>
	// <nil> bad ipv4 address "1234::cdef"
	// <nil> bad ipv4 address "!!!"
}
// ExampleParseSubnet shows that ParseSubnet treats a bare address as a
// single-host /32 network, while an explicit prefix length is kept.
func ExampleParseSubnet() {
	ip := net.IP{1, 2, 3, 4}
	otherIP := net.IP{1, 2, 3, 5}

	// A bare address becomes a /32.
	n, err := netutil.ParseSubnet("1.2.3.4")
	fmt.Println(n, err)
	fmt.Printf("%s is in %s: %t\n", ip, n, n.Contains(ip))
	fmt.Printf("%s is in %s: %t\n", otherIP, n, n.Contains(otherIP))

	// An explicit prefix is preserved.
	n, err = netutil.ParseSubnet("1.2.3.4/16")
	fmt.Println(n, err)
	fmt.Printf("%s is in %s: %t\n", ip, n, n.Contains(ip))
	fmt.Printf("%s is in %s: %t\n", otherIP, n, n.Contains(otherIP))

	// Output:
	//
	// 1.2.3.4/32 <nil>
	// 1.2.3.4 is in 1.2.3.4/32: true
	// 1.2.3.5 is in 1.2.3.4/32: false
	// 1.2.3.4/16 <nil>
	// 1.2.3.4 is in 1.2.3.4/16: true
	// 1.2.3.5 is in 1.2.3.4/16: true
}
// ExampleParseSubnets shows ParseSubnets on a mixed list, on no
// arguments, and the indexed error for an invalid CIDR.
func ExampleParseSubnets() {
	ns, err := netutil.ParseSubnets("1.2.3.4", "1.2.3.4/16")
	fmt.Println("error: ", err)
	fmt.Println("networks:", ns)
	fmt.Println()

	// No arguments is not an error: an empty list is returned.
	ns, err = netutil.ParseSubnets()
	fmt.Println("error: ", err)
	fmt.Println("networks:", ns)
	fmt.Println()

	// An invalid entry reports its index and yields no networks.
	ns, err = netutil.ParseSubnets("4.3.2.1/32", "5.5.5.5/33")
	fmt.Println("error: ", err)
	fmt.Println("networks:", ns)

	// Output:
	//
	// error: <nil>
	// networks: [1.2.3.4/32 1.2.3.4/16]
	//
	// error: <nil>
	// networks: []
	//
	// error: parsing network at index 1: bad cidr address "5.5.5.5/33"
	// networks: []
}
// ExampleSingleIPSubnet shows that SingleIPSubnet builds a single-host
// network: /32 for IPv4 and /128 for IPv6.
func ExampleSingleIPSubnet() {
	ip4 := net.IP{1, 2, 3, 4}
	otherIP4 := net.IP{1, 2, 3, 5}
	n := netutil.SingleIPSubnet(ip4)
	fmt.Printf("%s is in %s: %t\n", ip4, n, n.Contains(ip4))
	fmt.Printf("%s is in %s: %t\n", otherIP4, n, n.Contains(otherIP4))

	ip6 := net.ParseIP("1234::cdef")
	otherIP6 := net.ParseIP("1234::cdff")
	n = netutil.SingleIPSubnet(ip6)
	fmt.Printf("%s is in %s: %t\n", ip6, n, n.Contains(ip6))
	fmt.Printf("%s is in %s: %t\n", otherIP6, n, n.Contains(otherIP6))

	// Output:
	//
	// 1.2.3.4 is in 1.2.3.4/32: true
	// 1.2.3.5 is in 1.2.3.4/32: false
	// 1234::cdef is in 1234::cdef/128: true
	// 1234::cdff is in 1234::cdef/128: false
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alpha
import (
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
// DCLUptimeCheckConfigSchema returns the declarative resource schema for
// the Monitoring UptimeCheckConfig resource: its CRUD/list path
// descriptions plus the full component property tree. Generated-style
// code — keep edits mechanical.
func DCLUptimeCheckConfigSchema() *dcl.Schema {
return &dcl.Schema{
Info: &dcl.Info{
Title: "Monitoring/UptimeCheckConfig",
Description: "The Monitoring UptimeCheckConfig resource",
StructName: "UptimeCheckConfig",
},
// Standard get/apply/delete/delete-all/list entry points.
Paths: &dcl.Paths{
Get: &dcl.Path{
Description: "The function used to get information about a UptimeCheckConfig",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "uptimeCheckConfig",
Required: true,
Description: "A full instance of a UptimeCheckConfig",
},
},
},
Apply: &dcl.Path{
Description: "The function used to apply information about a UptimeCheckConfig",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "uptimeCheckConfig",
Required: true,
Description: "A full instance of a UptimeCheckConfig",
},
},
},
Delete: &dcl.Path{
Description: "The function used to delete a UptimeCheckConfig",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "uptimeCheckConfig",
Required: true,
Description: "A full instance of a UptimeCheckConfig",
},
},
},
DeleteAll: &dcl.Path{
Description: "The function used to delete all UptimeCheckConfig",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "project",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
},
},
List: &dcl.Path{
Description: "The function used to list information about many UptimeCheckConfig",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "project",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
},
},
},
// The resource's property schema.
Components: &dcl.Components{
Schemas: map[string]*dcl.Component{
"UptimeCheckConfig": &dcl.Component{
Title: "UptimeCheckConfig",
ID: "projects/{{project}}/uptimeCheckConfigs/{{name}}",
UsesStateHint: true,
ParentContainer: "project",
HasCreate: true,
SchemaProperty: dcl.Property{
Type: "object",
Required: []string{
"displayName",
"timeout",
},
Properties: map[string]*dcl.Property{
"contentMatchers": &dcl.Property{
Type: "array",
GoName: "ContentMatchers",
Description: "The content that is expected to appear in the data returned by the target server against which the check is run. Currently, only the first entry in the `content_matchers` list is supported, and additional entries will be ignored. This field is optional and should only be specified if a content match is required as part of the/ Uptime check.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "object",
GoType: "UptimeCheckConfigContentMatchers",
Required: []string{
"content",
},
Properties: map[string]*dcl.Property{
"content": &dcl.Property{
Type: "string",
GoName: "Content",
},
"matcher": &dcl.Property{
Type: "string",
GoName: "Matcher",
GoType: "UptimeCheckConfigContentMatchersMatcherEnum",
Description: " Possible values: CONTENT_MATCHER_OPTION_UNSPECIFIED, CONTAINS_STRING, NOT_CONTAINS_STRING, MATCHES_REGEX, NOT_MATCHES_REGEX",
Default: "CONTAINS_STRING",
Enum: []string{
"CONTENT_MATCHER_OPTION_UNSPECIFIED",
"CONTAINS_STRING",
"NOT_CONTAINS_STRING",
"MATCHES_REGEX",
"NOT_MATCHES_REGEX",
},
},
},
},
},
"displayName": &dcl.Property{
Type: "string",
GoName: "DisplayName",
Description: "A human-friendly name for the Uptime check configuration. The display name should be unique within a Stackdriver Workspace in order to make it easier to identify; however, uniqueness is not enforced. Required.",
},
"httpCheck": &dcl.Property{
Type: "object",
GoName: "HttpCheck",
GoType: "UptimeCheckConfigHttpCheck",
Description: "Contains information needed to make an HTTP or HTTPS check.",
Conflicts: []string{
"tcpCheck",
},
Properties: map[string]*dcl.Property{
"authInfo": &dcl.Property{
Type: "object",
GoName: "AuthInfo",
GoType: "UptimeCheckConfigHttpCheckAuthInfo",
Description: "The authentication information. Optional when creating an HTTP check; defaults to empty.",
Required: []string{
"username",
"password",
},
Properties: map[string]*dcl.Property{
"password": &dcl.Property{
Type: "string",
GoName: "Password",
Sensitive: true,
Unreadable: true,
},
"username": &dcl.Property{
Type: "string",
GoName: "Username",
},
},
},
"body": &dcl.Property{
Type: "string",
GoName: "Body",
Description: "The request body associated with the HTTP POST request. If `content_type` is `URL_ENCODED`, the body passed in must be URL-encoded. Users can provide a `Content-Length` header via the `headers` field or the API will do so. If the `request_method` is `GET` and `body` is not empty, the API will return an error. The maximum byte size is 1 megabyte. Note: As with all `bytes` fields JSON representations are base64 encoded. e.g.: \"foo=bar\" in URL-encoded form is \"foo%3Dbar\" and in base64 encoding is \"Zm9vJTI1M0RiYXI=\".",
},
"contentType": &dcl.Property{
Type: "string",
GoName: "ContentType",
GoType: "UptimeCheckConfigHttpCheckContentTypeEnum",
Description: "The content type to use for the check. Possible values: TYPE_UNSPECIFIED, URL_ENCODED",
Immutable: true,
Enum: []string{
"TYPE_UNSPECIFIED",
"URL_ENCODED",
},
},
"headers": &dcl.Property{
Type: "object",
AdditionalProperties: &dcl.Property{
Type: "string",
},
GoName: "Headers",
Description: "The list of headers to send as part of the Uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second. The maximum number of headers allowed is 100.",
ServerDefault: true,
Unreadable: true,
},
"maskHeaders": &dcl.Property{
Type: "boolean",
GoName: "MaskHeaders",
Description: "Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. On Get/List calls, if `mask_headers` is set to `true` then the headers will be obscured with `******.`",
Immutable: true,
},
"path": &dcl.Property{
Type: "string",
GoName: "Path",
Description: "Optional (defaults to \"/\"). The path to the page against which to run the check. Will be combined with the `host` (specified within the `monitored_resource`) and `port` to construct the full URL. If the provided path does not begin with \"/\", a \"/\" will be prepended automatically.",
Default: "/",
},
"port": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Port",
Description: "Optional (defaults to 80 when `use_ssl` is `false`, and 443 when `use_ssl` is `true`). The TCP port on the HTTP server against which to run the check. Will be combined with host (specified within the `monitored_resource`) and `path` to construct the full URL.",
ServerDefault: true,
},
"requestMethod": &dcl.Property{
Type: "string",
GoName: "RequestMethod",
GoType: "UptimeCheckConfigHttpCheckRequestMethodEnum",
Description: "The HTTP request method to use for the check. If set to `METHOD_UNSPECIFIED` then `request_method` defaults to `GET`.",
Immutable: true,
Default: "GET",
Enum: []string{
"METHOD_UNSPECIFIED",
"GET",
"POST",
},
},
"useSsl": &dcl.Property{
Type: "boolean",
GoName: "UseSsl",
Description: "If `true`, use HTTPS instead of HTTP to run the check.",
},
"validateSsl": &dcl.Property{
Type: "boolean",
GoName: "ValidateSsl",
Description: "Boolean specifying whether to include SSL certificate validation as a part of the Uptime check. Only applies to checks where `monitored_resource` is set to `uptime_url`. If `use_ssl` is `false`, setting `validate_ssl` to `true` has no effect.",
},
},
},
"monitoredResource": &dcl.Property{
Type: "object",
GoName: "MonitoredResource",
GoType: "UptimeCheckConfigMonitoredResource",
Description: "The [monitored resource](https://cloud.google.com/monitoring/api/resources) associated with the configuration. The following monitored resource types are supported for Uptime checks: `uptime_url`, `gce_instance`, `gae_app`, `aws_ec2_instance`, `aws_elb_load_balancer`",
Immutable: true,
Conflicts: []string{
"resourceGroup",
},
Required: []string{
"type",
"filterLabels",
},
Properties: map[string]*dcl.Property{
"filterLabels": &dcl.Property{
Type: "object",
AdditionalProperties: &dcl.Property{
Type: "string",
},
GoName: "FilterLabels",
Immutable: true,
},
"type": &dcl.Property{
Type: "string",
GoName: "Type",
Immutable: true,
},
},
},
"name": &dcl.Property{
Type: "string",
GoName: "Name",
Description: "A unique resource name for this Uptime check configuration. The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] This field should be omitted when creating the Uptime check configuration; on create, the resource name is assigned by the server and included in the response.",
Immutable: true,
ServerGeneratedParameter: true,
},
"period": &dcl.Property{
Type: "string",
GoName: "Period",
Description: "How often, in seconds, the Uptime check is performed. Currently, the only supported values are `60s` (1 minute), `300s` (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, defaults to `60s`.",
Default: "60s",
},
"project": &dcl.Property{
Type: "string",
GoName: "Project",
Description: "The project for this uptime check config.",
Immutable: true,
ResourceReferences: []*dcl.PropertyResourceReference{
&dcl.PropertyResourceReference{
Resource: "Cloudresourcemanager/Project",
Field: "name",
Parent: true,
},
},
},
"resourceGroup": &dcl.Property{
Type: "object",
GoName: "ResourceGroup",
GoType: "UptimeCheckConfigResourceGroup",
Description: "The group resource associated with the configuration.",
Immutable: true,
Conflicts: []string{
"monitoredResource",
},
Properties: map[string]*dcl.Property{
"groupId": &dcl.Property{
Type: "string",
GoName: "GroupId",
Description: "The group of resources being monitored. Should be only the `[GROUP_ID]`, and not the full-path `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`.",
Immutable: true,
ResourceReferences: []*dcl.PropertyResourceReference{
&dcl.PropertyResourceReference{
Resource: "Monitoring/Group",
Field: "name",
},
},
},
"resourceType": &dcl.Property{
Type: "string",
GoName: "ResourceType",
GoType: "UptimeCheckConfigResourceGroupResourceTypeEnum",
Description: "The resource type of the group members. Possible values: RESOURCE_TYPE_UNSPECIFIED, INSTANCE, AWS_ELB_LOAD_BALANCER",
Immutable: true,
Enum: []string{
"RESOURCE_TYPE_UNSPECIFIED",
"INSTANCE",
"AWS_ELB_LOAD_BALANCER",
},
},
},
},
"selectedRegions": &dcl.Property{
Type: "array",
GoName: "SelectedRegions",
Description: "The list of regions from which the check will be run. Some regions contain one location, and others contain more than one. If this field is specified, enough regions must be provided to include a minimum of 3 locations. Not specifying this field will result in Uptime checks running from all available regions.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"tcpCheck": &dcl.Property{
Type: "object",
GoName: "TcpCheck",
GoType: "UptimeCheckConfigTcpCheck",
Description: "Contains information needed to make a TCP check.",
Conflicts: []string{
"httpCheck",
},
Required: []string{
"port",
},
Properties: map[string]*dcl.Property{
"port": &dcl.Property{
Type: "integer",
Format: "int64",
GoName: "Port",
Description: "The TCP port on the server against which to run the check. Will be combined with host (specified within the `monitored_resource`) to construct the full URL. Required.",
},
},
},
"timeout": &dcl.Property{
Type: "string",
GoName: "Timeout",
Description: "The maximum amount of time to wait for the request to complete (must be between 1 and 60 seconds). Required.",
},
},
},
},
},
},
}
}
|
package template
import (
"fmt"
"regexp"
"strings"
)
// CodeBlock tracks one whole import clause found in the source: Old is
// the text as matched, New is the (possibly pruned) replacement text.
type CodeBlock struct {
	Old string
	New string
}
// CodeSegment tracks a single import entry within a clause: Old/New are
// the entry text, Block is the trimmed clause body it belongs to.
type CodeSegment struct {
	Old   string
	New   string
	Block string
}
// FilterImports removes import entries that are not referenced anywhere
// in the given Go source and returns the rewritten source. Both
// parenthesized import blocks and single-line imports are handled;
// aliased imports are matched by their alias, unaliased ones by the last
// path segment. Blank ("_") and dot (".") imports are always kept. Note
// that every line of the returned source is left-trimmed.
func FilterImports(src []byte) (dist []byte) {
	content := string(src)
	// Build a comment-free, whitespace-trimmed copy for the usage check,
	// so "pkg." inside // comments does not count as a reference.
	lines := strings.Split(content, "\n")
	makeLines := make([]string, 0)
	for _, v := range lines {
		v = strings.TrimSpace(v)
		if strings.HasPrefix(v, "//") {
			continue
		}
		makeLines = append(makeLines, v)
	}
	makeContent := strings.Join(makeLines, "\n")
	r1 := regexp.MustCompile(`import\s+\(([\s\S]*?)\)`)                            // import ( ... ) blocks
	r2 := regexp.MustCompile(`import\s+([A-Za-z0-9-._/"]*\s*"[A-Za-z0-9-._/"]+")`) // single-line imports
	r3 := regexp.MustCompile(`([A-Za-z0-9-._/"]*)\s*"([A-Za-z0-9-._/"]+)"`)        // one entry: optional alias + quoted path
	blocks := make([]string, 0)
	blocksMap := make(map[string]*CodeBlock)
	// collect records every captured import clause; the two regex passes
	// previously duplicated this loop verbatim.
	collect := func(matches [][]string) {
		for _, m := range matches {
			if len(m) < 2 {
				// Fix: this guard was `len(m) < 0`, which can never be
				// true; check for a missing capture group instead.
				continue
			}
			c := &CodeBlock{Old: m[0], New: m[0]}
			for i, g := range m {
				if i > 0 {
					block := strings.TrimSpace(g)
					blocks = append(blocks, block)
					blocksMap[block] = c
				}
			}
		}
	}
	collect(r1.FindAllStringSubmatch(content, -1))
	collect(r2.FindAllStringSubmatch(content, -1))
	packages := make([]string, 0)
	packagesMap := make(map[string]*CodeSegment)
	for _, block := range blocks {
		for _, item := range strings.Split(block, "\n") {
			item = strings.TrimSpace(item)
			for _, m := range r3.FindAllStringSubmatch(item, -1) {
				if len(m) != 3 {
					continue
				}
				seg := &CodeSegment{Old: m[0], New: m[0], Block: block}
				// Prefer the explicit alias; otherwise use the final
				// segment of the import path.
				name := strings.TrimSpace(m[1])
				if name == "" {
					parts := strings.Split(m[2], "/")
					name = strings.TrimSpace(parts[len(parts)-1])
				}
				if name != "" {
					packages = append(packages, name)
					packagesMap[name] = seg
				}
			}
		}
	}
	for _, name := range packages {
		if name == "_" || name == "." {
			// Blank and dot imports cannot be referenced by name.
			continue
		}
		// A reference looks like "<separator>name." in the comment-free copy.
		used := regexp.MustCompile(fmt.Sprintf(`[\s(*+-/]+%s\.`, name))
		if !used.MatchString(makeContent) {
			seg := packagesMap[name]
			blocksMap[seg.Block].New = strings.ReplaceAll(blocksMap[seg.Block].New, seg.New, "")
		}
	}
	for _, v := range blocksMap {
		content = strings.ReplaceAll(content, v.Old, v.New)
	}
	// Drop now-empty import clauses. Fix: this pattern was recompiled on
	// every line of the file; compile it once.
	emptyImport := regexp.MustCompile(`import\s+\(\s*\)`)
	lines = strings.Split(content, "\n")
	result := make([]string, 0)
	for _, v := range lines {
		v = strings.TrimSpace(v)
		if v == "import" || emptyImport.MatchString(v) {
			continue
		}
		result = append(result, v)
	}
	return []byte(strings.Join(result, "\n"))
}
|
/*
Quentin Tarantino is a famous Hollywood filmmaker and actor. His films have a unique characteristic of connecting with youth by using popular culture references.
They are usually divided into various sub-parts denoted by chapters. The plots of his stories are never linear; he always makes sure that the chapters are never shown in the order Chapter 1, Chapter 2, Chapter 3, ..., Chapter n.
He believes doing this is an insult to the intelligence of viewers. For example, Chapter 1, Chapter 2, Chapter 3 can never be a chapter sequence in Tarantino's movies; he can have the sequence Chapter 2, Chapter 3, Chapter 1 instead.
Recently Santa gifted Tom a movie which consists of n chapters. This movie can possibly be directed by your favorite Tarantino or by someone else.
Directors in hollywood can sometimes be very sloppy, they sometimes drop some chapters too and stupid they are that they will leave numbering of the chapters as it is and one can easily identify the chapters that were dropped.
For example, if you find the sequence of chapters as - chapter 1, chapter 5, chapter 3, don't be surprised, they did not even take burden of renumbering the chapters to chapter 1, chapter 3, chapter 2.
They can sometimes repeat the chapter numbers too. Remember that Tarantino is not a sloppy director, so he never makes this kind of stupid mistakes.
Tom happened to find the sequence of chapters in the movie, this sequence contains n chapters, i-th of which is denoted by chapteri.
Tom is a big fan of movies of Tarantino. So, Tom would like to check whether the movie could possibly be directed by Tarantino or not? Please help Tom, he really needs it.
Input
The first line contains an integer T denoting the number of test cases. T test cases follow.
The first line of each test case contains an integer n denoting number of chapters of the movie.
The second line of each test case contains n space separated integers chapteri, denoting numbering of ith chapter.
Output
For each test case, output "yes" or "no" in a separate line.
Constraints
1 ≤ T ≤ 100
2 ≤ N ≤ 100
1 ≤ chapteri ≤ 500
*/
package main
import (
"sort"
)
// main sanity-checks directed against the examples from the problem
// statement, panicking via assert on any mismatch.
func main() {
	cases := []struct {
		chapters []int
		want     bool
	}{
		{[]int{2, 3, 1}, true},
		{[]int{1, 5, 3}, false},
		{[]int{1, 2, 3}, false},
		{[]int{1, 1}, false},
	}
	for _, c := range cases {
		assert(directed(c.chapters) == c.want)
	}
}
// assert panics when its argument is false; a minimal test helper.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// directed reports whether the chapter sequence could be Tarantino's:
// it must be a permutation of 1..n (no drops, no repeats) in which no
// chapter i sits at position i (i.e. a derangement of the sorted order).
func directed(a []int) bool {
	n := len(a)
	// Every chapter 1..n must appear exactly once.
	counts := map[int]int{}
	for _, ch := range a {
		counts[ch]++
	}
	for i := 1; i <= n; i++ {
		if counts[i] != 1 {
			return false
		}
	}
	// No position may hold its sorted-order chapter.
	sortedChapters := make([]int, n)
	copy(sortedChapters, a)
	sort.Ints(sortedChapters)
	for i, ch := range a {
		if sortedChapters[i] == ch {
			return false
		}
	}
	return true
}
|
package types
import (
"encoding/binary"
"time"
"github.com/Secured-Finance/dione/types"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/ethereum/go-ethereum/common"
"github.com/wealdtech/go-merkletree"
"github.com/wealdtech/go-merkletree/keccak256"
"github.com/libp2p/go-libp2p-core/peer"
)
// Block is one chain block: a header plus the transactions it commits to.
type Block struct {
	Header *BlockHeader
	Data   []*Transaction
}
// BlockHeader carries a block's consensus metadata. Field roles below are
// as populated by CreateBlock.
type BlockHeader struct {
	Timestamp     int64             // creation time (UnixNano in CreateBlock)
	Height        uint64            // previous height + 1; genesis is 0
	Hash          []byte            // merkle root over tx hashes, LastHash and the timestamp
	LastHash      []byte            // hash of the previous block
	LastHashProof *merkletree.Proof // proof that LastHash is part of this block's tree
	Proposer      *peer.ID          // libp2p peer id derived from the signing key
	ProposerEth   common.Address    // proposer's Ethereum address
	Signature     []byte            // signature over Hash by the proposer's private key
	BeaconEntry   types.BeaconEntry
	ElectionProof *types.ElectionProof
}
// GenesisBlock constructs the hard-coded first block of the chain:
// height zero, a fixed timestamp, the literal hash "DIMICANDUM", and no
// transactions.
func GenesisBlock() *Block {
	header := &BlockHeader{
		Timestamp: 1620845070,
		Height:    0,
		Hash:      []byte("DIMICANDUM"),
	}
	return &Block{Header: header, Data: []*Transaction{}}
}
// CreateBlock assembles and signs the successor of lastBlockHeader. The
// block hash is the root of a keccak256 merkle tree built over the
// transaction hashes, the previous block hash, and the timestamp; merkle
// proofs for the previous hash and for every transaction are generated
// from the same tree.
//
// NOTE(review): Timestamp here is time.Now().UnixNano(), while
// GenesisBlock uses a seconds-scale constant — confirm the intended unit.
func CreateBlock(lastBlockHeader *BlockHeader, txs []*Transaction, minerEth common.Address, privateKey crypto.PrivKey, eproof *types.ElectionProof) (*Block, error) {
	timestamp := time.Now().UnixNano()
	// extract hashes from transactions
	var merkleHashes [][]byte
	for _, tx := range txs {
		merkleHashes = append(merkleHashes, tx.Hash)
	}
	// Chain linkage: the previous block hash is a leaf of this tree.
	merkleHashes = append(merkleHashes, lastBlockHeader.Hash)
	// we use timestamp as salt for block hash, because salt doesn't work in this merkle tree library for some reason
	timestampBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(timestampBytes, uint64(timestamp))
	merkleHashes = append(merkleHashes, timestampBytes)
	tree, err := merkletree.NewUsing(merkleHashes, keccak256.New(), true)
	if err != nil {
		return nil, err
	}
	// fetch merkle tree root hash (block hash)
	blockHash := tree.Root()
	// sign the block hash
	s, err := privateKey.Sign(blockHash)
	if err != nil {
		return nil, err
	}
	lastHashProof, err := tree.GenerateProof(lastBlockHeader.Hash, 0)
	if err != nil {
		return nil, err
	}
	// The proposer identity is derived from the same key that signed.
	proposer, err := peer.IDFromPrivateKey(privateKey)
	if err != nil {
		return nil, err
	}
	// Attach an inclusion proof to every transaction.
	for _, tx := range txs {
		mp, err := tree.GenerateProof(tx.Hash, 0)
		if err != nil {
			return nil, err
		}
		tx.MerkleProof = *mp
	}
	block := &Block{
		Header: &BlockHeader{
			Timestamp:     timestamp,
			Height:        lastBlockHeader.Height + 1,
			Proposer:      &proposer,
			ProposerEth:   minerEth,
			Signature:     s,
			Hash:          blockHash,
			LastHash:      lastBlockHeader.Hash,
			LastHashProof: lastHashProof,
			ElectionProof: eproof,
		},
		Data: txs,
	}
	return block, nil
}
|
package vm
import (
"runtime"
"sync"
"sync/atomic"
"time"
"bounds"
"defs"
"fdops"
"mem"
"res"
"ustr"
"util"
)
// Vm_t is a user address space: its virtual memory regions plus the page
// table that backs them.
type Vm_t struct {
	// lock for vmregion, pmpages, pmap, and p_pmap
	sync.Mutex
	Vmregion Vmregion_t // mapped virtual memory regions of this space
	// pmap pages
	Pmap      *mem.Pmap_t // kernel-visible page table root
	P_pmap    mem.Pa_t    // physical address of the page table root
	pgfltaken bool        // set while the pmap lock is held; checked by Lockassert_pmap
}
// Lock_pmap acquires the address-space lock and records that it is held
// so Lockassert_pmap can catch callers that forgot to lock.
func (as *Vm_t) Lock_pmap() {
	// useful for finding deadlock bugs with one cpu
	//if p.pgfltaken {
	//	panic("double lock")
	//}
	as.Lock()
	as.pgfltaken = true
}
// Unlock_pmap clears the held marker and releases the address-space lock.
func (as *Vm_t) Unlock_pmap() {
	as.pgfltaken = false
	as.Unlock()
}
// Lockassert_pmap panics unless the pmap lock was taken via Lock_pmap.
func (as *Vm_t) Lockassert_pmap() {
	if !as.pgfltaken {
		panic("pgfl lock must be held")
	}
}
// Userdmap8_inner returns a byte slice aliasing the physical page backing
// user virtual address va, from va's in-page offset to the end of the
// page buffer (assumes mem.Pg2bytes yields one page — confirm). If k2u is
// true the caller intends to write through the mapping, so a user write
// fault is simulated unless the page is already present and not
// copy-on-write. The pmap lock must be held.
func (as *Vm_t) Userdmap8_inner(va int, k2u bool) ([]uint8, defs.Err_t) {
	as.Lockassert_pmap()
	voff := va & int(PGOFFSET) // byte offset of va within its page
	uva := uintptr(va)
	vmi, ok := as.Vmregion.Lookup(uva)
	if !ok {
		// va is not part of any mapped region.
		return nil, -defs.EFAULT
	}
	pte, ok := vmi.Ptefor(as.Pmap, uva)
	if !ok {
		return nil, -defs.ENOMEM
	}
	ecode := uintptr(PTE_U)
	needfault := true
	isp := *pte&PTE_P != 0 // page present?
	if k2u {
		// Fault as a user write so COW/unbacked pages get fixed up first.
		ecode |= uintptr(PTE_W)
		// XXX how to distinguish between user asking kernel to write
		// to read-only page and kernel writing a page mapped read-only
		// to user? (exec args)
		//isw := *pte & PTE_W != 0
		//if isp && isw {
		iscow := *pte&PTE_COW != 0
		if isp && !iscow {
			needfault = false
		}
	} else {
		if isp {
			needfault = false
		}
	}
	if needfault {
		if err := Sys_pgfault(as, vmi, uva, ecode); err != 0 {
			return nil, err
		}
	}
	pg := mem.Physmem.Dmap(*pte & PTE_ADDR)
	bpg := mem.Pg2bytes(pg)
	return bpg[voff:], 0
}
// _userdmap8 is the locking wrapper around Userdmap8_inner. It and
// Userdmap8r must only be used if concurrent modification of the address
// space is impossible.
func (as *Vm_t) _userdmap8(va int, k2u bool) ([]uint8, defs.Err_t) {
	as.Lock_pmap()
	defer as.Unlock_pmap()
	return as.Userdmap8_inner(va, k2u)
}
// Userdmap8r is the read-only variant of _userdmap8 (no write fault is
// simulated).
func (as *Vm_t) Userdmap8r(va int) ([]uint8, defs.Err_t) {
	return as._userdmap8(va, false)
}
// usermapped reports whether va falls inside some mapped region. Note the
// n argument is currently unused — only the start address is checked.
func (as *Vm_t) usermapped(va, n int) bool {
	as.Lock_pmap()
	defer as.Unlock_pmap()
	if _, found := as.Vmregion.Lookup(uintptr(va)); found {
		return true
	}
	return false
}
// Userreadn reads an n-byte (n <= 8) integer from user address va,
// taking the pmap lock around the inner read.
func (as *Vm_t) Userreadn(va, n int) (int, defs.Err_t) {
	as.Lock_pmap()
	defer as.Unlock_pmap()
	return as.userreadn_inner(va, n)
}
// userreadn_inner reads n (<= 8) bytes of user memory at va and assembles
// them into an int, least-significant byte first (assumes util.Readn
// decodes little-endian — confirm). The read may cross a page boundary.
// The pmap lock must be held.
func (as *Vm_t) userreadn_inner(va, n int) (int, defs.Err_t) {
	as.Lockassert_pmap()
	if n > 8 {
		panic("large n")
	}
	var ret int
	var src []uint8
	var err defs.Err_t
	// Each Userdmap8_inner call yields bytes up to the end of one page;
	// advance by len(src) per iteration until n bytes are consumed.
	for i := 0; i < n; i += len(src) {
		src, err = as.Userdmap8_inner(va+i, false)
		if err != 0 {
			return 0, err
		}
		// Clamp this chunk to the bytes still needed.
		l := n - i
		if len(src) < l {
			l = len(src)
		}
		v := util.Readn(src, l, 0)
		ret |= v << (8 * uint(i))
	}
	return ret, 0
}
// Userwriten writes the low n (<= 8) bytes of val to user address va,
// holding the pmap lock for the duration. The write may cross a page
// boundary.
func (as *Vm_t) Userwriten(va, n, val int) defs.Err_t {
	if n > 8 {
		panic("large n")
	}
	as.Lock_pmap()
	defer as.Unlock_pmap()
	var dst []uint8
	for i := 0; i < n; i += len(dst) {
		// Shift so the next unwritten byte of val is in the low position.
		v := val >> (8 * uint(i))
		t, err := as.Userdmap8_inner(va+i, true)
		dst = t
		if err != 0 {
			return err
		}
		// NOTE(review): unlike userreadn_inner, the length n-i is not
		// clamped to len(dst); confirm util.Writen tolerates a short
		// destination when the write crosses a page boundary.
		util.Writen(dst, n-i, 0, v)
	}
	return 0
}
// first ret value is the string from user space second is error
func (as *Vm_t) Userstr(uva int, lenmax int) (ustr.Ustr, defs.Err_t) {
if lenmax < 0 {
return nil, 0
}
as.Lock_pmap()
//defer p.Vm.Unlock_pmap()
i := 0
s := ustr.MkUstr()
for {
str, err := as.Userdmap8_inner(uva+i, false)
if err != 0 {
as.Unlock_pmap()
return s, err
}
for j, c := range str {
if c == 0 {
s = append(s, str[:j]...)
// s = s + string(str[:j])
as.Unlock_pmap()
return s, 0
}
}
s = append(s, str...)
// s = s + string(str)
i += len(str)
if len(s) >= lenmax {
as.Unlock_pmap()
return nil, -defs.ENAMETOOLONG
}
}
}
// Usertimespec reads a {seconds, nanoseconds} pair of 8-byte integers at
// user address va and returns it both as a total duration and as an
// absolute time. Negative components yield -EINVAL.
func (as *Vm_t) Usertimespec(va int) (time.Duration, time.Time, defs.Err_t) {
	var none time.Time
	secs, err := as.Userreadn(va, 8)
	if err != 0 {
		return 0, none, err
	}
	nsecs, err := as.Userreadn(va+8, 8)
	if err != 0 {
		return 0, none, err
	}
	if secs < 0 || nsecs < 0 {
		return 0, none, -defs.EINVAL
	}
	total := time.Duration(secs)*time.Second + time.Duration(nsecs)*time.Nanosecond
	return total, time.Unix(int64(secs), int64(nsecs)), 0
}
// K2user copies src to the user virtual address uva, holding the pmap
// lock. It may copy only part of src if uva + len(src) is not mapped.
func (as *Vm_t) K2user(src []uint8, uva int) defs.Err_t {
	as.Lock_pmap()
	defer as.Unlock_pmap()
	return as.K2user_inner(src, uva)
}
// K2user_inner copies src into user memory at uva one page-bounded chunk
// at a time, charging the resource accounting on each iteration. The
// pmap lock must be held.
func (as *Vm_t) K2user_inner(src []uint8, uva int) defs.Err_t {
	as.Lockassert_pmap()
	cnt := 0
	for len(src) != 0 {
		gimme := bounds.Bounds(bounds.B_ASPACE_T_K2USER_INNER)
		if !res.Resadd_noblock(gimme) {
			return -defs.ENOHEAP
		}
		dst, err := as.Userdmap8_inner(uva+cnt, true)
		if err != 0 {
			return err
		}
		// copy moves min(len(dst), len(src)) bytes.
		did := copy(dst, src)
		src = src[did:]
		cnt += did
	}
	return 0
}
// User2k copies len(dst) bytes from userspace address uva into dst,
// holding the pmap lock.
func (as *Vm_t) User2k(dst []uint8, uva int) defs.Err_t {
	as.Lock_pmap()
	defer as.Unlock_pmap()
	return as.User2k_inner(dst, uva)
}
// User2k_inner copies len(dst) bytes from userspace address uva to dst
// with the pmap lock already held. Fails if a source page is unmapped
// or the reservation heap is exhausted.
func (as *Vm_t) User2k_inner(dst []uint8, uva int) defs.Err_t {
	as.Lockassert_pmap()
	cnt := 0
	for len(dst) != 0 {
		gimme := bounds.Bounds(bounds.B_ASPACE_T_USER2K_INNER)
		if !res.Resadd_noblock(gimme) {
			return -defs.ENOHEAP
		}
		// map the user page containing uva+cnt (read-only)
		src, err := as.Userdmap8_inner(uva+cnt, false)
		if err != 0 {
			return err
		}
		// copy() transfers at most the remainder of the current page
		did := copy(dst, src)
		dst = dst[did:]
		cnt += did
	}
	return 0
}
// Unusedva_inner returns the start of an unused span of the user
// virtual address space of at least len bytes, searching from startva
// (rounded down to a page and clamped to USERMIN). Must be called with
// the pmap lock held.
func (as *Vm_t) Unusedva_inner(startva, len int) int {
	as.Lockassert_pmap()
	if len < 0 || len > 1<<48 {
		panic("weird len")
	}
	startva = util.Rounddown(startva, mem.PGSIZE)
	if startva < mem.USERMIN {
		startva = mem.USERMIN
	}
	// find an empty hole in the vm region tree
	_ret, _l := as.Vmregion.empty(uintptr(startva), uintptr(len))
	ret := int(_ret)
	l := int(_l)
	// prefer startva itself when it falls inside the returned hole
	if startva > ret && startva < ret+l {
		ret = startva
	}
	return ret
}
// _numtoapicid maps a CPU number to its APIC id; installed via Cpumap.
var _numtoapicid func(int) uint32

// Cpumap registers the CPU-number-to-APIC-id translation used when
// sending TLB shootdowns.
func Cpumap(f func(int) uint32) {
	_numtoapicid = f
}
// Tlbshoot invalidates pgcount pages starting at startva in every TLB
// that may cache translations from this address space. Must be called
// with the pmap lock held.
func (as *Vm_t) Tlbshoot(startva uintptr, pgcount int) {
	if pgcount == 0 {
		return
	}
	as.Lockassert_pmap()
	if _numtoapicid == nil {
		panic("cpumap not initted")
	}
	// fast path: the pmap is loaded in exactly one CPU's cr3, and it
	// happens to be this CPU. we detect that one CPU has the pmap loaded
	// by a pmap ref count == 2 (1 for Proc_t ref, 1 for CPU).
	p_pmap := as.P_pmap
	refp, _ := mem.Physmem.Refaddr(p_pmap)
	// XXX XXX XXX use Tlbaddr to implement Condflush more simply
	if runtime.Condflush(refp, uintptr(p_pmap), startva, pgcount) {
		return
	}
	tlbp := mem.Physmem.Tlbaddr(p_pmap)
	// slow path, must send TLB shootdowns
	tlb_shootdown(as.P_pmap, tlbp, startva, pgcount)
}
// Sys_pgfault handles a page fault at faultaddr for the mapping vmi,
// installing a page in the pmap. Returns 0 (no error) if the fault was
// handled successfully. The pmap must be locked by the caller.
func Sys_pgfault(as *Vm_t, vmi *Vminfo_t, faultaddr, ecode uintptr) defs.Err_t {
	// guard mappings (Perms == 0) and writes to read-only mappings are
	// genuine faults
	isguard := vmi.Perms == 0
	iswrite := ecode&uintptr(PTE_W) != 0
	writeok := vmi.Perms&uint(PTE_W) != 0
	if isguard || (iswrite && !writeok) {
		return -defs.EFAULT
	}
	// pmap is Lock'ed in Proc_t.pgfault...
	if ecode&uintptr(PTE_U) == 0 {
		// kernel page faults should be noticed and crashed upon in
		// runtime.trap(), but just in case
		panic("kernel page fault")
	}
	if vmi.Mtype == VSANON {
		panic("shared anon pages should always be mapped")
	}
	pte, ok := vmi.Ptefor(as.Pmap, faultaddr)
	if !ok {
		return -defs.ENOMEM
	}
	if (iswrite && *pte&PTE_WASCOW != 0) ||
		(!iswrite && *pte&PTE_P != 0) {
		// two threads simultaneously faulted on same page
		return 0
	}
	var p_pg mem.Pa_t
	isblockpage := false
	perms := PTE_U | PTE_P
	isempty := true
	// shared file mappings are handled the same way regardless of whether
	// the fault is read or write
	if vmi.Mtype == VFILE && vmi.file.shared {
		var err defs.Err_t
		_, p_pg, err = vmi.Filepage(faultaddr)
		if err != 0 {
			return err
		}
		isblockpage = true
		if vmi.Perms&uint(PTE_W) != 0 {
			perms |= PTE_W
		}
	} else if iswrite {
		// write fault: copy-on-write or first touch of a private page
		// XXXPANIC
		if *pte&PTE_W != 0 {
			panic("bad state")
		}
		var pgsrc *mem.Pg_t
		var p_bpg mem.Pa_t
		// the copy-on-write page may be specified in the pte or it may
		// not have been mapped at all yet.
		cow := *pte&PTE_COW != 0
		if cow {
			// if this anonymous COW page is mapped exactly once
			// (i.e. only this mapping maps the page), we can
			// claim the page, skip the copy, and mark it writable.
			phys := *pte & PTE_ADDR
			ref, _ := mem.Physmem.Refaddr(phys)
			if vmi.Mtype == VANON && atomic.LoadInt32(ref) == 1 &&
				phys != mem.P_zeropg {
				tmp := *pte &^ PTE_COW
				tmp |= PTE_W | PTE_WASCOW
				*pte = tmp
				as.Tlbshoot(faultaddr, 1)
				return 0
			}
			pgsrc = mem.Physmem.Dmap(phys)
			isempty = false
		} else {
			// XXXPANIC
			if *pte != 0 {
				panic("no")
			}
			switch vmi.Mtype {
			case VANON:
				// copy from the shared zero page
				pgsrc = mem.Zeropg
			case VFILE:
				var err defs.Err_t
				pgsrc, p_bpg, err = vmi.Filepage(faultaddr)
				if err != 0 {
					return err
				}
				defer mem.Physmem.Refdown(p_bpg)
			default:
				panic("wut")
			}
		}
		var pg *mem.Pg_t
		var ok bool
		// don't zero new page
		pg, p_pg, ok = mem.Physmem.Refpg_new_nozero()
		if !ok {
			return -defs.ENOMEM
		}
		// copy the source page into the freshly-allocated private page
		*pg = *pgsrc
		perms |= PTE_WASCOW
		perms |= PTE_W
	} else {
		// read fault on a not-yet-mapped private page
		if *pte != 0 {
			panic("must be 0")
		}
		switch vmi.Mtype {
		case VANON:
			// map the shared zero page read-only; a later write COWs
			p_pg = mem.P_zeropg
		case VFILE:
			var err defs.Err_t
			_, p_pg, err = vmi.Filepage(faultaddr)
			if err != 0 {
				return err
			}
			isblockpage = true
		default:
			panic("wut")
		}
		if vmi.Perms&uint(PTE_W) != 0 {
			perms |= PTE_COW
		}
	}
	// mark the mapping accessed (and dirty when writable) up front
	if perms&PTE_W != 0 {
		perms |= PTE_D
	}
	perms |= PTE_A
	var tshoot bool
	if isblockpage {
		tshoot, ok = as.Blockpage_insert(int(faultaddr), p_pg, perms, isempty, pte)
	} else {
		tshoot, ok = as.Page_insert(int(faultaddr), p_pg, perms, isempty, pte)
	}
	if !ok {
		mem.Physmem.Refdown(p_pg)
		return -defs.ENOMEM
	}
	if tshoot {
		as.Tlbshoot(faultaddr, 1)
	}
	return 0
}
// Page_insert maps p_pg at va with the given permissions, taking a
// reference on p_pg. the first return value is true if a present
// mapping was modified (i.e. need to flush TLB). the second return
// value is false if the page insertion failed due to lack of user
// pages. p_pg's ref count is increased so the caller can simply
// Physmem.Refdown()
func (as *Vm_t) Page_insert(va int, p_pg mem.Pa_t, perms mem.Pa_t,
	vempty bool, pte *mem.Pa_t) (bool, bool) {
	return as._page_insert(va, p_pg, perms, vempty, true, pte)
}
// Blockpage_insert maps a block-cache page p_pg at va without taking a
// new reference on it (unlike Page_insert). the first return value is
// true if a present mapping was modified (i.e. need to flush TLB). the
// second return value is false if the page insertion failed due to lack
// of user pages.
func (as *Vm_t) Blockpage_insert(va int, p_pg mem.Pa_t, perms mem.Pa_t,
	vempty bool, pte *mem.Pa_t) (bool, bool) {
	return as._page_insert(va, p_pg, perms, vempty, false, pte)
}
// _page_insert maps p_pg at va with perms, optionally taking a
// reference on p_pg (refup). If pte is nil, the page-table walk is
// performed here and may fail for lack of pages. Returns (TLB flush
// needed, success). Must be called with the pmap lock held.
func (as *Vm_t) _page_insert(va int, p_pg mem.Pa_t, perms mem.Pa_t,
	vempty, refup bool, pte *mem.Pa_t) (bool, bool) {
	as.Lockassert_pmap()
	if refup {
		mem.Physmem.Refup(p_pg)
	}
	if pte == nil {
		var err defs.Err_t
		pte, err = pmap_walk(as.Pmap, va, PTE_U|PTE_W)
		if err != 0 {
			return false, false
		}
	}
	ninval := false
	var p_old mem.Pa_t
	if *pte&PTE_P != 0 {
		if vempty {
			panic("pte not empty")
		}
		if *pte&PTE_U == 0 {
			panic("replacing kernel page")
		}
		ninval = true
		p_old = mem.Pa_t(*pte & PTE_ADDR)
	}
	*pte = p_pg | perms | PTE_P
	// drop the reference on the page we just replaced (after the new
	// mapping is installed)
	if ninval {
		mem.Physmem.Refdown(p_old)
	}
	return ninval, true
}
// Page_remove unmaps the user page at va, dropping the reference on its
// physical page. Returns true when a present mapping was removed; the
// caller is responsible for any TLB shootdown. Must be called with the
// pmap lock held.
func (as *Vm_t) Page_remove(va int) bool {
	as.Lockassert_pmap()
	pte := Pmap_lookup(as.Pmap, va)
	if pte == nil || *pte&PTE_P == 0 {
		return false
	}
	if *pte&PTE_U == 0 {
		panic("removing kernel page")
	}
	p_old := mem.Pa_t(*pte & PTE_ADDR)
	mem.Physmem.Refdown(p_old)
	*pte = 0
	return true
}
// Pgfault resolves the page fault at fa for thread tid, returning 0
// when the fault was handled successfully.
func (as *Vm_t) Pgfault(tid defs.Tid_t, fa, ecode uintptr) defs.Err_t {
	as.Lock_pmap()
	defer as.Unlock_pmap()
	vmi, ok := as.Vmregion.Lookup(fa)
	if !ok {
		return -defs.EFAULT
	}
	return Sys_pgfault(as, vmi, fa, ecode)
}
// Uvmfree releases all user mappings, page-table pages, and mmap'ed
// file references of this address space.
func (as *Vm_t) Uvmfree() {
	Uvmfree_inner(as.Pmap, as.P_pmap, &as.Vmregion)
	// Dec_pmap could free the pmap itself. thus it must come after
	// Uvmfree.
	mem.Physmem.Dec_pmap(as.P_pmap)
	// close all open mmap'ed files
	as.Vmregion.Clear()
}
// Vmadd_anon adds a private anonymous mapping of len bytes at start.
func (as *Vm_t) Vmadd_anon(start, len int, perms mem.Pa_t) {
	vmi := as._mkvmi(VANON, start, len, perms, 0, nil, nil)
	as.Vmregion.insert(vmi)
}
// Vmadd_file adds a private file-backed mapping of len bytes at start,
// reading from fops at file offset foff.
func (as *Vm_t) Vmadd_file(start, len int, perms mem.Pa_t, fops fdops.Fdops_i,
	foff int) {
	vmi := as._mkvmi(VFILE, start, len, perms, foff, fops, nil)
	as.Vmregion.insert(vmi)
}
// Vmadd_shareanon adds a shared anonymous mapping of len bytes at start.
func (as *Vm_t) Vmadd_shareanon(start, len int, perms mem.Pa_t) {
	vmi := as._mkvmi(VSANON, start, len, perms, 0, nil, nil)
	as.Vmregion.insert(vmi)
}
// Vmadd_sharefile adds a shared file-backed mapping of len bytes at
// start; unpin is invoked when pages are released.
func (as *Vm_t) Vmadd_sharefile(start, len int, perms mem.Pa_t, fops fdops.Fdops_i,
	foff int, unpin mem.Unpin_i) {
	vmi := as._mkvmi(VFILE, start, len, perms, foff, fops, unpin)
	as.Vmregion.insert(vmi)
}
// _mkvmi builds a Vminfo_t describing a new mapping. does not increase
// opencount on fops (vmregion_t.insert does). perms should only use
// PTE_U/PTE_W; the page fault handler will install the correct COW
// flags. perms == 0 means that no mapping can go here (like for guard
// pages).
func (as *Vm_t) _mkvmi(mt mtype_t, start, len int, perms mem.Pa_t, foff int,
	fops fdops.Fdops_i, unpin mem.Unpin_i) *Vminfo_t {
	if len <= 0 {
		panic("bad vmi len")
	}
	// both start and len must be page-aligned
	if mem.Pa_t(start|len)&PGOFFSET != 0 {
		panic("start and len must be aligned")
	}
	// don't specify cow, present etc. -- page fault will handle all that
	pm := PTE_W | PTE_COW | PTE_WASCOW | PTE_PS | PTE_PCD | PTE_P | PTE_U
	if r := perms & pm; r != 0 && r != PTE_U && r != (PTE_W|PTE_U) {
		panic("bad perms")
	}
	ret := &Vminfo_t{}
	pgn := uintptr(start) >> PGSHIFT
	pglen := util.Roundup(len, mem.PGSIZE) >> PGSHIFT
	ret.Mtype = mt
	ret.Pgn = pgn
	ret.Pglen = pglen
	ret.Perms = uint(perms)
	if mt == VFILE {
		ret.file.foff = foff
		ret.file.mfile = &Mfile_t{}
		ret.file.mfile.mfops = fops
		ret.file.mfile.unpin = unpin
		ret.file.mfile.mapcount = pglen
		// a non-nil unpin callback marks a shared file mapping
		ret.file.shared = unpin != nil
	}
	return ret
}
// Mkuserbuf returns a Userbuf_t covering len bytes of this address
// space starting at userva.
func (as *Vm_t) Mkuserbuf(userva, len int) *Userbuf_t {
	ub := &Userbuf_t{}
	ub.ub_init(as, userva, len)
	return ub
}
|
package main
//
// import (
// "net/http"
// "bytes"
// "strings"
// "os/exec"
// "log"
// "fmt"
// "io"
// "net"
// "time"
// "crypto/tls"
// "golang.org/x/net/http2"
// )
//
// func main() {
// cmd := exec.Command("docker", "inspect", "-f {{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}", "backend")
// var out bytes.Buffer
// cmd.Stdout = &out
// err := cmd.Run()
// if err != nil {
// log.Fatal(err)
// }
// server := strings.TrimSpace(out.String())
// handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
// //serverURL, _ := url.Parse("http://"+server)
// //revproxy := httputil.NewSingleHostReverseProxy(serverURL)
// //revproxy.ServeHTTP(rw, req)
//
// req.URL.Scheme = "https"
// req.URL.Host = server
//
//
// //Connection HopByHop
// for _, header := range strings.Split(req.Header.Get("Connection"), ",") {
// req.Header.Del(header)
// }
//
// //HopByHop
// for _, header := range hopHeaders {
// req.Header.Del(header)
// }
//
// //XForwardedFor
// ip, _, _ := net.SplitHostPort(req.RemoteAddr)
// xf := req.Header.Get("X-Forwarded-For")
// if xf != "" {
// xf += ","
// }
// xf += ip
// req.Header.Set("X-Forwarded-For", xf)
//
// //XForwardedHost XForwardedPort
// host, port, _ := net.SplitHostPort(req.Host)
// req.Header.Set("X-Forwarded-Host", host)
// req.Header.Set("X-Forwarded-Port", port)
//
// ////TLS
// http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
// ////HTTP2
// http2.ConfigureTransport(http.DefaultTransport.(*http.Transport))
//
// resp, err := http.DefaultTransport.RoundTrip(req)
// if err != nil {
// rw.WriteHeader(500)
// fmt.Printf("Error on roundtrip: %s", err)
// rw.Write([]byte(fmt.Sprintf("error: %q", err)))
// return
// }
//
// //Connection HopByHop
// for _, header := range strings.Split(resp.Header.Get("Connection"), ",") {
// resp.Header.Del(header)
// }
//
// //HopByHop
// for _, header := range hopHeaders {
// resp.Header.Del(header)
// }
//
// //Copy resp Headers
// for key, value := range resp.Header {
// for _, val := range value {
// rw.Header().Add(key, val)
// }
// }
//
// //Trailer
// trailerKeys := make([]string, 0, len(resp.Trailer))
// for k := range resp.Trailer {
// trailerKeys = append(trailerKeys, k)
// }
// if len(trailerKeys) > 0 {
// rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
// }
//
// //Write resp statusCode
// rw.WriteHeader(resp.StatusCode)
//
// //Flusher for stream
// done := make(chan bool)
// go func() {
// t := time.Tick(100 * time.Millisecond)
// for {
// select {
// case <-t:
// flusher, ok := rw.(http.Flusher)
// if ok {
// flusher.Flush()
// } else {
// return
// }
// case <-done:
// return
// }
//
// }
// }()
//
// // Body copy
// io.Copy(rw, resp.Body)
//
// //Flush end
// done <- true
//
// //Trailers must be read after the body has been fully read
// for key, values := range resp.Trailer {
// for _, value := range values {
// rw.Header().Add(key, value)
// }
// }
//
// })
// //http.ListenAndServe(":8080", handler)
// http.ListenAndServeTLS(":8080", "./cert.pem", "key.pem", handler)
// }
|
// Copyright 2019-present Open Networking Foundation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
gnmiutils "github.com/onosproject/onos-config/test/utils/gnmi"
"github.com/onosproject/onos-config/test/utils/proto"
"testing"
)
// gNMI paths and values used by TestUpdateDelete for the test interface.
const (
	udtestRootPath         = "/interfaces/interface[name=test]"
	udtestNamePath         = udtestRootPath + "/config/name"
	udtestEnabledPath      = udtestRootPath + "/config/enabled"
	udtestDescriptionPath  = udtestRootPath + "/config/description"
	udtestNameValue        = "test"
	udtestDescriptionValue = "description"
)
// TestUpdateDelete tests update and delete paths in a single GNMI request
func (s *TestSuite) TestUpdateDelete(t *testing.T) {
	// Get the first configured simulator from the environment.
	simulator := gnmiutils.CreateSimulator(t)
	defer gnmiutils.DeleteSimulator(t, simulator)
	// Make a GNMI client to use for requests
	gnmiClient := gnmiutils.GetGNMIClientOrFail(t)
	// Create interface tree using gNMI client
	setNamePath := []proto.TargetPath{
		{TargetName: simulator.Name(), Path: udtestNamePath, PathDataValue: udtestNameValue, PathDataType: proto.StringVal},
	}
	gnmiutils.SetGNMIValueOrFail(t, gnmiClient, setNamePath, gnmiutils.NoPaths, gnmiutils.SyncExtension(t))
	gnmiutils.CheckGNMIValue(t, gnmiClient, setNamePath, udtestNameValue, 0, "Query name after set returned the wrong value")
	// Set initial values for Enabled and Description using gNMI client
	setInitialValuesPath := []proto.TargetPath{
		{TargetName: simulator.Name(), Path: udtestEnabledPath, PathDataValue: "true", PathDataType: proto.BoolVal},
		{TargetName: simulator.Name(), Path: udtestDescriptionPath, PathDataValue: udtestDescriptionValue, PathDataType: proto.StringVal},
	}
	gnmiutils.SetGNMIValueOrFail(t, gnmiClient, setInitialValuesPath, gnmiutils.NoPaths, gnmiutils.SyncExtension(t))
	// Update Enabled, delete Description using gNMI client
	updateEnabledPath := []proto.TargetPath{
		{TargetName: simulator.Name(), Path: udtestEnabledPath, PathDataValue: "false", PathDataType: proto.BoolVal},
	}
	deleteDescriptionPath := []proto.TargetPath{
		{TargetName: simulator.Name(), Path: udtestDescriptionPath},
	}
	gnmiutils.SetGNMIValueOrFail(t, gnmiClient, updateEnabledPath, deleteDescriptionPath, gnmiutils.SyncExtension(t))
	// Check that the Enabled value is set correctly. The failure message
	// previously said "Query name ..." — a copy-paste from the name check
	// above — which would mislead anyone triaging a failure here.
	gnmiutils.CheckGNMIValue(t, gnmiClient, updateEnabledPath, "false", 0, "Query enabled after set returned the wrong value")
	// Make sure Description got removed
	gnmiutils.CheckGNMIValue(t, gnmiClient, gnmiutils.GetTargetPath(simulator.Name(), udtestDescriptionPath), "", 0, "New child was not removed")
}
|
package main
import (
"strconv"
"encoding/json"
"time"
"net"
"bufio"
"fmt"
)
// ErrorData is one log entry returned by the log server.
type ErrorData struct {
	Ts  string `json:"ts"`  // timestamp cursor used to resume polling
	Txt string `json:"txt"` // log message text
}
const (
	// tcpLogServerAddr is the log server endpoint to poll.
	tcpLogServerAddr = "127.0.0.1:33334"
	// procId identifies which process's logs are requested.
	procId = "iptv/ffmpeg_rtmp"
	// logRequestPeriod is the delay between polls, in milliseconds.
	logRequestPeriod = 1000
)
func main(){
ts := strconv.FormatInt(time.Now().UnixNano(), 10)
for {
conn, err := net.Dial("tcp", tcpLogServerAddr)
if err != nil {
fmt.Println("Fail to connect tcp log server")
return
}
//fmt.Printf("log tcp client connected to %s\n", tcpLogServerAddr)
fmt.Fprintf(conn, `{"procId":"%s", "tsFrom": "%s"}`, procId, ts)
scanner := bufio.NewScanner(conn)
for scanner.Scan() {
line := scanner.Text()
//fmt.Printf("receive log response message line: %s\n", line)
msg := []ErrorData{}
err = json.Unmarshal([]byte(line), &msg)
if err != nil {
fmt.Println("json parse log response data error: " + err.Error())
return
}
fmt.Printf("Receive log response: %v\n", msg)
if len(msg) > 0 {
lastLogObj := msg[len(msg)-1:][0]
ts = lastLogObj.Ts
}
}
if err := scanner.Err(); err != nil {
fmt.Println("Log scannel error: " + err.Error())
}
conn.Close()
time.Sleep(time.Duration(logRequestPeriod) * time.Millisecond)
}
}
|
package data
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDownloader_candlesCount checks the candle count and interval
// computed by candlesCount for several timeframe strings.
func TestDownloader_candlesCount(t *testing.T) {
	cases := []struct {
		start     time.Time
		end       time.Time
		timeframe string
		interval  time.Duration
		total     int
	}{
		{start: time.Now(), end: time.Now().AddDate(0, 0, 10), timeframe: "1d", interval: time.Hour * 24, total: 10},
		{start: time.Now(), end: time.Now().Add(60 * time.Minute), timeframe: "1m", interval: time.Minute, total: 60},
		{start: time.Now(), end: time.Now().Add(60 * time.Minute), timeframe: "15m", interval: 15 * time.Minute, total: 4},
	}
	for _, c := range cases {
		total, interval, err := candlesCount(c.start, c.end, c.timeframe)
		require.NoError(t, err)
		assert.Equal(t, c.total, total)
		assert.Equal(t, c.interval, interval)
	}
}
|
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"errors"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/vertex/alpha/vertex_alpha_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vertex/alpha"
)
// MetadataSchemaServer implements the gRPC interface for MetadataSchema.
type MetadataSchemaServer struct{}
// ProtoToVertexAlphaMetadataSchemaSchemaTypeEnum converts a
// MetadataSchemaSchemaTypeEnum enum from its proto representation.
func ProtoToVertexAlphaMetadataSchemaSchemaTypeEnum(e alphapb.VertexAlphaMetadataSchemaSchemaTypeEnum) *alpha.MetadataSchemaSchemaTypeEnum {
	// 0 is the proto "unspecified" sentinel; represent it as an unset field.
	if e == 0 {
		return nil
	}
	// strip the proto enum type-name prefix from the value's name to
	// recover the bare DCL enum string.
	if n, ok := alphapb.VertexAlphaMetadataSchemaSchemaTypeEnum_name[int32(e)]; ok {
		e := alpha.MetadataSchemaSchemaTypeEnum(n[len("VertexAlphaMetadataSchemaSchemaTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToMetadataSchema converts a MetadataSchema resource from its proto representation.
func ProtoToMetadataSchema(p *alphapb.VertexAlphaMetadataSchema) *alpha.MetadataSchema {
	obj := &alpha.MetadataSchema{}
	obj.Name = dcl.StringOrNil(p.GetName())
	obj.SchemaVersion = dcl.StringOrNil(p.GetSchemaVersion())
	obj.Schema = dcl.StringOrNil(p.GetSchema())
	obj.SchemaType = ProtoToVertexAlphaMetadataSchemaSchemaTypeEnum(p.GetSchemaType())
	obj.CreateTime = dcl.StringOrNil(p.GetCreateTime())
	obj.Project = dcl.StringOrNil(p.GetProject())
	obj.Location = dcl.StringOrNil(p.GetLocation())
	obj.MetadataStore = dcl.StringOrNil(p.GetMetadataStore())
	return obj
}
// VertexAlphaMetadataSchemaSchemaTypeEnumToProto converts a
// MetadataSchemaSchemaTypeEnum enum to its proto representation.
func VertexAlphaMetadataSchemaSchemaTypeEnumToProto(e *alpha.MetadataSchemaSchemaTypeEnum) alphapb.VertexAlphaMetadataSchemaSchemaTypeEnum {
	if e == nil {
		return alphapb.VertexAlphaMetadataSchemaSchemaTypeEnum(0)
	}
	// The DCL enum string is produced by
	// ProtoToVertexAlphaMetadataSchemaSchemaTypeEnum, which strips the
	// full "VertexAlphaMetadataSchemaSchemaTypeEnum" prefix; the reverse
	// lookup must re-add the same prefix. The previous
	// "MetadataSchemaSchemaTypeEnum" prefix could never match a _value
	// key, so every conversion silently fell through to 0 (unspecified).
	if v, ok := alphapb.VertexAlphaMetadataSchemaSchemaTypeEnum_value["VertexAlphaMetadataSchemaSchemaTypeEnum"+string(*e)]; ok {
		return alphapb.VertexAlphaMetadataSchemaSchemaTypeEnum(v)
	}
	return alphapb.VertexAlphaMetadataSchemaSchemaTypeEnum(0)
}
// MetadataSchemaToProto converts a MetadataSchema resource to its proto
// representation. Unset (nil) string fields become empty strings; an
// unset enum becomes the zero (unspecified) value.
func MetadataSchemaToProto(resource *alpha.MetadataSchema) *alphapb.VertexAlphaMetadataSchema {
	p := &alphapb.VertexAlphaMetadataSchema{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetSchemaVersion(dcl.ValueOrEmptyString(resource.SchemaVersion))
	p.SetSchema(dcl.ValueOrEmptyString(resource.Schema))
	p.SetSchemaType(VertexAlphaMetadataSchemaSchemaTypeEnumToProto(resource.SchemaType))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	p.SetMetadataStore(dcl.ValueOrEmptyString(resource.MetadataStore))
	return p
}
// applyMetadataSchema converts the requested resource to its DCL form,
// applies it with the given client, and converts the result back to proto.
func (s *MetadataSchemaServer) applyMetadataSchema(ctx context.Context, c *alpha.Client, request *alphapb.ApplyVertexAlphaMetadataSchemaRequest) (*alphapb.VertexAlphaMetadataSchema, error) {
	res, err := c.ApplyMetadataSchema(ctx, ProtoToMetadataSchema(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return MetadataSchemaToProto(res), nil
}
// ApplyVertexAlphaMetadataSchema handles the gRPC request by passing it to the underlying MetadataSchema Apply() method.
func (s *MetadataSchemaServer) ApplyVertexAlphaMetadataSchema(ctx context.Context, request *alphapb.ApplyVertexAlphaMetadataSchemaRequest) (*alphapb.VertexAlphaMetadataSchema, error) {
	cl, err := createConfigMetadataSchema(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyMetadataSchema(ctx, cl, request)
}
// DeleteVertexAlphaMetadataSchema handles the gRPC delete request.
// MetadataSchema resources have no delete endpoint, so this always
// returns an error.
func (s *MetadataSchemaServer) DeleteVertexAlphaMetadataSchema(ctx context.Context, request *alphapb.DeleteVertexAlphaMetadataSchemaRequest) (*emptypb.Empty, error) {
	return nil, errors.New("no delete endpoint for MetadataSchema")
}
// ListVertexAlphaMetadataSchema handles the gRPC request by passing it to the underlying MetadataSchemaList() method.
func (s *MetadataSchemaServer) ListVertexAlphaMetadataSchema(ctx context.Context, request *alphapb.ListVertexAlphaMetadataSchemaRequest) (*alphapb.ListVertexAlphaMetadataSchemaResponse, error) {
	cl, err := createConfigMetadataSchema(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListMetadataSchema(ctx, request.GetProject(), request.GetLocation(), request.GetMetadataStore())
	if err != nil {
		return nil, err
	}
	// pre-size the result: exactly one proto per listed resource.
	protos := make([]*alphapb.VertexAlphaMetadataSchema, 0, len(resources.Items))
	for _, r := range resources.Items {
		protos = append(protos, MetadataSchemaToProto(r))
	}
	p := &alphapb.ListVertexAlphaMetadataSchemaResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigMetadataSchema builds an alpha client authenticated with
// the given service account file.
func createConfigMetadataSchema(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
package main
import "fmt"
// main demonstrates slice creation, append, re-slicing and copy.
func main() {
	s := make([]string, 3)
	s[0], s[1], s[2] = "a", "b", "c"
	s = append(s, "3")
	s = append(s, "d", "e")
	fmt.Println(s)
	fmt.Println("Size of slice is", len(s))
	// a sub-slice shares the same backing array
	l := s[2:5]
	fmt.Println(l)
	// copy transfers only min(len(dst), len(src)) elements
	c := make([]string, 2)
	copy(c, s)
	fmt.Println(c)
}
|
package main
import (
"fmt"
"os"
"github.com/andywow/golang-lessons/lesson2/stringunpack"
)
// fail prints an error message to stderr and exits with status 1.
func fail(msg string) {
	fmt.Fprintf(os.Stderr, "Error: %s\n", msg)
	os.Exit(1)
}

// main unpacks the string given as the first command-line argument and
// prints the result.
func main() {
	if (len(os.Args)) == 1 {
		fail("specify input string")
	}
	result, err := stringunpack.Unpack(os.Args[1])
	if err != nil {
		fail(err.Error())
	}
	fmt.Printf("Unpacked string: %s\n", result)
}
|
package database
import (
"errors"
"strconv"
db "taskweb/database"
)
// TbUser mirrors one row of the tb_user table.
type TbUser struct {
	Id         int    // primary key
	Name       string
	Pwd        string // NOTE(review): compared verbatim in UserLogin — looks like a plaintext password; confirm hashing elsewhere
	Email      string
	Createtime int64  // creation timestamp (units not visible here — verify against writers)
	Remark     string
}
// ExistTbUser reports whether a tb_user row with the given id exists.
func ExistTbUser(id int) (bool, error) {
	rows, err := db.Dtsc.Query("select count(0) Count from tb_user where id=?", id)
	if err != nil {
		return false, err
	}
	if len(rows) == 0 {
		return false, nil
	}
	// the query returns a single aggregate row
	count, err := strconv.Atoi(string(rows[0]["Count"]))
	if err != nil {
		return false, errors.New("parse Count error: " + err.Error())
	}
	return count > 0, nil
}
// InsertTbUser inserts a new tb_user row (id is auto-generated) and
// returns the generated id, or -1 on error.
func InsertTbUser(tb_user TbUser) (int64, error) {
	result, err := db.Dtsc.Exec("insert into tb_user(name,pwd,email,createtime,remark) values(?,?,?,?,?)", tb_user.Name,tb_user.Pwd,tb_user.Email,tb_user.Createtime,tb_user.Remark)
	if err != nil {
		return -1, err
	}
	return result.LastInsertId()
}
// UpdateTbUser rewrites all columns of the tb_user row identified by
// u.Id and reports whether any row was changed.
func UpdateTbUser(u TbUser) (bool, error) {
	result, err := db.Dtsc.Exec("update tb_user set name=?, pwd=?, email=?, createtime=?, remark=? where id=?", u.Name, u.Pwd, u.Email, u.Createtime, u.Remark, u.Id)
	if err != nil {
		return false, err
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return false, err
	}
	return affected > 0, nil
}
// GetTbUser loads the tb_user row with the given id. When no row
// matches, the zero-value TbUser and a nil error are returned — callers
// must distinguish "not found" themselves (e.g. via ExistTbUser).
func GetTbUser(id int) (tb_user TbUser, err error) {
	rows, err := db.Dtsc.Query("select id, name, pwd, email, createtime, remark from tb_user where id=?", id)
	if err != nil {
		return tb_user, err
	}
	if len(rows) <= 0 {
		return tb_user, nil
	}
	tb_users, err := _TbUserRowsToArray(rows)
	if err != nil {
		return tb_user, err
	}
	// id is the primary key, so at most one row is expected
	return tb_users[0], nil
}
// GetTbUserRowCount returns the total number of tb_user rows, or -1 on
// error (or when the aggregate query unexpectedly returns no row).
func GetTbUserRowCount() (count int, err error) {
	rows, err := db.Dtsc.Query("select count(0) Count from tb_user")
	if err != nil {
		return -1, err
	}
	if len(rows) == 0 {
		return -1, nil
	}
	// the aggregate query yields exactly one row
	count, err = strconv.Atoi(string(rows[0]["Count"]))
	if err != nil {
		return -1, errors.New("parse Count error: " + err.Error())
	}
	return count, nil
}
// _TbUserRowsToArray converts raw query rows (column name -> raw bytes)
// into TbUser structs. A numeric column that fails to parse aborts the
// whole conversion.
func _TbUserRowsToArray(maps []map[string][]byte) ([]TbUser, error) {
	models := make([]TbUser, len(maps))
	var err error
	for index, obj := range maps {
		model := TbUser{}
		model.Id, err = strconv.Atoi(string(obj["id"]))
		if err != nil {
			return nil, errors.New("parse Id error: " + err.Error())
		}
		model.Name = string(obj["name"])
		model.Pwd = string(obj["pwd"])
		model.Email = string(obj["email"])
		model.Createtime, err = strconv.ParseInt(string(obj["createtime"]), 10, 64)
		if err != nil {
			return nil, errors.New("parse Createtime error: " + err.Error())
		}
		model.Remark = string(obj["remark"])
		models[index] = model
	}
	// err is necessarily nil here: any parse failure returned above
	return models, err
}
// UserLogin returns the tb_user row matching the given credentials, or
// the zero-value TbUser with a nil error when none matches.
// NOTE(review): the query compares pwd verbatim, which implies the
// password is stored in plaintext — confirm and consider hashing.
func UserLogin(name string,pwd string) (tb_user TbUser, err error ) {
	rows, err := db.Dtsc.Query("select * from tb_user where name=? and pwd=?", name,pwd)
	if err != nil {
		return tb_user, err
	}
	if len(rows) <= 0 {
		return tb_user, nil
	}
	tb_users, err := _TbUserRowsToArray(rows)
	if err != nil {
		return tb_user, err
	}
	return tb_users[0], nil
}
|
package main
import (
"errors"
"fmt"
)
// LinkedNode is a node of a circular doubly-linked list. A list is
// represented by a sentinel node; a sentinel with nil (zero value) or
// self-referential next/prev is an empty list.
type LinkedNode struct {
	value interface{} // payload; nil on sentinel nodes
	next  *LinkedNode
	prev  *LinkedNode
}
// IsEmpty reports whether l links to no other nodes: either both
// pointers are nil (the zero value) or both point back at l itself
// (an emptied sentinel).
func (l *LinkedNode) IsEmpty() bool {
	return (l.next == nil && l.prev == nil) || (l.next == l && l.prev == l)
}
// AddNode inserts newNode immediately after l — i.e. at the front of
// the list when l is the sentinel.
func (l *LinkedNode) AddNode(newNode *LinkedNode) {
	if l.IsEmpty() {
		// first element: link l and newNode into a two-node ring
		l.next = newNode
		l.prev = newNode
		newNode.prev = l
		newNode.next = l
	} else {
		// splice newNode between l and the current first node
		currFirstNode := l.next
		l.next = newNode
		newNode.next = currFirstNode
		newNode.prev = l
		currFirstNode.prev = newNode
	}
}
// Traverse visits each node's value front-to-back.
// Should only be called from the head of the list (the head node is
// essentially the sentinel).
func (l *LinkedNode) Traverse(nodeVisitorFn func(data interface{})) {
	for curr := l.next; curr != l; curr = curr.next {
		nodeVisitorFn(curr.value)
	}
}
// DeleteNode unlinks node l from its list. l's own next/prev pointers
// are left untouched (dangling); callers here always re-link l via
// AddNode before reuse.
func DeleteNode(l *LinkedNode) error {
	if l.IsEmpty() {
		return errors.New("DeleteNode called on an empty list")
	}
	next := l.next
	prev := l.prev
	next.prev = prev
	prev.next = next
	return nil
}
// NewLinkedNode returns an unlinked node carrying data.
func NewLinkedNode(data interface{}) *LinkedNode {
	return &LinkedNode{value: data}
}
// NewEmptyLinkedNode returns a fresh sentinel for an empty list.
func NewEmptyLinkedNode() *LinkedNode {
	return &LinkedNode{}
}
// LFUCache is an LFU cache backed by a list of frequency buckets
// (freqList, ordered by ascending frequency), each bucket holding the
// value nodes at that frequency with the most recently used at the front.
type LFUCache struct {
	capacity int
	size     int
	freqList *LinkedNode
	keyMap   map[int]valueDesc // int -> valueDesc
}
// valueDesc locates a cached entry: the node holding its keyValueData
// and the frequency-bucket node whose list currently contains it.
type valueDesc struct {
	valueNode    *LinkedNode // Node containing the actual value
	freqListNode *LinkedNode // Node from the freq list containing the value node
}

// freqData is the payload of a frequency-bucket node.
type freqData struct {
	freq        int
	valuesNodes *LinkedNode // sentinel of the list of value nodes at this frequency
}

// keyValueData is the payload of a value node.
type keyValueData struct {
	key   int
	value int
}
// Constructor builds an empty LFUCache with the given capacity.
func Constructor(capacity int) LFUCache {
	return LFUCache{
		capacity: capacity,
		freqList: NewEmptyLinkedNode(),
		keyMap:   make(map[int]valueDesc),
	}
}
// Get returns the value stored for key, or -1 when absent, and moves
// the entry from its current frequency bucket to the freq+1 bucket
// (creating that bucket if necessary).
func (this *LFUCache) Get(key int) int {
	vdesc, ok := this.keyMap[key]
	if !ok {
		return -1
	}
	valueFreqNode := vdesc.freqListNode
	valueFreqData := valueFreqNode.value.(freqData)
	nextFreqNode := valueFreqNode.next
	nextFreqData := freqData{}
	// the sentinel carries a nil value; only type-assert real nodes
	if nextFreqNode.value != nil {
		nextFreqData = nextFreqNode.value.(freqData)
	}
	var higherFreqNode *LinkedNode
	if nextFreqNode == this.freqList || nextFreqData.freq != valueFreqData.freq+1 {
		// Either:
		// - this particular freq node is the last one, or
		// - the next higher freq node is for higher frequency than we need
		// For both cases, create a new freq node.
		newFreqData := freqData{freq: valueFreqData.freq + 1, valuesNodes: NewEmptyLinkedNode()}
		newFreqNode := NewLinkedNode(newFreqData)
		valueFreqNode.AddNode(newFreqNode)
		higherFreqNode = newFreqNode
	} else if nextFreqData.freq == valueFreqData.freq+1 {
		higherFreqNode = nextFreqNode
	} else {
		panic("freqNode conditions not met")
	}
	// Delete the value node from its current list and add it to the new freq node's value node list
	DeleteNode(vdesc.valueNode)
	higherFreqData := higherFreqNode.value.(freqData)
	higherFreqData.valuesNodes.AddNode(vdesc.valueNode)
	// We also need to update the freq node since it has changed
	vdesc.freqListNode = higherFreqNode
	this.keyMap[key] = vdesc
	// If the old freq node's value node list is empty, we delete the freq node itself
	if valueFreqData.valuesNodes.IsEmpty() {
		DeleteNode(valueFreqNode)
	}
	return vdesc.valueNode.value.(keyValueData).value
}
// Put inserts or updates key with value, evicting the least frequently
// used entry (the oldest within the lowest frequency bucket) when the
// cache is at capacity.
func (this *LFUCache) Put(key int, value int) {
	// Test membership via the map rather than Get's return value: Get
	// returns -1 both for "absent" and for a stored value of -1, so the
	// old `this.Get(key) != -1` check would wrongly re-insert a key
	// whose stored value is -1. Get is still called for its
	// frequency-bump side effect.
	if _, present := this.keyMap[key]; present {
		this.Get(key)
		vdesc, ok := this.keyMap[key]
		if !ok {
			panic(fmt.Sprintf("We should have found the %d in keyMap", key))
		}
		kvData := vdesc.valueNode.value.(keyValueData)
		kvData.value = value
		vdesc.valueNode.value = kvData
		return
	}
	// a zero-capacity cache stores nothing
	if this.capacity == 0 {
		return
	}
	if this.size == this.capacity {
		// Evict one entry to make space. The first freq node holds the
		// lowest frequency.
		firstFreqNode := this.freqList.next
		firstFreqData := firstFreqNode.value.(freqData)
		firstFreqValuesNodes := firstFreqData.valuesNodes
		// Delete the last (oldest) node in the valuesNodes pointed at by this freqNode
		valueNodeToDelete := firstFreqValuesNodes.prev
		DeleteNode(valueNodeToDelete)
		if firstFreqValuesNodes.IsEmpty() {
			DeleteNode(firstFreqNode)
		}
		delete(this.keyMap, valueNodeToDelete.value.(keyValueData).key)
		this.size--
	}
	newVDesc := valueDesc{}
	newValueNode := NewLinkedNode(keyValueData{key: key, value: value})
	newVDesc.valueNode = newValueNode
	// key not found in map:
	// check if there's a freq node that has never been accessed (freq 0).
	// If there's one, the new value will go into that freq node's list.
	// Otherwise, we'll need to create a new freq node and add this new
	// value node to that freq node's list.
	if this.freqList.IsEmpty() {
		newValuesNodes := NewEmptyLinkedNode()
		newValuesNodes.AddNode(newValueNode)
		newFreqData := freqData{freq: 0, valuesNodes: newValuesNodes}
		newFreqNode := NewLinkedNode(newFreqData)
		this.freqList.AddNode(newFreqNode)
		newVDesc.freqListNode = newFreqNode
	} else {
		// does the first node have freq 0? If so, we just add to that freqNode list
		firstFreqNode := this.freqList.next
		// We want to panic if the types are wrong
		firstFreqData := firstFreqNode.value.(freqData)
		if firstFreqData.freq == 0 {
			firstFreqValuesNodes := firstFreqData.valuesNodes
			firstFreqValuesNodes.AddNode(newValueNode)
			newVDesc.freqListNode = firstFreqNode
		} else {
			// We need to add new freq node with freq 0
			newValuesNodes := NewEmptyLinkedNode()
			newValuesNodes.AddNode(newValueNode)
			newFreqData := freqData{freq: 0, valuesNodes: newValuesNodes}
			newFreqNode := NewLinkedNode(newFreqData)
			this.freqList.AddNode(newFreqNode)
			newVDesc.freqListNode = newFreqNode
		}
	}
	this.keyMap[key] = newVDesc
	this.size++
}
// printFreqList dumps the frequency buckets and the key/value pairs in
// each, for debugging.
func printFreqList(freqList *LinkedNode) {
	freqNodeVisitor := func(d interface{}) {
		fd := d.(freqData)
		fmt.Printf("freq: %d\n", fd.freq)
		valueNodeVisitor := func(d interface{}) {
			kvData := d.(keyValueData)
			fmt.Printf("\t%d -> %d\n", kvData.key, kvData.value)
		}
		fd.valuesNodes.Traverse(valueNodeVisitor)
	}
	freqList.Traverse(freqNodeVisitor)
}
// printValues walks the list starting at head and prints each node's
// payload, which is expected to be an int.
func printValues(head *LinkedNode) {
	visitorFn := func(d interface{}) {
		num, ok := d.(int)
		if !ok {
			// Skip the node instead of falling through and printing a
			// bogus "Data: 0" (the original lacked this early return).
			fmt.Printf("Unexpected type\n")
			return
		}
		fmt.Printf("Data: %d\n", num)
	}
	head.Traverse(visitorFn)
}
// test1 exercises eviction on a capacity-2 cache and then prints the
// resulting frequency list.
func test1() {
	cache := Constructor(2)
	cache.Put(1, 1)
	cache.Put(2, 2)
	cache.Put(3, 3)
	fmt.Printf("lfuCache.Get(1) = %d\n", cache.Get(1))
	fmt.Printf("lfuCache.Get(2) = %d\n", cache.Get(2))
	fmt.Printf("lfuCache.Get(2) = %d\n", cache.Get(2))
	fmt.Printf("lfuCache.Get(3) = %d\n", cache.Get(3))
	fmt.Printf("lfuCache.Get(3) = %d\n", cache.Get(3))
	printFreqList(cache.freqList)
}
// test2 follows the classic LeetCode LFU example, printing the expected
// value next to each actual result.
func test2() {
	cache := Constructor(2)
	cache.Put(1, 1)
	cache.Put(2, 2)
	fmt.Printf("lfuCache.Get(1) = %d (expect 1)\n", cache.Get(1))
	cache.Put(3, 3)
	fmt.Printf("lfuCache.Get(2) = %d (expect -1)\n", cache.Get(2))
	fmt.Printf("lfuCache.Get(3) = %d (expect 3)\n", cache.Get(3))
	cache.Put(4, 4)
	fmt.Printf("lfuCache.Get(1) = %d (expect -1)\n", cache.Get(1))
	fmt.Printf("lfuCache.Get(3) = %d (expect 3)\n", cache.Get(3))
	fmt.Printf("lfuCache.Get(4) = %d (expect 4)\n", cache.Get(4))
	printFreqList(cache.freqList)
}
// test3 reproduces the sequence
// ["LFUCache","put","put","put","put","get"] / [[2],[3,1],[2,1],[2,2],[4,4],[2]].
func test3() {
	cache := Constructor(2)
	cache.Put(3, 1)
	cache.Put(2, 1)
	cache.Put(2, 2)
	cache.Put(4, 4)
	fmt.Printf("lfuCache.Get(2) = %d (expect 2)\n", cache.Get(2))
	printFreqList(cache.freqList)
}
// test4 reproduces the sequence
// ["LFUCache","put","put","put","put","get","get"] / [[2],[2,1],[1,1],[2,3],[4,1],[1],[2]].
func test4() {
	cache := Constructor(2)
	cache.Put(2, 1)
	cache.Put(1, 1)
	cache.Put(2, 3)
	cache.Put(4, 1)
	fmt.Printf("lfuCache.Get(1) = %d (expect -1)\n", cache.Get(1))
	fmt.Printf("lfuCache.Get(2) = %d (expect 3)\n", cache.Get(2))
	printFreqList(cache.freqList)
}
// main runs the currently selected manual test case; test1-test3 are kept
// around for experimentation.
func main() {
	test4()
}
|
package types
import (
"errors"
"fmt"
"strings"
"gopkg.in/yaml.v2"
sdk "github.com/cosmos/cosmos-sdk/types"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
const (
	// StandardDenom is the standard (base) token denom for coinswap; it
	// defaults to the chain's bond denom.
	StandardDenom = sdk.DefaultBondDenom
)

// Parameter store keys.
var (
	KeyFee = []byte("Fee") // fee key
	KeyStandardDenom = []byte("StandardDenom") // standard token denom key
)
// NewParams builds a coinswap Params value from the given swap fee rate
// and standard (fee) denomination.
func NewParams(fee sdk.Dec, feeDenom string) Params {
	p := Params{}
	p.Fee = fee
	p.StandardDenom = feeDenom
	return p
}
// ParamKeyTable returns the param KeyTable for the coinswap module with
// the Params set registered. (The previous comment referred to a
// non-existent "ParamTypeTable".)
func ParamKeyTable() paramtypes.KeyTable {
	return paramtypes.NewKeyTable().RegisterParamSet(&Params{})
}
// ParamSetPairs implements the paramtypes.ParamSet interface, pairing
// each param store key with its field pointer and validator.
func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs {
	return paramtypes.ParamSetPairs{
		paramtypes.NewParamSetPair(KeyFee, &p.Fee, validateFee),
		paramtypes.NewParamSetPair(KeyStandardDenom, &p.StandardDenom, validateStandardDenom),
	}
}
// DefaultParams returns the default coinswap module parameters:
// a swap fee of 0.003 (0.3%) and the chain's standard denom.
func DefaultParams() Params {
	// Route through NewParams so default construction stays consistent
	// with explicit construction.
	return NewParams(sdk.NewDecWithPrec(3, 3), StandardDenom)
}
// String returns a human readable string representation of the parameters.
func (p Params) String() string {
	// Marshal error intentionally ignored: Params contains only simple
	// serializable fields, so YAML marshaling is not expected to fail.
	out, _ := yaml.Marshal(p)
	return string(out)
}
// Validate returns an error if Params is invalid: the fee must lie in
// (0, 1) and the standard denom must be non-empty.
func (p Params) Validate() error {
	// Delegate to validateFee so the range check (and its error message)
	// cannot drift from the param-set-pair validator.
	if err := validateFee(p.Fee); err != nil {
		return err
	}
	if p.StandardDenom == "" {
		return fmt.Errorf("coinswap parameter standard denom can't be an empty string")
	}
	return nil
}
// validateFee checks that i is an sdk.Dec strictly between 0 and 1.
func validateFee(i interface{}) error {
	v, ok := i.(sdk.Dec)
	if !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	if v.GT(sdk.ZeroDec()) && v.LT(sdk.OneDec()) {
		return nil
	}
	return fmt.Errorf("fee must be positive and less than 1: %s", v.String())
}
// validateStandardDenom checks that i is a non-blank, syntactically valid
// denom string.
func validateStandardDenom(i interface{}) error {
	v, ok := i.(string)
	if !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	if len(strings.TrimSpace(v)) == 0 {
		return errors.New("standard denom cannot be blank")
	}
	return sdk.ValidateDenom(v)
}
|
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package main
import (
"io/ioutil"
"log"
"os"
"testing"
"time"
"golang.org/x/net/context"
"github.com/GoogleCloudPlatform/golang-samples/internal/testutil"
)
// TestMain silences the (noisy) sample functions' log output for the
// duration of the test run and restores stderr logging afterwards.
func TestMain(m *testing.M) {
	log.SetOutput(ioutil.Discard)
	code := m.Run()
	log.SetOutput(os.Stderr)
	os.Exit(code)
}
// TestCustomMetric exercises the full custom-metric sample flow: create
// the metric, poll until it is visible, then write and read a time
// series value.
func TestCustomMetric(t *testing.T) {
	hc := testutil.SystemTest(t)
	// Bound the whole test and release the context's resources when done.
	// (The original discarded the CancelFunc, which go vet's lostcancel
	// check flags, and its retry loop below could spin forever.)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	s, err := createService(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if err := createCustomMetric(s, hc.ProjectID, metricType); err != nil {
		t.Fatal(err)
	}
	// Poll until the freshly created metric becomes visible, but give up
	// when the context deadline expires instead of looping forever.
	for {
		if _, err = getCustomMetric(s, hc.ProjectID, metricType); err == nil {
			break
		}
		select {
		case <-ctx.Done():
			t.Fatalf("timed out waiting for custom metric: %v", err)
		case <-time.After(2 * time.Second):
		}
	}
	time.Sleep(2 * time.Second)
	if err := writeTimeSeriesValue(s, hc.ProjectID, metricType); err != nil {
		t.Error(err)
	}
	time.Sleep(2 * time.Second)
	if err := readTimeSeriesValue(s, hc.ProjectID, metricType); err != nil {
		t.Error(err)
	}
}
|
package main
import (
"fmt"
"log"
)
// change demonstrates that maps are reference types: a mutation made
// inside a function is visible to the caller.
func change(m map[string]string) {
	m["dicky"] = "novanto"
}
// main demonstrates map reference semantics, shallow copying, returning
// maps from functions, and the behaviour of nil maps (readable, nil-comparable,
// lookups return the zero value).
func main() {
	mapper := make(map[string]string)
	change(mapper)
	fmt.Println("map: ", mapper)
	fmt.Println("mapper dari dicky: ", mapper["dicky"])

	otherMapper := copyMap(mapper)
	fmt.Println(otherMapper)

	// Mutating the original must not affect the copy.
	mapper["dicky"] = "andre"
	fmt.Println("otherMapper:", otherMapper)
	fmt.Println("mapper:", mapper)

	for k, v := range returnMap() {
		log.Println(k, v)
	}

	var cobaMap map[uint64]uint64
	log.Printf("cobaMap: %v", cobaMap)
	if cobaMap == nil {
		log.Printf("coba map nil")
	}
	if _, ok := cobaMap[1]; !ok {
		log.Printf("ga ketemu angka 1")
	}
}
// copyMap returns a shallow copy of mapper: a brand-new map holding the
// same key/value pairs.
func copyMap(mapper map[string]string) map[string]string {
	clone := make(map[string]string, len(mapper))
	for key, value := range mapper {
		clone[key] = value
	}
	return clone
}
// returnMap builds and returns a one-entry demo map.
func returnMap() (res map[int]string) {
	res = map[int]string{123: "dicky"}
	return
}
|
package services
import (
"fmt"
"github.com/blockcypher/gobcy"
"github.com/constant-money/constant-event/config"
helpers "github.com/constant-money/constant-web-api/helpers"
)
// BlockcypherService wraps a BlockCypher API client configured from the
// application config: Bitcoin mainnet by default, BlockCypher's internal
// testnet when conf.BtcIsTestnet is set.
type BlockcypherService struct {
	conf *config.Config
	chain gobcy.API
}
// NewBlockcypherService builds a BlockcypherService targeting either
// Bitcoin mainnet or, when conf.BtcIsTestnet is set, BlockCypher's
// internal "bcy" testnet.
func NewBlockcypherService(conf *config.Config) *BlockcypherService {
	// Use keyed struct literals so `go vet`'s composite-literal check
	// passes and the code survives field reordering in gobcy.
	chain := gobcy.API{Token: conf.BcyToken, Coin: "btc", Chain: "main"}
	if conf.BtcIsTestnet {
		chain = gobcy.API{Token: conf.BcyToken, Coin: "bcy", Chain: "test"}
	}
	return &BlockcypherService{
		conf:  conf,
		chain: chain,
	}
}
// SendTX decrypts the sender's private key, builds a transaction skeleton
// moving `amount` from `from` to `destination`, signs it locally and
// broadcasts it.
//
// Fixes over the original: the decrypt/NewTX errors are now checked
// (previously the first err was silently overwritten), the `amount`
// parameter is actually used (it was ignored in favour of a hardcoded
// 2e5), and failures are returned to the caller instead of only printed.
func (bs *BlockcypherService) SendTX(from string, secret string, cipherKey string, destination string, amount int) (string, error) {
	priKey, err := helpers.DecryptToString(secret, cipherKey)
	if err != nil {
		return "", fmt.Errorf("unable to decrypt private key: %v", err)
	}
	// Post a new TX skeleton for the requested amount.
	skel, err := bs.chain.NewTX(gobcy.TempNewTX(from, destination, amount), false)
	if err != nil {
		return "", fmt.Errorf("unable to create tx skeleton: %v", err)
	}
	// Sign it locally.
	if err := skel.Sign([]string{priKey}); err != nil {
		return "", fmt.Errorf("unable to sign tx: %v", err)
	}
	// Broadcast the signed skeleton.
	skel, err = bs.chain.SendTX(skel)
	if err != nil {
		return "", fmt.Errorf("unable to broadcast tx: %v", err)
	}
	fmt.Printf("%+v\n", skel)
	return "", nil
}
|
// Copyright (c) 2022 Zededa, Inc.
// SPDX-License-Identifier: Apache-2.0
// A simple demonstration of reconciler + depgraph.
// Files, directories and their dependencies are represented using dependency
// graphs. Reconciler then takes care of the reconciliation between the intended
// and the actual content of a (temporary) directory.
package main
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"github.com/lf-edge/eve/libs/depgraph"
"github.com/lf-edge/eve/libs/reconciler"
)
// demo holds the state for the file-sync reconciler demonstration: the
// configurator registry plus the current and intended dependency graphs.
type demo struct {
	registry *reconciler.DefaultRegistry
	// currentState tracks what actually exists on disk.
	currentState depgraph.Graph
	// intendedState describes what we want on disk.
	intendedState depgraph.Graph
}
// init builds the configurator registry (file and directory
// configurators) and redirects Graphviz rendering for visualization.
func (d *demo) init() {
	d.registry = &reconciler.DefaultRegistry{}
	if err := d.registry.Register(fileConfigurator{}, file{}.Type()); err != nil {
		log.Fatalf("Failed to register configurator for files: %v", err)
	}
	if err := d.registry.Register(dirConfigurator{}, directory{}.Type()); err != nil {
		log.Fatalf("Failed to register configurator for directories: %v", err)
	}
	// Allow to visualize current/intended state using online Graphviz.
	d.redirectGraphvizRendering()
}
// run executes the three demo phases: (1) reconcile an initial intended
// directory tree into a fresh temp dir, (2) reconcile a reshuffled
// intended layout (moves, a modification, a deletion, new files), and
// (3) drop the root from the current-state graph so the Reconciler
// removes everything. After each phase it prints Graphviz links and
// waits for ENTER.
func (d *demo) run() {
	const graphName = "Reconciler Demo"
	// Create root directory for our file-sync demo.
	rootDirname, err := ioutil.TempDir("/tmp", "file-sync-demo-")
	if err != nil {
		log.Fatalf("Failed to create root directory for the demo: %v", err)
	}
	defer func() {
		_ = os.RemoveAll(rootDirname)
	}()
	// Root directory was created externally (outside of the Reconciler),
	// so it enters the current-state graph already marked as created.
	rootDir := directory{dirname: rootDirname, permissions: 0755}
	d.currentState = depgraph.New(depgraph.InitArgs{
		Name: graphName,
		ItemsWithState: []depgraph.ItemWithState{
			{
				Item: rootDir,
				State: &reconciler.ItemStateData{
					State: reconciler.ItemStateCreated,
					LastOperation: reconciler.OperationCreate,
					LastError: nil,
				},
			},
		},
	})
	// 1. Initial intended state of the directory content.
	// We want directory with svg images, further sorted between sub-directories.
	// The whole svg-image directory and all its content will be grouped by a *subgraph*.
	// Then we want directory with shell scripts and another empty directory later used
	// for text files.
	description := fmt.Sprintf(`%s
├── svg-images (this directory and all its content is grouped by a subgraph)
│   ├── circles
│   │   ├── one-circle.svg
│   │   └── two-circles.svg
│   └── squares
│       └── one-square.svg
├── scripts
│   ├── hello-world.sh
│   └── ls.sh
└── text-files (empty dir)
`, rootDirname)
	svgImagesDir := directory{dirname: "svg-images", parent: &rootDir, permissions: 0755}
	circlesDir := directory{dirname: "circles", parent: &svgImagesDir, permissions: 0755}
	squaresDir := directory{dirname: "squares", parent: &svgImagesDir, permissions: 0755}
	scriptsDir := directory{dirname: "scripts", parent: &rootDir, permissions: 0755}
	textFilesDir := directory{dirname: "text-files", parent: &rootDir, permissions: 0755}
	oneCircleFile := file{id: newFileID(), filename: "one-circle.svg",
		content: d.svgImageCircles(1), permissions: 0644, parentDir: &circlesDir}
	twoCircleFile := file{id: newFileID(), filename: "two-circles.svg",
		content: d.svgImageCircles(2), permissions: 0644, parentDir: &circlesDir}
	oneSquareFile := file{id: newFileID(), filename: "one-square.svg",
		content: d.svgImageSquares(1), permissions: 0644, parentDir: &squaresDir}
	helloWorldFile := file{id: newFileID(), filename: "hello-world.sh",
		content: d.shellScript("echo 'Hello world!'"), permissions: 0744, parentDir: &scriptsDir}
	lsFile := file{id: newFileID(), filename: "ls.sh",
		content: d.shellScript("ls -al"), permissions: 0744, parentDir: &scriptsDir}
	d.intendedState = depgraph.New(depgraph.InitArgs{
		Name: graphName,
		Items: []depgraph.Item{
			rootDir,
			scriptsDir,
			textFilesDir,
			helloWorldFile,
			lsFile,
		},
		Subgraphs: []depgraph.InitArgs{
			{
				Name: "svg-images",
				Description: "All SVG images",
				Items: []depgraph.Item{
					svgImagesDir,
					circlesDir,
					squaresDir,
					oneCircleFile,
					twoCircleFile,
					oneSquareFile,
				},
			},
		},
	})
	r := reconciler.New(d.registry)
	status := r.Reconcile(context.Background(), d.currentState, d.intendedState)
	if status.Err != nil {
		log.Fatalf("State reconciliation failed: %v", status.Err)
	}
	// Inform the user.
	d.printReport("Applied the intended state:")
	fmt.Println(description)
	d.printReport("Visualization of the graph with the current state: %s ",
		gvRedirectURL+gvCurrentState)
	d.printReport("Visualization of the graph with the intended state: %s ",
		gvRedirectURL+gvIntendedState)
	d.printReport("Visualization of the merged current and the intended state: %s ",
		gvRedirectURL+gvMergedState)
	d.printReport("Verify the content of %s and press ENTER to continue", rootDirname)
	_, _ = fmt.Scanln()
	// 2. Next intended state of the directory content.
	// Now we want all svg images to be directly under svg-images.
	// Script ls.sh should no longer exist. Script hello-world.sh has modified content.
	// Directory with text files should now contain two files.
	// Reconciler will perform create/modify/delete operations to get from the current
	// state to the new intended state.
	description = fmt.Sprintf(`%s
├── svg-images
│   ├── one-circle.svg (moved)
│   ├── two-circles.svg (moved)
│   └── one-square.svg (moved)
├── scripts
│   └── hello-world.sh (modified to German language)
└── text-files
    ├── empty-file.txt (new)
    └── sample-file.txt (new)
`, rootDirname)
	oneCircleFile.parentDir = &svgImagesDir
	twoCircleFile.parentDir = &svgImagesDir
	oneSquareFile.parentDir = &svgImagesDir
	helloWorldFile.content = d.shellScript("echo 'Hallo Welt!'")
	emptyFile := file{id: newFileID(), filename: "empty-file.txt",
		content: "", permissions: 0644, parentDir: &textFilesDir}
	sampleFile := file{id: newFileID(), filename: "sample-file.txt",
		content: "sample", permissions: 0644, parentDir: &textFilesDir}
	d.intendedState = depgraph.New(depgraph.InitArgs{
		Name: graphName,
		Items: []depgraph.Item{
			rootDir,
			scriptsDir,
			textFilesDir,
			helloWorldFile,
			emptyFile,
			sampleFile,
		},
		Subgraphs: []depgraph.InitArgs{
			{
				Name: "svg-images",
				Description: "All SVG images",
				Items: []depgraph.Item{
					svgImagesDir,
					oneCircleFile,
					twoCircleFile,
					oneSquareFile,
				},
			},
		},
	})
	r = reconciler.New(d.registry)
	status = r.Reconcile(context.Background(), d.currentState, d.intendedState)
	if status.Err != nil {
		log.Fatalf("State reconciliation failed: %v", status.Err)
	}
	// Inform the user.
	d.printReport("Applied the intended state:")
	fmt.Println(description)
	d.printReport("Visualization of the graph with the current state: %s ",
		gvRedirectURL+gvCurrentState)
	d.printReport("Visualization of the graph with the intended state: %s ",
		gvRedirectURL+gvIntendedState)
	d.printReport("Visualization of the merged current and the intended state: %s ",
		gvRedirectURL+gvMergedState)
	d.printReport("Verify the content of %s and press ENTER to continue", rootDirname)
	_, _ = fmt.Scanln()
	// 3. Finally, remove the root from the graph of the current state (even before it is
	// actually removed). Since everything either directly or transitively depends on it,
	// all files and directories will be removed by Reconciler.
	d.currentState.DelItem(depgraph.Reference(rootDir))
	r = reconciler.New(d.registry)
	status = r.Reconcile(context.Background(), d.currentState, d.intendedState)
	if status.Err != nil {
		log.Fatalf("State reconciliation failed: %v", status.Err)
	}
	// Inform the user.
	d.printReport("Removed root from the current-state graph.")
	d.printReport("All files and directories under %s should be removed "+
		"by the Reconciler.", rootDirname)
	d.printReport("Visualization of the graph with the current state: %s ",
		gvRedirectURL+gvCurrentState)
	d.printReport("Visualization of the graph with the intended state: %s ",
		gvRedirectURL+gvIntendedState)
	d.printReport("Visualization of the merged current and the intended state: %s ",
		gvRedirectURL+gvMergedState)
	d.printReport("Verify the content of %s and press ENTER to continue", rootDirname)
	_, _ = fmt.Scanln()
}
// main wires up the demo and runs it.
func main() {
	demoApp := &demo{}
	demoApp.init()
	demoApp.run()
}
|
package wps
import (
`encoding/json`
)
const (
	// FileTypeUpload marks a file upload.
	FileTypeUpload FileType = "UPLOAD"
	// FileTypeDownload marks a file download.
	FileTypeDownload FileType = "DOWNLOAD"
	// FileTypePreview marks a file preview.
	FileTypePreview FileType = "PREVIEW"
)
type (
	// FileType is the kind of file operation being performed.
	FileType string
	// FileExistReq is the request asking whether a document exists.
	FileExistReq struct {
		// File ID.
		Id string `json:"id"`
		// File (operation) type.
		Type FileType `json:"type"`
	}
	// FileExistRsp is the response reporting whether a document exists.
	FileExistRsp struct {
		BaseRsp
		// Response payload.
		Data struct {
			// Whether the file exists.
			ExistsFile bool `json:"existsFile"`
		} `json:"data"`
	}
)
// String renders the request as indented JSON, for logging.
func (fer FileExistReq) String() string {
	data, _ := json.MarshalIndent(fer, "", " ")
	return string(data)
}
// String renders the response as indented JSON, for logging.
// (Receiver renamed from the misleading "fer", which suggested the
// request type.)
func (rsp FileExistRsp) String() string {
	data, _ := json.MarshalIndent(rsp, "", " ")
	return string(data)
}
|
package laserframework
import (
"net/http"
"time"
)
// Service describes a licensed product service together with its
// validity window.
type Service struct {
	ProductID int `json:"productid"`
	ProductName string `json:"productname"`
	ServiceID int `json:"serviceid"`
	ServiceName string `json:"servicename"`
	ServiceType string `json:"servicetype"`
	ServiceCount int `json:"servicecount"`
	StartDate time.Time `json:"startdate"`
	EndDate time.Time `json:"enddate"`
}

// Account groups the services licensed to a single account.
type Account struct {
	AccountID int `json:"accountid"`
	AccountName string `json:"accountname"`
	Services []Service `json:"services"`
}

// LicenseFile is the serialized license blob for an account.
type LicenseFile struct {
	AccountId int `json:"accountid"`
	License string `json:"license"`
}

// AccountSvc provides license-file operations for accounts.
type AccountSvc struct{}
// CreateLicenseFile generates a new license file.
// TODO: implementation pending; currently returns an empty license.
// (Receiver renamed from the non-idiomatic "self".)
func (svc AccountSvc) CreateLicenseFile() (string, error) {
	return "", nil
}
// GetLicenseFile loads the license file for accountID from the database.
// TODO: implementation pending; currently returns an empty license.
func (svc AccountSvc) GetLicenseFile(accountID int) (string, error) {
	return "", nil
}
// ValidLicenseFile reports whether the given license blob is valid.
// TODO: implementation pending; currently always returns true.
func (svc AccountSvc) ValidLicenseFile(license string) bool {
	return true
}
|
package handler
import (
"context"
"fmt"
"github.com/jinmukeji/jiujiantang-services/service/auth"
proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/core/v1"
)
// SubmitRemark updates the remark attached to a measurement record.
//
// For non-WeChat access tokens it first verifies that the requesting
// user and the owner of the record belong to the same organization.
//
// NOTE(review): every datastore lookup below discards its error; on a
// failed lookup the zero/nil result is dereferenced (e.g. u.UserID),
// which can panic. Confirm whether these errors should be propagated.
func (j *JinmuHealth) SubmitRemark(ctx context.Context, req *proto.SubmitRemarkRequest, resp *proto.SubmitRemarkResponse) error {
	accessTokenType, _ := auth.AccessTokenTypeFromContext(ctx)
	if accessTokenType != AccessTokenTypeWeChatValue {
		var organizationID int
		// Check that the caller and the user resolved from the record ID
		// are in the same organization.
		userID, _ := j.datastore.GetUserIDByRecordID(ctx, req.RecordId)
		userId := int(req.UserId)
		// A UserId of -1 means "resolve the user by username instead".
		if req.UserId == -1 {
			u, _ := j.datastore.FindUserByUsername(ctx, req.Username)
			userId = u.UserID
		}
		o, _ := j.datastore.FindOrganizationByUserID(ctx, userId)
		organizationID = o.OrganizationID
		organization, _ := j.datastore.FindOrganizationByUserID(ctx, int(userID))
		if organization.OrganizationID != organizationID {
			return NewError(ErrNoPermissionSubmitRemark, fmt.Errorf("user %d has no permission to submit remark to record %d", req.UserId, req.RecordId))
		}
	}
	if err := j.datastore.UpdateRemarkByRecordID(ctx, int(req.RecordId), req.Remark); err != nil {
		return NewError(ErrDatabase, fmt.Errorf("failed to update remark by record %d: %s", req.RecordId, err.Error()))
	}
	return nil
}
|
package onnx
import (
fmt "fmt"
"strings"
)
// fixNames assigns a deterministic name to every graph node that lacks
// one: "<optype>_<n>" (lower-cased op type, n-th occurrence), or
// "<optype><n>_<group>" when the node carries a "group" attribute.
// Occurrence counting includes already-named nodes, preserving the
// original numbering behaviour.
func (model *ModelProto) fixNames() {
	layerTypeOccurrences := map[string]int{}
	graph := model.GetGraph()
	for _, n := range graph.Node {
		// A missing key reads as 0, so the original's explicit
		// "initialize to 0 if absent" check was redundant.
		layerTypeOccurrences[n.OpType]++
		if n.GetName() != "" {
			continue
		}
		layerName := strings.ToLower(n.OpType)
		layerOccurence := layerTypeOccurrences[n.OpType]
		// Pick up the last "group" attribute, if any (-1 means none).
		group := -1
		for _, attr := range n.Attribute {
			if attr.Name == "group" {
				group = int(attr.I)
			}
		}
		if group == -1 {
			n.Name = fmt.Sprintf("%s_%d", layerName, layerOccurence)
		} else {
			n.Name = fmt.Sprintf("%s%d_%d", layerName, layerOccurence, group)
		}
	}
}
// Fix applies post-load normalizations to the model; currently this only
// fills in missing node names.
func (model *ModelProto) Fix() {
	model.fixNames()
}
|
package fetcher
import (
"databaseConn"
"models"
"net/http"
"io/ioutil"
log "log"
"encoding/json"
"regexp"
"sync"
)
// threshold is how many of each subreddit's top posts are processed per run.
var threshold int = 5

// Posts mirrors the slice of reddit's listing JSON we consume:
// Data.Children[].Data carries the actual post payload.
type Posts struct {
	Data struct{
		Children []struct{
			Kind string
			Data models.Post
		}
	}
}

// wg waits for all spawned image-download goroutines before Execute returns.
var wg sync.WaitGroup
// Execute fetches the top daily posts for every stored subreddit,
// persists new posts, and downloads linked images concurrently.
func Execute() {
	db := databaseConn.DB{}.GetDB()
	subreddits := []models.Subreddit{}
	db.Find(&subreddits)
	for _, subreddit := range subreddits {
		subredditUri := subreddit.Subreddit
		url := "https://www.reddit.com/r/" + subredditUri + "/top.json?sort=top&t=day"
		posts := fetchUrl(url, &Posts{})
		// Re-load the record so we have its primary key. (Renamed from
		// the original, which shadowed the loop variable "subreddit".)
		var stored = models.Subreddit{}
		db.Where("subreddit = ?", subredditUri).First(&stored)
		// Guard against listings shorter than the threshold to avoid an
		// index-out-of-range panic on posts.Data.Children[x].
		limit := threshold
		if len(posts.Data.Children) < limit {
			limit = len(posts.Data.Children)
		}
		for x := 0; x < limit; x++ {
			post := posts.Data.Children[x].Data
			post.SubredditID = stored.ID
			var Post = new(models.Post)
			db.FirstOrInit(Post, post)
			db.Create(&Post)
			// Only fetch posts that link directly to an image.
			matches, _ := regexp.MatchString("jpg|gifv|gif|png", Post.Url)
			if matches {
				wg.Add(1)
				go fetchImage(*Post, &wg)
			}
		}
	}
	wg.Wait()
}
// fetchUrl GETs url with a bot User-Agent and unmarshals the JSON body
// into posts, which is also returned for convenience.
//
// NOTE(review): errors are fatal (log.Fatalln/panic), which is only
// acceptable because this runs as a one-shot batch job; a library-grade
// version should return them instead.
func fetchUrl(url string, posts *Posts) *Posts {
	client := &http.Client{}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		log.Fatalln(err)
	}
	// Reddit throttles/rejects default Go user agents, so identify as a bot.
	req.Header.Set("User-Agent", "Golang_Spider_Bot/3.0")
	resp, err := client.Do(req)
	if err != nil {
		log.Fatalln(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalln(err)
	}
	// body is already a []byte and posts is already a pointer; the
	// original's []byte(body) conversion and &posts (a **Posts) were
	// redundant.
	if err := json.Unmarshal(body, posts); err != nil {
		panic(err)
	}
	return posts
}
|
package main
import (
"bufio"
"fmt"
"math/big"
"os"
)
// main reads four arbitrary-precision integers from stdin and prints
// floor(sqrt(min(a1, a2) + min(a3, a4))).
func main() {
	reader := bufio.NewReader(os.Stdin)

	var a1, a2, a3, a4 big.Int
	fmt.Fscan(reader, &a1)
	fmt.Fscan(reader, &a2)
	fmt.Fscan(reader, &a3)
	fmt.Fscan(reader, &a4)

	// Work through pointers: the math/big docs state a big.Int must not
	// be copied (the original copied values, silently aliasing the
	// internal word slice).
	min1 := &a1
	if a1.Cmp(&a2) > 0 {
		min1 = &a2
	}
	min2 := &a3
	if a3.Cmp(&a4) > 0 {
		min2 = &a4
	}

	ret := new(big.Int).Add(min1, min2)
	fmt.Println(ret.Sqrt(ret))
}
|
package nmxutil
import (
"sync"
)
// SRWaiter represents one caller blocked in Acquire: the channel it
// blocks on and the token identifying it (for StopWaiting).
type SRWaiter struct {
	c chan error
	token interface{}
}

// SingleResource serializes access to a single shared resource: one
// holder at a time, with a FIFO queue of blocked acquirers.
type SingleResource struct {
	// acquired is true while some caller holds the resource.
	acquired bool
	// waitQueue holds blocked acquirers in arrival order.
	waitQueue []SRWaiter
	mtx sync.Mutex
}
// NewSingleResource returns a ready-to-use, unacquired SingleResource
// (the zero value is valid).
func NewSingleResource() SingleResource {
	return SingleResource{}
}
// Acquire takes ownership of the resource. If it is free, Acquire
// returns immediately; otherwise the caller is queued (identified by
// token) and blocks until Release hands the resource over, or
// StopWaiting/Abort fails the wait with an error.
func (s *SingleResource) Acquire(token interface{}) error {
	s.mtx.Lock()
	if !s.acquired {
		s.acquired = true
		s.mtx.Unlock()
		return nil
	}
	// XXX: Verify no duplicates.
	w := SRWaiter{
		c: make(chan error),
		token: token,
	}
	s.waitQueue = append(s.waitQueue, w)
	s.mtx.Unlock()
	// Block until Release (nil) or StopWaiting/Abort (error) signals us.
	err := <-w.c
	if err != nil {
		return err
	}
	return nil
}
// Release frees the resource. If any callers are blocked in Acquire, the
// longest-waiting one is handed the resource; otherwise the resource is
// marked free. Panics if the resource is not currently acquired.
func (s *SingleResource) Release() {
	s.mtx.Lock()

	if !s.acquired {
		// Unlock before panicking so a recovering caller is not left
		// holding the mutex. (The original unlocked AFTER the panic,
		// i.e. unreachable code flagged by go vet.)
		s.mtx.Unlock()
		panic("SingleResource release without acquire")
	}

	if len(s.waitQueue) == 0 {
		s.acquired = false
		s.mtx.Unlock()
		return
	}

	// Hand ownership to the oldest waiter; it stays "acquired".
	w := s.waitQueue[0]
	s.waitQueue = s.waitQueue[1:]
	s.mtx.Unlock()

	w.c <- nil
}
// StopWaiting aborts the pending Acquire identified by token, delivering
// err to it. The waiter is also removed from the queue (the original
// left it queued, so a later Release would send on its abandoned,
// receiverless channel and block forever).
func (s *SingleResource) StopWaiting(token interface{}, err error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	for i, w := range s.waitQueue {
		if w.token == token {
			s.waitQueue = append(s.waitQueue[:i], s.waitQueue[i+1:]...)
			w.c <- err
			return
		}
	}
}
// Abort fails every queued waiter with err and empties the wait queue.
func (s *SingleResource) Abort(err error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	for _, waiter := range s.waitQueue {
		waiter.c <- err
	}
	s.waitQueue = nil
}
|
package server
import (
"fmt"
)
func Init(port int) {
r := NewRouter()
r.Run(fmt.Sprintf(":%d", port))
} |
package bst
import "errors"
// Insert adds a new node holding data to the tree, creating the root
// when the tree is empty.
func (t *Tree) Insert(data int) error {
	if t.Root != nil {
		return t.Root.insertNode(data)
	}
	t.Root = NewNode(data)
	return nil
}
// insertNode recursively places data in the subtree rooted at n: smaller
// values go left, larger go right, and duplicates are silently ignored.
// (The previous comment misnamed the function and had a typo, "treer".)
func (n *Node) insertNode(data int) error {
	// Calling a method on a nil *Node receiver is legal in Go, so this
	// guard is reachable; it turns the misuse into an error.
	if n == nil {
		return errors.New("Cannot insert into a nil tree")
	}
	switch {
	case data < n.Data:
		if n.Left == nil {
			n.Left = NewNode(data)
			return nil
		}
		return n.Left.insertNode(data)
	case data > n.Data:
		if n.Right == nil {
			n.Right = NewNode(data)
			return nil
		}
		return n.Right.insertNode(data)
	default:
		// Duplicate value: nothing to do.
		return nil
	}
}
|
// Package config is used for storing and manipulating the plumber config.
// There should be, at most, a single instance of the plumber config that is
// passed around between various components.
//
// If running in cluster mode, config will write the config to NATS. If running
// locally, the config will be saved to ~/.batchsh/plumber.json
package config
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"sync"
"github.com/Masterminds/semver"
"github.com/imdario/mergo"
"github.com/nats-io/nats.go"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
"github.com/batchcorp/plumber/kv"
"github.com/batchcorp/plumber/options"
stypes "github.com/batchcorp/plumber/server/types"
)
const (
	// ConfigDir is the directory under $HOME where plumber keeps local state.
	ConfigDir = ".batchsh"
	// ConfigFilename is the on-disk config file name.
	ConfigFilename = "plumber.json"
	// KVConfigBucket/KVConfigKey locate the config in the KV store (cluster mode).
	KVConfigBucket = "plumber"
	KVConfigKey = "persistent-config"
)
// Config stores Account IDs and the auth_token cookie, plus all
// user-created connections, relays and tunnels. Fields tagged "-" are
// runtime-only and never serialized.
type Config struct {
	ClusterID string `json:"-"` // This comes from an environment variable
	PlumberID string `json:"plumber_id"`
	Token string `json:"token"`
	TeamID string `json:"team_id"`
	UserID string `json:"user_id"`
	EnableTelemetry bool `json:"enable_telemetry"`
	// LastVersion is the plumber version that last wrote this config;
	// used to decide whether to re-run interactive configuration.
	LastVersion string `json:"last_version"`
	Connections map[string]*stypes.Connection `json:"connections"`
	Relays map[string]*stypes.Relay `json:"relays"`
	Tunnels map[string]*stypes.Tunnel `json:"tunnels"`
	ConnectionsMutex *sync.RWMutex `json:"-"`
	RelaysMutex *sync.RWMutex `json:"-"`
	TunnelsMutex *sync.RWMutex `json:"-"`
	// enableCluster selects KV-store persistence over on-disk persistence.
	enableCluster bool
	kv kv.IKV
	log *logrus.Entry
}
// New will attempt to fetch and return an existing config from either NATS or
// the local disk. If neither are available, it will return a new config.
func New(enableCluster bool, k kv.IKV) (*Config, error) {
	var cfg *Config
	var err error
	// Post-processing applied to whichever config we end up returning.
	defer func() {
		// When config is nil and an error is returned from New(),
		// allow this defer to exit without panicking
		if cfg == nil {
			return
		}
		cfg.LastVersion = options.VERSION
		// Old versions of plumber incorrectly defaulted plumber id to plumber1;
		// everyone should have a unique plumber id
		if cfg.PlumberID == "plumber1" {
			cfg.PlumberID = getPlumberID()
		}
	}()
	if enableCluster {
		if k == nil {
			return nil, errors.New("key value store not initialized - are you running in server mode?")
		}
		cfg, err = fetchConfigFromKV(k)
		if err != nil {
			// First boot of a cluster: the bucket/key doesn't exist yet.
			if err == nats.ErrBucketNotFound || err == nats.ErrKeyNotFound {
				return newConfig(enableCluster, k), nil
			}
			return nil, errors.Wrap(err, "unable to fetch config from kv")
		}
	}
	// Not in cluster mode - attempt to read config from disk
	if cfg == nil && exists(ConfigFilename) {
		cfg, err = fetchConfigFromFile(ConfigFilename)
		if err != nil {
			// Non-fatal: fall through to creating a fresh config below.
			logrus.Errorf("unable to load config: %s", err)
		}
	}
	// Cleanup old config file (if exists)
	if exists("config.json") {
		if err := remove("config.json"); err != nil {
			logrus.Warningf("unable to remove old config file: %s", err)
		}
	}
	var initialRun bool
	if cfg == nil {
		initialRun = true
		cfg = newConfig(false, nil)
	}
	// Should we perform an interactive config?
	if requireReconfig(initialRun, cfg) {
		cfg.Configure()
	}
	return cfg, nil
}
// requireReconfig decides whether the interactive configuration flow
// should run: never on non-terminals or unparseable current versions;
// always on first run, missing/unparseable last version, or a minor
// version change.
func requireReconfig(initialRun bool, cfg *Config) bool {
	// Interactive config only makes sense when attached to a terminal.
	if !terminal.IsTerminal(int(os.Stderr.Fd())) {
		logrus.Debugf("detected non-terminal output")
		return false
	}

	// Defensive: should never happen, but avoid a nil dereference.
	if cfg == nil {
		logrus.Warningf("bug? cfg is nil in requireReconfig")
		return false
	}

	// Without a parseable current version we cannot compare anything.
	currentVersion, err := semver.NewVersion(options.VERSION)
	if err != nil {
		logrus.Warningf("unable to parse current version: %s", err)
		return false
	}

	// Brand new config, or one written before LastVersion existed.
	if initialRun || cfg.LastVersion == "" {
		return true
	}

	// An unparseable last version implies a very old plumber: reconfigure.
	lastVersion, err := semver.NewVersion(cfg.LastVersion)
	if err != nil {
		logrus.Warningf("unable to parse last version: %s", err)
		return true
	}

	return currentVersion.Minor() != lastVersion.Minor()
}
// Configure runs the interactive setup. Telemetry consent is only asked
// for when it is not already enabled.
func (c *Config) Configure() {
	if c.EnableTelemetry {
		return
	}
	c.askTelemetry()
}
// askTelemetry prints the telemetry disclosure and records the user's
// yes/no answer (default no) in c.EnableTelemetry.
func (c *Config) askTelemetry() {
	telemetryDescription := `If telemetry is enabled, plumber will collect the following anonymous telemetry data:
> General
- PlumberID (a unique, randomly generated ID for this plumber instance)
- Plumber version
- OS and architecture
- Plumber mode (server or CLI)
> For CLI
- Plumber action (read, write, relay, etc.)
- Backend used (kafka, rabbitmq, nats, etc.)
- Data format used for read or write (json, protobuf, etc.)
- If reading, whether continuous mode is used
- If using protobuf, whether file descriptors are used
> For server
- Number of connections, relays, tunnels
- Server uptime
- ClusterID
- gRPC methods used (create relay, stop tunnel, etc.)
NOTE: We do NOT collect ANY personally identifiable or confidential information.
You can read this statement here: https://docs.streamdal.com/plumber/telemetry
`
	fmt.Printf(telemetryDescription + "\n")
	enableTelemetry, err := askYesNo("Do you want to enable telemetry?", "N")
	if err != nil {
		c.log.Fatalf("unable to configure plumber: %s", err)
	}
	if enableTelemetry {
		fmt.Printf("\nNICE! Thank you for opting in! This will help us improve plumber :)\n\n")
	}
	c.EnableTelemetry = enableTelemetry
}
// askYesNo prompts with question and reads a y/n answer from stdin. An
// empty answer selects defaultAnswer (when non-empty); invalid input
// re-prompts in a loop (the original recursed).
func askYesNo(question, defaultAnswer string) (bool, error) {
	for {
		// Use an explicit format string so '%' characters inside question
		// cannot corrupt the prompt (the original passed question itself
		// as the Printf format string — a go vet printf finding).
		if defaultAnswer != "" {
			fmt.Printf("%s [y/n (default: %s)]: ", question, defaultAnswer)
		} else {
			fmt.Print(question + " [y/n]: ")
		}

		var answer string
		i, err := fmt.Scanln(&answer)
		// Scan() doesn't return on only newline and empty string
		// Scanln() will error on only new line and empty string
		if err != nil && !strings.Contains(err.Error(), "unexpected newline") {
			return false, fmt.Errorf("unable to read input: %s", err)
		}
		if i == 0 && defaultAnswer != "" {
			answer = defaultAnswer
		}

		switch strings.ToLower(answer) {
		case "y", "yes":
			return true, nil
		case "n", "no":
			return false, nil
		}
		fmt.Println("invalid input")
	}
}
// newConfig builds a fresh Config with a random PlumberID, initialized
// maps/mutexes, and the given persistence settings.
func newConfig(enableCluster bool, k kv.IKV) *Config {
	cfg := &Config{
		PlumberID:        getPlumberID(),
		Connections:      map[string]*stypes.Connection{},
		Relays:           map[string]*stypes.Relay{},
		Tunnels:          map[string]*stypes.Tunnel{},
		ConnectionsMutex: &sync.RWMutex{},
		RelaysMutex:      &sync.RWMutex{},
		TunnelsMutex:     &sync.RWMutex{},
	}
	cfg.kv = k
	cfg.enableCluster = enableCluster
	cfg.log = logrus.WithField("pkg", "config")
	return cfg
}
// getPlumberID generates a random (v4) UUID used to uniquely identify
// this plumber instance.
func getPlumberID() string {
	return uuid.NewV4().String()
}
// Save persists the config as indented JSON to the KV store (cluster
// mode) or to disk.
func (c *Config) Save() error {
	data, marshalErr := json.MarshalIndent(c, "", "\t")
	if marshalErr != nil {
		return errors.Wrap(marshalErr, "unable to marshal config to JSON")
	}

	writeErr := c.writeConfig(data)
	if writeErr == nil {
		return nil
	}
	c.log.Errorf("unable to save config: %s", writeErr)
	return errors.Wrap(writeErr, "unable to save config")
}
// fetchConfigFromKV loads and unmarshals the persisted config from the
// KV store, re-attaching the runtime-only fields (kv handle, logger,
// cluster flag) that are never serialized.
func fetchConfigFromKV(k kv.IKV) (*Config, error) {
	data, err := k.Get(context.Background(), KVConfigBucket, KVConfigKey)
	if err != nil {
		return nil, err
	}

	cfg, err := readConfigBytes(data)
	if err != nil {
		return nil, errors.Wrap(err, "unable to unmarshal config from KV")
	}

	cfg.enableCluster = true
	cfg.kv = k
	cfg.log = logrus.WithField("pkg", "config")

	return cfg, nil
}
// fetchConfigFromFile reads fileName from the plumber config directory
// (~/.batchsh) and decodes it into a Config.
func fetchConfigFromFile(fileName string) (*Config, error) {
	f, err := getConfigJson(fileName)
	if err != nil {
		return nil, errors.Wrapf(err, "could not read ~/.batchsh/%s", fileName)
	}
	defer f.Close()

	contents, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, errors.Wrapf(err, "could not read ~/.batchsh/%s", fileName)
	}

	cfg, err := readConfigBytes(contents)
	if err != nil {
		return nil, errors.Wrap(err, "could not read config bytes")
	}
	cfg.log = logrus.WithField("pkg", "config")

	return cfg, nil
}
// readConfigBytes unmarshals raw config JSON into a Config with all
// mutexes and maps pre-initialized.
func readConfigBytes(data []byte) (*Config, error) {
	// Hack: handle flag name change in marshaled data when upgrading a
	// plumber cluster. Some flags were renamed in v2, e.g.
	// BatchshGRPCCollectorAddress -> StreamdalGRPCCollectorAddress.
	tmp := string(data)
	if strings.Contains(tmp, "\"Batchsh") {
		// (Removed a leftover debug `println(tmp)` that dumped the whole
		// config — including tokens — to stderr.)
		data = []byte(strings.Replace(tmp, "\"Batchsh", "\"Streamdal", -1))
	}

	cfg := &Config{
		ConnectionsMutex: &sync.RWMutex{},
		RelaysMutex:      &sync.RWMutex{},
		TunnelsMutex:     &sync.RWMutex{},
		Connections:      make(map[string]*stypes.Connection),
		Relays:           make(map[string]*stypes.Relay),
		Tunnels:          make(map[string]*stypes.Tunnel),
	}

	if err := json.Unmarshal(data, cfg); err != nil {
		return nil, errors.Wrapf(err, "could not unmarshal ~/.batchsh/%s", ConfigFilename)
	}

	return cfg, nil
}
// exists determines whether the named config file is present in the
// config directory. Any error locating the directory counts as "absent".
func exists(fileName string) bool {
	dir, err := getConfigDir()
	if err != nil {
		return false
	}
	_, statErr := os.Stat(path.Join(dir, fileName))
	return !os.IsNotExist(statErr)
}
// remove deletes the named config file from the config directory; a file
// that is already absent is not an error.
func remove(fileName string) error {
	dir, err := getConfigDir()
	if err != nil {
		return err
	}
	target := path.Join(dir, fileName)
	if _, statErr := os.Stat(target); os.IsNotExist(statErr) {
		// Nothing to remove.
		return nil
	}
	return os.Remove(target)
}
// writeConfig persists raw config bytes: to the KV store when clustering
// is enabled, otherwise to ~/.batchsh/config.json on local disk (creating
// the directory with 0700 if needed).
func (c *Config) writeConfig(data []byte) error {
	if c.enableCluster {
		if err := c.kv.Put(context.Background(), KVConfigBucket, KVConfigKey, data); err != nil {
			c.log.Errorf("unable to write config to KV: %v", err)
			return errors.Wrap(err, "unable to write config to KV")
		}
		return nil
	}
	// Clustering not enabled - write to disk
	configDir, err := getConfigDir()
	if err != nil {
		c.log.Errorf("unable to determine config dir: %v", err)
		return errors.Wrap(err, "unable to determine config dir")
	}
	// Create dir if needed
	if _, err := os.Stat(configDir); os.IsNotExist(err) {
		if err := os.Mkdir(configDir, 0700); err != nil {
			c.log.Errorf("unable to create config directory '%s': %v", configDir, err)
			return errors.Wrapf(err, "unable to create config directory %s", configDir)
		}
	}
	configPath := path.Join(configDir, ConfigFilename)
	// O_TRUNC: the file is fully rewritten on every save.
	f, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		c.log.Errorf("failed to open file '%s' for write: %v", configPath, err)
		return err
	}
	defer f.Close()
	_, err = f.Write(data)
	return err
}
// Update merges the given config on top of the receiver (mergo fills only
// empty destination fields by default) and persists the merged result.
func (c *Config) Update(cfg *Config) error {
	if cfg == nil {
		return errors.New("config cannot be nil")
	}
	if err := mergo.Merge(c, cfg); err != nil {
		return errors.Wrap(err, "unable to merge configs")
	}
	if err := c.Save(); err != nil {
		return errors.Wrap(err, "unable to save merged config")
	}
	return nil
}
// getConfigJson attempts to read a user's .batchsh/<fileName> file; if it
// doesn't exist, it creates an empty JSON config ("{}") and returns a
// read-only handle to it.
func getConfigJson(fileName string) (*os.File, error) {
	configDir, err := getConfigDir()
	if err != nil {
		return nil, err
	}
	configPath := path.Join(configDir, fileName)
	if _, err := os.Stat(configPath); os.IsNotExist(err) {
		// Config file doesn't exist yet. Ensure the *directory* exists.
		// BUG FIX: this previously called os.Mkdir(configPath, ...), which
		// created a directory at the file's own path and made the
		// subsequent file creation fail.
		if _, err := os.Stat(configDir); os.IsNotExist(err) {
			if err := os.Mkdir(configDir, 0700); err != nil {
				return nil, errors.Wrapf(err, "unable to create config directory %s", configDir)
			}
		}
		// Seed ~/.batchsh/<fileName> with an empty JSON object.
		f, err := os.Create(configPath)
		if err != nil {
			return nil, err
		}
		if _, err := f.WriteString("{}"); err != nil {
			f.Close()
			return nil, errors.Wrapf(err, "unable to seed config file %s", configPath)
		}
		// Close the write handle; we re-open read-only below (the original
		// leaked this handle).
		f.Close()
	}
	// Config exists, open it.
	return os.Open(configPath)
}
// getConfigDir returns the directory where the batch configuration is
// stored: <user home>/<ConfigDir>.
func getConfigDir() (string, error) {
	// Get user's home directory
	homeDir, err := os.UserHomeDir()
	if err != nil {
		return "", errors.Wrap(err, "unable to locate user's home directory")
	}
	return path.Join(homeDir, ConfigDir), nil
}
// GetRelay returns a relay from the in-memory map, or nil when no relay
// with the given ID exists.
func (c *Config) GetRelay(relayID string) *stypes.Relay {
	c.RelaysMutex.RLock()
	defer c.RelaysMutex.RUnlock()
	// A missing key yields the zero value (nil); the previous two-value
	// read with a discarded ok was redundant (staticcheck S1005).
	return c.Relays[relayID]
}
// SetRelay saves a relay to the in-memory map, lazily initializing the
// map so a zero-value Config does not panic.
func (c *Config) SetRelay(relayID string, relay *stypes.Relay) {
	c.RelaysMutex.Lock()
	// defer keeps the unlock on every path, consistent with the other
	// setters/getters in this file.
	defer c.RelaysMutex.Unlock()
	if c.Relays == nil {
		c.Relays = make(map[string]*stypes.Relay)
	}
	c.Relays[relayID] = relay
}
// DeleteRelay removes a relay from the in-memory map; deleting an unknown
// ID is a no-op.
func (c *Config) DeleteRelay(relayID string) {
	c.RelaysMutex.Lock()
	delete(c.Relays, relayID)
	c.RelaysMutex.Unlock()
}
// GetConnection retrieves a connection from the in-memory map, or nil
// when no connection with the given ID exists.
func (c *Config) GetConnection(connID string) *stypes.Connection {
	c.ConnectionsMutex.RLock()
	defer c.ConnectionsMutex.RUnlock()
	// Missing keys yield nil; the discarded-ok form was redundant (S1005).
	return c.Connections[connID]
}
// SetConnection saves a connection to the in-memory map, lazily
// initializing the map so a zero-value Config does not panic (consistent
// with SetRelay and SetTunnel; writing to a nil map panics).
func (c *Config) SetConnection(connID string, conn *stypes.Connection) {
	c.ConnectionsMutex.Lock()
	defer c.ConnectionsMutex.Unlock()
	if c.Connections == nil {
		c.Connections = make(map[string]*stypes.Connection)
	}
	c.Connections[connID] = conn
}
// DeleteConnection removes a connection from the in-memory map; unknown
// IDs are a no-op.
func (c *Config) DeleteConnection(connID string) {
	c.ConnectionsMutex.Lock()
	delete(c.Connections, connID)
	c.ConnectionsMutex.Unlock()
}
// GetTunnel returns an in-progress read from the Tunnels map, or nil when
// no tunnel with the given ID exists.
func (c *Config) GetTunnel(tunnelID string) *stypes.Tunnel {
	c.TunnelsMutex.RLock()
	defer c.TunnelsMutex.RUnlock()
	// Missing keys yield nil; the discarded-ok form was redundant (S1005).
	return c.Tunnels[tunnelID]
}
// SetTunnel adds an in-progress read to the Tunnels map, creating the map
// on first use.
func (c *Config) SetTunnel(tunnelID string, tunnel *stypes.Tunnel) {
	c.TunnelsMutex.Lock()
	if c.Tunnels == nil {
		c.Tunnels = make(map[string]*stypes.Tunnel)
	}
	c.Tunnels[tunnelID] = tunnel
	c.TunnelsMutex.Unlock()
}
// DeleteTunnel removes a tunnel from the in-memory map; unknown IDs are a
// no-op.
func (c *Config) DeleteTunnel(tunnelID string) {
	c.TunnelsMutex.Lock()
	delete(c.Tunnels, tunnelID)
	c.TunnelsMutex.Unlock()
}
|
package main
import (
"flag"
"os"
"fmt"
"stfl"
"bufio"
"time"
"exec"
"strconv"
)
// ServerInfo holds the IRC connection parameters collected from the
// command-line flags.
type ServerInfo struct {
	Server string // IRC server hostname
	Port   int    // IRC server port (default 6666)
	Nick   string // nick to connect with (defaults to $USER)
}
// init sets up the stfl terminal UI library before main runs.
func init() {
	stfl.Init()
}
// main parses flags, wires the view and window manager together via a
// buffered LineMsg channel, launches the external ii process, and runs
// the UI loop.
func main() {
	info := ServerInfo{ "", 6666, "" }
	flag.IntVar(&info.Port, "port", 6666, "IRC server port")
	flag.StringVar(&info.Server, "server", "", "IRC server hostname")
	flag.StringVar(&info.Nick, "nick", os.Getenv("USER"), "Your nick")
	flag.Parse()
	// -server is mandatory; usage() exits the process.
	if info.Server == "" {
		usage()
	}
	wmanchan := make(chan LineMsg, 16)
	v := CreateView(wmanchan, info)
	wman := CreateWindowManager(v, wmanchan)
	go wman.Run()
	startii(info, wman)
	v.Run()
}
// usage prints invocation help to stdout and terminates the program with
// exit status 1.
func usage() {
	fmt.Println("usage: uii -server <server> [-port <port>] [-nick <nick>]")
	os.Exit(1)
}
// startii locates the external "ii" IRC client on PATH, launches it with
// the configured server/port/nick (data under ./ii-data), and starts
// tailing its server output file into the window manager.
// NOTE(review): this uses the pre-Go1 exec.Run/exec.DevNull API — the
// whole file predates Go 1 and will not build with a modern toolchain.
func startii(info ServerInfo, wman *WindowManager) {
	ii_path, err := exec.LookPath("ii")
	if err != nil {
		fmt.Println("Error: couldn't find ii.")
		os.Exit(1)
	}
	// Spawn ii detached with stdin/stdout/stderr all connected to /dev/null.
	_, err = exec.Run(ii_path, []string{ "ii", "-i", "ii-data", "-s", info.Server, "-p", strconv.Itoa(info.Port), "-n", info.Nick }, []string{ }, ".", exec.DevNull, exec.DevNull, exec.DevNull)
	if err != nil {
		fmt.Printf("Running ii failed: %s\n", err)
		os.Exit(1)
	}
	// ii writes server messages to ii-data/<server>/out; tail it forever.
	go monitorFile("ii-data/" + info.Server + "/out", info.Server, wman)
}
// monitorFile tails the given ii output file forever, forwarding each
// complete line (newline stripped) to the window manager queue tagged
// with the channel name. It polls once per second both while waiting for
// the file to appear and while at EOF.
// NOTE(review): pre-Go1 APIs — 3-argument os.Open, os.EOF, and
// time.Sleep taking raw nanoseconds (1000000000 = 1s).
func monitorFile(filename string, ircchan string, wman *WindowManager) {
	var file *os.File
	// Wait (poll at 1s) until ii has created the output file.
	for {
		f, err := os.Open(filename, os.O_RDONLY, 0)
		if err != nil {
			time.Sleep(1000000000)
		} else {
			file = f
			break
		}
	}
	reader := bufio.NewReader(file)
	for {
		line, err := reader.ReadBytes('\n')
		// Only treat an error as "no data yet" when nothing was read;
		// a partial line plus error would otherwise be dropped.
		if err != nil && len(line) == 0 {
			time.Sleep(1000000000)
			if err == os.EOF {
				// XXX hack to reset EOF
				reader = bufio.NewReader(file)
			}
			continue
		}
		// Strip the trailing '\n' before queueing.
		msg := LineMsg{ string(line[:len(line)-1]), ircchan }
		wman.LineQueue <- msg
	}
}
// WriteLine appends a newline-terminated line to the named file (ii's
// input FIFO), best-effort: open and write errors are silently ignored.
// NOTE(review): uses the pre-Go1 3-argument os.Open API.
func WriteLine(filename string, line string) {
	f, err := os.Open(filename, os.O_WRONLY, 0644)
	if err != nil {
		return
	}
	f.Write([]byte(line + "\n"))
	f.Close()
}
|
package endpoint
import (
"context"
guest "github.com/angryronald/guestlist/internal/guest/application"
// httpResponse "github.com/angryronald/guestlist/lib/net/http"
"github.com/go-kit/kit/endpoint"
)
// CountEmptySeats builds a go-kit endpoint that returns the number of
// currently unoccupied seats from the guest application's query side.
func CountEmptySeats(application guest.Application) endpoint.Endpoint {
	return func(ctx context.Context, req interface{}) (res interface{}, err error) {
		res, err = application.Queries.CountEmptySeats.Execute(ctx)
		// I have provided the specific function to adjust the response structure
		// and adding response time in the metadata
		// return httpResponse.ResponseWithRequestTime(ctx, res, nil), err
		// Propagate the query error instead of discarding it — go-kit
		// transports rely on the endpoint's error return.
		return res, err
	}
}
|
package cmd
import (
"context"
"os"
stencilv1 "github.com/odpf/stencil/server/odpf/stencil/v1"
"github.com/spf13/cobra"
"google.golang.org/grpc"
)
// DownloadCmd creates a new cobra command for download descriptor
func DownloadCmd() *cobra.Command {
	var host, filePath string
	var req stencilv1.DownloadDescriptorRequest
	cmd := &cobra.Command{
		Use:   "download",
		Short: "Download filedescriptorset file",
		Args:  cobra.NoArgs,
		Annotations: map[string]string{
			"group:core": "true",
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			// Plaintext gRPC connection to the stencil server.
			conn, err := grpc.Dial(host, grpc.WithInsecure())
			if err != nil {
				return err
			}
			defer conn.Close()
			client := stencilv1.NewStencilServiceClient(conn)
			res, err := client.DownloadDescriptor(context.Background(), &req)
			if err != nil {
				return err
			}
			// NOTE(review): --output is not marked required; an empty
			// filePath makes WriteFile fail and the error is returned.
			err = os.WriteFile(filePath, res.Data, 0666)
			return err
		},
	}
	// Flag values are written straight into req where they map 1:1 onto
	// the DownloadDescriptorRequest proto fields.
	cmd.Flags().StringVar(&host, "host", "", "stencil host address eg: localhost:8000")
	cmd.MarkFlagRequired("host")
	cmd.Flags().StringVar(&req.Namespace, "namespace", "", "provide namespace/group or entity name")
	cmd.MarkFlagRequired("namespace")
	cmd.Flags().StringVar(&req.Name, "name", "", "provide proto repo name")
	cmd.MarkFlagRequired("name")
	cmd.Flags().StringVar(&req.Version, "version", "", "provide semantic version compatible value")
	cmd.MarkFlagRequired("version")
	cmd.Flags().StringVar(&filePath, "output", "", "write to file")
	cmd.Flags().StringSliceVar(&req.Fullnames, "fullnames", []string{}, "provide fully qualified proto full names. You can provide multiple names separated by \",\" Eg: google.protobuf.FileDescriptorProto,google.protobuf.FileDescriptorSet")
	return cmd
}
|
package fifth
import (
	"fmt"
	"strings"
)
// Word is a single Forth-style dictionary entry: either a primitive
// (implemented natively via PrimBody) or a colon definition whose Body is
// a list of other words to execute.
type Word struct {
	Name          string
	IsImmediate   bool // executed immediately even while compiling
	IsPrimitive   bool // true: run PrimBody; false: interpret Body
	IsCompileOnly bool
	PrimBody      PrimBody
	Body          []*Word
	pc            int // program counter
}
// PrimBody is the native Go implementation backing a primitive word.
type PrimBody func() error
// String renders the word: primitives as a one-line marker, colon
// definitions as ": name\n <body words> \n;", with an " immediate"
// suffix when applicable. Uses strings.Builder instead of repeated
// string concatenation.
func (w *Word) String() string {
	var b strings.Builder
	if w.IsPrimitive {
		fmt.Fprintf(&b, "primitive word: %q", w.Name)
	} else {
		fmt.Fprintf(&b, ": %s\n ", w.Name)
		for _, bw := range w.Body {
			b.WriteString(bw.Name + " ")
		}
		b.WriteString("\n;")
	}
	if w.IsImmediate {
		b.WriteString(" immediate")
	}
	return b.String()
}
// Compile appends a word to this word's body (colon-definition building).
func (w *Word) Compile(bw *Word) {
	w.Body = append(w.Body, bw)
}
|
package main
import (
"bytes"
"encoding/json"
"time"
"context"
"errors"
"io"
"net/http"
)
var (
schemaVersions = map[string]bool{"0.1": true}
modelVersions = map[string]bool{"1.0": true}
)
// requestError collects per-field validation failures for an incoming
// request; empty fields are omitted from the JSON rendering.
type requestError struct {
	SchemaVersionError string   `json:"schema_version_error,omitempty"`
	ModelVersionError  string   `json:"model_version_error,omitempty"`
	TimestampError     string   `json:"timestamp_error,omitempty"`
	DataError          string   `json:"data_error,omitempty"`
	UnknownFields      []string `json:"unknown_fields,omitempty"`
}
// Error implements the error interface by rendering the struct as JSON.
// NOTE: json.Encoder.Encode appends a trailing newline to the output; on
// encode failure a generic message is appended to whatever was buffered.
func (re *requestError) Error() string {
	var buf bytes.Buffer
	err := json.NewEncoder(&buf).Encode(re)
	if err != nil {
		buf.WriteString("Request doesn't comply with schema")
	}
	return buf.String()
}
// Request represents a user request. Data is kept as raw JSON so its
// decoding can be deferred to the model layer.
type Request struct {
	SchemaVersion string          `json:"schema_version"`
	ModelVersion  string          `json:"model_version"`
	Timestamp     time.Time       `json:"timestamp"`
	Data          json.RawMessage `json:"data"`
}
// DataString returns the raw request data payload as a string.
func (r *Request) DataString() string {
	return string(r.Data)
}
// AppError is the main app error type, pairing an underlying error with
// the HTTP status to respond with, an optional human-readable Reason, and
// an optional stack trace for diagnostics.
type AppError struct {
	Err        error
	StatusCode int
	Reason     string
	Stacktrace string
}
// Error returns the most specific description available: the explicit
// Reason first, then the wrapped error's message, and finally the generic
// HTTP status text for the status code.
func (ae *AppError) Error() string {
	switch {
	case ae.Reason != "":
		return ae.Reason
	case ae.Err != nil:
		return ae.Err.Error()
	default:
		return http.StatusText(ae.StatusCode)
	}
}
// Write writes the error to the http.ResponseWriter as a standard
// Response with status "error", using the AppError's status code.
func (ae *AppError) Write(ctx context.Context, w http.ResponseWriter) {
	resp := &Response{
		SchemaVersion: "0.1",
		ModelVersion:  "1.0",
		Timestamp:     time.Now().UTC(),
		Stacktrace:    ae.Stacktrace,
		Status:        "error",
		Reason:        ae.Error(),
	}
	// Status line must be written before the body below.
	w.WriteHeader(ae.StatusCode)
	resp.Write(ctx, w)
}
// CreateResponseFromRequest builds a Response that echoes the schema and
// model versions of the originating user request.
func CreateResponseFromRequest(r *Request) *Response {
	resp := &Response{}
	resp.SchemaVersion = r.SchemaVersion
	resp.ModelVersion = r.ModelVersion
	return resp
}
// Response is the main response object. The unexported err field carries
// an AppError for internal bookkeeping and is never serialized.
type Response struct {
	SchemaVersion string          `json:"schema_version"`
	ModelVersion  string          `json:"model_version"`
	Timestamp     time.Time       `json:"timestamp"`
	Status        string          `json:"status"`
	ExecutionTime time.Duration   `json:"execution_time,omitempty"`
	Data          json.RawMessage `json:"data,omitempty"`
	Reason        string          `json:"reason,omitempty"`
	Stacktrace    string          `json:"stacktrace,omitempty"`
	err           *AppError
}
// MarshalJSON implements custom json marshalling with logging.
// The local alias type drops Response's MarshalJSON method so the nested
// json.Marshal call cannot recurse. When an error is attached, the
// execution time and data payload are cleared (both omitempty, so they
// disappear from the output).
func (sr *Response) MarshalJSON() ([]byte, error) {
	type ResponseAlias Response
	if sr.err != nil {
		sr.ExecutionTime = 0
		sr.Data = []byte{}
	}
	resp, err := json.Marshal(&struct{ *ResponseAlias }{(*ResponseAlias)(sr)})
	if err != nil {
		// logger is a package-level logger declared elsewhere in this package.
		logger.Printf("Error encoding response: %s\n%s", err, sr)
	}
	return resp, err
}
// Write writes the response as JSON to the http.ResponseWriter, stamping
// the current UTC time first. If encoding fails, it falls back to writing
// an internal-server-error Response instead.
// NOTE(review): by the time Encode fails, headers (and possibly partial
// body) may already have been sent; the fallback Write is best-effort.
func (sr *Response) Write(ctx context.Context, w http.ResponseWriter) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	sr.Timestamp = time.Now().UTC()
	err := json.NewEncoder(w).Encode(sr)
	if err != nil {
		if sr.err == nil {
			sr.err = &AppError{
				StatusCode: http.StatusInternalServerError,
				Reason:     "Script does not return valid json string.",
			}
		}
		sr.err.Write(ctx, w)
	}
}
// BuildRequest decodes a user request from the reader. A completely empty
// body (immediate EOF) is reported as a distinct error.
func BuildRequest(r io.Reader) (*Request, error) {
	request := &Request{}
	if err := json.NewDecoder(r).Decode(request); err != nil {
		if err == io.EOF {
			return nil, errors.New("received empty request")
		}
		return nil, err
	}
	return request, nil
}
|
package vrf
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
var (
	// keyD is the fixed secret scalar used as the VRF test key.
	keyD = common.HexToHash("0x0fdcdb4f276c1b7f6e3b17f6c80d6bdd229cee59955b0b6a0c69f67cbf3943fa").Big()
	// keyHash is the expected hash of keyD's public key.
	keyHash = common.HexToHash("0x9fe62971ada37edbdab3582f8aec660edf7c59b4659d1b9cf321396b73918b56")
)
// TestKeyHash checks that the public key derived from the fixed secret
// scalar hashes to the known value.
func TestKeyHash(t *testing.T) {
	publicKey := (&Key{keyD}).PublicKey()
	// testify's assert.Equal takes (t, expected, actual); the original
	// call passed them reversed, producing misleading failure output.
	assert.Equal(t, keyHash, publicKey.Hash, "invalid key hash generated")
}
// TestGenerateRandomness checks the VRF proof generated from fixed
// pre-seed data against a known-good randomness value.
func TestGenerateRandomness(t *testing.T) {
	proof, err := (&Key{keyD}).GenerateProof(PreSeedData{
		PreSeed:     common.HexToHash("0xb3fb0f766b15159704d515f5e17f813a85d784bcc39ce982af38f0e997aef007"),
		BlockHash:   common.HexToHash("0xda2f81c1e0a64897c37fe16a8d0dce7ea5c2a0de03c9a629277cdd925b3ac228"),
		BlockNumber: 777,
	})
	expectedRandomness := common.HexToHash("0x3c050221596be1d77aecba25186a0b1bcbf131d6fd5846c07f5c2ffb107b2f9b")
	// NOTE(review): assert.Nil does not stop the test; if err is non-nil
	// the Equal below may dereference a nil proof.
	assert.Nil(t, err, "failed to generate randomness")
	// Fixed testify argument order: (t, expected, actual).
	assert.Equal(t, expectedRandomness, proof.Randomness, "invalid randomness generated")
}
|
package config
import (
"github.com/spf13/viper"
)
// Supported cache backends (Config.CacheType) and blockchain storage
// engines (BlockchainConfig.DatabaseType).
const (
	CacheTypeInMemory = "memory"
	CacheTypeRedis    = "redis"

	BlockchainDatabaseInMemory = "memory"
	BlockChainDatabaseLMDB     = "lmdb"
)
// Config is the top-level node configuration, populated by viper from the
// config file (see NewConfig for defaults).
type Config struct {
	ListenPort            int              `mapstructure:"listen_port"`
	ListenAddr            string           `mapstructure:"listen_addr"`
	IsBootstrap           bool             `mapstructure:"is_bootstrap"`
	BootstrapNodes        []string         `mapstructure:"bootstrap_node_multiaddr"`
	Rendezvous            string           `mapstructure:"rendezvous"`
	Ethereum              EthereumConfig   `mapstructure:"ethereum"`
	Filecoin              FilecoinConfig   `mapstructure:"filecoin"`
	PubSub                PubSubConfig     `mapstructure:"pubsub"`
	ConsensusMinApprovals int              `mapstructure:"consensus_min_approvals"`
	Redis                 RedisConfig      `mapstructure:"redis"`
	CacheType             string           `mapstructure:"cache_type"` // CacheTypeInMemory or CacheTypeRedis
	Blockchain            BlockchainConfig `mapstructure:"blockchain"`
	PrivateKeyPath        string           `mapstructure:"private_key_path"`
}
// EthereumConfig holds gateway, key material, and contract addresses for
// the Ethereum side of the node.
type EthereumConfig struct {
	GatewayAddress              string `mapstructure:"gateway_address"`
	ChainID                     int    `mapstructure:"chain_id"`
	PrivateKey                  string `mapstructure:"private_key"`
	MnemonicPhrase              string `mapstructure:"mnemonic_phrase"`
	HDDerivationPath            string `mapstructure:"hd_derivation_path"`
	DioneOracleContractAddress  string `mapstructure:"oracle_contract_address"`
	DioneStakingContractAddress string `mapstructure:"staking_contract_address"`
	DisputeContractAddress      string `mapstructure:"dispute_contract_address"`
	DisputeVoteWindow           int    `mapstructure:"dispute_vote_window"` // in secs
}
// FilecoinConfig holds connection settings for a Lotus node.
type FilecoinConfig struct {
	LotusHost  string `mapstructure:"lotusHost"`
	LotusToken string `mapstructure:"lotusToken"`
}
// PubSubConfig names the libp2p pubsub topic used for service traffic.
type PubSubConfig struct {
	ServiceTopicName string `mapstructure:"service_topic_name"`
}
// RedisConfig holds connection settings for the optional Redis cache.
type RedisConfig struct {
	Addr     string `mapstructure:"redis_addr"`
	Password string `mapstructure:"redis_password"`
	DB       int    `mapstructure:"redis_db"`
}
// BlockchainConfig selects the chain storage engine and its LMDB
// location when DatabaseType is BlockChainDatabaseLMDB.
type BlockchainConfig struct {
	DatabaseType string `mapstructure:"database_type"`
	LMDB         struct {
		DatabasePath string `mapstructure:"database_path"`
	} `mapstructure:"lmdb"`
}
// NewConfig creates a new config based on default values, overridden by
// whatever the file at configPath provides (any format viper supports).
func NewConfig(configPath string) (*Config, error) {
	// Defaults; viper.Unmarshal below overwrites any field present in the file.
	cfg := &Config{
		ListenAddr:     "localhost",
		ListenPort:     8000,
		BootstrapNodes: []string{"/ip4/127.0.0.1/tcp/0"},
		Rendezvous:     "dione",
		Ethereum: EthereumConfig{
			PrivateKey: "",
		},
		PubSub: PubSubConfig{
			ServiceTopicName: "dione",
		},
		Redis: RedisConfig{
			Addr:     "localhost:6379",
			Password: "",
			DB:       0,
		},
		// Use the declared constant instead of the magic "memory" literal,
		// consistent with how CacheType is compared elsewhere.
		CacheType: CacheTypeInMemory,
	}
	viper.SetConfigFile(configPath)
	err := viper.ReadInConfig()
	if err != nil {
		return nil, err
	}
	err = viper.Unmarshal(cfg)
	if err != nil {
		return nil, err
	}
	return cfg, nil
}
|
package log
import (
"bufio"
"fmt"
"github.com/sirupsen/logrus"
"log/syslog"
"os"
"runtime"
"strings"
"github.com/api7/ingress-controller/conf"
)
// logEntry is the lazily-initialized singleton returned by GetLogger.
var logEntry *logrus.Entry
// GetLogger lazily builds and returns the shared logger entry: JSON
// formatted, direct output discarded (see setNull), and everything
// shipped through a syslog hook to conf.Syslog.Host:514 over UDP.
// Level is Debug locally, Info in all other environments.
// NOTE(review): the lazy init is not goroutine-safe (no sync.Once);
// concurrent first calls could build the logger twice — confirm callers
// initialize from a single goroutine.
func GetLogger() *logrus.Entry {
	if logEntry == nil {
		var log = logrus.New()
		setNull(log)
		log.SetLevel(logrus.DebugLevel)
		if conf.ENV != conf.LOCAL {
			log.SetLevel(logrus.InfoLevel)
		}
		log.SetFormatter(&logrus.JSONFormatter{})
		logEntry = log.WithFields(logrus.Fields{
			"app": "ingress-controller",
		})
		hook, err := createHook("udp", fmt.Sprintf("%s:514", conf.Syslog.Host),
			syslog.LOG_LOCAL4, "ingress-controller")
		if err != nil {
			panic("failed to create log hook " + conf.Syslog.Host)
		}
		log.AddHook(hook)
	}
	return logEntry
}
// setNull redirects the logger's direct output to /dev/null so that only
// the syslog hook (plus localPrint) emits anything.
// NOTE(review): the bufio.Writer is never flushed; harmless here since
// the destination is the null device anyway.
func setNull(log *logrus.Logger) {
	src, err := os.OpenFile(os.DevNull, os.O_APPEND|os.O_WRONLY, os.ModeAppend)
	if err != nil {
		fmt.Println("err", err)
	}
	writer := bufio.NewWriter(src)
	log.SetOutput(writer)
}
// SysLogHook is a logrus hook that forwards entries to a syslog daemon,
// annotating each entry with the caller location under LineName.
type SysLogHook struct {
	Writer    *syslog.Writer
	NetWork   string // dial network, e.g. "udp"
	Raddr     string // remote syslog address
	Formatter func(file, function string, line int) string
	LineName  string // field name used for the caller location
}
// createHook dials the syslog daemon at raddr over the given network and
// wraps the connection in a SysLogHook whose formatter renders the caller
// as "file:line" under the "line" field.
func createHook(network, raddr string, priority syslog.Priority, tag string) (*SysLogHook, error) {
	w, err := syslog.Dial(network, raddr, priority, tag)
	if err != nil {
		return nil, err
	}
	formatter := func(file, function string, line int) string {
		return fmt.Sprintf("%s:%d", file, line)
	}
	return &SysLogHook{
		Writer:    w,
		NetWork:   network,
		Raddr:     raddr,
		Formatter: formatter,
		LineName:  "line",
	}, nil
}
// Fire implements logrus.Hook: it re-renders the entry with the caller
// location attached, ships it to syslog at a severity matching the logrus
// level, and echoes it locally outside production environments. Levels
// without a mapping (e.g. trace) are silently dropped, as before.
func (hook *SysLogHook) Fire(entry *logrus.Entry) error {
	en := entry.WithField(hook.LineName, hook.Formatter(findCaller(5)))
	en.Level = entry.Level
	en.Message = entry.Message
	en.Time = entry.Time
	line, err := en.String()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
		return err
	}
	// Map the logrus level onto the matching syslog write function.
	var write func(string) error
	switch en.Level {
	case logrus.PanicLevel, logrus.FatalLevel:
		write = hook.Writer.Crit
	case logrus.ErrorLevel:
		write = hook.Writer.Err
	case logrus.WarnLevel:
		write = hook.Writer.Warning
	case logrus.InfoLevel:
		write = hook.Writer.Info
	case logrus.DebugLevel:
		write = hook.Writer.Debug
	default:
		return nil
	}
	// Syslog write errors were ignored in the original; keep that behavior.
	_ = write(line)
	localPrint(line)
	return nil
}
// localPrint echoes the rendered log line to stdout in every environment
// except the production-like ones (BETA/PROD/HBPROD).
func localPrint(line string) {
	if conf.ENV != conf.BETA && conf.ENV != conf.PROD && conf.ENV != conf.HBPROD{
		fmt.Print(line)
	}
}
// Levels registers this hook for every logrus level.
func (hook *SysLogHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
// findCaller walks up to 10 frames from skip, returning the first frame
// whose (already-trimmed) file path does not start with "logrus", along
// with its fully-qualified function name.
func findCaller(skip int) (string, string, int) {
	var (
		pc       uintptr
		file     string
		function string
		line     int
	)
	for i := 0; i < 10; i++ {
		pc, file, line = getCaller(skip + i)
		if !strings.HasPrefix(file, "logrus") {
			break
		}
	}
	// Resolve the function name only when a valid frame was found.
	if pc != 0 {
		frames := runtime.CallersFrames([]uintptr{pc})
		frame, _ := frames.Next()
		function = frame.Function
	}
	return file, function, line
}
// getCaller wraps runtime.Caller and trims the reported file path down to
// at most its last two path components (e.g. "pkg/file.go"). A missing
// frame yields zero values.
func getCaller(skip int) (uintptr, string, int) {
	pc, file, line, ok := runtime.Caller(skip)
	if !ok {
		return 0, "", 0
	}
	// Walk backwards counting '/' separators; cut just after the second
	// one from the end so at most two components remain.
	slashes := 0
	for i := len(file) - 1; i > 0; i-- {
		if file[i] != '/' {
			continue
		}
		slashes++
		if slashes >= 2 {
			file = file[i+1:]
			break
		}
	}
	return pc, file, line
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/go-pg/pg/v9"
)
// App bundles the HTTP router and database handle for the full application.
type App struct {
	Router *mux.Router
	DB     *pg.DB
}
func checkAuth(r *http.Request) bool {
// This endpoint is not public
// @todo: better/more flexible auth
secret, existsSecret := os.LookupEnv("SECRET");
authHeader := r.Header["Authorization"];
if (!existsSecret || len(authHeader) < 1 || authHeader[0] != "Bearer " + secret) {
return false
}
return true
}
// optionsRequest is an intentionally empty handler for preflight OPTIONS
// requests; CORS headers are added by the handlers.CORS middleware in Run.
func optionsRequest(w http.ResponseWriter, r *http.Request) {
	// Just return
}
// getFeedbackItems returns all feedback entries as JSON; requires the
// bearer secret (this endpoint is not public).
// NOTE: the package-level getFeedbackItems(a.DB) query function shares
// this method's name.
func (a *App) getFeedbackItems(w http.ResponseWriter, r *http.Request) {
	if (!checkAuth(r)) {
		respondWithError(w, r, http.StatusUnauthorized, "Unauthorized")
		return
	}
	feedbackData, err := getFeedbackItems(a.DB)
	if err != nil {
		respondWithError(w, r, http.StatusServiceUnavailable, "An unexpected error occurred")
		return
	}
	respondWithJSON(w, r, http.StatusOK, feedbackData)
}
// createFeedback accepts a JSON feedback submission (public endpoint),
// validates it (non-empty, <= 1000 chars), stores it together with the
// client address, and echoes the submitted feedback back with 201.
func (a *App) createFeedback(w http.ResponseWriter, r *http.Request) {
	var newFeedback Feedback
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		respondWithError(w, r, http.StatusBadRequest, "Incorrect data format")
		return
	}
	err = json.Unmarshal(reqBody, &newFeedback)
	if err != nil {
		respondWithError(w, r, http.StatusUnprocessableEntity, "JSON parse error")
		return
	}
	if newFeedback.Feedback == "" {
		respondWithError(w, r, http.StatusBadRequest, "Feedback is required")
		return
	}
	if len(newFeedback.Feedback) > 1000 {
		respondWithError(w, r, http.StatusBadRequest, "Feedback text must be 1000 characters or fewer")
		return
	}
	// Persist only the vetted text plus the remote address for abuse tracking.
	feedback := Feedback{
		Feedback:  newFeedback.Feedback,
		IPAddress: r.RemoteAddr,
	}
	err = feedback.createFeedbackItem(a.DB)
	if err != nil {
		respondWithError(w, r, http.StatusServiceUnavailable, "Database write error")
		return
	}
	respondWithJSON(w, r, http.StatusCreated, newFeedback)
}
// getBooks returns all books as JSON. This endpoint is public (no auth).
func (a *App) getBooks(w http.ResponseWriter, r *http.Request) {
	bookData, err := getBooks(a.DB)
	if err != nil {
		// Log through the standard logger (consistent with respondWithJSON)
		// instead of the previous bare fmt.Println to stdout.
		log.Printf("getBooks error: %v", err)
		respondWithError(w, r, http.StatusServiceUnavailable, "An unexpected error occurred")
		return
	}
	respondWithJSON(w, r, http.StatusOK, bookData)
}
// createBook accepts a JSON book submission (requires the bearer secret),
// validates that a title is present, stores it, and echoes it back with 201.
func (a *App) createBook(w http.ResponseWriter, r *http.Request) {
	if (!checkAuth(r)) {
		respondWithError(w, r, http.StatusUnauthorized, "Unauthorized")
		return
	}
	var newBook Book
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		respondWithError(w, r, http.StatusBadRequest, "Incorrect data format")
		return
	}
	err = json.Unmarshal(reqBody, &newBook)
	if err != nil {
		respondWithError(w, r, http.StatusUnprocessableEntity, "JSON parse error")
		return
	}
	if newBook.Title == "" {
		respondWithError(w, r, http.StatusBadRequest, "Title is required")
		return
	}
	err = newBook.createBook(a.DB)
	if err != nil {
		respondWithError(w, r, http.StatusServiceUnavailable, "Database write error")
		return
	}
	respondWithJSON(w, r, http.StatusCreated, newBook)
}
// initializeRoutes registers all HTTP routes on the app's router.
// NOTE(review): gorilla/mux treats "*" as the literal path "*", not a
// wildcard — the OPTIONS route likely never matches real preflights;
// a PathPrefix("/").Methods("OPTIONS") matcher (or relying solely on the
// CORS middleware in Run) may be what was intended. Confirm before changing.
func (a *App) initializeRoutes() {
	a.Router.HandleFunc("*", optionsRequest).Methods("OPTIONS")
	a.Router.HandleFunc("/feedback", a.getFeedbackItems).Methods("GET")
	a.Router.HandleFunc("/feedback", a.createFeedback).Methods("POST")
	a.Router.HandleFunc("/books", a.getBooks).Methods("GET")
	a.Router.HandleFunc("/books", a.createBook).Methods("POST")
}
// respondWithError is sugar over respondWithJSON for a single error message
// rendered as {"error": message}.
func respondWithError(w http.ResponseWriter, r *http.Request, code int, message string) {
	payload := map[string]string{"error": message}
	respondWithJSON(w, r, code, payload)
}
func respondWithJSON(w http.ResponseWriter, r *http.Request, code int, payload interface{}) {
response, _ := json.Marshal(payload)
log.Printf("%s %s %d %s", r.Method, r.RequestURI, code, r.RemoteAddr)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
// Initialize sets up the router, registers routes, and connects the
// database (ConnectDB is defined elsewhere in this package).
func (a *App) Initialize() {
	fmt.Println("Starting server on port 8080")
	a.Router = mux.NewRouter().StrictSlash(true)
	a.initializeRoutes()
	a.DB = ConnectDB()
}
// Run starts the HTTP server on :8080, wrapping the router with a
// permissive CORS policy (any origin). log.Fatal exits on serve failure.
func (a *App) Run() {
	withHandlers := handlers.CORS(
		handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}),
		handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}),
		handlers.AllowedOrigins([]string{"*"}))(a.Router)
	log.Fatal(http.ListenAndServe(":8080", withHandlers))
}
package lambdas_test
import (
"testing"
"github.com/life4/genesis/lambdas"
"github.com/matryer/is"
)
// TestAbs covers positive, negative, zero, and float inputs for lambdas.Abs.
func TestAbs(t *testing.T) {
	is := is.New(t)
	is.Equal(lambdas.Abs(2), 2)
	is.Equal(lambdas.Abs(-2), 2)
	is.Equal(lambdas.Abs(0), 0)
	is.Equal(lambdas.Abs(-1.2), 1.2)
}
// TestMin covers argument order, negatives, and the equal-arguments case.
func TestMin(t *testing.T) {
	is := is.New(t)
	is.Equal(lambdas.Min(2, 3), 2)
	is.Equal(lambdas.Min(3, 2), 2)
	is.Equal(lambdas.Min(-2, 3), -2)
	is.Equal(lambdas.Min(2, -3), -3)
	is.Equal(lambdas.Min(2, 2), 2)
}
// TestMax covers argument order, negatives, and the equal-arguments case.
func TestMax(t *testing.T) {
	is := is.New(t)
	is.Equal(lambdas.Max(2, 3), 3)
	is.Equal(lambdas.Max(3, 2), 3)
	is.Equal(lambdas.Max(-2, 3), 3)
	is.Equal(lambdas.Max(2, -3), 2)
	is.Equal(lambdas.Max(2, 2), 2)
}
// TestDefault checks that Default returns the zero value for the
// argument's type regardless of the value passed.
func TestDefault(t *testing.T) {
	is := is.New(t)
	is.Equal(lambdas.Default(3), 0)
	is.Equal(lambdas.Default(int32(3)), int32(0))
	is.Equal(lambdas.Default(int64(3)), int64(0))
	is.Equal(lambdas.Default(0), 0)
	is.Equal(lambdas.Default(3.5), 0.0)
	is.Equal(lambdas.Default("hi"), "")
}
|
package main
import (
"bufio"
"fmt"
"log"
"math"
"os"
"strconv"
"strings"
)
// Head represents the head vertex of an edge, along with its length.
type Head struct {
	head, length int
}

// Dijkstra takes a directed graph (adjacency lists keyed by tail vertex)
// and returns the shortest-path distances from source vertex 1 to every
// other vertex, using a straightforward O(m*n) frontier scan (no heap).
func Dijkstra(edges map[int][]Head) map[int]int {
	// All distances start at "infinity" (max int).
	dist := make(map[int]int)
	for v := range edges {
		dist[v] = math.MaxInt64
	}
	// Vertices whose shortest path is already settled.
	settled := make(map[int]bool, len(edges))
	// Vertex 1 is the source.
	dist[1] = 0
	settled[1] = true
	var bestHead, bestLen int
	for len(settled) < len(edges) {
		bestLen = math.MaxInt64
		// Scan every edge crossing the settled/unsettled frontier and keep
		// the one minimizing dist(tail) + edge length.
		for tail := range settled {
			for _, e := range edges[tail] {
				if settled[e.head] {
					continue
				}
				if d := dist[tail] + e.length; d < bestLen {
					bestLen = d
					bestHead = e.head
				}
			}
		}
		// Settle the best frontier vertex found this round.
		settled[bestHead] = true
		dist[bestHead] = bestLen
	}
	return dist
}
// main reads a tab-separated adjacency-list file (tail vertex, then
// "head,length" pairs) named by the first CLI argument, runs Dijkstra
// from vertex 1, and prints the distances to a fixed set of vertices.
func main() {
	if len(os.Args) < 2 {
		log.Fatal("file argument missing")
	}
	f, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	edges := make(map[int][]Head)
	for scanner.Scan() {
		line := strings.Split(scanner.Text(), "\t")
		tail, err := strconv.Atoi(line[0])
		if err != nil {
			log.Fatal(err)
		}
		edges[tail] = make([]Head, len(line)-1)
		for i := 1; i < len(line); i++ {
			if line[i] == "" {
				// A trailing tab yields an empty field; leave the zero Head.
				continue
			}
			h := Head{}
			h.head, err = strconv.Atoi(line[i][:strings.Index(line[i], ",")])
			if err != nil {
				log.Fatal(err)
			}
			h.length, err = strconv.Atoi(line[i][strings.Index(line[i], ",")+1:])
			if err != nil {
				log.Fatal(err)
			}
			edges[tail][i-1] = h
		}
	}
	// Robustness: surface scanner I/O errors that were previously ignored.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	lengths := Dijkstra(edges)
	// Iterate the queried vertices instead of ten copy-pasted Printfs.
	for _, v := range []int{7, 37, 59, 82, 99, 115, 133, 165, 188, 197} {
		fmt.Printf("vertex %d: %d\n", v, lengths[v])
	}
}
|
package main
import (
"github.com/gin-gonic/gin"
"net/http"
)
// logout responds 200 with a fixed success payload.
func logout(c *gin.Context) {
	resp := gin.H{"logout": "success"}
	c.JSON(http.StatusOK, resp)
}
// login1 responds 200 with a fixed success payload (serves both GET and POST).
func login1(c *gin.Context) {
	resp := gin.H{"login": "success"}
	c.JSON(http.StatusOK, resp)
}
// main demonstrates gin route groups: /user/logout (GET) and /user/login
// (GET and POST), then serves on the default address (:8080).
func main() {
	r := gin.Default()
	userGroup := r.Group("/user")
	{
		userGroup.GET("/logout", logout)
		userGroup.GET("/login", login1)
		userGroup.POST("/login", login1)
	}
	//shopGroup := r.Group("/shop")
	//{
	//	shopGroup.GET("/index", func(c *gin.Context) {...})
	//	shopGroup.GET("/cart", func(c *gin.Context) {...})
	//	shopGroup.POST("/checkout", func(c *gin.Context) {...})
	//}
	//shopGroup := r.Group("/shop")
	//{
	//	shopGroup.GET("/index", func(c *gin.Context) {...})
	//	shopGroup.GET("/cart", func(c *gin.Context) {...})
	//	shopGroup.POST("/checkout", func(c *gin.Context) {...})
	//	// nested route group
	//	xx := shopGroup.Group("xx")
	//	xx.GET("/oo", func(c *gin.Context) {...})
	//}
	r.Run()
}
|
package readconfig
import (
"gopkg.in/yaml.v2"
"io/ioutil"
"log"
)
// ConfigFileYaml models a YAML configuration file.
type ConfigFileYaml struct {
	Enabled bool   `yaml:"enabled"` // yaml tag maps the YAML key to this field
	Path    string `yaml:"path"`
}
// ReadConfYaml reads the YAML file at path into the receiver and returns it.
// NOTE(review): a read error is only logged and the (nil) bytes are still
// passed to Unmarshal — which succeeds on empty input, leaving conf zero;
// and log.Fatalf on unmarshal failure terminates the whole process from
// library code. Confirm both behaviors are intended before changing.
func (conf *ConfigFileYaml) ReadConfYaml(path string) *ConfigFileYaml {
	yamlFile, err := ioutil.ReadFile(path)
	if err != nil {
		log.Printf("YamlFile.Get err #%v", err)
	}
	err = yaml.Unmarshal(yamlFile,conf)
	if err != nil {
		log.Fatalf("Unmarshal: %v",err)
	}
	return conf
}
|
// Router module
// 菜单资源
package service
import (
"portal/util"
"portal/model"
"portal/database"
)
// CreateRouter creates a menu route after validating uniqueness and, for
// non-root routes (Parent != -1), that the parent belongs to the same app.
// Returns (0, nil) on success, a domain error code (3000x) with a message
// for validation failures, or (1, err) for database errors.
func CreateRouter(r model.Route) (int, interface{}) {
	// check router uniqueness
	code, _ := database.UniqueRouter(r)
	if code == 0 {
		return 30001, "名称或地址已占用"
	}
	// check appid if parent not equal -1
	if r.Parent != -1 {
		equal, _ := database.EqualAppid(r.Parent, r.AppId)
		if !equal {
			return 30002, "所属应用与父菜单不一致"
		}
	}
	rowId, err := database.CreateRouter(r)
	if err != nil {
		return 1, err
	}
	resource := model.Resource{AppId: r.AppId, ResType: 1, ResId: rowId}
	// Link the new route into the resource table (ResType 1 = menu route).
	err = database.InsertRes(resource)
	if err != nil {
		return 1, err
	}
	return 0, nil
}
// UpdateRouter updates the menu route identified by id; thin delegation
// to the database layer.
func UpdateRouter(id int, r model.RouteUpdate) (int, interface{}) {
	return database.UpdateRouter(id, r)
}
// GetRouterList queries the menu route list matching the given global
// query body (translated into SQL by util.ParseQueryBody).
func GetRouterList(query *model.GlobalQueryBody) ([]interface{}, error) {
	sqlStr, params := util.ParseQueryBody(query, true)
	rows, err := database.FindAllRouter(sqlStr, params...)
	if err != nil {
		return nil, err
	}
	return rows, nil
}
// DeleteRouter soft-deletes the route by id (sets deleted_at on the
// portal_router table).
func DeleteRouter(id int) (int, interface{}) {
	return database.SetDeletedAt(id, `portal_router`)
}
// GetParentRoute returns all routes that can serve as parents.
func GetParentRoute() ([]interface{}, error) {
	return database.FindParentRouter()
}
package shared
// newRepeatingString returns value concatenated length times; zero or
// negative length yields the empty string.
func newRepeatingString(length int, value string) string {
	result := ""
	// Fixed off-by-one: the previous `i <= length` bound produced one
	// extra repetition (length+1 copies), contradicting the name.
	for i := 0; i < length; i++ {
		result = result + value
	}
	return result
}
|
package map_slice
import (
"reflect"
"testing"
)
// TestCrossover table-tests Crossover: ns lists the indices at which the
// two parent slices xs/ys swap segments; r1/r2 are the expected children.
func TestCrossover(t *testing.T) {
	arr := []struct{
		ns, xs, ys, r1, r2 []int
	} {
		{
			[]int{1, 3},
			[]int{1,2,3,4,5,6},
			[]int{7,8,9,10,11,12},
			[]int{1,8,9,4,5,6},
			[]int{7,2,3,10,11,12},
		},
		{
			[]int{1},
			[]int {1,2,3},
			[]int {10,11,12},
			[]int{1,11,12},
			[]int{10,2,3},
		},
	}
	for _, s := range arr {
		actualR1, actualR2 := Crossover(s.ns, s.xs, s.ys)
		if !reflect.DeepEqual(actualR1, s.r1) || !reflect.DeepEqual(actualR2, s.r2) {
			t.Errorf("map_slice has error")
		}
	}
}
|
package main
import (
	"fmt"
	"log"
	"net/http"
)
func d(w http.ResponseWriter,r *http.Request){ //this is the signature of handler interface
//switch r.URL.Path {
fmt.Fprintln(w,"barks")
//case "/cat":fmt.Fprintln(w,"meows")
//}
}
func c(w http.ResponseWriter,r *http.Request){ //this is the signature of handler interface
fmt.Fprintln(w,"mews")
}
// main wires the two animal handlers onto the default mux and serves on
// :8080.
func main() {
	http.HandleFunc("/dog/", d) // trailing slash: subtree match
	http.HandleFunc("/cat", c)  // exact match
	// The error from ListenAndServe was previously discarded; it only
	// returns on failure (e.g. port already in use), so report it and exit.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
// This file was generated for SObject BusinessHours, API Version v43.0 at 2018-07-30 03:47:19.964149023 -0400 EDT m=+6.306992117
package sobjects
import (
"fmt"
"strings"
)
// BusinessHours is the generated Go mapping of the Salesforce BusinessHours
// SObject (API v43.0, per the file header). Every field carries the
// `force:",omitempty"` tag so unset values are omitted on serialization.
type BusinessHours struct {
	BaseSObject // common SObject fields/behavior, defined elsewhere in this package
	CreatedById string `force:",omitempty"`
	CreatedDate string `force:",omitempty"`
	FridayEndTime string `force:",omitempty"`
	FridayStartTime string `force:",omitempty"`
	Id string `force:",omitempty"`
	IsActive bool `force:",omitempty"`
	IsDefault bool `force:",omitempty"`
	LastModifiedById string `force:",omitempty"`
	LastModifiedDate string `force:",omitempty"`
	LastViewedDate string `force:",omitempty"`
	MondayEndTime string `force:",omitempty"`
	MondayStartTime string `force:",omitempty"`
	Name string `force:",omitempty"`
	SaturdayEndTime string `force:",omitempty"`
	SaturdayStartTime string `force:",omitempty"`
	SundayEndTime string `force:",omitempty"`
	SundayStartTime string `force:",omitempty"`
	SystemModstamp string `force:",omitempty"`
	ThursdayEndTime string `force:",omitempty"`
	ThursdayStartTime string `force:",omitempty"`
	TimeZoneSidKey string `force:",omitempty"`
	TuesdayEndTime string `force:",omitempty"`
	TuesdayStartTime string `force:",omitempty"`
	WednesdayEndTime string `force:",omitempty"`
	WednesdayStartTime string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type.
func (t *BusinessHours) ApiName() string {
	return "BusinessHours"
}
// String renders the record as a human-readable, one-line-per-field dump:
// a header line followed by each field as "\tName: value\n".
func (t *BusinessHours) String() string {
	fields := []struct {
		label string
		value interface{}
	}{
		{"CreatedById", t.CreatedById},
		{"CreatedDate", t.CreatedDate},
		{"FridayEndTime", t.FridayEndTime},
		{"FridayStartTime", t.FridayStartTime},
		{"Id", t.Id},
		{"IsActive", t.IsActive},
		{"IsDefault", t.IsDefault},
		{"LastModifiedById", t.LastModifiedById},
		{"LastModifiedDate", t.LastModifiedDate},
		{"LastViewedDate", t.LastViewedDate},
		{"MondayEndTime", t.MondayEndTime},
		{"MondayStartTime", t.MondayStartTime},
		{"Name", t.Name},
		{"SaturdayEndTime", t.SaturdayEndTime},
		{"SaturdayStartTime", t.SaturdayStartTime},
		{"SundayEndTime", t.SundayEndTime},
		{"SundayStartTime", t.SundayStartTime},
		{"SystemModstamp", t.SystemModstamp},
		{"ThursdayEndTime", t.ThursdayEndTime},
		{"ThursdayStartTime", t.ThursdayStartTime},
		{"TimeZoneSidKey", t.TimeZoneSidKey},
		{"TuesdayEndTime", t.TuesdayEndTime},
		{"TuesdayStartTime", t.TuesdayStartTime},
		{"WednesdayEndTime", t.WednesdayEndTime},
		{"WednesdayStartTime", t.WednesdayStartTime},
	}
	var b strings.Builder
	fmt.Fprintf(&b, "BusinessHours #%s - %s\n", t.Id, t.Name)
	for _, f := range fields {
		fmt.Fprintf(&b, "\t%s: %v\n", f.label, f.value)
	}
	return b.String()
}
// BusinessHoursQueryResponse is the SOQL query-response envelope whose
// Records payload holds BusinessHours rows.
type BusinessHoursQueryResponse struct {
	BaseQuery // shared pagination/metadata fields, defined elsewhere in this package
	Records []BusinessHours `json:"Records" force:"records"`
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
// https://www.hackerrank.com/challenges/quicksort1
//
// Reads a length l and then l integers from stdin, partitions the slice
// around its first element (the pivot), and prints the partitioned slice
// space-separated on one line.
func main() {
	// next yields successive whitespace-separated integers from stdin.
	next := func() func() int {
		scan := bufio.NewScanner(os.Stdin)
		scan.Split(bufio.ScanWords)
		return func() int {
			scan.Scan()
			i, _ := strconv.Atoi(scan.Text()) // parse errors yield 0 — input is trusted
			return i
		}
	}()
	l := next()
	arr := make([]int, l)
	for i := 0; i < l; i++ {
		arr[i] = next()
	}
	pivot := arr[0]
	// Right-to-left partition pass: elements >= pivot are swapped toward
	// the tail; i tracks the left edge of the ">= pivot" region.
	i, j := l-1, l-1
	for j > 0 {
		if arr[j] >= pivot {
			tmp := arr[j]
			arr[j] = arr[i]
			arr[i] = tmp
			i--
		}
		j--
	}
	// Drop the pivot into its final slot at index i.
	arr[0] = arr[i]
	arr[i] = pivot
	// NOTE(review): this loop variable shadows the partition index i above;
	// harmless here, but easy to misread.
	for _, i := range arr {
		fmt.Print(i)
		fmt.Print(" ")
	}
	fmt.Println()
}
|
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package b2 provides a high-level interface to Backblaze's B2 cloud storage
// service.
//
// It is specifically designed to abstract away the Backblaze API details by
// providing familiar Go interfaces, specifically an io.Writer for object
// storage, and an io.Reader for object download. Handling of transient
// errors, including network and authentication timeouts, is transparent.
//
// Methods that perform network requests accept a context.Context argument.
// Callers should use the context's cancellation abilities to end requests
// early, or to provide timeout or deadline guarantees.
//
// This package is in development and may make API changes.
package b2
import (
"bytes"
"crypto/sha1"
"fmt"
"io"
"strconv"
"time"
"golang.org/x/net/context"
)
// Client is a Backblaze B2 client.
type Client struct {
	backend beRootInterface // authorized API backend; set up by NewClient
}
// NewClient creates and returns a new Client with valid B2 service account
// tokens.
//
// Authorization happens eagerly here, so a bad account/key pair is reported
// immediately rather than on the first operation.
func NewClient(ctx context.Context, account, key string) (*Client, error) {
	c := &Client{
		backend: &beRoot{
			b2i: &b2Root{},
		},
	}
	if err := c.backend.authorizeAccount(ctx, account, key); err != nil {
		return nil, err
	}
	return c, nil
}
// Bucket is a reference to a B2 bucket.
type Bucket struct {
	b beBucketInterface // backend handle for this bucket
	r beRootInterface   // root backend, shared with the owning Client
}
// BucketType describes a bucket's visibility to unauthenticated clients.
type BucketType string

const (
	// UnknownType is the zero value; it is not a real B2 bucket type.
	UnknownType BucketType = ""
	// Private buckets require authorization for downloads.
	Private BucketType = "allPrivate"
	// Public buckets allow anonymous downloads.
	Public BucketType = "allPublic"
)
// NOTE: Private and Public were previously untyped string constants (only
// the first const in the block carried the BucketType type); they are now
// explicitly typed so they are BucketType values everywhere.
// Bucket returns the named bucket. If the bucket already exists (and belongs
// to this account), it is reused. Otherwise a new private bucket is created.
//
// Deprecated: use NewBucket instead.
func (c *Client) Bucket(ctx context.Context, name string) (*Bucket, error) {
	return c.NewBucket(ctx, name, Private)
}
// NewBucket returns a bucket. The bucket is created with the given type if
// it does not already exist. An existing bucket is reused only when both
// its name and its type match the arguments.
func (c *Client) NewBucket(ctx context.Context, name string, btype BucketType) (*Bucket, error) {
	buckets, err := c.backend.listBuckets(ctx)
	if err != nil {
		return nil, err
	}
	for _, bucket := range buckets {
		if bucket.name() == name && bucket.btype() == btype {
			return &Bucket{
				b: bucket,
				r: c.backend,
			}, nil
		}
	}
	b, err := c.backend.createBucket(ctx, name, string(btype))
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here; return nil explicitly (the previous
	// `}, err` read as if an error could still be propagated).
	return &Bucket{
		b: b,
		r: c.backend,
	}, nil
}
// ListBuckets returns all the buckets available to this account.
func (c *Client) ListBuckets(ctx context.Context) ([]*Bucket, error) {
	raw, err := c.backend.listBuckets(ctx)
	if err != nil {
		return nil, err
	}
	var out []*Bucket
	for _, bkt := range raw {
		out = append(out, &Bucket{b: bkt, r: c.backend})
	}
	return out, nil
}
// Delete removes a bucket. The bucket must be empty; any backend error is
// returned unchanged.
func (b *Bucket) Delete(ctx context.Context) error {
	return b.b.deleteBucket(ctx)
}
// BaseURL returns the base URL to use for all files uploaded to this bucket.
// See Object.URL for how a full object URL is composed from it.
func (b *Bucket) BaseURL() string {
	return b.b.baseURL()
}
// Name returns the bucket's name, as known to the backend.
func (b *Bucket) Name() string {
	return b.b.name()
}
// Object represents a B2 object.
type Object struct {
	attrs *Attrs          // cached attributes, if any
	name  string          // object name within the bucket
	f     beFileInterface // backend file handle; resolved lazily by ensure
	b     *Bucket         // owning bucket
}
// Attrs holds an object's metadata. Fields marked "Not used on upload" are
// populated only when reading attributes back from the service.
type Attrs struct {
	Name string // Not used on upload.
	Size int64 // Not used on upload.
	ContentType string // Used on upload, default is "application/octet-stream".
	Status ObjectState // Not used on upload.
	UploadTimestamp time.Time // Not used on upload.
	SHA1 string // Not used on upload. Can be "none" for large files.
	LastModified time.Time // If present, and there are fewer than 10 keys in the Info field, this is saved on upload.
	Info map[string]string // Save arbitrary metadata on upload, but limited to 10 keys.
}
// Attrs returns an object's attributes, fetching them from the backend.
func (o *Object) Attrs(ctx context.Context) (*Attrs, error) {
	// Resolve the backend file handle lazily.
	if err := o.ensure(ctx); err != nil {
		return nil, err
	}
	fi, err := o.f.getFileInfo(ctx)
	if err != nil {
		return nil, err
	}
	name, sha, size, ct, info, st, stamp := fi.stats()
	// Map the backend action string onto ObjectState; anything else
	// stays Unknown (the zero value).
	var state ObjectState
	switch st {
	case "upload":
		state = Uploaded
	case "start":
		state = Started
	case "hide":
		state = Hider
	}
	// src_last_modified_millis carries the original modification time;
	// surface it as LastModified and strip it from the raw Info map.
	var mtime time.Time
	if v, ok := info["src_last_modified_millis"]; ok {
		ms, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return nil, err
		}
		mtime = time.Unix(ms/1e3, (ms%1e3)*1e6) // millis → (sec, nsec)
		delete(info, "src_last_modified_millis")
	}
	return &Attrs{
		Name: name,
		Size: size,
		ContentType: ct,
		UploadTimestamp: stamp,
		SHA1: sha,
		Info: info,
		Status: state,
		LastModified: mtime,
	}, nil
}
// ObjectState represents the various states an object can be in.
type ObjectState int
const (
	// Unknown is the zero value, used when the state cannot be determined.
	Unknown ObjectState = iota
	// Started represents a large upload that has been started but not finished
	// or canceled.
	Started
	// Uploaded represents an object that has finished uploading and is complete.
	Uploaded
	// Hider represents an object that exists only to hide another object. It
	// cannot in itself be downloaded and, in particular, is not a hidden object.
	Hider
)
// Object returns a reference to the named object in the bucket. Hidden
// objects cannot be referenced in this manner; they can only be found by
// finding the appropriate reference in ListObjects.
//
// No network request is made; the backend handle is resolved lazily.
func (b *Bucket) Object(name string) *Object {
	return &Object{
		name: name,
		b: b,
	}
}
// URL returns the full URL to the given object, composed as
// <bucket base URL>/file/<bucket name>/<object name>.
func (o *Object) URL() string {
	return fmt.Sprintf("%s/file/%s/%s", o.b.BaseURL(), o.b.Name(), o.name)
}
// NewWriter returns a new writer for the given object. Objects that are
// overwritten are not deleted, but are "hidden".
//
// Callers must close the writer when finished and check the error status.
func (o *Object) NewWriter(ctx context.Context) *Writer {
	// Derive a cancelable context owned by the Writer.
	ctx, cancel := context.WithCancel(ctx)
	bw := &Writer{
		o: o,
		name: o.name,
		chsh: sha1.New(),
		cbuf: &bytes.Buffer{},
		ctx: ctx,
		cancel: cancel,
	}
	// Every write feeds both the running SHA1 and the chunk buffer.
	bw.w = io.MultiWriter(bw.chsh, bw.cbuf)
	return bw
}
// NewRangeReader returns a reader for the given object, reading up to length
// bytes starting at offset. If length is negative, the rest of the object
// is read.
func (o *Object) NewRangeReader(ctx context.Context, offset, length int64) *Reader {
	// Derive a cancelable context owned by the Reader.
	ctx, cancel := context.WithCancel(ctx)
	return &Reader{
		ctx: ctx,
		cancel: cancel,
		o: o,
		name: o.name,
		chunks: make(map[int]*bytes.Buffer),
		length: length,
		offset: offset,
	}
}
// NewReader returns a reader for the whole of the given object
// (equivalent to NewRangeReader with offset 0 and negative length).
func (o *Object) NewReader(ctx context.Context) *Reader {
	return o.NewRangeReader(ctx, 0, -1)
}
// ensure lazily resolves the backend file handle for this object, looking
// it up by name in the owning bucket on first use. It is a no-op once the
// handle is set.
func (o *Object) ensure(ctx context.Context) error {
	if o.f != nil {
		return nil
	}
	obj, err := o.b.getObject(ctx, o.name)
	if err != nil {
		return err
	}
	o.f = obj.f
	return nil
}
// Delete removes the given object (this specific version, via the backend's
// deleteFileVersion call).
func (o *Object) Delete(ctx context.Context) error {
	// Resolve the backend file handle lazily.
	if err := o.ensure(ctx); err != nil {
		return err
	}
	return o.f.deleteFileVersion(ctx)
}
// Cursor is passed to ListObjects to return subsequent pages.
type Cursor struct {
	Name string // name to resume the listing from; settable by callers
	id string   // backend continuation id; managed internally
}
// ListObjects returns all objects in the bucket, including multiple versions
// of the same object. Cursor may be nil; when passed to a subsequent query,
// it will continue the listing.
//
// ListObjects will return io.EOF when there are no objects left in the bucket,
// however it may do so concurrently with the last objects.
func (b *Bucket) ListObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
	if c == nil {
		c = &Cursor{} // zero cursor starts from the beginning
	}
	fs, name, id, err := b.b.listFileVersions(ctx, count, c.Name, c.id)
	if err != nil {
		return nil, nil, err
	}
	// A non-empty (name, id) continuation pair means more pages remain.
	var next *Cursor
	if name != "" && id != "" {
		next = &Cursor{
			Name: name,
			id: id,
		}
	}
	var objects []*Object
	for _, f := range fs {
		objects = append(objects, &Object{
			name: f.name(),
			f: f,
			b: b,
		})
	}
	// Exhaustion is signaled with io.EOF, possibly alongside the final page.
	var rtnErr error
	if len(objects) == 0 || next == nil {
		rtnErr = io.EOF
	}
	return objects, next, rtnErr
}
// ListCurrentObjects is similar to ListObjects, except that it returns only
// current, unhidden objects in the bucket.
func (b *Bucket) ListCurrentObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
	if c == nil {
		c = &Cursor{} // zero cursor starts from the beginning
	}
	// Only the name is needed to continue a by-name listing.
	fs, name, err := b.b.listFileNames(ctx, count, c.Name)
	if err != nil {
		return nil, nil, err
	}
	var next *Cursor
	if name != "" {
		next = &Cursor{
			Name: name,
		}
	}
	var objects []*Object
	for _, f := range fs {
		objects = append(objects, &Object{
			name: f.name(),
			f: f,
			b: b,
		})
	}
	// Exhaustion is signaled with io.EOF, possibly alongside the final page.
	var rtnErr error
	if len(objects) == 0 || next == nil {
		rtnErr = io.EOF
	}
	return objects, next, rtnErr
}
// Hide hides the object from name-based listing by writing a hide marker
// via the backend.
func (o *Object) Hide(ctx context.Context) error {
	if err := o.ensure(ctx); err != nil {
		return err
	}
	_, err := o.b.b.hideFile(ctx, o.name)
	return err
}
// Reveal unhides (if hidden) the named object. If there are multiple objects
// of a given name, it will reveal the most recent.
func (b *Bucket) Reveal(ctx context.Context, name string) error {
	cur := &Cursor{
		Name: name,
	}
	// Fetch only the newest version at or after this name; io.EOF here
	// just means the listing is exhausted, not that the call failed.
	objs, _, err := b.ListObjects(ctx, 1, cur)
	if err != nil && err != io.EOF {
		return err
	}
	if len(objs) < 1 || objs[0].name != name {
		return fmt.Errorf("%s: not found", name)
	}
	obj := objs[0]
	// Not hidden: nothing to do.
	if obj.f.status() != "hide" {
		return nil
	}
	// Deleting the hide marker restores visibility of the prior version.
	return obj.Delete(ctx)
}
// getObject looks up the current (unhidden) object with exactly the given
// name and returns a resolved Object handle, or a "not found" error when no
// such object exists.
func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) {
	fs, _, err := b.b.listFileNames(ctx, 1, name)
	if err != nil {
		return nil, err
	}
	// The by-name listing starts at `name`; an empty page or a different
	// first entry both mean the object does not exist.
	if len(fs) == 0 || fs[0].name() != name {
		return nil, fmt.Errorf("%s: not found", name)
	}
	return &Object{
		name: name,
		f:    fs[0],
		b:    b,
	}, nil
}
// AuthToken returns an authorization token that can be used to access objects
// in a private bucket. Only objects that begin with prefix can be accessed.
// The token expires after the given duration.
func (b *Bucket) AuthToken(ctx context.Context, prefix string, valid time.Duration) (string, error) {
	return b.b.getDownloadAuthorization(ctx, prefix, valid)
}
|
/* numPrint: toy-language demo of arithmetic, console I/O, while loops and
   if/elif/else branching. Both parameters are immediately overwritten with
   constants (num := 1000, length := 5), so the arguments are effectively
   unused; j and first are declared but never used. Returns the final value
   of i (0 after the countdown, assuming the read value was positive). */
func Integer: numPrint (Integer: num, Integer: length)
{
	num := 1000;
	length := 5;
	Integer: i, j, first, temp;
	Integer : c;
	temp := 999;
	c := 20 + num * (length + 1); /* arithmatic expression */
	println(c);
	In >> i; /* read i from input */
	println(i);
	/* count i down to zero, printing 100/200 for the special cases 1 and 2 */
	while i > 0 :
	{
		/* this is a comment */
		i:= i - 1;
		if i = 1:{
			println(100);
		}
		elif i = 2:{
			println(200);
		}
		else
		{
			println(i);
		}
		/*This is a
		Multiline
		Comment*/
	}
	println(temp);
	ret i;
}
package cryptotrader
import (
"fmt"
"strings"
)
// TradeVolumeType represents the way to calculate the trade volume.
type TradeVolumeType int

const (
	// TVTFixed use a fixed volume (quote asset) for trading
	TVTFixed TradeVolumeType = iota
	// TVTPercent use a percentage of the available quote asset for trading
	TVTPercent
)

// maxPctVolume caps percentage-based trade volume just below 100%
// (presumably to leave headroom for fees/rounding — confirm with callers).
const maxPctVolume = 0.998

// TradeVolumeTypeFromString creates a new TradeVolumeType from its string
// representation ("fixed", "pct" or "percent", case-insensitive).
func TradeVolumeTypeFromString(in string) (out TradeVolumeType, err error) {
	switch strings.ToLower(in) {
	case "fixed":
		out = TVTFixed
	case "pct", "percent":
		out = TVTPercent
	default:
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		err = fmt.Errorf("invalid tradevolume type: %s", in)
	}
	return
}

// String returns the string representation of the TradeVolumeType.
// Any value other than TVTFixed renders as "percent".
func (tvt TradeVolumeType) String() string {
	if tvt == TVTFixed {
		return "fixed"
	}
	return "percent"
}
// TradeConfig represents the config for the trades to place
type TradeConfig struct {
	// TradeVolumeType how to calculate the volume of the buy/sell orders
	TradeVolumeType TradeVolumeType
	// The volume to trade (depends on TradeVolumeType)
	// If TradeVolumeType == TVTFixed the Volume value is the actual quote asset quantity to trade
	// If TradeVolumeType == TVTPercent the Volume value represents the percentage of the available quote asset quantity to trade (max Volume value = 1.0)
	Volume float64
	// Reduce if true reduces the Volume to the available quantity if the TradeVolumeType == TVTFixed and the available asset quantity is insufficient
	Reduce bool
	// Paper perform paper trading only, do not issue any order on the exchange
	Paper bool
	// MaxSlippage is the max slippage in percent
	// 0.1% = 0.001
	MaxSlippage float64
	// StopLoss is the stop loss in percent
	StopLoss float64
}
// NewTradeConfigFromFlags creates a new TradeConfig instance from the
// cmdline argument values. Percentage-based volumes are capped at
// maxPctVolume; an unrecognized tvt string yields an error.
func NewTradeConfigFromFlags(tvt string, volume float64, reduce bool, paper bool, maxSlippage float64, stopLoss float64) (tc TradeConfig, err error) {
	tc.TradeVolumeType, err = TradeVolumeTypeFromString(tvt)
	if err != nil {
		return tc, err
	}
	if tc.TradeVolumeType == TVTPercent && volume > maxPctVolume {
		volume = maxPctVolume
	}
	tc.Volume = volume
	tc.Reduce = reduce
	tc.Paper = paper
	tc.MaxSlippage = maxSlippage
	tc.StopLoss = stopLoss
	return tc, nil
}
|
package fizz
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"reflect"
"sync"
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
"github.com/loopfz/gadgeto/tonic"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
"github.com/wI2L/fizz/openapi"
)
// TestMain configures the environment for the whole test suite before
// delegating to m.Run.
func TestMain(m *testing.M) {
	// Don't print Gin debug in logs.
	gin.SetMode(gin.ReleaseMode)
	os.Exit(m.Run())
}
// TestInstance tests that a new Fizz
// instance can be created from scratch or
// from an existing Gin engine.
func TestInstance(t *testing.T) {
	fizz := New()
	assert.NotNil(t, fizz)
	engine := gin.Default()
	// Wrapping an existing engine must expose that same engine and its
	// root router group, with a ready generator and no accumulated errors.
	fizz = NewFromEngine(engine)
	assert.NotNil(t, fizz)
	assert.Equal(t, engine, fizz.Engine())
	assert.EqualValues(t, fizz.RouterGroup.group, &engine.RouterGroup)
	assert.NotNil(t, fizz.Generator())
	assert.Len(t, fizz.Errors(), 0)
}
// TestGroup tests that a router group can be created
// with its name/description recorded and the generator shared.
func TestGroup(t *testing.T) {
	engine := gin.New()
	fizz := NewFromEngine(engine)
	grp := fizz.Group("/test", "Test", "Test routes")
	assert.NotNil(t, grp)
	assert.Equal(t, grp.Name, "Test")
	assert.Equal(t, grp.Description, "Test routes")
	assert.Equal(t, grp.gen, fizz.gen)
	assert.NotNil(t, grp.group)
}
// TestHandler tests that handlers and middleware can be
// registered on the Fizz instance for every supported HTTP method.
func TestHandler(t *testing.T) {
	fizz := New()
	rid := uuid.Must(uuid.NewV4())
	// Middleware that tags every response with a fixed request id.
	fizz.Use(func(c *gin.Context) {
		c.Header("X-Request-Id", rid.String())
	})
	// Each handler invocation decrements the WaitGroup, proving all 8 ran.
	wg := sync.WaitGroup{}
	h := func(c *gin.Context) {
		wg.Done()
	}
	fizz.GET("/", nil, h)
	fizz.POST("/", nil, h)
	fizz.PUT("/", nil, h)
	fizz.PATCH("/", nil, h)
	fizz.DELETE("/", nil, h)
	fizz.HEAD("/", nil, h)
	fizz.OPTIONS("/", nil, h)
	fizz.TRACE("/", nil, h)
	wg.Add(8)
	srv := httptest.NewServer(fizz)
	defer srv.Close()
	c := srv.Client()
	c.Timeout = 1 * time.Second
	for _, method := range []string{
		"GET",
		"POST",
		"PUT",
		"PATCH",
		"DELETE",
		"HEAD",
		"OPTIONS",
		"TRACE",
	} {
		req, err := http.NewRequest(method, srv.URL, nil)
		if err != nil {
			// Fail fast: continuing with a nil request would panic below.
			t.Fatal(err)
		}
		resp, err := c.Do(req)
		if err != nil {
			// Fail fast: the previous t.Error-and-continue dereferenced a
			// nil resp on failure.
			t.Fatal(err)
		}
		assert.Equal(t, rid.String(), resp.Header.Get("X-Request-Id"))
		// Close eagerly so the shared transport can reuse connections.
		resp.Body.Close()
	}
	wg.Wait()
}
// customTime shows the date & time without timezone information
type customTime time.Time
func (c customTime) String() string {
return time.Time(c).Format("2006-01-02T15:04:05")
}
func (c customTime) MarshalJSON() ([]byte, error) {
// add quotes for JSON representation
ts := fmt.Sprintf("\"%s\"", c.String())
return []byte(ts), nil
}
func (c customTime) MarshalYAML() (interface{}, error) {
return c.String(), nil
}
func (c customTime) ParseExample(v string) (interface{}, error) {
t1, err := time.Parse(time.RFC3339, v)
if err != nil {
return nil, err
}
return customTime(t1), nil
}
// T is the output model used by the tonic handler tests; the description
// tags feed the generated OpenAPI spec.
type T struct {
	X string `json:"x" yaml:"x" description:"This is X"`
	Y int `json:"y" yaml:"y" description:"This is Y"`
	Z customTime `json:"z" yaml:"z" example:"2022-02-07T18:00:00+09:00" description:"This is Z"`
}
// In is the input model used by the tonic handler tests, binding one
// path parameter, one query parameter and one header.
type In struct {
	A int `path:"a" description:"This is A"`
	B string `query:"b" description:"This is B"`
	C string `header:"X-Test-C" description:"This is C"`
}
// TestTonicHandler tests that a tonic-wrapped
// handler can be registered on a Fizz instance,
// both directly and under a router group.
func TestTonicHandler(t *testing.T) {
	fizz := New()
	t1, err := time.Parse(time.RFC3339, "2022-02-07T18:00:00+09:00")
	assert.Nil(t, err)
	fizz.GET("/foo/:a", nil, tonic.Handler(func(c *gin.Context, params *In) (*T, error) {
		assert.Equal(t, 0, params.A)
		assert.Equal(t, "foobar", params.B)
		assert.Equal(t, "foobaz", params.C)
		return &T{X: "foo", Y: 1, Z: customTime(t1)}, nil
	}, 200))
	// Create a router group to test that tonic handlers works with router groups.
	grp := fizz.Group("/test", "Test Group", "Test Group")
	grp.GET("/bar/:a", nil, tonic.Handler(func(c *gin.Context, params *In) (*T, error) {
		assert.Equal(t, 42, params.A)
		assert.Equal(t, "group-foobar", params.B)
		assert.Equal(t, "group-foobaz", params.C)
		return &T{X: "group-foo", Y: 2, Z: customTime(t1)}, nil
	}, 200))
	srv := httptest.NewServer(fizz)
	defer srv.Close()
	c := srv.Client()
	c.Timeout = 1 * time.Second
	requests := []struct {
		url          string
		method       string
		header       http.Header
		expectStatus int
		expectBody   string
	}{
		{
			url:    "/foo/0?b=foobar",
			method: http.MethodGet,
			header: http.Header{
				"X-Test-C": []string{"foobaz"},
			},
			expectStatus: 200,
			expectBody:   `{"x":"foo","y":1,"z":"2022-02-07T18:00:00"}`,
		},
		{
			url:    "/test/bar/42?b=group-foobar",
			method: http.MethodGet,
			header: http.Header{
				"X-Test-C": []string{"group-foobaz"},
			},
			expectStatus: 200,
			expectBody:   `{"x":"group-foo","y":2,"z":"2022-02-07T18:00:00"}`,
		},
		{
			url:    "/bar/42?b=group-foobar",
			method: http.MethodGet,
			header: http.Header{
				"X-Test-C": []string{"group-foobaz"},
			},
			expectStatus: 404,
		},
	}
	for _, req := range requests {
		url, err := url.Parse(srv.URL + req.url)
		if err != nil {
			t.Error(err)
			break
		}
		resp, err := c.Do(&http.Request{
			URL:    url,
			Method: req.method,
			Header: req.header,
		})
		if err != nil {
			t.Error(err)
			break
		}
		// Read and close the body immediately: the previous
		// `defer resp.Body.Close()` inside the loop kept every response
		// open until the whole test returned (defer-in-loop leak).
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			t.Error(err)
			break
		}
		if req.expectStatus < 300 {
			assert.Equal(t, req.expectStatus, resp.StatusCode)
			assert.Equal(t, req.expectBody, string(body))
		} else {
			assert.Equal(t, req.expectStatus, resp.StatusCode)
		}
	}
}
// testInputModel exercises binding of two path parameters and one
// query parameter.
type testInputModel struct {
	PathParam1 string `path:"a"`
	PathParam2 int `path:"b"`
	QueryParam string `query:"q"`
}
// testInputModel1 exercises binding of a single path parameter.
type testInputModel1 struct {
	PathParam1 string `path:"a"`
}
// testInputModel2 exercises a path parameter combined with JSON body fields.
type testInputModel2 struct {
	C string `path:"c"`
	Message string `json:"message" description:"A short message"`
	AnyValue interface{} `json:"value" description:"A nullable value of arbitrary type"`
}
// TestSpecHandler tests that the OpenAPI handler
// return the spec properly marshaled in JSON.
// It registers routes exercising most operation options, then compares the
// served JSON and YAML specs against the golden files in testdata/.
func TestSpecHandler(t *testing.T) {
	fizz := New()
	fizz.GET("/test/:a",
		[]OperationOption{
			ID("GetTest"),
			Summary("Test"),
			Description("Test route"),
			StatusDescription("200"),
			StatusDescription("OK"),
			Deprecated(true),
			// Override summary and description
			// with printf-like options.
			Summaryf("Test-%s", "A"),
			Descriptionf("Test %s", "routes"),
			// Headers.
			Header("X-Request-Id", "Unique request ID", String),
			// Additional responses.
			Response("429", "", String, []*openapi.ResponseHeader{
				{
					Name: "X-Rate-Limit",
					Description: "Rate limit",
					Model: Integer,
				},
			}, nil),
			Response("404", "", String, nil, "not-found-example"),
			ResponseWithExamples("400", "", String, nil, map[string]interface{}{
				"one": "message1",
				"two": "message2",
			}),
			XCodeSample(&openapi.XCodeSample{
				Lang: "Shell",
				Label: "v4.4",
				Source: "curl http://0.0.0.0:8080",
			}),
			// Explicit override for SecurityRequirement (allow-all)
			WithoutSecurity(),
			XInternal(),
		},
		tonic.Handler(func(c *gin.Context, in *testInputModel1) (*T, error) {
			return &T{}, nil
		}, 200),
	)
	fizz.GET("/test/:a/:b", []OperationOption{
		ID("GetTest2"),
		InputModel(&testInputModel{}),
		WithOptionalSecurity(),
		Security(&openapi.SecurityRequirement{"oauth2": []string{"write:pets", "read:pets"}}),
	}, tonic.Handler(func(c *gin.Context) error {
		return nil
	}, 200))
	infos := &openapi.Info{
		Title: "Test Server",
		Description: `This is a test server.`,
		Version: "1.0.0",
	}
	fizz.POST("/test/:c",
		[]OperationOption{
			ID("PostTest"),
			StatusDescription("201"),
			StatusDescription("Created"),
		},
		tonic.Handler(func(c *gin.Context, in *testInputModel2) error {
			return nil
		}, 201),
	)
	// Server list with a templated base path variable.
	servers := []*openapi.Server{
		{
			URL: "https://foo.bar/{basePath}",
			Description: "Such Server, Very Wow",
			Variables: map[string]*openapi.ServerVariable{
				"basePath": {
					Default: "v2",
					Description: "version of the API",
					Enum: []string{"v1", "v2", "beta"},
				},
			},
		},
	}
	fizz.Generator().SetServers(servers)
	// Global security requirements plus their scheme definitions.
	security := []*openapi.SecurityRequirement{
		{"api_key": []string{}},
		{"oauth2": []string{"write:pets", "read:pets"}},
	}
	fizz.Generator().SetSecurityRequirement(security)
	fizz.Generator().API().Components.SecuritySchemes = map[string]*openapi.SecuritySchemeOrRef{
		"api_key": {
			SecurityScheme: &openapi.SecurityScheme{
				Type: "apiKey",
				Name: "api_key",
				In: "header",
			},
		},
		"oauth2": {
			SecurityScheme: &openapi.SecurityScheme{
				Type: "oauth2",
				Flows: &openapi.OAuthFlows{
					Implicit: &openapi.OAuthFlow{
						AuthorizationURL: "https://example.com/api/oauth/dialog",
						Scopes: map[string]string{
							"write:pets": "modify pets in your account",
							"read:pets": "read your pets",
						},
					},
				},
			},
		},
	}
	fizz.GET("/openapi.json", nil, fizz.OpenAPI(infos, "")) // default is JSON
	fizz.GET("/openapi.yaml", nil, fizz.OpenAPI(infos, "yaml"))
	srv := httptest.NewServer(fizz)
	defer srv.Close()
	c := srv.Client()
	c.Timeout = 1 * time.Second
	// JSON spec must match the golden file, structurally (not byte-wise).
	respJSON, err := c.Get(srv.URL + "/openapi.json")
	if err != nil {
		t.Error(err)
	}
	defer respJSON.Body.Close()
	assert.Equal(t, 200, respJSON.StatusCode)
	specJSON, err := ioutil.ReadAll(respJSON.Body)
	if err != nil {
		t.Error(err)
	}
	// see testdata/spec.json.
	expectedJSON, err := ioutil.ReadFile("testdata/spec.json")
	if err != nil {
		t.Error(err)
	}
	m, err := diffJSON(specJSON, expectedJSON)
	if err != nil {
		t.Error(err)
	}
	if !m {
		t.Error("invalid JSON spec")
	}
	// Same check for the YAML rendering.
	respYAML, err := c.Get(srv.URL + "/openapi.yaml")
	if err != nil {
		t.Error(err)
	}
	defer respYAML.Body.Close()
	assert.Equal(t, 200, respYAML.StatusCode)
	specYAML, err := ioutil.ReadAll(respYAML.Body)
	if err != nil {
		t.Error(err)
	}
	// see testdata/spec.yaml.
	expectedYAML, err := ioutil.ReadFile("testdata/spec.yaml")
	if err != nil {
		t.Error(err)
	}
	m, err = diffYAML(specYAML, expectedYAML)
	if err != nil {
		t.Error(err)
	}
	if !m {
		t.Error("invalid YAML spec")
	}
}
// TestInvalidContentTypeOpenAPIHandler tests that the
// OpenAPI handler will panic if the given content type
// is invalid (only JSON and YAML are supported).
func TestInvalidContentTypeOpenAPIHandler(t *testing.T) {
	fizz := New()
	assert.Panics(t, func() {
		fizz.GET("/openapi.xml", nil, fizz.OpenAPI(nil, "xml"))
	})
}
// TestMultipleTonicHandler tests that adding more than
// one tonic-wrapped handler to a Fizz operation panics.
func TestMultipleTonicHandler(t *testing.T) {
	fizz := New()
	assert.Panics(t, func() {
		fizz.GET("/:a", nil,
			tonic.Handler(func(c *gin.Context) error { return nil }, 200),
			tonic.Handler(func(c *gin.Context) error { return nil }, 200),
		)
	})
}
// TestErrorGen tests that the generator panics if
// it fails to add an operation to the specification
// (here: a field tagged as both path and query parameter).
func TestErrorGen(t *testing.T) {
	type In struct {
		A string `path:"a" query:"b"`
	}
	fizz := New()
	assert.Panics(t, func() {
		fizz.GET("/a", nil, tonic.Handler(func(c *gin.Context, param *In) error { return nil }, 200))
	})
}
// TestJoinPaths pins the slash-handling behavior of the joinPaths helper,
// including trailing-slash preservation and duplicate-slash collapsing.
func TestJoinPaths(t *testing.T) {
	jp := joinPaths
	assert.Equal(t, "", jp("", ""))
	assert.Equal(t, "/", jp("", "/"))
	assert.Equal(t, "/a", jp("/a", ""))
	assert.Equal(t, "/a/", jp("/a/", ""))
	assert.Equal(t, "/a/", jp("/a/", "/"))
	assert.Equal(t, "/a/", jp("/a", "/"))
	assert.Equal(t, "/a/b", jp("/a", "/b"))
	assert.Equal(t, "/a/b", jp("/a/", "/b"))
	assert.Equal(t, "/a/b/", jp("/a/", "/b/"))
	assert.Equal(t, "/a/b/", jp("/a/", "/b//"))
}
// TestLastChar checks that lastChar returns the final byte of a string
// and panics on the empty string.
func TestLastChar(t *testing.T) {
	assert.Equal(t, uint8('a'), lastChar("hola"))
	assert.Equal(t, uint8('s'), lastChar("adios"))
	assert.Panics(t, func() { lastChar("") })
}
// TestOperationContext checks that the OpenAPI operation is available from
// the Gin context for fizz-registered routes, and absent for plain Gin
// handlers registered without operation options.
func TestOperationContext(t *testing.T) {
	fizz := New()
	const (
		id   = "OperationContext"
		desc = "Test for OpenAPI operation instance in Gin context"
	)
	tonicHandler := tonic.Handler(func(c *gin.Context) error {
		op, err := OperationFromContext(c)
		if err == nil && op.ID == id && op.Description == desc {
			c.Status(http.StatusOK)
			return nil
		}
		c.AbortWithStatus(http.StatusInternalServerError)
		return nil
	}, http.StatusOK)
	fizz.GET("/op",
		[]OperationOption{
			ID(id),
			Description(desc),
		}, tonicHandler,
	)
	recorder := httptest.NewRecorder()
	req, err := http.NewRequest("GET", "/op", nil)
	if err != nil {
		t.Fatal(err)
	}
	fizz.ServeHTTP(recorder, req)
	if status := recorder.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status: got %v want %v",
			status, http.StatusOK,
		)
	}
	fizz.POST("/noop", nil, func(c *gin.Context) {
		_, err := OperationFromContext(c)
		if err != nil {
			c.Status(http.StatusOK)
			return
		}
		c.Status(http.StatusInternalServerError)
	})
	// Use a fresh recorder for the second request: ResponseRecorder.Code
	// defaults to 200 and is never reset by ServeHTTP, so reusing the first
	// recorder could let this assertion pass even if the handler failed.
	recorder = httptest.NewRecorder()
	req, err = http.NewRequest("POST", "/noop", nil)
	if err != nil {
		t.Fatal(err)
	}
	fizz.ServeHTTP(recorder, req)
	if status := recorder.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status: got %v want %v",
			status, http.StatusOK,
		)
	}
}
// diffJSON reports whether a and b encode the same JSON document,
// ignoring formatting differences.
func diffJSON(a, b []byte) (bool, error) {
	var va, vb interface{}
	if err := json.Unmarshal(a, &va); err != nil {
		return false, err
	}
	if err := json.Unmarshal(b, &vb); err != nil {
		return false, err
	}
	return reflect.DeepEqual(va, vb), nil
}
// diffYAML reports whether a and b decode to the same YAML document,
// ignoring formatting differences.
func diffYAML(a, b []byte) (bool, error) {
	var va, vb interface{}
	if err := yaml.Unmarshal(a, &va); err != nil {
		return false, err
	}
	if err := yaml.Unmarshal(b, &vb); err != nil {
		return false, err
	}
	return reflect.DeepEqual(va, vb), nil
}
|
package server
import "os"
// FilterItems applies the handler's show/hide regexp filters to a directory
// listing and returns the entries that pass.
//
// An entry is kept when it matches every applicable "show" pattern and none
// of the applicable "hide" patterns; dir-only and file-only patterns are
// consulted only for entries of the corresponding kind. With no filters
// configured, the input slice is returned unchanged.
func (h *handler) FilterItems(items []os.FileInfo) []os.FileInfo {
	// Fast path: nothing to filter.
	if h.shows == nil && h.showDirs == nil && h.showFiles == nil &&
		h.hides == nil && h.hideDirs == nil && h.hideFiles == nil {
		return items
	}
	kept := make([]os.FileInfo, 0, len(items))
	for _, entry := range items {
		name, isDir := entry.Name(), entry.IsDir()
		show := true
		if h.shows != nil {
			show = h.shows.MatchString(name)
		}
		if isDir && h.showDirs != nil {
			show = show && h.showDirs.MatchString(name)
		}
		if !isDir && h.showFiles != nil {
			show = show && h.showFiles.MatchString(name)
		}
		hide := false
		if h.hides != nil {
			hide = h.hides.MatchString(name)
		}
		if isDir && h.hideDirs != nil {
			hide = hide || h.hideDirs.MatchString(name)
		}
		if !isDir && h.hideFiles != nil {
			hide = hide || h.hideFiles.MatchString(name)
		}
		if show && !hide {
			kept = append(kept, entry)
		}
	}
	return kept
}
|
package main
import "fmt"
// main prints a greeting and then demonstrates that change_string builds a
// new string rather than mutating s in place.
func main() {
	s := "Hello everybody!\n"
	// Use fmt.Print instead of fmt.Printf(s): passing a non-constant value
	// as the format string is flagged by go vet and would misinterpret any
	// '%' in s as a formatting verb.
	fmt.Print(s)
	change_string(s, 's')
}
// // err
// func main() {
// var a int
// var b int32
// b = a + a
// b = b + 5
// }
// Package-level constants demonstrating iota: a = 0 and b = 1 are untyped
// ints from the iota sequence, while c is an explicitly typed string and
// does not continue the numbering.
const (
	a = iota
	b
	c string = "0"
)
// change_string prints a copy of s whose first rune has been replaced by
// change. Strings are immutable in Go, so the replacement is done on a
// []rune copy; an empty s would panic on the index (as in the original).
func change_string(s string, change rune) {
	runes := []rune(s)
	runes[0] = change
	fmt.Printf("%s\n", string(runes))
}
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
//go:build enablek8sconnector && windows
package k8s
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
log "github.com/cihub/seelog"
)
const (
	// Represents default path for k8s connector binary in EKS Windows AMIs.
	defaultK8sConnectorBinaryPath = `C:\Program Files\Amazon\EKS\bin\aws-vpc-cni-k8s-connector.exe`
	// Represents Env for log level for k8s connector binary.
	envLogLevel = "VPC_CNI_LOG_LEVEL"
	// Represents default log level for k8s connector binary
	// (presumably the fallback when envLogLevel is unset — see
	// getK8sConnectorLogLevel, defined elsewhere in this package).
	defaultLogLevel = "info"
)
// GetPodIP retrieves pod IP address using k8s connector binary.
// Output from binary is received over named pipe.
// Create and read operations on named pipe are handled in separate go routine.
// Named pipe path will be passed as argument to k8s connector binary execution.
// GetPodIP retrieves a pod's IP address using the k8s connector binary.
// The binary writes its output to a named pipe; creating and reading the
// pipe happens in a separate goroutine because accepting/reading a pipe
// connection blocks. The pipe path is passed to the binary as an argument.
func GetPodIP(ctx context.Context, podNamespace, podName string) (string, error) {
	// Get new named pipe path.
	pipeName, err := newPipe()
	if err != nil {
		return "", fmt.Errorf("error creating new pipe: %w", err)
	}
	// Buffered (size 1) so the reader goroutine can deliver its single
	// result and exit even when this function returns early (connector
	// error or context timeout). With an unbuffered channel that send
	// would block forever and leak the goroutine.
	resultChan := make(chan pipeReadResult, 1)
	// Read output from the named pipe in a separate goroutine; accepting
	// and reading the connection is a blocking operation.
	go readResultFromPipe(ctx, pipeName, resultChan)
	// Execute the k8s connector binary in this goroutine; the binary
	// writes its output to the named pipe.
	err = executeK8sConnector(ctx, podNamespace, podName, pipeName)
	if err != nil {
		return "", fmt.Errorf("error executing k8s connector: %w", err)
	}
	// Get output from the named pipe via the result channel.
	var result pipeReadResult
	select {
	// Check if context timed out before the pipe produced a result.
	case <-ctx.Done():
		return "", fmt.Errorf("error getting output from pipe: %w", ctx.Err())
	case result = <-resultChan:
	}
	if result.error != nil {
		return "", fmt.Errorf("error reading output from pipe: %w", result.error)
	}
	log.Debugf("Got pod IP address %s for pod %s in namespace %s", result.output, podName, podNamespace)
	return result.output, nil
}
// executeK8sConnector executes aws-vpc-cni-k8s-connector binary to get pod IP address.
// Output from binary is received over named pipe. Execution logs from binary are returned over stdout.
// executeK8sConnector runs the aws-vpc-cni-k8s-connector binary to resolve a
// pod IP address. The binary's result goes to the named pipe; its execution
// logs come back over stdout and are forwarded to our logger.
func executeK8sConnector(ctx context.Context, podNamespace string, podName string, pipe string) error {
	cmd := exec.CommandContext(ctx, getK8sConnectorBinaryPath(),
		"-pod-name", podName, "-pod-namespace", podNamespace,
		"-pipe", pipe, "-log-level", getK8sConnectorLogLevel())
	log.Debugf("Executing cmd %s to get pod IP address", cmd.String())
	// Capture stderr separately so a failure can be reported in full.
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	stdout, err := cmd.Output()
	log.Infof("Logs from k8s connector binary...\n")
	log.Infof("%s\n", string(stdout))
	log.Infof("End of k8s connector binary logs\n")
	if err != nil {
		return fmt.Errorf("error executing connector binary: %w with execution error: %s", err, stderr.String())
	}
	return nil
}
// getK8sConnectorBinaryPath returns path to k8s connector binary.
// getK8sConnectorBinaryPath reports where the k8s connector binary lives
// (fixed install location in EKS Windows AMIs).
func getK8sConnectorBinaryPath() string {
	return defaultK8sConnectorBinaryPath
}
// getK8sConnectorLogLevel returns the log level for k8s connector binary.
// getK8sConnectorLogLevel resolves the log level for the connector binary:
// the VPC_CNI_LOG_LEVEL env var if set, otherwise the default ("info").
func getK8sConnectorLogLevel() string {
	if lvl := os.Getenv(envLogLevel); lvl != "" {
		return lvl
	}
	return defaultLogLevel
}
|
package main
import (
"fmt"
"reflect"
)
// main demonstrates modifying a value through reflection: a reflect.Value
// obtained from a *pointer* is settable via Elem(); one obtained from a
// plain value is not.
func main() {
	var num float64 = 1.2345
	fmt.Println("old value of pointer:", num)
	// Obtain the reflect.Value for num. Note: the argument must be a
	// pointer for the value to be modifiable through reflection.
	pointer := reflect.ValueOf(&num)
	newValue := pointer.Elem()
	fmt.Println("type of pointer:", newValue.Type())
	fmt.Println("settability of pointer:", newValue.CanSet())
	// Assign a new value through the settable reflect.Value.
	newValue.SetFloat(77)
	//newValue.SetInt(77) //panic: reflect: call of reflect.Value.SetInt on float64 Value
	fmt.Println("new value of pointer:", num)
	//old value of pointer: 1.2345
	//type of pointer: float64
	//settability of pointer: true
	//new value of pointer: 77
	////////////////////
	// What happens if reflect.ValueOf receives a non-pointer argument?
	pointer = reflect.ValueOf(num)
	fmt.Println("CanSet:", pointer.CanSet())
	//newValue = pointer.Elem() // On a non-pointer this panics immediately: "panic: reflect: call of reflect.Value.Elem on float64 Value"
	//panic: reflect: call of reflect.Value.Elem on float64 Value
	//goroutine 1 [running]:
	//reflect.Value.Elem(0x80cf580, 0x18776028, 0x8e, 0x18776028, 0x8e, 0x0)
	// /home/rzexin/software/Google/go/src/reflect/value.go:775 +0x17f
	//main.main()
	// /home/rzexin/Workspaces/GoWorkspace/src/01.basic/reflectDemo/reflect1/reflect2.go:31 +0x2f9
}
|
package functions
import (
"fmt"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/api/errors"
)
// k8sToGRPCError translates well-known Kubernetes API errors into their
// gRPC status equivalents; any other error is passed through unchanged.
func k8sToGRPCError(err error) error {
	switch {
	case errors.IsNotFound(err):
		return status.Error(codes.NotFound, "not found")
	case errors.IsAlreadyExists(err):
		return status.Error(codes.AlreadyExists, "already exists")
	case errors.IsInvalid(err):
		return status.Error(codes.InvalidArgument, fmt.Sprintf("invalid: %v", err))
	default:
		return err
	}
}
|
/*
Graded lexicographic order (grlex order for short) is a way of ordering words that:
First orders words by length.
Then orders words of the same size by their dictionary order.
For example, in grlex order:
"tray" < "trapped" since "tray" has length 4 while "trapped" has length 7.
"trap" < "tray" since both have length 4, but "trap" comes before "tray" in the dictionary.
Given an array of words, return that array in grlex order.
Examples
makeGrlex(["small", "big"]) ➞ ["big", "small"]
makeGrlex(["cat", "ran", "for", "the", "rat"]) ➞ ["cat", "for", "ran", "rat", "the"]
makeGrlex(["this", "is", "a", "small", "test"]) ➞ ["a", "is", "test", "this", "small"]
Notes
N/A
*/
package main
import (
"reflect"
"sort"
)
// main runs the grlex-order test cases.
func main() {
	cases := []struct{ in, want []string }{
		{[]string{"small", "big"}, []string{"big", "small"}},
		{[]string{"big", "cat", "ran", "for", "the", "fat", "rat"}, []string{"big", "cat", "fat", "for", "ran", "rat", "the"}},
		{[]string{"this", "is", "a", "small", "test"}, []string{"a", "is", "test", "this", "small"}},
		{[]string{"let", "us", "try", "some", "long", "test", "to", "see", "if", "this", "works", "as", "it", "should"}, []string{"as", "if", "it", "to", "us", "let", "see", "try", "long", "some", "test", "this", "works", "should"}},
	}
	for _, c := range cases {
		test(c.in, c.want)
	}
}
func test(s, r []string) {
assert(reflect.DeepEqual(order(s), r))
}
// assert panics when x is false; no-op otherwise.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// order returns a new slice containing s sorted in graded lexicographic
// (grlex) order: shorter words first, ties broken alphabetically.
// The input slice is not modified.
func order(s []string) []string {
	out := make([]string, len(s))
	copy(out, s)
	sort.Slice(out, func(i, j int) bool {
		if len(out[i]) != len(out[j]) {
			return len(out[i]) < len(out[j])
		}
		return out[i] < out[j]
	})
	return out
}
|
package req
// NameReq identifies a name request message.
const NameReq string = "NAME_REQ"

// NameForm carries a single name field in a JSON request body.
type NameForm struct {
	Name string `json:"name"`
}

// IdForm carries a game identifier in a JSON request body.
type IdForm struct {
	ID int `json:"game_id"`
}

// TemplateReq carries a template string in a JSON request body.
type TemplateReq struct {
	Template string `json:"template"`
}

// ActionReq carries an action name in a JSON request body.
type ActionReq struct {
	Action string `json:"action"`
}
|
package main
import (
"encoding/json"
"github.com/Shopify/sarama"
"github.com/astaxie/beego/logs"
"logAgent/config"
"logAgent/kafka"
"logAgent/logger"
"logAgent/tail"
)
// init loads configuration and sets up logging before main runs.
func init() {
	config.InitConfig()
	logger.InitLogger()
}
// Msg is the JSON payload sent to Kafka: the agent's IP and one log line.
type Msg struct {
	IP  string
	Log string
}
// main tails the configured log file and forwards every non-empty line
// to Kafka. Blocks forever on the tail channel.
func main() {
	tailClient := tail.InitTail()
	kafkaClient := kafka.InitKafka()
	logs.Info("Log Agent start running...")
	for line := range tailClient.Lines {
		if line.Text == "" {
			continue
		}
		sendToKafka(kafkaClient, line.Text)
	}
}
// sendToKafka wraps one log line in a Msg envelope (with the local IP),
// JSON-encodes it, and publishes it synchronously to the "log" topic.
// Errors are logged and the message is dropped; nothing is retried.
func sendToKafka(kafkaClient sarama.SyncProducer, log string) {
	msg := Msg{config.LocalIP, log}
	jmsg, err := json.Marshal(msg)
	if err != nil {
		logs.Error("send to kafka topic:[log] log:[%v] failed, %v", log, err)
		return
	}
	// ByteEncoder wraps the marshalled bytes directly; StringEncoder would
	// force an extra []byte -> string copy for no benefit.
	pid, offset, err := kafkaClient.SendMessage(&sarama.ProducerMessage{
		Topic: "log",
		Value: sarama.ByteEncoder(jmsg),
	})
	if err != nil {
		logs.Error("send to kafka topic:[log] log:[%v] failed, %v", log, err)
		return
	}
	logs.Debug("topic: [log] pid: [%v], offset: [%v]", pid, offset)
}
|
package config
import(
"encoding/json"
"io/ioutil"
"util"
)
type StockHistSourceItem struct {
Id string `json: "id"`
Url string `json: "url"`
}
type StockHistSourceConfig struct {
Sources [] StockHistSourceItem `json: "sources"`
}
// StockHistManager loads and serves the stock-history source configuration.
type StockHistManager struct {
	Config StockHistSourceConfig
}
// Parse reads the JSON config file at filename into c.Config.
// Errors are delegated to util.CheckError rather than returned.
func (c *StockHistManager) Parse(filename string) {
	data, err := ioutil.ReadFile(filename)
	util.CheckError(err)
	err = json.Unmarshal(data, &c.Config)
	util.CheckError(err)
}
// GetConfig returns the source whose Id equals name, or the zero value
// when no such source exists.
func (c *StockHistManager) GetConfig(name string) StockHistSourceItem {
	for _, src := range c.Config.Sources {
		if src.Id == name {
			return src
		}
	}
	return StockHistSourceItem{}
}
// NewStockHistConfig builds a manager and immediately parses filename.
func NewStockHistConfig(filename string) *StockHistManager {
	m := &StockHistManager{}
	m.Parse(filename)
	return m
}
|
package max_common_prefix
import "testing"
// TestSolve verifies longestCommonPrefix against the standard expected
// outputs. The previous version only t.Log'ed the results and could
// never fail, so it verified nothing.
func TestSolve(t *testing.T) {
	if got := longestCommonPrefix([]string{"flower", "flow", "flight"}); got != "fl" {
		t.Errorf("longestCommonPrefix(flower, flow, flight) = %q, want %q", got, "fl")
	}
	if got := longestCommonPrefix([]string{"dog", "racecar", "car"}); got != "" {
		t.Errorf("longestCommonPrefix(dog, racecar, car) = %q, want %q", got, "")
	}
}
|
// Copyright (c) 2020 Blockwatch Data Inc.
// Author: alex@blockwatch.cc
package puller
import (
"context"
"github.com/zyjblockchain/sandy_log/log"
"sort"
"sync"
"tezos_index/chain"
model "tezos_index/puller/models"
util "tezos_index/utils"
"time"
)
// rankDefaultSize is the initial capacity (65536 entries) pre-allocated
// for each ranking list.
const rankDefaultSize = 1 << 16
// AccountRankingEntry holds one account's ranking metrics. The same entry
// is shared by all three ranking lists and carries its rank in each.
type AccountRankingEntry struct {
	AccountId    model.AccountID
	Address      chain.Address
	Balance      int64 // total balance
	TxFlow24h    int64 // tx volume in+out
	TxTraffic24h int64 // number of tx in+out
	RichRank     int   // assigned rank based on balance
	FlowRank     int   // assigned rank based on flow
	TrafficRank  int   // assigned rank based on traffic
}
// AccountRanking is a snapshot of account rankings by balance, traffic
// and flow. All three lists reference the same entries.
type AccountRanking struct {
	idmap   map[model.AccountID]*AccountRankingEntry // map for fast lookups and updates
	rich    ByRichRank
	traffic ByTrafficRank
	flow    ByFlowRank
}
// NewAccountRanking allocates an empty ranking with pre-sized lists.
func NewAccountRanking() *AccountRanking {
	r := &AccountRanking{
		idmap: make(map[model.AccountID]*AccountRankingEntry),
	}
	r.rich = make(ByRichRank, 0, rankDefaultSize)
	r.traffic = make(ByTrafficRank, 0, rankDefaultSize)
	r.flow = make(ByFlowRank, 0, rankDefaultSize)
	return r
}
// TopRich returns up to n entries ordered by balance.
// Fixed: the bound was previously clamped with len(h.flow) (copy/paste
// from TopFlows); it must clamp against the rich list itself. The lists
// happen to be equal length today, but the wrong bound is latent breakage.
func (h *AccountRanking) TopRich(n int) []*AccountRankingEntry {
	return h.rich[:util.Min(n, len(h.rich))]
}
// TopTraffic returns up to n entries ordered by 24h transaction count.
// Fixed: the bound was previously clamped with len(h.flow); it must clamp
// against the traffic list itself.
func (h *AccountRanking) TopTraffic(n int) []*AccountRankingEntry {
	return h.traffic[:util.Min(n, len(h.traffic))]
}
// TopFlows returns up to n entries ordered by 24h transaction volume.
func (h *AccountRanking) TopFlows(n int) []*AccountRankingEntry {
	return h.flow[:util.Min(n, len(h.flow))]
}
// GetAccount looks up the ranking entry for an account id; the boolean
// reports whether the account is present in this snapshot.
func (h *AccountRanking) GetAccount(id model.AccountID) (*AccountRankingEntry, bool) {
	entry, ok := h.idmap[id]
	return entry, ok
}
// ByRichRank implements sort.Interface ordering entries by balance, descending.
type ByRichRank []*AccountRankingEntry

func (h ByRichRank) Len() int           { return len(h) }
func (h ByRichRank) Less(i, j int) bool { return h[i].Balance > h[j].Balance }
func (h ByRichRank) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// ByTrafficRank implements sort.Interface ordering entries by 24h tx count, descending.
type ByTrafficRank []*AccountRankingEntry

func (h ByTrafficRank) Len() int           { return len(h) }
func (h ByTrafficRank) Less(i, j int) bool { return h[i].TxTraffic24h > h[j].TxTraffic24h }
func (h ByTrafficRank) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// ByFlowRank implements sort.Interface ordering entries by 24h tx volume, descending.
type ByFlowRank []*AccountRankingEntry

func (h ByFlowRank) Len() int           { return len(h) }
func (h ByFlowRank) Less(i, j int) bool { return h[i].TxFlow24h > h[j].TxFlow24h }
func (h ByFlowRank) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
// GetRanking returns the cached account ranking, building it on first use.
// Uses the double-checked pattern: a lock-free fast path via m.ranks.Load,
// then a mutex-guarded re-check so concurrent callers build at most once.
func (m *Indexer) GetRanking(ctx context.Context, now time.Time) (*AccountRanking, error) {
	// lazy-load on first call
	ranks := m.ranks.Load()
	if ranks == nil {
		// grab lock
		m.mu.Lock()
		defer m.mu.Unlock()
		// check again
		ranks = m.ranks.Load()
		// build if still not updated by other goroutine
		if ranks == nil {
			var err error
			ranks, err = m.BuildAccountRanking(ctx, now)
			if err != nil {
				return nil, err
			}
			m.ranks.Store(ranks)
		}
	}
	return ranks.(*AccountRanking), nil
}
// UpdateRanking rebuilds the ranking snapshot and publishes it atomically.
func (m *Indexer) UpdateRanking(ctx context.Context, now time.Time) error {
	built, err := m.BuildAccountRanking(ctx, now)
	if err != nil {
		return err
	}
	m.ranks.Store(built)
	return nil
}
// BuildAccountRanking assembles a fresh ranking snapshot: funded account
// balances, 24h transaction flow/traffic, and dense rank numbers for each
// of the three metrics (sorted and ranked concurrently).
func (m *Indexer) BuildAccountRanking(ctx context.Context, now time.Time) (*AccountRanking, error) {
	start := time.Now()
	ranks := NewAccountRanking()
	// Step 1: capture funded accounts with a positive total balance.
	var accs []*model.Account
	err := m.statedb.Select("row_id, hash, address_type, frozen_deposits, "+
		"frozen_rewards, frozen_fees, unclaimed_balance, spendable_balance, "+
		"is_activated").Where("is_funded = ?", true).Find(&accs).Error
	if err != nil {
		return nil, err
	}
	for _, a := range accs {
		bal := a.FrozenDeposits + a.FrozenFees + a.FrozenRewards + a.SpendableBalance
		if bal < 1 {
			continue
		}
		// One shared entry appended to all three lists.
		acc := &AccountRankingEntry{
			AccountId: a.RowId,
			Address:   chain.NewAddress(a.Type, a.Hash),
			Balance:   bal,
		}
		ranks.idmap[a.RowId] = acc
		ranks.rich = append(ranks.rich, acc)
		ranks.flow = append(ranks.flow, acc)
		ranks.traffic = append(ranks.traffic, acc)
	}
	// Step 2: fold the last 24h of successful transactions into per-account
	// volume (flow) and count (traffic) totals.
	var ops []*model.Op
	err = m.statedb.Select("row_id, sender_id, receiver_id, "+
		"volume").Where("time >= ? and type = ? and is_success = ?",
		now.Add(-24*time.Hour), int64(chain.OpTypeTransaction), true).Find(&ops).Error
	if err != nil {
		return nil, err
	}
	for _, o := range ops {
		if sender, ok := ranks.idmap[o.SenderId]; ok {
			sender.TxFlow24h += o.Volume
			sender.TxTraffic24h++
		}
		if receiver, ok := ranks.idmap[o.ReceiverId]; ok {
			receiver.TxFlow24h += o.Volume
			receiver.TxTraffic24h++
		}
	}
	// Step 3: sort embedded lists and assign rank order
	var wg sync.WaitGroup
	wg.Add(3)
	go func() {
		sort.Stable(ranks.rich)
		rank := 1
		var last int64
		for _, v := range ranks.rich {
			if v.Balance == 0 {
				continue
			}
			v.RichRank = rank
			if v.Balance != last {
				last = v.Balance
				rank++
			}
		}
		wg.Done()
	}()
	go func() {
		sort.Stable(ranks.flow)
		rank := 1
		var last int64
		for _, v := range ranks.flow {
			if v.TxFlow24h == 0 {
				continue
			}
			v.FlowRank = rank
			if v.TxFlow24h != last {
				last = v.TxFlow24h
				rank++
			}
		}
		wg.Done()
	}()
	go func() {
		sort.Stable(ranks.traffic)
		rank := 1
		var last int64
		// Fixed: this loop previously ranged over ranks.rich, which (a)
		// assigned traffic ranks in balance order and (b) raced with the
		// first goroutine concurrently sorting ranks.rich. It must walk
		// the freshly sorted traffic list.
		for _, v := range ranks.traffic {
			if v.TxTraffic24h == 0 {
				continue
			}
			v.TrafficRank = rank
			if v.TxTraffic24h != last {
				last = v.TxTraffic24h
				rank++
			}
		}
		wg.Done()
	}()
	wg.Wait()
	log.Debugf("Ranks built in %s", time.Since(start))
	return ranks, nil
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcekeeper
import (
"context"
"sync"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/policy"
"github.com/oam-dev/kubevela/pkg/resourcetracker"
"github.com/oam-dev/kubevela/pkg/utils/apply"
)
// ResourceKeeper handler for dispatching and deleting resources
type ResourceKeeper interface {
	// Dispatch applies resources and records them in resource trackers.
	Dispatch(context.Context, []*unstructured.Unstructured, []apply.ApplyOption, ...DispatchOption) error
	// Delete removes resources and untracks them.
	Delete(context.Context, []*unstructured.Unstructured, ...DeleteOption) error
	// GarbageCollect removes resources no longer referenced; the bool reports
	// whether collection has finished.
	GarbageCollect(context.Context, ...GCOption) (bool, []v1beta1.ManagedResource, error)
	// StateKeep reconciles live cluster state back to the tracked state.
	StateKeep(context.Context) error
	// ContainsResources reports whether all given resources are tracked.
	ContainsResources([]*unstructured.Unstructured) bool
	// DispatchComponentRevision applies a component ControllerRevision.
	DispatchComponentRevision(context.Context, *appsv1.ControllerRevision) error
	// DeleteComponentRevision removes a component ControllerRevision.
	DeleteComponentRevision(context.Context, *appsv1.ControllerRevision) error
}
// resourceKeeper is the default ResourceKeeper implementation. It caches
// the application's resource trackers (lazily created) and the parsed
// resource policies that steer dispatch/GC behavior.
type resourceKeeper struct {
	client.Client
	app        *v1beta1.Application
	mu         sync.Mutex
	applicator apply.Applicator

	// Resource trackers, loaded up-front and lazily created on demand
	// (underscore prefix: access via the getXxxRT accessors).
	_rootRT    *v1beta1.ResourceTracker
	_currentRT *v1beta1.ResourceTracker
	_historyRTs []*v1beta1.ResourceTracker
	_crRT      *v1beta1.ResourceTracker

	// Policies parsed from the application spec; nil when absent.
	applyOncePolicy      *v1alpha1.ApplyOncePolicySpec
	garbageCollectPolicy *v1alpha1.GarbageCollectPolicySpec
	sharedResourcePolicy *v1alpha1.SharedResourcePolicySpec
	takeOverPolicy       *v1alpha1.TakeOverPolicySpec
	readOnlyPolicy       *v1alpha1.ReadOnlyPolicySpec
	resourceUpdatePolicy *v1alpha1.ResourceUpdatePolicySpec

	cache *resourceCache
}
// getRootRT returns the root resource tracker, creating it on first use.
func (h *resourceKeeper) getRootRT(ctx context.Context) (rootRT *v1beta1.ResourceTracker, err error) {
	if h._rootRT != nil {
		return h._rootRT, nil
	}
	rt, err := resourcetracker.CreateRootResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, h.app)
	if err != nil {
		return nil, err
	}
	h._rootRT = rt
	return h._rootRT, nil
}
// getCurrentRT returns the current (versioned) resource tracker, creating
// it on first use.
func (h *resourceKeeper) getCurrentRT(ctx context.Context) (currentRT *v1beta1.ResourceTracker, err error) {
	if h._currentRT != nil {
		return h._currentRT, nil
	}
	rt, err := resourcetracker.CreateCurrentResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, h.app)
	if err != nil {
		return nil, err
	}
	h._currentRT = rt
	return h._currentRT, nil
}
// getComponentRevisionRT returns the component-revision resource tracker,
// creating it on first use.
func (h *resourceKeeper) getComponentRevisionRT(ctx context.Context) (crRT *v1beta1.ResourceTracker, err error) {
	if h._crRT != nil {
		return h._crRT, nil
	}
	rt, err := resourcetracker.CreateComponentRevisionResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, h.app)
	if err != nil {
		return nil, err
	}
	h._crRT = rt
	return h._crRT, nil
}
// parseApplicationResourcePolicy extracts every supported resource policy
// from the application spec into the keeper's fields. Missing policies
// leave their field nil; a malformed policy aborts with a wrapped error.
func (h *resourceKeeper) parseApplicationResourcePolicy() (err error) {
	if h.applyOncePolicy, err = policy.ParsePolicy[v1alpha1.ApplyOncePolicySpec](h.app); err != nil {
		return errors.Wrapf(err, "failed to parse apply-once policy")
	}
	// Addon applications implicitly get apply-once unless they declare one.
	if h.applyOncePolicy == nil && metav1.HasLabel(h.app.ObjectMeta, oam.LabelAddonName) {
		h.applyOncePolicy = &v1alpha1.ApplyOncePolicySpec{Enable: true}
	}
	if h.garbageCollectPolicy, err = policy.ParsePolicy[v1alpha1.GarbageCollectPolicySpec](h.app); err != nil {
		return errors.Wrapf(err, "failed to parse garbage-collect policy")
	}
	if h.sharedResourcePolicy, err = policy.ParsePolicy[v1alpha1.SharedResourcePolicySpec](h.app); err != nil {
		return errors.Wrapf(err, "failed to parse shared-resource policy")
	}
	if h.takeOverPolicy, err = policy.ParsePolicy[v1alpha1.TakeOverPolicySpec](h.app); err != nil {
		return errors.Wrapf(err, "failed to parse take-over policy")
	}
	if h.readOnlyPolicy, err = policy.ParsePolicy[v1alpha1.ReadOnlyPolicySpec](h.app); err != nil {
		return errors.Wrapf(err, "failed to parse read-only policy")
	}
	if h.resourceUpdatePolicy, err = policy.ParsePolicy[v1alpha1.ResourceUpdatePolicySpec](h.app); err != nil {
		return errors.Wrapf(err, "failed to parse resource-update policy")
	}
	return nil
}
// loadResourceTrackers fetches the application's existing root, current,
// history and component-revision resource trackers in one call.
func (h *resourceKeeper) loadResourceTrackers(ctx context.Context) (err error) {
	h._rootRT, h._currentRT, h._historyRTs, h._crRT, err = resourcetracker.ListApplicationResourceTrackers(multicluster.ContextInLocalCluster(ctx), h.Client, h.app)
	return err
}
// NewResourceKeeper create a handler for dispatching and deleting resources
func NewResourceKeeper(ctx context.Context, cli client.Client, app *v1beta1.Application) (_ ResourceKeeper, err error) {
	keeper := &resourceKeeper{
		Client:     cli,
		app:        app,
		applicator: apply.NewAPIApplicator(cli),
		cache:      newResourceCache(cli, app),
	}
	// Load existing trackers first, then derive policies from the app spec.
	if err = keeper.loadResourceTrackers(ctx); err != nil {
		return nil, errors.Wrapf(err, "failed to load resourcetrackers")
	}
	if err = keeper.parseApplicationResourcePolicy(); err != nil {
		return nil, errors.Wrapf(err, "failed to parse resource policy")
	}
	return keeper, nil
}
|
package api
import (
"encoding/json"
"github.com/cloudfly/ecenter/pkg/account"
"github.com/cloudfly/ecenter/tools"
"github.com/cloudfly/mowa"
"github.com/pkg/errors"
"github.com/valyala/fasthttp"
)
// init registers all user/group management routes. The final argument is
// the permission bit required to access the route (0 = any logged-in user).
func init() {
	registerRoute("GET", "/v1/users", GetUsers, 0)
	registerRoute("POST", "/v1/users", AddUser, 0)
	registerRoute("GET", "/v1/users/:username", GetUsers, 0)
	registerRoute("PUT", "/v1/users/:username", UpdateUser, 0)
	registerRoute("DELETE", "/v1/users/:username", DeleteUser, 0)
	registerRoute("POST", "/v1/users/:username/channels/:channel/:value", SetUserMessageReceiver, 0) // set receiver
	registerRoute("POST", "/v1/users/:username/channels/:channel", SetUserMessageReceiver, 0)        // remove receiver
	registerRoute("GET", "/v1/groups", GetUserGroups, account.UPGroup)
	registerRoute("GET", "/v1/groups/:group", GetUserGroups, account.UPGroup)
	registerRoute("POST", "/v1/groups", CreateUserGroup, account.UPGroup)
	registerRoute("PUT", "/v1/groups/:group", UpdateUserGroup, account.UPGroup)
	registerRoute("DELETE", "/v1/groups/:group", DeleteUserGroup, account.UPGroup)
	registerRoute("POST", "/v1/groups/:group/users", AddUser2Group, account.UPGroup)
	registerRoute("PUT", "/v1/groups/:group/users/:username", UpdateUserInGroup, account.UPGroup)
	registerRoute("DELETE", "/v1/groups/:group/users/:username", DeleteUserFromGroup, account.UPGroup)
	registerRoute("POST", "/v1/groups/:group/channels/:channel", SetGroupMessageReceiver, account.UPGroup) // when value is empty
	registerRoute("POST", "/v1/groups/:group/channels/:channel/:value", SetGroupMessageReceiver, account.UPGroup)
	registerRoute("POST", "/v1/password", ChangePassword, 0)
	registerRoute("GET", "/v1/me", LoginUserInfo, 0)
	// Login is the only route that does not require authentication.
	registerPublicRoute("POST", "/v1/login", Login)
}
// GetUsers serves both the user list (no :username path param) and a
// single-user lookup (with :username).
func GetUsers(ctx *fasthttp.RequestCtx) interface{} {
	name := mowa.StringValue(ctx, "username", "")
	if name != "" {
		user, ok := accounts.GetUser(name)
		if !ok {
			return mowa.Error("user not found")
		}
		return mowa.Data(user)
	}
	users, err := accounts.GetUsers(ctx, queryString(ctx, "search", ""), queryInt(ctx, "page", 1), queryInt(ctx, "page_size", 100))
	if err != nil {
		return mowa.Error(err)
	}
	return mowa.Data(users)
}
// AddUser creates a new user from the JSON request body. Only admins may
// create users; an unset permission falls back to the default.
func AddUser(ctx *fasthttp.RequestCtx) interface{} {
	content := ctx.Request.Body()
	var user account.User
	if err := json.Unmarshal(content, &user); err != nil {
		return mowa.Error(err)
	}
	if err := tools.ValidUsername(user.Username); err != nil {
		return mowa.Error(err)
	}
	if user.Permission == 0 {
		user.Permission = account.DefaultUserPermission
	}
	userInfo := getUser(ctx)
	if !userInfo.IsAdmin() {
		return mowa.Error("no permission")
	}
	// NOTE(review): user.Username is passed twice below; the second
	// occurrence looks like it may be meant to be an initial password —
	// confirm against accounts.AddUser's signature.
	user, err := accounts.AddUser(ctx, user.Username, user.Name, user.Username, user.Permission)
	if err != nil {
		return mowa.Error(err)
	}
	return mowa.Data(user)
}
// UpdateUser updates a user's name and (for admins) permission from the
// JSON request body. Non-admins may only change their own display name.
func UpdateUser(ctx *fasthttp.RequestCtx) interface{} {
	name := mowa.StringValue(ctx, "username", "")
	if name == "" {
		return mowa.Error("username required")
	}
	var payload account.User
	if err := json.Unmarshal(ctx.Request.Body(), &payload); err != nil {
		return mowa.Error(err)
	}
	caller := getUser(ctx)
	if !caller.IsAdmin() {
		// Non-admins cannot change permissions; the only thing they may
		// change is their own name.
		name = caller.Username
		payload.Permission = 0
	}
	if err := accounts.UpdateUser(ctx, name, payload.Name, payload.Permission); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("UPDATED")
}
// DeleteUser removes a user; admin only.
func DeleteUser(ctx *fasthttp.RequestCtx) interface{} {
	name := mowa.StringValue(ctx, "username", "")
	if name == "" {
		return mowa.Error("username required")
	}
	if caller := getUser(ctx); !caller.IsAdmin() {
		return mowa.Error("no permission")
	}
	if err := accounts.DeleteUser(ctx, name); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("DELETED")
}
// SetUserMessageReceiver sets (or, with an empty value, clears) a user's
// receiver address for a message channel. Non-admins may only modify
// their own receivers.
func SetUserMessageReceiver(ctx *fasthttp.RequestCtx) interface{} {
	name := mowa.StringValue(ctx, "username", "")
	channel := mowa.StringValue(ctx, "channel", "")
	value := mowa.StringValue(ctx, "value", "")
	if caller := getUser(ctx); !caller.IsAdmin() {
		name = caller.Username
	}
	if err := accounts.SetUserReceiver(ctx, name, channel, value); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("OK")
}
// SetGroupMessageReceiver sets (or, with an empty value, clears) a group's
// receiver address for a message channel. Requires group-admin rights.
func SetGroupMessageReceiver(ctx *fasthttp.RequestCtx) interface{} {
	groupName := mowa.StringValue(ctx, "group", "")
	channel := mowa.StringValue(ctx, "channel", "")
	value := mowa.StringValue(ctx, "value", "")
	if err := isGroupAdmin(ctx, groupName); err != nil {
		return mowa.Error(err)
	}
	if err := accounts.SetGroupReceiver(ctx, groupName, channel, value); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("OK")
}
// GetUserGroups serves both group listings (no :group path param; either
// public groups or the caller's groups, paginated) and a single-group
// lookup. For a single group it also reports the caller's permission
// within that group.
func GetUserGroups(ctx *fasthttp.RequestCtx) interface{} {
	userInfo := getUser(ctx)
	name := mowa.StringValue(ctx, "group", "")
	if name == "" {
		if queryString(ctx, "public", "false") == "true" {
			groups, err := accounts.GetPublicGroups(ctx, userInfo.Username, queryString(ctx, "name", ""), queryInt(ctx, "page", 1), queryInt(ctx, "page_size", 100))
			if err != nil {
				return mowa.Error(err)
			}
			return mowa.Data(groups)
		}
		groups, err := accounts.GetGroups(ctx, userInfo.Username, queryString(ctx, "name", ""), queryInt(ctx, "page", 1), queryInt(ctx, "page_size", 100))
		if err != nil {
			return mowa.Error(err)
		}
		return mowa.Data(groups)
	}
	group, ok := accounts.GetGroup(ctx, name)
	if !ok {
		return mowa.Error("not found")
	}
	// Private groups are hidden from non-members (reported as "not found"
	// rather than "forbidden" to avoid leaking their existence).
	if !userInfo.IsAdmin() && !group.Public && !group.HasPermission(userInfo.Username, 0) {
		return mowa.Error("not found")
	}
	// Set the caller's permission within this group.
	group.Permission = account.GroupPermission(account.GUPJoin)
	for _, user := range group.Users {
		if user.Name == userInfo.Username {
			group.Permission = user.Permission
			break
		}
	}
	return mowa.Data(group)
}
// CreateUserGroup creates a new group from the JSON request body; the
// caller becomes its initial member. Group names share a namespace with
// usernames, so a name that is already a username is rejected.
func CreateUserGroup(ctx *fasthttp.RequestCtx) interface{} {
	userInfo := getUser(ctx)
	content := ctx.Request.Body()
	var group account.Group
	if err := json.Unmarshal(content, &group); err != nil {
		return mowa.Error(err)
	}
	if err := tools.ValidUsername(group.Name); err != nil {
		return mowa.Error(err)
	}
	if _, ok := accounts.GetUser(group.Name); ok {
		// Fixed typo in the user-facing message: "usename" -> "username".
		return mowa.Errorf("'%s' is a username, cannot be used to create group", group.Name)
	}
	group, err := accounts.AddGroup(ctx, group.Name, group.Public, group.Description, userInfo.Username)
	if err != nil {
		return mowa.Error(err)
	}
	return mowa.Data(group)
}
// UpdateUserGroup updates a group's visibility and description; requires
// group-admin rights.
func UpdateUserGroup(ctx *fasthttp.RequestCtx) interface{} {
	groupName := mowa.StringValue(ctx, "group", "")
	if groupName == "" {
		return mowa.Error("group name required")
	}
	var payload account.Group
	if err := json.Unmarshal(ctx.Request.Body(), &payload); err != nil {
		return mowa.Error(err)
	}
	if err := isGroupAdmin(ctx, groupName); err != nil {
		return mowa.Error(err)
	}
	if err := accounts.UpdateGroup(ctx, groupName, payload.Public, payload.Description); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("UPDATED")
}
// DeleteUserGroup removes a group; requires group-admin rights.
func DeleteUserGroup(ctx *fasthttp.RequestCtx) interface{} {
	groupName := mowa.StringValue(ctx, "group", "")
	if groupName == "" {
		return mowa.Error("group name required")
	}
	if err := isGroupAdmin(ctx, groupName); err != nil {
		return mowa.Error(err)
	}
	if err := accounts.DeleteGroup(ctx, groupName); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("DELETED")
}
// AddUser2Group adds a user to a group. Group admins may add anyone with
// any permission; for public groups non-admins may add users too, but the
// admin bit is stripped from the requested permission.
func AddUser2Group(ctx *fasthttp.RequestCtx) interface{} {
	group := mowa.StringValue(ctx, "group", "")
	if group == "" {
		return mowa.Error("group required")
	}
	prevGroup, ok := accounts.GetGroup(ctx, group)
	if !ok {
		return mowa.Error("group not found")
	}
	isAdmin := isGroupAdmin(ctx, prevGroup.Name) == nil
	if !isAdmin && !prevGroup.Public {
		return mowa.Error("no permission")
	}
	var groupuser account.GroupUser
	if err := json.Unmarshal(ctx.Request.Body(), &groupuser); err != nil {
		return mowa.Error(err)
	}
	if !isAdmin {
		// Non-admins cannot grant the group-admin bit; mask it off.
		groupuser.Permission &= ^account.GUPAdmin
	}
	if err := accounts.SetGroupUser(ctx, group, groupuser.Name, groupuser.Permission); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("CREATED")
}
// UpdateUserInGroup changes a member's permission within a group;
// requires group-admin rights.
func UpdateUserInGroup(ctx *fasthttp.RequestCtx) interface{} {
	groupName := mowa.StringValue(ctx, "group", "")
	userName := mowa.StringValue(ctx, "username", "")
	if groupName == "" || userName == "" {
		return mowa.Error("group and username required")
	}
	var member account.GroupUser
	if err := json.Unmarshal(ctx.Request.Body(), &member); err != nil {
		return mowa.Error(err)
	}
	if err := isGroupAdmin(ctx, groupName); err != nil {
		return mowa.Error(err)
	}
	if err := accounts.SetGroupUser(ctx, groupName, userName, account.GroupPermission(member.Permission)); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("UPDATED")
}
// DeleteUserFromGroup removes a member from a group. Members may remove
// themselves; removing anyone else requires group-admin rights.
func DeleteUserFromGroup(ctx *fasthttp.RequestCtx) interface{} {
	groupName := mowa.StringValue(ctx, "group", "")
	userName := mowa.StringValue(ctx, "username", "")
	if groupName == "" || userName == "" {
		return mowa.Error("group and username required")
	}
	caller := getUser(ctx)
	if caller.Username != userName {
		if err := isGroupAdmin(ctx, groupName); err != nil {
			return mowa.Error(err)
		}
	}
	if err := accounts.DeleteGroupUser(ctx, groupName, userName); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("DELETED")
}
// ChangePassword 修改用户密码
// ChangePassword sets the caller's password to the raw request body.
func ChangePassword(ctx *fasthttp.RequestCtx) interface{} {
	body := ctx.Request.Body()
	if len(body) == 0 {
		return mowa.Error("password is empty")
	}
	caller := getUser(ctx)
	if err := accounts.SetUserPassword(ctx, caller.Username, string(body)); err != nil {
		return mowa.Error(err)
	}
	return mowa.Data("UPDATED")
}
// LoginUserInfo 返回登录者的用户信息
func LoginUserInfo(ctx *fasthttp.RequestCtx) interface{} {
user := getUser(ctx)
return mowa.Data(user)
}
// Login 登录认证
func Login(ctx *fasthttp.RequestCtx) interface{} {
token, err := auther.Login(ctx)
if err != nil {
return mowa.Error(err)
}
return mowa.Data(token)
}
// isGroupAdmin returns nil when the caller is a site admin or holds the
// group-admin bit in the named group; otherwise it returns an error
// describing why access is denied.
func isGroupAdmin(ctx *fasthttp.RequestCtx, group string) error {
	prevGroup, ok := accounts.GetGroup(ctx, group)
	if !ok {
		return errors.New("group not found")
	}
	userInfo := getUser(ctx)
	if userInfo.IsAdmin() || prevGroup.HasPermission(userInfo.Username, account.GUPAdmin) {
		return nil
	}
	// Fixed message: was the ungrammatical "not permission"; "no permission"
	// matches the wording used by the other handlers in this file.
	return errors.New("no permission")
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"fmt"
"testing"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
// TestTruncateHistogram checks that truncating to the full bucket count is
// a no-op and truncating to zero empties the histogram.
func TestTruncateHistogram(t *testing.T) {
	h := NewHistogram(0, 0, 0, 0, types.NewFieldType(mysql.TypeLonglong), 1, 0)
	lower, upper := types.NewIntDatum(0), types.NewIntDatum(1)
	h.AppendBucket(&lower, &upper, 0, 1)
	truncated := h.TruncateHistogram(1)
	require.True(t, HistogramEqual(h, truncated, true))
	truncated = h.TruncateHistogram(0)
	require.Equal(t, 0, truncated.Len())
}
// TestValueToString4InvalidKey checks that ValueToString renders a key
// containing an unknown encoding flag without error.
// (Local renamed from `bytes` to `key` to stop shadowing the stdlib package.)
func TestValueToString4InvalidKey(t *testing.T) {
	key, err := codec.EncodeKey(nil, nil, types.NewDatum(1), types.NewDatum(0.5))
	require.NoError(t, err)
	// Append an invalid encoding flag byte.
	key = append(key, 20)
	datum := types.NewDatum(key)
	res, err := ValueToString(nil, &datum, 3, nil)
	require.NoError(t, err)
	require.Equal(t, "(1, 0.5, \x14)", res)
}
// bucket4Test describes one histogram bucket for test construction:
// bounds, cumulative count, repeat count of the upper bound, and NDV.
type bucket4Test struct {
	lower  int64
	upper  int64
	count  int64
	repeat int64
	ndv    int64
}

// topN4Test describes one TopN entry for test construction.
type topN4Test struct {
	data  int64
	count int64
}
// genHist4Test builds a bytes-typed Histogram from the given bucket specs,
// encoding each int bound as a key so bucket bounds compare correctly.
func genHist4Test(t *testing.T, buckets []*bucket4Test, totColSize int64) *Histogram {
	h := NewHistogram(0, 0, 0, 0, types.NewFieldType(mysql.TypeBlob), len(buckets), totColSize)
	for _, bucket := range buckets {
		lower, err := codec.EncodeKey(nil, nil, types.NewIntDatum(bucket.lower))
		require.NoError(t, err)
		upper, err := codec.EncodeKey(nil, nil, types.NewIntDatum(bucket.upper))
		require.NoError(t, err)
		di, du := types.NewBytesDatum(lower), types.NewBytesDatum(upper)
		h.AppendBucketWithNDV(&di, &du, bucket.count, bucket.repeat, bucket.ndv)
	}
	return h
}
// TestMergePartitionLevelHist verifies MergePartitionHist2GlobalHist:
// per-partition histograms (plus TopN values evicted during merge) are
// combined into one global histogram with the expected buckets, counts,
// repeats, NDVs and total column size.
func TestMergePartitionLevelHist(t *testing.T) {
	type testCase struct {
		partitionHists  [][]*bucket4Test
		totColSize      []int64
		popedTopN       []topN4Test
		expHist         []*bucket4Test
		expBucketNumber int64
	}
	tests := []testCase{
		{
			partitionHists: [][]*bucket4Test{
				{
					// Col(1) = [1, 4,|| 6, 9, 9,|| 12, 12, 12,|| 13, 14, 15]
					{
						lower:  1,
						upper:  4,
						count:  2,
						repeat: 1,
						ndv:    2,
					},
					{
						lower:  6,
						upper:  9,
						count:  5,
						repeat: 2,
						ndv:    2,
					},
					{
						lower:  12,
						upper:  12,
						count:  8,
						repeat: 3,
						ndv:    1,
					},
					{
						lower:  13,
						upper:  15,
						count:  11,
						repeat: 1,
						ndv:    3,
					},
				},
				// Col(2) = [2, 5,|| 6, 7, 7,|| 11, 11, 11,|| 13, 14, 17]
				{
					{
						lower:  2,
						upper:  5,
						count:  2,
						repeat: 1,
						ndv:    2,
					},
					{
						lower:  6,
						upper:  7,
						count:  5,
						repeat: 2,
						ndv:    2,
					},
					{
						lower:  11,
						upper:  11,
						count:  8,
						repeat: 3,
						ndv:    1,
					},
					{
						lower:  13,
						upper:  17,
						count:  11,
						repeat: 1,
						ndv:    3,
					},
				},
			},
			totColSize: []int64{11, 11},
			popedTopN:  []topN4Test{},
			expHist: []*bucket4Test{
				{
					lower:  1,
					upper:  7,
					count:  7,
					repeat: 3,
					ndv:    5,
				},
				{
					lower:  7,
					upper:  11,
					count:  13,
					repeat: 3,
					ndv:    3,
				},
				{
					lower:  11,
					upper:  17,
					count:  22,
					repeat: 1,
					ndv:    6,
				},
			},
			expBucketNumber: 3,
		},
		{
			// Same partitions as above, but with two TopN values (4 and 18)
			// folded back into the merged histogram.
			partitionHists: [][]*bucket4Test{
				{
					// Col(1) = [1, 4,|| 6, 9, 9,|| 12, 12, 12,|| 13, 14, 15]
					{
						lower:  1,
						upper:  4,
						count:  2,
						repeat: 1,
						ndv:    2,
					},
					{
						lower:  6,
						upper:  9,
						count:  5,
						repeat: 2,
						ndv:    2,
					},
					{
						lower:  12,
						upper:  12,
						count:  8,
						repeat: 3,
						ndv:    1,
					},
					{
						lower:  13,
						upper:  15,
						count:  11,
						repeat: 1,
						ndv:    3,
					},
				},
				// Col(2) = [2, 5,|| 6, 7, 7,|| 11, 11, 11,|| 13, 14, 17]
				{
					{
						lower:  2,
						upper:  5,
						count:  2,
						repeat: 1,
						ndv:    2,
					},
					{
						lower:  6,
						upper:  7,
						count:  5,
						repeat: 2,
						ndv:    2,
					},
					{
						lower:  11,
						upper:  11,
						count:  8,
						repeat: 3,
						ndv:    1,
					},
					{
						lower:  13,
						upper:  17,
						count:  11,
						repeat: 1,
						ndv:    3,
					},
				},
			},
			totColSize: []int64{11, 11},
			popedTopN: []topN4Test{
				{
					data:  18,
					count: 5,
				},
				{
					data:  4,
					count: 6,
				},
			},
			expHist: []*bucket4Test{
				{
					lower:  1,
					upper:  5,
					count:  10,
					repeat: 1,
					ndv:    3,
				},
				{
					lower:  5,
					upper:  12,
					count:  22,
					repeat: 3,
					ndv:    6,
				},
				{
					lower:  12,
					upper:  18,
					count:  33,
					repeat: 5,
					ndv:    6,
				},
			},
			expBucketNumber: 3,
		},
	}
	for _, tt := range tests {
		var expTotColSize int64
		// Build the per-partition histograms and the expected merged size.
		hists := make([]*Histogram, 0, len(tt.partitionHists))
		for i := range tt.partitionHists {
			hists = append(hists, genHist4Test(t, tt.partitionHists[i], tt.totColSize[i]))
			expTotColSize += tt.totColSize[i]
		}
		ctx := mock.NewContext()
		sc := ctx.GetSessionVars().StmtCtx
		// Encode the evicted TopN values the same way bucket bounds are encoded.
		poped := make([]TopNMeta, 0, len(tt.popedTopN))
		for _, top := range tt.popedTopN {
			b, err := codec.EncodeKey(sc, nil, types.NewIntDatum(top.data))
			require.NoError(t, err)
			tmp := TopNMeta{
				Encoded: b,
				Count:   uint64(top.count),
			}
			poped = append(poped, tmp)
		}
		globalHist, err := MergePartitionHist2GlobalHist(sc, hists, poped, tt.expBucketNumber, true)
		require.NoError(t, err)
		// Compare each merged bucket against the expectation.
		for i, b := range tt.expHist {
			lo, err := ValueToString(ctx.GetSessionVars(), globalHist.GetLower(i), 1, []byte{types.KindInt64})
			require.NoError(t, err)
			up, err := ValueToString(ctx.GetSessionVars(), globalHist.GetUpper(i), 1, []byte{types.KindInt64})
			require.NoError(t, err)
			require.Equal(t, lo, fmt.Sprintf("%v", b.lower))
			require.Equal(t, up, fmt.Sprintf("%v", b.upper))
			require.Equal(t, globalHist.Buckets[i].Count, b.count)
			require.Equal(t, globalHist.Buckets[i].Repeat, b.repeat)
			require.Equal(t, globalHist.Buckets[i].NDV, b.ndv)
		}
		require.Equal(t, expTotColSize, globalHist.TotColSize)
	}
}
// genBucket4Merging4Test constructs a bucket4Merging whose bounds are the
// given integers and whose NDV and disjoint-NDV are set as specified.
func genBucket4Merging4Test(lower, upper, ndv, disjointNDV int64) bucket4Merging {
	low := types.NewIntDatum(lower)
	high := types.NewIntDatum(upper)
	b := bucket4Merging{
		lower:       &low,
		upper:       &high,
		disjointNDV: disjointNDV,
	}
	b.Bucket = Bucket{NDV: ndv}
	return b
}
// TestMergeBucketNDV checks mergeBucketNDV for the possible overlap
// relations between two buckets: identical range, partial overlap,
// fully disjoint (NDV moves to disjointNDV), containment, and
// left-contained-in-right.
func TestMergeBucketNDV(t *testing.T) {
	type testData struct {
		left bucket4Merging
		right bucket4Merging
		result bucket4Merging
	}
	tests := []testData{
		// Identical ranges: merged NDV is the larger of the two.
		{
			left: genBucket4Merging4Test(1, 2, 2, 0),
			right: genBucket4Merging4Test(1, 2, 3, 0),
			result: genBucket4Merging4Test(1, 2, 3, 0),
		},
		// Right bucket nested at the upper end of the left one.
		{
			left: genBucket4Merging4Test(1, 3, 2, 0),
			right: genBucket4Merging4Test(2, 3, 2, 0),
			result: genBucket4Merging4Test(1, 3, 3, 0),
		},
		// Disjoint ranges: the right bucket's NDV is accumulated into
		// disjointNDV and the bounds stay those of the left bucket.
		{
			left: genBucket4Merging4Test(1, 3, 2, 0),
			right: genBucket4Merging4Test(4, 6, 2, 2),
			result: genBucket4Merging4Test(1, 3, 2, 4),
		},
		// Partially overlapping ranges.
		{
			left: genBucket4Merging4Test(1, 5, 5, 0),
			right: genBucket4Merging4Test(2, 6, 5, 0),
			result: genBucket4Merging4Test(1, 6, 6, 0),
		},
		// Left bucket contained inside the right one.
		{
			left: genBucket4Merging4Test(3, 5, 3, 0),
			right: genBucket4Merging4Test(2, 6, 4, 0),
			result: genBucket4Merging4Test(2, 6, 5, 0),
		},
	}
	sc := mock.NewContext().GetSessionVars().StmtCtx
	for _, tt := range tests {
		res, err := mergeBucketNDV(sc, &tt.left, &tt.right)
		require.NoError(t, err)
		require.Equal(t, res.lower.GetInt64(), tt.result.lower.GetInt64())
		require.Equal(t, res.upper.GetInt64(), tt.result.upper.GetInt64())
		require.Equal(t, res.NDV, tt.result.NDV)
		require.Equal(t, res.disjointNDV, tt.result.disjointNDV)
	}
}
// TestIndexQueryBytes checks Index.QueryBytes row-count estimation against
// a single-bucket histogram: a value that is only a bucket lower bound
// falls back to Count/NDV, while an exact hit on the bucket's upper bound
// returns the bucket's Repeat value.
func TestIndexQueryBytes(t *testing.T) {
	ctx := mock.NewContext()
	sc := ctx.GetSessionVars().StmtCtx
	idx := &Index{Info: &model.IndexInfo{Columns: []*model.IndexColumn{{Name: model.NewCIStr("a"), Offset: 0}}}}
	idx.Histogram = *NewHistogram(0, 15, 0, 0, types.NewFieldType(mysql.TypeBlob), 0, 0)
	low, err1 := codec.EncodeKey(sc, nil, types.NewBytesDatum([]byte("0")))
	require.NoError(t, err1)
	high, err2 := codec.EncodeKey(sc, nil, types.NewBytesDatum([]byte("3")))
	require.NoError(t, err2)
	idx.Bounds.AppendBytes(0, low)
	idx.Bounds.AppendBytes(0, high)
	idx.Buckets = append(idx.Buckets, Bucket{Repeat: 10, Count: 20, NDV: 20})
	idx.PreCalculateScalar()
	idx.CMSketch = nil
	// testify takes (t, expected, actual); the previous order produced
	// misleading "expected X, got Y" messages on failure.
	// Count / NDV
	require.Equal(t, uint64(1), idx.QueryBytes(nil, low))
	// Repeat
	require.Equal(t, uint64(10), idx.QueryBytes(nil, high))
}
|
package parsing
import (
"github.com/s2gatev/sqlmorph/ast"
)
// Panic messages raised while parsing the pieces of a RIGHT JOIN clause.
const (
	RightWithoutJoinError = "Expected JOIN following RIGHT."
	RightJoinWithoutTargetError = "RIGHT JOIN statement must be followed by a target class."
	RightJoinWithoutOnError = "RIGHT JOIN statement must have an ON clause."
	RightJoinWrongJoinFieldsError = "Wrong join fields in RIGHT JOIN statement."
)
// RightJoinState is the parser state responsible for recognizing and
// consuming a RIGHT JOIN clause.
type RightJoinState struct {
	BaseState
}
// Name reports the human-readable name of this parser state.
func (s *RightJoinState) Name() string {
	const stateName = "RIGHT JOIN"
	return stateName
}
// Parse consumes a RIGHT JOIN clause from the tokenizer and attaches the
// resulting join to result. If the next token is not RIGHT it unreads the
// token and returns (result, false) without consuming input; any malformed
// clause after RIGHT panics via wrongTokenPanic.
func (s *RightJoinState) Parse(result ast.Node, tokenizer *Tokenizer) (ast.Node, bool) {
	target := result.(ast.HasJoin) // panics if result cannot carry joins
	if token, _ := tokenizer.ReadToken(); token != RIGHT {
		tokenizer.UnreadToken()
		return result, false
	}
	// RIGHT must be immediately followed by JOIN.
	if token, value := tokenizer.ReadToken(); token != JOIN {
		wrongTokenPanic(RightWithoutJoinError, value)
	}
	join := &ast.RightJoin{}
	table := &ast.Table{}
	// Mandatory join target.
	if token, tableName := tokenizer.ReadToken(); token == LITERAL {
		table.Name = tableName
	} else {
		wrongTokenPanic(RightJoinWithoutTargetError, tableName)
	}
	// Optional table alias; push the token back when absent.
	if token, tableAlias := tokenizer.ReadToken(); token == LITERAL {
		table.Alias = tableAlias
	} else {
		tokenizer.UnreadToken()
	}
	join.Table = table
	// Mandatory ON <left> = <right> condition.
	if token, value := tokenizer.ReadToken(); token != ON {
		wrongTokenPanic(RightJoinWithoutOnError, value)
	}
	if token, leftField := tokenizer.ReadToken(); token == LITERAL {
		join.Left = parseField(leftField)
	} else {
		wrongTokenPanic(RightJoinWrongJoinFieldsError, leftField)
	}
	if token, operator := tokenizer.ReadToken(); token != EQUALS {
		wrongTokenPanic(RightJoinWrongJoinFieldsError, operator)
	}
	if token, rightField := tokenizer.ReadToken(); token == LITERAL {
		join.Right = parseField(rightField)
	} else {
		wrongTokenPanic(RightJoinWrongJoinFieldsError, rightField)
	}
	target.AddJoin(join)
	return result, true
}
|
package initRouter
import (
"github.com/gin-gonic/gin"
"proxy_download/handler"
)
// SetupRouter builds the gin engine and registers every HTTP route of the
// application: the landing page plus CRUD-style endpoints grouped per
// resource (mysql, email, variable, group, header, case, user).
func SetupRouter() *gin.Engine {
	engine := gin.Default()

	// Landing page.
	engine.GET("/", handler.IndexHandler)

	mysqlRoutes := engine.Group("/mysql")
	mysqlRoutes.GET("/detail/:id", handler.MysqlDetail)
	mysqlRoutes.GET("/list", handler.MysqlList)
	mysqlRoutes.POST("/del", handler.MysqlDel)
	mysqlRoutes.POST("/edit", handler.MysqlEdit)
	mysqlRoutes.POST("/name_validate", handler.MysqlNameValidate)

	emailRoutes := engine.Group("/email")
	emailRoutes.GET("/detail/:id", handler.EmailDetail)
	emailRoutes.GET("/list", handler.EmailList)
	emailRoutes.POST("/del", handler.EmailDel)
	emailRoutes.POST("/edit", handler.EmailEdit)
	emailRoutes.POST("/name_validate", handler.EmailNameValidate)
	emailRoutes.POST("/user_list_validate", handler.EmailToUserListValidate)

	variableRoutes := engine.Group("/variable")
	variableRoutes.GET("/detail/:id", handler.VariableDetail)
	variableRoutes.GET("/list", handler.VariableList)
	variableRoutes.POST("/del", handler.VariableDel)
	variableRoutes.POST("/edit", handler.VariableEdit)
	variableRoutes.POST("/name_validate", handler.VariableNameValidate)

	groupRoutes := engine.Group("/group")
	groupRoutes.GET("/detail/:id", handler.GroupDetail)
	groupRoutes.GET("/list", handler.GroupList)
	groupRoutes.POST("/del", handler.GroupDel)
	groupRoutes.POST("/edit", handler.GroupEdit)
	groupRoutes.POST("/name_validate", handler.GroupNameValidate)

	headerRoutes := engine.Group("/header")
	headerRoutes.GET("/detail/:id", handler.HeaderDetail)
	headerRoutes.GET("/list", handler.HeaderList)
	headerRoutes.POST("/del", handler.HeaderDel)
	headerRoutes.POST("/edit", handler.HeaderEdit)
	headerRoutes.POST("/name_validate", handler.HeaderNameValidate)
	headerRoutes.POST("/value_validate", handler.HeaderValueValidate)

	caseRoutes := engine.Group("/case")
	caseRoutes.GET("/detail/:id", handler.TestCaseDetail)
	caseRoutes.GET("/list", handler.TestCaseList)
	caseRoutes.POST("/del", handler.TestCaseDel)
	caseRoutes.POST("/edit", handler.TestCaseEdit)
	caseRoutes.POST("/name_validate", handler.TestCaseNameValidate)
	caseRoutes.POST("/regular_validate", handler.TestCaseRegularValidate)
	caseRoutes.POST("/hope_validate", handler.TestCaseHopeValidate)
	caseRoutes.POST("/sql_variable_validate", handler.TestCaseSqlVariableValidate)

	userRoutes := engine.Group("/user")
	userRoutes.GET("/detail/:id", handler.UserDetail)
	userRoutes.POST("/edit", handler.UserEdit)

	return engine
}
|
package api
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"strconv"
"time"
"kumparan/constants"
"kumparan/repository"
"kumparan/service"
)
// Handler exposes the HTTP endpoints of the news API: creating a news
// item (via the producer) and retrieving the news list (with caching).
type Handler interface {
	CreateNews(w http.ResponseWriter, r *http.Request)
	GetNews(w http.ResponseWriter, r *http.Request)
}
// handler implements Handler on top of a producer service (writes/reads)
// and a cache used to memoize GetNews responses per request URI.
type handler struct {
	producer service.ProducerService
	cache repository.Cache
}
// InitHandler wires the producer service and the cache into a Handler.
func InitHandler(producer service.ProducerService, cache repository.Cache) Handler {
	h := &handler{
		producer: producer,
		cache:    cache,
	}
	return h
}
// CreateNews reads the raw request body and forwards it to the producer
// service. Success and failure are both reported as JSON with a status
// field in the body; note the HTTP status header itself is left at 200,
// matching the existing API shape.
func (h *handler) CreateNews(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	data, err := ioutil.ReadAll(r.Body)
	defer r.Body.Close()
	if err != nil {
		// Previously this error was silently overwritten by the producer
		// call below; report it instead of publishing a truncated body.
		json.NewEncoder(w).Encode(FailedResponse{
			Status:  http.StatusBadRequest,
			Message: err.Error(),
		})
		return
	}
	if err := h.producer.CreateNews(data); err != nil {
		response := FailedResponse{
			Status:  http.StatusForbidden,
			Message: err.Error(),
		}
		json.NewEncoder(w).Encode(response)
		return
	}
	response := SuccessResponse{
		Status:  http.StatusOK,
		Message: "Successfully sent a message!",
	}
	json.NewEncoder(w).Encode(response)
}
// GetNews serves the news list. It first consults the cache keyed by the
// request URI; on a miss it fetches the requested page from the producer,
// responds, and caches the fresh result for constants.CacheDuration.
func (h *handler) GetNews(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	// Cache hit: answer straight from the cache and skip the producer.
	content := h.cache.Get(r.RequestURI)
	if content != nil {
		log.Print("Cache Hit!\n")
		json.NewEncoder(w).Encode(SuccessResponse{
			Status: http.StatusOK,
			Message: "Successfully retrieved news from cache!",
			Data: content,
		})
		return
	}
	// Atoi error is ignored: a missing/garbled "page" falls back to 0.
	page, _ := strconv.Atoi(r.FormValue("page"))
	news, err := h.producer.GetAllNews(page)
	if err != nil {
		response := FailedResponse{
			Status: http.StatusNotFound,
			Message: err.Error(),
		}
		json.NewEncoder(w).Encode(response)
		return
	}
	response := SuccessResponse{
		Status: http.StatusOK,
		Message: "Successfully retrieved news!",
		Data: news,
	}
	// Cache the fresh result; a malformed duration constant only disables
	// caching, it does not fail the request.
	if duration, err := time.ParseDuration(constants.CacheDuration); err == nil {
		log.Printf("New data cached: %s for %s\n", r.RequestURI, duration)
		h.cache.Set(r.RequestURI, news, duration)
	} else {
		log.Printf("Page not cached. err: %s\n", err)
	}
	json.NewEncoder(w).Encode(response)
}
|
// Copyright 2020 The Reed Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package validation
import (
bm "github.com/reed/blockchain/blockmanager"
"github.com/reed/consensus/pow"
"github.com/reed/errors"
"github.com/reed/types"
)
// Sentinel errors returned by block-header validation; each is wrapped
// with context where it is raised.
var (
	blockHeightErr = errors.New("invalid block height")
	blockDiffErr = errors.New("invalid block difficulty value")
	blockNonceErr = errors.New("invalid block nonce value")
	blockParentHashErr = errors.New("invalid block prevBlockHash")
)
func ValidateBlockHeader(block *types.Block, prev *types.Block, bm *bm.BlockManager) error {
if block.Height != prev.Height+1 {
return errors.Wrapf(blockHeightErr, "prev height %d,cur height %d", prev.Height, block.Height)
}
difficulty := pow.GetDifficulty(block, bm.GetAncestor)
if block.BigNumber.Cmp(&difficulty) != 0 {
return errors.Wrap(blockDiffErr)
}
if !pow.CheckProofOfWork(difficulty, block.GetHash()) {
return errors.Wrap(blockNonceErr)
}
if block.PrevBlockHash != prev.GetHash() {
return errors.Wrapf(blockParentHashErr, "expect %x, actual %x", prev.GetHash(), &block.PrevBlockHash)
}
//TODO Version
//TODO timestamp
return nil
}
|
package fractalnoise
import (
"github.com/lmbarros/sbxs_go_noise"
)
// Params contains additional parameters passed to the fractal noise generator
// constructors.
//
// If any of these values is zero, a sensible default value is used instead:
// Layers (4), Frequency (1.0), Lacunarity (2.0), Amplitude (1.0), Gain (0.5).
type Params struct {
	Layers int // number of octaves summed together
	Frequency float64 // sampling frequency of the first layer
	Lacunarity float64 // per-layer frequency multiplier
	Amplitude float64 // amplitude of the first layer
	Gain float64 // per-layer amplitude multiplier
}
// generator1d is a generator of 1D fractal noise. It sums octaves of the
// wrapped base noiser according to params.
type generator1d struct {
	noiser noise.Noiser1D
	params Params
}
// generator2d is a generator of 2D fractal noise.
type generator2d struct {
	noiser noise.Noiser2D
	params Params
}
// generator3d is a generator of 3D fractal noise.
type generator3d struct {
	noiser noise.Noiser3D
	params Params
}
// generator4d is a generator of 4D fractal noise.
type generator4d struct {
	noiser noise.Noiser4D
	params Params
}
// New1D wraps the given one-dimensional noiser in a fractal generator.
func New1D(noiser noise.Noiser1D, params Params) noise.Noiser1D {
	g := generator1d{noiser: noiser, params: newParams(params)}
	return &g
}
// New2D wraps the given two-dimensional noiser in a fractal generator.
func New2D(noiser noise.Noiser2D, params Params) noise.Noiser2D {
	g := generator2d{noiser: noiser, params: newParams(params)}
	return &g
}
// New3D wraps the given three-dimensional noiser in a fractal generator.
func New3D(noiser noise.Noiser3D, params Params) noise.Noiser3D {
	g := generator3d{noiser: noiser, params: newParams(params)}
	return &g
}
// New4D wraps the given four-dimensional noiser in a fractal generator.
func New4D(noiser noise.Noiser4D, params Params) noise.Noiser4D {
	g := generator4d{noiser: noiser, params: newParams(params)}
	return &g
}
// Noise1D samples 1D fractal noise at x by accumulating successive
// octaves of the base noiser.
func (g *generator1d) Noise1D(x float64) float64 {
	total := 0.0
	frequency := g.params.Frequency
	amplitude := g.params.Amplitude
	for layer := 0; layer < g.params.Layers; layer++ {
		// Offset each layer to avoid "radial artifacts" around zero.
		offset := float64(layer) * 500.0
		total += g.noiser.Noise1D((x+offset)*frequency) * amplitude
		frequency *= g.params.Lacunarity
		amplitude *= g.params.Gain
	}
	return total
}
// Noise2D samples 2D fractal noise at (x, y) by accumulating successive
// octaves of the base noiser.
func (g *generator2d) Noise2D(x, y float64) float64 {
	total := 0.0
	frequency := g.params.Frequency
	amplitude := g.params.Amplitude
	for layer := 0; layer < g.params.Layers; layer++ {
		// Offset each layer to avoid "radial artifacts" around zero.
		offset := float64(layer) * 500.0
		total += g.noiser.Noise2D((x+offset)*frequency, (y+offset)*frequency) * amplitude
		frequency *= g.params.Lacunarity
		amplitude *= g.params.Gain
	}
	return total
}
// Noise3D samples 3D fractal noise at (x, y, z) by accumulating
// successive octaves of the base noiser.
func (g *generator3d) Noise3D(x, y, z float64) float64 {
	total := 0.0
	frequency := g.params.Frequency
	amplitude := g.params.Amplitude
	for layer := 0; layer < g.params.Layers; layer++ {
		// Offset each layer to avoid "radial artifacts" around zero.
		offset := float64(layer) * 500.0
		total += g.noiser.Noise3D((x+offset)*frequency, (y+offset)*frequency, (z+offset)*frequency) * amplitude
		frequency *= g.params.Lacunarity
		amplitude *= g.params.Gain
	}
	return total
}
// Noise4D samples 4D fractal noise at (x, y, z, w) by accumulating
// successive octaves of the base noiser. (The original comment said "5D";
// this is the four-dimensional sampler.)
func (g *generator4d) Noise4D(x, y, z, w float64) float64 {
	total := 0.0
	frequency := g.params.Frequency
	amplitude := g.params.Amplitude
	for layer := 0; layer < g.params.Layers; layer++ {
		// Offset each layer to avoid "radial artifacts" around zero.
		offset := float64(layer) * 500.0
		total += g.noiser.Noise4D((x+offset)*frequency, (y+offset)*frequency, (z+offset)*frequency, (w+offset)*frequency) * amplitude
		frequency *= g.params.Lacunarity
		amplitude *= g.params.Gain
	}
	return total
}
// newParams merges caller-supplied params with the documented defaults:
// every zero-valued field falls back to Layers 4, Frequency 1.0,
// Lacunarity 2.0, Amplitude 1.0, Gain 0.5.
func newParams(params Params) Params {
	out := params
	if out.Layers == 0 {
		out.Layers = 4
	}
	if out.Frequency == 0.0 {
		out.Frequency = 1.0
	}
	if out.Lacunarity == 0.0 {
		out.Lacunarity = 2.0
	}
	if out.Amplitude == 0.0 {
		out.Amplitude = 1.0
	}
	if out.Gain == 0.0 {
		out.Gain = 0.5
	}
	return out
}
|
package scan
import (
"os"
"path/filepath"
"sync"
"time"
"github.com/mitro42/coback/catalog"
fsh "github.com/mitro42/coback/fshelper"
"github.com/spf13/afero"
)
// mockDoubleProgressBar is a thread-safe test double for a two-metric
// (count + size) progress bar. It records the running totals, the
// declared totals, and how many times IncrBy/SetTotal were invoked.
type mockDoubleProgressBar struct {
	count int64
	size int64
	countTotal int64
	sizeTotal int64
	incrByCount int
	setTotalCount int
	mux sync.Mutex // guards every field above
}
// newMockDoubleProgressBar returns a zeroed mock progress bar.
func newMockDoubleProgressBar() *mockDoubleProgressBar {
	return new(mockDoubleProgressBar)
}
// SetTotal records the expected totals and counts how often it was called.
func (m *mockDoubleProgressBar) SetTotal(count int64, size int64) {
	m.mux.Lock()
	m.countTotal = count
	m.sizeTotal = size
	m.setTotalCount++
	m.mux.Unlock()
}
// IncrBy adds n bytes of progress, bumps the item count by one, and
// counts how often it was called.
func (m *mockDoubleProgressBar) IncrBy(n int) {
	m.mux.Lock()
	m.size += int64(n)
	m.count++
	m.incrByCount++
	m.mux.Unlock()
}
// CurrentSize reports the accumulated size under the lock.
func (m *mockDoubleProgressBar) CurrentSize() int64 {
	m.mux.Lock()
	size := m.size
	m.mux.Unlock()
	return size
}
// CurrentCount reports the accumulated item count under the lock.
func (m *mockDoubleProgressBar) CurrentCount() int64 {
	m.mux.Lock()
	count := m.count
	m.mux.Unlock()
	return count
}
// Wait is a no-op: the mock has no rendering goroutine to wait for.
func (m *mockDoubleProgressBar) Wait() {
}
// changeFileContent appends a fixed line to the file at path (creating it
// if necessary) so that its content and checksum change.
func changeFileContent(fs afero.Fs, path string) error {
	file, err := fs.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	_, err = file.Write([]byte("Some new stuff\n"))
	if err != nil {
		return err
	}
	return file.Close()
}
// dummyFileDescription describes a test fixture file: where to create it,
// its expected size and MD5 checksum, and the content to write.
type dummyFileDescription struct {
	Path string
	Size int64
	Md5Sum catalog.Checksum
	Content string
}
// dummies are the fixture files shared by the scan tests; sizes and
// checksums match each Content verbatim.
var dummies = []dummyFileDescription{
	{"subfolder/dummy1", 32, "30fac14a21fcc0c2d126a159beb14cb5", "This is just some dummy content\n"},
	{"dummy2", 351, "546ea07b13dc314506dc2e48dcc2a9d1", "Just some other content... On the other hand, we denounce with righteous indignation and dislike men who are so beguiled and demoralized by the charms of pleasure of the moment, so blinded by desire, that they cannot foresee the pain and trouble that are bound to ensue; and equal blame belongs to those who fail in their duty through weakness of will"},
}
// createDummyFile writes the fixture's content to its path, creating the
// file when needed and appending when it already exists.
func createDummyFile(fs afero.Fs, file dummyFileDescription) error {
	out, err := fs.OpenFile(file.Path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	_, err = out.Write([]byte(file.Content))
	if err != nil {
		return err
	}
	return out.Close()
}
// createDummyFileWithTimestamp writes the fixture's content and then
// stamps the file with the given modification time string.
func createDummyFileWithTimestamp(fs afero.Fs, file dummyFileDescription, modificationTime string) error {
	out, err := fs.OpenFile(file.Path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	_, err = out.Write([]byte(file.Content))
	if err != nil {
		return err
	}
	if err = out.Close(); err != nil {
		return err
	}
	return fsh.SetFileAttributes(fs, file.Path, modificationTime)
}
// createMemFsTestData creates an afero.MemFs and copies the contents of the test_data folder into it.
// This is necessary to work around an afero limitation:
// renaming and removing files from a CopyOnWriteFs is not yet supported.
func createMemFsTestData() afero.Fs {
	// NOTE(review): errors from Getwd, Walk, and CopyFile are deliberately
	// ignored here — this is a test helper and a broken copy will surface
	// as test failures; confirm that is acceptable.
	basePath, _ := os.Getwd()
	memFs := afero.NewMemMapFs()
	testDataFs := afero.NewBasePathFs(memFs, "test_data")
	diskFs := fsh.CreateSafeFs(filepath.Join(filepath.Dir(basePath), "test_data"))
	afero.Walk(diskFs, ".", func(p string, fi os.FileInfo, err error) error {
		if fi.IsDir() {
			return nil
		}
		// Preserve each file's modification time while copying into memory.
		fsh.CopyFile(diskFs, p, fi.ModTime().Format(time.RFC3339Nano), testDataFs)
		return nil
	})
	return memFs
}
|
package main
import (
"errors"
"strconv"
"strings"
)
// firstGroup returns the first capture group of a FindStringSubmatch
// result, trimmed of surrounding spaces. ok is false when the pattern did
// not match or the captured group is empty.
func firstGroup(match []string) (string, bool) {
	if match == nil || match[1] == "" {
		return "", false
	}
	return strings.Trim(match[1], " "), true
}

// ParseSettings populates the package-level configuration variables from
// the raw settings-file contents. Every setting is mandatory except
// MusicGraphKey (left untouched when absent) and PurgeAfter (defaults to
// 0). Each regex is now evaluated once per setting instead of two or
// three times, and package variables are only assigned after validation,
// exactly as before.
func ParseSettings(settingsFile string) error {
	var (
		v  string
		ok bool
	)
	if v, ok = firstGroup(rxpBotToken.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty BotToken")
	}
	botToken = v
	if v, ok = firstGroup(rxpYouTubeDeveloperKey.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty YouTubeDeveloperKey")
	}
	youTubeDeveloperKey = v
	// MusicGraphKey is optional: keep the previous value when missing.
	if v, ok = firstGroup(rxpMusicGraphKey.FindStringSubmatch(settingsFile)); ok {
		musicGraphKey = v
	}
	if v, ok = firstGroup(rxpGuildName.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty GuildName")
	}
	guildName = v
	if v, ok = firstGroup(rxpTextChannelName.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty TextChannelName")
	}
	textChannelName = v
	if v, ok = firstGroup(rxpVoiceChannelName.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty VoiceChannelName")
	}
	voiceChannelName = v
	if v, ok = firstGroup(rxpCommandPrefix.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty CommandPrefix")
	}
	commandPrefix = v
	if v, ok = firstGroup(rxpBotMuted.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty BotMuted")
	}
	botMuted = v == "true"
	if v, ok = firstGroup(rxpBotDeaf.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty BotDeaf")
	}
	botDeaf = v == "true"
	if v, ok = firstGroup(rxpSongsCounterFile.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty SongsCounterFile")
	}
	songsCounterFile = v
	if v, ok = firstGroup(rxpDebugMode.FindStringSubmatch(settingsFile)); !ok {
		return errors.New("Wrong or empty DebugMode")
	}
	debugMode = v == "true"
	// PurgeAfter is optional. The ParseInt error is deliberately ignored,
	// as before: malformed input leaves purgeTime at whatever ParseInt
	// returns alongside the error (0 for garbage input).
	if v, ok = firstGroup(rxpPurgeAfter.FindStringSubmatch(settingsFile)); ok {
		purgeTime, _ = strconv.ParseInt(v, 0, 64)
	} else {
		purgeTime = 0
	}
	return nil
}
|
package alertmanager
import (
"bytes"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/prometheus/alertmanager/notify/webhook"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
)
const validWebhook = `{"receiver":"telegram","status":"firing","alerts":[{"status":"firing","labels":{"alertname":"Fire","severity":"critical"},"annotations":{"message":"Something is on fire"},"startsAt":"2018-11-04T22:43:58.283995108+01:00","endsAt":"2018-11-04T22:46:58.283995108+01:00","generatorURL":"http://localhost:9090/graph?g0.expr=vector%28666%29\u0026g0.tab=1"}],"groupLabels":{"alertname":"Fire"},"commonLabels":{"alertname":"Fire","severity":"critical"},"commonAnnotations":{"message":"Something is on fire"},"externalURL":"http://localhost:9093","version":"4","groupKey":"{}:{alertname=\"Fire\"}"}`
// TestHandleWebhook exercises HandleTelegramWebhook over a table of HTTP
// requests: wrong method, empty body, invalid JSON, and valid payloads
// addressed to a private chat and a group chat. Valid requests must also
// push a TelegramWebhook with the chat ID parsed from the URL onto the
// webhooks channel.
func TestHandleWebhook(t *testing.T) {
	logger := log.NewNopLogger()
	counter := prometheus.NewCounter(prometheus.CounterOpts{})
	// Buffered so the handler can send without a concurrent receiver.
	webhooks := make(chan TelegramWebhook, 1)
	h := HandleTelegramWebhook(logger, counter, webhooks)
	type checkFunc func(*http.Response) error
	checkStatusCode := func(code int) checkFunc {
		return func(resp *http.Response) error {
			if resp.StatusCode != code {
				return errors.Errorf("statusCode %d expected, got %d", code, resp.StatusCode)
			}
			return nil
		}
	}
	testcases := []struct {
		name string
		req func() *http.Request
		checks []checkFunc
	}{
		{
			name: "NotPOST",
			req: func() *http.Request {
				req, _ := http.NewRequest(http.MethodGet, "/webhooks/telegram/123", nil)
				return req
			},
			checks: []checkFunc{
				checkStatusCode(http.StatusMethodNotAllowed),
			},
		},
		{
			name: "EmptyBody",
			req: func() *http.Request {
				var body io.Reader
				req, _ := http.NewRequest(http.MethodPost, "/", body)
				return req
			},
			checks: []checkFunc{
				checkStatusCode(http.StatusBadRequest),
			},
		},
		{
			name: "InvalidJSON",
			req: func() *http.Request {
				body := bytes.NewBufferString(`[]`)
				req, _ := http.NewRequest(http.MethodPost, "/", body)
				return req
			},
			checks: []checkFunc{
				checkStatusCode(http.StatusBadRequest),
			},
		},
		{
			name: "ValidWebhookPrivate",
			req: func() *http.Request {
				body := bytes.NewBufferString(validWebhook)
				req, _ := http.NewRequest(http.MethodPost, "/webhooks/telegram/123", body)
				return req
			},
			checks: []checkFunc{
				checkStatusCode(http.StatusOK),
				func(resp *http.Response) error {
					var expected webhook.Message
					if err := json.Unmarshal([]byte(validWebhook), &expected); err != nil {
						return err
					}
					// Note: this local shadows the imported webhook package
					// for the rest of the closure.
					webhook := <-webhooks
					if !assert.Equal(t, TelegramWebhook{ChatID: 123, Message: expected}, webhook) {
						return errors.New("")
					}
					return nil
				},
			},
		},
		{
			name: "ValidWebhookGroup",
			req: func() *http.Request {
				body := bytes.NewBufferString(validWebhook)
				// Negative chat IDs denote Telegram group chats.
				req, _ := http.NewRequest(http.MethodPost, "/webhooks/telegram/-1234", body)
				return req
			},
			checks: []checkFunc{
				checkStatusCode(http.StatusOK),
				func(resp *http.Response) error {
					var expected webhook.Message
					if err := json.Unmarshal([]byte(validWebhook), &expected); err != nil {
						return err
					}
					webhook := <-webhooks
					if !assert.Equal(t, TelegramWebhook{ChatID: -1234, Message: expected}, webhook) {
						return errors.New("")
					}
					return nil
				},
			},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			rec := httptest.NewRecorder()
			h.ServeHTTP(rec, tc.req())
			for _, check := range tc.checks {
				if err := check(rec.Result()); err != nil {
					t.Error(err)
				}
			}
		})
	}
}
|
package grpc
import (
"context"
"google.golang.org/grpc/codes"
grpc_health "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
"github.com/pomerium/pomerium/internal/log"
)
// healthCheckSrv is a stateless implementation of the gRPC health service.
type healthCheckSrv struct {
}
// NewHealthCheckServer returns a basic health checker that reports every
// service as serving.
func NewHealthCheckServer() grpc_health.HealthServer {
	var srv healthCheckSrv
	return &srv
}
// Check confirms the service is reachable and assumes any service is
// operational; outlier detection should be used to detect runtime
// malfunction based on consecutive 5xx responses.
func (h *healthCheckSrv) Check(ctx context.Context, req *grpc_health.HealthCheckRequest) (*grpc_health.HealthCheckResponse, error) {
	log.Debug(ctx).Str("service", req.Service).Msg("health check")
	return &grpc_health.HealthCheckResponse{
		Status: grpc_health.HealthCheckResponse_SERVING,
	}, nil
}
// Watch is not implemented, as it is not used by Envoy; calls are logged
// and rejected with codes.Unimplemented.
func (h *healthCheckSrv) Watch(req *grpc_health.HealthCheckRequest, _ grpc_health.Health_WatchServer) error {
	log.Error(context.Background()).Str("service", req.Service).Msg("health check watch")
	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
|
package ytrwrap
import (
"fmt"
"net/http"
"net/url"
"testing"
"github.com/stretchr/testify/assert"
)
// TestTr_DetectRU hits the real Yandex API (client from env credentials)
// and expects Russian text to be detected as RU.
func TestTr_DetectRU(t *testing.T) {
	tr := createRealTestClientFromEnv()
	lc, err := tr.Detect("мама мыла раму", nil)
	assert.Nil(t, err, "err")
	assert.Equal(t, RU, lc, "lc")
}
// TestTr_DetectEN hits the real Yandex API (client from env credentials)
// and expects English text to be detected as EN.
func TestTr_DetectEN(t *testing.T) {
	tr := createRealTestClientFromEnv()
	lc, err := tr.Detect("the pony is eating grass", nil)
	assert.Nil(t, err, "err")
	assert.Equal(t, EN, lc, "lc")
}
// TestTr_DetectErrUnmarshal verifies that a syntactically broken JSON
// response surfaces as a WRAPPER_INTERNAL_ERROR with the unmarshal detail.
func TestTr_DetectErrUnmarshal(t *testing.T) {
	dummyKey := "trnsl.there.is.no.key"
	text2detect := "no-text"
	client := newVoidClient()
	tr := NewYandexTranslateWithClient(dummyKey, client)
	// The first Detect call primes client.LastURL with the request URL.
	_, apierr := tr.Detect(text2detect, nil)
	assert.NotNil(t, apierr, "detect err")
	client.Set(client.LastURL(), []byte("broken-data"), http.StatusOK, nil)
	_, err := tr.Detect(text2detect, nil)
	assert.NotNil(t, err, "err")
	assert.Equal(t, WRAPPER_INTERNAL_ERROR, err.ErrorCode, "exp err")
	assert.Equal(t, "500 | Unmarshal | invalid character 'b' looking for beginning of value", err.Error(), "exp err")
}
// TestTr_DetectErrGET verifies that a transport-level GET failure surfaces
// as a WRAPPER_INTERNAL_ERROR with the underlying error message.
func TestTr_DetectErrGET(t *testing.T) {
	dummyKey := "trnsl.there.is.no.key"
	text2detect := "no-text"
	client := newVoidClient()
	tr := NewYandexTranslateWithClient(dummyKey, client)
	// The first Detect call primes client.LastURL with the request URL.
	_, apierr := tr.Detect(text2detect, nil)
	assert.NotNil(t, apierr, "detect err")
	client.Set(client.LastURL(), []byte(""), http.StatusNotFound, fmt.Errorf("offline"))
	_, err := tr.Detect(text2detect, nil)
	assert.NotNil(t, err, "err")
	assert.Equal(t, WRAPPER_INTERNAL_ERROR, err.ErrorCode, "exp err")
	assert.Equal(t, "500 | GET | offline", err.Error(), "exp err")
	assert.Equal(t, WRAPPER_INTERNAL_ERROR, err.Code(), "err")
	assert.Equal(t, "GET | offline", err.Message(), "err")
}
// TestURLDetect checks the request URL built by Detect with no language
// hints: host, /tr.json/detect path, key and text query params, empty hint.
func TestURLDetect(t *testing.T) {
	dummyKey := "trnsl.there.is.no.key"
	text2detect := "no-text"
	client := newVoidClient()
	tr := NewYandexTranslateWithClient(dummyKey, client)
	// Issue a request so client.LastURL captures the built URL.
	_, apierr := tr.Detect(text2detect, nil)
	assert.NotNil(t, apierr, "detect err")
	baseURL, err := url.Parse(YandexTranslateAPI)
	assert.Nil(t, err, "URL.Parse err")
	theURL, err := url.Parse(client.LastURL())
	assert.Nil(t, err, "URL.Parse err")
	assert.Equal(t, baseURL.Host, theURL.Host, "url")
	assert.Equal(t, baseURL.Path+"/tr.json/detect", theURL.Path, "url")
	values := theURL.Query()
	assert.Equal(t, dummyKey, values.Get("key"), "url")
	assert.Equal(t, "", values.Get("hint"), "url")
	assert.Equal(t, text2detect, values.Get("text"), "url")
}
// TestURLDetectHints checks that language hints are joined with commas
// into the "hint" query parameter of the detect URL.
func TestURLDetectHints(t *testing.T) {
	dummyKey := "trnsl.there.is.no.key"
	text2detect := "no-text"
	client := newVoidClient()
	tr := NewYandexTranslateWithClient(dummyKey, client)
	// Issue a request so client.LastURL captures the built URL.
	_, apierr := tr.Detect(text2detect, []LC{EN, DE})
	assert.NotNil(t, apierr, "detect err")
	baseURL, err := url.Parse(YandexTranslateAPI)
	assert.Nil(t, err, "URL.Parse err")
	theURL, err := url.Parse(client.LastURL())
	assert.Nil(t, err, "URL.Parse err")
	assert.Equal(t, baseURL.Host, theURL.Host, "url")
	assert.Equal(t, baseURL.Path+"/tr.json/detect", theURL.Path, "url")
	values := theURL.Query()
	assert.Equal(t, dummyKey, values.Get("key"), "url")
	assert.Equal(t, string(EN)+","+string(DE), values.Get("hint"), "url")
	assert.Equal(t, text2detect, values.Get("text"), "url")
}
// TestURLDetectHintsRare checks that empty language hints are dropped
// before the remaining hints are joined into the "hint" query parameter.
func TestURLDetectHintsRare(t *testing.T) {
	dummyKey := "trnsl.there.is.no.key"
	text2detect := "no-text"
	client := newVoidClient()
	tr := NewYandexTranslateWithClient(dummyKey, client)
	// Issue a request so client.LastURL captures the built URL.
	_, apierr := tr.Detect(text2detect, []LC{EN, "", "", FR})
	assert.NotNil(t, apierr, "detect err")
	baseURL, err := url.Parse(YandexTranslateAPI)
	assert.Nil(t, err, "URL.Parse err")
	theURL, err := url.Parse(client.LastURL())
	assert.Nil(t, err, "URL.Parse err")
	assert.Equal(t, baseURL.Host, theURL.Host, "url")
	assert.Equal(t, baseURL.Path+"/tr.json/detect", theURL.Path, "url")
	values := theURL.Query()
	assert.Equal(t, dummyKey, values.Get("key"), "url")
	assert.Equal(t, string(EN)+","+string(FR), values.Get("hint"), "url")
	assert.Equal(t, text2detect, values.Get("text"), "url")
}
// TestTrDetectErrCodeNotOK verifies that a non-200 HTTP status is reported
// as a WRAPPER_INTERNAL_ERROR even when the response body itself parses.
func TestTrDetectErrCodeNotOK(t *testing.T) {
	dummyKey := "trnsl.there.is.no.key"
	text2detect := "no-text"
	client := newVoidClient()
	tr := NewYandexTranslateWithClient(dummyKey, client)
	// The first Detect call primes client.LastURL with the request URL.
	_, apierr := tr.Detect(text2detect, []LC{EN})
	assert.NotNil(t, apierr, "detect err")
	client.Set(client.LastURL(), []byte(`{
        "code": 200,
        "lang": "en"
    }
    `), http.StatusInternalServerError, nil)
	_, err := tr.Detect(text2detect, []LC{EN})
	assert.NotNil(t, err, "err")
	assert.Equal(t, WRAPPER_INTERNAL_ERROR, err.ErrorCode, "exp err")
	assert.Equal(t, "500 | ", err.Error(), "exp err")
}
|
package domain_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "oneday-infrastructure/internal/pkg/authenticate/domain"
"oneday-infrastructure/mocks"
"oneday-infrastructure/tools"
"testing"
)
// Shared across the Ginkgo suite: the *testing.T captured by TestLogin and
// the mocked repository injected into the service under test.
var tt *testing.T
var mockRepo *mocks.LoginUserRepo
// TestLogin is the Go test entry point: it wires the mock repository to
// the test context and runs the Ginkgo "authenticate Suite" defined below.
func TestLogin(t *testing.T) {
	tt = t
	mockRepo = &mocks.LoginUserRepo{}
	mockRepo.Test(t)
	RegisterFailHandler(Fail)
	RunSpecs(t, "authenticate Suite")
}
// Spec tree for LoginUserService: exercises authentication (password
// and SMS-code modes), user-status lookup, and password reset against
// the package-level mocked repository.
var _ = Describe("service", func() {
	var service LoginUserService
	// Build the service under test once, backed by the shared mock repo.
	BeforeSuite(func() {
		service = NewLoginUserService(mockRepo)
	})
	Context("Authenticate", func() {
		// Baseline login command; individual specs tweak LoginMode and
		// EncryptWay in their BeforeEach blocks.
		var cmd = &LoginCmd{
			Username:         "username",
			EffectiveSeconds: 10,
			PassCode:         "123",
			LoginMode:        "PASSWORD",
			EncryptWay:       "MD5",
		}
		// Shared unlocked user; specs mutate PassCode/IsLock per case.
		// NOTE(review): cmd and user are shared and mutated across specs,
		// so spec ordering matters here.
		user := LoginUser{
			IsLock: false,
		}
		Describe("Authenticate", func() {
			Context("login by Password", func() {
				BeforeEach(func() {
					// Store the encrypted password so it matches what
					// Authenticate derives from cmd.PassCode.
					user.PassCode = PassCode{
						LoginMode: LoginMode(cmd.LoginMode),
						Password:  Password(tools.ChooseEncrypter(cmd.EncryptWay)(cmd.PassCode))}
					mockRepo.On("FindOne", cmd.Username).Return(user, true).Once()
				})
				It("should return true ", func() {
					token, result := service.Authenticate(cmd)
					Expect(string(result)).To(Equal(Success))
					Expect(token).NotTo(Equal(""))
					mockRepo.AssertExpectations(tt)
				})
			})
			Context("login by sms code", func() {
				BeforeEach(func() {
					// Switch the shared command to SMS-code mode.
					cmd.LoginMode = "SMS_CODE"
					cmd.EncryptWay = ""
					user.PassCode = PassCode{
						LoginMode: LoginMode(cmd.LoginMode),
						SmsCode:   SmsCode(cmd.PassCode),
					}
					mockRepo.On("FindOne", cmd.Username).Return(user, true).Once()
				})
				It("should return true ", func() {
					token, result := service.Authenticate(cmd)
					Expect(string(result)).To(Equal(Success))
					Expect(token).NotTo(Equal(""))
					mockRepo.AssertExpectations(tt)
				})
			})
		})
		Describe("GetUserStatus", func() {
			Context("user does not exist", func() {
				BeforeEach(func() {
					// FindOne reports "not found" via the boolean.
					mockRepo.On("FindOne", cmd.Username).Return(LoginUser{}, false).Once()
				})
				It("should return false", func() {
					Expect(string(service.GetUserStatus(cmd.Username))).To(Equal(NotExist))
					mockRepo.AssertExpectations(tt)
				})
			})
			Context("user is locked", func() {
				BeforeEach(func() {
					user.IsLock = true
					mockRepo.On("FindOne", cmd.Username).Return(user, true).Once()
				})
				It("should return false", func() {
					Expect(string(service.GetUserStatus(cmd.Username))).To(Equal(NotAvailable))
					mockRepo.AssertExpectations(tt)
				})
			})
			Context("user are allowed to login", func() {
				BeforeEach(func() {
					user.IsLock = false
					mockRepo.On("FindOne", cmd.Username).Return(user, true).Once()
				})
				It("should return true", func() {
					Expect(string(service.GetUserStatus(cmd.Username))).To(Equal(ALLOWED))
					mockRepo.AssertExpectations(tt)
				})
			})
		})
		Describe("reset user Password", func() {
			// Dedicated command for the reset flow; shadows the outer cmd.
			cmd := &ResetPasswordCmd{
				Username:    "username",
				NewPassword: "newPassword",
				OldPassword: "oldPassword",
				EncryptWay:  "MD5",
			}
			When("oldPassword is correct", func() {
				BeforeEach(func() {
					// Repo holds the encrypted old password, so the check passes.
					loginUser := LoginUser{
						PassCode: NewPassCode(tools.ChooseEncrypter(cmd.EncryptWay)(cmd.OldPassword)),
					}
					mockRepo.On("GetOne", cmd.Username).
						Return(loginUser).Once()
					// After verification the service must persist the new,
					// encrypted password.
					loginUser = LoginUser{
						PassCode: NewPassCode(tools.ChooseEncrypter(cmd.EncryptWay)(cmd.NewPassword)),
					}
					mockRepo.On(
						"UpdatePasswordByUsername",
						loginUser).Return(LoginUser{}).Once()
				})
				It("should return success", func() {
					Expect(string(service.ReSetPassword(cmd))).To(Equal(ResetPasswordSuccess))
					mockRepo.AssertExpectations(tt)
				})
			})
			When("oldPassword is error", func() {
				BeforeEach(func() {
					// Stored password encrypts the empty string, so
					// cmd.OldPassword can never match.
					mockRepo.On("GetOne", cmd.Username).
						Return(LoginUser{PassCode: PassCode{
							Password: Password(tools.ChooseEncrypter(cmd.EncryptWay)(""))}}).Once()
				})
				It("should return success", func() {
					Expect(string(service.ReSetPassword(cmd))).To(Equal(PasswordError))
					mockRepo.AssertExpectations(tt)
				})
			})
		})
	})
})
|
// Copyright © 2018 Inanc Gumus
// Learn Go Programming Course
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
//
// For more tutorials : https://learngoprogramming.com
// In-person training : https://www.linkedin.com/in/inancgumus/
// Follow me on twitter: https://twitter.com/inancgumus
package main
import "fmt"
// main demonstrates accumulated floating-point error: 0.1 has no exact
// binary representation, so repeatedly adding it drifts visibly once
// printed at 60 decimal places.
func main() {
	sum := 1.0 / 10.0

	// The original loop ranged over [...]int{10: 0}, an 11-element array
	// (indices 0..10); repeat the addition the same number of times.
	for i := 0; i < 11; i++ {
		sum += 1.0 / 10.0
	}

	fmt.Printf("%.60f", sum)
}
|
package lv2_rotate_matrix
import (
"fmt"
"log"
)
// PrintMatrix dumps the first rowLen x colLen cells of nums to stdout,
// right-aligned in 5-character columns, framed by log separators.
func PrintMatrix(nums [][]int, rowLen, colLen int) {
	log.Println("==========")
	for r := 0; r < rowLen; r++ {
		row := nums[r]
		for c := 0; c < colLen; c++ {
			fmt.Printf("%5d ", row[c])
		}
		fmt.Println()
	}
	log.Println("==========")
}
|
package rest
import (
"fmt"
"github.com/gin-gonic/contrib/ginrus"
"github.com/natefinch/lumberjack"
log "github.com/sirupsen/logrus"
"io"
"os"
"time"
)
const (
loggerFile = "/tmp/logger.log"
)
// setupLogger configures the global logrus logger to write JSON at
// trace level to both stdout and a size-rotated file, then attaches the
// same sink to the gin request logger.
func setupLogger(s *server) {
	if !fileExists(loggerFile) {
		createFile()
	}

	// Rotation policy: 5 MB per file, keep up to 90 old files for at
	// most 60 days, gzip the rotated ones.
	rotator := &lumberjack.Logger{
		Filename:   loggerFile,
		MaxSize:    5,
		MaxBackups: 90,
		MaxAge:     60,
		Compress:   true,
	}

	sink := io.MultiWriter(os.Stdout, rotator)
	log.SetOutput(sink)
	log.SetLevel(log.TraceLevel)
	log.SetFormatter(&log.JSONFormatter{})

	setupGinLogger(s, sink)
}
// try using it to prevent further errors.
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
// createFile creates the (empty) log file at loggerFile. Failures are
// reported on stdout and the logger but are otherwise non-fatal: the
// caller proceeds and lumberjack will surface any persistent problem.
func createFile() {
	f, err := os.Create(loggerFile)
	if err != nil {
		fmt.Print(err)
		log.Println(err)
		return
	}
	// The original leaked this handle; only the file's existence matters
	// here, so close it immediately.
	if err := f.Close(); err != nil {
		fmt.Print(err)
		log.Println(err)
	}
}
// setupGinLogger attaches request logging to the server's gin router,
// using a dedicated logrus instance that writes JSON at trace level to
// the supplied multi-writer (stdout + rotated file).
func setupGinLogger(s *server, mw io.Writer) {
	l := log.New()
	l.SetOutput(mw)
	l.SetLevel(log.TraceLevel)
	l.SetFormatter(&log.JSONFormatter{})
	// RFC3339 timestamps; final arg false => log times in local time,
	// not UTC.
	s.router.Use(ginrus.Ginrus(l, time.RFC3339, false))
}
|
package medianheap_test
import (
"fmt"
"github.com/pietv/medianheap"
)
// ExampleIntMedianHeap demonstrates basic usage: after adding -1, 0 and
// 1, the median is the middle value. The trailing Output comment is
// asserted by "go test", so it must match the printed result exactly.
func ExampleIntMedianHeap() {
	h := medianheap.New()
	h.Add(-1)
	h.Add(0)
	h.Add(1)
	fmt.Println(h.Median())
	// Output: 0
}
|
package api
import (
"github.com/ch3lo/overlord/configuration"
"github.com/gorilla/mux"
"github.com/thoas/stats"
)
// routesMap declares the v1 services API as HTTP method -> path pattern
// -> handler. routes() mounts every entry under /api/v1/services.
var routesMap = map[string]map[string]serviceHandler{
	"GET": {
		"/":                       getServices,
		"/{service_id}":           getServiceByServiceId,
		"/{service_id}/{cluster}": getServiceByClusterAndServiceId,
	},
	"PUT": {
		"/":                      putService,
		"/{service_id}/versions": putServiceVersionByServiceId,
	},
}
// routes assembles the HTTP router: a /stats endpoint plus every
// (method, path) pair from routesMap mounted under /api/v1/services,
// each handler wrapped with the shared error handler and context.
func routes(config *configuration.Configuration, sts *stats.Stats) *mux.Router {
	ctx := newContext(config)

	r := mux.NewRouter()
	r.Handle("/stats", &statsHandler{sts}).Methods("GET")

	// API v1
	v1 := r.PathPrefix("/api/v1/services").Subrouter()
	for method, handlers := range routesMap {
		for pattern, handler := range handlers {
			v1.Handle(pattern, errorHandler{handler, ctx}).Methods(method)
		}
	}

	return r
}
|
package timingwheel
import (
"context"
"log"
"sync"
"testing"
"time"
)
func waitCtxTimeout(ctx context.Context, timeout time.Duration) {
before := time.Now()
<-ctx.Done()
log.Printf("%v -> %v", timeout, time.Since(before))
}
// TestWithTimeout fires one goroutine per timeout and waits for all of
// them, checking (by log inspection) that the timing wheel expires each
// context roughly on schedule. NOTE(review): the 80-100 second entries
// make this test run for well over a minute of wall time — confirm that
// is intended for CI.
func TestWithTimeout(t *testing.T) {
	// Mix of short and long timeouts, with duplicates, to exercise
	// several wheel slots simultaneously.
	timeouts := []time.Duration{
		10 * time.Millisecond,
		10 * time.Millisecond,
		20 * time.Millisecond,
		50 * time.Millisecond,
		1 * time.Second,
		2 * time.Second,
		5 * time.Second,
		5 * time.Second,
		10 * time.Second,
		20 * time.Second,
		50 * time.Second,
		50 * time.Second,
		80 * time.Second,
		90 * time.Second,
		100 * time.Second,
		100 * time.Second,
	}
	wg := sync.WaitGroup{}
	for _, d := range timeouts {
		d := d // per-iteration copy for the closure below
		// NOTE(review): WithTimeout is handed a nil parent context —
		// presumably the timingwheel implementation tolerates nil; confirm.
		ctx, _ := WithTimeout(nil, d)
		wg.Add(1)
		go func() {
			waitCtxTimeout(ctx, d)
			wg.Done()
		}()
	}
	// Two late registrations, each after a 1s delay, to verify timers
	// added while the wheel is already running.
	time.Sleep(time.Second)
	ctx1, _ := WithTimeout(nil, time.Second)
	wg.Add(1)
	go func() {
		waitCtxTimeout(ctx1, time.Second)
		wg.Done()
	}()
	time.Sleep(time.Second)
	ctx2, _ := WithTimeout(nil, 4*time.Second)
	wg.Add(1)
	go func() {
		waitCtxTimeout(ctx2, 4*time.Second)
		wg.Done()
	}()
	wg.Wait()
}
|
/*
Copyright (C) 2018 Synopsys, Inc.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package hub
import (
"fmt"
"math"
"strings"
"time"
horizonapi "github.com/blackducksoftware/horizon/pkg/api"
horizon "github.com/blackducksoftware/horizon/pkg/deployer"
"github.com/blackducksoftware/perceptor-protoform/pkg/api/hub/v1"
hubclientset "github.com/blackducksoftware/perceptor-protoform/pkg/hub/client/clientset/versioned"
"github.com/blackducksoftware/perceptor-protoform/pkg/hub/containers"
"github.com/blackducksoftware/perceptor-protoform/pkg/protoform"
"github.com/blackducksoftware/perceptor-protoform/pkg/util"
routeclient "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1"
securityclient "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
log "github.com/sirupsen/logrus"
)
// Creater will store the configuration to create the Hub
type Creater struct {
	Config     *protoform.Config      // operator configuration (incl. operator namespace)
	KubeConfig *rest.Config           // kubeconfig used to build horizon deployers
	KubeClient *kubernetes.Clientset  // core Kubernetes API client
	HubClient  *hubclientset.Clientset // Hub CRD client
	// OpenShift-only clients; nil when running on plain Kubernetes.
	osSecurityClient *securityclient.SecurityV1Client
	routeClient      *routeclient.RouteV1Client
}
// NewCreater will instantiate the Creater with the supplied
// configuration and API clients.
func NewCreater(config *protoform.Config, kubeConfig *rest.Config, kubeClient *kubernetes.Clientset, hubClient *hubclientset.Clientset,
	osSecurityClient *securityclient.SecurityV1Client, routeClient *routeclient.RouteV1Client) *Creater {
	return &Creater{
		Config:           config,
		KubeConfig:       kubeConfig,
		KubeClient:       kubeClient,
		HubClient:        hubClient,
		osSecurityClient: osSecurityClient,
		routeClient:      routeClient,
	}
}
// DeleteHub will delete the Black Duck Hub: it removes the namespace
// (polling until it is gone), then the backing persistent volume and
// the cluster role binding. All failures are logged, not returned.
func (hc *Creater) DeleteHub(namespace string) {
	var err error
	// Verify whether the namespace exists.
	_, err = util.GetNamespace(hc.KubeClient, namespace)
	if err != nil {
		log.Errorf("Unable to find the namespace %+v due to %+v", namespace, err)
	} else {
		// Delete the namespace.
		err = util.DeleteNamespace(hc.KubeClient, namespace)
		if err != nil {
			log.Errorf("Unable to delete the namespace %+v due to %+v", namespace, err)
		}
		for {
			// Check the error BEFORE touching ns: the original logged
			// ns.Status first and dereferenced a nil namespace once the
			// deletion actually completed.
			ns, err := util.GetNamespace(hc.KubeClient, namespace)
			if err != nil {
				log.Infof("Deleted the namespace %+v", namespace)
				break
			}
			log.Infof("Namespace: %v, status: %v", namespace, ns.Status)
			// NOTE(review): there is no retry cap — this polls forever if
			// the namespace never finishes terminating. Confirm a bound
			// is wanted.
			time.Sleep(10 * time.Second)
		}
	}
	// Delete the persistent volume named after the namespace.
	err = util.DeletePersistentVolume(hc.KubeClient, namespace)
	if err != nil {
		log.Errorf("unable to delete the pv for %+v", namespace)
	}
	// Delete the Cluster Role Binding.
	err = util.DeleteClusterRoleBinding(hc.KubeClient, namespace)
	if err != nil {
		log.Errorf("unable to delete the cluster role binding for %+v", namespace)
	}
}
// GetDefaultPasswords returns admin,user,postgres passwords for db maintainance tasks. Should only be used during
// initialization, or for 'babysitting' ephemeral hub instances (which might have postgres restarts)
// MAKE SURE YOU SEND THE NAMESPACE OF THE SECRET SOURCE (operator), NOT OF THE new hub THAT YOUR TRYING TO CREATE !
func GetDefaultPasswords(kubeClient *kubernetes.Clientset, nsOfSecretHolder string) (adminPassword string, userPassword string, postgresPassword string, err error) {
	secret, err := util.GetSecret(kubeClient, nsOfSecretHolder, "blackduck-secret")
	if err != nil {
		log.Infof("warning: You need to first create a 'blackduck-secret' in this namespace with ADMIN_PASSWORD, USER_PASSWORD, POSTGRES_PASSWORD")
		return "", "", "", err
	}

	// Each password lives under its own key in the secret's data map.
	adminPassword = string(secret.Data["ADMIN_PASSWORD"])
	userPassword = string(secret.Data["USER_PASSWORD"])
	postgresPassword = string(secret.Data["POSTGRES_PASSWORD"])

	return adminPassword, userPassword, postgresPassword, err
}
// CreateHub will create the Black Duck Hub
// It deploys config-maps/secrets/postgres first, optionally initializes
// an empty database, then deploys the remaining hub containers.
// Returns the reachable address (OpenShift route host, load-balancer IP,
// or node-port cluster IP), the PVC volume name (when backup or a cloned
// DB is requested), a boolean that appears to mean "retry this create"
// (true on early/transient failures — TODO confirm with callers), and an
// error.
func (hc *Creater) CreateHub(createHub *v1.HubSpec) (string, string, bool, error) {
	log.Debugf("Create Hub details for %s: %+v", createHub.Namespace, createHub)
	// Create a horizon deployer for each hub
	deployer, err := horizon.NewDeployer(hc.KubeConfig)
	if err != nil {
		return "", "", true, fmt.Errorf("unable to create the horizon deployer due to %+v", err)
	}
	// Get Containers Flavor
	hubContainerFlavor := containers.GetContainersFlavor(createHub.Flavor)
	log.Debugf("Hub Container Flavor: %+v", hubContainerFlavor)
	if hubContainerFlavor == nil {
		return "", "", true, fmt.Errorf("invalid flavor type, Expected: Small, Medium, Large (or) OpsSight, Actual: %s", createHub.Flavor)
	}
	// All ConfigMap environment variables
	allConfigEnv := []*horizonapi.EnvConfig{
		{Type: horizonapi.EnvFromConfigMap, FromName: "hub-config"},
		{Type: horizonapi.EnvFromConfigMap, FromName: "hub-db-config"},
		{Type: horizonapi.EnvFromConfigMap, FromName: "hub-db-config-granular"},
	}
	// Fetch the DB passwords from the operator namespace, retrying
	// (effectively forever) every 5s until the blackduck-secret exists.
	var adminPassword, userPassword, postgresPassword string
	for dbInitTry := 0; dbInitTry < math.MaxInt32; dbInitTry++ {
		// get the secret from the default operator namespace, then copy it into the hub namespace.
		adminPassword, userPassword, postgresPassword, err = GetDefaultPasswords(hc.KubeClient, hc.Config.Namespace)
		if err == nil {
			break
		} else {
			log.Infof("wasn't able to init database, sleeping 5 seconds. try = %v", dbInitTry)
			time.Sleep(5 * time.Second)
		}
	}
	// NOTE(review): &createHub is a **HubSpec, so this logs a pointer
	// address — probably meant createHub.
	log.Debugf("Before init: %+v", &createHub)
	// Create the config-maps, secrets and postgres container
	err = hc.init(deployer, createHub, hubContainerFlavor, allConfigEnv, adminPassword, userPassword)
	if err != nil {
		return "", "", true, err
	}
	// Deploy config-maps, secrets and postgres container
	err = deployer.Run()
	if err != nil {
		// Logged but not returned: the endpoint wait below decides.
		log.Errorf("deployments failed because %+v", err)
	}
	// time.Sleep(20 * time.Second)
	// Validate postgres pod is cloned/backed up
	err = util.WaitForServiceEndpointReady(hc.KubeClient, createHub.Namespace, "postgres")
	if err != nil {
		return "", "", true, err
	}
	// Fresh database: bootstrap schema/users before the app containers start.
	if strings.EqualFold(createHub.DbPrototype, "empty") {
		err := InitDatabase(createHub, adminPassword, userPassword, postgresPassword)
		if err != nil {
			log.Errorf("%v: error: %+v", createHub.Namespace, err)
			return "", "", true, fmt.Errorf("%v: error: %+v", createHub.Namespace, err)
		}
	}
	// OpenShift: allow the service account to run with anyuid (non-fatal).
	err = hc.addAnyUIDToServiceAccount(createHub)
	if err != nil {
		log.Error(err)
	}
	// Create all hub deployments
	deployer, _ = horizon.NewDeployer(hc.KubeConfig)
	hc.createDeployer(deployer, createHub, hubContainerFlavor, allConfigEnv)
	log.Debugf("%+v", deployer)
	// Deploy all hub containers
	err = deployer.Run()
	if err != nil {
		log.Errorf("deployments failed because %+v", err)
		return "", "", true, fmt.Errorf("unable to deploy the hub in %s due to %+v", createHub.Namespace, err)
	}
	time.Sleep(10 * time.Second)
	// Validate all pods are in running state
	err = util.ValidatePodsAreRunningInNamespace(hc.KubeClient, createHub.Namespace)
	if err != nil {
		return "", "", true, err
	}
	// Retrieve the PVC volume name
	pvcVolumeName := ""
	if strings.EqualFold(createHub.BackupSupport, "Yes") || !strings.EqualFold(createHub.DbPrototype, "empty") {
		pvcVolumeName, err = hc.getPVCVolumeName(createHub.Namespace)
		if err != nil {
			return "", "", false, err
		}
	}
	// OpenShift routes
	ipAddress := ""
	if hc.routeClient != nil {
		route, err := util.CreateOpenShiftRoutes(hc.routeClient, createHub.Namespace, createHub.Namespace, "Service", "webserver")
		if err != nil {
			return "", pvcVolumeName, false, err
		}
		log.Debugf("openshift route host: %s", route.Spec.Host)
		ipAddress = route.Spec.Host
	}
	// Fall back to the load-balancer IP, then the node-port cluster IP.
	if strings.EqualFold(ipAddress, "") {
		ipAddress, err = hc.getLoadBalancerIPAddress(createHub.Namespace, "webserver-lb")
		if err != nil {
			ipAddress, err = hc.getNodePortIPAddress(createHub.Namespace, "webserver-np")
			if err != nil {
				return "", pvcVolumeName, false, err
			}
		}
	}
	log.Infof("hub Ip address: %s", ipAddress)
	return ipAddress, pvcVolumeName, false, nil
}
// getPVCVolumeName polls (up to 60 times, 10s apart) for the PVC named
// after the namespace and returns its bound volume name once set.
func (hc *Creater) getPVCVolumeName(namespace string) (string, error) {
	for attempt := 0; attempt < 60; attempt++ {
		time.Sleep(10 * time.Second)

		pvc, err := util.GetPVC(hc.KubeClient, namespace, namespace)
		if err != nil {
			return "", fmt.Errorf("unable to get pvc in %s namespace due to %s", namespace, err.Error())
		}
		log.Debugf("pvc: %v", pvc)

		// An empty VolumeName means the claim is not bound yet.
		if !strings.EqualFold(pvc.Spec.VolumeName, "") {
			return pvc.Spec.VolumeName, nil
		}
	}
	return "", fmt.Errorf("timeout: unable to get pvc %s in %s namespace", namespace, namespace)
}
// getLoadBalancerIPAddress polls (up to 10 times, 10s apart) for the
// external IP of the given load-balancer service.
func (hc *Creater) getLoadBalancerIPAddress(namespace string, serviceName string) (string, error) {
	for attempt := 0; attempt < 10; attempt++ {
		time.Sleep(10 * time.Second)

		svc, err := util.GetService(hc.KubeClient, namespace, serviceName)
		if err != nil {
			return "", fmt.Errorf("unable to get service %s in %s namespace due to %s", serviceName, namespace, err.Error())
		}
		log.Debugf("[%s] service: %v", serviceName, svc.Status.LoadBalancer.Ingress)

		// The first ingress entry carries the provisioned external IP.
		if ingress := svc.Status.LoadBalancer.Ingress; len(ingress) > 0 {
			return ingress[0].IP, nil
		}
	}
	return "", fmt.Errorf("timeout: unable to get ip address for the service %s in %s namespace", serviceName, namespace)
}
// getNodePortIPAddress polls (up to 10 times, 10s apart) for the
// cluster IP of the given node-port service.
func (hc *Creater) getNodePortIPAddress(namespace string, serviceName string) (string, error) {
	for attempt := 0; attempt < 10; attempt++ {
		time.Sleep(10 * time.Second)

		svc, err := util.GetService(hc.KubeClient, namespace, serviceName)
		if err != nil {
			return "", fmt.Errorf("unable to get service %s in %s namespace due to %s", serviceName, namespace, err.Error())
		}
		log.Debugf("[%s] service: %v", serviceName, svc.Spec.ClusterIP)

		if ip := svc.Spec.ClusterIP; !strings.EqualFold(ip, "") {
			return ip, nil
		}
	}
	return "", fmt.Errorf("timeout: unable to get ip address for the service %s in %s namespace", serviceName, namespace)
}
|
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"flag"
"fmt"
"net"
"go.uber.org/zap"
"google.golang.org/grpc"
"storj.io/storj/pkg/netstate"
proto "storj.io/storj/protos/netstate"
"storj.io/storj/storage/boltdb"
)
// Command-line flag targets, populated by initializeFlags.
var (
	port   int    // TCP port the gRPC server listens on
	dbPath string // filesystem path of the bolt database
	prod   bool   // true selects production (vs development) logging
)
// initializeFlags registers the service's command-line flags, binds
// them to the package-level variables above, and parses os.Args.
func initializeFlags() {
	flag.IntVar(&port, "port", 8080, "port")
	flag.StringVar(&dbPath, "db", "netstate.db", "db path")
	flag.BoolVar(&prod, "prod", false, "type of environment where this service runs")
	flag.Parse()
}
// main wires up logging and the bolt database, then serves the NetState
// gRPC API until the listener fails or the process is stopped.
func main() {
	initializeFlags()

	// No err here because no vars passed into NewDevelopment().
	// The default won't return an error, but if args are passed in,
	// then there will need to be error handling.
	logger, _ := zap.NewDevelopment()
	if prod {
		logger, _ = zap.NewProduction()
	}
	defer logger.Sync()

	bdb, err := boltdb.New(logger, dbPath)
	if err != nil {
		// The original returned silently here, leaving no trace of why
		// the process exited; record the failure before bailing out.
		logger.Error("failed to open database", zap.Error(err))
		return
	}
	defer bdb.Close()

	// start grpc server
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		logger.Fatal("failed to listen", zap.Error(err))
	}

	grpcServer := grpc.NewServer()
	proto.RegisterNetStateServer(grpcServer, netstate.NewServer(bdb, logger))
	defer grpcServer.GracefulStop()

	err = grpcServer.Serve(lis)
	if err != nil {
		logger.Error("Failed to serve:", zap.Error(err))
	}
}
|
package network
import (
"bytes"
"eos-network/config"
"time"
"unsafe"
)
// BlockRequest tracks a block id we have asked a peer for; LocalRetry
// marks requests generated by our own retry logic rather than a peer
// notice (see RecvNotice's "generated" flag).
type BlockRequest struct {
	Id         Sha256Type
	LocalRetry bool
}

// BlockOrigin remembers which connection first delivered a block, so
// broadcasts can skip echoing it back to the sender.
type BlockOrigin struct {
	Id     Sha256Type
	Origin *Connection
}

// TransactionOrigin remembers which connection first delivered a
// transaction, for the same skip-the-sender purpose.
type TransactionOrigin struct {
	Id     Sha256Type
	Origin *Connection
}

// DispatchMgr routes blocks and transactions between peers: it decides
// whether to push whole payloads or just id notices, and tracks what
// arrived from whom and what is still outstanding.
type DispatchMgr struct {
	justSendItMax        uint32 // payloads at or below this size are pushed whole
	reqBlks              []BlockRequest
	reqTrx               []Sha256Type
	receivedBlocks       []BlockOrigin
	receivedTransactions []TransactionOrigin
}
// NewDispatchMgr returns a dispatch manager whose "just send it"
// threshold is taken from the node configuration.
func NewDispatchMgr() *DispatchMgr {
	mgr := &DispatchMgr{}
	mgr.justSendItMax = config.MaxImplicitRequest
	return mgr
}
// bcastBlock broadcasts an accepted signed block to peers. The peer the
// block originally arrived from (if any) is removed from the received
// list and skipped; large blocks are announced by id-only notice instead
// of being pushed whole.
func (m *DispatchMgr) bcastBlock(blk *SignedBlock) {
	blkId, err := blk.BlockID()
	if err != nil {
		return
	}
	// Find (and forget) the peer that sent us this block so we never
	// echo it straight back.
	var skip *Connection
	for i := 0; i < len(m.receivedBlocks); i++ {
		if bytes.Equal(m.receivedBlocks[i].Id, blkId) {
			skip = m.receivedBlocks[i].Origin
			m.receivedBlocks = append(m.receivedBlocks[:i], m.receivedBlocks[i+1:]...)
			break
		}
	}
	packsize := uint32(len(blk.Serialize())) + 1 //??
	msgsize := packsize + uint32(unsafe.Sizeof(packsize))
	// Id-only notice used instead of the full block when it is large.
	pendingNotify := &NoticeMessage{}
	pendingNotify.KnownBlocks.Mode = Normal
	pendingNotify.KnownBlocks.IDs = append(pendingNotify.KnownBlocks.IDs, blkId)
	pendingNotify.KnownTrx.Mode = None
	tp := TimePoint{}
	pbs := &PeerBlockState{blkId, blk.BlockNumber(), false, true, tp}
	// NOTE(review): the skip != nil clause means a locally produced block
	// (no origin peer) is always pushed whole even when large — confirm
	// this asymmetry with bcastTransaction is intentional.
	if (config.LargeMsgNotify && msgsize > m.justSendItMax) && skip != nil {
		GetConnsMgr().SendAll(pendingNotify, func(c *Connection) bool {
			if skip == c || !c.Current() {
				return false
			}
			// Send the notice only to peers that do not already know
			// this block.
			unknown := c.AddPeerBlock(pbs)
			if !unknown {
				//elog("${p} already has knowledge of block ${b}", ("p",c->peer_name())("b",pbstate.block_num));
			}
			return unknown
		})
	} else {
		// Small block: push it directly to every current peer except the
		// sender.
		pbs.IsKnown = true
		for _, c := range GetConnsMgr().Connections {
			if c == skip || !c.Current() {
				continue
			}
			c.AddPeerBlock(pbs)
			c.Enqueue(blk)
		}
	}
}
// recvBlock records that block id (at height bnum) arrived from
// connection c, clears c's outstanding request if this was the block it
// was waiting for, and marks the block as known by that peer.
func (m *DispatchMgr) recvBlock(c *Connection, id Sha256Type, bnum uint32) {
	m.receivedBlocks = append(m.receivedBlocks, BlockOrigin{id, c})
	if c == nil {
		// The original guarded the LastReq check on c != nil but then
		// called c.AddPeerBlock unconditionally, panicking on nil.
		return
	}
	if c.LastReq != nil && c.LastReq.ReqBlocks.Mode != None && bytes.Equal(c.LastReq.ReqBlocks.IDs[len(c.LastReq.ReqBlocks.IDs)-1], id) {
		c.LastReq = nil
	}
	pbs := &PeerBlockState{id, bnum, false, true, TimePoint{}}
	c.AddPeerBlock(pbs)
}
// rejectedBlock drops the first received-block record matching id; the
// block failed validation, so its origin is no longer tracked.
func (m *DispatchMgr) rejectedBlock(id Sha256Type) {
	for i := range m.receivedBlocks {
		if bytes.Equal(m.receivedBlocks[i].Id, id) {
			m.receivedBlocks = append(m.receivedBlocks[:i], m.receivedBlocks[i+1:]...)
			return
		}
	}
}
// bcastTransaction broadcasts a packed transaction: the peer it came
// from is skipped, transactions already in the local cache are not
// re-broadcast, small payloads are pushed whole, and large ones are
// announced by id-only notice so peers can pull on demand.
func (m *DispatchMgr) bcastTransaction(trx *PackedTransaction) {
	id := trx.ID()
	// Find (and forget) the peer that sent it so we never echo it back.
	var skip *Connection
	for i := 0; i < len(m.receivedTransactions); i++ {
		if bytes.Equal(m.receivedTransactions[i].Id, id) {
			skip = m.receivedTransactions[i].Origin
			m.receivedTransactions = append(m.receivedTransactions[:i], m.receivedTransactions[i+1:]...)
			break
		}
	}
	// The transaction is no longer outstanding.
	for i := 0; i < len(m.reqTrx); i++ {
		if bytes.Equal(m.reqTrx[i], id) {
			m.reqTrx = append(m.reqTrx[:i], m.reqTrx[i+1:]...)
			break
		}
	}
	// Already cached locally => it has been broadcast before.
	if GetConnsMgr().FindLocalTxnsById(id) != nil {
		return
	}
	packsize := uint32(len(trx.Serialize())) + 1 //??
	msgsize := packsize + uint32(unsafe.Sizeof(packsize))
	trxExpire := trx.unpackedTrx.Expiration
	buff := []byte{}
	// Cache the transaction so the early-return above suppresses repeats.
	nts := &NodeTransactionState{id, trxExpire, trx, buff, 0, 0, 0}
	GetConnsMgr().localTxns = append(GetConnsMgr().localTxns, nts)
	if !config.LargeMsgNotify || msgsize <= m.justSendItMax {
		// Small enough: push the full transaction to every non-syncing
		// peer that does not already know it.
		GetConnsMgr().SendAll(trx, func(c *Connection) bool {
			if c == skip || c.Syncing {
				return false
			}
			bs := c.FindTrxStateById(id)
			unknown := bs == nil
			if unknown {
				ts := &TransactionState{id, true, true, 0, trxExpire, TimePoint{}}
				c.TrxState = append(c.TrxState, ts)
			} else {
				// Peer already knows it: just refresh its expiry.
				ute := &UpdateTxnExpiry{trxExpire}
				c.ModifyTrxStateByTxnExpiry(bs, ute)
			}
			return unknown
		})
	} else {
		// Large: announce only the id; interested peers will request it.
		pendingNotify := &NoticeMessage{}
		pendingNotify.KnownTrx.Mode = Normal
		pendingNotify.KnownTrx.IDs = append(pendingNotify.KnownTrx.IDs, id)
		pendingNotify.KnownBlocks.Mode = None
		GetConnsMgr().SendAll(pendingNotify, func(c *Connection) bool {
			if c == skip || c.Syncing {
				return false
			}
			bs := c.FindTrxStateById(id)
			unknown := bs == nil
			if unknown {
				ts := &TransactionState{id, false, true, 0, trxExpire, TimePoint{}}
				c.TrxState = append(c.TrxState, ts)
			} else {
				ute := &UpdateTxnExpiry{trxExpire}
				c.ModifyTrxStateByTxnExpiry(bs, ute)
			}
			return unknown
		})
	}
}
// recvTransaction records that transaction id arrived from connection c
// and clears c's outstanding request if this was the transaction it was
// waiting for.
func (m *DispatchMgr) recvTransaction(c *Connection, id Sha256Type) {
	m.receivedTransactions = append(m.receivedTransactions, TransactionOrigin{id, c})
	if c == nil || c.LastReq == nil {
		return
	}
	if c.LastReq.ReqTrx.Mode != None && bytes.Equal(c.LastReq.ReqTrx.IDs[len(c.LastReq.ReqTrx.IDs)-1], id) {
		c.LastReq = nil
	}
}
// rejectedTransaction drops the first received-transaction record
// matching id; the transaction failed validation, so its origin is no
// longer tracked.
func (m *DispatchMgr) rejectedTransaction(id Sha256Type) {
	for i := range m.receivedTransactions {
		if bytes.Equal(m.receivedTransactions[i].Id, id) {
			m.receivedTransactions = append(m.receivedTransactions[:i], m.receivedTransactions[i+1:]...)
			return
		}
	}
}
// RecvNotice handles a peer's notice: for every advertised transaction
// or block we do not already have, a request message is built, the ids
// are recorded as outstanding, and the request is enqueued on c.
// generated marks notices synthesized locally (e.g. retries) rather
// than received off the wire; it is carried into BlockRequest.
func (m *DispatchMgr) RecvNotice(c *Connection, msg *NoticeMessage, generated bool) {
	req := &RequestMessage{}
	req.ReqTrx.Mode = None
	req.ReqBlocks.Mode = None
	sendReq := false
	if msg.KnownTrx.Mode == Normal {
		req.ReqTrx.Mode = Normal
		req.ReqTrx.Pending = 0
		for i := 0; i < len(msg.KnownTrx.IDs); i++ {
			tx := GetConnsMgr().FindLocalTxnsById(msg.KnownTrx.IDs[i])
			// 120-second expiry window for a transaction known only by id.
			expires := TimePointSec{time.Now().Add(120 * time.Second)}
			ts := &TransactionState{msg.KnownTrx.IDs[i], true, true, 0, expires, TimePoint{}}
			if tx == nil {
				// Unknown transaction: remember the peer has it and
				// request the payload.
				c.TrxState = append(c.TrxState, ts)
				req.ReqTrx.IDs = append(req.ReqTrx.IDs, msg.KnownTrx.IDs[i])
				m.reqTrx = append(m.reqTrx, msg.KnownTrx.IDs[i])
			}
		}
		sendReq = len(req.ReqTrx.IDs) > 0
	} else if msg.KnownTrx.Mode != None {
		// Any other transaction mode is unexpected here; drop the notice.
		return
	}
	if msg.KnownBlocks.Mode == Normal {
		req.ReqBlocks.Mode = Normal
		for i := 0; i < len(msg.KnownBlocks.IDs); i++ {
			var b *SignedBlock
			entry := &PeerBlockState{msg.KnownBlocks.IDs[i], 0, true, true, TimePoint{}}
			// NOTE(review): the local-chain lookup is commented out, so b
			// is always nil and every advertised block gets requested.
			//b = cc.fetch_block_by_id(blkid);
			if b != nil {
				entry.BlockNum = b.BlockNumber()
			}
			if b == nil {
				sendReq = true
				req.ReqBlocks.IDs = append(req.ReqBlocks.IDs, msg.KnownBlocks.IDs[i])
				bq := BlockRequest{msg.KnownBlocks.IDs[i], generated}
				m.reqBlks = append(m.reqBlks, bq)
				entry.RequestedTime = TimePoint{time.Now()}
			}
			c.AddPeerBlock(entry)
		}
	} else if msg.KnownBlocks.Mode != None {
		return
	}
	if sendReq {
		// Track the request so recvBlock/recvTransaction can clear it
		// when the last requested item arrives.
		c.Enqueue(req)
		c.LastReq = req
	}
}
// retryFetch re-issues c's outstanding request on another idle peer
// that already claims to have the data, falling back to re-asking c
// itself if it is still connected.
func (m *DispatchMgr) retryFetch(c *Connection) {
	if c.LastReq == nil {
		return
	}
	var tid Sha256Type
	var bid Sha256Type
	isTxn := false
	// The last id in the request is the one used to match responses.
	if c.LastReq.ReqTrx.Mode == Normal {
		isTxn = true
		tid = c.LastReq.ReqTrx.IDs[len(c.LastReq.ReqTrx.IDs)-1]
	} else if c.LastReq.ReqBlocks.Mode == Normal {
		bid = c.LastReq.ReqBlocks.IDs[len(c.LastReq.ReqBlocks.IDs)-1]
	} else {
		return
	}
	// Prefer a peer with no request in flight that already knows the
	// transaction/block we want.
	for _, conn := range GetConnsMgr().Connections {
		if conn == c || conn.LastReq != nil {
			continue
		}
		sendit := false
		if isTxn {
			trx := conn.FindTrxStateById(tid)
			sendit = trx != nil && trx.IsKnownByPeer
		} else {
			blk := conn.FindPeerBlockStateById(bid)
			sendit = blk != nil && blk.IsKnown
		}
		if sendit {
			conn.Enqueue(c.LastReq)
			//conn->fetch_wait();
			conn.LastReq = c.LastReq
			return
		}
	}
	// No better peer: ask the original connection again if still up.
	if c.Connected() {
		c.Enqueue(c.LastReq)
	}
}
|
package main
import (
"crypto/tls"
"flag"
"fmt"
"net/http"
"net/url"
"os"
"strconv"
remoteworkflowapi "code.it4i.cz/lexis/wp4/alien4cloud-interface/client"
kclib "code.it4i.cz/lexis/wp4/keycloak-lib"
remoteapprovalapi "github.com/lexis-project/lexis-backend-services-interface-approval-system.git/client"
remotedataapi "github.com/lexis-project/lexis-backend-services-interface-datasets.git/client"
"github.com/lexis-project/lexis-backend-services-api.git/restapi"
"github.com/lexis-project/lexis-backend-services-api.git/server/approvalapi"
"github.com/lexis-project/lexis-backend-services-api.git/server/datasetapi"
"github.com/lexis-project/lexis-backend-services-api.git/server/heappeapi"
"github.com/lexis-project/lexis-backend-services-api.git/server/usageManager"
"github.com/lexis-project/lexis-backend-services-api.git/server/userorgapi"
"github.com/lexis-project/lexis-backend-services-api.git/server/workflowapi"
remoteuserorgapi "github.com/lexis-project/lexis-backend-services-userorg-service.git/client"
"github.com/rs/cors"
"github.com/segmentio/encoding/json"
cdrapi "gitlab.com/cyclops-community/cdr-client-interface/client"
csapi "gitlab.com/cyclops-community/cs-client-interface/client"
udrapi "gitlab.com/cyclops-community/udr-client-interface/client"
l "gitlab.com/cyclops-utilities/logging"
)
var (
cfg configuration
version string
)
// getBasePath function is to get the base URL of the server.
// It decodes the embedded swagger document just far enough to read the
// basePath field.
// Returns:
// - String with the value of the base URL of the server.
func getBasePath() string {
	var doc struct {
		BasePath string
	}
	if e := json.Unmarshal(restapi.SwaggerJSON, &doc); e != nil {
		l.Warning.Printf("Unmarshalling of the basepath failed: %v\n", e)
	}
	return doc.BasePath
}
// init function - reads in configuration file and creates logger
// It also relaxes TLS verification for outbound calls (per config) and
// initializes the Keycloak helper library.
func init() {
	confFile := flag.String("conf", "./config", "configuration file path (without toml extension)")
	flag.Parse()
	//placeholder code as the default value will ensure this situation will never arise
	if len(*confFile) == 0 {
		fmt.Println("Usage: lexis-portal-api --conf=/path/to/configuration/file")
		os.Exit(0)
	}
	// Load and expand the TOML configuration plus the access policies.
	readConfigFile(*confFile)
	cfg = parseConfig()
	cfg.Policies = parsePolicies()
	dumpConfig(cfg)
	// when communicating with other services, they may not be secured with valid Https
	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: cfg.General.InsecureSkipVerify}
	l.InitLogger(cfg.General.LogFile, cfg.General.LogLevel, cfg.General.LogToConsole)
	l.Info.Printf("Initilizing Keycloak Lib...")
	kclib.InitLib("config_keycloak.toml")
	l.Info.Printf("LEXIS Portal API Service version %v initialized", version)
}
// main function creates the database connection and launches the endpoint handlers
// It wires one remote-API client config per downstream service, builds
// the business-logic adapters, mounts them on the generated REST
// handler behind CORS, and serves (TLS optional per config).
func main() {
	// Approval-system backend.
	as := remoteapprovalapi.Config{
		URL: &url.URL{
			Scheme: "http",
			Host:   cfg.ApprovalService.Host + ":" + strconv.Itoa(cfg.ApprovalService.Port),
			Path:   cfg.ApprovalService.BaseURL,
		},
	}
	a := approvalapi.New(as)
	// Data-catalog backend; also serves the staging API below.
	dc := remotedataapi.Config{
		URL: &url.URL{
			Scheme: "http",
			Host:   cfg.DataCatalogService.Host + ":" + strconv.Itoa(cfg.DataCatalogService.Port),
			Path:   cfg.DataCatalogService.BaseURL,
		},
	}
	d := datasetapi.New(dc, cfg.Keycloak.RedirectURL)
	// User/organization backend.
	uoc := remoteuserorgapi.Config{
		URL: &url.URL{
			Scheme: "http",
			Host:   cfg.UserOrgService.Host + ":" + strconv.Itoa(cfg.UserOrgService.Port),
			Path:   cfg.UserOrgService.BaseURL,
		},
	}
	uo := userorgapi.New(uoc)
	// Cyclops accounting services (CS, CDR, UDR) feeding usage management.
	cs := csapi.Config{
		URL: &url.URL{
			Scheme: "http",
			Host:   cfg.CSService.Host + ":" + strconv.Itoa(cfg.CSService.Port),
			Path:   cfg.CSService.BaseURL,
		},
	}
	cc := cdrapi.Config{
		URL: &url.URL{
			Scheme: "http",
			Host:   cfg.CDRService.Host + ":" + strconv.Itoa(cfg.CDRService.Port),
			Path:   cfg.CDRService.BaseURL,
		},
	}
	uc := udrapi.Config{
		URL: &url.URL{
			Scheme: "http",
			Host:   cfg.UDRService.Host + ":" + strconv.Itoa(cfg.UDRService.Port),
			Path:   cfg.UDRService.BaseURL,
		},
	}
	u := usageManager.New(uoc, uc, cc, cs)
	// Workflow (Alien4Cloud interface) backend.
	wc := remoteworkflowapi.Config{
		URL: &url.URL{
			Scheme: "http",
			Host:   cfg.WorkflowService.Host + ":" + strconv.Itoa(cfg.WorkflowService.Port),
			Path:   cfg.WorkflowService.BaseURL,
		},
	}
	w := workflowapi.New(wc)
	hp := heappeapi.New()
	// Initiate the http handler, with the objects that are implementing the business logic.
	rc := restapi.Config{
		ApprovalSystemManagementAPI: a,
		DataSetManagementAPI:        d,
		StagingAPI:                  d,
		UsageManagementAPI:          u,
		UserOrgManagementAPI:        uo,
		WorkflowManagementAPI:       w,
		ClusterInformationAPI:       hp,
		Logger:                      l.Info.Printf,
		AuthAPIKeyHeader:            AuthAPIKey,
		AuthAPIKeyParam:             AuthAPIKey,
		AuthKeycloak:                AuthKeycloak,
		Authorizer:                  Authorizer,
	}
	h, e := restapi.Handler(rc)
	if e != nil {
		l.Error.Printf("Error creating REST handler: %s ...Exiting\n", e)
		os.Exit(2)
	}
	// CORS policy comes straight from configuration; debug logging of
	// CORS decisions only at DEBUG level.
	hcors := cors.New(
		cors.Options{
			Debug:          (cfg.General.LogLevel == "DEBUG"),
			AllowedOrigins: cfg.General.CORSOrigins,
			AllowedHeaders: cfg.General.CORSHeaders,
			AllowedMethods: cfg.General.CORSMethods,
		}).Handler(h)
	// note that this runs on all interfaces right now
	serviceLocation := ":" + strconv.Itoa(cfg.General.ServerPort)
	l.Info.Printf("Starting to serve the LEXIS Portal API Service, access server on https://localhost:%v\n", serviceLocation)
	// Run the standard http server
	if cfg.General.HTTPSEnabled {
		l.Error.Printf((http.ListenAndServeTLS(serviceLocation, cfg.General.CertificateFile, cfg.General.CertificateKey, hcors)).Error())
	} else {
		l.Warning.Printf("Running without TLS security - do not use in production scenario...")
		l.Error.Printf((http.ListenAndServe(serviceLocation, hcors)).Error())
	}
}
|
package apps
import (
"fmt"
"github.com/fd/forklift/util/syncset"
)
type (
	// domain_t mirrors one entry of the Heroku V3 domains API.
	domain_t struct {
		Id       string `json:"id,omitempty"`
		Hostname string `json:"hostname"`
	}

	// domain_set implements the syncset interface over an app's custom
	// domains: requested comes from the app manifest, current is loaded
	// from Heroku by LoadCurrentKeys.
	domain_set struct {
		ctx       *App
		requested []string
		current   []string
		domains   map[string]*domain_t // hostname -> remote record
	}
)
// sync_domains reconciles the app's requested custom domains with the
// ones currently configured on Heroku, adding and removing as needed.
func (app *App) sync_domains() error {
	set := &domain_set{ctx: app, requested: app.Domains}

	fmt.Printf("Domain:\n")

	if err := set.LoadCurrentKeys(); err != nil {
		return err
	}

	syncset.Sync(set)
	return nil
}
// LoadCurrentKeys fetches the domains currently configured on Heroku
// and indexes them by hostname, skipping the implicit
// <app>.herokuapp.com domain that Heroku manages itself.
func (set *domain_set) LoadCurrentKeys() error {
	var remote []*domain_t
	mainhost := set.ctx.AppName + ".herokuapp.com"

	if err := set.ctx.HttpV3("GET", nil, &remote, "/apps/%s/domains", set.ctx.AppName); err != nil {
		return err
	}

	set.domains = make(map[string]*domain_t, len(remote))
	for _, d := range remote {
		if d.Hostname == mainhost {
			continue
		}
		set.domains[d.Hostname] = d
		set.current = append(set.current, d.Hostname)
	}

	return nil
}
// RequestedKeys returns the hostnames the app configuration asks for.
func (set *domain_set) RequestedKeys() []string {
	return set.requested
}
// CurrentKeys returns the hostnames currently configured on Heroku, as
// captured by LoadCurrentKeys.
func (set *domain_set) CurrentKeys() []string {
	return set.current
}
// ShouldChange reports whether an existing domain needs an in-place update;
// domains are add/remove only, so this is always false.
func (set *domain_set) ShouldChange(key string) bool {
	return false
}
// Change is a no-op: ShouldChange never returns true, so the sync engine
// never has in-place work for domains.
func (set *domain_set) Change(key string) (string, string, error) {
	return "", "", nil
}
// Add registers a new custom domain for the app via the Heroku v3 API.
func (set *domain_set) Add(host string) error {
	domain := domain_t{
		Hostname: host,
	}
	return set.ctx.HttpV3("POST", &domain, nil, "/apps/%s/domains", set.ctx.AppName)
}
// Remove deletes the given custom domain via the Heroku v3 API, using the
// record id captured by LoadCurrentKeys.
func (set *domain_set) Remove(host string) error {
	domain, ok := set.domains[host]
	if !ok {
		// Previously a missing entry yielded a nil *domain_t and a nil
		// pointer dereference on domain.Id; fail with a clear error instead.
		return fmt.Errorf("unknown domain %q", host)
	}
	return set.ctx.HttpV3("DELETE", nil, nil, "/apps/%s/domains/%s", set.ctx.AppName, domain.Id)
}
|
package jira
// TestClient is a stub jira client for tests: it embeds Config to satisfy
// the client surface and returns canned task data and errors.
type TestClient struct {
	Config
	// jTasks is returned verbatim by GetUserTasks.
	jTasks map[string]*Task
	// returnError is returned verbatim by GetUserTasks.
	returnError error
}
// Connect is a no-op stub; the test client never dials a real Jira server.
func (t *TestClient) Connect() error {
	return nil
}
// GetUserTasks returns the canned task map and error configured on the stub.
func (t *TestClient) GetUserTasks() (map[string]*Task, error) {
	return t.jTasks, t.returnError
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
	// awairAddress is the base URL of the Awair device's local API
	// (e.g. "http://192.168.1.10"); required, validated in main.
	awairAddress = flag.String("awair-address", "", "Base URL of Awair local API server")
	// listenAddress is the host:port this exporter serves HTTP on.
	listenAddress = flag.String("listen-address", ":8080", "Address to serve metrics on")
)

// prometheusNamespace prefixes every metric exported by this service.
var prometheusNamespace = "awairlocal"
// The next few bits are more or less copied from
// https://godoc.org/github.com/prometheus/client_golang/prometheus#example-Collector

// Collector scrapes the Awair local API on every Prometheus collection and
// emits each expected field as a gauge.
type Collector struct {
	awairBaseURL string
	// descs maps metric field name -> pre-built Prometheus descriptor.
	descs map[string]*prometheus.Desc
}
// NewCollector builds a Collector for the Awair local API at the given base
// URL, pre-registering a descriptor for every expected metric field.
func NewCollector(awairBaseURL string) *Collector {
	descs := make(map[string]*prometheus.Desc, len(ExpectedMetrics))
	for _, metric := range ExpectedMetrics {
		fqName := fmt.Sprintf("%s_%s", prometheusNamespace, metric)
		descs[metric] = prometheus.NewDesc(fqName, "", nil, nil)
	}
	return &Collector{awairBaseURL: awairBaseURL, descs: descs}
}
// Describe implements prometheus.Collector by emitting every known metric
// descriptor.
func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
	for _, desc := range c.descs {
		ch <- desc
	}
}
// Collect implements prometheus.Collector: it fetches a fresh sample from
// the Awair device and emits one timestamped gauge per known field.
func (c *Collector) Collect(ch chan<- prometheus.Metric) {
	data, err := GetAirData(c.awairBaseURL)
	if err != nil {
		// TODO: Count these errors. We need metrics for our metrics.
		log.Printf("Error getting data from Awair: %v", err)
		return
	}
	for name, val := range data.Metrics {
		desc, ok := c.descs[name]
		if !ok {
			// Fields outside ExpectedMetrics have no descriptor; skip them.
			log.Printf("Ignoring unknown metric %q", name)
			continue
		}
		m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, val)
		if err != nil {
			// TODO: Count these errors. We need metrics for our metrics.
			log.Printf("NewConstMetric: %v", err)
			continue
		}
		// Attach the device-reported sample time rather than scrape time.
		ch <- prometheus.NewMetricWithTimestamp(data.Timestamp, m)
	}
}
// ExpectedMetrics is the list of fields (excluding "timestamp") I expect the
// Awair Local API to return. Each gets a descriptor in NewCollector; fields
// outside this list are ignored at collection time.
var ExpectedMetrics = []string{
	"score", "dew_point", "temp", "humid", "abs_humid", "co2", "co2_est",
	"co2_est_baseline", "voc", "voc_baseline", "voc_h2_raw",
	"voc_ethanol_raw", "pm25", "pm10_est",
}
// AirData represents one air-data sample returned by the Awair Local API.
// (Doc comment fixed: it previously referred to a nonexistent
// AwairAirDataResponse type.)
type AirData struct {
	// Timestamp reported by the Awair device
	Timestamp time.Time
	// Metrics reported by the device. See ExpectedMetrics for the fields I
	// get from my device.
	Metrics map[string]float64
}
// This metric instruments this service itself: one counter per GetAirData
// outcome, labeled by "result" (success / failed-get / failed-decode).
var getAirDataCounter = promauto.NewCounterVec(prometheus.CounterOpts{
	Namespace: prometheusNamespace,
	Subsystem: "ops",
	Name:      "GetAirData_calls",
	Help:      "Calls to the Get function, reading from the Awair local API",
}, []string{"result"})
// GetAirData reads data from the AwairLocal API, parses it and returns it.
// It expects a JSON object with an RFC 3339 "timestamp" field plus any
// number of numeric fields; non-numeric fields are logged and skipped.
// Every call outcome is counted in getAirDataCounter under a "result" label.
func GetAirData(baseURL string) (*AirData, error) {
	// My Awair device doesn't handle duplicate slashes at the beginning of
	// the query path, ensure we don't run into that issue.
	baseURL = strings.TrimRight(baseURL, "/")
	url := baseURL + "/air-data/latest"
	resp, err := http.Get(url)
	if err != nil {
		getAirDataCounter.With(prometheus.Labels{"result": "failed-get"}).Inc()
		return nil, fmt.Errorf("failed to GET from Awair at %q: %v", url, err)
	}
	defer resp.Body.Close()
	// Accept any 2xx status. The previous check (> 200 || > 299) rejected
	// every 2xx other than exactly 200 and accepted 1xx responses.
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		getAirDataCounter.With(prometheus.Labels{"result": "failed-get"}).Inc()
		return nil, fmt.Errorf("Awair returned an error: %v", http.StatusText(resp.StatusCode))
	}
	var fields map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&fields)
	if err != nil {
		getAirDataCounter.With(prometheus.Labels{"result": "failed-decode"}).Inc()
		return nil, fmt.Errorf("failed to JSON: %v", err)
	}
	data := AirData{Metrics: make(map[string]float64)}
	// Parse the timestamp field from the JSON
	ts, ok := fields["timestamp"]
	if !ok {
		getAirDataCounter.With(prometheus.Labels{"result": "failed-decode"}).Inc()
		return nil, fmt.Errorf("no 'timestamp' field")
	}
	tsString, ok := ts.(string)
	if !ok {
		// Count this decode failure too; it was previously the only error
		// path that escaped the counter.
		getAirDataCounter.With(prometheus.Labels{"result": "failed-decode"}).Inc()
		return nil, fmt.Errorf("'timestamp' field %v not a string", ts)
	}
	if data.Timestamp, err = time.Parse(time.RFC3339, tsString); err != nil {
		getAirDataCounter.With(prometheus.Labels{"result": "failed-decode"}).Inc()
		// Error message typo fixed ("failed ot parse").
		return nil, fmt.Errorf("failed to parse timestamp: %v", err)
	}
	delete(fields, "timestamp")
	// Now go through the other fields, they should all be float64s.
	for key, val := range fields {
		floatVal, ok := val.(float64)
		if !ok {
			log.Printf("Got non-float64 value %q (%v) in Awair response", key, val)
			continue
		}
		data.Metrics[key] = floatVal
	}
	getAirDataCounter.With(prometheus.Labels{"result": "success"}).Inc()
	return &data, nil
}
// main wires up two HTTP handlers — exporter self-metrics on /metrics and
// Awair air-data metrics on /air-data — and serves them until failure.
func main() {
	flag.Parse()
	if len(*awairAddress) == 0 {
		log.Fatalf("--awair-address must be provided (%q)", *awairAddress)
	}
	// Add handler for metrics about this service.
	http.Handle("/metrics", promhttp.Handler())
	// Add handler for the actual air data metrics.
	reg := prometheus.NewPedanticRegistry()
	if err := reg.Register(NewCollector(*awairAddress)); err != nil {
		log.Fatalf("Failed to register Prometheus collector: %v", err)
	}
	http.Handle("/air-data", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	// ListenAndServe only returns on failure; previously the error was
	// discarded and the process exited 0 with no diagnostic.
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
|
package main
import (
"fmt"
"math/rand"
"net/http"
"os"
"time"
)
// main hammers a local error-generating service forever, hitting one of a
// few routes at random and printing each response's status code.
func main() {
	fmt.Println("starting the server")
	client := &http.Client{}
	rand.Seed(time.Now().UTC().UnixNano())
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	host := os.Getenv("HOST")
	// Fixed copy/paste bug: this previously re-tested port (always non-empty
	// by now) and assigned "localhost" to port, so HOST never defaulted and
	// requests went to "http://:8080...".
	if host == "" {
		host = "localhost"
	}
	allRoutes := []string{
		"/maybe/error",
		"/maybe/fail",
		"/random/error",
	}
	for {
		route := allRoutes[rand.Intn(len(allRoutes))]
		url := fmt.Sprintf("http://%s:%s%s", host, port, route)
		fmt.Printf("%s : ", url)
		resp, err := client.Get(url)
		if err != nil {
			// Previously the error was discarded and a nil resp was
			// dereferenced, crashing the loop on the first failed request.
			fmt.Println(err)
			time.Sleep(time.Second / 50)
			continue
		}
		fmt.Println(resp.StatusCode)
		// Close per iteration: a defer inside an endless loop never runs
		// and leaks connections.
		resp.Body.Close()
		time.Sleep(time.Second / 50)
	}
}
|
// Copyright 2021 Clivern. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package definition
import (
"fmt"
"strings"
"testing"
"github.com/franela/goblin"
)
// TestUnitVault test cases: renders a vault service definition and checks
// the generated docker-compose fragment for the expected image, port,
// restart policy, listen address and dev-server command.
func TestUnitVault(t *testing.T) {
	g := goblin.Goblin(t)

	g.Describe("#TestVault", func() {
		g.It("It should satisfy all provided test cases", func() {
			vault := GetVaultConfig("vault", "", "token")
			result, err := vault.ToString()

			g.Assert(strings.Contains(
				result,
				fmt.Sprintf("image: %s", fmt.Sprintf("%s:%s", VaultDockerImage, VaultDockerImageVersion)),
			)).Equal(true)
			g.Assert(strings.Contains(result, fmt.Sprintf(`- "%s"`, VaultHTTPPort))).Equal(true)
			g.Assert(strings.Contains(result, fmt.Sprintf("restart: %s", VaultRestartPolicy))).Equal(true)
			g.Assert(strings.Contains(result, fmt.Sprintf("VAULT_DEV_LISTEN_ADDRESS=0.0.0.0:%s", VaultHTTPPort))).Equal(true)
			g.Assert(strings.Contains(result, "vault server -dev -dev-root-token-id=token")).Equal(true)
			// ToString must have rendered without error.
			g.Assert(err).Equal(nil)
		})
	})
}
|
package tests
import (
"log"
"math/rand"
"testing"
"time"
"github.com/almerlucke/kallos"
"github.com/almerlucke/kallos/generators"
)
// TestRandomWalk exercises a 2D random walk over a 7x7 pitch matrix and
// logs 20 generated values for manual inspection.
func TestRandomWalk(t *testing.T) {
	rand.Seed(time.Now().UTC().UnixNano())

	matrix := &generators.RandomWalk2DMatrix{
		Values: []kallos.Values{
			kallos.ToValues(36, 38, 39, 42, 43, 44, 47),
			kallos.ToValues(48, 50, 51, 52, 54, 55, 58),
			kallos.ToValues(59, 60, 63, 64, 66, 65, 67),
			kallos.ToValues(69, 70, 73, 74, 75, 78, 79),
			kallos.ToValues(81, 82, 84, 86, 87, 90, 91),
			kallos.ToValues(92, 95, 96, 98, 99, 101, 103),
			kallos.ToValues(104, 107, 108, 109, 112, 113, 115),
		},
	}

	walker := generators.NewRandomWalk([]int{7, 7}, matrix)
	for i := 0; i < 20; i++ {
		log.Printf("walker: %v\n", walker.GenerateValue())
	}
}
|
package venti
import "testing"
// TestPackRoot round-trips a Root through Pack and UnpackRoot and verifies
// the unpacked value equals the original.
func TestPackRoot(t *testing.T) {
	r := Root{
		Name:      "foo",
		Type:      "bar",
		Score:     ZeroScore(),
		BlockSize: 256,
		Prev:      ZeroScore(),
	}
	buf := make([]byte, RootSize)
	if err := r.Pack(buf); err != nil {
		t.Fatal(err)
	}
	rr, err := UnpackRoot(buf)
	if err != nil {
		t.Fatal(err)
	}
	// Struct comparison works because Root is comparable here.
	if *rr != r {
		t.Fatalf("results differ: \n%v\n\tvs\n%v", r, *rr)
	}
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package common
import (
"fmt"
"sort"
"strings"
"github.com/blang/semver"
"github.com/pkg/errors"
)
// AllKubernetesSupportedVersions is a hash table of all supported Kubernetes version strings
// The bool value indicates if creating new clusters with this version is allowed
var AllKubernetesSupportedVersions = map[string]bool{
"1.6.6": false,
"1.6.9": false,
"1.6.11": false,
"1.6.12": false,
"1.6.13": false,
"1.7.0": false,
"1.7.1": false,
"1.7.2": false,
"1.7.4": false,
"1.7.5": false,
"1.7.7": false,
"1.7.9": false,
"1.7.10": false,
"1.7.12": false,
"1.7.13": false,
"1.7.14": false,
"1.7.15": false,
"1.7.16": false,
"1.8.0": false,
"1.8.1": false,
"1.8.2": false,
"1.8.4": false,
"1.8.6": false,
"1.8.7": false,
"1.8.8": false,
"1.8.9": false,
"1.8.10": false,
"1.8.11": false,
"1.8.12": false,
"1.8.13": false,
"1.8.14": false,
"1.8.15": false,
"1.9.0": false,
"1.9.1": false,
"1.9.2": false,
"1.9.3": false,
"1.9.4": false,
"1.9.5": false,
"1.9.6": false,
"1.9.7": false,
"1.9.8": false,
"1.9.9": false,
"1.9.10": false,
"1.9.11": false,
"1.10.0-beta.2": false,
"1.10.0-beta.4": false,
"1.10.0-rc.1": false,
"1.10.0": false,
"1.10.1": false,
"1.10.2": false,
"1.10.3": false,
"1.10.4": false,
"1.10.5": false,
"1.10.6": false,
"1.10.7": false,
"1.10.8": false,
"1.10.9": false,
"1.10.12": false,
"1.10.13": false,
"1.11.0-alpha.1": false,
"1.11.0-alpha.2": false,
"1.11.0-beta.1": false,
"1.11.0-beta.2": false,
"1.11.0-rc.1": false,
"1.11.0-rc.2": false,
"1.11.0-rc.3": false,
"1.11.0": false,
"1.11.1": false,
"1.11.2": false,
"1.11.3": false,
"1.11.4": false,
"1.11.5": false,
"1.11.6": false,
"1.11.7": false,
"1.11.8": false,
"1.11.9": false,
"1.11.10": false,
"1.12.0-alpha.1": false,
"1.12.0-beta.0": false,
"1.12.0-beta.1": false,
"1.12.0-rc.1": false,
"1.12.0-rc.2": false,
"1.12.0": false,
"1.12.1": false,
"1.12.2": false,
"1.12.4": false,
"1.12.5": false,
"1.12.6": false,
"1.12.7": false,
"1.12.8": false,
"1.12.9": false, // disabled because of https://github.com/Azure/aks-engine/issues/1421
"1.13.0-alpha.1": false,
"1.13.0-alpha.2": false,
"1.13.1": false,
"1.13.2": false,
"1.13.3": false,
"1.13.4": false,
"1.13.5": false,
"1.13.6": false, // disabled because of https://github.com/kubernetes/kubernetes/issues/78308
"1.13.7": false,
"1.13.8": false,
"1.13.9": false,
"1.13.10": false,
"1.13.11": false,
"1.13.12": false, // disabled because of https://github.com/Azure/aks-engine/issues/2312
"1.14.0-alpha.1": false,
"1.14.0-alpha.2": false,
"1.14.0-beta.1": false,
"1.14.0-beta.2": false,
"1.14.0-rc.1": false,
"1.14.0": false,
"1.14.1": false,
"1.14.2": false, // disabled because of https://github.com/kubernetes/kubernetes/issues/78308
"1.14.3": false,
"1.14.4": false,
"1.14.5": false,
"1.14.6": false,
"1.14.7": false,
"1.14.8": false, // disabled because of https://github.com/Azure/aks-engine/issues/2312
"1.14.10": false, // disabled because of https://github.com/Azure/aks-engine/issues/2312
"1.15.0-alpha.1": false,
"1.15.0-alpha.2": false,
"1.15.0-alpha.3": false,
"1.15.0-beta.1": false,
"1.15.0-beta.2": false,
"1.15.0-rc.1": false,
"1.15.0": false,
"1.15.1": false,
"1.15.2": false,
"1.15.3": false,
"1.15.4": false,
"1.15.5": false, // disabled because of https://github.com/Azure/aks-engine/issues/2312
"1.15.7": false,
"1.15.8": false, // disabled because of https://github.com/kubernetes/release/issues/1020
"1.15.9": false,
"1.15.10": false,
"1.15.11": false,
"1.15.12": false,
"1.16.0-alpha.1": false,
"1.16.0-alpha.2": false,
"1.16.0-alpha.3": false,
"1.16.0-beta.1": false,
"1.16.0-beta.2": false,
"1.16.0-rc.1": false,
"1.16.0": false,
"1.16.1": false,
"1.16.2": false, // disabled because of https://github.com/Azure/aks-engine/issues/2312
"1.16.4": false,
"1.16.5": false, // disabled because of https://github.com/kubernetes/release/issues/1020
"1.16.6": false,
"1.16.7": false,
"1.16.8": false,
"1.16.9": false,
"1.16.10": false,
"1.16.11": false,
"1.16.12": false,
"1.16.13": false,
"1.16.14": false,
"1.16.15": false,
"1.17.0-alpha.1": false,
"1.17.0-alpha.2": false,
"1.17.0-alpha.3": false,
"1.17.0-beta.1": false,
"1.17.0-beta.2": false,
"1.17.0-rc.1": false,
"1.17.0-rc.2": false,
"1.17.0": false,
"1.17.1": false,
"1.17.2": false,
"1.17.3": false,
"1.17.4": false,
"1.17.5": false,
"1.17.6": false,
"1.17.7": false,
"1.17.8": false,
"1.17.9": false,
"1.17.10": false, // replaced by 1.17.11 due to k8s release engineering issues
"1.17.11": false,
"1.17.12": false,
"1.17.13": false,
"1.17.14": false, // disabled, see https://github.com/kubernetes/kubernetes/pull/96623
"1.17.15": false, // replaced by 1.17.16 due to k8s release engineering issues
"1.17.16": false,
"1.17.17": false,
"1.18.0-alpha.1": false,
"1.18.0-alpha.2": false,
"1.18.0-alpha.3": false,
"1.18.0-alpha.5": false,
"1.18.0-beta.1": false,
"1.18.0": false,
"1.18.1": false,
"1.18.2": false,
"1.18.3": false,
"1.18.4": false,
"1.18.5": false,
"1.18.6": false,
"1.18.7": false, // replaced by 1.18.8 due to k8s release engineering issues
"1.18.8": false,
"1.18.9": false,
"1.18.10": false,
"1.18.11": false, // replaced by 1.18.12 due to k8s release engineering issues
"1.18.12": false,
"1.18.13": false,
"1.18.14": false,
"1.18.15": false,
"1.18.16": false,
"1.18.17": false,
"1.18.18": false,
"1.18.19": false,
"1.18.20": false,
"1.19.0-alpha.1": false,
"1.19.0-alpha.2": false,
"1.19.0-alpha.3": false,
"1.19.0-beta.0": false,
"1.19.0-beta.1": false,
"1.19.0-beta.2": false,
"1.19.0-rc.3": false,
"1.19.0-rc.4": false,
"1.19.0": false,
"1.19.1": false,
"1.19.2": false,
"1.19.3": false,
"1.19.4": false,
"1.19.5": false,
"1.19.6": false,
"1.19.7": false,
"1.19.8": false,
"1.19.9": false,
"1.19.10": false,
"1.19.11": false,
"1.19.12": false,
"1.19.13": false,
"1.19.14": false,
"1.19.15": false,
"1.19.16": false,
"1.20.0-alpha.1": false,
"1.20.0-alpha.2": false,
"1.20.0-alpha.3": false,
"1.20.0-beta.0": false,
"1.20.0-beta.1": false,
"1.20.0-beta.2": false,
"1.20.0-rc.0": false,
"1.20.0": false,
"1.20.1": false,
"1.20.2": false,
"1.20.3": false,
"1.20.4": false,
"1.20.5": false,
"1.20.6": false,
"1.20.7": false,
"1.20.8": false,
"1.20.9": false,
"1.20.10": false,
"1.20.11": false,
"1.20.12": false,
"1.20.13": false,
"1.20.14": false,
"1.20.15": true,
"1.21.0-alpha.1": false,
"1.21.0-alpha.2": false, // disabled, see https://github.com/kubernetes/kubernetes/issues/98419
"1.21.0-alpha.3": false,
"1.21.0-beta.0": false,
"1.21.0-beta.1": false,
"1.21.0-rc.0": false,
"1.21.0": false,
"1.21.1": false,
"1.21.2": false,
"1.21.3": false,
"1.21.4": false,
"1.21.5": false,
"1.21.6": false,
"1.21.7": false,
"1.21.8": false,
"1.21.9": false,
"1.21.10": false,
"1.21.11": false,
"1.21.12": false,
"1.21.13": false,
"1.21.14": true,
"1.22.0-alpha.1": false,
"1.22.0-alpha.2": false,
"1.22.0-alpha.3": false,
"1.22.0-beta.0": false,
"1.22.0-beta.1": false,
"1.22.0-beta.2": false,
"1.22.0": false,
"1.22.1": false,
"1.22.2": false,
"1.22.3": false,
"1.22.4": false,
"1.22.5": false,
"1.22.6": false,
"1.22.7": false,
"1.22.8": false,
"1.22.9": false,
"1.22.10": false,
"1.22.11": false,
"1.22.12": false,
"1.22.13": false,
"1.22.14": false,
"1.22.15": false,
"1.22.16": false,
"1.22.17": true,
"1.23.0-alpha.1": false,
"1.23.0-alpha.2": false,
"1.23.0-alpha.3": false,
"1.23.0-alpha.4": false,
"1.23.0-beta.0": false,
"1.23.0-rc.0": false,
"1.23.0-rc.1": false,
"1.23.0": false,
"1.23.1": false,
"1.23.2": false,
"1.23.3": false,
"1.23.4": false,
"1.23.5": false,
"1.23.6": false,
"1.23.7": false,
"1.23.8": false,
"1.23.9": false,
"1.23.10": false,
"1.23.11": false,
"1.23.12": false,
"1.23.13": false,
"1.23.14": false,
"1.23.15": false,
"1.23.16": false,
"1.23.17": true,
"1.24.0-alpha.2": false,
"1.24.0-alpha.3": false,
"1.24.0": false,
"1.24.1": false,
"1.24.2": false,
"1.24.3": false,
"1.24.4": false,
"1.24.5": false,
"1.24.6": false,
"1.24.7": false,
"1.24.8": false,
"1.24.9": false,
"1.24.10": false,
"1.24.11": false,
"1.24.13": false,
"1.24.16": false,
"1.24.17": true,
}
// AllKubernetesSupportedVersionsAzureStack is a hash table of all supported Kubernetes version strings on Azure Stack
// The bool value indicates if creating new clusters with this version is allowed;
// false entries remain valid upgrade targets (see GetAllSupportedKubernetesVersions).
var AllKubernetesSupportedVersionsAzureStack = map[string]bool{
	"1.14.7":  false,
	"1.14.8":  false, // disabled because of https://github.com/Azure/aks-engine/issues/2312
	"1.15.4":  false,
	"1.15.5":  false, // disabled because of https://github.com/Azure/aks-engine/issues/2312
	"1.15.10": false,
	"1.15.11": false,
	"1.15.12": false,
	"1.16.9":  false,
	"1.16.10": false,
	"1.16.11": false,
	"1.16.13": false,
	"1.16.14": false,
	"1.16.15": false,
	"1.17.4":  false,
	"1.17.5":  false,
	"1.17.6":  false,
	"1.17.7":  false,
	"1.17.9":  false,
	"1.17.11": false,
	"1.17.17": false,
	"1.18.10": false,
	"1.18.15": false,
	"1.18.18": false,
	"1.19.10": false,
	"1.19.15": false,
	"1.20.6":  false,
	"1.20.11": false,
	"1.21.10": false,
	"1.22.7":  false,
	"1.22.15": false,
	"1.22.16": false,
	"1.23.6":  false,
	"1.23.12": false,
	"1.23.13": false,
	"1.23.14": false,
	"1.23.15": true,
	"1.24.7":  false,
	"1.24.9":  true,
}
// AllKubernetesWindowsSupportedVersionsAzureStack maintain a set of available k8s Windows versions in aks-engine on Azure Stack
// The bool value indicates if creating new clusters with this version is allowed;
// false entries remain valid upgrade targets (see GetAllSupportedKubernetesVersions).
var AllKubernetesWindowsSupportedVersionsAzureStack = map[string]bool{
	"1.15.10": false,
	"1.15.11": false,
	"1.15.12": false,
	"1.16.9":  false,
	"1.16.10": false,
	"1.16.11": false,
	"1.16.13": false,
	"1.16.14": false,
	"1.16.15": false,
	"1.17.4":  false,
	"1.17.5":  false,
	"1.17.6":  false,
	"1.17.7":  false,
	"1.17.9":  false,
	"1.17.11": false,
	"1.17.17": false,
	"1.18.10": false,
	"1.18.15": false,
	"1.18.18": false,
	"1.19.10": false,
	"1.19.15": false,
	"1.20.6":  false,
	"1.20.11": false,
	"1.21.10": false,
	"1.22.7":  false,
	"1.22.15": false,
	"1.22.16": false,
	"1.23.6":  false,
	"1.23.12": false,
	"1.23.13": false,
	"1.23.14": false,
	"1.23.15": true,
	"1.24.7":  false,
	"1.24.9":  true,
}
// GetDefaultKubernetesVersion returns the default Kubernetes version, that is the latest patch of the default release
func GetDefaultKubernetesVersion(hasWindows bool, isAzureStackCloud bool) string {
defaultRelease := KubernetesDefaultRelease
if hasWindows {
defaultRelease = KubernetesDefaultReleaseWindows
}
if isAzureStackCloud && hasWindows {
defaultRelease = KubernetesDefaultReleaseWindowsAzureStack
}
if isAzureStackCloud && !hasWindows {
defaultRelease = KubernetesDefaultReleaseAzureStack
}
return GetLatestPatchVersion(defaultRelease, GetAllSupportedKubernetesVersions(false, hasWindows, isAzureStackCloud))
}
// GetSupportedKubernetesVersion verifies that a passed-in version string is supported, or returns a default version string if not
func GetSupportedKubernetesVersion(version string, hasWindows bool, isAzureStackCloud bool) string {
	// Start from the platform default and keep it unless a lookup succeeds.
	k8sVersion := GetDefaultKubernetesVersion(hasWindows, isAzureStackCloud)
	if hasWindows {
		if AllKubernetesWindowsSupportedVersions[version] {
			k8sVersion = version
		}
	} else {
		if AllKubernetesSupportedVersions[version] {
			k8sVersion = version
		}
	}
	// NOTE(review): the public-cloud check above runs even on Azure Stack,
	// so a version enabled there but absent from the Azure Stack tables is
	// still returned — confirm this fallthrough is intended.
	if isAzureStackCloud {
		if hasWindows {
			if AllKubernetesWindowsSupportedVersionsAzureStack[version] {
				k8sVersion = version
			}
		} else {
			if AllKubernetesSupportedVersionsAzureStack[version] {
				k8sVersion = version
			}
		}
	}
	return k8sVersion
}
// GetAllSupportedKubernetesVersions returns a slice of all supported Kubernetes versions
func GetAllSupportedKubernetesVersions(isUpdate, hasWindows bool, isAzureStackCloud bool) []string {
var versions []string
allSupportedVersions := AllKubernetesSupportedVersions
if hasWindows {
allSupportedVersions = AllKubernetesWindowsSupportedVersions
}
if isAzureStackCloud && hasWindows {
allSupportedVersions = AllKubernetesWindowsSupportedVersionsAzureStack
}
if isAzureStackCloud && !hasWindows {
allSupportedVersions = AllKubernetesSupportedVersionsAzureStack
}
for ver, supported := range allSupportedVersions {
if isUpdate || supported {
versions = append(versions, ver)
}
}
sort.Slice(versions, func(i, j int) bool {
return IsKubernetesVersionGe(versions[j], versions[i])
})
return versions
}
// GetVersionsGt returns a list of versions greater than a semver string given a list of versions
// inclusive=true means that we test for equality as well
// preReleases=true means that we include pre-release versions in the list
func GetVersionsGt(versions []string, version string, inclusive, preReleases bool) []string {
	floor, _ := semver.Make(version)
	var ret []string
	for _, candidate := range versions {
		sv, _ := semver.Make(candidate)
		if len(sv.Pre) > 0 && !preReleases {
			continue
		}
		// Strictly greater always qualifies; equality only when inclusive.
		if sv.GT(floor) || (inclusive && sv.EQ(floor)) {
			ret = append(ret, candidate)
		}
	}
	return ret
}
// GetVersionsLt returns a list of versions less than a semver string given a list of versions
// inclusive=true means that we test for equality as well
// preReleases=true means that we include pre-release versions in the list
func GetVersionsLt(versions []string, version string, inclusive, preReleases bool) []string {
	var ret []string
	// This bound is an upper limit; the previous local name "minVersion"
	// (and its comment) were copy/paste slips from GetVersionsGt.
	maxVersion, _ := semver.Make(version)
	for _, v := range versions {
		sv, _ := semver.Make(v)
		if !preReleases && len(sv.Pre) != 0 {
			continue
		}
		if (inclusive && sv.LTE(maxVersion)) || (!inclusive && sv.LT(maxVersion)) {
			ret = append(ret, v)
		}
	}
	return ret
}
// GetVersionsBetween returns a list of versions between a min and max
// inclusive=true means that we test for equality on both bounds
// preReleases=true means that we include pre-release versions in the list
func GetVersionsBetween(versions []string, versionMin, versionMax string, inclusive, preReleases bool) []string {
	var ret []string
	// A pre-release lower bound implies the caller wants pre-releases even
	// if they did not ask for them explicitly.
	if minV, _ := semver.Make(versionMin); len(minV.Pre) != 0 {
		preReleases = true
	}
	// Intersect the two filtered lists via a set instead of the previous
	// O(n^2) nested scan; output order still follows GetVersionsLt.
	above := make(map[string]struct{})
	for _, gv := range GetVersionsGt(versions, versionMin, inclusive, preReleases) {
		above[gv] = struct{}{}
	}
	for _, lv := range GetVersionsLt(versions, versionMax, inclusive, preReleases) {
		if _, ok := above[lv]; ok {
			ret = append(ret, lv)
		}
	}
	return ret
}
// GetMinVersion gets the lowest semver version
// preRelease=true means accept a pre-release version as a min value
func GetMinVersion(versions []string, preRelease bool) string {
	if len(versions) < 1 {
		return ""
	}
	// NOTE(review): indexes [0] without re-checking emptiness — panics if
	// every entry is a pre-release and preRelease is false; confirm callers
	// never pass such a list.
	semverVersions := getSortedSemverVersions(versions, preRelease)
	return semverVersions[0].String()
}
// GetMaxVersion gets the highest semver version
// preRelease=true means accept a pre-release version as a max value
func GetMaxVersion(versions []string, preRelease bool) string {
	if len(versions) < 1 {
		return ""
	}
	// NOTE(review): indexes the last element without re-checking emptiness —
	// panics if every entry is a pre-release and preRelease is false;
	// confirm callers never pass such a list.
	semverVersions := getSortedSemverVersions(versions, preRelease)
	return semverVersions[len(semverVersions)-1].String()
}
// getSortedSemverVersions parses the given version strings and returns them
// sorted ascending; pre-releases are dropped unless preRelease is true.
func getSortedSemverVersions(versions []string, preRelease bool) []semver.Version {
	parsed := make([]semver.Version, 0, len(versions))
	for _, raw := range versions {
		if sv, _ := semver.Make(raw); preRelease || len(sv.Pre) == 0 {
			parsed = append(parsed, sv)
		}
	}
	semver.Sort(parsed)
	return parsed
}
// AllKubernetesWindowsSupportedVersions maintain a set of available k8s Windows versions in aks-engine
// (derived from AllKubernetesSupportedVersions at package init time).
var AllKubernetesWindowsSupportedVersions = getAllKubernetesWindowsSupportedVersionsMap()
// getAllKubernetesWindowsSupportedVersionsMap copies the general version
// table, prunes the versions with no Windows support, and re-enables 1.8.12.
func getAllKubernetesWindowsSupportedVersionsMap() map[string]bool {
	ret := make(map[string]bool, len(AllKubernetesSupportedVersions))
	for version, enabled := range AllKubernetesSupportedVersions {
		ret[version] = enabled
	}
	noWindows := []string{
		"1.6.6", "1.6.9", "1.6.11", "1.6.12", "1.6.13",
		"1.7.0", "1.7.1",
		"1.8.13", "1.8.14", "1.8.15",
		"1.10.0-beta.2", "1.10.0-beta.4", "1.10.0-rc.1",
		"1.11.0-alpha.1", "1.11.0-alpha.2",
	}
	for _, version := range noWindows {
		delete(ret, version)
	}
	// 1.8.12 is the latest supported patch for Windows
	ret["1.8.12"] = true
	return ret
}
// GetSupportedVersions get supported version list for a certain orchestrator
// along with that orchestrator's default version; unknown orchestrator
// types yield (nil, "").
func GetSupportedVersions(orchType string, isUpdate, hasWindows bool, isAzureStackCloud bool) (versions []string, defaultVersion string) {
	switch orchType {
	case Kubernetes:
		return GetAllSupportedKubernetesVersions(isUpdate, hasWindows, isAzureStackCloud), GetDefaultKubernetesVersion(hasWindows, isAzureStackCloud)
	default:
		return nil, ""
	}
}
// GetValidPatchVersion gets the current valid patch version for the minor version of the passed in version
func GetValidPatchVersion(orchType, orchVer string, isUpdate, hasWindows bool, isAzureStackCloud bool) string {
	// No version requested: fall back to the platform default.
	if orchVer == "" {
		return RationalizeReleaseAndVersion(orchType, "", "", isUpdate, hasWindows, isAzureStackCloud)
	}
	// check if the current version is valid, this allows us to have multiple supported patch versions in the future if we need it
	if version := RationalizeReleaseAndVersion(orchType, "", orchVer, isUpdate, hasWindows, isAzureStackCloud); version != "" {
		return version
	}
	// The exact version is unsupported; retry with just its major.minor
	// release to pick up the latest supported patch.
	sv, err := semver.Make(orchVer)
	if err != nil {
		return ""
	}
	release := fmt.Sprintf("%d.%d", sv.Major, sv.Minor)
	return RationalizeReleaseAndVersion(orchType, release, "", isUpdate, hasWindows, isAzureStackCloud)
}
// RationalizeReleaseAndVersion return a version when it can be rationalized from the input, otherwise ""
// Resolution order: no inputs -> platform default; release only -> latest
// patch of that release; version only -> exact match; both -> exact match
// constrained to the given release.
func RationalizeReleaseAndVersion(orchType, orchRel, orchVer string, isUpdate, hasWindows bool, isAzureStackCloud bool) (version string) {
	if orchType == "" {
		orchType = Kubernetes
	}
	// ignore "v" prefix in orchestrator version and release: "v1.8.0" is equivalent to "1.8.0", "v1.9" is equivalent to "1.9"
	orchVer = strings.TrimPrefix(orchVer, "v")
	orchRel = strings.TrimPrefix(orchRel, "v")
	supportedVersions, defaultVersion := GetSupportedVersions(orchType, isUpdate, hasWindows, isAzureStackCloud)
	if supportedVersions == nil {
		// Unknown orchestrator type.
		return ""
	}
	if orchRel == "" && orchVer == "" {
		return defaultVersion
	}
	if orchVer == "" {
		// Try to get latest version matching the release
		version = GetLatestPatchVersion(orchRel, supportedVersions)
		return version
	} else if orchRel == "" {
		// Try to get version the same with orchVer
		version = ""
		for _, ver := range supportedVersions {
			if ver == orchVer {
				version = ver
				break
			}
		}
		return version
	}
	// Both release and version given: the version must belong to the release
	// and be supported.
	version = ""
	for _, ver := range supportedVersions {
		sv, _ := semver.Make(ver)
		sr := fmt.Sprintf("%d.%d", sv.Major, sv.Minor)
		if sr == orchRel && ver == orchVer {
			version = ver
			break
		}
	}
	return version
}
// IsValidMinVersion reports whether the rationalized (orchType, orchRelease,
// orchVersion) combination resolves to a version at least minVersion.
// It returns an error when the combination cannot be rationalized or when
// either version string fails to parse as semver.
func IsValidMinVersion(orchType, orchRelease, orchVersion, minVersion string) (bool, error) {
	version := RationalizeReleaseAndVersion(
		orchType,
		orchRelease,
		orchVersion,
		false,
		false,
		false)
	if version == "" {
		return false, errors.Errorf("the following user supplied OrchestratorProfile configuration is not supported: OrchestratorType: %s, OrchestratorRelease: %s, OrchestratorVersion: %s. Please check supported Release or Version for this build of aks-engine",
			orchType,
			orchRelease,
			orchVersion)
	}
	sv, err := semver.Make(version)
	if err != nil {
		return false, errors.Errorf("could not validate version %s", version)
	}
	m, err := semver.Make(minVersion)
	if err != nil {
		// Include the offending value, mirroring the error above; the
		// previous message omitted it, making failures hard to diagnose.
		return false, errors.Errorf("could not validate version %s", minVersion)
	}
	return !sv.LT(m), nil
}
// IsKubernetesVersionGe returns true if actualVersion is greater than or equal to version
func IsKubernetesVersionGe(actualVersion, version string) bool {
	// NOTE(review): parse errors are ignored, so a malformed input compares
	// as the zero semver value — confirm inputs are pre-validated upstream.
	v1, _ := semver.Make(actualVersion)
	v2, _ := semver.Make(version)
	return v1.GE(v2)
}
// GetLatestPatchVersion gets the most recent patch version from a list of semver versions given a major.minor string
// Malformed entries in versionsList are skipped.
func GetLatestPatchVersion(majorMinor string, versionsList []string) (version string) {
	for _, ver := range versionsList {
		sv, err := semver.Make(ver)
		if err != nil {
			// Skip malformed entries; previously a single bad version
			// string aborted the whole scan via a bare return and could
			// yield a stale or empty result.
			continue
		}
		if fmt.Sprintf("%d.%d", sv.Major, sv.Minor) != majorMinor {
			continue
		}
		if version == "" {
			version = ver
			continue
		}
		// Keep the highest patch seen so far for this release.
		if current, _ := semver.Make(version); sv.GT(current) {
			version = ver
		}
	}
	return version
}
// IsSupportedKubernetesVersion return true if the provided Kubernetes version is supported
// for the given update/Windows/Azure Stack combination.
func IsSupportedKubernetesVersion(version string, isUpdate, hasWindows bool, isAzureStackCloud bool) bool {
	// Linear scan is fine: the supported set is at most a few hundred entries.
	for _, ver := range GetAllSupportedKubernetesVersions(isUpdate, hasWindows, isAzureStackCloud) {
		if ver == version {
			return true
		}
	}
	return false
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.