text stringlengths 11 4.05M |
|---|
package dogachess
// Position is the complete board state: an aggregate occupancy bitboard,
// a set of per-category sub-boards, and the side to move.
type Position struct {
board Bitboard
// NOTE(review): 14 sub-boards — presumably 2 colors x 6 piece kinds plus
// 2 aggregate occupancy boards; confirm the index layout against the
// rest of the package.
subboard [14]Bitboard
// color is the side to move; which color "true" denotes is not shown
// in this chunk — confirm.
color bool
}
// position is the package-level current game state; FreshBoard mutates
// and returns it.
var position Position
// FreshBoard reinitializes four sub-boards of the package-level position
// and returns a copy of it. Only indices 0-3 are touched; the remaining
// sub-boards, the aggregate board and color keep their previous values.
func FreshBoard() Position {
position.subboard[0] = 0 // White occupied
position.subboard[1] = 0 // Black occupied
// NOTE(review): the original comments for indices 2/3 repeat
// "occupied", but the constants look like single-rank pawn masks
// (bits 40-47 and 8-15) — confirm what each index represents.
position.subboard[2] = 0x00FF00000000 // White occupied
position.subboard[3] = 0x00000000FF00 // Black occupied
return position
}
|
package main
import (
	"fmt"
	"io"
	"log"
	"os"
	"strings"
)
func main() {
name := "Wilbrone Okoth"
name := os.Args[1]
fmt.Println(os.Args[0])
fmt.Println(os.Args[1])
str := fmt.Sprint(
`
<DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>
Hello World!
</title>
</head>
<body>
<h1>
` + name + `
</h1>
</body>
</html>`,
)
nf, err := os.Create("index.html")
if err != nil {
log.Fatal("Erro creating file", err)
}
defer nf.Close()
io.Copy(nf, strings.NewReader(str))
// fmt.Println(tpl)
}
|
package Jwt
// IdentityClaims is a JWT claims set: the registered claims (RFC 7519
// section 4.1) plus application-specific email and user-id claims.
// The structs tags mirror the json tags for map conversion.
type IdentityClaims struct {
Audience string `json:"aud,omitempty" structs:"aud"`
ExpiresAt int64 `json:"exp,omitempty" structs:"exp"`
Id string `json:"jti,omitempty" structs:"jti"`
IssuedAt int64 `json:"iat,omitempty" structs:"iat"`
Issuer string `json:"iss,omitempty" structs:"iss"`
NotBefore int64 `json:"nbf,omitempty" structs:"nbf"`
Subject string `json:"sub,omitempty" structs:"sub"`
Email string `json:"email,omitempty" structs:"email"`
UserId string `json:"user_id,omitempty" structs:"user_id"`
}
// Valid implements the jwt claims interface. It deliberately performs
// no validation (always nil); expiry/issuer checks are presumably done
// elsewhere — confirm before relying on this for verification.
func (i IdentityClaims) Valid() error {
return nil
}
|
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package collect
import (
"os/exec"
"syscall"
)
// statusReschedule is the exit code used to request reschedule.
const statusReschedule = 13
func reschedule(err error) bool {
if err, ok := err.(*exec.ExitError); ok {
status := err.Sys().(syscall.WaitStatus)
return status.Exited() && status.ExitStatus() == statusReschedule
}
return false
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// MappingLimitSettings type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/indices/_types/IndexSettings.ts#L402-L415
type MappingLimitSettings struct {
// Coerce and IgnoreMalformed accept either JSON bool or string values
// via the custom UnmarshalJSON below.
Coerce *bool `json:"coerce,omitempty"`
Depth *MappingLimitSettingsDepth `json:"depth,omitempty"`
DimensionFields *MappingLimitSettingsDimensionFields `json:"dimension_fields,omitempty"`
FieldNameLength *MappingLimitSettingsFieldNameLength `json:"field_name_length,omitempty"`
IgnoreMalformed *bool `json:"ignore_malformed,omitempty"`
NestedFields *MappingLimitSettingsNestedFields `json:"nested_fields,omitempty"`
NestedObjects *MappingLimitSettingsNestedObjects `json:"nested_objects,omitempty"`
TotalFields *MappingLimitSettingsTotalFields `json:"total_fields,omitempty"`
}
// UnmarshalJSON decodes the object token-by-token so that the boolean
// fields (coerce, ignore_malformed) can be lenient: Elasticsearch may
// serialize them as either a JSON bool or a string ("true"/"false").
// All other fields delegate to the standard decoder.
func (s *MappingLimitSettings) UnmarshalJSON(data []byte) error {
dec := json.NewDecoder(bytes.NewReader(data))
for {
t, err := dec.Token()
if err != nil {
if errors.Is(err, io.EOF) {
break
}
return err
}
switch t {
case "coerce":
var tmp interface{}
// Decode error deliberately ignored here (generated code);
// an undecodable value simply leaves s.Coerce unset.
dec.Decode(&tmp)
switch v := tmp.(type) {
case string:
value, err := strconv.ParseBool(v)
if err != nil {
return err
}
s.Coerce = &value
case bool:
s.Coerce = &v
}
case "depth":
if err := dec.Decode(&s.Depth); err != nil {
return err
}
case "dimension_fields":
if err := dec.Decode(&s.DimensionFields); err != nil {
return err
}
case "field_name_length":
if err := dec.Decode(&s.FieldNameLength); err != nil {
return err
}
case "ignore_malformed":
var tmp interface{}
// Same lenient bool-or-string handling as "coerce".
dec.Decode(&tmp)
switch v := tmp.(type) {
case string:
value, err := strconv.ParseBool(v)
if err != nil {
return err
}
s.IgnoreMalformed = &value
case bool:
s.IgnoreMalformed = &v
}
case "nested_fields":
if err := dec.Decode(&s.NestedFields); err != nil {
return err
}
case "nested_objects":
if err := dec.Decode(&s.NestedObjects); err != nil {
return err
}
case "total_fields":
if err := dec.Decode(&s.TotalFields); err != nil {
return err
}
}
}
return nil
}
// NewMappingLimitSettings returns a MappingLimitSettings.
func NewMappingLimitSettings() *MappingLimitSettings {
	return &MappingLimitSettings{}
}
|
package main
import (
"github.com/miguelmota/cointop/cointop"
)
// main delegates straight to the cointop terminal UI application.
func main() {
cointop.Run()
}
|
package main
import "sort"
// threeSumClosest returns the sum of the three integers in nums whose
// sum is closest to target. The problem guarantees a unique answer and
// len(nums) >= 3; for defensiveness, fewer than three numbers yields 0.
//
// Sort + two-pointer scan: O(n^2) time, O(1) extra space.
func threeSumClosest(nums []int, target int) int {
	if len(nums) < 3 {
		return 0
	}
	sort.Ints(nums)
	// Seed with the first triple. The original seeded the best distance
	// with the literal 100000000000, which overflows int on 32-bit
	// platforms (a compile error there).
	ans := nums[0] + nums[1] + nums[2]
	minDistance := distance(ans, target)
	for i := 0; i < len(nums); i++ {
		l, r := i+1, len(nums)-1
		for l < r {
			sum := nums[i] + nums[l] + nums[r]
			if d := distance(sum, target); d < minDistance {
				minDistance = d
				ans = sum
			}
			if sum == target {
				// Exact match: no triple can be closer.
				return sum
			}
			if sum > target {
				r--
			} else {
				l++
			}
		}
	}
	return ans
}

// distance returns |a - b|.
func distance(a, b int) int {
	if a > b {
		return a - b
	}
	return b - a
}
// abs returns the absolute value of a.
func abs(a int) int {
	if a < 0 {
		return -a
	}
	return a
}
/*
Summary
1. I previously solved this problem with brute force plus binary search; that code was long and somewhat messy.
2. The two-pointer approach makes the code more concise and lowers the time complexity.
*/
|
/**
* @file
* @copyright defined in aergo/LICENSE.txt
*/
package cmd
import (
"context"
"encoding/json"
"io/ioutil"
"math/big"
"os"
"strings"
"github.com/aergoio/aergo/cmd/aergocli/util"
"github.com/aergoio/aergo/types"
"github.com/mr-tron/base58/base58"
"github.com/spf13/cobra"
)
// revert is declared for the vote subcommands; it is not referenced in
// this chunk — presumably bound to a flag elsewhere. Confirm.
var revert bool
// election selects which on-chain election/parameter to operate on;
// bound to paramCmd's --election flag in init (default "bp").
var election string
// init wires the vote-related subcommands (votestat, bp, param) and
// their flags into the root command.
// NOTE(review): voteCmd (defined below) is never registered here —
// presumably added elsewhere or intentionally disabled; verify.
func init() {
rootCmd.AddCommand(voteStatCmd)
voteStatCmd.Flags().StringVar(&address, "address", "", "address of account")
voteStatCmd.MarkFlagRequired("address")
rootCmd.AddCommand(bpCmd)
bpCmd.Flags().Uint64Var(&number, "count", 23, "the number of elected")
rootCmd.AddCommand(paramCmd)
paramCmd.Flags().StringVar(&election, "election", "bp", "block chain parameter")
}
// voteStatCmd prints the voting status of a single account (execVoteStat).
var voteStatCmd = &cobra.Command{
Use: "votestat",
Short: "show voting stat",
Run: execVoteStat,
}
// voteCmd submits a governance vote transaction (execVote).
var voteCmd = &cobra.Command{
Use: "vote",
Short: "vote to BPs",
Run: execVote,
}
// bpCmd lists the current elected block producers (execBP).
var bpCmd = &cobra.Command{
Use: "bp",
Short: "show BP list",
Run: execBP,
}
// paramCmd shows the vote status of a chain parameter (execParam).
var paramCmd = &cobra.Command{
Use: "param",
Short: "show given parameter status",
Run: execParam,
}
// PeerIDLength is the expected byte length of a decoded peer ID; it is
// not referenced in this chunk.
const PeerIDLength = 39
// execVote builds and sends a governance vote transaction.
//
// Package-level inputs (defined elsewhere): address is the voter account
// (encoded), to carries the vote payload — either inline or, when it
// names an existing file, read from that file — election picks the vote
// kind, client is the RPC stub and aergosystem the governance recipient.
func execVote(cmd *cobra.Command, args []string) {
	account, err := types.DecodeAddress(address)
	if err != nil {
		cmd.Printf("Failed: %s\n", err.Error())
		return
	}
	// If "to" names an existing file, replace it with the file contents.
	_, err = os.Stat(to)
	if err == nil {
		b, readerr := ioutil.ReadFile(to)
		if readerr != nil {
			cmd.Printf("Failed: %s\n", readerr.Error())
			return
		}
		to = string(b)
	}
	var ci types.CallInfo
	switch strings.ToLower(election) {
	case "bp":
		// BP election: "to" is a JSON array of base58 candidate peer IDs.
		ci.Name = types.VoteBP
		err = json.Unmarshal([]byte(to), &ci.Args)
		if err != nil {
			cmd.Printf("Failed: %s\n", err.Error())
			return
		}
		for i, v := range ci.Args {
			if i >= types.MaxCandidates {
				cmd.Println("too many candidates")
				return
			}
			candidate, err := base58.Decode(v.(string))
			if err != nil {
				cmd.Printf("Failed: %s (%s)\n", err.Error(), v)
				return
			}
			_, err = types.IDFromBytes(candidate)
			if err != nil {
				cmd.Printf("Failed: %s (%s)\n", err.Error(), v)
				return
			}
		}
	case "numofbp",
		"gasprice",
		"nameprice",
		"minimumstaking":
		// Numeric parameter votes: "to" is a decimal integer.
		ci.Name = getVoteCmd(election)
		numberArg, ok := new(big.Int).SetString(to, 10)
		if !ok {
			// Bug fix: the original printed err.Error() here, but err is
			// either nil (panic) or a stale os.Stat error (misleading) —
			// SetString reports failure only through ok.
			cmd.Printf("Failed: invalid number %q\n", to)
			return
		}
		ci.Args = append(ci.Args, numberArg.String())
	default:
		cmd.Printf("Failed: Wrong election\n")
		return
	}
	// Nonce must follow the account's current state.
	state, err := client.GetState(context.Background(),
		&types.SingleBytes{Value: account})
	if err != nil {
		cmd.Printf("Failed: %s\n", err.Error())
		return
	}
	payload, err := json.Marshal(ci)
	if err != nil {
		cmd.Printf("Failed: %s\n", err.Error())
		return
	}
	tx := &types.Tx{
		Body: &types.TxBody{
			Account:   account,
			Recipient: []byte(aergosystem),
			Payload:   payload,
			GasLimit:  0,
			Type:      types.TxType_GOVERNANCE,
			Nonce:     state.GetNonce() + 1,
		},
	}
	//cmd.Println(string(payload))
	//TODO : support local
	msg, err := client.SendTX(context.Background(), tx)
	if err != nil {
		cmd.Printf("Failed: %s\n", err.Error())
		return
	}
	cmd.Println(util.JSON(msg))
}
// execVoteStat prints the voting status of the account given by the
// package-level "address" flag value, as JSON.
func execVoteStat(cmd *cobra.Command, args []string) {
	rawAddr, decodeErr := types.DecodeAddress(address)
	if decodeErr != nil {
		cmd.Printf("Failed: %s\n", decodeErr.Error())
		return
	}
	resp, rpcErr := client.GetAccountVotes(context.Background(), &types.AccountAddress{Value: rawAddr})
	if rpcErr != nil {
		cmd.Printf("Failed: %s\n", rpcErr.Error())
		return
	}
	cmd.Println(util.JSON(resp))
}
// execBP prints the current top block-producer votes as a JSON array of
// {candidate: amount} objects, assembled by hand line by line.
func execBP(cmd *cobra.Command, args []string) {
msg, err := client.GetVotes(context.Background(), &types.VoteParams{
Count: uint32(number),
Id: types.VoteBP[2:],
})
if err != nil {
cmd.Printf("Failed: %s\n", err.Error())
return
}
cmd.Println("[")
// Trailing comma after each element except the last.
comma := ","
for i, r := range msg.GetVotes() {
cmd.Printf("{\"" + base58.Encode(r.Candidate) + "\":" + r.GetAmountBigInt().String() + "}")
if i+1 == len(msg.GetVotes()) {
comma = ""
}
cmd.Println(comma)
}
cmd.Println("]")
}
// getVoteCmd maps a lower-cased parameter name ("numofbp", "gasprice",
// "nameprice", "minimumstaking") to its internal vote ID. Unknown
// parameters yield the empty string.
func getVoteCmd(param string) string {
	numberVote := map[string]string{
		"numofbp":        types.VoteNumBP,
		"gasprice":       types.VoteGasPrice,
		"nameprice":      types.VoteNamePrice,
		"minimumstaking": types.VoteMinStaking,
	}
	// Bug fix: the original ignored its parameter and read the global
	// "election" instead. Both existing callers pass election as the
	// argument, so behavior at current call sites is unchanged.
	return numberVote[param]
}
// execParam prints the vote status of the chain parameter selected by
// the --election flag as a JSON array of {value: amount} objects.
func execParam(cmd *cobra.Command, args []string) {
id := getVoteCmd(election)
if len(id) == 0 {
cmd.Printf("Failed: unsupported parameter : %s\n", election)
return
}
msg, err := client.GetVotes(context.Background(), &types.VoteParams{
Count: uint32(number),
Id: id[2:],
})
if err != nil {
cmd.Printf("Failed: %s\n", err.Error())
return
}
cmd.Println("[")
// Trailing comma after each element except the last.
comma := ","
for i, r := range msg.GetVotes() {
// Candidate bytes hold a decimal number here; parse error is
// ignored — value would be nil and String() would panic.
// NOTE(review): consider guarding; confirm candidates are always
// well-formed decimals.
value, _ := new(big.Int).SetString(string(r.Candidate), 10)
cmd.Printf("{\"" + value.String() + "\":" + r.GetAmountBigInt().String() + "}")
if i+1 == len(msg.GetVotes()) {
comma = ""
}
cmd.Println(comma)
}
cmd.Println("]")
}
|
package main
import (
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"log"
"math/rand"
"net/http"
)
// Weather is the JSON document persisted in weather.json.
type Weather struct {
Status WeatherCondition `json:"status"`
}
// WeatherCondition holds the simulated sensor readings.
type WeatherCondition struct {
Water int `json:"Water"` // water level; printed with unit "m" by index
Wind int `json:"Wind"` // wind speed; printed with unit "kmph" by index
}
// index renders index.html, then appends the current readings from
// weather.json and their status lines to the same response.
// NOTE(review): the handler writes template output first and then more
// plain text after it, and regenerates weather.json (dataJson) only
// after reading it — so each request shows the previous request's data.
// Confirm both behaviors are intentional.
func index(w http.ResponseWriter, r *http.Request) {
// run html file
t, err := template.ParseFiles("index.html")
if err != nil {
panic(err)
}
err = t.ExecuteTemplate(w, "index.html", nil)
if err != nil {
panic(err)
}
// Read errors are only logged; obj then stays zero-valued below.
data, err := ioutil.ReadFile("weather.json")
if err != nil {
fmt.Print(err)
}
var obj Weather
err = json.Unmarshal(data, &obj)
if err != nil {
fmt.Println("error:", err)
}
dataJson()
fmt.Fprintln(w, "The Weather is :")
fmt.Fprintln(w, "wind :", obj.Status.Wind, "kmph")
fmt.Fprintln(w, "water :", obj.Status.Water, "m")
condition(w, obj)
}
// condition writes human-readable status lines for the wind and water
// readings to w: "aman" (safe), "status siaga" (standby) or "bahaya"
// (danger).
func condition(w http.ResponseWriter, obj Weather) {
	wind := obj.Status.Wind
	switch {
	case wind <= 6:
		fmt.Fprintln(w, "Wind :aman")
	case wind <= 15:
		fmt.Fprintln(w, "Wind :status siaga")
	default:
		fmt.Fprintln(w, "Wind :bahaya")
	}

	water := obj.Status.Water
	switch {
	case water <= 5:
		fmt.Fprintln(w, "Water :aman")
	case water <= 8:
		fmt.Fprintln(w, "Water :status siaga")
	default:
		fmt.Fprintln(w, "Water :bahaya")
	}
}
// main starts the weather HTTP server on localhost:9090 with the index
// handler mounted at the root path.
func main() {
address := "localhost:9090"
http.HandleFunc("/", index)
log.Printf("Your service is up and running at : " + address)
err := http.ListenAndServe(address, nil)
if err != nil {
log.Fatal("Error running service: ", err)
}
}
// dataJson generates random wind/water readings (0-99) and overwrites
// weather.json with them. Marshal/write errors are deliberately ignored
// (best-effort regeneration).
// NOTE(review): math/rand is never seeded here, so pre-Go-1.20 builds
// produce the same sequence each run — confirm that is acceptable.
func dataJson() {
wind := rand.Intn(100)
water := rand.Intn(100)
data := Weather{
Status: WeatherCondition{Wind: wind,
Water: water},
}
file, _ := json.MarshalIndent(data, "", " ")
//write the file
_ = ioutil.WriteFile("weather.json", file, 0644)
}
|
package main
import (
"time"
validator "gopkg.in/go-playground/validator.v9"
)
// Validate date in the form (YYYY-MM-DD).
// All dates in publiccode.yml must follow the format "YYYY-MM-DD",
// which is one of the ISO8601 allowed encoding.
// This is the only allowed encoding though, so not the full ISO8601
// is allowed for the date keys.*/
// dateValidator reports whether the field's string value is a date in
// the strict "YYYY-MM-DD" format — the only ISO 8601 encoding accepted
// for publiccode.yml date keys.
func dateValidator(fl validator.FieldLevel) bool {
	_, err := time.Parse("2006-01-02", fl.Field().String())
	return err == nil
}
|
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
)
// defaultScratchfile returns the path of the bundled scratch Dockerfile
// template under $GOPATH. Note os.ExpandEnv leaves a leading "/src/..."
// path if GOPATH is unset.
func defaultScratchfile() string {
return os.ExpandEnv("$GOPATH/src/github.com/fletcher91/docker-go/Dockerfile.scratch")
}
// transformScratchDockerfile substitutes ${EXEC_NAME} and
// ${ARTIFACTS_PATH} placeholders in the default scratch Dockerfile and
// writes the result to outFile. It only runs when the user kept the
// default template (custom templates are used as-is elsewhere).
// NOTE(review): errors from ReadFile go through handleErr, but the
// WriteFile error is dropped, and 0777 is unusually permissive for a
// generated file — confirm both are intentional. The replacement count
// of 2 also silently ignores any further placeholder occurrences.
func transformScratchDockerfile(outFile string) {
if defaultScratchfile() == *scratchfile {
dockerfile, err := ioutil.ReadFile(*scratchfile)
handleErr(err)
replaced := bytes.Replace(
dockerfile,
[]byte("${EXEC_NAME}"),
[]byte(*execName),
2,
)
replaced = bytes.Replace(
replaced,
[]byte("${ARTIFACTS_PATH}"),
[]byte(*artifacts),
2,
)
ioutil.WriteFile(outFile, replaced, 0777)
}
}
// bakeImage renders the final scratch Dockerfile into the artifacts
// directory and runs "docker build" against the project directory,
// tagging the result *imageName, then prints the tag.
// NOTE(review): artPath concatenates GOPATH and *projectName with no
// separator ("%s%s"), so *projectName is presumably expected to start
// with "/" (or "/src/..."); confirm against how projectName is set.
func bakeImage() {
artPath := fmt.Sprintf("%s%s/%s", os.Getenv("GOPATH"), *projectName, *artifacts)
tScratchFile := fmt.Sprintf("%s/Dockerfile.scratch.final", artPath)
transformScratchDockerfile(tScratchFile)
bakeCmd := exec.Command(
"docker", "build",
"-t", *imageName,
"-f", tScratchFile,
fmt.Sprintf("%s%s", os.Getenv("GOPATH"), *projectName),
)
executeCommand(bakeCmd)
fmt.Println(*imageName)
}
|
package main
import "fmt"
// main prints a reminder when the hard-coded year is a leap year.
func main() {
	year := 2016
	if isLeap(year) {
		fmt.Println("Look before you leap.")
	}
}

// isLeap implements the full Gregorian leap-year rule: divisible by 4,
// except century years, which must also be divisible by 400. The
// original tested only year%4 == 0, which misclassifies years like
// 1900. Output for 2016 is unchanged.
func isLeap(year int) bool {
	return year%4 == 0 && (year%100 != 0 || year%400 == 0)
}
|
package script
// Bool is a script-level boolean value; it embeds the generic Type.
type Bool struct {
Type
}
//AnyBool is anything that can retrieve a script.Bool.
type AnyBool interface {
BoolFromCtx(AnyCtx) Bool
}
//BoolFromCtx implements AnyBool.
func (b Bool) BoolFromCtx(AnyCtx) Bool {
return b
}
// Bool wraps a Go bool literal as a script Bool in this context.
func (q Ctx) Bool(literal bool) Bool {
return Bool{q.Literal(literal)}
}
// Not delegates boolean negation to the context's target language.
func (q Ctx) Not(b Bool) Bool {
return q.Language.Not(b)
}
|
package main
import (
"fmt"
"net/http"
)
func helloWorld(w http.ResponseWriter, r *http.Request) {
r.ParseForm()
fmt.Fprintf(w, "%v \n", r.Form)
fmt.Fprintf(w, "url: %s \n", r.URL)
fmt.Fprintf(w, "scheme: %s \n", r.URL.Scheme)
fmt.Fprintf(w, "hello world ... \n")
}
// main serves helloWorld at the root path on port 9090; a listen
// failure is reported and the program exits normally.
func main() {
http.HandleFunc("/", helloWorld)
err := http.ListenAndServe(":9090", nil)
if err != nil {
fmt.Println("http listen failed... err: ", err)
}
}
|
package ytrwrap
import (
"net/http"
"testing"
"github.com/stretchr/testify/assert"
)
// TestClientGet verifies that fetching an unreachable local address
// yields an error and the fetcher's internal-server-error status code.
// Assumes nothing is listening on localhost:1234 when the test runs.
func TestClientGet(t *testing.T) {
ff := NewFetcher(nil)
_, code, err := ff.Get("http://localhost:1234")
assert.NotNil(t, err, "err")
assert.Equal(t, http.StatusInternalServerError, code, "code")
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
iappb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/iap/iap_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap"
)
// IdentityAwareProxyClientServer implements the gRPC interface for IdentityAwareProxyClient.
type IdentityAwareProxyClientServer struct{}
// ProtoToIdentityAwareProxyClient converts a IdentityAwareProxyClient resource from its proto representation.
func ProtoToIdentityAwareProxyClient(p *iappb.IapIdentityAwareProxyClient) *iap.IdentityAwareProxyClient {
obj := &iap.IdentityAwareProxyClient{
Name: dcl.StringOrNil(p.GetName()),
Secret: dcl.StringOrNil(p.GetSecret()),
DisplayName: dcl.StringOrNil(p.GetDisplayName()),
Project: dcl.StringOrNil(p.GetProject()),
Brand: dcl.StringOrNil(p.GetBrand()),
}
return obj
}
// IdentityAwareProxyClientToProto converts a IdentityAwareProxyClient resource to its proto representation.
func IdentityAwareProxyClientToProto(resource *iap.IdentityAwareProxyClient) *iappb.IapIdentityAwareProxyClient {
p := &iappb.IapIdentityAwareProxyClient{}
p.SetName(dcl.ValueOrEmptyString(resource.Name))
p.SetSecret(dcl.ValueOrEmptyString(resource.Secret))
p.SetDisplayName(dcl.ValueOrEmptyString(resource.DisplayName))
p.SetProject(dcl.ValueOrEmptyString(resource.Project))
p.SetBrand(dcl.ValueOrEmptyString(resource.Brand))
return p
}
// applyIdentityAwareProxyClient handles the gRPC request by passing it to the underlying IdentityAwareProxyClient Apply() method.
func (s *IdentityAwareProxyClientServer) applyIdentityAwareProxyClient(ctx context.Context, c *iap.Client, request *iappb.ApplyIapIdentityAwareProxyClientRequest) (*iappb.IapIdentityAwareProxyClient, error) {
p := ProtoToIdentityAwareProxyClient(request.GetResource())
res, err := c.ApplyIdentityAwareProxyClient(ctx, p)
if err != nil {
return nil, err
}
r := IdentityAwareProxyClientToProto(res)
return r, nil
}
// ApplyIapIdentityAwareProxyClient handles the gRPC request by passing it to the underlying IdentityAwareProxyClient Apply() method.
func (s *IdentityAwareProxyClientServer) ApplyIapIdentityAwareProxyClient(ctx context.Context, request *iappb.ApplyIapIdentityAwareProxyClientRequest) (*iappb.IapIdentityAwareProxyClient, error) {
cl, err := createConfigIdentityAwareProxyClient(ctx, request.GetServiceAccountFile())
if err != nil {
return nil, err
}
return s.applyIdentityAwareProxyClient(ctx, cl, request)
}
// DeleteIapIdentityAwareProxyClient handles the gRPC request by passing it to the underlying IdentityAwareProxyClient Delete() method.
func (s *IdentityAwareProxyClientServer) DeleteIapIdentityAwareProxyClient(ctx context.Context, request *iappb.DeleteIapIdentityAwareProxyClientRequest) (*emptypb.Empty, error) {
cl, err := createConfigIdentityAwareProxyClient(ctx, request.GetServiceAccountFile())
if err != nil {
return nil, err
}
return &emptypb.Empty{}, cl.DeleteIdentityAwareProxyClient(ctx, ProtoToIdentityAwareProxyClient(request.GetResource()))
}
// ListIapIdentityAwareProxyClient handles the gRPC request by passing it to the underlying IdentityAwareProxyClientList() method.
func (s *IdentityAwareProxyClientServer) ListIapIdentityAwareProxyClient(ctx context.Context, request *iappb.ListIapIdentityAwareProxyClientRequest) (*iappb.ListIapIdentityAwareProxyClientResponse, error) {
cl, err := createConfigIdentityAwareProxyClient(ctx, request.GetServiceAccountFile())
if err != nil {
return nil, err
}
resources, err := cl.ListIdentityAwareProxyClient(ctx, request.GetProject(), request.GetBrand())
if err != nil {
return nil, err
}
var protos []*iappb.IapIdentityAwareProxyClient
for _, r := range resources.Items {
rp := IdentityAwareProxyClientToProto(r)
protos = append(protos, rp)
}
p := &iappb.ListIapIdentityAwareProxyClientResponse{}
p.SetItems(protos)
return p, nil
}
// createConfigIdentityAwareProxyClient builds an iap.Client configured
// with the given service-account credentials file.
func createConfigIdentityAwareProxyClient(ctx context.Context, service_account_file string) (*iap.Client, error) {
conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
return iap.NewClient(conf), nil
}
|
package romannumerals
import "errors"
import "bytes"
import "strings"
// testVersion pins the exercise test-suite version this solution targets.
const testVersion = 3
// The Romans wrote numbers using letters - I, V, X, L, C, D, M.
// I - 1 : 1-3
// V - 5 : 4-8
// X - 10 : 9-39
// L - 50 : 40-89
// C - 100: 90-399
// D - 500: 400-899
// M - 1000: 900-3999
// this style looks much shorter than the if else version,
// but does not mean faster
// translateTable maps Arabic values to Roman digits, including the
// subtractive pairs (IV, IX, ...), in ascending order.
var translateTable = []struct {
	arabic int
	roman  string
}{
	{1, "I"}, {4, "IV"}, {5, "V"}, {9, "IX"}, {10, "X"}, {40, "XL"}, {50, "L"},
	{90, "XC"}, {100, "C"}, {400, "CD"}, {500, "D"}, {900, "CM"}, {1000, "M"},
}

// ToRomanNumeral converts n into its Roman numeral representation.
// Valid input range is 1-3999 (the original header comment claimed
// 0-3000, contradicting the enforced check); anything else errors.
func ToRomanNumeral(n int) (string, error) {
	if n <= 0 || n >= 4000 {
		return "", errors.New("n must be within range 1-3999")
	}
	var buf bytes.Buffer
	// Greedy: take the largest table value not exceeding n. One pass
	// from largest to smallest always drives n to 0 (the table ends at
	// 1), so the original's enclosing retry loop was dead and removed.
	for i := len(translateTable) - 1; i >= 0; i-- {
		cnt := n / translateTable[i].arabic
		n %= translateTable[i].arabic
		buf.WriteString(strings.Repeat(translateTable[i].roman, cnt))
	}
	return buf.String(), nil
}
|
package constant
const (
// URLCode is the WeChat OAuth2 authorize endpoint template:
// appid, redirect_uri, scope and state are filled via fmt.Sprintf.
URLCode = "https://open.weixin.qq.com/connect/oauth2/authorize?appid=%s&redirect_uri=%s&response_type=code&scope=%s&state=%s#wechat_redirect"
// URLToken exchanges an authorization code for an access token.
URLToken = "https://api.weixin.qq.com/sns/oauth2/access_token"
// URLRefresh refreshes an expired access token.
URLRefresh = "https://api.weixin.qq.com/sns/oauth2/refresh_token"
// URLUserInfo fetches the user profile for an access token/openid.
URLUserInfo = "https://api.weixin.qq.com/sns/userinfo"
// URLWeixinDefaultURL is a placeholder default redirect target.
URLWeixinDefaultURL = "<default weixin url>"
)
|
package session
import (
"fmt"
"os"
"testing"
"time"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/trussworks/sesh/pkg/dbstore"
"github.com/trussworks/sesh/pkg/domain"
"github.com/trussworks/sesh/pkg/mock"
)
func dbURLFromEnv() string {
host := os.Getenv("DATABASE_HOST")
port := os.Getenv("DATABASE_PORT")
name := os.Getenv("DATABASE_NAME")
user := os.Getenv("DATABASE_USER")
// password := os.Getenv("DATABASE_PASSWORD")
sslmode := os.Getenv("DATABASE_SSL_MODE")
connStr := fmt.Sprintf("postgres://%s@%s:%s/%s?sslmode=%s", user, host, port, name, sslmode)
return connStr
}
// getTestStore opens a postgres-backed session store using the
// DATABASE_* environment; the test fails fatally if the driver rejects
// the connection string (sqlx.Open does not dial, so a down database
// surfaces later).
func getTestStore(t *testing.T) domain.SessionStorageService {
t.Helper()
connStr := dbURLFromEnv()
connection, err := sqlx.Open("postgres", connStr)
if err != nil {
t.Fatal(err)
return nil
}
return dbstore.NewDBStore(connection)
}
// TestAuthExists smoke-tests that authenticating a user succeeds
// against a real store.
// NOTE(review): the session key and error returned by
// UserDidAuthenticate are both discarded — consider checking the error.
func TestAuthExists(t *testing.T) {
timeout := 5 * time.Second
store := getTestStore(t)
defer store.Close()
sessionLog := domain.FmtLogger(true)
session := NewSessionService(timeout, store, sessionLog)
session.UserDidAuthenticate("foo")
}
// TestLogSessionCreatedDestroyed walks a full session lifecycle
// (create, logout, failed lookup) and asserts that each step logs an
// INFO record containing a hashed — never the raw — session key.
func TestLogSessionCreatedDestroyed(t *testing.T) {
timeout := 5 * time.Second
store := getTestStore(t)
defer store.Close()
sessionLog := mock.NewLogRecorder(domain.FmtLogger(true))
session := NewSessionService(timeout, store, &sessionLog)
accountID := uuid.New().String()
sessionKey, authErr := session.UserDidAuthenticate(accountID)
if authErr != nil {
t.Fatal(authErr)
}
createMsg, logErr := sessionLog.GetOnlyMatchingMessage(domain.SessionCreated)
if logErr != nil {
t.Fatal(logErr)
}
if createMsg.Level != "INFO" {
t.Fatal("Wrong Log Level", createMsg.Level)
}
sessionHash, ok := createMsg.Fields["session_hash"]
if !ok {
t.Fatal("Didn't log the hashed session key")
}
if sessionHash == sessionKey {
t.Fatal("We logged the actual session key!")
}
delErr := session.UserDidLogout(sessionKey)
if delErr != nil {
t.Fatal(delErr)
}
delMsg, delLogErr := sessionLog.GetOnlyMatchingMessage(domain.SessionDestroyed)
if delLogErr != nil {
t.Fatal(delLogErr)
}
if delMsg.Level != "INFO" {
t.Fatal("Wrong Log Level", delMsg.Level)
}
delSessionHash, ok := delMsg.Fields["session_hash"]
if !ok {
t.Fatal("Didn't log the hashed session key")
}
if delSessionHash == sessionKey {
t.Fatal("We logged the actual session key!")
}
// Looking up the destroyed session must fail with the sentinel error
// and log a "does not exist" record, again with only the hash.
_, getErr := session.GetSessionIfValid(sessionKey)
if getErr != domain.ErrValidSessionNotFound {
t.Fatal(getErr)
}
nonExistantMsg, logNonExistantErr := sessionLog.GetOnlyMatchingMessage(domain.SessionDoesNotExist)
if logNonExistantErr != nil {
t.Fatal(logNonExistantErr)
}
nonExistantSessionHash, ok := nonExistantMsg.Fields["session_hash"]
if !ok {
t.Fatal("Didn't log the hashed session key")
}
if nonExistantSessionHash == sessionKey {
t.Fatal("We logged the actual session key!")
}
}
// TestLogSessionExpired uses a negative timeout so every session is
// already expired, then asserts the expiry error, the expiry log record
// (hash only), and that re-authentication still works afterwards.
func TestLogSessionExpired(t *testing.T) {
timeout := -5 * time.Second
store := getTestStore(t)
defer store.Close()
sessionLog := mock.NewLogRecorder(domain.FmtLogger(true))
session := NewSessionService(timeout, store, &sessionLog)
accountID := uuid.New().String()
sessionKey, authErr := session.UserDidAuthenticate(accountID)
if authErr != nil {
t.Fatal(authErr)
}
logCreateMsg, logCreateErr := sessionLog.GetOnlyMatchingMessage(domain.SessionCreated)
if logCreateErr != nil {
t.Fatal(logCreateErr)
}
if logCreateMsg.Level != "INFO" {
t.Fatal("Wrong Log Level", logCreateMsg.Level)
}
_, getErr := session.GetSessionIfValid(sessionKey)
if getErr != domain.ErrSessionExpired {
t.Fatal("didn't get the right error back getting the expired session:", getErr)
}
expiredMsg, logExpiredErr := sessionLog.GetOnlyMatchingMessage(domain.SessionExpired)
if logExpiredErr != nil {
t.Fatal(logExpiredErr)
}
expiredSessionHash, ok := expiredMsg.Fields["session_hash"]
if !ok {
t.Fatal("Didn't log the hashed session key")
}
if expiredSessionHash == sessionKey {
t.Fatal("We logged the actual session key!")
}
// make sure you can re-auth after ending a session
_, newAuthErr := session.UserDidAuthenticate(accountID)
if newAuthErr != nil {
t.Fatal(newAuthErr)
}
}
// TestLogConcurrentSession tests that if you create a session, then create a new session over it, we log something.
func TestLogConcurrentSession(t *testing.T) {
timeout := 5 * time.Second
store := getTestStore(t)
defer store.Close()
sessionLog := mock.NewLogRecorder(domain.FmtLogger(true))
session := NewSessionService(timeout, store, &sessionLog)
accountID := uuid.New().String()
_, authErr := session.UserDidAuthenticate(accountID)
if authErr != nil {
t.Fatal(authErr)
}
_, logCreateErr := sessionLog.GetOnlyMatchingMessage(domain.SessionCreated)
if logCreateErr != nil {
t.Fatal(logCreateErr)
}
// Now login again:
_, authAgainErr := session.UserDidAuthenticate(accountID)
if authAgainErr != nil {
t.Fatal(authAgainErr)
}
createMessages := sessionLog.MatchingMessages(domain.SessionCreated)
if len(createMessages) != 2 {
t.Fatal("Should have 2 create messages now")
}
_, logConcurrentErr := sessionLog.GetOnlyMatchingMessage(domain.SessionConcurrentLogin)
if logConcurrentErr != nil {
t.Fatal(logConcurrentErr)
}
}
|
package physics
import "math"
var (
// Gravity is the constant downward acceleration vector used by the
// simulation (negative = downward by this package's convention).
// The magnitude 5 is a gameplay tuning value, not 9.8.
Gravity Vector = Vector(-5)
)
// Direction is the sign of a vector: positive or negative.
type Direction int

const (
	PositiveDirection Direction = 1
	// Bug fix: the original wrote "NegativeDirection = -1" without the
	// type, making it an untyped int constant rather than a Direction;
	// both constants now share the Direction type.
	NegativeDirection Direction = -1
)

// Vector is a one-dimensional vector: a magnitude with a sign.
type Vector float64

// Direction reports the sign of f. Zero is reported as negative —
// presumably acceptable for this package (e.g. resting objects fall);
// confirm if zero needs special handling.
func (f Vector) Direction() Direction {
	if f > 0 {
		return PositiveDirection
	}
	return NegativeDirection
}

// Magnitude returns the absolute value of f.
func (f Vector) Magnitude() float64 {
	return math.Abs(float64(f))
}

// AddVectors sums any number of vectors (zero vectors yields 0).
func AddVectors(vectors ...Vector) Vector {
	var total Vector
	for _, vec := range vectors {
		total += vec
	}
	return total
}
|
// Runs instantly, using Dijkstra and priority queue
package main
import "fmt"
import "strings"
import "strconv"
import "io/ioutil"
import "github.com/roessland/gopkg/digraph"
// N is the matrix dimension (rows = columns), set by ReadMatrix.
var N int
// mat is the NxN cost matrix loaded from the input file.
var mat [][]float64
// ReadMatrix loads a comma-separated square matrix from filename into
// the package globals N and mat. A trailing newline is assumed (the
// last split element is dropped). Panics on read failure; parse errors
// are ignored and leave 0 in the affected cell.
func ReadMatrix(filename string) {
buf, err := ioutil.ReadFile(filename)
if err != nil {
panic(err.Error())
}
s := string(buf)
lines := strings.Split(s, "\n")
N = len(lines) - 1
lines = lines[0:N]
mat = make([][]float64, N)
for i, line := range lines {
mat[i] = make([]float64, N)
for j, numStr := range strings.Split(line, ",") {
num, _ := strconv.ParseFloat(numStr, 64)
mat[i][j] = num
}
}
}
// Idx flattens matrix coordinates (i, j) to a node index in the N*N graph.
func Idx(i, j int) int {
return N*i + j
}
// main solves Project Euler 83: builds a 4-connected grid digraph whose
// edge weights are the destination cell values, runs Dijkstra from the
// top-left corner, and prints the minimal path sum to the bottom-right
// (adding mat[0][0], which no edge accounts for).
func main() {
ReadMatrix("p083_matrix.txt")
//ReadMatrix("smallmatrix.txt")
graph := digraph.Graph{make([]digraph.Node, N*N)}
for i := 0; i < N; i++ {
for j := 0; j < N; j++ {
nodeIdx := Idx(i, j)
// Add edge upwards if possible
if i != 0 {
graph.Nodes[nodeIdx].Neighbors =
append(graph.Nodes[nodeIdx].Neighbors, digraph.Edge{Idx(i-1, j), mat[i-1][j]})
}
// Add edge downwards if possible
if i != N-1 {
graph.Nodes[nodeIdx].Neighbors =
append(graph.Nodes[nodeIdx].Neighbors, digraph.Edge{Idx(i+1, j), mat[i+1][j]})
}
// Add edge rightwards if possible
if j != N-1 {
graph.Nodes[nodeIdx].Neighbors =
append(graph.Nodes[nodeIdx].Neighbors, digraph.Edge{Idx(i, j+1), mat[i][j+1]})
}
// Add edge leftwards if possible (original comment said
// "rightwards" — copy/paste slip).
if j != 0 {
graph.Nodes[nodeIdx].Neighbors =
append(graph.Nodes[nodeIdx].Neighbors, digraph.Edge{Idx(i, j-1), mat[i][j-1]})
}
}
}
dist, _ := digraph.Dijkstra(graph, Idx(0, 0))
fmt.Printf("%v\n", mat[0][0]+dist[Idx(N-1, N-1)])
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
// CheckError aborts the process via log.Fatal when err is non-nil;
// a nil err is a no-op.
func CheckError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
var data []string
var dataSize float64
func LoadData(basePath string, filePattern string) error {
dir, _ := os.ReadDir(basePath)
for _, file := range dir {
name := file.Name()
matched, _ := filepath.Match(filePattern, name)
if matched {
contents, err := ioutil.ReadFile(fmt.Sprintf("%s%c%s", basePath, os.PathSeparator, name))
if err != nil {
return err
}
var jsonElements []map[string]interface{}
err = json.Unmarshal(contents, &jsonElements)
if err != nil {
return err
}
for _, value := range jsonElements {
b, _ := json.Marshal(value)
data = append(data, string(b))
}
}
}
dataSize = float64(len(data))
log.Printf("Loaded %d records to be served over %d seconds, at approximately %d per second", int(dataSize), int(runLength), int(dataSize/runLength))
return nil
}
// curPos is the index of the next unserved record in data.
var curPos int
// runLength is the configured serving window in seconds.
var runLength float64
// startTime is the Unix time (seconds) at which serving began.
var startTime int
// handler serves the slice of records that "should" have been emitted
// by now, pacing the full dataset evenly over runLength seconds:
// expected = elapsed/runLength * dataSize, capped at the last index.
// Records are newline-joined; an empty body means no new records yet.
func handler(w http.ResponseWriter, r *http.Request) {
expected := float64(int(time.Now().Unix())-startTime) / runLength * dataSize
max := int(math.Min(expected, float64(len(data)-1)))
// handle needing an empty result
if curPos == max || max == 0 {
fmt.Fprintf(w, "")
} else {
fmt.Fprintf(w, "%s", strings.Join(data[curPos:max], "\n"))
}
//log.Printf("Returning %d records, current index %d, moving towards %d with total uptime of %d and service period of %d", max-curPos, curPos, max, (int(time.Now().Unix()) - startTime), int(runLength))
// NOTE(review): capping at len(data)-1 means the final record is never
// served — confirm whether that off-by-one is intentional.
curPos = max
}
// main parses flags, loads the dataset, and serves it paced over the
// configured window at /next.
func main() {
	// load data
	basePath := flag.String("d", ".", "directory of files to load")
	filePattern := flag.String("g", "*.json", "glob pattern of json files to load")
	length := flag.Int("l", 300, "how many seconds data will be served for (at most 1 call after will return results)")
	port := flag.Int("p", 4056, "What port to listen to HTTP requests on.")
	flag.Parse()
	runLength = float64(*length)
	err := LoadData(*basePath, *filePattern)
	CheckError(err)
	http.HandleFunc("/next", handler)
	startTime = int(time.Now().Unix())
	// Bug fix: the original ignored the ListenAndServe error, so e.g. a
	// busy port made the program exit silently with status 0.
	CheckError(http.ListenAndServe(fmt.Sprintf(":%d", *port), nil))
}
|
package v1alpha1
import (
"github.com/operator-framework/operator-sdk/pkg/status"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ContainerSnapshotSpec defines the desired state of ContainerSnapshot
type ContainerSnapshotSpec struct {
// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
// PodName+ContainerName is the name of the running container going to have a snapshot
PodName string `json:"podName"`
ContainerName string `json:"containerName"`
// Image is the snapshot image, registry host and tag are optional
Image string `json:"image"`
// ImagePushSecrets are references to docker-registry secret in the same namespace to use for pushing checkout image,
// same as an ImagePullSecrets.
// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
ImagePushSecrets []v1.LocalObjectReference `json:"imagePushSecrets"`
}
// ContainerSnapshotStatus defines the observed state of ContainerSnapshot:
// a reference to the worker job performing the commit/push, where it must
// run, the source container id, the worker's lifecycle state, and any
// error conditions observed along the way.
type ContainerSnapshotStatus struct {
	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
	// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
	// JobRef is a reference to the internal snapshot job which does the real commit/push works
	JobRef v1.LocalObjectReference `json:"jobRef"`
	// NodeName is the name of the node the container running on, the snapshot job must run on this node
	// (docker commit needs access to the node-local container runtime).
	NodeName string `json:"nodeName"`
	// ContainerID is the docker id of the source container
	ContainerID string `json:"containerID"`
	// WorkerState is the container snapshot worker state; see the WorkerState
	// constants below for the allowed values.
	// +kubebuilder:validation:Enum=Created;Running;Complete;Failed;Unknown
	WorkerState WorkerState `json:"workerState"`
	// The latest available observations of the snapshot
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions status.Conditions `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
}
// WorkerState indicates underlaying snapshot worker state.
// The value set must stay in sync with the kubebuilder Enum marker on
// ContainerSnapshotStatus.WorkerState.
type WorkerState string

// Lifecycle states of the snapshot worker job.
const (
	WorkerCreated  WorkerState = "Created"  // worker pod created, not yet started
	WorkerRunning  WorkerState = "Running"  // commit/push in progress
	WorkerComplete WorkerState = "Complete" // snapshot pushed successfully
	WorkerFailed   WorkerState = "Failed"   // worker terminated with an error
	WorkerUnknown  WorkerState = "Unknown"  // state could not be determined
)
// Conditions indicate errors occurred when creating or running the snapshot worker pod.
// NOTE(review): the identifier SourcePodFinishied is misspelled ("Finishied");
// its string value is the correct "SourcePodFinished". The identifier is
// exported, so renaming it would break external callers — flagging only.
const (
	SourcePodNotFound       status.ConditionType = "SourcePodNotFound"       // spec.podName does not exist
	SourceContainerNotFound status.ConditionType = "SourceContainerNotFound" // spec.containerName not in the pod
	SourcePodNotReady       status.ConditionType = "SourcePodNotReady"       // pod exists but is not ready
	SourcePodFinishied      status.ConditionType = "SourcePodFinished"       // pod already terminated
	DockerCommitFailed      status.ConditionType = "DockerCommitFailed"      // docker commit step failed
	DockerPushFailed        status.ConditionType = "DockerPushFailed"        // pushing the image to the registry failed
	InvalidImage            status.ConditionType = "InvalidImage"            // spec.image could not be parsed/used
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ContainerSnapshot is the Schema for the containersnapshots API.
// The kubebuilder markers below configure the generated CRD: a status
// subresource, namespaced scope, and the columns shown by `kubectl get`.
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=containersnapshots,scope=Namespaced
// +kubebuilder:printcolumn:name="Name",type="string",JSONPath=".metadata.name"
// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.podName",description="pod name of snapshot source"
// +kubebuilder:printcolumn:name="Container",type="string",JSONPath=".spec.containerName",description="container name of snapshot source"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.workerState",description="container snapshot worker state"
type ContainerSnapshot struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ContainerSnapshotSpec   `json:"spec,omitempty"`
	Status ContainerSnapshotStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ContainerSnapshotList contains a list of ContainerSnapshot, as returned
// by LIST/WATCH operations against the API server.
type ContainerSnapshotList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ContainerSnapshot `json:"items"`
}
// init registers both CRD types with the package's SchemeBuilder so the
// API machinery can serialize and deserialize them.
func init() {
	SchemeBuilder.Register(&ContainerSnapshot{}, &ContainerSnapshotList{})
}
|
package main
import (
"fmt"
)
// person holds a person's name and their favourite ice-cream flavours.
// NOTE(review): Go convention is MixedCaps (firstName, favIcecream), not
// snake_case; renaming the fields would require changing main as well,
// so the names are kept and only flagged here.
type person struct {
	first_name   string
	last_name    string
	fav_icecream []string
}
// main builds two sample person values and prints their favourite
// ice-cream flavours, demonstrating both the index+value and the
// value-only forms of range over a slice.
func main() {
	alireza := person{
		first_name: "Alireza",
		last_name:  "Alavi",
		fav_icecream: []string{
			"Dark Choclate",
			"Vanilla Ice",
		},
	}
	marian := person{
		first_name: "Marian",
		last_name:  "Alavi Razavi",
		fav_icecream: []string{
			"Strawberry",
			"Saffron",
		},
	}

	// Index and flavour for the first person.
	for idx, flavour := range alireza.fav_icecream {
		fmt.Println(idx, flavour)
	}
	// Flavour only for the second.
	for _, flavour := range marian.fav_icecream {
		fmt.Println(flavour)
	}
}
|
package controller
import (
"encoding/json"
"github.com/bearname/videohost/internal/common/caching"
commonDto "github.com/bearname/videohost/internal/common/dto"
"github.com/bearname/videohost/internal/common/infrarstructure/transport/controller"
"github.com/bearname/videohost/internal/common/util"
"github.com/bearname/videohost/internal/videoserver/domain"
"github.com/bearname/videohost/internal/videoserver/domain/dto"
"github.com/bearname/videohost/internal/videoserver/domain/model"
"github.com/bearname/videohost/internal/videoserver/infrastructure/transport/requestparser"
"github.com/gorilla/context"
"github.com/gorilla/mux"
"net/http"
"strconv"
"time"
)
// PlayListController serves the HTTP endpoints for playlist management:
// create, read, video modification, privacy change, and deletion.
type PlayListController struct {
	controller.BaseController
	// cache is never assigned by NewPlayListController — presumably wired
	// elsewhere or dead; TODO confirm before relying on it.
	cache caching.Cache
	// playListService implements the playlist business logic.
	playListService domain.PlayListService
	// authServerAddress is the auth service used to validate bearer tokens.
	authServerAddress string
}
// NewPlayListController wires a controller to its playlist service and
// the address of the auth server used for token validation.
func NewPlayListController(playListService domain.PlayListService, authServerAddress string) *PlayListController {
	return &PlayListController{
		playListService:   playListService,
		authServerAddress: authServerAddress,
	}
}
// CreatePlaylist returns an HTTP handler that creates a playlist owned by
// the authenticated user (userId is injected into the request context by
// upstream middleware). The body must decode into CreatePlayListRequest.
func (c *PlayListController) CreatePlaylist() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		uid := context.Get(r, "userId").(string)
		context.Clear(r)
		if uid == "" {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, "cannot get userId form context")
			return
		}

		var req CreatePlayListRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, "cannot decode name|privacy|videosId struct")
			return
		}

		createDto := dto.CreatePlaylistDto{
			Name:    req.Name,
			OwnerId: uid,
			Privacy: req.Privacy,
			Videos:  req.VideosId,
		}
		playlistId, err := c.playListService.CreatePlaylist(createDto)
		if err != nil {
			//TODO add translate method
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, err.Error())
			return
		}

		c.BaseController.WriteJsonResponse(w, struct {
			Code    int    `json:"code"`
			Message string `json:"message"`
		}{1, "Success create playlist with id " + strconv.Itoa(int(playlistId))})
	}
}
// GetUserPlaylists returns an HTTP handler listing the playlists of the
// user named by the "ownerId" query parameter. Public and unlisted
// playlists are always included; private ones only when the request
// carries a valid token belonging to that same owner.
func (c *PlayListController) GetUserPlaylists() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		ownerId, ok := requestparser.GetStringQueryParameter(r, "ownerId")
		if !ok {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, "query parameter ownerId not set")
			return
		}

		visibility := []model.PrivacyType{model.Public, model.Unlisted}
		if token := r.Header.Get("Authorization"); token != "" {
			// Only the owner themselves may see private playlists.
			if userDto, valid := util.ValidateToken(token, c.authServerAddress); valid && userDto.UserId == ownerId {
				visibility = append(visibility, model.Private)
			}
		}

		userPlaylists, err := c.playListService.FindUserPlaylists(ownerId, visibility)
		if err != nil {
			//TODO add translate method
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, err.Error())
			return
		}
		c.BaseController.WriteJsonResponse(w, userPlaylists)
	}
}
// GetPlayList returns an HTTP handler that fetches one playlist by the
// {playlistId} route variable.
//
// Access control: a private playlist is served only to its owner, so the
// request must carry a valid Authorization token AND the token's user
// must equal the playlist owner. The original condition was
// `!ok && userDto.UserId != playlist.OwnerId`, which denied access only
// when BOTH checks failed — any authenticated non-owner could read a
// private playlist. Fixed to `||` so failing either check denies access.
func (c *PlayListController) GetPlayList() func(http.ResponseWriter, *http.Request) {
	return func(writer http.ResponseWriter, request *http.Request) {
		vars := mux.Vars(request)
		playlistId, err := strconv.Atoi(vars["playlistId"])
		if err != nil {
			c.BaseController.WriteResponse(writer, http.StatusBadRequest, false, "cannot get playlistId")
			return
		}
		playlist, err := c.playListService.FindPlaylist(playlistId)
		if err != nil {
			//TODO add translate method
			c.BaseController.WriteResponse(writer, http.StatusBadRequest, false, err.Error())
			return
		}
		if playlist.Privacy == model.Private {
			authorization := request.Header.Get("Authorization")
			userDto, ok := util.ValidateToken(authorization, c.authServerAddress)
			// Deny when the token is invalid OR the caller is not the owner.
			if !ok || userDto.UserId != playlist.OwnerId {
				c.BaseController.WriteResponse(writer, http.StatusUnauthorized, false, "Not grant permission")
				return
			}
		}
		c.BaseController.WriteJsonResponse(writer, struct {
			Id      string            `json:"id"`
			Name    string            `json:"name"`
			OwnerId string            `json:"owner_id"`
			Created time.Time         `json:"created"`
			Privacy model.PrivacyType `json:"privacy"`
			Videos  string            `json:"videos"`
		}{playlist.Id, playlist.Name, playlist.OwnerId, playlist.Created, playlist.Privacy, playlist.VideoString})
	}
}
// ModifyVideoToPlaylist returns an HTTP handler that adds or removes
// videos on the playlist named by {playlistId}, according to the action
// in the decoded PlayListVideoModificationRequest body. A duplicate-add
// maps to 409 Conflict; other service errors map to 400.
func (c *PlayListController) ModifyVideoToPlaylist() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		uid := context.Get(r, "userId").(string)
		context.Clear(r)
		if uid == "" {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, "cannot get userId form context")
			return
		}

		playlistId, err := strconv.Atoi(mux.Vars(r)["playlistId"])
		if err != nil {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, "playlistId not set")
			return
		}

		var modification PlayListVideoModificationRequest
		if err = json.NewDecoder(r.Body).Decode(&modification); err != nil {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, "cannot decode videos struct")
			return
		}
		action := modification.Action
		if action.String() == "" {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, "invalid action")
			return
		}

		err = c.playListService.ModifyVideosOnPlaylist(playlistId, uid, modification.Videos, action)
		switch {
		case err == nil:
			c.BaseController.WriteResponse(w, http.StatusOK, true, "success "+action.String()+" videos")
		case err == domain.ErrPlaylistDuplicate:
			c.BaseController.WriteResponse(w, http.StatusConflict, false, err.Error())
		default:
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, err.Error())
		}
	}
}
// ChangePrivacy returns an HTTP handler that sets the privacy level of
// the playlist named by {playlistId} to the numeric {privacyType} route
// variable, on behalf of the authenticated user.
//
// Fix: the error message for a malformed privacyType previously said
// "playlistId not set" (copy-paste of the preceding check); it now
// correctly reports the parameter that failed.
func (c *PlayListController) ChangePrivacy() func(http.ResponseWriter, *http.Request) {
	return func(writer http.ResponseWriter, request *http.Request) {
		userId := context.Get(request, "userId").(string)
		context.Clear(request)
		if len(userId) == 0 {
			c.BaseController.WriteResponse(writer, http.StatusBadRequest, false, "cannot get userId form context")
			return
		}
		vars := mux.Vars(request)
		playlistId, err := strconv.Atoi(vars["playlistId"])
		if err != nil {
			c.BaseController.WriteResponse(writer, http.StatusBadRequest, false, "playlistId not set")
			return
		}
		privacyType, err := strconv.Atoi(vars["privacyType"])
		if err != nil {
			c.BaseController.WriteResponse(writer, http.StatusBadRequest, false, "privacyType not set")
			return
		}
		err = c.playListService.ChangePrivacy(userId, playlistId, model.PrivacyType(privacyType))
		if err != nil {
			c.BaseController.WriteResponse(writer, http.StatusBadRequest, false, err.Error())
			return
		}
		c.BaseController.WriteResponse(writer, http.StatusOK, true, "success change playlist privacy with id "+strconv.Itoa(playlistId))
	}
}
// DeletePlaylist returns an HTTP handler that deletes the playlist named
// by {playlistId} on behalf of the authenticated user.
func (c *PlayListController) DeletePlaylist() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		uid := context.Get(r, "userId").(string)
		context.Clear(r)
		if uid == "" {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, "cannot get userId form context")
			return
		}

		playlistId, err := strconv.Atoi(mux.Vars(r)["playlistId"])
		if err != nil {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, "playlistId not set")
			return
		}

		if err = c.playListService.Delete(uid, playlistId); err != nil {
			c.BaseController.WriteResponse(w, http.StatusBadRequest, false, err.Error())
			return
		}
		c.BaseController.WriteResponse(w, http.StatusOK, true, "success delete playlist with id "+strconv.Itoa(playlistId))
	}
}
|
package account
import(
"github.com/tealeg/xlsx"
acc "entity/accountentity"
"util"
//"fmt"
"strings"
)
// AccountColumnParser reads an Excel workbook that defines database
// columns and exposes the result through two lookup maps.
type AccountColumnParser struct {
	//each sheet define some tables, sheet-table-columns
	//use to create the database table, the key is table/common name
	// (outer key: sheet name; inner key: common/table group name)
	CategoryColumnMap map[string]map[string][]*acc.Column
	//use to parse the excel/html data, where the key is "name" column in excel
	//It will be a Chinese name as utf-8 encoding in our definition.
	ColumnMap map[string]*acc.Column
	// logger is (re)initialized by Parse.
	logger *util.StockLog
}
// GetSheetMap returns the table->columns map recorded for sheetname, or
// a fresh empty map when that sheet has not been parsed.
func (p *AccountColumnParser) GetSheetMap(sheetname string) map[string][]*acc.Column {
	if sheetMap, ok := p.CategoryColumnMap[sheetname]; ok {
		return sheetMap
	}
	return make(map[string][]*acc.Column)
}
// Parse opens the workbook at filename and parses every sheet into the
// parser's column maps, discarding any previously parsed state. Errors
// opening the file are logged and the maps are left untouched.
func (p *AccountColumnParser) Parse(filename string) {
	p.logger = util.NewLog()

	file, err := xlsx.OpenFile(filename)
	if err != nil {
		p.logger.Error("Cannot open the excel:", filename, err)
		return
	}

	p.CategoryColumnMap = make(map[string]map[string][]*acc.Column)
	p.ColumnMap = make(map[string]*acc.Column)
	for idx, sheet := range file.Sheets {
		p.logger.Info("Start to parse sheet: ", idx, sheet.Name)
		p.ParseRow(sheet.Name, sheet.Rows)
	}
}
// ParseRow parses one sheet's rows into column definitions.
//
// Row 0 is a header that maps column titles ("name", "name_en", "column",
// "type", "maxsize") to cell indexes. Each later row describes one
// column. A row whose Type is acc.Common or acc.Table opens a new group
// named by its Column value; subsequent normal rows are appended to that
// group and also indexed by their Name in p.ColumnMap. The grouped
// result is stored in p.CategoryColumnMap under sheetname.
//
// Fixes: the two branches of the group-append if/else were identical
// apart from an unnecessary make (append handles a nil slice), and
// direct cols[idx] indexing panicked on rows shorter than the header.
func (p *AccountColumnParser) ParseRow(sheetname string, rows []*xlsx.Row) {
	columnMap := make(map[string]int)
	isCommon := false
	isTable := false
	isNormal := false
	sheettabcolmap := make(map[string][]*acc.Column)
	var parentColName string
	var nmidx, nmeidx, colidx, typeidx, maxszidx int
	for ridx, row := range rows {
		if ridx == 0 {
			// Header row: remember the cell index of each column title.
			for cidx, cell := range row.Cells {
				columnMap[strings.TrimSpace(cell.String())] = cidx
			}
			nmidx = columnMap["name"]
			nmeidx = columnMap["name_en"]
			colidx = columnMap["column"]
			typeidx = columnMap["type"]
			maxszidx = columnMap["maxsize"]
			continue
		}

		cols := make([]string, 0, len(row.Cells))
		for _, cell := range row.Cells {
			cols = append(cols, strings.TrimSpace(cell.String()))
		}
		// cellAt guards rows that have fewer cells than the header
		// declared; missing cells read as the empty string instead of
		// panicking with an index-out-of-range.
		cellAt := func(idx int) string {
			if idx >= 0 && idx < len(cols) {
				return cols[idx]
			}
			return ""
		}

		size := 0
		if len(cols) > maxszidx {
			size = util.ToInt(cols[maxszidx])
		}
		column := acc.Column{
			Name:    cellAt(nmidx),
			Name_en: cellAt(nmeidx),
			Column:  cellAt(colidx),
			Type:    cellAt(typeidx),
			Maxsize: size,
		}

		// Track which group (common/table) the following normal rows
		// belong to. The flags persist across iterations on purpose.
		switch column.Type {
		case acc.Common:
			isCommon, isTable, isNormal = true, false, false
			parentColName = column.Column
		case acc.Table:
			isCommon, isTable, isNormal = false, true, false
			parentColName = column.Column
		default:
			isNormal = true
		}

		if (isCommon || isTable) && isNormal {
			// append works on the nil slice of a missing key, so no
			// explicit ok-check/make is needed.
			sheettabcolmap[parentColName] = append(sheettabcolmap[parentColName], &column)
		}
		if isNormal {
			p.ColumnMap[column.Name] = &column
		}
	}
	p.CategoryColumnMap[sheetname] = sheettabcolmap
}
// NewAccountColumnParser returns an empty parser; call Parse to fill its maps.
func NewAccountColumnParser() *AccountColumnParser {
	return new(AccountColumnParser)
}
|
package common
// TopicType names an event-bus topic.
type TopicType string

//Topic type
// Topics published on the internal event bus. The string values are the
// wire/topic names and must not change.
const (
	EventPublish           TopicType = "publish"
	EventConfirmReq        TopicType = "confirmReq"
	EventConfirmAck        TopicType = "confirmAck"
	EventSyncBlock         TopicType = "syncBlock"
	EventConfirmedBlock    TopicType = "confirmedBlock"
	EventBroadcast         TopicType = "broadcast"
	EventSendMsgToPeers    TopicType = "sendMsgToPeers"
	EventPeersInfo         TopicType = "peersInfo"
	EventSyncing           TopicType = "syncing"
	EventAddRelation       TopicType = "addRelation"
	EventDeleteRelation    TopicType = "deleteRelation"
	EventGenerateBlock     TopicType = "generateBlock"
	EventRollbackUnchecked TopicType = "rollbackUnchecked"
	EventRestartChain      TopicType = "restartChain"
	EventSendMsgToSingle   TopicType = "sendMsgToSingle"
	EventAddP2PStream      TopicType = "addP2PStream"
	EventDeleteP2PStream   TopicType = "deleteP2PStream"
	EventPovPeerStatus     TopicType = "povPeerStatus"
	EventPovRecvBlock      TopicType = "povRecvBlock"
	EventPovBulkPullReq    TopicType = "povBulkPullReq"
	EventPovBulkPullRsp    TopicType = "povBulkPullRsp"
	EventPovSyncState      TopicType = "povSyncState"
)
// SyncState describes the progress of block synchronization.
type SyncState uint

// Synchronization states, in lifecycle order. The iota values index
// directly into syncStatus, so keep both in sync.
const (
	SyncNotStart SyncState = iota // synchronization has not begun
	Syncing                       // synchronization in progress
	Syncdone                      // synchronization finished
)

// syncStatus maps each SyncState to its human-readable name.
var syncStatus = [...]string{
	SyncNotStart: "SyncNotStart",
	Syncing:      "Synchronising",
	Syncdone:     "Syncdone",
}

// String returns the human-readable name of the state, or a fallback
// string for values outside the known range.
func (s SyncState) String() string {
	if s > Syncdone {
		return "unknown sync state"
	}
	return syncStatus[s]
}

// IsSyncExited reports whether synchronization has finished.
// (Simplified from an if/else that returned the comparison's value.)
func (s SyncState) IsSyncExited() bool {
	return s == Syncdone
}
|
package main
import "fmt"
// main2 demonstrates fmt.Printf's return values: the number of bytes
// written and any write error (nil on success to stdout).
func main2() {
	count, err := fmt.Printf("hello world\n")
	fmt.Printf("%d\n", count)
	fmt.Println(err)
}
|
package smhi
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"testing"
)
// baseURLPath is a non-empty prefix so tests catch accidental use of
// absolute (prefix-less) URLs in client requests.
var baseURLPath = "/api-test"

// setup starts a local test HTTP server and returns a Client pointed at
// it, the mux to register handlers on, the server's base URL, and a
// teardown func the caller must defer.
//
// Fixes: the named result serverURL was never assigned (callers always
// received ""); it now returns server.URL. The local variable `url`
// also shadowed the net/url package — renamed to u.
func setup() (client *Client, mux *http.ServeMux, serverURL string, teardown func()) {
	mux = http.NewServeMux()
	apiHandler := http.NewServeMux()
	apiHandler.Handle(baseURLPath+"/", http.StripPrefix(baseURLPath, mux))
	server := httptest.NewServer(apiHandler)

	client = NewClient(nil)
	u, _ := url.Parse(server.URL + baseURLPath + "/")
	client.BaseURL = u

	return client, mux, server.URL, server.Close
}
func testMethod(t *testing.T, r *http.Request, want string) {
if got := r.Method; got != want {
t.Errorf("Request method: %v, want %v", got, want)
}
}
// TestNewClient checks that a zero-config client points at the default base URL.
func TestNewClient(t *testing.T) {
	c := NewClient(nil)
	got, want := c.BaseURL.String(), baseURL
	if got != want {
		t.Errorf("NewClient BaseURL is %v, want %v", got, want)
	}
}
// TestNewRequest checks that a relative path is expanded against the
// client's base URL.
func TestNewRequest(t *testing.T) {
	c := NewClient(nil)

	inURL := "/test"
	outURL := baseURL + "test"
	req, _ := c.NewRequest("GET", inURL)

	// The relative URL must have been expanded to an absolute one.
	if got := req.URL.String(); got != outURL {
		t.Errorf("NewRequest(%q) URL is %v, want %v", inURL, got, outURL)
	}
}
// TestDo verifies that Do issues a GET against the test server and
// decodes the JSON response body into the provided value.
//
// Fix: the original discarded Do's error, so a transport failure would
// have surfaced as a confusing DeepEqual mismatch; it is now checked.
func TestDo(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()

	type test struct {
		Name string
	}

	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "GET")
		fmt.Fprint(w, `{"Name":"test"}`)
	})

	req, _ := client.NewRequest("GET", ".")
	body := new(test)
	if _, err := client.Do(context.Background(), req, body); err != nil {
		t.Fatalf("Do returned unexpected error: %v", err)
	}

	want := &test{"test"}
	if !reflect.DeepEqual(body, want) {
		t.Errorf("Response body = %v, want %v", body, want)
	}
}
// TestDo_httpError checks how Do surfaces an HTTP 400 from the server:
// the response is returned with its status code intact.
// NOTE(review): Do's error return is deliberately discarded here and the
// commented-out block suggests the error contract is unsettled — it is
// unclear from this file whether Do returns a non-nil error for non-2xx
// responses; confirm before tightening this test.
func TestDo_httpError(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()

	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Bad Request", 400)
	})

	req, _ := client.NewRequest("GET", ".")
	resp, _ := client.Do(context.Background(), req, nil)

	// TODO: Needs checking err flow
	//if err == nil {
	//	t.Fatalf("Expected error, got %v instead.", resp)
	//}
	if resp.StatusCode != 400 {
		t.Errorf("Expected HTTP 400 error, got %d status code.", resp.StatusCode)
	}
}
|
package main
// CopyBuffer holds cards staged for copy/cut-and-paste, together with a
// serialized snapshot of each card taken at copy time.
type CopyBuffer struct {
	// CutMode distinguishes cut (move) from plain copy.
	CutMode bool
	// Cards are the staged cards, in the order they were copied.
	Cards []*Card
	// CardsToSerialized maps each staged card to its Serialize() output
	// captured when Copy was called.
	CardsToSerialized map[*Card]string
}
// NewCopyBuffer returns an empty, ready-to-use buffer.
func NewCopyBuffer() *CopyBuffer {
	b := new(CopyBuffer)
	b.Clear()
	return b
}
// Clear drops all staged cards and their serialized snapshots, leaving
// empty (non-nil) containers ready for reuse.
func (buffer *CopyBuffer) Clear() {
	buffer.Cards = make([]*Card, 0)
	buffer.CardsToSerialized = make(map[*Card]string)
}
// Copy stages card in the buffer and records its serialized form at this
// moment, so later mutations of the card do not affect the paste data.
func (buffer *CopyBuffer) Copy(card *Card) {
	buffer.Cards = append(buffer.Cards, card)
	buffer.CardsToSerialized[card] = card.Serialize()
}
// Index returns the position of card in the buffer (pointer identity),
// or -1 when the card is not staged.
func (buffer *CopyBuffer) Index(card *Card) int {
	for i := range buffer.Cards {
		if buffer.Cards[i] == card {
			return i
		}
	}
	return -1
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"context"
"fmt"
"path"
"strings"
"sync"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
)
// RegisterTaskType selects the sub-prefix path component used when
// building a task-register key.
type RegisterTaskType int

// Task kinds that register themselves in etcd.
const (
	RegisterRestore RegisterTaskType = iota
	RegisterLightning
	RegisterImportInto
)

// String returns the key path component for this task type; unknown
// values fall back to "default".
func (t RegisterTaskType) String() string {
	switch t {
	case RegisterRestore:
		return "restore"
	case RegisterLightning:
		return "lightning"
	case RegisterImportInto:
		return "import-into"
	default:
		return "default"
	}
}
// The key format should be {RegisterImportTaskPrefix}/{RegisterTaskType}/{taskName}
const (
	// RegisterImportTaskPrefix is the prefix of the key for task register
	// todo: remove "/import" suffix, it's confusing to have a key like "/tidb/brie/import/restore/restore-xxx"
	RegisterImportTaskPrefix = "/tidb/brie/import"

	// RegisterRetryInternal is the delay between retries after a failed
	// etcd grant/put/keepalive. NOTE(review): the name is a typo for
	// "Interval", but it is exported so renaming would break callers.
	RegisterRetryInternal = 10 * time.Second
	// defaultTaskRegisterTTL is the lease TTL used by NewTaskRegister.
	defaultTaskRegisterTTL = 3 * time.Minute // 3 minutes
)
// TaskRegister can register the task to PD with a lease, keeping the key
// alive either via a background goroutine (RegisterTask) or via periodic
// explicit refreshes (RegisterTaskOnce). The two modes are exclusive.
type TaskRegister interface {
	// Close closes the background task if using RegisterTask
	// and revoke the lease.
	// NOTE: we don't close the etcd client here, call should do it.
	Close(ctx context.Context) (err error)
	// RegisterTask firstly put its key to PD with a lease,
	// and start to keepalive the lease in the background.
	// DO NOT mix calls to RegisterTask and RegisterTaskOnce.
	RegisterTask(c context.Context) error
	// RegisterTaskOnce put its key to PD with a lease if the key does not exist,
	// else we refresh the lease.
	// you have to call this method periodically to keep the lease alive.
	// DO NOT mix calls to RegisterTask and RegisterTaskOnce.
	RegisterTaskOnce(ctx context.Context) error
}
// taskRegister is the etcd-backed implementation of TaskRegister.
type taskRegister struct {
	client    *clientv3.Client
	ttl       time.Duration
	secondTTL int64 // ttl expressed in whole seconds, as etcd's Grant expects
	key       string
	// leaseID used to revoke the lease
	curLeaseID clientv3.LeaseID
	// wg tracks the background keepalive goroutine started by RegisterTask.
	wg     sync.WaitGroup
	cancel context.CancelFunc
}
// NewTaskRegisterWithTTL builds a TaskRegister whose etcd key is
// {RegisterImportTaskPrefix}/{tp}/{taskName} and whose lease lives for ttl.
func NewTaskRegisterWithTTL(client *clientv3.Client, ttl time.Duration, tp RegisterTaskType, taskName string) TaskRegister {
	tr := &taskRegister{
		client:     client,
		ttl:        ttl,
		curLeaseID: clientv3.NoLease,
	}
	tr.secondTTL = int64(ttl / time.Second)
	tr.key = path.Join(RegisterImportTaskPrefix, tp.String(), taskName)
	return tr
}
// NewTaskRegister build a TaskRegister with key format
// {RegisterTaskPrefix}/{RegisterTaskType}/{taskName} and the default
// 3-minute lease TTL (defaultTaskRegisterTTL).
func NewTaskRegister(client *clientv3.Client, tp RegisterTaskType, taskName string) TaskRegister {
	return NewTaskRegisterWithTTL(client, defaultTaskRegisterTTL, tp, taskName)
}
// Close implements the TaskRegister interface: it stops the background
// keepalive goroutine (if RegisterTask was used), waits for it to exit,
// and revokes the current lease so the key disappears promptly.
func (tr *taskRegister) Close(ctx context.Context) (err error) {
	// not needed if using RegisterTaskOnce
	if tr.cancel != nil {
		tr.cancel()
	}
	tr.wg.Wait()
	if tr.curLeaseID != clientv3.NoLease {
		_, err = tr.client.Lease.Revoke(ctx, tr.curLeaseID)
		if err != nil {
			// Best effort: log and fall through, returning the error.
			log.Warn("failed to revoke the lease", zap.Error(err), zap.Int64("lease-id", int64(tr.curLeaseID)))
		}
	}
	return err
}
// grant asks etcd for a new lease of tr.secondTTL seconds, converting an
// in-band error string on the response into a Go error.
func (tr *taskRegister) grant(ctx context.Context) (*clientv3.LeaseGrantResponse, error) {
	resp, err := tr.client.Lease.Grant(ctx, tr.secondTTL)
	if err != nil {
		return nil, err
	}
	if resp.Error != "" {
		return nil, errors.New(resp.Error)
	}
	return resp, nil
}
// RegisterTaskOnce implements the TaskRegister interface: it creates the
// key with a fresh lease when absent, otherwise refreshes the existing
// key's lease. Callers must invoke it periodically (no background
// keepalive is started).
func (tr *taskRegister) RegisterTaskOnce(ctx context.Context) error {
	resp, err := tr.client.Get(ctx, tr.key)
	if err != nil {
		return errors.Trace(err)
	}
	if len(resp.Kvs) == 0 {
		// Key absent: grant a new lease and attach the key to it.
		lease, err2 := tr.grant(ctx)
		if err2 != nil {
			return errors.Annotatef(err2, "failed grant a lease")
		}
		tr.curLeaseID = lease.ID
		_, err2 = tr.client.KV.Put(ctx, tr.key, "", clientv3.WithLease(lease.ID))
		if err2 != nil {
			return errors.Trace(err2)
		}
	} else {
		// if the task is run distributively, like IMPORT INTO, we should refresh the lease ID,
		// in case the owner changed during the registration, and the new owner create the key.
		tr.curLeaseID = clientv3.LeaseID(resp.Kvs[0].Lease)
		_, err2 := tr.client.Lease.KeepAliveOnce(ctx, tr.curLeaseID)
		if err2 != nil {
			return errors.Trace(err2)
		}
	}
	return nil
}
// RegisterTask implements the TaskRegister interface: it grants a lease,
// puts the key under it, starts etcd's client-side keepalive stream, and
// launches keepaliveLoop to consume it until Close cancels the context.
func (tr *taskRegister) RegisterTask(c context.Context) error {
	cctx, cancel := context.WithCancel(c)
	tr.cancel = cancel
	lease, err := tr.grant(cctx)
	if err != nil {
		return errors.Annotatef(err, "failed grant a lease")
	}
	tr.curLeaseID = lease.ID
	_, err = tr.client.KV.Put(cctx, tr.key, "", clientv3.WithLease(lease.ID))
	if err != nil {
		return errors.Trace(err)
	}

	// KeepAlive interval equals to ttl/3
	respCh, err := tr.client.Lease.KeepAlive(cctx, lease.ID)
	if err != nil {
		return errors.Trace(err)
	}
	tr.wg.Add(1)
	go tr.keepaliveLoop(cctx, respCh)
	return nil
}
// keepaliveLoop consumes keepalive responses on ch and, whenever the
// stream closes (e.g. etcd restart or network blip), repairs the
// registration: if too little TTL remains it grants a fresh lease and
// re-puts the key, then recreates the keepalive stream. It exits only
// when ctx is canceled (from Close). The failpoints simulate stream
// loss and grant/put failures in tests.
func (tr *taskRegister) keepaliveLoop(ctx context.Context, ch <-chan *clientv3.LeaseKeepAliveResponse) {
	defer tr.wg.Done()
	// Never let the "time left" safety margin drop below 20s even for
	// short TTLs; otherwise a slow recreate could let the lease expire.
	const minTimeLeftThreshold time.Duration = 20 * time.Second
	var (
		timeLeftThreshold time.Duration = tr.ttl / 4
		lastUpdateTime    time.Time     = time.Now()
		err               error
	)
	if timeLeftThreshold < minTimeLeftThreshold {
		timeLeftThreshold = minTimeLeftThreshold
	}
	failpoint.Inject("brie-task-register-always-grant", func(_ failpoint.Value) {
		timeLeftThreshold = tr.ttl
	})
	for {
	CONSUMERESP:
		// Drain keepalive responses until the stream closes or ctx ends.
		for {
			failpoint.Inject("brie-task-register-keepalive-stop", func(_ failpoint.Value) {
				if _, err = tr.client.Lease.Revoke(ctx, tr.curLeaseID); err != nil {
					log.Warn("brie-task-register-keepalive-stop", zap.Error(err))
				}
			})
			select {
			case <-ctx.Done():
				return
			case _, ok := <-ch:
				if !ok {
					// Channel closed: fall through to recreation.
					break CONSUMERESP
				}
				lastUpdateTime = time.Now()
			}
		}
		log.Warn("the keepalive channel is closed, try to recreate it")
		needReputKV := false
	RECREATE:
		// Retry until lease+key+keepalive are all healthy again.
		for {
			timeGap := time.Since(lastUpdateTime)
			if tr.ttl-timeGap <= timeLeftThreshold {
				// Too close to expiry to trust the old lease: grant anew.
				lease, err := tr.grant(ctx)
				failpoint.Inject("brie-task-register-failed-to-grant", func(_ failpoint.Value) {
					err = errors.New("failpoint-error")
				})
				if err != nil {
					// Prefer exiting on cancellation over retrying.
					select {
					case <-ctx.Done():
						return
					default:
					}
					log.Warn("failed to grant lease", zap.Error(err))
					time.Sleep(RegisterRetryInternal)
					continue
				}
				tr.curLeaseID = lease.ID
				lastUpdateTime = time.Now()
				needReputKV = true
			}
			if needReputKV {
				// if the lease has expired, need to put the key again
				_, err := tr.client.KV.Put(ctx, tr.key, "", clientv3.WithLease(tr.curLeaseID))
				failpoint.Inject("brie-task-register-failed-to-reput", func(_ failpoint.Value) {
					err = errors.New("failpoint-error")
				})
				if err != nil {
					select {
					case <-ctx.Done():
						return
					default:
					}
					log.Warn("failed to put new kv", zap.Error(err))
					time.Sleep(RegisterRetryInternal)
					continue
				}
				needReputKV = false
			}
			// recreate keepalive
			ch, err = tr.client.Lease.KeepAlive(ctx, tr.curLeaseID)
			if err != nil {
				select {
				case <-ctx.Done():
					return
				default:
				}
				log.Warn("failed to create new kv", zap.Error(err))
				time.Sleep(RegisterRetryInternal)
				continue
			}
			break RECREATE
		}
	}
}
// RegisterTask saves a registered task's information as read from etcd.
type RegisterTask struct {
	Key     string // full etcd key of the task
	LeaseID int64  // lease attached to the key
	TTL     int64  // remaining time-to-live of the lease, in seconds
}

// MessageToUser renders the task as a human-readable message; the lease
// id is printed in hex, matching etcd's own display convention.
func (task RegisterTask) MessageToUser() string {
	return fmt.Sprintf("[ key: %s, lease-id: %x, ttl: %ds ]", task.Key, task.LeaseID, task.TTL)
}

// RegisterTasksList is a collection of registered tasks.
type RegisterTasksList struct {
	Tasks []RegisterTask
}

// MessageToUser renders every task's message, each followed by ", ".
func (list RegisterTasksList) MessageToUser() string {
	messages := make([]string, 0, len(list.Tasks))
	for _, task := range list.Tasks {
		messages = append(messages, task.MessageToUser())
	}
	if len(messages) == 0 {
		return ""
	}
	// Join then re-append the separator to preserve the historical
	// trailing ", " in the output.
	return strings.Join(messages, ", ") + ", "
}

// Empty reports whether the list holds no tasks.
func (list RegisterTasksList) Empty() bool {
	return len(list.Tasks) == 0
}
// GetImportTasksFrom try to get all the import tasks with prefix
// `RegisterTaskPrefix`, resolving each key's remaining lease TTL and
// skipping keys whose lease has already expired.
func GetImportTasksFrom(ctx context.Context, client *clientv3.Client) (RegisterTasksList, error) {
	resp, err := client.KV.Get(ctx, RegisterImportTaskPrefix, clientv3.WithPrefix())
	if err != nil {
		return RegisterTasksList{}, errors.Trace(err)
	}

	list := RegisterTasksList{
		Tasks: make([]RegisterTask, 0, len(resp.Kvs)),
	}
	for _, kv := range resp.Kvs {
		leaseResp, err := client.Lease.TimeToLive(ctx, clientv3.LeaseID(kv.Lease))
		if err != nil {
			return list, errors.Annotatef(err, "failed to get time-to-live of lease: %x", kv.Lease)
		}
		// the lease has expired
		if leaseResp.TTL <= 0 {
			continue
		}
		list.Tasks = append(list.Tasks, RegisterTask{
			Key:     string(kv.Key),
			LeaseID: kv.Lease,
			TTL:     leaseResp.TTL,
		})
	}
	return list, nil
}
|
package implwin
import (
"fmt"
"testing"
"draw"
"win"
)
// TestGetPenStyleWin checks the mapping from draw.PenStyle values to the
// combined Win32 PS_* flag words.
func TestGetPenStyleWin(t *testing.T) {
	type testCase struct {
		style    draw.PenStyle
		styleWin uint32
	}
	cases := []testCase{
		{draw.PenStyle{}, win.PS_COSMETIC | win.PS_SOLID | win.PS_ENDCAP_ROUND | win.PS_JOIN_ROUND},
		{draw.PenStyle{draw.PEN_TYPE_GEOMETRIC, draw.PEN_DASH, draw.PEN_CAP_SQUARE, draw.PEN_JOIN_MITER}, win.PS_GEOMETRIC | win.PS_DASH | win.PS_ENDCAP_SQUARE | win.PS_JOIN_MITER},
		{draw.PenStyle{draw.PEN_TYPE_COSMETIC, draw.PEN_DASH_DOT_DOT, draw.PEN_CAP_FLAT, draw.PEN_JOIN_BEVEL}, win.PS_COSMETIC | win.PS_DASHDOTDOT | win.PS_ENDCAP_FLAT | win.PS_JOIN_BEVEL},
	}

	const prefix = "TestGetPenStyleWin"
	for i, tc := range cases {
		got := getPenStyleWin(tc.style)
		if got != tc.styleWin {
			fmt.Println("style =", tc.style)
			t.Errorf("%s[%d] failed: winStyle = 0x%08x, wanted = 0x%08x\n", prefix, i, got, tc.styleWin)
			continue
		}
	}
}
|
package config
import (
"os"
"fmt"
"io/ioutil"
"encoding/json"
"portal/util"
)
// App basic config.
// NOTE(review): TokenSecrect is a typo for TokenSecret, but the field is
// exported and presumably matched against JSON config keys — renaming
// would be a breaking change; flagging only.
type appConfig struct {
	TokenSecrect string // secret used to sign tokens
	TokenMaxAge  int    // token lifetime
	AesKey       string // AES encryption key
}
// DB connect config.
type dbConfig struct {
	Username string
	Password string
	Host     string
	Port     int
	Database string
	// URL is the assembled DSN, built by initMysql from the fields above.
	URL string
}
// Redis connect config.
type redisConfig struct {
	Host     string
	Port     int
	Password string
	// URL is "host:port", built by initRedis from the fields above.
	URL       string
	MaxIdle   int // connection-pool idle limit
	MaxActive int // connection-pool active limit
}
// init variable
// Package-level configuration singletons, populated by the init
// functions below from config.json.
var (
	AppConfig   appConfig
	MysqlConfig dbConfig
	RedisConfig redisConfig
)

// jsonData holds the parsed JSON config file (was: "解析json配置文件"
// — "parse the json config file"). It is package-level mutable state
// read by the other init* functions after initJSON fills it.
var jsonData map[string]interface{}
// initJSON reads ./config/config.json into the package-level jsonData
// map, exiting the process on a read or parse failure (configuration is
// mandatory at startup).
//
// Fix: the original converted the file bytes to a string and immediately
// back to []byte before unmarshalling — a pointless round-trip copy,
// removed here.
func initJSON() {
	bytes, err := ioutil.ReadFile("./config/config.json")
	if err != nil {
		fmt.Println("ReadFile: ", err.Error())
		os.Exit(-1)
	}
	if err := json.Unmarshal(bytes, &jsonData); err != nil {
		fmt.Println("invalid config: ", err.Error())
		os.Exit(-1)
	}
}
// init database params
// initMysql fills MysqlConfig from the "mysql" section of jsonData and
// assembles the MySQL DSN. Must run after initJSON.
// (Fix: added the gofmt-required space after MysqlConfig.Port in the
// argument list; no behavioral change.)
func initMysql() {
	util.SetStructByJSON(&MysqlConfig, jsonData["mysql"].(map[string]interface{}))
	MysqlConfig.URL = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
		MysqlConfig.Username, MysqlConfig.Password, MysqlConfig.Host, MysqlConfig.Port, MysqlConfig.Database)
}
// init app
// initApp fills AppConfig from the "app" section of jsonData.
// Must run after initJSON; panics if the "app" key is absent or not an
// object (unchecked type assertion).
func initApp() {
	util.SetStructByJSON(&AppConfig, jsonData["app"].(map[string]interface{}))
}
// init redis config
// initRedis fills RedisConfig from the "redis" section of jsonData and
// derives the "host:port" URL. Must run after initJSON.
// NOTE(review): fmt.Print emits no trailing newline, so the next log
// line will run on — probably unintended; confirm before changing output.
func initRedis() {
	util.SetStructByJSON(&RedisConfig, jsonData["redis"].(map[string]interface{}))
	RedisConfig.URL = fmt.Sprintf("%s:%d", RedisConfig.Host, RedisConfig.Port)
	fmt.Print("redis url: ", RedisConfig.URL)
}
// init loads all configuration at package import time. Order matters:
// initJSON must populate jsonData before the section-specific loaders run.
func init() {
	initJSON()
	initMysql()
	initApp()
	initRedis()
}
package rotate_test
import (
"fmt"
"io/ioutil"
"os"
"github.com/bingoohuang/golog/pkg/rotate"
)
// ExampleNew exercises rotate.New twice against the same path and lists
// the files produced.
// NOTE(review): this Example has no trailing "// Output:" comment, so
// `go test` compiles but never verifies it — presumably intentional
// because the rotated file names/sizes are time-dependent; confirm.
func ExampleNew() {
	logDir, err := ioutil.TempDir("", "rotate_test")
	if err != nil {
		fmt.Println("could not create log directory ", err)
		return
	}
	logPath := fmt.Sprintf("%s/test.log", logDir)

	// Open, write, and close the same log path twice to exercise reopening.
	for i := 0; i < 2; i++ {
		writer, err := rotate.New(logPath)
		if err != nil {
			fmt.Println("Could not open log file ", err)
			return
		}

		n, err := writer.Write([]byte("test"))
		if err != nil || n != 4 {
			fmt.Println("Write failed ", err, " number written ", n)
			return
		}
		err = writer.Close()
		if err != nil {
			fmt.Println("Close failed ", err)
			return
		}
	}

	files, err := ioutil.ReadDir(logDir)
	if err != nil {
		fmt.Println("ReadDir failed ", err)
		return
	}
	for _, file := range files {
		fmt.Println(file.Name(), file.Size())
	}

	err = os.RemoveAll(logDir)
	if err != nil {
		fmt.Println("RemoveAll failed ", err)
		return
	}
}
|
package config
import (
"encoding/json"
"github.com/hunterhug/fafacms/core/util/mail"
"github.com/hunterhug/fafacms/core/util/oss"
"github.com/hunterhug/fafacms/core/util/rdb"
"github.com/hunterhug/fafacms/core/util/session"
"github.com/alexedwards/scs"
)
// Package-level singletons shared across the application: the loaded
// configuration, the database handle, and the session manager.
var (
	FafaConfig     *Config
	FafaRdb        *rdb.MyDb
	FafaSessionMgr *scs.Manager
)
// Config aggregates every subsystem's configuration; it is what the
// application's config file deserializes into.
type Config struct {
	DefaultConfig MyConfig
	OssConfig     oss.Key
	DbConfig      rdb.MyDbConfig
	SessionConfig session.MyRedisConf
	// MailConfig is serialized under the "Email" JSON key.
	MailConfig mail.Sender `json:"Email"`
}
// MyConfig holds the application's own (non-dependency) settings.
type MyConfig struct {
	WebPort     string // address the HTTP server listens on
	LogPath     string
	StoragePath string
	LogDebug    bool // enable debug-level logging
	StorageOss  bool // store files in OSS instead of local disk
	CloseRegister bool // disable new-user registration
}
// JsonOutConfig serializes config to its JSON string representation.
func JsonOutConfig(config Config) (string, error) {
	data, err := json.Marshal(config)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
|
package common
import (
"testing"
)
// TestDedup checks that Dedup forwards first-seen message IDs in order
// and silently drops IDs it has already passed through.
// NOTE(review): the final assertion implicitly relies on the duplicate
// "3" and "2" being swallowed so that "4" is the next value received;
// if Dedup buffered instead of dropping, this would deadlock rather
// than fail cleanly.
func TestDedup(t *testing.T) {
	// Buffered so the test can send without a concurrent receiver.
	inChannel := make(chan LogMessage, 10)
	noDupChannel := Dedup(inChannel)

	// First occurrences must all pass through, in order.
	inChannel <- LogMessage{
		ID: "1",
	}
	inChannel <- LogMessage{
		ID: "2",
	}
	inChannel <- LogMessage{
		ID: "3",
	}

	if msg := <-noDupChannel; msg.ID != "1" {
		t.Fatal("Didn't get first message")
	}
	if msg := <-noDupChannel; msg.ID != "2" {
		t.Fatal("Didn't get second message")
	}
	if msg := <-noDupChannel; msg.ID != "3" {
		t.Fatal("Didn't get third message")
	}

	// Repeats of "3" and "2" must be dropped; only the new "4" arrives.
	inChannel <- LogMessage{
		ID: "3",
	}
	inChannel <- LogMessage{
		ID: "2",
	}
	inChannel <- LogMessage{
		ID: "4",
	}

	if msg := <-noDupChannel; msg.ID != "4" {
		t.Fatal("Should have received 4")
	}
}
|
package sim
import (
"math"
"time"
)
// Job models a simulated person's employment: counters of consecutive
// months employed/unemployed and the current monthly salary.
type Job struct {
	*Simulacrum
	// employed counts months in the current job; negative values appear
	// to encode months of pending unemployment — TODO confirm against
	// the full Employed logic.
	employed   int
	unemployed int
	// monthly is the current monthly salary.
	monthly float64
}
// NewJob creates a Job attached to sim, starting employed with zero
// tenure and a monthly salary of baseAnnualSalary/12.
func NewJob(sim *Simulation, baseAnnualSalary float64) *Job {
	j := &Job{Simulacrum: NewSimulacrum(sim)}
	j.monthly = baseAnnualSalary / 12.0
	// employed and unemployed start at their zero values.
	return j
}
// Employed reports whether the simulated person draws a salary for the
// given month. Retirement always ends employment. Otherwise the risk of
// losing the current job grows with tenure and worsens with the market:
// harsher downturns use shorter tenure thresholds, longer re-hire gaps
// and pay cuts. In a booming market a long-tenured worker may instead
// jump to a better-paying job.
func (j *Job) Employed(date time.Time) bool {
	if j.Retired(date) {
		j.unemployed = 0
		return false
	}
	// Negative tenure means "between jobs": still waiting to restart.
	if j.employed < 0 {
		return false
	}
	bc := j.Sim().Economy.MarketReturn(date)
	switch {
	case bc < -10.0:
		// Severe downturn: short tenure threshold, long gap, 10% pay cut.
		return !j.loseJob(date, 12.0, 4.0, -4.0, 2.0, -1.0, 0.9, " 10% Pay Cut", 5)
	case bc < -2.0:
		return !j.loseJob(date, 24.0, 6.0, -2.0, 2.0, -1.0, 0.95, " 5% Pay Cut", 4)
	case bc < 2.0:
		return !j.loseJob(date, 36.0, 6.0, -2.0, 1.0, -1.0, 1.0, "", 0)
	case bc < 8.0:
		return !j.loseJob(date, 42.0, 6.0, -2.0, 1.0, 0.0, 1.0, "", 0)
	}
	// Booming market: a long-tenured worker may switch to a better job
	// (5% raise, tenure reset) unless within two years of retirement.
	if j.employed > int(math.Round(j.Gauss(42.0, 6.0))) {
		if j.Age(date) < j.Sim().RetirementAge()-2.0 {
			j.employed = 0
			j.monthly *= 1.05
			j.AddEvent(date, j.Sim().Name+" Better Job", 3)
		}
	}
	return true
}

// loseJob consolidates the four previously duplicated downturn branches.
// It draws a tenure threshold from j.Gauss(tenureMean, tenureSD); when
// the current tenure exceeds it the job is lost: tenure becomes a
// negative "months until re-hire" drawn from j.Gauss(gapMean, gapSD) and
// capped at gapCap, pay is multiplied by payFactor, and events are
// recorded. A re-hire that would land within half a year of retirement
// is pushed out by a further 12 months instead. Reports whether the job
// was lost this month.
func (j *Job) loseJob(date time.Time, tenureMean, tenureSD, gapMean, gapSD, gapCap, payFactor float64, cutLabel string, cutSeverity int) bool {
	if j.employed <= int(math.Round(j.Gauss(tenureMean, tenureSD))) {
		return false
	}
	j.employed = int(math.Min(gapCap, math.Round(j.Gauss(gapMean, gapSD))))
	j.monthly *= payFactor
	j.AddEvent(date, j.Sim().Name+" Job Loss", 7)
	start := date.AddDate(0, -1*j.employed, 0)
	if j.Age(start) < j.Sim().RetirementAge()-0.5 {
		j.AddEvent(start, j.Sim().Name+" New Job", 3)
		if cutLabel != "" {
			j.AddEvent(start, j.Sim().Name+cutLabel, cutSeverity)
		}
	} else {
		j.employed -= 12
	}
	return true
}
// Earn returns this month's salary, or zero while unemployed. Each full
// year of tenure earns a 1% raise, applied on the anniversary month.
func (j *Job) Earn(date time.Time) float64 {
	if !j.Employed(date) {
		j.unemployed++
		return 0.0
	}
	j.unemployed = 0
	// Skip the raise check in the very first month (tenure 0).
	if j.employed != 0 && j.employed%12 == 0 {
		j.monthly *= 1.01
	}
	return j.monthly
}
// Unemployment returns the monthly benefit: a flat 2500 from the second
// through the twelfth consecutive month without work, zero otherwise.
func (j *Job) Unemployment(date time.Time) float64 {
	if j.unemployed <= 1 || j.unemployed > 12 {
		return 0.0
	}
	return 2500.0
}
// Monthly advances one month of employment: salary and any unemployment
// benefit are deposited into the cash account and taxes are withheld on
// both (salary as taxable-at-source, benefit not), then tenure advances.
func (j *Job) Monthly(date time.Time) {
	salary := j.Earn(date)
	j.CashAccount().Deposit(salary, date, "Salary")
	j.TaxMen().Withhold(salary, date, true)

	benefit := j.Unemployment(date)
	j.CashAccount().Deposit(benefit, date, "Unemployment")
	j.TaxMen().Withhold(benefit, date, false)

	j.employed++
}
|
package main
import (
"fmt"
"io"
"os"
)
/*
create a program Read a contents from text file and prints on terminal
Note : File should be read on Command Line Args
*/
// main prints the contents of the file named by the first command-line
// argument to stdout, e.g.: go run InterfaceAssignment2.go filename
func main() {
	// BUG FIX: missing argument previously caused an index-out-of-range panic.
	if len(os.Args) < 2 {
		fmt.Println("Error---> : ", "missing filename argument")
		os.Exit(1)
	}
	f, err := os.Open(os.Args[1])
	if err != nil {
		fmt.Println("Error---> : ", err)
		os.Exit(1)
	}
	defer f.Close() // BUG FIX: the file was never closed
	// BUG FIX: the copy error was silently discarded.
	if _, err := io.Copy(os.Stdout, f); err != nil {
		fmt.Println("Error---> : ", err)
		os.Exit(1)
	}
}
|
package sparkpost
import (
"errors"
sp "github.com/SparkPost/gosparkpost"
"github.com/mkj-gram/go_email_service/internal/emailprovider"
"log"
"os"
"strings"
)
// SparkPostProvider implements the emailprovider interface on top of the
// SparkPost transmissions API.
type SparkPostProvider struct{}
// client is the shared SparkPost client; set by Init, read by Send.
var client *sp.Client
// Init configures the package-level SparkPost client using the
// SPARKPOST_API_KEY environment variable. It must succeed before Send
// can be used.
func (s SparkPostProvider) Init() error {
	var c sp.Client
	err := c.Init(&sp.Config{
		BaseUrl:    "https://api.sparkpost.com",
		ApiKey:     os.Getenv("SPARKPOST_API_KEY"),
		ApiVersion: 1,
	})
	if err != nil {
		log.Println("Could not initialize Spark Post provider.")
		return err
	}
	client = &c
	return nil
}
// Send delivers the given email through SparkPost. Init must have
// completed successfully first. To, Cc and Bcc recipients all receive
// the message; the visible To header lists only the primary recipients,
// and a "cc" header is added when Cc recipients exist.
func (s SparkPostProvider) Send(m emailprovider.Email) error {
	if client == nil {
		return errors.New("SparkPost provider not initialized correctly")
	}
	log.Printf("Sending through Spark Post: %s\n", m)
	content := sp.Content{
		From:    sp.Address{Name: m.From.Name(), Email: m.From.Address()},
		Subject: m.Subject.String(),
		Text:    m.Body,
		HTML:    m.HtmlBody.String(),
		// BUG FIX: Headers was nil, so writing the "cc" header below
		// panicked with "assignment to entry in nil map".
		Headers: map[string]string{},
	}
	headerTo := make([]string, 0, len(m.To))
	for _, e := range m.To {
		headerTo = append(headerTo, e.Address())
	}
	headerToValue := strings.Join(headerTo, ",")
	if len(m.Cc) > 0 {
		ccTo := make([]string, 0, len(m.Cc))
		for _, e := range m.Cc {
			ccTo = append(ccTo, e.Address())
		}
		content.Headers["cc"] = strings.Join(ccTo, ",")
	}
	tx := &sp.Transmission{
		Content:    content,
		Recipients: []sp.Recipient{},
	}
	// All recipient classes are appended to the same delivery list; the
	// HeaderTo value keeps the displayed To header limited to m.To.
	addRecipient := func(name, addr string) {
		tx.Recipients = append(tx.Recipients.([]sp.Recipient), sp.Recipient{
			Address: sp.Address{Name: name, Email: addr, HeaderTo: headerToValue},
		})
	}
	for _, e := range m.To {
		addRecipient(e.Name(), e.Address())
	}
	for _, e := range m.Cc {
		addRecipient(e.Name(), e.Address())
	}
	for _, e := range m.Bcc {
		addRecipient(e.Name(), e.Address())
	}
	_, response, err := client.Send(tx)
	if err != nil {
		// BUG FIX: response can be nil on transport-level failures; the
		// old log line dereferenced it unconditionally.
		if response != nil {
			log.Printf("Error sending through Spark Post: %d %s %s\n", response.HTTP.StatusCode, string(response.Body), response.Errors)
		} else {
			log.Printf("Error sending through Spark Post: %s\n", err)
		}
	}
	return err
}
|
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package cmd
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vespa-engine/vespa/client/go/vespa"
)
// TestConfig exercises "config set"/"config get" for every supported
// option: rejection of unknown keys, target URL handling, API key file
// (flag and environment), application validation and defaulting, wait
// and quiet flags, and instance override. The assertions are order
// dependent: each "set" changes state later "get" calls observe.
func TestConfig(t *testing.T) {
	// Unknown option: set fails, get reports it as unset.
	assertConfigCommandErr(t, "Error: invalid option or value: \"foo\": \"bar\"\n", "config", "set", "foo", "bar")
	assertConfigCommand(t, "foo = <unset>\n", "config", "get", "foo")
	// Target: named environments and explicit URLs are both accepted.
	assertConfigCommand(t, "target = local\n", "config", "get", "target")
	assertConfigCommand(t, "", "config", "set", "target", "hosted")
	assertConfigCommand(t, "target = hosted\n", "config", "get", "target")
	assertConfigCommand(t, "", "config", "set", "target", "cloud")
	assertConfigCommand(t, "target = cloud\n", "config", "get", "target")
	assertConfigCommand(t, "", "config", "set", "target", "http://127.0.0.1:8080")
	assertConfigCommand(t, "", "config", "set", "target", "https://127.0.0.1")
	assertConfigCommand(t, "target = https://127.0.0.1\n", "config", "get", "target")
	// API key file: settable via environment variable or config option.
	assertEnvConfigCommand(t, "api-key-file = /tmp/private.key\n", []string{"VESPA_CLI_API_KEY_FILE=/tmp/private.key"}, "config", "get", "api-key-file")
	assertConfigCommand(t, "", "config", "set", "api-key-file", "/tmp/private.key")
	assertConfigCommand(t, "api-key-file = /tmp/private.key\n", "config", "get", "api-key-file")
	// Application: must be tenant.app[.instance]; bad values rejected.
	assertConfigCommandErr(t, "Error: invalid application: \"foo\"\n", "config", "set", "application", "foo")
	assertConfigCommand(t, "application = <unset>\n", "config", "get", "application")
	assertConfigCommand(t, "", "config", "set", "application", "t1.a1.i1")
	assertConfigCommand(t, "application = t1.a1.i1\n", "config", "get", "application")
	// Bare "config get" dumps every option in sorted order.
	assertConfigCommand(t, "api-key-file = /tmp/private.key\napplication = t1.a1.i1\ncolor = auto\ninstance = <unset>\nquiet = false\ntarget = https://127.0.0.1\nwait = 0\n", "config", "get")
	// Wait must be a non-negative integer.
	assertConfigCommand(t, "", "config", "set", "wait", "60")
	assertConfigCommandErr(t, "Error: wait option must be an integer >= 0, got \"foo\"\n", "config", "set", "wait", "foo")
	assertConfigCommand(t, "wait = 60\n", "config", "get", "wait")
	assertConfigCommand(t, "", "config", "set", "quiet", "true")
	assertConfigCommand(t, "", "config", "set", "quiet", "false")
	assertConfigCommand(t, "", "config", "set", "instance", "i2")
	assertConfigCommand(t, "instance = i2\n", "config", "get", "instance")
	// A two-part application gains the "default" instance.
	assertConfigCommand(t, "", "config", "set", "application", "t1.a1")
	assertConfigCommand(t, "application = t1.a1.default\n", "config", "get", "application")
}
// assertConfigCommand runs the CLI with args and asserts the command
// succeeds with exactly the expected stdout.
func assertConfigCommand(t *testing.T, expected string, args ...string) {
	assertEnvConfigCommand(t, expected, nil, args...)
}
// assertEnvConfigCommand runs the CLI with the given extra environment
// variables and asserts success with exactly the expected stdout.
func assertEnvConfigCommand(t *testing.T, expected string, env []string, args ...string) {
	cli, stdout, _ := newTestCLI(t, env...)
	err := cli.Run(args...)
	assert.Nil(t, err)
	assert.Equal(t, expected, stdout.String())
}
// assertConfigCommandErr runs the CLI with args and asserts the command
// fails with exactly the expected stderr.
func assertConfigCommandErr(t *testing.T, expected string, args ...string) {
	cli, _, stderr := newTestCLI(t)
	err := cli.Run(args...)
	assert.NotNil(t, err)
	assert.Equal(t, expected, stderr.String())
}
// TestUseAPIKey checks every way API-key authentication can be enabled:
// the api-key-file option, the VESPA_CLI_API_KEY environment variable,
// a tenant key file in the CLI home directory, and the deprecated
// auth.json fallback which disables key use when Auth0 credentials exist.
func TestUseAPIKey(t *testing.T) {
	cli, _, _ := newTestCLI(t)
	// No key configured anywhere: API key must not be used.
	assert.False(t, cli.config.useAPIKey(cli, vespa.PublicSystem, "t1"))
	cli.config.set(apiKeyFileFlag, "/tmp/foo")
	assert.True(t, cli.config.useAPIKey(cli, vespa.PublicSystem, "t1"))
	cli.config.set(apiKeyFileFlag, "")
	// Key supplied directly through the environment.
	cli, _, _ = newTestCLI(t, "VESPA_CLI_API_KEY=foo")
	assert.True(t, cli.config.useAPIKey(cli, vespa.PublicSystem, "t1"))
	// Test deprecated functionality
	authContent := `
{
	"version": 1,
	"providers": {
		"auth0": {
			"version": 1,
			"systems": {
				"public": {
					"access_token": "...",
					"scopes": ["openid", "offline_access"],
					"expires_at": "2030-01-01T01:01:01.000001+01:00"
				}
			}
		}
	}
}`
	cli, _, _ = newTestCLI(t, "VESPA_CLI_CLOUD_SYSTEM=public")
	// A per-tenant key file in the home directory enables key use...
	_, err := os.Create(filepath.Join(cli.config.homeDir, "t2.api-key.pem"))
	require.Nil(t, err)
	assert.True(t, cli.config.useAPIKey(cli, vespa.PublicSystem, "t2"))
	// ...but valid Auth0 credentials in auth.json take precedence.
	require.Nil(t, os.WriteFile(filepath.Join(cli.config.homeDir, "auth.json"), []byte(authContent), 0600))
	assert.False(t, cli.config.useAPIKey(cli, vespa.PublicSystem, "t2"))
}
|
package sqlite
import (
"database/sql"
"github.com/jakewitcher/pos-server/graph/model"
"github.com/jakewitcher/pos-server/internal/users"
"golang.org/x/crypto/bcrypt"
"log"
"strconv"
)
// UserProvider persists user accounts in a SQLite database.
type UserProvider struct {
	db *sql.DB // shared database handle, injected via NewUserProvider
}
// CreateUser persists a new user account and returns it as a DTO.
// newUser.EmployeeID must be a base-10 integer string referencing an
// existing employee; an unparsable ID yields an invalid-ID error.
func (p *UserProvider) CreateUser(newUser model.NewUserInput) (*model.User, error) {
	employeeId, err := strconv.ParseInt(newUser.EmployeeID, 10, 64)
	if err != nil {
		return nil, newInvalidIdError(Employee, newUser.EmployeeID)
	}
	userId, err := p.insertNewUser(newUser, employeeId)
	if err != nil {
		// BUG FIX: the insert error was previously ignored, so callers
		// received a bogus user with a zero ID on failure.
		return nil, err
	}
	user := &users.UserEntity{
		Id:         userId,
		EmployeeId: employeeId,
		Username:   newUser.Username,
	}
	return user.ToDTO(), nil
}
// insertNewUser hashes the password and inserts a User row, returning
// the generated row ID. Database failures are logged and surfaced to the
// caller as the generic serverError.
func (p *UserProvider) insertNewUser(newUser model.NewUserInput, employeeId int64) (int64, error) {
	hashedPassword, err := HashPassword(newUser.Password)
	if err != nil {
		return 0, serverError
	}
	stmt, err := p.db.Prepare(
		`INSERT INTO User(EmployeeId, Username, Password)
		VALUES (?,?,?)`)
	if err != nil {
		log.Println(err)
		return 0, serverError
	}
	defer stmt.Close()
	res, err := stmt.Exec(employeeId, newUser.Username, hashedPassword)
	if err != nil {
		log.Println(err)
		return 0, serverError
	}
	id, err := res.LastInsertId()
	if err != nil {
		log.Println(err)
		return 0, serverError
	}
	return id, nil
}
// NewUserProvider wires a UserProvider to the given database handle.
func NewUserProvider(db *sql.DB) *UserProvider {
	p := UserProvider{db: db}
	return &p
}
func HashPassword(password string) (string, error) {
bytes, err := bcrypt.GenerateFromPassword([]byte(password), 14)
return string(bytes), err
} |
package models
// Crontab defines structure for crontab
type Crontab struct {
	Records []Record // one entry per crontab line
}
// Record defines single record at the crontab
type Record struct {
	Schedule string // cron schedule expression (format not validated here)
	Command string  // command line executed on that schedule
}
package memory
import (
"sync"
)
// Store is an in-memory implementation of the data-store interface.
type Store struct {
	statements map[int][]string // statement ID -> submitted URLs
	counter    int              // last issued statement ID
	mu         sync.Mutex       // guards statements and counter
}

// NewMemoryStore constructs an empty in-memory Store.
// (The named result of the original signature was unused and has been
// dropped; the return type is unchanged.)
func NewMemoryStore() *Store {
	return &Store{
		statements: make(map[int][]string),
	}
}

// SaveStatement stores the given URLs under a freshly allocated
// statement ID (starting at 1) and returns that ID. The error is
// always nil.
func (s *Store) SaveStatement(urls []string) (int, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.counter++
	s.statements[s.counter] = urls
	return s.counter, nil
}

// GetStatementURLs returns the URLs stored for the given statement ID,
// or nil for an unknown ID. The error is always nil.
func (s *Store) GetStatementURLs(id int) ([]string, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.statements[id], nil
}
|
package main
import (
"github.com/google/gopacket"
"github.com/google/gopacket/pcap"
"fmt"
"log"
"time"
)
// Capture parameters for the live pcap session.
var (
	device string = "en0"     // network interface to sniff
	snaplen int32 = 65535     // maximum bytes captured per packet
	promisc bool = false      // do not put the NIC into promiscuous mode
	err error                 // shared error slot used in main
	timeout time.Duration = -1 * time.Second // negative: block until packets arrive
	handle *pcap.Handle       // open capture handle, set in main
)
// main opens a live capture on the configured device, filters for ICMP
// traffic sourced from 127.0.0.1, and reports every matching packet.
func main() {
	handle, err = pcap.OpenLive(device, snaplen, promisc, timeout)
	if err != nil {
		log.Fatal(err)
	}
	defer handle.Close()

	// Only ICMP packets originating from localhost pass the BPF filter.
	const filter = "src host 127.0.0.1 and icmp"
	if err = handle.SetBPFFilter(filter); err != nil {
		log.Fatal(err)
	}

	source := gopacket.NewPacketSource(handle, handle.LinkType())
	for packet := range source.Packets() {
		fmt.Println("You have been pinged!!")
		fmt.Println("----------")
		fmt.Println(packet)
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
)
func handler(writer http.ResponseWriter, request *http.Request) {
fmt.Fprintf(writer, "Hello World, %s!", request.URL.Path[1:])
}
// main starts an HTTP server on :5555 with logging redirected to a file.
// To run from the command line type go run main.go, then open a browser
// and navigate to localhost:5555.
func main() {
	f, err := os.OpenFile("alogfile.txt", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// BUG FIX: the error was silently discarded, leaving the logger
		// pointed at a nil file when the open failed.
		log.Fatal(err)
	}
	defer f.Close()
	log.SetOutput(f)
	log.Printf("Here is some text to write")
	http.Handle("/test", new(MyHandler))
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("Hello World"))
		// fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
	})
	// pass nil to use the default mux (Multiplexing)
	log.Fatal(http.ListenAndServe(":5555", nil))
}
// MyHandler example struct: a static-file handler rooted at the local
// "public" directory, registered on the "/test" route in main.
type MyHandler struct {
	http.Handler
}
// ServeHTTP serves the file under "public/" named by the request path,
// answering 404 when it cannot be read.
func (h *MyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// NOTE(review): req.URL.Path is untrusted and is not cleaned, so ".."
	// segments could escape the public/ directory (path traversal).
	// Sanitize (e.g. filepath.Clean) before exposing beyond local use.
	path := "public/" + req.URL.Path
	data, err := ioutil.ReadFile(path)
	if err != nil {
		w.WriteHeader(http.StatusNotFound)
		w.Write([]byte("404 not found -" + http.StatusText(http.StatusNotFound)))
		return
	}
	w.Write(data)
}
|
package 贪心
// groupThePeople partitions person indices into groups where person i
// belongs to a group of exactly groupSizes[i] members. People sharing a
// required size are batched together; each batch is emitted as soon as
// it reaches its size.
func groupThePeople(groupSizes []int) [][]int {
	pending := make(map[int][]int) // required size -> people waiting for a full group
	groups := make([][]int, 0)
	for person, size := range groupSizes {
		pending[size] = append(pending[size], person)
		if len(pending[size]) == size {
			groups = append(groups, pending[size])
			delete(pending, size)
		}
	}
	return groups
}
/*
Problem link: https://leetcode-cn.com/problems/group-the-people-given-the-group-size-they-belong-to/
*/
|
package issueProcessor
import (
"fmt"
"time"
"github.com/google/go-github/github"
"github.com/pouchcontainer/pouchrobot/utils"
)
// ActToIssueExpired handles an issue that has passed its activity
// window by attempting to close it.
func (ip *IssueProcessor) ActToIssueExpired(issue *github.Issue) error {
	// BUG FIX: the close error was previously discarded and nil returned
	// unconditionally.
	return ip.ActToCloseExpire(issue)
}
// ActToCloseExpire closes an issue that carries the "need P1" comment
// and has seen no update for at least 30 days. Issues without the
// comment, or updated within the window, are left untouched.
//
// BUG FIX: the original did not compile (`now =` on an undeclared
// variable, `now.add` instead of Add, `res.Duation` on a
// time.Duration) and checked IssueHasComment twice with the same
// arguments.
func (ip *IssueProcessor) ActToCloseExpire(issue *github.Issue) error {
	now := time.Now()
	d, _ := time.ParseDuration("-24h")
	// d30 is the cutoff: 30 days before now.
	d30 := now.Add(30 * d)
	if _, exist := ip.Client.IssueHasComment(*(issue.Number), utils.IssueNeedP1CommentSubStr); !exist {
		// No expiry-warning comment on the issue: nothing to close.
		return nil
	}
	// NOTE(review): assumes issue.UpdatedAt dereferences to a time.Time —
	// confirm against the vendored go-github version.
	if d30.Sub(*issue.UpdatedAt) < 0 {
		// Updated within the last 30 days; keep it open.
		return nil
	}
	body := fmt.Sprintf(utils.IssueNeedP1Comment, *(issue.User.Login))
	newComment := &github.IssueComment{
		Body: &body,
	}
	// NOTE(review): the original constructed this comment but never posted
	// it; if a closing comment is desired it should be sent via the GitHub
	// client before closing. Kept (blank-assigned) to preserve the intent.
	_ = newComment
	return ip.Client.CloseExpireIssues(*(issue.Number))
}
|
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
)
const (
	// docsDirEntry is the docs directory, relative to the repository root.
	docsDirEntry = "website/docs"
)
// main validates the repository's website/docs directory layout,
// assuming it is invoked from two levels below the repository root.
func main() {
	pwd, err := os.Getwd()
	if err != nil {
		fmt.Println("error while getting pwd")
		panic(err)
	}
	rootDir := filepath.Join(pwd, "..", "..")
	if err := validateDocsDirStructure(rootDir); err != nil {
		fmt.Println("error while validating docs directory structure")
		panic(err)
	}
}
// validateDocsDirStructure checks that <rootDir>/website/docs contains
// exactly the expected directories and files, returning an error
// describing the first unexpected or missing entry.
func validateDocsDirStructure(rootDir string) error {
	docsDirPath := filepath.Join(rootDir, docsDirEntry)

	// Everything under the docs directory must come from these lists.
	expectedDirs := []string{"mutation-examples", "validation"}
	expectedFiles := []string{"intro.md", "pspintro.md"}

	entries, err := os.ReadDir(docsDirPath)
	if err != nil {
		return err
	}

	// First reject anything present that is not expected.
	for _, entry := range entries {
		allowed, kind := expectedFiles, "file"
		if entry.IsDir() {
			allowed, kind = expectedDirs, "directory"
		}
		if !contains(allowed, entry.Name()) {
			return fmt.Errorf("unexpected %s: %s, found at: %s", kind, entry.Name(), filepath.Join(docsDirPath, entry.Name()))
		}
	}

	// Then ensure nothing expected is missing.
	for _, d := range expectedDirs {
		if _, err := os.Stat(filepath.Join(docsDirPath, d)); os.IsNotExist(err) {
			return fmt.Errorf("missing directory: %s", d)
		}
	}
	for _, f := range expectedFiles {
		if _, err := os.Stat(filepath.Join(docsDirPath, f)); os.IsNotExist(err) {
			return fmt.Errorf("missing file: %s", f)
		}
	}
	return nil
}
// contains reports whether item matches any element of items, ignoring
// case (Unicode case-folding).
func contains(items []string, item string) bool {
	for _, candidate := range items {
		if strings.EqualFold(candidate, item) {
			return true
		}
	}
	return false
}
|
package task
import (
log "code.google.com/p/log4go"
"github.com/d-d-j/ddj_master/common"
"github.com/d-d-j/ddj_master/dto"
"github.com/d-d-j/ddj_master/node"
"fmt"
)
// TaskWorker processes REST requests from a worker pool, tracking its
// own pending-job count and resolving nodes through a load balancer.
type TaskWorker struct {
	reqChan chan dto.RestRequest // buffered queue of requests for this worker
	getNodeChan chan node.GetNodeRequest // channel used to look up nodes by id
	done chan Worker // signals the pool that a job finished
	pending int // jobs queued or in flight for this worker
	index int // worker's index within the pool
	idGenerator common.Int64Generator // source of task ids
	balancer *node.LoadBalancer // tracks which node should take inserts
}
// Worker is the interface that must be implemented by a worker in the
// worker pool.
type Worker interface {
	common.Int64Generator
	// Work runs the worker's request-processing loop (run as a goroutine).
	Work()
	// RequestChan exposes the channel jobs are submitted on.
	RequestChan() chan dto.RestRequest
	// Done signals the pool that the current job finished.
	Done()
	IncrementPending()
	DecrementPending()
	Id() int
	String() string
	getNodeForInsert() (*node.Node, error)
}
// NewTaskWorker constructs a Worker. jobsPerWorker is the buffer size of
// the request channel.
func NewTaskWorker(idx int, jobsPerWorker int32, getNodeChan chan node.GetNodeRequest, done chan Worker, nodeBalancer *node.LoadBalancer, idGen common.Int64Generator) Worker {
	return &TaskWorker{
		index:       idx,
		pending:     0,
		reqChan:     make(chan dto.RestRequest, jobsPerWorker),
		getNodeChan: getNodeChan,
		balancer:    nodeBalancer,
		done:        done,
		idGenerator: idGen,
	}
}
// Work endlessly consumes requests from the worker's channel, dispatches
// each to the handler registered for its type, and reports completion.
// It is intended to run as a goroutine.
func (w *TaskWorker) Work() {
	for {
		request := <-w.reqChan
		log.Finest(w, "Get request to process")
		handle := w.getJob(request.Type)
		handle(request)
		w.Done()
	}
}
// getNodeForInsert resolves the node currently designated by the load
// balancer for inserts. It fails when the balancer is uninitialized or
// the designated node cannot be found.
func (w *TaskWorker) getNodeForInsert() (*node.Node, error) {
	nodeId := w.balancer.CurrentInsertNodeId
	if nodeId == common.CONST_UNINITIALIZED {
		// Error strings lowercased per Go convention.
		return nil, fmt.Errorf("balancer is uninitialized")
	}
	nodeChan := make(chan *node.Node)
	w.getNodeChan <- node.GetNodeRequest{NodeId: nodeId, BackChan: nodeChan}
	// BUG FIX: the local was named "node", shadowing the imported node
	// package; also fixes the "does not existing" grammar.
	n := <-nodeChan
	if n == nil {
		return nil, fmt.Errorf("node does not exist")
	}
	return n, nil
}
// createMessage encodes the task's request for the given device. On
// encoding failure it answers the REST request with an error response
// and returns nil.
func createMessage(req dto.RestRequest, t *dto.Task, deviceId int32) []byte {
	message, err := t.MakeRequest(deviceId).Encode()
	if err != nil {
		log.Error("Error while encoding request - ", err)
		req.Response <- dto.NewRestResponse("Internal server error", 0, nil)
		return nil
	}
	return message
}
//Interface implementation
// String renders the worker's index and pending-job count for logging.
func (w *TaskWorker) String() string {
	return fmt.Sprintf("Worker #%d (pending:%d)", w.index, w.pending)
}
//Return worker Id
func (w *TaskWorker) Id() int { return w.index }
//Increment pending jobs counter
func (w *TaskWorker) IncrementPending() { w.pending++ }
//Decrement pending jobs counter
func (w *TaskWorker) DecrementPending() {
	log.Finest(w, "Pending decrement")
	w.pending--
}
//Return worker request channel
func (w *TaskWorker) RequestChan() chan dto.RestRequest { return w.reqChan }
// Done hands the worker back to the pool after finishing a job.
func (w *TaskWorker) Done() {
	log.Finest(w, "Worker is done")
	w.done <- w
}
//Generate and return the next task id from the id generator
//(the original comment incorrectly said "worker Id").
func (w *TaskWorker) GetId() int64 { return w.idGenerator.GetId() }
|
package bbir
import (
"os"
"testing"
)
// Test_BulkCommandExecutor drives the whole pipeline: read the CSV
// fixture, convert its lines into commands, and execute them in bulk.
func Test_BulkCommandExecutor(t *testing.T) {
	injector := NewInjectorForCommandBuilderTest(t)
	converter := injector.Get(new(CommandConverter)).(*CommandConverter)
	filePath := fixturesPath + "example.csv"
	file, err := os.Open(filePath)
	if err != nil {
		// BUG FIX: these were t.Errorf, which let the test continue with
		// a nil file / bad data and panic; setup failures are fatal.
		t.Fatalf("Could not open fixture file: %v", filePath)
	}
	defer file.Close() // BUG FIX: the fixture file was never closed
	_, lines, err := NewCSVReader(file).ReadAll()
	if err != nil {
		t.Fatalf("Could not read csv data: %v", err.Error())
	}
	commands, err := converter.Convert(lines)
	if err != nil {
		t.Fatalf("Could not convert:\n%s", err.Error())
	}
	executor := injector.Get(new(BulkCommandExecutor)).(*BulkCommandExecutor)
	if err := executor.Do(commands); err != nil {
		t.Errorf("Could not execute:\n%s", err.Error())
	}
}
|
package imagekit
import (
"context"
"errors"
"time"
)
//
// RESPONSES
//
// GetFileDetailsResponse is the payload returned by the get-file-detail
// API (see MediaService.GetFileDetails).
type GetFileDetailsResponse struct {
	// FileID is the unique ID of the uploaded file.
	FileID string `json:"fileId"`
	// Type of item. It can be either file or imageFolder.
	Type string `json:"type"`
	// Name of the file or imageFolder.
	Name string `json:"name"`
	// FilePath of the file. In the case of an image, you can use this path to construct different transform.
	FilePath string `json:"filePath"`
	// Tags is array of tags associated with the image.
	Tags []string `json:"tags"`
	// IsPrivateFile is the file marked as private. It can be either "true" or "false".
	IsPrivateFile bool `json:"isPrivateFile"`
	// CustomCoordinates is the value of custom coordinates associated with the image in format "x,y,width,height".
	CustomCoordinates string `json:"customCoordinates"`
	// URL of the file.
	URL string `json:"url"`
	// Thumbnail is a small thumbnail URL in case of an image.
	Thumbnail string `json:"thumbnail"`
	// FileType of the file, it could be either image or non-image.
	FileType string `json:"fileType"`
	// MIME Type of the file.
	MIME string `json:"mime"`
	// Height of the uploaded image file.
	//
	// Only applicable when file type is image.
	Height int `json:"height"`
	// Width of the uploaded image file.
	//
	// Only applicable when file type is image.
	Width int `json:"width"`
	// Size of the uploaded file in bytes.
	Size int `json:"size"`
	// HasAlpha is whether the image has an alpha component or not.
	HasAlpha bool `json:"hasAlpha"`
	// The date and time when the file was first uploaded.
	//
	// The format is YYYY-MM-DDTHH:mm:ss.sssZ
	//
	// NOTE(review): the tag is "created_at" but ImageKit's public docs
	// name this field "createdAt" — confirm against an actual API payload.
	CreatedAt time.Time `json:"created_at"`
}
//
// METHODS
//
// GetFileDetails fetches file metadata (tags, customCoordinates,
// isPrivate, …) for the given file ID using the get-file-detail API.
// An empty fid is rejected without issuing a request.
func (s *MediaService) GetFileDetails(ctx context.Context, fid string) (*GetFileDetailsResponse, error) {
	if fid == "" {
		return nil, errors.New("file id is empty")
	}
	req, err := s.client.request("GET", "v1/files/"+fid+"/details", nil, requestTypeAPI)
	if err != nil {
		return nil, err
	}
	details := new(GetFileDetailsResponse)
	if err := s.client.do(ctx, req, details); err != nil {
		return nil, err
	}
	return details, nil
}
|
package testcase
import (
"net/http"
"time"
)
type Options struct {
name string `validate:"required"`
timeout time.Duration `validate:"min=100ms,max=30s"`
maxAttempts int `validate:"min=1,max=10"`
httpClient *http.Client `validate:"gt=0"`
}
func getDefaults() Options {
return Options{
name: "some-name",
timeout: 3 * time.Second,
maxAttempts: 4,
httpClient: &http.Client{Transport: &http.Transport{MaxConnsPerHost: 10}},
}
}
|
package main
import (
"fmt"
"github.com/achakravarty/30daysofgo/day8"
)
// main reads a phone book of <size> "name number" entries from stdin,
// then answers lookup queries line by line until input is exhausted.
func main() {
	var size int
	fmt.Scanf("%d\n", &size)

	phoneBook := day8.PhoneBook{}.NewPhoneBook()
	for i := 0; i < size; i++ {
		var (
			name   string
			number int
		)
		fmt.Scanf("%s %d", &name, &number)
		phoneBook.Add(name, number)
	}

	var query string
	for count, _ := fmt.Scanf("%s\n", &query); count > 0; count, _ = fmt.Scanf("%s\n", &query) {
		// The phone book returns the error first, by its own convention.
		if err, number := phoneBook.Find(query); err != nil {
			fmt.Println(err.Error())
		} else {
			fmt.Printf("%s=%d\n", query, number)
		}
	}
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/iam/beta/iam_beta_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/beta"
)
// ServiceAccountServer implements the gRPC interface for ServiceAccount.
// It is stateless; every RPC builds its own client from the request's
// service-account file.
type ServiceAccountServer struct{}
// ProtoToServiceAccountActasResources converts a ServiceAccountActasResources object from its proto representation.
// A nil proto converts to a nil object.
func ProtoToIamBetaServiceAccountActasResources(p *betapb.IamBetaServiceAccountActasResources) *beta.ServiceAccountActasResources {
	if p == nil {
		return nil
	}
	obj := &beta.ServiceAccountActasResources{}
	for _, r := range p.GetResources() {
		obj.Resources = append(obj.Resources, *ProtoToIamBetaServiceAccountActasResourcesResources(r))
	}
	return obj
}
// ProtoToServiceAccountActasResourcesResources converts a ServiceAccountActasResourcesResources object from its proto representation.
// A nil proto converts to a nil object.
func ProtoToIamBetaServiceAccountActasResourcesResources(p *betapb.IamBetaServiceAccountActasResourcesResources) *beta.ServiceAccountActasResourcesResources {
	if p == nil {
		return nil
	}
	obj := &beta.ServiceAccountActasResourcesResources{
		FullResourceName: dcl.StringOrNil(p.GetFullResourceName()),
	}
	return obj
}
// ProtoToServiceAccount converts a ServiceAccount resource from its proto representation.
// Scalar fields are mapped through dcl nil-aware helpers; nested
// ActasResources are converted recursively.
func ProtoToServiceAccount(p *betapb.IamBetaServiceAccount) *beta.ServiceAccount {
	obj := &beta.ServiceAccount{
		Name:           dcl.StringOrNil(p.GetName()),
		Project:        dcl.StringOrNil(p.GetProject()),
		UniqueId:       dcl.StringOrNil(p.GetUniqueId()),
		Email:          dcl.StringOrNil(p.GetEmail()),
		DisplayName:    dcl.StringOrNil(p.GetDisplayName()),
		Description:    dcl.StringOrNil(p.GetDescription()),
		OAuth2ClientId: dcl.StringOrNil(p.GetOauth2ClientId()),
		ActasResources: ProtoToIamBetaServiceAccountActasResources(p.GetActasResources()),
		Disabled:       dcl.Bool(p.GetDisabled()),
	}
	return obj
}
// ServiceAccountActasResourcesToProto converts a ServiceAccountActasResources object to its proto representation.
// A nil object converts to a nil proto.
func IamBetaServiceAccountActasResourcesToProto(o *beta.ServiceAccountActasResources) *betapb.IamBetaServiceAccountActasResources {
	if o == nil {
		return nil
	}
	p := &betapb.IamBetaServiceAccountActasResources{}
	sResources := make([]*betapb.IamBetaServiceAccountActasResourcesResources, len(o.Resources))
	for i, r := range o.Resources {
		sResources[i] = IamBetaServiceAccountActasResourcesResourcesToProto(&r)
	}
	p.SetResources(sResources)
	return p
}
// ServiceAccountActasResourcesResourcesToProto converts a ServiceAccountActasResourcesResources object to its proto representation.
// A nil object converts to a nil proto.
func IamBetaServiceAccountActasResourcesResourcesToProto(o *beta.ServiceAccountActasResourcesResources) *betapb.IamBetaServiceAccountActasResourcesResources {
	if o == nil {
		return nil
	}
	p := &betapb.IamBetaServiceAccountActasResourcesResources{}
	p.SetFullResourceName(dcl.ValueOrEmptyString(o.FullResourceName))
	return p
}
// ServiceAccountToProto converts a ServiceAccount resource to its proto representation.
// Nil pointer fields become proto zero values via the dcl helpers.
func ServiceAccountToProto(resource *beta.ServiceAccount) *betapb.IamBetaServiceAccount {
	p := &betapb.IamBetaServiceAccount{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetUniqueId(dcl.ValueOrEmptyString(resource.UniqueId))
	p.SetEmail(dcl.ValueOrEmptyString(resource.Email))
	p.SetDisplayName(dcl.ValueOrEmptyString(resource.DisplayName))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetOauth2ClientId(dcl.ValueOrEmptyString(resource.OAuth2ClientId))
	p.SetActasResources(IamBetaServiceAccountActasResourcesToProto(resource.ActasResources))
	p.SetDisabled(dcl.ValueOrEmptyBool(resource.Disabled))
	return p
}
// applyServiceAccount handles the gRPC request by passing it to the underlying ServiceAccount Apply() method.
func (s *ServiceAccountServer) applyServiceAccount(ctx context.Context, c *beta.Client, request *betapb.ApplyIamBetaServiceAccountRequest) (*betapb.IamBetaServiceAccount, error) {
	p := ProtoToServiceAccount(request.GetResource())
	res, err := c.ApplyServiceAccount(ctx, p)
	if err != nil {
		return nil, err
	}
	r := ServiceAccountToProto(res)
	return r, nil
}
// ApplyIamBetaServiceAccount handles the gRPC request by passing it to the underlying ServiceAccount Apply() method.
// (Comment casing fixed: it previously named the unexported helper.)
func (s *ServiceAccountServer) ApplyIamBetaServiceAccount(ctx context.Context, request *betapb.ApplyIamBetaServiceAccountRequest) (*betapb.IamBetaServiceAccount, error) {
	cl, err := createConfigServiceAccount(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyServiceAccount(ctx, cl, request)
}
// DeleteIamBetaServiceAccount handles the gRPC request by passing it to the underlying ServiceAccount Delete() method.
func (s *ServiceAccountServer) DeleteIamBetaServiceAccount(ctx context.Context, request *betapb.DeleteIamBetaServiceAccountRequest) (*emptypb.Empty, error) {
	cl, err := createConfigServiceAccount(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteServiceAccount(ctx, ProtoToServiceAccount(request.GetResource()))
}
// ListIamBetaServiceAccount handles the gRPC request by passing it to the underlying ServiceAccountList() method.
func (s *ServiceAccountServer) ListIamBetaServiceAccount(ctx context.Context, request *betapb.ListIamBetaServiceAccountRequest) (*betapb.ListIamBetaServiceAccountResponse, error) {
	cl, err := createConfigServiceAccount(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListServiceAccount(ctx, request.GetProject())
	if err != nil {
		return nil, err
	}
	var protos []*betapb.IamBetaServiceAccount
	for _, r := range resources.Items {
		rp := ServiceAccountToProto(r)
		protos = append(protos, rp)
	}
	p := &betapb.ListIamBetaServiceAccountResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigServiceAccount builds a beta IAM client authenticated with
// the credentials file supplied on the request.
func createConfigServiceAccount(ctx context.Context, service_account_file string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return beta.NewClient(conf), nil
}
|
package main
import (
"container/list"
"strconv"
"strings"
)
// Codec serializes binary trees to strings and back, using a level-order
// "[v1,v2,null,...]" encoding with trailing nulls trimmed.
type Codec struct {
}
// Constructor returns a fresh, stateless Codec.
func Constructor() Codec {
	return Codec{}
}
// serialize encodes the tree in level order as "[v1,v2,...]", writing
// "null" for absent children and trimming trailing nulls. A nil tree
// encodes as "[]".
func (c *Codec) serialize(root *TreeNode) string {
	if root == nil {
		return "[]"
	}
	queue := list.New()
	queue.PushBack(root)
	tokens := []string{c.toStr(root)}
	for queue.Len() > 0 {
		node := queue.Remove(queue.Front()).(*TreeNode)
		// Emit both child slots, then enqueue only the real children.
		tokens = append(tokens, c.toStr(node.Left), c.toStr(node.Right))
		if node.Left != nil {
			queue.PushBack(node.Left)
		}
		if node.Right != nil {
			queue.PushBack(node.Right)
		}
	}
	end := len(tokens) - 1
	for end > 0 && tokens[end] == "null" {
		end--
	}
	return "[" + strings.Join(tokens[:end+1], ",") + "]"
}
// deserialize rebuilds a tree from the "[v1,v2,...]" level-order format
// produced by serialize. Input that lacks the surrounding brackets (or
// is shorter than "[x]") yields nil.
func (c *Codec) deserialize(data string) *TreeNode {
	N := len(data)
	if N < 3 || data[0] != '[' || data[N-1] != ']' {
		return nil
	}
	vals := strings.Split(data[1:N-1], ",")
	if len(vals) == 0 {
		return nil
	}
	// vals[0] is the root; i walks the remaining tokens in child pairs.
	root, i := c.toNode(vals[0]), 1
	q := list.New()
	q.PushFront(root)
	for q.Len() > 0 && i < len(vals) {
		// PushFront + Remove(Back) together form a FIFO queue.
		node := q.Remove(q.Back()).(*TreeNode)
		// Each dequeued node consumes two tokens: left child then right.
		node.Left = c.toNode(vals[i])
		if i+1 < len(vals) {
			node.Right = c.toNode(vals[i+1])
		}
		i += 2
		if node.Left != nil {
			q.PushFront(node.Left)
		}
		if node.Right != nil {
			q.PushFront(node.Right)
		}
	}
	return root
}
// toStr renders a node's value in decimal, or "null" for a nil node.
func (c *Codec) toStr(node *TreeNode) string {
	if node != nil {
		return strconv.Itoa(node.Val)
	}
	return "null"
}
// toNode parses a serialized token into a leaf node, or nil for "null".
// Malformed numbers silently become value 0 (Atoi error ignored, as in
// the encoding's contract).
func (c *Codec) toNode(s string) *TreeNode {
	if s != "null" {
		v, _ := strconv.Atoi(s)
		return &TreeNode{v, nil, nil}
	}
	return nil
}
/**
* Your Codec object will be instantiated and called as such:
* ser := Constructor();
* deser := Constructor();
* data := ser.serialize(root);
* ans := deser.deserialize(data);
*/
|
package chain
import (
"fmt"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/wire"
)
// BitcoindEvents is the interface that must be satisfied by any type that
// serves bitcoind block and transactions events.
type BitcoindEvents interface {
	// TxNotifications will return a channel which will deliver new
	// transactions.
	TxNotifications() <-chan *wire.MsgTx
	// BlockNotifications will return a channel which will deliver new
	// blocks.
	BlockNotifications() <-chan *wire.MsgBlock
	// LookupInputSpend will return the transaction found in mempool that
	// spends the given input. The boolean reports whether such a
	// spending transaction was found.
	LookupInputSpend(op wire.OutPoint) (chainhash.Hash, bool)
	// Start will kick off any goroutines required for operation.
	Start() error
	// Stop will clean up any resources and goroutines.
	Stop() error
}
// Ensure rpcclient.Client implements the rpcClient interface at compile
// time; this declaration fails the build if the interface ever drifts.
var _ rpcClient = (*rpcclient.Client)(nil)
// NewBitcoindEventSubscriber initialises a new BitcoindEvents
// implementation based on the supplied config: RPC polling when
// PollingConfig is set, otherwise ZMQ. Exactly one of the two configs
// must be provided.
func NewBitcoindEventSubscriber(cfg *BitcoindConfig,
	client *rpcclient.Client) (BitcoindEvents, error) {

	switch {
	// Ambiguous configuration: refuse to guess.
	case cfg.PollingConfig != nil && cfg.ZMQConfig != nil:
		return nil, fmt.Errorf("either PollingConfig or ZMQConfig " +
			"should be specified, not both")

	case cfg.PollingConfig != nil:
		if client == nil {
			return nil, fmt.Errorf("rpc client must be given " +
				"if rpc polling is to be used for event " +
				"subscriptions")
		}
		return newBitcoindRPCPollingEvents(
			cfg.PollingConfig, client,
		), nil

	case cfg.ZMQConfig == nil:
		return nil, fmt.Errorf("ZMQConfig must be specified if " +
			"rpcpolling is disabled")

	default:
		return newBitcoindZMQEvents(cfg.ZMQConfig, client)
	}
}
|
package hive_test
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/geoffgarside/homekit-hive/pkg/api/v6/hive"
)
// TestHomeConnect exercises hive.Connect against a stub of the Hive
// "/omnia/auth/sessions" login endpoint, covering credential
// validation, a malformed server response, and an empty session list.
func TestHomeConnect(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "must be post", http.StatusBadRequest)
			return
		}
		if r.URL.Path != "/omnia/auth/sessions" {
			http.Error(w, "unknown path", http.StatusNotFound)
			return
		}
		// Shape of the login payload the client is expected to send.
		var loginRequest struct {
			Sessions []struct {
				Username string
				Password string
				Caller   string
			}
		}
		if err := json.NewDecoder(r.Body).Decode(&loginRequest); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/vnd.alertme.zoo-6.1+json;charset=UTF-8")
		if len(loginRequest.Sessions) != 1 {
			http.Error(w,
				`{"errors":[{"code":"MISSING_PARAMETER","title":"Username and password not specified","links":[]}]}`,
				http.StatusBadRequest)
			return
		}
		// Dispatch on the credentials to simulate the scenarios below.
		s := loginRequest.Sessions[0]
		switch {
		case s.Username == "" || s.Password == "":
			http.Error(w,
				`{"errors":[{"code":"USERNAME_PASSWORD_ERROR","title":"Username or password not specified or invalid","links":[]}]}`,
				http.StatusBadRequest)
		case s.Username == "invalid" && s.Password == "json":
			// NOTE: the comma after the "id" line is deliberately
			// missing — this response is intentionally invalid JSON to
			// drive the client's ErrInvalidJSON path. Do not "fix" it.
			w.WriteHeader(http.StatusOK)
			fmt.Fprintf(w, `{
"meta":{},
"links":{},
"linked":{},
"sessions":[{
"id":"4wdz82NrUmdYCuuNz3wzofWGymjRWigL"
"username":%q,
"userId":"b3a1835b-d27a-4ce9-b095-830fe9f0e398",
"extCustomerLevel":1,
"latestSupportedApiVersion":"6",
"sessionId":"4wdz82NrUmdYCuuNz3wzofWGymjRWigL"
}]
}`, s.Username)
		case s.Username == "empty" && s.Password == "sessions":
			// Well-formed response containing no sessions at all.
			w.WriteHeader(http.StatusOK)
			fmt.Fprintf(w, `{
"meta":{},
"links":{},
"linked":{},
"sessions":[]
}`)
		default:
			// Successful login with a single valid session.
			w.WriteHeader(http.StatusOK)
			fmt.Fprintf(w, `{
"meta":{},
"links":{},
"linked":{},
"sessions":[{
"id":"4wdz82NrUmdYCuuNz3wzofWGymjRWigL",
"username":%q,
"userId":"b3a1835b-d27a-4ce9-b095-830fe9f0e398",
"extCustomerLevel":1,
"latestSupportedApiVersion":"6",
"sessionId":"4wdz82NrUmdYCuuNz3wzofWGymjRWigL"
}]
}`, s.Username)
		}
	}))
	defer srv.Close()
	// Each case maps credentials to the hive error code Connect should
	// produce; wantErr false means the connection must succeed.
	tests := []struct {
		name     string
		username string
		password string
		wantErr  bool
		errCode  string
	}{
		{"Blank Username & Password", "", "", true, hive.ErrInvalidCredentials},
		{"Blank Username", "", "testing", true, hive.ErrInvalidCredentials},
		{"Blank Password", "username", "", true, hive.ErrInvalidCredentials},
		{"Valid Credentials", "username", "password", false, ""},
		{"Invalid Response JSON", "invalid", "json", true, hive.ErrInvalidJSON},
		{"Empty Sessions JSON", "empty", "sessions", true, hive.ErrInvalidLoginRespose},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			home, err := hive.Connect(
				hive.WithCredentials(tt.username, tt.password),
				hive.WithHTTPClient(srv.Client()),
				hive.WithURL(srv.URL),
			)
			if (err != nil) != tt.wantErr {
				t.Errorf("hive.Connect() error = %v, wantErr %v", err, tt.wantErr)
			}
			if err != nil && hive.ErrorCode(err) != tt.errCode {
				t.Errorf("hive.Connect() error = %v, errCode %v", err, tt.errCode)
			}
			if err == nil && home == nil {
				t.Errorf("hive.Connect() home = %v, want != nil", home)
			}
		})
	}
}
|
package util_test
import (
"github.com/maprost/application/generator/internal/util"
"github.com/maprost/assertion"
"testing"
)
// TestWebsiteIcons checks that full https URLs map to the matching
// provider icon and that an unknown host falls back to the generic
// website icon.
func TestWebsiteIcons(t *testing.T) {
	check := assertion.New(t)
	check.Equal(util.WebsiteIcon("https://mywebsite.de"), util.WebsiteIconPath)
	check.Equal(util.WebsiteIcon("https://www.linkedin.com/myname"), util.LinkedinIconPath)
	check.Equal(util.WebsiteIcon("https://github.com/myaccount"), util.GithubIconPath)
	check.Equal(util.WebsiteIcon("https://www.xing.com/myself"), util.XingIconPath)
}
// TestWebsiteIcons_onlyHttp verifies that plain http:// URLs are
// recognized the same way as https:// ones.
func TestWebsiteIcons_onlyHttp(t *testing.T) {
	assert := assertion.New(t)
	assert.Equal(util.WebsiteIcon("http://www.linkedin.com/myname"), util.LinkedinIconPath)
	assert.Equal(util.WebsiteIcon("http://github.com/myaccount"), util.GithubIconPath)
	assert.Equal(util.WebsiteIcon("http://www.xing.com/myself"), util.XingIconPath)
}
// TestWebsiteIcons_noHttp verifies that URLs without a scheme are still
// matched to their provider icons.
func TestWebsiteIcons_noHttp(t *testing.T) {
	assert := assertion.New(t)
	assert.Equal(util.WebsiteIcon("www.linkedin.com/myname"), util.LinkedinIconPath)
	assert.Equal(util.WebsiteIcon("github.com/myaccount"), util.GithubIconPath)
	assert.Equal(util.WebsiteIcon("www.xing.com/myself"), util.XingIconPath)
}
// TestWebsiteIcons_noWWW verifies matching without the "www." prefix.
func TestWebsiteIcons_noWWW(t *testing.T) {
	assert := assertion.New(t)
	assert.Equal(util.WebsiteIcon("https://linkedin.com/myname"), util.LinkedinIconPath)
	assert.Equal(util.WebsiteIcon("https://xing.com/myself"), util.XingIconPath)
}
// TestWebsiteIcons_noHttp_noWWW verifies matching of bare domains with
// neither scheme nor "www." prefix.
func TestWebsiteIcons_noHttp_noWWW(t *testing.T) {
	assert := assertion.New(t)
	assert.Equal(util.WebsiteIcon("linkedin.com/myname"), util.LinkedinIconPath)
	assert.Equal(util.WebsiteIcon("github.com/myaccount"), util.GithubIconPath)
	assert.Equal(util.WebsiteIcon("xing.com/myself"), util.XingIconPath)
}
// TestWebsiteIcons_withUpperCase verifies that URL matching is
// case-insensitive in both scheme and host.
func TestWebsiteIcons_withUpperCase(t *testing.T) {
	assert := assertion.New(t)
	assert.Equal(util.WebsiteIcon("http://wWw.LinkedIn.Com/myname"), util.LinkedinIconPath)
	assert.Equal(util.WebsiteIcon("http://GitHub.com/myaccount"), util.GithubIconPath)
	assert.Equal(util.WebsiteIcon("HTTP://www.Xing.com/myself"), util.XingIconPath)
}
|
// This program demonstrates attaching an eBPF program to a control group.
// The eBPF program will be attached as an egress filter,
// receiving an `__sk_buff` pointer for each outgoing packet.
// It prints the count of total packets every second.
package main
import (
"bufio"
"errors"
"log"
"os"
"strings"
"time"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/link"
"github.com/cilium/ebpf/rlimit"
)
// $BPF_CLANG and $BPF_CFLAGS are set by the Makefile.
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc $BPF_CLANG -cflags $BPF_CFLAGS bpf cgroup_skb.c -- -I../headers
// main loads the compiled eBPF objects, attaches the egress-count
// program to the first cgroup2 mount, and prints the packet counter
// once per second until the process is killed.
func main() {
	// Allow the current process to lock memory for eBPF resources.
	if err := rlimit.RemoveMemlock(); err != nil {
		log.Fatal(err)
	}
	// Load pre-compiled programs and maps into the kernel.
	objs := bpfObjects{}
	if err := loadBpfObjects(&objs, nil); err != nil {
		log.Fatalf("loading objects: %v", err)
	}
	defer objs.Close()
	// Get the first-mounted cgroupv2 path.
	cgroupPath, err := detectCgroupPath()
	if err != nil {
		log.Fatal(err)
	}
	// Link the count_egress_packets program to the cgroup.
	l, err := link.AttachCgroup(link.CgroupOptions{
		Path:    cgroupPath,
		Attach:  ebpf.AttachCGroupInetEgress,
		Program: objs.CountEgressPackets,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	log.Println("Counting packets...")
	// Read loop reporting the total amount of times the kernel
	// function was entered, once per second. The counter lives in the
	// BPF map at key 0; this loop never terminates on its own.
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		var value uint64
		if err := objs.PktCount.Lookup(uint32(0), &value); err != nil {
			log.Fatalf("reading map: %v", err)
		}
		log.Printf("number of packets: %d\n", value)
	}
}
// detectCgroupPath returns the mount point of the first-found cgroup2
// filesystem, parsed from /proc/mounts. (The old comment mentioned a
// cgroupPath global; there is none — the path is simply returned.)
func detectCgroupPath() (string, error) {
	f, err := os.Open("/proc/mounts")
	if err != nil {
		return "", err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// example fields: cgroup2 /sys/fs/cgroup/unified cgroup2 rw,nosuid,nodev,noexec,relatime 0 0
		fields := strings.Split(scanner.Text(), " ")
		if len(fields) >= 3 && fields[2] == "cgroup2" {
			return fields[1], nil
		}
	}
	// Previously a read error was silently reported as "not mounted";
	// surface it instead, per the bufio.Scanner contract.
	if err := scanner.Err(); err != nil {
		return "", err
	}
	return "", errors.New("cgroup2 not mounted")
}
|
package lineartable
import (
"bytes"
"errors"
"fmt"
)
// NewCircleLinkedList returns an empty circular linked list whose head
// sentinel points at itself.
func NewCircleLinkedList() *CircleLinkedList {
	h := &CircleListNode{}
	h.Next = h
	return &CircleLinkedList{h, 0}
}

// CircleListNode is a single node of the circular list.
type CircleListNode struct {
	Next *CircleListNode
	Data interface{}
}

// CircleLinkedList is a circular singly linked list with a head
// sentinel. Element positions are 1-based; position 0 is the sentinel.
type CircleLinkedList struct {
	Head *CircleListNode
	Len  int
}

// findNode walks index steps from the head sentinel and returns the
// node reached (index 0 yields the sentinel itself).
func (l *CircleLinkedList) findNode(index int) (*CircleListNode, error) {
	if index < 0 || index > l.Len {
		return nil, errors.New("index error")
	}
	p := l.Head
	for i := 0; i < index; i++ {
		p = p.Next
	}
	return p, nil
}

// Insert places v at position index (valid range 1..Len+1).
func (l *CircleLinkedList) Insert(index int, v interface{}) error {
	if index < 1 || index > l.Len+1 {
		return errors.New("index error")
	}
	// Splice the new node after the predecessor of position index.
	p, _ := l.findNode(index - 1)
	node := &CircleListNode{p.Next, v}
	p.Next = node
	l.Len++
	return nil
}

// Remove deletes the element at position index (1..Len).
func (l *CircleLinkedList) Remove(index int) error {
	if index < 1 || index > l.Len {
		return errors.New("index error")
	}
	p, _ := l.findNode(index - 1)
	p.Next = p.Next.Next
	l.Len--
	return nil
}

// Get returns the element at position index (1..Len).
func (l *CircleLinkedList) Get(index int) (interface{}, error) {
	if index < 1 || index > l.Len {
		return nil, errors.New("index error")
	}
	p, _ := l.findNode(index)
	return p.Data, nil
}

// Set overwrites the element at position index (1..Len) with v.
func (l *CircleLinkedList) Set(index int, v interface{}) error {
	if index < 1 || index > l.Len {
		return errors.New("index error")
	}
	p, _ := l.findNode(index)
	p.Data = v
	return nil
}

// Find returns the 1-based position of the first element equal to v,
// or -1 when absent.
func (l *CircleLinkedList) Find(v interface{}) int {
	// Walk until we are back at the sentinel. The previous loop
	// condition (p.Next != l.Head) stopped one node early and never
	// examined the LAST element of the list.
	i := 1
	for p := l.Head.Next; p != l.Head; p = p.Next {
		if p.Data == v {
			return i
		}
		i++
	}
	return -1
}

// FindAll returns the 1-based positions of every element equal to v.
func (l *CircleLinkedList) FindAll(v interface{}) (indexs []int) {
	// Same last-element fix as Find: iterate until the sentinel.
	i := 1
	for p := l.Head.Next; p != l.Head; p = p.Next {
		if p.Data == v {
			indexs = append(indexs, i)
		}
		i++
	}
	return indexs
}

// String renders the list for debugging: length, sentinel, then each
// data node with its address and successor.
func (l *CircleLinkedList) String() string {
	var buffer bytes.Buffer
	buffer.WriteString(fmt.Sprintf("LikedList: Length = %d, ", l.Len))
	buffer.WriteString(fmt.Sprintf("header = [%p,%d,%p->], ", l.Head, l.Head.Data, l.Head.Next))
	// An empty list previously printed the sentinel as if it were data.
	if l.Len == 0 {
		buffer.WriteString("data = []")
		return buffer.String()
	}
	node := l.Head.Next
	buffer.WriteString("data = [")
	for node.Next != l.Head {
		buffer.WriteString(fmt.Sprintf("%p,%d,%p->, ", node, node.Data, node.Next))
		node = node.Next
	}
	buffer.WriteString(fmt.Sprintf("%p,%d,%p]", node, node.Data, node.Next))
	return buffer.String()
}
|
package rest
import (
"fmt"
"time"
"github.com/jinmukeji/jiujiantang-services/pkg/rest"
analysispb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/analysis/v1"
generalpb "github.com/jinmukeji/proto/v3/gen/micro/idl/ptypes/v2"
ptypesv2 "github.com/jinmukeji/proto/v3/gen/micro/idl/ptypes/v2"
"github.com/kataras/iris/v12"
)
const (
	// GenderMale is the REST representation of a male user.
	GenderMale = 0
	// GenderFemale is the REST representation of a female user.
	GenderFemale = 1
	// GenderInvalid marks an invalid or unknown gender.
	GenderInvalid = -1
)

// Language is the language tag accepted by the report endpoints.
type Language string

const (
	// LanguageSimpleChinese is Simplified Chinese.
	LanguageSimpleChinese Language = "zh-Hans"
	// LanguageTraditionalChinese is Traditional Chinese.
	LanguageTraditionalChinese Language = "zh-Hant"
	// LanguageEnglish is English.
	LanguageEnglish Language = "en"
)

// GetMonthlyReportBody is the request body of the monthly-report API.
type GetMonthlyReportBody struct {
	Language Language `json:"language"`
}
// GetMonthlyReport serves the monthly report: it aggregates the user's
// measurements over the past 30 days (in the caller's time zone) and
// forwards the averages to the analysis RPC service.
func (h *v2Handler) GetMonthlyReport(ctx iris.Context) {
	userID, err := ctx.Params().GetInt("user_id")
	if err != nil {
		writeError(ctx, wrapError(ErrInvalidValue, "", err), false)
		return
	}
	// The seamless client is explicitly barred from this endpoint.
	if ctx.Values().GetString(ClientIDKey) == seamlessClient {
		writeError(
			ctx,
			wrapError(ErrDeniedToAccessAPI, "", fmt.Errorf("%s is denied to access this API", seamlessClient)),
			false,
		)
		return
	}
	var body GetMonthlyReportBody
	errReadJSON := ctx.ReadJSON(&body)
	if errReadJSON != nil {
		writeError(ctx, wrapError(ErrParsingRequestFailed, "", errReadJSON), false)
		return
	}
	language, err := mapRestLanguageToProto(body.Language)
	if err != nil {
		writeError(ctx, wrapError(ErrValueRequired, "", err), false)
		return
	}
	timeZone := getTimeZone(ctx)
	// Monthly history window, computed in the caller's time zone.
	location, _ := time.LoadLocation(timeZone)
	now := time.Now().In(location)
	// Window start: 30 days before today's 23:59:59.
	monthStart := time.Date(now.Year(), now.Month(), now.Day(), 23, 59, 59, 0, location).AddDate(0, 0, -30)
	// Window end: today's 23:59:59 (the AddDate(0,0,0) is a no-op kept
	// for symmetry with the start computation).
	monthEnd := time.Date(now.Year(), now.Month(), now.Day(), 23, 59, 59, 0, location).AddDate(0, 0, 0)
	statData, err := h.getWeekOrMonthStatData(ctx, userID, monthStart.UTC(), monthEnd.UTC(), MonthStatData)
	if err != nil {
		writeError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	// A non-empty error message means the record data is incomplete;
	// return it to the caller instead of a report.
	if statData.ErrorMessage != "" {
		rest.WriteOkJSON(ctx, WeeklyOrMonthlyReportResponse{
			ErrorMessage: statData.ErrorMessage,
		})
		return
	}
	// Build the analysis request from the averaged meridian values.
	req := new(analysispb.GetMonthlyAnalyzeResultRequest)
	req.UserId = int32(userID)
	req.Language = language
	req.Cid = rest.GetCidFromContext(ctx)
	req.CInfo = &analysispb.CInfo{
		C0: statData.AverageMeridian.C0,
		C1: statData.AverageMeridian.C1,
		C2: statData.AverageMeridian.C2,
		C3: statData.AverageMeridian.C3,
		C4: statData.AverageMeridian.C4,
		C5: statData.AverageMeridian.C5,
		C6: statData.AverageMeridian.C6,
		C7: statData.AverageMeridian.C7,
	}
	req.PhysicalDialectics = statData.PhysicalDialectics
	resp, err := h.rpcAnalysisSvc.GetMonthlyAnalyzeResult(
		newRPCContext(ctx), req,
	)
	if err != nil {
		writeRPCInternalError(ctx, err, false)
		return
	}
	// Modules derived from the engine's analysis result.
	analysisReportContent, err := getAnalysisModules(resp.GetReport().GetModules())
	if err != nil {
		writeRPCInternalError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	// User-profile module.
	userProfileModule, err := getUserProfileModule(resp.GetReport().GetUserProfile())
	if err != nil {
		writeRPCInternalError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	analysisReportContent.UserProfile = userProfileModule
	rest.WriteOkJSON(ctx, WeeklyOrMonthlyReportResponse{
		ReportVersion: resp.ReportVersion,
		ReportContent: analysisReportContent,
		StartTime:     monthStart,
		EndTime:       monthEnd,
	})
}
// getphysicalDialecticsFromLists collects the physical-dialectic strings
// from the analysis result of every record in cData. Records whose RPC
// lookup or module parsing fails are skipped silently.
func (h *v2Handler) getphysicalDialecticsFromLists(ctx iris.Context, cData []*CData, language ptypesv2.Language) []string {
	var out []string
	for _, record := range cData {
		req := &analysispb.GetAnalyzeResultByRecordIDRequest{}
		req.RecordId = int32(record.RecordId)
		resp, err := h.rpcAnalysisSvc.GetAnalyzeResultByRecordID(newRPCContext(ctx), req)
		if err != nil {
			continue
		}
		content, err := getAnalysisModules(resp.GetReport().GetModules())
		if err != nil {
			continue
		}
		for _, lookup := range content.PhysicalDialectics.Lookups {
			out = append(out, lookup.Content)
		}
	}
	return out
}
// mapProtoGenderToRest converts a proto Gender to its REST int64 value.
// MALE and FEMALE map to their constants; every other value (INVALID,
// UNSET, or unknown) yields GenderInvalid plus an error.
func mapProtoGenderToRest(gender generalpb.Gender) (int64, error) {
	switch gender {
	case generalpb.Gender_GENDER_MALE:
		return GenderMale, nil
	case generalpb.Gender_GENDER_FEMALE:
		return GenderFemale, nil
	default:
		return GenderInvalid, fmt.Errorf("invalid proto gender %d", gender)
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"runtime"
"time"
)
// openFile reads one large hard-coded file to demonstrate that blocking
// file I/O on a goroutine does not starve the scheduler (see main).
func openFile() {
	//_, err := ioutil.ReadFile("/Users/ckhero/php-5.6.40.tar.gz") // just pass the file name
	_, err := ioutil.ReadFile("/Users/ckhero/sophisticate/sophiticate/IPZ-933-C.mp4") // just pass the file name
	if err != nil {
		fmt.Print(err)
	}
	fmt.Println("OPEN FILE")
}
// main pins the program to a single OS-level processor slot, starts the
// blocking file read on another goroutine, and keeps printing to show
// the main goroutine is still being scheduled. Runs forever.
func main() {
	runtime.GOMAXPROCS(1)
	go openFile()
	for {
		time.Sleep(time.Millisecond * 100)
		fmt.Println("i get schedule")
	}
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
	// RefObjectsComponentType refers to the type of ref-objects
	RefObjectsComponentType = "ref-objects"
)

// RefObjectsComponentSpec defines the spec of ref-objects component
type RefObjectsComponentSpec struct {
	// Objects the referrers to the Kubernetes objects
	Objects []ObjectReferrer `json:"objects,omitempty"`
	// URLs are the links that stores the referred objects
	URLs []string `json:"urls,omitempty"`
}

// ObjectReferrer selects Kubernetes objects
type ObjectReferrer struct {
	// ObjectTypeIdentifier identifies the type of referred objects
	ObjectTypeIdentifier `json:",inline"`
	// ObjectSelector select object by name or labelSelector
	ObjectSelector `json:",inline"`
}

// ObjectTypeIdentifier identifies the scheme of Kubernetes object
type ObjectTypeIdentifier struct {
	// Resource is the resource name of the Kubernetes object.
	Resource string `json:"resource"`
	// Group is the API Group of the Kubernetes object.
	Group string `json:"group"`
	// LegacyObjectTypeIdentifier is the legacy identifier
	// Deprecated: use resource/group instead
	LegacyObjectTypeIdentifier `json:",inline"`
}

// LegacyObjectTypeIdentifier legacy object type identifier
type LegacyObjectTypeIdentifier struct {
	// APIVersion is the APIVersion of the Kubernetes object.
	APIVersion string `json:"apiVersion"`
	// Kind is the Kind of the Kubernetes object.
	Kind string `json:"kind"`
}

// ObjectSelector selector for Kubernetes object
type ObjectSelector struct {
	// Name is the name of the Kubernetes object.
	// If empty, it will inherit the application component's name.
	Name string `json:"name,omitempty"`
	// Namespace is the namespace for selecting Kubernetes objects.
	// If empty, it will inherit the application's namespace.
	Namespace string `json:"namespace,omitempty"`
	// Cluster is the cluster for selecting Kubernetes objects.
	// If empty, it will use the local cluster
	Cluster string `json:"cluster,omitempty"`
	// LabelSelector selects Kubernetes objects by labels
	// Exclusive to "name"
	LabelSelector map[string]string `json:"labelSelector,omitempty"`
	// DeprecatedLabelSelector a deprecated alias to LabelSelector
	// Deprecated: use labelSelector instead.
	DeprecatedLabelSelector map[string]string `json:"selector,omitempty"`
}
|
// Copyright © 2017 Aeneas Rekkas <aeneas+oss@aeneas.io>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
"fmt"
"net/http"
"crypto/tls"
"github.com/ory/hydra/config"
hydra "github.com/ory/hydra/sdk/go/hydra/swagger"
"github.com/spf13/cobra"
)
// RevocationHandler revokes OAuth2 tokens against the configured hydra
// cluster.
type RevocationHandler struct {
	Config *config.Config
}

// newRevocationHandler wires a RevocationHandler to the given config.
func newRevocationHandler(c *config.Config) *RevocationHandler {
	return &RevocationHandler{Config: c}
}
// RevokeToken revokes the single token passed as CLI argument, using the
// configured client ID/secret as HTTP basic auth. Flags can disable TLS
// verification or fake TLS termination via X-Forwarded-Proto.
func (h *RevocationHandler) RevokeToken(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		fmt.Print(cmd.UsageString())
		return
	}
	token := args[0]

	handler := hydra.NewOAuth2ApiWithBasePath(h.Config.ClusterURL)
	handler.Configuration.Username = h.Config.ClientID
	handler.Configuration.Password = h.Config.ClientSecret

	if skip, _ := cmd.Flags().GetBool("skip-tls-verify"); skip {
		handler.Configuration.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	if term, _ := cmd.Flags().GetBool("fake-tls-termination"); term {
		handler.Configuration.DefaultHeader["X-Forwarded-Proto"] = "https"
	}

	response, err := handler.RevokeOAuth2Token(token)
	checkResponse(response, err, http.StatusOK)
	fmt.Printf("Revoked token %s", token)
}
|
package model
// UnableToRenameFileError reports a failure to rename a file; Err holds
// the human-readable description. (Godoc added: exported identifiers
// previously had no doc comments.)
type UnableToRenameFileError struct {
	Err string
}

// Error implements the error interface by returning the stored message.
func (e UnableToRenameFileError) Error() string {
	return e.Err
}
package provider
import "github.com/nats-io/nats.go"
// Provider abstracts a NATS-backed message provider.
type Provider interface {
	// Connect establishes the NATS connection/subscription using the
	// given connection parameters (jetStream selects JetStream mode —
	// exact semantics are defined by the implementation).
	Connect(url string, queueName string, subject string, providerName string, reply string, jetStream bool)
	// Provide returns a payload; presumably the next message body to
	// publish — confirm against implementations.
	Provide() []byte
	// OnReply handles a reply message received from NATS.
	OnReply(msg *nats.Msg)
}
|
package main
import "fmt"
import "sort"
//import "reflect"
//abbbbccdde
//aeccddbbbb
// ss pairs a byte with its occurrence count in the input string.
type ss struct {
	freq int
	ch   uint8
}

// frequency_sort prints the characters of ip reordered so that the
// least frequent characters come first (e.g. "abbbbccdde" ->
// "aeccddbbbb", matching the example in the file header). Equal
// frequencies are broken by byte value so the output is deterministic —
// previously the order of ties depended on random map iteration.
func frequency_sort(ip string) {
	if len(ip) == 0 {
		return
	}

	counts := make(map[uint8]int, 10)
	for i := 0; i < len(ip); i++ {
		counts[ip[i]]++
	}

	sl := make([]ss, 0, len(counts))
	for ch, freq := range counts {
		sl = append(sl, ss{freq, ch})
	}
	sort.Slice(sl, func(i, j int) bool {
		if sl[i].freq != sl[j].freq {
			return sl[i].freq < sl[j].freq
		}
		// Deterministic tie-break (sort.Slice is not stable).
		return sl[i].ch < sl[j].ch
	})

	op := make([]uint8, 0, len(ip))
	for _, e := range sl {
		for j := 0; j < e.freq; j++ {
			op = append(op, e.ch)
		}
	}
	fmt.Println(string(op))
}
// main reads a single whitespace-free token from stdin and prints its
// characters sorted by ascending frequency.
func main() {
	var input string
	fmt.Scanln(&input)
	frequency_sort(input)
	//fmt.Println(reflect.TypeOf(input[0]))
	return
}
|
package service
import (
"context"
"fmt"
"strconv"
"time"
"boiler/pkg/entity"
"boiler/pkg/errors"
"boiler/pkg/store"
"github.com/lestrrat-go/jwx/jwa"
"github.com/lestrrat-go/jwx/jwt"
"golang.org/x/crypto/bcrypt"
)
// AddUser stores a new user. The plaintext password in user.Password is
// replaced by its bcrypt hash before insertion; the insert runs inside
// a store transaction that is rolled back on failure (a rollback error
// is joined onto the original one).
func (s *Service) AddUser(ctx context.Context, user *entity.User) error {
	hash, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)
	if err != nil {
		return fmt.Errorf("could not generate password; %w", err)
	}
	tx, err := s.store.Tx()
	if err != nil {
		return fmt.Errorf("could not begin transaction; %w", err)
	}
	// From here on the entity carries the hash, never the plaintext.
	user.Password = string(hash)
	err = s.store.AddUser(ctx, tx, user)
	if err != nil {
		if er := tx.Rollback(); er != nil {
			err = fmt.Errorf("%s; %w", er, err)
		}
		return fmt.Errorf("could not add user; %w", err)
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("could not add user; %w", err)
	}
	return nil
}
// AuthUser authenticates email+password. On success user is populated
// and *token receives a signed RS256 JWT; otherwise ErrNotFound (no or
// ambiguous email match) or ErrInvalidPassword is returned.
func (s *Service) AuthUser(ctx context.Context, email, password string, user *entity.User, token *string) error {
	var IDs []int64
	err := s.store.FilterUsersID(ctx, store.FilterUsers{Email: email, Limit: FilterUsersDefaultLimit}, &IDs)
	if err != nil {
		return err
	}
	// The email must resolve to exactly one user.
	if len(IDs) != 1 {
		return errors.ErrNotFound
	}
	err = s.GetUserByID(ctx, IDs[0], user)
	if err != nil {
		return err
	}
	// Compare against the stored bcrypt hash.
	if bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)) != nil {
		return errors.ErrInvalidPassword
	}
	t := jwt.New()
	// Standard claims; see https://tools.ietf.org/html/rfc7519#page-9
	_ = t.Set(jwt.SubjectKey, strconv.FormatInt(user.ID, 10))
	_ = t.Set(jwt.IssuedAtKey, time.Now().Unix())
	_ = t.Set(jwt.ExpirationKey, time.Now().Add(s.config.JWT.ExpireIn).Unix())
	_ = t.Set(jwt.AudienceKey, "auth")
	_ = t.Set(jwt.IssuerKey, s.config.JWT.Issuer)
	raw, err := jwt.Sign(t, jwa.RS256, s.config.JWT.PrivateKey)
	if err != nil {
		return err
	}
	*token = string(raw)
	return nil
}
// EnqueueDeleteUser schedules asynchronous deletion of the user by
// putting a DeleteUser job (carrying the user ID) on the queue.
func (s *Service) EnqueueDeleteUser(ctx context.Context, userID int64) error {
	// ctx is part of the service-wide signature; the enqueuer call
	// itself does not take a context.
	_, err := s.enqueuer.Enqueue(DeleteUser, map[string]interface{}{"id": userID})
	return err
}
// DeleteUser removes the user row and then the user's emails inside a
// single transaction. ErrNotFound from either delete is tolerated, so
// repeating the call for an already-deleted user succeeds.
func (s *Service) DeleteUser(ctx context.Context, userID int64) error {
	tx, err := s.store.Tx()
	if err != nil {
		return fmt.Errorf("could not begin delete user transaction; %w", err)
	}
	err = s.store.DeleteUser(ctx, tx, userID)
	if err != nil && err != errors.ErrNotFound {
		if er := tx.Rollback(); er != nil {
			err = fmt.Errorf("%s; %w", er, err)
		}
		return fmt.Errorf("could not delete user; %w", err)
	}
	err = s.store.DeleteEmailsByUserID(ctx, tx, userID)
	if err != nil && err != errors.ErrNotFound {
		if er := tx.Rollback(); er != nil {
			err = fmt.Errorf("%s; %w", er, err)
		}
		return fmt.Errorf("could not delete user emails; %w", err)
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("could not commit delete user; %w", err)
	}
	return nil
}
// FilterUsers loads the users matching filter into users; a zero limit
// falls back to FilterUsersDefaultLimit.
func (s *Service) FilterUsers(ctx context.Context, filter store.FilterUsers, users *[]entity.User) error {
	if filter.Limit == 0 {
		filter.Limit = FilterUsersDefaultLimit
	}
	var ids []int64
	if err := s.store.FilterUsersID(ctx, filter, &ids); err != nil {
		return err
	}
	return s.store.FetchUsers(ctx, ids, users)
}
// GetUserByID loads the user with the given ID into user; returns
// errors.ErrNotFound when no such user exists.
func (s *Service) GetUserByID(ctx context.Context, userID int64, user *entity.User) error {
	var found []entity.User
	if err := s.store.FetchUsers(ctx, []int64{userID}, &found); err != nil {
		return err
	}
	if len(found) != 1 {
		return errors.ErrNotFound
	}
	*user = found[0]
	return nil
}
// GetUserByEmail loads the user owning the given email address into
// user; errors.ErrNotFound unless exactly one user matches.
func (s *Service) GetUserByEmail(ctx context.Context, email string, user *entity.User) error {
	var ids []int64
	err := s.store.FilterUsersID(
		ctx,
		store.FilterUsers{Email: email, Limit: FilterUsersDefaultLimit},
		&ids,
	)
	if err != nil {
		return err
	}
	if len(ids) != 1 {
		return errors.ErrNotFound
	}
	return s.GetUserByID(ctx, ids[0], user)
}
|
package initiate
import (
"os"
"proximity/config"
"github.com/ralstan-vaz/go-errors"
)
// Version holds the application version string. It is assigned
// elsewhere — presumably at build time via linker flags; confirm.
var Version string
// Env returns the environment taken from the TIER variable. When TIER
// is unset it is first set to the development environment, so callers
// always receive a non-empty value (or an error if Setenv fails).
func Env() (string, error) {
	if env := os.Getenv("TIER"); env != "" {
		return env, nil
	}
	if err := os.Setenv("TIER", config.ENVDevelopment); err != nil {
		return "", errors.NewInternalError(err).SetCode("INITIATE.ENV.SETENV_FAILED")
	}
	return os.Getenv("TIER"), nil
}
// SetApmEnv sets environment flags consumed by the Elastic APM agent.
// NOTE(review): the env parameter is currently unused — confirm whether
// per-environment flags were intended here.
func SetApmEnv(env string) {
	os.Setenv("ELASTIC_APM_CAPTURE_HEADERS", "false")
}
|
package main
import (
"fmt"
"github.com/sinksmell/files-cmp/client/utils"
"github.com/sinksmell/files-cmp/models"
)
const (
	// HOST is the base URL of the comparison server's check endpoint.
	HOST string = "http://localhost:8080/v1/check"
	// HASH_URL is the sub-path for group-hash comparison.
	HASH_URL string = "/hash"
	// FILE_URL is the sub-path for file-content comparison.
	FILE_URL string = "/file"
)
var (
	groups    []string // list of group files
	diffFiles []string // small files that still need comparing
)
// init prepares the local state before main runs: it builds the file
// list, groups files (each group file records file names and their MD5
// values), and loads the resulting list of group files.
func init() {
	// Initialize the HTTP client (currently disabled).
	//utils.Init()
	// Build the file list.
	models.InitFiles(models.FILE_PATH)
	// Group files; each group file stores file names and MD5 values.
	models.Divide(models.FILE_PATH, models.GROUP_PATH)
	// Load the list of group files.
	groups, _ = models.GetAllFiles(models.GROUP_PATH)
	diffFiles = make([]string, 0)
}
// main sends each group's MD5 to the server and, for groups whose hash
// differs, drills down into per-file comparison via handleDiffGroup.
func main() {
	var (
		resp *models.Response // comparison response from the server
		msg  string           // message to print
		err  error
	)
	for _, grp := range groups {
		if resp, err = utils.SendGrpMd5(grp, HOST+HASH_URL); err != nil {
			fmt.Println(err)
			continue
		}
		switch resp.Code {
		case models.EQUAL:
			// Group MD5s match — nothing to do; print in gray.
			msg = setColor(grp+"\t分组文件内容相同!", 0, 0, 37)
			fmt.Println(msg)
		case models.NOT_EQUAL:
			// Group MD5s differ: send the group file over to find the
			// small files that actually need comparing.
			// Highlighted in red.
			msg = setColor(grp+"\t分组文件内容不同!", 0, 0, 31)
			fmt.Println(msg)
			handleDiffGroup(grp)
			fmt.Println("")
		case models.REQ_ERR:
			fmt.Println("Request Err!")
		}
	}
}
// handleDiffGroup handles a group whose MD5 differs from the server's:
// it uploads the group file (compare type cmp_group) to learn which
// small files differ, then uploads each of those files (cmp_file) and
// prints the per-file diff the server returns.
func handleDiffGroup(grpFile string) {
	var (
		res *models.Response
		err error
	)
	// Send the group file; cmp_group compares the matching group file
	// on the server side.
	if res, err = utils.PostFile(grpFile, HOST+FILE_URL, models.GROUP_PATH, models.CMP_GROUP); err != nil {
		fmt.Println(err)
		return
	}
	// res.Ack carries the set of files the server wants compared.
	if len(res.Ack) > 0 {
		for _, fname := range res.Ack {
			// Upload each small file and print the content diff.
			if _res, err := utils.PostFile(fname, HOST+FILE_URL, models.FILE_PATH, models.CMP_FILE); err != nil {
				fmt.Println(err)
				fmt.Println("发送错误!")
				continue
			} else {
				fmt.Println(_res.Diff)
			}
		}
	}
}
// setColor wraps msg in an ANSI SGR escape sequence built from the
// display mode (conf), background and foreground color codes, and
// resets attributes afterwards.
func setColor(msg string, conf, bg, text int) string {
	const esc = 0x1B // ASCII ESC
	return fmt.Sprintf("%c[%d;%d;%dm%s%c[0m", esc, conf, bg, text, msg, esc)
}
|
package service
import (
entity "github.com/Surafeljava/Court-Case-Management-System/Entity"
notificationuse "github.com/Surafeljava/Court-Case-Management-System/notificationUse"
)
//NotificationServiceImpl struct
// NotificationServiceImpl implements notificationuse.NotificationService
// by delegating to a NotificationRepository.
type NotificationServiceImpl struct {
	notfRepo notificationuse.NotificationRepository
}

//NewNotificationServiceImpl returns new Notification service Object
func NewNotificationServiceImpl(notf notificationuse.NotificationRepository) notificationuse.NotificationService {
	return &NotificationServiceImpl{notfRepo: notf}
}
// Notifications returns every stored notification, or the repository's
// errors when retrieval fails.
func (notf *NotificationServiceImpl) Notifications() ([]entity.Notification, []error) {
	all, errs := notf.notfRepo.Notifications()
	if len(errs) != 0 {
		return nil, errs
	}
	return all, errs
}
//ViewNotification retrieves a Notification by its id(title)
func (notf *NotificationServiceImpl) ViewNotification(id uint) (*entity.Notification, []error) {
	notification, errs := notf.notfRepo.ViewNotification(id)
	if len(errs) > 0 {
		return nil, errs
	}
	return notification, errs
}
//PostNotification admin posts a notifiacication
// PostNotification stores a new notification and returns the persisted
// entity, or the repository errors on failure.
func (notf *NotificationServiceImpl) PostNotification(notification *entity.Notification) (*entity.Notification, []error) {
	ntf, errs := notf.notfRepo.PostNotification(notification)
	if len(errs) > 0 {
		return nil, errs
	}
	return ntf, errs
}
// UpdateNotification updates an existing notification and returns the
// updated entity, or the repository errors on failure.
func (notf *NotificationServiceImpl) UpdateNotification(notification *entity.Notification) (*entity.Notification, []error) {
	notfic, errs := notf.notfRepo.UpdateNotification(notification)
	if len(errs) > 0 {
		return nil, errs
	}
	return notfic, errs
}
// DeleteNotification deletes the notification with the given id and
// returns the deleted entity, or the repository errors on failure.
func (notf *NotificationServiceImpl) DeleteNotification(id uint) (*entity.Notification, []error) {
	ntf, errs := notf.notfRepo.DeleteNotification(id)
	if len(errs) > 0 {
		return nil, errs
	}
	// Return errs (not a literal nil) on success so the contract is
	// consistent with every other method of this service.
	return ntf, errs
}
|
package logging
import (
"context"
"github.com/sirupsen/logrus"
)
// PrintfLogger is a logger that implements a common Printf logger.
// It adapts a logrus.Logger so every Printf call is logged at the
// fixed, preconfigured level.
type PrintfLogger struct {
	level  logrus.Level
	logrus *logrus.Logger
}

// Printf is the implementation of the interface; it forwards the
// format and args to logrus at the configured level.
func (l *PrintfLogger) Printf(format string, args ...any) {
	l.logrus.Logf(l.level, format, args...)
}
// CtxPrintfLogger is a logger that implements a common Printf logger
// with a ctx. The context argument is accepted for interface
// compatibility but is not used.
type CtxPrintfLogger struct {
	level  logrus.Level
	logrus *logrus.Logger
}

// Printf is the implementation of the interface; the context is
// ignored and the message is logged at the configured level.
func (l *CtxPrintfLogger) Printf(_ context.Context, format string, args ...any) {
	l.logrus.Logf(l.level, format, args...)
}
|
/*
* @lc app=leetcode.cn id=173 lang=golang
*
* [173] 二叉搜索树迭代器
*/
package main
// @lc code=start
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// type TreeNode struct {
// Val int
// Left *TreeNode
// Right *TreeNode
// }
// BSTIterator iterates a binary search tree in ascending order by
// flattening it into an in-order slice of nodes up front.
type BSTIterator struct {
	vals []*TreeNode
}

// Constructor builds an iterator positioned before the smallest value.
func Constructor(root *TreeNode) BSTIterator {
	it := BSTIterator{}
	it.inorder(root)
	return it
}

// inorder appends the subtree's nodes to vals in in-order sequence.
func (it *BSTIterator) inorder(root *TreeNode) {
	if root == nil {
		return
	}
	it.inorder(root.Left)
	it.vals = append(it.vals, root)
	it.inorder(root.Right)
}

// Next pops and returns the next-smallest value; it must only be
// called while HasNext reports true.
func (it *BSTIterator) Next() int {
	v := it.vals[0].Val
	it.vals = it.vals[1:]
	return v
}

// HasNext reports whether any values remain.
func (it *BSTIterator) HasNext() bool {
	return len(it.vals) > 0
}
/**
* Your BSTIterator object will be instantiated and called as such:
* obj := Constructor(root);
* param_1 := obj.Next();
* param_2 := obj.HasNext();
*/
// @lc code=end
|
// common functions used across multiple files
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/bwmarrin/discordgo"
)
// getFileFullPath resolves filename to its on-disk location: the current
// directory in debug mode, otherwise $HOME/.config/gapbot. It fails when
// $HOME is unset outside debug mode.
func getFileFullPath(filename string) (string, error) {
	if getDebugMode() {
		return fmt.Sprintf("./%s", filename), nil
	}
	homePath := os.Getenv("HOME")
	if homePath == "" {
		return "", errors.New("Use Linux and set your $HOME variable you filthy casual")
	}
	return fmt.Sprintf("%s/.config/gapbot/%s", homePath, filename), nil
}
// loadJSON resolves filename via getFileFullPath, reads the file, and
// unmarshals its JSON content into v.
func loadJSON(filename string, v interface{}) error {
	filePath, err := getFileFullPath(filename)
	if err != nil {
		return err
	}
	// ioutil.ReadFile opens and closes the file itself; the previous
	// os.Open + ReadAll version never closed the handle (fd leak).
	data, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, v)
}
// writeJSON marshals v to JSON and writes it to the resolved location of
// filename with permissions 0644.
func writeJSON(filename string, v interface{}) error {
	filePath, err := getFileFullPath(filename)
	if err != nil {
		return err
	}
	marshalled, err := json.Marshal(v)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filePath, marshalled, 0644)
}
// itemInSlice reports whether item occurs in slice.
func itemInSlice(item string, slice []string) bool {
	for i := range slice {
		if slice[i] == item {
			return true
		}
	}
	return false
}
// dmUser sends message to user via a direct-message channel, logging
// (but not returning) any error from channel creation or sending.
func dmUser(s *discordgo.Session, user discordgo.User, message string) {
	channel, err := s.UserChannelCreate(user.ID)
	if err != nil {
		fmt.Printf("Error creating channel: %s", err)
		return
	}
	if _, err = s.ChannelMessageSend(channel.ID, message); err != nil {
		fmt.Printf("Error sending message to user: %s", err)
	}
}
|
package sese
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00700101 is the XML document wrapper for the ISO 20022
// sese.007.001.01 (TransferInConfirmation) message.
type Document00700101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:sese.007.001.01 Document"`
	Message *TransferInConfirmation `xml:"sese.007.001.01"`
}

// AddMessage allocates the document's message payload and returns it so
// the caller can populate it.
func (d *Document00700101) AddMessage() *TransferInConfirmation {
	d.Message = new(TransferInConfirmation)
	return d.Message
}
// Scope
// TheTransferInConfirmation message is sent by an executing party to the instructing party or the instructing party's designated agent.
// This message is used to confirm the receipt of a financial instrument, free of payment, at a given date, from a specified party. This message can also be used to confirm the transfer a financial instrument from an own account or from a third party.
// Usage
// TheTransferInConfirmation message is used by an executing party to confirm to the instructing party receipt of a financial instrument, either from another account owned by the instructing party or from a third party.
type TransferInConfirmation struct {
	// Reference to a linked message that was previously received.
	RelatedReference *iso20022.AdditionalReference2 `xml:"RltdRef"`
	// Collective reference identifying a set of messages. Optional.
	PoolReference *iso20022.AdditionalReference2 `xml:"PoolRef,omitempty"`
	// Reference to a linked message that was previously sent. Optional.
	PreviousReference *iso20022.AdditionalReference2 `xml:"PrvsRef,omitempty"`
	// General information related to the transfer of a financial instrument.
	TransferDetails *iso20022.Transfer4 `xml:"TrfDtls"`
	// Information related to the financial instrument received.
	FinancialInstrumentDetails *iso20022.FinancialInstrument3 `xml:"FinInstrmDtls"`
	// Information related to the account into which the financial instrument was received.
	AccountDetails *iso20022.InvestmentAccount10 `xml:"AcctDtls"`
	// Information related to the delivering side of the transfer.
	SettlementDetails *iso20022.DeliverInformation2 `xml:"SttlmDtls"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block. Optional, repeatable.
	Extension []*iso20022.Extension1 `xml:"Xtnsn,omitempty"`
}
// AddRelatedReference allocates and returns the RelatedReference element.
func (t *TransferInConfirmation) AddRelatedReference() *iso20022.AdditionalReference2 {
	t.RelatedReference = new(iso20022.AdditionalReference2)
	return t.RelatedReference
}

// AddPoolReference allocates and returns the optional PoolReference element.
func (t *TransferInConfirmation) AddPoolReference() *iso20022.AdditionalReference2 {
	t.PoolReference = new(iso20022.AdditionalReference2)
	return t.PoolReference
}

// AddPreviousReference allocates and returns the optional PreviousReference element.
func (t *TransferInConfirmation) AddPreviousReference() *iso20022.AdditionalReference2 {
	t.PreviousReference = new(iso20022.AdditionalReference2)
	return t.PreviousReference
}

// AddTransferDetails allocates and returns the TransferDetails element.
func (t *TransferInConfirmation) AddTransferDetails() *iso20022.Transfer4 {
	t.TransferDetails = new(iso20022.Transfer4)
	return t.TransferDetails
}

// AddFinancialInstrumentDetails allocates and returns the FinancialInstrumentDetails element.
func (t *TransferInConfirmation) AddFinancialInstrumentDetails() *iso20022.FinancialInstrument3 {
	t.FinancialInstrumentDetails = new(iso20022.FinancialInstrument3)
	return t.FinancialInstrumentDetails
}

// AddAccountDetails allocates and returns the AccountDetails element.
func (t *TransferInConfirmation) AddAccountDetails() *iso20022.InvestmentAccount10 {
	t.AccountDetails = new(iso20022.InvestmentAccount10)
	return t.AccountDetails
}

// AddSettlementDetails allocates and returns the SettlementDetails element.
func (t *TransferInConfirmation) AddSettlementDetails() *iso20022.DeliverInformation2 {
	t.SettlementDetails = new(iso20022.DeliverInformation2)
	return t.SettlementDetails
}

// AddExtension appends a new Extension1 entry to Extension and returns it.
func (t *TransferInConfirmation) AddExtension() *iso20022.Extension1 {
	newValue := new(iso20022.Extension1)
	t.Extension = append(t.Extension, newValue)
	return newValue
}
|
package fsm
//-----------------------------------------------------------------------------
// Activate runs s and each successive state in turn, following the chain
// returned by each state's Activate call, until the next state is nil or
// an activation returns an error (which is returned to the caller).
func Activate(s State) (funcErr error) {
	current := s
	for current != nil {
		var next State
		next, funcErr = current.Activate()
		if funcErr != nil {
			return funcErr
		}
		current = next
	}
	return nil
}
//-----------------------------------------------------------------------------
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
"context"
"math"
"time"
"github.com/pingcap/tidb/metrics"
"github.com/prometheus/client_golang/prometheus"
)
// Phases to trace. Each value names a distinct activity a TTL worker can
// be in; PhaseTracer accumulates the time spent per phase.
var (
	PhaseIdle      = "idle"
	PhaseBeginTxn  = "begin_txn"
	PhaseCommitTxn = "commit_txn"
	PhaseQuery     = "query"
	PhaseCheckTTL  = "check_ttl"
	PhaseWaitRetry = "wait_retry"
	PhaseDispatch  = "dispatch"
	PhaseWaitToken = "wait_token"
	PhaseOther     = "other"
)
// TTL metrics
var (
SelectSuccessDuration prometheus.Observer
SelectErrorDuration prometheus.Observer
DeleteSuccessDuration prometheus.Observer
DeleteErrorDuration prometheus.Observer
ScannedExpiredRows prometheus.Counter
DeleteSuccessExpiredRows prometheus.Counter
DeleteErrorExpiredRows prometheus.Counter
RunningJobsCnt prometheus.Gauge
CancellingJobsCnt prometheus.Gauge
ScanningTaskCnt prometheus.Gauge
DeletingTaskCnt prometheus.Gauge
WaterMarkScheduleDelayNames = []struct {
Name string
Delay time.Duration
}{
{
Name: "01 hour",
Delay: time.Hour,
},
{
Name: "02 hour",
Delay: time.Hour,
},
{
Name: "06 hour",
Delay: 6 * time.Hour,
},
{
Name: "12 hour",
Delay: 12 * time.Hour,
},
{
Name: "24 hour",
Delay: 24 * time.Hour,
},
{
Name: "72 hour",
Delay: 72 * time.Hour,
},
{
Name: "one week",
Delay: 72 * time.Hour,
},
{
Name: "others",
Delay: math.MaxInt64,
},
}
)
// init binds the package-level metric variables at load time.
func init() {
	InitMetricsVars()
}
// InitMetricsVars init ttl metrics vars vars.
func InitMetricsVars() {
SelectSuccessDuration = metrics.TTLQueryDuration.With(
prometheus.Labels{metrics.LblSQLType: "select", metrics.LblResult: metrics.LblOK})
SelectErrorDuration = metrics.TTLQueryDuration.With(
prometheus.Labels{metrics.LblSQLType: "select", metrics.LblResult: metrics.LblError})
DeleteSuccessDuration = metrics.TTLQueryDuration.With(
prometheus.Labels{metrics.LblSQLType: "delete", metrics.LblResult: metrics.LblOK})
DeleteErrorDuration = metrics.TTLQueryDuration.With(
prometheus.Labels{metrics.LblSQLType: "delete", metrics.LblResult: metrics.LblError})
ScannedExpiredRows = metrics.TTLProcessedExpiredRowsCounter.With(
prometheus.Labels{metrics.LblSQLType: "select", metrics.LblResult: metrics.LblOK})
DeleteSuccessExpiredRows = metrics.TTLProcessedExpiredRowsCounter.With(
prometheus.Labels{metrics.LblSQLType: "delete", metrics.LblResult: metrics.LblOK})
DeleteErrorExpiredRows = metrics.TTLProcessedExpiredRowsCounter.With(
prometheus.Labels{metrics.LblSQLType: "delete", metrics.LblResult: metrics.LblError})
RunningJobsCnt = metrics.TTLJobStatus.With(prometheus.Labels{metrics.LblType: "running"})
CancellingJobsCnt = metrics.TTLJobStatus.With(prometheus.Labels{metrics.LblType: "cancelling"})
ScanningTaskCnt = metrics.TTLTaskStatus.With(prometheus.Labels{metrics.LblType: "scanning"})
DeletingTaskCnt = metrics.TTLTaskStatus.With(prometheus.Labels{metrics.LblType: "deleting"})
scanWorkerPhases = initWorkerPhases("scan_worker")
deleteWorkerPhases = initWorkerPhases("delete_worker")
}
// initWorkerPhases builds the phase-name to time-counter map for one
// worker type, registering a TTLPhaseTime child per known phase.
func initWorkerPhases(workerType string) map[string]prometheus.Counter {
	allPhases := []string{
		PhaseIdle,
		PhaseBeginTxn,
		PhaseCommitTxn,
		PhaseQuery,
		PhaseWaitRetry,
		PhaseDispatch,
		PhaseCheckTTL,
		PhaseWaitToken,
		PhaseOther,
	}
	counters := make(map[string]prometheus.Counter, len(allPhases))
	for _, phase := range allPhases {
		counters[phase] = metrics.TTLPhaseTime.WithLabelValues(workerType, phase)
	}
	return counters
}
// scanWorkerPhases and deleteWorkerPhases hold the per-phase time
// counters for the two worker types; both are populated by InitMetricsVars.
var scanWorkerPhases map[string]prometheus.Counter
var deleteWorkerPhases map[string]prometheus.Counter
// PhaseTracer is used to trace the duration spent in each phase.
type PhaseTracer struct {
	getTime        func() time.Time                           // clock source (time.Now in the exported constructors)
	recordDuration func(phase string, duration time.Duration) // sink for measured phase durations
	phase          string                                     // current phase name; "" means no phase entered yet
	phaseTime      time.Time                                  // when the current phase began
}
// NewScanWorkerPhaseTracer returns a tracer for scan worker; measured
// durations are added to the scan worker's per-phase counters.
func NewScanWorkerPhaseTracer() *PhaseTracer {
	return newPhaseTracer(time.Now, func(status string, duration time.Duration) {
		// Unknown phases are silently ignored.
		if counter, ok := scanWorkerPhases[status]; ok {
			counter.Add(duration.Seconds())
		}
	})
}

// NewDeleteWorkerPhaseTracer returns a tracer for delete worker; measured
// durations are added to the delete worker's per-phase counters.
func NewDeleteWorkerPhaseTracer() *PhaseTracer {
	return newPhaseTracer(time.Now, func(status string, duration time.Duration) {
		// Unknown phases are silently ignored.
		if counter, ok := deleteWorkerPhases[status]; ok {
			counter.Add(duration.Seconds())
		}
	})
}
// newPhaseTracer builds a tracer with the given clock and duration sink.
// The initial phase is the empty string, which EnterPhase never records.
func newPhaseTracer(getTime func() time.Time, recordDuration func(status string, duration time.Duration)) *PhaseTracer {
	return &PhaseTracer{
		getTime:        getTime,
		recordDuration: recordDuration,
		phaseTime:      getTime(),
	}
}
// Phase returns the current phase. A nil receiver is tolerated and
// yields the empty string.
func (t *PhaseTracer) Phase() string {
	if t == nil {
		return ""
	}
	return t.phase
}
// EnterPhase switches the tracer to a new phase, first recording the
// time spent in the phase being left. The empty phase (the initial
// state) is never recorded. A nil receiver is a no-op.
func (t *PhaseTracer) EnterPhase(phase string) {
	if t == nil {
		return
	}
	current := t.getTime()
	if prev := t.phase; prev != "" {
		t.recordDuration(prev, current.Sub(t.phaseTime))
	}
	t.phase = phase
	t.phaseTime = current
}
// EndPhase ends the current phase by entering the empty phase, which
// records the time spent in the phase being left. A nil receiver is a no-op.
func (t *PhaseTracer) EndPhase() {
	if t == nil {
		return
	}
	t.EnterPhase("")
}
// ttlPhaseTraceKey is the unexported context key type used to store a
// *PhaseTracer in a context.
type ttlPhaseTraceKey struct{}

// CtxWithPhaseTracer create a new context carrying tracer; retrieve it
// with PhaseTracerFromCtx.
func CtxWithPhaseTracer(ctx context.Context, tracer *PhaseTracer) context.Context {
	return context.WithValue(ctx, ttlPhaseTraceKey{}, tracer)
}
// PhaseTracerFromCtx returns the tracer stored in ctx, or nil when none
// was attached via CtxWithPhaseTracer.
func PhaseTracerFromCtx(ctx context.Context) *PhaseTracer {
	tracer, _ := ctx.Value(ttlPhaseTraceKey{}).(*PhaseTracer)
	return tracer
}
// DelayMetricsRecord is the delay metric record for a table
type DelayMetricsRecord struct {
	TableID int64 // ID of the TTL table this record describes
	// LastJobTime is presumably when the table's last TTL job ran — TODO confirm against the producer.
	LastJobTime   time.Time
	AbsoluteDelay time.Duration
	// ScheduleRelativeDelay feeds the "schedule" watermark buckets in UpdateDelayMetrics.
	ScheduleRelativeDelay time.Duration
}
// getWaterMarkScheduleDelayName maps a delay to the first bucket in
// WaterMarkScheduleDelayNames that can hold it; anything past every
// bucket falls into the final catch-all entry.
func getWaterMarkScheduleDelayName(t time.Duration) string {
	buckets := WaterMarkScheduleDelayNames
	for i := range buckets {
		if t <= buckets[i].Delay {
			return buckets[i].Name
		}
	}
	return buckets[len(buckets)-1].Name
}
// UpdateDelayMetrics updates the metrics of TTL delay: it counts how
// many tables fall into each watermark-delay bucket and exports the
// counts as gauges.
func UpdateDelayMetrics(records map[int64]*DelayMetricsRecord) {
	// Pre-seed every bucket with zero so gauges for now-empty buckets
	// are reset rather than left at their previous values.
	scheduleMetrics := make(map[string]float64, len(WaterMarkScheduleDelayNames))
	for _, l := range WaterMarkScheduleDelayNames {
		scheduleMetrics[l.Name] = 0
	}
	for _, r := range records {
		name := getWaterMarkScheduleDelayName(r.ScheduleRelativeDelay)
		scheduleMetrics[name] = scheduleMetrics[name] + 1
	}
	for delay, v := range scheduleMetrics {
		metrics.TTLWatermarkDelay.With(prometheus.Labels{metrics.LblType: "schedule", metrics.LblName: delay}).Set(v)
	}
}
|
package lang
import (
"encoding/json"
"errors"
"log"
"math"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"tetra/lib/dbg"
"tetra/lib/levenshtein"
"tetra/lib/store"
)
var (
	// dict maps source strings to their translations for the loaded locale.
	dict = make(map[string]string)
	// store words which already printed debug message
	debug = make(map[string]bool)
	// rxlc validates locale tags such as "en-US" or "zh_CN".
	// NOTE(review): the region group has no trailing "?", so a bare
	// two-letter tag like "en" is rejected — confirm this is intended,
	// since "en" is used as a fallback elsewhere in this package.
	rxlc = regexp.MustCompile(`^[a-z][a-z](?:[\-_][A-Z][A-Z]?)$`)
	// ErrBadLocale indicate locale is invalid
	ErrBadLocale = errors.New(`Bad locale`)
	// matchlc and validlc are optional hooks that, when non-nil,
	// override the built-in locale matching and validation.
	matchlc func(availables, prefers []string) string
	validlc func(string) bool
)
// filename returns the path of the JSON dictionary file for locale lc
// under directory dir.
func filename(dir, lc string) string {
	return dir + "/" + lc + ".json"
}
// Reset to initial state, discarding every loaded translation.
func Reset() {
	dict = make(map[string]string)
}
// Load dictionary, doing a fuzzy match if lc is unavailable.
// An empty lc is auto-detected from the environment. It returns the
// locale that was actually attempted and any error from the final attempt.
func Load(dir, lc string) (matched string, err error) {
	if lc == "" {
		lc = Detect()
		dbg.Logf("use auto detected locale %s\n", lc)
	}
	dbg.Logf("loading dict \"%s\" from %s\n", lc, dir)
	// Always report whichever locale was tried last, even on failure.
	defer func() {
		matched = lc
		if err != nil {
			log.Printf("error: failed to load dict \"%s\" from %s\n", lc, dir)
		}
	}()
	err = loadExact(dir, lc)
	if err == nil {
		return
	}
	// Exact locale unavailable: fuzzy-match against the locales on disk.
	lcs := Enum(dir)
	lc = Match(lcs, lc)
	dbg.Logf("	use %s\n", lc)
	err = loadExact(dir, lc)
	return
}
// loadExact loads the dictionary file for exactly locale lc from dir and
// merges its non-empty entries into dict. It returns ErrBadLocale for an
// invalid lc, or any read/parse error.
func loadExact(dir, lc string) error {
	if !IsValid(lc) {
		return ErrBadLocale
	}
	fn := filename(dir, lc)
	data, err := store.ReadFile(fn)
	if err != nil {
		return err
	}
	var m map[string]string
	if err := json.Unmarshal(data, &m); err != nil {
		return err
	}
	for k, v := range m {
		if v == "" {
			// Log the untranslated key. The previous code quoted v,
			// which is known to be empty here, so the message carried
			// no information about which entry was untranslated.
			dbg.Logf("%s: untranslated: %s\n", filepath.Base(fn), strconv.Quote(k))
			continue
		}
		dict[k] = v
	}
	return nil
}
// Enum enum available locales
func Enum(dir string) (lcs []string) {
var ents []os.FileInfo
var err error
ents, err = store.ReadDir(dir)
if err != nil {
return
}
for _, ent := range ents {
name := ent.Name()
if ent.IsDir() || !strings.HasSuffix(name, `.json`) {
continue
}
name = strings.TrimSuffix(name, `.json`)
if !IsValid(name) {
continue
}
lcs = append(lcs, name)
}
return
}
// Tr translates str using the loaded dictionary. Unknown strings are
// returned unchanged.
func Tr(str string) string {
	if str == "" {
		return str
	}
	if dst, ok := dict[str]; ok {
		return dst
	}
	// Memoise the identity mapping so the debug message below is only
	// emitted once per untranslated string.
	dict[str] = str
	dbg.Logf("untranslated text: %s\n", strconv.Quote(str))
	return str
}
// Or simply returns the original str unchanged.
func Or(str string) string {
	return str
}
// IsValid checks whether lc is a valid locale tag: it must match rxlc
// and, when the validlc hook is set, also be accepted by it.
func IsValid(lc string) bool {
	if !rxlc.MatchString(lc) {
		return false
	}
	if validlc != nil {
		return validlc(lc)
	}
	return true
}
// parseEnvStr extracts the locale portion of an environment value such
// as "en_US.UTF-8", returning "" when the result is not a valid locale.
func parseEnvStr(s string) string {
	if s == "" {
		return ""
	}
	// Strip any ".encoding" suffix.
	if dot := strings.IndexByte(s, '.'); dot >= 0 {
		s = s[:dot]
	}
	if !IsValid(s) {
		return ""
	}
	return s
}
// Detect auto-detects the locale from the environment, normalising "_"
// to "-" in the result and falling back to "en".
// Per POSIX, LC_ALL overrides LANG, so it is consulted first; the
// previous version checked LANG first, inverting the precedence.
func Detect() (s string) {
	defer func() {
		s = strings.Replace(s, "_", "-", -1)
	}()
	s = parseEnvStr(os.Getenv("LC_ALL"))
	if s != "" {
		return s
	}
	s = parseEnvStr(os.Getenv("LANG"))
	if s != "" {
		return s
	}
	s = "en"
	return
}
// Match finds the preferred locale among availables. Preferences are
// scored in order (earlier prefers weigh more) by a mix of Levenshtein
// distance and length difference; the lowest score wins. With no
// availables it returns "en"; with no prefers, the first available.
func Match(availables []string, prefers ...string) (lc string) {
	if len(availables) == 0 {
		return "en"
	}
	if len(availables) == 1 {
		return availables[0]
	}
	if len(prefers) == 0 {
		return availables[0]
	}
	// Optional hook takes precedence when it yields a result.
	if matchlc != nil {
		lc = matchlc(availables, prefers)
		if lc != "" {
			return
		}
	}
	d := int(math.MaxInt32)
	lc = availables[0] // fallback
	for i, p := range prefers {
		p = strings.ToLower(p)
		for _, a := range availables {
			a1 := strings.ToLower(a)
			// Absolute length difference between the preferred and the
			// available tag. The previous code computed len(a1)-len(a1),
			// which is always zero (self-subtraction bug), so length
			// never influenced the score.
			d2 := len(p) - len(a1)
			if d2 < 0 {
				d2 = -d2
			}
			d1 := (i + 1) * (levenshtein.Distance(p, a1)*3 + d2)
			if d1 < d {
				lc = a
				d = d1
			}
		}
	}
	return lc
}
|
package main
import (
"github.com/guilhermeonrails/api-go-gin/database"
"github.com/guilhermeonrails/api-go-gin/routes"
)
// main connects to the database and then starts the HTTP route handler.
func main() {
	database.ConectaComBancoDeDados()
	routes.HandleRequest()
}
|
package main
import "fmt"
// main reads two integers, one per line, from standard input and prints
// their sum in the form "X = <sum>".
func main() {
	var first, second int
	fmt.Scanln(&first)
	fmt.Scanln(&second)
	fmt.Println("X =", first+second)
}
package rectangle
// Rectangle describes a rectangle by its two side lengths.
type Rectangle struct {
	Len float64 // length
	Wid float64 // width
}

// Area returns the rectangle's area (length times width).
func (r Rectangle) Area() float64 {
	return r.Wid * r.Len
}
|
// Package main is the main package for Goswift.
package main
import (
"github.com/ChristopherRabotin/gin-contrib-headerauth"
"github.com/gin-gonic/gin"
"github.com/op/go-logging"
"sync"
)
// testGoswift must be true when testing to avoid starting the server.
var testGoswift = false

// testS3Locations will store the list of S3 locations to delete after running the tests.
var testS3Locations []string

// log is the main go-logging logger.
var log = logging.MustGetLogger("goswift")

// persisterWg is the persister wait group, used to wait for the S3
// persisting goroutine before the process exits.
var persisterWg sync.WaitGroup
// init is ran before the main, so we'll perform the environment verifications there.
// init runs before main, so we perform environment verification and
// global configuration here.
func init() {
	CheckEnvVars() // This will fail if there are env vars missing.
	ConfigureLogger()
	ConfigureRuntime()
}
// main starts all needed functions to start the server.
// main starts the server and then waits for the S3 persister goroutine
// to drain before exiting.
func main() {
	PourGin()
	persisterWg.Wait()
}
// PourGin starts pouring the gin, i.e. sets up routes and starts listening.
// This returns an engine specifically for testing purposes; in normal
// operation engine.Run blocks and nil is returned.
func PourGin() *gin.Engine {
	gin.SetMode(ServerMode())
	engine := gin.Default()
	engine.GET("/", IndexGet)
	// S3 persister variables: buffered channel feeding the background
	// persisting goroutine tracked by persisterWg.
	persistChan := make(chan *S3Persist, 250)
	go S3PersistingHandler(persistChan, &persisterWg)
	// Auth managers
	perishableHA := NewPerishableTokenMgr("DecayingToken", "token")
	analyticsHA := NewAnalyticsTokenMgr("DecayingToken", "token", persistChan, &persisterWg)
	// Auth group.
	authG := engine.Group("/auth")
	authG.GET("/token", GetNewToken)
	// Auth testing group for tokens. Works on *all* methods.
	authTokenTest := authG.Group("/token/test")
	authTokenTest.Use(headerauth.HeaderAuth(perishableHA))
	methods := []string{"GET", "POST", "PUT", "DELETE", "PATCH"}
	for _, meth := range methods {
		authTokenTest.Handle(meth, "/", []gin.HandlerFunc{SuccessJSON}[0])
	}
	// Analytics group.
	analyticsG := engine.Group("/analytics")
	analyticsG.Use(headerauth.HeaderAuth(analyticsHA))
	analyticsG.PUT("/record", RecordAnalytics)
	if testGoswift {
		testS3Locations = make([]string, 0) // Allows append to assign directly to zeroth element.
	} else {
		// Starting the server. Run blocks until the server stops.
		engine.Run(ServerConfig())
		return nil
	}
	return engine
}
|
package main
import (
"fmt"
"io/ioutil"
)
// main copies ./main/test.txt to ./main/test2.txt.
func main() {
	// Read the source file.
	byteStr, err := ioutil.ReadFile("./main/test.txt")
	if err != nil {
		fmt.Println("读取文件出错")
		return
	}
	// Write the destination file. Permission bits must be octal: the
	// previous decimal literal 777 produced the nonsensical mode 0o1411.
	// The write error was also silently dropped before.
	if err := ioutil.WriteFile("./main/test2.txt", byteStr, 0644); err != nil {
		fmt.Println("error writing file:", err)
	}
}
|
//go:build linux || windows
// +build linux windows
/*
Copyright © 2021 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockerproxy
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/httputil"
"os"
"os/signal"
"regexp"
"sync"
"github.com/Masterminds/semver"
"github.com/sirupsen/logrus"
"github.com/rancher-sandbox/rancher-desktop/src/go/wsl-helper/pkg/dockerproxy/models"
"github.com/rancher-sandbox/rancher-desktop/src/go/wsl-helper/pkg/dockerproxy/platform"
)
// RequestContextValue contains things we attach to incoming requests
type RequestContextValue map[interface{}]interface{}

// requestContext is the context key for requestContextValue
var requestContext = struct{}{}

// containerInspectResponseBody is the subset of the container-inspect
// response we decode; only the full container Id is used.
type containerInspectResponseBody struct {
	Id string
}

// dockerAPIVersion is the docker engine API version used for the
// internal inspect requests made by CanonicalizeContainerID.
const dockerAPIVersion = "v1.41.0"
// Serve up the docker proxy at the given endpoint, using the given function to
// create a connection to the real dockerd. It installs a reverse proxy that
// munges requests/responses via the registered mungers, caps the advertised
// API version, and runs until the listener is closed (e.g. on interrupt).
func Serve(endpoint string, dialer func() (net.Conn, error)) error {
	listener, err := platform.Listen(endpoint)
	if err != nil {
		return err
	}
	// Close the listener on interrupt so http.Serve below returns.
	termch := make(chan os.Signal, 1)
	signal.Notify(termch, os.Interrupt)
	go func() {
		<-termch
		signal.Stop(termch)
		err := listener.Close()
		if err != nil {
			logrus.WithError(err).Error("Error closing listener on interrupt")
		}
	}()
	logWriter := logrus.StandardLogger().Writer()
	defer logWriter.Close()
	munger := newRequestMunger()
	proxy := &httputil.ReverseProxy{
		Director: func(req *http.Request) {
			logrus.WithField("request", req).
				WithField("headers", req.Header).
				WithField("url", req.URL).
				Debug("got proxy request")
			// The incoming URL is relative (to the root of the server); we need
			// to add scheme and host ("http://proxy.invalid/") to it.
			req.URL.Scheme = "http"
			req.URL.Host = "proxy.invalid"
			// Keep a copy of the pre-munge request purely for error logging.
			originalReq := *req
			originalURL := *req.URL
			originalReq.URL = &originalURL
			err := munger.MungeRequest(req, dialer)
			if err != nil {
				logrus.WithError(err).
					WithField("original request", originalReq).
					WithField("modified request", req).
					Error("could not munge request")
			}
		},
		Transport: &http.Transport{
			// All backend connections go through the supplied dialer.
			Dial: func(string, string) (net.Conn, error) {
				return dialer()
			},
			DisableCompression: true, // for debugging
		},
		ModifyResponse: func(resp *http.Response) error {
			logEntry := logrus.WithField("response", resp)
			defer func() { logEntry.Debug("got backend response") }()
			// Check the API version response, and if there is one, make sure
			// it's not newer than the API version we support.
			backendVersion, err := semver.NewVersion(resp.Header.Get("API-Version"))
			if err == nil {
				logEntry = logEntry.WithField("backend version", backendVersion)
				if backendVersion.GreaterThan(&dockerSpec.Info.Version) {
					overrideVersion := fmt.Sprintf("v%s", dockerSpec.Info.Version.Original())
					resp.Header.Set("API-Version", overrideVersion)
					logEntry = logEntry.WithField("override version", overrideVersion)
				}
			}
			err = munger.MungeResponse(resp, dialer)
			if err != nil {
				return err
			}
			return nil
		},
		ErrorLog: log.New(logWriter, "", 0),
	}
	// Attach a fresh RequestContextValue to every request so request and
	// response mungers can share state.
	contextAttacher := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ctx := context.WithValue(req.Context(), requestContext, &RequestContextValue{})
		newReq := req.WithContext(ctx)
		proxy.ServeHTTP(w, newReq)
	})
	logrus.WithField("endpoint", endpoint).Info("Listening")
	err = http.Serve(listener, contextAttacher)
	if err != nil {
		logrus.WithError(err).Error("serve exited with error")
	}
	return nil
}
// requestMunger is used to modify the incoming http.Request as required.
type requestMunger struct {
	// apiDetectPattern is used to detect the API version request path prefix.
	apiDetectPattern *regexp.Regexp
	sync.RWMutex
}

// newRequestMunger initializes a new requestMunger.
func newRequestMunger() *requestMunger {
	return &requestMunger{
		// Matches version prefixes such as "/v1.41/".
		apiDetectPattern: regexp.MustCompile(`^/v[0-9.]+/`),
	}
}
// getRequestPath returns the request's URL path with any leading API
// version prefix (e.g. "/v1.41") removed.
func (m *requestMunger) getRequestPath(req *http.Request) string {
	// Strip the version string at the start of the request, if it exists.
	requestPath := req.URL.Path
	match := m.apiDetectPattern.FindStringIndex(requestPath)
	logrus.WithFields(logrus.Fields{
		"request path": requestPath,
		"matcher":      m.apiDetectPattern,
		"match":        match,
	}).Debug("getting request path")
	if match != nil {
		// match[1] sits just past the trailing "/" of the version
		// prefix; backing up one byte keeps the leading "/" on the result.
		return requestPath[match[1]-1:]
	}
	return requestPath
}
// MungeRequest modifies a given request in-place. It looks up a
// registered request munger for the request's method and version-stripped
// path; if one is found it is invoked with the path template captures,
// after the "id" capture (if any) has been resolved to the full container id.
func (m *requestMunger) MungeRequest(req *http.Request, dialer func() (net.Conn, error)) error {
	requestPath := m.getRequestPath(req)
	logEntry := logrus.WithFields(logrus.Fields{
		"method": req.Method,
		"path":   requestPath,
		"phase":  "request",
	})
	mungerMapping.RLock()
	mapping, ok := mungerMapping.mungers[req.Method]
	mungerMapping.RUnlock()
	if !ok {
		logEntry.Debug("no munger with method")
		return nil
	}
	munger, templates := mapping.getRequestMunger(requestPath)
	if munger == nil {
		logEntry.Debug("request munger not found")
		return nil
	}
	// ensure id is always the long container id
	id, ok := templates["id"]
	if ok {
		inspect, err := m.CanonicalizeContainerID(req, id, dialer)
		if err != nil {
			// Best effort: on failure the raw id is passed through unchanged.
			logEntry.WithField("id", id).WithError(err).Error("unable to resolve container id")
		} else {
			templates["id"] = inspect.Id
		}
	}
	contextValue, _ := req.Context().Value(requestContext).(*RequestContextValue)
	logEntry.Debug("calling request munger")
	err := munger(req, contextValue, templates)
	if err != nil {
		logEntry.WithField("munger", munger).WithError(err).Error("munger failed")
		return fmt.Errorf("munger failed for %s: %w", requestPath, err)
	}
	return nil
}
// MungeResponse modifies a backend response in-place. It mirrors
// MungeRequest: it looks up a registered response munger for the original
// request's method and version-stripped path and, if found, invokes it
// with the path template captures ("id" resolved to the full container id).
func (m *requestMunger) MungeResponse(resp *http.Response, dialer func() (net.Conn, error)) error {
	requestPath := m.getRequestPath(resp.Request)
	logEntry := logrus.WithFields(logrus.Fields{
		"method": resp.Request.Method,
		"path":   requestPath,
		"phase":  "response",
	})
	mungerMapping.RLock()
	mapping, ok := mungerMapping.mungers[resp.Request.Method]
	mungerMapping.RUnlock()
	if !ok {
		logEntry.Debug("no munger with method")
		return nil
	}
	munger, templates := mapping.getResponseMunger(requestPath)
	if munger == nil {
		// BUG FIX: previously logged "request munger not found" (copy-paste
		// from MungeRequest), which was misleading in the response phase.
		logEntry.Debug("response munger not found")
		return nil
	}
	// ensure id is always the long container id
	id, ok := templates["id"]
	if ok {
		inspect, err := m.CanonicalizeContainerID(resp.Request, id, dialer)
		if err != nil {
			// Best effort: on failure the raw id is passed through unchanged.
			logEntry.WithField("id", id).WithError(err).Error("unable to resolve container id")
		} else {
			templates["id"] = inspect.Id
		}
	}
	contextValue, _ := resp.Request.Context().Value(requestContext).(*RequestContextValue)
	logEntry.Debug("calling response munger")
	err := munger(resp, contextValue, templates)
	if err != nil {
		logEntry.WithField("munger", munger).WithError(err).Error("munger failed")
		return fmt.Errorf("munger failed for %s: %w", requestPath, err)
	}
	return nil
}
/*
CanonicalizeContainerID makes a request upstream to inspect and resolve the full id of the container
we use the provided id path template variable to make an upstream request to the docker engine api to inspect the container.
Fortunately it supports both id or name as the container identifier.
The Id returned will be the full long container id that is used to lookup in docker-binds.json.
*/
func (m *requestMunger) CanonicalizeContainerID(req *http.Request, id string, dialer func() (net.Conn, error)) (*containerInspectResponseBody, error) {
	// url for inspecting container
	inspectURL, err := req.URL.Parse(fmt.Sprintf("/%s/containers/%s/json", dockerAPIVersion, id))
	if err != nil {
		return nil, err
	}
	client := &http.Client{
		Transport: &http.Transport{
			Dial: func(string, string) (net.Conn, error) {
				return dialer()
			},
		},
	}
	// make the inspect request
	inspectResponse, err := client.Get(inspectURL.String())
	if err != nil {
		return nil, err
	}
	// BUG FIX: the response body was never closed, leaking the connection
	// and preventing transport reuse.
	defer inspectResponse.Body.Close()
	// parse response as json
	body := containerInspectResponseBody{}
	buf, err := io.ReadAll(inspectResponse.Body)
	if err != nil {
		return nil, fmt.Errorf("could not read request body: %w", err)
	}
	err = json.Unmarshal(buf, &body)
	if err != nil {
		return nil, fmt.Errorf("could not unmarshal request body: %w", err)
	}
	return &body, nil
}
// dockerSpec contains information about the embedded OpenAPI specification for
// docker. It is populated from models.SwaggerJSON in init() and its
// Info.Version caps the API version advertised to clients in Serve.
var dockerSpec struct {
	Info struct {
		Version semver.Version
	}
}
// requestMungerFunc is a munger for an incoming request; it also receives an
// arbitrary mapping that can be reused in the response munger, as well as a
// mapping of any path templating patterns that were matched.
type requestMungerFunc func(*http.Request, *RequestContextValue, map[string]string) error

// responseMungerFunc is a munger for an outgoing response; it also receives an
// arbitrary mapping that was initially passed to the matching request munger,
// as well as a mapping of any path templating patterns that were matched.
type responseMungerFunc func(*http.Response, *RequestContextValue, map[string]string) error
// mungerMethodMapping is a helper structure to find a munger given an API path,
// specialized for a given HTTP method (GET, POST, etc.).
// This should only be written to during init(), at which point it's protected
// by the lock on mungerMapping.
type mungerMethodMapping struct {
	// requests that are simple (have no path templating), keyed by exact path
	requests map[string]requestMungerFunc
	// requestPatterns are requests that involve path templating
	requestPatterns map[*regexp.Regexp]requestMungerFunc
	// responses that are simple (have no path templating), keyed by exact path
	responses map[string]responseMungerFunc
	// responsePatterns are responses that involve path templating
	responsePatterns map[*regexp.Regexp]responseMungerFunc
}
// getRequestMunger gets the munger to use for this request, as well as the
// path templating captures (nil for an exact, non-templated match). The
// capture map also contains the whole match under the empty-string key.
func (m *mungerMethodMapping) getRequestMunger(apiPath string) (requestMungerFunc, map[string]string) {
	if exact, ok := m.requests[apiPath]; ok {
		return exact, nil
	}
	for pattern, munger := range m.requestPatterns {
		submatches := pattern.FindStringSubmatch(apiPath)
		if submatches == nil {
			continue
		}
		captured := make(map[string]string, len(submatches))
		for i, name := range pattern.SubexpNames() {
			captured[name] = submatches[i]
		}
		return munger, captured
	}
	return nil, nil
}
// getResponseMunger gets the munger to use for this response, as well as
// the path templating captures (nil for an exact, non-templated match).
func (m *mungerMethodMapping) getResponseMunger(apiPath string) (responseMungerFunc, map[string]string) {
	if exact, ok := m.responses[apiPath]; ok {
		return exact, nil
	}
	for pattern, munger := range m.responsePatterns {
		submatches := pattern.FindStringSubmatch(apiPath)
		if submatches == nil {
			continue
		}
		captured := make(map[string]string, len(submatches))
		for i, name := range pattern.SubexpNames() {
			captured[name] = submatches[i]
		}
		return munger, captured
	}
	return nil, nil
}
// mungerMapping contains mungers that will handle particular API endpoints.
// Keys of mungers are HTTP methods ("GET", "POST", ...); the embedded
// RWMutex guards all access.
var mungerMapping struct {
	sync.RWMutex
	mungers map[string]*mungerMethodMapping
}
// convertPattern converts an API path to a regular expression pattern for
// matching URLs with path templating; if there are no path templates, this
// returns nil. The returned pattern always matches the whole string, and
// each "{name}" template becomes a named capture that cannot span "/".
func convertPattern(apiPath string) *regexp.Regexp {
	templateSpans := regexp.MustCompile(`{[^}/]+}`).FindAllStringIndex(apiPath, -1)
	if len(templateSpans) == 0 {
		return nil
	}
	result := `\A`
	prev := 0
	for _, span := range templateSpans {
		name := apiPath[span[0]+1 : span[1]-1]
		result += regexp.QuoteMeta(apiPath[prev:span[0]]) + `(?P<` + name + `>[^/]+)`
		prev = span[1]
	}
	return regexp.MustCompile(result + regexp.QuoteMeta(apiPath[prev:]) + `\z`)
}
// RegisterRequestMunger registers munger for incoming requests with the
// given HTTP method and (possibly templated) API path. Per the note on
// mungerMethodMapping, registration is expected to happen during init().
func RegisterRequestMunger(method, apiPath string, munger requestMungerFunc) {
	mungerMapping.Lock()
	defer mungerMapping.Unlock()
	// Lazily create the per-method mapping on first registration.
	mapping, ok := mungerMapping.mungers[method]
	if !ok {
		mapping = &mungerMethodMapping{
			requests:         make(map[string]requestMungerFunc),
			requestPatterns:  make(map[*regexp.Regexp]requestMungerFunc),
			responses:        make(map[string]responseMungerFunc),
			responsePatterns: make(map[*regexp.Regexp]responseMungerFunc),
		}
		mungerMapping.mungers[method] = mapping
	}
	// Templated paths go into the pattern map, exact paths into the plain map.
	pattern := convertPattern(apiPath)
	if pattern == nil {
		mapping.requests[apiPath] = munger
	} else {
		mapping.requestPatterns[pattern] = munger
	}
}
// RegisterResponseMunger registers munger to rewrite responses for the
// given HTTP method and API path. Paths containing "{name}" templates are
// stored as patterns; all other paths are exact-match entries.
func RegisterResponseMunger(method, apiPath string, munger responseMungerFunc) {
	mungerMapping.Lock()
	defer mungerMapping.Unlock()
	mapping := mungerMapping.mungers[method]
	if mapping == nil {
		// First registration for this method: create its munger tables.
		mapping = &mungerMethodMapping{
			requests:         make(map[string]requestMungerFunc),
			requestPatterns:  make(map[*regexp.Regexp]requestMungerFunc),
			responses:        make(map[string]responseMungerFunc),
			responsePatterns: make(map[*regexp.Regexp]responseMungerFunc),
		}
		mungerMapping.mungers[method] = mapping
	}
	if pattern := convertPattern(apiPath); pattern != nil {
		mapping.responsePatterns[pattern] = munger
	} else {
		mapping.responses[apiPath] = munger
	}
}
// init installs the top-level munger table and parses the embedded swagger
// spec into dockerSpec. A malformed embedded spec is a build-time defect,
// so failing to parse it is fatal.
func init() {
	mungerMapping.mungers = make(map[string]*mungerMethodMapping)
	err := json.Unmarshal(models.SwaggerJSON, &dockerSpec)
	if err != nil {
		panic("could not parse embedded spec version")
	}
}
|
package main
import (
"fmt"
linuxproc "github.com/c9s/goprocinfo/linux"
"github.com/google/go-cmp/cmp"
"testing"
)
func TestGetCpuCoresIdleTime(t *testing.T) {
mockReadStat := func(s string) (*linuxproc.Stat, error) {
return &linuxproc.Stat{
CPUStats: []linuxproc.CPUStat{
{Idle: 90},
{Idle: 100},
{Idle: 105},
{Idle: 170},
},
}, nil
}
desiredResponse := []Core{
{0, 90},
{1, 100},
{2, 105},
{3, 170},
}
fmt.Println(desiredResponse)
fmt.Println(getCpuCoresIdleTime(mockReadStat))
if !cmp.Equal(getCpuCoresIdleTime(mockReadStat), desiredResponse) {
t.FailNow()
}
}
|
package pathutil
import (
	"fmt"
	"os"
	"path/filepath"
	"testing"
)
// use exists only so the fmt import is referenced; it emits a single
// newline to standard output.
func use() {
	fmt.Print("\n")
}
func getSolFiles(solPath string) (solFiles []string, err error) {
err = filepath.Walk(solPath, func(solFile string, solFileInfo os.FileInfo, err error) error {
if solFileInfo == nil {
return err
}
if solFileInfo.IsDir() {
return nil
}
solFiles = append(solFiles, solFile)
return nil
})
if err != nil {
return nil, err
}
return solFiles, err
}
// TestPathutil is a smoke test: it exercises GetCurrentDirectory and
// PathUsage and records their output in the test log instead of writing to
// stdout, so output is attributed to the test and hidden unless -v is set.
// It asserts nothing beyond "does not panic".
func TestPathutil(t *testing.T) {
	t.Log(GetCurrentDirectory())
	t.Log(PathUsage("/data"))
}
|
package main
// --------------------- StockSpanner ---------------------
// Runtime: 192 ms, faster than 98.48% of Go submissions.
// Memory: 8.7 MB, less than 100.00% of Go submissions.
//
// Time complexity: O(n) amortized over all calls to Next.
// Approach: maintain a monotonically decreasing stack of price indices.
//
// StockSpanner answers, for each incoming price, how many consecutive days
// (ending today) had a price less than or equal to today's.
type StockSpanner struct {
	// data holds every price seen so far, in arrival order.
	data []int
	// indexStack holds indices into data whose prices form a strictly
	// decreasing sequence; the nearest greater price is always on top.
	indexStack *MyStack
}
// Constructor returns a ready-to-use StockSpanner with no prices recorded.
func Constructor() StockSpanner {
	ss := StockSpanner{indexStack: NewMyStack()}
	ss.data = make([]int, 0)
	return ss
}
// Next records price and returns its span: the number of consecutive days,
// ending today, whose price is less than or equal to today's price.
func (ss *StockSpanner) Next(price int) int {
	ss.data = append(ss.data, price)
	today := len(ss.data) - 1
	// Pop indices whose prices are dominated by today's price; they can
	// never again be the nearest greater element for a future day.
	for !ss.indexStack.IsEmpty() && ss.data[ss.indexStack.GetTop()] <= ss.data[today] {
		ss.indexStack.Pop()
	}
	var span int
	if ss.indexStack.IsEmpty() {
		span = today + 1
	} else {
		span = today - ss.indexStack.GetTop()
	}
	ss.indexStack.Push(today)
	return span
}
// --------------------- MyStack ---------------------

// MyStack is a minimal LIFO stack of ints backed by a slice.
type MyStack struct {
	data []int
}

// NewMyStack returns an empty stack.
func NewMyStack() *MyStack {
	return &MyStack{}
}

// Push places val on top of the stack.
func (ms *MyStack) Push(val int) {
	ms.data = append(ms.data, val)
}

// Pop removes and returns the top element; it panics on an empty stack.
func (ms *MyStack) Pop() int {
	last := len(ms.data) - 1
	top := ms.data[last]
	ms.data = ms.data[:last]
	return top
}

// GetTop returns the top element without removing it; it panics on an
// empty stack.
func (ms *MyStack) GetTop() int {
	return ms.data[len(ms.data)-1]
}

// IsEmpty reports whether the stack holds no elements.
func (ms *MyStack) IsEmpty() bool {
	return len(ms.data) == 0
}

// GetSize returns the number of elements currently on the stack.
func (ms *MyStack) GetSize() int {
	return len(ms.data)
}
/*
题目链接:
总结:
1. 这题就是求: 数组中,A[:i+1] 中连续有多少个数小于等于 A[i]。
*/
|
// Exercise 1.1:
// Modify the echo statement to also print os.Args[0], the name of the command
// that invoked it.
// Prints the command that invoked it.
package main
import (
"fmt"
"os"
)
// main prints os.Args[0], the path of the command that invoked this
// program, as required by exercise 1.1.
func main() {
	invoker := os.Args[0]
	fmt.Println(invoker)
}
|
package main
import "time"
// endpoint models an HTTP endpoint together with a status code and a
// response time — presumably the result of a health check; confirm against
// the code that populates it. All fields are dropped from JSON output when
// zero-valued. Note that ResponseTime, being a time.Duration (int64),
// serializes as a plain nanosecond count.
type endpoint struct {
	ID           int           `json:"id,omitempty"`
	Name         string        `json:"name,omitempty"`
	URL          string        `json:"url,omitempty"`
	Status       int           `json:"status,omitempty"`
	ResponseTime time.Duration `json:"responsetime,omitempty"`
}
|
package v1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HelloType is a top-level type; the marker comments above make the code
// generators emit a namespaced typed client and runtime.Object deepcopy
// support for it.
type HelloType struct {
	metav1.TypeMeta `json:",inline"`
	// Standard Kubernetes object metadata.
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// +optional
	Status HelloTypeStatus `json:"status,omitempty"`
	Spec   HelloSpec       `json:"spec,omitempty"`
}
// HelloSpec is the desired-state spec of a HelloType; it carries a single
// optional message.
type HelloSpec struct {
	Message string `json:"message,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// no client needed for list as it's been created in above
// HelloTypeList is a list of HelloType objects.
type HelloTypeList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	// Tag typo fixed: was `son:"metadata,omitempty"`, which made ListMeta
	// serialize under its field name instead of "metadata".
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []HelloType `json:"items"`
}
// HelloTypeStatus is the observed-state status of a HelloType.
type HelloTypeStatus struct {
	Name string
}

// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterHelloTypeList is a list of cluster-scoped ClusterHelloType
// objects.
type ClusterHelloTypeList struct {
	metav1.TypeMeta
	metav1.ListMeta
	Items []ClusterHelloType
}

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// ClusterHelloType is a cluster-scoped (non-namespaced) variant whose
// generated client additionally exposes GetScale/UpdateScale subresource
// methods, per the markers above.
type ClusterHelloType struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// +optional
	Status ClusterHelloTypeStatus `json:"status,omitempty"`
}

// ClusterHelloTypeStatus is the observed-state status of a
// ClusterHelloType.
type ClusterHelloTypeStatus struct {
	Name string
}
|
package mutex
import (
"sync"
"sync/atomic"
"gvisor.dev/gvisor/pkg/tmutex"
)
// TMutex wraps tmutex.Mutex so that the zero value is usable: every method
// lazily runs Init exactly once, gated by a CAS on the initialized flag.
type TMutex struct {
	tmutex.Mutex
	// initialized is 0x0 for a zero-valued TMutex and 0x1 once Init has run.
	initialized uint32
}

// NewTMutex returns a TMutex whose underlying mutex is already initialized,
// so the lazy-init CAS in the methods never fires for it.
func NewTMutex() *TMutex {
	m := &TMutex{
		Mutex: tmutex.Mutex{},
		initialized: 0x1,
	}
	m.Mutex.Init()
	return m
}

// Lock initializes the mutex on first use, then acquires it.
func (m *TMutex) Lock() {
	if atomic.CompareAndSwapUint32(&m.initialized, 0x0, 0x1) {
		m.Mutex.Init()
	}
	m.Mutex.Lock()
}

// TryLock initializes the mutex on first use, then attempts a
// non-blocking acquire and reports whether it succeeded.
func (m *TMutex) TryLock() bool {
	if atomic.CompareAndSwapUint32(&m.initialized, 0x0, 0x1) {
		m.Mutex.Init()
	}
	return m.Mutex.TryLock()
}

// Unlock initializes the mutex on first use, then releases it.
func (m *TMutex) Unlock() {
	if atomic.CompareAndSwapUint32(&m.initialized, 0x0, 0x1) {
		m.Mutex.Init()
	}
	m.Mutex.Unlock()
}
// Mutex augments sync.Mutex with an advisory "held" flag so a TryLock can
// be offered on top of the plain mutex.
//
// NOTE(review): the flag is updated outside the mutex's critical section,
// so there are windows where it disagrees with the real lock state (e.g.
// TryLock racing with Unlock can briefly block on the inner Lock). Confirm
// that callers tolerate this best-effort semantics.
type Mutex struct {
	sync.Mutex
	// flag is 0x1 while the mutex is (advisorily) held, 0x0 otherwise.
	flag uint32
}

// Lock acquires the mutex and then marks it held.
func (m *Mutex) Lock() {
	m.Mutex.Lock()
	atomic.StoreUint32(&m.flag, 0x1)
}

// TryLock attempts to acquire the mutex: it first claims the advisory flag
// with a CAS and only then takes the real lock. It reports whether the
// lock was acquired.
func (m *Mutex) TryLock() bool {
	if atomic.CompareAndSwapUint32(&m.flag, 0x0, 0x1) {
		m.Mutex.Lock()
		return true
	}
	return false
}

// Unlock clears the advisory flag, then releases the mutex.
func (m *Mutex) Unlock() {
	atomic.StoreUint32(&m.flag, 0x0)
	m.Mutex.Unlock()
}
// RWMutex augments sync.RWMutex with two advisory flags so TryRLock and
// TryLock can be offered: flags[0] tracks the read lock, flags[1] the
// write lock.
//
// NOTE(review): as with Mutex above, the flags are updated outside the
// critical section and are therefore only advisory; the Try* methods can
// briefly block when racing with the corresponding unlock.
type RWMutex struct {
	sync.RWMutex
	// flags[0]: 0x1 while read-locked; flags[1]: 0x1 while write-locked.
	flags [2]uint32
}

// RLock acquires the read lock and marks it held.
func (m *RWMutex) RLock() {
	m.RWMutex.RLock()
	atomic.StoreUint32(&m.flags[0], 0x1)
}

// TryRLock claims the read flag with a CAS and, on success, takes the read
// lock. It reports whether the read lock was acquired.
func (m *RWMutex) TryRLock() bool {
	if atomic.CompareAndSwapUint32(&m.flags[0], 0x0, 0x1) {
		m.RWMutex.RLock()
		return true
	}
	return false
}

// RUnlock clears the read flag, then releases the read lock.
func (m *RWMutex) RUnlock() {
	atomic.StoreUint32(&m.flags[0], 0x0)
	m.RWMutex.RUnlock()
}

// Lock acquires the write lock and marks it held.
func (m *RWMutex) Lock() {
	m.RWMutex.Lock()
	atomic.StoreUint32(&m.flags[1], 0x1)
}

// TryLock claims the write flag with a CAS and, on success, takes the
// write lock. It reports whether the write lock was acquired.
func (m *RWMutex) TryLock() bool {
	if atomic.CompareAndSwapUint32(&m.flags[1], 0x0, 0x1) {
		m.RWMutex.Lock()
		return true
	}
	return false
}

// Unlock clears the write flag, then releases the write lock.
func (m *RWMutex) Unlock() {
	atomic.StoreUint32(&m.flags[1], 0x0)
	m.RWMutex.Unlock()
}
|
package main
import (
"fmt"
"net/http"
"blockchain"
"crypto/rand"
"log"
"os"
)
// UUID returns a random 128-bit identifier formatted as five uppercase hex
// groups (8-4-4-4-12). If the system's entropy source fails, it returns
// the empty string.
func UUID() string {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return ""
	}
	return fmt.Sprintf("%X-%X-%X-%X-%X", b[:4], b[4:6], b[6:8], b[8:10], b[10:])
}
// main starts an HTTP node for the blockchain. The listen address and the
// advertised (broadcast) address come from the CHAIN_ADDRESS and
// CHAIN_BROADCAST environment variables, with local defaults for
// development.
func main() {
	address := os.Getenv("CHAIN_ADDRESS")
	if address == "" {
		address = "0.0.0.0:8000"
	}
	broadcast := os.Getenv("CHAIN_BROADCAST")
	if broadcast == "" {
		broadcast = "127.0.0.1:8000"
	}
	nodeID := UUID()
	bc, err := blockchain.NewBlockChain(nodeID, broadcast)
	if err != nil {
		panic(err)
	}
	log.Printf("[%s]监听[%s]中......%s", nodeID, address, broadcast)
	// ListenAndServe only returns on failure; previously its error was
	// discarded and the process exited silently. Report why it stopped.
	log.Fatal(http.ListenAndServe(address, blockchain.NewHandler(bc)))
}
|
package hive
import (
"errors"
"testing"
)
// TestError_Error checks the formatting precedence of Error.Error across
// combinations of Code, Message, Op, and a wrapped Err.
func TestError_Error(t *testing.T) {
	type fields struct {
		Code    string
		Message string
		Op      string
		Err     error
	}
	cases := []struct {
		name   string
		fields fields
		want   string
	}{
		{"Err", fields{Err: errors.New("example")}, "example"},
		{"OpMessage", fields{Op: "testing", Message: "error"}, "testing: error"},
		{"CodeMessage", fields{Code: ErrInternal, Message: "error"}, "<INTERNAL> error"},
		{"OpCodeMessage", fields{Op: "testing", Code: ErrInternal, Message: "error"},
			"testing: <INTERNAL> error"},
		{"OpCodeMessageError", fields{Op: "testing", Code: ErrInternal, Message: "error", Err: errors.New("example")},
			"testing: example"},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			subject := &Error{
				Code:    tc.fields.Code,
				Message: tc.fields.Message,
				Op:      tc.fields.Op,
				Err:     tc.fields.Err,
			}
			got := subject.Error()
			if got != tc.want {
				t.Errorf("Error.Error() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestErrorCode verifies ErrorCode: nil yields the empty string, a plain
// error maps to ErrInternal, and an *Error reports its own Code.
func TestErrorCode(t *testing.T) {
	type args struct {
		err error
	}
	cases := []struct {
		name string
		args args
		want string
	}{
		{"Nil", args{err: nil}, ""},
		{"Standard", args{err: errors.New("example")}, ErrInternal},
		{"Error", args{err: &Error{Code: ErrMissingParameter}}, ErrMissingParameter},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got := ErrorCode(tc.args.err)
			if got != tc.want {
				t.Errorf("ErrorCode() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestErrorMessage verifies ErrorMessage: nil yields the empty string, a
// plain error maps to a generic internal-error message, and an *Error
// (possibly nested) reports its own Message.
func TestErrorMessage(t *testing.T) {
	type args struct {
		err error
	}
	cases := []struct {
		name string
		args args
		want string
	}{
		{"Nil", args{err: nil}, ""},
		{"Standard", args{err: errors.New("example")}, "internal error has occurred"},
		{"Error", args{err: &Error{Message: "message"}}, "message"},
		{"NestedError", args{err: &Error{Err: &Error{Message: "message"}}}, "message"},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got := ErrorMessage(tc.args.err)
			if got != tc.want {
				t.Errorf("ErrorMessage() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package main
import (
"fmt"
"testing"
"google.golang.org/grpc"
)
func Test_main(t *testing.T) {
_, err := grpc.Dial("127.0.0.1:2680", grpc.WithInsecure())
if err != nil {
fmt.Println(err)
return
}
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/planner/util"
)
// preparePossibleProperties traverses the plan tree in post-order,
// recursively calling the LogicalPlan PreparePossibleProperties interface
// on every node and feeding each node its children's possible orderings.
func preparePossibleProperties(lp LogicalPlan) [][]*expression.Column {
	children := lp.Children()
	childProps := make([][][]*expression.Column, 0, len(children))
	for _, child := range children {
		childProps = append(childProps, preparePossibleProperties(child))
	}
	return lp.PreparePossibleProperties(lp.Schema(), childProps...)
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// For every candidate access path it collects the column orderings that
// path can provide: the handle column for an int-handle path, and — for an
// index path — the full index column list plus each suffix obtained by
// dropping up to EqCondCount leading (equality-bound) columns.
func (ds *DataSource) PreparePossibleProperties(_ *expression.Schema, _ ...[][]*expression.Column) [][]*expression.Column {
	result := make([][]*expression.Column, 0, len(ds.possibleAccessPaths))
	for _, path := range ds.possibleAccessPaths {
		if path.IsIntHandlePath {
			// Int-handle paths can only offer an ordering on the handle
			// (PK) column, when such a column exists.
			col := ds.getPKIsHandleCol()
			if col != nil {
				result = append(result, []*expression.Column{col})
			}
			continue
		}
		if len(path.IdxCols) == 0 {
			continue
		}
		// The complete index column list is always a possible ordering.
		result = append(result, make([]*expression.Column, len(path.IdxCols)))
		copy(result[len(result)-1], path.IdxCols)
		// Each suffix past an equality-condition prefix is also ordered.
		for i := 0; i < path.EqCondCount && i+1 < len(path.IdxCols); i++ {
			result = append(result, make([]*expression.Column, len(path.IdxCols)-i-1))
			copy(result[len(result)-1], path.IdxCols[i+1:])
		}
	}
	return result
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// A table scan can offer an ordering on its handle columns, when present.
func (ts *LogicalTableScan) PreparePossibleProperties(_ *expression.Schema, _ ...[][]*expression.Column) [][]*expression.Column {
	if ts.HandleCols == nil {
		return nil
	}
	n := ts.HandleCols.NumCols()
	cols := make([]*expression.Column, n)
	for i := range cols {
		cols[i] = ts.HandleCols.GetCol(i)
	}
	return [][]*expression.Column{cols}
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// For each i in [0, EqCondCount], the suffix IdxCols[i:] is offered as a
// candidate ordering (copied so callers can't alias IdxCols).
func (is *LogicalIndexScan) PreparePossibleProperties(_ *expression.Schema, _ ...[][]*expression.Column) [][]*expression.Column {
	if len(is.IdxCols) == 0 {
		return nil
	}
	result := make([][]*expression.Column, 0, is.EqCondCount+1)
	for i := 0; i <= is.EqCondCount; i++ {
		suffix := make([]*expression.Column, len(is.IdxCols)-i)
		copy(suffix, is.IdxCols[i:])
		result = append(result, suffix)
	}
	return result
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// A single gather passes its only child's orderings through unchanged.
func (*TiKVSingleGather) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column {
	return childrenProperties[0]
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// Selection only filters rows, so its child's orderings are preserved.
func (*LogicalSelection) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column {
	return childrenProperties[0]
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// The single ordering offered is the PARTITION BY columns followed by the
// ORDER BY columns.
func (p *LogicalWindow) PreparePossibleProperties(_ *expression.Schema, _ ...[][]*expression.Column) [][]*expression.Column {
	cols := make([]*expression.Column, 0, len(p.PartitionBy)+len(p.OrderBy))
	for i := range p.PartitionBy {
		cols = append(cols, p.PartitionBy[i].Col)
	}
	for i := range p.OrderBy {
		cols = append(cols, p.OrderBy[i].Col)
	}
	return [][]*expression.Column{cols}
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// A sort offers the ordering given by its column-only ByItems prefix, if
// that prefix is non-empty.
func (p *LogicalSort) PreparePossibleProperties(_ *expression.Schema, _ ...[][]*expression.Column) [][]*expression.Column {
	cols := getPossiblePropertyFromByItems(p.ByItems)
	if len(cols) > 0 {
		return [][]*expression.Column{cols}
	}
	return nil
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// TopN offers the ordering given by its column-only ByItems prefix, if
// that prefix is non-empty.
func (lt *LogicalTopN) PreparePossibleProperties(_ *expression.Schema, _ ...[][]*expression.Column) [][]*expression.Column {
	cols := getPossiblePropertyFromByItems(lt.ByItems)
	if len(cols) > 0 {
		return [][]*expression.Column{cols}
	}
	return nil
}
// getPossiblePropertyFromByItems extracts the longest prefix of items whose
// expressions are plain columns; the first non-column item stops the scan.
func getPossiblePropertyFromByItems(items []*util.ByItems) []*expression.Column {
	cols := make([]*expression.Column, 0, len(items))
	for _, item := range items {
		col, ok := item.Expr.(*expression.Column)
		if !ok {
			break
		}
		cols = append(cols, col)
	}
	return cols
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// The default implementation offers no orderings.
func (*baseLogicalPlan) PreparePossibleProperties(_ *expression.Schema, _ ...[][]*expression.Column) [][]*expression.Column {
	return nil
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// It translates each child ordering into output-column terms: an ordering
// column survives only if the projection passes it through verbatim (the
// expression is the bare column); the first non-projected column truncates
// that ordering, and fully-empty translations are dropped.
func (p *LogicalProjection) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column {
	childProperties := childrenProperties[0]
	// oldCols[i] is an input column projected verbatim; newCols[i] is the
	// corresponding output column at the same position.
	oldCols := make([]*expression.Column, 0, p.schema.Len())
	newCols := make([]*expression.Column, 0, p.schema.Len())
	for i, expr := range p.Exprs {
		if col, ok := expr.(*expression.Column); ok {
			newCols = append(newCols, p.schema.Columns[i])
			oldCols = append(oldCols, col)
		}
	}
	tmpSchema := expression.NewSchema(oldCols...)
	newProperties := make([][]*expression.Column, 0, len(childProperties))
	for _, childProperty := range childProperties {
		newChildProperty := make([]*expression.Column, 0, len(childProperty))
		for _, col := range childProperty {
			pos := tmpSchema.ColumnIndex(col)
			if pos < 0 {
				// Ordering column is not projected; keep only the prefix
				// translated so far.
				break
			}
			newChildProperty = append(newChildProperty, newCols[pos])
		}
		if len(newChildProperty) != 0 {
			newProperties = append(newProperties, newChildProperty)
		}
	}
	return newProperties
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// It remembers both children's orderings on the join node, then returns
// deep copies of the orderings the join's output can preserve: an outer
// join keeps only the outer side's orderings (the other side's rows can be
// null-extended).
func (p *LogicalJoin) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column {
	leftProperties := childrenProperties[0]
	rightProperties := childrenProperties[1]
	// TODO: We should consider properties propagation.
	p.leftProperties = leftProperties
	p.rightProperties = rightProperties
	if p.JoinType == LeftOuterJoin || p.JoinType == LeftOuterSemiJoin {
		rightProperties = nil
	} else if p.JoinType == RightOuterJoin {
		leftProperties = nil
	}
	// Copy each ordering so callers cannot mutate the stored properties.
	resultProperties := make([][]*expression.Column, len(leftProperties)+len(rightProperties))
	for i, cols := range leftProperties {
		resultProperties[i] = make([]*expression.Column, len(cols))
		copy(resultProperties[i], cols)
	}
	leftLen := len(leftProperties)
	for i, cols := range rightProperties {
		resultProperties[leftLen+i] = make([]*expression.Column, len(cols))
		copy(resultProperties[leftLen+i], cols)
	}
	return resultProperties
}
// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface.
// A child ordering is usable for stream aggregation only if its prefix
// covers every group-by column; the surviving orderings (truncated to the
// group-by columns) are recorded on the node and returned.
func (la *LogicalAggregation) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column {
	childProps := childrenProperties[0]
	// If there's no group-by item, the stream aggregation could have no order property. So we can add an empty property
	// when its group-by item is empty.
	if len(la.GroupByItems) == 0 {
		la.possibleProperties = [][]*expression.Column{nil}
		return nil
	}
	resultProperties := make([][]*expression.Column, 0, len(childProps))
	groupByCols := la.GetGroupByCols()
	for _, possibleChildProperty := range childProps {
		// getMaxSortPrefix tells how many leading ordering columns are
		// group-by columns; only a full cover qualifies.
		sortColOffsets := getMaxSortPrefix(possibleChildProperty, groupByCols)
		if len(sortColOffsets) == len(groupByCols) {
			prop := possibleChildProperty[:len(groupByCols)]
			resultProperties = append(resultProperties, prop)
		}
	}
	la.possibleProperties = resultProperties
	return resultProperties
}
|
package ds
/**
*
*
Given an integer rowIndex, return the rowIndexth row of the Pascal's triangle.
Notice that the row index starts from 0.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Follow up:
Could you optimize your algorithm to use only O(k) extra space?
Example 1:
Input: rowIndex = 3
Output: [1,3,3,1]
Example 2:
Input: rowIndex = 0
Output: [1]
Example 3:
Input: rowIndex = 1
Output: [1,1]
[
[1],
[1,1],
[1,2,1],
[1,3,3,1],
[1,4,6,4,1]
]
*
*
*
*
*/
// getRow computes the rowIndex-th (0-based) row of Pascal's triangle and
// returns it as a slice of rowIndex+1 ints.
func getRow(rowIndex int) []int {
arr := make([]int, rowIndex+1)
arr[0] = 1
for i := 1; i < rowIndex+1; i++ {
for j := i; j > 0; j-- {
arr[j] += arr[j-1]
}
}
return arr
}
|
package main
import (
"fmt"
)
// Items is a placeholder element type that can hold any value.
type Items interface{}

// Map applies mapFun to every element of items in place and returns the
// (mutated) slice.
func Map(items []Items, mapFun func(Items) Items) []Items {
	for i := range items {
		items[i] = mapFun(items[i])
	}
	return items
}

// add2 "doubles" its argument: ints are multiplied by two, strings are
// concatenated with themselves, and any other type passes through
// unchanged.
func add2(iface Items) Items {
	switch v := iface.(type) {
	case int:
		return 2 * v
	case string:
		return v + v
	default:
		return iface
	}
}
// main demonstrates Map with add2 over an int slice and a string slice.
func main() {
	ints := []Items{1, 2, 3, 4, 5, 6, 7, 8, 9}
	letters := []Items{"H", "e", "l", "l", "o", "!"}
	fmt.Printf("Integer List: %v\n", Map(ints, add2))
	fmt.Printf("String List: %v\n", Map(letters, add2))
}
|
package handler
import (
"fmt"
"net/http"
"strings"
"time"
"github.com/chonla/oddsvr-api/jwt"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/globalsign/mgo/bson"
"github.com/labstack/echo"
)
// Vr looks up a virtual run by its link id and returns it as JSON, or 404
// when no run with that id exists.
func (h *Handler) Vr(c echo.Context) error {
	id := c.Param("id")
	if !h.vr.Exists(id) {
		return c.NoContent(http.StatusNotFound)
	}
	vr, err := h.vr.FromLink(id)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, err)
	}
	return c.JSON(http.StatusOK, vr)
}
// Vrs returns every not-yet-expired virtual run as JSON.
func (h *Handler) Vrs(c echo.Context) error {
	runs, err := h.vr.UnexpiredRuns()
	if err != nil {
		return c.JSON(http.StatusInternalServerError, err)
	}
	return c.JSON(http.StatusOK, runs)
}
// UpdateVr updates the title, period, and detail of an existing virtual
// run. Only the athlete that created the run may update it; others get
// 403, and unknown ids get 404. On success the Location and
// X-Updated-Vr-ID headers identify the updated resource.
func (h *Handler) UpdateVr(c echo.Context) error {
	user := c.Get("user").(*jwtgo.Token)
	claims := user.Claims.(*jwt.Claims)
	uid := claims.ID
	id := c.Param("id")
	if !h.vr.Exists(id) {
		return c.NoContent(http.StatusNotFound)
	}
	vr, e := h.vr.FromLink(id)
	if e != nil {
		// Previously unchecked: a failed load would have compared
		// CreatedBy on a zero-valued run and returned 403 (or updated
		// garbage) instead of reporting the error.
		return c.JSON(http.StatusInternalServerError, e)
	}
	if vr.CreatedBy != uid {
		return c.NoContent(http.StatusForbidden)
	}
	vrContext, e := h.vr.FromContext(c)
	if e != nil {
		return c.JSON(http.StatusInternalServerError, e)
	}
	vr.Title = vrContext.Title
	vr.Period = vrContext.Period
	vr.Detail = vrContext.Detail
	if e = h.vr.UpdateVr(&vr); e != nil {
		return c.JSON(http.StatusInternalServerError, e)
	}
	c.Response().Header().Add("Location", fmt.Sprintf("/vr/%s", vr.Link))
	c.Response().Header().Add("X-Updated-Vr-ID", vr.ID.String())
	return c.JSON(http.StatusOK, vr)
}
// CreateVr creates a new virtual run owned by the authenticated athlete
// and responds 201 with the stored document; the Location and X-New-Vr-ID
// headers point at the new resource.
func (h *Handler) CreateVr(c echo.Context) error {
	token := c.Get("user").(*jwtgo.Token)
	uid := token.Claims.(*jwt.Claims).ID
	vr, err := h.vr.FromContext(c)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, err)
	}
	vr.ID = bson.NewObjectId()
	vr.Link = h.vr.CreateSafeVrLink()
	vr.CreatedBy = uid
	vr.CreatedDateTime = time.Now().Format(time.RFC3339)
	if err = h.vr.SaveVr(vr); err != nil {
		return c.JSON(http.StatusInternalServerError, err)
	}
	header := c.Response().Header()
	header.Add("Location", fmt.Sprintf("/vr/%s", vr.Link))
	header.Add("X-New-Vr-ID", vr.ID.String())
	return c.JSON(http.StatusCreated, vr)
}
// JoinedVrs returns every virtual run the authenticated athlete has
// joined.
func (h *Handler) JoinedVrs(c echo.Context) error {
	token := c.Get("user").(*jwtgo.Token)
	uid := token.Claims.(*jwt.Claims).ID
	runs, err := h.vr.Joined(uid)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, err)
	}
	return c.JSON(http.StatusOK, runs)
}
// JoinVr enrolls the authenticated athlete in the virtual run identified
// by the id parameter, recording the engagement supplied in the request
// body under the athlete's id and display name.
func (h *Handler) JoinVr(c echo.Context) error {
	token := c.Get("user").(*jwtgo.Token)
	uid := token.Claims.(*jwt.Claims).ID
	eng, err := h.vr.NewEngagement().FromContext(c)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, err)
	}
	id := c.Param("id")
	if !h.vr.Exists(id) {
		return c.NoContent(http.StatusNotFound)
	}
	me, err := h.vr.Profile(uid)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, err)
	}
	eng.AthleteID = uid
	eng.AthleteName = strings.TrimSpace(fmt.Sprintf("%s %s", me.FirstName, me.LastName))
	if err = h.vr.Join(id, eng); err != nil {
		return c.JSON(http.StatusInternalServerError, err)
	}
	return c.NoContent(http.StatusCreated)
}
// LeaveVr removes the authenticated athlete from the virtual run
// identified by the id parameter, or responds 404 for an unknown id.
func (h *Handler) LeaveVr(c echo.Context) error {
	token := c.Get("user").(*jwtgo.Token)
	uid := token.Claims.(*jwt.Claims).ID
	id := c.Param("id")
	if !h.vr.Exists(id) {
		return c.NoContent(http.StatusNotFound)
	}
	if err := h.vr.Leave(id, uid); err != nil {
		return c.JSON(http.StatusInternalServerError, err)
	}
	return c.NoContent(http.StatusOK)
}
// DeleteVr deletes a virtual run. Only the athlete that created the run
// may delete it; others get 403, and unknown ids get 404.
func (h *Handler) DeleteVr(c echo.Context) error {
	user := c.Get("user").(*jwtgo.Token)
	claims := user.Claims.(*jwt.Claims)
	uid := claims.ID
	id := c.Param("id")
	if !h.vr.Exists(id) {
		return c.NoContent(http.StatusNotFound)
	}
	vr, e := h.vr.FromLink(id)
	if e != nil {
		// Previously unchecked: a failed load would have read CreatedBy
		// from a zero-valued run and returned 403 instead of 500.
		return c.JSON(http.StatusInternalServerError, e)
	}
	if vr.CreatedBy != uid {
		return c.NoContent(http.StatusForbidden)
	}
	if e = h.vr.Delete(id); e != nil {
		return c.JSON(http.StatusInternalServerError, e)
	}
	return c.NoContent(http.StatusOK)
}
|
package proc
import (
"os"
"path/filepath"
"regexp"
"strconv"
)
// Process is a minimal view of a running process: its numeric id and the
// command line extracted from its proc directory.
type Process struct {
	PID     int    // numeric process id (the proc subdirectory name)
	CmdLine string // command line as returned by extractCmdLine
}
// Processes returns a Process for every readable entry under the proc
// root.
func Processes() ([]*Process, error) {
	acceptAll := func(*Process) bool { return true }
	return findProc(acceptAll)
}
// ProcessesByPattern returns the processes whose command line matches the
// given regular expression pattern. An invalid pattern is returned as an
// error; previously regexp.MustCompile made a caller-supplied bad pattern
// panic even though this function already has an error return.
func ProcessesByPattern(pattern string) ([]*Process, error) {
	expression, err := regexp.Compile(pattern)
	if err != nil {
		return nil, err
	}
	return findProc(func(process *Process) bool {
		return expression.MatchString(process.CmdLine)
	})
}
// findProc scans the proc root for numeric (PID) directories, builds a
// Process for each entry that still exists and has a readable command
// line, and returns those accepted by filter. Entries that vanish or are
// unreadable mid-scan are skipped silently.
func findProc(filter func(*Process) bool) ([]*Process, error) {
	fh, err := os.Open(root)
	if err != nil {
		return nil, err
	}
	dirNames, err := fh.Readdirnames(-1)
	fh.Close()
	if err != nil {
		return nil, err
	}
	var processes []*Process
	for _, dirName := range dirNames {
		pid, err := strconv.Atoi(dirName)
		if err != nil {
			// Not a number, so not a PID subdir.
			continue
		}
		procDir := filepath.Join(root, dirName)
		err = checkIfProcExists(procDir)
		if err != nil {
			// Process may be gone by now, or we don't have access.
			continue
		}
		cmdLine, err := extractCmdLine(procDir)
		if err != nil {
			continue
		}
		p := &Process{PID: pid, CmdLine: cmdLine}
		if filter(p) {
			processes = append(processes, p)
		}
	}
	return processes, nil
}
|
// Copyright 2012 The Freetype-Go Authors. All rights reserved.
// Use of this source code is governed by your choice of either the
// FreeType License or the GNU General Public License version 2 (or
// any later version), both of which can be found in the LICENSE file.
package truetype
// This file implements a Truetype bytecode interpreter.
// The opcodes are described at https://developer.apple.com/fonts/TTRefMan/RM05/Chap5.html
import (
"errors"
)
// callStackEntry is a bytecode call stack entry: the caller's program and
// program counter to return to, plus the remaining LOOPCALL iteration
// count (1 for a plain CALL).
type callStackEntry struct {
	program   []byte
	pc        int
	loopCount int32
}
// Hinter implements bytecode hinting. Pass a Hinter to GlyphBuf.Load to hint
// the resulting glyph. A Hinter can be re-used to hint a series of glyphs from
// a Font.
type Hinter struct {
	// stack is the interpreter's operand stack; store is its storage area
	// (read/written by the WS/RS opcodes). Both are sized from the font's
	// declared maxima in init.
	stack, store []int32
	// functions is a map from function number to bytecode.
	functions map[int32][]byte
	// g, font and scale are the glyph buffer, font and scale last used for
	// this Hinter. Changing the font will require running the new font's
	// fpgm bytecode. Changing either will require running the font's prep
	// bytecode.
	g     *GlyphBuf
	font  *Font
	scale int32
	// gs and defaultGS are the current and default graphics state. The
	// default graphics state is the global default graphics state after
	// the font's fpgm and prep programs have been run.
	gs, defaultGS graphicsState
}
// graphicsState is described at https://developer.apple.com/fonts/TTRefMan/RM04/Chap4.html
// Coordinates are fixed-point: f2dot14 for unit vectors, f26dot6 for pixel
// quantities (1<<6 represents 1.0).
type graphicsState struct {
	// Projection vector, freedom vector and dual projection vector.
	pv, fv, dv [2]f2dot14
	// Reference points and zone pointers.
	rp, zp [3]int32
	// Control Value / Single Width Cut-In.
	controlValueCutIn, singleWidthCutIn, singleWidth f26dot6
	// Delta base / shift.
	deltaBase, deltaShift int32
	// Minimum distance.
	minDist f26dot6
	// Loop count, consumed by looping instructions such as ALIGNRP.
	loop int32
	// Rounding policy.
	roundPeriod, roundPhase, roundThreshold f26dot6
	// Auto-flip.
	autoFlip bool
}
// globalDefaultGS is the initial graphics state assigned to a Hinter's
// defaultGS before a font's prep program runs (see Hinter.init).
var globalDefaultGS = graphicsState{
	pv:                [2]f2dot14{0x4000, 0}, // Unit vector along the X axis.
	fv:                [2]f2dot14{0x4000, 0},
	dv:                [2]f2dot14{0x4000, 0},
	zp:                [3]int32{1, 1, 1},
	controlValueCutIn: (17 << 6) / 16, // 17/16 as an f26dot6.
	deltaBase:         9,
	deltaShift:        3,
	minDist:           1 << 6, // 1 as an f26dot6.
	loop:              1,
	roundPeriod:       1 << 6, // 1 as an f26dot6.
	roundThreshold:    1 << 5, // 1/2 as an f26dot6.
	autoFlip:          true,
}
// init prepares h to hint glyphs from font f at the given scale. Changing
// the font re-runs its fpgm (function-definition) program; changing either
// the font or the scale re-runs the prep program and recomputes the
// default graphics state.
func (h *Hinter) init(g *GlyphBuf, f *Font, scale int32) error {
	h.g = g
	rescale := h.scale != scale
	if h.font != f {
		h.font, rescale = f, true
		// Drop any function definitions left over from the previous font.
		if h.functions == nil {
			h.functions = make(map[int32][]byte)
		} else {
			for k := range h.functions {
				delete(h.functions, k)
			}
		}
		// Grow the stack and storage areas to the font's declared maxima,
		// rounded up to multiples of 256 and 16 respectively so modest
		// increases don't force reallocation.
		if x := int(f.maxStackElements); x > len(h.stack) {
			x += 255
			x &^= 255
			h.stack = make([]int32, x)
		}
		if x := int(f.maxStorage); x > len(h.store) {
			x += 15
			x &^= 15
			h.store = make([]int32, x)
		}
		if len(f.fpgm) != 0 {
			if err := h.run(f.fpgm); err != nil {
				return err
			}
		}
	}
	if rescale {
		h.scale = scale
		h.defaultGS = globalDefaultGS
		if len(f.prep) != 0 {
			if err := h.run(f.prep); err != nil {
				return err
			}
			h.defaultGS = h.gs
			// The MS rasterizer doesn't allow the following graphics state
			// variables to be modified by the CVT program.
			h.defaultGS.pv = globalDefaultGS.pv
			h.defaultGS.fv = globalDefaultGS.fv
			h.defaultGS.dv = globalDefaultGS.dv
			h.defaultGS.rp = globalDefaultGS.rp
			h.defaultGS.zp = globalDefaultGS.zp
			h.defaultGS.loop = globalDefaultGS.loop
		}
	}
	return nil
}
func (h *Hinter) run(program []byte) error {
h.gs = h.defaultGS
if len(program) > 50000 {
return errors.New("truetype: hinting: too many instructions")
}
var (
steps, pc, top int
opcode uint8
callStack [32]callStackEntry
callStackTop int
)
for 0 <= pc && pc < len(program) {
steps++
if steps == 100000 {
return errors.New("truetype: hinting: too many steps")
}
opcode = program[pc]
if popCount[opcode] == q {
return errors.New("truetype: hinting: unimplemented instruction")
}
if top < int(popCount[opcode]) {
return errors.New("truetype: hinting: stack underflow")
}
switch opcode {
case opSVTCA0:
h.gs.pv = [2]f2dot14{0, 0x4000}
h.gs.fv = [2]f2dot14{0, 0x4000}
h.gs.dv = [2]f2dot14{0, 0x4000}
case opSVTCA1:
h.gs.pv = [2]f2dot14{0x4000, 0}
h.gs.fv = [2]f2dot14{0x4000, 0}
h.gs.dv = [2]f2dot14{0x4000, 0}
case opSPVTCA0:
h.gs.pv = [2]f2dot14{0, 0x4000}
h.gs.dv = [2]f2dot14{0, 0x4000}
case opSPVTCA1:
h.gs.pv = [2]f2dot14{0x4000, 0}
h.gs.dv = [2]f2dot14{0x4000, 0}
case opSFVTCA0:
h.gs.fv = [2]f2dot14{0, 0x4000}
case opSFVTCA1:
h.gs.fv = [2]f2dot14{0x4000, 0}
case opSPVFS:
top -= 2
h.gs.pv[0] = f2dot14(h.stack[top+0])
h.gs.pv[1] = f2dot14(h.stack[top+1])
// TODO: normalize h.gs.pv ??
// TODO: h.gs.dv = h.gs.pv ??
case opSFVFS:
top -= 2
h.gs.fv[0] = f2dot14(h.stack[top+0])
h.gs.fv[1] = f2dot14(h.stack[top+1])
// TODO: normalize h.gs.fv ??
case opGPV:
if top+1 >= len(h.stack) {
return errors.New("truetype: hinting: stack overflow")
}
h.stack[top+0] = int32(h.gs.pv[0])
h.stack[top+1] = int32(h.gs.pv[1])
top += 2
case opGFV:
if top+1 >= len(h.stack) {
return errors.New("truetype: hinting: stack overflow")
}
h.stack[top+0] = int32(h.gs.fv[0])
h.stack[top+1] = int32(h.gs.fv[1])
top += 2
case opSFVTPV:
h.gs.fv = h.gs.pv
case opSRP0, opSRP1, opSRP2:
top--
h.gs.rp[opcode-opSRP0] = h.stack[top]
case opSZP0, opSZP1, opSZP2:
top--
h.gs.zp[opcode-opSZP0] = h.stack[top]
case opSZPS:
top--
h.gs.zp[0] = h.stack[top]
h.gs.zp[1] = h.stack[top]
h.gs.zp[2] = h.stack[top]
case opSLOOP:
top--
if h.stack[top] <= 0 {
return errors.New("truetype: hinting: invalid data")
}
h.gs.loop = h.stack[top]
case opRTG:
h.gs.roundPeriod = 1 << 6
h.gs.roundPhase = 0
h.gs.roundThreshold = 1 << 5
case opRTHG:
h.gs.roundPeriod = 1 << 6
h.gs.roundPhase = 1 << 5
h.gs.roundThreshold = 1 << 5
case opSMD:
top--
h.gs.minDist = f26dot6(h.stack[top])
case opELSE:
opcode = 1
goto ifelse
case opJMPR:
top--
pc += int(h.stack[top])
continue
case opSCVTCI:
top--
h.gs.controlValueCutIn = f26dot6(h.stack[top])
case opSSWCI:
top--
h.gs.singleWidthCutIn = f26dot6(h.stack[top])
case opSSW:
top--
h.gs.singleWidth = f26dot6(h.stack[top])
case opDUP:
if top >= len(h.stack) {
return errors.New("truetype: hinting: stack overflow")
}
h.stack[top] = h.stack[top-1]
top++
case opPOP:
top--
case opCLEAR:
top = 0
case opSWAP:
h.stack[top-1], h.stack[top-2] = h.stack[top-2], h.stack[top-1]
case opDEPTH:
if top >= len(h.stack) {
return errors.New("truetype: hinting: stack overflow")
}
h.stack[top] = int32(top)
top++
case opCINDEX, opMINDEX:
x := int(h.stack[top-1])
if x <= 0 || x >= top {
return errors.New("truetype: hinting: invalid data")
}
h.stack[top-1] = h.stack[top-1-x]
if opcode == opMINDEX {
copy(h.stack[top-1-x:top-1], h.stack[top-x:top])
top--
}
case opLOOPCALL, opCALL:
if callStackTop >= len(callStack) {
return errors.New("truetype: hinting: call stack overflow")
}
top--
f, ok := h.functions[h.stack[top]]
if !ok {
return errors.New("truetype: hinting: undefined function")
}
callStack[callStackTop] = callStackEntry{program, pc, 1}
if opcode == opLOOPCALL {
top--
if h.stack[top] == 0 {
break
}
callStack[callStackTop].loopCount = h.stack[top]
}
callStackTop++
program, pc = f, 0
continue
case opFDEF:
// Save all bytecode up until the next ENDF.
startPC := pc + 1
fdefloop:
for {
pc++
if pc >= len(program) {
return errors.New("truetype: hinting: unbalanced FDEF")
}
switch program[pc] {
case opFDEF:
return errors.New("truetype: hinting: nested FDEF")
case opENDF:
top--
h.functions[h.stack[top]] = program[startPC : pc+1]
break fdefloop
default:
var ok bool
pc, ok = skipInstructionPayload(program, pc)
if !ok {
return errors.New("truetype: hinting: unbalanced FDEF")
}
}
}
case opENDF:
if callStackTop == 0 {
return errors.New("truetype: hinting: call stack underflow")
}
callStackTop--
callStack[callStackTop].loopCount--
if callStack[callStackTop].loopCount != 0 {
callStackTop++
pc = 0
continue
}
program, pc = callStack[callStackTop].program, callStack[callStackTop].pc
case opMDAP0, opMDAP1:
points := h.g.points(h.gs.zp[0])
top--
i := int(h.stack[top])
if i < 0 || len(points) <= i {
return errors.New("truetype: hinting: point out of range")
}
p := &points[i]
distance := f26dot6(0)
if opcode == opMDAP1 {
distance = dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.pv)
// TODO: metrics compensation.
distance = h.round(distance) - distance
}
h.move(p, distance)
h.gs.rp[0] = int32(i)
h.gs.rp[1] = int32(i)
case opALIGNRP:
if top < int(h.gs.loop) {
return errors.New("truetype: hinting: stack underflow")
}
i, points := int(h.gs.rp[0]), h.g.points(h.gs.zp[0])
if i < 0 || len(points) <= i {
return errors.New("truetype: hinting: point out of range")
}
ref := &points[i]
points = h.g.points(h.gs.zp[1])
for ; h.gs.loop != 0; h.gs.loop-- {
top--
i = int(h.stack[top])
if i < 0 || len(points) <= i {
return errors.New("truetype: hinting: point out of range")
}
p := &points[i]
h.move(p, -dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv))
}
h.gs.loop = 1
case opRTDG:
h.gs.roundPeriod = 1 << 5
h.gs.roundPhase = 0
h.gs.roundThreshold = 1 << 4
case opNPUSHB:
opcode = 0
goto push
case opNPUSHW:
opcode = 0x80
goto push
case opWS:
top -= 2
i := int(h.stack[top])
if i < 0 || len(h.store) <= i {
return errors.New("truetype: hinting: invalid data")
}
h.store[i] = h.stack[top+1]
case opRS:
i := int(h.stack[top-1])
if i < 0 || len(h.store) <= i {
return errors.New("truetype: hinting: invalid data")
}
h.stack[top-1] = h.store[i]
case opMPPEM, opMPS:
if top >= len(h.stack) {
return errors.New("truetype: hinting: stack overflow")
}
// For MPS, point size should be irrelevant; we return the PPEM.
h.stack[top] = h.scale >> 6
top++
case opFLIPON, opFLIPOFF:
h.gs.autoFlip = opcode == opFLIPON
case opDEBUG:
// No-op.
case opLT:
top--
h.stack[top-1] = bool2int32(h.stack[top-1] < h.stack[top])
case opLTEQ:
top--
h.stack[top-1] = bool2int32(h.stack[top-1] <= h.stack[top])
case opGT:
top--
h.stack[top-1] = bool2int32(h.stack[top-1] > h.stack[top])
case opGTEQ:
top--
h.stack[top-1] = bool2int32(h.stack[top-1] >= h.stack[top])
case opEQ:
top--
h.stack[top-1] = bool2int32(h.stack[top-1] == h.stack[top])
case opNEQ:
top--
h.stack[top-1] = bool2int32(h.stack[top-1] != h.stack[top])
case opODD, opEVEN:
i := h.round(f26dot6(h.stack[top-1])) >> 6
h.stack[top-1] = int32(i&1) ^ int32(opcode-opODD)
case opIF:
top--
if h.stack[top] == 0 {
opcode = 0
goto ifelse
}
case opEIF:
// No-op.
case opAND:
top--
h.stack[top-1] = bool2int32(h.stack[top-1] != 0 && h.stack[top] != 0)
case opOR:
top--
h.stack[top-1] = bool2int32(h.stack[top-1]|h.stack[top] != 0)
case opNOT:
h.stack[top-1] = bool2int32(h.stack[top-1] == 0)
case opSDB:
top--
h.gs.deltaBase = h.stack[top]
case opSDS:
top--
h.gs.deltaShift = h.stack[top]
case opADD:
top--
h.stack[top-1] += h.stack[top]
case opSUB:
top--
h.stack[top-1] -= h.stack[top]
case opDIV:
top--
if h.stack[top] == 0 {
return errors.New("truetype: hinting: division by zero")
}
h.stack[top-1] = int32(f26dot6(h.stack[top-1]).div(f26dot6(h.stack[top])))
case opMUL:
top--
h.stack[top-1] = int32(f26dot6(h.stack[top-1]).mul(f26dot6(h.stack[top])))
case opABS:
if h.stack[top-1] < 0 {
h.stack[top-1] = -h.stack[top-1]
}
case opNEG:
h.stack[top-1] = -h.stack[top-1]
case opFLOOR:
h.stack[top-1] &^= 63
case opCEILING:
h.stack[top-1] += 63
h.stack[top-1] &^= 63
case opROUND00, opROUND01, opROUND10, opROUND11:
// The four flavors of opROUND are equivalent. See the comment below on
// opNROUND for the rationale.
h.stack[top-1] = int32(h.round(f26dot6(h.stack[top-1])))
case opNROUND00, opNROUND01, opNROUND10, opNROUND11:
// No-op. The spec says to add one of four "compensations for the engine
// characteristics", to cater for things like "different dot-size printers".
// https://developer.apple.com/fonts/TTRefMan/RM02/Chap2.html#engine_compensation
// This code does not implement engine compensation, as we don't expect to
// be used to output on dot-matrix printers.
case opSROUND, opS45ROUND:
top--
switch (h.stack[top] >> 6) & 0x03 {
case 0:
h.gs.roundPeriod = 1 << 5
case 1, 3:
h.gs.roundPeriod = 1 << 6
case 2:
h.gs.roundPeriod = 1 << 7
}
if opcode == opS45ROUND {
// The spec says to multiply by √2, but the C Freetype code says 1/√2.
// We go with 1/√2.
h.gs.roundPeriod *= 46341
h.gs.roundPeriod /= 65536
}
h.gs.roundPhase = h.gs.roundPeriod * f26dot6((h.stack[top]>>4)&0x03) / 4
if x := h.stack[top] & 0x0f; x != 0 {
h.gs.roundThreshold = h.gs.roundPeriod * f26dot6(x-4) / 8
} else {
h.gs.roundThreshold = h.gs.roundPeriod - 1
}
case opJROT:
top -= 2
if h.stack[top+1] != 0 {
pc += int(h.stack[top])
continue
}
case opJROF:
top -= 2
if h.stack[top+1] == 0 {
pc += int(h.stack[top])
continue
}
case opROFF:
h.gs.roundPeriod = 0
h.gs.roundPhase = 0
h.gs.roundThreshold = 0
case opRUTG:
h.gs.roundPeriod = 1 << 6
h.gs.roundPhase = 0
h.gs.roundThreshold = 1<<6 - 1
case opRDTG:
h.gs.roundPeriod = 1 << 6
h.gs.roundPhase = 0
h.gs.roundThreshold = 0
case opSANGW, opAA:
// These ops are "anachronistic" and no longer used.
top--
case opSCANCTRL:
// We do not support dropout control, as we always rasterize grayscale glyphs.
top--
case opGETINFO:
res := int32(0)
if h.stack[top-1]&(1<<0) != 0 {
// Set the engine version. We hard-code this to 35, the same as
// the C freetype code, which says that "Version~35 corresponds
// to MS rasterizer v.1.7 as used e.g. in Windows~98".
res |= 35
}
if h.stack[top-1]&(1<<5) != 0 {
// Set that we support grayscale.
res |= 1 << 12
}
// We set no other bits, as we do not support rotated or stretched glyphs.
h.stack[top-1] = res
case opIDEF:
// IDEF is for ancient versions of the bytecode interpreter, and is no longer used.
return errors.New("truetype: hinting: unsupported IDEF instruction")
case opROLL:
h.stack[top-1], h.stack[top-3], h.stack[top-2] =
h.stack[top-3], h.stack[top-2], h.stack[top-1]
case opMAX:
top--
if h.stack[top-1] < h.stack[top] {
h.stack[top-1] = h.stack[top]
}
case opMIN:
top--
if h.stack[top-1] > h.stack[top] {
h.stack[top-1] = h.stack[top]
}
case opSCANTYPE:
// We do not support dropout control, as we always rasterize grayscale glyphs.
top--
case opPUSHB000, opPUSHB001, opPUSHB010, opPUSHB011,
opPUSHB100, opPUSHB101, opPUSHB110, opPUSHB111:
opcode -= opPUSHB000 - 1
goto push
case opPUSHW000, opPUSHW001, opPUSHW010, opPUSHW011,
opPUSHW100, opPUSHW101, opPUSHW110, opPUSHW111:
opcode -= opPUSHW000 - 1
opcode += 0x80
goto push
case opMDRP00000, opMDRP00001, opMDRP00010, opMDRP00011,
opMDRP00100, opMDRP00101, opMDRP00110, opMDRP00111,
opMDRP01000, opMDRP01001, opMDRP01010, opMDRP01011,
opMDRP01100, opMDRP01101, opMDRP01110, opMDRP01111,
opMDRP10000, opMDRP10001, opMDRP10010, opMDRP10011,
opMDRP10100, opMDRP10101, opMDRP10110, opMDRP10111,
opMDRP11000, opMDRP11001, opMDRP11010, opMDRP11011,
opMDRP11100, opMDRP11101, opMDRP11110, opMDRP11111:
i, points := int(h.gs.rp[0]), h.g.points(h.gs.zp[0])
if i < 0 || len(points) <= i {
return errors.New("truetype: hinting: point out of range")
}
ref := &points[i]
top--
i = int(h.stack[top])
points = h.g.points(h.gs.zp[1])
if i < 0 || len(points) <= i {
return errors.New("truetype: hinting: point out of range")
}
p := &points[i]
origDist := f26dot6(0)
if h.gs.zp[0] == 0 && h.gs.zp[1] == 0 {
p0 := &h.g.Unhinted[i]
p1 := &h.g.Unhinted[h.gs.rp[0]]
origDist = dotProduct(f26dot6(p0.X-p1.X), f26dot6(p0.Y-p1.Y), h.gs.dv)
} else {
p0 := &h.g.InFontUnits[i]
p1 := &h.g.InFontUnits[h.gs.rp[0]]
origDist = dotProduct(f26dot6(p0.X-p1.X), f26dot6(p0.Y-p1.Y), h.gs.dv)
origDist = f26dot6(h.font.scale(h.scale * int32(origDist)))
}
// Single-width cut-in test.
if x := (origDist - h.gs.singleWidth).abs(); x < h.gs.singleWidthCutIn {
if origDist >= 0 {
origDist = h.gs.singleWidthCutIn
} else {
origDist = -h.gs.singleWidthCutIn
}
}
// Rounding bit.
// TODO: metrics compensation.
distance := origDist
if opcode&0x04 != 0 {
distance = h.round(origDist)
}
// Minimum distance bit.
if opcode&0x08 != 0 {
if origDist >= 0 {
if distance < h.gs.minDist {
distance = h.gs.minDist
}
} else {
if distance > -h.gs.minDist {
distance = -h.gs.minDist
}
}
}
// Set-RP0 bit.
if opcode&0x10 != 0 {
h.gs.rp[0] = int32(i)
}
h.gs.rp[1] = h.gs.rp[0]
h.gs.rp[2] = int32(i)
// Move the point.
origDist = dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv)
h.move(p, distance-origDist)
default:
return errors.New("truetype: hinting: unrecognized instruction")
}
pc++
continue
ifelse:
// Skip past bytecode until the next ELSE (if opcode == 0) or the
// next EIF (for all opcodes). Opcode == 0 means that we have come
// from an IF. Opcode == 1 means that we have come from an ELSE.
{
ifelseloop:
for depth := 0; ; {
pc++
if pc >= len(program) {
return errors.New("truetype: hinting: unbalanced IF or ELSE")
}
switch program[pc] {
case opIF:
depth++
case opELSE:
if depth == 0 && opcode == 0 {
break ifelseloop
}
case opEIF:
depth--
if depth < 0 {
break ifelseloop
}
default:
var ok bool
pc, ok = skipInstructionPayload(program, pc)
if !ok {
return errors.New("truetype: hinting: unbalanced IF or ELSE")
}
}
}
pc++
continue
}
push:
// Push n elements from the program to the stack, where n is the low 7 bits of
// opcode. If the low 7 bits are zero, then n is the next byte from the program.
// The high bit being 0 means that the elements are zero-extended bytes.
// The high bit being 1 means that the elements are sign-extended words.
{
width := 1
if opcode&0x80 != 0 {
opcode &^= 0x80
width = 2
}
if opcode == 0 {
pc++
if pc >= len(program) {
return errors.New("truetype: hinting: insufficient data")
}
opcode = program[pc]
}
pc++
if top+int(opcode) > len(h.stack) {
return errors.New("truetype: hinting: stack overflow")
}
if pc+width*int(opcode) > len(program) {
return errors.New("truetype: hinting: insufficient data")
}
for ; opcode > 0; opcode-- {
if width == 1 {
h.stack[top] = int32(program[pc])
} else {
h.stack[top] = int32(int8(program[pc]))<<8 | int32(program[pc+1])
}
top++
pc += width
}
continue
}
}
return nil
}
// move displaces the point p by the given 26.6 fixed point distance along
// the freedom vector, and marks the affected axes as touched.
func (h *Hinter) move(p *Point, distance f26dot6) {
	// Fast path: the freedom vector has no x component, so only y moves.
	if h.gs.fv[0] == 0 {
		p.Y += int32(distance)
		p.Flags |= flagTouchedY
		return
	}
	// Fast path: the freedom vector has no y component, so only x moves.
	if h.gs.fv[1] == 0 {
		p.X += int32(distance)
		p.Flags |= flagTouchedX
		return
	}
	// General case: scale the per-axis displacement by fv / (fv·pv).
	// The >> 14 rescales the 2.14 dot product of the two unit vectors.
	fx, fy := int64(h.gs.fv[0]), int64(h.gs.fv[1])
	px, py := int64(h.gs.pv[0]), int64(h.gs.pv[1])
	fvDotPv := (fx*px + fy*py) >> 14
	p.X += int32(int64(distance) * fx / fvDotPv)
	p.Y += int32(int64(distance) * fy / fvDotPv)
	p.Flags |= flagTouchedX | flagTouchedY
}
// skipInstructionPayload advances pc past the inline data that follows a
// variable length PUSHB or PUSHW instruction, returning the new pc and
// whether the program contained the count byte it needed.
func skipInstructionPayload(program []byte, pc int) (newPC int, ok bool) {
	switch op := program[pc]; op {
	case opNPUSHB:
		// A count byte follows the opcode, then that many data bytes.
		if pc+1 >= len(program) {
			return 0, false
		}
		pc += 1 + int(program[pc+1])
	case opNPUSHW:
		// A count byte follows the opcode, then that many 2-byte words.
		if pc+1 >= len(program) {
			return 0, false
		}
		pc += 1 + 2*int(program[pc+1])
	case opPUSHB000, opPUSHB001, opPUSHB010, opPUSHB011,
		opPUSHB100, opPUSHB101, opPUSHB110, opPUSHB111:
		// The count (1..8) is encoded in the opcode itself.
		pc += 1 + int(op-opPUSHB000)
	case opPUSHW000, opPUSHW001, opPUSHW010, opPUSHW011,
		opPUSHW100, opPUSHW101, opPUSHW110, opPUSHW111:
		// As above, but each pushed element is two bytes wide.
		pc += 2 * (1 + int(op-opPUSHW000))
	}
	return pc, true
}
// f2dot14 is a 2.14 fixed point number: an int16 with 14 fractional bits.
type f2dot14 int16

// f26dot6 is a 26.6 fixed point number: an int32 with 6 fractional bits.
type f26dot6 int32
// abs returns the absolute value of x in 26.6 fixed point arithmetic.
func (x f26dot6) abs() f26dot6 {
	if x >= 0 {
		return x
	}
	return -x
}
// div returns x/y in 26.6 fixed point arithmetic. Like any Go integer
// division, it panics if y is zero; callers check for that beforehand.
func (x f26dot6) div(y f26dot6) f26dot6 {
	numerator := int64(x) << 6
	return f26dot6(numerator / int64(y))
}
// mul returns x*y in 26.6 fixed point arithmetic. The arithmetic right
// shift drops the extra 6 fractional bits, flooring the result.
func (x f26dot6) mul(y f26dot6) f26dot6 {
	product := int64(x) * int64(y)
	return f26dot6(product >> 6)
}
// dotProduct returns the dot product of the 26.6 vector (x, y) with the
// 2.14 vector q, shifting out q's 14 fractional bits to yield 26.6.
func dotProduct(x, y f26dot6, q [2]f2dot14) f26dot6 {
	sum := int64(x)*int64(q[0]) + int64(y)*int64(q[1])
	return f26dot6(sum >> 14)
}
// round rounds the given number. The rounding algorithm is described at
// https://developer.apple.com/fonts/TTRefMan/RM02/Chap2.html#rounding
func (h *Hinter) round(x f26dot6) f26dot6 {
	if h.gs.roundPeriod == 0 {
		// A zero round period means rounding is off.
		return x
	}
	neg := x < 0
	// Shift onto the rounding grid: remove the phase, add the threshold,
	// then truncate down to a multiple of the period.
	x -= h.gs.roundPhase
	x += h.gs.roundThreshold
	if x >= 0 {
		x = (x / h.gs.roundPeriod) * h.gs.roundPeriod
	} else {
		// Go's integer division truncates toward zero; this adjustment
		// makes negative values floor toward negative infinity instead.
		x -= h.gs.roundPeriod
		x += 1
		x = (x / h.gs.roundPeriod) * h.gs.roundPeriod
	}
	x += h.gs.roundPhase
	// If rounding crossed zero, clamp the result back to the input's side
	// of zero (the nearest grid value with the original sign).
	if neg {
		if x >= 0 {
			x = h.gs.roundPhase - h.gs.roundPeriod
		}
	} else if x < 0 {
		x = h.gs.roundPhase
	}
	return x
}
// bool2int32 converts a boolean to an int32: 1 for true, 0 for false.
func bool2int32(b bool) int32 {
	var r int32
	if b {
		r = 1
	}
	return r
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.