text
stringlengths 11
4.05M
|
|---|
package field
import "github.com/naruta/terraform-provider-kintone/kintone"
// SingleLineText models a kintone single-line text field definition.
type SingleLineText struct {
	code  kintone.FieldCode
	label string
}

// NewSingleLineText builds a SingleLineText with the given code and label.
func NewSingleLineText(code kintone.FieldCode, label string) *SingleLineText {
	field := SingleLineText{code: code, label: label}
	return &field
}

// Type reports the kintone field type constant for this field.
func (f *SingleLineText) Type() kintone.FieldType {
	return "SINGLE_LINE_TEXT"
}

// Code returns the field code.
func (f *SingleLineText) Code() kintone.FieldCode {
	return f.code
}

// Label returns the display label of the field.
func (f *SingleLineText) Label() string {
	return f.label
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikvgroup
import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/klog"
)
// ControlInterface abstracts the reconcile entry point for a TiKVGroup
// resource so the implementation can be swapped in tests.
type ControlInterface interface {
	// ReconcileTiKVGroup drives the actual state of ta towards its spec.
	ReconcileTiKVGroup(ta *v1alpha1.TiKVGroup) error
}
// NewDefaultTikvGroupControl returns the default ControlInterface
// implementation.
func NewDefaultTikvGroupControl() ControlInterface {
	return new(defaultTiKVGroupControl)
}
// defaultTiKVGroupControl is the default (currently stub) TiKVGroup
// reconciler; it carries no state yet.
type defaultTiKVGroupControl struct {
	// TODO: sync manager who control the TiKVGroup
}
// ReconcileTiKVGroup runs one reconcile pass over tg and returns any
// errors collected, aggregated into a single error (nil when none).
func (dtc *defaultTiKVGroupControl) ReconcileTiKVGroup(tg *v1alpha1.TiKVGroup) error {
	errs := make([]error, 0, 1)
	if err := dtc.reconcileTiKVGroup(tg); err != nil {
		errs = append(errs, err)
	}
	// NewAggregate returns nil for an empty list.
	return errors.NewAggregate(errs)
}
// reconcileTiKVGroup performs the actual sync work for tg.
// It is currently a stub that only logs the group being synced.
func (dtc *defaultTiKVGroupControl) reconcileTiKVGroup(tg *v1alpha1.TiKVGroup) error {
	//TODO: start syncing
	klog.Infof("sync TiKVGroup[%s/%s]", tg.Namespace, tg.Name)
	return nil
}
|
// Copyright 2017 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package targets
import (
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"

	log "github.com/golang/glog"
	"github.com/sodafoundation/dock/pkg/utils"
)
const (
	// opensdsPrefix namespaces the per-volume tgt config files.
	opensdsPrefix = "opensds-"
	// tgtAdminCmd is the tgt administration CLI this package shells out to.
	tgtAdminCmd = "tgt-admin"
)
// ISCSITarget wraps the tgt-admin operations needed to expose and tear
// down iSCSI targets backed by volumes.
type ISCSITarget interface {
	// CreateISCSITarget exposes the device at path under tgtIqn to hostIp.
	CreateISCSITarget(volId, tgtIqn, path, hostIp, initiator string, chapAuth []string) error
	// GetISCSITarget returns the numeric target id for iqn, or -1 if absent.
	GetISCSITarget(iqn string) int
	// RemoveISCSITarget revokes hostIp's access and deletes the target
	// once no initiators remain.
	RemoveISCSITarget(volId, iqn, hostIp string) error
	// GetLun returns the LUN number backing path, or -1 if not found.
	GetLun(path string) int
}
// NewISCSITarget builds a tgt-admin backed ISCSITarget bound to bip, with
// its per-volume config files stored under tgtConfDir.
func NewISCSITarget(bip, tgtConfDir string) ISCSITarget {
	target := &tgtTarget{
		BindIp:     bip,
		TgtConfDir: tgtConfDir,
	}
	return target
}
// tgtTarget drives the Linux SCSI target daemon via the tgt-admin CLI.
type tgtTarget struct {
	BindIp string // ip the iSCSI portal binds to
	TgtConfDir string // directory holding the per-volume tgt config files
}
// GetLun scans `tgt-admin --show` output and returns the LUN number of
// the backing store at path, or -1 when it cannot be determined.
func (t *tgtTarget) GetLun(path string) int {
	out, err := t.execCmd(tgtAdminCmd, "--show")
	if err != nil {
		log.Errorf("Fail to exec '%s' to display iscsi target:%v", tgtAdminCmd, err)
		return -1
	}
	var lun = -1
	var lines = strings.Split(out, "\n")
	for num, line := range lines {
		if strings.Contains(line, path) {
			// Walk backwards from the matching backing-store line to the
			// nearest "LUN" header; its second field is the LUN number.
			for i := 1; i < num; i++ {
				if strings.Contains(lines[num-i], "LUN") {
					lunString := strings.Fields(lines[num-i])[1]
					lun, err = strconv.Atoi(lunString)
					if err != nil {
						return -1
					}
					return lun
				}
			}
		}
	}
	// NOTE(review): this point is only reached when nothing was found, so
	// `lun` is always -1 here and this log line is misleading.
	log.Info("Got lun id:", lun)
	return -1
}
// getTgtConfPath returns the per-volume tgt config file path, i.e.
// <TgtConfDir>/opensds-<volId>.conf.
// filepath.Join builds an OS-correct, cleaned path instead of raw string
// concatenation with "/".
func (t *tgtTarget) getTgtConfPath(volId string) string {
	return filepath.Join(t.TgtConfDir, opensdsPrefix+volId+".conf")
}
// configMap maps a tgt config keyword (e.g. "initiator-address") to the
// list of values that appear for it in a target definition.
type configMap map[string][]string
// CreateScsiIDFromVolID builds a 32-digit NAA.6 (Network Addressing
// Authority) identifier for the volume by stripping the dashes from the
// volume UUID and forcing the leading digit to 6.
func CreateScsiIDFromVolID(volID string) string {
	id := []rune(strings.ReplaceAll(volID, "-", ""))
	// NAA type 6 marks the IEEE registered extended WWN format.
	id[0] = '6'
	return string(id)
}
// CreateISCSITarget exposes the block device at path as an iSCSI target
// named tgtIqn, restricted to the initiator at hostIp and optionally
// guarded by CHAP credentials (chapAuth = [user, password]).
// The target definition is persisted under TgtConfDir and activated via
// `tgt-admin --force --update`.
func (t *tgtTarget) CreateISCSITarget(volId, tgtIqn, path, hostIp, initiator string, chapAuth []string) error {
	// Multi-attach require a specific ip
	if hostIp == "" || hostIp == "ALL" {
		msg := fmt.Sprintf("create ISCSI target failed: host ip %s cannot be empty or ALL, iscsi only allows specific ip access, not all", hostIp)
		log.Error(msg)
		return errors.New(msg)
	}
	result := net.ParseIP(hostIp)
	if result == nil {
		msg := fmt.Sprintf("%s is not a valid ip, please give the proper ip", hostIp)
		log.Error(msg)
		return errors.New(msg)
	}
	// Make sure the config directory exists before writing into it.
	if exist, _ := utils.PathExists(t.TgtConfDir); !exist {
		os.MkdirAll(t.TgtConfDir, 0755)
	}
	config := make(configMap)
	configFile := t.getTgtConfPath(volId)
	scsiID := CreateScsiIDFromVolID(volId)
	// Merge with any existing config so previously granted initiator
	// addresses and credentials are preserved.
	if IsExist(configFile) {
		data, err := ioutil.ReadFile(configFile)
		if err != nil {
			return err
		}
		config.parse(string(data))
	}
	var charStr string
	// NOTE(review): chapAuth is indexed at [0] and [1]; a one-element
	// slice would panic — confirm callers always pass zero or two items.
	if len(chapAuth) != 0 {
		charStr = fmt.Sprintf("%s %s", chapAuth[0], chapAuth[1])
		config.updateConfigmap("incominguser", charStr)
	}
	config.updateConfigmap("initiator-address", hostIp)
	config.updateConfigmap("driver", "iscsi")
	config.updateConfigmap("backing-store", path)
	config.updateConfigmap("write-cache", "on")
	config.updateConfigmap("scsi_id", scsiID)
	err := config.writeConfig(configFile, tgtIqn)
	if err != nil {
		log.Errorf("failed to update config file %s %v", t.getTgtConfPath(volId), err)
		return err
	}
	// Ask tgtd to (re)load the target definition.
	if info, err := t.execCmd(tgtAdminCmd, "--force", "--update", tgtIqn); err != nil {
		log.Errorf("Fail to exec '%s' to create iscsi target, %s,%v", tgtAdminCmd, string(info), err)
		return err
	}
	// Double-check the target actually registered; the update call alone
	// is not trusted.
	if t.GetISCSITarget(tgtIqn) == -1 {
		log.Errorf("Failed to create iscsi target for Volume "+
			"ID: %s. It could be caused by problem "+
			"with concurrency. "+
			"Also please ensure your tgtd config "+
			"file contains 'include %s/*'",
			volId, t.TgtConfDir)
		return fmt.Errorf("failed to create volume(%s) attachment", volId)
	}
	return nil
}
// GetISCSITarget parses `tgt-admin --show` output and returns the numeric
// target id of the target named iqn, or -1 when it is not present or the
// id cannot be parsed.
func (t *tgtTarget) GetISCSITarget(iqn string) int {
	out, err := t.execCmd(tgtAdminCmd, "--show")
	if err != nil {
		log.Errorf("Fail to exec '%s' to display iscsi target:%v", tgtAdminCmd, err)
		return -1
	}
	var tid = -1
	for _, line := range strings.Split(out, "\n") {
		if strings.Contains(line, iqn) {
			// The matching line presumably looks like "Target <tid>: <iqn>";
			// take the second field of the part before the colon.
			tidString := strings.Fields(strings.Split(line, ":")[0])[1]
			tid, err = strconv.Atoi(tidString)
			if err != nil {
				return -1
			}
			break
		}
	}
	return tid
}
// RemoveISCSITarget revokes hostIp's access to the volume's target. When
// no initiator addresses remain, the whole target and its config file are
// deleted; otherwise the config is rewritten and re-applied.
func (t *tgtTarget) RemoveISCSITarget(volId, iqn, hostIp string) error {
	if hostIp == "" {
		return errors.New("remove ISCSI target failed, host ip cannot be empty")
	}
	tgtConfPath := t.getTgtConfPath(volId)
	if exist, _ := utils.PathExists(tgtConfPath); !exist {
		log.Warningf("Volume path %s does not exist, nothing to remove.", tgtConfPath)
		return nil
	}
	config := make(configMap)
	data, err := ioutil.ReadFile(tgtConfPath)
	if err != nil {
		return err
	}
	config.parse(string(data))
	// Drop the first occurrence of hostIp from the initiator list.
	ips := config["initiator-address"]
	for i, v := range ips {
		if v == hostIp {
			ips = append(ips[:i], ips[i+1:]...)
			break
		}
	}
	config["initiator-address"] = ips
	if len(ips) == 0 {
		// Last initiator gone: tear the target down completely.
		if info, err := t.execCmd(tgtAdminCmd, "--force", "--delete", iqn); err != nil {
			log.Errorf("Fail to exec '%s' to forcely remove iscsi target, %s, %v",
				tgtAdminCmd, string(info), err)
			return err
		}
		os.Remove(tgtConfPath)
	} else {
		// Other initiators remain: rewrite the config and re-apply it.
		err := config.writeConfig(t.getTgtConfPath(volId), iqn)
		if err != nil {
			log.Errorf("failed to update config file %s %v", t.getTgtConfPath(volId), err)
			return err
		}
		// NOTE(review): the messages below say "create"; they were copied
		// from CreateISCSITarget and are misleading in this remove path.
		if info, err := t.execCmd(tgtAdminCmd, "--force", "--update", iqn); err != nil {
			log.Errorf("Fail to exec '%s' to create iscsi target, %s,%v", tgtAdminCmd, string(info), err)
			return err
		}
		if t.GetISCSITarget(iqn) == -1 {
			log.Errorf("Failed to create iscsi target for Volume "+
				"ID: %s. It could be caused by problem "+
				"with concurrency. "+
				"Also please ensure your tgtd config "+
				"file contains 'include %s/*'",
				volId, t.TgtConfDir)
			return fmt.Errorf("failed to create volume(%s) attachment", volId)
		}
	}
	return nil
}
// execCmd runs `name cmd...` and returns its stdout together with any
// execution error. Failures are logged but not wrapped.
func (*tgtTarget) execCmd(name string, cmd ...string) (string, error) {
	ret, err := exec.Command(name, cmd...).Output()
	// Log the binary plus its joined arguments once; the original printed
	// the argument slice twice and omitted the binary name.
	log.Infoln("Command:", name, strings.Join(cmd, " "))
	log.V(8).Infof("result:%s", string(ret))
	if err != nil {
		log.Error("error info:", err)
	}
	return string(ret), err
}
// parse loads the recognised keys of a tgt target definition into the map.
// Only the keys this package later rewrites are retained.
func (m *configMap) parse(data string) {
	keys := []string{"backing-store", "driver", "initiator-address", "write-cache"}
	for _, line := range strings.Split(data, "\n") {
		for _, key := range keys {
			if !strings.Contains(line, key) {
				continue
			}
			// strings.Fields tolerates tabs and repeated spaces, and the
			// length guard prevents the index-out-of-range panic the old
			// strings.Split(s, " ")[1] hit on malformed lines.
			fields := strings.Fields(strings.TrimSpace(line))
			if len(fields) < 2 {
				continue
			}
			// append on a nil slice allocates, so no nil check is needed.
			(*m)[key] = append((*m)[key], fields[1])
		}
	}
}
// updateConfigmap records value under key, skipping exact duplicates.
func (m *configMap) updateConfigmap(key, value string) {
	existing := (*m)[key]
	if existing == nil {
		(*m)[key] = []string{value}
		return
	}
	if utils.Contains(existing, value) {
		return
	}
	(*m)[key] = append(existing, value)
}
// writeConfig serialises the map as a tgt "<target>" stanza to file,
// overwriting any previous contents.
// Unlike the original, every write error (including Sync) is now
// propagated instead of being silently dropped.
// NOTE(review): map iteration order is random, so the line order in the
// generated file is not stable between runs.
func (m configMap) writeConfig(file, tgtIqn string) error {
	f, err := os.Create(file)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := fmt.Fprintf(f, "<target %s>\n", tgtIqn); err != nil {
		return err
	}
	for k, v := range m {
		for _, vl := range v {
			if _, err := fmt.Fprintf(f, " %s %s\n", k, vl); err != nil {
				return err
			}
		}
	}
	if _, err := f.WriteString("</target>"); err != nil {
		return err
	}
	return f.Sync()
}
// IsExist reports whether the path f exists on disk.
func IsExist(f string) bool {
	if _, err := os.Stat(f); err != nil {
		return os.IsExist(err)
	}
	return true
}
|
package main
import "fmt"
// Application identity constants.
const (
	AppVendor  = "hexaflex"
	AppName    = "srv"
	AppVersion = "v0.0.1"
)

// Version returns the "<vendor> <name> <version>" identification string.
func Version() string {
	return fmt.Sprint(AppVendor, " ", AppName, " ", AppVersion)
}
|
package Wallet
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"fmt"
"log"
"github.com/mr-tron/base58"
"golang.org/x/crypto/ripemd160"
)
// Wallet couples an ECDSA P-256 key pair with a demo token balance.
type Wallet struct {
	PrivateKey ecdsa.PrivateKey // P-256 private key (see NewKeyPair)
	PublicKey []byte // raw concatenation of the public key's X and Y bytes
	Token int // demo balance; MakeWallet seeds it with 100
}
// Wallets indexes wallets by their base58 address string.
type Wallets struct {
	Wallets map[string]*Wallet
}
// addresses accumulates every address created via AddWallet.
// NOTE(review): package-level mutable state, not goroutine-safe.
var addresses []string
const (
	// checksumLength is the number of checksum bytes appended to an address.
	checksumLength = 4
	// version is the address version byte prepended before hashing.
	version = byte(0x00)
)
// NewKeyPair generates a fresh ECDSA key pair on the P-256 curve and
// returns the private key together with the raw concatenation of the
// public key's X and Y coordinate bytes.
//
// NOTE(review): big.Int.Bytes() strips leading zero bytes, so the public
// key is occasionally shorter than 64 bytes — confirm downstream code
// tolerates a variable length before relying on it.
func NewKeyPair() (ecdsa.PrivateKey, []byte) {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Panic(err)
	}
	pubKey := append(priv.PublicKey.X.Bytes(), priv.PublicKey.Y.Bytes()...)
	return *priv, pubKey
}
// MakeWallet generates a fresh key pair and wraps it in a Wallet seeded
// with 100 tokens.
func MakeWallet() *Wallet {
	priv, pub := NewKeyPair()
	return &Wallet{PrivateKey: priv, PublicKey: pub, Token: 100}
}
// AddWallet creates a new wallet, registers it in the map under its
// address, records the address in the package-level list, and returns
// the address.
func (ws *Wallets) AddWallet() string {
	wallet := MakeWallet()
	address := fmt.Sprintf("%s", wallet.Address())
	// Lazily initialise the map so a zero-value Wallets still works; the
	// original duplicated the same assignment across both branches.
	if ws.Wallets == nil {
		ws.Wallets = make(map[string]*Wallet)
	}
	ws.Wallets[address] = wallet
	addresses = append(addresses, address)
	return address
}
// Address derives the wallet's base58check-style address:
// base58(version || RIPEMD160(SHA256(pub)) || checksum).
func (w Wallet) Address() []byte {
	pubHash := PublicKeyHash(w.PublicKey)
	// Prepend the version byte before computing the checksum.
	versionHash := append([]byte{version}, pubHash...)
	checksum := Checksum(versionHash)
	fullHash := append(versionHash, checksum...)
	address := Base58Encode(fullHash)
	return address
}
// PublicKeyHash returns RIPEMD160(SHA256(pubKey)), the 20-byte payload
// used when building an address.
// NOTE(review): x/crypto/ripemd160 is deprecated upstream; it is kept
// here for address-format compatibility.
func PublicKeyHash(pubKey []byte) []byte {
	pubHash := sha256.Sum256(pubKey)
	hasher := ripemd160.New()
	_, err := hasher.Write(pubHash[:])
	if err != nil {
		log.Panic(err)
	}
	publicRipMD := hasher.Sum(nil)
	return publicRipMD
}
// Checksum returns the first checksumLength bytes of the double SHA-256
// of payload.
func Checksum(payload []byte) []byte {
	first := sha256.Sum256(payload)
	second := sha256.Sum256(first[:])
	return second[:checksumLength]
}
// Base58Encode encodes input with the base58 alphabet and returns the
// result as a byte slice.
func Base58Encode(input []byte) []byte {
	encode := base58.Encode(input)
	return []byte(encode)
}
// Base58Decode decodes base58-encoded input; it panics (via log.Panic)
// on malformed input rather than returning an error.
func Base58Decode(input []byte) []byte {
	decode, err := base58.Decode(string(input[:]))
	if err != nil {
		log.Panic(err)
	}
	return decode
}
// GetAllAddresses returns every address created via AddWallet during the
// lifetime of this process.
func GetAllAddresses() []string {
	return addresses
}
// GetAllWalletDetails returns a Wallets container.
// NOTE(review): despite the name this returns a fresh, empty Wallets —
// it does not aggregate previously created wallets. Confirm whether
// callers expect the populated set.
func GetAllWalletDetails() *Wallets {
	return &Wallets{}
}
// func main() {
// wallets := &Wallets{}
// address := wallets.AddWallet()
// fmt.Println(address)
// for address := range wallets.Wallets {
// fmt.Println("address : ", address, "\nToken : ", wallets.Wallets[address].Token)
// }
// fmt.Println(wallets.Wallets)
// }
|
package prompt
import (
"reflect"
"testing"
)
// TestFilter table-drives every Filter implementation (contains, prefix,
// suffix, fuzzy) against the same style of three-suggestion list, with
// and without case folding.
func TestFilter(t *testing.T) {
	var scenarioTable = []struct {
		scenario string
		filter Filter
		list []Suggest
		substr string
		ignoreCase bool
		expected []Suggest
	}{
		{
			scenario: "Contains don't ignore case",
			filter: FilterContains,
			list: []Suggest{
				{Text: "abcde"},
				{Text: "fghij"},
				{Text: "ABCDE"},
			},
			substr: "cd",
			ignoreCase: false,
			expected: []Suggest{
				{Text: "abcde"},
			},
		},
		{
			scenario: "Contains ignore case",
			filter: FilterContains,
			list: []Suggest{
				{Text: "abcde"},
				{Text: "fghij"},
				{Text: "ABCDE"},
			},
			substr: "cd",
			ignoreCase: true,
			expected: []Suggest{
				{Text: "abcde"},
				{Text: "ABCDE"},
			},
		},
		{
			scenario: "HasPrefix don't ignore case",
			filter: FilterHasPrefix,
			list: []Suggest{
				{Text: "abcde"},
				{Text: "fghij"},
				{Text: "ABCDE"},
			},
			substr: "abc",
			ignoreCase: false,
			expected: []Suggest{
				{Text: "abcde"},
			},
		},
		{
			scenario: "HasPrefix ignore case",
			filter: FilterHasPrefix,
			list: []Suggest{
				{Text: "abcde"},
				{Text: "fabcj"},
				{Text: "ABCDE"},
			},
			substr: "abc",
			ignoreCase: true,
			expected: []Suggest{
				{Text: "abcde"},
				{Text: "ABCDE"},
			},
		},
		{
			scenario: "HasSuffix don't ignore case",
			filter: FilterHasSuffix,
			list: []Suggest{
				{Text: "abcde"},
				{Text: "fcdej"},
				{Text: "ABCDE"},
			},
			substr: "cde",
			ignoreCase: false,
			expected: []Suggest{
				{Text: "abcde"},
			},
		},
		{
			scenario: "HasSuffix ignore case",
			filter: FilterHasSuffix,
			list: []Suggest{
				{Text: "abcde"},
				{Text: "fcdej"},
				{Text: "ABCDE"},
			},
			substr: "cde",
			ignoreCase: true,
			expected: []Suggest{
				{Text: "abcde"},
				{Text: "ABCDE"},
			},
		},
		{
			scenario: "Fuzzy don't ignore case",
			filter: FilterFuzzy,
			list: []Suggest{
				{Text: "abcde"},
				{Text: "fcdej"},
				{Text: "ABCDE"},
			},
			substr: "ae",
			ignoreCase: false,
			expected: []Suggest{
				{Text: "abcde"},
			},
		},
		{
			scenario: "Fuzzy ignore case",
			filter: FilterFuzzy,
			list: []Suggest{
				{Text: "abcde"},
				{Text: "fcdej"},
				{Text: "ABCDE"},
			},
			substr: "ae",
			ignoreCase: true,
			expected: []Suggest{
				{Text: "abcde"},
				{Text: "ABCDE"},
			},
		},
	}
	for _, s := range scenarioTable {
		// reflect.DeepEqual compares element order as well as content.
		if actual := s.filter(s.list, s.substr, s.ignoreCase); !reflect.DeepEqual(actual, s.expected) {
			t.Errorf("%s: Should be %#v, but got %#v", s.scenario, s.expected, actual)
		}
	}
}
// TestFuzzyMatch checks subsequence matching, including empty patterns
// and multi-byte (UTF-8) runes.
func TestFuzzyMatch(t *testing.T) {
	cases := []struct {
		input    string
		pattern  string
		expected bool
	}{
		{"dog house", "dog", true},
		{"dog house", "", true},
		{"", "", true},
		{"this is much longer", "hhg", true},
		{"this is much longer", "hhhg", false},
		{"long", "longer", false},
		{"can we do unicode 文字 with this 今日", "文字今日", true},
		{"can we do unicode 文字 with this 今日", "d文字tt今日", true},
		{"can we do unicode 文字 with this 今日", "d文字ttt今日", false},
	}
	for _, c := range cases {
		if got := fuzzyMatch(c.input, c.pattern); got != c.expected {
			t.Errorf("fuzzymatch, %s in %s: expected %v, got %v", c.pattern, c.input, c.expected, got)
		}
	}
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"testing"
"github.com/stretchr/testify/require"
)
// mockStatistics represents mocked statistics.
type mockStatistics struct{}

// Status names and the value used by TestStatusVar below.
const (
	testStatus = "test_status" // status with no specific scope registered
	testSessionStatus = "test_session_status" // status registered as session-scoped
	testStatusVal = "test_status_val"
)

// specificStatusScopes maps status names to a non-default scope.
var specificStatusScopes = map[string]ScopeFlag{
	testSessionStatus: ScopeSession,
}
// GetScope returns the scope registered for status, falling back to the
// package default when the status is unknown.
func (ms *mockStatistics) GetScope(status string) ScopeFlag {
	if scope, ok := specificStatusScopes[status]; ok {
		return scope
	}
	return DefaultStatusVarScopeFlag
}
// Stats returns the fixed mocked status map; it never fails.
func (ms *mockStatistics) Stats(_ *SessionVars) (map[string]interface{}, error) {
	stats := make(map[string]interface{}, len(specificStatusScopes))
	stats[testStatus] = testStatusVal
	return stats, nil
}
// TestStatusVar registers the mocked statistics provider and verifies
// both scope resolution and the values reported through GetStatusVars.
func TestStatusVar(t *testing.T) {
	ms := &mockStatistics{}
	RegisterStatistics(ms)
	scope := ms.GetScope(testStatus)
	require.Equal(t, DefaultStatusVarScopeFlag, scope)
	scope = ms.GetScope(testSessionStatus)
	require.Equal(t, ScopeSession, scope)
	vars, err := GetStatusVars(nil)
	require.NoError(t, err)
	v := &StatusVal{Scope: DefaultStatusVarScopeFlag, Value: testStatusVal}
	// NOTE(review): require.EqualValues takes (expected, actual); the
	// arguments here are swapped, which only affects the failure message.
	require.EqualValues(t, vars[testStatus], v)
}
|
package Problem0055
// canJump reports whether the last index is reachable when each element
// gives the maximum jump length from that position (LeetCode 55).
//
// Strategy: scan right to left. Every 0 (except at the last index) is an
// obstacle; it is passable only if some earlier position can jump strictly
// past it. When such a springboard exists, the scan resumes from it, since
// it also clears every zero in between.
func canJump(nums []int) bool {
	for idx := len(nums) - 2; idx >= 0; idx-- {
		if nums[idx] != 0 {
			continue
		}
		// Look for an earlier position able to leap beyond this zero.
		springboard := -1
		for k := idx - 1; k >= 0; k-- {
			if idx-k < nums[k] {
				springboard = k
				break
			}
		}
		if springboard == -1 {
			// Nothing can clear this zero, so the end is unreachable.
			return false
		}
		idx = springboard
	}
	return true
}
|
package models
import "time"
// Job represents a single unit of work that is delivered to the worker.
type Job struct {
	Id string // unique ID
	Input []byte // task payload
	Progress uint8 // completion reported by the worker (presumably 0-100 — confirm)
	Logs []string // log lines collected while the job ran
	CreatedAt time.Time // when the job was created
	DeliveredAt time.Time // when the job was handed to a worker
	FinishedAt time.Time // when the worker reported completion (zero while running)
}
|
package goil
import (
"io"
"mime/multipart"
"path/filepath"
"sync"
)
// Attachment is a single file to be uploaded as part of a multipart form.
type Attachment struct {
	// Filename sent to the server, optional; only its final path element
	// (filepath.Base) is used when the form field is created.
	BasePath string
	// File to be sent, be sure to check max length
	Reader io.Reader
	// mutex guards concurrent writes of this attachment.
	// NOTE(review): nothing in this file ever assigns it, so it is nil for
	// attachments built via the Attach* helpers — locking it would panic.
	mutex *sync.Mutex
}
// Per-category attachment size limits, in bytes.
const (
	MB uint = 1 << (10 * 2) // one mebibyte (1024 * 1024 bytes)
	AttachmentMaxSizePictures = 5 * MB
	AttachmentMaxSizeVideos = 800 * MB
	AttachmentMaxSizeAudio = 20 * MB
	AttachmentMaxSizeDocuments = 10 * MB
)
// Attachments groups the files to upload, bucketed by media category.
type Attachments struct {
	/* Restrictions
	- can't be more than 5Mo in size each
	- must be of PNG, JPEG or GIF format
	*/
	Pictures []Attachment
	/* Restrictions
	- can't be more than 800Mo in size each
	- must have an audio track
	*/
	Videos []Attachment
	/* Restrictions
	- can't be more than 20Mo in size each
	- must have an .mp3 extension (and probably must be mp3 themselves)
	*/
	Audio []Attachment
	/* Restrictions
	- can't be more than 10Mo in size each
	- must have an extension, (i.e must match regex "\.[a-z0-9]{2,4}")
	- but can't have a .jpg, .png, .gif, .mp3, .flv extension (i.e must not match regex "\.(jpg|png|gif|mp3|flv)")
	*/
	Documents []Attachment
}
// Populated reports whether at least one attachment of any category is
// present.
func (as Attachments) Populated() bool {
	total := len(as.Pictures) + len(as.Videos) + len(as.Audio) + len(as.Documents)
	return total > 0
}
// AttachPicture queues a picture for upload.
// The receiver must be a pointer: with the previous value receiver the
// append mutated a copy and the attachment was silently lost.
func (as *Attachments) AttachPicture(filename string, r io.Reader) {
	as.Pictures = append(as.Pictures, Attachment{BasePath: filename, Reader: r})
}
// AttachVideo queues a video for upload.
// The receiver must be a pointer: with the previous value receiver the
// append mutated a copy and the attachment was silently lost.
func (as *Attachments) AttachVideo(filename string, r io.Reader) {
	as.Videos = append(as.Videos, Attachment{BasePath: filename, Reader: r})
}
// AttachAudio queues an audio file for upload.
// The receiver must be a pointer: with the previous value receiver the
// append mutated a copy and the attachment was silently lost.
func (as *Attachments) AttachAudio(filename string, r io.Reader) {
	as.Audio = append(as.Audio, Attachment{BasePath: filename, Reader: r})
}
// AttachDocument queues a document for upload.
// The receiver must be a pointer: with the previous value receiver the
// append mutated a copy and the attachment was silently lost.
func (as *Attachments) AttachDocument(filename string, r io.Reader) {
	as.Documents = append(as.Documents, Attachment{BasePath: filename, Reader: r})
}
// writeToMultipart adds this attachment to w as a form file under key,
// copying at most maxSize bytes from its Reader.
// Fixes over the original: the mutex was locked but never unlocked
// (deadlocking any second write), and since the Attach* helpers never set
// it, a.mutex is typically nil and locking it would panic — so the lock
// is now both released and optional.
func (a Attachment) writeToMultipart(w *multipart.Writer, key string, maxSize uint) error {
	if a.mutex != nil {
		a.mutex.Lock()
		defer a.mutex.Unlock()
	}
	// Create the form field
	part, err := w.CreateFormFile(key, filepath.Base(a.BasePath))
	if err != nil {
		return err
	}
	// Copy the file to the form, truncating anything beyond maxSize.
	reader := io.LimitReader(a.Reader, int64(maxSize))
	_, err = io.Copy(part, reader)
	return err
}
// writeToMultipart writes every queued attachment to w, category by
// category, stopping at the first error.
// Fix over the original: per-attachment write errors were silently
// discarded inside the helper, so a failed upload looked successful.
func (as Attachments) writeToMultipart(w *multipart.Writer) error {
	// addToForm writes each attachment in at under the given form key.
	addToForm := func(key string, at []Attachment, maxSize uint) error {
		for _, a := range at {
			if err := a.writeToMultipart(w, key, maxSize); err != nil {
				return err
			}
		}
		return nil
	}
	if err := addToForm("attachment_photo[]", as.Pictures, AttachmentMaxSizePictures); err != nil {
		return err
	}
	if err := addToForm("attachment_video[]", as.Videos, AttachmentMaxSizeVideos); err != nil {
		return err
	}
	if err := addToForm("attachment_audio[]", as.Audio, AttachmentMaxSizeAudio); err != nil {
		return err
	}
	return addToForm("attachment_file[]", as.Documents, AttachmentMaxSizeDocuments)
}
|
package data
// symbolsConv defines a map of symbols and crypto currency names:
// keys are currency symbols, values are the corresponding names.
type symbolsConv map[string]string
// updateSymConv rebuilds the symbol-to-name index (s.symconv) from the
// currency catalogue in s.Currencies.
// NOTE(review): writing to a nil map panics — confirm s.symconv is
// always initialised before this is called.
func (s *Service) updateSymConv() {
	// construct
	for name, curr := range s.Currencies {
		s.symconv[curr.Symbol] = name
	}
}
|
package main
import "testing"
// TestRedisCase exercises RedisCase end to end; it carries no assertions
// and only fails if RedisCase panics.
func TestRedisCase(t *testing.T) {
	RedisCase()
}
|
package authentication
import (
"glsamaker/pkg/app/handler/authentication/auth_session"
"glsamaker/pkg/app/handler/authentication/templates"
"glsamaker/pkg/app/handler/authentication/utils"
"glsamaker/pkg/database/connection"
"glsamaker/pkg/models/users"
"golang.org/x/crypto/argon2"
"net/http"
)
// Login handles the '/login' route: already-authenticated users are sent
// home, valid credentials establish a session (optionally followed by a
// second factor), and anything else re-renders the login form.
func Login(w http.ResponseWriter, r *http.Request) {
	// in case '/login' is requested but the user is already authenticated
	// we redirect to '/'. The added return is the fix: http.Redirect does
	// not stop the handler, so the original fell through and attempted a
	// second login (and a second WriteHeader) on the same response.
	if utils.IsAuthenticated(w, r) {
		http.Redirect(w, r, "/", 301)
		return
	}
	username, pass, cameFrom, bindLoginToIP, _ := getParams(r)
	if !IsValidPassword(username, pass) {
		templates.RenderLoginTemplate(w, r)
		return
	}
	user, _ := getLoginUser(username)
	auth_session.Create(w, r, user, bindLoginToIP, user.IsUsing2FA())
	// NOTE(review): 301 responses are cacheable by browsers; StatusFound
	// (302) or 303 is usually safer for login flows — confirm before
	// changing the status code.
	if user.IsUsing2FA() {
		http.Redirect(w, r, "/login/2fa", 301)
	} else {
		http.Redirect(w, r, cameFrom, 301)
	}
}
// SecondFactorLogin renders the second-factor page (TOTP or WebAuthn)
// for an authenticated user; any other state yields a 404.
func SecondFactorLogin(w http.ResponseWriter, r *http.Request) {
	user := utils.GetAuthenticatedUser(r)
	if user == nil || !user.IsUsing2FA() {
		// this should not occur
		http.NotFound(w, r)
		return
	}
	switch {
	case user.IsUsingTOTP:
		templates.RenderTOTPTemplate(w, r)
	case user.IsUsingWebAuthn:
		templates.RenderWebAuthnTemplate(w, r)
	default:
		// this should not occur
		http.NotFound(w, r)
	}
}
// utility functions
// getLoginUser looks the user up by nick. The boolean reports whether the
// lookup query succeeded; a zero user with false is returned when no row
// matched.
func getLoginUser(username string) (*users.User, bool) {
	var candidates []*users.User
	err := connection.DB.Model(&candidates).Where("nick = ?", username).Select()
	if len(candidates) < 1 {
		return &users.User{}, false
	}
	return candidates[0], err == nil
}
// getParams extracts the login form fields from the request: username,
// password, the page to return to after login, and whether the session
// should be bound to the client IP.
func getParams(r *http.Request) (string, string, string, bool, error) {
	if err := r.ParseForm(); err != nil {
		return "", "", "", false, err
	}
	form := r.Form
	return form.Get("username"),
		form.Get("password"),
		form.Get("cameFrom"),
		form.Get("restrictlogin") == "on",
		nil
}
// IsValidPassword reports whether the credentials identify an enabled
// user whose stored argon2id hash matches the supplied password.
// Fixes over the original: the `user != nil` test was dead code (it ran
// after user.Password had already been dereferenced, and getLoginUser
// never returns nil), and the Disabled check now happens before the
// expensive hash computation.
func IsValidPassword(username string, password string) bool {
	user, isValidUser := getLoginUser(username)
	if !isValidUser || user.Disabled {
		return false
	}
	hashedPassword := argon2.IDKey(
		[]byte(password),
		user.Password.Salt,
		user.Password.Time,
		user.Password.Memory,
		user.Password.Threads,
		user.Password.KeyLen)
	// NOTE(review): this comparison is not constant-time; consider
	// subtle.ConstantTimeCompare to avoid a timing side channel.
	return string(user.Password.Hash) == string(hashedPassword)
}
|
package main
import (
"fmt"
"log"
"time"
"os"
"os/signal"
"github.com/ddouglas/phoenix/app"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
)
// main connects to the kill stream through the app, relays a heartbeat
// message every 10 seconds, and shuts the websocket down cleanly on
// SIGINT or when the read loop ends.
func main() {
	app, err := app.New()
	if err != nil {
		err = errors.Wrap(err, "Unable to parse env variable into struct")
		fmt.Println(err)
		os.Exit(1)
	}
	defer app.Discord.Close()
	defer app.Zkill.Close()
	// done is closed when the kill-stream reader goroutine exits.
	done := make(chan struct{})
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	go func() {
		defer close(done)
		app.ReadKillStream()
	}()
	ticker := time.NewTicker(time.Second * 10)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			// Reader finished (connection dropped or stream ended).
			return
		case t := <-ticker.C:
			// Periodic keep-alive: echo the tick time to the socket.
			fmt.Println(app.Discord.LastHeartbeatSent, app.Discord.LastHeartbeatAck, t.String())
			err := app.Zkill.WriteMessage(websocket.TextMessage, []byte(t.String()))
			if err != nil {
				log.Println("write:", err)
				return
			}
		case <-interrupt:
			log.Println("interrupt")
			// Cleanly close the connection by sending a close message and then
			// waiting (with timeout) for the server to close the connection.
			err := app.Zkill.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
			if err != nil {
				log.Println("write close: ", err)
				return
			}
			select {
			case <-done:
			case <-time.After(time.Second):
			}
			return
		}
	}
}
|
package spool
import (
"time"
"github.com/cloudspannerecosystem/spool/model"
)
// FilterNotUsedWithin returns a predicate reporting whether sdb has not
// been updated within the last d.
func FilterNotUsedWithin(d time.Duration) func(sdb *model.SpoolDatabase) bool {
	return func(sdb *model.SpoolDatabase) bool {
		// Equivalent to !sdb.UpdatedAt.Add(d).After(time.Now()).
		return time.Since(sdb.UpdatedAt) >= d
	}
}
// FilterState returns a predicate reporting whether sdb is in state.
func FilterState(state State) func(sdb *model.SpoolDatabase) bool {
	return func(sdb *model.SpoolDatabase) bool {
		return state.Int64() == sdb.State
	}
}
// filter returns the subset of sdbs for which every predicate holds.
func filter(sdbs []*model.SpoolDatabase, filters ...func(sdb *model.SpoolDatabase) bool) []*model.SpoolDatabase {
	kept := make([]*model.SpoolDatabase, 0, len(sdbs))
outer:
	for _, sdb := range sdbs {
		for _, accept := range filters {
			if !accept(sdb) {
				continue outer
			}
		}
		kept = append(kept, sdb)
	}
	return kept
}
|
// Copyright © 2019 Developer Network, LLC
//
// This file is subject to the terms and conditions defined in
// file 'LICENSE', which is part of this source code package.
package main
import (
"fmt"
"os"
"go.atomizer.io/cmd"
_ "go.atomizer.io/montecarlopi"
)
// main boots the Atomizer test agent and exits non-zero on failure.
func main() {
	if err := cmd.Initialize("Atomizer Test Agent"); err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
}
|
package strain
// Implement the `keep` and `discard` operation on collections.
// Given a collection and a predicate on the collection's elements,
// `keep` returns a new collection containing those elements where
// the predicate is true, while `discard` returns a new collection
// containing those elements where the predicate is false.
// Note that the union of keep and discard is all the elements.
// implement with basic tools rather than standard library
type Ints []int
type Lists [][]int
type Strings []string
func (list Ints) Keep(f func(int) bool) Ints {
var newlist Ints
for _, v := range list {
if f(v) {
newlist = append(newlist, v)
}
}
return newlist
}
func (list Ints) Discard(f func(int) bool) Ints {
var newlist Ints
for _, v := range list {
if !f(v) {
newlist = append(newlist, v)
}
}
return newlist
}
func (lists Lists) Keep(f func([]int) bool) Lists {
var newlists Lists
for _, l := range lists {
if f(l) {
newlists = append(newlists, l)
}
}
return newlists
}
func (strs Strings) Keep(f func(string) bool) Strings {
var newlist Strings
for _, v := range strs {
if f(v) {
newlist = append(newlist, v)
}
}
return newlist
}
|
// Package magicbytes Some file formats are intended to be read different than text-based files because they have special
// formats. In order to determine a file's format/type, magic bytes/magical numbers are used to mark files with special
// signatures, located at the beginning of the file, mostly. You are assigned to create a Go package to find files
// recursively in a target directory with the following API for given file meta information.
package magicbytes
import (
	"bufio"
	"bytes"
	"context"
	"errors"
	"io"
	"io/fs"
	"log"
	"os"
	"path/filepath"
	"runtime"
)
// MaxMetaArrayLength holds the maximum number of meta entries Search accepts.
const MaxMetaArrayLength = 1000

// ErrMetaArrayLengthExceeded is returned by Search when more than
// MaxMetaArrayLength meta entries are supplied.
// NOTE(review): the error text is capitalised, against Go convention,
// but changing it would alter observable behaviour.
var ErrMetaArrayLengthExceeded = errors.New("Meta array length exceeded max value")

// Meta holds the name, magical bytes, and offset of the magical bytes to be searched.
type Meta struct {
	Type string // name of the file/meta type.
	Bytes []byte // magical bytes.
	Offset int64 // offset of the magical bytes from the file start position.
}

// OnMatchFunc represents a function to be called when Search function finds a match.
// Returning false must immediately stop Search process.
type OnMatchFunc func(path, metaType string) bool
// Search searches the given target directory to find files recursively using meta information.
// For every match, onMatch callback is called concurrently (from the
// worker goroutines).
func Search(ctx context.Context, targetDir string, metas []*Meta, onMatch OnMatchFunc) error {
	if len(metas) > MaxMetaArrayLength {
		return ErrMetaArrayLengthExceeded
	}
	// no need to search files if meta array is empty
	if len(metas) == 0 {
		return nil
	}
	// One worker per CPU; the buffer lets the directory walker run ahead.
	PathChannel := make(chan string, runtime.NumCPU())
	// Closing the channel is what terminates the workers.
	defer close(PathChannel)
	for i := 0; i < runtime.NumCPU(); i++ {
		go findMatchWorker(PathChannel, onMatch, metas)
	}
	// need to fix file path for running os
	p := filepath.FromSlash(targetDir)
	// NOTE(review): if every worker exits early (onMatch returned false),
	// sends inside walkDir can block forever — confirm this cannot happen
	// with the expected callback behaviour.
	err := walkDir(ctx, p, PathChannel)
	if err != nil {
		log.Println("walkDir error: ", err)
		return err
	}
	return nil
}
// findMatchWorker consumes file paths from pathChannel and reports any
// meta match through onMatch. It exits when the channel is closed or when
// onMatch returns false.
func findMatchWorker(pathChannel <-chan string, onMatch OnMatchFunc, metas []*Meta) {
	// Swallow panics so a single failing callback or path cannot crash
	// the process.
	// NOTE(review): this also hides genuine bugs — confirm it is intended.
	defer func() {
		if recover() != nil {
			return
		}
	}()
	for path := range pathChannel {
		metaType, status := findMatch(path, metas)
		if status {
			if !onMatch(path, metaType) {
				return
			}
		}
	}
}
// walkDir streams every regular file under root into pathChannel.
// Cancellation is only checked once, before the walk starts; per-entry
// errors are logged and skipped, and the function returns nil in all
// cases except an already-cancelled context.
func walkDir(ctx context.Context, root string, pathChannel chan<- string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			log.Println("Unable to read directory: ", err)
		} else if d.Type().IsRegular() {
			// May block until a worker picks the path up.
			pathChannel <- path
		}
		return nil
	})
	if err != nil {
		log.Println("filepath walk error: ", err)
	}
	return nil
}
// findMatch returns the type of the first meta entry matching the file
// at path, and whether any entry matched.
func findMatch(path string, meta []*Meta) (string, bool) {
	for _, m := range meta {
		if checkMetaData(path, *m) {
			return m.Type, true
		}
	}
	return "", false
}
// checkMetaData reports whether the file at filename contains meta.Bytes
// at meta.Offset.
// Fixes over the original: Close is now deferred only after the Open
// error check, and the single bufio Read (which may legally return fewer
// bytes than requested, corrupting the comparison) is replaced with
// io.ReadFull.
func checkMetaData(filename string, meta Meta) bool {
	file, err := os.Open(filename)
	if err != nil {
		log.Println("Unable to open file: ", err)
		return false
	}
	defer file.Close()
	fi, err := file.Stat()
	if err != nil {
		log.Println("file stat error: ", err)
		return false
	}
	// The file must be large enough to hold the signature at its offset.
	var size int64 = meta.Offset + int64(len(meta.Bytes))
	if size > fi.Size() {
		log.Println("file size is not enough", filename, meta.Type)
		return false
	}
	bytesToRead := make([]byte, size)
	if _, err := io.ReadFull(bufio.NewReader(file), bytesToRead); err != nil {
		log.Println("Unable to read file: ", err, filename)
		return false
	}
	return bytes.Equal(meta.Bytes, bytesToRead[meta.Offset:])
}
|
package sensors
import (
"encoding/json"
"time"
)
// Sensor is a single sensor reading.
type Sensor struct {
	Id         string    // sensor identifier
	Value      int       // measured value
	RecordedAt time.Time // when the value was recorded
}

// MarshalJSON encodes the sensor with its timestamp flattened to a Unix
// epoch under the "read_at" key.
func (sensor *Sensor) MarshalJSON() ([]byte, error) {
	type wireSensor struct {
		Id         string `json:"id"`
		Value      int    `json:"value"`
		RecordedAt int64  `json:"read_at"`
	}
	wire := wireSensor{
		Id:         sensor.Id,
		Value:      sensor.Value,
		RecordedAt: sensor.RecordedAt.Unix(),
	}
	return json.Marshal(&wire)
}
|
package blc
// BlockChain is an in-memory chain of blocks.
type BlockChain struct {
	Blocks []*Block // blocks in append order; Blocks[0] is the genesis block
}
// CreateBlockChainWithGenesis builds a chain seeded with the genesis
// block.
func CreateBlockChainWithGenesis() *BlockChain {
	genesis := CreateGenesisBlock([]byte("bc init"))
	return &BlockChain{Blocks: []*Block{genesis}}
}
// AddBlock appends a freshly built block to the chain.
func (bc *BlockChain) AddBlock(height int64, prevBlockHash []byte, data []byte) {
	bc.Blocks = append(bc.Blocks, NewBlock(height, prevBlockHash, data))
}
|
package craft
import (
"complie/src/AST"
"complie/src/tokentype"
"fmt"
)
// SimpleParser builds an AST from a script via recursive descent.
type SimpleParser struct {
	rootNode ASTNode // root of the most recently parsed program
	//script string
}
// NewSimpleParser returns a parser with no AST attached yet.
func NewSimpleParser() *SimpleParser {
	// The zero value (nil rootNode) is exactly what we want.
	return &SimpleParser{}
}
// GetRoot returns the root node of the most recently parsed script.
func (p *SimpleParser) GetRoot() ASTNode {
	return p.rootNode
}
// DumpAST prints node's text and, recursively, all of its children,
// adding one tab of indentation per tree level.
func (p *SimpleParser) DumpAST(node ASTNode, indent string) {
	fmt.Println(indent, " ", node.GetText())
	for _, c := range node.GetChildren() {
		p.DumpAST(c, indent+"\t")
	}
}
// Parser tokenizes script and parses the resulting token stream, storing
// the AST root on the parser.
func (p *SimpleParser) Parser(script string) {
	lexer := NewSimpleLexer()
	p.rootNode = p.prog(lexer.Tokenize(script))
}
// prog parses a whole program: a sequence of statements, each of which is
// an int declaration, a double declaration, an expression statement, or an
// assignment statement. Panics on anything unrecognized.
func (p *SimpleParser) prog(tokens TokenReader) *SimpleASTNode {
	root := NewSimpleASTNode(AST.Programm, "pwc")
	for tokens.Peek() != nil {
		// Try each statement form in order; the first parser that
		// recognizes the lookahead wins.
		child := p.intDeclare(tokens)
		if child == nil {
			child = p.doubleDeclare(tokens)
		}
		if child == nil {
			child = p.expressionStatment(tokens)
		}
		if child == nil {
			child = p.assignmentStatement(tokens)
		}
		if child == nil {
			panic("unknown statement")
		}
		root.AddChild(child)
	}
	return root
}
// expressionStatment parses: additive ';'
// On a missing semicolon the token position is rewound and nil returned,
// so another statement parser can try the same tokens.
func (p *SimpleParser) expressionStatment(tokens TokenReader) *SimpleASTNode {
	start := tokens.GetPosition()
	node := p.additive(tokens)
	if node == nil {
		return nil
	}
	if t := tokens.Peek(); t != nil && t.GetType() == tokentype.SemiColon {
		tokens.Read()
		return node
	}
	tokens.SetPosition(start)
	return nil
}
// assignmentStatement parses: Identifier '=' additive ';'
// Returns nil (after un-reading the identifier) when the lookahead is not
// an assignment; panics on a malformed right-hand side or missing semicolon.
func (p *SimpleParser) assignmentStatement(tokens TokenReader) *SimpleASTNode {
	token := tokens.Peek()
	if token == nil || token.GetType() != tokentype.Identifier {
		return nil
	}
	token = tokens.Read()
	node := NewSimpleASTNode(AST.AssignmentStmt, token.GetText())
	if t := tokens.Peek(); t == nil || t.GetType() != tokentype.Assignment {
		// Not an assignment after all: put the identifier back.
		tokens.Unread()
		return nil
	}
	tokens.Read() // consume '='
	child := p.additive(tokens)
	if child == nil {
		panic("invalide assignment statement, expecting an expression")
	}
	node.AddChild(child)
	if t := tokens.Peek(); t != nil && t.GetType() == tokentype.SemiColon {
		tokens.Read()
	} else {
		panic("invalid statement, expecting semicolon")
	}
	return node
}
// doubleDeclare parses: 'double' Identifier ('=' additive)? ';'
// Returns nil when the lookahead does not start a double declaration.
// Fix: the original called tokens.Peek().GetType() without a nil check
// after consuming 'double', which panics with a bare nil dereference on
// truncated input; a nil lookahead now reaches the regular
// "variable name expected" panic instead.
func (p *SimpleParser) doubleDeclare(tokens TokenReader) *SimpleASTNode {
	var node *SimpleASTNode
	token := tokens.Peek()
	if token != nil && token.GetType() == tokentype.Double {
		tokens.Read() // consume 'double'
		next := tokens.Peek()
		if next == nil || next.GetType() != tokentype.Identifier {
			panic("variable name expected")
		}
		token = tokens.Read()
		node = NewSimpleASTNode(AST.DoubleDeclaration, token.GetText())
		// Optional initializer.
		if t := tokens.Peek(); t != nil && t.GetType() == tokentype.Assignment {
			tokens.Read()
			child := p.additive(tokens)
			if child == nil {
				panic("invalide variable initialization, expecting an expression")
			}
			node.AddChild(child)
		}
		// Declarations must be terminated by a semicolon.
		if t := tokens.Peek(); t != nil && t.GetType() == tokentype.SemiColon {
			tokens.Read()
		} else {
			panic("invalid statement, expecting semicolon")
		}
	}
	return node
}
// intDeclare parses: 'int' Identifier ('=' additive)? ';'
// Returns nil when the lookahead does not start an int declaration.
// Fix: the original called tokens.Peek().GetType() without a nil check
// after consuming 'int', which panics with a bare nil dereference on
// truncated input; a nil lookahead now reaches the regular
// "variable name expected" panic instead.
func (p *SimpleParser) intDeclare(tokens TokenReader) *SimpleASTNode {
	var node *SimpleASTNode
	token := tokens.Peek()
	if token != nil && token.GetType() == tokentype.Int {
		tokens.Read() // consume 'int'
		next := tokens.Peek()
		if next == nil || next.GetType() != tokentype.Identifier {
			panic("variable name expected")
		}
		token = tokens.Read()
		node = NewSimpleASTNode(AST.IntDeclaration, token.GetText())
		// Optional initializer.
		if t := tokens.Peek(); t != nil && t.GetType() == tokentype.Assignment {
			tokens.Read()
			child := p.additive(tokens)
			if child == nil {
				panic("invalide variable initialization, expecting an expression")
			}
			node.AddChild(child)
		}
		// Declarations must be terminated by a semicolon.
		if t := tokens.Peek(); t != nil && t.GetType() == tokentype.SemiColon {
			tokens.Read()
		} else {
			panic("invalid statement, expecting semicolon")
		}
	}
	return node
}
// additive parses: multiplicative (('+' | '-') additive)?
// NOTE(review): the right-recursive call makes '+' and '-' associate to
// the right (a-b-c parses as a-(b-c)); kept as-is to preserve the
// existing parse-tree shape.
func (this *SimpleParser) additive(tokens TokenReader) *SimpleASTNode {
	var child1 *SimpleASTNode = this.multiplicative(tokens)
	var node *SimpleASTNode = child1
	var token Token = tokens.Peek()
	if child1 != nil && token != nil {
		if token.GetType() == tokentype.Plus || token.GetType() == tokentype.Minus {
			token = tokens.Read()
			var child2 *SimpleASTNode = this.additive(tokens)
			if child2 != nil {
				node = NewSimpleASTNode(AST.Additive, token.GetText())
				node.AddChild(child1)
				node.AddChild(child2)
			} else {
				panic("invalid additive expression, expecting the right part.")
			}
		}
	}
	return node
}
// multiplicative parses: primary (('*' | '/') multiplicative)?
// NOTE(review): the right-recursive call makes '*' and '/' associate to
// the right (a/b/c parses as a/(b/c)); kept as-is to preserve the
// existing parse-tree shape.
func (this *SimpleParser) multiplicative(tokens TokenReader) *SimpleASTNode {
	var child1 *SimpleASTNode = this.primary(tokens)
	var node *SimpleASTNode = child1
	var token Token = tokens.Peek()
	if child1 != nil && token != nil {
		if token.GetType() == tokentype.Star || token.GetType() == tokentype.Slash {
			token = tokens.Read()
			//var child2 *SimpleASTNode = this.primary(tokens)
			var child2 *SimpleASTNode = this.multiplicative(tokens)
			if child2 != nil {
				node = NewSimpleASTNode(AST.Multicative, token.GetText())
				node.AddChild(child1)
				node.AddChild(child2)
			} else {
				panic("invalid multiplicative expression, expecting the right part.")
			}
		}
	}
	return node
}
// primary parses a primary expression: an int literal, a double literal,
// an identifier, or a parenthesized additive expression. Returns nil when
// the lookahead starts none of these.
func (p *SimpleParser) primary(tokens TokenReader) *SimpleASTNode {
	token := tokens.Peek()
	if token == nil {
		return nil
	}
	var node *SimpleASTNode
	switch token.GetType() {
	case tokentype.IntLiteral:
		node = NewSimpleASTNode(AST.IntLiteral, tokens.Read().GetText())
	case tokentype.DoubleLiteral:
		node = NewSimpleASTNode(AST.DoubleLiteral, tokens.Read().GetText())
	case tokentype.Identifier:
		node = NewSimpleASTNode(AST.Identifier, tokens.Read().GetText())
	case tokentype.LeftParen:
		tokens.Read() // consume '('
		node = p.additive(tokens)
		if node == nil {
			panic("expecting an additive expression inside parenthesis")
		}
		if t := tokens.Peek(); t != nil && t.GetType() == tokentype.RightParen {
			tokens.Read()
		} else {
			panic("expecting right parenthesis")
		}
	}
	return node
}
|
// Package deferlog implements deferpanic error logging.
package deferlog
import (
"fmt"
"github.com/deferpanic/deferclient/deferclient"
"runtime"
)
// Token is your deferpanic token, available in the deferpanic settings.
var Token string

// Environment sets an environment tag to differentiate between separate
// environments - default is "production".
var Environment = "production"

// AppGroup sets an optional tag to differentiate between your various
// services - default is "default".
var AppGroup = "default"
// BackTrace walks the current call stack and returns it as one
// "file:line funcname()" entry per line.
func BackTrace() (body string) {
	skip := 1
	for {
		pc, file, line, ok := runtime.Caller(skip)
		if !ok {
			return body
		}
		skip++
		// Preserve the original filter: frames whose file path ends in
		// 'c' are skipped (presumably cgo/C-generated files — the
		// original gives no further hint).
		if file[len(file)-1] == 'c' {
			continue
		}
		fn := runtime.FuncForPC(pc)
		body += fmt.Sprintf("%s:%d %s()\n", file, line, fn.Name())
	}
}
// Wrap wraps an error and ships the backtrace to deferpanic
func Wrap(err error) {
stack := BackTrace()
deferclient.Token = Token
deferclient.Environment = Environment
deferclient.AppGroup = AppGroup
go deferclient.ShipTrace(stack, err.Error())
}
|
package controllers
import (
"github.com/AnhNguyenQuoc/go-blog/lib"
"github.com/jinzhu/gorm"
"github.com/julienschmidt/httprouter"
"net/http"
)
// layoutService is the package-level service instance wired up by LayoutRouter.
var layoutService LayoutService

// LayoutService carries the database handle used by the layout handlers.
type LayoutService struct {
	DB *gorm.DB
}
// LayoutRouter initializes the package-level layout service with db and
// registers the root route.
func LayoutRouter(r *httprouter.Router, db *gorm.DB) {
	layoutService = LayoutService{DB: db}
	r.GET("/", HelloWorld)
}
// HelloWorld renders the index layout, passing the current user to the
// template when one is logged in and nil otherwise.
func HelloWorld(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	w.WriteHeader(http.StatusOK)
	if user, ok := CurrentUser(w, r); ok {
		lib.ParseTemplate(w, "layout/index", user)
		return
	}
	lib.ParseTemplate(w, "layout/index", nil)
}
|
package converter
import (
"fmt"
"regexp"
"strconv"
"strings"
"unicode"
)
// validationRegExp flags malformed input: a backslash escaping anything
// other than a backslash or digit (including a trailing backslash), or a
// string that begins with a bare digit.
var validationRegExp *regexp.Regexp

// groupRegExp locates the unpackable groups: an escaped backslash with a
// count, a lone escaped backslash, a backslash followed by two or more
// digits, or a plain non-digit rune followed by a count.
var groupRegExp *regexp.Regexp

func init() {
	// Compiled once at package init so every converter instance shares them.
	validationRegExp = regexp.MustCompile(`((^|[^\\])\\([^\\\d]|$))|^\d+`)
	groupRegExp = regexp.MustCompile(`(\\{2}\d+)|(\\{2})|(\\\d{2,})|(\D\d+)`)
}
// NewStringConverter returns a StringConverter for input, sharing the
// package-level compiled regular expressions.
func NewStringConverter(input string) StringConverter {
	sc := StringConverter{inputString: input}
	sc.validationRegExp = validationRegExp
	sc.groupRegExp = groupRegExp
	return sc
}
// StringConverter performs a primitive string unpacking containing repeated characters / runes.
type StringConverter struct {
	inputString      string         // raw packed input
	validationRegExp *regexp.Regexp // rejects malformed escapes / leading digits
	groupRegExp      *regexp.Regexp // locates the groups to unpack
}
// validate returns an error when the input string is malformed
// (bad escape sequence or leading digit).
func (sc StringConverter) validate() error {
	if !sc.validationRegExp.MatchString(sc.inputString) {
		return nil
	}
	return fmt.Errorf("invalid string")
}
// do splits the input into plain segments and special groups (e.g. "a4",
// "\4", "\\"), unpacks each group, and joins everything back together.
// If the input contains no special groups it is returned unchanged.
func (sc StringConverter) do() string {
	matches := sc.groupRegExp.FindAllIndex([]byte(sc.inputString), -1)
	if matches == nil {
		// No special groups: the input is already unpacked.
		return sc.inputString
	}
	var parts []string
	prev := 0
	for _, m := range matches {
		// Literal text between the previous group and this one.
		if prev < m[0] {
			parts = append(parts, sc.inputString[prev:m[0]])
		}
		parts = append(parts, sc.unpack(sc.inputString[m[0]:m[1]]))
		prev = m[1]
	}
	// Trailing literal text after the final group.
	if prev != len(sc.inputString) {
		parts = append(parts, sc.inputString[prev:])
	}
	return strings.Join(parts, "")
}
// unpack expands a single matched group: a rune followed by a repeat
// count, a two-rune escaped literal, or an escape followed by a count.
func (sc StringConverter) unpack(group string) string {
	runes := []rune(group)
	// "X<digits>": repeat X.
	if unicode.IsDigit(runes[1]) {
		count, _ := strconv.Atoi(string(runes[1:]))
		return sc.extendLine(string(runes[0]), count)
	}
	// Exactly two runes and no count: the literal first rune.
	if len(runes) == 2 {
		return string(runes[0])
	}
	// Escape in the middle (e.g. `\\4`): count starts at rune 2.
	count, _ := strconv.Atoi(string(runes[2:]))
	return sc.extendLine(string(runes[0]), count)
}
// extendLine returns char repeated count times; non-positive counts yield
// the empty string (matching the original loop's behavior).
// Fix: the original built the result with += in a loop, which is
// quadratic; strings.Repeat allocates once.
func (sc StringConverter) extendLine(char string, count int) string {
	if count <= 0 {
		// strings.Repeat panics on negative counts; the loop returned "".
		return ""
	}
	return strings.Repeat(char, count)
}
// Do validates the input and, when valid, returns the unpacked string.
func (sc StringConverter) Do() (string, error) {
	if err := sc.validate(); err != nil {
		return "", err
	}
	return sc.do(), nil
}
|
package crashparser
import (
"bufio"
)
// Reads strings from source Scanner and pass them line by line to output channel.
func Read(s *bufio.Scanner, onFinish func()) (<-chan string, error) {
output := make(chan string)
go func() {
defer close(output)
defer onFinish()
for s.Scan() {
output <- s.Text()
}
}()
return output, nil
}
|
package utils
import (
"regexp"
"time"
)
// NowTime returns the current local time formatted as "2006-01-02 15:04:05".
func NowTime() string {
	// The original round-tripped through time.Unix(time.Now().Unix(), 0);
	// since the layout has second precision, formatting time.Now()
	// directly produces the identical string.
	return time.Now().Format("2006-01-02 15:04:05")
}
// NowUnix returns the current Unix timestamp in seconds.
func NowUnix() int64 {
	return time.Now().Unix()
}
// UnixToFormatTime formats a Unix timestamp (seconds) as
// "2006-01-02 15:04:05" in the local time zone.
func UnixToFormatTime(timeStamp int64) string {
	return time.Unix(timeStamp, 0).Format("2006-01-02 15:04:05")
}
// mobileRegexp validates Chinese mobile phone numbers. Hoisted to package
// level so it is compiled once instead of on every call.
var mobileRegexp = regexp.MustCompile("^((13[0-9])|(14[5,7])|(15[0-3,5-9])|(17[0,3,5-8])|(18[0-9])|166|198|199|(147))\\d{8}$")

// VerifyMobileFormat reports whether mobileNum is a valid Chinese mobile
// phone number.
func VerifyMobileFormat(mobileNum string) bool {
	return mobileRegexp.MatchString(mobileNum)
}
// BoolIntoByte encodes a bool as a single byte: 1 for true, 0 for false.
func BoolIntoByte(b bool) []byte {
	out := []byte{0}
	if b {
		out[0] = 1
	}
	return out
}
// ByteIntoBool decodes a one-byte flag: true iff the first byte is 1.
func ByteIntoBool(b []byte) bool {
	return b[0] == 1
}
// ByteIntoInt normalizes a one-byte flag to 0 or 1: it yields 1 only when
// the first byte is exactly 1, and 0 for every other value.
func ByteIntoInt(b []byte) uint8 {
	if b[0] != 1 {
		return 0
	}
	return 1
}
// BoolIntoInt converts a bool to 1 (true) or 0 (false).
func BoolIntoInt(b bool) int {
	if !b {
		return 0
	}
	return 1
}
// StrBoolIntoByte converts a front-end boolean string to a one-byte flag:
// 1 for exactly "true", 0 for anything else.
func StrBoolIntoByte(s string) []byte {
	if s != "true" {
		return []byte{0}
	}
	return []byte{1}
}
// StrGenderIntoByte converts a front-end gender string to a one-byte
// flag: 1 for "男" (male), 0 for anything else.
func StrGenderIntoByte(s string) []byte {
	if s != "男" {
		return []byte{0}
	}
	return []byte{1}
}
// ByteEnabledToString renders a one-byte enabled flag as a localized
// label: "禁用" (disabled) for 0, "启用" (enabled) for anything else.
func ByteEnabledToString(b []byte) string {
	if b[0] != 0 {
		return `启用`
	}
	return `禁用`
}
|
package device
import (
"fmt"
"regexp"
"strconv"
"github.com/uhppoted/uhppote-core/types"
"github.com/uhppoted/uhppoted-lib/locales"
"github.com/uhppoted/uhppoted-lib/uhppoted"
"github.com/uhppoted/uhppoted-mqtt/common"
)
// Event is the MQTT-facing representation of a controller event: the raw
// numeric type/direction/reason codes accompanied by their localized text.
type Event struct {
	DeviceID      uint32         `json:"device-id"`
	Index         uint32         `json:"event-id"`
	Type          uint8          `json:"event-type"`
	TypeText      string         `json:"event-type-text"`
	Granted       bool           `json:"access-granted"`
	Door          uint8          `json:"door-id"`
	Direction     uint8          `json:"direction"`
	DirectionText string         `json:"direction-text"`
	CardNumber    uint32         `json:"card-number"`
	Timestamp     types.DateTime `json:"timestamp"`
	Reason        uint8          `json:"event-reason"`
	ReasonText    string         `json:"event-reason-text"`
}
// GetEvents handles a get-events request: it optionally fetches up to
// 'count' events from the controller identified by 'device-id' and always
// returns the device's first/last/current event indices.
func (d *Device) GetEvents(impl uhppoted.IUHPPOTED, request []byte) (any, error) {
	body := struct {
		DeviceID uint32 `json:"device-id"`
		Count    int    `json:"count,omitempty"`
	}{}
	if response, err := unmarshal(request, &body); err != nil {
		return response, err
	}
	if body.DeviceID == 0 {
		return common.MakeError(StatusBadRequest, "Invalid/missing device ID", nil), fmt.Errorf("invalid/missing device ID")
	}
	deviceID := body.DeviceID
	count := body.Count
	events := []any{}
	// Only hit the controller for the event list when a positive count was
	// requested; otherwise only the indices are returned.
	if count > 0 {
		list, err := impl.GetEvents(deviceID, count)
		if err != nil {
			return common.MakeError(StatusInternalServerError, fmt.Sprintf("Could not retrieve events from %d", deviceID), err), err
		}
		for _, e := range list {
			events = append(events, Transmogrify(e))
		}
	}
	first, last, current, err := impl.GetEventIndices(deviceID)
	if err != nil {
		return common.MakeError(StatusInternalServerError, fmt.Sprintf("Could not retrieve events from %d", deviceID), err), err
	}
	response := struct {
		DeviceID uint32 `json:"device-id,omitempty"`
		First    uint32 `json:"first,omitempty"`
		Last     uint32 `json:"last,omitempty"`
		Current  uint32 `json:"current,omitempty"`
		Events   []any  `json:"events,omitempty"`
	}{
		DeviceID: deviceID,
		First:    first,
		Last:     last,
		Current:  current,
		Events:   events,
	}
	return response, nil
}
// eventIndexRegexp matches a numeric event index or one of the special
// keywords first/last/current/next. Hoisted to package level so it is
// compiled once instead of on every GetEvent request.
var eventIndexRegexp = regexp.MustCompile("^([0-9]+|first|last|current|next)$")

// GetEvent handles a get-event request: it resolves the 'event-index'
// field (a number or first/last/current/next) against the controller
// identified by 'device-id' and returns the matching event.
func (d *Device) GetEvent(impl uhppoted.IUHPPOTED, request []byte) (any, error) {
	var deviceID uint32
	var index string
	body := struct {
		DeviceID uint32 `json:"device-id"`
		Index    any    `json:"event-index"`
	}{}
	if response, err := unmarshal(request, &body); err != nil {
		return response, err
	}
	if body.DeviceID == 0 {
		return common.MakeError(StatusBadRequest, "Invalid/missing device ID", nil), fmt.Errorf("invalid/missing device ID")
	} else {
		deviceID = body.DeviceID
	}
	if body.Index == nil {
		return common.MakeError(StatusBadRequest, "Invalid/missing event index", nil), fmt.Errorf("invalid/missing event index")
	}
	// ... parse event index
	if matches := eventIndexRegexp.FindStringSubmatch(fmt.Sprintf("%v", body.Index)); matches == nil {
		return common.MakeError(StatusBadRequest, "Invalid/missing event index", nil), fmt.Errorf("invalid/missing event index")
	} else {
		index = matches[1]
	}
	// .. get event indices
	first, last, current, err := impl.GetEventIndices(deviceID)
	if err != nil {
		return common.MakeError(StatusInternalServerError, fmt.Sprintf("Could not retrieve event indices from %v", deviceID), err), err
	}
	// ... get event
	switch index {
	case "first":
		return getEvent(impl, deviceID, first)
	case "last":
		return getEvent(impl, deviceID, last)
	case "current":
		return getEvent(impl, deviceID, current)
	case "next":
		return getNextEvent(impl, deviceID)
	default:
		if v, err := strconv.ParseUint(index, 10, 32); err != nil {
			return common.MakeError(StatusBadRequest, fmt.Sprintf("Invalid event index (%v)", body.Index), nil), fmt.Errorf("invalid event index (%v)", index)
		} else {
			return getEvent(impl, deviceID, uint32(v))
		}
	}
}
// Handler for the special-events MQTT message. Extracts the 'enabled' value from the request
// and invokes the uhppoted-lib.RecordSpecialEvents API function to update the controller
// 'record special events' flag.
func (d *Device) RecordSpecialEvents(impl uhppoted.IUHPPOTED, request []byte) (any, error) {
	body := struct {
		DeviceID *uhppoted.DeviceID `json:"device-id"`
		Enabled  *bool              `json:"enabled"`
	}{}
	if response, err := unmarshal(request, &body); err != nil {
		return response, err
	}
	if body.DeviceID == nil {
		return common.MakeError(StatusBadRequest, "Invalid/missing device ID", nil), fmt.Errorf("invalid/missing device ID")
	}
	if body.Enabled == nil {
		// Fix: the internal error previously said 'enable', which did not
		// match the request field name or the user-facing message.
		return common.MakeError(StatusBadRequest, "Invalid/missing 'enabled'", nil), fmt.Errorf("invalid/missing 'enabled'")
	}
	deviceID := uint32(*body.DeviceID)
	enabled := *body.Enabled
	updated, err := impl.RecordSpecialEvents(deviceID, enabled)
	if err != nil {
		return common.MakeError(StatusInternalServerError, fmt.Sprintf("Could not update 'record special events' flag for %d", *body.DeviceID), err), err
	}
	response := struct {
		DeviceID uint32 `json:"device-id"`
		Enabled  bool   `json:"enabled"`
		Updated  bool   `json:"updated"`
	}{
		DeviceID: deviceID,
		Enabled:  enabled,
		Updated:  updated,
	}
	return response, nil
}
// getEvent fetches the event at index from deviceID and wraps it for the
// MQTT response.
func getEvent(impl uhppoted.IUHPPOTED, deviceID uint32, index uint32) (any, error) {
	event, err := impl.GetEvent(deviceID, index)
	if err != nil {
		return common.MakeError(StatusInternalServerError, fmt.Sprintf("Could not retrieve event %v from %v", index, deviceID), err), err
	}
	if event == nil {
		return common.MakeError(StatusNotFound, fmt.Sprintf("No event at %v on %v", index, deviceID), nil), fmt.Errorf("no event at %v on %v", index, deviceID)
	}
	reply := struct {
		DeviceID uint32 `json:"device-id"`
		Event    any    `json:"event"`
	}{
		DeviceID: deviceID,
		Event:    Transmogrify(*event),
	}
	return &reply, nil
}
// getNextEvent fetches the next unread event (if any) from deviceID; the
// Event field of the reply is left empty when no event is available.
func getNextEvent(impl uhppoted.IUHPPOTED, deviceID uint32) (any, error) {
	reply := struct {
		DeviceID uint32 `json:"device-id"`
		Event    any    `json:"event"`
	}{
		DeviceID: deviceID,
	}
	events, err := impl.GetEvents(deviceID, 1)
	switch {
	case err != nil:
		return common.MakeError(StatusInternalServerError, fmt.Sprintf("Could not retrieve event from %v", deviceID), err), err
	case events == nil:
		return common.MakeError(StatusNotFound, fmt.Sprintf("No 'next' event for %v", deviceID), nil), fmt.Errorf("no 'next' event for %v", deviceID)
	case len(events) > 0:
		reply.Event = Transmogrify(events[0])
	}
	return &reply, nil
}
// Transmogrify converts an internal uhppoted event into the MQTT-facing
// Event, resolving the localized text for the type, direction, and reason
// codes (empty string when no translation exists).
func Transmogrify(e uhppoted.Event) any {
	translate := func(format string, v any) string {
		if s, ok := locales.Lookup(fmt.Sprintf(format, v)); ok {
			return s
		}
		return ""
	}
	return Event{
		DeviceID:      e.DeviceID,
		Index:         e.Index,
		Type:          e.Type,
		TypeText:      translate("event.type.%v", e.Type),
		Granted:       e.Granted,
		Door:          e.Door,
		Direction:     e.Direction,
		DirectionText: translate("event.direction.%v", e.Direction),
		CardNumber:    e.CardNumber,
		Timestamp:     e.Timestamp,
		Reason:        e.Reason,
		ReasonText:    translate("event.reason.%v", e.Reason),
	}
}
|
package types
import (
"time"
"github.com/jinzhu/gorm"
// HOFSTADTER_START import
// HOFSTADTER_END import
)
/*
Name: Post
About: The blog post type
*/
// HOFSTADTER_START start
// HOFSTADTER_END start
/*
Post is the persisted blog-post record: the content fields plus its
relations to the authoring user, its comments, and the users who liked it.
*/
type Post struct {
	/* ORM: server.api.databases.[name==postgres]*/
	gorm.Model
	UUID        string     `json:"uuid" xml:"uuid" yaml:"uuid" form:"uuid" query:"uuid" validate:"uuidv4" `
	Title       string     `json:"title" xml:"title" yaml:"title" form:"title" query:"title" validate:"required|alphanumunicode|min=1|max=256" `
	Content     string     `json:"content" xml:"content" yaml:"content" form:"content" query:"content" validate:"alphanumunicode" `
	Draft       bool       `json:"draft" xml:"draft" yaml:"draft" form:"draft" query:"draft" `
	PublishTime *time.Time `json:"publish_time" xml:"publish_time" yaml:"publish_time" form:"publish_time" query:"publish_time" `
	// Relations ----
	// Should be using some type lookup and package resolution type things
	// owned-by (the other half of has-one/many)
	UserID uint `json:"user_id" xml:"user_id" yaml:"user_id" form:"user_id" query:"user_id" `
	// has-many
	Comments []Comment `json:"comments" xml:"comments" yaml:"comments" form:"comments" query:"comments" `
	// many-to-many
	Likes []User `json:"likes" xml:"likes" yaml:"likes" form:"likes" query:"likes" gorm:"many2many:user_likes"`
}
// HOFSTADTER_BELOW
|
package Controllers
import (
"resource-api/Models"
"fmt"
"net/http"
"github.com/gin-gonic/gin"
)
// GetClients ... Get all clients
func GetClients(c *gin.Context) {
	var clients []Models.Client
	if err := Models.GetAllClients(&clients); err != nil {
		c.AbortWithStatus(http.StatusNotFound)
		return
	}
	c.JSON(http.StatusOK, clients)
}
// CreateClient ... Create Client
// Fix: the BindJSON error was ignored, so a malformed request body fell
// through and persisted a zero-value client.
func CreateClient(c *gin.Context) {
	var client Models.Client
	// BindJSON writes the 400 response itself on malformed input; just
	// stop instead of creating an empty record.
	if err := c.BindJSON(&client); err != nil {
		return
	}
	if err := Models.CreateClient(&client); err != nil {
		fmt.Println(err.Error())
		c.AbortWithStatus(http.StatusNotFound)
	} else {
		c.JSON(http.StatusOK, client)
	}
}
// GetClientByID ... Get the client by id
func GetClientByID(c *gin.Context) {
	var client Models.Client
	id := c.Params.ByName("id")
	if err := Models.GetClientByID(&client, id); err != nil {
		c.AbortWithStatus(http.StatusNotFound)
		return
	}
	c.JSON(http.StatusOK, client)
}
// UpdateClient ... Update the client information
// Fix: when the client lookup failed, execution previously fell through
// after writing the 404 response and updated a zero-value record; it now
// returns immediately. The BindJSON error is also no longer ignored.
func UpdateClient(c *gin.Context) {
	var client Models.Client
	id := c.Params.ByName("id")
	if err := Models.GetClientByID(&client, id); err != nil {
		c.JSON(http.StatusNotFound, client)
		return
	}
	// BindJSON writes the 400 response itself on malformed input.
	if err := c.BindJSON(&client); err != nil {
		return
	}
	if err := Models.UpdateClient(&client, id); err != nil {
		c.AbortWithStatus(http.StatusNotFound)
	} else {
		c.JSON(http.StatusOK, client)
	}
}
// DeleteClient ... Delete the client
func DeleteClient(c *gin.Context) {
	var client Models.Client
	id := c.Params.ByName("id")
	if err := Models.DeleteClient(&client, id); err != nil {
		c.AbortWithStatus(http.StatusNotFound)
		return
	}
	c.JSON(http.StatusOK, gin.H{"id" + id: "is deleted"})
}
|
package pathfileops
import (
"strings"
"testing"
)
// TestFileMgr_CopyFileToDirByLinkByIo_01 exercises the happy path: copy
// an existing source file into the destination directory, verify the copy
// exists, then delete it as clean-up.
func TestFileMgr_CopyFileToDirByLinkByIo_01(t *testing.T) {
	fileName := "newerFileForTest_01.txt"
	sourceFile := "../filesfortest/newfilesfortest/" + fileName
	fh := FileHelper{}
	absoluteSourceFile, err := fh.MakeAbsolutePath(sourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n",
			sourceFile, err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile).\n"+
			"absoluteSourceFile='%v'\nError='%v'\n",
			absoluteSourceFile, err.Error())
		return
	}
	// Pre-condition: the source test file must exist.
	doesFileExist, err := srcFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by srcFMgr.DoesThisFileExist().\n"+
			"srcFMgr='%v'\nError='%v'\n",
			srcFMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: Source Test File DOES NOT EXIST!\nSource File='%v'\n",
			srcFMgr.GetAbsolutePathFileName())
		return
	}
	// Set up the destination directory and the expected destination file.
	rawDestPath := fh.AdjustPathSlash("../checkfiles/checkfiles02")
	destDMgr, err := DirMgr{}.New(rawDestPath)
	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}
	newFileMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, fileName)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromDirMgrFileNameExt(destDMgr, fileName).\n"+
			"destDMgr='%v'\nfileName='%v'\nError='%v'\n",
			destDMgr.GetAbsolutePath(), fileName, err.Error())
		return
	}
	// Remove any leftover destination file from a previous run.
	doesFileExist, err = newFileMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned from newFileMgr.DoesThisFileExist().\n"+
			"newFileMgr='%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePath(), err.Error())
		return
	}
	if doesFileExist {
		err = newFileMgr.DeleteThisFile()
		if err != nil {
			t.Errorf("Error returned from newFileMgr.DeleteThisFile().\n"+
				"newFileMgr='%v'\nError='%v'\n",
				newFileMgr.GetAbsolutePath(), err.Error())
			return
		}
	}
	// Operation under test.
	err = srcFMgr.CopyFileToDirByLinkByIo(destDMgr)
	if err != nil {
		t.Errorf("Error returned from srcFMgr.CopyFileToDirByLinkByIo(destDMgr)\n. "+
			"destPath='%v'\nError='%v'\n",
			destDMgr.GetAbsolutePath(), err.Error())
		_ = newFileMgr.DeleteThisFile()
		return
	}
	// Verify the copy landed in the destination directory.
	doesFileExist, err = newFileMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned from newFileMgr.DoesThisFileExist().\n"+
			"newFileMgr='%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: File Copy Failed!\n"+
			"Source File='%v'\nDestination File='%v'\n",
			srcFMgr.GetAbsolutePathFileName(), newFileMgr.GetAbsolutePathFileName())
		return
	}
	// Clean-up.
	err = newFileMgr.DeleteThisFile()
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by newFileMgr.DeleteThisFile().\n"+
			"newFileMgr= '%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePathFileName(), err.Error())
	}
}
// TestFileMgr_CopyFileToDirByLinkByIo_02 verifies that the copy fails
// when the source FileMgr is marked uninitialized.
func TestFileMgr_CopyFileToDirByLinkByIo_02(t *testing.T) {
	fileName := "newerFileForTest_01.txt"
	sourceFile := "../filesfortest/newfilesfortest/" + fileName
	fh := FileHelper{}
	absoluteSourceFile, err := fh.MakeAbsolutePath(sourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n",
			sourceFile, err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile). "+
			"absoluteSourceFile='%v'\nError='%v'\n",
			absoluteSourceFile, err.Error())
		return
	}
	// Pre-condition: the source test file must exist.
	doesFileExist, err := srcFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by srcFMgr.DoesThisFileExist().\n"+
			"srcFMgr='%v'\nError='%v'\n",
			srcFMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: Source Test File DOES NOT EXIST!\nSource File='%v'\n",
			srcFMgr.GetAbsolutePathFileName())
		return
	}
	// Set up the destination directory and the expected destination file.
	rawDestPath := fh.AdjustPathSlash("../checkfiles/checkfiles02")
	destDMgr, err := DirMgr{}.New(rawDestPath)
	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}
	newFileMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, fileName)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromDirMgrFileNameExt(destDMgr, fileName).\n"+
			"destDMgr='%v'\nfileName='%v'\nError='%v'\n",
			destDMgr.GetAbsolutePath(), fileName, err.Error())
		return
	}
	// Remove any leftover destination file from a previous run.
	doesFileExist, err = newFileMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned from newFileMgr.DoesThisFileExist().\n"+
			"newFileMgr='%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePath(), err.Error())
		return
	}
	if doesFileExist {
		err = newFileMgr.DeleteThisFile()
		if err != nil {
			t.Errorf("Error returned from newFileMgr.DeleteThisFile().\n"+
				"newFileMgr='%v'\nError='%v'\n",
				newFileMgr.GetAbsolutePath(), err.Error())
			return
		}
	}
	// Force the error condition under test.
	srcFMgr.isInitialized = false
	err = srcFMgr.CopyFileToDirByLinkByIo(destDMgr)
	if err == nil {
		t.Error("Expected an error from srcFMgr.CopyFileToDirByLinkByIo(destDMgr) because " +
			"srcFMgr.isInitialized==false. However, NO ERROR WAS RETURNED!")
	}
	// Clean-up.
	err = newFileMgr.DeleteThisFile()
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by newFileMgr.DeleteThisFile().\n"+
			"newFileMgr= '%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePathFileName(), err.Error())
	}
}
// TestFileMgr_CopyFileToDirByLinkByIo_03 verifies that the copy fails
// when the destination DirMgr is marked uninitialized.
func TestFileMgr_CopyFileToDirByLinkByIo_03(t *testing.T) {
	fileName := "newerFileForTest_01.txt"
	sourceFile := "../filesfortest/newfilesfortest/" + fileName
	fh := FileHelper{}
	absoluteSourceFile, err := fh.MakeAbsolutePath(sourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n",
			sourceFile, err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile).\n"+
			"absoluteSourceFile='%v'\nError='%v'\n",
			absoluteSourceFile, err.Error())
		return
	}
	// Pre-condition: the source test file must exist.
	doesFileExist, err := srcFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by srcFMgr.DoesThisFileExist().\n"+
			"srcFMgr='%v'\nError='%v'\n",
			srcFMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: Source Test File DOES NOT EXIST!\nSource File='%v'\n",
			srcFMgr.GetAbsolutePathFileName())
		return
	}
	// Set up the destination directory and the expected destination file.
	rawDestPath := fh.AdjustPathSlash("../checkfiles/checkfiles02")
	destDMgr, err := DirMgr{}.New(rawDestPath)
	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}
	newFileMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, fileName)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromDirMgrFileNameExt(destDMgr, fileName).\n"+
			"destDMgr='%v'\nfileName='%v'\nError='%v'\n",
			destDMgr.GetAbsolutePath(), fileName, err.Error())
		return
	}
	// Remove any leftover destination file from a previous run.
	doesFileExist, err = newFileMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned from newFileMgr.DoesThisFileExist().\n"+
			"newFileMgr='%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePath(), err.Error())
		return
	}
	if doesFileExist {
		err = newFileMgr.DeleteThisFile()
		if err != nil {
			t.Errorf("Error returned from newFileMgr.DeleteThisFile().\n"+
				"newFileMgr='%v'\nError='%v'\n",
				newFileMgr.GetAbsolutePath(), err.Error())
			return
		}
	}
	// Force the error condition under test.
	destDMgr.isInitialized = false
	err = srcFMgr.CopyFileToDirByLinkByIo(destDMgr)
	if err == nil {
		t.Error("Expected an error from srcFMgr.CopyFileToDirByLinkByIo(destDMgr)\n" +
			"because destDMgr.isInitialized==false.\nHowever, NO ERROR WAS RETURNED!\n")
	}
	// Clean-up.
	err = newFileMgr.DeleteThisFile()
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by newFileMgr.DeleteThisFile().\n"+
			"newFileMgr= '%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePathFileName(), err.Error())
	}
}
// TestFileMgr_CopyFileToDirByLinkByIo_04 verifies that copying a file
// into its own source directory returns an error.
func TestFileMgr_CopyFileToDirByLinkByIo_04(t *testing.T) {
	fileName := "newerFileForTest_01.txt"
	sourceFile := "../filesfortest/newfilesfortest/" + fileName
	fh := FileHelper{}
	absoluteSourceFile, err := fh.MakeAbsolutePath(sourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n",
			sourceFile, err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile).\n"+
			"absoluteSourceFile='%v'\nError='%v'\n",
			absoluteSourceFile, err.Error())
		return
	}
	// Pre-condition: the source test file must exist.
	doesFileExist, err := srcFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by srcFMgr.DoesThisFileExist().\n"+
			"srcFMgr='%v'\nError='%v'\n",
			srcFMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: Source Test File DOES NOT EXIST!\nSource File='%v'\n",
			srcFMgr.GetAbsolutePathFileName())
		return
	}
	// Destination == source directory: the copy must be rejected.
	destDMgr := srcFMgr.GetDirMgr()
	err = srcFMgr.CopyFileToDirByLinkByIo(destDMgr)
	if err == nil {
		t.Error("Expected an error from srcFMgr.CopyFileToDirByLinkByIo(destDMgr)\n" +
			"because source directory manager equals destination directory manager.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_CopyFileToDirByLinkByIo_05 verifies that copying a
// non-existent source file returns an error.
// Fix: the error returned by DirMgr{}.New(rawDestPath) was previously
// ignored, which could mask a setup failure and make the test pass (or
// fail) for the wrong reason.
func TestFileMgr_CopyFileToDirByLinkByIo_05(t *testing.T) {
	sourceFile := "../filesfortest/newfilesfortest/iDoNotExist.txt"
	fh := FileHelper{}
	adjustedSourceFile := fh.AdjustPathSlash(sourceFile)
	absoluteSourceFile, err := fh.MakeAbsolutePath(adjustedSourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(adjustedSourceFile).\n"+
			"Error='%v'\n", err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile).\n"+
			"Error='%v' ", err.Error())
		return
	}
	rawDestPath := fh.AdjustPathSlash("../checkfiles/checkfiles02")
	destDMgr, err := DirMgr{}.New(rawDestPath)
	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}
	err = srcFMgr.CopyFileToDirByLinkByIo(destDMgr)
	if err == nil {
		t.Error("Expected an error from srcFMgr.CopyFileToDirByLinkByIo(destDMgr)\n" +
			"because source file does NOT exist.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_CopyFileToDirByLink_01 performs a happy-path copy of an existing
// source file into a destination directory using CopyFileToDirByLink, then
// verifies the copied file exists and removes it.
func TestFileMgr_CopyFileToDirByLink_01(t *testing.T) {
	// Setup: build an absolute path to the test source file.
	fileName := "newerFileForTest_01.txt"
	sourceFile := "../filesfortest/newfilesfortest/" + fileName
	fh := FileHelper{}
	absoluteSourceFile, err := fh.MakeAbsolutePath(sourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n",
			sourceFile, err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile).\n"+
			"absoluteSourceFile='%v'\nError='%v'\n",
			absoluteSourceFile, err.Error())
		return
	}
	// Verify the source file actually exists before attempting the copy.
	doesFileExist, err := srcFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by srcFMgr.DoesThisFileExist().\n"+
			"srcFMgr='%v'\nError='%v'\n",
			srcFMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: Source Test File DOES NOT EXIST!\nSource File='%v'\n",
			srcFMgr.GetAbsolutePathFileName())
		return
	}
	// Setup: destination directory and a manager for the expected destination file.
	rawDestPath := fh.AdjustPathSlash("../checkfiles/checkfiles02")
	destDMgr, err := DirMgr{}.New(rawDestPath)
	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}
	newFileMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, fileName)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromDirMgrFileNameExt(destDMgr, fileName).\n"+
			"destDMgr='%v'\nfileName='%v'\nError='%v'\n",
			destDMgr.GetAbsolutePath(), fileName, err.Error())
		return
	}
	// Delete any pre-existing destination file so the copy result is unambiguous.
	doesFileExist, err = newFileMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned from newFileMgr.DoesThisFileExist().\n"+
			"newFileMgr='%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePath(), err.Error())
		return
	}
	if doesFileExist {
		err = newFileMgr.DeleteThisFile()
		if err != nil {
			t.Errorf("Error returned from newFileMgr.DeleteThisFile().\n"+
				"newFileMgr='%v'\nError='%v'\n",
				newFileMgr.GetAbsolutePath(), err.Error())
			return
		}
	}
	// Exercise: copy the source file into the destination directory.
	err = srcFMgr.CopyFileToDirByLink(destDMgr)
	if err != nil {
		t.Errorf("Error returned from srcFMgr.CopyFileToDirByLink(destDMgr).\n"+
			"destDMgr='%v'\nError='%v'\n",
			destDMgr.GetAbsolutePath(), err.Error())
		_ = newFileMgr.DeleteThisFile()
		return
	}
	// Verify: the destination file now exists.
	doesFileExist, err = newFileMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned from newFileMgr.DoesThisFileExist().\n"+
			"newFileMgr='%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: File Copy Failed!\n"+
			"Source File='%v'\nDestination File='%v'\n",
			srcFMgr.GetAbsolutePathFileName(), newFileMgr.GetAbsolutePathFileName())
		return
	}
	// Teardown: remove the copied file.
	err = newFileMgr.DeleteThisFile()
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by newFileMgr.DeleteThisFile().\n"+
			"newFileMgr= '%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePathFileName(), err.Error())
	}
}
// TestFileMgr_CopyFileToDirByLink_02 verifies that CopyFileToDirByLink returns
// an error when the source FileMgr has been invalidated
// (isInitialized == false).
func TestFileMgr_CopyFileToDirByLink_02(t *testing.T) {
	// Setup: build an absolute path to the test source file.
	fileName := "newerFileForTest_01.txt"
	sourceFile := "../filesfortest/newfilesfortest/" + fileName
	fh := FileHelper{}
	absoluteSourceFile, err := fh.MakeAbsolutePath(sourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n",
			sourceFile, err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile).\n"+
			"absoluteSourceFile='%v'\nError='%v'\n",
			absoluteSourceFile, err.Error())
		return
	}
	// Verify the source file exists before the negative test.
	doesFileExist, err := srcFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by srcFMgr.DoesThisFileExist().\n"+
			"srcFMgr='%v'\nError='%v'\n",
			srcFMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: Source Test File DOES NOT EXIST!\nSource File='%v'\n",
			srcFMgr.GetAbsolutePathFileName())
		return
	}
	// Setup: destination directory and manager for the would-be destination file.
	rawDestPath := fh.AdjustPathSlash("../checkfiles/checkfiles02")
	destDMgr, err := DirMgr{}.New(rawDestPath)
	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}
	newFileMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, fileName)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromDirMgrFileNameExt(destDMgr, fileName).\n"+
			"destDMgr='%v'\nfileName='%v'\nError='%v'\n",
			destDMgr.GetAbsolutePath(), fileName, err.Error())
		return
	}
	// Ensure no stale destination file is present.
	doesFileExist, err = newFileMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned from newFileMgr.DoesThisFileExist().\n"+
			"newFileMgr='%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePath(), err.Error())
		return
	}
	if doesFileExist {
		err = newFileMgr.DeleteThisFile()
		if err != nil {
			t.Errorf("Error returned from newFileMgr.DeleteThisFile().\n"+
				"newFileMgr='%v'\nError='%v'\n",
				newFileMgr.GetAbsolutePath(), err.Error())
			return
		}
	}
	// Invalidate the source manager; the copy call must now fail.
	srcFMgr.isInitialized = false
	err = srcFMgr.CopyFileToDirByLink(destDMgr)
	if err == nil {
		t.Error("Expected an error return from srcFMgr.CopyFileToDirByLink(destDMgr)\n" +
			"because srcFMgr.isInitialized == false.\nHowever, NO ERROR WAS RETURNED!\n")
	}
	// Teardown: remove any destination file that may have been created.
	err = newFileMgr.DeleteThisFile()
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by newFileMgr.DeleteThisFile().\n"+
			"newFileMgr= '%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePathFileName(), err.Error())
	}
}
// TestFileMgr_CopyFileToDirByLink_03 verifies that CopyFileToDirByLink returns
// an error when the destination DirMgr has been invalidated
// (isInitialized == false).
func TestFileMgr_CopyFileToDirByLink_03(t *testing.T) {
	// Setup: build an absolute path to the test source file.
	fileName := "newerFileForTest_01.txt"
	sourceFile := "../filesfortest/newfilesfortest/" + fileName
	fh := FileHelper{}
	absoluteSourceFile, err := fh.MakeAbsolutePath(sourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n",
			sourceFile, err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile).\n"+
			"absoluteSourceFile='%v'\nError='%v'\n",
			absoluteSourceFile, err.Error())
		return
	}
	// Verify the source file exists before the negative test.
	doesFileExist, err := srcFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by srcFMgr.DoesThisFileExist().\n"+
			"srcFMgr='%v'\nError='%v'\n",
			srcFMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: Source Test File DOES NOT EXIST!\nSource File='%v'\n",
			srcFMgr.GetAbsolutePathFileName())
		return
	}
	// Setup: destination directory and manager for the would-be destination file.
	rawDestPath := fh.AdjustPathSlash("../checkfiles/checkfiles02")
	destDMgr, err := DirMgr{}.New(rawDestPath)
	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}
	newFileMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, fileName)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromDirMgrFileNameExt(destDMgr, fileName).\n"+
			"destDMgr='%v'\nfileName='%v'\nError='%v'\n",
			destDMgr.GetAbsolutePath(), fileName, err.Error())
		return
	}
	// Ensure no stale destination file is present.
	doesFileExist, err = newFileMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned from newFileMgr.DoesThisFileExist().\n"+
			"newFileMgr='%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePath(), err.Error())
		return
	}
	if doesFileExist {
		err = newFileMgr.DeleteThisFile()
		if err != nil {
			t.Errorf("Error returned from newFileMgr.DeleteThisFile().\n"+
				"newFileMgr='%v'\nError='%v'\n",
				newFileMgr.GetAbsolutePath(), err.Error())
			return
		}
	}
	// Invalidate the destination directory manager; the copy call must now fail.
	destDMgr.isInitialized = false
	err = srcFMgr.CopyFileToDirByLink(destDMgr)
	if err == nil {
		// Fix: the failure message previously named the wrong receiver
		// ("destDMgr.CopyFileToDirByLink"); the call is made on srcFMgr.
		t.Error("Expected an error return from srcFMgr.CopyFileToDirByLink(destDMgr)\n" +
			"because destDMgr.isInitialized == false.\nHowever, NO ERROR WAS RETURNED!\n")
	}
	// Teardown: remove any destination file that may have been created.
	err = newFileMgr.DeleteThisFile()
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by newFileMgr.DeleteThisFile().\n"+
			"newFileMgr= '%v'\nError='%v'\n",
			newFileMgr.GetAbsolutePathFileName(), err.Error())
	}
}
// TestFileMgr_CopyFileToDirByLink_04 verifies that CopyFileToDirByLink returns
// an error when the destination directory equals the source directory.
func TestFileMgr_CopyFileToDirByLink_04(t *testing.T) {
	fileName := "newerFileForTest_01.txt"
	sourceFile := "../filesfortest/newfilesfortest/" + fileName
	fh := FileHelper{}
	absoluteSourceFile, err := fh.MakeAbsolutePath(sourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n",
			sourceFile, err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile).\n"+
			"absoluteSourceFile='%v'\nError='%v'\n",
			absoluteSourceFile, err.Error())
		return
	}
	// Verify the source file exists before the negative test.
	doesFileExist, err := srcFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by srcFMgr.DoesThisFileExist().\n"+
			"srcFMgr='%v'\nError='%v'\n",
			srcFMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesFileExist {
		t.Errorf("Error: Source Test File DOES NOT EXIST!\nSource File='%v'\n",
			srcFMgr.GetAbsolutePathFileName())
		return
	}
	// Use the source file's own directory as the destination to trigger the error.
	destDMgr := srcFMgr.GetDirMgr()
	err = srcFMgr.CopyFileToDirByLink(destDMgr)
	if err == nil {
		// Fix: the failure message previously named the wrong receiver
		// ("destDMgr.CopyFileToDirByLink"); the call is made on srcFMgr.
		t.Error("Expected an error return from srcFMgr.CopyFileToDirByLink(destDMgr)\n" +
			"because source directory equals destination directory.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_CopyFileToDirByLink_05 verifies that CopyFileToDirByLink returns
// an error when the source file does not exist.
func TestFileMgr_CopyFileToDirByLink_05(t *testing.T) {
	sourceFile := "../filesfortest/newfilesfortest/iDoNotExist.txt"
	fh := FileHelper{}
	adjustedSourceFile := fh.AdjustPathSlash(sourceFile)
	absoluteSourceFile, err := fh.MakeAbsolutePath(adjustedSourceFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(adjustedSourceFile).\n"+
			"Error='%v'\n", err.Error())
		return
	}
	srcFMgr, err := FileMgr{}.New(absoluteSourceFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(absoluteSourceFile).\n"+
			"Error='%v'\n", err.Error())
		return
	}
	rawDestPath := fh.AdjustPathSlash("../checkfiles/checkfiles02")
	destDMgr, err := DirMgr{}.New(rawDestPath)
	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n",
			rawDestPath, err.Error())
		return
	}
	err = srcFMgr.CopyFileToDirByLink(destDMgr)
	if err == nil {
		// Fix: the failure message previously named the wrong receiver
		// ("destDMgr.CopyFileToDirByLink"); the call is made on srcFMgr.
		t.Error("Expected an error return from srcFMgr.CopyFileToDirByLink(destDMgr)\n" +
			"because source file does NOT exist.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_CopyFromStrings_01 performs a happy-path copy via
// FileMgr{}.CopyFromStrings and verifies both returned file managers point at
// the expected (existing) source and destination files.
func TestFileMgr_CopyFromStrings_01(t *testing.T) {
	// Setup: normalize source and destination paths and clear any stale
	// destination file.
	fh := FileHelper{}
	sourceFile := "../filesfortest/newfilesfortest/newerFileForTest_01.txt"
	sourceFile = fh.AdjustPathSlash(sourceFile)
	destFile := "../createFilesTest/TestFileMgr_CopyFromStrings_01.txt"
	destFile = fh.AdjustPathSlash(destFile)
	err := fh.DeleteDirFile(destFile)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirFile(destFile)\n"+
			"destFile='%v'\nError='%v'\n",
			destFile, err.Error())
		return
	}
	// Exercise: copy and capture the returned source/destination managers.
	fMgrSrc, fMgrDest, err := FileMgr{}.CopyFromStrings(sourceFile, destFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}."+
			"CopyFromStrings(sourceFile, destFile)\n"+
			"sourceFile='%v'\ndestFile='%v'\nError='%v'",
			sourceFile, destFile, err.Error())
		_ = fh.DeleteDirFile(destFile)
		return
	}
	// Verify: the source manager's absolute path matches the input source path.
	// Comparison is case-insensitive to tolerate case-insensitive file systems.
	absSourcePath, err := fh.MakeAbsolutePath(sourceFile)
	if err != nil {
		t.Errorf("Error returned by fh."+
			"MakeAbsolutePath(sourceFile)\n"+
			"sourceFile='%v',Error='%v'\n",
			sourceFile, err.Error())
		_ = fh.DeleteDirFile(destFile)
		return
	}
	absSourcePath = strings.ToLower(absSourcePath)
	if absSourcePath != strings.ToLower(fMgrSrc.absolutePathFileName) {
		t.Errorf("Error: Expected source path and file name are NOT EQUAL\n"+
			"to actual source path and file name!\n"+
			"Expected source file='%v'\n"+
			"Actual source file='%v'\n",
			absSourcePath, strings.ToLower(fMgrSrc.absolutePathFileName))
	}
	// Verify: the destination manager's absolute path matches the input
	// destination path.
	absDestPath, err := fh.MakeAbsolutePath(destFile)
	if err != nil {
		t.Errorf("Error returned by fh."+
			"MakeAbsolutePath(destFile)\n"+
			"destFile='%v',Error='%v'\n",
			destFile, err.Error())
		_ = fh.DeleteDirFile(destFile)
		return
	}
	absDestPath = strings.ToLower(absDestPath)
	if absDestPath != strings.ToLower(fMgrDest.absolutePathFileName) {
		t.Errorf("Error: Expected destination path and file name are NOT EQUAL\n"+
			"to actual destination path and file name!\n"+
			"Expected destination file='%v'\n"+
			"Actual destination file='%v'\n",
			absDestPath, strings.ToLower(fMgrDest.absolutePathFileName))
	}
	// Verify: both files exist on disk, checked via the helper and via the
	// returned managers.
	if !fh.DoesFileExist(absSourcePath) {
		t.Errorf("Error: Source File DOES NOT EXIST!\n"+
			"Source File='%v'\n", absSourcePath)
		_ = fh.DeleteDirFile(absDestPath)
		return
	}
	if !fh.DoesFileExist(absDestPath) {
		t.Errorf("Error: After Copy Operation Destination "+
			"File DOES NOT EXIST!\n"+
			"Destination File='%v'\n", absDestPath)
		return
	}
	if !fMgrSrc.DoesFileExist() {
		t.Errorf("Error returned by fMgrSrc.DoesFileExist()\n"+
			"Source File DOES NOT EXIST!\n"+
			"Source File='%v'", fMgrSrc.absolutePathFileName)
	}
	if !fMgrDest.DoesFileExist() {
		t.Errorf("Error returned by fMgrDest.DoesFileExist()\n"+
			"Destination File DOES NOT EXIST!\n"+
			"Destination File='%v'", fMgrDest.absolutePathFileName)
	}
	// Teardown: remove the copied destination file.
	err = fh.DeleteDirFile(absDestPath)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(absDestPath)\n"+
			"absDestPath='%v'\nError='%v'\n",
			absDestPath, err.Error())
	}
}
// TestFileMgr_CopyFromStrings_02 verifies that CopyFromStrings fails when the
// source file does not exist.
func TestFileMgr_CopyFromStrings_02(t *testing.T) {
	helper := FileHelper{}
	sourceFile := helper.AdjustPathSlash("../checkfiles/iDoNotExist.txt")
	destFile := helper.AdjustPathSlash(
		"../createFilesTest/TestFileMgr_CopyFromStrings_02.txt")
	// Remove any stale destination file from a prior run.
	if err := helper.DeleteDirFile(destFile); err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirFile(destFile)\n"+
			"destFile='%v'\nError='%v'\n",
			destFile, err.Error())
		return
	}
	_, _, err := FileMgr{}.CopyFromStrings(sourceFile, destFile)
	if err == nil {
		t.Error("Expected Error return from FileMgr{}." +
			"CopyFromStrings(sourceFile, destFile)\n" +
			"because 'sourceFile' DOES NOT EXIST!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
	if err := helper.DeleteDirFile(destFile); err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(destFile)\n"+
			"destFile='%v'\nError='%v'\n",
			destFile, err.Error())
	}
}
// TestFileMgr_CopyFromStrings_03 verifies that CopyFromStrings fails when the
// source file string is empty.
func TestFileMgr_CopyFromStrings_03(t *testing.T) {
	helper := FileHelper{}
	sourceFile := ""
	destFile := helper.AdjustPathSlash(
		"../createFilesTest/TestFileMgr_CopyFromStrings_03.txt")
	// Remove any stale destination file from a prior run.
	if err := helper.DeleteDirFile(destFile); err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirFile(destFile)\n"+
			"destFile='%v'\nError='%v'\n",
			destFile, err.Error())
		return
	}
	_, _, err := FileMgr{}.CopyFromStrings(sourceFile, destFile)
	if err == nil {
		t.Error("Expected Error return from FileMgr{}." +
			"CopyFromStrings(sourceFile, destFile)\n" +
			"because 'sourceFile' is an empty string!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
	if err := helper.DeleteDirFile(destFile); err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(destFile)\n"+
			"destFile='%v'\nError='%v'\n",
			destFile, err.Error())
	}
}
// TestFileMgr_CopyFromStrings_04 verifies that CopyFromStrings fails when the
// destination file string is empty and that the source file remains intact.
func TestFileMgr_CopyFromStrings_04(t *testing.T) {
	helper := FileHelper{}
	sourceFile := helper.AdjustPathSlash(
		"../filesfortest/newfilesfortest/newerFileForTest_01.txt")
	destFile := ""
	_, _, err := FileMgr{}.CopyFromStrings(sourceFile, destFile)
	if err == nil {
		t.Error("Expected Error return from FileMgr{}." +
			"CopyFromStrings(sourceFile, destFile)\n" +
			"because 'destFile' is an empty string!\n" +
			"However, NO ERROR WAS RETURNED!!!\n")
	}
	if !helper.DoesFileExist(sourceFile) {
		t.Errorf("Error: Source File DOES NOT EXIST!\n"+
			"sourceFile='%v'\n",
			sourceFile)
	}
}
// TestFileMgr_CreateDir_01 verifies that FileMgr.CreateDir creates the full
// multi-level directory path of the managed file, then deletes it again.
func TestFileMgr_CreateDir_01(t *testing.T) {
	// Setup: a file manager whose directory path does not yet exist.
	fh := FileHelper{}
	pathFileName :=
		"../createFilesTest/Level01/Level02/Level03/TestFileMgr_CreateDir_01.txt"
	testFile := fh.AdjustPathSlash(pathFileName)
	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(testFile)
	if err != nil {
		t.Errorf("Error thrown on FileMgr{}.NewFromPathFileNameExtStr(testFile)\n"+
			"testFile='%v'\nError='%v'\n",
			testFile, err.Error())
		return
	}
	// Remove the directory tree if a prior run left it behind.
	doesThisFileExist, err :=
		fh.DoesThisFileExist(fileMgr.dMgr.absolutePath)
	if err != nil {
		t.Errorf("Setup Non-Path Error returned by "+
			"fh.DoesThisFileExist(fileMgr.dMgr.absolutePath)\n"+
			"fileMgr.dMgr.absolutePath='%v'\nError='%v'\n",
			fileMgr.dMgr.absolutePath, err.Error())
		return
	}
	if doesThisFileExist {
		err = fh.DeleteDirPathAll(fileMgr.dMgr.absolutePath)
		if err != nil {
			t.Errorf("Error thrown on fh.DeleteDirPathAll(fileMgr.dMgr.absolutePath).\n"+
				"Attempted Directory Deletion FAILED!\n"+
				"fileMgr.dMgr.absolutePath='%v'\nError='%v'\n",
				fileMgr.dMgr.absolutePath, err.Error())
			return
		}
	}
	// Exercise: create the directory path.
	err = fileMgr.CreateDir()
	if err != nil {
		t.Errorf("Error returned from fileMgr.CreateDir().\n"+
			"fileMgr='%v'\nError='%v'\n",
			fileMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(fileMgr.dMgr.absolutePath)
		return
	}
	// Verify: the directory now exists on disk.
	dirMgr := fileMgr.GetDirMgr()
	doesThisFileExist, err = fh.DoesThisFileExist(dirMgr.GetAbsolutePath())
	if err != nil {
		t.Errorf("Non-Path Error returned from path!\n"+
			"fileMgr.CreateDir() FAILED!\n"+
			"Path='%v'\nError='%v'\n",
			dirMgr.GetAbsolutePath(), err.Error())
		return
	}
	if !doesThisFileExist {
		t.Errorf("Error: Failed to create directory path!\n"+
			"Directory Path='%v'\n",
			dirMgr.GetAbsolutePath())
		return
	} else {
		// Teardown: delete the created directory tree and confirm it is gone.
		err = dirMgr.DeleteAll()
		if err != nil {
			t.Errorf("Error returned from dirMgr.DeleteAll().\n"+
				"dirMgr='%v'\nError='%v'\n",
				dirMgr.GetAbsolutePath(), err.Error())
			return
		}
		doesThisFileExist, err = fh.DoesThisFileExist(dirMgr.GetAbsolutePath())
		if err != nil {
			t.Errorf("#2 Non-Path Error returned from path!\n"+
				"Final Deletion of Directory Path FAILED!\n"+
				"Path='%v'\nError='%v'\n",
				dirMgr.GetAbsolutePath(), err.Error())
			return
		}
		if doesThisFileExist {
			t.Errorf("ERROR: Final Deletion of Directory Path FAILED!\n"+
				"File Manager Directory Path='%v'\n",
				dirMgr.GetAbsolutePath())
			return
		}
	}
}
// TestFileMgr_CreateDir_02 verifies that CreateDir fails on an uninitialized
// File Manager.
func TestFileMgr_CreateDir_02(t *testing.T) {
	uninitializedFMgr := FileMgr{}
	if err := uninitializedFMgr.CreateDir(); err == nil {
		t.Error("Expected error return from fileMgr.CreateDir()\n" +
			"because 'fileMgr' (File Manager) was NOT initialized.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_CreateDirAndFile_01 verifies that CreateDirAndFile creates both
// the multi-level directory path and the file itself, that the file is
// writable, and then cleans everything up.
func TestFileMgr_CreateDirAndFile_01(t *testing.T) {
	// Setup: a file manager whose directory path does not yet exist.
	fh := FileHelper{}
	testFile := fh.AdjustPathSlash(
		"../createFilesTest/Level01/Level02/Level03/TestFileMgr_CreateDirAndFile_01.txt")
	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(testFile)
	if err != nil {
		t.Errorf("Error thrown on FileMgr{}.NewFromPathFileNameExtStr(testFile)\n"+
			"testFile='%v', Error='%v'",
			testFile, err.Error())
		return
	}
	// Remove the directory tree if a prior run left it behind.
	doesThisFileExist, err :=
		fh.DoesThisFileExist(fileMgr.dMgr.absolutePath)
	if err != nil {
		t.Errorf("Setup Non-Path Error returned by "+
			"fh.DoesThisFileExist(fileMgr.dMgr.absolutePath)\n"+
			"fileMgr.dMgr.absolutePath='%v'\nError='%v'\n",
			fileMgr.dMgr.absolutePath, err.Error())
		return
	}
	if doesThisFileExist {
		err = fh.DeleteDirPathAll(fileMgr.dMgr.absolutePath)
		if err != nil {
			t.Errorf("Error thrown on fh.DeleteDirPathAll(fileMgr.dMgr.absolutePath).\n"+
				"Attempted Directory Deletion FAILED!\n"+
				"fileMgr.dMgr.absolutePath='%v'\nError='%v'\n",
				fileMgr.dMgr.absolutePath, err.Error())
			return
		}
	}
	// Exercise: create the directory path and the file.
	err = fileMgr.CreateDirAndFile()
	if err != nil {
		t.Errorf("Failed to Create Directory and File\n"+
			"Error returned by fileMgr.CreateDirAndFile()\n"+
			"fileMgr='%v'\nError='%v'",
			fileMgr.absolutePathFileName, err.Error())
		_ = fh.DeleteDirPathAll(fileMgr.dMgr.absolutePath)
		return
	}
	// CreateDirAndFile leaves the file open; close it before checking existence.
	err = fileMgr.CloseThisFile()
	if err != nil {
		t.Errorf("Error returned by fileMgr.CloseThisFile().\n"+
			"Error='%v'\n", err.Error())
	}
	// Verify: the file now exists on disk.
	doesThisFileExist, err = fh.DoesThisFileExist(fileMgr.absolutePathFileName)
	if err != nil {
		t.Errorf("Non-Path Error retrned by fh.DoesThisFileExist("+
			"fileMgr.absolutePathFileName)\n"+
			"fileMgr.absolutePathFileName='%v'\nError='%v'\n",
			fileMgr.absolutePathFileName, err.Error())
		_ = fh.DeleteDirPathAll(fileMgr.dMgr.absolutePath)
		return
	}
	if !doesThisFileExist {
		t.Errorf("File Verfication failed!\n"+
			"File DOES NOT EXIST!"+
			"Path File Name='%v'", fileMgr.absolutePathFileName)
		_ = fh.DeleteDirPathAll(fileMgr.dMgr.absolutePath)
		return
	}
	// Verify: the created file is writable.
	s := "Created by File:'xt_filemanger_03_test.go' " +
		"Test Method: TestFileHelper_CreateDirAndFile()"
	_, err = fileMgr.WriteStrToFile(s)
	if err != nil {
		t.Errorf("Received error from fileMgr.WriteStrToFile(s).\n"+
			"s='%v'\n\nError='%v'\n", s, err.Error())
	}
	err = fileMgr.CloseThisFile()
	if err != nil {
		t.Errorf("Received error from fileMgr.CloseThisFile().\n"+
			"fileMgr='%v'\nError='%v'\n",
			fileMgr.absolutePathFileName, err.Error())
	}
	// Teardown: delete the created directory tree and file.
	err = fileMgr.dMgr.DeleteAll()
	if err != nil {
		t.Errorf("Error returned by fileMgr.dMgr.DeleteAll().\n"+
			"Attempted Directory Deletion Failed!!\n"+
			"Directory=%v\nFileName='%v'\nError='%v'",
			fileMgr.absolutePathFileName,
			fileMgr.GetFileNameExt(),
			err.Error())
	}
}
// TestFileMgr_CreateDirAndFile_02 verifies that CreateDirAndFile fails when
// the File Manager has been invalidated.
func TestFileMgr_CreateDirAndFile_02(t *testing.T) {
	helper := FileHelper{}
	baseName := "TestFileMgr_CreateDirAndFile_02"
	testFile := helper.AdjustPathSlash(
		"../createFilesTest/Level01/Level02/Level03/" + baseName)
	// Remove any stale test file from a prior run.
	if err := helper.DeleteDirFile(testFile); err != nil {
		t.Errorf("Test Startup Error returned by "+
			"fh.DeleteDirFile(testFile)\n"+
			"testFile='%v'\nError='%v'\n",
			testFile, err.Error())
		return
	}
	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(testFile)
	if err != nil {
		t.Errorf("Error thrown on FileMgr{}.NewFromPath"+
			"FileNameExtStr(testFile)\n"+
			"testFile='%v'\nError='%v'\n",
			testFile, err.Error())
		return
	}
	// Invalidate the file manager to force the expected error.
	fileMgr.isInitialized = false
	if err = fileMgr.CreateDirAndFile(); err == nil {
		t.Error("Expected an error return from fileMgr.CreateDirAndFile()\n" +
			"because fileMgr is invalid.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
	_ = helper.DeleteDirFile(testFile)
}
// TestFileMgr_CreateThisFile_01 verifies that CreateThisFile fails when the
// File Manager has been invalidated.
func TestFileMgr_CreateThisFile_01(t *testing.T) {
	helper := FileHelper{}
	testFile := helper.AdjustPathSlash(
		"../createFilesTest/Level01/Level02/Level03/TestFileMgr_CreateThisFile_01.txt")
	// Remove any stale test file from a prior run.
	if err := helper.DeleteDirFile(testFile); err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(testFile).\n"+
			"Attempted deletion of 'testFile' FAILED!!\n"+
			"testFile='%v'\nError='%v'\n",
			testFile, err.Error())
		return
	}
	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(testFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.NewFromPathFileNameExtStr(testFile).\n"+
			"testFile='%v'\nError='%v'\n",
			testFile, err.Error())
		return
	}
	// Invalidate the file manager to force the expected error.
	fileMgr.isInitialized = false
	if err = fileMgr.CreateThisFile(); err == nil {
		t.Error("Expected error return from fileMgr.CreateThisFile() because " +
			"fileMgr is invalid.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
	if err = helper.DeleteDirFile(testFile); err != nil {
		t.Errorf("Test Clean-Up Error returned by fh.DeleteDirFile(testFile).\n"+
			"Attempted deletion of 'testFile' FAILED!!\n"+
			"testFile='%v'\nError='%v'\n",
			testFile, err.Error())
		return
	}
}
// TestFileMgr_CreateThisFile_02 verifies that CreateThisFile fails when the
// parent directory of the target file does not exist.
func TestFileMgr_CreateThisFile_02(t *testing.T) {
	helper := FileHelper{}
	testFile := helper.AdjustPathSlash(
		"../iDoNotExist/TestFileMgr_CreateThisFile_02.txt")
	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(testFile)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.NewFromPathFileNameExtStr(testFile)\n"+
			"testFile='%v'\nError='%v'\n",
			testFile, err.Error())
		return
	}
	// Ensure the target directory is absent before the call.
	dirMgr := fileMgr.GetDirMgr()
	_ = dirMgr.DeleteAll()
	if err = fileMgr.CreateThisFile(); err == nil {
		t.Error("Expected error return from fileMgr.CreateThisFile() because\n" +
			"the fileMgr directory does NOT exist.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
	_ = dirMgr.DeleteAll()
}
|
package article
import (
"html/template"
)
// Article represents a single article with its metadata and HTML content.
type Article struct {
	// title of article
	Title string
	// Unique identifier used internally
	ID uint64
	// Unique identifier of the Author
	AuthorID uint64
	// Date of release, used for sorting articles on run page
	// TODO change the type to time.Time
	Timestamp uint64
	// type template.HTML allows unescaped html
	Content template.HTML
}
|
// +build integration
package integration
import (
"bytes"
"github.com/stretchr/testify/assert"
"os/exec"
"testing"
)
// TestMainFunction runs curl inside the simple_nginx_with_curl container via
// "docker exec" and verifies the response returned by the fromgotok8s service.
func TestMainFunction(t *testing.T) {
	args := []string{
		"exec", "-i",
		"simple_nginx_with_curl",
		"curl", "-s", "fromgotok8s:8080",
	}
	var stdout, stderr bytes.Buffer
	cmd := exec.Command("docker", args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err := cmd.Run()
	out := stdout.String()
	errOut := stderr.String()
	assert.Equal(t, "http://web answered with statusCode: 200", out)
	assert.Nil(t, err)
	assert.Equal(t, "", errOut)
}
|
package heatshrink
/*
#cgo CFLAGS: -I./
#cgo LDFLAGS: -L./ -lheatshrink
#cgo LDFLAGS: -L./ -lirzip
#include "heatshrink_app.h"
#include "heatshrink_common.h"
#include "heatshrink_config.h"
#include "heatshrink_decoder.h"
#include "heatshrink_encoder.h"
#include "irzip.h"
#include <stdlib.h>
*/
import "C"
import (
"encoding/json"
"fmt"
"io/ioutil"
"strconv"
"strings"
"sync"
"unsafe"
)
// IrRemoteCfg defines the data format of the IR remote configuration file.
// All values are stored as strings; field semantics are inferred from the
// field names — TODO confirm against the producer of ir_sync_cfg.json.
type IrRemoteCfg struct {
	RemoteId       string
	Frequency      string
	CmdLen         string
	OnoffArray     string
	ModelArray     string
	TempArray      string
	WindSpeedArray string
}
// instance holds the lazily-loaded configuration map; written only inside
// once.Do in GetInstance.
var instance *map[string]IrRemoteCfg

// once guards the one-time load of the configuration file.
var once sync.Once
/*
GoLumiHeatshrinkEncode performs heatshrink compression (with base64 encoding)
of encode_in_buf via the C library. It returns the encoded string, or "" when
encoding fails or does not actually shrink the input.
*/
func GoLumiHeatshrinkEncode(encode_in_buf string) string {
	cs := C.CString(encode_in_buf)
	// Bug fix: C.CString allocates with malloc and was previously never freed.
	defer C.free(unsafe.Pointer(cs))
	encode_len := len(encode_in_buf)
	p := C.malloc(C.size_t(len(encode_in_buf)))
	defer C.free(p)
	ret := C.LumiHeatshrinkBase64Encode(cs, C.int(encode_len), (*C.char)(p))
	ret_len := (int)(ret)
	// Only accept results that are non-empty and smaller than the input.
	if ret_len > 0 && ret_len < encode_len {
		data := C.GoStringN((*C.char)(p), ret)
		return data
	}
	return ""
}
/*
GoLumiHeatshrinkDecode performs heatshrink decompression (with base64 decoding)
of decode_in_buf via the C library. It returns the decoded string, or "" on
failure.
*/
func GoLumiHeatshrinkDecode(decode_in_buf string) string {
	cs := C.CString(decode_in_buf)
	// Bug fix: C.CString allocates with malloc and was previously never freed.
	defer C.free(unsafe.Pointer(cs))
	decode_len := len(decode_in_buf)
	// Output buffer sized at 10x the input; the real expansion ratio is
	// unknown. FIXME: this bound is a guess carried over from the original.
	p := C.malloc(C.size_t(decode_len * 10))
	defer C.free(p)
	ret := C.LumiHeatshrinkBase64Decode(cs, C.int(decode_len), (*C.char)(p))
	ret_len := (int)(ret)
	if ret_len > 0 {
		data := C.GoStringN((*C.char)(p), ret)
		return data
	}
	return ""
}
/*
GoLumiIrZip extracts characteristic values from an IR timing sequence via the
C irzip routine. It returns the characteristic data and the compressed data
as two strings.
*/
func GoLumiIrZip(ir_in_buf string) (string, string) {
	cs := C.CString(ir_in_buf)
	defer C.free(unsafe.Pointer(cs))
	// Output buffers sized from the C library's maximum unit constants.
	chararistorData := C.malloc(C.size_t(C.MAX_CHARARISTOR_UNIT*4 + 1))
	zipData := C.malloc(C.size_t(C.MAX_ZIP_UNIT*2 + 1))
	defer C.free(chararistorData)
	defer C.free(zipData)
	C.irzip(cs, (*C.char)(chararistorData), (*C.char)(zipData))
	chaRetData := C.GoString((*C.char)(chararistorData))
	zipRetData := C.GoString((*C.char)(zipData))
	return chaRetData, zipRetData
}
/*
ExtractArray extracts selected characters from cmdData. temp is a
comma-separated list of 1-based character positions; the characters at those
positions are concatenated in order and returned.

NOTE(review): malformed or out-of-range positions panic via out-of-bounds
indexing, exactly as in the original implementation; callers must supply
validated input.
*/
func ExtractArray(cmdData string, temp string) string {
	// strings.Builder avoids the quadratic cost of repeated string
	// concatenation in the original loop.
	var sb strings.Builder
	for _, value := range strings.Split(temp, ",") {
		// Parse errors yield index 0 (then panic on cmdData[-1]),
		// matching the original behavior.
		index, _ := strconv.ParseInt(value, 10, 32)
		sb.WriteByte(cmdData[index-1])
	}
	return sb.String()
}
/*
ShortCmdToAcKey converts a 6-character short command string into an
air-conditioner key string of the form "P?_M?_T?_S?_D0"
(power, mode, temperature, wind speed, wind direction).
*/
func ShortCmdToAcKey(shortCmd string) string {
	powerChar := shortCmd[0:1]
	modeChar := shortCmd[1:2]
	speedChar := shortCmd[2:3]
	// Character at position 3 is intentionally unused.
	tempHiChar := shortCmd[4:5]
	tempLoChar := shortCmd[5:6]
	var switchState, model, windSpeed, temperature string
	windDirect := "D0"
	// Power state.
	switch powerChar {
	case "0":
		switchState = "P0"
	case "1":
		switchState = "P1"
	}
	// Operating mode.
	switch modeChar {
	case "0":
		model = "M1" // heat
	case "1":
		model = "M0" // cool
	case "2":
		model = "M2" // auto
	case "3":
		model = "M4" // dry
	case "4":
		model = "M3" // fan
	}
	// Wind speed.
	switch speedChar {
	case "0":
		windSpeed = "S1"
	case "1":
		windSpeed = "S2"
	case "2":
		windSpeed = "S3"
	case "3":
		windSpeed = "S0"
	}
	// Temperature: two hexadecimal digits at positions 4 and 5.
	hi, _ := strconv.ParseInt(tempHiChar, 16, 32)
	lo, _ := strconv.ParseInt(tempLoChar, 16, 32)
	iTemp := hi*16 + lo
	if iTemp > -1 && iTemp < 241 {
		temperature = "T" + strconv.Itoa(int(iTemp))
	}
	return switchState + "_" + model + "_" + temperature + "_" + windSpeed + "_" + windDirect
}
/*
GetInstance returns the shared configuration map as a singleton, loading it
from "heatshrink/ir_sync_cfg.json" exactly once.
Note: if the load fails, the error is discarded and instance remains nil.
*/
func GetInstance() *map[string]IrRemoteCfg {
	once.Do(func() {
		instance, _ = readFile("heatshrink/ir_sync_cfg.json")
	})
	return instance
}
// readFile loads the JSON configuration file at filename and parses it into a
// string-keyed map of IrRemoteCfg entries. Errors are printed and returned.
func readFile(filename string) (*map[string]IrRemoteCfg, error) {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		fmt.Println("ReadFile: ", err.Error())
		return nil, err
	}
	cfgMap := &map[string]IrRemoteCfg{}
	if err := json.Unmarshal(data, cfgMap); err != nil {
		fmt.Println("Unmarshal: ", err.Error())
		return nil, err
	}
	return cfgMap, nil
}
|
package main
import (
"crypto/ecdsa"
"crypto/rsa"
"encoding/json"
"fmt"
"net/mail"
"net/url"
"os"
"path/filepath"
"reflect"
"regexp"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/authelia/authelia/v4/internal/configuration/schema"
"github.com/authelia/authelia/v4/internal/model"
"github.com/authelia/authelia/v4/internal/utils"
)
// getPFlagPath joins the string values of the named flags into a single
// filesystem path. At least one flag name must be supplied.
func getPFlagPath(flags *pflag.FlagSet, flagNames ...string) (fullPath string, err error) {
	if len(flagNames) == 0 {
		return "", fmt.Errorf("no flag names")
	}
	for i, flagName := range flagNames {
		segment, err := flags.GetString(flagName)
		if err != nil {
			return "", fmt.Errorf("failed to lookup flag '%s': %w", flagName, err)
		}
		// The first segment seeds the path; the rest are joined onto it.
		if i == 0 {
			fullPath = segment
			continue
		}
		fullPath = filepath.Join(fullPath, segment)
	}
	return fullPath, nil
}
// buildCSP assembles a Content-Security-Policy header value from the given
// rule sets, substituting defaultSrc for the value of any "default-src" rule.
func buildCSP(defaultSrc string, ruleSets ...[]CSPValue) string {
	var directives []string
	for _, ruleSet := range ruleSets {
		for _, rule := range ruleSet {
			value := rule.Value
			if rule.Name == "default-src" {
				value = defaultSrc
			}
			directives = append(directives, fmt.Sprintf("%s %s", rule.Name, value))
		}
	}
	return strings.Join(directives, "; ")
}
// decodedTypes lists types that the configuration decoder converts directly
// from primitive values; readTags treats them as leaf values rather than
// recursing into their struct fields.
var decodedTypes = []reflect.Type{
	reflect.TypeOf(mail.Address{}),
	reflect.TypeOf(regexp.Regexp{}),
	reflect.TypeOf(url.URL{}),
	reflect.TypeOf(time.Duration(0)),
	reflect.TypeOf(schema.Address{}),
	reflect.TypeOf(schema.AddressTCP{}),
	reflect.TypeOf(schema.AddressUDP{}),
	reflect.TypeOf(schema.AddressLDAP{}),
	reflect.TypeOf(schema.AddressSMTP{}),
	reflect.TypeOf(schema.X509CertificateChain{}),
	reflect.TypeOf(schema.PasswordDigest{}),
	reflect.TypeOf(rsa.PrivateKey{}),
	reflect.TypeOf(ecdsa.PrivateKey{}),
}
func containsType(needle reflect.Type, haystack []reflect.Type) (contains bool) {
for _, t := range haystack {
if needle.Kind() == reflect.Ptr {
if needle.Elem() == t {
return true
}
} else if needle == t {
return true
}
}
return false
}
// readVersion locates the frontend package.json via the root/web/file flags on
// cmd, reads it, and parses its version field into a semantic version.
func readVersion(cmd *cobra.Command) (version *model.SemanticVersion, err error) {
	var (
		pathPackageJSON string
		dataPackageJSON []byte
		packageJSON     PackageJSON
	)
	if pathPackageJSON, err = getPFlagPath(cmd.Flags(), cmdFlagRoot, cmdFlagWeb, cmdFlagFileWebPackage); err != nil {
		return nil, err
	}
	if dataPackageJSON, err = os.ReadFile(pathPackageJSON); err != nil {
		return nil, err
	}
	// Fix: corrected the "unmarshall" misspelling in the error message.
	if err = json.Unmarshal(dataPackageJSON, &packageJSON); err != nil {
		return nil, fmt.Errorf("failed to unmarshal package.json: %w", err)
	}
	return model.NewSemanticVersion(packageJSON.Version)
}
// readTags recursively walks struct type t and returns the configuration key
// paths derived from its `koanf` field tags, rooted at prefix.
//
// When envSkip is true, container keys that cannot be expressed as
// environment variables are omitted. When deprecatedSkip is true, fields
// whose jsonschema tag marks them deprecated are omitted.
//
//nolint:gocyclo
func readTags(prefix string, t reflect.Type, envSkip, deprecatedSkip bool) (tags []string) {
	tags = make([]string, 0)
	// Top-level slices/maps have no environment-variable representation.
	if envSkip && (t.Kind() == reflect.Slice || t.Kind() == reflect.Map) {
		return
	}
	if t.Kind() != reflect.Struct {
		// For a non-struct slice, descend into the element type with a "[]"
		// suffixed prefix; all other non-struct kinds contribute nothing.
		if t.Kind() == reflect.Slice {
			tags = append(tags, readTags(getKeyNameFromTagAndPrefix(prefix, "", true, false), t.Elem(), envSkip, deprecatedSkip)...)
		}
		return
	}
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if deprecatedSkip && isDeprecated(field) {
			continue
		}
		tag := field.Tag.Get("koanf")
		// Untagged fields contribute the bare prefix itself.
		if tag == "" {
			tags = append(tags, prefix)
			continue
		}
		switch kind := field.Type.Kind(); kind {
		case reflect.Struct:
			// Recurse into nested structs unless the type is decoded directly
			// from a primitive value (see decodedTypes).
			if !containsType(field.Type, decodedTypes) {
				tags = append(tags, readTags(getKeyNameFromTagAndPrefix(prefix, tag, false, false), field.Type, envSkip, deprecatedSkip)...)
				continue
			}
		case reflect.Slice, reflect.Map:
			k := field.Type.Elem().Kind()
			if envSkip && !isValueKind(k) {
				continue
			}
			switch k {
			case reflect.Struct:
				// Emit the container key itself, then the element keys with
				// "[]" (slice) or ".*" (map) appended to the prefix.
				if !containsType(field.Type.Elem(), decodedTypes) {
					tags = append(tags, getKeyNameFromTagAndPrefix(prefix, tag, false, false))
					tags = append(tags, readTags(getKeyNameFromTagAndPrefix(prefix, tag, kind == reflect.Slice, kind == reflect.Map), field.Type.Elem(), envSkip, deprecatedSkip)...)
					continue
				}
			case reflect.Slice:
				tags = append(tags, readTags(getKeyNameFromTagAndPrefix(prefix, tag, kind == reflect.Slice, kind == reflect.Map), field.Type.Elem(), envSkip, deprecatedSkip)...)
			}
		case reflect.Ptr:
			// Pointer fields are treated like their pointee types.
			switch field.Type.Elem().Kind() {
			case reflect.Struct:
				if !containsType(field.Type.Elem(), decodedTypes) {
					tags = append(tags, readTags(getKeyNameFromTagAndPrefix(prefix, tag, false, false), field.Type.Elem(), envSkip, deprecatedSkip)...)
					continue
				}
			case reflect.Slice, reflect.Map:
				k := field.Type.Elem().Elem().Kind()
				if envSkip && !isValueKind(k) {
					continue
				}
				if k == reflect.Struct {
					if !containsType(field.Type.Elem(), decodedTypes) {
						tags = append(tags, readTags(getKeyNameFromTagAndPrefix(prefix, tag, true, false), field.Type.Elem(), envSkip, deprecatedSkip)...)
						continue
					}
				}
			}
		}
		// Leaf field: emit its key.
		tags = append(tags, getKeyNameFromTagAndPrefix(prefix, tag, false, false))
	}
	return tags
}
func isValueKind(kind reflect.Kind) bool {
switch kind {
case reflect.Struct, reflect.Map, reflect.Slice, reflect.Array, reflect.Chan, reflect.Func, reflect.Interface, reflect.Pointer, reflect.UnsafePointer, reflect.Invalid, reflect.Uintptr:
return false
default:
return true
}
}
// isDeprecated reports whether the struct field's "jsonschema" tag contains
// the "deprecated" option.
func isDeprecated(field reflect.StructField) bool {
	tag, ok := field.Tag.Lookup("jsonschema")
	if !ok {
		return false
	}
	return utils.IsStringInSlice("deprecated", strings.Split(tag, ","))
}
// getKeyNameFromTagAndPrefix builds the configuration key for a field from
// its koanf tag name and the accumulated prefix, appending "[]" for slice
// elements and ".*" for map values. A ",squash" tag option folds the field
// into its parent key.
func getKeyNameFromTagAndPrefix(prefix, name string, isSlice, isMap bool) string {
	parts := strings.SplitN(name, ",", 2)
	key := parts[0]
	// With no prefix the key stands alone (the squash option is irrelevant).
	if prefix == "" {
		return key
	}
	if len(parts) == 2 && parts[1] == "squash" {
		return prefix
	}
	switch {
	case isMap && name == "":
		return fmt.Sprintf("%s.*", prefix)
	case isMap:
		return fmt.Sprintf("%s.%s.*", prefix, key)
	case isSlice && name == "":
		return fmt.Sprintf("%s[]", prefix)
	case isSlice:
		return fmt.Sprintf("%s.%s[]", prefix, key)
	default:
		return fmt.Sprintf("%s.%s", prefix, key)
	}
}
|
package controller
import (
"bubble/models"
"github.com/gin-gonic/gin"
"net/http"
)
// IndexHandle renders the index page template.
func IndexHandle(c *gin.Context) {
	c.HTML(http.StatusOK, "index.html", nil)
}
// CreateTodo binds a Todo from the request's JSON body, persists it, and
// responds with either the created record or an error payload.
func CreateTodo(c *gin.Context) {
	// Bind the JSON body; previously the bind error was ignored, so a
	// malformed body silently inserted a zero-value record.
	var todo models.Todo
	if err := c.BindJSON(&todo); err != nil {
		c.JSON(http.StatusOK, gin.H{
			"error": err.Error(),
		})
		return
	}
	if err := models.CreateOneTodo(&todo); err != nil {
		c.JSON(http.StatusOK, gin.H{
			"error": err.Error(),
		})
		return
	}
	c.JSON(http.StatusOK, todo)
}
// GetTodoList responds with every todo record, or an error payload when the
// lookup fails.
func GetTodoList(c *gin.Context) {
	list, err := models.GetAllTodo()
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"error": err.Error(),
		})
		return
	}
	c.JSON(http.StatusOK, list)
}
//func GetOneTodo(c *gin.Context) {
// id := c.Param("id")
// c.JSON(http.StatusOK,gin.H{
// "data":models.GetOneTodo(id),
// })
//}
// UpdateOneTodo loads the todo identified by the :id path parameter, applies
// the JSON body on top of it, and persists the result.
func UpdateOneTodo(c *gin.Context) {
	id, ok := c.Params.Get("id")
	if !ok {
		c.JSON(http.StatusOK, gin.H{
			"err": "无效id",
		})
		return
	}
	todo, err := models.GetATodo(id)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"error": err.Error(),
		})
		return
	}
	// Previously the bind error was ignored; a malformed body would persist
	// the unchanged record as if the update succeeded.
	if err := c.BindJSON(&todo); err != nil {
		c.JSON(http.StatusOK, gin.H{
			"error": err.Error(),
		})
		return
	}
	// UpdateATodo takes the value as returned by GetATodo; its exact type
	// (value vs pointer) is defined in models.
	if err := models.UpdateATodo(todo); err != nil {
		c.JSON(http.StatusOK, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, todo)
}
// DeleteOneTodo deletes the todo identified by the :id path parameter.
func DeleteOneTodo(c *gin.Context) {
	id, ok := c.Params.Get("id")
	if !ok {
		c.JSON(http.StatusOK, gin.H{
			"err": "无效id",
		})
		return
	}
	if err := models.DeleteATodo(id); err != nil {
		c.JSON(http.StatusOK, gin.H{"error": err.Error()})
		// Bug fix: previously execution fell through and wrote a second,
		// success-shaped response after the error payload.
		return
	}
	// NOTE(review): this keys the literal "delete" by the todo id, producing
	// e.g. {"1": "delete"}. That looks unintentional, but it is kept as-is
	// for API compatibility — confirm with consumers before changing it.
	c.JSON(http.StatusOK, gin.H{
		id: "delete",
	})
}
|
package sleepy
// SentPacketBuffer tracks metadata for recently sent packets, keyed by a
// 16-bit sequence number modulo the buffer capacity.
type SentPacketBuffer struct {
	buf     SequenceBuffer // sequence bookkeeping: latest seq and slot ownership
	entries []SentPacket   // per-slot packet metadata, indexed by seq % cap
}
// NewSentPacketBuffer returns a buffer able to track cap in-flight packets
// at a time.
func NewSentPacketBuffer(cap uint16) *SentPacketBuffer {
	// make([]T, n) already sets capacity to n; the explicit third argument
	// in make([]SentPacket, cap, cap) was redundant.
	return &SentPacketBuffer{
		buf:     NewSequenceBuffer(cap),
		entries: make([]SentPacket, cap),
	}
}
// Insert reserves the ring slot for a newly sent packet with sequence number
// seq and returns a pointer to its entry, or nil when seq is too old to be
// tracked.
func (s *SentPacketBuffer) Insert(seq uint16) *SentPacket {
	// Packet is outdated. Ignore.
	if s.IsOutdated(seq) {
		return nil
	}
	// Packet sent has a sequence number larger than all other packets in the sent back buffer. Empty out stale entries
	// and increment the latest known-so-far sent packet sequence number.
	// (seqGreaterThan/RemoveRange are presumably wrap-around aware — see
	// their definitions elsewhere in this package.)
	if seqGreaterThan(seq+1, s.buf.latest) {
		s.buf.RemoveRange(s.buf.latest, seq)
		s.buf.latest = seq + 1
	}
	// Claim the slot and record which sequence number now owns it, so Find
	// can detect stale lookups.
	i := seq % uint16(cap(s.entries))
	s.buf.entries[i] = uint32(seq)
	return &s.entries[i]
}
// Find returns the stored entry for seq, or nil when the ring slot no longer
// holds that sequence number.
func (s *SentPacketBuffer) Find(seq uint16) *SentPacket {
	idx := seq % uint16(cap(s.entries))
	if s.buf.entries[idx] != uint32(seq) {
		return nil
	}
	return &s.entries[idx]
}
// IsOutdated reports whether seq has fallen out of the tracking window,
// i.e. it trails the latest sequence number by more than the buffer
// capacity (seqLessThan is presumably wrap-around aware — see its
// definition elsewhere in this package).
func (s *SentPacketBuffer) IsOutdated(seq uint16) bool {
	return seqLessThan(seq, s.buf.latest-uint16(cap(s.entries)))
}
|
package main
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"strings"
)
// main reads the desired GOPATH from os.Args[1], validates it, backs up the
// user's shell rc file as <rc>.pre.gos, and rewrites its "export GOPATH="
// line. Any failure prints to stderr and exits with status 2 via terror.
func main() {
	if len(os.Args) < 2 {
		terror(errors.New("missing gopath"))
	}
	// Normalize and validate the requested GOPATH (must be an existing dir).
	gopath, err := format(os.Args[1])
	if err != nil {
		terror(err)
	}
	// Locate the shell rc file (currently zsh only).
	shrc, err := shellrc()
	if err != nil {
		terror(err)
	}
	// Back up before modifying, so the user can restore the original.
	if err := backup(shrc, shrc+".pre.gos"); err != nil {
		terror(err)
	}
	if err := modify(shrc, gopath); err != nil {
		terror(err)
	}
}
// format normalizes gopath (trimming one trailing slash) and verifies that
// it names an existing directory. It returns the normalized path, and an
// error when the path is empty, does not exist, or is not a directory.
func format(gopath string) (string, error) {
	// Bug fix: the original indexed gopath[len(gopath)-1] and panicked on
	// an empty argument.
	if gopath == "" {
		return "", errors.New("empty gopath")
	}
	gopath = strings.TrimSuffix(gopath, "/")
	fi, err := os.Stat(gopath)
	if err != nil {
		return gopath, err
	}
	if !fi.IsDir() {
		return gopath, fmt.Errorf("%s no such directory", fi.Name())
	}
	return gopath, nil
}
func shellrc() (string, error) {
var rc string
u, err := user.Current()
if err != nil {
return rc, err
}
rc = u.HomeDir
if sh := os.Getenv("SHELL"); !strings.Contains(sh, "zsh") {
return rc, fmt.Errorf("%s is not supported", sh)
}
return rc + "/.zshrc", nil
}
func backup(src, dest string) error {
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
df, err := os.Create(dest)
if err != nil {
return err
}
defer df.Close()
if _, err = io.Copy(df, sf); err != nil {
return err
}
if err := df.Sync(); err != nil {
return err
}
return nil
}
// modify rewrites the existing "export GOPATH=" line in the rc file at path
// to point at gopath. It fails when no such line exists.
func modify(path, gopath string) error {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	lines := strings.Split(string(content), "\n")
	idx, err := position(lines)
	if err != nil {
		return err
	}
	lines[idx] = fmt.Sprintf("export GOPATH=%s", gopath)
	return ioutil.WriteFile(path, []byte(strings.Join(lines, "\n")), 0644)
}
// position returns the index of the first line that already exports GOPATH,
// or -1 and an error when none is found.
func position(lines []string) (int, error) {
	const marker = "export GOPATH="
	for idx := range lines {
		if strings.HasPrefix(lines[idx], marker) {
			return idx, nil
		}
	}
	return -1, errors.New("GOPATH not found")
}
// terror prints err to stderr and terminates the process with exit status 2.
func terror(err error) {
	fmt.Fprintln(os.Stderr, err.Error())
	os.Exit(2)
}
|
package counter
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestGaugeCounter verifies that a gauge-backed group accumulates signed
// deltas and resets back to zero.
func TestGaugeCounter(t *testing.T) {
	key := "server"
	g := &Group{
		New: func() Counter {
			return NewGauge()
		},
	}
	g.Add(key, 1)
	g.Add(key, 2)
	g.Add(key, 3)
	g.Add(key, -1)
	// assert.Equal takes (t, expected, actual); the original had the two
	// values swapped, which garbles failure messages.
	assert.Equal(t, int64(5), g.Value(key))
	g.Reset(key)
	assert.Equal(t, int64(0), g.Value(key))
}
// TestRollingCounter verifies that a rolling-window counter expires buckets
// over time (1s window, 10 buckets). Timing-sensitive: it relies on real
// sleeps and may flake on heavily loaded machines.
func TestRollingCounter(t *testing.T) {
	key := "server"
	g := &Group{
		New: func() Counter {
			return NewRolling(time.Second, 10)
		},
	}
	// assert.Equal takes (t, expected, actual); the original had the two
	// values swapped throughout, which garbles failure messages.
	t.Run("add_key_b1", func(t *testing.T) {
		g.Add(key, 1)
		assert.Equal(t, int64(1), g.Value(key))
	})
	time.Sleep(time.Millisecond * 110)
	t.Run("add_key_b2", func(t *testing.T) {
		g.Add(key, 1)
		assert.Equal(t, int64(2), g.Value(key))
	})
	time.Sleep(time.Millisecond * 900) // expire one bucket, 110 + 900
	t.Run("expire_b1", func(t *testing.T) {
		assert.Equal(t, int64(1), g.Value(key))
		g.Add(key, 1)
		assert.Equal(t, int64(2), g.Value(key)) // expire one bucket
	})
	time.Sleep(time.Millisecond * 1100)
	t.Run("expire_all", func(t *testing.T) {
		assert.Equal(t, int64(0), g.Value(key))
	})
	t.Run("reset", func(t *testing.T) {
		g.Reset(key)
		assert.Equal(t, int64(0), g.Value(key))
	})
}
|
/**
* The count-and-say sequence is the sequence of integers beginning as follows:
* 1, 11, 21, 1211, 111221, ...
*/
// countAndSay returns the n-th term of the count-and-say sequence
// (1, 11, 21, 1211, 111221, ...): each term reads the previous one aloud
// as runs of "<count><digit>".
//
// Cleanup over the original: builds ASCII digits directly instead of raw
// byte values fixed up with +48 at the end, drops the non-gofmt
// parenthesized condition, and avoids duplicating the trailing run-flush.
// Run lengths never exceed 3 in this sequence, so a single digit per count
// is always sufficient.
func countAndSay(n int) string {
	s := []byte{'1'}
	for k := 1; k < n; k++ {
		var next []byte
		for i := 0; i < len(s); {
			// Find the end of the current run of equal digits.
			j := i
			for j < len(s) && s[j] == s[i] {
				j++
			}
			next = append(next, byte('0'+j-i), s[i])
			i = j
		}
		s = next
	}
	return string(s)
}
|
package problem0367
import "testing"
// TestIsPerfectSquare checks isPerfectSquare against known squares and a
// non-square. The original only t.Log'ed the results and could never fail.
// NOTE(review): expectations follow the standard LeetCode 367 contract
// (int -> bool) — confirm against this package's implementation.
func TestIsPerfectSquare(t *testing.T) {
	cases := []struct {
		in   int
		want bool
	}{
		{1, true},
		{100, true},
		{108, false},
	}
	for _, c := range cases {
		if got := isPerfectSquare(c.in); got != c.want {
			t.Errorf("isPerfectSquare(%d) = %v, want %v", c.in, got, c.want)
		}
	}
}
|
package netqos
import (
"time"
"testing"
)
// TestQos drives the server QoS stats: 100 simulated connections, each with
// a goroutine recording a read every 2s for about a minute.
// Fixes over the original: the tickers are now stopped and the goroutines
// given a shutdown signal, instead of leaking 100 tickers/goroutines that
// spin forever.
func TestQos(t *testing.T) {
	q := NewServerQos()
	q.Stat()
	done := make(chan struct{})
	defer close(done)
	for i := 0; i < 100; i++ {
		q.StatAccpetConns()
		go func() {
			ticker := time.NewTicker(2 * time.Second)
			defer ticker.Stop()
			for {
				select {
				case <-ticker.C:
					q.StatReadMsgs()
				case <-done:
					return
				}
			}
		}()
	}
	// Let the stats accumulate; shortened runs would change what the test
	// observes, so the original duration is kept.
	time.Sleep(time.Minute * 1)
}
|
package main
import (
"github.com/munusamy/cms"
"os"
)
// main renders the sample page through the "index" template to stdout.
func main() {
	p := &cms.Page{
		Title:   "Hello, world!",
		Content: "This is the body of our webpage", // fixed typo: "webapge"
	}
	// Bug fix: the template-execution error was silently discarded.
	if err := cms.Tmpl.ExecuteTemplate(os.Stdout, "index", p); err != nil {
		panic(err)
	}
}
|
package main
import (
"fmt"
"github.com/asppj/droneDeploy/conf"
http2 "github.com/asppj/droneDeploy/internal/http"
"github.com/spf13/pflag"
)
// main prints the build version and, unless only the version was requested
// via -v/--version, starts the HTTP server.
func main() {
	showVersion := pflag.BoolP("version", "v", false, "show version info.")
	pflag.Parse()
	fmt.Printf("%s\n\n", conf.BuildVersion())
	// pflag.BoolP never returns nil, so the previous nil-guard was dead code.
	if *showVersion {
		return
	}
	http2.Init()
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//321. Create Maximum Number
//Given two arrays of length m and n with digits 0-9 representing two numbers. Create the maximum number of length k <= m + n from digits of the two. The relative order of the digits from the same array must be preserved. Return an array of the k digits. You should try to optimize your time and space complexity.
//Example 1:
//nums1 = [3, 4, 6, 5]
//nums2 = [9, 1, 2, 5, 8, 3]
//k = 5
//return [9, 8, 6, 5, 3]
//Example 2:
//nums1 = [6, 7]
//nums2 = [6, 0, 4]
//k = 5
//return [6, 7, 6, 0, 4]
//Example 3:
//nums1 = [3, 9]
//nums2 = [8, 9]
//k = 3
//return [9, 8, 9]
//Credits:
//Special thanks to @dietpepsi for adding this problem and creating all test cases.
//func maxNumber(nums1 []int, nums2 []int, k int) []int {
//}
// Time Is Money
|
// Copyright 2019-2023 The sakuracloud_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"context"
"fmt"
"log/slog"
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/sacloud/iaas-api-go"
"github.com/sacloud/sakuracloud_exporter/platform"
)
// ESMECollector collects metrics about all esme.
// ESMECollector collects metrics about all esme.
type ESMECollector struct {
	ctx    context.Context
	logger *slog.Logger
	errors *prometheus.CounterVec // exporter-wide error counter, incremented under label "esme"
	client platform.ESMEClient

	ESMEInfo     *prometheus.Desc // constant-1 info metric labeled id/name/tags/description
	MessageCount *prometheus.Desc // message count per status, plus an "All" total
}
// NewESMECollector returns a new ESMECollector.
// NewESMECollector returns a new ESMECollector.
func NewESMECollector(ctx context.Context, logger *slog.Logger, errors *prometheus.CounterVec, client platform.ESMEClient) *ESMECollector {
	errors.WithLabelValues("esme").Add(0)

	// Build each label set independently. The original appended twice to
	// one shared `labels` slice; that only worked because the slice had no
	// spare capacity — with spare capacity the second append would clobber
	// the first (classic slice aliasing).
	infoLabels := []string{"id", "name", "tags", "description"}
	messageLabels := []string{"id", "name", "status"}

	return &ESMECollector{
		ctx:    ctx,
		logger: logger,
		errors: errors,
		client: client,
		ESMEInfo: prometheus.NewDesc(
			"sakuracloud_esme_info",
			"A metric with a constant '1' value labeled by ESME information",
			infoLabels, nil,
		),
		MessageCount: prometheus.NewDesc(
			"sakuracloud_esme_message_count",
			"A count of messages handled by ESME",
			messageLabels, nil,
		),
	}
}
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector.
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector.
func (c *ESMECollector) Describe(ch chan<- *prometheus.Desc) {
	for _, d := range []*prometheus.Desc{c.ESMEInfo, c.MessageCount} {
		ch <- d
	}
}
// Collect is called by the Prometheus registry when collecting metrics.
// Collect is called by the Prometheus registry when collecting metrics.
// It lists all ESMEs, emits their info metrics synchronously, and gathers
// per-ESME log counts concurrently.
func (c *ESMECollector) Collect(ch chan<- prometheus.Metric) {
	searched, err := c.client.Find(c.ctx)
	if err != nil {
		c.errors.WithLabelValues("esme").Add(1)
		c.logger.Warn(
			"can't list ESME",
			slog.Any("err", err),
		)
		// Bug fix: return explicitly on failure. The original fell through
		// with a nil result (net no-op), which only worked by accident.
		return
	}
	var wg sync.WaitGroup
	for i := range searched {
		esme := searched[i] // pin per-iteration value for the goroutine
		c.collectESMEInfo(ch, esme)
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.collectLogs(ch, esme)
		}()
	}
	// ch must stay open until all emitters are done.
	wg.Wait()
}
// esmeLabels returns the base label values (id, name) shared by all
// per-ESME metrics. The returned slice has no spare capacity, so callers
// that append to it get a fresh backing array.
func (c *ESMECollector) esmeLabels(esme *iaas.ESME) []string {
	return []string{
		esme.ID.String(),
		esme.Name,
	}
}
// collectESMEInfo emits the constant-1 info metric for esme, labeled with
// its id, name, flattened tags and description.
func (c *ESMECollector) collectESMEInfo(ch chan<- prometheus.Metric, esme *iaas.ESME) {
	labels := c.esmeLabels(esme)
	labels = append(labels, flattenStringSlice(esme.Tags), esme.Description)
	ch <- prometheus.MustNewConstMetric(
		c.ESMEInfo,
		prometheus.GaugeValue,
		1,
		labels...,
	)
}
// collectLogs fetches the message logs for esme and emits the total message
// count (status "All") plus one count per distinct status.
func (c *ESMECollector) collectLogs(ch chan<- prometheus.Metric, esme *iaas.ESME) {
	logs, err := c.client.Logs(c.ctx, esme.ID)
	if err != nil {
		c.errors.WithLabelValues("esme").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't collect logs of the esme[%s]", esme.ID.String()),
			slog.Any("err", err),
		)
		return
	}
	labels := c.esmeLabels(esme)

	// Copy before appending so each label set owns its backing array; the
	// original appended to the same base slice several times, which is only
	// safe while that slice has zero spare capacity.
	labelsForAll := append(append([]string{}, labels...), "All")
	ch <- prometheus.MustNewConstMetric(
		c.MessageCount,
		prometheus.GaugeValue,
		float64(len(logs)),
		labelsForAll...,
	)

	// Tally logs per status; map entries default to zero, so the original's
	// explicit zero-initialization was redundant.
	statusCounts := make(map[string]int)
	for _, l := range logs {
		statusCounts[l.Status]++
	}
	for status, n := range statusCounts {
		labelsPerStatus := append(append([]string{}, labels...), status)
		ch <- prometheus.MustNewConstMetric(
			c.MessageCount,
			prometheus.GaugeValue,
			float64(n),
			labelsPerStatus...,
		)
	}
}
|
package mat
import (
"github.com/stretchr/testify/assert"
"math"
"testing"
)
// rt2 is √2/2 (the sine/cosine of 45°), used to build 45° eye vectors.
var rt2 = math.Sqrt(2.0) / 2.0
// TestNewLight checks that a point light stores its position and intensity.
func TestNewLight(t *testing.T) {
	pos := NewPoint(0, 0, 0)
	intensity := NewColor(1, 1, 1)
	l := NewLight(pos, intensity)
	assert.True(t, TupleEquals(l.Position, pos))
	assert.True(t, TupleEquals(l.Intensity, intensity))
}
// setupBase returns the shared fixture for the lighting tests: a unit
// sphere, a default material, and the world origin as the lit point.
func setupBase() (Shape, Material, Tuple4) {
	sphere := NewSphere()
	material := NewDefaultMaterial()
	origin := NewPoint(0, 0, 0)
	return sphere, material, origin
}
// TestLightEyeBetweenLightAndSphere: with the eye directly between the
// light and the surface, every channel reaches full intensity 1.9.
func TestLightEyeBetweenLightAndSphere(t *testing.T) {
	shape, m, pos := setupBase()
	eye := NewVector(0, 0, -1)
	normal := NewVector(0, 0, -1)
	l := NewLight(NewPoint(0, 0, -10), NewColor(1, 1, 1))
	got := LightingPointLight(m, shape, l, pos, eye, normal, false, NewLightData())
	for i := 0; i < 3; i++ {
		assert.InEpsilon(t, 1.9, got.Get(i), Epsilon)
	}
}
// TestLight180ToSurfaceEye45: with the light offset 45° from the normal,
// each channel drops to ≈0.7364.
func TestLight180ToSurfaceEye45(t *testing.T) {
	shape, m, pos := setupBase()
	eye := NewVector(0, 0, -1)
	normal := NewVector(0, 0, -1)
	l := NewLight(NewPoint(0, 10, -10), NewColor(1, 1, 1))
	got := LightingPointLight(m, shape, l, pos, eye, normal, false, NewLightData())
	for i := 0; i < 3; i++ {
		assert.InEpsilon(t, 0.7364, got.Get(i), Epsilon)
	}
}
// TestLight45ToSurfaceEye180: eye offset 45° above the normal while the
// light is head-on yields exactly 1.0 per channel.
func TestLight45ToSurfaceEye180(t *testing.T) {
	shape, m, pos := setupBase()
	eye := NewVector(0, rt2, -rt2)
	normal := NewVector(0, 0, -1)
	l := NewLight(NewPoint(0, 0, -10), NewColor(1, 1, 1))
	got := LightingPointLight(m, shape, l, pos, eye, normal, false, NewLightData())
	for i := 0; i < 3; i++ {
		assert.Equal(t, 1.0, got.Get(i))
	}
}
// TestLight45ToSurfaceEye45: eye in the reflection path of a 45° light
// picks up the full specular highlight (≈1.6364 per channel).
func TestLight45ToSurfaceEye45(t *testing.T) {
	shape, m, pos := setupBase()
	eye := NewVector(0, -rt2, -rt2)
	normal := NewVector(0, 0, -1)
	l := NewLight(NewPoint(0, 10, -10), NewColor(1, 1, 1))
	got := LightingPointLight(m, shape, l, pos, eye, normal, false, NewLightData())
	for i := 0; i < 3; i++ {
		assert.InEpsilon(t, 1.6364, got.Get(i), Epsilon)
	}
}
// TestLightBehind: with the light behind the surface only the ambient term
// (0.1) remains per channel.
func TestLightBehind(t *testing.T) {
	shape, m, pos := setupBase()
	eye := NewVector(0, 0, -1)
	normal := NewVector(0, 0, -1)
	l := NewLight(NewPoint(0, 0, 10), NewColor(1, 1, 1))
	got := LightingPointLight(m, shape, l, pos, eye, normal, false, NewLightData())
	for i := 0; i < 3; i++ {
		assert.Equal(t, 0.1, got.Get(i))
	}
}
// TestCreateAreaLight checks that NewAreaLight derives the per-cell step
// vectors (edge / steps), the sample count (USteps*VSteps), and a position
// at the center of the light's rectangle.
func TestCreateAreaLight(t *testing.T) {
	corner := NewPoint(0, 0, 0)
	v1 := NewVector(2, 0, 0)
	v2 := NewVector(0, 0, 1)
	light := NewAreaLight(corner, v1, 4, v2, 2, NewColor(1, 1, 1))
	assert.Equal(t, corner, light.Corner)
	assert.Equal(t, NewVector(0.5, 0, 0), light.UVec) // 2/4 per u-step
	assert.Equal(t, 4, light.USteps)
	assert.Equal(t, NewVector(0, 0, 0.5), light.VVec) // 1/2 per v-step
	assert.Equal(t, 2, light.VSteps)
	assert.Equal(t, 8.0, light.Samples) // 4 * 2 cells
	assert.True(t, TupleXYZEq(NewPoint(1, 0, 0.5), light.Position))
}
// TestFindPointOnAreaLight checks that PointOnLightNoRandom maps (u, v)
// cell coordinates to the center of the corresponding cell on the light's
// rectangle (cells are 0.5 wide in u and 0.5 deep in v here).
func TestFindPointOnAreaLight(t *testing.T) {
	corner := NewPoint(0, 0, 0)
	v1 := NewVector(2, 0, 0)
	v2 := NewVector(0, 0, 1)
	light := NewAreaLight(corner, v1, 4, v2, 2, NewColor(1, 1, 1))
	testcases := []struct {
		u      float64
		v      float64
		result Tuple4
	}{
		{u: 0, v: 0, result: NewPoint(0.25, 0, 0.25)},
		{u: 1, v: 0, result: NewPoint(0.75, 0, 0.25)},
		{u: 0, v: 1, result: NewPoint(0.25, 0, 0.75)},
		{u: 2, v: 0, result: NewPoint(1.25, 0, 0.25)},
		{u: 3, v: 1, result: NewPoint(1.75, 0, 0.75)},
	}
	for _, tc := range testcases {
		pt := PointOnLightNoRandom(light, tc.u, tc.v)
		assert.True(t, TupleXYZEq(pt, tc.result))
	}
}
// TestLightingSamplesAreaLight checks Lighting against a 2x2-sample area
// light for two surface points on a unit sphere. Expected colors use a
// widened tolerance (Epsilon*100) because the multi-sample average is only
// quoted to four decimal places.
func TestLightingSamplesAreaLight(t *testing.T) {
	corner := NewPoint(-0.5, -0.5, -5)
	v1 := NewVector(1, 0, 0)
	v2 := NewVector(0, 1, 0)
	light := NewAreaLight(corner, v1, 2, v2, 2, NewColor(1, 1, 1))
	sh := NewSphere()
	sh.Material.Ambient = 0.1
	sh.Material.Diffuse = 0.9
	sh.Material.Specular = 0
	sh.Material.Color = NewColor(1, 1, 1)
	eye := NewPoint(0, 0, -5)
	testcases := []struct {
		point Tuple4
		color Tuple4
	}{
		{point: NewPoint(0, 0, -1), color: NewColor(0.9965, 0.9965, 0.9965)},
		{point: NewPoint(0, 0.7071, -0.7071), color: NewColor(0.6232, 0.6232, 0.6232)},
	}
	for _, tc := range testcases {
		eyev := Normalize(Sub(eye, tc.point))
		// On a unit sphere centered at the origin, the point doubles as
		// its own surface normal.
		normalv := NewVector(tc.point[0], tc.point[1], tc.point[2])
		result := Lighting(sh.Material, sh, light, tc.point, eyev, normalv, 1.0, NewLightData())
		assert.InEpsilon(t, tc.color[0], result[0], Epsilon*100)
		assert.InEpsilon(t, tc.color[1], result[1], Epsilon*100)
		assert.InEpsilon(t, tc.color[2], result[2], Epsilon*100)
	}
}
|
package docgen
import (
"bytes"
"io"
"github.com/saschagrunert/go-docgen/internal/writer"
"github.com/cpuguy83/go-md2man/md2man"
"github.com/urfave/cli"
)
// CliToMarkdown converts a given `cli.App` to a markdown string.
// The function errors if either parsing or writing of the string fails.
func CliToMarkdown(app *cli.App) (string, error) {
	var buf bytes.Buffer
	err := write(app, &buf)
	if err != nil {
		return "", err
	}
	return buf.String(), nil
}
// CliToMan converts a given `cli.App` to a man page string by rendering the
// markdown form through md2man.
// The function errors if either parsing or writing of the string fails.
func CliToMan(app *cli.App) (string, error) {
	var buf bytes.Buffer
	if err := write(app, &buf); err != nil {
		return "", err
	}
	return string(md2man.Render(buf.Bytes())), nil
}
// write renders app's documentation to w via the internal writer package.
func write(app *cli.App, w io.Writer) error {
	return writer.New(app).Write(w)
}
|
package token
import "fmt"
// Token is a lexical token: an embedded Type tag plus the literal text.
type Token struct {
	Type
	Lit
}

// Type names the category of a token (see the constants below).
type Type string

// Lit holds a token's literal text as a rune slice.
type Lit []rune
// Types
// Token type names. These are untyped string constants, so they can be
// used both as Type values and as plain strings; giving them an explicit
// Type would risk breaking string-typed callers.
const (
	INVALID  = "INVALID"
	EOF      = "EOF"
	COMMA    = "COMMA"
	COLON    = "COLON"
	EQUAL    = "EQUAL"
	LBRACE   = "LBRACE"
	RBRACE   = "RBRACE"
	LBRACKET = "LBRACKET"
	RBRACKET = "RBRACKET"
	STRING   = "STRING"
	INTEGER  = "INTEGER"
	NEWLINE  = "NEWLINE"
	LIBRARY  = "LIBRARY"
	PLUS     = "PLUS"
	LTHAN    = "LTHAN"
	BOOLEAN  = "BOOLEAN"
)
// NewToken builds a Token of the given type from the literal text.
func NewToken(typ Type, lit string) Token {
	tok := Token{Type: typ}
	tok.Lit = []rune(lit)
	return tok
}
// String renders the token as "token.TYPE: literal" for debugging.
// NOTE(review): pointer receiver — plain Token values (as opposed to
// *Token) do not satisfy fmt.Stringer.
func (t *Token) String() string {
	return fmt.Sprintf("token.%s: %s", t.Type, string(t.Lit))
}
|
package test
import (
"path/filepath"
"github.com/astaxie/beego"
"runtime"
"testing"
"github.com/SungKing/blogsystem/models/dao"
"github.com/SungKing/blogsystem/models/entity"
"time"
"fmt"
"github.com/astaxie/beego/orm"
"encoding/json"
"os"
)
// init bootstraps the beego test environment and the MySQL-backed ORM:
// it resolves the app path, registers the driver, data source, and models,
// enables ORM debug logging when configured, and syncs the schema.
// Setup errors that were previously ignored now panic, so a broken test
// database surfaces immediately instead of as confusing downstream
// failures.
func init() {
	_, file, _, _ := runtime.Caller(1)
	apppath, _ := filepath.Abs(filepath.Dir(filepath.Join(file, ".."+string(filepath.Separator))))
	beego.TestBeegoInit(apppath)
	if err := orm.RegisterDriver("mysql", orm.DRMySQL); err != nil {
		panic(err)
	}
	dbUser := beego.AppConfig.String("db.user")
	dbPwd := beego.AppConfig.String("db.password")
	dbAddr := beego.AppConfig.String("db.addr")
	dbName := beego.AppConfig.String("db.database")
	dataSourceUrl := fmt.Sprintf("%v:%v@tcp(%v)/%v?charset=utf8", dbUser, dbPwd, dbAddr, dbName)
	if err := orm.RegisterDataBase("default", "mysql", dataSourceUrl, 30, 30); err != nil {
		panic(err)
	}
	// Register every entity the tests touch.
	orm.RegisterModel(
		new(entity.User),
		new(entity.Blog),
		new(entity.Image),
		new(entity.Comment),
		new(entity.Tag),
		new(entity.UserSetting),
	)
	// Enable ORM debug logging when configured (开启测试模式).
	if dbDebug, _ := beego.AppConfig.Bool("db.debug"); dbDebug {
		orm.Debug = true
	}
	if err := orm.RunSyncdb("default", false, true); err != nil {
		panic(err)
	}
}
// TestInsert inserts one sample blog row and prints the insert result.
func TestInsert(t *testing.T) {
	blogDao := new(dao.BlogDao)
	n := blogDao.Insert([]entity.Blog{
		{
			Title:      "文章2",
			CreateTime: time.Now(),
			VisitCount: 0,
			Context:    "<h2>hello world!</h2>",
			Tag:        1,
			Words:      37,
			CommentNum: 0,
			UserId:     1,
			CoverPic:   "",
			Summary:    "test",
		},
	})
	fmt.Println(n)
}
// TestQuerySimple fetches the first page of blogs and prints it as JSON.
func TestQuerySimple(t *testing.T) {
	blogDao := new(dao.BlogDao)
	page := blogDao.QuerySimple(1, 10, 0)
	encoded, _ := json.Marshal(page)
	fmt.Println(string(encoded))
}
// TestGetOne fetches blog #1 and prints it as JSON.
func TestGetOne(t *testing.T) {
	blogDao := new(dao.BlogDao)
	record := blogDao.Get(1)
	encoded, _ := json.Marshal(record)
	fmt.Println(string(encoded))
}
// TestInsert_2 inserts a blog whose Context is a large, realistic HTML
// document (headings, tables, lists, long CJK paragraphs) to exercise
// storage and retrieval of rich content. The HTML below is fixture data
// and must not be altered.
func TestInsert_2(t *testing.T) {
	var con = "<h1>这是标题</h1>\n" +
		"        <h2>这是标题</h2>\n" +
		"        <h3>这是标题</h3>\n" +
		"        <h4>这是标题</h4>\n" +
		"        <h5>这是标题</h5>\n" +
		"        <h1>HEADER ONE</h1>\n" +
		"        <h2>HEADER ONE</h2>\n" +
		"        <h3>HEADER ONE</h3>\n" +
		"        <h4>HEADER ONE</h4>\n" +
		"        <h5>HEADER ONE</h5>\n" +
		"        <h1>表格:</h1>\n" +
		"        <table class=\"am-table am-table-bordered\">\n" +
		"            <thead>\n" +
		"            <tr>\n" +
		"                <th>网站名称</th>\n" +
		"                <th>网址</th>\n" +
		"                <th>创建时间</th>\n" +
		"            </tr>\n" +
		"            </thead>\n" +
		"            <tbody>\n" +
		"            <tr>\n" +
		"                <td>Amaze UI</td>\n" +
		"                <td>http://amazeui.org</td>\n" +
		"                <td>2012-10-01</td>\n" +
		"            </tr>\n" +
		"            <tr>\n" +
		"                <td>Amaze UI</td>\n" +
		"                <td>http://amazeui.org</td>\n" +
		"                <td>2012-10-01</td>\n" +
		"            </tr>\n" +
		"            <tr>\n" +
		"                <td>Amaze UI(Active)</td>\n" +
		"                <td>http://amazeui.org</td>\n" +
		"                <td>2012-10-01</td>\n" +
		"            </tr>\n" +
		"            <tr>\n" +
		"                <td>Amaze UI</td>\n" +
		"                <td>http://amazeui.org</td>\n" +
		"                <td>2012-10-01</td>\n" +
		"            </tr>\n" +
		"            <tr>\n" +
		"                <td>Amaze UI</td>\n" +
		"                <td>http://amazeui.org</td>\n" +
		"                <td>2012-10-01</td>\n" +
		"            </tr>\n" +
		"            </tbody>\n" +
		"        </table>\n" +
		"        <hr>\n" +
		"        <h1>自定义列表:</h1>\n" +
		"        <ul class=\"am-list am-list-border\">\n" +
		"            <li><a href=\"#\"><i class=\"am-icon-home am-icon-fw\"></i>\n" +
		"                每个人都有一个死角, 自己走不出来,别人也闯不进去。</a></li>\n" +
		"            <li><a href=\"#\"> <i class=\"am-icon-book am-icon-fw\"></i>\n" +
		"                我把最深沉的秘密放在那里。</a></li>\n" +
		"            <li><a href=\"#\"><i class=\"am-icon-pencil am-icon-fw\"></i>你不懂我,我不怪你。</a></li>\n" +
		"        </ul>\n" +
		"        <h1>有序列表:</h1>\n" +
		"        <ol>\n" +
		"            <li>List item one<ol>\n" +
		"                <li>List item one<ol>\n" +
		"                    <li>List item one</li>\n" +
		"                    <li>List item two</li>\n" +
		"                    <li>List item three</li>\n" +
		"                    <li>List item four</li>\n" +
		"                </ol>\n" +
		"                </li>\n" +
		"                <li>List item two</li>\n" +
		"                <li>List item three</li>\n" +
		"                <li>List item four</li>\n" +
		"            </ol>\n" +
		"            </li>\n" +
		"            <li>List item two</li>\n" +
		"            <li>List item three</li>\n" +
		"            <li>List item four</li>\n" +
		"        </ol>\n" +
		"        <h1>无序列表:</h1>\n" +
		"        <ul>\n" +
		"            <li>List item one<ul>\n" +
		"                <li>List item one<ul>\n" +
		"                    <li>List item one</li>\n" +
		"                    <li>List item two</li>\n" +
		"                    <li>List item three</li>\n" +
		"                    <li>List item four</li>\n" +
		"                </ul>\n" +
		"                </li>\n" +
		"                <li>List item two</li>\n" +
		"                <li>List item three</li>\n" +
		"                <li>List item four</li>\n" +
		"            </ul>\n" +
		"            </li>\n" +
		"            <li>List item two</li>\n" +
		"            <li>List item three</li>\n" +
		"            <li>List item four</li>\n" +
		"        </ul>\n" +
		"        <h1>一大段文字:</h1>\n" +
		"        <p>我遇见了你,你一直觉得自己不够美好,充满惊慌与卑微,但是,你即使那样害怕,却守在我的身后,不曾后退。人们总是崇拜英雄,惊喜与他们随时随地的光芒,但是最真实的事情永远是我们都会恐惧都会失败,有时受尽打击,又是无能为力。你说我是英雄,其实我不敢告诉你,如果不是你,你傻傻地倔强地站在那里,我一回头就能看见你,那么,或许,我早就做了命运的逃兵。<br><br>\n" +
		"            红衣佳人白衣友,朝与同歌暮同酒,世人谓我恋长安,其实只恋长安某。 ——殊同 <br>\n" +
		"\n" +
		"            水银泻地的时候,忧愁穿过墙壁,又和着嘶哑的音乐,使我羞惭、灰心。整个夏季,仿佛一场,没有主题的游戏。 ——赵野 <br>\n" +
		"\n" +
		"            我感到自己仿佛变得蔚蓝,变得无边无际,眼睛和指尖上,栖息着无数的星辰。 ——马林·索雷斯库 <br>\n" +
		"\n" +
		"            手中的书页犹如薄薄的剃刀片白亮亮闪着寒光。在凌晨四时寂寞的时刻里,我可以听到孤独之根正一点点伸长的声音。 ——村上春树 <br>\n" +
		"\n" +
		"            你的明眸是映现我灵魂颤动的湖,我那成群结队的梦想,为寻求解脱,而纷纷投入你这秋波深处。 ——波德莱尔 <br>\n" +
		"\n" +
		"            当我们只剩下虚无,唯有沉默的太阳够好,是你的静寂,是无声的声音。 ——佩索阿\n" +
		"        </p>\n" +
		"        <p></p>"
	// Build the blog record around the fixture content.
	blog := entity.Blog{
		Title:      "我的文章",
		CreateTime: time.Now(),
		VisitCount: 12,
		Context:    con,
		Tag:        1,
		Words:      15,
		CommentNum: 0,
		UserId:     0,
		CoverPic:   "/static/images/f10.jpg",
		Summary: "<p>People think focus means saying yes to the thing you’ve got to focus on. But that’s not\n" +
			"                        what it means at all. It means saying no to the hundred other good ideas that there\n" +
			"                        are. You have to pick carefully. I’m actually as proud of the things we haven’t done as\n" +
			"                        the things I have done. Innovation is saying no to 1,000 things.</p>\n" +
			"                    <footer><cite>Steve Jobs</cite> – Apple Worldwide Developers’ Conference, 1997</footer>",
	}
	var blogDao = new(dao.BlogDao)
	blogDao.Insert([]entity.Blog{blog})
}
// TestInsert_3 inserts one image record with a fixed id and path.
func TestInsert_3(t *testing.T) {
	imgDao := new(dao.ImageDao)
	img := entity.Image{
		Id:         1,
		Path:       "/Users/song.wang/go/src/github.com/SungKing/blogsystem/static/images/01.jpg",
		UId:        "",
		CreateTime: time.Now(),
	}
	imgDao.Insert(img)
}
// TestQuery_1 fetches image #1 and prints it.
func TestQuery_1(t *testing.T) {
	imgDao := new(dao.ImageDao)
	img := imgDao.GetOne(1)
	fmt.Println(img)
}
// TestMkDir verifies that nested directories can be created.
// Fixes over the original: uses a temp location instead of a hard-coded,
// machine-specific user path; uses 0755 instead of 0666 (directories need
// the execute bit to be traversable, so 0666 made the tree unusable); and
// actually fails the test on error instead of just printing it.
func TestMkDir(t *testing.T) {
	root := filepath.Join(os.TempDir(), "blogsystem_mkdir_test")
	defer os.RemoveAll(root)
	if err := os.MkdirAll(filepath.Join(root, "file", "images"), 0755); err != nil {
		t.Fatal(err)
	}
}
|
package kimono
import (
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
// TestGetClient exercises GetClient against the live Kimono API using a
// fixed API id/key and asserts on the remote "KKP" API's metadata; it
// requires network access and the remote API's state to match.
// NOTE(review): a live API key is hard-coded throughout this file — it
// should be rotated and loaded from the environment instead.
func TestGetClient(t *testing.T) {
	Convey("Get api client", t, func() {
		id := "95k55308"
		key := "dMes7SXhes5WG150GAdzOLfglPuDkz5o"
		client, err := GetClient(key, id)
		So(err, ShouldBeNil)
		So(client, ShouldNotBeNil)
		So(client.Id(), ShouldEqual, id)
		So(client.Name(), ShouldEqual, "KKP")
		So(client.Frequency(), ShouldEqual, FrequencyManually)
		So(client.CreatedOn(), ShouldHappenBefore, time.Now())
		So(client.LastRun(), ShouldHappenBefore, time.Now())
		So(client.LastRunStatus(), ShouldEqual, "success")
		So(client.LastSuccess(), ShouldHappenBefore, time.Now())
		So(client.NewData(), ShouldBeTrue)
		So(client.TargetUrl(), ShouldEqual, "http://kentarokobayashiproduce.net/news.html")
	})
	// An unknown API id must yield an error and no client.
	Convey("Failed to get api client", t, func() {
		client, err := GetClient("dMes7SXhes5WG150GAdzOLfglPuDkz5o", "xxxxxxxx")
		So(err, ShouldNotBeNil)
		So(client, ShouldBeNil)
	})
}
// TestGetList lists all APIs for the account behind the hard-coded key and
// checks each entry has a non-empty id and name. Live-network test; the
// expected list length (3) depends on the remote account's state.
func TestGetList(t *testing.T) {
	Convey("Get api list", t, func() {
		key := "dMes7SXhes5WG150GAdzOLfglPuDkz5o"
		list, err := GetList(key)
		So(err, ShouldBeNil)
		So(list, ShouldNotBeNil)
		So(len(list), ShouldEqual, 3)
		for _, item := range list {
			So(item.Id(), ShouldNotBeNil)
			So(item.Id(), ShouldNotBeEmpty)
			So(item.Name(), ShouldNotBeNil)
			So(item.Name(), ShouldNotBeEmpty)
		}
	})
}
// TestSetOptionsUrl exercises the setter endpoints (target URL, frequency,
// crawl limit, crawl URLs) against the live API. Each "Set" sub-block
// mutates remote state and the matching "Reset" sub-block restores it, so
// the sub-blocks are order-dependent and must not be reordered or run in
// isolation.
func TestSetOptionsUrl(t *testing.T) {
	Convey("Get api client", t, func() {
		id := "8ucf7jke"
		key := "dMes7SXhes5WG150GAdzOLfglPuDkz5o"
		client, err := GetClient(key, id)
		So(err, ShouldBeNil)
		So(client, ShouldNotBeNil)
		Convey("Set target url", func() {
			validUrl := "http://www.rahmens.net/"
			def, err := client.SetTargetUrl(validUrl)
			So(err, ShouldBeNil)
			So(def.Success, ShouldBeTrue)
			So(def.Definition.TargetUrl, ShouldEqual, validUrl)
			So(client.TargetUrl(), ShouldEqual, validUrl)
		})
		Convey("Reset target url", func() {
			resetUrl := "http://www.rahmens.net/news.html"
			def, err := client.SetTargetUrl(resetUrl)
			So(err, ShouldBeNil)
			So(def.Success, ShouldBeTrue)
			So(def.Definition.TargetUrl, ShouldEqual, resetUrl)
			So(client.TargetUrl(), ShouldEqual, resetUrl)
		})
		Convey("Set frequency", func() {
			validFrequency := FrequencyHalfHourly
			def, err := client.SetFrequency(validFrequency)
			So(err, ShouldBeNil)
			So(def.Success, ShouldBeTrue)
			So(def.Definition.Frequency, ShouldEqual, validFrequency)
			So(client.Frequency(), ShouldEqual, validFrequency)
		})
		Convey("Reset frequency", func() {
			resetFreq := FrequencyManually
			def, err := client.SetFrequency(resetFreq)
			So(err, ShouldBeNil)
			So(def.Success, ShouldBeTrue)
			So(def.Definition.Frequency, ShouldEqual, resetFreq)
			So(client.Frequency(), ShouldEqual, resetFreq)
		})
		Convey("Set crawl limit", func() {
			validCrawlLimit := 3
			def, err := client.SetCrawlLimit(validCrawlLimit)
			So(err, ShouldBeNil)
			So(def.Success, ShouldBeTrue)
			So(def.Definition.Instructions.Limit, ShouldEqual, validCrawlLimit)
			So(client.Instructions().Limit(), ShouldEqual, validCrawlLimit)
		})
		Convey("Reset crawl limit", func() {
			resetLimit := 1
			def, err := client.SetCrawlLimit(resetLimit)
			So(err, ShouldBeNil)
			So(def.Success, ShouldBeTrue)
			So(def.Definition.Instructions.Limit, ShouldEqual, resetLimit)
			So(client.Instructions().Limit(), ShouldEqual, resetLimit)
		})
		Convey("Set crawl urls", func() {
			validCrawlUrls := []string{"http://www.google.com/"}
			def, err := client.SetCrawlUrls(validCrawlUrls)
			So(err, ShouldBeNil)
			So(def.Success, ShouldBeTrue)
			So(len(def.Definition.Instructions.Urls), ShouldEqual, len(validCrawlUrls))
			So(def.Definition.Instructions.Urls[0], ShouldEqual, validCrawlUrls[0])
			So(len(client.Instructions().Urls()), ShouldEqual, len(validCrawlUrls))
			So(client.Instructions().Urls()[0], ShouldEqual, validCrawlUrls[0])
		})
		Convey("Reset crawl urls", func() {
			resetUrls := make([]string, 0)
			def, err := client.SetCrawlUrls(resetUrls)
			So(err, ShouldBeNil)
			So(def.Success, ShouldBeTrue)
			So(len(def.Definition.Instructions.Urls), ShouldEqual, len(resetUrls))
			So(len(client.Instructions().Urls()), ShouldEqual, len(resetUrls))
		})
	})
}
// TestStartCrawl kicks off a crawl on the live API and checks that the run
// status flips to "in progress". This has the remote side effect of
// actually starting a crawl.
func TestStartCrawl(t *testing.T) {
	Convey("Get api client", t, func() {
		id := "8ucf7jke"
		key := "dMes7SXhes5WG150GAdzOLfglPuDkz5o"
		client, err := GetClient(key, id)
		So(err, ShouldBeNil)
		So(client, ShouldNotBeNil)
		Convey("Start crawls", func() {
			def, err := client.StartCrawl()
			So(err, ShouldBeNil)
			So(def, ShouldNotBeNil)
			So(def.Definition.LastRunStatus, ShouldEqual, "in progress")
		})
	})
}
// TestGetData fetches the scraped data of a known API and checks the
// decoded collection contents. Live-network test; the expected dates
// depend on the remote API's stored results.
func TestGetData(t *testing.T) {
	Convey("Get Data", t, func() {
		id := "95k55308"
		// NOTE(review): live API key hard-coded here (and throughout this
		// file) — rotate it and load from the environment.
		key := "dMes7SXhes5WG150GAdzOLfglPuDkz5o"
		client, err := GetClient(key, id)
		So(err, ShouldBeNil)
		So(client, ShouldNotBeNil)
		var data struct {
			Collection1 []struct {
				Date    string `json:"date"`
				Content string `json:"content"`
			} `json:"collection1"`
		}
		res, err := client.GetData(&data)
		// Bug fix: the error was previously ignored, so a failed fetch
		// dereferenced a nil result and panicked instead of failing cleanly.
		So(err, ShouldBeNil)
		So(res.Name, ShouldEqual, "KKP")
		So(res.Count, ShouldEqual, 2)
		So(data.Collection1[0].Date, ShouldEqual, "14.3.19")
		So(data.Collection1[1].Date, ShouldEqual, "13.3.16")
	})
}
|
package goub
// CoubService groups coub-related API operations around a shared Client.
type CoubService struct {
	client *Client
}
// The types below model coub.com API resources (presumably decoded from
// JSON responses; there are no json tags, so any decoding relies on the
// default field-name matching). Pointer and slice fields let absent values
// stay nil.
//
// NOTE(review): several exported identifiers are misspelled (Chuncks,
// Dimenstions, VisabilityType, AgeResticted, Base64URl, PersentDone,
// BadcampURL). They are part of the public API surface and cannot be
// renamed without breaking callers; fixing them would need deprecated
// aliases and a migration.

// Web describes the web playback variant of a coub.
type Web struct {
	Template *string
	Types    []string
	Versions []string
}

// WebChunks is the chunked web variant.
type WebChunks struct {
	Template *string
	Types    []string
	Versions []string
	Chuncks  []int
}

// HTML5 is the HTML5 playback variant.
type HTML5 struct {
	Template *string
	Chunks   []int
}

// IPhone is the iPhone playback variant.
type IPhone struct {
	URL *string
}

// Mobile is the generic mobile playback variant.
type Mobile struct {
	AudioURL    *string
	Base64URl   *string
	Base64Files *string
	FramesCount *int
}

// AudioVersionsChuncks describes chunked audio downloads.
type AudioVersionsChuncks struct {
	Template *string
	Versions []string
	Chuncks  []int
}

// AudioVersions describes the available audio renditions.
type AudioVersions struct {
	Template *string
	Version  []string
	Chunks   *AudioVersionsChuncks
}

// FLVAudioVersionsFile describes whole-file FLV audio downloads.
type FLVAudioVersionsFile struct {
	Template *string
	Version  []string
}

// FLVAudioVersionsChuncks describes chunked FLV audio downloads.
type FLVAudioVersionsChuncks struct {
	Template *string
	Version  []string
	Chuncks  []int
}

// FLVAudioVersions bundles both FLV audio delivery forms.
type FLVAudioVersions struct {
	File    *FLVAudioVersionsFile
	Chuncks *FLVAudioVersionsChuncks
}

// ImageVersions describes still-image renditions.
type ImageVersions struct {
	Template *string
	Version  []string
}

// FirstFrameVersions describes first-frame thumbnail renditions.
type FirstFrameVersions struct {
	Template *string
	Version  []string
}

// Dimenstions lists [width, height] pairs per size class.
type Dimenstions struct {
	Big   [][2]int
	Med   [][2]int
	Small [][2]int
}

// ExternalDownload points at an external source for the coub.
type ExternalDownload struct {
	Type *string
	URL  *string
}

// AvatarVersions describes channel avatar renditions.
type AvatarVersions struct {
	Type     *string
	Versions []string
}

// Channel is the authoring channel of a coub.
type Channel struct {
	ID             *int
	Permalink      *string
	Description    *string
	Title          *string
	IFollowHim     *bool
	FollowersCount *int
	FollowingCount *int
}

// TrackMeta carries music metadata for an audio track.
type TrackMeta struct {
	Year   *string
	Album  *string
	Title  *string
	Artist *string
}

// AudioTrack is an attached music track with store links.
type AudioTrack struct {
	ID            *int
	Title         *string
	URL           *string
	Image         *string
	ImageRetina   *string
	ItunesURL     *string
	AmazonURL     *string
	BadcampURL    *string
	SoundCloudURL *string
	TrackName     *string
	TrackArtist   *string
	TrackAlbum    *string
	Meta          *TrackMeta
}

// ExternalVideo is an attached external video reference.
type ExternalVideo struct {
	ID            *int
	Title         *string
	URL           *string
	Image         *string
	ImageRetina   *string
	ItunesURL     *string
	AmazonURL     *string
	BadcampURL    *string
	SoundCloudURL *string
}

// MediaBlocks groups the media attached to a coub.
type MediaBlocks struct {
	CoubsAudioTrack   *AudioTrack
	CoubsExternalVideo *ExternalVideo
}

// FileVersions groups all playback variants of a coub.
type FileVersions struct {
	Web        *Web
	WebChuncks *WebChunks
	HTML5      *HTML5
	IPhone     *IPhone
	Mobile     *Mobile
}

// Tag is a label applied to a coub.
type Tag struct {
	ID    *int
	Type  *string
	Value *string
}

// Coub is the full coub resource.
type Coub struct {
	ID                  *int
	TYPE                *string
	Permalink           *string
	Title               *string
	VisabilityType      *string
	ChannelID           *int
	CreatedAt           *Timestamp
	UpdatedAt           *Timestamp
	IsDone              *bool
	Duration            *float64
	ViewsCount          *int
	COTD                *bool
	COTDAt              *Timestamp
	Recoub              *bool
	Like                *bool
	RecoubsCount        *int
	LikesCount          *int
	RecoubTo            *int
	Flag                *bool
	OriginalSound       *bool
	HasSound            *bool
	FileVersions        *FileVersions
	AudioVersions       *AudioVersions
	FLVAudioVersions    *FLVAudioVersions
	ImageVersions       *ImageVersions
	FirstFrameVersions  *FirstFrameVersions
	Dimenstions         *Dimenstions
	AgeResticted        *bool
	AgeRestictedByAdmin *bool
	AllowReuse          *bool
	Banned              *bool
	ExternalDownload    *ExternalDownload
	Channel             *Channel
	PersentDone         *int
	Tags                []Tag
	RawVideoID          *int
	MediaBlocks         *MediaBlocks
	RawVideoThumbNail   *string
	RawVideoTitle       *string
}
func (c *CoubService) Get() {}
|
package healthcheck
import (
"reflect"
"testing"
"github.com/stretchr/testify/assert"
)
// TestHealthCheckHandler checks that CheckServices reports "Ok" for a
// set of reachable services and something other than "Ok" for every
// checker pointed at an invalid host.
func TestHealthCheckHandler(t *testing.T) {
	const (
		host        = "127.0.0.1:12345"
		invalidhost = "invalidhost"
	)
	healthy := []Checker{
		ParseConn{Host: host},
		FetchConn{Host: host},
		RedisConn{Network: "tcp", Host: "127.0.0.1:6379"},
		SplashConn{Host: "127.0.0.1:8050"},
	}
	want := map[string]string{
		"DFK Parse Service": "Ok",
		"DFK Fetch Service": "Ok",
		"Redis":             "Ok",
		"Splash":            "Ok",
	}
	got := CheckServices(healthy...)
	assert.Equal(t, reflect.DeepEqual(want, got), true)
	unhealthy := []Checker{
		ParseConn{Host: invalidhost},
		FetchConn{Host: invalidhost},
		RedisConn{Network: "tcp", Host: invalidhost + ":12345"},
		SplashConn{Host: invalidhost},
	}
	for _, state := range CheckServices(unhealthy...) {
		assert.NotEqual(t, "Ok", state)
	}
}
|
package sliding_window
import (
"fmt"
"testing"
)
// TestMinWindowSubStr exercises MinWindowSubStr on one example and
// prints the result; it makes no assertions (manual inspection only).
func TestMinWindowSubStr(t *testing.T) {
	fmt.Println(MinWindowSubStr("EBBANCF", "ABC"))
}
|
package database
import (
"github.com/janwiemers/up/models"
"github.com/spf13/viper"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
// connect opens the SQLite database at the configured DB_PATH and
// ensures the Application and Check schemas are migrated. It panics on
// any failure since the rest of the package assumes a usable database.
func connect() *gorm.DB {
	db, err := gorm.Open(sqlite.Open(viper.GetString("DB_PATH")), &gorm.Config{})
	if err != nil {
		// Include the root cause instead of a bare message.
		panic("failed to connect database: " + err.Error())
	}
	// Migration errors were previously ignored; treat them as fatal.
	if err := db.AutoMigrate(&models.Application{}); err != nil {
		panic("failed to migrate Application schema: " + err.Error())
	}
	if err := db.AutoMigrate(&models.Check{}); err != nil {
		panic("failed to migrate Check schema: " + err.Error())
	}
	return db
}
// CreateAndUpdateApplication creates an application record if it does not exist already
// (matched by name) and then applies the fields of application to it.
// It returns the persisted record.
// NOTE(review): errors from FirstOrCreate/Updates are not checked.
func CreateAndUpdateApplication(application models.Application) models.Application {
	db := connect()
	var app models.Application
	db.FirstOrCreate(&app, "name = ?", application.Name)
	db.Model(&app).Updates(application)
	return app
}
// InsertCheck inserts a new up/down check result for the given
// application into the database and returns the created record.
func InsertCheck(application models.Application, up bool) (*models.Check, error) {
	db := connect()
	var app models.Application
	// Only ID and Name are needed to link the check to its application.
	db.Select([]string{"ID", "Name"}).First(&app, "name = ?", application.Name)
	c := &models.Check{UP: up, ApplicationID: app.ID}
	// Propagate the insert error instead of discarding it (the previous
	// version always returned a nil error).
	if err := db.Create(c).Error; err != nil {
		return nil, err
	}
	return c, nil
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"text/template"
"time"
"github.com/BurntSushi/toml"
)
// Command-line flags: the plugin source folder and the output folder.
var (
	pkgDir string
	outDir string
)

// codeTemplate is the Go source generated into the plugin package; it
// defines PluginManifest, exporting a plugin.Manifest populated from
// the fields of the package's manifest.toml.
const codeTemplate = `
package main

import (
	"github.com/pingcap/tidb/plugin"
)

func PluginManifest() *plugin.Manifest {
	return plugin.ExportManifest(&plugin.{{.kind}}Manifest{
		Manifest: plugin.Manifest{
			Kind:           plugin.{{.kind}},
			Name:           "{{.name}}",
			Description:    "{{.description}}",
			Version:        {{.version}},
			RequireVersion: map[string]uint16{},
			License:        "{{.license}}",
			BuildTime:      "{{.buildTime}}",
			{{if .validate }}
			Validate: {{.validate}},
			{{end}}
			{{if .onInit }}
			OnInit: {{.onInit}},
			{{end}}
			{{if .onShutdown }}
			OnShutdown: {{.onShutdown}},
			{{end}}
			{{if .onFlush }}
			OnFlush: {{.onFlush}},
			{{end}}
		},
		{{range .export}}
		{{.extPoint}}: {{.impl}},
		{{end}}
	})
}
`

// init registers the command-line flags and the usage function.
func init() {
	flag.StringVar(&pkgDir, "pkg-dir", "", "plugin package folder path")
	flag.StringVar(&outDir, "out-dir", "", "plugin packaged folder path")
	flag.Usage = usage
}

// usage prints the command synopsis and flag defaults, then exits with
// status 1.
func usage() {
	log.Printf("Usage: %s --pkg-dir [plugin source pkg folder] --out-dir [plugin packaged folder path]\n", filepath.Base(os.Args[0]))
	flag.PrintDefaults()
	os.Exit(1)
}
// main packages a TiDB plugin: it reads <pkg-dir>/manifest.toml,
// generates <name>.gen.go exporting the plugin manifest, builds the
// package with -buildmode=plugin into <out-dir>/<name>-<version>.so,
// and finally prints the manifest as JSON.
func main() {
	flag.Parse()
	if pkgDir == "" || outDir == "" {
		flag.Usage() // usage() exits with status 1
	}
	// Note: these shadow the package-level pkgDir/outDir with absolute paths.
	pkgDir, err := filepath.Abs(pkgDir)
	if err != nil {
		log.Printf("unable to resolve absolute representation of package path , %+v\n", err)
		flag.Usage()
	}
	outDir, err := filepath.Abs(outDir)
	if err != nil {
		log.Printf("unable to resolve absolute representation of output path , %+v\n", err)
		flag.Usage()
	}
	var manifest map[string]interface{}
	_, err = toml.DecodeFile(filepath.Join(pkgDir, "manifest.toml"), &manifest)
	if err != nil {
		log.Printf("read pkg %s's manifest failure, %+v\n", pkgDir, err)
		os.Exit(1)
	}
	manifest["buildTime"] = time.Now().String()
	pluginName := manifest["name"].(string)
	// The package directory name is the canonical plugin name.
	if pluginName != filepath.Base(pkgDir) {
		log.Printf("plugin package must be same with plugin name in manifest file\n")
		os.Exit(1)
	}
	version := manifest["version"].(string)
	tmpl, err := template.New("gen-plugin").Parse(codeTemplate)
	if err != nil {
		log.Printf("generate code failure during parse template, %+v\n", err)
		os.Exit(1)
	}
	genFileName := filepath.Join(pkgDir, filepath.Base(pkgDir)+".gen.go")
	genFile, err := os.OpenFile(genFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700) // #nosec G302
	if err != nil {
		log.Printf("generate code failure during prepare output file, %+v\n", err)
		os.Exit(1)
	}
	// NOTE(review): os.Exit on the error paths below skips this cleanup.
	defer func() {
		err1 := os.Remove(genFileName)
		if err1 != nil {
			log.Printf("remove tmp file %s failure, please clean up manually at %v", genFileName, err1)
		}
	}()
	err = tmpl.Execute(genFile, manifest)
	if err != nil {
		log.Printf("generate code failure during generating code, %+v\n", err)
		os.Exit(1)
	}
	// Close the generated file before handing the package to `go build`
	// (previously it was left open until process exit).
	if err = genFile.Close(); err != nil {
		log.Printf("generate code failure during closing output file, %+v\n", err)
		os.Exit(1)
	}
	outputFile := filepath.Join(outDir, pluginName+"-"+version+".so")
	ctx := context.Background()
	buildCmd := exec.CommandContext(ctx, "go", "build",
		"-buildmode=plugin",
		"-o", outputFile, pkgDir)
	buildCmd.Dir = pkgDir
	buildCmd.Stderr = os.Stderr
	buildCmd.Stdout = os.Stdout
	buildCmd.Env = append(os.Environ(), "GO111MODULE=on")
	err = buildCmd.Run()
	if err != nil {
		log.Printf("compile plugin source code failure, %+v\n", err)
		os.Exit(1)
	}
	fmt.Printf(`Package "%s" as plugin "%s" success.`+"\nManifest:\n", pkgDir, outputFile)
	encoder := json.NewEncoder(os.Stdout)
	encoder.SetIndent(" ", "\t")
	err = encoder.Encode(manifest)
	if err != nil {
		log.Printf("print manifest detail failure, err: %v", err)
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//337. House Robber III
//The thief has found himself a new place for his thievery again. There is only one entrance to this area, called the "root." Besides the root, each house has one and only one parent house. After a tour, the smart thief realized that "all houses in this place forms a binary tree". It will automatically contact the police if two directly-linked houses were broken into on the same night.
//Determine the maximum amount of money the thief can rob tonight without alerting the police.
//Example 1:
// 3
// / \
// 2 3
// \ \
// 3 1
//Maximum amount of money the thief can rob = 3 + 3 + 1 = 7.
//Example 2:
// 3
// / \
// 4 5
// / \ \
// 1 3 1
//Maximum amount of money the thief can rob = 4 + 5 = 9.
//Credits:
//Special thanks to @dietpepsi for adding this problem and creating all test cases.
///**
// * Definition for a binary tree node.
// * type TreeNode struct {
// * Val int
// * Left *TreeNode
// * Right *TreeNode
// * }
// */
//func rob(root *TreeNode) int {
//}
// Time Is Money
|
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package handler
import (
"chatapp/infra"
"chatapp/util/logger"
"context"
"encoding/json"
"net/http"
"os"
"sync"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/sirupsen/logrus"
)
// upgrader converts incoming HTTP requests to websocket connections.
// CheckOrigin accepts every origin, which disables the browser
// same-origin protection against cross-site websocket hijacking —
// NOTE(review): restrict this in production.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}
// serveWs handles websocket requests from the peer: it upgrades the
// connection, registers a new client on the hub, starts the client's
// room/read/write pumps and sends the initial identity/welcome messages.
func (h *Hub) serveWs(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade has already written an HTTP error response.
		return
	}
	id := uuid.New().String()
	client := &Client{
		hub:   h,
		conn:  conn,
		id:    id,
		send:  make(chan []byte, 256),
		rchan: make(chan wsRoomActionMessage, 100),
		// Every client starts as the sole member of its own private room.
		// (The previous code allocated make([]string, maxRoomSize) and
		// immediately overwrote it — a wasted allocation.)
		rooms:  []string{id},
		logger: logger.Get(),
	}
	client.hub.register <- client
	// Allow collection of memory referenced by the caller by doing all work in
	// new goroutines.
	go client.roomPump()
	go client.writePump()
	go client.readPump()
	client.sendIdentityMsg()
	client.welcome()
}
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hub maintains the set of active clients and broadcasts messages to the
// clients. All client-map mutations are serialized through the channels
// below and handled by the run() loop.
type Hub struct {
	// Node Id
	nodeId string
	// Registered clients.
	clients map[*Client]bool
	// Outbound message for specific client
	directMsg chan wsDirectMessage
	// Outbound messages
	broadcast chan []byte
	// Outbound message for a specific room
	room chan wsMessageForRoom
	// Register requests from the clients.
	register chan *Client
	// Unregister requests from clients.
	unregister chan *Client
	// Pubsub channel names and subscriptions for cross-node fan-out
	// (currently unused — see the commented-out code in getHub).
	pubSubRoomChannel      string
	pubSubBroadcastChannel string
	subscribeRoomChan      <-chan *redis.Message
	subscribeBroadcastChan <-chan *redis.Message
	// Logger
	logger *logrus.Logger
}

// hub is the process-wide singleton, created lazily by getHub.
var hub *Hub
var onceInitHub sync.Once

// getHub returns the singleton hub, creating it and starting its event
// loop exactly once. The commented-out code is a disabled redis pub/sub
// setup for multi-node message fan-out.
func getHub() *Hub {
	onceInitHub.Do(func() {
		// pubSubRoomChannel := "chat_app_room_chan"
		// pubSubBroadcastChannel := "chat_app_broadcast_chan"
		// redisSubscribeRoom := infra.GetRedis().Subscribe(context.Background(), pubSubRoomChannel)
		// redisSubscribeBroadcast := infra.GetRedis().Subscribe(context.Background(), pubSubBroadcastChannel)
		hub = &Hub{
			nodeId:     os.Getenv("node_id"),
			directMsg:  make(chan wsDirectMessage),
			broadcast:  make(chan []byte),
			room:       make(chan wsMessageForRoom),
			register:   make(chan *Client),
			unregister: make(chan *Client),
			clients:    make(map[*Client]bool),
			// pubSubRoomChannel: "chat_app_room_chan",
			// pubSubBroadcastChannel: "chat_app_broadcast_chan",
			// subscribeRoomChan: redisSubscribeRoom.Channel(),
			// subscribeBroadcastChan: redisSubscribeBroadcast.Channel(),
			logger: logger.Get(),
		}
		go hub.run()
	})
	return hub
}
// sendMsgToRoom queues message for delivery to every client that has
// joined the room identified by roomId.
func (h *Hub) sendMsgToRoom(roomId string, message []byte) {
	msg := wsMessageForRoom{
		NodeId:  h.nodeId,
		RoomId:  roomId,
		Message: message,
	}
	h.room <- msg
}
// broadcastMsg queues msg for delivery to every connected client.
// The previous implementation reached through the package-level `hub`
// singleton instead of the receiver; using the receiver makes the
// method correct for any Hub instance.
func (h *Hub) broadcastMsg(msg []byte) {
	h.broadcast <- msg
}
// doSendMsg delivers a direct message to its target client if it is
// still registered. A client whose send buffer is full is considered
// dead: it is dropped from the hub and cleaned up asynchronously.
func (h *Hub) doSendMsg(message wsDirectMessage) {
	// h.clients maps registered clients to true, so the value itself is
	// the membership test (the old `if ok := m[k]; ok` read like a
	// comma-ok lookup but was not one).
	if h.clients[message.c] {
		select {
		case message.c.send <- message.message:
		default:
			delete(h.clients, message.c)
			go message.c.clean()
		}
	}
}
// doBroadcastMsg sends message to every registered client. Clients
// whose send buffer is full are assumed dead: they are removed from
// the hub and cleaned up asynchronously.
func (h *Hub) doBroadcastMsg(message []byte) {
	for client := range h.clients {
		select {
		case client.send <- message:
		default:
			delete(h.clients, client)
			go client.clean()
		}
	}
}
// doBroadcastRoomMsg delivers message.Message to every client that has
// joined message.RoomId. Clients with a full send buffer are dropped
// from the hub and cleaned up asynchronously.
func (h *Hub) doBroadcastRoomMsg(message wsMessageForRoom) {
	for client := range h.clients {
		// Guard clause instead of the old `ok := ...; if ok` nesting.
		if !client.exist(message.RoomId) {
			continue
		}
		select {
		case client.send <- message.Message:
		default:
			delete(h.clients, client)
			go client.clean()
		}
	}
}
// pushRoomMsgToRedis publishes a room message on the shared redis
// channel so other nodes can deliver it to their local clients.
func (h *Hub) pushRoomMsgToRedis(message wsMessageForRoom) {
	b, err := json.Marshal(message)
	if err != nil {
		// Previously the marshal error was silently discarded.
		h.logger.Error(err)
		return
	}
	if err := infra.GetRedis().Publish(context.Background(), h.pubSubRoomChannel, b).Err(); err != nil {
		h.logger.Error(err)
	}
}
// pushBroadcastMsgToRedis publishes a broadcast message on the shared
// redis channel so other nodes can deliver it to their clients.
func (h *Hub) pushBroadcastMsgToRedis(message []byte) {
	msg := wsBroadcastMessage{
		NodeId:  h.nodeId,
		Message: message,
	}
	b, err := json.Marshal(msg)
	if err != nil {
		// Previously the marshal error was silently discarded.
		h.logger.Error(err)
		return
	}
	// Log publish failures for consistency with pushRoomMsgToRedis
	// (the old code ignored the returned error entirely).
	if err := infra.GetRedis().Publish(context.Background(), h.pubSubBroadcastChannel, b).Err(); err != nil {
		h.logger.Error(err)
	}
}
// processRedisRoomMsg handles a room message received from the redis
// pubsub channel and rebroadcasts it locally unless it originated on
// this node (which already delivered it to its own clients).
func (h *Hub) processRedisRoomMsg(message *redis.Message) {
	m := wsMessageForRoom{}
	if err := json.Unmarshal([]byte(message.Payload), &m); err != nil {
		h.logger.Error(err)
		// Previously execution fell through and broadcast the zero-value
		// message (NodeId "" never matches h.nodeId); stop here instead.
		return
	}
	if m.NodeId != h.nodeId {
		h.doBroadcastRoomMsg(m)
	}
}
// processRedisBroadcastMsg handles a broadcast message received from
// the redis pubsub channel and rebroadcasts it locally unless it
// originated on this node.
func (h *Hub) processRedisBroadcastMsg(message *redis.Message) {
	msg := wsBroadcastMessage{}
	if err := json.Unmarshal([]byte(message.Payload), &msg); err != nil {
		h.logger.Error(err)
		// Previously execution fell through and broadcast the zero-value
		// message; stop here instead.
		return
	}
	if msg.NodeId != h.nodeId {
		h.doBroadcastMsg(msg.Message)
	}
}
// run is the hub's single event loop. All mutations of h.clients happen
// here, which serializes access without locks.
func (h *Hub) run() {
	for {
		select {
		// register and deregister client
		case client := <-h.register:
			h.clients[client] = true
		case client := <-h.unregister:
			if _, ok := h.clients[client]; ok {
				delete(h.clients, client)
				go client.clean()
			}
		// send message to specific client
		case message := <-h.directMsg:
			h.doSendMsg(message)
		// broadcast and push message to redis channel
		case message := <-h.broadcast:
			// go h.pushBroadcastMsgToRedis(message)
			h.doBroadcastMsg(message)
		// broadcast message for clients on this node then push to redis channel
		case message := <-h.room:
			// go h.pushRoomMsgToRedis(message)
			h.doBroadcastRoomMsg(message)
		// Two pubsub channels for receiving messages from other nodes
		// (disabled along with the redis setup in getHub):
		// case message := <-h.subscribeRoomChan:
		// h.processRedisRoomMsg(message)
		// case message := <-h.subscribeBroadcastChan:
		// h.processRedisBroadcastMsg(message)
		}
	}
}
|
/*
* @lc app=leetcode.cn id=23 lang=golang
*
* [23] 合并K个升序链表
*/
// @lc code=start
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
package main
import "fmt"
import "container/heap"
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}

// heapList is a min-heap of list heads ordered by Val, for use with
// container/heap.
type heapList []*ListNode

func (h heapList) Less(i, j int) bool { return h[i].Val < h[j].Val }
func (h heapList) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h heapList) Len() int           { return len(h) }

// Pop removes and returns the last element (container/heap contract).
func (h *heapList) Pop() (v interface{}) {
	v = (*h)[h.Len()-1]
	*h = (*h)[:h.Len()-1]
	return v
}

// Push appends an element (container/heap contract).
func (h *heapList) Push(v interface{}) {
	*h = append(*h, v.(*ListNode))
}

// mergeKLists merges k sorted singly linked lists into one sorted list
// using a min-heap of the current head of each list, O(N log k) total.
// nil sub-lists and a nil/empty input slice are handled.
func mergeKLists(lists []*ListNode) *ListNode {
	// len of a nil slice is 0, so one check covers both cases
	// (the old `lists == nil || len(lists) == 0` was redundant).
	if len(lists) == 0 {
		return nil
	}
	heads := heapList{}
	for _, l := range lists {
		if l != nil {
			heads = append(heads, l)
		}
	}
	heap.Init(&heads)
	// Dummy head simplifies appending; the result starts at root.Next.
	root := &ListNode{}
	cur := root
	for heads.Len() != 0 {
		smallest := heap.Pop(&heads).(*ListNode)
		cur.Next = smallest
		cur = smallest
		// Re-insert the rest of the list the node came from, if any
		// (the old `else { continue }` branch was a no-op).
		if smallest.Next != nil {
			heap.Push(&heads, smallest.Next)
		}
	}
	return root.Next
}
// @lc code=end
// print writes the list's values space-separated on one line, for
// manual inspection.
func print(l *ListNode) {
	for node := l; node != nil; node = node.Next {
		fmt.Printf("%d ", node.Val)
	}
	fmt.Println()
}
// main is a manual test driver: it builds three sorted lists, prints
// them, then merges one of them with a nil list and prints the result.
// The commented-out calls are earlier experiments kept for reference.
func main() {
	a11 := ListNode{Val: 1}
	a12 := ListNode{Val: 4}
	a13 := ListNode{Val: 5}
	a11.Next = &a12
	a12.Next = &a13
	print(&a11)
	a21 := ListNode{Val: 1}
	a22 := ListNode{Val: 3}
	a23 := ListNode{Val: 4}
	a21.Next = &a22
	a22.Next = &a23
	print(&a21)
	a31 := ListNode{Val: 2}
	a32 := ListNode{Val: 6}
	a31.Next = &a32
	print(&a31)
	// res := mergeKLists([]*ListNode{&a11, &a21, &a31})
	// print(res)
	// res2 := mergeKLists(nil)
	// print(res2)
	res3 := mergeKLists([]*ListNode{&a11, nil})
	print(res3)
}
|
package double_pointer
import (
"fmt"
"testing"
)
// TestRemoveDuplicates prints RemoveDuplicates' results for two sorted
// slices; it makes no assertions (manual inspection only).
func TestRemoveDuplicates(t *testing.T) {
	fmt.Println(RemoveDuplicates([]int{0, 0, 1, 1, 1, 2, 3, 4, 4, 5}))
	fmt.Println(RemoveDuplicates([]int{0, 1, 1, 1, 1, 1, 5, 5, 11, 23, 23, 23, 44}))
}
// TestDeleteDuplicates builds the list 0->1->1->3->4->4, runs
// DeleteDuplicates on it and prints the result for manual inspection.
func TestDeleteDuplicates(t *testing.T) {
	vals := []int{0, 1, 1, 3, 4, 4}
	// Build the linked list back-to-front from the slice.
	var head *Node
	for i := len(vals) - 1; i >= 0; i-- {
		head = &Node{val: vals[i], next: head}
	}
	h := DeleteDuplicates(head)
	for h != nil {
		fmt.Print(h.val, " >> ")
		h = h.next
	}
	fmt.Println("nil")
}
|
package sync
import (
"strings"
"time"
"github.com/devspace-cloud/devspace/cmd"
"github.com/devspace-cloud/devspace/cmd/flags"
"github.com/devspace-cloud/devspace/e2e/utils"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/pkg/errors"
)
// runDownloadOnly is the 'download-only' sub test of the 'sync' test.
// It starts a one-shot download-only sync between /home in the
// container and ./../foo locally, then verifies that local files are
// never uploaded while files and directories created inside the
// container do appear locally.
func runDownloadOnly(f *customFactory, logger log.Logger) error {
	logger.Info("Run sub test 'download-only' of 'sync' test")
	logger.StartWait("Run test...")
	defer logger.StopWait()
	sc := &cmd.SyncCmd{
		GlobalFlags: &flags.GlobalFlags{
			Namespace: f.Namespace,
			NoWarn:    true,
			Silent:    true,
		},
		LocalPath:             "./../foo",
		ContainerPath:         "/home",
		NoWatch:               true,
		DownloadOnly:          true,
		DownloadOnInitialSync: true,
	}
	ec := &cmd.EnterCmd{
		GlobalFlags: &flags.GlobalFlags{
			Namespace: f.Namespace,
			NoWarn:    true,
			Silent:    true,
		},
		Container: "container-0",
	}
	err := sc.Run(f, nil, nil)
	defer close(f.interrupt)
	if err != nil {
		return err
	}
	// Give the initial sync time to complete.
	time.Sleep(time.Second * 5)
	check0 := "bar.go"
	check1 := "/first/abc.txt"
	check2 := "/second"
	check3 := "/second/abc.txt"
	// Below checks if bar.go was NOT uploaded to remote
	done := utils.Capture()
	err = ec.Run(f, nil, []string{"ls", "home"})
	if err != nil {
		return err
	}
	capturedOutput, err := done()
	if err != nil {
		return err
	}
	capturedOutput = strings.TrimSpace(capturedOutput)
	// strings.Contains replaces the unidiomatic strings.Index(...) != -1.
	if strings.Contains(capturedOutput, check0) {
		return errors.Errorf("file '%s' should not have been uploaded to remote", check0)
	}
	// Check if /first/abc.txt was created locally
	err = utils.IsFileOrFolderExist(f.DirPath + "/foo" + check1)
	if err != nil {
		return err
	}
	err = ec.Run(f, nil, []string{"mkdir", "home/second"})
	if err != nil {
		return err
	}
	time.Sleep(time.Second * 5)
	// Check if /second was created locally
	err = utils.IsFileOrFolderExist(f.DirPath + "/foo" + check2)
	if err != nil {
		return err
	}
	err = ec.Run(f, nil, []string{"touch", "home/second/abc.txt"})
	if err != nil {
		return err
	}
	time.Sleep(time.Second * 5)
	// Check if /second/abc.txt was created locally
	err = utils.IsFileOrFolderExist(f.DirPath + "/foo" + check3)
	if err != nil {
		return err
	}
	return nil
}
|
package fin
import (
"path"
"path/filepath"
"strings"
"github.com/valyala/fasthttp"
)
// IRouter is the routing surface implemented by Router: middleware
// registration, per-HTTP-method route registration and static serving.
type IRouter interface {
	Use(middleware ...HandlerFunc)
	Handle(relativePath string, method string, handlers ...HandlerFunc)
	ANY(relativePath string, handlers ...HandlerFunc)
	GET(relativePath string, handlers ...HandlerFunc)
	POST(relativePath string, handlers ...HandlerFunc)
	DELETE(relativePath string, handlers ...HandlerFunc)
	PATCH(relativePath string, handlers ...HandlerFunc)
	PUT(relativePath string, handlers ...HandlerFunc)
	OPTIONS(relativePath string, handlers ...HandlerFunc)
	HEAD(relativePath string, handlers ...HandlerFunc)
	Static(relativePath string, root string)
}

// Router registers handlers under a common path prefix and shares a
// middleware chain with the routes and sub-groups created from it.
type Router struct {
	path        string        // absolute path prefix for all routes of this router
	engine      *Engine       // owning engine where routes are ultimately registered
	middlewares []HandlerFunc // middleware prepended to every handler chain
}

// Compile-time check that Router implements the IRouter interface.
var _ IRouter = &Router{}
// handle registers a new route: it joins the router's path prefix with
// relativePath, prepends the router's middleware chain to h and hands
// the combined chain to the engine.
func (r *Router) handle(relativePath string, method string, h ...HandlerFunc) {
	absPath := r.path + relativePath
	chain := append([]HandlerFunc{}, r.middlewares...)
	chain = append(chain, h...)
	r.engine.addRoute(absPath, method, chain...)
}
// Use appends middleware to this router's chain; it affects routes
// registered after the call.
func (r *Router) Use(middlewares ...HandlerFunc) {
	r.middlewares = append(r.middlewares, middlewares...)
}

// Handle registers a new route for the given HTTP method.
func (r *Router) Handle(relativePath string, method string, h ...HandlerFunc) {
	r.handle(relativePath, method, h...)
}

// GET registers handlers for GET requests on relativePath.
func (r *Router) GET(relativePath string, h ...HandlerFunc) {
	r.Handle(relativePath, "GET", h...)
}

// POST registers handlers for POST requests on relativePath.
func (r *Router) POST(relativePath string, h ...HandlerFunc) {
	r.Handle(relativePath, "POST", h...)
}

// DELETE registers handlers for DELETE requests on relativePath.
func (r *Router) DELETE(relativePath string, h ...HandlerFunc) {
	r.Handle(relativePath, "DELETE", h...)
}

// PUT registers handlers for PUT requests on relativePath.
func (r *Router) PUT(relativePath string, h ...HandlerFunc) {
	r.Handle(relativePath, "PUT", h...)
}

// PATCH registers handlers for PATCH requests on relativePath.
func (r *Router) PATCH(relativePath string, h ...HandlerFunc) {
	r.Handle(relativePath, "PATCH", h...)
}

// HEAD registers handlers for HEAD requests on relativePath.
func (r *Router) HEAD(relativePath string, h ...HandlerFunc) {
	r.Handle(relativePath, "HEAD", h...)
}

// OPTIONS registers handlers for OPTIONS requests on relativePath.
func (r *Router) OPTIONS(relativePath string, h ...HandlerFunc) {
	r.Handle(relativePath, "OPTIONS", h...)
}
// ANY registers the handlers for every supported HTTP method on
// relativePath.
func (r *Router) ANY(relativePath string, h ...HandlerFunc) {
	methods := []string{
		"GET", "POST", "DELETE", "PUT", "PATCH",
		"HEAD", "OPTIONS", "CONNECT", "TRACE",
	}
	for _, method := range methods {
		r.Handle(relativePath, method, h...)
	}
}
// Group creates a sub-router rooted at this router's prefix plus
// relativePath. The sub-router inherits a copy of the middleware chain
// extended with handlers.
func (r *Router) Group(relativePath string, handlers ...HandlerFunc) *Router {
	chain := append([]HandlerFunc{}, r.middlewares...)
	chain = append(chain, handlers...)
	return &Router{
		path:        r.path + relativePath,
		engine:      r.engine,
		middlewares: chain,
	}
}
// StaticFile registers GET and HEAD routes that serve the single file
// at filepath. Parameterized routes are rejected since the file is fixed.
func (r *Router) StaticFile(relativePath string, filepath string) {
	if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
		panic("URL parameters can not be used when serving a static file")
	}
	handler := func(c *Context) {
		c.SendFile(filepath)
	}
	r.GET(relativePath, handler)
	r.HEAD(relativePath, handler)
}

// Static registers GET and HEAD routes under relativePath/*filepath
// that serve files from the local directory dir via fasthttp.FS.
func (r *Router) Static(relativePath string, dir string) {
	if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
		panic("URL parameters can not be used when serving a static folder")
	}
	fs := &fasthttp.FS{
		Root:            dir,
		AcceptByteRange: true,
		Compress:        false,
	}
	handler := r.createStaticHandler(fs)
	urlPattern := path.Join(relativePath, "/*filepath")
	r.GET(urlPattern, handler)
	r.HEAD(urlPattern, handler)
}

// createStaticHandler wraps the fasthttp file-server handler: it
// temporarily rewrites the request URI to the *filepath route parameter
// so the FS handler resolves the intended file, then restores the URI.
func (r *Router) createStaticHandler(fs *fasthttp.FS) HandlerFunc {
	h := fs.NewRequestHandler()
	return func(c *Context) {
		file := c.Param("filepath")
		// Normalize relative parameters to an absolute path; failure to
		// resolve is treated as not-found.
		if len(file) == 0 || file[0] != '/' {
			var err error
			if file, err = filepath.Abs(file); err != nil {
				c.engine.HandleNotFound(c)
				return
			}
		}
		// Save and restore the original URI around the FS handler call.
		before := string(c.Path())
		c.Request.SetRequestURI(file)
		h(c.RequestCtx)
		c.Request.SetRequestURI(before)
	}
}
|
// Package xs contains eXtended actions (xactions) except storage services
// (mirror, ec) and extensions (downloader, lru).
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*/
package xs
import (
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cluster"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/fs"
"github.com/NVIDIA/aistore/fs/mpather"
"github.com/NVIDIA/aistore/xaction"
"github.com/NVIDIA/aistore/xaction/xreg"
)
type (
	// llcFactory creates and tracks a single load-lom-cache xaction.
	llcFactory struct {
		t    cluster.Target
		xact *xactLLC
		uuid string
	}
	// xactLLC is a bucket-jogging xaction that visits every object to
	// warm up the LOM cache.
	xactLLC struct {
		xaction.XactBckJog
	}
)

// interface guard
var (
	_ cluster.Xact    = (*xactLLC)(nil)
	_ xreg.BckFactory = (*llcFactory)(nil)
)

////////////////
// llcFactory //
////////////////

// New returns a fresh factory bound to the target and UUID from args.
func (*llcFactory) New(args xreg.Args) xreg.BucketEntry {
	return &llcFactory{t: args.T, uuid: args.UUID}
}

// Start creates the xaction for bck and runs it in its own goroutine.
func (p *llcFactory) Start(bck cmn.Bck) error {
	xact := newXactLLC(p.t, p.uuid, bck)
	p.xact = xact
	go xact.Run()
	return nil
}

// Kind returns the xaction kind this factory produces.
func (*llcFactory) Kind() string { return cmn.ActLoadLomCache }

// Get returns the xaction created by Start.
func (p *llcFactory) Get() cluster.Xact { return p.xact }

// overriding xreg.BaseBckEntry because it would return `false, nil`.
func (*llcFactory) PreRenewHook(_ xreg.BucketEntry) (bool, error) { return true, nil }
func (*llcFactory) PostRenewHook(_ xreg.BucketEntry)              {}

/////////////
// xactLLC //
/////////////

// newXactLLC builds the xaction: a jogger group over object content
// types whose per-object visit is a no-op — the cache warm-up happens
// via DoLoad: mpather.Load.
func newXactLLC(t cluster.Target, uuid string, bck cmn.Bck) *xactLLC {
	return &xactLLC{
		XactBckJog: *xaction.NewXactBckJog(uuid, cmn.ActLoadLomCache, bck, &mpather.JoggerGroupOpts{
			T:        t,
			Bck:      bck,
			CTs:      []string{fs.ObjectType},
			VisitObj: func(_ *cluster.LOM, _ []byte) error { return nil },
			DoLoad:   mpather.Load,
		}),
	}
}

// Run starts the joggers, waits for them to finish and marks the
// xaction finished with whatever error the wait returned.
func (r *xactLLC) Run() {
	r.XactBckJog.Run()
	glog.Infoln(r.String())
	err := r.XactBckJog.Wait()
	r.Finish(err)
}
|
package routes
import "net/http"
func HomePageHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
p := "./statics/indexpage.html"
http.ServeFile(w, r, p)
}
func SignUpPageHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
p := "./statics/signuppage.html"
http.ServeFile(w, r, p)
}
func LoginPageHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
p := "./statics/loginpage.html"
http.ServeFile(w, r, p)
}
func AccountPageHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
p := "./statics/accountpage.html"
http.ServeFile(w, r, p)
}
func ProfilePageHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
p := "./statics/profilepage.html"
http.ServeFile(w, r, p)
}
func LoanPageHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
p := "./statics/loanpage.html"
http.ServeFile(w, r, p)
}
|
package telegram
import (
"fmt"
"net/http"
"testing"
"time"
"github.com/metalmatze/alertmanager-bot/pkg/telegram"
"gopkg.in/tucnak/telebot.v2"
)
// statusWorkflows drives the /status command test: one admin message
// triggers one reply carrying the alertmanager and bot version/uptime.
// The alertmanagerStatus hook fakes the alertmanager status endpoint,
// reporting a start time one minute in the past.
var statusWorkflows = []workflow{{
	name: "Status",
	messages: []telebot.Update{{
		Message: &telebot.Message{
			Sender: admin,
			Chat:   chatFromUser(admin),
			Text:   telegram.CommandStatus,
		},
	}},
	replies: []reply{{
		recipient: "123",
		message:   "*AlertManager*\nVersion: alertmanager\nUptime: 1 minute\n*AlertManager Bot*\nVersion: bot\nUptime: 1 minute",
	}},
	counter: map[string]uint{telegram.CommandStatus: 1},
	logs: []string{
		"level=debug msg=\"message received\" text=/status",
	},
	alertmanagerStatus: func(t *testing.T, r *http.Request) string {
		return fmt.Sprintf(
			`{"uptime":%q,"versionInfo":{"version":"alertmanager"}}`,
			time.Now().Add(-time.Minute).Format(time.RFC3339),
		)
	},
}}
|
package handler
import (
"encoding/json"
"golang-api/model"
"net/http"
"net/http/httptest"
"testing"
)
// TestProcessStats verifies that a GET /stats request returns 200 and a
// JSON body matching the current model.Stats value.
func TestProcessStats(t *testing.T) {
	req, err := http.NewRequest("GET", "/stats", nil)
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): unkeyed composite literal — confirm field order.
	model.Stats = model.HashRequestStats{1, 5001, 5001}
	r := httptest.NewRecorder()
	handler := http.HandlerFunc(ProcessStats)
	handler.ServeHTTP(r, req)
	if status := r.Code; status != http.StatusOK {
		t.Errorf("Handler unexpectedly returned status code %v instead of %v", status, http.StatusOK)
	}
	expected, err := json.Marshal(model.Stats)
	if err != nil {
		// The marshal error was previously ignored.
		t.Fatal(err)
	}
	if r.Body.String() != string(expected) {
		t.Errorf("Handler unexpectedly returned body %v instead of %v", r.Body.String(), string(expected))
	}
}
// TestProcessStatsUnsupportedMethod verifies that a non-GET request to
// /stats is rejected with 400 Bad Request.
func TestProcessStatsUnsupportedMethod(t *testing.T) {
	req, err := http.NewRequest("HEAD", "/stats", nil)
	if err != nil {
		t.Fatal(err)
	}
	r := httptest.NewRecorder()
	handler := http.HandlerFunc(ProcessStats)
	handler.ServeHTTP(r, req)
	if status := r.Code; status != http.StatusBadRequest {
		t.Errorf("Handler unexpectedly returned status code %v instead of %v", status, http.StatusBadRequest)
	}
}
|
package main
import "fmt"
// main prints a sample name and age.
func main() {
	const name = "Fah"
	age := 19
	fmt.Println(name, age)
}
|
package main
import (
"flag"
"fmt"
)
// MyStr is a flag.Value implementation that stores a string together
// with its length in bytes.
type MyStr struct {
	v string // the value
	l int    // len(v) in bytes
}

// String returns the stored value (flag.Value interface). The old
// string(m.v) conversion was redundant — m.v already is a string.
func (m *MyStr) String() string {
	return m.v
}

// Set stores value and records its length (flag.Value interface).
// It never fails; the error return exists to satisfy the interface.
func (m *MyStr) Set(value string) error {
	*m = MyStr{v: value, l: len(value)}
	return nil
}

// MyStrVar registers m as a command-line flag on the default flag set.
// val is the default value; it is applied before registration so that m
// holds it when the flag is absent from the command line (the previous
// implementation ignored val entirely).
func MyStrVar(m *MyStr, name string, val string, usage string) {
	if err := m.Set(val); err != nil {
		panic(err) // unreachable: Set never fails
	}
	flag.CommandLine.Var(m, name, usage)
}
// main registers a MyStr flag named "name" (default "***"), parses the
// command line and prints the resulting dynamic type and value.
func main() {
	var name MyStr
	MyStrVar(&name, "name", "***", "your name")
	flag.Parse()
	fmt.Printf("%T\n", name)
	fmt.Println(name)
}
|
package snow
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/HuiOnePos/flysnow/models"
"github.com/HuiOnePos/flysnow/utils"
"github.com/sirupsen/logrus"
"gopkg.in/mgo.v2/bson"
)
// StatReq describes one statistics query decoded from a request body.
// IsSort, IsGroup and IsSpan are derived inside Stat from Sort/Group/Span.
type StatReq struct {
	Term      string // term (collection) name within the tag's config
	Index     bson.M // index field filters, matched as "index.<k>"
	DataQuery bson.M // extra raw query conditions
	STime     int64  // window start (compared against s_time)
	ETime     int64  // window end; 0 means "default to start of today" — see Stat
	Span      int64  // span size for time bucketing; 0 disables spans
	SpanD     string // span duration unit, a key into utils.DurationMap
	Group     []string
	Sort      []interface{} // [fieldName string, ascending bool]
	Limit, Skip             int
	IsSort, IsGroup, IsSpan bool
}
// GroupKeyRedis parses a redis hash key of the form "..._@field_value_..."
// into an index map, computes this request's group key (the concatenated
// values of the fields in s.Group when grouping is enabled) and stores
// both into dm under "@groupkey" and "@index".
// The redundant trailing bare return of the original was removed.
func (s *StatReq) GroupKeyRedis(key string, dm map[string]interface{}) {
	id := ""
	tm := map[string]string{}
	tl := strings.Split(key, "_")
	for i, v := range tl {
		if v == "" {
			continue
		}
		// A "@name" segment is followed by its value segment.
		// NOTE(review): assumes a value segment always follows; tl[i+1]
		// would panic on a key ending in "@name" — confirm key format.
		if v[:1] == "@" {
			tm[v[1:]] = tl[i+1]
		}
	}
	if s.IsGroup {
		for _, i := range s.Group {
			id += tm[i]
		}
	}
	dm["@groupkey"] = id
	dm["@index"] = tm
}
// GroupKeyMgo computes the group key for a mongo document from its
// index map: the concatenated values of the fields in s.Group, with
// "_" replaced by "." in each value.
func (s *StatReq) GroupKeyMgo(index map[string]interface{}) (id string) {
	// group entries can only be keys that appear in the index
	if s.IsGroup {
		for _, k := range s.Group {
			if v, ok := index[k]; ok {
				id += strings.Replace(v.(string), "_", ".", -1)
			}
		}
	}
	return
}

// GSKey computes the final group key of one data row, folding the time
// span into the key when span grouping is enabled; it also snaps the
// row's s_time/e_time to the span boundaries. skip is true when the
// row's time window does not fit inside the computed span.
func (s *StatReq) GSKey(d map[string]interface{}) (skip bool, id string) {
	// compute the grouping key for each data row
	id = d["@groupkey"].(string)
	if s.IsSpan {
		t := utils.TInt64(d["e_time"]) - 1
		e_time := utils.DurationMap[s.SpanD](t, s.Span)
		s_time := utils.DurationMap[s.SpanD+"l"](e_time, s.Span)
		if utils.TInt64(d["e_time"]) <= e_time && s_time <= utils.TInt64(d["s_time"]) {
			d["s_time"], d["e_time"] = s_time, e_time
			id += fmt.Sprintf("%d%d", s_time, e_time)
		} else {
			// the row does not satisfy the time window: skip it
			skip = true
		}
	}
	return
}

// Stat decodes a StatReq from d, collects matching rows from redis
// (recent data) and mongo (historical data) for the given tag, groups
// and aggregates them by group key and optional time span, backfills
// empty span buckets, sorts and paginates, then returns
// {num, list, total}.
// NOTE(review): the (error, interface{}) return order is unconventional
// for Go but is kept for compatibility with existing callers.
func Stat(d []byte, tag string) (error, interface{}) {
	req := StatReq{}
	err := utils.JsonDecode(d, &req)
	if err != nil {
		return err, nil
	}
	if len(req.Group) != 0 {
		req.IsGroup = true
	}
	if req.Span != 0 {
		req.IsSpan = true
	}
	if req.ETime == 0 {
		req.ETime = utils.DurationMap["d"](utils.GetNowSec(), 1)
	}
	mgos := utils.MgoSessionDupl()
	defer mgos.Close()
	mc := mgos.DB(utils.MongoPrefix + tag)
	query := bson.M{}
	if len(req.DataQuery) > 0 {
		query = req.DataQuery
	}
	if len(req.Index) > 0 {
		for k, v := range req.Index {
			query["index."+k] = v
		}
	}
	query["s_time"] = bson.M{"$gte": req.STime}
	query["e_time"] = bson.M{"$lte": req.ETime}
	// collect the data
	rdsconn := utils.NewRedisConn()
	defer rdsconn.Close()
	tl := []map[string]interface{}{}
	var keys interface{}
	termConfig := models.TermConfigMap[tag][req.Term]
	for _, tmpkey := range utils.GetRdsKeyByIndex(req.Index, termConfig.Key) {
		if tmpkey.Re {
			rdsk := utils.RDSPrefix + "_" + tag + "_" + tmpkey.Key
			// get from redis
			keys, err = rdsconn.Dos("KEYS", rdsk)
			if err != nil {
				continue
			}
		} else {
			keys = []interface{}{[]byte(utils.RDSPrefix + "_" + tag + "_" + tmpkey.Key)}
		}
		for _, k := range keys.([]interface{}) {
			tk := string(k.([]byte))
			rdsd, err := rdsconn.Dos("HGETALL", tk)
			// NOTE(review): rdsd is type-asserted before err is checked;
			// a failed HGETALL returning a nil reply would panic here —
			// confirm the Dos contract.
			tb := rdsd.([]interface{})
			if err == nil && len(tb) != 0 {
				dm := map[string]interface{}{}
				// HGETALL replies alternate field, value.
				for i := 0; i < len(tb); i = i + 2 {
					dm[string(tb[i].([]uint8))], _ = strconv.ParseFloat(string(tb[i+1].([]uint8)), 64)
				}
				if utils.TInt64(dm["s_time"]) >= req.STime && (utils.TInt64(dm["e_time"]) <= req.ETime || req.ETime == 0) {
					req.GroupKeyRedis(tk, dm)
					tl = append(tl, dm)
				}
			}
		}
	}
	// redis end
	// mgo start
	mgoList := []map[string]interface{}{}
	groupMap := map[string]string{}
	objs := []map[string]interface{}{}
	mc.C(utils.MongoOBJ + req.Term).Find(query).All(&objs)
	if len(objs) > 0 {
		var tmpKey string
		var tmpIndex map[string]interface{}
		for _, v := range objs {
			tmpKey = v["key"].(string)
			tmpIndex = v["index"].(map[string]interface{})
			// Cache the group key per document key to avoid recomputing.
			if groupkey, ok := groupMap[tmpKey]; ok {
				v["@groupkey"] = groupkey
			} else {
				groupMap[tmpKey] = req.GroupKeyMgo(tmpIndex)
				v["@groupkey"] = groupMap[tmpKey]
			}
			v["@index"] = tmpIndex
			mgoList = append(mgoList, v)
		}
	}
	tl = append(mgoList, tl...)
	// mongo end
	// group and span
	groupdata := map[string]int{}
	data := []map[string]interface{}{}
	for _, l := range tl {
		skip, gsk := req.GSKey(l)
		l["@groupkey"] = gsk
		if skip {
			// time window not satisfied: skip
			continue
		}
		if v, ok := groupdata[gsk]; ok {
			// accumulate rows belonging to the same group
			rotateObj(l, data[v], termConfig.SpKey)
		} else {
			// a new group
			data = append(data, l)
			groupdata[gsk] = len(data) - 1
		}
	}
	sortdata := []interface{}{}
	total := map[string]interface{}{}
	for _, v := range data {
		// accumulate the grand total
		rotateObj(v, total, termConfig.SpKey)
		// apply special-key handling per row and add to the sort set
		sortdata = append(sortdata, spkeystat(v, termConfig.SpKey))
	}
	// spkey: special-key handling for the totals
	total = spkeystat(total, termConfig.SpKey)
	// with span grouping, backfill empty buckets for spans with no data
	if req.IsSpan && (req.STime != 0 || len(sortdata) > 0) {
		emptyIndex := map[string]string{}
		for _, k := range termConfig.Key {
			emptyIndex[k[1:]] = ""
		}
		keymaps := map[int64][]interface{}{}
		var dataTime int64
		for _, data := range sortdata {
			dataTime = utils.TInt64(data.(map[string]interface{})["s_time"])
			if _, ok := keymaps[dataTime]; !ok {
				keymaps[dataTime] = []interface{}{data}
			} else {
				keymaps[dataTime] = append(keymaps[dataTime], data)
			}
		}
		nums := len(sortdata)
		sortdata = []interface{}{}
		var stime, etime int64
		etime = req.ETime
		// Walk span windows backwards from ETime, emitting either the
		// rows that fall in the window or an empty placeholder bucket.
		for {
			etime = utils.DurationMap[req.SpanD](etime-1, req.Span)
			stime = utils.DurationMap[req.SpanD+"l"](etime, req.Span)
			if etime <= req.STime {
				break
			}
			if req.STime == 0 && nums == 0 {
				logrus.Infoln("1")
				break
			}
			if v, ok := keymaps[stime]; ok {
				sortdata = append(sortdata, v...)
				nums--
			} else {
				sortdata = append(sortdata, map[string]interface{}{"s_time": stime, "e_time": etime, "@index": emptyIndex})
			}
			etime = stime
		}
	}
	// sort
	if len(req.Sort) == 2 {
		sortdata = SortMapList(sortdata, req.Sort[0], req.Sort[1].(bool))
	}
	lens := len(sortdata)
	// limit
	lm := req.Limit + req.Skip
	if lm != 0 {
		start, end := 0, 0
		if lm >= lens {
			end = lens
		} else {
			end = lm
		}
		if req.Skip <= lens {
			start = req.Skip
		} else {
			start = end
		}
		sortdata = sortdata[start:end]
	}
	return nil, map[string]interface{}{"num": lens, "list": sortdata, "total": total}
}
func SortMapList(source []interface{}, name interface{}, asc bool) []interface{} {
s := &SortMapLister{
source,
func(a, b interface{}) bool {
a1, b1 := a.(map[string]interface{}), b.(map[string]interface{})
va, vb := a1[name.(string)], b1[name.(string)]
if va == nil {
return true
} else if vb == nil {
return false
} else {
switch va.(type) {
case int, int64, float32, float64:
return utils.TFloat64(va) < utils.TFloat64(vb)
case string:
return va.(string) < vb.(string)
default:
return false
}
}
},
}
if asc {
sort.Sort(s)
} else {
sort.Sort(sort.Reverse(s))
}
return s.List
}
// SortMapLister adapts an []interface{} plus a user-supplied less
// function to sort.Interface, so SortMapList can delegate to sort.Sort.
type SortMapLister struct {
	List      []interface{} // elements being ordered in place
	FrontFunc func(a, b interface{}) bool // reports whether a sorts before b
}

// Len returns the number of elements to sort.
func (l SortMapLister) Len() int { return len(l.List) }

// Swap exchanges the elements at positions i and j.
func (l SortMapLister) Swap(i, j int) { l.List[i], l.List[j] = l.List[j], l.List[i] }

// Less delegates the ordering decision to FrontFunc.
func (l SortMapLister) Less(i, j int) bool { return l.FrontFunc(l.List[i], l.List[j]) }
// spkeystat runs the special-key post-processing (utils.StatSpKeyFuncs)
// over every statistic field of data whose name appears in spkey.
// Reserved time-range fields (s_time/e_time) and internal "@"-prefixed
// metadata keys are skipped. Returns data to allow call chaining.
func spkeystat(data map[string]interface{}, spkey map[string]string) map[string]interface{} {
	for tk := range data { // was `for tk, _ :=` — blank identifier was redundant
		if tk == "s_time" || tk == "e_time" {
			continue
		}
		// Length guard first: the original `tk[:1]` panics on an empty
		// map key; a byte comparison is equivalent for non-empty keys.
		if len(tk) == 0 || tk[0] == '@' {
			continue
		}
		if tpe, ok := spkey[tk]; ok {
			utils.StatSpKeyFuncs(tpe, tk, data)
		}
	}
	return data
}
|
package kubernetes
import (
"fmt"
"regexp"
"testing"
"github.com/coredns/coredns/plugin/test"
"github.com/miekg/dns"
)
// TestKubernetesEndpointPodNames exercises the `endpoint_pod_names`
// Corefile option: SRV answers for a headless service should target the
// pod name when it is a valid DNS label, and fall back to the dashed-IP
// form when the pod name is too long.
func TestKubernetesEndpointPodNames(t *testing.T) {
	var tests = []struct {
		test.Case
		TargetRegEx             string
		AnswerCount, ExtraCount int
	}{
		{
			Case:        test.Case{Qname: "headless-1.test-3.svc.cluster.local.", Qtype: dns.TypeSRV, Rcode: dns.RcodeSuccess},
			AnswerCount: 1,
			ExtraCount:  1,
			TargetRegEx: "^test-name\\.headless-1\\.test-3\\.svc\\.cluster\\.local\\.$",
		},
		{
			Case:        test.Case{Qname: "headless-2.test-3.svc.cluster.local.", Qtype: dns.TypeSRV, Rcode: dns.RcodeSuccess},
			AnswerCount: 1,
			ExtraCount:  1,
			// The pod name selected by headless-2 exceeds the valid dns label length, so it should fallback to the dashed-ip
			TargetRegEx: "^[0-9]+-[0-9]+-[0-9]+-[0-9]+\\.headless-2\\.test-3\\.svc\\.cluster\\.local\\.$",
		},
	}

	// namespace test-3 contains headless services/deployments for this test.
	// enable endpoint_pod_names in Corefile
	corefile := `    .:53 {
        health
        ready
        errors
        log
        kubernetes cluster.local 10.in-addr.arpa {
            namespaces test-3
            endpoint_pod_names
        }
    }
`
	err := LoadCorefile(corefile)
	if err != nil {
		t.Fatalf("Could not load corefile: %s", err)
	}

	// The client pod runs in a different namespace than the services under test.
	namespace := "test-1"
	err = StartClientPod(namespace)
	if err != nil {
		t.Fatalf("failed to start client pod: %s", err)
	}
	for _, expected := range tests {
		t.Run(fmt.Sprintf("%s %s", expected.Qname, dns.TypeToString[expected.Qtype]), func(t *testing.T) {
			result, err := DoIntegrationTest(expected.Case, namespace)
			if err != nil {
				// t.Error(err), not t.Errorf(err.Error()): passing a
				// non-constant string as a format string trips `go vet`
				// and corrupts output if the error contains '%'.
				t.Error(err)
			}
			if len(result.Answer) != expected.AnswerCount {
				t.Errorf("Expected %v answers, got %v.", expected.AnswerCount, len(result.Answer))
			}
			if len(result.Extra) != expected.ExtraCount {
				t.Errorf("Expected %v additionals, got %v.", expected.ExtraCount, len(result.Extra))
			}
			if len(result.Answer) > 0 {
				match, err := regexp.Match(expected.TargetRegEx, []byte(result.Answer[0].(*dns.SRV).Target))
				if err != nil {
					t.Error(err)
				}
				if !match {
					t.Errorf("Answer target %q did not match regex %q", result.Answer[0].(*dns.SRV).Target, expected.TargetRegEx)
				}
			}
			if len(result.Extra) > 0 {
				match, err := regexp.Match(expected.TargetRegEx, []byte(result.Extra[0].Header().Name))
				if err != nil {
					t.Error(err)
				}
				if !match {
					t.Errorf("Extra name %q did not match regex %q", result.Extra[0].Header().Name, expected.TargetRegEx)
				}
			}
			if t.Failed() {
				t.Errorf("coredns log: %s", CorednsLogs())
			}
		})
	}
}
|
package suites
import (
"crypto/tls"
"fmt"
"io"
"net/http"
"strings"
"testing"
"github.com/stretchr/testify/suite"
"github.com/valyala/fasthttp"
)
// NewRequestMethodScenario constructs an empty RequestMethodScenario;
// the HTTP client is wired up later by SetupSuite.
func NewRequestMethodScenario() *RequestMethodScenario {
	scenario := new(RequestMethodScenario)
	return scenario
}
// RequestMethodScenario is a testify suite that checks HTTP verb
// handling (405 Allow headers, HEAD semantics) against a running
// Authelia instance at AutheliaBaseURL.
type RequestMethodScenario struct {
	suite.Suite
	// client is configured in SetupSuite: TLS verification disabled,
	// redirects not followed.
	client *http.Client
}
// SetupSuite builds the suite's HTTP client. TLS verification is
// skipped (the suite targets self-signed endpoints) and redirects are
// never followed, so each response is inspected as-is.
func (s *RequestMethodScenario) SetupSuite() {
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // Needs to be enabled in suites. Not used in production.
	}

	noRedirect := func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse
	}

	s.client = &http.Client{
		Transport:     transport,
		CheckRedirect: noRedirect,
	}
}
// TestShouldRespondWithAppropriateMethodNotAllowedHeaders ensures
// GET-only endpoints reject other verbs with 405 and advertise the
// permitted verbs in the Allow header.
func (s *RequestMethodScenario) TestShouldRespondWithAppropriateMethodNotAllowedHeaders() {
	testCases := []struct {
		name     string
		method   string
		uri      string
		expected []string
	}{
		{"RootPathShouldShowAllowedMethodsOnInvalidRequest", fasthttp.MethodPost, AutheliaBaseURL, []string{fasthttp.MethodGet, fasthttp.MethodHead, fasthttp.MethodOptions}},
		{"OpenAPISpecificationShouldShowAllowedMethodsOnInvalidRequest", fasthttp.MethodPost, fmt.Sprintf("%s/api/openapi.yml", AutheliaBaseURL), []string{fasthttp.MethodGet, fasthttp.MethodHead, fasthttp.MethodOptions}},
		{"LocalesShouldShowAllowedMethodsOnInvalidRequest", fasthttp.MethodPost, fmt.Sprintf("%s/locales/en/portal.json", AutheliaBaseURL), []string{fasthttp.MethodGet, fasthttp.MethodHead, fasthttp.MethodOptions}},
	}

	for _, tc := range testCases {
		s.Run(tc.name, func() {
			req, err := http.NewRequest(tc.method, tc.uri, nil)
			s.Assert().NoError(err)

			res, err := s.client.Do(req)
			s.Assert().NoError(err)

			// Do may return a nil response on error; the original would
			// have panicked on res.StatusCode below in that case.
			if res == nil {
				return
			}

			// Close the body so the transport can reuse the connection
			// (the original leaked every response body).
			defer res.Body.Close()

			s.Assert().Equal(fasthttp.StatusMethodNotAllowed, res.StatusCode)
			s.Assert().Equal(strings.Join(tc.expected, ", "), res.Header.Get(fasthttp.HeaderAllow))
		})
	}
}
// TestShouldRespondWithAppropriateResponseWithMethodHEAD checks that
// HEAD requests succeed, report a Content-Length, and carry no body.
func (s *RequestMethodScenario) TestShouldRespondWithAppropriateResponseWithMethodHEAD() {
	testCases := []struct {
		name                  string
		uri                   string
		expectedStatus        int
		expectedContentLength bool
	}{
		{"RootPathShouldShowContentLengthAndRespondOK", AutheliaBaseURL, fasthttp.StatusOK, true},
		{"OpenAPISpecShouldShowContentLengthAndRespondOK", fmt.Sprintf("%s/api/openapi.yml", AutheliaBaseURL), fasthttp.StatusOK, true},
		{"LocalesShouldShowContentLengthAndRespondOK", fmt.Sprintf("%s/locales/en/portal.json", AutheliaBaseURL), fasthttp.StatusOK, true},
	}

	for _, tc := range testCases {
		s.Run(tc.name, func() {
			req, err := http.NewRequest(fasthttp.MethodHead, tc.uri, nil)
			s.Assert().NoError(err)

			res, err := s.client.Do(req)
			s.Assert().NoError(err)

			// Do may return a nil response on error.
			if res == nil {
				return
			}

			// Close the body so the transport can reuse the connection
			// (the original leaked every response body).
			defer res.Body.Close()

			s.Assert().Equal(tc.expectedStatus, res.StatusCode)

			// res.ContentLength is int64. The untyped literal 0 became an
			// int in the original testify calls, so NotEqual(0, int64)
			// passed vacuously on type mismatch. Compare as int64.
			if tc.expectedContentLength {
				s.Assert().NotEqual(int64(0), res.ContentLength)
			} else {
				s.Assert().Equal(int64(0), res.ContentLength)
			}

			data, err := io.ReadAll(res.Body)
			s.Assert().NoError(err)
			s.Assert().Len(data, 0)
		})
	}
}
func TestRunRequestMethod(t *testing.T) {
if testing.Short() {
t.Skip("skipping suite test in short mode")
}
suite.Run(t, NewRequestMethodScenario())
}
|
//
// Copyright (C) 2019-2021 vdaas.org vald team <vald@vdaas.org>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package config providers configuration type and load configuration logic
package config
import (
"github.com/vdaas/vald/internal/db/kvs/redis"
"github.com/vdaas/vald/internal/net/tcp"
"github.com/vdaas/vald/internal/tls"
)
// Redis holds the redis client configuration as it appears in JSON/YAML
// config files. Duration-like settings are kept as strings here and
// resolved when Opts builds the concrete client options; Bind expands
// value references first.
type Redis struct {
	Addrs                []string `json:"addrs" yaml:"addrs"`
	DB                   int      `json:"db" yaml:"db"`
	DialTimeout          string   `json:"dial_timeout" yaml:"dial_timeout"`
	IdleCheckFrequency   string   `json:"idle_check_frequency" yaml:"idle_check_frequency"`
	IdleTimeout          string   `json:"idle_timeout" yaml:"idle_timeout"`
	InitialPingTimeLimit string   `json:"initial_ping_time_limit" yaml:"initial_ping_time_limit"`
	InitialPingDuration  string   `json:"initial_ping_duration" yaml:"initial_ping_duration"`
	KeyPref              string   `json:"key_pref" yaml:"key_pref"`
	MaxConnAge           string   `json:"max_conn_age" yaml:"max_conn_age"`
	MaxRedirects         int      `json:"max_redirects" yaml:"max_redirects"`
	MaxRetries           int      `json:"max_retries" yaml:"max_retries"`
	MaxRetryBackoff      string   `json:"max_retry_backoff" yaml:"max_retry_backoff"`
	MinIdleConns         int      `json:"min_idle_conns" yaml:"min_idle_conns"`
	MinRetryBackoff      string   `json:"min_retry_backoff" yaml:"min_retry_backoff"`
	Password             string   `json:"password" yaml:"password"`
	PoolSize             int      `json:"pool_size" yaml:"pool_size"`
	PoolTimeout          string   `json:"pool_timeout" yaml:"pool_timeout"`
	// ReadOnly is currently not applied by Opts (see the commented-out
	// WithReadOnlyFlag there).
	ReadOnly       bool   `json:"read_only" yaml:"read_only"`
	ReadTimeout    string `json:"read_timeout" yaml:"read_timeout"`
	RouteByLatency bool   `json:"route_by_latency" yaml:"route_by_latency"`
	RouteRandomly  bool   `json:"route_randomly" yaml:"route_randomly"`
	// TLS and TCP are nested configs; Bind guarantees both are non-nil.
	TLS             *TLS   `json:"tls" yaml:"tls"`
	TCP             *TCP   `json:"tcp" yaml:"tcp"`
	WriteTimeout    string `json:"write_timeout" yaml:"write_timeout"`
	KVPrefix        string `json:"kv_prefix" yaml:"kv_prefix"`
	VKPrefix        string `json:"vk_prefix" yaml:"vk_prefix"`
	PrefixDelimiter string `json:"prefix_delimiter" yaml:"prefix_delimiter"`
}
// Bind resolves value references in every string field (via
// GetActualValue/GetActualValues) and ensures the nested TLS and TCP
// configs are non-nil so later code can use them unconditionally.
// It returns the receiver for chaining.
func (r *Redis) Bind() *Redis {
	if r.TLS != nil {
		r.TLS.Bind()
	} else {
		r.TLS = new(TLS)
	}
	if r.TCP != nil {
		r.TCP.Bind()
	} else {
		r.TCP = new(TCP)
	}
	r.Addrs = GetActualValues(r.Addrs)
	// The original resolved DialTimeout twice in a row; once suffices.
	r.DialTimeout = GetActualValue(r.DialTimeout)
	r.IdleCheckFrequency = GetActualValue(r.IdleCheckFrequency)
	r.IdleTimeout = GetActualValue(r.IdleTimeout)
	r.KeyPref = GetActualValue(r.KeyPref)
	r.MaxConnAge = GetActualValue(r.MaxConnAge)
	r.MaxRetryBackoff = GetActualValue(r.MaxRetryBackoff)
	r.MinRetryBackoff = GetActualValue(r.MinRetryBackoff)
	r.Password = GetActualValue(r.Password)
	r.PoolTimeout = GetActualValue(r.PoolTimeout)
	r.ReadTimeout = GetActualValue(r.ReadTimeout)
	r.WriteTimeout = GetActualValue(r.WriteTimeout)
	r.KVPrefix = GetActualValue(r.KVPrefix)
	r.VKPrefix = GetActualValue(r.VKPrefix)
	r.PrefixDelimiter = GetActualValue(r.PrefixDelimiter)
	r.InitialPingTimeLimit = GetActualValue(r.InitialPingTimeLimit)
	r.InitialPingDuration = GetActualValue(r.InitialPingDuration)
	return r
}
// Opts converts the bound configuration into a list of redis.Option
// values. Cluster mode (more than one address) gets the redirect limit;
// single-node mode gets the DB index. TLS and dialer options are added
// when their configs are present.
func (r *Redis) Opts() (opts []redis.Option, err error) {
	opts = []redis.Option{
		redis.WithAddrs(r.Addrs...),
		redis.WithDialTimeout(r.DialTimeout),
		redis.WithIdleCheckFrequency(r.IdleCheckFrequency),
		redis.WithIdleTimeout(r.IdleTimeout),
		redis.WithKeyPrefix(r.KeyPref),
		redis.WithMaximumConnectionAge(r.MaxConnAge),
		redis.WithRetryLimit(r.MaxRetries),
		redis.WithMaximumRetryBackoff(r.MaxRetryBackoff),
		redis.WithMinimumIdleConnection(r.MinIdleConns),
		redis.WithMinimumRetryBackoff(r.MinRetryBackoff),
		redis.WithOnConnectFunction(func(conn *redis.Conn) error {
			return nil
		}),
		// redis.WithOnNewNodeFunction(f func(*redis.Client)) ,
		redis.WithPassword(r.Password),
		redis.WithPoolSize(r.PoolSize),
		redis.WithPoolTimeout(r.PoolTimeout),
		// redis.WithReadOnlyFlag(readOnly bool) ,
		redis.WithReadTimeout(r.ReadTimeout),
		redis.WithRouteByLatencyFlag(r.RouteByLatency),
		redis.WithRouteRandomlyFlag(r.RouteRandomly),
		redis.WithWriteTimeout(r.WriteTimeout),
		redis.WithInitialPingDuration(r.InitialPingDuration),
		redis.WithInitialPingTimeLimit(r.InitialPingTimeLimit),
	}
	if r.TLS != nil && r.TLS.Enabled {
		// Named cfg, not tls: the original shadowed the imported tls
		// package with the local variable.
		cfg, err := tls.New(r.TLS.Opts()...)
		if err != nil {
			return nil, err
		}
		opts = append(opts, redis.WithTLSConfig(cfg))
	}
	if r.TCP != nil {
		dialer, err := tcp.NewDialer(r.TCP.Opts()...)
		if err != nil {
			return nil, err
		}
		opts = append(opts, redis.WithDialer(dialer))
	}
	if len(r.Addrs) > 1 {
		opts = append(opts,
			redis.WithRedirectLimit(r.MaxRedirects),
		)
	} else {
		opts = append(opts,
			redis.WithDB(r.DB),
		)
	}
	return opts, nil
}
|
package tar
import (
"github.com/root-gg/utils"
)
// BackendConfig object.
//
// Tar is the path to the tar binary, Compress names the compression
// mode, and Options carries extra flags — presumably passed through to
// the tar invocation; confirm against the backend that consumes this.
type BackendConfig struct {
	Tar      string
	Compress string
	Options  string
}
// NewTarBackendConfig instantiate a new Backend Configuration
// from config map passed as argument. Defaults (/bin/tar, gzip) are set
// first and may be overridden by matching keys in config.
func NewTarBackendConfig(config map[string]interface{}) (tb *BackendConfig) {
	tb = &BackendConfig{
		Tar:      "/bin/tar",
		Compress: "gzip",
	}
	utils.Assign(tb, config)
	return tb
}
|
package git
import (
"fmt"
"git-get/pkg/git/test"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
)
// TestUncommitted checks Uncommitted's count of staged-but-uncommitted
// changes across repositories in different states.
func TestUncommitted(t *testing.T) {
	tests := []struct {
		name      string
		repoMaker func(*testing.T) *test.Repo
		want      int
	}{
		{
			name:      "empty",
			repoMaker: test.RepoEmpty,
			want:      0,
		},
		{
			name:      "single untracked",
			repoMaker: test.RepoWithUntracked,
			want:      0,
		},
		{
			name:      "single tracked",
			repoMaker: test.RepoWithStaged,
			want:      1,
		},
		{
			name:      "committed",
			repoMaker: test.RepoWithCommit,
			want:      0,
		},
		{
			name:      "untracked and uncommitted",
			repoMaker: test.RepoWithUncommittedAndUntracked,
			want:      1,
		},
	}

	// Loop variable renamed from `test` to `tc`: it shadowed the
	// imported `test` helper package.
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			r, _ := Open(tc.repoMaker(t).Path())
			got, err := r.Uncommitted()
			if err != nil {
				t.Errorf("got error %q", err)
			}

			if got != tc.want {
				t.Errorf("expected %d; got %d", tc.want, got)
			}
		})
	}
}
// TestUntracked checks Untracked's count of untracked files across
// repositories in different states.
func TestUntracked(t *testing.T) {
	tests := []struct {
		name      string
		repoMaker func(*testing.T) *test.Repo
		want      int
	}{
		{
			name:      "empty",
			repoMaker: test.RepoEmpty,
			want:      0,
		},
		{
			name:      "single untracked",
			repoMaker: test.RepoWithUntracked,
			want:      0,
		},
		{
			name:      "single tracked ",
			repoMaker: test.RepoWithStaged,
			want:      1,
		},
		{
			name:      "committed",
			repoMaker: test.RepoWithCommit,
			want:      0,
		},
		{
			name:      "untracked and uncommitted",
			repoMaker: test.RepoWithUncommittedAndUntracked,
			want:      1,
		},
	}

	// Loop variable renamed from `test` to `tc`: it shadowed the
	// imported `test` helper package.
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			r, _ := Open(tc.repoMaker(t).Path())
			// Copy-paste fix: this test exercised r.Uncommitted(), so
			// Untracked had no coverage at all. Verify the `want`
			// values still match Untracked's semantics.
			got, err := r.Untracked()
			if err != nil {
				t.Errorf("got error %q", err)
			}

			if got != tc.want {
				t.Errorf("expected %d; got %d", tc.want, got)
			}
		})
	}
}
// TestCurrentBranch verifies CurrentBranch for master-only, branch and
// detached-HEAD (tag checkout) repositories.
func TestCurrentBranch(t *testing.T) {
	tests := []struct {
		name      string
		repoMaker func(*testing.T) *test.Repo
		want      string
	}{
		// TODO: maybe add wantErr to check if error is returned correctly?
		// {
		// 	name:      "empty",
		// 	repoMaker: newTestRepo,
		// 	want:      "",
		// },
		{
			name:      "only master branch",
			repoMaker: test.RepoWithCommit,
			want:      master,
		},
		{
			name:      "checked out new branch",
			repoMaker: test.RepoWithBranch,
			want:      "feature/branch",
		},
		{
			name:      "checked out new tag",
			repoMaker: test.RepoWithTag,
			want:      head,
		},
	}

	// Loop variable renamed from `test` to `tc`: it shadowed the
	// imported `test` helper package.
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			r, _ := Open(tc.repoMaker(t).Path())
			got, err := r.CurrentBranch()
			if err != nil {
				t.Errorf("got error %q", err)
			}

			if got != tc.want {
				t.Errorf("expected %q; got %q", tc.want, got)
			}
		})
	}
}
// TestBranches verifies the list of local branch names returned by
// Branches for various repository shapes.
func TestBranches(t *testing.T) {
	tests := []struct {
		name      string
		repoMaker func(*testing.T) *test.Repo
		want      []string
	}{
		{
			name:      "empty",
			repoMaker: test.RepoEmpty,
			want:      []string{""},
		},
		{
			name:      "only master branch",
			repoMaker: test.RepoWithCommit,
			want:      []string{"master"},
		},
		{
			name:      "new branch",
			repoMaker: test.RepoWithBranch,
			want:      []string{"feature/branch", "master"},
		},
		{
			name:      "checked out new tag",
			repoMaker: test.RepoWithTag,
			want:      []string{"master"},
		},
	}

	// Loop variable renamed from `test` to `tc`: it shadowed the
	// imported `test` helper package.
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			r, _ := Open(tc.repoMaker(t).Path())
			got, err := r.Branches()
			if err != nil {
				t.Errorf("got error %q", err)
			}

			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("expected %+v; got %+v", tc.want, got)
			}
		})
	}
}
// TestUpstream verifies the upstream tracking ref reported for a given
// branch name, including branches with no upstream configured.
func TestUpstream(t *testing.T) {
	tests := []struct {
		name      string
		repoMaker func(*testing.T) *test.Repo
		branch    string
		want      string
	}{
		{
			name:      "empty",
			repoMaker: test.RepoEmpty,
			branch:    "master",
			want:      "",
		},
		// TODO: add wantErr
		{
			name:      "wrong branch name",
			repoMaker: test.RepoWithCommit,
			branch:    "wrong_branch_name",
			want:      "",
		},
		{
			name:      "master with upstream",
			repoMaker: test.RepoWithBranchWithUpstream,
			branch:    "master",
			want:      "origin/master",
		},
		{
			name:      "branch with upstream",
			repoMaker: test.RepoWithBranchWithUpstream,
			branch:    "feature/branch",
			want:      "origin/feature/branch",
		},
		{
			name:      "branch without upstream",
			repoMaker: test.RepoWithBranchWithoutUpstream,
			branch:    "feature/branch",
			want:      "",
		},
	}

	// Loop variable renamed from `test` to `tc`: it shadowed the
	// imported `test` helper package.
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			r, _ := Open(tc.repoMaker(t).Path())
			got, _ := r.Upstream(tc.branch)
			// TODO:
			// if err != nil {
			// 	t.Errorf("got error %q", err)
			// }

			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("expected %+v; got %+v", tc.want, got)
			}
		})
	}
}
// TestAheadBehind checks the commits-ahead/commits-behind counts of a
// branch relative to its upstream.
func TestAheadBehind(t *testing.T) {
	tests := []struct {
		name      string
		repoMaker func(*testing.T) *test.Repo
		branch    string
		want      []int
	}{
		{
			name:      "fresh clone",
			repoMaker: test.RepoWithBranchWithUpstream,
			branch:    "master",
			want:      []int{0, 0},
		},
		{
			name:      "branch ahead",
			repoMaker: test.RepoWithBranchAhead,
			branch:    "feature/branch",
			want:      []int{1, 0},
		},
		{
			name:      "branch behind",
			repoMaker: test.RepoWithBranchBehind,
			branch:    "feature/branch",
			want:      []int{0, 1},
		},
		{
			name:      "branch ahead and behind",
			repoMaker: test.RepoWithBranchAheadAndBehind,
			branch:    "feature/branch",
			want:      []int{2, 1},
		},
	}

	// Loop variable renamed from `test` to `tc`: it shadowed the
	// imported `test` helper package.
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			r, _ := Open(tc.repoMaker(t).Path())
			upstream, err := r.Upstream(tc.branch)
			if err != nil {
				t.Errorf("got error %q", err)
			}

			ahead, behind, err := r.AheadBehind(tc.branch, upstream)
			if err != nil {
				t.Errorf("got error %q", err)
			}

			if ahead != tc.want[0] || behind != tc.want[1] {
				t.Errorf("expected %+v; got [%d, %d]", tc.want, ahead, behind)
			}
		})
	}
}
// TestCleanupFailedClone verifies that cleanupFailedClone removes the
// aborted clone directory and any empty parents, but never deletes
// directories that still contain files.
//
// Test dir structure:
// root
// └── a/
//     ├── b/
//     │   └── c/
//     └── x/
//         └── y/
//             └── file.txt
func TestCleanupFailedClone(t *testing.T) {
	tests := []struct {
		path     string // path to cleanup
		wantGone string // this path should be deleted, if empty - nothing should be deleted
		wantStay string // this path shouldn't be deleted
	}{
		{
			path:     "a/b/c/repo",
			wantGone: "a/b/c/repo",
			wantStay: "a",
		}, {
			path:     "a/b/c/repo",
			wantGone: "a/b",
			wantStay: "a",
		}, {
			path:     "a/b/repo",
			wantGone: "",
			wantStay: "a/b/c",
		}, {
			path:     "a/x/y/repo",
			wantGone: "",
			wantStay: "a/x/y",
		},
	}

	// Loop variable renamed from `test` to `tc`: it shadowed the
	// imported `test` helper package.
	for i, tc := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			root := createTestDirTree(t)
			path := filepath.Join(root, tc.path)

			cleanupFailedClone(path)

			if tc.wantGone != "" {
				wantGone := filepath.Join(root, tc.wantGone)
				assert.NoDirExists(t, wantGone, "%s dir should be deleted during the cleanup", wantGone)
			}

			if tc.wantStay != "" {
				wantLeft := filepath.Join(root, tc.wantStay)
				assert.DirExists(t, wantLeft, "%s dir should not be deleted during the cleanup", wantLeft)
			}
		})
	}
}
// createTestDirTree builds the directory layout documented on
// TestCleanupFailedClone under a fresh temp dir and returns its root.
func createTestDirTree(t *testing.T) string {
	root := test.TempDir(t, "")

	// Check every step individually: the original overwrote the error
	// from the first MkdirAll before ever inspecting it.
	if err := os.MkdirAll(filepath.Join(root, "a", "b", "c"), os.ModePerm); err != nil {
		t.Fatal(err)
	}
	if err := os.MkdirAll(filepath.Join(root, "a", "x", "y"), os.ModePerm); err != nil {
		t.Fatal(err)
	}
	if _, err := os.Create(filepath.Join(root, "a", "x", "y", "file.txt")); err != nil {
		t.Fatal(err)
	}
	return root
}
|
package main
/**
* This process creates or updates a record for each summoner ID
* in the list provided as an input. Each record includes a "daily"
* key that contains a bunch of records with summary stats for a
* given day.
*
* ./join-summoners --date=2014-08-07
*/
import (
gproto "code.google.com/p/goprotobuf/proto"
data "datamodel"
"flag"
"fmt"
beanstalk "github.com/iwanbk/gobeanstalk"
"log"
"proto"
"snapshot"
"sort"
"sync"
"time"
)
var GR_GROUP sync.WaitGroup
/**
 * Goroutine that generates a report for a single summoner ID. It reads
 * through all game records and retains those that were played by the
 * target summoner ID. It then condenses them into a single PlayerSnapshot
 * and saves it to MongoDB.
 */
func handle_summoner(request proto.JoinRequest, sid uint32) {
	// Deferred so the WaitGroup is released even if a retriever call
	// panics; the original called Done() only at the end, which could
	// leave main blocked on Wait() forever.
	defer GR_GROUP.Done()

	games := make([]*data.GameRecord, 0, 10)
	game_ids := make([]uint64, 0, 10)

	// Scan every game on the requested quickdates and keep the ones in
	// which this summoner appears on either team.
	retriever := data.LoLRetriever{}
	for _, qd := range request.Quickdates {
		games_iter := retriever.GetQuickdateGamesIter(qd)

		for games_iter.HasNext() {
			result := games_iter.Next()

			// Skip this record if the gameid is zero.
			if result.GameId == 0 {
				continue
			}

			keeper := false
			for _, team := range result.Teams {
				for _, player := range team.Players {
					if player.Player.SummonerId == sid {
						keeper = true
					}
				}
			}

			if keeper {
				games = append(games, &result)
				game_ids = append(game_ids, result.GameId)
			}
		}
	}

	// Now all games have been processed. We need to save the set of
	// games to a PlayerSnapshot for today.
	snap := data.PlayerSnapshot{}
	snap.CreationTimestamp = (uint64)(time.Now().Unix())
	snap.SummonerId = (uint32)(sid)
	snap.GamesList = game_ids
	snap.Stats = make(map[string]data.Metric)

	// Update each snapshot with new computations.
	for _, comp := range snapshot.Computations {
		name, metric := comp(snap, games)
		snap.Stats[name] = metric
	}

	// Fetch the summoner that this applies to.
	summoner, exists := retriever.GetSummoner(sid)
	// If the summoner doesn't exist, create it.
	if !exists {
		log.Println(fmt.Sprintf("Notice: Couldn't find summoner #%d; creating new instance.", sid))

		summoner = data.SummonerRecord{}
		summoner.SummonerId = sid
	}

	// Lazily initialize whichever bucket map is targeted: a freshly
	// created SummonerRecord has nil maps, and the original only
	// guarded Daily, so weekly/monthly requests for new summoners
	// would panic on assignment to a nil map.
	if summoner.Daily == nil {
		summoner.Daily = make(map[string]*data.PlayerSnapshot)
	}
	if summoner.Weekly == nil {
		summoner.Weekly = make(map[string]*data.PlayerSnapshot)
	}
	if summoner.Monthly == nil {
		summoner.Monthly = make(map[string]*data.PlayerSnapshot)
	}

	// The earliest quickdate labels the snapshot bucket.
	sort.Strings(request.Quickdates)
	quickdate_label := request.Quickdates[0]

	// Store the snapshot in the right bucket, depending on the label name.
	if *request.Label == "daily" {
		summoner.Daily[quickdate_label] = &snap
	} else if *request.Label == "weekly" {
		summoner.Weekly[quickdate_label] = &snap
	} else if *request.Label == "monthly" {
		summoner.Monthly[quickdate_label] = &snap
	} else {
		log.Fatal("Unknown time label:", request.Label)
	}

	// Store the revised summoner.
	retriever.StoreSummoner(&summoner)

	log.Println(fmt.Sprintf("Saved %s snapshot for summoner #%d on %s",
		*request.Label,
		sid,
		request.Quickdates[0]))
}
/**
 * The main function waits for join requests on beanstalk and forks off
 * a separate goroutine for each summoner ID contained in the request.
 *
 * It waits for all goroutines of a request to terminate before moving
 * on to the next request.
 */
func main() {
	flag.Parse()

	log.Println("Establishing connection to beanstalk...")
	bs, cerr := beanstalk.Dial("localhost:11300")
	if cerr != nil {
		log.Fatal(cerr)
	}

	for {
		// Wait until there's a message available.
		j, err := bs.Reserve()
		// Check the error BEFORE touching j: the original logged j.ID
		// first, which dereferences a nil job when Reserve fails.
		if err != nil {
			log.Fatal(err)
		}
		log.Println("Received request", j.ID)

		// Unmarshal the request; a corrupt payload is dropped instead of
		// being silently processed as an empty request.
		request := proto.JoinRequest{}
		if uerr := gproto.Unmarshal(j.Body, &request); uerr != nil {
			log.Println("Discarding malformed join request:", uerr)
			bs.Delete(j.ID)
			continue
		}

		// Add(1) must happen before the goroutine starts: the original
		// ordering allowed Done() to run before Add(1), which can panic
		// the WaitGroup or let Wait() return early.
		for _, summoner := range request.Summoners {
			GR_GROUP.Add(1)
			go handle_summoner(request, summoner)
		}

		// Wait until all summoners are done before moving on to the next request.
		GR_GROUP.Wait()

		// The task is done; we can delete it from the queue.
		bs.Delete(j.ID)
	}
}
|
package main
import (
"fmt"
"golang.org/x/tour/tree"
)
// Walk walks the tree t sending all values
// from the tree to the channel ch in ascending (in-order) order, then
// closes ch once the whole tree has been emitted.
func Walk(t *tree.Tree, ch chan int) {
	walkSubtree(t, ch)
	close(ch)
}

// walkSubtree recursively emits t's values in-order without closing ch.
// The original closed ch at every nil subtree, so the parent's next send
// panicked with "send on closed channel"; it also emitted values in
// pre-order, which breaks Same for equal trees of different shapes.
func walkSubtree(t *tree.Tree, ch chan int) {
	if t == nil {
		return
	}
	walkSubtree(t.Left, ch)
	ch <- t.Value
	walkSubtree(t.Right, ch)
}
// Same determines whether the trees
// t1 and t2 contain the same values.
func Same(t1, t2 *tree.Tree) bool {
	ch1 := make(chan int)
	ch2 := make(chan int)
	go Walk(t1, ch1)
	go Walk(t2, ch2)
	for v1 := range ch1 {
		v2, ok := <-ch2
		if !ok || v1 != v2 {
			return false
		}
	}
	// t2 must be exhausted as well: the original returned true even when
	// t2 still had extra values (it also printed debug output on every
	// mismatch, removed here).
	_, open := <-ch2
	return !open
}
// main builds two random trees with the same value set and reports
// whether Same considers them equal.
func main() {
	first, second := tree.New(2), tree.New(2)
	fmt.Println(first)
	fmt.Println(second)
	fmt.Println(Same(first, second))
}
|
package abstract_factory
// tensorFlowModel is the model product of the TensorFlow family in this
// abstract-factory example; it carries no state.
type tensorFlowModel struct {
}

// tensorFlowPredictor is the predictor product of the TensorFlow family.
type tensorFlowPredictor struct {
}

// tensorFlowConverter is the converter product of the TensorFlow family.
type tensorFlowConverter struct {
}
// Name identifies this model implementation by a fixed string.
func (model *tensorFlowModel) Name() string {
	return "tensorFlowModel"
}
// Predict returns a prediction score; this stub always yields 1.0.
func (predictor *tensorFlowPredictor) Predict() float32 {
	return 1.0
}
// Convert produces the TensorFlow-family MLModel for the given ModelVO;
// the input is currently ignored and a fresh empty model is returned.
func (converter *tensorFlowConverter) Convert(vo ModelVO) MLModel {
	return &tensorFlowModel{}
}
|
package Problem0273
import (
"strings"
)
// Word tables for English number names. Index 0 (and other unused
// slots) hold "" so lookups can index directly by digit value.
var lessThan21 = []string{
	"",
	"One",
	"Two",
	"Three",
	"Four",
	"Five",
	"Six",
	"Seven",
	"Eight",
	"Nine",
	"Ten",
	"Eleven",
	"Twelve",
	"Thirteen",
	"Fourteen",
	"Fifteen",
	"Sixteen",
	"Seventeen",
	"Eighteen",
	"Nineteen",
	"Twenty",
}

var ten = []string{
	"",
	"",
	"Twenty",
	"Thirty",
	"Forty",
	"Fifty",
	"Sixty",
	"Seventy",
	"Eighty",
	"Ninety",
}

var thousand = []string{
	"",
	"Thousand",
	"Million",
	"Billion",
}

// numberToWords converts a non-negative integer into its English words,
// e.g. 12345 -> "Twelve Thousand Three Hundred Forty Five".
func numberToWords(num int) string {
	if num == 0 {
		return "Zero"
	}
	words := ""
	// Consume three digits per iteration; `group` indexes the scale word
	// (Thousand, Million, Billion).
	for group := 0; num > 0; group++ {
		if chunk := num % 1000; chunk != 0 {
			words = lessK(chunk) + thousand[group] + " " + words
		}
		num /= 1000
	}
	return strings.TrimRight(words, " ")
}

// lessK spells out a number below 1000. The result carries a trailing
// space (or is empty for 0) so callers can concatenate pieces directly.
func lessK(num int) string {
	switch {
	case num == 0:
		return ""
	case num <= 20:
		return lessThan21[num] + " "
	case num < 100:
		return ten[num/10] + " " + lessK(num%10)
	default:
		return lessThan21[num/100] + " Hundred " + lessK(num%100)
	}
}
|
package main
import "github.com/nsf/termbox-go"
// Competition is the in-game scene state: the cursor position (x, y)
// on the 3x3 board, whose mark is to be placed next, and the board.
type Competition struct {
	x    int
	y    int
	turn Cell
	board *Board
}
// NewCompetition builds a competition scene on a fresh board, with
// `first` to move; the cursor starts at the zero-valued (0, 0).
func NewCompetition(first Cell) *Competition {
	c := &Competition{board: NewBoard()}
	c.turn = first
	return c
}
// SupressCursorXY clamps the cursor coordinates into the 3x3 board, so
// both x and y always end up within [0, 2].
func (c *Competition) SupressCursorXY() {
	if c.x < 0 {
		c.x = 0
	} else if c.x > 2 {
		c.x = 2
	}

	if c.y < 0 {
		c.y = 0
	} else if c.y > 2 {
		c.y = 2
	}
}
// React handles one termbox event for the competition scene: arrow keys
// move the cursor, space/enter places the current player's mark, and
// 'q'/Ctrl-C close the game. After every event it checks whether the
// board is finished and, if so, switches to the result scene.
func (c *Competition) React(g *Game, e termbox.Event) error {
	switch e.Type {
	case termbox.EventError:
		return e.Err
	case termbox.EventKey:
		switch e.Ch {
		case 'q':
			g.Close()
		}
		switch e.Key {
		case termbox.KeyCtrlC:
			g.Close()
		case termbox.KeyArrowLeft:
			c.x--
		case termbox.KeyArrowRight:
			c.x++
		case termbox.KeyArrowUp:
			c.y--
		case termbox.KeyArrowDown:
			c.y++
		case termbox.KeySpace, termbox.KeyEnter:
			// Clamp before placing so a mark can never land off-board.
			c.SupressCursorXY()
			err := c.board.Put(c.x, c.y, c.turn)
			switch err {
			case ErrIndexOutOfBoard:
				return err
			case ErrAlreadyPlaced:
				// Occupied cell: ignore the keypress, keep the same turn.
				return nil
			case nil:
				// Successful placement passes the turn to the other player.
				c.turn = c.turn.Reversed()
			}
		}
	}
	// Game over: record the winner (draws update neither counter) and
	// switch to the result scene.
	if c.board.Finished() {
		w := c.board.Winner()
		switch w {
		case Circle:
			g.CountUpCircleWin()
		case Cross:
			g.CountUpCrossWin()
		}
		g.SetScene(NewResult(c.board, w))
	}
	// Re-clamp in case an arrow key moved the cursor off the board.
	c.SupressCursorXY()
	return nil
}
// SetView renders the scene: the turn indicator beside the board (the
// active player's label is drawn bold), the terminal cursor at the
// selected cell, and the 3x3 grid of cell marks centered on screen.
func (c *Competition) SetView() {
	cenX, cenY := tbxCenterXY()

	// Highlight whichever player's turn it is with AttrBold.
	switch c.turn {
	case Circle:
		tbxSetText(cenX+4, cenY-1, "Circle",
			termbox.ColorGreen|termbox.AttrBold, termbox.ColorDefault)
		tbxSetText(cenX+4, cenY+1, "Cross",
			termbox.ColorGreen, termbox.ColorDefault)
	case Cross:
		tbxSetText(cenX+4, cenY-1, "Circle",
			termbox.ColorGreen, termbox.ColorDefault)
		tbxSetText(cenX+4, cenY+1, "Cross",
			termbox.ColorGreen|termbox.AttrBold, termbox.ColorDefault)
	}

	// The board occupies the 3x3 cells around (cenX, cenY); the cursor
	// sits on the currently selected cell.
	termbox.SetCursor(cenX+c.x-1, cenY+c.y-1)
	for x := 0; x < 3; x++ {
		for y := 0; y < 3; y++ {
			termbox.SetCell(cenX+x-1, cenY+y-1, c.board[x][y].Appearance(),
				termbox.ColorGreen, termbox.ColorDefault)
		}
	}
}
|
package web
import (
"github.com/tdewolff/minify"
minHTML "github.com/tdewolff/minify/html"
minSVG "github.com/tdewolff/minify/svg"
)
// DefaultMinifier is a default minifier configuration to use. It
// registers the stock HTML and SVG minifiers on a fresh minify.M.
func DefaultMinifier() *minify.M {
	minifier := minify.New()
	minifier.Add("image/svg+xml", minSVG.DefaultMinifier)
	minifier.Add("text/html", minHTML.DefaultMinifier)
	return minifier
}
|
package repository
import (
"database/sql"
"kz.nitec.digidocs.pcr/internal/models"
"kz.nitec.digidocs.pcr/pkg/logger"
)
// BuildServiceRepository provides read access to document-type metadata
// stored in the relational database.
type BuildServiceRepository struct {
	db *sql.DB
}

// NewBuildServiceRepsoitory wraps the given database handle in a
// BuildServiceRepository. (The typo in the name is preserved because
// callers depend on it.)
func NewBuildServiceRepsoitory(db *sql.DB) *BuildServiceRepository {
	repo := &BuildServiceRepository{db: db}
	return repo
}
// GetDocInfoByCode loads the localized names (en/ru/kk) of the document
// type identified by code. A missing row or scan failure is returned as
// a logged error via logger.CreateMessageLog.
func (brepo *BuildServiceRepository) GetDocInfoByCode(code string) (*models.Document, error) {
	document := &models.Document{Code: code}

	row := brepo.db.QueryRow("SELECT name_en, name_ru, name_kk FROM document_type WHERE code=$1", code)
	if err := row.Scan(&document.NameEn, &document.NameRu, &document.NameKK); err != nil {
		return nil, logger.CreateMessageLog(err)
	}

	return document, nil
}
|
package main
import (
"encoding/binary"
"fmt"
"log"
"os"
)
// Enumeration result types for the REPL front-end; their concrete
// values are presumably defined elsewhere in this package.
type MetaCommandResult int
type PrepareResult int
type StatementType int
type ExecuteResult int

// Statement is a parsed command plus its insert payload.
type Statement struct {
	Type        StatementType
	RowToInsert Row // only meaningful for insert statements
}

// Row is a single table record.
type Row struct {
	ID       uint32
	UserName []byte
	Email    []byte
}

// Page is a fixed-size byte buffer of serialized rows.
// point is the index of the last byte written. (Translated from the
// original Japanese comment.)
type Page struct {
	buf   []byte
	point uint32
}

// Pager owns the backing file and the in-memory page buffers.
type Pager struct {
	fileDescriptor *os.File
	pages          []*Page
	point          uint32 // index of the last page in use — TODO confirm against callers
}

// Table tracks the row count and delegates storage to its Pager.
type Table struct {
	NumRows uint32
	Pager   *Pager
}
// newPager opens (or creates) the database file, pre-allocates all
// TABLE_MAX_PAGES page buffers at PAGE_SIZE bytes each, and pre-loads
// previously persisted data via readData.
//
// NOTE(review): an OpenFile error is only logged; execution continues
// with a nil *os.File, which will fail later in readData/closeTable.
func newPager() *Pager {
	file, err := os.OpenFile(FILE_NAME, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0600)
	if err != nil {
		log.Println(err)
	}
	p := &Pager{
		fileDescriptor: file,
		pages:          make([]*Page, TABLE_MAX_PAGES),
	}
	// Allocate every page buffer eagerly so indexing is always valid.
	for i := uint32(0); i < TABLE_MAX_PAGES; i++ {
		buf := make([]byte, PAGE_SIZE)
		p.pages[i] = &Page{buf: buf}
	}

	readData(p)
	return p
}
// newTable creates an empty table backed by a freshly opened pager.
func newTable() *Table {
	pager := newPager()

	return &Table{
		NumRows: 0,
		Pager:   pager,
	}
}
// closeTable writes every page up to and including Pager.point back to
// the data file, then closes the file. Errors are only logged, never
// propagated.
func (table *Table) closeTable() {
	// Persist the data. (Translated from the original Japanese comment.)
	for i := uint32(0); i <= table.Pager.point; i++ {
		err := binary.Write(table.Pager.fileDescriptor, binary.BigEndian, table.Pager.pages[i].buf)
		if err != nil {
			log.Println(err)
		}
	}

	err := table.Pager.fileDescriptor.Close()
	if err != nil {
		log.Println(err)
	}
}
// readData loads previously persisted bytes from the backing file into
// the first page buffer.
//
// TODO (translated from the original): for now only a single page is
// read back.
// NOTE(review): on a brand-new/empty file binary.Read returns an error
// (likely io.EOF) which is logged and ignored; the trailing Println
// looks like leftover debug output.
func readData(pager *Pager) {
	err := binary.Read(pager.fileDescriptor, binary.BigEndian, pager.pages[0].buf)
	if err != nil {
		log.Println(err)
	}
	fmt.Println(pager.pages[0].buf)
}
|
package Plugins
import (
"../Misc"
"../Parse"
"fmt"
"github.com/jlaffaye/ftp"
"sync"
"time"
//"os"
)
// Ftp performs a single FTP login attempt against info.Host:info.Port
// with info.Username/info.Password, printing and optionally recording a
// success. It is run as a goroutine by FtpConn; ch is the concurrency
// semaphore.
func Ftp(info Misc.HostInfo, ch chan int, wg *sync.WaitGroup) {
	// Deferred so the WaitGroup slot and the semaphore token are always
	// released, even if a library call panics — otherwise FtpConn would
	// deadlock on Wait(). The original released them only on the normal
	// exit path.
	defer func() {
		wg.Done()
		<-ch
	}()

	addr := fmt.Sprintf("%s:%d", info.Host, info.Port)
	client, err := ftp.Dial(addr, ftp.DialWithTimeout(time.Duration(info.Timeout)*time.Second))
	if err != nil && info.ErrShow {
		Misc.ErrPrinter.Println(err.Error())
	} else if err == nil {
		err = client.Login(info.Username, info.Password)
		if err != nil && info.ErrShow {
			info.PrintFail()
		} else if err == nil {
			client.Quit()
			client.Logout()
			// NOTE(review): `success` is a package-level counter mutated
			// from many goroutines without synchronization — a data race.
			// Consider sync/atomic; left as-is to keep this change minimal.
			success += 1
			info.PrintSuccess()
			if info.Output != "" {
				info.OutputTXT()
			}
		}
	}
}
// FtpConn fans out one Ftp attempt per (host, username, password)
// combination parsed from info, bounded by the ch semaphore, and prints
// a summary when everything finishes.
//
// NOTE(review): the error from Parse.ParsePort is discarded; an invalid
// port spec would panic on p[0]. FTPPORT and the `success` counter are
// declared elsewhere in this package.
func FtpConn(info *Misc.HostInfo, ch chan int) {
	var hosts, usernames, passwords []string
	var err error
	var wg = sync.WaitGroup{}
	stime := time.Now()
	// Default the port when none was supplied; otherwise take only the
	// first parsed port.
	if info.Ports == "" {
		info.Port = FTPPORT
	} else {
		p, _ := Parse.ParsePort(info.Ports)
		info.Port = p[0]
	}
	hosts, err = Parse.ParseIP(info.Host)
	Misc.CheckErr(err)
	usernames, err = Parse.ParseUser(info)
	Misc.CheckErr(err)
	passwords, err = Parse.ParsePass(info)
	Misc.CheckErr(err)
	wg.Add(len(hosts) * len(usernames) * len(passwords))
	Misc.InfoPrinter.Println("Total length", len(hosts)*len(usernames)*len(passwords))
	// The shared *info is mutated then copied by value into each
	// goroutine; sending on ch caps the number of in-flight attempts.
	for _, host := range hosts {
		for _, user := range usernames {
			for _, pass := range passwords {
				info.Host = host
				info.Username = user
				info.Password = pass
				go Ftp(*info, ch, &wg)
				ch <- 1
			}
		}
	}
	wg.Wait()
	end := time.Since(stime)
	Misc.InfoPrinter.Println("All Done")
	Misc.InfoPrinter.Println("Number of successes:", success)
	Misc.InfoPrinter.Println("Time consumed:", end)
}
|
package main
import (
"errors"
"example/service"
"fmt"
"github.com/linxlib/logs"
"github.com/robfig/cron/v3"
)
// main wires up file logging, emits a few sample log lines, and starts
// the weather cron job on a seconds-granularity schedule, then blocks
// forever.
func main() {
	logs.AddFileHook("example")
	logs.Error(fmt.Errorf("Test Error: %+v", errors.New("error example")))

	service.Init()

	logs.Infoln("sdasd")
	logs.Traceln("sjahdasd")
	logs.Debugln("djahsda")
	logs.Warn("dsada")
	logs.Warn("sadhasd")

	c := cron.New(cron.WithSeconds())
	// Check the parse error: a bad cron spec would otherwise silently
	// schedule nothing (the original discarded AddJob's error).
	if _, err := c.AddJob("0 0 6/2 * * ? ", new(service.WeatherJob)); err != nil {
		logs.Error(err)
	}
	c.Start()

	// Block forever; the cron scheduler runs in its own goroutines.
	select {}
}
|
package jwt_test
import (
"crypto/x509"
"encoding/base64"
"encoding/json"
"io/ioutil"
"strings"
"testing"
"time"
"github.com/docker/licensing/lib/go-auth/identity"
"github.com/docker/licensing/lib/go-auth/jwt"
"github.com/stretchr/testify/require"
)
// load reads the entire file at fname, failing the test immediately on
// any error.
func load(t *testing.T, fname string) []byte {
	b, err := ioutil.ReadFile(fname)
	require.NoError(t, err)
	return b
}
// defaultRootCertChain loads the test root certificates from testdata
// into a cert pool, failing the test if the PEM data cannot be parsed.
func defaultRootCertChain(t *testing.T) *x509.CertPool {
	pool := x509.NewCertPool()

	pemData := load(t, "testdata/root-certs")
	if !pool.AppendCertsFromPEM(pemData) {
		t.Fatal("could not load root certs")
	}

	return pool
}
// defaultIdentity returns the fixed DockerIdentity used across these
// tests: a test user with two scopes.
func defaultIdentity() identity.DockerIdentity {
	return identity.DockerIdentity{
		Username: "testuser",
		DockerID: "00557eca-6a92-4b97-8af2-f966572ac11e",
		Email:    "testuser@gmail.com",
		Scopes:   []string{"scopea", "scopeb"},
	}
}
// TestDecodeBadToken verifies that a structurally invalid token string
// is rejected with a malformed-token error.
func TestDecodeBadToken(t *testing.T) {
	t.Parallel()
	rootCerts := defaultRootCertChain(t)
	badToken := "foo"
	_, err := jwt.Decode(badToken, jwt.DecodeOptions{
		CertificateChain: rootCerts,
	})
	require.Error(t, err)
	// require.Regexp takes (t, expectedRegexp, actual). The original had
	// the regexp and the value swapped (err came second), so the message
	// check could never assert what was intended; every other test in
	// this file uses the correct order.
	require.Regexp(t, "malformed token error: token contains an invalid number of segments", err)
}
// TestDecodeBadCertChain verifies that a well-formed token fails
// verification when the decoder is handed a root pool that cannot
// anchor the signing certificate.
func TestDecodeBadCertChain(t *testing.T) {
	t.Parallel()
	privateKey := load(t, "testdata/private-key")
	trustedCert := load(t, "testdata/trusted-cert")
	// empty cert pool: nothing can chain to it, so Decode must fail
	rootCerts := x509.NewCertPool()
	identity := defaultIdentity()
	tokenStr, err := jwt.Encode(identity, jwt.EncodeOptions{
		SigningKey: privateKey,
		Certificate: trustedCert,
		Expiration: time.Now().Add(time.Hour * 72).Unix(),
	})
	require.NoError(t, err)
	_, err = jwt.Decode(tokenStr, jwt.DecodeOptions{
		CertificateChain: rootCerts,
	})
	require.Error(t, err)
	require.Regexp(t, "certificate signed by unknown authority", err)
}
// TestExpiredToken verifies that a token whose expiration lies 72 hours
// in the past is rejected with an expiration error.
func TestExpiredToken(t *testing.T) {
	t.Parallel()
	privateKey := load(t, "testdata/private-key")
	trustedCert := load(t, "testdata/trusted-cert")
	rootCerts := defaultRootCertChain(t)
	identity := defaultIdentity()
	// Negative offset: the token is already expired at creation time.
	tokenStr, err := jwt.Encode(identity, jwt.EncodeOptions{
		SigningKey: privateKey,
		Certificate: trustedCert,
		Expiration: time.Now().Add(-(time.Hour * 72)).Unix(),
	})
	require.NoError(t, err)
	_, err = jwt.Decode(tokenStr, jwt.DecodeOptions{
		CertificateChain: rootCerts,
	})
	require.Error(t, err)
	require.Regexp(t, "token expiration error", err)
}
// TestIsExpired checks both outcomes of jwt.IsExpired: true for a token
// expired 72h ago, false for one valid for another 72h.
func TestIsExpired(t *testing.T) {
	t.Parallel()
	privateKey := load(t, "testdata/private-key")
	trustedCert := load(t, "testdata/trusted-cert")
	rootCerts := defaultRootCertChain(t)
	identity := defaultIdentity()
	// expired: expiration set in the past
	tokenStr, err := jwt.Encode(identity, jwt.EncodeOptions{
		SigningKey: privateKey,
		Certificate: trustedCert,
		Expiration: time.Now().Add(-(time.Hour * 72)).Unix(),
	})
	require.NoError(t, err)
	expired, err := jwt.IsExpired(tokenStr, jwt.DecodeOptions{
		CertificateChain: rootCerts,
	})
	require.NoError(t, err)
	require.True(t, expired)
	// not expired: expiration set in the future
	tokenStr, err = jwt.Encode(identity, jwt.EncodeOptions{
		SigningKey: privateKey,
		Certificate: trustedCert,
		Expiration: time.Now().Add(time.Hour * 72).Unix(),
	})
	require.NoError(t, err)
	expired, err = jwt.IsExpired(tokenStr, jwt.DecodeOptions{
		CertificateChain: rootCerts,
	})
	require.NoError(t, err)
	require.False(t, expired)
}
// TestEncodeExpectedFields manually splits an encoded token into its
// header.claims.signature segments and checks that the claims segment
// carries the expected JWT claim fields.
func TestEncodeExpectedFields(t *testing.T) {
	t.Parallel()
	privateKey := load(t, "testdata/private-key")
	trustedCert := load(t, "testdata/trusted-cert")
	identity := defaultIdentity()
	expiration := time.Now().Add(time.Hour * 72).Unix()
	tokenStr, err := jwt.Encode(identity, jwt.EncodeOptions{
		SigningKey: privateKey,
		Certificate: trustedCert,
		Expiration: expiration,
	})
	require.NoError(t, err)
	// manually parse the token
	tokenParts := strings.Split(tokenStr, ".")
	require.Len(t, tokenParts, 3)
	encodedClaims := tokenParts[1]
	// JWS segments are base64url WITHOUT padding (RFC 7515). The
	// original decoded with base64.StdEncoding, which only succeeds by
	// luck when the segment length happens to be a multiple of four and
	// the payload contains no bytes mapping to '-' or '_'.
	claimsJSON, err := base64.RawURLEncoding.DecodeString(encodedClaims)
	require.NoError(t, err)
	var claims map[string]interface{}
	err = json.Unmarshal(claimsJSON, &claims)
	require.NoError(t, err)
	require.Equal(t, identity.Username, claims["username"])
	require.Equal(t, identity.DockerID, claims["sub"])
	require.Equal(t, identity.Email, claims["email"])
	require.Equal(t, strings.Join(identity.Scopes, " "), claims["scope"])
	// jti must be a non-empty string; iat a numeric timestamp, which
	// JSON decoding surfaces as float64.
	var str string
	require.IsType(t, str, claims["jti"])
	require.NotEmpty(t, claims["jti"])
	var flt float64
	require.IsType(t, flt, claims["iat"])
	require.NotEmpty(t, claims["iat"])
	require.Equal(t, float64(expiration), claims["exp"])
}
// TestEncodeBadSigningKey verifies that Encode rejects a signing key
// that is not a parseable private key.
func TestEncodeBadSigningKey(t *testing.T) {
	t.Parallel()
	privateKey := []byte("foo")
	trustedCert := load(t, "testdata/trusted-cert")
	identity := defaultIdentity()
	_, err := jwt.Encode(identity, jwt.EncodeOptions{
		SigningKey: privateKey,
		Certificate: trustedCert,
		Expiration: time.Now().Add(time.Hour * 72).Unix(),
	})
	require.Error(t, err)
	require.Regexp(t, "Invalid Key", err)
}
// TestTrustedToken is the happy path: a token signed with the trusted
// certificate decodes successfully and round-trips every identity field.
func TestTrustedToken(t *testing.T) {
	t.Parallel()
	privateKey := load(t, "testdata/private-key")
	trustedCert := load(t, "testdata/trusted-cert")
	rootCerts := defaultRootCertChain(t)
	identity := defaultIdentity()
	tokenStr, err := jwt.Encode(identity, jwt.EncodeOptions{
		SigningKey: privateKey,
		Certificate: trustedCert,
		Expiration: time.Now().Add(time.Hour * 72).Unix(),
	})
	require.NoError(t, err)
	decoded, err := jwt.Decode(tokenStr, jwt.DecodeOptions{
		CertificateChain: rootCerts,
	})
	require.NoError(t, err)
	require.Equal(t, identity.Email, decoded.Email)
	require.Equal(t, identity.Username, decoded.Username)
	require.Equal(t, identity.DockerID, decoded.DockerID)
	require.Equal(t, identity.Scopes, decoded.Scopes)
}
// TestUntrustedToken verifies that a token signed with a certificate
// outside the configured root chain is rejected.
func TestUntrustedToken(t *testing.T) {
	t.Parallel()
	privateKey := load(t, "testdata/private-key")
	untrustedCert := load(t, "testdata/untrusted-cert")
	rootCerts := defaultRootCertChain(t)
	identity := defaultIdentity()
	tokenStr, err := jwt.Encode(identity, jwt.EncodeOptions{
		SigningKey: privateKey,
		Certificate: untrustedCert,
		Expiration: time.Now().Add(time.Hour * 72).Unix(),
	})
	require.NoError(t, err)
	_, err = jwt.Decode(tokenStr, jwt.DecodeOptions{
		CertificateChain: rootCerts,
	})
	require.Error(t, err)
	require.Regexp(t, "certificate signed by unknown authority", err)
}
|
package config
import (
"context"
_ "github.com/lib/pq"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/gridfs"
"go.mongodb.org/mongo-driver/mongo/options"
"log"
"time"
)
// Package-level MongoDB handles; all are assigned once by init() below.
// DB is the main website database.
var DB *mongo.Database
// UserCol is the users collection in DB.
var UserCol *mongo.Collection
// PostCol is the posts collection in DB.
var PostCol *mongo.Collection
// FsFilesCol is the fs.files GridFS metadata collection in DBFiles.
var FsFilesCol *mongo.Collection
// DBFiles is the database backing GridFS file storage.
var DBFiles *mongo.Database
// Bucket is the GridFS bucket used for file reads/writes.
var Bucket *gridfs.Bucket
// init connects to MongoDB, verifies the connection with a ping, and
// wires up the package-level database/collection/bucket handles. The
// process exits if the connection cannot be established, since the
// server cannot run without it.
func init() {
	// Connection timeout disabled (SetConnectTimeout(0)) because the dev
	// environment is slow; would not do this in production.
	// NOTE(review): the URI placeholders must be replaced with real
	// credentials — preferably injected via environment variables rather
	// than committed in code.
	clientOptions := options.Client().ApplyURI("mongodb+srv://<specificDatabaseUsername>:<specificDatabasePassword>@<DatabaseConnectionString>").SetConnectTimeout(0)
	// Bound the initial connect. The original discarded the CancelFunc,
	// which go vet flags as a potential context leak.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Connect to MongoDB. Must be fatal: nothing works without it.
	client, err := mongo.Connect(ctx, clientOptions)
	if err != nil {
		log.Fatal(err, "Didn't connect to MongoDB")
	}
	// Check the connection actually works before handing out handles.
	err = client.Ping(context.TODO(), nil)
	if err != nil {
		log.Fatal(err, "Mongo Connection test failed")
	}
	// Main website database and its collections.
	DB = client.Database("websiteDatabase")
	UserCol = DB.Collection("users")
	PostCol = DB.Collection("posts")
	// File-storage database, its GridFS metadata collection and bucket.
	DBFiles = client.Database("myfiles")
	FsFilesCol = DBFiles.Collection("fs.files")
	// The original discarded this error; a nil Bucket would then panic
	// at first use, far from the actual cause.
	Bucket, err = gridfs.NewBucket(
		DBFiles,
	)
	if err != nil {
		log.Fatal(err, "Failed to create GridFS bucket")
	}
	log.Println("Connected to MongoDB!")
}
|
package functions
import "math"
func SoftMax(x []float64) []float64 {
var max float64 = x[0]
for _, n := range x {
max = math.Max(max, n)
}
a := make([]float64, len(x))
var sum float64 = 0
for i, n := range x {
a[i] -= math.Exp(n - max)
sum += a[i]
}
for i, n := range a {
a[i] = n / sum
}
return a
}
|
// Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package submission
import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"regexp"
"testing"
"time"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/client"
"github.com/google/certificate-transparency-go/ctpolicy"
"github.com/google/certificate-transparency-go/loglist"
"github.com/google/certificate-transparency-go/testdata"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/x509"
"github.com/google/certificate-transparency-go/x509util"
"github.com/google/go-cmp/cmp"
)
// readCertFile returns the first certificate it finds in file provided,
// or "" if the file cannot be read or contains no CERTIFICATE blocks.
func readCertFile(filename string) string {
	data, err := x509util.ReadPossiblePEMFile(filename, "CERTIFICATE")
	if err != nil || len(data) == 0 {
		// The original indexed data[0] unconditionally and would panic
		// on a readable file containing no certificates.
		return ""
	}
	return string(data[0])
}
// rootInfo describes one accepted root served by a stubbed Log: either
// raw bytes used verbatim, or the name of a cert file to load.
type rootInfo struct {
	raw string
	filename string
}
var (
	// RootsCerts maps stub Log URLs to the roots each stub serves from
	// GetAcceptedRoots (see stubLogClient). Entries carry either raw
	// bytes or a PEM file name; some raw entries are deliberately not
	// parseable certificates to exercise error handling.
	RootsCerts = map[string][]rootInfo{
		"ct.googleapis.com/aviator/": {
			rootInfo{filename: "../trillian/testdata/fake-ca-1.cert"},
			rootInfo{filename: "testdata/some.cert"},
		},
		"ct.googleapis.com/rocketeer/": {
			rootInfo{filename: "../trillian/testdata/fake-ca.cert"},
			rootInfo{filename: "../trillian/testdata/fake-ca-1.cert"},
			rootInfo{filename: "testdata/some.cert"},
			rootInfo{filename: "testdata/another.cert"},
		},
		"ct.googleapis.com/icarus/": {
			rootInfo{raw: "aW52YWxpZDAwMA=="}, // encoded 'invalid000'
			rootInfo{filename: "testdata/another.cert"},
		},
		"uncollectable-roots/log/": {
			rootInfo{raw: "invalid"},
		},
	}
)
// buildNoLogClient is LogClientBuilder that always fails; used to test
// distributor construction error paths.
func buildNoLogClient(log *loglist.Log) (client.AddLogClient, error) {
	return nil, errors.New("bad client builder")
}
// Stub for AddLogClient interface that accepts everything and returns
// empty results; used where only client construction matters.
type emptyLogClient struct {
}

// AddChain is a no-op satisfying the AddLogClient interface.
func (e emptyLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
	return nil, nil
}

// AddPreChain is a no-op satisfying the AddLogClient interface.
func (e emptyLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
	return nil, nil
}

// GetAcceptedRoots reports no roots and no error.
func (e emptyLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) {
	return nil, nil
}

// buildEmptyLogClient produces empty stub Log clients.
func buildEmptyLogClient(log *loglist.Log) (client.AddLogClient, error) {
	return emptyLogClient{}, nil
}
// sampleLogList parses testdata.SampleLogList into a LogList, failing
// the test on malformed JSON.
func sampleLogList(t *testing.T) *loglist.LogList {
	t.Helper()
	// Renamed from "loglist", which shadowed the loglist package for the
	// remainder of the function scope.
	var ll loglist.LogList
	err := json.Unmarshal([]byte(testdata.SampleLogList), &ll)
	if err != nil {
		t.Fatalf("Unable to Unmarshal testdata.SampleLogList %v", err)
	}
	return &ll
}
// sampleValidLogList returns the sample list with its known-invalid
// entry removed.
func sampleValidLogList(t *testing.T) *loglist.LogList {
	t.Helper()
	ll := sampleLogList(t)
	// Id of invalid Log description Racketeer
	inval := 3
	// Splice index 3 out; note this append mutates the backing array of
	// ll.Logs in place.
	ll.Logs = append(ll.Logs[:inval], ll.Logs[inval+1:]...)
	return ll
}
// sampleUncollectableLogList extends the valid list with a log whose
// stub client serves only unparseable root data (see RootsCerts).
func sampleUncollectableLogList(t *testing.T) *loglist.LogList {
	t.Helper()
	ll := sampleValidLogList(t)
	// Append loglist that is unable to provide roots on request.
	ll.Logs = append(ll.Logs, loglist.Log{
		Description: "Does not return roots", Key: []byte("VW5jb2xsZWN0YWJsZUxvZ0xpc3Q="),
		MaximumMergeDelay: 123, OperatedBy: []int{0},
		URL: "uncollectable-roots/log/",
		DNSAPIEndpoint: "uncollectavle.ct.googleapis.com",
	})
	return ll
}
// TestNewDistributorLogClients checks that NewDistributor constructs a
// client per Log entry and surfaces client-builder failures.
func TestNewDistributorLogClients(t *testing.T) {
	testCases := []struct {
		name string
		ll *loglist.LogList
		lcBuilder LogClientBuilder
		errRegexp *regexp.Regexp
	}{
		{
			name: "ValidLogClients",
			ll: sampleValidLogList(t),
			lcBuilder: buildEmptyLogClient,
		},
		{
			name: "NoLogClients",
			ll: sampleValidLogList(t),
			lcBuilder: buildNoLogClient,
			errRegexp: regexp.MustCompile("failed to create log client"),
		},
		{
			// A failing builder is never invoked for an empty list.
			name: "NoLogClientsEmptyLogList",
			ll: &loglist.LogList{},
			lcBuilder: buildNoLogClient,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			_, err := NewDistributor(tc.ll, ctpolicy.ChromeCTPolicy{}, tc.lcBuilder)
			// Two-sided check: error presence must match expectation,
			// and when expected, the message must match the regexp.
			if gotErr, wantErr := err != nil, tc.errRegexp != nil; gotErr != wantErr {
				var unwantedErr string
				if gotErr {
					unwantedErr = fmt.Sprintf(" (%q)", err)
				}
				t.Errorf("Got error = %v%s, expected error = %v", gotErr, unwantedErr, wantErr)
			} else if tc.errRegexp != nil && !tc.errRegexp.MatchString(err.Error()) {
				t.Errorf("Error %q did not match expected regexp %q", err, tc.errRegexp)
			}
		})
	}
}
// TestSCT builds a mock SCT for given logURL.
// The log URL bytes are copied into the key ID, so SCTs from different
// stub logs are distinguishable in comparisons.
func testSCT(logURL string) *ct.SignedCertificateTimestamp {
	var keyID [sha256.Size]byte
	copy(keyID[:], logURL)
	return &ct.SignedCertificateTimestamp{
		SCTVersion: ct.V1,
		LogID: ct.LogID{KeyID: keyID},
		Timestamp: 1234,
		Extensions: []byte{},
		Signature: ct.DigitallySigned{
			Algorithm: tls.SignatureAndHashAlgorithm{
				Hash: tls.SHA256,
				Signature: tls.ECDSA,
			},
		},
	}
}
// Stub for AddLogCLient interface backed by the RootsCerts fixture.
type stubLogClient struct {
	logURL string
}

// AddChain is unused by these tests; it returns empty results.
func (m stubLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
	return nil, nil
}

// AddPreChain returns a mock SCT for logs registered in RootsCerts and
// an error for any other URL.
func (m stubLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
	if _, ok := RootsCerts[m.logURL]; ok {
		return testSCT(m.logURL), nil
	}
	return nil, fmt.Errorf("Log %q has no roots", m.logURL)
}

// GetAcceptedRoots serves the raw bytes or file contents registered in
// RootsCerts for this log; unknown logs yield an empty (non-nil) list.
func (m stubLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) {
	roots := []ct.ASN1Cert{}
	if certInfos, ok := RootsCerts[m.logURL]; ok {
		for _, certInfo := range certInfos {
			if len(certInfo.raw) > 0 {
				roots = append(roots, ct.ASN1Cert{Data: []byte(certInfo.raw)})
			} else {
				roots = append(roots, ct.ASN1Cert{Data: []byte(readCertFile(certInfo.filename))})
			}
		}
	}
	return roots, nil
}

// buildStubLogClient is a LogClientBuilder producing the stub above.
func buildStubLogClient(log *loglist.Log) (client.AddLogClient, error) {
	return stubLogClient{logURL: log.URL}, nil
}
// TestNewDistributorRootPools verifies that the distributor's root
// refresh collects the expected number of parseable roots per log.
func TestNewDistributorRootPools(t *testing.T) {
	testCases := []struct {
		name string
		ll *loglist.LogList
		rootNum map[string]int
	}{
		{
			name: "InactiveZeroRoots",
			ll: sampleValidLogList(t),
			rootNum: map[string]int{"ct.googleapis.com/aviator/": 0, "ct.googleapis.com/rocketeer/": 4, "ct.googleapis.com/icarus/": 1}, // aviator is not active; 1 of 2 icarus roots is not x509 struct
		},
		{
			name: "CouldNotCollect",
			ll: sampleUncollectableLogList(t),
			rootNum: map[string]int{"ct.googleapis.com/aviator/": 0, "ct.googleapis.com/rocketeer/": 4, "ct.googleapis.com/icarus/": 1, "uncollectable-roots/log/": 0}, // aviator is not active; uncollectable client cannot provide roots
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			dist, _ := NewDistributor(tc.ll, ctpolicy.ChromeCTPolicy{}, buildStubLogClient)
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			go dist.Run(ctx)
			// First Log refresh expected. Waiting for the full context
			// gives the background refresh time to complete.
			<-ctx.Done()
			// Inspect distributor internals under its own lock.
			dist.mu.Lock()
			defer dist.mu.Unlock()
			for logURL, wantNum := range tc.rootNum {
				gotNum := 0
				if roots, ok := dist.logRoots[logURL]; ok {
					gotNum = len(roots.RawCertificates())
				}
				if wantNum != gotNum {
					t.Errorf("Expected %d root(s) for Log %s, got %d", wantNum, logURL, gotNum)
				}
			}
		})
	}
}
// pemFileToDERChain loads every CERTIFICATE block from the named PEM
// file as raw DER, failing the test if the file cannot be read.
func pemFileToDERChain(t *testing.T, filename string) [][]byte {
	t.Helper()
	rawChain, err := x509util.ReadPossiblePEMFile(filename, "CERTIFICATE")
	if err != nil {
		t.Fatalf("failed to load testdata: %v", err)
	}
	return rawChain
}
// getSCTMap indexes a list of assigned SCTs by their Log URL.
func getSCTMap(l []*AssignedSCT) map[string]*AssignedSCT {
	byURL := make(map[string]*AssignedSCT, len(l))
	for _, sct := range l {
		byURL[sct.LogURL] = sct
	}
	return byURL
}
// Stub CT policy to run tests.
type stubCTPolicy struct {
	baseNum int
}

// Builds simplistic policy requiring n SCTs from any Logs for each cert.
func buildStubCTPolicy(n int) stubCTPolicy {
	return stubCTPolicy{baseNum: n}
}

// LogsByGroup ignores the certificate and returns a single base group
// built from the approved logs, requiring baseNum SCTs.
func (stubP stubCTPolicy) LogsByGroup(cert *x509.Certificate, approved *loglist.LogList) (ctpolicy.LogPolicyData, error) {
	baseGroup, err := ctpolicy.BaseGroupFor(approved, stubP.baseNum)
	groups := ctpolicy.LogPolicyData{baseGroup.Name: &baseGroup}
	return groups, err
}
// TestDistributorAddPreChain covers chain validation, policy
// satisfaction and SCT fan-out for Distributor.AddPreChain.
func TestDistributorAddPreChain(t *testing.T) {
	testCases := []struct {
		name string
		ll *loglist.LogList
		plc ctpolicy.CTPolicy
		rawChain [][]byte
		scts []*AssignedSCT
		wantErr bool
	}{
		{
			name: "MalformedChainRequest",
			ll: sampleValidLogList(t),
			plc: ctpolicy.ChromeCTPolicy{},
			rawChain: pemFileToDERChain(t, "../trillian/testdata/subleaf.misordered.chain"),
			scts: nil,
			wantErr: true,
		},
		{
			name: "CallBeforeInit",
			ll: sampleValidLogList(t),
			plc: ctpolicy.ChromeCTPolicy{},
			rawChain: nil,
			scts: nil,
			wantErr: true,
		},
		{
			name: "InsufficientSCTsForPolicy",
			ll: sampleValidLogList(t),
			plc: ctpolicy.AppleCTPolicy{},
			rawChain: pemFileToDERChain(t, "../trillian/testdata/subleaf.chain"), // subleaf chain is fake-ca-1-rooted
			scts: []*AssignedSCT{},
			wantErr: true, // Not enough SCTs for policy
		},
		{
			name: "FullChain1Policy",
			ll: sampleValidLogList(t),
			plc: buildStubCTPolicy(1),
			rawChain: pemFileToDERChain(t, "../trillian/testdata/subleaf.chain"),
			scts: []*AssignedSCT{
				{
					LogURL: "ct.googleapis.com/rocketeer/",
					SCT: testSCT("ct.googleapis.com/rocketeer/"),
				},
			},
			wantErr: false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			dist, _ := NewDistributor(tc.ll, tc.plc, buildStubLogClient)
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			// NOTE(review): Run is called synchronously here (unlike the
			// root-pool test, which uses `go dist.Run`); confirm it
			// returns once the 1s context expires.
			dist.Run(ctx)
			scts, err := dist.AddPreChain(context.Background(), tc.rawChain)
			if gotErr := (err != nil); gotErr != tc.wantErr {
				t.Errorf("Expected to get errors is %v while actually getting errors is %v", tc.wantErr, gotErr)
			}
			if got, want := len(scts), len(tc.scts); got != want {
				t.Errorf("Expected to get %d SCTs on AddPreChain request, got %d", want, got)
			}
			// Every returned SCT must match the expected one for its log.
			gotMap := getSCTMap(tc.scts)
			for _, asct := range scts {
				if wantedSCT, ok := gotMap[asct.LogURL]; !ok {
					t.Errorf("dist.AddPreChain() = (_, %v), want err? %t", err, tc.wantErr)
				} else if diff := cmp.Diff(asct, wantedSCT); diff != "" {
					t.Errorf("Got unexpected SCT for Log %q", asct.LogURL)
				}
			}
		})
	}
}
|
package mop
import (
"fmt"
"testing"
od "github.com/barchart/barchart-ondemand-client-golang"
)
// Test exercises Market.Fetch against the barchart demo endpoint and
// prints the result. The client local was renamed from "od", which
// shadowed the package alias of the same name and made the package
// inaccessible for the rest of the scope.
func Test(t *testing.T) {
	client := od.New("FREE_API_KEY", false)
	client.BaseURL = "https://marketdata.websol.barchart.com/"
	m := NewMarket(client)
	fmt.Println("MM", m.Fetch())
}
|
package rabbitmqworker
import (
"Edwardz43/tgbot/config"
"Edwardz43/tgbot/err"
"Edwardz43/tgbot/log"
"Edwardz43/tgbot/message/from"
"Edwardz43/tgbot/worker"
"encoding/json"
"fmt"
"github.com/streadway/amqp"
)
// failOnError aliases the shared error helper to keep call sites short.
var failOnError = err.FailOnError
// GetInstance returns a worker.Worker implementation backed by RabbitMQ
// that reports through the supplied logger.
func GetInstance(l log.Logger) worker.Worker {
	return &Worker{
		logger: l,
	}
}
// Worker is the worker uses rabbitmq.
// channel and queueName are populated by connect(); Result holds the
// most recently decoded message payload.
type Worker struct {
	channel *amqp.Channel
	queueName string
	Result *from.Result
	logger log.Logger
}
// connect dials RabbitMQ, opens a channel, declares the durable
// tgbot_message queue and sets per-consumer QoS. On success the channel
// and queue name are stored on the worker and true is returned; on any
// failure the underlying connection is closed and false is returned.
func (r *Worker) connect() bool {
	conn, err := amqp.Dial(config.GetRabbitDNS())
	if err != nil {
		r.logger.ERROR(fmt.Sprintf("Failed to connect to RabbitMQ : %s", err))
		return false
	}
	// The connection intentionally stays open on success — the channel
	// stored below is backed by it. The original, however, also left it
	// open on every failure path, leaking the TCP connection; closing
	// the connection also closes any channels opened on it.
	ch, err := conn.Channel()
	if err != nil {
		conn.Close()
		r.logger.ERROR(fmt.Sprintf("Failed to open a channel : %s", err))
		return false
	}
	q, err := ch.QueueDeclare(
		"tgbot_message", // name
		true, // durable
		false, // delete when unused
		false, // exclusive
		false, // no-wait
		nil, // arguments
	)
	if err != nil {
		conn.Close()
		r.logger.ERROR(fmt.Sprintf("Failed to declare a queue : %s", err))
		return false
	}
	// Prefetch one message at a time so work is spread across consumers.
	err = ch.Qos(
		1, // prefetch count
		0, // prefetch size
		false, // global
	)
	if err != nil {
		conn.Close()
		r.logger.ERROR(fmt.Sprintf("Failed to set QoS : %s", err))
		return false
	}
	r.channel = ch
	r.queueName = q.Name
	return true
}
// Do executes job for every message consumed from the declared queue,
// acking each delivery after the job runs. It blocks forever unless the
// initial connection fails.
func (r *Worker) Do(job func(args ...interface{}) error) {
	ok := r.connect()
	if !ok {
		r.logger.ERROR(fmt.Sprintln("Failed to connect to rabbitmq channel"))
		return
	}
	msgs, err := r.channel.Consume(
		r.queueName, // queue
		"", // consumer
		false, // auto-ack
		false, // exclusive
		false, // no-local
		false, // no-wait
		nil, // args
	)
	failOnError(err, "Failed to register a consumer")
	defer r.channel.Close()
	forever := make(chan bool)
	go func() {
		for d := range msgs {
			r.logger.INFO(fmt.Sprintf("Worker Received a message: %s", d.Body))
			// NOTE(review): the Unmarshal error is discarded; on bad
			// JSON, r.Result keeps whatever a previous message set, so
			// the nil check below only catches the very first failure.
			json.Unmarshal(d.Body, &r.Result)
			if r.Result == nil {
				r.logger.PANIC("json unmarshal failed")
			}
			err := job(r.Result)
			failOnError(err, "Failed : error from job")
			r.logger.INFO("Work Done")
			// Manual ack only after the job has completed.
			d.Ack(false)
		}
	}()
	// Block forever; the goroutine above does all the work.
	<-forever
}
|
// +build !windows
package runconfig
import (
"fmt"
"runtime"
"strings"
)
// IsValid indicates whether an isolation level is valid. On non-Windows
// builds only the default isolation level is supported.
func (i IsolationLevel) IsValid() bool {
	return i.IsDefault()
}
// IsPrivate indicates whether the container uses its own private
// network stack (i.e. neither the host's nor another container's).
func (n NetworkMode) IsPrivate() bool {
	if n.IsHost() || n.IsContainer() {
		return false
	}
	return true
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
	return n == "default"
}
// DefaultDaemonNetworkMode returns the default network stack the daemon
// should use: the bridge network on this platform.
func DefaultDaemonNetworkMode() NetworkMode {
	return NetworkMode("bridge")
}
// NetworkName returns the canonical name of the network stack, or ""
// when the mode matches none of the known categories.
func (n NetworkMode) NetworkName() string {
	switch {
	case n.IsBridge():
		return "bridge"
	case n.IsHost():
		return "host"
	case n.IsContainer():
		return "container"
	case n.IsNone():
		return "none"
	case n.IsDefault():
		return "default"
	case n.IsUserDefined():
		return n.UserDefined()
	default:
		return ""
	}
}
// IsBridge indicates whether container uses the bridge network stack.
func (n NetworkMode) IsBridge() bool {
	return n == "bridge"
}
// IsHost indicates whether container uses the host network stack.
func (n NetworkMode) IsHost() bool {
	return n == "host"
}
// IsContainer indicates whether the container joins another container's
// network stack, expressed as "container:<name|id>".
func (n NetworkMode) IsContainer() bool {
	return strings.HasPrefix(string(n), "container:")
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
	return n == "none"
}
// ConnectedContainer returns the name or id after the first ":" of a
// "container:<name|id>" mode, or "" when there is no such suffix.
func (n NetworkMode) ConnectedContainer() string {
	if i := strings.Index(string(n), ":"); i >= 0 {
		return string(n)[i+1:]
	}
	return ""
}
// IsUserDefined indicates user-created network: any mode that is not
// one of the built-in categories.
func (n NetworkMode) IsUserDefined() bool {
	return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
// IsPreDefinedNetwork indicates if a network is predefined by the
// daemon (bridge, host or none).
func IsPreDefinedNetwork(network string) bool {
	switch NetworkMode(network) {
	case "bridge", "host", "none":
		return true
	}
	return false
}
// UserDefined returns the name of a user-created network, or "" when
// the mode is one of the built-in categories.
func (n NetworkMode) UserDefined() string {
	if !n.IsUserDefined() {
		return ""
	}
	return string(n)
}
// MergeConfigs merges the specified container Config and HostConfig.
// It creates a ContainerConfigWrapper with empty/zero values for the
// remaining wrapper fields.
func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper {
	return &ContainerConfigWrapper{
		config,
		hostConfig,
		"", nil,
	}
}
// ValidateNetMode ensures that the various combinations of requested
// network settings are valid: a container:<name|id> mode must carry a
// target, and host/container/user-defined modes exclude options that
// only make sense with a private network namespace.
func ValidateNetMode(c *Config, hc *HostConfig) error {
	// We may not be passed a host config, such as in the case of docker commit
	if hc == nil {
		return nil
	}
	parts := strings.Split(string(hc.NetworkMode), ":")
	if parts[0] == "container" {
		if len(parts) < 2 || parts[1] == "" {
			return fmt.Errorf("--net: invalid net mode: invalid container format container:<name|id>")
		}
	}
	if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && c.Hostname != "" {
		return ErrConflictNetworkHostname
	}
	if hc.NetworkMode.IsHost() && len(hc.Links) > 0 {
		return ErrConflictHostNetworkAndLinks
	}
	if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 {
		return ErrConflictContainerNetworkAndLinks
	}
	if hc.NetworkMode.IsUserDefined() && len(hc.Links) > 0 {
		return ErrConflictUserDefinedNetworkAndLinks
	}
	if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && len(hc.DNS) > 0 {
		return ErrConflictNetworkAndDNS
	}
	if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && len(hc.ExtraHosts) > 0 {
		return ErrConflictNetworkHosts
	}
	if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" {
		return ErrConflictContainerNetworkAndMac
	}
	// `hc.PublishAllPorts == true` simplified to the bare boolean.
	if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) {
		return ErrConflictNetworkPublishPorts
	}
	if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 {
		return ErrConflictNetworkExposePorts
	}
	return nil
}
// ValidateIsolationLevel performs platform specific validation of the
// isolation level in the hostconfig structure. Linux only supports "default"
// which is LXC container isolation
func ValidateIsolationLevel(hc *HostConfig) error {
	// We may not be passed a host config, such as in the case of docker commit
	if hc == nil {
		return nil
	}
	if !hc.Isolation.IsValid() {
		return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS)
	}
	return nil
}
|
package main
import "fmt"
// maxSubArray returns the largest sum over all non-empty contiguous
// subarrays of nums (Kadane's algorithm: O(n) time, O(1) space).
// An empty slice yields -1<<31, matching the original sentinel.
func maxSubArray(nums []int) int {
	best := -1 << 31
	running := 0
	for _, v := range nums {
		running += v
		if running > best {
			best = running
		}
		// A negative prefix can only hurt; restart the window.
		if running < 0 {
			running = 0
		}
	}
	return best
}
// main prints the maximum-subarray sum for the classic LeetCode sample
// input (expected output: 6).
func main() {
	// fmt.Printf("%v\n", parse(342))
	// fmt.Printf("%v\n", parse(465))
	fmt.Printf("%v\n", maxSubArray([]int{-2,1,-3,4,-1,2,1,-5,4}))
}
|
package persistence
import (
"context"
"fmt"
"os"
"time"
"github.com/majid-cj/go-docker-mongo/domain/repository"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// Repository bundles the domain repositories together with the Mongo
// client and the context used to create it.
type Repository struct {
	Member repository.MemberRepository
	VerifyCode repository.VerificationCodeRepository
	Ctx context.Context
	Client *mongo.Client
}
// NewRepository connects to MongoDB using the DB_* environment
// variables and returns a Repository wired to its collections.
// NOTE(review): the 30s context is cancelled when this function returns
// (as in the original) yet is also stored in the returned struct —
// confirm callers do not reuse Ctx for later operations.
func NewRepository() (*Repository, error) {
	URL := fmt.Sprintf("%s://%s:%s/?connect=direct", os.Getenv("DB_DRIVER"), os.Getenv("DB_HOST"), os.Getenv("DB_PORT"))
	clientOption := options.Client().ApplyURI(URL)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	client, err := mongo.Connect(ctx, clientOption)
	if err != nil {
		return nil, err
	}
	// Dereference the client only after the error check; the original
	// called client.Database before inspecting err, risking a nil
	// pointer dereference when Connect fails.
	db := client.Database(os.Getenv("DB_NAME"))
	return &Repository{
		Member: NewMemberRepository(db),
		VerifyCode: NewVerifyCodeRepository(db),
		Ctx: ctx,
		Client: client,
	}, nil
}
const (
	// MEMBER is the name of the members collection.
	MEMBER = "member"
	// VERIFYCODE is the name of the verification-codes collection.
	VERIFYCODE = "verify_code"
)
|
package point2
import "fmt"
// Point2 is a point in the 2-D Euclidean plane.
type Point2 struct {
	X float64
	Y float64
}

// Dimensions reports the number of coordinates, which is always 2.
func (p Point2) Dimensions() int {
	return 2
}

// Dimension returns the i-th coordinate: X for 0, Y for anything else.
func (p Point2) Dimension(i int) float64 {
	switch i {
	case 0:
		return p.X
	default:
		return p.Y
	}
}

// String renders the point as "(x,y)" using default %f formatting.
func (p Point2) String() string {
	return fmt.Sprintf("(%f,%f)", p.X, p.Y)
}
|
package main
import (
"fmt"
"math/rand"
"ms/sun/shared/helper"
"ms/sun/servises/event_service"
"ms/sun/shared/x"
"time"
)
// main demonstrates the event service: one goroutine subscribes to
// liked-post events and prints them, another publishes a synthetic
// event every 100ms; the process stays alive for one hour.
func main() {
	//x.LogTableSqlReq.Event = false
	// Subscriber: prints every liked-post event delivered on its channel.
	go func() {
		param := event_service.SubParam{
			Liked_Post_Event: true,
		}
		sub := event_service.NewSub(param)
		for evn := range sub.Liked_Post_Event {
			fmt.Println("event,", evn)
		}
	}()
	// Publisher: saves a fake like event every 100ms with a random
	// ByUserId in [1,80].
	go func() {
		for {
			time.Sleep(time.Millisecond * 100)
			evnt := x.Event{
				EventId: helper.NextRowsSeqId(),
				EventType: 0,
				ByUserId: rand.Intn(80) + 1,
				PeerUserId: 0,
				PostId: 12,
				CommentId: 0,
				ActionId: 0,
			}
			event_service.SaveEvent(event_service.LIKED_POST_EVENT, evnt)
		}
	}()
	// Keep main alive long enough to observe the exchange.
	time.Sleep(time.Hour)
}
|
package main
import "github.com/sashko/go-uinput"
// touchPadExample creates a virtual 1920x1080 touchpad device, moves
// the pointer to (300,200) and issues a right click.
func touchPadExample() {
	touchPad, err := uinput.CreateTouchPad(0, 1919, 0, 1079)
	if err != nil {
		// NOTE(review): the creation error is swallowed; acceptable for
		// an example, but callers get no signal that nothing happened.
		return
	}
	defer touchPad.Close()
	// NOTE(review): any results of MoveTo/RightClick are discarded here
	// — confirm against the go-uinput API whether they return errors.
	touchPad.MoveTo(300, 200)
	touchPad.RightClick()
}
|
package Dao
import (
"blog/model"
"fmt"
"github.com/jinzhu/gorm"
)
// DB is the process-wide gorm handle assigned by InitDB.
var DB*gorm.DB
// InitDB opens the gorm MySQL connection, runs the User auto-migration
// and caches the handle in the package-level DB. (Also reformatted to
// gofmt conventions.)
func InitDB() *gorm.DB {
	driverName := "mysql"
	host := "localhost"
	port := "3306"
	username := "root"
	// NOTE(review): hard-coded credentials committed to source; move to
	// configuration or environment variables.
	password := "512612lj"
	database := "gin"
	charset := "utf8"
	args := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=%s&parseTime=true",
		username,
		password,
		host,
		port,
		database,
		charset,
	)
	db, err := gorm.Open(driverName, args)
	if err != nil {
		// Return early: the original fell through and ran AutoMigrate on
		// a failed connection, producing confusing follow-on errors.
		fmt.Println("failed connect database", err.Error())
		DB = db
		return db
	}
	db.AutoMigrate(&model.User{})
	DB = db
	return db
}
// GetDB returns the gorm handle cached by InitDB.
func GetDB() *gorm.DB {
	return DB
}
|
package fetch
import (
"bytes"
"context"
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/go-kit/kit/endpoint"
httptransport "github.com/go-kit/kit/transport/http"
"github.com/slotix/dataflowkit/splash"
)
// NewHTTPClient returns an Fetch Service backed by an HTTP server living at the
// remote instance. We expect instance to come from a service discovery system,
// so likely of the form "host:port". We bake-in certain middlewares,
// implementing the client library pattern.
// NewHTTPClient builds the Fetch Service client for the given instance
// ("host:port" or full URL), wiring a go-kit transport endpoint per
// remote route. The commented-out endpoints are kept for reference.
func NewHTTPClient(instance string) (Service, error) {
	// Quickly sanitize the instance string: default to http:// scheme.
	if !strings.HasPrefix(instance, "http") {
		instance = "http://" + instance
	}
	u, err := url.Parse(instance)
	if err != nil {
		return nil, err
	}
	// Each individual endpoint is an http/transport.Client (which implements
	// endpoint.Endpoint) that gets wrapped with various middlewares. If you
	// made your own client library, you'd do this work there, so your server
	// could rely on a consistent set of client behavior.
	// var splashFetchEndpoint endpoint.Endpoint
	// {
	// splashFetchEndpoint = httptransport.NewClient(
	// "POST",
	// copyURL(u, "/fetch/splash"),
	// encodeHTTPGenericRequest,
	// decodeSplashFetcherContent,
	// ).Endpoint()
	// }
	// POST /response/splash — full splash.Response decoding.
	var splashResponseEndpoint endpoint.Endpoint
	{
		splashResponseEndpoint = httptransport.NewClient(
			"POST",
			copyURL(u, "/response/splash"),
			encodeHTTPGenericRequest,
			decodeSplashFetcherResponse,
		).Endpoint()
	}
	// var baseFetchEndpoint endpoint.Endpoint
	// {
	// baseFetchEndpoint = httptransport.NewClient(
	// "POST",
	// copyURL(u, "/fetch/base"),
	// encodeHTTPGenericRequest,
	// decodeBaseFetcherContent,
	// ).Endpoint()
	// }
	// POST /response/base — full BaseFetcherResponse decoding.
	var baseResponseEndpoint endpoint.Endpoint
	{
		baseResponseEndpoint = httptransport.NewClient(
			"POST",
			copyURL(u, "/response/base"),
			encodeHTTPGenericRequest,
			decodeBaseFetcherResponse,
		).Endpoint()
	}
	// Returning the endpoint.Set as a service.Service relies on the
	// endpoint.Set implementing the Service methods. That's just a simple bit
	// of glue code.
	return Endpoints{
		//SplashFetchEndpoint: splashFetchEndpoint,
		SplashResponseEndpoint: splashResponseEndpoint,
		//BaseFetchEndpoint: baseFetchEndpoint,
		BaseResponseEndpoint: baseResponseEndpoint,
	}, nil
}
// encodeHTTPGenericRequest is a transport/http.EncodeRequestFunc that
// JSON-encodes any request to the request body. Primarily useful in a client.
func encodeHTTPGenericRequest(ctx context.Context, r *http.Request, request interface{}) error {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(request); err != nil {
return err
}
r.Body = ioutil.NopCloser(&buf)
return nil
}
// decodeSplashFetcherContent is a transport/http.DecodeResponseFunc that decodes a
// JSON-encoded splash fetcher response from the HTTP response body. If the response has a
// non-200 status code, we will interpret that as an error and attempt to decode
// the specific error message from the response body. Primarily useful in a
// client.
// func decodeSplashFetcherContent(ctx context.Context, r *http.Response) (interface{}, error) {
// if r.StatusCode != http.StatusOK {
// return nil, errors.New(r.Status)
// }
// data, err := ioutil.ReadAll(r.Body)
// if err != nil {
// return nil, err
// }
// return data, nil
// }
// decodeSplashFetcherResponse decodes a JSON-encoded splash.Response
// from the HTTP response body; any non-200 status is surfaced as an
// error carrying the status text.
func decodeSplashFetcherResponse(ctx context.Context, r *http.Response) (interface{}, error) {
	if r.StatusCode != http.StatusOK {
		return nil, errors.New(r.Status)
	}
	var resp splash.Response
	err := json.NewDecoder(r.Body).Decode(&resp)
	return resp, err
}
// func decodeBaseFetcherContent(ctx context.Context, r *http.Response) (interface{}, error) {
// if r.StatusCode != http.StatusOK {
// return nil, errors.New(r.Status)
// }
// data, err := ioutil.ReadAll(r.Body)
// if err != nil {
// return nil, err
// }
// return data, nil
// }
// decodeBaseFetcherResponse decodes a JSON-encoded BaseFetcherResponse
// from the HTTP response body; any non-200 status is surfaced as an
// error carrying the status text.
func decodeBaseFetcherResponse(ctx context.Context, r *http.Response) (interface{}, error) {
	if r.StatusCode != http.StatusOK {
		return nil, errors.New(r.Status)
	}
	var resp BaseFetcherResponse
	err := json.NewDecoder(r.Body).Decode(&resp)
	return resp, err
}
func copyURL(base *url.URL, path string) *url.URL {
next := *base
next.Path = path
return &next
}
// func (e Endpoints) Fetch(req FetchRequester) (io.ReadCloser, error) {
// // ctx := context.Background()
// // var resp interface{}
// // var err error
// // switch req.Type() {
// // case "base":
// // resp, err = e.BaseFetchEndpoint(ctx, req)
// // if err != nil {
// // return nil, err
// // }
// // case "splash":
// // resp, err = e.SplashFetchEndpoint(ctx, req)
// // if err != nil {
// // return nil, err
// // }
// // }
// // readCloser := ioutil.NopCloser(bytes.NewReader(resp.([]byte)))
// // return readCloser, nil
// resp, err := e.Response(req)
// if err != nil {
// return nil, err
// }
// return resp.GetHTML()
// }
// Response dispatches req to the endpoint matching its fetcher type
// ("base" or "splash") and returns the decoded response.
//
// Previously an unrecognized req.Type() fell through the switch and
// returned (nil, nil), handing the caller a nil FetchResponser with no
// error; that case now returns an explicit error.
func (e Endpoints) Response(req FetchRequester) (FetchResponser, error) {
	ctx := context.Background()
	switch req.Type() {
	case "base":
		resp, err := e.BaseResponseEndpoint(ctx, req)
		if err != nil {
			return nil, err
		}
		response := resp.(BaseFetcherResponse)
		return &response, nil
	case "splash":
		resp, err := e.SplashResponseEndpoint(ctx, req)
		if err != nil {
			return nil, err
		}
		response := resp.(splash.Response)
		return &response, nil
	default:
		return nil, errors.New("unknown fetcher request type: " + req.Type())
	}
}
|
// This file was generated by counterfeiter
package propertypricehistorycomfakes
import (
"sync"
"github.com/DennisDenuto/property-price-collector/site"
"github.com/DennisDenuto/property-price-collector/site/propertypricehistorycom"
)
// FakePostcodeSuburbLookup is a counterfeiter-generated test double for
// propertypricehistorycom.PostcodeSuburbLookup. Each method records its
// arguments and either delegates to the corresponding *Stub (when set) or
// returns the canned *Returns value.
type FakePostcodeSuburbLookup struct {
	// LoadStub, when non-nil, is invoked by Load instead of loadReturns.
	LoadStub func() error
	loadMutex sync.RWMutex
	loadArgsForCall []struct{}
	loadReturns struct {
		result1 error
	}
	// GetSuburbStub, when non-nil, is invoked by GetSuburb instead of getSuburbReturns.
	GetSuburbStub func(int) ([]site.Suburb, bool)
	getSuburbMutex sync.RWMutex
	getSuburbArgsForCall []struct {
		arg1 int
	}
	getSuburbReturns struct {
		result1 []site.Suburb
		result2 bool
	}
	// invocations maps method name -> recorded argument lists, lazily
	// initialized by recordInvocation.
	invocations map[string][][]interface{}
	invocationsMutex sync.RWMutex
}
// Load records the invocation, then delegates to LoadStub when one is
// set, otherwise returns the canned result.
func (f *FakePostcodeSuburbLookup) Load() error {
	f.loadMutex.Lock()
	f.loadArgsForCall = append(f.loadArgsForCall, struct{}{})
	f.recordInvocation("Load", []interface{}{})
	f.loadMutex.Unlock()
	if f.LoadStub != nil {
		return f.LoadStub()
	}
	return f.loadReturns.result1
}
// LoadCallCount reports how many times Load has been invoked.
func (f *FakePostcodeSuburbLookup) LoadCallCount() int {
	f.loadMutex.RLock()
	defer f.loadMutex.RUnlock()
	return len(f.loadArgsForCall)
}
// LoadReturns fixes the value Load returns and clears any stub.
func (f *FakePostcodeSuburbLookup) LoadReturns(result1 error) {
	f.LoadStub = nil
	f.loadReturns = struct {
		result1 error
	}{result1}
}
// GetSuburb records the call and its argument, then delegates to
// GetSuburbStub when one is set, otherwise returns the canned results.
func (f *FakePostcodeSuburbLookup) GetSuburb(arg1 int) ([]site.Suburb, bool) {
	f.getSuburbMutex.Lock()
	f.getSuburbArgsForCall = append(f.getSuburbArgsForCall, struct {
		arg1 int
	}{arg1})
	f.recordInvocation("GetSuburb", []interface{}{arg1})
	f.getSuburbMutex.Unlock()
	if f.GetSuburbStub != nil {
		return f.GetSuburbStub(arg1)
	}
	return f.getSuburbReturns.result1, f.getSuburbReturns.result2
}
// GetSuburbCallCount reports how many times GetSuburb has been invoked.
func (f *FakePostcodeSuburbLookup) GetSuburbCallCount() int {
	f.getSuburbMutex.RLock()
	defer f.getSuburbMutex.RUnlock()
	return len(f.getSuburbArgsForCall)
}
// GetSuburbArgsForCall returns the argument passed to the i-th GetSuburb call.
func (f *FakePostcodeSuburbLookup) GetSuburbArgsForCall(i int) int {
	f.getSuburbMutex.RLock()
	defer f.getSuburbMutex.RUnlock()
	return f.getSuburbArgsForCall[i].arg1
}
// GetSuburbReturns fixes the values GetSuburb returns and clears any stub.
func (f *FakePostcodeSuburbLookup) GetSuburbReturns(result1 []site.Suburb, result2 bool) {
	f.GetSuburbStub = nil
	f.getSuburbReturns = struct {
		result1 []site.Suburb
		result2 bool
	}{result1, result2}
}
// Invocations returns the map of all recorded calls, holding every
// read-lock so the snapshot is consistent.
func (f *FakePostcodeSuburbLookup) Invocations() map[string][][]interface{} {
	f.invocationsMutex.RLock()
	defer f.invocationsMutex.RUnlock()
	f.loadMutex.RLock()
	defer f.loadMutex.RUnlock()
	f.getSuburbMutex.RLock()
	defer f.getSuburbMutex.RUnlock()
	return f.invocations
}
// recordInvocation appends args under key, lazily allocating the
// invocations map on first use.
func (f *FakePostcodeSuburbLookup) recordInvocation(key string, args []interface{}) {
	f.invocationsMutex.Lock()
	defer f.invocationsMutex.Unlock()
	if f.invocations == nil {
		f.invocations = map[string][][]interface{}{}
	}
	// append works on a nil entry, producing the same single-element slice
	f.invocations[key] = append(f.invocations[key], args)
}
var _ propertypricehistorycom.PostcodeSuburbLookup = new(FakePostcodeSuburbLookup)
|
package main
import (
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
// selectContext renders an interactive tree of clusters and their contexts
// and blocks until the user picks one (or quits the UI). It returns the
// chosen context name — empty when the application exits without a pick —
// or the error from running the terminal UI.
func selectContext(clusters []cluster, selectedContext string) (string, error) {
	var chosen string
	app := tview.NewApplication()
	root := tview.NewTreeNode("Clusters").SetSelectable(false)
	tree := tview.NewTreeView().SetRoot(root)
	var fallbackNode *tview.TreeNode
	for _, cl := range clusters {
		clusterNode := tview.NewTreeNode(cl.Name).SetSelectable(false)
		clusterNode.SetColor(tcell.ColorGreen)
		for _, c := range cl.Contexts {
			node := tview.NewTreeNode(c.Name).SetSelectable(true)
			if c.Name == selectedContext {
				// highlight and pre-select the currently active context
				node.SetColor(tcell.ColorYellow)
				tree.SetCurrentNode(node)
			} else {
				node.SetColor(tcell.ColorTurquoise)
			}
			// copy the loop value so each closure captures its own name
			name := c.Name
			node.SetSelectedFunc(func() {
				chosen = name
				app.Stop()
			})
			if fallbackNode == nil {
				// remember the first visible context as the default selection
				fallbackNode = node
			}
			clusterNode.AddChild(node)
		}
		root.AddChild(clusterNode)
	}
	if tree.GetCurrentNode() == nil {
		// nothing matched selectedContext: fall back to the first context
		tree.SetCurrentNode(fallbackNode)
	}
	app.SetRoot(tree, true)
	if err := app.Run(); err != nil {
		return "", err
	}
	return chosen, nil
}
|
package concator
import (
"context"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"sync"
"time"
"github.com/pkg/errors"
"github.com/Laisky/go-fluentd/libs"
"github.com/Laisky/go-fluentd/monitor"
utils "github.com/Laisky/go-utils"
"github.com/Laisky/go-utils/journal"
"github.com/Laisky/zap"
)
const (
	// defaultInnerJournalDataChanLen caps each per-tag data channel created
	// by createJournalRunner.
	defaultInnerJournalDataChanLen = 10000
	// defaultInnerJournalIDChanLen caps each per-tag committed-id channel.
	defaultInnerJournalIDChanLen = 10000
	// minimalBufSizeByte is the threshold below which NewJournal logs a
	// "buf file size too small" warning.
	minimalBufSizeByte = 10485760 // 10 MB
	// intervalToStartingLegacy is the pause between legacy-replay rounds in
	// DumpMsgFlow.
	intervalToStartingLegacy = 3 * time.Second
	// intervalForceGC is the period of DumpMsgFlow's forced-GC loop.
	intervalForceGC = 1 * time.Minute
)
// JournalCfg carries the settings NewJournal needs to build a Journal.
type JournalCfg struct {
	// BufDirPath is the base directory; each tag gets its own sub-directory.
	BufDirPath string
	// BufSizeBytes is the journal buffer file size (see minimalBufSizeByte).
	BufSizeBytes int64
	// JournalOutChanLen and CommitIdChanLen size outChan and commitChan.
	JournalOutChanLen, CommitIdChanLen int
	// IsCompress toggles compressed buffer files.
	IsCompress bool
	// MsgPool recycles *libs.FluentMsg values across the pipeline.
	MsgPool *sync.Pool
	// CommittedIDTTL is how long committed ids are retained for dedup.
	CommittedIDTTL time.Duration
}
// Journal dumps all messages to files,
// then check every msg with committed id to make sure no msg lost
type Journal struct {
	*JournalCfg
	// legacyLock ensures only one ProcessLegacyMsg run is active at a time
	legacyLock *utils.Mutex
	// outChan feeds journaled msgs to downstream consumers
	outChan chan *libs.FluentMsg
	// commitChan receives msgs whose ids have been committed downstream
	commitChan chan *libs.FluentMsg
	// baseJournalDir is the root directory holding per-tag sub-directories
	baseJournalDir string
	// baseJournalCfg is the template cloned for each per-tag journal
	baseJournalCfg *journal.JournalConfig
	// jjLock serializes creation/teardown of the per-tag maps below
	jjLock *sync.Mutex
	tag2JMap *sync.Map // map[string]*journal.Journal
	tag2JJInchanMap *sync.Map // map[string]chan *libs.FluentMsg
	tag2JJCommitChanMap *sync.Map // map[string]chan *libs.FluentMsg
	tag2CtxCancelMap *sync.Map // map[string]context.CancelFunc
}
// NewJournal create new Journal with `bufDirPath` and `BufSizeBytes`.
// It builds the base per-tag journal config, restores any legacy per-tag
// journals found on disk, registers monitoring metrics and starts the
// commit runner before returning.
func NewJournal(ctx context.Context, cfg *JournalCfg) *Journal {
	utils.Logger.Info("create new journal",
		zap.String("filepath", cfg.BufDirPath),
		zap.Int64("size", cfg.BufSizeBytes))
	if cfg.BufSizeBytes < minimalBufSizeByte {
		utils.Logger.Warn("journal buf file size too small", zap.Int64("size", cfg.BufSizeBytes))
	}
	baseCfg := journal.NewConfig()
	baseCfg.BufDirPath = cfg.BufDirPath
	baseCfg.BufSizeBytes = cfg.BufSizeBytes
	baseCfg.IsCompress = cfg.IsCompress
	baseCfg.CommittedIDTTL = cfg.CommittedIDTTL
	j := &Journal{
		JournalCfg:          cfg,
		legacyLock:          &utils.Mutex{},
		jjLock:              &sync.Mutex{},
		outChan:             make(chan *libs.FluentMsg, cfg.JournalOutChanLen),
		commitChan:          make(chan *libs.FluentMsg, cfg.CommitIdChanLen),
		baseJournalDir:      baseCfg.BufDirPath,
		baseJournalCfg:      baseCfg,
		tag2JMap:            &sync.Map{},
		tag2JJInchanMap:     &sync.Map{},
		tag2JJCommitChanMap: &sync.Map{},
		tag2CtxCancelMap:    &sync.Map{},
	}
	j.initLegacyJJ(ctx)
	j.registerMonitor()
	j.startCommitRunner(ctx)
	return j
}
// CloseTag cancels the per-tag runner goroutines and removes every
// resource registered for tag (journal, input chan, commit chan, cancel
// func). It returns an error when the tag has no registered runner.
//
// BUG FIX: the stored value's dynamic type is context.CancelFunc (stored
// by createJournalRunner via LoadOrStore(tag, cancel)); the previous
// assertion `fi.(func())` targets the distinct unnamed type func() and
// would always panic at runtime.
func (j *Journal) CloseTag(tag string) error {
	fi, ok := j.tag2CtxCancelMap.Load(tag)
	if !ok {
		return fmt.Errorf("tag %v not exists in tag2CtxCancelMap", tag)
	}
	j.jjLock.Lock()
	defer j.jjLock.Unlock()
	// cancel the per-tag context so the ids/data writer goroutines exit
	fi.(context.CancelFunc)()
	j.tag2JMap.Delete(tag)
	j.tag2JJInchanMap.Delete(tag)
	j.tag2JJCommitChanMap.Delete(tag)
	j.tag2CtxCancelMap.Delete(tag)
	return nil
}
// initLegacyJJ scans the base journal directory and spawns a journal
// runner for every tag sub-directory left over from a previous run.
func (j *Journal) initLegacyJJ(ctx context.Context) {
	entries, err := ioutil.ReadDir(j.baseJournalDir)
	if err != nil {
		utils.Logger.Warn("try to read dir of journal got error", zap.Error(err))
		return
	}
	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		j.createJournalRunner(ctx, entry.Name())
	}
}
// LoadMaxID load the max committed id across every per-tag journal.
//
// BUG FIX: the original had the error/success branches inverted — it
// updated the max when LoadMaxId FAILED and wrapped a nil error when it
// succeeded — and returned `nid` (the last journal's value) instead of
// the accumulated max `id`.
func (j *Journal) LoadMaxID() (id int64, err error) {
	j.tag2JMap.Range(func(k, v interface{}) bool {
		tag := k.(string)
		jj := v.(*journal.Journal)
		nid, err2 := jj.LoadMaxId()
		if err2 != nil {
			// remember the failure but keep scanning remaining journals,
			// matching the original's continue-on-error behavior
			err = errors.Wrapf(err2, "try to load max id with tag `%v` got error", tag)
			return true
		}
		if nid > id {
			id = nid
		}
		return true
	})
	return id, err
}
// ProcessLegacyMsg replays, with one goroutine per tag, every legacy
// (journaled but uncommitted) message and pushes each recovered msg into
// dumpChan so it is re-journaled. It returns the highest msg id observed
// across all tags and, when loading failed for some tag, the last error.
// Only one replay may run at a time: a concurrent call fails fast.
func (j *Journal) ProcessLegacyMsg(dumpChan chan *libs.FluentMsg) (maxID int64, err2 error) {
	if !j.legacyLock.TryLock() {
		return 0, fmt.Errorf("another legacy is running")
	}
	defer j.legacyLock.ForceRelease()
	utils.Logger.Debug("starting to process legacy data...")
	var (
		wg = &sync.WaitGroup{}
		// l guards the shared maxID/err2 across the per-tag goroutines
		l = &sync.Mutex{}
	)
	j.tag2JMap.Range(func(k, v interface{}) bool {
		wg.Add(1)
		go func(tag string, jj *journal.Journal) {
			defer wg.Done()
			var (
				innerMaxID int64
				err error
				msg *libs.FluentMsg
				data = &journal.Data{Data: map[string]interface{}{}}
			)
			if !jj.LockLegacy() { // avoid rotate
				return
			}
			startTs := utils.Clock.GetUTCNow()
			for {
				msg = j.MsgPool.Get().(*libs.FluentMsg)
				data.Data["message"] = nil // alloc new map to avoid old data contaminate
				if err = jj.LoadLegacyBuf(data); err == io.EOF {
					// all legacy data for this tag replayed; fold the local
					// max id into the shared result and exit the goroutine
					utils.Logger.Debug("load legacy buf done",
						zap.Float64("sec", utils.Clock.GetUTCNow().Sub(startTs).Seconds()),
					)
					j.MsgPool.Put(msg)
					l.Lock()
					if innerMaxID > maxID {
						maxID = innerMaxID
					}
					l.Unlock()
					return
				} else if err != nil {
					utils.Logger.Error("load legacy data got error", zap.Error(err))
					j.MsgPool.Put(msg)
					// NOTE(review): LockLegacy was already taken before the loop;
					// presumably a failed re-lock signals the journal rotated away,
					// so the goroutine records the error and gives up — confirm.
					if !jj.LockLegacy() {
						l.Lock()
						if innerMaxID > maxID {
							maxID = innerMaxID
						}
						err2 = err
						l.Unlock()
						return
					}
					continue
				}
				if data.Data["message"] == nil {
					// loaded entry carries no payload: drop it
					utils.Logger.Warn("lost message")
					j.MsgPool.Put(msg)
					continue
				}
				// rebuild the FluentMsg from the journaled record
				msg.Id = data.ID
				msg.Tag = string(data.Data["tag"].(string))
				msg.Message = data.Data["message"].(map[string]interface{})
				if msg.Id > innerMaxID {
					innerMaxID = msg.Id
				}
				utils.Logger.Debug("load msg from legacy",
					zap.String("tag", msg.Tag),
					zap.Int64("id", msg.Id))
				// rewrite data into journal
				// only committed id can really remove a msg
				dumpChan <- msg
			}
		}(k.(string), v.(*journal.Journal))
		return true
	})
	wg.Wait()
	utils.Logger.Debug("process legacy done")
	return
}
// createJournalRunner create journal for a tag,
// and return commit channel and dump channel.
//
// It builds a per-tag journal.Journal under <baseJournalDir>/<tag> and
// spawns two goroutines bound to a per-tag cancelable context:
//   - an ids writer draining the tag's commit channel, persisting each
//     committed msg id (including ExtIds) before recycling the msg;
//   - a data writer draining the tag's data channel, persisting each msg
//     and forwarding it to j.outChan (recycling it when outChan is full).
//
// BUG FIX: every retry loop in the original broke unconditionally after
// the first attempt (`if err != nil { nRetry++ }` followed by a bare
// `break`), so failed writes were never retried and the
// `nRetry == maxRetry` error logging could never trigger. The loops now
// retry up to maxRetry times and log when all attempts failed. The
// redundant double assignment of jcfg.BufDirPath was also removed.
func (j *Journal) createJournalRunner(ctx context.Context, tag string) {
	j.jjLock.Lock()
	defer j.jjLock.Unlock()
	var ok bool
	if _, ok = j.tag2JMap.Load(tag); ok {
		return // double check to prevent duplicate create jj runner
	}
	ctxForTag, cancel := context.WithCancel(ctx)
	if _, ok = j.tag2CtxCancelMap.LoadOrStore(tag, cancel); ok {
		utils.Logger.Panic("tag already exists in tag2CtxCancelMap", zap.String("tag", tag))
	}
	// clone the base config, pointing this journal at the tag's sub-directory
	jcfg := journal.NewConfig()
	jcfg.BufSizeBytes = j.baseJournalCfg.BufSizeBytes
	jcfg.IsCompress = j.baseJournalCfg.IsCompress
	jcfg.CommittedIDTTL = j.baseJournalCfg.CommittedIDTTL
	jcfg.IsAggresiveGC = false
	jcfg.BufDirPath = filepath.Join(j.baseJournalDir, tag)
	utils.Logger.Info("createJournalRunner for tag", zap.String("tag", tag))
	if _, ok = j.tag2JMap.Load(tag); ok {
		utils.Logger.Panic("tag already exists in tag2JMap", zap.String("tag", tag))
	}
	utils.Logger.Info("create new journal.Journal", zap.String("tag", tag))
	jj := journal.NewJournal(ctxForTag, jcfg)
	j.tag2JMap.Store(tag, jj)
	if _, ok = j.tag2JJInchanMap.Load(tag); ok {
		utils.Logger.Panic("tag already exists in tag2JJInchanMap", zap.String("tag", tag))
	}
	j.tag2JJInchanMap.Store(tag, make(chan *libs.FluentMsg, defaultInnerJournalDataChanLen))
	if _, ok = j.tag2JJCommitChanMap.Load(tag); ok {
		utils.Logger.Panic("tag already exists in tag2JJCommitChanMap", zap.String("tag", tag))
	}
	j.tag2JJCommitChanMap.Store(tag, make(chan *libs.FluentMsg, defaultInnerJournalIDChanLen))
	// create ids writer
	go func(ctx context.Context) {
		const maxRetry = 2
		var (
			mid int64
			err error
			msg *libs.FluentMsg
			ok  bool
		)
		chani, ok := j.tag2JJCommitChanMap.Load(tag)
		if !ok {
			utils.Logger.Panic("tag must in `j.tag2JJCommitChanMap`", zap.String("tag", tag))
		}
		defer utils.Logger.Info("journal ids writer exit")
		for {
			select {
			case <-ctx.Done():
				return
			case msg, ok = <-chani.(chan *libs.FluentMsg):
				if !ok {
					utils.Logger.Info("tag2JJCommitChan closed", zap.String("tag", tag))
					return
				}
			}
			if err = writeIDWithRetry(jj, msg.Id, maxRetry); err != nil {
				utils.Logger.Error("try to write id to journal got error", zap.Error(err))
			}
			if msg.ExtIds != nil {
				for _, mid = range msg.ExtIds {
					if err = writeIDWithRetry(jj, mid, maxRetry); err != nil {
						utils.Logger.Error("try to write id to journal got error", zap.Error(err))
					}
				}
				msg.ExtIds = nil
			}
			j.MsgPool.Put(msg)
		}
	}(ctxForTag)
	// create data writer
	go func(ctx context.Context) {
		const maxRetry = 2
		var (
			data = &journal.Data{Data: map[string]interface{}{}}
			err  error
			ok   bool
			msg  *libs.FluentMsg
		)
		chani, ok := j.tag2JJInchanMap.Load(tag)
		if !ok {
			utils.Logger.Panic("tag should in `j.tag2JJInchanMap`", zap.String("tag", tag))
		}
		defer utils.Logger.Info("journal data writer exit", zap.String("msg", fmt.Sprint(msg)))
		for {
			select {
			case <-ctx.Done():
				return
			case msg, ok = <-chani.(chan *libs.FluentMsg):
				if !ok {
					utils.Logger.Info("tag2JJInchan closed", zap.String("tag", tag))
					return
				}
			}
			data.ID = msg.Id
			data.Data["message"] = msg.Message
			data.Data["tag"] = msg.Tag
			for nRetry := 0; nRetry < maxRetry; nRetry++ {
				if err = jj.WriteData(data); err == nil {
					break
				}
			}
			if err != nil {
				utils.Logger.Error("try to write msg to journal got error",
					zap.Error(err),
					zap.String("tag", msg.Tag),
				)
			}
			select {
			case j.outChan <- msg:
			default:
				// msg will reproduce in legacy stage,
				// so you can discard msg without any side-effect.
				j.MsgPool.Put(msg)
			}
		}
	}(ctxForTag)
}

// writeIDWithRetry persists id via jj.WriteId, retrying up to maxRetry
// times; it returns nil as soon as one attempt succeeds, otherwise the
// last error.
func writeIDWithRetry(jj *journal.Journal, id int64, maxRetry int) (err error) {
	for i := 0; i < maxRetry; i++ {
		if err = jj.WriteId(id); err == nil {
			return nil
		}
	}
	return err
}
// GetOutChan returns the channel carrying journaled msgs for downstream
// consumers (the same channel DumpMsgFlow returns).
func (j *Journal) GetOutChan() chan *libs.FluentMsg {
	return j.outChan
}
// ConvertMsg2Buf copies msg's id, tag and message payload into the
// supplied map, overwriting any existing entries under those keys.
func (j *Journal) ConvertMsg2Buf(msg *libs.FluentMsg, data *map[string]interface{}) {
	m := *data
	m["id"] = msg.Id
	m["tag"] = msg.Tag
	m["message"] = msg.Message
}
// DumpMsgFlow wires up the journal's background pipelines and returns the
// channel downstream consumers read from (j.outChan). It launches:
//   - a loop that periodically replays legacy journal files into dumpChan;
//   - a periodic forced-GC loop;
//   - a forwarder passing msgs from skipDumpChan straight to outChan;
//   - the dumper that routes every msg arriving on dumpChan to its per-tag
//     journal runner, creating the runner on first use.
// All four goroutines exit when ctx is canceled or their input closes.
func (j *Journal) DumpMsgFlow(ctx context.Context, msgPool *sync.Pool, dumpChan, skipDumpChan chan *libs.FluentMsg) chan *libs.FluentMsg {
	// deal with legacy
	go func() {
		defer utils.Logger.Info("legacy processor exit")
		var err error
		for { // try to starting legacy loading
			select {
			case <-ctx.Done():
				return
			default:
			}
			if _, err = j.ProcessLegacyMsg(dumpChan); err != nil {
				utils.Logger.Error("process legacy got error", zap.Error(err))
			}
			time.Sleep(intervalToStartingLegacy)
		}
	}()
	// start periodic gc
	go func() {
		defer utils.Logger.Info("gc runner exit")
		for {
			select {
			case <-ctx.Done():
				return
			default:
			}
			utils.ForceGC()
			time.Sleep(intervalForceGC)
		}
	}()
	// deal with msgs that skip dump
	go func() {
		var (
			msg *libs.FluentMsg
			ok bool
		)
		defer utils.Logger.Info("skipDumpChan goroutine exit", zap.String("msg", fmt.Sprint(msg)))
		for {
			select {
			case <-ctx.Done():
				return
			case msg, ok = <-skipDumpChan:
				if !ok {
					utils.Logger.Info("skipDumpChan closed")
					return
				}
			}
			j.outChan <- msg
		}
	}()
	go func() {
		var (
			ok bool
			jji interface{}
			msg *libs.FluentMsg
		)
		defer utils.Logger.Info("legacy dumper exit", zap.String("msg", fmt.Sprint(msg)))
		for {
			select {
			case <-ctx.Done():
				return
			case msg, ok = <-dumpChan:
				if !ok {
					utils.Logger.Info("dumpChan closed")
					return
				}
			}
			utils.Logger.Debug("try to dump msg", zap.String("tag", msg.Tag))
			if jji, ok = j.tag2JJInchanMap.Load(msg.Tag); !ok {
				// first msg for this tag: spin up its journal runner on demand
				j.createJournalRunner(ctx, msg.Tag)
				jji, _ = j.tag2JJInchanMap.Load(msg.Tag)
			}
			// hand the msg to whichever side is ready: the tag's journal
			// input chan or directly downstream (bypassing journaling);
			// when both are full the msg is discarded back to the pool
			// (it will be reproduced by the legacy replay).
			select {
			case jji.(chan *libs.FluentMsg) <- msg:
			case j.outChan <- msg:
			default:
				utils.Logger.Error("discard msg since of journal & downstream busy",
					zap.String("tag", msg.Tag),
					zap.String("msg", fmt.Sprint(msg)),
				)
				j.MsgPool.Put(msg)
			}
		}
	}()
	return j.outChan
}
// GetCommitChan returns the send-only channel through which callers report
// msgs whose ids have been committed downstream.
func (j *Journal) GetCommitChan() chan<- *libs.FluentMsg {
	return j.commitChan
}
// startCommitRunner launches the goroutine that drains j.commitChan and
// routes each committed msg to its tag's commit channel, creating the
// tag's journal runner on demand. When the per-tag channel is full the
// msg is re-queued on commitChan; if commitChan is also full the msg is
// discarded back into the pool. Exits when ctx is canceled or commitChan
// closes.
func (j *Journal) startCommitRunner(ctx context.Context) {
	go func() {
		var (
			ok bool
			chani interface{}
			msg *libs.FluentMsg
		)
		defer utils.Logger.Info("id commitor exit", zap.String("msg", fmt.Sprint(msg)))
		for {
			select {
			case <-ctx.Done():
				return
			case msg, ok = <-j.commitChan:
				if !ok {
					utils.Logger.Info("commitChan closed")
					return
				}
			}
			utils.Logger.Debug("try to commit msg",
				zap.String("tag", msg.Tag),
				zap.Int64("id", msg.Id))
			if chani, ok = j.tag2JJCommitChanMap.Load(msg.Tag); !ok {
				// first commit for this tag: create its journal runner
				j.createJournalRunner(ctx, msg.Tag)
				chani, _ = j.tag2JJCommitChanMap.Load(msg.Tag)
			}
			select {
			case chani.(chan *libs.FluentMsg) <- msg:
			default:
				// per-tag commit chan busy: push the msg back onto commitChan
				// to retry later, or drop it when that is busy too
				select {
				case j.commitChan <- msg:
					utils.Logger.Warn("reset committed msg",
						zap.String("tag", msg.Tag),
						zap.Int64("id", msg.Id),
					)
				default:
					utils.Logger.Error("discard committed msg because commitChan is busy",
						zap.String("tag", msg.Tag),
						zap.Int64("id", msg.Id),
					)
					j.MsgPool.Put(msg)
				}
			}
		}
	}()
}
// registerMonitor exposes a "journal" metric that snapshots the metrics
// of every per-tag journal, keyed by tag.
func (j *Journal) registerMonitor() {
	monitor.AddMetric("journal", func() map[string]interface{} {
		metrics := make(map[string]interface{})
		j.tag2JMap.Range(func(k, v interface{}) bool {
			metrics[k.(string)] = v.(*journal.Journal).GetMetric()
			return true
		})
		return metrics
	})
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.