text stringlengths 11 4.05M |
|---|
// Install view - shows install menu, and executes install upon chosen options
// =================================================
package views
import (
"strings"
"net/http"
"text/template"
)
// InstallData is the data handed to the install-script template.
type InstallData struct {
	Logo          string            // ASCII-art logo, pre-escaped for embedding in a single-quoted bash string
	ClientSecret  string            // secret embedded into the generated script (SECRET=...)
	FoldersMap    map[string]string // menu option key -> dotfiles folder name
	AvailableOpts string            // concatenation of all valid option characters (OPTS=...)
	RepoOpts      string            // body of the generated bash `case` statement selecting packages
	BaseURL       string            // base URL of this server
	URLMask       string            // URL mask forwarded to the template; exact semantics defined by the template head — TODO confirm
}
// ServeInstall renders the install script: it shows the install menu and
// emits the bash that executes the install for the chosen options.
func ServeInstall(w http.ResponseWriter, r *http.Request, baseurl string, client_secret string, logo string, directory string, alphabet string, foldersMap map[string]string, urlMask string) {
	// Menu entries: only alphabet characters that actually map to a folder.
	menuItems := make(map[string]string)
	for i := 0; i < len(alphabet); i++ {
		key := string(alphabet[i])
		if _, ok := foldersMap[key]; !ok {
			continue
		}
		menuItems[key] = foldersMap[key]
	}
	// All available option characters; the generated bash uses this string
	// as a character set to cross-check user input, so order is irrelevant.
	keys := make([]string, 0, len(foldersMap))
	for key := range foldersMap {
		keys = append(keys, key)
	}
	availableOpts := strings.Join(keys, "")
	// Body of the bash `case` statement with the repo packages.
	repoPackages := repoPackagesCasePrint(foldersMap, false, directory, baseurl)
	// Named fields instead of a positional literal so a reordering of the
	// struct cannot silently mis-assign template data. The logo is escaped
	// so it survives inside a single-quoted bash string.
	data := InstallData{
		Logo:          strings.ReplaceAll(logo, "'", "'\"'\"'"),
		ClientSecret:  client_secret,
		FoldersMap:    menuItems,
		AvailableOpts: availableOpts,
		RepoOpts:      repoPackages,
		BaseURL:       baseurl,
		URLMask:       urlMask,
	}
	funcMap := template.FuncMap{
		// inc increments the menu-row counter inside the template.
		"inc": func(i int) int {
			return i + 1
		},
		// mod reports whether i is divisible by j (used to break menu rows
		// every j items; the name is historical, it is a divisibility test).
		"mod": func(i, j int) bool {
			return i%j == 0
		},
	}
	tmpl, err := template.New("install").Funcs(funcMap).Parse(tmplInstall)
	if err != nil {
		panic(err)
	}
	if err := tmpl.Execute(w, data); err != nil {
		panic(err)
	}
}
// tmplInstall is the bash install-script template. It is prefixed with the
// shared script head and the git-clone helpers, and rendered by ServeInstall
// with an InstallData value plus the "inc"/"mod" template helpers.
var tmplInstall = bashTemplHead + gitCloneTmpl + `
tput clear
echo -e '{{.Logo}}'
barPrint
printf "%2s%s\n%2s%s\e[32m%s\e[0m%s\n\n" "" "Choose dotfiles to be installed." "" "Select by typing keys (" "green" ") and confirm with enter."
barPrint
{{ $index := 0 }}
{{ range $key, $value := .FoldersMap }}
printf " \e[32m%s\e[0m)\e[35m %-15s\e[0m" "{{ $key }} " "{{ $value }}"
{{ $index = inc $index }}
{{ if mod $index 3 }}echo ""{{ end }}
{{ end }}
SECRET="{{ .ClientSecret }}"
selectPackage() {
case "$1" in
{{ .RepoOpts }}
esac
}
OPTS="{{ .AvailableOpts }}"
exec 3<>/dev/tty
echo ""
read -u 3 -p " Chosen packages: " words
echo ""
if [ -z $words ]; then
echo -e " Nothing to do... exiting."
exit 0
fi
barPrint
echo -ne " Follwing dotfiles will be installed in order:\n "
COMMA=""
for CHAR in $(echo "$words" | fold -w1); do
test "${OPTS#*$CHAR}" != "$OPTS" || continue
echo -en "$COMMA"
selectPackage $CHAR False
COMMA=", "
done
if [ "$COMMA" == "" ]; then
echo -e "\n Nothing to do... exiting."
exit 0
fi
GITINSTALL=false
if [ -f "$HOME/.dotman/managed" ]; then
if [ -d "$HOME/.dotman/dotfiles" ]; then
GITINSTALL=true
echo -e "\n\n \e[33;5mWarning!\e[0m\n Git install method used.\n This will update any other dotfiles managed by dotman."
fi
else
if command -v git >/dev/null 2>&1; then
echo -e "\n\n Fresh install. GIT command present. Install using git symlink method? [Y/n]"
read -u 3 -n 1 -r -s
[[ ! $REPLY =~ ^[Nn]$ ]] && GITINSTALL=true
fi
fi
confirmPrompt
mkdir -p "$HOME/.dotman"; touch "$HOME/.dotman/managed"
if command -v git >/dev/null 2>&1; then
"$GITINSTALL" && mkdir -p "$HOME/.dotman/dotfiles" || rm -rf "$HOME/.dotman/dotfiles"
fi
barPrint
echo " Installing dotfiles:"
if [ -d "$HOME/.dotman/dotfiles" ]; then
gitCloneIfPresent "$SECRET"
fi
for CHAR in $(echo "$words" | fold -w1); do
test "${OPTS#*$CHAR}" != "$OPTS" || continue
selectPackage $CHAR
done
`
|
package main
//392. 判断子序列
//给定字符串 s 和 t ,判断 s 是否为 t 的子序列。
//
//你可以认为 s 和 t 中仅包含英文小写字母。字符串 t 可能会很长(长度 ~= 500,000),而 s 是个短字符串(长度 <=100)。
//
//字符串的一个子序列是原始字符串删除一些(也可以不删除)字符而不改变剩余字符相对位置形成的新字符串。(例如,"ace"是"abcde"的一个子序列,而"aec"不是)。
//
//示例1:
//s = "abc", t = "ahbgdc"
//
//返回true.
//
//示例2:
//s = "axc", t = "ahbgdc"
//
//返回false.
//
//后续挑战 :
//
//如果有大量输入的 S,称作S1, S2, ... , Sk 其中 k >= 10亿,你需要依次检查它们是否为 T 的子序列。在这种情况下,你会怎样改变代码?
//思路 双指针
// isSubsequence reports whether s is a subsequence of t, i.e. whether s can
// be obtained from t by deleting zero or more characters without reordering
// the rest. Two-pointer scan: O(len(t)) time, O(1) space.
func isSubsequence(s string, t string) bool {
	si := 0
	for ti := 0; ti < len(t) && si < len(s); ti++ {
		if s[si] == t[ti] {
			si++
		}
	}
	return si == len(s)
}
|
package engineserver
import (
"encoding/json"
"github.com/engelsjk/faadb/internal/codes"
"github.com/engelsjk/faadb/internal/service"
"github.com/engelsjk/faadb/internal/utils"
)
// EngineService serves FAA engine records: it decodes raw data lines into
// Records and stores them through the shared service layer.
type EngineService struct {
	Name  string           // service name ("engine")
	svc   *service.Service // backing data service
	codes Codes            // code -> description lookup tables (see initCodes)
}
// NewEngineService builds an EngineService backed by a service.Service that
// parses 7-field data lines via DecodeLine.
func NewEngineService(dataPath, dbPath string, reload bool) (*EngineService, error) {
	const (
		name      = "engine"
		numFields = 7
	)
	engineSvc := &EngineService{Name: name, codes: initCodes()}
	svc, err := service.NewService(service.Settings{
		Name:      name,
		NumFields: numFields,
		DataPath:  dataPath,
		DBPath:    dbPath,
		Reload:    reload,
	}, engineSvc.DecodeLine)
	if err != nil {
		return nil, err
	}
	engineSvc.svc = svc
	return engineSvc, nil
}
// DecodeLine converts one raw data line into a (key, json) pair: the key is
// the upper-cased manufacturer model code and the value the marshaled Record.
func (e *EngineService) DecodeLine(line []string) (string, string, error) {
	rec := Record{
		ManufacturerModelCode: utils.ToUpper(line[0]),
		ManufacturerName:      utils.ToUpper(line[1]),
		ModelName:             utils.ToUpper(line[2]),
		EngineType: codes.Description{
			Code:        utils.ToUpper(line[3]),
			Description: codes.DecodeDescription(line[3], e.codes.EngineType),
		},
		Horsepower:     codes.ParseInt32(line[4]),
		PoundsOfThrust: codes.ParseInt32(line[5]),
	}
	encoded, err := json.Marshal(rec)
	return rec.ManufacturerModelCode, string(encoded), err
}
// Record is one engine row as stored/served, keyed by its manufacturer
// model code.
type Record struct {
	ManufacturerModelCode string            `json:"manufacturer_model_code"`
	ManufacturerName      string            `json:"manufacturer_name"`
	ModelName             string            `json:"engine_model_name"`
	EngineType            codes.Description `json:"engine_type"`
	Horsepower            int32             `json:"horsepower"`
	PoundsOfThrust        int32             `json:"pounds_of_thrust"`
}
// MarshalJSON encodes the Record as JSON. The local alias type strips the
// Record's methods so json.Marshal does not recurse back into this method.
func (r *Record) MarshalJSON() ([]byte, error) {
	type alias Record
	wrapper := &struct {
		*alias
	}{
		alias: (*alias)(r),
	}
	return json.Marshal(wrapper)
}
// UnmarshalJSON decodes JSON into the Record. As in MarshalJSON, the alias
// type prevents infinite recursion into this method.
func (r *Record) UnmarshalJSON(b []byte) error {
	type alias Record
	wrapper := &struct {
		*alias
	}{
		alias: (*alias)(r),
	}
	return json.Unmarshal(b, wrapper)
}
|
package middle_test
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/labstack/echo/v4"
"github.com/shandysiswandi/echo-service/internal/infrastructure/app/middle"
"github.com/stretchr/testify/assert"
)
// TestLogger verifies the Logger middleware passes the request through and
// leaves the wrapped handler's response untouched.
func TestLogger(t *testing.T) {
	// setup
	req := httptest.NewRequest(http.MethodGet, "/", nil)
	rec := httptest.NewRecorder()
	e := echo.New()
	c := e.NewContext(req, rec)
	// testing
	h := middle.Logger()(func(c echo.Context) error {
		return c.String(http.StatusOK, "logger")
	})
	// the handler error was previously discarded; assert it instead
	assert.NoError(t, h(c))
	// assertion
	assert.Equal(t, http.StatusOK, rec.Code)
	assert.Equal(t, "logger", rec.Body.String())
}
|
package debug
import (
"net/http"
"strconv"
"github.com/pokemium/worldwide/pkg/util"
)
// Trace implements the trace debug API: while the emulator is paused it
// advances execution by `step` instructions and returns a plain-text
// disassembly line per executed instruction.
func (d *Debugger) Trace(w http.ResponseWriter, req *http.Request) {
	switch req.Method {
	case "GET":
		// tracing is only meaningful while execution is halted
		if !*d.pause {
			http.Error(w, "trace API is available on pause state", http.StatusBadRequest)
			return
		}
		q := req.URL.Query()
		steps := uint16(0)
		for key, val := range q {
			if key == "step" {
				// parse errors are ignored: steps stays 0 and is rejected below
				s, _ := strconv.ParseUint(val[0], 10, 16)
				steps = uint16(s)
			}
		}
		if steps == 0 {
			http.Error(w, "`step` is needed on query parameter(e.g. ?step=20)", http.StatusBadRequest)
			return
		}
		result := ""
		for s := uint16(0); s < steps; s++ {
			d.g.Step()
			// run per-step callbacks, skipping breakpoint-priority ones so
			// that breakpoints do not interrupt an explicit trace request
			for _, callback := range d.g.Callbacks {
				if callback.Priority == util.PRIO_BREAKPOINT {
					continue
				}
				if callback.Func() {
					break
				}
			}
			result += stringfyCurInst(d.g.Inst) + "\n"
		}
		w.Header().Set("Content-Type", "text/plain")
		w.Write([]byte(result))
	default:
		http.NotFound(w, req)
		return
	}
}
|
package kubectl
import (
"context"
"github.com/pkg/errors"
"io"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
)
// ReadLogs reads the logs and returns a string
func (client *client) ReadLogs(ctx context.Context, namespace, podName, containerName string, lastContainerLog bool, tail *int64) (string, error) {
readCloser, err := client.Logs(ctx, namespace, podName, containerName, lastContainerLog, tail, false)
if err != nil {
return "", err
}
logs, err := io.ReadAll(readCloser)
if err != nil {
return "", err
}
return string(logs), nil
}
// Logs prints the container logs
func (client *client) Logs(ctx context.Context, namespace, podName, containerName string, lastContainerLog bool, tail *int64, follow bool) (io.ReadCloser, error) {
lines := int64(500)
if tail != nil {
lines = *tail
}
request := client.KubeClient().CoreV1().RESTClient().Get().Namespace(namespace).Name(podName).Resource("pods").SubResource("log").VersionedParams(&v1.PodLogOptions{
Container: containerName,
TailLines: &lines,
Previous: lastContainerLog,
Follow: follow,
}, scheme.ParameterCodec)
if request.URL().String() == "" {
return nil, errors.New("Request url is empty")
}
return request.Stream(ctx)
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"database/sql"
"github.com/gorilla/mux"
_ "github.com/lib/pq"
)
const (
	// ACCIDENTS_QUERY filters events by registration number with a
	// case-insensitive regex match (~*).
	// NOTE(review): this query is interpolated with fmt.Sprintf — any
	// handler feeding it user input is open to SQL injection; prefer a
	// $1 placeholder passed as a db.Query argument.
	ACCIDENTS_QUERY = `SELECT id,regis_no,ev_id,acft_make,afm_hrs,afm_hrs_last_insp,date_last_insp,owner_acft FROM events WHERE regis_no ~* '%s'`
	// EVENTS_QUERY returns every event, unfiltered.
	EVENTS_QUERY = `SELECT id,regis_no,ev_id,acft_make,afm_hrs,afm_hrs_last_insp,date_last_insp,owner_acft
FROM events`
	// DESCRIPTION_URL is the NTSB accident-brief page for an event id.
	DESCRIPTION_URL = "http://www.ntsb.gov/_layouts/ntsb.aviation/brief.aspx?ev_id=%s"
)
// Accident is one accident/event row as returned by the API.
type Accident struct {
	RegistrationNumber           string
	EventId                      string
	Description                  string // filled by getDescription (currently a stub)
	AircraftMake                 string
	LastInspectedDate            string
	AmountHrsSinceLastInspection string
	AmountOfHours                string
	Owner                        string
}

// Accidents is a list of Accident rows.
type Accidents []Accident

// AccidentResponse is the JSON envelope returned by the API endpoints.
type AccidentResponse struct {
	Success bool
	Objects Accidents
}
// addElement appends a new Accident built from the given fields, resolving
// its description before it is added to the list.
func (l *Accidents) addElement(registrationNumber string, eventId string, aircraftMake string, lastInspectedDate string, amountHrsSinceLastInspection string, amountOfHours string, owner string) {
	accident := Accident{
		RegistrationNumber:           registrationNumber,
		EventId:                      eventId,
		AircraftMake:                 aircraftMake,
		LastInspectedDate:            lastInspectedDate,
		AmountHrsSinceLastInspection: amountHrsSinceLastInspection,
		AmountOfHours:                amountOfHours,
		Owner:                        owner,
	}
	accident.getDescription()
	*l = append(*l, accident)
}
// getDescription populates the Description field.
// NOTE(review): placeholder implementation — always sets "Test". The real
// description presumably comes from DESCRIPTION_URL; confirm before relying
// on this field.
func (l *Accident) getDescription() {
	l.Description = "Test"
}
func AccidentEventEndpoint(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
accidentId := vars["accident_id"]
QUERY := fmt.Sprintf(ACCIDENTS_QUERY, accidentId)
accidents := Accidents{}
db, err := sql.Open("postgres", "user='' dbname=plane sslmode=disable")
if err != nil {
log.Fatal(err)
}
var (
id int
regis_no string
ev_id string
acft_make string
date_last_insp string
afm_hrs_last_insp string
afm_hrs string
owner_acft string
)
rows, err := db.Query(QUERY)
if err != nil {
fmt.Println(err)
return
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&id, ®is_no, &ev_id, &acft_make, &afm_hrs, &afm_hrs_last_insp, &date_last_insp, &owner_acft)
if err != nil {
log.Fatal(err)
}
accidents.addElement(regis_no, ev_id, acft_make, date_last_insp, afm_hrs_last_insp, afm_hrs, owner_acft)
}
response := AccidentResponse{
Success: true,
Objects: accidents,
}
json.NewEncoder(w).Encode(response)
}
func AccidentQueryEndPoint(w http.ResponseWriter, r *http.Request) {
reg_num := r.FormValue("regis_no")
QUERY := EVENTS_QUERY
if reg_num != "" {
QUERY = fmt.Sprintf(ACCIDENTS_QUERY, reg_num)
}
accidents := Accidents{}
db, err := sql.Open("postgres", "user='' dbname=plane sslmode=disable")
if err != nil {
log.Fatal(err)
}
var (
id int
regis_no string
ev_id string
acft_make string
date_last_insp string
afm_hrs_last_insp string
afm_hrs string
owner_acft string
)
rows, err := db.Query(QUERY)
if err != nil {
fmt.Println(err)
return
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&id, ®is_no, &ev_id, &acft_make, &afm_hrs, &afm_hrs_last_insp, &date_last_insp, &owner_acft)
if err != nil {
log.Fatal(err)
}
accidents.addElement(regis_no, ev_id, acft_make, date_last_insp, afm_hrs_last_insp, afm_hrs, owner_acft)
}
response := AccidentResponse{
Success: true,
Objects: accidents,
}
json.NewEncoder(w).Encode(response)
}
// main wires the accident API routes and serves them on :8000.
func main() {
	router := mux.NewRouter().StrictSlash(true)
	router.HandleFunc("/api/v1/accidents", AccidentQueryEndPoint).Methods("GET")
	router.HandleFunc("/api/v1/accidents/{accident_id}", AccidentEventEndpoint).Methods("GET")
	fmt.Println("Your webserver is running on port 8000")
	// fixed typo in the printed message: "(name sure" -> "(make sure"
	fmt.Println("(make sure nginx is running!!!)")
	log.Fatal(http.ListenAndServe(":8000", router))
}
|
package oauth2bearer
import (
"fmt"
"log"
"reflect"
"time"
"golang.org/x/oauth2"
)
// this loops on one goroutine, waiting until the token is about to expire
// and then grabbing a new one
// mainRefreshLoop runs on one goroutine: it sleeps until the current token
// is about to expire, fetches a fresh raw token from the source, and pushes
// it to the controller via the refresh channel.
func mainRefreshLoop(source TokenSource) {
	timeToWait := 0.0
	for {
		time.Sleep(time.Duration(timeToWait) * time.Second)
		initRaw, err := source.retrieveRawToken()
		if err != nil {
			// include the cause instead of discarding it
			log.Panicf("cannot refresh raw token: %v", err)
		}
		log.Print("got new raw token")
		source.refreshChannel <- initRaw
		// sleep until RefreshMargin seconds before the token expires
		// (time.Until is the idiomatic form of Expiry.Sub(time.Now()))
		waitDurationS := time.Until(initRaw.Expiry).Seconds()
		timeToWait = waitDurationS - source.params.RefreshMargin
	}
}
// this loop receives updates from the mainRefreshLoop and sends back
// up-to-date tokens when queries by the user code
// tokenControllerLoop receives refreshed tokens from mainRefreshLoop and
// answers user-code requests (get token, force refresh, register channel)
// arriving on the control channels. It blocks until the first raw token has
// been delivered.
func tokenControllerLoop(ts TokenSource) {
	userChans := ts.controlChannels
	refreshChan := ts.refreshChannel
	rawToken := <-refreshChan
	// log.Print("got first raw token")
	for {
		// rebuild the select set every iteration: index 0 is the refresh
		// channel, indices 1..n mirror the (possibly grown) user channels
		cases := make([]reflect.SelectCase, 0)
		cases = append(cases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(refreshChan),
		})
		for _, v := range userChans {
			cases = append(cases, reflect.SelectCase{
				Dir:  reflect.SelectRecv,
				Chan: reflect.ValueOf(v),
			})
		}
		chosen, val, ok := reflect.Select(cases)
		if !ok {
			panic("got error in select")
		}
		if chosen == 0 {
			// got refresh message from mainRefreshLoop; cache the new token
			fmt.Println("got refresh message")
			asToken, ok := val.Interface().(*oauth2.Token)
			if !ok {
				panic("token type assert failed")
			}
			rawToken = asToken
			// fmt.Println("done refresh message")
		} else {
			// got a control message on one of the user channels
			chosenChan := userChans[chosen-1]
			asControl, ok := val.Interface().(controlMessage)
			if !ok {
				panic("control type assert failed")
			}
			action := asControl.action
			if action == getToken {
				// reply with the most recent raw token on the same channel
				chosenChan <- controlMessage{
					action: sendToken,
					token:  rawToken,
				}
				// fmt.Println("token sent")
			} else if action == refresh {
				fmt.Println("control refresh")
				// forced refresh: fetch synchronously, bypassing mainRefreshLoop
				newRawToken, err := ts.retrieveRawToken()
				if err != nil {
					panic("force refresh got error")
				}
				rawToken = newRawToken
			} else if action == registerChannel {
				// adopt a new user channel; picked up on the next iteration
				userChans = append(userChans, asControl.channel)
			}
		}
	}
}
// this is how we get a token from the controller
// it sends a message on the controller channel and then
// returns what comes back
func getAccessToken(controllerChan chan controlMessage) *oauth2.Token {
// fmt.Println("sending request for token")
controllerChan <- controlMessage{
action: getToken,
}
// fmt.Println("reading reply")
reply := <-controllerChan
if reply.action != sendToken {
panic("didnt get token back")
} else {
// fmt.Println("got token back")
}
return reply.token
}
// eof
|
package statefulset
import (
"context"
"hash/fnv"
"reflect"
"strconv"
dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
"github.com/Dynatrace/dynatrace-operator/src/controllers"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/capability"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/internal/authtoken"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/internal/customproperties"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/internal/statefulset/builder"
"github.com/Dynatrace/dynatrace-operator/src/kubeobjects"
"github.com/Dynatrace/dynatrace-operator/src/kubesystem"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
var _ controllers.Reconciler = &Reconciler{}
// Reconciler keeps the ActiveGate StatefulSet of a DynaKube in sync with
// the desired state derived from its capability.
type Reconciler struct {
	client     client.Client              // writing client
	dynakube   *dynatracev1beta1.DynaKube // owning custom resource
	apiReader  client.Reader              // read access to the API (used for lookups)
	scheme     *runtime.Scheme            // used to set the owner reference
	capability capability.Capability      // capability this stateful set serves
	modifiers  []builder.Modifier         // modifiers applied when building the stateful set
}
// NewReconciler builds a Reconciler for the given DynaKube/capability pair
// with an empty modifier list.
func NewReconciler(clt client.Client, apiReader client.Reader, scheme *runtime.Scheme, dynakube *dynatracev1beta1.DynaKube, capability capability.Capability) *Reconciler { //nolint:revive // argument-limit doesn't apply to constructors
	reconciler := &Reconciler{
		client:     clt,
		apiReader:  apiReader,
		scheme:     scheme,
		dynakube:   dynakube,
		capability: capability,
	}
	reconciler.modifiers = []builder.Modifier{}
	return reconciler
}

// NewReconcilerFunc is the constructor signature of NewReconciler.
type NewReconcilerFunc = func(clt client.Client, apiReader client.Reader, scheme *runtime.Scheme, dynakube *dynatracev1beta1.DynaKube, capability capability.Capability) *Reconciler
// Reconcile drives the stateful set to its desired state, logging and
// returning (stack-wrapped) any failure.
func (r *Reconciler) Reconcile() error {
	if err := r.manageStatefulSet(); err != nil {
		log.Error(err, "could not reconcile stateful set")
		return errors.WithStack(err)
	}
	return nil
}
// manageStatefulSet drives the stateful set towards the desired state:
// build desired -> set owner reference -> create if missing -> delete if
// the immutable selector changed -> update if the hash annotation is
// outdated. Each step short-circuits the chain when it acted or failed.
func (r *Reconciler) manageStatefulSet() error {
	desiredSts, err := r.buildDesiredStatefulSet()
	if err != nil {
		return errors.WithStack(err)
	}
	if err := controllerutil.SetControllerReference(r.dynakube, desiredSts, r.scheme); err != nil {
		return errors.WithStack(err)
	}
	created, err := r.createStatefulSetIfNotExists(desiredSts)
	if created || err != nil {
		return errors.WithStack(err)
	}
	deleted, err := r.deleteStatefulSetIfSelectorChanged(desiredSts)
	if deleted || err != nil {
		return errors.WithStack(err)
	}
	updated, err := r.updateStatefulSetIfOutdated(desiredSts)
	if updated || err != nil {
		return errors.WithStack(err)
	}
	return nil
}
// buildDesiredStatefulSet assembles the desired stateful set from the
// cluster UID, the ActiveGate configuration hash, the DynaKube and the
// capability, applying the reconciler's modifiers.
func (r *Reconciler) buildDesiredStatefulSet() (*appsv1.StatefulSet, error) {
	kubeUID, err := kubesystem.GetUID(r.apiReader)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	activeGateConfigurationHash, err := r.calculateActiveGateConfigurationHash()
	if err != nil {
		return nil, errors.WithStack(err)
	}
	statefulSetBuilder := NewStatefulSetBuilder(kubeUID, activeGateConfigurationHash, *r.dynakube, r.capability)
	desiredSts, err := statefulSetBuilder.CreateStatefulSet(r.modifiers)
	return desiredSts, errors.WithStack(err)
}
// getStatefulSet fetches the live stateful set with the same name/namespace
// as desiredSts, wrapping any lookup error with a stack trace.
func (r *Reconciler) getStatefulSet(desiredSts *appsv1.StatefulSet) (*appsv1.StatefulSet, error) {
	sts := &appsv1.StatefulSet{}
	key := client.ObjectKey{Name: desiredSts.Name, Namespace: desiredSts.Namespace}
	if err := r.client.Get(context.TODO(), key, sts); err != nil {
		return nil, errors.WithStack(err)
	}
	return sts, nil
}
// createStatefulSetIfNotExists creates desiredSts when no stateful set with
// that name exists yet. Returns true when a create was attempted (any error
// is then the create error); otherwise false plus the lookup error, if any.
func (r *Reconciler) createStatefulSetIfNotExists(desiredSts *appsv1.StatefulSet) (bool, error) {
	_, err := r.getStatefulSet(desiredSts)
	// getStatefulSet wraps errors with a stack, hence errors.Cause here
	if err != nil && k8serrors.IsNotFound(errors.Cause(err)) {
		log.Info("creating new stateful set for " + r.capability.ShortName())
		return true, r.client.Create(context.TODO(), desiredSts)
	}
	return false, err
}
// updateStatefulSetIfOutdated updates the live stateful set when its hash
// annotation differs from the desired one. If the (immutable) selector
// labels differ, the set is deleted and recreated instead of updated.
// Returns true when an update or recreate happened.
func (r *Reconciler) updateStatefulSetIfOutdated(desiredSts *appsv1.StatefulSet) (bool, error) {
	currentSts, err := r.getStatefulSet(desiredSts)
	if err != nil {
		return false, err
	}
	if !kubeobjects.IsHashAnnotationDifferent(currentSts, desiredSts) {
		return false, nil
	}
	if kubeobjects.LabelsNotEqual(currentSts.Spec.Selector.MatchLabels, desiredSts.Spec.Selector.MatchLabels) {
		return r.recreateStatefulSet(currentSts, desiredSts)
	}
	log.Info("updating existing stateful set")
	if err = r.client.Update(context.TODO(), desiredSts); err != nil {
		return false, err
	}
	return true, err
}
// recreateStatefulSet deletes the current stateful set and creates the
// desired one in its place (used when an immutable field changed).
// Returns true together with the create result.
func (r *Reconciler) recreateStatefulSet(currentSts, desiredSts *appsv1.StatefulSet) (bool, error) {
	log.Info("immutable section changed on statefulset, deleting and recreating", "name", desiredSts.Name)
	if err := r.client.Delete(context.TODO(), currentSts); err != nil {
		return false, err
	}
	log.Info("deleted statefulset")
	log.Info("recreating statefulset", "name", desiredSts.Name)
	return true, r.client.Create(context.TODO(), desiredSts)
}
// The selector (e.g. MatchLabels) of a stateful set is immutable. If it
// changed, for example due to a new operator version,
// deleteStatefulSetIfSelectorChanged deletes the stateful set so it can be
// recreated correctly afterwards. Returns true when a delete happened.
func (r *Reconciler) deleteStatefulSetIfSelectorChanged(desiredSts *appsv1.StatefulSet) (bool, error) {
	currentSts, err := r.getStatefulSet(desiredSts)
	if err != nil {
		return false, err
	}
	if hasSelectorChanged(desiredSts, currentSts) {
		log.Info("deleting existing stateful set because selector changed")
		// deletes by desiredSts, which shares name/namespace with currentSts
		if err = r.client.Delete(context.TODO(), desiredSts); err != nil {
			return false, err
		}
		return true, nil
	}
	return false, nil
}
// hasSelectorChanged reports whether the live selector differs from the
// desired one (deep comparison of the whole selector, not just MatchLabels).
func hasSelectorChanged(desiredSts *appsv1.StatefulSet, currentSts *appsv1.StatefulSet) bool {
	selectorsEqual := reflect.DeepEqual(currentSts.Spec.Selector, desiredSts.Spec.Selector)
	return !selectorsEqual
}
// calculateActiveGateConfigurationHash returns an FNV-1 32-bit hash (as a
// decimal string) over the concatenated custom-property and auth-token
// payloads, or "" when both are empty. Presumably used to roll the stateful
// set when either input changes — confirm against the builder.
func (r *Reconciler) calculateActiveGateConfigurationHash() (string, error) {
	customPropertyData, err := r.getCustomPropertyValue()
	if err != nil {
		return "", errors.WithStack(err)
	}
	authTokenData, err := r.getAuthTokenValue()
	if err != nil {
		return "", errors.WithStack(err)
	}
	if len(customPropertyData) < 1 && len(authTokenData) < 1 {
		return "", nil
	}
	hash := fnv.New32()
	if _, err := hash.Write([]byte(customPropertyData + authTokenData)); err != nil {
		return "", errors.WithStack(err)
	}
	return strconv.FormatUint(uint64(hash.Sum32()), 10), nil
}
// getCustomPropertyValue returns the custom-properties payload, or "" when
// the capability carries no custom properties worth hashing.
func (r *Reconciler) getCustomPropertyValue() (string, error) {
	if !needsCustomPropertyHash(r.capability.Properties().CustomProperties) {
		return "", nil
	}
	customPropertyData, err := r.getDataFromCustomProperty(r.capability.Properties().CustomProperties)
	if err != nil {
		return "", errors.WithStack(err)
	}
	return customPropertyData, nil
}
// getAuthTokenValue returns the ActiveGate auth-token payload, or "" when
// the DynaKube does not use an auth token.
func (r *Reconciler) getAuthTokenValue() (string, error) {
	if !r.dynakube.UseActiveGateAuthToken() {
		return "", nil
	}
	authTokenData, err := r.getDataFromAuthTokenSecret()
	if err != nil {
		return "", errors.WithStack(err)
	}
	return authTokenData, nil
}
// getDataFromCustomProperty resolves the custom-properties payload: from the
// referenced secret when ValueFrom is set, otherwise the inline Value.
func (r *Reconciler) getDataFromCustomProperty(customProperties *dynatracev1beta1.DynaKubeValueSource) (string, error) {
	if customProperties.ValueFrom != "" {
		return kubeobjects.GetDataFromSecretName(r.apiReader, types.NamespacedName{Namespace: r.dynakube.Namespace, Name: customProperties.ValueFrom}, customproperties.DataKey, log)
	}
	return customProperties.Value, nil
}
// getDataFromAuthTokenSecret reads the ActiveGate auth token from the
// DynaKube's auth-token secret in the DynaKube's namespace.
func (r *Reconciler) getDataFromAuthTokenSecret() (string, error) {
	return kubeobjects.GetDataFromSecretName(r.apiReader, types.NamespacedName{Namespace: r.dynakube.Namespace, Name: r.dynakube.ActiveGateAuthTokenSecret()}, authtoken.ActiveGateAuthTokenName, log)
}
// needsCustomPropertyHash reports whether the custom properties carry any
// content (inline value or secret reference) that should be hashed.
func needsCustomPropertyHash(customProperties *dynatracev1beta1.DynaKubeValueSource) bool {
	if customProperties == nil {
		return false
	}
	return customProperties.Value != "" || customProperties.ValueFrom != ""
}
|
package timetosell2
// maxProfit returns the maximum total profit from buying and selling a
// stock any number of times given the daily prices. Greedy: extend the
// current rising run while prices are non-decreasing; a drop banks the
// run's profit and restarts from the lower price.
func maxProfit(prices []int) int {
	if len(prices) <= 1 {
		return 0
	}
	total := 0
	runProfit := 0
	buyPrice := prices[0]
	for day := 1; day < len(prices); day++ {
		today, yesterday := prices[day], prices[day-1]
		if yesterday <= today {
			// still climbing (or flat): profit of the open run so far
			runProfit = today - buyPrice
		} else {
			// price dropped: bank the run and start over from today
			buyPrice = today
			total += runProfit
			runProfit = 0
		}
	}
	// bank a run that was still open when the prices ended
	return total + runProfit
}
// maxProfit1 is an alternative formulation of maxProfit that explicitly
// skips flat days instead of folding them into the rising-run branch.
// Same greedy result: sum the profit of every strictly rising run.
func maxProfit1(prices []int) int {
	if len(prices) <= 1 {
		return 0
	}
	total := 0
	runProfit := 0
	buyPrice := prices[0]
	for day := 1; day < len(prices); day++ {
		today, yesterday := prices[day], prices[day-1]
		switch {
		case yesterday == today:
			// flat day: nothing changes
			continue
		case yesterday < today:
			// strictly rising: profit of the open run so far
			runProfit = today - buyPrice
		default:
			// price dropped: bank the run and restart from today
			buyPrice = today
			total += runProfit
			runProfit = 0
		}
	}
	// bank a run that was still open when the prices ended
	return total + runProfit
}
|
package main
import (
"fmt"
"time"
"github.com/garyburd/redigo/redis"
)
// checkErr panics when the given error is non-nil; no-op otherwise.
func checkErr(errMasg error) {
	if errMasg == nil {
		return
	}
	panic(errMasg)
}
func main() {
//建立连接
c, err := redis.Dial("tcp", "127.0.0.1:6379")
checkErr(err)
defer c.Close()
//查看redis已有数据量
size, err := c.Do("DBSIZE")
fmt.Printf("size is %d \n", size)
//执行set命令,写入数据
_, err = c.Do("set", "name", "yuanye")
checkErr(err)
//取数据
name, err := redis.String(c.Do("get", "name"))
if err != nil {
checkErr(err)
} else {
fmt.Println(name)
}
//删除数据
//
_, err = c.Do("del", "name")
checkErr(err)
//检查name是否存在
has, err := redis.Bool(c.Do("exists", "name"))
if err != nil {
fmt.Println("name is", err)
} else {
fmt.Println(has)
}
//设置redis过期时间3s
//
_, err = c.Do("set", "myName", "hehe", "ex", 3)
checkErr(err) \
myName, err := redis.String(c.Do("get", "myName"))
fmt.Println("myName : ", myName)
//5s后取数据
time.Sleep(time.Second * 5)
myName, err = redis.String(c.Do("get", "myName"))
if err != nil {
fmt.Println("After 5s ", err)
} else {
fmt.Println("After 5s myName : ", myName)
}
} |
package matchers_test
import (
"errors"
"github.com/hashicorp/go-multierror"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
pkgerrors "github.com/pkg/errors"
. "github.com/rgalanakis/golangal/errmatch"
)
// Ginkgo specs for the BeCausedBy matcher: it must match direct errors,
// pkg/errors-wrapped errors, and members of a multierror, and must produce
// the documented failure messages for non-matches and invalid actuals.
var _ = Describe("BeCausedBy matcher", func() {
	e := errors.New("blah")
	It("matches if the error passes MatchError", func() {
		Expect(e).To(BeCausedBy(e))
		Expect(e).To(BeCausedBy("blah"))
	})
	It("matches if the error is wrapped by pkgerrors.Wrap", func() {
		// both the cause and the wrapped message forms must match
		Expect(pkgerrors.Wrap(e, "foo")).To(BeCausedBy("blah"))
		Expect(pkgerrors.Wrap(e, "foo")).To(BeCausedBy("foo: blah"))
		Expect(pkgerrors.Wrap(e, "foo")).To(BeCausedBy(e))
	})
	It("matches if the error is in a multierror", func() {
		Expect(multierror.Append(e)).To(BeCausedBy("blah"))
		Expect(multierror.Append(e)).To(BeCausedBy(e))
		Expect(multierror.Append(pkgerrors.New("foo"), e)).To(BeCausedBy(e))
	})
	It("matches if the error is wrapped and a member of a multierror", func() {
		Expect(multierror.Append(
			pkgerrors.New("foo"),
			pkgerrors.Wrap(e, "spam"),
		)).To(BeCausedBy(e))
	})
	It("fails if actual does not match", func() {
		matcher := BeCausedBy("foo")
		success, err := matcher.Match(e)
		Expect(success).To(BeFalse())
		Expect(err).ToNot(HaveOccurred())
		// the failure message quotes the actual error and the expectation
		msg := matcher.FailureMessage(e)
		Expect(msg).To(HavePrefix(`Expected
<*errors.errorString | `))
		Expect(msg).To(HaveSuffix(`>: {s: "blah"}
to match error
<string>: foo`))
	})
	It("errors if actual is nil", func() {
		matcher := BeCausedBy(e)
		success, err := matcher.Match(nil)
		Expect(success).To(BeFalse())
		Expect(err).To(MatchError("Expected an error, got nil"))
	})
	It("errors if actual is not an error", func() {
		matcher := BeCausedBy("abc")
		success, err := matcher.Match(5)
		Expect(success).To(BeFalse())
		Expect(err).To(MatchError("Expected an error. Got:\n <int>: 5"))
	})
})
|
package clock
import "fmt"
// clock represents a time of day as hours and minutes. Add normalizes to
// the range 00:00-23:59; New stores its arguments verbatim.
type clock struct {
	hour, minute int
}

// New returns a clock set to the given hour and minute, without
// normalization (matching the original behavior).
func New(hour, minute int) clock {
	return clock{hour, minute}
}

// Add returns a copy of the clock advanced by the given number of minutes
// (negative values rewind), wrapped around a 24-hour day.
func (c clock) Add(minute int) clock {
	const minutesPerDay = 24 * 60
	total := c.hour*60 + c.minute + minute
	total %= minutesPerDay
	if total < 0 {
		total += minutesPerDay
	}
	return clock{hour: total / 60, minute: total % 60}
}

// String renders the clock as zero-padded "HH:MM".
func (c clock) String() string {
	return fmt.Sprintf("%02d:%02d", c.hour, c.minute)
}
|
package serializer
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"math/big"
"reflect"
"sort"
"time"
)
// Fixed-size byte-array aliases and callback types shared by the
// serializer/deserializer.
type (
	// ArrayOf12Bytes is an array of 12 bytes.
	ArrayOf12Bytes = [12]byte
	// ArrayOf20Bytes is an array of 20 bytes.
	ArrayOf20Bytes = [20]byte
	// ArrayOf32Bytes is an array of 32 bytes.
	ArrayOf32Bytes = [32]byte
	// ArrayOf38Bytes is an array of 38 bytes.
	ArrayOf38Bytes = [38]byte
	// ArrayOf64Bytes is an array of 64 bytes.
	ArrayOf64Bytes = [64]byte
	// ArrayOf49Bytes is an array of 49 bytes.
	ArrayOf49Bytes = [49]byte
	// SliceOfArraysOf32Bytes is a slice of arrays of which each is 32 bytes.
	SliceOfArraysOf32Bytes = []ArrayOf32Bytes
	// SliceOfArraysOf64Bytes is a slice of arrays of which each is 64 bytes.
	SliceOfArraysOf64Bytes = []ArrayOf64Bytes
	// ErrProducer might produce an error.
	ErrProducer func(err error) error
	// ErrProducerWithRWBytes might produce an error and is called with the currently read or written bytes.
	ErrProducerWithRWBytes func(read []byte, err error) error
	// ErrProducerWithLeftOver might produce an error and is called with the bytes left to read.
	ErrProducerWithLeftOver func(left int, err error) error
	// ReadObjectConsumerFunc gets called after an object has been deserialized from a Deserializer.
	ReadObjectConsumerFunc func(seri Serializable)
	// ReadObjectsConsumerFunc gets called after objects have been deserialized from a Deserializer.
	ReadObjectsConsumerFunc func(seri Serializables)
)
// SeriLengthPrefixType defines the type of the value denoting the length of
// a collection. The chosen width is written little endian by
// writeSliceLength.
type SeriLengthPrefixType byte

const (
	// SeriLengthPrefixTypeAsByte defines a collection length to be denoted by a byte.
	SeriLengthPrefixTypeAsByte SeriLengthPrefixType = iota
	// SeriLengthPrefixTypeAsUint16 defines a collection length to be denoted by a uint16.
	SeriLengthPrefixTypeAsUint16
	// SeriLengthPrefixTypeAsUint32 defines a collection length to be denoted by a uint32.
	SeriLengthPrefixTypeAsUint32
)
// NewSerializer creates a new, empty Serializer.
func NewSerializer() *Serializer {
	return new(Serializer)
}

// Serializer is a utility to serialize bytes. Its methods chain: the first
// error encountered sticks and short-circuits all later steps.
type Serializer struct {
	buf bytes.Buffer
	err error
}

// Serialize finishes the serialization, returning either the accumulated
// bytes or the first error recorded by an intermediate step.
func (s *Serializer) Serialize() ([]byte, error) {
	if s.err != nil {
		return nil, s.err
	}
	return s.buf.Bytes(), nil
}
// AbortIf calls the given ErrProducer if the Serializer did not encounter an
// error yet. Return nil from the ErrProducer to indicate continuation of the
// serialization.
func (s *Serializer) AbortIf(errProducer ErrProducer) *Serializer {
	if s.err == nil {
		if err := errProducer(nil); err != nil {
			s.err = err
		}
	}
	return s
}
// WithValidation runs errProducer against the bytes written so far, but only
// when deSeriMode requests validation and no earlier step failed.
func (s *Serializer) WithValidation(deSeriMode DeSerializationMode, errProducer ErrProducerWithRWBytes) *Serializer {
	if s.err != nil || !deSeriMode.HasMode(DeSeriModePerformValidation) {
		return s
	}
	// s.err is known nil here; it is still forwarded to match the callback signature
	if err := errProducer(s.buf.Bytes(), s.err); err != nil {
		s.err = err
	}
	return s
}
// Do calls f in the Serializer chain, skipping it after an earlier error.
func (s *Serializer) Do(f func()) *Serializer {
	if s.err == nil {
		f()
	}
	return s
}
// Written returns the amount of bytes written into the Serializer so far,
// i.e. the current length of the internal buffer.
func (s *Serializer) Written() int {
	return s.buf.Len()
}
// WriteNum writes the given num v to the Serializer in little-endian byte
// order; write failures are routed through errProducer.
func (s *Serializer) WriteNum(v interface{}, errProducer ErrProducer) *Serializer {
	if s.err == nil {
		if err := binary.Write(&s.buf, binary.LittleEndian, v); err != nil {
			s.err = errProducer(err)
		}
	}
	return s
}
// WriteUint256 writes the given *big.Int v representing an uint256 value to
// the Serializer as exactly 32 little-endian bytes.
func (s *Serializer) WriteUint256(num *big.Int, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	// nil, negative, or >32-byte magnitudes cannot be encoded as an uint256
	if num == nil {
		s.err = errProducer(ErrUint256Nil)
		return s
	}
	switch {
	case num.Sign() == -1:
		s.err = errProducer(ErrUint256NumNegative)
		return s
	case len(num.Bytes()) > UInt256ByteSize:
		s.err = errProducer(ErrUint256TooBig)
		return s
	}
	numBytes := num.Bytes()
	// big.Int.Bytes() is big endian; reverse in place to little endianness
	for i, j := 0, len(numBytes)-1; i < j; i, j = i+1, j-1 {
		numBytes[i], numBytes[j] = numBytes[j], numBytes[i]
	}
	// zero-pad the high-order tail up to the fixed 32-byte width
	//nolint:gocritic // false positive
	padded := append(numBytes, make([]byte, 32-len(numBytes))...)
	if _, err := s.buf.Write(padded); err != nil {
		s.err = errProducer(err)
		return s
	}
	return s
}
// WriteBool writes the given bool to the Serializer as a single byte
// (1 for true, 0 for false).
func (s *Serializer) WriteBool(v bool, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	b := byte(0)
	if v {
		b = 1
	}
	if err := s.buf.WriteByte(b); err != nil {
		s.err = errProducer(err)
	}
	return s
}
// WriteByte writes the given byte to the Serializer.
//
//nolint:stdmethods // false positive
func (s *Serializer) WriteByte(data byte, errProducer ErrProducer) *Serializer {
	if s.err == nil {
		if err := s.buf.WriteByte(data); err != nil {
			s.err = errProducer(err)
		}
	}
	return s
}
// WriteBytes writes the given byte slice to the Serializer without a length
// prefix. Use this only for fixed-size slices/arrays; otherwise use
// WriteVariableByteSlice.
func (s *Serializer) WriteBytes(data []byte, errProducer ErrProducer) *Serializer {
	if s.err == nil {
		if _, err := s.buf.Write(data); err != nil {
			s.err = errProducer(err)
		}
	}
	return s
}
// writeSliceLength writes the given length to the Serializer as the defined
// SeriLengthPrefixType (little endian), recording an error when l exceeds
// the chosen prefix width. Panics on an unknown prefix type.
func (s *Serializer) writeSliceLength(l int, lenType SeriLengthPrefixType, errProducer ErrProducer) {
	if s.err != nil {
		return
	}
	switch lenType {
	case SeriLengthPrefixTypeAsByte:
		if l > math.MaxUint8 {
			s.err = errProducer(fmt.Errorf("unable to serialize collection length: length %d is out of range (0-%d)", l, math.MaxUint8))
			return
		}
		if err := s.buf.WriteByte(byte(l)); err != nil {
			s.err = errProducer(err)
			return
		}
	case SeriLengthPrefixTypeAsUint16:
		if l > math.MaxUint16 {
			s.err = errProducer(fmt.Errorf("unable to serialize collection length: length %d is out of range (0-%d)", l, math.MaxUint16))
			return
		}
		if err := binary.Write(&s.buf, binary.LittleEndian, uint16(l)); err != nil {
			s.err = errProducer(err)
			return
		}
	case SeriLengthPrefixTypeAsUint32:
		if l > math.MaxUint32 {
			s.err = errProducer(fmt.Errorf("unable to serialize collection length: length %d is out of range (0-%d)", l, math.MaxUint32))
			return
		}
		if err := binary.Write(&s.buf, binary.LittleEndian, uint32(l)); err != nil {
			s.err = errProducer(err)
			return
		}
	default:
		// programmer error: an unsupported prefix type was passed in
		panic(fmt.Sprintf("unknown slice length type %v", lenType))
	}
}
// WriteVariableByteSlice writes the given slice with its length to the Serializer.
// A minLen/maxLen of 0 disables the respective bound check.
func (s *Serializer) WriteVariableByteSlice(data []byte, lenType SeriLengthPrefixType, errProducer ErrProducer, minLen int, maxLen int) *Serializer {
	if s.err != nil {
		return s
	}
	sliceLen := len(data)
	switch {
	case maxLen > 0 && sliceLen > maxLen:
		s.err = errProducer(fmt.Errorf("%w: slice (len %d) exceeds max length of %d ", ErrSliceLengthTooLong, sliceLen, maxLen))
		return s
	case minLen > 0 && sliceLen < minLen:
		// bug fix: the message previously printed maxLen instead of minLen
		s.err = errProducer(fmt.Errorf("%w: slice (len %d) is less than min length of %d ", ErrSliceLengthTooShort, sliceLen, minLen))
		return s
	}
	s.writeSliceLength(len(data), lenType, errProducer)
	if s.err != nil {
		return s
	}
	if _, err := s.buf.Write(data); err != nil {
		s.err = errProducer(err)
		return s
	}
	return s
}
// Write32BytesArraySlice writes a slice of arrays of 32 bytes to the Serializer.
func (s *Serializer) Write32BytesArraySlice(slice SliceOfArraysOf32Bytes, deSeriMode DeSerializationMode, lenType SeriLengthPrefixType, arrayRules *ArrayRules, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	// view each fixed-size array as a byte slice and delegate
	byteSlices := make([][]byte, 0, len(slice))
	for i := range slice {
		byteSlices = append(byteSlices, slice[i][:])
	}
	return s.WriteSliceOfByteSlices(byteSlices, deSeriMode, lenType, arrayRules, errProducer)
}
// Write64BytesArraySlice writes a slice of arrays of 64 bytes to the Serializer.
func (s *Serializer) Write64BytesArraySlice(slice SliceOfArraysOf64Bytes, deSeriMode DeSerializationMode, lenType SeriLengthPrefixType, arrayRules *ArrayRules, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	// view each fixed-size array as a byte slice and delegate
	byteSlices := make([][]byte, 0, len(slice))
	for i := range slice {
		byteSlices = append(byteSlices, slice[i][:])
	}
	return s.WriteSliceOfByteSlices(byteSlices, deSeriMode, lenType, arrayRules, errProducer)
}
// WriteSliceOfObjects serializes the Serializables from source and writes them
// as a length-prefixed sequence to the Serializer. source must be either
// Serializables or a SerializableSlice (anything else panics in
// sourceToSerializables). Length prefixing, optional lexical ordering and
// element validation are handled by WriteSliceOfByteSlices.
func (s *Serializer) WriteSliceOfObjects(source interface{}, deSeriMode DeSerializationMode, deSeriCtx interface{}, lenType SeriLengthPrefixType, arrayRules *ArrayRules, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	seris := s.sourceToSerializables(source)
	data := make([][]byte, len(seris))
	for i, seri := range seris {
		// the write guard can reject individual elements, but only when validation is enabled
		if deSeriMode.HasMode(DeSeriModePerformValidation) && arrayRules.Guards.WriteGuard != nil {
			if err := arrayRules.Guards.WriteGuard(seri); err != nil {
				s.err = errProducer(err)
				return s
			}
		}
		ser, err := seri.Serialize(deSeriMode, deSeriCtx)
		if err != nil {
			s.err = errProducer(err)
			return s
		}
		data[i] = ser
	}
	return s.WriteSliceOfByteSlices(data, deSeriMode, lenType, arrayRules, errProducer)
}
// WriteSliceOfByteSlices writes slice of []byte into the Serializer.
// The slice count is written as a lenType prefix first; elements are then
// optionally sorted lexically and validated before being written back to back.
func (s *Serializer) WriteSliceOfByteSlices(data [][]byte, deSeriMode DeSerializationMode, lenType SeriLengthPrefixType, sliceRules *ArrayRules, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	var eleValFunc ElementValidationFunc
	if deSeriMode.HasMode(DeSeriModePerformValidation) {
		// bounds are checked on the element count before anything is written
		if err := sliceRules.CheckBounds(uint(len(data))); err != nil {
			s.err = errProducer(err)
			return s
		}
		eleValFunc = sliceRules.ElementValidationFunc()
	}
	s.writeSliceLength(len(data), lenType, errProducer)
	if s.err != nil {
		return s
	}
	// we only auto sort if the rules require it; note this sorts the caller's
	// slice in place, and must happen after the length prefix but before
	// element validation so ordering rules see the final order
	if deSeriMode.HasMode(DeSeriModePerformLexicalOrdering) && sliceRules.ValidationMode.HasMode(ArrayValidationModeLexicalOrdering) {
		sort.Slice(data, func(i, j int) bool {
			return bytes.Compare(data[i], data[j]) < 0
		})
	}
	for i, ele := range data {
		if eleValFunc != nil {
			if err := eleValFunc(i, ele); err != nil {
				s.err = errProducer(err)
				return s
			}
		}
		if _, err := s.buf.Write(ele); err != nil {
			s.err = errProducer(err)
			return s
		}
	}
	return s
}
// WriteObject writes the given Serializable to the Serializer.
// The guard is only consulted when validation is enabled; a nil guard is
// tolerated (consistent with WritePayload) instead of panicking.
func (s *Serializer) WriteObject(seri Serializable, deSeriMode DeSerializationMode, deSeriCtx interface{}, guard SerializableWriteGuardFunc, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	if deSeriMode.HasMode(DeSeriModePerformValidation) && guard != nil {
		if err := guard(seri); err != nil {
			s.err = errProducer(err)
			return s
		}
	}
	seriBytes, err := seri.Serialize(deSeriMode, deSeriCtx)
	if err != nil {
		s.err = errProducer(err)
		return s
	}
	if _, err := s.buf.Write(seriBytes); err != nil {
		s.err = errProducer(err)
	}
	return s
}
// sourceToSerializables converts source into Serializables.
// It panics when source is neither Serializables nor a SerializableSlice.
func (s *Serializer) sourceToSerializables(source interface{}) Serializables {
	switch src := source.(type) {
	case Serializables:
		return src
	case SerializableSlice:
		return src.ToSerializables()
	default:
		panic(fmt.Sprintf("invalid source: %T", source))
	}
}
// WriteTime writes a marshaled Time value to the internal buffer.
// The zero time is encoded as int64(0); any other time as its UnixNano value.
func (s *Serializer) WriteTime(timeToWrite time.Time, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	nanos := int64(0)
	if !timeToWrite.IsZero() {
		nanos = timeToWrite.UnixNano()
	}
	if err := binary.Write(&s.buf, binary.LittleEndian, nanos); err != nil {
		s.err = errProducer(err)
	}
	return s
}
// WritePayload writes the given payload Serializable into the Serializer.
// This is different to WriteObject as it also writes the length denotation of the payload.
// A nil payload is encoded as a zero length denotation with no payload bytes.
func (s *Serializer) WritePayload(payload Serializable, deSeriMode DeSerializationMode, deSeriCtx interface{}, guard SerializableWriteGuardFunc, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	if payload == nil {
		if err := s.writePayloadLength(0); err != nil {
			s.err = errProducer(err)
		}
		return s
	}
	if guard != nil {
		if err := guard(payload); err != nil {
			s.err = errProducer(err)
			return s
		}
	}
	payloadBytes, err := payload.Serialize(deSeriMode, deSeriCtx)
	if err != nil {
		s.err = errProducer(fmt.Errorf("unable to serialize payload: %w", err))
		return s
	}
	if err := s.writePayloadLength(len(payloadBytes)); err != nil {
		s.err = errProducer(err)
		// bug fix: previously execution fell through to the payload write,
		// which could overwrite this error and emit bytes after a failure
		return s
	}
	if _, err := s.buf.Write(payloadBytes); err != nil {
		s.err = errProducer(err)
	}
	return s
}
// WritePayloadLength writes the payload length token into the Serializer.
func (s *Serializer) WritePayloadLength(length int, errProducer ErrProducer) *Serializer {
	if s.err != nil {
		return s
	}
	err := s.writePayloadLength(length)
	if err != nil {
		s.err = errProducer(err)
	}
	return s
}
// writePayloadLength encodes the given length as a little-endian uint32.
func (s *Serializer) writePayloadLength(length int) error {
	err := binary.Write(&s.buf, binary.LittleEndian, uint32(length))
	if err != nil {
		return fmt.Errorf("unable to serialize payload length: %w", err)
	}
	return nil
}
// WriteString writes the given string length-prefixed to the Serializer.
// A minLen/maxLen of 0 disables the respective bound check.
func (s *Serializer) WriteString(str string, lenType SeriLengthPrefixType, errProducer ErrProducer, minLen int, maxLen int) *Serializer {
	if s.err != nil {
		return s
	}
	strLen := len(str)
	if maxLen > 0 && strLen > maxLen {
		s.err = errProducer(fmt.Errorf("%w: string (len %d) exceeds max length of %d ", ErrStringTooLong, strLen, maxLen))
		return s
	}
	if minLen > 0 && strLen < minLen {
		s.err = errProducer(fmt.Errorf("%w: string (len %d) is less than min length of %d", ErrStringTooShort, strLen, minLen))
		return s
	}
	s.writeSliceLength(strLen, lenType, errProducer)
	if s.err != nil {
		return s
	}
	if _, err := s.buf.Write([]byte(str)); err != nil {
		s.err = errProducer(err)
	}
	return s
}
// NewDeserializer creates a new Deserializer reading from src.
func NewDeserializer(src []byte) *Deserializer {
	d := &Deserializer{src: src}
	return d
}
// Deserializer is a utility to deserialize bytes.
type Deserializer struct {
	// the bytes to deserialize from
	src []byte
	// current read position within src
	offset int
	// first error encountered; once set, subsequent operations are no-ops
	err error
}
// RemainingBytes returns the not yet consumed bytes of the Deserializer's source.
func (d *Deserializer) RemainingBytes() []byte {
	return d.src[d.offset:]
}
// Skip skips the number of bytes during deserialization.
func (d *Deserializer) Skip(skip int, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	if remaining := len(d.src[d.offset:]); remaining < skip {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	d.offset += skip
	return d
}
// ReadBool reads a bool into dest.
// Only the byte values 0 and 1 are accepted as valid encodings.
func (d *Deserializer) ReadBool(dest *bool, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	if len(d.src[d.offset:]) == 0 {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	b := d.src[d.offset]
	if b > 1 {
		d.err = errProducer(ErrDeserializationInvalidBoolValue)
		return d
	}
	*dest = b == 1
	d.offset += OneByte
	return d
}
// ReadByte reads a byte into dest.
//
//nolint:stdmethods // false positive
func (d *Deserializer) ReadByte(dest *byte, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	if len(d.src[d.offset:]) == 0 {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	*dest = d.src[d.offset]
	d.offset += OneByte
	return d
}
// ReadUint256 reads a little endian encoded uint256 into dest.
func (d *Deserializer) ReadUint256(dest **big.Int, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	if len(d.src[d.offset:]) < UInt256ByteSize {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	// copy the fixed-size little-endian representation out of src
	le := make([]byte, UInt256ByteSize)
	copy(le, d.src[d.offset:d.offset+UInt256ByteSize])
	d.offset += UInt256ByteSize
	// reverse into big-endian order as expected by big.Int
	for i, j := 0, len(le)-1; i < j; i, j = i+1, j-1 {
		le[i], le[j] = le[j], le[i]
	}
	*dest = new(big.Int).SetBytes(le)
	return d
}
// numSize returns the amount of bytes needed to encode the given numeric value
// (or value pointed to). It panics on unsupported types.
func numSize(data any) int {
	switch data.(type) {
	case bool, int8, uint8, *bool, *int8, *uint8:
		return OneByte
	case int16, *int16:
		return Int16ByteSize
	case uint16, *uint16:
		return UInt16ByteSize
	case int32, *int32:
		return Int32ByteSize
	case uint32, *uint32:
		return UInt32ByteSize
	case int64, *int64:
		return Int64ByteSize
	case uint64, *uint64:
		return UInt64ByteSize
	case float32, *float32:
		return Float32ByteSize
	case float64, *float64:
		return Float64ByteSize
	default:
		panic(fmt.Sprintf("unsupported numSize type %T", data))
	}
}
// ReadNum reads a little endian encoded number into dest.
// dest must be a pointer to one of the supported fixed-size numeric types.
func (d *Deserializer) ReadNum(dest any, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	size := numSize(dest)
	if len(d.src[d.offset:]) < size {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	raw := d.src[d.offset : d.offset+size]
	switch x := dest.(type) {
	case *int8:
		*x = int8(raw[0])
	case *uint8:
		*x = raw[0]
	case *int16:
		*x = int16(binary.LittleEndian.Uint16(raw))
	case *uint16:
		*x = binary.LittleEndian.Uint16(raw)
	case *int32:
		*x = int32(binary.LittleEndian.Uint32(raw))
	case *uint32:
		*x = binary.LittleEndian.Uint32(raw)
	case *int64:
		*x = int64(binary.LittleEndian.Uint64(raw))
	case *uint64:
		*x = binary.LittleEndian.Uint64(raw)
	case *float32:
		*x = math.Float32frombits(binary.LittleEndian.Uint32(raw))
	case *float64:
		*x = math.Float64frombits(binary.LittleEndian.Uint64(raw))
	default:
		panic(fmt.Sprintf("unsupported ReadNum type %T", dest))
	}
	d.offset += size
	return d
}
// ReadBytes reads the specified number of bytes into slice.
// Use this function only to read fixed size slices/arrays, otherwise use ReadVariableByteSlice instead.
func (d *Deserializer) ReadBytes(slice *[]byte, numBytes int, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	if len(d.src[d.offset:]) < numBytes {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	// copy out so the result does not alias the deserializer's source
	out := make([]byte, numBytes)
	copy(out, d.src[d.offset:d.offset+numBytes])
	d.offset += numBytes
	*slice = out
	return d
}
// ReadBytesInPlace reads len(slice) bytes into the given slice.
// Use this function only to read arrays.
func (d *Deserializer) ReadBytesInPlace(slice []byte, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	want := len(slice)
	if len(d.src[d.offset:]) < want {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	copy(slice, d.src[d.offset:d.offset+want])
	d.offset += want
	return d
}
// ReadVariableByteSlice reads a variable byte slice which is denoted by the given SeriLengthPrefixType.
// A minLen/maxLen of 0 disables the respective bound check.
func (d *Deserializer) ReadVariableByteSlice(slice *[]byte, lenType SeriLengthPrefixType, errProducer ErrProducer, minLen int, maxLen int) *Deserializer {
	if d.err != nil {
		return d
	}
	sliceLength, err := d.readSliceLength(lenType, errProducer)
	if err != nil {
		d.err = err
		return d
	}
	// bug fix: previously a bound violation only set d.err and execution fell
	// through, so the data was still consumed and the error could be
	// overwritten by a later not-enough-data error; now we bail out immediately
	switch {
	case maxLen > 0 && sliceLength > maxLen:
		d.err = errProducer(fmt.Errorf("%w: denoted %d bytes, max allowed %d ", ErrDeserializationLengthInvalid, sliceLength, maxLen))
		return d
	case minLen > 0 && sliceLength < minLen:
		d.err = errProducer(fmt.Errorf("%w: denoted %d bytes, min required %d ", ErrDeserializationLengthInvalid, sliceLength, minLen))
		return d
	}
	if sliceLength == 0 {
		*slice = nil
		return d
	}
	if len(d.src[d.offset:]) < sliceLength {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	dest := make([]byte, sliceLength)
	copy(dest, d.src[d.offset:d.offset+sliceLength])
	*slice = dest
	d.offset += sliceLength
	return d
}
// ReadArrayOf12Bytes reads an array of 12 bytes.
func (d *Deserializer) ReadArrayOf12Bytes(arr *ArrayOf12Bytes, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	const length = 12
	if len(d.src[d.offset:]) < length {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	copy(arr[:], d.src[d.offset:d.offset+length])
	d.offset += length
	return d
}
// ReadArrayOf20Bytes reads an array of 20 bytes.
func (d *Deserializer) ReadArrayOf20Bytes(arr *ArrayOf20Bytes, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	const length = 20
	if len(d.src[d.offset:]) < length {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	copy(arr[:], d.src[d.offset:d.offset+length])
	d.offset += length
	return d
}
// ReadArrayOf32Bytes reads an array of 32 bytes.
func (d *Deserializer) ReadArrayOf32Bytes(arr *ArrayOf32Bytes, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	const length = 32
	if len(d.src[d.offset:]) < length {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	copy(arr[:], d.src[d.offset:d.offset+length])
	d.offset += length
	return d
}
// ReadArrayOf38Bytes reads an array of 38 bytes.
func (d *Deserializer) ReadArrayOf38Bytes(arr *ArrayOf38Bytes, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	const length = 38
	if len(d.src[d.offset:]) < length {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	copy(arr[:], d.src[d.offset:d.offset+length])
	d.offset += length
	return d
}
// ReadArrayOf64Bytes reads an array of 64 bytes.
func (d *Deserializer) ReadArrayOf64Bytes(arr *ArrayOf64Bytes, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	const length = 64
	if len(d.src[d.offset:]) < length {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	copy(arr[:], d.src[d.offset:d.offset+length])
	d.offset += length
	return d
}
// ReadArrayOf49Bytes reads an array of 49 bytes.
func (d *Deserializer) ReadArrayOf49Bytes(arr *ArrayOf49Bytes, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	const length = 49
	if len(d.src[d.offset:]) < length {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	copy(arr[:], d.src[d.offset:d.offset+length])
	d.offset += length
	return d
}
// readSliceLength reads the length prefix of a slice as the given
// SeriLengthPrefixType and advances the offset past the prefix.
// It panics on an unknown SeriLengthPrefixType (programmer error).
func (d *Deserializer) readSliceLength(lenType SeriLengthPrefixType, errProducer ErrProducer) (int, error) {
	remaining := len(d.src[d.offset:])
	var prefixSize, sliceLength int
	switch lenType {
	case SeriLengthPrefixTypeAsByte:
		if remaining < OneByte {
			return 0, errProducer(ErrDeserializationNotEnoughData)
		}
		prefixSize = OneByte
		sliceLength = int(d.src[d.offset])
	case SeriLengthPrefixTypeAsUint16:
		if remaining < UInt16ByteSize {
			return 0, errProducer(ErrDeserializationNotEnoughData)
		}
		prefixSize = UInt16ByteSize
		sliceLength = int(binary.LittleEndian.Uint16(d.src[d.offset : d.offset+UInt16ByteSize]))
	case SeriLengthPrefixTypeAsUint32:
		if remaining < UInt32ByteSize {
			return 0, errProducer(ErrDeserializationNotEnoughData)
		}
		prefixSize = UInt32ByteSize
		sliceLength = int(binary.LittleEndian.Uint32(d.src[d.offset : d.offset+UInt32ByteSize]))
	default:
		panic(fmt.Sprintf("unknown slice length type %v", lenType))
	}
	d.offset += prefixSize
	return sliceLength, nil
}
// ReadSliceOfArraysOf32Bytes reads a slice of arrays of 32 bytes.
// The element count is read as a lenType prefix; elements are validated
// (when validation is enabled and arrayRules is non-nil) before being copied out.
func (d *Deserializer) ReadSliceOfArraysOf32Bytes(slice *SliceOfArraysOf32Bytes, deSeriMode DeSerializationMode, lenType SeriLengthPrefixType, arrayRules *ArrayRules, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	const length = 32
	sliceLength, err := d.readSliceLength(lenType, errProducer)
	if err != nil {
		d.err = err
		return d
	}
	var arrayElementValidator ElementValidationFunc
	if arrayRules != nil && deSeriMode.HasMode(DeSeriModePerformValidation) {
		// bounds are checked on the denoted count before any element is read
		if err := arrayRules.CheckBounds(uint(sliceLength)); err != nil {
			d.err = errProducer(err)
			return d
		}
		arrayElementValidator = arrayRules.ElementValidationFunc()
	}
	s := make(SliceOfArraysOf32Bytes, sliceLength)
	for i := 0; i < sliceLength; i++ {
		if len(d.src[d.offset:]) < length {
			d.err = errProducer(ErrDeserializationNotEnoughData)
			return d
		}
		// elements are validated against the raw source bytes before copying
		if arrayElementValidator != nil {
			if err := arrayElementValidator(i, d.src[d.offset:d.offset+length]); err != nil {
				d.err = errProducer(err)
				return d
			}
		}
		copy(s[i][:], d.src[d.offset:d.offset+length])
		d.offset += length
	}
	*slice = s
	return d
}
// ReadSliceOfArraysOf64Bytes reads a slice of arrays of 64 bytes.
// The element count is read as a lenType prefix; elements are validated
// (when validation is enabled and arrayRules is non-nil) before being copied out.
func (d *Deserializer) ReadSliceOfArraysOf64Bytes(slice *SliceOfArraysOf64Bytes, deSeriMode DeSerializationMode, lenType SeriLengthPrefixType, arrayRules *ArrayRules, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	const length = 64
	sliceLength, err := d.readSliceLength(lenType, errProducer)
	if err != nil {
		d.err = err
		return d
	}
	var arrayElementValidator ElementValidationFunc
	if arrayRules != nil && deSeriMode.HasMode(DeSeriModePerformValidation) {
		// bounds are checked on the denoted count before any element is read
		if err := arrayRules.CheckBounds(uint(sliceLength)); err != nil {
			d.err = errProducer(err)
			return d
		}
		arrayElementValidator = arrayRules.ElementValidationFunc()
	}
	s := make(SliceOfArraysOf64Bytes, sliceLength)
	for i := 0; i < sliceLength; i++ {
		if len(d.src[d.offset:]) < length {
			d.err = errProducer(ErrDeserializationNotEnoughData)
			return d
		}
		// elements are validated against the raw source bytes before copying
		if arrayElementValidator != nil {
			if err := arrayElementValidator(i, d.src[d.offset:d.offset+length]); err != nil {
				d.err = errProducer(err)
				return d
			}
		}
		copy(s[i][:], d.src[d.offset:d.offset+length])
		d.offset += length
	}
	*slice = s
	return d
}
// ReadObject reads an object, using the given SerializableReadGuardFunc.
func (d *Deserializer) ReadObject(target interface{}, deSeriMode DeSerializationMode, deSeriCtx interface{}, typeDen TypeDenotationType, serSel SerializableReadGuardFunc, errProducer ErrProducer) *Deserializer {
	// the concrete type prefix returned by readObject is not needed here
	deseri, _ := d.readObject(target, deSeriMode, deSeriCtx, typeDen, serSel, errProducer)
	return deseri
}
// GetObjectType reads the object type denotation but does not advance the offset.
// For TypeDenotationNone it always returns 0 with no error.
func (d *Deserializer) GetObjectType(typeDen TypeDenotationType) (uint32, error) {
	remaining := len(d.src[d.offset:])
	switch typeDen {
	case TypeDenotationUint32:
		if remaining < UInt32ByteSize {
			return 0, ErrDeserializationNotEnoughData
		}
		return binary.LittleEndian.Uint32(d.src[d.offset:]), nil
	case TypeDenotationByte:
		if remaining < OneByte {
			return 0, ErrDeserializationNotEnoughData
		}
		return uint32(d.src[d.offset]), nil
	case TypeDenotationNone:
		// object has no type denotation
		return 0, nil
	}
	return 0, nil
}
// readObject reads the type denotation, selects the concrete Serializable via
// serSel, deserializes it from the remaining bytes and stores it into target.
// It returns the deserializer and the read type prefix (0 on error or when
// typeDen is TypeDenotationNone).
func (d *Deserializer) readObject(target interface{}, deSeriMode DeSerializationMode, deSeriCtx interface{}, typeDen TypeDenotationType, serSel SerializableReadGuardFunc, errProducer ErrProducer) (*Deserializer, uint32) {
	if d.err != nil {
		return d, 0
	}
	// peek the type without advancing; Deserialize below consumes it itself
	ty, err := d.GetObjectType(typeDen)
	if err != nil {
		d.err = errProducer(err)
		return d, 0
	}
	// serSel acts as a read guard and factory for the concrete type
	seri, err := serSel(ty)
	if err != nil {
		d.err = errProducer(err)
		return d, 0
	}
	bytesConsumed, err := seri.Deserialize(d.src[d.offset:], deSeriMode, deSeriCtx)
	if err != nil {
		d.err = errProducer(err)
		return d, 0
	}
	d.offset += bytesConsumed
	d.readSerializableIntoTarget(target, seri)
	return d, ty
}
// ReadSliceOfObjects reads a slice of objects into target, enforcing the given
// ArrayRules (bounds, read/post-read guards, must-occur types) when validation
// is enabled.
func (d *Deserializer) ReadSliceOfObjects(
	target interface{}, deSeriMode DeSerializationMode, deSeriCtx interface{}, lenType SeriLengthPrefixType,
	typeDen TypeDenotationType, arrayRules *ArrayRules, errProducer ErrProducer,
) *Deserializer {
	if d.err != nil {
		return d
	}
	var seris Serializables
	// tracks which type prefixes were seen, for the MustOccur check below
	var seenTypes TypePrefixes
	if deSeriMode.HasMode(DeSeriModePerformValidation) {
		seenTypes = make(TypePrefixes, 0)
	}
	deserializeItem := func(b []byte) (bytesRead int, err error) {
		var seri Serializable
		// a sub-deserializer over b; the outer offset is advanced by
		// ReadSequenceOfObjects based on the returned bytesRead
		subDeseri := NewDeserializer(b)
		_, ty := subDeseri.readObject(func(readSeri Serializable) { seri = readSeri }, deSeriMode, deSeriCtx, typeDen, arrayRules.Guards.ReadGuard, func(err error) error {
			return errProducer(err)
		})
		bytesRead, err = subDeseri.Done()
		if err != nil {
			return 0, err
		}
		if deSeriMode.HasMode(DeSeriModePerformValidation) {
			seenTypes[ty] = struct{}{}
			if arrayRules.Guards.PostReadGuard != nil {
				if err := arrayRules.Guards.PostReadGuard(seri); err != nil {
					return 0, err
				}
			}
		}
		seris = append(seris, seri)
		return bytesRead, nil
	}
	// length prefix, bounds check and per-element validation happen here
	d.ReadSequenceOfObjects(deserializeItem, deSeriMode, lenType, arrayRules, errProducer)
	if d.err != nil {
		return d
	}
	if deSeriMode.HasMode(DeSeriModePerformValidation) {
		if !arrayRules.MustOccur.Subset(seenTypes) {
			d.err = errProducer(fmt.Errorf("%w: should %v, has %v", ErrArrayValidationTypesNotOccurred, arrayRules.MustOccur, seenTypes))
			return d
		}
	}
	if len(seris) == 0 {
		return d
	}
	d.readSerializablesIntoTarget(target, seris)
	return d
}
// DeserializeFunc is a function that reads bytes from b and returns how many bytes were read.
type DeserializeFunc func(b []byte) (bytesRead int, err error)
// ReadSequenceOfObjects reads a sequence of objects and calls DeserializeFunc for every encountered item.
func (d *Deserializer) ReadSequenceOfObjects(
	itemDeserializer DeserializeFunc, deSeriMode DeSerializationMode,
	lenType SeriLengthPrefixType, arrayRules *ArrayRules, errProducer ErrProducer,
) *Deserializer {
	if d.err != nil {
		return d
	}
	sliceLength, err := d.readSliceLength(lenType, errProducer)
	if err != nil {
		d.err = err
		return d
	}
	var arrayElementValidator ElementValidationFunc
	if deSeriMode.HasMode(DeSeriModePerformValidation) {
		// bounds are checked on the denoted count before any item is read
		if err := arrayRules.CheckBounds(uint(sliceLength)); err != nil {
			d.err = errProducer(err)
			return d
		}
		arrayElementValidator = arrayRules.ElementValidationFunc()
	}
	if sliceLength == 0 {
		return d
	}
	for i := 0; i < sliceLength; i++ {
		// Remember where we were before reading the item.
		srcBefore := d.src[d.offset:]
		offsetBefore := d.offset
		bytesRead, err := itemDeserializer(srcBefore)
		if err != nil {
			d.err = errProducer(err)
			return d
		}
		// advance by exactly what the item deserializer reported as consumed
		d.offset = offsetBefore + bytesRead
		if arrayElementValidator != nil {
			// validate the raw bytes of the element that was just read
			if err := arrayElementValidator(i, srcBefore[:bytesRead]); err != nil {
				d.err = errProducer(err)
				return d
			}
		}
	}
	return d
}
// readSerializablesIntoTarget hands the read Serializables over to target,
// which must be either a consumer function or a SerializableSlice.
func (d *Deserializer) readSerializablesIntoTarget(target interface{}, seris Serializables) {
	switch t := target.(type) {
	case func(seri Serializables):
		t(seris)
	case SerializableSlice:
		t.FromSerializables(seris)
	default:
		panic("invalid target")
	}
}
// ReadTime reads a Time value from the internal buffer.
// An encoded value of 0 nanoseconds maps to the zero time.
func (d *Deserializer) ReadTime(dest *time.Time, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	if len(d.src[d.offset:]) < Int64ByteSize {
		d.err = errProducer(ErrDeserializationNotEnoughData)
		return d
	}
	nanos := int64(binary.LittleEndian.Uint64(d.src[d.offset : d.offset+Int64ByteSize]))
	d.offset += Int64ByteSize
	if nanos == 0 {
		*dest = time.Time{}
	} else {
		*dest = time.Unix(0, nanos)
	}
	return d
}
// ReadPayloadLength reads the payload length denotation and advances the offset past it.
func (d *Deserializer) ReadPayloadLength() (uint32, error) {
	if len(d.src[d.offset:]) < PayloadLengthByteSize {
		return 0, fmt.Errorf("%w: data is smaller than payload length denotation", ErrDeserializationNotEnoughData)
	}
	length := binary.LittleEndian.Uint32(d.src[d.offset:])
	d.offset += PayloadLengthByteSize
	return length, nil
}
// ReadPayload reads a length-denoted payload into s, using sel to map the
// payload's type prefix to a concrete Serializable. A zero length denotation
// means "no payload" and leaves s untouched.
func (d *Deserializer) ReadPayload(s interface{}, deSeriMode DeSerializationMode, deSeriCtx interface{}, sel SerializableReadGuardFunc, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	payloadLength, err := d.ReadPayloadLength()
	if err != nil {
		d.err = errProducer(err)
		return d
	}
	// nothing to do
	if payloadLength == 0 {
		return d
	}
	switch {
	case len(d.src[d.offset:]) < MinPayloadByteSize:
		d.err = errProducer(fmt.Errorf("%w: payload data is smaller than min. required length %d", ErrDeserializationNotEnoughData, MinPayloadByteSize))
		return d
	case len(d.src[d.offset:]) < int(payloadLength):
		d.err = errProducer(fmt.Errorf("%w: payload length denotes more bytes than are available", ErrDeserializationNotEnoughData))
		return d
	}
	// the payload's own type prefix is the first uint32 of the payload bytes
	payload, err := sel(binary.LittleEndian.Uint32(d.src[d.offset:]))
	if err != nil {
		d.err = errProducer(err)
		return d
	}
	payloadBytesConsumed, err := payload.Deserialize(d.src[d.offset:], deSeriMode, deSeriCtx)
	if err != nil {
		d.err = errProducer(err)
		return d
	}
	// the denoted length must match what the payload actually consumed
	if payloadBytesConsumed != int(payloadLength) {
		d.err = errProducer(fmt.Errorf("%w: denoted payload length (%d) doesn't equal the size of deserialized payload (%d)", ErrInvalidBytes, payloadLength, payloadBytesConsumed))
		return d
	}
	d.offset += payloadBytesConsumed
	d.readSerializableIntoTarget(s, payload)
	return d
}
// readSerializableIntoTarget stores the read Serializable into target, which
// must be a *Serializable, a consumer function or a pointer to the concrete type.
func (d *Deserializer) readSerializableIntoTarget(target interface{}, s Serializable) {
	switch x := target.(type) {
	case *Serializable:
		*x = s
	case func(seri Serializable):
		x(s)
	default:
		// fall back to reflection for pointers to concrete types
		if reflect.TypeOf(target).Kind() != reflect.Ptr {
			panic("target parameter must be pointer or Serializable")
		}
		reflect.ValueOf(target).Elem().Set(reflect.ValueOf(s))
	}
}
// ReadString reads a length-prefixed string into s.
// A minLen/maxLen of 0 disables the respective bound check.
func (d *Deserializer) ReadString(s *string, lenType SeriLengthPrefixType, errProducer ErrProducer, minLen int, maxLen int) *Deserializer {
	if d.err != nil {
		return d
	}
	strLen, err := d.readSliceLength(lenType, errProducer)
	if err != nil {
		d.err = err
		return d
	}
	// bug fix: previously a bound violation only set d.err and execution fell
	// through, so the string was still read and the offset advanced; now we
	// bail out immediately like the rest of the chainable API
	switch {
	case maxLen > 0 && strLen > maxLen:
		d.err = errProducer(fmt.Errorf("%w: string defined to be of %d bytes length but max %d is allowed", ErrDeserializationLengthInvalid, strLen, maxLen))
		return d
	case minLen > 0 && strLen < minLen:
		d.err = errProducer(fmt.Errorf("%w: string defined to be of %d bytes length but min %d is required", ErrDeserializationLengthInvalid, strLen, minLen))
		return d
	}
	if len(d.src[d.offset:]) < strLen {
		d.err = errProducer(fmt.Errorf("%w: data is smaller than (%d) denoted string length of %d", ErrDeserializationNotEnoughData, len(d.src[d.offset:]), strLen))
		return d
	}
	*s = string(d.src[d.offset : d.offset+strLen])
	d.offset += strLen
	return d
}
// AbortIf calls the given ErrProducer if the Deserializer did not encounter an error yet.
// Return nil from the ErrProducer to indicate continuation of the deserialization.
func (d *Deserializer) AbortIf(errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	err := errProducer(nil)
	if err != nil {
		d.err = err
	}
	return d
}
// WithValidation runs errProducer if deSeriMode has DeSeriModePerformValidation.
// The producer receives the bytes consumed so far.
func (d *Deserializer) WithValidation(deSeriMode DeSerializationMode, errProducer ErrProducerWithRWBytes) *Deserializer {
	if d.err != nil || !deSeriMode.HasMode(DeSeriModePerformValidation) {
		return d
	}
	if err := errProducer(d.src[:d.offset], d.err); err != nil {
		d.err = err
	}
	return d
}
// CheckTypePrefix checks whether the type prefix corresponds to the expected given prefix.
// This function will advance the deserializer by the given TypeDenotationType length.
func (d *Deserializer) CheckTypePrefix(prefix uint32, prefixType TypeDenotationType, errProducer ErrProducer) *Deserializer {
	if d.err != nil {
		return d
	}
	remaining := d.src[d.offset:]
	var toSkip int
	switch prefixType {
	case TypeDenotationUint32:
		toSkip = UInt32ByteSize
		if err := CheckType(remaining, prefix); err != nil {
			d.err = errProducer(err)
			return d
		}
	case TypeDenotationByte:
		toSkip = OneByte
		if err := CheckTypeByte(remaining, byte(prefix)); err != nil {
			d.err = errProducer(err)
			return d
		}
	default:
		panic("invalid type prefix in CheckTypePrefix()")
	}
	// pass errors through unchanged; Skip cannot fail after a successful check
	return d.Skip(toSkip, func(err error) error { return err })
}
// Do calls f in the Deserializer chain, but only if no error occurred yet.
func (d *Deserializer) Do(f func()) *Deserializer {
	if d.err == nil {
		f()
	}
	return d
}
// ConsumedAll calls the given ErrProducerWithLeftOver if not all bytes have been
// consumed from the Deserializer's src.
func (d *Deserializer) ConsumedAll(errProducer ErrProducerWithLeftOver) *Deserializer {
	if d.err != nil {
		return d
	}
	if leftOver := len(d.src[d.offset:]); leftOver != 0 {
		d.err = errProducer(leftOver, ErrDeserializationNotAllConsumed)
	}
	return d
}
// Done finishes the Deserializer by returning the read bytes and occurred errors.
func (d *Deserializer) Done() (int, error) {
	// offset equals the total number of bytes consumed so far
	return d.offset, d.err
}
|
// Implements the 'customer' web service
package services
/*
Copyright (C) 2015 J. Robert Wyatt
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
import (
"encoding/json"
"../Thrift/gen-go/messages"
"../common"
"io"
"net/http"
)
// The string base of the URI for which this service is registered
var root string

// Host address and port of the Thrift backend
var addr string
var port int

// InitService stores the registered root path and the Thrift backend's
// address and port for later use by the request handlers.
func InitService(r string, a string, p int) {
	root, addr, port = r, a, p
}
// CustomerGET handles GET requests: it looks up the customer identified by the
// URL path below the registered root and writes it back as JSON.
func CustomerGET(c *common.ThriftConnection, w http.ResponseWriter, r *http.Request) {
	common.Logger.Printf("GET: %s", r.URL)
	result, err := c.Conn().GetCustomer(r.URL.String()[len(root):])
	if err != nil {
		// bug fix: message previously said "Failed to create new customer",
		// copy/pasted from the POST handler
		common.Logger.Printf("Failed to get customer: %s", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	bytes, err := json.Marshal(result)
	if err != nil {
		common.Logger.Printf("Failed to convert to JSON: %s", result)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	header := w.Header()
	header.Add("Content-Type", "application/json")
	w.Write(bytes)
}
// CustomerPOST handles POST requests: it parses the JSON body into a Thrift
// Customer, creates it via the backend and echoes the created record as JSON.
func CustomerPOST(c *common.ThriftConnection, w http.ResponseWriter, r *http.Request) {
	common.Logger.Printf("POST: %s", r.URL)
	if r.ContentLength <= 0 {
		common.Logger.Printf("Failed to detect body content in POST")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// io.ReadAll reads until EOF; a single Body.Read call may legally return
	// fewer bytes than ContentLength and truncate the payload
	body, err := io.ReadAll(r.Body)
	if err != nil {
		common.Logger.Printf("Failed to read body: %s", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	common.Logger.Printf("Body[%d]: %s", len(body), body)
	thriftCust := messages.NewCustomer()
	// bug fix: the unmarshal error was previously ignored, letting malformed
	// JSON reach the backend as an empty customer
	if err := json.Unmarshal(body, &thriftCust); err != nil {
		common.Logger.Printf("Failed to parse body as JSON: %s", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	common.Logger.Printf("Customer: %s", thriftCust)
	result, err := c.Conn().CreateCustomer(thriftCust)
	if err != nil {
		common.Logger.Printf("Failed to create new customer: %s", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	common.Logger.Printf("Customer Created: %s", result)
	bytes, err := json.Marshal(result)
	if err != nil {
		common.Logger.Printf("Failed to convert result to JSON: %s", result)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Add("Content-Type", "application/json")
	w.Write(bytes)
}
// Customer is the top-level handler for the registered hierarchy: it opens
// a Thrift connection per request and dispatches by HTTP method.
func Customer(w http.ResponseWriter, r *http.Request) {
	c, err := common.NewThriftConnection(addr, port)
	if err != nil {
		common.Logger.Printf("Unable to make Thrift connection")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer c.Close()
	switch r.Method {
	case "GET":
		CustomerGET(c, w, r)
	case "POST":
		CustomerPOST(c, w, r)
	default:
		w.WriteHeader(http.StatusBadRequest)
		common.Logger.Printf("Unsupported method: %s %s", r.Method, r.URL)
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
)
// main demonstrates whole-file writing and reading: it writes a small
// text file in one call, reopens it read-only, and prints its contents.
func main() {
	// ioutil.WriteFile will create/open, write a slice of byte and close.
	if err := ioutil.WriteFile("test.txt", []byte("Hey mamma!\r\nI'm on tv :)"), 0666); err != nil {
		log.Fatalln(err)
	}
	f, err := os.OpenFile("test.txt", os.O_RDONLY, 0666)
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()
	content, err := ioutil.ReadAll(f)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("Name of file: %v\r\n", f.Name())
	fmt.Printf("Your file: %v\r\n", string(content))
}
|
package main
import (
"github.com/astaxie/beego/config"
"github.com/astaxie/beego/logs"
)
// Conf aggregates the redis and log sections of the configuration file.
type Conf struct {
	redisConf RedisConf
	logConf LogConf
}
var (
	// redisConf *RedisConf
	// myConf holds the process-wide configuration, populated by initConf.
	myConf *Conf
)
// loadLogConf copies the [log] section of the configuration into the
// global myConf and echoes the values at debug level.
func loadLogConf(conf config.Configer) {
	lc := &myConf.logConf
	lc.LogPath = conf.String("log::log_path")
	lc.LogLevel = conf.String("log::log_level")
	logs.Debug("log path: %v, log level: %v", lc.LogPath, lc.LogLevel)
}
// initConf loads the redis and log sections from the given config file
// into the global myConf, substituting documented defaults when a redis
// key is missing or unparsable.
func initConf(confType, fileName string) (err error) {
	conf, err := config.NewConfig(confType, fileName)
	if err != nil {
		logs.Error("new config failed, err: %v", err)
		return
	}
	logs.Debug("new config succ!")
	// redisConf = &RedisConf{}
	myConf = &Conf{}
	myConf.redisConf.Addr = conf.String("redis::redis_addr")
	if len(myConf.redisConf.Addr) == 0 {
		logs.Warn("got redis addr failed,and will use default!")
		myConf.redisConf.Addr = "127.0.0.1:6379"
	}
	logs.Info("redis addr: %v", myConf.redisConf.Addr)
	myConf.redisConf.Password = conf.String("redis::redis_passwd")
	if len(myConf.redisConf.Password) == 0 {
		logs.Warn("not got redis password!")
	}
	logs.Info("got redis password: %v", myConf.redisConf.Password)
	myConf.redisConf.IdleConn, err = conf.Int("redis::redis_idle_conn")
	if err != nil {
		logs.Warn("not got redis idle conn:%v will use default", err)
		myConf.redisConf.IdleConn = 16
	}
	logs.Info("redis idle conn: %v", myConf.redisConf.IdleConn)
	myConf.redisConf.MaxConn, err = conf.Int("redis::redis_max_conn")
	if err != nil {
		logs.Warn("not got redis max conn:%v, will use default", err)
		myConf.redisConf.MaxConn = 1024
	}
	logs.Info("redis max conn: %v", myConf.redisConf.MaxConn)
	myConf.redisConf.IdleTimeout, err = conf.Int("redis::redis_time_out")
	if err != nil {
		logs.Warn("not got redis idle timeout: %v, will use default", err)
		// Fixed copy-paste bug: the fallback previously overwrote
		// IdleConn instead of IdleTimeout.
		myConf.redisConf.IdleTimeout = 300
	}
	logs.Info("redis idle timeout: %v", myConf.redisConf.IdleTimeout)
	// load log conf
	loadLogConf(conf)
	return
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sharedmem
import (
"math"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/eventfd"
"gvisor.dev/gvisor/pkg/tcpip/link/sharedmem/queue"
)
const (
	// nilID is the sentinel "no entry" value for idManager free lists.
	nilID = math.MaxUint64
)
// tx holds all state associated with a tx queue.
type tx struct {
	data []byte          // mapped data region holding packet payload buffers
	q queue.Tx           // underlying shared-memory transmit queue
	ids idManager        // assigns IDs to in-flight TxBuffer chains
	bufs bufferManager   // fixed-size buffer allocator over data
	eventFD eventfd.Eventfd // used to notify the peer of pending packets
	sharedData []byte    // mapped shared state region
	sharedDataFD int     // FD backing sharedData, retained for later use
}
// init initializes all state needed by the tx queue based on the information
// provided.
//
// The caller always retains ownership of all file descriptors passed in. The
// queue implementation will duplicate any that it may need in the future.
func (t *tx) init(bufferSize uint32, c *QueueConfig) error {
// Map in all buffers.
txPipe, err := getBuffer(c.TxPipeFD)
if err != nil {
return err
}
rxPipe, err := getBuffer(c.RxPipeFD)
if err != nil {
unix.Munmap(txPipe)
return err
}
data, err := getBuffer(c.DataFD)
if err != nil {
unix.Munmap(txPipe)
unix.Munmap(rxPipe)
return err
}
sharedData, err := getBuffer(c.SharedDataFD)
if err != nil {
unix.Munmap(txPipe)
unix.Munmap(rxPipe)
unix.Munmap(data)
}
// Initialize state based on buffers.
t.q.Init(txPipe, rxPipe, sharedDataPointer(sharedData))
t.ids.init()
t.bufs.init(0, len(data), int(bufferSize))
t.data = data
t.eventFD = c.EventFD
t.sharedDataFD = c.SharedDataFD
t.sharedData = sharedData
return nil
}
// cleanup releases all resources allocated during init(). It must only be
// called if init() has previously succeeded.
//
// NOTE(review): the sharedData mapping established in init() is not
// unmapped here — confirm whether its lifetime is managed elsewhere.
func (t *tx) cleanup() {
	// q.Bytes returns the two pipe mappings handed to q.Init.
	a, b := t.q.Bytes()
	unix.Munmap(a)
	unix.Munmap(b)
	unix.Munmap(t.data)
}
// transmit sends a packet made of bufs. Returns a boolean that specifies
// whether the packet was successfully transmitted.
func (t *tx) transmit(transmitBuf buffer.Buffer) bool {
	// Pull completions from the tx queue and add their buffers back to the
	// pool so that we can reuse them.
	for {
		id, ok := t.q.CompletedPacket()
		if !ok {
			break
		}
		if buf := t.ids.remove(id); buf != nil {
			t.bufs.free(buf)
		}
	}
	// Number of fixed-size entries needed for the payload, rounded up.
	bSize := t.bufs.entrySize
	total := uint32(transmitBuf.Size())
	bufCount := (total + bSize - 1) / bSize
	// Allocate enough buffers to hold all the data.
	var buf *queue.TxBuffer
	for i := bufCount; i != 0; i-- {
		b := t.bufs.alloc()
		if b == nil {
			// Failed to get all buffers. Return to the pool
			// whatever we had managed to get.
			if buf != nil {
				t.bufs.free(buf)
			}
			return false
		}
		// Chain head-first; entries are uniform so order is irrelevant.
		b.Next = buf
		buf = b
	}
	// Copy data into allocated buffers.
	nBuf := buf
	var dBuf []byte
	transmitBuf.Apply(func(v *buffer.View) {
		for v.Size() > 0 {
			if len(dBuf) == 0 {
				// Current chunk exhausted; advance to the next buffer.
				dBuf = t.data[nBuf.Offset:][:nBuf.Size]
				nBuf = nBuf.Next
			}
			n := copy(dBuf, v.AsSlice())
			v.TrimFront(n)
			dBuf = dBuf[n:]
		}
	})
	// Get an id for this packet and send it out.
	id := t.ids.add(buf)
	if !t.q.Enqueue(id, total, bufCount, buf) {
		// The queue refused the packet: roll back the id and buffers.
		t.ids.remove(id)
		t.bufs.free(buf)
		return false
	}
	return true
}
// notify writes to the tx.eventFD to indicate to the peer that there is data to
// be read. The write is skipped when the peer has notifications disabled.
func (t *tx) notify() {
	if t.q.NotificationsEnabled() {
		t.eventFD.Notify()
	}
}
// idDescriptor is used by idManager to either point to a tx buffer (in case
// the ID is assigned) or to the next free element (if the id is not assigned).
type idDescriptor struct {
	buf *queue.TxBuffer // non-nil iff this ID is currently assigned
	nextFree uint64     // next entry in the free list; meaningful only when buf is nil
}
// idManager is a manager of tx buffer identifiers. It assigns unique IDs to
// tx buffers that are added to it; the IDs can only be reused after they have
// been removed.
//
// The ID assignments are stored so that the tx buffers can be retrieved from
// the IDs previously assigned to them.
type idManager struct {
	// ids is a slice containing all tx buffers. The ID is the index into
	// this slice.
	ids []idDescriptor
	// freeList a list of free IDs; nilID when empty.
	freeList uint64
}
// init initializes the id manager with an empty free list.
func (m *idManager) init() {
	m.freeList = nilID
}
// add assigns an ID to the given tx buffer, reusing a freed ID when one is
// available and growing the descriptor table otherwise.
func (m *idManager) add(b *queue.TxBuffer) uint64 {
	i := m.freeList
	if i == nilID {
		// Free list is empty; append a fresh descriptor instead.
		m.ids = append(m.ids, idDescriptor{buf: b})
		return uint64(len(m.ids) - 1)
	}
	// Pop the first entry off the free list and assign it.
	d := &m.ids[i]
	d.buf = b
	m.freeList = d.nextFree
	return i
}
// remove retrieves the tx buffer associated with the given ID, and removes the
// ID from the assigned table so that it can be reused in the future. It
// returns nil for out-of-range or unassigned IDs.
func (m *idManager) remove(i uint64) *queue.TxBuffer {
	if i >= uint64(len(m.ids)) {
		return nil
	}
	d := &m.ids[i]
	if d.buf == nil {
		// The provided id is not currently assigned.
		return nil
	}
	b := d.buf
	// Unassign the descriptor and push it onto the free list.
	d.buf = nil
	d.nextFree = m.freeList
	m.freeList = i
	return b
}
// bufferManager manages a buffer region broken up into smaller, equally sized
// buffers. Smaller buffers can be allocated and freed.
type bufferManager struct {
	freeList *queue.TxBuffer // chain of previously freed buffers
	curOffset uint64         // start of the never-yet-used region
	limit uint64             // exclusive upper bound of usable offsets
	entrySize uint32         // size of every managed buffer
}
// init initializes the buffer manager over a region of the given size,
// discarding any trailing partial entry.
func (b *bufferManager) init(initialOffset, size, entrySize int) {
	usable := size / entrySize * entrySize // drop the trailing partial entry
	b.freeList = nil
	b.curOffset = uint64(initialOffset)
	b.limit = uint64(initialOffset + usable)
	b.entrySize = uint32(entrySize)
}
// alloc allocates a buffer from the manager, if one is available; it
// returns nil when the pool is exhausted.
func (b *bufferManager) alloc() *queue.TxBuffer {
	if d := b.freeList; d != nil {
		// Reuse a previously freed descriptor.
		b.freeList = d.Next
		d.Next = nil
		return d
	}
	if b.curOffset >= b.limit {
		// No freed entries and no never-used space left.
		return nil
	}
	// Carve a fresh descriptor out of the never-used region.
	d := &queue.TxBuffer{
		Offset: b.curOffset,
		Size:   b.entrySize,
	}
	b.curOffset += uint64(b.entrySize)
	return d
}
// free returns all buffers chained from d to the manager for reuse.
func (b *bufferManager) free(d *queue.TxBuffer) {
	// Walk to the tail of the incoming chain, then splice the whole
	// chain onto the front of the free list.
	tail := d
	for tail.Next != nil {
		tail = tail.Next
	}
	tail.Next = b.freeList
	b.freeList = d
}
|
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package interval
import (
"github.com/google/gapid/core/math/u64"
)
// ValueList is a mutable interval list whose elements carry an associated
// value in addition to their span.
type ValueList interface {
	MutableList
	// GetValue returns the value of the element at index.
	GetValue(index int) interface{}
	// SetValue replaces the value of the element at index.
	SetValue(index int, value interface{})
	// Insert makes room for count elements starting at index.
	Insert(index int, count int)
	// Delete removes count elements starting at index.
	Delete(index int, count int)
}

// ValueSpan pairs an interval with its associated value.
type ValueSpan struct {
	Span U64Span
	Value interface{}
}

// ValueSpanList is a slice-backed implementation of ValueList.
type ValueSpanList []ValueSpan
// Length returns the number of elements in the list.
// Implements `List.Length`.
func (l *ValueSpanList) Length() int {
	return len(*l)
}
// GetSpan returns the span for the element at index in the list.
// Implements `List.GetSpan`.
func (l *ValueSpanList) GetSpan(index int) U64Span {
	return (*l)[index].Span
}
// SetSpan sets the span for the element at index in the list.
// Implements `MutableList.SetSpan`.
func (l *ValueSpanList) SetSpan(index int, span U64Span) {
	(*l)[index].Span = span
}
// New creates a new element at the specified index with the specified span.
// The slot must already exist (e.g. via Resize/Insert); only the span is
// written here — the value is set separately via SetValue.
// Implements `MutableList.New`.
func (l *ValueSpanList) New(index int, span U64Span) {
	(*l)[index].Span = span
}
// Copy copies count list entries from index `from` to index `to`.
// Overlapping ranges are handled by the built-in copy semantics.
// Implements `MutableList.Copy`.
func (l *ValueSpanList) Copy(to, from, count int) {
	copy((*l)[to:to+count], (*l)[from:from+count])
}
// Resize adjusts the length of the array, reallocating (with geometric
// growth) only when the existing capacity is insufficient.
// Implements `MutableList.Resize`.
func (l *ValueSpanList) Resize(length int) {
	if cap(*l) >= length {
		// Capacity suffices: reslice in place. (Was `>`, which forced a
		// needless reallocation when cap == length.)
		*l = (*l)[:length]
	} else {
		old := *l
		// Grow geometrically, but at least to the requested length.
		capacity := cap(*l) * 2
		if capacity < length {
			capacity = length
		}
		*l = make(ValueSpanList, length, capacity)
		copy(*l, old)
	}
}
// GetValue returns the value stored at index.
// Implements `ValueList.GetValue`.
func (l ValueSpanList) GetValue(index int) interface{} {
	return l[index].Value
}
// SetValue replaces the value stored at index.
// Implements `ValueList.SetValue`.
func (l *ValueSpanList) SetValue(index int, value interface{}) {
	(*l)[index].Value = value
}
// Insert opens a gap of count elements at index by growing the list and
// shifting the tail right.
// NOTE(review): the slots in [index, index+count) keep their previous
// contents — callers are expected to overwrite both span and value
// (as Update does); confirm no caller relies on zeroed slots.
// Implements `ValueList.Insert`.
func (l *ValueSpanList) Insert(index int, count int) {
	*l = append(*l, make(ValueSpanList, count)...)
	if index+count < len(*l) {
		copy((*l)[index+count:], (*l)[index:])
	}
}
// Delete removes count elements starting at index by shifting the tail
// left and truncating the slice.
// Implements `ValueList.Delete`.
func (l *ValueSpanList) Delete(index int, count int) {
	if index+count < len(*l) {
		copy((*l)[index:], (*l)[index+count:])
	}
	*l = (*l)[:len(*l)-count]
}
// Update modifies the values in `span` by applying the function `f`.
// - Parts of `span` that are outside the intervals in `l` are inserted with
// value `f(nil)`.
// - If `f` returns `nil`, the corresponding span is removed.
// - Adjacent intervals with the same value are merged.
func Update(l ValueList, span U64Span, f func(interface{}) interface{}) {
	// k is the first interval whose end lies beyond span.Start.
	k := Search(l, func(test U64Span) bool {
		return span.Start < test.End
	})
	elems := []ValueSpan{}
	// add consumes [start, end) from span, appending an element to elems
	// (merging with the previous element when value and edge match).
	add := func(val interface{}, start uint64, end uint64) {
		if start >= end {
			return
		}
		if val == nil {
			// Removed span: consume the range without emitting an
			// element. (This branch was previously unreachable because
			// the guard above also tested `val == nil`, so removed
			// ranges failed to advance span.Start and the next add
			// incorrectly extended a neighbouring value over the gap.)
			span.Start = end
			return
		}
		if len(elems) > 0 {
			// check if new span should be merged with last existing span
			e := &elems[len(elems)-1]
			if e.Value == val && e.Span.End == start {
				e.Span.End = end
				span.Start = end
				return
			}
		}
		elems = append(elems, ValueSpan{U64Span{start, end}, val})
		span.Start = end
	}
	i := k
	if i < l.Length() {
		// Add the part of `a` before `span` (if it exists).
		// This can only exist for the first `a`, since after that
		// `span.Start` will always be the `End` of the previous `a`,
		// which precedes the current `a`.
		add(l.GetValue(i), l.GetSpan(i).Start, span.Start)
	}
	// For each overlapping span...
	for ; i < l.Length(); i++ {
		iSpan := l.GetSpan(i)
		if iSpan.Start >= span.End {
			break
		}
		// Add the part of `span` before `a`
		add(f(nil), span.Start, iSpan.Start)
		// Add the part of `span` that intersects `a`
		add(f(l.GetValue(i)), span.Start, u64.Min(iSpan.End, span.End))
		if iSpan.End > span.End {
			add(l.GetValue(i), span.End, iSpan.End)
		}
	}
	// Add the part of `span` after the last overlapping span
	add(f(nil), span.Start, span.End)
	if k > 0 && len(elems) > 0 {
		// Merge first `elems` with the previous span, if necessary
		s := l.GetSpan(k - 1)
		e := elems[0]
		if s.End == e.Span.Start && l.GetValue(k-1) == e.Value {
			s.End = e.Span.End
			l.SetSpan(k-1, s)
			elems = elems[1:]
		}
	}
	// Check for intervals that need to be merged
	if i < l.Length() {
		s := l.GetSpan(i)
		if len(elems) > 0 {
			// Merge the last `elems` with span `i`, if necessary
			e := elems[len(elems)-1]
			if s.Start == e.Span.End && l.GetValue(i) == e.Value {
				s.Start = e.Span.Start
				l.SetSpan(i, s)
				elems = elems[:len(elems)-1]
			}
		}
	}
	if len(elems) == 0 && 0 < k && i < l.Length() {
		// Not inserting any elements.
		// Merge span `k-1` with span `i`, if necessary
		si := l.GetSpan(i)
		sk := l.GetSpan(k - 1)
		if sk.End == si.Start && l.GetValue(k-1) == l.GetValue(i) {
			sk.End = si.End
			l.SetSpan(k-1, sk)
			i++
		}
	}
	// List elements `[k,i)` will be deleted, and `elems` will be inserted
	// at index `k`. This may require inserting or deleting elements.
	if len(elems) > i-k {
		// Make room for the new elements
		l.Insert(k, len(elems)-(i-k))
	} else if len(elems) < i-k {
		// Remove excess elements
		l.Delete(k, i-k-len(elems))
	}
	// Assign `elems` to the list indices `k ... k+len(elems)`
	for j, e := range elems {
		l.SetSpan(k+j, e.Span)
		l.SetValue(k+j, e.Value)
	}
}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package broker
import (
"context"
"errors"
"fmt"
"github.com/pivotal-cf/brokerapi/v10/domain/apiresponses"
"github.com/pivotal-cf/on-demand-service-broker/brokercontext"
"github.com/pivotal-cf/on-demand-service-broker/serviceadapter"
)
const (
	// GenericErrorPrefix opens every operator-facing generic error message.
	GenericErrorPrefix = "There was a problem completing your request. Please contact your operations team providing the following information:"
	// PendingChangesErrorMessage is shown when the instance lags behind the broker.
	PendingChangesErrorMessage = "The service broker has been updated, and this service instance is out of date. Please contact your operator."
	// OperationInProgressMessage is shown when a concurrent operation blocks the request.
	OperationInProgressMessage = "An operation is in progress for your service instance. Please try again later."
	// UpdateLoggerAction is the (empty) logger action label used for updates.
	UpdateLoggerAction = ""
)
// OperationInProgressError marks an error caused by a concurrent operation
// on the same service instance.
type OperationInProgressError struct {
	error
}
// NewOperationInProgressError wraps e as an OperationInProgressError.
func NewOperationInProgressError(e error) error {
	return OperationInProgressError{e}
}
// BrokerError exposes both the sanitized CF-user error and the full
// operator-facing error text.
type BrokerError interface {
	ErrorForCFUser() error
	Error() string
}
// DisplayableError carries two views of the same failure: a sanitized one
// for the CF end user and a detailed one for the operator.
type DisplayableError struct {
	errorForCFUser error
	errorForOperator error
}
// ErrorForCFUser returns the sanitized, user-facing error.
func (e DisplayableError) ErrorForCFUser() error {
	return e.errorForCFUser
}
// ExtendedCFError combines the CF-user error with the operator detail,
// preserving a *FailureResponse's structure when present.
func (e DisplayableError) ExtendedCFError() error {
	detail := fmt.Sprintf("- error-message: %s", e.errorForOperator)
	if failure, ok := e.errorForCFUser.(*apiresponses.FailureResponse); ok {
		return failure.AppendErrorMessage(detail)
	}
	return fmt.Errorf("%s %s", e.errorForCFUser, detail)
}
// Error renders both views for log output: operator detail first, user text second.
func (e DisplayableError) Error() string {
	return fmt.Sprintf("error: %s. error for user: %s.", e.errorForOperator, e.errorForCFUser)
}
// NewDisplayableError builds a DisplayableError from its two constituent
// errors, named explicitly for clarity.
func NewDisplayableError(errorForCFUser, errForOperator error) DisplayableError {
	return DisplayableError{
		errorForCFUser:   errorForCFUser,
		errorForOperator: errForOperator,
	}
}
// NewBoshRequestError wraps a BOSH request failure: the user sees a retry
// hint mentioning the attempted action, the operator sees the raw error.
func NewBoshRequestError(action string, requestError error) DisplayableError {
	return DisplayableError{
		errorForCFUser:   fmt.Errorf("Currently unable to %s service instance, please try again later", action),
		errorForOperator: requestError,
	}
}
// NewGenericError builds the catch-all user-facing error, embedding the
// request identifiers pulled from the broker context so operators can
// correlate the report; the original error is kept for the operator view.
func NewGenericError(ctx context.Context, err error) DisplayableError {
	serviceName := brokercontext.GetServiceName(ctx)
	instanceID := brokercontext.GetInstanceID(ctx)
	reqID := brokercontext.GetReqID(ctx)
	operation := brokercontext.GetOperation(ctx)
	boshTaskID := brokercontext.GetBoshTaskID(ctx)
	message := fmt.Sprintf(
		"%s service: %s, service-instance-guid: %s, broker-request-id: %s",
		GenericErrorPrefix, serviceName, instanceID, reqID,
	)
	if boshTaskID != 0 {
		message = fmt.Sprintf("%s, task-id: %d", message, boshTaskID)
	}
	if operation != "" {
		message = fmt.Sprintf("%s, operation: %s", message, operation)
	}
	return DisplayableError{
		errorForCFUser:   errors.New(message),
		errorForOperator: err,
	}
}
// adapterToAPIError translates service-adapter errors into the brokerapi
// error values CF understands; anything unrecognised becomes a generic,
// sanitized user error.
func adapterToAPIError(ctx context.Context, err error) error {
	if err == nil {
		return nil
	}
	switch err.(type) {
	case serviceadapter.BindingAlreadyExistsError:
		return apiresponses.ErrBindingAlreadyExists
	case serviceadapter.BindingNotFoundError:
		return apiresponses.ErrBindingDoesNotExist
	case serviceadapter.AppGuidNotProvidedError:
		return apiresponses.ErrAppGuidNotProvided
	case serviceadapter.UnknownFailureError:
		if err.Error() == "" {
			// Adapter returned an unknown error with no message;
			// substitute the generic user-facing error.
			err = NewGenericError(ctx, err).ErrorForCFUser()
		}
		return err
	default:
		return NewGenericError(ctx, err).ErrorForCFUser()
	}
}
// PlanNotFoundError reports a request for a plan GUID the catalog lacks.
type PlanNotFoundError struct {
	PlanGUID string
}
// Error implements the error interface.
func (e PlanNotFoundError) Error() string {
	return fmt.Sprintf("plan %s does not exist", e.PlanGUID)
}
// DeploymentNotFoundError marks a missing BOSH deployment.
type DeploymentNotFoundError struct {
	error
}
// NewDeploymentNotFoundError wraps e as a DeploymentNotFoundError.
func NewDeploymentNotFoundError(e error) error {
	return DeploymentNotFoundError{e}
}
// TaskInProgressError reports that a BOSH task is still running.
type TaskInProgressError struct {
	Message string
}
// Error implements the error interface.
func (e TaskInProgressError) Error() string {
	return e.Message
}
// ServiceError marks a failure originating in the service itself.
type ServiceError struct {
	error
}
// NewServiceError wraps e as a ServiceError.
func NewServiceError(e error) error {
	return ServiceError{error: e}
}
// PendingChangesNotAppliedError marks an instance with unapplied broker changes.
type PendingChangesNotAppliedError struct {
	error
}
// NewPendingChangesNotAppliedError wraps e as a PendingChangesNotAppliedError.
func NewPendingChangesNotAppliedError(e error) error {
	return PendingChangesNotAppliedError{error: e}
}
// OperationAlreadyCompletedError marks an operation that has already finished.
type OperationAlreadyCompletedError struct {
	error
}
// NewOperationAlreadyCompletedError wraps e as an OperationAlreadyCompletedError.
func NewOperationAlreadyCompletedError(e error) error {
	return OperationAlreadyCompletedError{error: e}
}
|
package lru
import (
"container/list"
)
/*
LRU (least recently used): entries accessed recently are the most likely
to be accessed again, so:
 1. New entries are inserted at the head of the list.
 2. On a cache hit, the entry is moved to the head of the list.
 3. When the list is full, the entry at the tail is evicted.
*/
// LRUCache is a fixed-capacity int->int cache with LRU eviction.
type LRUCache struct {
	capacity int                   // maximum number of entries
	cache map[int]*list.Element    // key -> node in list
	list *list.List                // front = most recently used
}
// Pair is the payload stored in each list element; the key is kept so the
// cache map entry can be deleted when the element is evicted from the tail.
type Pair struct {
	key int
	value int
}
// Constructor builds an empty LRUCache bounded to the given capacity.
func Constructor(capacity int) LRUCache {
	c := LRUCache{capacity: capacity}
	c.cache = make(map[int]*list.Element)
	c.list = list.New()
	return c
}
// Get returns the value stored under key, promoting the entry to
// most-recently-used; it returns -1 when the key is absent.
func (lc *LRUCache) Get(key int) int {
	elem, ok := lc.cache[key]
	if !ok {
		return -1
	}
	lc.list.MoveToFront(elem)
	return elem.Value.(Pair).value
}
// Put stores value under key. An existing entry is refreshed and promoted;
// otherwise the least-recently-used entry is evicted when at capacity and
// the new entry is inserted at the front.
func (lc *LRUCache) Put(key, value int) {
	if elem, ok := lc.cache[key]; ok {
		// Key already cached: overwrite and promote.
		elem.Value = Pair{key, value}
		lc.list.MoveToFront(elem)
		return
	}
	if lc.list.Len() >= lc.capacity {
		// Evict the least-recently-used entry from the tail.
		oldest := lc.list.Back()
		delete(lc.cache, oldest.Value.(Pair).key)
		lc.list.Remove(oldest)
	}
	lc.cache[key] = lc.list.PushFront(Pair{key, value})
}
|
package participant
import "t32/game"
// spyCoordinates is a scripted (X, Y) move returned by ItsYourTurn.
type spyCoordinates struct {
	X, Y int
}
// spyClient is a test double recording which client callbacks were invoked
// and with what arguments; Coordinates scripts the moves ItsYourTurn
// returns (consumed from the end of the slice).
type spyClient struct {
	Coordinates []spyCoordinates
	game.Board
	game.Player
	Message string
	ReqWaitingForOthers bool
	ReqItsAnothersTurn bool
	ReqItsYourTurn bool
	ReqStalemate bool
	ReqAnotherWon bool
	ReqYouWon bool
	ReqFlash bool
}
// WaitingForOthers records that the callback fired.
func (c *spyClient) WaitingForOthers() {
	c.ReqWaitingForOthers = true
}
// ItsAnothersTurn records the callback and its arguments.
func (c *spyClient) ItsAnothersTurn(b game.Board, p game.Player) {
	c.ReqItsAnothersTurn = true
	c.Board = b
	c.Player = p
}
// ItsYourTurn records the callback and pops the next scripted move off the
// end of Coordinates; with no moves left it plays (0, 0).
func (c *spyClient) ItsYourTurn(b game.Board, p game.Player) (int, int) {
	c.ReqItsYourTurn = true
	c.Board = b
	c.Player = p
	n := len(c.Coordinates)
	if n == 0 {
		return 0, 0
	}
	move := c.Coordinates[n-1]
	c.Coordinates = c.Coordinates[:n-1]
	return move.X, move.Y
}
// Stalemate records the callback and the final board.
func (c *spyClient) Stalemate(b game.Board) {
	c.ReqStalemate = true
	c.Board = b
}
// AnotherWon records the callback, board, and winning player.
func (c *spyClient) AnotherWon(b game.Board, p game.Player) {
	c.ReqAnotherWon = true
	c.Board = b
	c.Player = p
}
// YouWon records the callback, board, and winning player.
func (c *spyClient) YouWon(b game.Board, p game.Player) {
	c.ReqYouWon = true
	c.Board = b
	c.Player = p
}
// Flash records the callback, board, and flash message.
func (c *spyClient) Flash(b game.Board, msg string) {
	c.ReqFlash = true
	c.Board = b
	c.Message = msg
}
|
package swift
import (
"regexp"
"strings"
"github.com/anecsoiu/banking/country"
)
const (
	// lengthSwift8 represents length of type Swift8 swift codes.
	lengthSwift8 = 8
	// lengthSwift11 represents length of type Swift11 swift codes.
	lengthSwift11 = 11
)
var (
	// regexBankCode holds Regexp for matching bank codes.
	regexBankCode = regexp.MustCompile("^[A-Z]+$")
	// regexCountryCode holds Regexp for matching country codes.
	regexCountryCode = regexp.MustCompile("^[A-Z]+$")
	// regexLocationCode holds Regexp for matching location codes.
	regexLocationCode = regexp.MustCompile("^[A-Z0-9]+$")
	// regexBranchCode holds Regexp for matching branch codes.
	regexBranchCode = regexp.MustCompile("^[A-Z0-9]+$")
)
// validateLength checks that value has one of the two legal SWIFT lengths
// (8 or 11 characters).
func validateLength(value string) error {
	switch len(value) {
	case lengthSwift8, lengthSwift11:
		return nil
	default:
		return ErrInvalidLength
	}
}
// validateCase checks that value contains no lower-case characters by
// comparing it against its upper-cased form.
func validateCase(value string) error {
	if strings.ToUpper(value) != value {
		return ErrInvalidCase
	}
	return nil
}
func validateBankCode(value string) error {
if code := extractBankCode(value); !regexBankCode.MatchString(code) {
return ErrInvalidBankCode
}
return nil
}
// validateCountryCode checks that the country-code portion is alphabetic
// upper-case and names a country known to the country package.
func validateCountryCode(value string) error {
	code := extractCountryCode(value)
	switch {
	case !regexCountryCode.MatchString(code):
		return ErrInvalidCountryCode
	case !country.Exists(code):
		return ErrCountryCodeNotPresent
	default:
		return nil
	}
}
func validateLocationCode(value string) error {
if code := extractLocationCode(value); !regexLocationCode.MatchString(code) {
return ErrInvalidLocationCode
}
return nil
}
// validateBranchCode checks the branch-code portion, which only exists on
// 11-character codes; 8-character codes pass trivially.
func validateBranchCode(value string) error {
	if !hasBranchCode(value) {
		return nil
	}
	if !regexBranchCode.MatchString(extractBranchCode(value)) {
		return ErrInvalidBranchCode
	}
	return nil
}
// extractBankCode returns the 4-letter institution code (characters 1-4).
func extractBankCode(value string) string {
	return value[:4]
}

// extractCountryCode returns the 2-letter country code (characters 5-6).
func extractCountryCode(value string) string {
	return value[4:6]
}

// extractLocationCode returns the location code (characters 7-8).
func extractLocationCode(value string) string {
	return value[6:8]
}

// extractBranchCode returns the branch code (characters 9-11); callers
// must guard with hasBranchCode first.
func extractBranchCode(value string) string {
	return value[8:11]
}
// hasBranchCode reports whether value is an 11-character code and
// therefore carries a branch code in its last three characters.
func hasBranchCode(value string) bool {
	return len(value) == lengthSwift11
}
|
package mondohttp
import (
"net/http"
"net/url"
"strings"
)
// NewAuthCodeAccessRequest creates a request for exchanging authorization codes.
// https://getmondo.co.uk/docs/#exchange-the-authorization-code
func NewAuthCodeAccessRequest(clientID, clientSecret, redirectURI, authCode string) *http.Request {
	form := url.Values{}
	form.Set("grant_type", "authorization_code")
	form.Set("client_id", clientID)
	form.Set("client_secret", clientSecret)
	form.Set("redirect_uri", redirectURI)
	form.Set("code", authCode)
	// Error ignored: method and URL are compile-time constants.
	req, _ := http.NewRequest("POST", ProductionAPI+"oauth2/token", strings.NewReader(form.Encode()))
	req.Header.Set(formContentType())
	return req
}
// NewRefreshAccessRequest creates a request for refreshing an access token.
// https://getmondo.co.uk/docs/#refreshing-access
func NewRefreshAccessRequest(clientID, clientSecret, refreshToken string) *http.Request {
	form := url.Values{}
	form.Set("grant_type", "refresh_token")
	form.Set("client_id", clientID)
	form.Set("client_secret", clientSecret)
	form.Set("refresh_token", refreshToken)
	// Error ignored: method and URL are compile-time constants.
	req, _ := http.NewRequest("POST", ProductionAPI+"oauth2/token", strings.NewReader(form.Encode()))
	req.Header.Set(formContentType())
	return req
}
// NewWhoAmIRequest creates a request for verifying the authenticated identity.
// https://getmondo.co.uk/docs/#authenticating-requests
func NewWhoAmIRequest(accessToken string) *http.Request {
	// Error ignored: method and URL are compile-time constants.
	req, _ := http.NewRequest("GET", ProductionAPI+"ping/whoami", nil)
	req.Header.Set(auth(accessToken))
	return req
}
// NewPingRequest creates a request for pinging the API service.
// (Not documented, but is an example in the API Playground.)
func NewPingRequest() *http.Request {
	// Error ignored: method and URL are compile-time constants.
	req, _ := http.NewRequest("GET", ProductionAPI+"ping", nil)
	return req
}
|
package mysql
import (
"reflect"
)
// Field pairs the static description of a struct field with its current
// database value.
type Field struct {
	*FieldStruct
	*FieldValue
}
// NewField builds a Field from its two halves.
func NewField(fieldStruct *FieldStruct, fieldValue *FieldValue) *Field {
	return &Field{
		FieldStruct: fieldStruct,
		FieldValue: fieldValue,
	}
}
// FieldValue holds a field's raw byte value and a dirty flag that marks
// it for the next write-back.
type FieldValue struct {
	Value []byte
	NeedUpdate bool
}
// NewInitFieldValue returns a clean, empty FieldValue.
func NewInitFieldValue() *FieldValue {
	return &FieldValue{
		Value: []byte{},
		NeedUpdate: false,
	}
}
// func NewFieldValue(value []byte) *FieldValue {
// // var v []byte
// // var err error
//
// // rv, ok := value.(reflect.Value)
// // if ok {
// // v, err = getValue(rv)
// // if err != nil {
// // v = []byte{}
// // }
// // } else {
// // v, err = getValue(reflect.ValueOf(value))
// // if err != nil {
// // v = []byte{}
// // }
// // }
//
// return &FieldValue{
// Value: value,
// NeedUpdate: false,
// }
// }
// ValuePtr exposes the raw value slice by pointer, e.g. for scan targets.
func (fv *FieldValue) ValuePtr() *[]byte {
	return &fv.Value
}
// Interface decodes the raw value into an interface{} via setValue.
//
// Fixed: the previous code passed reflect.ValueOf(i) for a nil interface,
// which yields a non-addressable zero Value, so i could never be written
// and the method always returned nil. reflect.ValueOf(&i).Elem() is
// addressable and settable.
// NOTE(review): assumes setValue writes into a settable reflect.Value —
// confirm against its definition elsewhere in this package.
func (fv *FieldValue) Interface() interface{} {
	var i interface{}
	_ = setValue(reflect.ValueOf(&i).Elem(), fv.Value)
	return i
}
// Update encodes value (either a reflect.Value or a plain value) into
// bytes and stores it, setting NeedUpdate only when the bytes changed.
// Encoding failures store an empty value.
func (fv *FieldValue) Update(value interface{}) {
	rv, ok := value.(reflect.Value)
	if !ok {
		rv = reflect.ValueOf(value)
	}
	v, err := getValue(rv)
	if err != nil {
		v = []byte{}
	}
	if equal(fv.Value, v) {
		return
	}
	fv.Value = v
	fv.NeedUpdate = true
}
// RefreshUpdate clears the dirty flag after a successful write-back.
func (fv *FieldValue) RefreshUpdate() {
	fv.NeedUpdate = false
}
// FieldStruct is the static description of a mapped struct field: its
// name, positional index, and reflected type.
type FieldStruct struct {
	Name string
	Index int
	Type reflect.Type
}
// NewFieldStruct builds a FieldStruct from its parts.
func NewFieldStruct(name string, index int, typ reflect.Type) *FieldStruct {
	return &FieldStruct{
		Name: name,
		Index: index,
		Type: typ,
	}
}
// NewValue returns a pointer to a freshly allocated zero value of the
// field's type, boxed as interface{}.
func (s *FieldStruct) NewValue() interface{} {
	return reflect.New(s.Type).Interface()
}
|
package ispalindrome
import (
"strings"
"unicode"
)
// isPalindrome reports whether s reads the same forwards and backwards
// when only alphanumeric characters are considered and case is ignored.
//
// Fixed: the outer `var i, j int` was shadowed by `for i, j := ...`, so
// the trailing `if i == j` always compared 0 == 0 — dead, misleading code
// flagged by go vet. Observable behavior (return true whenever the scan
// finds no mismatch) is unchanged.
//
// NOTE(review): comparison is byte-wise, so only ASCII input is handled
// reliably; multi-byte UTF-8 runes are inspected byte by byte.
func isPalindrome(s string) bool {
	str := strings.ToLower(s)
	i, j := 0, len(str)-1
	for i < j {
		// Skip non-alphanumeric characters from the left.
		if !unicode.IsLetter(rune(str[i])) && !unicode.IsDigit(rune(str[i])) {
			i++
			continue
		}
		// Skip non-alphanumeric characters from the right.
		if !unicode.IsLetter(rune(str[j])) && !unicode.IsDigit(rune(str[j])) {
			j--
			continue
		}
		if str[i] != str[j] {
			return false
		}
		i++
		j--
	}
	// Every candidate pair matched (also covers "" and single characters).
	return true
}
|
package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"github.com/chadweimer/gomp/db"
"github.com/chadweimer/gomp/upload"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rs/zerolog"
"github.com/rs/zerolog/hlog"
"github.com/rs/zerolog/log"
)
// ---- Begin Standard Errors ----
// errMismatchedId is returned when a request body's id contradicts the URL path.
var errMismatchedId = errors.New("id in the path does not match the one specified in the request body")
// ---- End Standard Errors ----
// ---- Begin Context Keys ----
// contextKey is a private key type for request-context values, preventing
// collisions with keys defined by other packages.
type contextKey string

// String implements fmt.Stringer with a package-specific prefix.
func (k contextKey) String() string {
	return "gomp context key: " + string(k)
}
const (
	// currentUserIdCtxKey stores the authenticated user's id on the request context.
	currentUserIdCtxKey = contextKey("current-user-id")
	// currentUserTokenCtxKey stores the authenticated user's token on the request context.
	currentUserTokenCtxKey = contextKey("current-user-token")
)
// ---- End Context Keys ----
// apiHandler bundles the dependencies shared by all API endpoints:
// token-signing keys, the image uploader, and the database driver.
type apiHandler struct {
	secureKeys []string
	upl *upload.ImageUploader
	db db.Driver
}
// NewHandler returns a new instance of http.Handler serving the versioned
// API under /v1, with JSON content type, scope checking, and uniform
// error rendering wired in.
func NewHandler(secureKeys []string, upl *upload.ImageUploader, db db.Driver) http.Handler {
	h := apiHandler{
		secureKeys: secureKeys,
		upl: upl,
		db: db,
	}
	r := chi.NewRouter()
	// Every API response is JSON.
	r.Use(middleware.SetHeader("Content-Type", "application/json"))
	// Mount the generated strict handler; request decoding failures map to
	// 400, handler failures to 500, both via h.Error.
	r.Mount("/v1", HandlerWithOptions(NewStrictHandlerWithOptions(
		h,
		[]StrictMiddlewareFunc{},
		StrictHTTPServerOptions{
			RequestErrorHandlerFunc: func(w http.ResponseWriter, r *http.Request, err error) {
				h.Error(w, r, http.StatusBadRequest, err)
			},
			ResponseErrorHandlerFunc: func(w http.ResponseWriter, r *http.Request, err error) {
				h.Error(w, r, http.StatusInternalServerError, err)
			},
		}),
		ChiServerOptions{
			Middlewares: []MiddlewareFunc{h.checkScopes},
			ErrorHandlerFunc: func(w http.ResponseWriter, r *http.Request, err error) {
				h.Error(w, r, http.StatusBadRequest, err)
			},
		}))
	// Unknown paths also answer in JSON.
	r.NotFound(func(w http.ResponseWriter, r *http.Request) {
		h.Error(w, r, http.StatusNotFound, fmt.Errorf("%s is not a valid API endpoint", r.URL.Path))
	})
	return r
}
// JSON writes v to w as JSON with the given status. The value is encoded
// into an intermediate buffer first so an encoding failure can still be
// reported as a 500 instead of corrupting a half-written body.
func (apiHandler) JSON(w http.ResponseWriter, r *http.Request, status int, v interface{}) {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(v); err != nil {
		hlog.FromRequest(r).UpdateContext(func(c zerolog.Context) zerolog.Context {
			return c.AnErr("encode-error", err).Int("original-status", status)
		})
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(status)
	if _, err := w.Write(buf.Bytes()); err != nil {
		// Nothing sensible left to do once the write itself fails.
		panic(err)
	}
}
// LogError attaches err to the request-scoped logger context.
func (apiHandler) LogError(ctx context.Context, err error) {
	log.Ctx(ctx).UpdateContext(func(c zerolog.Context) zerolog.Context {
		return c.Err(err)
	})
}
// Error logs err, maps well-known errors to their HTTP status (falling
// back to the caller-provided status), and writes the status text as JSON.
func (h apiHandler) Error(w http.ResponseWriter, r *http.Request, status int, err error) {
	h.LogError(r.Context(), err)
	status = getStatusFromError(err, status)
	h.JSON(w, r, status, http.StatusText(status))
}
// getResourceIdFromCtx extracts the resource id stored under idKey on the
// context, accepting either an int64 or a *int64; anything else is an error.
func getResourceIdFromCtx(ctx context.Context, idKey contextKey) (int64, error) {
	switch id := ctx.Value(idKey).(type) {
	case int64:
		return id, nil
	case *int64:
		return *id, nil
	default:
		return 0, fmt.Errorf("value of %s is not an integer", idKey)
	}
}
// getStatusFromError maps well-known errors to their HTTP status codes,
// returning fallback for anything unrecognised.
func getStatusFromError(err error, fallback int) int {
	switch {
	case errors.Is(err, db.ErrNotFound):
		return http.StatusNotFound
	case errors.Is(err, errMismatchedId):
		return http.StatusBadRequest
	default:
		return fallback
	}
}
|
package test
import (
"github.com/muidea/magicOrm/provider"
"testing"
"time"
"github.com/muidea/magicOrm/orm"
)
// TestLocalExecutor exercises the basic CRUD cycle (create table, insert,
// update, query, count, delete) against a locally configured ORM instance.
func TestLocalExecutor(t *testing.T) {
	orm.Initialize()
	defer orm.Uninitialize()
	config := orm.NewConfig("localhost:3306", "testdb", "root", "rootkit")
	provider := provider.NewLocalProvider("default")
	o1, err := orm.NewOrm(provider, config)
	if err != nil {
		t.Errorf("new Orm failed, err:%s", err.Error())
		return
	}
	// Release only after NewOrm is known to have succeeded; deferring before
	// the error check could dereference a nil orm on failure.
	defer o1.Release()
	now, _ := time.ParseInLocation("2006-01-02 15:04:05:0000", "2018-01-02 15:04:05:0000", time.Local)
	obj := &Unit{ID: 10, I64: uint64(78962222222), Name: "Hello world", Value: 12.3456, TimeStamp: now, Flag: true}
	objList := []interface{}{&Unit{}}
	registerModel(provider, objList)
	objModel, objErr := provider.GetEntityModel(obj)
	if objErr != nil {
		t.Errorf("GetEntityModel failed, err:%s", objErr.Error())
		return
	}
	err = o1.Create(objModel)
	if err != nil {
		t.Errorf("create obj failed, err:%s", err.Error())
		return
	}
	objModel, objErr = o1.Insert(objModel)
	// Bug fix: the original checked the stale `err` here, so an Insert
	// failure was silently ignored.
	if objErr != nil {
		t.Errorf("insert obj failed, err:%s", objErr.Error())
		return
	}
	obj = objModel.Interface(true).(*Unit)
	obj.Name = "abababa"
	obj.Value = 100.000
	objModel, objErr = provider.GetEntityModel(obj)
	if objErr != nil {
		t.Errorf("GetEntityModel failed, err:%s", objErr.Error())
		return
	}
	objModel, objErr = o1.Update(objModel)
	if objErr != nil {
		t.Errorf("update obj failed, err:%s", objErr.Error())
		return
	}
	obj2 := &Unit{ID: obj.ID}
	obj2Model, obj2Err := provider.GetEntityModel(obj2)
	if obj2Err != nil {
		t.Errorf("GetEntityModel failed, err:%s", obj2Err.Error())
		return
	}
	obj2Model, obj2Err = o1.Query(obj2Model)
	if obj2Err != nil {
		t.Errorf("query obj failed, err:%s", obj2Err.Error())
		return
	}
	// The queried row must reflect the earlier update.
	obj2 = obj2Model.Interface(true).(*Unit)
	if obj.Name != obj2.Name || obj.Value != obj2.Value {
		t.Errorf("query obj failed, obj:%v, obj2:%v", obj, obj2)
		return
	}
	_, countErr := o1.Count(obj2Model, nil)
	if countErr != nil {
		t.Errorf("count object failed, err:%s", countErr.Error())
		return
	}
	_, err = o1.Delete(obj2Model)
	if err != nil {
		// Bug fix: message previously said "query obj failed".
		t.Errorf("delete obj failed, err:%s", err.Error())
	}
}
// TestLocalDepends exercises models that reference other models (ExtUnit and
// ExtUnitList embedding/containing Unit) through drop/create/insert/delete.
func TestLocalDepends(t *testing.T) {
	orm.Initialize()
	defer orm.Uninitialize()
	config := orm.NewConfig("localhost:3306", "testdb", "root", "rootkit")
	provider := provider.NewLocalProvider("default")
	o1, err := orm.NewOrm(provider, config)
	if err != nil {
		t.Errorf("new Orm failed, err:%s", err.Error())
		return
	}
	// Release only after NewOrm is known to have succeeded; deferring before
	// the error check could dereference a nil orm on failure.
	defer o1.Release()
	now, _ := time.Parse("2006-01-02 15:04:05:0000", "2018-01-02 15:04:05:0000")
	obj := &Unit{ID: 10, I64: uint64(78962222222), Name: "Hello world", Value: 12.3456, TimeStamp: now, Flag: true}
	ext := &ExtUnit{Unit: obj}
	objList := []interface{}{&Unit{}, &ExtUnit{}, &ExtUnitList{}}
	registerModel(provider, objList)
	extModel, extErr := provider.GetEntityModel(ext)
	if extErr != nil {
		t.Errorf("GetEntityModel failed, err:%s", extErr.Error())
		return
	}
	err = o1.Drop(extModel)
	if err != nil {
		t.Errorf("drop ext failed, err:%s", err.Error())
		return
	}
	err = o1.Create(extModel)
	if err != nil {
		t.Errorf("create ext failed, err:%s", err.Error())
		return
	}
	extModel, extErr = o1.Insert(extModel)
	if extErr != nil {
		t.Errorf("insert ext failed, err:%s", extErr.Error())
		return
	}
	ext = extModel.Interface(true).(*ExtUnit)
	objModel, objErr := provider.GetEntityModel(obj)
	if objErr != nil {
		t.Errorf("GetEntityModel failed, err:%s", objErr.Error())
		return
	}
	objModel, objErr = o1.Insert(objModel)
	if objErr != nil {
		// Bug fix: message previously said "insert ext failed" although this
		// inserts the Unit object.
		t.Errorf("insert obj failed, err:%s", objErr.Error())
		return
	}
	obj = objModel.Interface(true).(*Unit)
	ext2 := &ExtUnitList{Unit: *obj, UnitList: []Unit{}}
	ext2.UnitList = append(ext2.UnitList, *obj)
	ext2Model, ext2Err := provider.GetEntityModel(ext2)
	if ext2Err != nil {
		t.Errorf("GetEntityModel failed, err:%s", ext2Err.Error())
		return
	}
	err = o1.Drop(ext2Model)
	if err != nil {
		t.Errorf("drop ext2 failed, err:%s", err.Error())
		return
	}
	err = o1.Create(ext2Model)
	if err != nil {
		t.Errorf("create ext2 failed, err:%s", err.Error())
		return
	}
	ext2Model, ext2Err = o1.Insert(ext2Model)
	if ext2Err != nil {
		t.Errorf("insert ext2 failed, err:%s", ext2Err.Error())
		return
	}
	_, err = o1.Delete(ext2Model)
	if err != nil {
		t.Errorf("delete ext2 failed, err:%s", err.Error())
		return
	}
}
|
/*
Copyright 2020 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errors
import (
"errors"
"strings"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/constants"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/instrumentation"
"github.com/GoogleContainerTools/skaffold/v2/proto/v1"
protoV2 "github.com/GoogleContainerTools/skaffold/v2/proto/v2"
)
const (
	// Report issue text appended to fallback suggestions.
	reportIssueText = "If above error is unexpected, please open an issue to report this error at " + constants.GithubIssueLink
	// PushImageErr is the error prepended.
	PushImageErr = "could not push image"
)
var (
	// ReportIssueSuggestion is the fallback suggestion asking the user to
	// open a GitHub issue. The config argument is ignored; it exists so the
	// function matches the signature of other suggestion providers.
	ReportIssueSuggestion = func(interface{}) []*proto.Suggestion {
		return []*proto.Suggestion{{
			SuggestionCode: proto.SuggestionCode_OPEN_ISSUE,
			Action:         reportIssueText,
		}}
	}
)
// ActionableErr returns an actionable error message with suggestions
// derived from the error's status-code classification for the given phase.
func ActionableErr(cfg interface{}, phase constants.Phase, err error) *proto.ActionableErr {
	code, suggestions := getErrorCodeFromError(cfg, phase, err)
	actionable := &proto.ActionableErr{
		ErrCode:     code,
		Message:     err.Error(),
		Suggestions: suggestions,
	}
	return actionable
}
// ActionableErrV2 returns an actionable error message with suggestions,
// converting each v1 suggestion into its v2 counterpart.
func ActionableErrV2(cfg interface{}, phase constants.Phase, err error) *protoV2.ActionableErr {
	errCode, suggestions := getErrorCodeFromError(cfg, phase, err)
	suggestionsV2 := make([]*protoV2.Suggestion, len(suggestions))
	for i, suggestion := range suggestions {
		// Copy code and action once; the original assigned Action twice.
		suggestionsV2[i] = &protoV2.Suggestion{
			SuggestionCode: suggestion.SuggestionCode,
			Action:         suggestion.Action,
		}
	}
	return &protoV2.ActionableErr{
		ErrCode:     errCode,
		Message:     err.Error(),
		Suggestions: suggestionsV2,
	}
}
// V2fromV1 converts a v1 ActionableErr into the equivalent v2 message.
func V2fromV1(ae *proto.ActionableErr) *protoV2.ActionableErr {
	suggestionsV2 := make([]*protoV2.Suggestion, len(ae.Suggestions))
	for i, suggestion := range ae.Suggestions {
		// Copy code and action once; the original assigned Action twice.
		suggestionsV2[i] = &protoV2.Suggestion{
			SuggestionCode: suggestion.SuggestionCode,
			Action:         suggestion.Action,
		}
	}
	return &protoV2.ActionableErr{
		ErrCode:     ae.ErrCode,
		Message:     ae.Message,
		Suggestions: suggestionsV2,
	}
}
// ShowAIError resolves err to a user-actionable error, recording its status
// code for instrumentation along the way.
func ShowAIError(cfg interface{}, err error) error {
	// Peel off one layer of wrapping, if any, so the classification below
	// sees the underlying error. Note only a single Unwrap is performed.
	if uErr := errors.Unwrap(err); uErr != nil {
		err = uErr
	}
	// Skaffold-typed errors already carry their own status code.
	if IsSkaffoldErr(err) {
		instrumentation.SetErrorCode(err.(Error).StatusCode())
		return err
	}
	// Known problem attached directly to the error.
	if p, ok := isProblem(err); ok {
		instrumentation.SetErrorCode(p.ErrCode)
		return p.AIError(cfg, err)
	}
	// Fall back to scanning the whole problem catalog (all phases) for a
	// pattern matching the error text.
	for _, problems := range GetProblemCatalogCopy().allErrors {
		for _, p := range problems {
			if p.Regexp.MatchString(err.Error()) {
				instrumentation.SetErrorCode(p.ErrCode)
				return p.AIError(cfg, err)
			}
		}
	}
	return err
}
// getErrorCodeFromError classifies err for the given phase, returning a
// status code together with suggestions for the user.
func getErrorCodeFromError(cfg interface{}, phase constants.Phase, err error) (proto.StatusCode, []*proto.Suggestion) {
	var skaffoldErr Error
	if errors.As(err, &skaffoldErr) {
		// Skaffold errors know their own code and suggestions.
		return skaffoldErr.StatusCode(), skaffoldErr.Suggestions()
	}
	problems, ok := GetProblemCatalogCopy().allErrors[phase]
	if ok {
		for _, problem := range problems {
			if problem.Regexp.MatchString(err.Error()) {
				return problem.ErrCode, problem.Suggestion(cfg)
			}
		}
	}
	return unknownErrForPhase(phase), ReportIssueSuggestion(cfg)
}
// concatSuggestions joins the suggestion actions into one sentence separated
// by " or " and terminated with a period. Returns "" when no suggestion
// contributes any text.
func concatSuggestions(suggestions []*proto.Suggestion) string {
	var b strings.Builder
	for _, suggestion := range suggestions {
		// Only insert a separator once something non-empty has been written.
		if b.Len() > 0 {
			b.WriteString(" or ")
		}
		b.WriteString(suggestion.Action)
	}
	if b.Len() == 0 {
		return ""
	}
	b.WriteString(".")
	return b.String()
}
// unknownErrForPhase returns the catch-all "unknown" status code for the
// given lifecycle phase, or the generic UNKNOWN_ERROR for unrecognized phases.
func unknownErrForPhase(phase constants.Phase) proto.StatusCode {
	switch phase {
	case constants.Build:
		return proto.StatusCode_BUILD_UNKNOWN
	case constants.Init:
		return proto.StatusCode_INIT_UNKNOWN
	case constants.Test:
		return proto.StatusCode_TEST_UNKNOWN
	case constants.Deploy:
		return proto.StatusCode_DEPLOY_UNKNOWN
	case constants.StatusCheck:
		return proto.StatusCode_STATUSCHECK_UNKNOWN
	case constants.Sync:
		return proto.StatusCode_SYNC_UNKNOWN
	case constants.DevInit:
		return proto.StatusCode_DEVINIT_UNKNOWN
	case constants.Cleanup:
		return proto.StatusCode_CLEANUP_UNKNOWN
	default:
		return proto.StatusCode_UNKNOWN_ERROR
	}
}
|
package main
import (
"encoding/json"
"fmt"
"regexp"
"sort"
"strings"
"syscall/js"
"github.com/fanaticscripter/EggContractor/api"
)
// _playerIdPattern accepts IDs of the form "EI" followed by one or more
// digits, case-insensitively. Note the digit count is not constrained here
// even though the error message suggests a 16-digit form.
var _playerIdPattern = regexp.MustCompile(`(?i)^EI\d+$`)
// result is the JSON envelope handed back to the JS caller.
type result struct {
	Successful bool `json:"successful"` // false when Err is set
	Data interface{} `json:"data"` // payload on success
	Err string `json:"error"` // error message on failure
}
// dataResult wraps data in a successful result envelope.
func dataResult(data interface{}) *result {
	res := &result{}
	res.Successful = true
	res.Data = data
	return res
}
// errorResult wraps err's message in a failed result envelope.
func errorResult(err error) *result {
	res := &result{}
	res.Successful = false
	res.Err = err.Error()
	return res
}
// sanitizePlayerId validates playerId against the expected "EI<digits>"
// shape and normalizes it to upper case.
func sanitizePlayerId(playerId string) (string, error) {
	if !_playerIdPattern.MatchString(playerId) {
		return "", fmt.Errorf("ID %v is not in the form EI1234567890123456; please consult \"Where do I find my ID?\"", playerId)
	}
	return strings.ToUpper(playerId), nil
}
// retrieveMissions fetches the player's save via the first-contact API and
// assembles active missions, the launch archive (newest first), mission
// stats, unlock progress, and artifacts progress into one result payload.
func retrieveMissions(playerId string) *result {
	sanitized, err := sanitizePlayerId(playerId)
	if err != nil {
		return errorResult(err)
	}
	playerId = sanitized
	fc, err := api.RequestFirstContact(&api.FirstContactRequestPayload{
		EiUserId: playerId,
	})
	if err != nil {
		return errorResult(err)
	}
	// A syntactically valid but unknown ID yields an empty payload.
	if fc.Data == nil || fc.Data.Progress == nil {
		return errorResult(fmt.Errorf("server response empty, " +
			"please check you have put in the correct ID (the game may silently update it)"))
	}
	hasProPermit := fc.Data.Progress.PermitLevel > 0
	artifactsDB := fc.Data.ArtifactsDb
	activeMissions := make([]*mission, 0)
	launched := make([]*api.MissionInfo, 0)
	// Current missions are all "active"; only those at EXPLORING or beyond
	// count as launched.
	for _, m := range artifactsDB.MissionInfos {
		activeMissions = append(activeMissions, newMission(m))
		if m.Status >= api.MissionInfo_EXPLORING {
			launched = append(launched, m)
		}
	}
	launched = append(launched, artifactsDB.MissionArchive...)
	launchArchive := make([]*mission, len(launched))
	for i, m := range launched {
		launchArchive[i] = newMission(m)
	}
	// Newest launches first; stable sort keeps relative order of ties.
	sort.SliceStable(launchArchive, func(i, j int) bool {
		return launchArchive[i].StartTimestamp > launchArchive[j].StartTimestamp
	})
	stats, progress := generateStatsFromMissionArchive(launched, hasProPermit)
	afxProgress := getArtifactsProgress(artifactsDB)
	return dataResult(struct {
		ActiveMissions []*mission `json:"activeMissions"`
		LaunchArchive []*mission `json:"launchArchive"`
		MissionStats *missionStats `json:"missionStats"`
		UnlockProgress *unlockProgress `json:"unlockProgress"`
		ArtifactsProgress *artifactsProgress `json:"artifactsProgress"`
		Save *api.FirstContact_Payload `json:"save"`
	}{
		ActiveMissions: activeMissions,
		LaunchArchive: launchArchive,
		MissionStats: stats,
		UnlockProgress: progress,
		ArtifactsProgress: afxProgress,
		Save: fc.Data,
	})
}
// main is the wasm entry point: it reads the player ID from the global
// wasmArgs, retrieves the missions, and hands the JSON-encoded result to
// the wasmCallback global.
func main() {
	// I can't think of any communications mechanism other than global variables
	// and callbacks. (Note that we can't set a directly global variable for the
	// result, since when we do that the global variable seems to be somehow
	// "cached" for a while when accessed immediately, so if we run two
	// instances with different input args, when accessing the result of the
	// second run we would somehow still get the result of the first run... I
	// didn't investigate further since the callback route works despite the
	// increased complexity.)
	//
	// Related:
	// https://github.com/golang/go/issues/25612
	// https://stackoverflow.com/q/56398142
	args := js.Global().Get("wasmArgs")
	playerId := args.Get("0").String()
	res := retrieveMissions(playerId)
	// NOTE(review): the Marshal error is discarded; result appears to contain
	// only JSON-encodable fields, but confirm before relying on this.
	encoded, _ := json.Marshal(res)
	js.Global().Call("wasmCallback", js.ValueOf(string(encoded)))
}
|
package main
import (
"flag"
"log"
"net"
"golang.org/x/net/ipv4"
)
var (
	// listenAddr is the UDP address to bind (host optional, port required).
	listenAddr = flag.String("listen-addr", ":10000", "listen addr")
	// batchSize is the number of messages read per ReadBatch call.
	batchSize = flag.Int("batch-size", 1000, "batch size")
)
// main binds a UDP socket and drains it with batched reads via
// x/net/ipv4's PacketConn, accumulating packet and byte counts. Runs forever.
func main() {
	flag.Parse()
	ra, err := net.ResolveUDPAddr("udp", *listenAddr)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.ListenUDP("udp", ra)
	if err != nil {
		log.Fatal(err)
	}
	pconn := ipv4.NewPacketConn(conn)
	// Pre-allocate one 1500-byte buffer (typical Ethernet MTU) per slot.
	rb := make([]ipv4.Message, *batchSize)
	for i := 0; i < *batchSize; i++ {
		rb[i].Buffers = [][]byte{make([]byte, 1500)}
	}
	count := 0
	bytes := 0
	for {
		// ReadBatch fills up to len(rb) messages per call.
		n, err := pconn.ReadBatch(rb, 0)
		if err != nil {
			log.Fatal(err)
		}
		for _, m := range rb[:n] {
			// m.Buffers[0][:m.N]
			bytes += m.N
		}
		// NOTE(review): count and bytes are accumulated but never reported —
		// presumably placeholders for future stats output; confirm intent.
		count += n
	}
}
|
package goxf
import (
"encoding/json"
"fmt"
"github.com/kramerdust/goxf/client"
"io/ioutil"
"net/http"
"strings"
"time"
)
// oxfordURL is the base endpoint of the Oxford Dictionaries v2 API.
const oxfordURL = "https://od-api.oxforddictionaries.com:443/api/v2"
// Client is Dictionary API client
type Client struct {
	appID string // credential sent as the "app_id" request header
	appKey string // credential sent as the "app_key" request header
	httpClient *http.Client // HTTP client configured with a request timeout
}
// NewClient returns a new instance of Client with
// provided appId and appKey. The underlying HTTP client uses a
// 10-second request timeout.
func NewClient(appID, appKey string) *Client {
	return &Client{
		appID:  appID,
		appKey: appKey,
		httpClient: &http.Client{
			Timeout: 10 * time.Second,
		},
	}
}
// GetEntry gets information about one single word from specific language.
// If there is no such word then it returns Not found error.
func (c *Client) GetEntry(lang string, word string) (*client.RetrieveEntry, error) {
	methodURL := fmt.Sprintf("%s/entries/%s/%s", oxfordURL, lang, strings.ToLower(word))
	req, err := http.NewRequest(http.MethodGet, methodURL, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("app_id", c.appID)
	req.Header.Add("app_key", c.appKey)
	res, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body on every path; the original leaked it when the API
	// returned a non-200 status.
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		data, _ := ioutil.ReadAll(res.Body)
		return nil, fmt.Errorf("got http code: %d, body:\n%s", res.StatusCode, string(data))
	}
	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	entry := &client.RetrieveEntry{}
	// Bug fix: the Unmarshal error was previously discarded, so malformed
	// responses produced a zero-valued entry with a nil error.
	if err := json.Unmarshal(data, entry); err != nil {
		return nil, err
	}
	return entry, nil
}
|
package commands
import (
"fmt"
"github.com/brooklyncentral/brooklyn-cli/api/entity_policies"
"github.com/brooklyncentral/brooklyn-cli/command_metadata"
"github.com/brooklyncentral/brooklyn-cli/error_handler"
"github.com/brooklyncentral/brooklyn-cli/net"
"github.com/brooklyncentral/brooklyn-cli/scope"
"github.com/codegangsta/cli"
)
// DestroyPolicy is the CLI command that removes a policy from an entity.
type DestroyPolicy struct {
	network *net.Network // connection details for the target Brooklyn server
}
// NewDestroyPolicy constructs the destroy-policy command bound to network.
func NewDestroyPolicy(network *net.Network) (cmd *DestroyPolicy) {
	cmd = &DestroyPolicy{network: network}
	return
}
// Metadata describes how the CLI should expose the destroy-policy command.
func (cmd *DestroyPolicy) Metadata() command_metadata.CommandMetadata {
	meta := command_metadata.CommandMetadata{
		Name:        "destroy-policy",
		Description: "Destroy a policy",
		Usage:       "BROOKLYN_NAME SCOPE destroy-policy POLICY",
		Flags:       []cli.Flag{},
	}
	return meta
}
// Run verifies the login URL, asks the server to destroy the policy named by
// the first CLI argument, and prints the server's response. Exits via the
// error handler on any failure.
func (cmd *DestroyPolicy) Run(scope scope.Scope, c *cli.Context) {
	if err := net.VerifyLoginURL(cmd.network); err != nil {
		error_handler.ErrorExit(err)
	}
	spec, err := entity_policies.DestroyPolicy(cmd.network, scope.Application, scope.Entity, c.Args().First())
	if err != nil {
		error_handler.ErrorExit(err)
	}
	fmt.Println(spec)
}
|
/*
* This file is simply a mirror of the interfaces in interfaces/interfaces.go.
* This was done in order to prevent an import cycle.
*/
package cop
import (
"fmt"
"os"
real "github.com/hyperledger/fabric/cop/api"
def "github.com/hyperledger/fabric/cop/lib/defaultImpl"
)
// Mgr is the main interface to COP functionality
type Mgr interface {
	real.Mgr
}
// Client is a COP client
type Client interface {
	real.Client
}
// CertMgr is a COP certificate manager
type CertMgr interface {
	real.CertMgr
}
// JoinRequest is the state of a request to join the blockchain network
type JoinRequest struct {
	real.JoinRequest
}
// JoinRequestListener is a listener for join requests
type JoinRequestListener real.JoinRequestListener
// JoinRequestStatus is the status of a join request
type JoinRequestStatus real.JoinRequestStatus
// Values denoting the possible values of the JoinRequestStatus
const (
	JRSWaiting = real.JRSWaiting
	JRSApproved = real.JRSApproved
	JRSDenied = real.JRSDenied
)
// JoinResponseType are the types of responses which can be provided to a JoinRequest
type JoinResponseType real.JoinResponseType
// Values denoting the possible values of the JoinResponseType
const (
	JRTApprove = real.JRTApprove
	JRTDeny = real.JRTDeny
	JRTAbstain = real.JRTAbstain
	JRTCount = real.JRTCount
)
// CertHandler provides functions related to a certificate
type CertHandler interface {
	real.CertHandler
}
// KeyHandler provides functions related to a key
type KeyHandler interface {
	real.KeyHandler
}
// Registration information
type Registration struct {
	real.Registration
}
// Identity is any type of identity which is opaque for now
type Identity real.Identity
// The following are all the error codes returned by COP.
// The values begin with "100000" to avoid overlap with CFSSL errors.
// Add all new errors to the end of the current list.
const (
	// NotImplemented means not yet implemented but plans to support
	NotImplemented = real.NotImplemented
	// NotSupported means no current plans to support
	NotSupported = real.NotSupported
	// InvalidProviderName means the COP.PROVIDER env var named an unknown provider
	InvalidProviderName = real.InvalidProviderName
	// TooManyArgs means a function received more arguments than expected
	TooManyArgs = real.TooManyArgs
	// NotInitialized means the COP manager has not been set up yet
	NotInitialized = real.NotInitialized
)
// Error is an interface with a Code method
type Error interface {
	real.Error
}
func init() {
provider := os.Getenv("COP.PROVIDER")
if provider == "" {
provider = "default"
}
if provider == "default" {
real.SetMgr(new(def.Mgr))
} else {
fmt.Printf("invalid COP provider: %s\n", provider)
os.Exit(1)
}
}
// NewClient creates a COP client backed by the provider selected in init.
func NewClient() Client {
	return real.NewClient()
}
// NewCertMgr creates a COP certificate manager backed by the provider
// selected in init.
func NewCertMgr() CertMgr {
	return real.NewCertMgr()
}
|
package micro
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
)
// reverseProxyFunc is the gateway registration hook shared by the tests;
// TestErrorReverseProxyFunc later swaps it for a failing stub.
var reverseProxyFunc ReverseProxyFunc
// httpPort and grpcPort are the fixed ports used by every test in this file.
var httpPort, grpcPort uint16
func init() {
	// Default no-op proxy registration: succeeds without wiring any routes.
	reverseProxyFunc = func(
		ctx context.Context,
		mux *runtime.ServeMux,
		grpcHostAndPort string,
		opts []grpc.DialOption,
	) error {
		return nil
	}
	httpPort = 8888
	grpcPort = 9999
}
// TestNewService starts a service and exercises its HTTP endpoints, verifies
// that a second service cannot bind the busy ports, then restarts the
// service without redoc and checks that /docs is disabled.
func TestNewService(t *testing.T) {
	redoc := &RedocOpts{
		Up: true,
	}
	redoc.AddSpec("PetStore", "https://rebilly.github.io/ReDoc/swagger.yaml")
	s := NewService(
		[]grpc.StreamServerInterceptor{},
		[]grpc.UnaryServerInterceptor{},
		redoc,
	)
	go func() {
		if err := s.Start(httpPort, grpcPort, reverseProxyFunc); err != nil {
			t.Errorf("failed to serve: %v", err)
		}
	}()
	// wait 1 second for the server start
	time.Sleep(1 * time.Second)
	// check if the http server is up (listening must fail on the busy port)
	httpHost := fmt.Sprintf(":%d", httpPort)
	_, err := net.Listen("tcp", httpHost)
	assert.Error(t, err)
	// check if the grpc server is up (listening must fail on the busy port)
	grpcHost := fmt.Sprintf(":%d", grpcPort)
	_, err = net.Listen("tcp", grpcHost)
	assert.Error(t, err)
	// check if the http endpoint works
	client := &http.Client{}
	resp, err := client.Get(fmt.Sprintf("http://127.0.0.1:%d/swagger.json", httpPort))
	if err != nil {
		// Fatal instead of Error: the assertions below dereference resp,
		// which is nil when the request failed.
		t.Fatal(err)
	}
	resp.Body.Close()
	assert.Equal(t, 404, resp.StatusCode)
	assert.Len(t, resp.Header.Get("X-Request-Id"), 36)
	resp, err = client.Get(fmt.Sprintf("http://127.0.0.1:%d/docs", httpPort))
	if err != nil {
		t.Fatal(err)
	}
	resp.Body.Close()
	assert.Equal(t, 200, resp.StatusCode)
	assert.Len(t, resp.Header.Get("X-Request-Id"), 36)
	resp, err = client.Get(fmt.Sprintf("http://127.0.0.1:%d/metrics", httpPort))
	if err != nil {
		t.Fatal(err)
	}
	resp.Body.Close()
	assert.Equal(t, 200, resp.StatusCode)
	assert.Len(t, resp.Header.Get("X-Request-Id"), 36)
	// another service
	s2 := NewService(
		[]grpc.StreamServerInterceptor{},
		[]grpc.UnaryServerInterceptor{},
		&RedocOpts{
			Up: false,
		},
	)
	// http port 8888 already in use
	err = s2.startGrpcGateway(httpPort, grpcPort, reverseProxyFunc)
	assert.Error(t, err)
	// grpc port 9999 already in use
	err = s2.startGrpcServer(grpcPort)
	assert.Error(t, err)
	// stop the first server
	s.Stop()
	// run a new service again
	s = NewService(
		[]grpc.StreamServerInterceptor{},
		[]grpc.UnaryServerInterceptor{},
		&RedocOpts{
			Up: false,
		},
	)
	go func() {
		if err := s.Start(httpPort, grpcPort, reverseProxyFunc); err != nil {
			t.Errorf("failed to serve: %v", err)
		}
	}()
	// wait 1 second for the server start
	time.Sleep(1 * time.Second)
	// the redoc is not up for the second server
	resp, err = client.Get(fmt.Sprintf("http://127.0.0.1:%d/docs", httpPort))
	if err != nil {
		t.Fatal(err)
	}
	resp.Body.Close()
	assert.Equal(t, http.StatusNotImplemented, resp.StatusCode)
	assert.Len(t, resp.Header.Get("X-Request-Id"), 36)
}
// TestErrorReverseProxyFunc verifies that an error returned by the reverse
// proxy registration function is propagated by startGrpcGateway.
func TestErrorReverseProxyFunc(t *testing.T) {
	s := NewService(
		[]grpc.StreamServerInterceptor{},
		[]grpc.UnaryServerInterceptor{},
		&RedocOpts{
			Up: true,
		},
	)
	// mock error from reverseProxyFunc
	const errText = "reverse proxy func error"
	reverseProxyFunc = func(
		ctx context.Context,
		mux *runtime.ServeMux,
		grpcHostAndPort string,
		opts []grpc.DialOption,
	) error {
		return errors.New(errText)
	}
	err := s.startGrpcGateway(httpPort, grpcPort, reverseProxyFunc)
	assert.EqualError(t, err, errText)
}
// TestDefaultAnnotator checks that the X-Request-Id header is copied into
// the outgoing metadata under the lowercased key.
func TestDefaultAnnotator(t *testing.T) {
	req := httptest.NewRequest("GET", "/", nil)
	req.Header.Set("X-Request-Id", "uuid")
	md := DefaultAnnotator(context.TODO(), req)
	id, ok := md["x-request-id"]
	assert.True(t, ok)
	assert.Equal(t, "uuid", id[0])
}
|
/*-------------------------------------------------------------------------
*
* discoverer.go
* Discoverer interface
*
*
* Copyright (c) 2021, Alibaba Group Holding Limited
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* IDENTIFICATION
* internal/discover/discoverer.go
*-------------------------------------------------------------------------
*/
package discover
import (
"sync"
)
// Service : the discover service should be implemented
// by every discoverer; callers drive the Init -> Run -> Stop -> Exit
// lifecycle and poll results via Fetch.
type Service interface {
	// DiscoverInit prepares the discoverer before it is run.
	DiscoverInit()
	// DiscoverRun starts discovery; wg presumably tracks the worker's
	// lifetime — confirm against implementations.
	DiscoverRun(wg *sync.WaitGroup)
	// DiscoverStop asks a running discoverer to stop.
	DiscoverStop() error
	// DiscoverExit releases resources when the discoverer is retired.
	DiscoverExit() error
	// DiscoverFetch returns a discovery result; the bool reports whether
	// a result was available.
	DiscoverFetch() (interface{}, bool)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package typec
import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/bundles/cros/typec/setup"
typecutilshelper "chromiumos/tast/local/bundles/cros/typec/typecutils"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/cswitch"
"chromiumos/tast/local/typecutils"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the TBTDockGatkexUSB4 test with the tast framework,
// declaring its data files, runtime variables, fixture, and hardware
// dependencies.
func init() {
	testing.AddTest(&testing.Test{
		Func: TBTDockGatkexUSB4,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc: "Verifies test TBT Docking station on USB4 gatkex card TBT port via USB4 port using 40G passive cable",
		Contacts: []string{"ambalavanan.m.m@intel.com", "intel-chrome-system-automation-team@intel.com"},
		SoftwareDeps: []string{"chrome"},
		Attr: []string{"group:typec"},
		Data: []string{"test_config.json", "testcert.p12", "bear-320x240.h264.mp4", "video.html", "playback.js"},
		Vars: []string{"typec.dutTbtPort", "typec.cSwitchPort", "typec.domainIP", "typec.tbtDockPort", "ui.signinProfileTestExtensionManifestKey"},
		Fixture: "chromeLoggedInThunderbolt",
		HardwareDeps: hwdep.D(setup.ThunderboltSupportedDevices()),
		Timeout: 7 * time.Minute,
	})
}
// TBTDockGatkexUSB4 hot-plugs a TBT dock through a c-switch, checks USB4/TBT
// enumeration, transfers a 2 GiB file to and from the dock's USB storage
// with checksum verification, and validates display modes and video playback
// through the dock.
func TBTDockGatkexUSB4(ctx context.Context, s *testing.State) {
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	cr := s.FixtValue().(*chrome.Chrome)
	const (
		// Config file which contains expected values of TBT parameters.
		testConfig = "test_config.json"
		// Source file name.
		transFilename = "tbt_usb3_test_file.txt"
	)
	// TBT port ID in the DUT.
	tbtPort := s.RequiredVar("typec.dutTbtPort")
	// Dock port ID in the DUT.
	dockPort := s.RequiredVar("typec.tbtDockPort")
	// cswitch port ID.
	cSwitchON := s.RequiredVar("typec.cSwitchPort")
	// IP address of Tqc server hosting device.
	domainIP := s.RequiredVar("typec.domainIP")
	// Media removable path.
	const mediaRemovable = "/media/removable/"
	if err := typecutils.EnablePeripheralDataAccess(ctx, s.DataPath("testcert.p12")); err != nil {
		s.Fatal("Failed to enable peripheral data access setting: ", err)
	}
	if err := cr.ContinueLogin(ctx); err != nil {
		s.Fatal("Failed to login: ", err)
	}
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		// Bug fix: the original passed the state object itself as the first
		// argument to Fatal, garbling the log message.
		s.Fatal("Failed to create test API connection: ", err)
	}
	// Read json config file.
	jsonData, err := ioutil.ReadFile(s.DataPath(testConfig))
	if err != nil {
		s.Fatalf("Failed to open %v file : %v", testConfig, err)
	}
	var data map[string]interface{}
	if err := json.Unmarshal(jsonData, &data); err != nil {
		s.Fatal("Failed to read json: ", err)
	}
	// Checking for USB4 config data.
	usb4Val, ok := data["USB4"].(map[string]interface{})
	if !ok {
		// Bug fix: this message previously said "TBT config data".
		s.Fatal("Failed to find USB4 config data in JSON file")
	}
	// Checking for TBT config data.
	tbtVal, ok := data["TBT"].(map[string]interface{})
	if !ok {
		s.Fatal("Failed to find TBT config data in JSON file")
	}
	dirsBeforePlug, err := typecutilshelper.RemovableDirs(mediaRemovable)
	if err != nil {
		s.Fatal("Failed to get removable devices: ", err)
	}
	// Create C-Switch session that performs hot plug-unplug on TBT device.
	sessionID, err := cswitch.CreateSession(ctx, domainIP)
	if err != nil {
		s.Fatal("Failed to create sessionID: ", err)
	}
	const cSwitchOFF = "0"
	defer func(ctx context.Context) {
		s.Log("Cleanup")
		if err := cswitch.ToggleCSwitchPort(ctx, sessionID, cSwitchOFF, domainIP); err != nil {
			s.Error("Failed to disable c-switch port: ", err)
		}
		if err := cswitch.CloseSession(cleanupCtx, sessionID, domainIP); err != nil {
			s.Log("Failed to close sessionID: ", err)
		}
	}(cleanupCtx)
	if err := cswitch.ToggleCSwitchPort(ctx, sessionID, cSwitchON, domainIP); err != nil {
		s.Fatal("Failed to enable c-switch port: ", err)
	}
	if err := typecutils.CheckUSBPDMuxInfo(ctx, "USB4=1"); err != nil {
		s.Fatal("Failed to verify dmesg logs: ", err)
	}
	if _, err := cswitch.IsDeviceEnumerated(ctx, usb4Val["device_name"].(string), tbtPort); err != nil {
		s.Fatal("Failed to enumerate the TBT device: ", err)
	}
	if _, err := cswitch.IsDeviceEnumerated(ctx, tbtVal["device_name"].(string), dockPort); err != nil {
		s.Fatal("Failed to enumerate the TBT device: ", err)
	}
	sourcePath, err := ioutil.TempDir("", "temp")
	if err != nil {
		s.Fatal("Failed to create temp directory: ", err)
	}
	defer os.RemoveAll(sourcePath)
	// Source file path.
	sourceFilePath := path.Join(sourcePath, transFilename)
	// Create a file with size.
	file, err := os.Create(sourceFilePath)
	if err != nil {
		s.Fatal("Failed to create file: ", err)
	}
	// Bug fix: the file handle was never closed in the original.
	defer file.Close()
	if err := file.Truncate(int64(1024 * 1024 * 1024 * 2)); err != nil {
		s.Fatal("Failed to truncate file with size: ", err)
	}
	var dirsAfterPlug []string
	// Waits for USB pendrive detection till timeout.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		dirsAfterPlug, err = typecutilshelper.RemovableDirs(mediaRemovable)
		if err != nil {
			return errors.Wrap(err, "failed to get removable devices")
		}
		if len(dirsBeforePlug) >= len(dirsAfterPlug) {
			return errors.New("failed to mount removable devices")
		}
		return nil
	}, &testing.PollOptions{Timeout: 40 * time.Second, Interval: 250 * time.Millisecond}); err != nil {
		s.Fatal("Timeout waiting for USB pendrive detection: ", err)
	}
	// Verify USB pendrive speed.
	speedOut, err := typecutilshelper.MassStorageUSBSpeed(ctx)
	if err != nil {
		s.Fatal("Failed to check for USB speed: ", err)
	}
	typecUSBSpeed := "5000M"
	speedFound := false
	for _, speed := range speedOut {
		if speed == typecUSBSpeed {
			speedFound = true
			break
		}
	}
	if !speedFound {
		s.Fatalf("Unexpected USB device speed: want %q, got %q", typecUSBSpeed, speedOut)
	}
	devicePath := typecutilshelper.TbtMountPath(dirsAfterPlug, dirsBeforePlug)
	if devicePath == "" {
		s.Fatal("Failed to get vaild devicePath")
	}
	// Destination file path.
	destinationFilePath := path.Join(mediaRemovable, devicePath, transFilename)
	defer os.Remove(destinationFilePath)
	localHash, err := typecutilshelper.FileChecksum(sourceFilePath)
	if err != nil {
		s.Error("Failed to calculate hash of the source file: ", err)
	}
	// Tranferring file from source to destination.
	testing.ContextLogf(ctx, "Transferring file from %s to %s", sourceFilePath, destinationFilePath)
	if err := typecutilshelper.CopyFile(sourceFilePath, destinationFilePath); err != nil {
		s.Fatal("Failed to copy file: ", err)
	}
	destHash, err := typecutilshelper.FileChecksum(destinationFilePath)
	if err != nil {
		s.Error("Failed to calculate hash of the destination file: ", err)
	}
	if !bytes.Equal(localHash, destHash) {
		s.Errorf("The hash doesn't match (destHash path: %q)", destHash)
	}
	// Tranferring file from destination to source.
	testing.ContextLogf(ctx, "Transferring file from %s to %s", destinationFilePath, sourceFilePath)
	if err := typecutilshelper.CopyFile(destinationFilePath, sourceFilePath); err != nil {
		s.Fatal("Failed to copy file: ", err)
	}
	if err := typecutils.FindConnectedDisplay(ctx, 1); err != nil {
		s.Fatal("Failed to find connected display: ", err)
	}
	if err := typecutils.CheckDisplayInfo(ctx, false, true); err != nil {
		s.Fatal("Failed to check display info : ", err)
	}
	if err := typecutils.VerifyDisplay4KResolution(ctx); err != nil {
		s.Fatal("Failed to Verify display 4K resolution: ", err)
	}
	// Set mirror mode display.
	if err := typecutils.SetMirrorDisplay(ctx, tconn, true); err != nil {
		s.Fatal("Failed to set mirror mode: ", err)
	}
	srv := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer srv.Close()
	url := srv.URL + "/video.html"
	conn, err := cr.NewConn(ctx, url)
	if err != nil {
		s.Fatal("Failed to load video.html: ", err)
	}
	defer conn.Close()
	videoFile := "bear-320x240.h264.mp4"
	if err := conn.Call(ctx, nil, "playRepeatedly", videoFile); err != nil {
		s.Fatal("Failed to play video: ", err)
	}
}
|
package flow
import (
"github.com/futurehomeno/fimpgo"
"github.com/futurehomeno/fimpgo/fimptype"
actfimp "github.com/thingsplex/tpflow/node/action/fimp"
trigfimp "github.com/thingsplex/tpflow/node/trigger/fimp"
"github.com/imdario/mergo"
"github.com/mitchellh/mapstructure"
"strings"
)
// SendInclusionReport builds a FIMP thing inclusion report describing this
// flow and publishes it on the flow adapter event topic. Trigger nodes
// flagged RegisterAsVirtualService are exposed as "in" interfaces and action
// nodes as "out" interfaces of virtual services, grouped by the configured
// virtual service group (falling back to the node id).
func (fl *Flow) SendInclusionReport() {
	fl.getLog().Info("Generating inclusion report")
	report := fimptype.ThingInclusionReport{
		Type:           "flow",
		Address:        fl.Id,
		Alias:          fl.FlowMeta.Name,
		CommTechnology: "flow",
		PowerSource:    "ac",
		ProductName:    fl.FlowMeta.Name,
		ProductHash:    "flow_" + fl.Id,
		SwVersion:      "1.0",
		ProductId:      "flow_1",
		ManufacturerId: "fh",
		Security:       "tls",
		// Note: the original initialized Groups twice; once is enough.
		Groups: []string{},
	}
	var services []fimptype.Service
	// addGroup records a group name in report.Groups exactly once.
	addGroup := func(group string) {
		for i := range report.Groups {
			if report.Groups[i] == group {
				return
			}
		}
		report.Groups = append(report.Groups, group)
	}
	// getService returns the existing service with the given name and group,
	// or a freshly initialized one. The boolean reports whether the service
	// is new and must still be appended to services after it is filled in.
	getService := func(name string, group string) (*fimptype.Service, bool) {
		for i := range services {
			if services[i].Name == name && services[i].Groups[0] == group {
				return &services[i], false
			}
		}
		service := fimptype.Service{
			Name:    name,
			Groups:  []string{group},
			Enabled: true,
			Tags:    []string{},
			Props:   map[string]interface{}{},
		}
		addGroup(group)
		return &service, true
	}
	// serviceAddress strips the command/event topic prefixes from a node
	// address so the service address is direction-neutral.
	serviceAddress := func(address string) string {
		address = strings.Replace(address, "pt:j1/mt:cmd", "", -1)
		return strings.Replace(address, "pt:j1/mt:evt", "", -1)
	}
	for i := range fl.nodes {
		meta := fl.nodes[i].GetMetaNode()
		if fl.nodes[i].IsStartNode() {
			var config trigfimp.TriggerConfig
			if err := mapstructure.Decode(meta.Config, &config); err != nil {
				fl.getLog().Error("Fail to register trigger.Error ", err)
			} else if config.RegisterAsVirtualService {
				fl.getLog().Debug("New trigger to add ")
				group := config.VirtualServiceGroup
				if group == "" {
					group = string(meta.Id)
				}
				service, isNew := getService(meta.Service, group)
				intf := fimptype.Interface{
					Type:      "in",
					MsgType:   meta.ServiceInterface,
					ValueType: config.InputVariableType,
					Version:   "1",
				}
				if isNew {
					fl.getLog().Debug("Adding new trigger ")
					service.Alias = meta.Label
					service.Address = serviceAddress(meta.Address)
					service.Interfaces = []fimptype.Interface{intf}
				} else {
					service.Interfaces = append(service.Interfaces, intf)
				}
				if len(config.VirtualServiceProps) > 0 {
					fl.getLog().Debug("Setting service props from Trigger :", config.VirtualServiceProps)
					// Bug fix: the Merge error was previously discarded.
					if err := mergo.Merge(&service.Props, config.VirtualServiceProps); err != nil {
						fl.getLog().Error("Fail to merge service props.Error ", err)
					}
				}
				if isNew {
					services = append(services, *service)
				}
			}
		}
		if meta.Type == "action" {
			config := actfimp.NodeConfig{}
			if err := mapstructure.Decode(meta.Config, &config); err != nil {
				fl.getLog().Error("Fail to register action .Error ", err)
			} else if config.RegisterAsVirtualService {
				group := config.VirtualServiceGroup
				if group == "" {
					group = string(meta.Id)
				}
				service, isNew := getService(meta.Service, group)
				intf := fimptype.Interface{
					Type:      "out",
					MsgType:   meta.ServiceInterface,
					ValueType: config.VariableType,
					Version:   "1",
				}
				if isNew {
					service.Alias = meta.Label
					service.Address = serviceAddress(meta.Address)
					// Bug fix: previously the interface was set here AND
					// appended again unconditionally below, so every new
					// action service carried a duplicated interface. Mirror
					// the trigger branch instead.
					service.Interfaces = []fimptype.Interface{intf}
				} else {
					service.Interfaces = append(service.Interfaces, intf)
				}
				if len(config.VirtualServiceProps) > 0 {
					fl.getLog().Debug("Setting service props from Action :", config.VirtualServiceProps)
					service.Props = config.VirtualServiceProps
				}
				if isNew {
					services = append(services, *service)
				}
			}
		}
	}
	report.Services = services
	msg := fimpgo.NewMessage("evt.thing.inclusion_report", "flow", "object", report, nil, nil, nil)
	addr, _ := fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:ad/rn:flow/ad:1")
	fimpTransportInstance := fl.connectorRegistry.GetInstance("fimpmqtt")
	if fimpTransportInstance == nil {
		fl.getLog().Error("Connector registry doesn't have fimp instance")
		return
	}
	msgTransport, ok := fimpTransportInstance.Connection.GetConnection().(*fimpgo.MqttTransport)
	if !ok {
		// Bug fix: previously Publish was still invoked after a failed cast,
		// panicking on the nil transport.
		fl.getLog().Error("can't cast connection to mqttfimpgo ")
		return
	}
	msgTransport.Publish(addr, msg)
	fl.getLog().Info("Inclusion report is sent")
}
// SendExclusionReport publishes a FIMP thing exclusion report for this flow
// so the rest of the system knows the flow's virtual device is gone.
func (fl *Flow) SendExclusionReport() {
	report := fimptype.ThingExclusionReport{Address: fl.Id}
	msg := fimpgo.NewMessage("evt.thing.exclusion_report", "flow", "object", report, nil, nil, nil)
	addr, _ := fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:ad/rn:flow/ad:1")
	fimpTransportInstance := fl.connectorRegistry.GetInstance("fimpmqtt")
	if fimpTransportInstance == nil {
		fl.getLog().Error("Connector registry doesn't have fimp instance")
		return
	}
	msgTransport, ok := fimpTransportInstance.Connection.GetConnection().(*fimpgo.MqttTransport)
	if !ok {
		// Bug fix: previously Publish was still invoked after a failed cast,
		// panicking on the nil transport.
		fl.getLog().Error("can't cast connection to mqttfimpgo ")
		return
	}
	msgTransport.Publish(addr, msg)
}
|
package validate
import (
"fmt"
"testing"
)
// ExampleV_Validate demonstrates tag-driven validation: each field's
// "validate" tag names the validators applied to it; untagged fields are
// skipped.
func ExampleV_Validate() {
	type X struct {
		A string `validate:"long"`
		B string `validate:"short"`
		C string `validate:"long,short"`
		D string
	}
	atLeastFive := func(v interface{}) error {
		if s := v.(string); len(s) < 5 {
			return fmt.Errorf("%q is too short", s)
		}
		return nil
	}
	underFive := func(v interface{}) error {
		if s := v.(string); len(s) >= 5 {
			return fmt.Errorf("%q is too long", s)
		}
		return nil
	}
	validators := V{"long": atLeastFive, "short": underFive}
	fmt.Println(validators.Validate(X{
		A: "hello there",
		B: "hi",
		C: "help me",
		D: "I am not validated",
	}))
	// Output: [field C is invalid: "help me" is too long]
}
// TestV_Validate_allgood checks that a struct passing all validators
// produces no errors.
func TestV_Validate_allgood(t *testing.T) {
	type X struct {
		A int `validate:"odd"`
	}
	vd := V{
		"odd": func(v interface{}) error {
			if n := v.(int); n&1 == 0 {
				return fmt.Errorf("%d is not odd", n)
			}
			return nil
		},
	}
	if errs := vd.Validate(X{A: 1}); errs != nil {
		t.Fatalf("unexpected errors for a valid struct: %v", errs)
	}
}
// TestV_Validate_allgoodptr checks that validation also succeeds when the
// struct is passed through a pointer.
func TestV_Validate_allgoodptr(t *testing.T) {
	type X struct {
		A int `validate:"odd"`
	}
	vd := V{
		"odd": func(v interface{}) error {
			if n := v.(int); n&1 == 0 {
				return fmt.Errorf("%d is not odd", n)
			}
			return nil
		},
	}
	if errs := vd.Validate(&X{A: 1}); errs != nil {
		t.Fatalf("unexpected errors for a valid struct: %v", errs)
	}
}
// TestV_Validate_undef checks that referencing a validator that was never
// registered yields exactly one descriptive error.
func TestV_Validate_undef(t *testing.T) {
	type X struct {
		A string `validate:"oops"`
	}
	errs := make(V).Validate(X{A: "oh my"})
	switch {
	case len(errs) == 0:
		t.Fatal("no errors returned for an undefined validator")
	case len(errs) > 1:
		t.Fatalf("too many errors returns for an undefined validator: %v", errs)
	}
	if got := errs[0].Error(); got != `field A is invalid: undefined validator: "oops"` {
		t.Fatal("wrong message for an undefined validator:", got)
	}
}
// TestV_Validate_multi checks that when several validators fail on one
// field, all failures are reported in declaration order.
func TestV_Validate_multi(t *testing.T) {
	type X struct {
		A int `validate:"nonzero,odd"`
	}
	vd := V{
		"nonzero": func(v interface{}) error {
			if v.(int) == 0 {
				return fmt.Errorf("should be nonzero")
			}
			return nil
		},
		"odd": func(v interface{}) error {
			if n := v.(int); n&1 == 0 {
				return fmt.Errorf("%d is not odd", n)
			}
			return nil
		},
	}
	errs := vd.Validate(X{A: 0})
	if len(errs) != 2 {
		t.Fatalf("wrong number of errors for two failures: %v", errs)
	}
	if errs[0].Error() != "field A is invalid: should be nonzero" {
		t.Fatal("first error should be nonzero:", errs[0])
	}
	if errs[1].Error() != "field A is invalid: 0 is not odd" {
		t.Fatal("second error should be odd:", errs[1])
	}
}
// ExampleV_Validate_struct demonstrates the special "struct" validator tag:
// the embedded struct's own fields are validated recursively, and further
// validators on the embedded field receive the whole struct value.
func ExampleV_Validate_struct() {
	type X struct {
		A int `validate:"nonzero"`
	}
	type Y struct {
		X `validate:"struct,odd"`
	}
	vd := V{
		"nonzero": func(v interface{}) error {
			if v.(int) == 0 {
				return fmt.Errorf("should be nonzero")
			}
			return nil
		},
		"odd": func(v interface{}) error {
			if x := v.(X); x.A&1 == 0 {
				return fmt.Errorf("%d is not odd", x.A)
			}
			return nil
		},
	}
	for _, err := range vd.Validate(Y{X{A: 0}}) {
		fmt.Println(err)
	}
	// Output: field X.A is invalid: should be nonzero
	// field X is invalid: 0 is not odd
}
// TestV_Validate_uninterfaceable checks that a tagged but unexported field,
// whose value cannot be extracted via reflection, is silently skipped.
func TestV_Validate_uninterfaceable(t *testing.T) {
	type X struct {
		a int `validate:"nonzero"`
	}
	vd := V{
		"nonzero": func(v interface{}) error {
			if v.(int) == 0 {
				return fmt.Errorf("should be nonzero")
			}
			return nil
		},
	}
	if errs := vd.Validate(X{a: 0}); len(errs) != 0 {
		t.Fatal("wrong number of errors for two failures:", errs)
	}
}
// TestV_Validate_nonstruct checks that non-struct values pass validation
// unconditionally, even when validators are registered.
func TestV_Validate_nonstruct(t *testing.T) {
	vd := V{
		"wrong": func(v interface{}) error {
			return fmt.Errorf("WRONG: %v", v)
		},
	}
	if errs := vd.Validate(7); errs != nil {
		t.Fatalf("non-structs should always pass validation: %v", errs)
	}
}
// TestV_ValidateAndTag checks that ValidateAndTag reports the field under
// the name taken from the alternate struct tag ("somethin" here) rather
// than the Go field name.
func TestV_ValidateAndTag(t *testing.T) {
	type X struct {
		A int `validate:"odd" somethin:"hiya"`
	}
	vd := make(V)
	vd["odd"] = func(i interface{}) error {
		n := i.(int)
		if n&1 == 0 {
			return fmt.Errorf("%d is not odd", n)
		}
		return nil
	}
	errs := vd.ValidateAndTag(X{
		A: 2,
	}, "somethin")
	if len(errs) != 1 {
		t.Fatalf("unexpected quantity of errors: %v", errs)
	}
	// Robustness fix: the unchecked assertion errs[0].(BadField) would
	// panic the whole test binary if the error had a different type.
	bf, ok := errs[0].(BadField)
	if !ok {
		t.Fatalf("error is not a BadField: %T", errs[0])
	}
	if bf.Field != "hiya" {
		t.Fatalf("wrong field name in BadField: %q", bf.Field)
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package clipboardhistory
import (
"context"
"fmt"
"strings"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/launcher"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/ossettings"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// contextMenuClipboardTestParam binds a human-readable name to the
// surface-specific implementation exercised by ContextMenuClipboard.
type contextMenuClipboardTestParam struct {
	testName string // label used for the faillog UI dump on failure
	testImpl clipboardTest
}
// init registers the three parameterized variants of the test (browser
// address bar, OS Settings search box, bubble launcher search field).
func init() {
	testing.AddTest(&testing.Test{
		Func: ContextMenuClipboard,
		// TODO(b/243339088): There is no timeline for adding a clipboard option to the Lacros context menu,
		// therefore, lacros will not be added to this test for now.
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verifies the clipboard option in the context menu is working properly within several apps by left-clicking an option",
		Contacts: []string{
			"cienet-development@googlegroups.com",
			"chromeos-sw-engprod@google.com",
			"victor.chen@cienet.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Vars:         []string{"ui.gaiaPoolDefault"},
		Fixture:      "chromeLoggedIn",
		// Each parameter supplies a clipboardTest implementation; the shared
		// body in ContextMenuClipboard drives it.
		Params: []testing.Param{{
			Name: "browser",
			Val: &contextMenuClipboardTestParam{
				testName: "ash_Chrome",
				testImpl: &browserTest{},
			},
		}, {
			Name: "settings",
			Val: &contextMenuClipboardTestParam{
				testName: "settings_App",
				testImpl: &settingsTest{},
			},
		}, {
			Name: "launcher",
			Val: &contextMenuClipboardTestParam{
				testName: "bubble_launcher",
				testImpl: &launcherTest{},
			},
		},
		},
	})
}
// clipboardResource bundles the UI automation handles and fixture data
// shared by every surface-specific clipboard test implementation.
type clipboardResource struct {
	ui    *uiauto.Context
	kb    *input.KeyboardEventWriter
	br    *browser.Browser
	tconn *chrome.TestConn
	// testContents are the strings seeded into clipboard history and then
	// pasted back for verification.
	testContents []string
}
// ContextMenuClipboard verifies that it is possible to open clipboard history via various surfaces' context menus.
// It seeds clipboard history, opens the surface chosen by the test
// parameter, then pastes each entry via the context menu's clipboard option.
func ContextMenuClipboard(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to test API: ", err)
	}
	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to get keyboard: ", err)
	}
	defer kb.Close()
	// Reserve five seconds of the deadline for the deferred cleanup below,
	// which runs with the original (unshortened) context.
	cleanUpCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	resource := &clipboardResource{
		ui:           uiauto.New(tconn),
		kb:           kb,
		br:           cr.Browser(),
		tconn:        tconn,
		testContents: []string{"abc", "123"},
	}
	// Seed clipboard history with the strings that will be pasted back.
	s.Log("Setup clipboard history")
	for _, text := range resource.testContents {
		if err := ash.SetClipboard(ctx, tconn, text); err != nil {
			s.Fatalf("Failed to set up %q into clipboard history: %v", text, err)
		}
	}
	param := s.Param().(*contextMenuClipboardTestParam)
	if err := param.testImpl.openApp(ctx, resource); err != nil {
		s.Fatal("Failed to open app: ", err)
	}
	// Deferred in this order so the UI dump is captured before the app is
	// closed (deferred calls run last-in first-out).
	defer param.testImpl.closeApp(cleanUpCtx)
	defer faillog.DumpUITreeWithScreenshotOnError(cleanUpCtx, s.OutDir(), s.HasError, cr, fmt.Sprintf("%s_dump", param.testName))
	if err := param.testImpl.pasteAndVerify(ctx, resource); err != nil {
		s.Fatal("Failed to paste and verify: ", err)
	}
}
// clearInputField focuses the input field, selects all of its content,
// deletes it, and confirms the field's value is empty afterwards.
func clearInputField(ctx context.Context, res *clipboardResource, inputFinder *nodewith.Finder) error {
	steps := uiauto.Combine("clear input field",
		res.ui.LeftClick(inputFinder),
		res.ui.WaitUntilExists(inputFinder.Focused()),
		res.kb.AccelAction("Ctrl+A"),
		res.kb.AccelAction("Backspace"),
	)
	if err := steps(ctx); err != nil {
		return err
	}
	info, err := res.ui.Info(ctx, inputFinder)
	if err != nil {
		return errors.Wrap(err, "failed to get info for the input field")
	}
	if info.Value != "" {
		return errors.Errorf("failed to clear value: %q", info.Value)
	}
	return nil
}
// pasteAndVerify returns an action that, for each prepared clipboard entry,
// opens the input field's context menu, launches clipboard history from it,
// pastes the entry, and verifies the input field now contains that text.
func pasteAndVerify(res *clipboardResource, inputFinder *nodewith.Finder) uiauto.Action {
	return func(ctx context.Context) error {
		for _, text := range res.testContents {
			testing.ContextLogf(ctx, "Paste %q", text)
			if err := clearInputField(ctx, res, inputFinder); err != nil {
				return errors.Wrap(err, "failed to clear input field before paste")
			}
			item := nodewith.Name(text).Role(role.MenuItem).HasClass("ClipboardHistoryTextItemView").First()
			if err := uiauto.Combine(fmt.Sprintf("paste %q from clipboard history", text),
				res.ui.RightClick(inputFinder),
				res.ui.DoDefault(nodewith.NameStartingWith("Clipboard").Role(role.MenuItem)),
				res.ui.WaitUntilGone(nodewith.HasClass("MenuItemView")),
				res.ui.LeftClick(item),
				res.ui.WaitForLocation(inputFinder),
			)(ctx); err != nil {
				return err
			}
			nodeInfo, err := res.ui.Info(ctx, inputFinder)
			if err != nil {
				return errors.Wrap(err, "failed to get info for the input field")
			}
			if !strings.Contains(nodeInfo.Value, text) {
				// Bug fix: this previously used errors.Wrapf(nil, ...);
				// there is no underlying error to wrap, so report the
				// mismatch directly with Errorf.
				return errors.Errorf("input field didn't contain the word: got %q; want %q", nodeInfo.Value, text)
			}
		}
		return nil
	}
}
// clipboardTest abstracts one UI surface (browser, settings, launcher) that
// exposes clipboard history through its context menu.
type clipboardTest interface {
	// openApp opens the surface under test.
	openApp(ctx context.Context, res *clipboardResource) error
	// closeApp closes whatever openApp opened; it must be safe to call
	// even if openApp failed part-way.
	closeApp(ctx context.Context) error
	// pasteAndVerify pastes the prepared contents into the surface's text
	// field and verifies the result.
	pasteAndVerify(ctx context.Context, res *clipboardResource) error
}
// browserTest exercises the clipboard context-menu flow in a browser tab's
// address bar.
type browserTest struct {
	conn *chrome.Conn
}

// openApp opens a blank browser tab and keeps its connection for cleanup.
func (b *browserTest) openApp(ctx context.Context, res *clipboardResource) error {
	c, err := res.br.NewConn(ctx, "")
	if err != nil {
		return errors.Wrap(err, "failed to connect to chrome")
	}
	b.conn = c
	return nil
}

// closeApp closes the tab opened by openApp, if any. Failures are logged
// rather than returned so cleanup always runs to completion.
func (b *browserTest) closeApp(ctx context.Context) error {
	if b.conn == nil {
		return nil
	}
	if err := b.conn.CloseTarget(ctx); err != nil {
		testing.ContextLog(ctx, "Failed to close target: ", err)
	}
	if err := b.conn.Close(); err != nil {
		testing.ContextLog(ctx, "Failed to close connection: ", err)
	}
	b.conn = nil
	return nil
}

// pasteAndVerify pastes the test contents into the browser's address bar.
func (b *browserTest) pasteAndVerify(ctx context.Context, res *clipboardResource) error {
	root := nodewith.NameStartingWith("about:blank").HasClass("BrowserRootView")
	addressBar := nodewith.Role(role.TextField).Name("Address and search bar").Ancestor(root)
	return pasteAndVerify(res, addressBar)(ctx)
}
// settingsTest exercises the clipboard context-menu flow in the OS Settings
// search box.
type settingsTest struct {
	app *ossettings.OSSettings
}

// openApp launches OS Settings and remembers the handle for cleanup.
func (s *settingsTest) openApp(ctx context.Context, res *clipboardResource) error {
	app, err := ossettings.Launch(ctx, res.tconn)
	if err != nil {
		return errors.Wrap(err, "failed to launch OS settings")
	}
	s.app = app
	return nil
}

// closeApp closes OS Settings if it was opened successfully.
func (s *settingsTest) closeApp(ctx context.Context) error {
	if s.app == nil {
		return nil
	}
	if err := s.app.Close(ctx); err != nil {
		return err
	}
	s.app = nil
	return nil
}

// pasteAndVerify pastes the test contents into the Settings search box.
func (s *settingsTest) pasteAndVerify(ctx context.Context, res *clipboardResource) error {
	return pasteAndVerify(res, ossettings.SearchBoxFinder)(ctx)
}
// launcherTest exercises the clipboard context-menu flow in the bubble
// launcher's search field.
type launcherTest struct {
	tconn *chrome.TestConn
}

// openApp opens the bubble launcher.
func (l *launcherTest) openApp(ctx context.Context, res *clipboardResource) error {
	l.tconn = res.tconn
	return launcher.OpenBubbleLauncher(l.tconn)(ctx)
}

// closeApp closes the bubble launcher.
func (l *launcherTest) closeApp(ctx context.Context) error {
	return launcher.CloseBubbleLauncher(l.tconn)(ctx)
}

// pasteAndVerify pastes the test contents into the launcher search field.
func (l *launcherTest) pasteAndVerify(ctx context.Context, res *clipboardResource) error {
	container := nodewith.HasClass("SearchBoxView")
	field := nodewith.HasClass("Textfield").Role(role.TextField).Ancestor(container)
	return pasteAndVerify(res, field)(ctx)
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package utility
import (
"testing"
"github.com/mattermost/mattermost-cloud/model"
"github.com/golang/mock/gomock"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
)
// TestNewHelmDeploymentWithDefaultConfigurationNodeProblemDetector checks
// that a node-problem-detector handle built from a minimal cluster config
// validates cleanly and yields a non-nil helm deployment.
func TestNewHelmDeploymentWithDefaultConfigurationNodeProblemDetector(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	logger := log.New()
	cluster := &model.Cluster{
		UtilityMetadata: &model.UtilityMetadata{
			ActualVersions: model.UtilityGroupVersions{},
		},
	}
	handle := newNodeProblemDetectorHandle(&model.HelmUtilityVersion{Chart: "2.3.5"}, cluster, "kubeconfig", logger)
	require.NoError(t, handle.validate(), "should not error when creating new node-problem-detector handler")
	require.NotNil(t, handle, "node-problem-detector should not be nil")
	deployment := handle.newHelmDeployment(logger)
	require.NotNil(t, deployment, "helmDeployment should not be nil")
}
|
package engine
// movegen.go implements the move generator for Blunder.
import (
"fmt"
)
const (
	// These masks help determine whether or not the squares between
	// the king and its rooks are clear for castling.
	//
	// F1_G1 covers f1+g1 (white kingside) and B1_C1_D1 covers b1+c1+d1
	// (white queenside); F8_G8 and B8_C8_D8 are the black equivalents.
	F1_G1, B1_C1_D1 = 0x600000000000000, 0x7000000000000000
	F8_G8, B8_C8_D8 = 0x6, 0x70
)
// GenMoves generates all pseudo-legal moves for the side to move: every
// non-pawn piece's moves, pawn moves, and castling moves. Legality (leaving
// the king in check) is not verified here.
func GenMoves(pos *Position) (moves MoveList) {
	// Walk each non-pawn piece type and every piece of that type; FullBB
	// means any destination square is allowed.
	for piece := uint8(Knight); piece < NoType; piece++ {
		for bb := pos.PieceBB[pos.SideToMove][piece]; bb != 0; {
			genPieceMoves(pos, piece, bb.PopBit(), &moves, FullBB)
		}
	}
	genPawnMoves(pos, &moves, FullBB)
	genCastlingMoves(pos, &moves)
	return moves
}
// genCaptures generates all pseudo-legal captures for the side to move by
// restricting move targets to squares occupied by the enemy.
func genCaptures(pos *Position) (moves MoveList) {
	targets := pos.SideBB[pos.SideToMove^1]
	for piece := uint8(Knight); piece < NoType; piece++ {
		for bb := pos.PieceBB[pos.SideToMove][piece]; bb != 0; {
			genPieceMoves(pos, piece, bb.PopBit(), &moves, targets)
		}
	}
	genPawnMoves(pos, &moves, targets)
	return moves
}
// genPieceMoves generates the moves of a single non-pawn piece on sq,
// restricted to the given target squares, and appends them to moves.
func genPieceMoves(pos *Position, piece, sq uint8, moves *MoveList, targets Bitboard) {
	friendly := pos.SideBB[pos.SideToMove]
	hostile := pos.SideBB[pos.SideToMove^1]
	occupied := friendly | hostile
	// Build the raw destination set for the piece type; sliding pieces
	// consult the magic-bitboard lookups with the full occupancy.
	var destinations Bitboard
	switch piece {
	case Knight:
		destinations = KnightMoves[sq]
	case King:
		destinations = KingMoves[sq]
	case Bishop:
		destinations = genBishopMoves(sq, occupied)
	case Rook:
		destinations = genRookMoves(sq, occupied)
	case Queen:
		destinations = genBishopMoves(sq, occupied) | genRookMoves(sq, occupied)
	default:
		// Pawns are handled by genPawnMoves; nothing to do here.
		return
	}
	// Exclude friendly-occupied squares and keep only allowed targets.
	genMovesFromBB(pos, sq, (destinations & ^friendly) & targets, hostile, moves)
}
// genRookMoves returns the rook attack set from sq for the given occupancy
// using magic-bitboard lookup: blockers are masked to the relevant rays,
// multiplied by the square's magic number, and shifted down to index the
// precomputed attack table.
func genRookMoves(sq uint8, blockers Bitboard) Bitboard {
	magic := &RookMagics[sq]
	blockers &= magic.Mask
	return RookAttacks[sq][(uint64(blockers)*magic.MagicNo)>>magic.Shift]
}
// genBishopMoves returns the bishop attack set from sq for the given
// occupancy, via the same magic-bitboard lookup scheme as genRookMoves.
// (The previous comment incorrectly said "rook moves".)
func genBishopMoves(sq uint8, blockers Bitboard) Bitboard {
	magic := &BishopMagics[sq]
	blockers &= magic.Mask
	return BishopAttacks[sq][(uint64(blockers)*magic.MagicNo)>>magic.Shift]
}
// Generate pawn moves for the current side. Pawns are treated
// separately from the rest of the pieces as they have more
// complicated and exceptional rules for how they can move.
// Only the moves that align with the specified target squares are
// generated (the en passant square is always a permitted capture target
// in addition to targets).
func genPawnMoves(pos *Position, moves *MoveList, targets Bitboard) {
	usBB := pos.SideBB[pos.SideToMove]
	enemyBB := pos.SideBB[pos.SideToMove^1]
	pawnsBB := pos.PieceBB[pos.SideToMove][Pawn]
	// For each pawn on our side...
	for pawnsBB != 0 {
		from := pawnsBB.PopBit()
		// Single push: one square forward if unoccupied.
		pawnOnePush := PawnPushes[pos.SideToMove][from] & ^(usBB | enemyBB)
		// Double push: only possible when the single push landed on rank 6
		// (Black) or rank 3 (White), and the next square is also empty.
		pawnTwoPush := ((pawnOnePush & MaskRank[Rank6]) << 8) & ^(usBB | enemyBB)
		if pos.SideToMove == White {
			pawnTwoPush = ((pawnOnePush & MaskRank[Rank3]) >> 8) & ^(usBB | enemyBB)
		}
		// calculate the push move for the pawn...
		pawnPush := (pawnOnePush | pawnTwoPush) & targets
		// and the attacks (targets plus the en passant square).
		// NOTE(review): assumes SquareBB[pos.EPSq] is harmless (e.g. an
		// off-board sentinel entry) when no en passant square exists —
		// confirm against Position.EPSq's definition.
		pawnAttacks := PawnAttacks[pos.SideToMove][from] & (targets | SquareBB[pos.EPSq])
		// Generate pawn push moves; pushes onto the last rank become
		// promotions instead of quiet moves.
		for pawnPush != 0 {
			to := pawnPush.PopBit()
			if isPromoting(pos.SideToMove, to) {
				makePromotionMoves(pos, from, to, moves)
				continue
			}
			moves.AddMove(NewMove(from, to, Quiet, NoFlag))
		}
		// Generate pawn attack moves.
		for pawnAttacks != 0 {
			to := pawnAttacks.PopBit()
			toBB := SquareBB[to]
			// Check for en passant moves.
			if to == pos.EPSq {
				moves.AddMove(NewMove(from, to, Attack, AttackEP))
			} else if toBB&enemyBB != 0 {
				// Captures onto the last rank are capture-promotions.
				if isPromoting(pos.SideToMove, to) {
					makePromotionMoves(pos, from, to, moves)
					continue
				}
				moves.AddMove(NewMove(from, to, Attack, NoFlag))
			}
		}
	}
}
// isPromoting reports whether a pawn of the given color moving to toSq
// lands on its promotion rank: the 8th rank (squares 56-63) for White, the
// 1st rank (squares 0-7) for Black.
func isPromoting(usColor, toSq uint8) bool {
	if usColor == White {
		return 56 <= toSq && toSq <= 63
	}
	return toSq <= 7
}
// makePromotionMoves adds the four possible promotion moves (knight,
// bishop, rook and queen) for a pawn moving from 'from' to the promotion
// square 'to'.
func makePromotionMoves(pos *Position, from, to uint8, moves *MoveList) {
	moves.AddMove(NewMove(from, to, Promotion, KnightPromotion))
	moves.AddMove(NewMove(from, to, Promotion, BishopPromotion))
	moves.AddMove(NewMove(from, to, Promotion, RookPromotion))
	moves.AddMove(NewMove(from, to, Promotion, QueenPromotion))
}
// Generate castling moves. A castle is emitted when the side still has the
// corresponding castling right, the squares between king and rook are
// empty, and neither the king's square nor the two squares it crosses are
// attacked. (Pseudo-legality elsewhere means broader legality is checked
// when the move is made, but the attacked-square rule for castling is
// enforced here.)
func genCastlingMoves(pos *Position, moves *MoveList) {
	allPieces := pos.SideBB[pos.SideToMove] | pos.SideBB[pos.SideToMove^1]
	if pos.SideToMove == White {
		// White kingside: e1 -> g1, f1/g1 must be empty and e1/f1/g1 safe.
		if pos.CastlingRights&WhiteKingsideRight != 0 && (allPieces&F1_G1) == 0 && (!sqIsAttacked(pos, pos.SideToMove, E1) &&
			!sqIsAttacked(pos, pos.SideToMove, F1) && !sqIsAttacked(pos, pos.SideToMove, G1)) {
			moves.AddMove(NewMove(E1, G1, Castle, NoFlag))
		}
		// White queenside: e1 -> c1, b1/c1/d1 must be empty and e1/d1/c1 safe.
		if pos.CastlingRights&WhiteQueensideRight != 0 && (allPieces&B1_C1_D1) == 0 && (!sqIsAttacked(pos, pos.SideToMove, E1) &&
			!sqIsAttacked(pos, pos.SideToMove, D1) && !sqIsAttacked(pos, pos.SideToMove, C1)) {
			moves.AddMove(NewMove(E1, C1, Castle, NoFlag))
		}
	} else {
		// Black kingside: e8 -> g8.
		if pos.CastlingRights&BlackKingsideRight != 0 && (allPieces&F8_G8) == 0 && (!sqIsAttacked(pos, pos.SideToMove, E8) &&
			!sqIsAttacked(pos, pos.SideToMove, F8) && !sqIsAttacked(pos, pos.SideToMove, G8)) {
			moves.AddMove(NewMove(E8, G8, Castle, NoFlag))
		}
		// Black queenside: e8 -> c8.
		if pos.CastlingRights&BlackQueensideRight != 0 && (allPieces&B8_C8_D8) == 0 && (!sqIsAttacked(pos, pos.SideToMove, E8) &&
			!sqIsAttacked(pos, pos.SideToMove, D8) && !sqIsAttacked(pos, pos.SideToMove, C8)) {
			moves.AddMove(NewMove(E8, C8, Castle, NoFlag))
		}
	}
}
// genMovesFromBB serializes a bitboard of destination squares into moves
// from 'from', classifying each as an attack when the destination is
// occupied by an enemy piece and as quiet otherwise.
func genMovesFromBB(pos *Position, from uint8, movesBB, enemyBB Bitboard, moves *MoveList) {
	for movesBB != 0 {
		to := movesBB.PopBit()
		kind := Quiet
		if SquareBB[to]&enemyBB != 0 {
			kind = Attack
		}
		moves.AddMove(NewMove(from, to, kind, NoFlag))
	}
}
// sqIsAttacked reports whether sq, belonging to the side usColor, is
// attacked by any enemy piece. It uses the "superpiece" technique: attack
// rays for every piece type are projected outward from sq, and if a ray of
// some type lands on an enemy piece of that same type, sq is attacked.
func sqIsAttacked(pos *Position, usColor, sq uint8) bool {
	them := usColor ^ 1
	occupied := pos.SideBB[usColor] | pos.SideBB[them]
	diagonal := genBishopMoves(sq, occupied)
	straight := genRookMoves(sq, occupied)
	return diagonal&(pos.PieceBB[them][Bishop]|pos.PieceBB[them][Queen]) != 0 ||
		straight&(pos.PieceBB[them][Rook]|pos.PieceBB[them][Queen]) != 0 ||
		KnightMoves[sq]&pos.PieceBB[them][Knight] != 0 ||
		KingMoves[sq]&pos.PieceBB[them][King] != 0 ||
		PawnAttacks[usColor][sq]&pos.PieceBB[them][Pawn] != 0
}
// DividePerft explores the move tree to the given depth and returns the
// total number of leaf nodes visited. When the remaining depth equals
// divideAt, the per-move subtree counts are printed ("divide" output),
// which makes it easy to diff move generation against other engines.
//
// GenMoves produces pseudo-legal moves; MakeMove reports whether a move is
// actually legal and only legal moves are counted.
func DividePerft(pos *Position, depth, divideAt uint8) uint64 {
	// A leaf has been reached; count this position once. (The original
	// comment claimed zero was returned here, which was wrong.)
	if depth == 0 {
		return 1
	}
	moves := GenMoves(pos)
	var nodes uint64
	for idx := uint8(0); idx < moves.Count; idx++ {
		move := moves.Moves[idx]
		if pos.MakeMove(move) {
			moveNodes := DividePerft(pos, depth-1, divideAt)
			if depth == divideAt {
				fmt.Printf("%v: %v\n", move, moveNodes)
			}
			nodes += moveNodes
		}
		// UnmakeMove runs even when MakeMove reported the move illegal —
		// presumably MakeMove applies the move before checking legality
		// (matches Perft; confirm against Position.MakeMove).
		pos.UnmakeMove(move)
	}
	return nodes
}
// Perft explores the move tree to the given depth and returns the total
// number of leaf nodes visited — the same as DividePerft but without
// printing the per-move subtotals.
func Perft(pos *Position, depth uint8) uint64 {
	// A leaf has been reached; count this position once. (The original
	// comment claimed zero was returned here, which was wrong.)
	if depth == 0 {
		return 1
	}
	// Otherwise generate the pseudo-legal moves for this position...
	moves := GenMoves(pos)
	var nodes uint64
	// ...and make every move, recursively counting the subnodes of each
	// legal one. UnmakeMove runs even when MakeMove reported the move
	// illegal — presumably MakeMove applies the move before checking
	// legality; confirm against Position.MakeMove.
	var idx uint8
	for idx = 0; idx < moves.Count; idx++ {
		if pos.MakeMove(moves.Moves[idx]) {
			nodes += Perft(pos, depth-1)
		}
		pos.UnmakeMove(moves.Moves[idx])
	}
	// Return the total amount of nodes for the given position.
	return nodes
}
|
package main
import (
"encoding/json"
"errors"
"net/http"
"strconv"
"strings"
"github.com/johnamadeo/server"
)
const (
	// NoMaxRoundErr is the substring of the Scan error produced when
	// SELECT MAX(id) yields NULL (the organization has no rounds yet) and
	// the NULL driver.Value cannot be converted into an int.
	NoMaxRoundErr = "converting driver.Value type <nil>"
	// TimestampFormat : Postgres timestamp string template patterns can be found in https://www.postgresql.org/docs/8.1/functions-formatting.html
	TimestampFormat = "YYYY-MM-DD HH24:MI:ssZ"
)
// GetRoundsResponse : Data structure for storing the dates for each round,
// formatted per TimestampFormat and ordered by round id.
type GetRoundsResponse struct {
	Rounds []string `json:"rounds"`
}
// AddRoundHandler : HTTP handler for scheduling a new round on a certain
// date. Requires the "org" and "round" query parameters; only POST is
// accepted.
func AddRoundHandler(w http.ResponseWriter, r *http.Request) {
	const function = "AddRoundHandler"
	if r.Method != "POST" {
		LogAndWriteErr(
			w,
			errors.New("Only POST requests are allowed at this route"),
			http.StatusMethodNotAllowed,
			function,
		)
		return
	}
	// TODO: Verify 'round' is a datestring in the YYYY-MM-DDTHH:mm:ss[Z] format
	params, err := getQueryParams(r, []string{"org", "round"})
	if err != nil {
		LogAndWriteStatusBadRequest(w, err, function)
		return
	}
	if err := addRound(params[0], params[1]); err != nil {
		LogAndWriteStatusBadRequest(w, err, function)
		return
	}
	LogAndWrite(
		w,
		server.StrToBytes("Successfully scheduled a new round"),
		http.StatusCreated,
		function,
	)
}
// GetRoundsHandler : HTTP handler for retrieving the dates for all rounds
// scheduled for the organization given by the "org" query parameter.
func GetRoundsHandler(w http.ResponseWriter, r *http.Request) {
	const function = "GetRoundsHandler"
	if r.Method != "GET" {
		LogAndWriteErr(
			w,
			errors.New("Only GET requests are allowed at this route"),
			http.StatusMethodNotAllowed,
			function,
		)
		return
	}
	orgname, err := getQueryParam(r, "org")
	if err != nil {
		LogAndWriteStatusBadRequest(w, err, function)
		return
	}
	rounds, err := getRoundsFromDB(orgname)
	if err != nil {
		LogAndWriteStatusInternalServerError(w, err, function)
		return
	}
	payload, err := json.Marshal(GetRoundsResponse{Rounds: rounds})
	if err != nil {
		LogAndWriteStatusInternalServerError(w, err, function)
		return
	}
	LogAndWrite(w, payload, http.StatusOK, function)
}
// RoundHandler : Combined HTTP handler for rounds. POST requests carrying a
// "roundId" query parameter reschedule an existing round, POSTs without one
// schedule a new round, and DELETE removes a round.
func RoundHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "POST":
		if _, err := getQueryParam(r, "roundId"); err != nil {
			AddRoundHandler(w, r)
		} else {
			RescheduleRoundHandler(w, r)
		}
	case "DELETE":
		RemoveRoundHandler(w, r)
	default:
		LogAndWriteErr(
			w,
			errors.New("Only POST and DELETE requests are allowed at this route"),
			http.StatusMethodNotAllowed,
			"RoundHandler",
		)
	}
}
// getRoundsFromDB returns the scheduled dates of all rounds for the given
// organization, formatted per TimestampFormat and ordered by round id.
func getRoundsFromDB(orgname string) ([]string, error) {
	db, err := server.CreateDBConnection(LocalDBConnection)
	if err != nil {
		// Bug fix: Close was previously deferred before this check, which
		// would dereference a nil DB handle when the connection fails.
		return []string{}, err
	}
	defer db.Close()
	rows, err := db.Query(
		"SELECT to_char(scheduled_date, $1) FROM rounds WHERE organization = $2 ORDER BY id ASC",
		TimestampFormat,
		orgname,
	)
	if err != nil {
		return []string{}, err
	}
	// Bug fix: rows were previously never closed, leaking the connection.
	defer rows.Close()
	rounds := []string{}
	for rows.Next() {
		var roundDate string
		if err := rows.Scan(&roundDate); err != nil {
			return []string{}, err
		}
		rounds = append(rounds, roundDate)
	}
	// Surface iteration errors that would otherwise silently truncate the
	// result set.
	if err := rows.Err(); err != nil {
		return []string{}, err
	}
	return rounds, nil
}
// RemoveRoundHandler : HTTP handler for cancelling a previously scheduled
// round. Requires the "org" and "roundId" query parameters; only DELETE is
// accepted.
func RemoveRoundHandler(w http.ResponseWriter, r *http.Request) {
	function := "RemoveRoundHandler"
	if r.Method != "DELETE" {
		LogAndWriteErr(
			w,
			errors.New("Only DELETE requests are allowed at this route"),
			http.StatusMethodNotAllowed,
			function,
		)
		return
	}
	values, err := getQueryParams(r, []string{"org", "roundId"})
	if err != nil {
		LogAndWriteStatusBadRequest(w, err, function)
		return
	}
	orgname := values[0]
	roundID, err := strconv.Atoi(values[1])
	if err != nil {
		// Bug fix: a non-numeric roundId is a malformed client request, so
		// report 400 Bad Request rather than a 500 server error.
		LogAndWriteStatusBadRequest(w, err, function)
		return
	}
	if err := removeRound(orgname, roundID); err != nil {
		LogAndWriteStatusInternalServerError(w, err, function)
		return
	}
	LogAndWrite(
		w,
		server.StrToBytes("Successfully removed round"),
		http.StatusCreated,
		function,
	)
}
// RescheduleRoundHandler : HTTP handler for rescheduling the date of a particular round
func RescheduleRoundHandler(w http.ResponseWriter, r *http.Request) {
function := "RescheduleRoundHandler"
if r.Method != "POST" {
w.WriteHeader(http.StatusMethodNotAllowed)
w.Write(server.StrToBytes("Only POST requests are allowed at this route"))
return
}
values, err := getQueryParams(r, []string{"org", "round", "roundId"})
if err != nil {
LogAndWriteStatusBadRequest(w, err, function)
return
}
orgname := values[0]
roundDate := values[1]
roundID, err := strconv.Atoi(values[2])
if err != nil {
LogAndWriteStatusInternalServerError(w, err, function)
return
}
err = rescheduleRound(orgname, roundDate, roundID)
if err != nil {
LogAndWriteStatusInternalServerError(w, err, function)
return
}
LogAndWrite(
w,
server.StrToBytes("Succesfully changed date of the round"),
http.StatusCreated,
function,
)
}
// addRound inserts a new round for the organization with an id one greater
// than the current maximum (starting at 0 for the first round).
//
// NOTE(review): the MAX(id)+1 scheme is not atomic — two concurrent calls
// can pick the same id; a sequence or serialized access would fix that.
func addRound(orgname string, roundDate string) error {
	db, err := server.CreateDBConnection(LocalDBConnection)
	if err != nil {
		// Bug fix: Close was previously deferred before this check, which
		// would dereference a nil DB handle when the connection fails.
		return err
	}
	defer db.Close()
	rows, err := db.Query(
		"SELECT MAX(id) FROM rounds WHERE organization = $1",
		orgname,
	)
	if err != nil {
		return err
	}
	defer rows.Close()
	// MAX(id) is NULL when the organization has no rounds yet; Scan then
	// fails with the NoMaxRoundErr conversion error, which we treat as "no
	// rounds" and keep maxRoundID at -1 so the first round gets id 0.
	maxRoundID := -1
	if rows.Next() {
		if err := rows.Scan(&maxRoundID); err != nil && !strings.Contains(err.Error(), NoMaxRoundErr) {
			return err
		}
	}
	_, err = db.Exec(
		"INSERT INTO rounds (organization, id, scheduled_date, done) VALUES ($1, $2, $3, $4)",
		orgname,
		maxRoundID+1,
		roundDate,
		false,
	)
	return err
}
// removeRound deletes the round with the given id for the organization and
// shifts every later round's id down by one so ids stay contiguous.
func removeRound(orgname string, roundID int) error {
	db, err := server.CreateDBConnection(LocalDBConnection)
	if err != nil {
		// Bug fix: Close was previously deferred before this check, which
		// would dereference a nil DB handle when the connection fails.
		return err
	}
	defer db.Close()
	_, err = db.Exec(
		"DELETE FROM rounds WHERE organization = $1 AND id = $2",
		orgname,
		roundID,
	)
	if err != nil {
		return err
	}
	// Walk upward from the removed id, renumbering each following round,
	// until an update matches no rows (nothing left to shift).
	for {
		result, err := db.Exec(
			"UPDATE rounds SET id = $1 WHERE organization = $2 AND id = $3",
			roundID,
			orgname,
			roundID+1,
		)
		if err != nil {
			return err
		}
		// Robustness fix: the RowsAffected error was previously discarded.
		numRows, err := result.RowsAffected()
		if err != nil {
			return err
		}
		if numRows == 0 {
			break
		}
		roundID++
	}
	return nil
}
// rescheduleRound updates the scheduled date of the given round.
//
// NOTE(review): an unknown roundID matches zero rows and is still reported
// as success; check RowsAffected if stricter behavior is wanted.
func rescheduleRound(orgname string, roundDate string, roundID int) error {
	db, err := server.CreateDBConnection(LocalDBConnection)
	if err != nil {
		// Bug fix: Close was previously deferred before this check, which
		// would dereference a nil DB handle when the connection fails.
		return err
	}
	defer db.Close()
	_, err = db.Exec(
		"UPDATE rounds SET scheduled_date = $1 WHERE organization = $2 AND id = $3",
		roundDate,
		orgname,
		roundID,
	)
	return err
}
|
package todo
import (
"errors"
"time"
"github.com/t-ash0410/tdd-sample/backend/internal/api/todo/entities"
)
// SuccessListUsecase is a test double for the list usecase that always
// succeeds and yields exactly one zero-valued task stamped with the
// current time.
type SuccessListUsecase struct{}

// Handle appends a single placeholder task to result and reports success.
func (u SuccessListUsecase) Handle(result *[]entities.Task) error {
	placeholder := entities.Task{
		Id:          "",
		Name:        "",
		Description: "",
		UpdatedAt:   time.Now(),
	}
	*result = append(*result, placeholder)
	return nil
}
// EmptyListUsecase is a test double for the list usecase that succeeds
// without producing any tasks.
type EmptyListUsecase struct{}

// Handle leaves result untouched and reports success.
func (u EmptyListUsecase) Handle(result *[]entities.Task) error {
	return nil
}
// FailListUsecase is a test double for the list usecase that always fails.
type FailListUsecase struct{}

// Handle returns a generic error without modifying result.
func (u FailListUsecase) Handle(result *[]entities.Task) error {
	return errors.New("some error")
}
// SuccessAddUsecase is a test double for the add usecase that always succeeds.
type SuccessAddUsecase struct{}

// Handle accepts any name/description and reports success.
func (u SuccessAddUsecase) Handle(name string, description string) error {
	return nil
}
// FailAddUsecase is a test double for the add usecase that always fails.
type FailAddUsecase struct{}

// Handle returns a generic error regardless of input.
func (u FailAddUsecase) Handle(name string, description string) error {
	return errors.New("some error")
}
|
package pack
import (
"testing"
"time"
)
// Verifies the info-file timestamp is rendered in RFC3339 UTC form.
func Test_GetInfoFile_ReturnsCorrectTimeFormat(t *testing.T) {
	// The Go launch date gives a stable, well-known instant in UTC.
	input := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	want := "2009-11-10T23:00:00Z"
	if got := getInfoFileFormattedTime(input); got != want {
		t.Errorf("invalid infofile time format: expected %v actual %v", want, got)
	}
}
// Verifies the zip entry path when the file sits directly inside the module root.
func Test_GetZipPath_PathAndModulePathsAreSame(t *testing.T) {
	const (
		modulePath      = "/root/"
		currentFilePath = "/root/app.go"
		name            = "root"
		version         = "v1.0.0"
	)
	want := "root@v1.0.0/app.go"
	if got := getZipPath(modulePath, currentFilePath, name, version); got != want {
		t.Errorf("invalid zip path: expected %v actual %v", want, got)
	}
}
// Verifies the zip entry path when the module name is a nested repository path.
func Test_GetZipPath_ModulePathIsChildOfPath(t *testing.T) {
	const (
		modulePath      = "/root/repository/username/app"
		currentFilePath = "/root/repository/username/app/app.go"
		name            = "repository/username/app"
		version         = "v1.0.0"
	)
	want := "repository/username/app@v1.0.0/app.go"
	if got := getZipPath(modulePath, currentFilePath, name, version); got != want {
		t.Errorf("invalid zip path: expected %v actual %v", want, got)
	}
}
|
package main
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"math/rand"
"net/http"
"time"
)
// main registers a counter metric that grows by a random amount every second
// and serves the Prometheus scrape endpoint on :9100.
func main() {
	counter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_counter",
	})
	rand.Seed(time.Now().Unix())
	prometheus.MustRegister(counter)

	// Background producer: bump the counter once per second.
	go func() {
		for {
			counter.Add(rand.Float64())
			time.Sleep(time.Second)
		}
	}()

	http.Handle("/metrics", promhttp.Handler())
	fmt.Println(http.ListenAndServe(":9100", nil))
}
|
package main
import "fmt"
// requester emits the Go source of a generated Request method for the given
// eBay request type. The generated method injects requester credentials,
// optionally validates the request, XML-encodes it into the request body,
// and performs the call, returning the typed response.
func requester(typeName string) string {
	return fmt.Sprintf(`func (x *%[1]sRequestType) Request(eBayAuthToken, siteID string) (response %[1]sResponseType, err error) {
	if x.RequesterCredentials == nil {
		x.RequesterCredentials = &XMLRequesterCredentialsType{}
	}
	x.RequesterCredentials.EBayAuthToken.Set(eBayAuthToken)
	if RequestValidation {
		if err = x.Validate(); err != nil {
			return
		}
	}
	req := newRequester("%[1]s", siteID, &response)
	if err = xml.NewEncoder(req.body).Encode(x); err != nil {
		return
	}
	if err = req.request(); err != nil {
		return
	}
	return
}
`, typeName)
}
// xmlEncoder emits the Go source of a generated MarshalXMLEncode method that
// optionally validates the request and streams it as XML to w.
func xmlEncoder(typeName string) string {
	return fmt.Sprintf(`func (x %sRequestType) MarshalXMLEncode(w io.Writer) error {
	if RequestValidation {
		if err := x.Validate(); err != nil {
			return err
		}
	}
	return xml.NewEncoder(w).Encode(x)
}
`, typeName)
}
// xmlMarshaler emits the Go source of a generated MarshalXML method that
// optionally validates the request and returns its XML encoding as bytes.
func xmlMarshaler(typeName string) string {
	return fmt.Sprintf(`func (x %sRequestType) MarshalXML() ([]byte, error) {
	if RequestValidation {
		if err := x.Validate(); err != nil {
			return nil, err
		}
	}
	return xml.Marshal(x)
}
`, typeName)
}
// validator emits the Go source of a generated Validate method whose body is
// supplied verbatim by the caller.
func validator(typeName, body string) string {
	return fmt.Sprintf(`func (x %[1]sRequestType) Validate() error {
	%[2]s
	return nil
}
`, typeName, body)
}
|
// Package c2netapi provides a rest api for c2net iot hub functions
package c2netapi
import (
"database/sql"
"encoding/json"
"fmt"
"net/http"
_ "github.com/mattn/go-sqlite3"
log "github.com/sirupsen/logrus"
)
// HubId is the JSON payload carrying the numeric identifier of this hub.
type HubId struct {
	Id int `json:"id"`
}
// InsertHubId replaces the stored hub id: it decodes a HubId from the request
// body, clears the hubid table, and inserts the new id. Every outcome is
// reported as a JSON-encoded HttpResp; failures use Status 500 in the payload.
func InsertHubId(w http.ResponseWriter, r *http.Request) {
	db, err := sql.Open("sqlite3", "/home/pi/C2NET/c2net-iot-hub/tables/c2net.db")
	if err != nil {
		json.NewEncoder(w).Encode(HttpResp{Status: 500, Description: "Couldn't open c2net sqlite db"})
		return
	}
	// Close only after we know the handle is valid (the original deferred
	// Close before checking the error).
	defer db.Close()
	// Decode the body first so a malformed payload can never wipe the table.
	// (The original wiped the table before decoding and ignored the decode error.)
	var hub HubId
	if err := json.NewDecoder(r.Body).Decode(&hub); err != nil {
		log.Error(err)
		json.NewEncoder(w).Encode(HttpResp{Status: 500, Description: "Failed to decode hub id from request body"})
		return
	}
	log.Info(hub)
	// The original discarded the Prepare errors and tested a stale err value.
	stmt, err := db.Prepare("DELETE FROM hubid")
	if err != nil {
		log.Error(err)
		json.NewEncoder(w).Encode(HttpResp{Status: 500, Description: "Couldn't prepare delete on c2net sqlite db"})
		return
	}
	defer stmt.Close()
	if _, err = stmt.Exec(); err != nil {
		log.Error(err)
		json.NewEncoder(w).Encode(HttpResp{Status: 500, Description: "Couldn't execute delete hubid sqlite db"})
		return
	}
	ins, err := db.Prepare("INSERT INTO hubid (id) values (?)")
	if err != nil {
		log.Error(err)
		json.NewEncoder(w).Encode(HttpResp{Status: 500, Description: "Failed to insert id in database"})
		return
	}
	defer ins.Close()
	if _, err = ins.Exec(hub.Id); err != nil {
		log.Info(err.Error()) // proper error handling instead of panic in your app
		json.NewEncoder(w).Encode(HttpResp{Status: 500, Description: "Failed to insert id in database"})
		return
	}
	json.NewEncoder(w).Encode(HttpResp{Status: 200, Description: "Successfully Inserted SensorArea Into the Database", Body: fmt.Sprintf("%+v\n", hub)})
}
|
package environment
import (
"testing"
"github.com/bingo-lang/bingo/object"
)
// TestEnvironment checks lexical-scope lookup: a child environment shadows
// its parent's bindings but still resolves keys the parent defines.
func TestEnvironment(t *testing.T) {
	const (
		key1 = "random1"
		key2 = "random2"
	)
	obj1 := object.Integer{Value: 1}
	obj2 := object.Integer{Value: 2}

	// Parent holds obj1 under both keys; the child shadows key2 with obj2.
	parent := New(nil)
	parent.Set(key1, obj1)
	parent.Set(key2, obj1)
	child := New(parent)
	child.Set(key2, obj2)

	// A key set locally must win over the parent's binding.
	if gotten := child.Get(key2); gotten != obj2 {
		t.Fatalf("Expecting object %s, got %s instead", obj2, gotten)
	}
	// A key only present in the parent must be reachable through the child.
	if gotten := child.Get(key1); gotten != obj1 {
		t.Fatalf("Expecting object %s from parent, got %s instead", obj1, gotten)
	}
}
|
package nsq
import (
"encoding/json"
"jkt/gateway/hotel"
"jkt/jktgo/log"
"jkt/jktgo/message"
"time"
"github.com/nsqio/go-nsq"
)
// ConsumerService handles hotel push messages consumed from NSQ.
type ConsumerService struct{}

// HandleMessage decodes an NSQ message body into a HotelPushMessage and
// dispatches it by message type. It always returns nil so that messages
// (including malformed ones, which cannot succeed on retry) are not requeued.
func (cs *ConsumerService) HandleMessage(msg *nsq.Message) error {
	log.Debug("接受到的消息是:" + string(msg.Body))
	var pushMsg message.HotelPushMessage
	// The original ignored the Unmarshal error and dispatched a zero-valued
	// message; log and drop malformed payloads instead.
	if err := json.Unmarshal(msg.Body, &pushMsg); err != nil {
		log.Debug("unmarshal hotel push message failed: " + err.Error())
		return nil
	}
	switch pushMsg.MsgType {
	case message.MTALLHOTEL:
		hotel.GetInstance().SendAllHotel(pushMsg.Message)
	case message.MTHOTEL:
		hotel.GetInstance().SendMonoHotel(
			pushMsg.HotelID,
			pushMsg.Message)
	case message.MTUID:
		hotel.GetInstance().SendUID(
			pushMsg.HotelID,
			pushMsg.UID,
			pushMsg.Message)
	}
	return nil
}
// Service wraps an NSQ consumer for the hotel gateway topic.
type Service struct {
	// consumer receives messages discovered via nsqlookupd.
	consumer *nsq.Consumer
}
// NewWith creates an NSQ consumer service subscribed to the hotel gateway
// topic on the given channel; it panics if the consumer cannot be created.
func NewWith(channel string) *Service {
	config := nsq.NewConfig()
	config.LookupdPollInterval = time.Second
	config.HeartbeatInterval = time.Second

	consumer, err := nsq.NewConsumer(message.TopicHotelGateway, channel, config)
	if err != nil {
		panic("init nsq server failed; because of " + err.Error())
	}
	svc := &Service{consumer: consumer}
	//svc.consumer.SetLogger(log.New(os.Stderr, "", log.Flags()), nsq.LogLevelWarning)
	svc.consumer.AddHandler(&ConsumerService{})
	return svc
}
// Stop shuts down the underlying NSQ consumer.
func (ns *Service) Stop() {
	ns.consumer.Stop()
}
// Run connects the consumer to the nsqlookupd at addr; it panics when the
// connection cannot be established.
func (ns *Service) Run(addr string) {
	if err := ns.consumer.ConnectToNSQLookupd(addr); err != nil {
		panic("run nsq service failed, because of " + err.Error())
	}
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
// isDigit reports whether an RPN token is an operand, i.e. not one of the
// four arithmetic operator tokens.
func isDigit(s string) bool {
	// Renamed from "digits": the string actually lists the operator tokens.
	operators := "+-*/"
	return !strings.Contains(operators, s)
}
// evalRPN evaluates a reverse-Polish-notation token list and returns the
// result. Operands are pushed on a stack; each operator pops its two
// operands and pushes the computed value.
func evalRPN(tokens []string) int {
	var stack []int
	for _, tok := range tokens {
		if isDigit(tok) {
			num, _ := strconv.Atoi(tok)
			stack = append(stack, num)
			continue
		}
		n := len(stack)
		res := calculate(stack[n-2], stack[n-1], tok)
		stack = append(stack[:n-2], res)
	}
	return stack[0]
}
// calculate applies the binary operator named by op to a and b, returning -1
// for an unrecognized operator token.
func calculate(a, b int, op string) int {
	switch op {
	case "+":
		return a + b
	case "-":
		return a - b
	case "*":
		return a * b
	case "/":
		return a / b
	}
	return -1
}
// main evaluates a sample RPN expression and prints the result.
func main() {
	// s := []string{"2", "1", "+", "3", "*"}
	tokens := []string{"10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"}
	// fmt.Println(evalRPN(s))
	fmt.Println(evalRPN(tokens))
}
|
package api
import (
dbm "github.com/tendermint/tm-db"
"sync"
)
// frame stores all Iterators for one contract
type frame []dbm.Iterator

// iteratorStack contains one frame for each contract, indexed by a counter
// 10 is a rather arbitrary guess on how many frames might be needed simultaneously
var iteratorStack = make(map[uint64]frame, 10)

// iteratorStackMutex guards all reads and writes of iteratorStack.
var iteratorStackMutex sync.Mutex

// this is a global counter when we create DBs
var dbCounter uint64

// dbCounterMutex guards dbCounter.
var dbCounterMutex sync.Mutex
// startContract is called at the beginning of a contract runtime to create a new frame on the iteratorStack
// updates dbCounter for an index
// startContract is called at the beginning of a contract runtime; it bumps
// the global DB counter under its mutex and returns the fresh value, which
// indexes this contract's frame on the iteratorStack.
func startContract() uint64 {
	dbCounterMutex.Lock()
	defer dbCounterMutex.Unlock()
	dbCounter++
	return dbCounter
}
// popFrame removes and returns the frame stored under counter, holding the
// stack mutex only for the map manipulation itself.
func popFrame(counter uint64) frame {
	iteratorStackMutex.Lock()
	defer iteratorStackMutex.Unlock()

	popped := iteratorStack[counter]
	delete(iteratorStack, counter)
	return popped
}
// endContract is called at the end of a contract runtime to remove that
// contract's frame from the iteratorStack. Popping happens in a separate
// function so the mutex is not held while the iterators are closed.
func endContract(counter uint64) {
	popped := popFrame(counter)
	// Release every iterator that belonged to the finished contract.
	for _, it := range popped {
		it.Close()
	}
}
// storeIterator appends it to the frame belonging to the given contract
// counter and returns its 1-based index. We start counting with 1, so the 0
// value is flagged as an error; callers must do idx-1 when retrieving.
func storeIterator(counter uint64, it dbm.Iterator) uint64 {
	iteratorStackMutex.Lock()
	defer iteratorStackMutex.Unlock()
	// Renamed the parameter (was "dbCounter", shadowing the package-level
	// counter) and the local (was "frame", shadowing the frame type).
	updated := append(iteratorStack[counter], it)
	iteratorStack[counter] = updated
	return uint64(len(updated))
}
// retrieveIterator will recover an iterator based on index. This ensures it will not be garbage collected.
// We start counting with 1, in storeIterator so the 0 value is flagged as an error. This means we must
// remember to do idx-1 when retrieving
// retrieveIterator recovers an iterator by its 1-based index within the frame
// of the given contract counter (indices start at 1 in storeIterator, so 0 is
// an error value and we subtract 1 here). Keeping the iterator referenced in
// the stack ensures it is not garbage collected.
// NOTE(review): index 0 or an out-of-range index panics on the slice access —
// confirm callers always pass values returned by storeIterator.
func retrieveIterator(counter uint64, index uint64) dbm.Iterator {
	iteratorStackMutex.Lock()
	defer iteratorStackMutex.Unlock()
	// Parameter renamed from "dbCounter", which shadowed the package-level var.
	return iteratorStack[counter][index-1]
}
|
package handlers
import (
"net/http"
"github.com/gtongy/demo-echo-app/errors"
"github.com/gtongy/demo-echo-app/models"
"github.com/gtongy/demo-echo-app/mysql"
"github.com/labstack/echo"
)
// Task is the package-level handler instance used when registering routes.
var Task task

// task groups the HTTP handlers for the task resource.
type task struct{}
// Get responds with every task in the database as JSON; a query failure is
// reported as a 400 error payload.
func (t *task) Get(c echo.Context) error {
	var tasks []models.Task
	db := mysql.GetDB()
	// NOTE(review): this closes the handle on every request — assumes GetDB
	// returns a per-call connection; confirm it is not a shared global.
	defer db.Close()
	err := db.Find(&tasks).Error
	if err != nil {
		return errors.APIError.JSONErrorHandler(err, c, http.StatusBadRequest, "Tasks are not found")
	}
	return c.JSON(http.StatusOK, models.Tasks{Tasks: tasks})
}
// Create adds a new task owned by the user identified by the DEMO-ECHO-TOKEN
// header. The request is bound and validated before the row is inserted.
func (t *task) Create(c echo.Context) error {
	var user models.User
	db := mysql.GetDB()
	// NOTE(review): closing per request assumes GetDB returns a per-call
	// connection; confirm it is not a shared global.
	defer db.Close()
	db.Where("access_token = ?", c.Request().Header.Get("DEMO-ECHO-TOKEN")).Find(&user)
	title := c.FormValue("title")
	task := &models.Task{
		Title:  title,
		UserID: user.ID,
	}
	// NOTE(review): Bind runs after the manual population above and may
	// overwrite Title — confirm the intended precedence.
	if err := c.Bind(task); err != nil {
		return errors.APIError.JSONErrorHandler(err, c, http.StatusBadRequest, "Request is invalid")
	}
	if err := c.Validate(task); err != nil {
		return errors.APIError.JSONErrorHandler(err, c, http.StatusBadRequest, "Validate is failed")
	}
	// task is already a *models.Task; the original passed &task, handing gorm
	// a **models.Task. Also surface insert failures instead of returning 200.
	if err := db.Create(task).Error; err != nil {
		return errors.APIError.JSONErrorHandler(err, c, http.StatusBadRequest, "Create is failed")
	}
	return c.JSON(http.StatusOK, task)
}
|
package number
import (
"shared/utility/rand"
"testing"
)
// TestNewYggdrasilMail exercises the sorted int64 set by inserting 1000
// random values in [0, 1000) and logging the resulting set.
// NOTE(review): the test only logs and asserts nothing — it checks that
// Add does not panic; confirm whether ordering should be asserted too.
func TestNewYggdrasilMail(t *testing.T) {
	set := NewSortedInt64sSet()
	for i := 0; i < 1000; i++ {
		set.Add(int64(rand.RangeInt(0, 1000)))
	}
	t.Log(set)
}
|
package generativerecursion
// File contains Gauss elimination algorithm
// SOE is a non empty matrix
// SOE = system of equations like
// 2x + 2y + 3z = 10
// 2x + 5y + 12z = 31
// 4x + y - 2z = 1
//
// data example: [][]int{{2,2,3,10}, {2,5,12,31}, {4,1,-2,1}}
// SOE is a non-empty system of equations, one Equation per row.
type SOE []Equation

// TSOE is a triangular SOE
// such that the Equations are of decreasing length:
// n, n-1, ..., 2
type TSOE SOE

// Equation is an array [a0 a1 ... an b] of at least 2 numbers, where
// a[i]s are the left-hand-side and b is right-hand-side
type Equation []int
// Triangulate reduces a system of equations to triangular form via Gaussian
// elimination: it cancels the first variable in every equation after the
// first, drops the leading column, recurses on the smaller system, and
// re-prepends a zero coefficient to the recursive result. It returns nil
// when no non-zero pivot can be found (degenerate system).
func Triangulate(soe SOE) TSOE {
	// Base case: a single equation is already triangular.
	if len(soe) == 1 {
		return TSOE(soe)
	}
	// Cancel the first variable: e*soe[0][0] - soe[0]*e[0] zeroes e's lead.
	substracted := mapSOE(soe[1:], func(e Equation) Equation {
		return substract(soe[0], e, e[0], soe[0][0])
	})
	// Drop the (now zero) leading coefficient of each reduced equation.
	firsts := mapSOE(substracted, func(e Equation) Equation { return e[1:] })
	// Pivoting: if the next pivot is zero, swap in an equation whose leading
	// coefficient is non-zero; if none exists, the system is degenerate.
	if firsts[0][0] == 0 {
		i, ok := findEquation(firsts, func(e Equation) bool { return e[0] != 0 })
		if !ok {
			return nil
		}
		firsts[0], firsts[i] = firsts[i], firsts[0]
	}
	// Prefix each recursive row with 0 so columns line up under soe[0].
	return append(TSOE{soe[0]}, mapSOE(SOE(Triangulate(firsts)), func(e Equation) Equation {
		eq := make(Equation, 0, len(e)+1)
		eq = append(eq, 0)
		return append(eq, e...)
	})...)
}
// Solution is a SOE solution
type Solution []int

// CheckSolution reports whether sol satisfies every equation in soe, i.e.
// whether each left-hand side evaluated at sol equals its right-hand side.
func CheckSolution(soe SOE, sol Solution) bool {
	for _, eq := range soe {
		total := 0
		for i, coeff := range lhs(eq) {
			total += sol[i] * coeff
		}
		if total != rhs(eq) {
			return false
		}
	}
	return true
}
// Solve performs back-substitution on a triangular system and returns the
// solution vector. It relies on exact integer division, and it mutates
// tsoe[0]'s right-hand side in place while substituting.
func Solve(tsoe TSOE) Solution {
	// Base case: a*x = b stored as [a b].
	if len(tsoe) == 1 {
		return Solution{tsoe[0][1] / tsoe[0][0]}
	}
	// Solve the smaller triangular system below the first row.
	firsts := mapTSOE(tsoe[1:], func(e Equation) Equation { return e[1:] })
	sol := Solve(firsts)
	lst := len(tsoe[0]) - 1
	// Substitute the already-known variables into the first equation.
	for i := range sol {
		tsoe[0][lst] -= tsoe[0][i+1] * sol[i]
	}
	return append(Solution{tsoe[0][lst] / tsoe[0][0]}, sol...)
}
// whishlist below
// substract returns the element-wise combination e1*m1 - e2*m2. Both
// equations are expected to have the same length. (Name kept as-is so
// existing callers keep working.)
func substract(e1, e2 Equation, m1, m2 int) Equation {
	out := make(Equation, 0, len(e1))
	for i := range e1 {
		out = append(out, e1[i]*m1-e2[i]*m2)
	}
	return out
}
// mapSOE returns a new system built by applying f to every equation of soe.
func mapSOE(soe SOE, f func(Equation) Equation) SOE {
	out := make(SOE, 0, len(soe))
	for _, eq := range soe {
		out = append(out, f(eq))
	}
	return out
}
// mapTSOE returns a new triangular system built by applying f to every
// equation of tsoe.
func mapTSOE(tsoe TSOE, f func(Equation) Equation) TSOE {
	out := make(TSOE, 0, len(tsoe))
	for _, eq := range tsoe {
		out = append(out, f(eq))
	}
	return out
}
// lhs returns a copy of the left-hand-side coefficients of e (every element
// except the trailing right-hand-side constant).
func lhs(e Equation) []int {
	out := make([]int, len(e)-1)
	copy(out, e[:len(e)-1])
	return out
}
// rhs returns the right-hand-side constant of e (its last element).
func rhs(e Equation) int { return e[len(e)-1] }
// mapEquation returns a new equation built by applying f to every
// coefficient of e.
func mapEquation(e Equation, f func(int) int) Equation {
	out := make(Equation, 0, len(e))
	for _, coeff := range e {
		out = append(out, f(coeff))
	}
	return out
}
// findEquation returns the index of the first equation in soe satisfying f,
// or (-1, false) when none matches.
func findEquation(soe SOE, f func(Equation) bool) (int, bool) {
	for idx, eq := range soe {
		if f(eq) {
			return idx, true
		}
	}
	return -1, false
}
|
package main
import (
"encoding/json"
"github.com/gorilla/mux"
"log"
"net/http"
)
// Article is a minimal blog entry served as JSON; Id doubles as the URL
// path segment used for lookup.
type Article struct {
	Id string `json:"id,omitempty"`
	Title string `json:"title,omitempty"`
	Description string `json:"description,omitempty"`
}

// articles is the in-memory article store, seeded in main.
var articles []Article
// viewHandler writes the article whose Id matches the {id} path variable as
// JSON; when no article matches, it writes an empty Article instead.
func viewHandler(response http.ResponseWriter, request *http.Request) {
	response.Header().Add("Content-Type", "application/json")
	id := mux.Vars(request)["id"]
	for _, article := range articles {
		if article.Id != id {
			continue
		}
		json.NewEncoder(response).Encode(article)
		return
	}
	json.NewEncoder(response).Encode(&Article{})
}
// main seeds two sample articles and serves GET /{id} on :8080.
func main() {
	articles = []Article{
		{"1", "Some Title 1", "This is a test description"},
		{"2", "My Title 2", "A really interesting article"},
	}
	router := mux.NewRouter()
	router.HandleFunc("/{id}", viewHandler).Methods("GET")
	log.Fatal(http.ListenAndServe(":8080", router))
}
|
package initialize
import (
"crypto/rand"
"fmt"
"log"
"os"
"github.com/ejcx/passgo/v2/pc"
"github.com/ejcx/passgo/v2/pio"
"golang.org/x/crypto/nacl/box"
)
const (
	// saltLen is the byte length of generated salts.
	saltLen = 32
	// configFound is the fatal message shown when init finds an existing config.
	configFound = "A passgo config file was already found."
)
// Init will initialize a new password vault in the home directory.
// Init will initialize a new password vault in the home directory.
// It probes the existing vault state, prompts for a master password,
// creates the vault directories and files with restrictive permissions,
// derives a master key via scrypt, generates a NaCl box key pair, seals the
// private key with the master key, and persists the resulting config.
// All failures terminate the process via log.Fatalf.
func Init() {
	var needsDir bool
	var hasConfig bool
	var hasVault bool
	// Probe what already exists so we only create what is missing and can
	// refuse to clobber an existing config further below.
	if dirExists, err := pio.PassDirExists(); err == nil {
		if !dirExists {
			needsDir = true
		} else {
			if _, err := pio.PassConfigExists(); err == nil {
				hasConfig = true
			}
			if _, err := pio.SitesVaultExists(); err == nil {
				hasVault = true
			}
		}
	}
	passDir, err := pio.GetPassDir()
	if err != nil {
		log.Fatalf("Could not get pass dir: %s", err.Error())
	}
	sitesFile, err := pio.GetSitesFile()
	if err != nil {
		log.Fatalf("Could not get sites dir: %s", err.Error())
	}
	configFile, err := pio.GetConfigPath()
	if err != nil {
		log.Fatalf("Could not get pass config: %s", err.Error())
	}
	// Prompt for the password immediately. The reason for doing this is
	// because if the user quits before the vault is fully initialized
	// (probably during password prompt since it's blocking), they will
	// be able to run init again a second time.
	pass, err := pio.PromptPass("Please enter a strong master password")
	if err != nil {
		log.Fatalf("Could not read password: %s", err.Error())
	}
	if needsDir {
		// 0700: the vault directory must not be readable by other users.
		err = os.Mkdir(passDir, 0700)
		if err != nil {
			log.Fatalf("Could not create passgo vault: %s", err.Error())
		} else {
			fmt.Printf("Created directory to store passwords: %s\n", passDir)
		}
	}
	// Ensure the directory for encrypted file blobs exists as well.
	if fileDirExists, err := pio.PassFileDirExists(); err == nil {
		if !fileDirExists {
			encryptedFileDir, err := pio.GetEncryptedFilesDir()
			if err != nil {
				log.Fatalf("Could not get encrypted files dir: %s", err)
			}
			err = os.Mkdir(encryptedFileDir, 0700)
			if err != nil {
				log.Fatalf("Could not create encrypted file dir: %s", err)
			}
		}
	}
	// Don't just go around deleting things for users or prompting them
	// to delete things. Make them do this manaully. Maybe this saves 1
	// person an afternoon.
	if hasConfig {
		log.Fatalf(configFound)
	}
	// Create file with secure permission. os.Create() leaves file world-readable.
	config, err := os.OpenFile(configFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		log.Fatalf("Could not create passgo config: %s", err.Error())
	}
	config.Close()
	// Handle creation and initialization of the site vault.
	if !hasVault {
		// Create file, with secure permissions.
		sf, err := os.OpenFile(sitesFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
		if err != nil {
			log.Fatalf("Could not create pass sites vault: %s", err.Error())
		}
		// Initialize an empty SiteFile
		siteFileContents := []byte("[]")
		_, err = sf.Write(siteFileContents)
		if err != nil {
			log.Fatalf("Could not save site file: %s", err.Error())
		}
		sf.Close()
	}
	// Generate a master password salt.
	var keySalt [32]byte
	_, err = rand.Read(keySalt[:])
	if err != nil {
		log.Fatalf("Could not generate random salt: %s", err.Error())
	}
	// Create a new salt for encrypting public key.
	// NOTE(review): hmacSalt is generated but never stored in the config
	// below — confirm whether it is intentionally unused here.
	var hmacSalt [32]byte
	_, err = rand.Read(hmacSalt[:])
	if err != nil {
		log.Fatalf("Could not generate random salt: %s", err.Error())
	}
	// kdf the master password.
	passKey, err := pc.Scrypt([]byte(pass), keySalt[:])
	if err != nil {
		log.Fatalf("Could not generate master key from pass: %s", err.Error())
	}
	pub, priv, err := box.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatalf("Could not generate master key pair: %s", err.Error())
	}
	// Encrypt master private key with master password key.
	sealedMasterPrivKey, err := pc.Seal(&passKey, priv[:])
	if err != nil {
		log.Fatalf("Could not encrypt master key: %s", err.Error())
	}
	// Persist the public key, the sealed private key, and the KDF salt.
	passConfig := pio.ConfigFile{
		MasterKeyPrivSealed: sealedMasterPrivKey,
		MasterPubKey: *pub,
		MasterPassKeySalt: keySalt,
	}
	if err = passConfig.SaveFile(); err != nil {
		log.Fatalf("Could not write to config file: %s", err.Error())
	}
	fmt.Println("Password Vault successfully initialized")
}
|
package Contains_Duplicate_III
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestContainsDuplicates checks containsNearbyAlmostDuplicate against three
// known cases, including an overflow-prone one near math.MaxInt32.
func TestContainsDuplicates(t *testing.T) {
	ast := assert.New(t)
	cases := []struct {
		nums     []int
		k, d     int
		expected bool
	}{
		{[]int{1, 5, 9, 1, 5, 9}, 2, 3, false},
		{[]int{1, 2, 3, 1}, 3, 0, true},
		{[]int{-1, 2147483647}, 1, 2147483647, false},
	}
	for _, c := range cases {
		ast.Equal(c.expected, containsNearbyAlmostDuplicate(c.nums, c.k, c.d))
	}
}
|
package logger
import (
"fmt"
"myRPC/util"
"os"
"path/filepath"
"time"
)
const (
	// default_path is the fallback log directory when none is configured.
	default_path = "../logs/"
	// default_max_size is the fallback split threshold in bytes (~50 MB).
	default_max_size = 50000000
)
// FileOutputer writes log records to a file that is rotated by date and size.
type FileOutputer struct {
	// file is the currently open log file handle.
	file *os.File
	// maxSize is the maximum file size in bytes before a split.
	maxSize int64
	// path is the absolute directory holding the log files.
	path string
	// originFileName is the base log name without timestamps.
	originFileName string
	// curFileName is the name of the file currently being written.
	curFileName string
	// createTime records when the current file was created.
	createTime string
	// lastSplitDay is the day-of-month of the last split.
	lastSplitDay int
}
// NewFileOutputer builds a file-backed log outputer from the config params.
// Missing or mistyped "path"/"max_size" entries now fall back to the defaults
// instead of panicking on a failed type assertion.
func NewFileOutputer(params map[interface{}]interface{}, originFileName string) (*FileOutputer, error) {
	// Comma-ok assertions: a missing key yields the zero value, which the
	// checks below replace with the default.
	path, _ := params["path"].(string)
	if path == "" {
		path = default_path
	}
	maxSize, _ := params["max_size"].(int)
	if maxSize == 0 {
		maxSize = default_max_size
	}
	absPath, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}
	out := &FileOutputer{
		maxSize:        int64(maxSize),
		path:           absPath,
		originFileName: originFileName,
		createTime:     "",
		lastSplitDay:   -1,
	}
	err = out.init()
	return out, err
}
// init creates the log directory if needed, derives the current file name,
// opens the log file, and records today's day for split detection.
func (f *FileOutputer) init() (err error) {
	// Create the log directory first if it does not exist yet.
	if !util.IsFileExist(f.path) {
		err := os.Mkdir(f.path, os.ModePerm)
		if err != nil {
			return err
		}
	}
	// Derive the file name for a newly created log file.
	curFilename,createTime := f.getCurFilename("create")
	f.createTime = createTime
	f.curFileName = curFilename
	// Open (or create) the log file itself.
	f.file, err = f.initFile(curFilename)
	if err != nil {
		return err
	}
	f.lastSplitDay = time.Now().Day()
	return nil
}
// getCurFilename builds a log file name for the given mode: "create" embeds
// only the creation timestamp, "close" embeds both creation and close
// timestamps (used when archiving a rotated file). It also returns the
// timestamp string it generated.
// NOTE(review): the timestamp uses unpadded %d fields, so names do not sort
// lexicographically — confirm that is acceptable before relying on ordering.
func (f *FileOutputer) getCurFilename(nameType string) (curFilename string,nowTime string) {
	now := time.Now()
	createTime := fmt.Sprintf("%d-%d-%d %d-%d-%d",now.Year(),now.Month(),now.Day(),now.Hour(),now.Minute(),now.Second())
	if nameType == "create" {
		curFilename = fmt.Sprintf("[%s][%s].log", f.originFileName,
			createTime)
	}else if nameType == "close"{
		curFilename = fmt.Sprintf("[%s][%s][%s].log", f.originFileName,
			f.createTime,createTime)
	}
	return curFilename,createTime
}
// initFile opens (creating if needed) the named log file inside f.path for
// appending, with permissions 0755.
func (f *FileOutputer) initFile(filename string) (file *os.File, err error) {
	// Use filepath.Join rather than a hard-coded "\\" separator so the path
	// is correct on every platform, not just Windows.
	file, err = os.OpenFile(filepath.Join(f.path, filename), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0755)
	if err != nil {
		err = fmt.Errorf("open file %s failed, err:%v", filename, err)
		return
	}
	return
}
// checkSplitFile rotates the log file when the calendar day has changed since
// the last split, or when the current file has grown past maxSize. Rotation
// archives the old file via Close and reopens a fresh one via init.
func (f *FileOutputer) checkSplitFile(curTime time.Time, file *os.File) error {
	day := curTime.Day()
	// A new calendar day has started: archive the old file and reopen.
	if day != f.lastSplitDay {
		if err := f.Close(); err != nil {
			return err
		}
		if err := f.init(); err != nil {
			return err
		}
	}
	// Use filepath.Join rather than a hard-coded "\\" separator so the stat
	// works on every platform, not just Windows.
	fileInfo, err := os.Stat(filepath.Join(f.path, f.curFileName))
	if err != nil {
		return err
	}
	// The file exceeded the size limit: archive and reopen as well.
	if fileInfo.Size() >= f.maxSize {
		if err := f.Close(); err != nil {
			return err
		}
		if err := f.init(); err != nil {
			return err
		}
	}
	return nil
}
// Write appends one serialized log record to the file, first rotating the
// file if the date changed or the size limit was reached.
func (f *FileOutputer) Write(data *LogData)(error) {
	// Rotate the file if necessary before writing.
	err := f.checkSplitFile(data.curTime,f.file)
	if err != nil {
		return err
	}
	// Append the serialized record to the current file.
	_,err = f.file.Write(data.Bytes())
	return err
}
// Close archives the current log file: it closes the handle and renames the
// file to its archival name, which embeds the close timestamp.
func (f *FileOutputer) Close() error {
	curFilename, _ := f.getCurFilename("close")
	// Close before renaming so the rename also succeeds on Windows, where an
	// open file cannot be renamed.
	if err := f.file.Close(); err != nil {
		return err
	}
	// Bug fix: the original rename destination omitted the directory (and the
	// source used a hard-coded "\\"), so archived files landed in the process
	// working directory. Keep both paths inside f.path.
	return os.Rename(filepath.Join(f.path, f.curFileName), filepath.Join(f.path, curFilename))
}
|
package anton
// SLAVE CONSOLIDATION CHECKED
import (
"fmt"
"strconv"
"strings"
)
/*
Compare Lines Function Breakdown:
- Line Method
- CompareSlaveLineToMasterLine
- ValidateSingleLine
- Master
- Slave
- ValidateAgainstProfile
- ValidateAgainstLine
- CompareJuiceValues
- CompareSpreadLine
- CompareTotalLine
- CompareTeamTotalLine
*/
// CompareSlaveLineToMasterLine validates the slave line against the master
// line, the slave's first profile, and the configured juice/spread
// parameters, then dispatches to the line-type-specific comparison.
// Outcomes are recorded on the slave line itself via LineStatus
// ("Validated"/"Error"/"Skipped"), FunctionLog, and ErrorLog; nothing is
// returned.
func (slaveLine *Lines) CompareSlaveLineToMasterLine(masterLine Lines, slave Slave, profile Profile) {
	// This flag is to ensure that we have met all other criteria prior to comparing, default "False"
	preChecksValidFlag := "False"
	var helper Helper
	// I want to add the Team from MasterLine to SlaveLine
	slaveLine.Team = masterLine.Team
	// Now lets start these checks, Let's validate both Lines to see if values are populated correctly
	slaveLine.ValidateSingleLine()
	masterLine.ValidateSingleLine()
	// Let's start by getting our parameters from the Profile
	var juiceParameter float64
	var spreadParameter float64
	// If they both are Validated, that means they are populated correctly, so we can compare them now
	if slaveLine.LineStatus == "Validated" && masterLine.LineStatus == "Validated" {
		// Next, let's compare it to the profiles to see whether the Slave is following the master on these lines
		if len(slave.Profiles) >= 1 {
			// This variable is our parameter for what is the maximum difference of Juice between Slave and Master
			// Because GoLang uses 0 if there is an error when converting to float/integers, we need to see if error or not
			juiceParameter, _ = strconv.ParseFloat(slave.Profiles[0].JuiceParameter, 32)
			if juiceParameter == 0 && profile.JuiceParameter != "0" && profile.JuiceParameter != "0.0" {
				slaveLine.LineStatus = "Error"
				slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Could Not Parse Juice Parameter to Float Value")
			}
			// This variable is our parameter for what is the maximum difference of Spread values between Slave and Master
			// Because GoLang uses 0 if there is an error when converting to float/integers, we need to see if error or not
			spreadParameter, _ = strconv.ParseFloat(slave.Profiles[0].SpreadParameter, 32)
			if spreadParameter == 0 && profile.SpreadParameter != "0" && profile.SpreadParameter != "0.0" {
				slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Could Not Parse Spread Parameter to Float Value")
			}
			slaveLine.ValidateAgainstProfile(slave.Profiles[0])
		} else {
			slaveLine.LineStatus = "Error"
			slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave does not have a Profile Attached")
		}
	}
	// If slaveLine is still Validated, then we can pre-check versus the Master Line
	if slaveLine.LineStatus == "Validated" && masterLine.LineStatus == "Validated" {
		slaveLine.ValidateAgainst(masterLine)
	}
	// If slaveLine is still Validated after single line validation, profile validation, pre-check validation, flip flag
	if slaveLine.LineStatus == "Validated" && masterLine.LineStatus == "Validated" {
		preChecksValidFlag = "True"
	}
	// This will print if it failed the Single Validation or the Pre Check Comparisons
	if slaveLine.LineStatus == "Error" || masterLine.LineStatus == "Error" {
		// Copy the master line's errors onto the slave so one log holds both sides.
		if len(masterLine.ErrorLog) >= 1 {
			for i := range masterLine.ErrorLog {
				slaveLine.ErrorLog = append(slaveLine.ErrorLog, "MasterLine Error: "+masterLine.ErrorLog[i])
			}
		}
		slaveLine.LineStatus = "Error"
		slaveLine.FunctionLog = "[#CompareSlaveLineToMasterLine] Comparing Slave Line to an Error'd Master Line"
		slaveLine.PrintComparedLines(masterLine)
	}
	// Now that we've checked, we can compare the two Lines now by calling the functions below
	if preChecksValidFlag == "True" {
		// First, lets compare the juice to see if it is within the parameter
		slaveLine.compareJuiceValues(masterLine, juiceParameter)
		// Only continue if Juice Values comparisons are passed
		if strings.HasPrefix(slaveLine.FunctionLog, "[#CompareJuiceValues Passed]") {
			// Since MoneyLine doesn't have anything else to compare since Juice is done, let's do Total & Spread
			if slaveLine.LineType == "Total" && masterLine.LineType == "Total" {
				slaveLine.compareTotalLine(masterLine, spreadParameter)
			} else if slaveLine.LineType == "Spread" && masterLine.LineType == "Spread" {
				slaveLine.compareSpreadLine(masterLine, spreadParameter)
			} else if slaveLine.LineType == "TeamTotal" && masterLine.LineType == "TeamTotal" {
				slaveLine.compareTeamTotalLine(masterLine, spreadParameter)
			} else if slaveLine.LineType != "MoneyLine" && masterLine.LineType != "MoneyLine" {
				slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Unable to triage Line Type to Compare")
			}
		} else if strings.HasPrefix(slaveLine.FunctionLog, "[#CompareJuiceValues Failed]") {
			slaveLine.LineStatus = "Skipped"
			slaveLine.FunctionLog = helper.ReplaceParameters(slaveLine.FunctionLog, "[#CompareJuiceValues Failed]", "[#CompareJuiceValues Skipped]")
		} else if strings.HasPrefix(slaveLine.FunctionLog, "[#CompareJuiceValues Skipped]") {
			slaveLine.LineStatus = "Skipped"
		}
		// Let's validate slave line one more time, let's create a new variable so we don't mess up any function logs
		finalValidation := *slaveLine
		finalValidation.ValidateSingleLine()
		if finalValidation.LineStatus == "Error" {
			slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave Line did not pass Final "+
				"Validation after Line Comparisons")
		}
		// If there are anything attached to the Error Log, we know there is Errors, so Flip Status to Error
		if len(slaveLine.ErrorLog) > 0 {
			slaveLine.LineStatus = "Error"
			slaveLine.FunctionLog = "[#ComparedLines] Error Occurred during Lines Comparison"
		}
	}
}
// This function validates the shared properties between Slave and Master, and is used by the two functions above
func (line *Lines) ValidateSingleLine() {
// Declare the helper struct to access the helper functions
var helper Helper
// Let's start by validating the values that we know are fixed in slices in the configurableParameters file
// ------------------------------------------------------------------------------- BetType
// All Lines need to be differentiated between being an Master or Slave Line
// Values are: Master, Slave
betTypeValues := GetBetTypeValues()
if helper.FindIfStringInSlice(line.BetType, betTypeValues) == "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: BetType -> Should be either: "+
strings.Join(betTypeValues, ", "))
}
// ------------------------------------------------------------------------------- LineType
// All Lines need to be differentiated between having some sort of LineType
// Values are: MoneyLine, Total, Spread, TeamTotal
lineTypeValues := GetLineTypeValues()
if helper.FindIfStringInSlice(line.LineType, lineTypeValues) == "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineType -> Should be either: "+
strings.Join(lineTypeValues, ", "))
}
// ------------------------------------------------------------------------------- HomeAway
// Removed HomeAway as required field, only throw Error if it's not empty and is not either one of the Values
// Values are: Home, Away
homeAwayValues := GetHomeAwayValues()
if line.HomeAway != "" {
if helper.FindIfStringInSlice(line.HomeAway, homeAwayValues) == "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: HomeAway -> Should be either: "+
strings.Join(homeAwayValues, ", "))
}
}
// ------------------------------------------------------------------------------- CreatedViaFunction
// All Lines should be created through Functions
// Values are: True
createdViaFunctionValues := GetCreatedViaFunctionValues()
if line.BetType != "Master" {
if helper.FindIfStringInSlice(line.CreatedViaFunction, createdViaFunctionValues) == "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: CreatedViaFunction -> Line "+
"should be created leveraging Create Function")
}
}
// ------------------------------------------------------------------------------- FavoredUnderdog
// This should only be applicable to LineType = Spread
// Values are: Favored, Underdog
favoredUnderdogValues := GetFavoredUnderdogValues()
if line.LineType == "Spread" {
if helper.FindIfStringInSlice(line.FavoredUnderdog, favoredUnderdogValues) == "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: FavoredUnderdog -> Should be "+
"either: "+strings.Join(favoredUnderdogValues, ", "))
}
}
// ------------------------------------------------------------------------------- OverUnder
// This should only be applicable to LineType = Total
// Values are: Over, Under
overUnderValues := GetOverUnderValues()
if line.LineType == "Total" {
if helper.FindIfStringInSlice(line.OverUnder, overUnderValues) == "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: OverUnder -> Should be either: "+
strings.Join(overUnderValues, ", "))
}
}
// Let's now check the fields that should be required, but do not have set values, and check if they are blank
// ------------------------------------------------------------------------------- BettingSite
// Need to make sure that BettingSites are correctly configured and not empty
if line.BetType == "Master" {
if line.MasterSite == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: BettingSite -> Please assign the"+
"Site Name where Line is found")
} else if !strings.HasPrefix(line.MasterSite, "http") || !strings.HasSuffix(line.MasterSite, "/") {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: BettingSite -> Incorrectly "+
"structured, ex. http://247sports.bet/")
}
} else if line.BetType == "Slave" {
if line.SlaveSite == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: BettingSite -> Please assign the"+
"Site Name where Line is found")
} else if !strings.HasPrefix(line.SlaveSite, "http") || !strings.HasSuffix(line.SlaveSite, "/") {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: BettingSite -> Incorrectly "+
"structured, ex. http://247sports.bet/")
}
}
// ------------------------------------------------------------------------------- MasterName & MasterPass
if line.BetType == "Master" {
if line.MasterName == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: MasterName -> Please assign"+
" the Master Name where Line is found")
}
if line.MasterPass == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: MasterPass -> Please assign"+
" the Master Pass where Line is found")
}
}
// ------------------------------------------------------------------------------- MasterName & MasterPass
if line.BetType == "Slave" {
if line.SlaveName == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: SlaveName -> Please assign"+
" the Slave Name where Line is found")
}
if line.SlavePass == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: SlavePass -> Please assign"+
" the Slave Pass where Line is found")
}
}
// ------------------------------------------------------------------------------- RotationNumber
if line.RotationNumber == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: RotationNumber -> Please "+
"assign the RotationNumber")
}
// Convert to Float and if it doesn't match the current Float value, there is an Error
tempRotationFloatValue, _ := strconv.ParseFloat(line.RotationNumber, 32)
// Rotation Numbers should always be a Positive Number, change it to a Float and see if it is greater than 0
if tempRotationFloatValue <= 0 {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: RotationNumber -> Please "+
"assign a string value of a Number")
}
// ------------------------------------------------------------------------------- TicketID
/*
if line.TicketID == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: TicketID -> Please "+
"assign the TicketID")
}
*/
// Let's now do specific requirement values for specific LineTypes: MoneyLine vs Spread vs Total
// ------------------------------------------------------------------------------- LineSpread
// Let's check if LineSpread is even populated, if not, add failed check
if line.LineSpread == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineSpread -> Please assign the Spread "+
"Value")
}
// Let's first make sure MoneyLine has a LineSpread of some EVEN values
lineSpreadEvenValues := GetSpreadEvenValues()
// Let's see if the LineSpread is an EVEN value, this will be "False" if the Spread is not an EVEN Value
lineSpreadEvenFlag := helper.FindIfStringInSlice(strings.ToUpper(line.LineSpread), lineSpreadEvenValues)
if line.LineType == "MoneyLine" && lineSpreadEvenFlag == "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineSpread -> LineType is MoneyLine but "+
"LineSpread is not an EVEN value")
}
// ------------------------------------------------------------------------------- LineSpreadFloat
// Let's check if LineSpread is even populated, if not, add failed check
if line.LineSpreadFloat == 0 && lineSpreadEvenFlag == "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineSpreadFloat -> LineSpread is not an "+
"EVEN value but LineSpreadFloat is 0")
}
// We know it errors if not populated, we can start Validating the values and see if LineSpreadFloat is okay
// Lets start first by checking "0" or EVEN values, this checks if it is a value in the slice lineSpreadEvenValues
if line.LineSpreadFloat != 0 && lineSpreadEvenFlag != "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineSpreadFloat -> LineSpread is an EVEN"+
" value but LineSpreadFloat is not 0")
}
// Convert to Float and if it doesn't match the current Float value, there is an Error
tempSpreadFloatValue, _ := strconv.ParseFloat(line.LineSpread, 32)
// The exception is that Total Lines will have their spread always positive, we'll check this next
if line.LineType != "Total" {
if tempSpreadFloatValue != line.LineSpreadFloat && tempSpreadFloatValue != (line.LineSpreadFloat*-1) {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineSpreadFloat -> LineSpread did "+
"not get properly parsed into a Float Value")
}
}
// Let's check for consistency on LineSpreadFloats, All Total Spread Floats should be positive
if line.LineType == "Total" && line.LineSpreadFloat < 0 {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineSpreadFloat -> Need Consistency, "+
"Value is Negative, LineType = Total, Float should always be positive")
}
// -------------------- Spread - Spread Floats
if line.LineType == "Spread" {
// All Spread Favored should be negatives, if it is positive, multiply it by (-1)
if line.LineSpreadFloat > 0 && line.FavoredUnderdog == "Favored" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineSpreadFloat -> Need Consistency,"+
" Value is Positive, LineType = Spread & Favored, Float should be Negative")
}
// All Spread Underdog should be positive, if it is negative, multiply it by (-1)
if line.LineSpreadFloat < 0 && line.FavoredUnderdog == "Underdog" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineSpreadFloat -> Need Consistency,"+
" Value is Negative, LineType = Spread & Underdog, Float should be Positive")
}
if line.LineSpreadFloat == 0 && line.FavoredUnderdog != "Pick" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineSpreadFloat -> Need Consistency,"+
" Value is Not Zero, LineType = Spread & Pick, Float should be Zero")
}
}
// ------------------------------------------------------------------------------- LineJuice
// Let's check if LineJuice is even populated, if not, add failed check
if line.LineJuice == "" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineJuice -> Please assign Juice Value")
}
// Let's first make sure MoneyLine has a LineJuice of some EVEN values
lineJuiceEvenValues := GetJuiceEvenValues()
// Let's see if the LineJuice is an EVEN value, this will be "False" if the Spread is not an EVEN Value
lineJuiceEvenFlag := helper.FindIfStringInSlice(strings.ToUpper(line.LineJuice), lineJuiceEvenValues)
// ------------------------------------------------------------------------------- LineJuiceFloat
// We know if the LineJuice is not in the EVEN slice, the Flag will not be "False", check that vs if Float is 100
if lineJuiceEvenFlag != "False" && line.LineJuiceFloat != 100 {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineJuiceFloat -> LineJuice is an EVEN "+
"value but LineJuiceFloat is not 100")
}
// Let's check if LineJuiceFloat is even populated, default value is 0 and if it is not an EVEN Value, Error
if lineJuiceEvenFlag == "False" && line.LineJuiceFloat == 100 {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineJuiceFloat -> LineJuice is not an "+
"EVEN value but LineJuiceFloat is 100")
}
// Convert to Float and if it doesn't match the current Float value, there is an Error
tempJuiceFloatValue, _ := strconv.ParseFloat(line.LineJuice, 32)
if tempJuiceFloatValue != line.LineJuiceFloat && tempJuiceFloatValue != (line.LineJuiceFloat*-1) {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineJuiceFloat -> LineJuice did not get "+
"properly parsed into a Float Value")
}
// I also know that FloatValues shouldn't be between -99 and 99 so I will check for that
if line.LineJuiceFloat <= 99 && line.LineJuiceFloat >= -99 {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: Line Juice Float Value is between +99 "+
"and -99")
}
// Now I can start ending this function
// ------------------------------------------------------------------------------- LineStatus
// Values are: New, Validated, Authorized, Placed, Error
lineStatusValues := GetLineStatusValues()
if line.LineStatus == "" {
line.LineStatus = "Error"
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineStatus -> Currently Blank, Please assign one"+
" of the values: "+strings.Join(lineStatusValues, ", "))
} else if helper.FindIfStringInSlice(line.LineStatus, lineStatusValues) == "False" {
line.ErrorLog = append(line.ErrorLog, "{betType} {lineType}: LineStatus -> Should be either: "+
strings.Join(lineStatusValues, ", "))
}
// Let's now replace all Error log's {betType} and {lineType}
for i := range line.ErrorLog {
line.ErrorLog[i] = helper.ReplaceParameters(line.ErrorLog[i], "{betType}", line.BetType, "{lineType}",
line.LineType)
}
// ------------------------------------------------------------------------------- LineLog
// Slave Line Logs
// If there are failed checks from this function and Log is empty, log it
if line.BetType == "Slave" && len(line.ErrorLog) != 0 {
line.LineStatus = "Error"
line.FunctionLog = "[#validateSingleLine] New Slave Line has not yet passed Validation"
// If there are no failed checks and Log is empty, Log it and Change Status to Validated
} else if line.BetType == "Slave" && len(line.ErrorLog) == 0 {
line.LineStatus = "Validated"
line.FunctionLog = "[#validateLine] New Slave Line passed Validation"
}
// Master Line Logs
// If there are failed checks from this function and Log is empty, log it
if line.BetType == "Master" && len(line.ErrorLog) != 0 {
line.LineStatus = "Error"
line.FunctionLog = "[#validateLine] New Master Line has not yet passed Validation"
// If there are no failed checks and Log is empty, Log it and Change Status to Validated
} else if line.BetType == "Master" && len(line.ErrorLog) == 0 {
line.LineStatus = "Validated"
line.FunctionLog = "[#validateLine] New Master Line passed Validation"
}
}
// ValidateAgainstProfile checks this (Slave) line against a Master's Profile
// and marks the line "Skipped" when the profile says this League / LineType /
// Period combination should not be followed. Only MLB, NBA and NFL are
// handled; any other League falls through to the final "following" log.
func (line *Lines) ValidateAgainstProfile(profile Profile) {
    // Things we need to check for are Period and LineType after checking League first
    if line.League == "MLB" {
        // These are the four different LineTypes we will check against the profile
        if line.LineType == "MoneyLine" && profile.SportsSettings.MLB.MoneyLine != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following MLB MoneyLine from this Master"
        } else if line.LineType == "Spread" && profile.SportsSettings.MLB.Spread != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following MLB Spread from this Master"
        } else if line.LineType == "Total" && profile.SportsSettings.MLB.Total != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following MLB Total from this Master"
        } else if line.LineType == "TeamTotal" && profile.SportsSettings.MLB.TeamTotal != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following MLB TeamTotal from this Master"
        }
        // These are the different Periods, for MLB, there is only two
        // NOTE(review): both branches below compare Period to "", so the second
        // branch can never fire — confirm the intended Period values for
        // OneFiveInnings vs Game against the code that populates line.Period.
        if line.Period == "" && profile.SportsSettings.MLB.OneFiveInnings != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following MLB 1st Five Innings from this Master"
        } else if line.Period == "" && profile.SportsSettings.MLB.Game != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following MLB Game from this Master"
        }
    } else if line.League == "NBA" {
        // These are the four different LineTypes we will check against the profile
        if line.LineType == "MoneyLine" && profile.SportsSettings.NBA.MoneyLine != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA MoneyLine from this Master"
        } else if line.LineType == "Spread" && profile.SportsSettings.NBA.Spread != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA Spread from this Master"
        } else if line.LineType == "Total" && profile.SportsSettings.NBA.Total != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA Total from this Master"
        } else if line.LineType == "TeamTotal" && profile.SportsSettings.NBA.TeamTotal != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA TeamTotal from this Master"
        }
        // These are the different Periods, for NBA, there is six
        // NOTE(review): every branch tests Period == "", so only the Game branch
        // can ever fire — same concern as the MLB section above.
        if line.Period == "" && profile.SportsSettings.NBA.Game != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA Game from this Master"
        } else if line.Period == "" && profile.SportsSettings.NBA.OneHalf != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA First Half from this Master"
        } else if line.Period == "" && profile.SportsSettings.NBA.TwoHalf != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA Second Half from this Master"
        } else if line.Period == "" && profile.SportsSettings.NBA.OneQuarter != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA First Quarter from this Master"
        } else if line.Period == "" && profile.SportsSettings.NBA.TwoQuarter != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA Second Quarter from this Master"
        } else if line.Period == "" && profile.SportsSettings.NBA.ThreeQuarter != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA Third Quarter from this Master"
        } else if line.Period == "" && profile.SportsSettings.NBA.FourQuarter != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NBA Fourth Quarter from this Master"
        }
    } else if line.League == "NFL" {
        // These are the four different LineTypes we will check against the profile
        if line.LineType == "MoneyLine" && profile.SportsSettings.NFL.MoneyLine != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL MoneyLine from this Master"
        } else if line.LineType == "Spread" && profile.SportsSettings.NFL.Spread != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL Spread from this Master"
        } else if line.LineType == "Total" && profile.SportsSettings.NFL.Total != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL Total from this Master"
        } else if line.LineType == "TeamTotal" && profile.SportsSettings.NFL.TeamTotal != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL TeamTotal from this Master"
        }
        // These are the different Periods, for NFL, there is six
        // NOTE(review): same Period == "" pattern as above — only the Game branch
        // is reachable.
        if line.Period == "" && profile.SportsSettings.NFL.Game != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL Game from this Master"
        } else if line.Period == "" && profile.SportsSettings.NFL.OneHalf != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL First Half from this Master"
        } else if line.Period == "" && profile.SportsSettings.NFL.TwoHalf != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL Second Half from this Master"
        } else if line.Period == "" && profile.SportsSettings.NFL.OneQuarter != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL First Quarter from this Master"
        } else if line.Period == "" && profile.SportsSettings.NFL.TwoQuarter != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL Second Quarter from this Master"
        } else if line.Period == "" && profile.SportsSettings.NFL.ThreeQuarter != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL Third Quarter from this Master"
        } else if line.Period == "" && profile.SportsSettings.NFL.FourQuarter != "Yes" {
            line.FunctionLog = "[#ValidateAgainstProfile] Slave is not following NFL Fourth Quarter from this Master"
        }
    }
    // If any branch above wrote a [#ValidateAgainstProfile] log, the Slave is
    // not following the Master on this Line, so it gets skipped.
    if strings.HasPrefix(line.FunctionLog, "[#ValidateAgainstProfile]") {
        line.LineStatus = "Skipped"
    } else {
        line.FunctionLog = "[#ValidateAgainstProfile] Slave is following Master on this Line"
    }
}
// ValidateAgainst runs the Pre-Checks of a Slave line against its candidate
// Master line prior to comparing the actual juice/spread values. It copies the
// Master's identity fields onto the Slave, accumulates ErrorLog entries for
// any mismatch, and ends by setting LineStatus/FunctionLog to one of:
// "Ignored" (rotation/line-type mismatch), "Error" (failed checks) or
// "Validated" (all Pre-Checks passed).
func (slaveLine *Lines) ValidateAgainst(masterLine Lines) {
    // Declare the helper struct to access the helper functions
    var helper Helper
    // Apply inheritance values from MasterLine
    slaveLine.MasterName = masterLine.MasterName
    slaveLine.MasterPass = masterLine.MasterPass
    slaveLine.MasterSite = masterLine.MasterSite
    slaveLine.MasterTicketID = masterLine.TicketID
    // Make sure the Slave line has already passed the single-line validation.
    // Master line formatting is not fully known yet, so its LineStatus is not
    // checked here; we only know it should not be "New", "Error" or "Ignore".
    // NOTE(review): the three conditions are AND-ed, so a non-Validated Slave
    // with an empty ErrorLog (or a different FunctionLog prefix) escapes this
    // check — confirm whether OR was intended.
    if slaveLine.LineStatus != "Validated" && strings.HasPrefix(slaveLine.FunctionLog,
        "[#validateLine]") && len(slaveLine.ErrorLog) > 0 {
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave {lineType}: has not been Validated"+
            " prior to Pre-Checks")
    }
    // If Rotation Number / LineTypes don't match, I want to set the LineStatus to "Ignored", because there are multiple
    // inputs for each Rotation Number, think of it mapping like X and Y on a graph, both must match for it to compare
    // I don't want to call it out as an "Error" so I'm putting it as "Ignored" so it won't get printed because of this
    // Rotation Numbers - Same between Slave and Master Lines
    if slaveLine.RotationNumber != masterLine.RotationNumber {
        slaveLine.LineStatus = "Ignored"
        slaveLine.FunctionLog = "[#PreCheck Failed] Slave Line Ignored, Rotation Numbers are not matching"
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave {lineType}: Rotation Numbers do "+
            "not match with Master")
    }
    // LineType - Same between Slave, Master
    if slaveLine.LineType != masterLine.LineType {
        slaveLine.LineStatus = "Ignored"
        slaveLine.FunctionLog = "[#PreCheck Failed] Slave Line Ignored, LineTypes are not matching"
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave {lineType}: Line Types do not "+
            "match between Slave and Master")
    }
    // We don't know what LineStatus the Master will come in as, but we do know
    // what it should not be: Error.
    if masterLine.LineStatus == "Error" {
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave {lineType}: Comparing to an "+
            "Error'ed Master Line")
    }
    // The rest of the Pre-Checks will turn the Slave Line to an Error
    // OverUnder - If both are Total or TeamTotal, make sure they are the same between Slave, Master
    if (slaveLine.LineType == "Total" && masterLine.LineType == "Total") || (slaveLine.LineType == "TeamTotal" &&
        masterLine.LineType == "TeamTotal") {
        if slaveLine.OverUnder != masterLine.OverUnder {
            slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave Total: OverUnder Values are not matching")
        }
    }
    /*
        Commented out, spread FavoredUnderdogs should be consistent due to rotation numbers matching, otherwise if it flips
        between +1 and -1 spread, it won't place
        // FavoredUnderdog - If both are Spread, Values should be the same between Slave, Master
        if slaveLine.FavoredUnderdog != masterLine.FavoredUnderdog {
            if slaveLine.FavoredUnderdog != "Pick" && masterLine.FavoredUnderdog != "Pick" {
                if slaveLine.LineType == "Spread" && masterLine.LineType == "Spread" {
                    slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave Spread: FavoredUnderdog Values"+
                        " are not matching")
                }
            }
        }
    */
    // MoneyLine lines must not carry an OverUnder value on either side.
    if slaveLine.LineType == "MoneyLine" && masterLine.LineType == "MoneyLine" {
        if slaveLine.OverUnder != "" {
            slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave MoneyLine: OverUnder Values"+
                " shouldn't be populated")
        }
        if masterLine.OverUnder != "" {
            slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Master MoneyLine: OverUnder Values"+
                " shouldn't be populated")
        }
    }
    // BetType - Slave is "Slave", Master is "Master"
    if slaveLine.BetType != "Slave" {
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave {lineType}: Line for Pre-Check is"+
            " not BetType = 'Slave'")
    }
    if masterLine.BetType != "Master" {
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Master {lineType}: Line for Pre-Check is not"+
            " BetType = 'Master'")
    }
    // ErrorLog = Is Empty for Master, Slave will be checked at the end of this function
    if len(masterLine.ErrorLog) != 0 {
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave {lineType}: Compared Master Line"+
            " has Error Logs attached")
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, masterLine.ErrorLog...)
    }
    // Now we can start towards ending this function
    // If there are any ErrorLogs, that means it failed whether a check from Validation or this Pre-Check
    if len(slaveLine.ErrorLog) > 0 {
        // Substitute the {lineType} placeholder in every accumulated message.
        for i := range slaveLine.ErrorLog {
            slaveLine.ErrorLog[i] = helper.ReplaceParameters(slaveLine.ErrorLog[i], "{lineType}",
                slaveLine.LineType)
        }
        if slaveLine.LineStatus == "Ignored" {
            slaveLine.FunctionLog = "[#Pre-Check Failed] Ignored due to either Rotation Number or LineType not" +
                " matching"
        } else if slaveLine.LineStatus == "Validated" && slaveLine.FunctionLog == "[#validateLine] New "+
            "Slave Line passed Validation" {
            // NOTE(review): this compares against the exact log string written by
            // validateLine — fragile if that wording ever changes.
            // If it's in here, we know that the Slave Line passed Single Line Validation but failed Pre-Checks
            slaveLine.LineStatus = "Error"
            slaveLine.FunctionLog = "[#Pre-Check Failed] Slave Line Passed Singular Validation Checks, But " +
                "Failed Pre-Checks prior to Comparisons"
        } else if slaveLine.LineStatus != "Ignored" {
            // If in here, we know that the Slave Line failed Single Line Validation, which should fail Pre-Checks
            // The only exception is if Rotation and LineType's don't match, which should be ignored
            slaveLine.LineStatus = "Error"
            slaveLine.FunctionLog = "[#Pre-Check Failed] Slave Line Failed Singular Validation Checks, And " +
                "therefore Pre-Checks, prior to Comparisons"
        }
    } else if slaveLine.LineStatus != "Ignored" {
        // This means that the Slave Line has passed Validation and Pre-Check Validation
        slaveLine.LineStatus = "Validated"
        slaveLine.FunctionLog = "[#Pre-Check Passed] Slave Line has Passed Singular Line & Pre-Check Validation"
    }
}
// ----------------------------------- Helper Function #1 - Compare Juice values ------------------------------------ //
// -------------- Goal of this function is to take Slave and Master Line Struct & Compare Juice values -------------- //
// compareJuiceValues compares the Slave's juice against the Master's juice
// within an allowed tolerance (juiceParam). EVEN juice is stored as +100 and
// may be flipped to -100 to align signs before comparing. For MoneyLine lines
// the result is finalized here (LineStatus "Authorized"/"Skipped"); for other
// line types only FunctionLog is written and the spread comparison decides.
func (slaveLine *Lines) compareJuiceValues(masterLine Lines, juiceParam float64) {
    // Declare the helper struct to access the helper functions
    var helper Helper
    // Assigning these to new variables for easier to read
    slaveJuiceFloat := slaveLine.LineJuiceFloat
    masterJuiceFloat := masterLine.LineJuiceFloat
    // --------------------------------------------------------
    // First, lets see if any of them are EVEN values
    if slaveJuiceFloat == 100 || masterJuiceFloat == 100 {
        // Remember I set these Juice values to positive 100 if they are EVEN, so I will need to catch it to see whether
        // to use +100 or -100 to compare against the juiceParam, can't use Absolute Value because they can be inverted
        // If Slave Juice is EVEN (100) and Master Juice is negatives (-120), change Slave Juice to -100, if
        // Master Juice is positive and over, or equal to +100, then leaving Slave as +100 as it is will be fine
        if slaveJuiceFloat == 100 && masterJuiceFloat < 0 {
            slaveJuiceFloat = slaveJuiceFloat * (-1)
            // NOTE(review): the Slave flip is written back to the struct here,
            // but the Master flip below stays local — confirm this asymmetry
            // is intentional.
            slaveLine.LineJuiceFloat = slaveJuiceFloat
        }
        // If Master Juice is EVEN (100) and Slave Juice is negatives (-120), change Master Juice to -100, if
        // Slave Juice is positive and over, or equal to +100, then leaving Master as +100 as it is will be fine
        if slaveJuiceFloat < 100 && masterJuiceFloat == 100 {
            masterJuiceFloat = masterJuiceFloat * (-1)
        }
    }
    // Lets first see if they are inverted juice values (one negative, one positive), inverted needs to be within params
    // We know if we multiply them, and it is negative, then it is inverted
    // NOTE(review): if either juice is exactly 0 neither this branch nor the
    // same-sign branch below runs and FunctionLog is left untouched; upstream
    // validation appears to forbid juice in [-99, 99], so 0 shouldn't occur.
    if slaveJuiceFloat*masterJuiceFloat < 0 {
        // Let's create two variables to track the positive diff (from +100) and negative diff (from -100)
        var positiveDiff float64
        var negativeDiff float64
        // Now we need to find out which one is the positive and which one is the negative
        if slaveJuiceFloat >= 100 && masterJuiceFloat <= -100 {
            positiveDiff = slaveJuiceFloat - 100 // Since Slave Juice is positive, 105 - 100 = 5
            negativeDiff = (masterJuiceFloat + 100) * -1 // Since Master Juice is negative, -105 + 100 = -5 * -1 = 5
        }
        if slaveJuiceFloat <= -100 && masterJuiceFloat >= 100 {
            positiveDiff = masterJuiceFloat - 100 // Since Master Juice is positive, 105 - 100 = 5
            negativeDiff = (slaveJuiceFloat + 100) * -1 // Since Slave Juice is neg, -105 + 100 = -5 * -1 = 5
        }
        // If subtracted up and it is less than or equal to the juice parameter, we know it is within the parameters
        if positiveDiff+negativeDiff <= juiceParam {
            slaveLine.FunctionLog = fmt.Sprintf("[#CompareJuiceValues Passed] Master Juice (%v) w/ Juice "+
                "Parameter (%v) vs Slave Juice (%v)", masterJuiceFloat, juiceParam, slaveJuiceFloat)
        } else {
            slaveLine.FunctionLog = fmt.Sprintf("[#CompareJuiceValues Failed] Master Juice (%v) w/ Juice "+
                "Parameter (%v) vs Slave Juice (%v)", masterJuiceFloat, juiceParam, slaveJuiceFloat)
        }
    }
    // Now if they are not inverted, multiplying them will be positive
    if slaveJuiceFloat*masterJuiceFloat > 0 {
        // If we know that Slave Juice Float is greater than Master, then we know we should place if within parameter
        if masterJuiceFloat <= slaveJuiceFloat+juiceParam {
            slaveLine.FunctionLog = fmt.Sprintf("[#CompareJuiceValues Passed] Master Juice (%v) w/ Juice "+
                "Parameter (%v) vs Slave Juice (%v)", masterJuiceFloat, juiceParam, slaveJuiceFloat)
        // If we know that Slave Juice Float is less than Master, then we know we should skip the line
        } else {
            slaveLine.FunctionLog = fmt.Sprintf("[#CompareJuiceValues Skipped] Master Juice (%v) w/ Juice "+
                "Parameter (%v) vs Slave Juice (%v)", masterJuiceFloat, juiceParam, slaveJuiceFloat)
        }
    }
    // Since MoneyLine doesn't need spread comparison, we can "Authorized" or "Skipped" it now
    if slaveLine.LineType == "MoneyLine" && masterLine.LineType == "MoneyLine" {
        // This prefix means that it passed Juice Comparisons, Authorized if it's a MoneyLine
        if strings.HasPrefix(slaveLine.FunctionLog, "[#CompareJuiceValues Passed]") {
            slaveLine.FunctionLog = helper.ReplaceParameters(slaveLine.FunctionLog,
                "[#CompareJuiceValues Passed]", "[#CompareJuiceValues Authorized]")
            slaveLine.LineStatus = "Authorized"
        }
        // This prefix means that it did not pass Juice Comparisons
        if strings.HasPrefix(slaveLine.FunctionLog, "[#CompareJuiceValues Failed]") {
            slaveLine.FunctionLog = helper.ReplaceParameters(slaveLine.FunctionLog,
                "[#CompareJuiceValues Failed]", "[#CompareJuiceValues Skipped]")
            slaveLine.LineStatus = "Skipped"
        }
    }
}
// compareSpreadLine compares a Slave spread line against an authorized Master
// line. The Slave is Authorized when the Master spread does not exceed the
// Slave spread plus the allowed tolerance (spreadParam); otherwise it is
// Skipped. Since rotation numbers were already matched upstream, the
// Favored/Underdog orientation is deliberately not re-checked here (an earlier
// version compared FavoredUnderdog values and was removed).
func (slaveLine *Lines) compareSpreadLine(approvedLine Lines, spreadParam float64) {
    // Decide the verdict first, then emit a single log line carrying it.
    verdict := "Skipped"
    if approvedLine.LineSpreadFloat <= slaveLine.LineSpreadFloat+spreadParam {
        verdict = "Authorized"
    }
    slaveLine.FunctionLog = fmt.Sprintf("[#CompareSpreadLine %s] Master Spread (%v) w/ Spread "+
        "Parameter (%v) vs. Slave Spread: (%v)", verdict, approvedLine.LineSpreadFloat, spreadParam,
        slaveLine.LineSpreadFloat)
    // The verdict doubles as the resulting LineStatus.
    slaveLine.LineStatus = verdict
}
// compareTotalLine compares a Slave total line against an authorized Master
// total. Over lines authorize when the Master total is at least the Slave
// total minus the tolerance (spreadParam); Under lines authorize when the
// Master total is at most the Slave total plus the tolerance. Mismatched
// Over/Under directions only append an error and leave FunctionLog and
// LineStatus untouched.
func (slaveLine *Lines) compareTotalLine(approvedLine Lines, spreadParam float64) {
    sameDirection := slaveLine.OverUnder == approvedLine.OverUnder
    switch {
    case sameDirection && slaveLine.OverUnder == "Over":
        verdict := "Skipped"
        if approvedLine.LineSpreadFloat >= slaveLine.LineSpreadFloat-spreadParam {
            verdict = "Authorized"
        }
        slaveLine.FunctionLog = fmt.Sprintf("[#CompareTotalLine %s] Master Over (%v) w/ Spread "+
            "Parameter (%v) vs. Slave Spread: (%v)", verdict, approvedLine.LineSpreadFloat, spreadParam,
            slaveLine.LineSpreadFloat)
    case sameDirection && slaveLine.OverUnder == "Under":
        verdict := "Skipped"
        if approvedLine.LineSpreadFloat <= slaveLine.LineSpreadFloat+spreadParam {
            verdict = "Authorized"
        }
        slaveLine.FunctionLog = fmt.Sprintf("[#CompareTotalLine %s] Master Under (%v) w/ Spread "+
            "Parameter (%v) vs. Slave Spread: (%v)", verdict, approvedLine.LineSpreadFloat, spreadParam,
            slaveLine.LineSpreadFloat)
    default:
        // Directions disagree (or one side is blank): record the error only.
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave Total: Inverted OverUnder with "+
            "Authorized bet")
    }
    // Translate the verdict embedded in the log prefix into a LineStatus.
    if strings.HasPrefix(slaveLine.FunctionLog, "[#CompareTotalLine Authorized]") {
        slaveLine.LineStatus = "Authorized"
    } else if strings.HasPrefix(slaveLine.FunctionLog, "[#CompareTotalLine Skipped]") {
        slaveLine.LineStatus = "Skipped"
    }
}
// compareTeamTotalLine compares a Slave team-total line against an authorized
// Master line using the same Over/Under tolerance rules as compareTotalLine.
// NOTE(review): the log prefixes and error text intentionally reproduce the
// original "[#CompareTotalLine ...]" / "Slave Total:" wording (a copy of the
// Total comparison) — downstream consumers may match on those strings, so
// renaming them to "TeamTotal" needs a coordinated change.
func (slaveLine *Lines) compareTeamTotalLine(approvedLine Lines, spreadParam float64) {
    sameDirection := slaveLine.OverUnder == approvedLine.OverUnder
    switch {
    case sameDirection && slaveLine.OverUnder == "Over":
        verdict := "Skipped"
        if approvedLine.LineSpreadFloat >= slaveLine.LineSpreadFloat-spreadParam {
            verdict = "Authorized"
        }
        slaveLine.FunctionLog = fmt.Sprintf("[#CompareTotalLine %s] Master Over (%v) w/ Spread "+
            "Parameter (%v) vs. Slave Spread: (%v)", verdict, approvedLine.LineSpreadFloat, spreadParam,
            slaveLine.LineSpreadFloat)
    case sameDirection && slaveLine.OverUnder == "Under":
        verdict := "Skipped"
        if approvedLine.LineSpreadFloat <= slaveLine.LineSpreadFloat+spreadParam {
            verdict = "Authorized"
        }
        slaveLine.FunctionLog = fmt.Sprintf("[#CompareTotalLine %s] Master Under (%v) w/ Spread "+
            "Parameter (%v) vs. Slave Spread: (%v)", verdict, approvedLine.LineSpreadFloat, spreadParam,
            slaveLine.LineSpreadFloat)
    default:
        // Directions disagree (or one side is blank): record the error only.
        slaveLine.ErrorLog = append(slaveLine.ErrorLog, "Slave Total: Inverted OverUnder with "+
            "Authorized bet")
    }
    // Translate the verdict embedded in the log prefix into a LineStatus.
    if strings.HasPrefix(slaveLine.FunctionLog, "[#CompareTotalLine Authorized]") {
        slaveLine.LineStatus = "Authorized"
    } else if strings.HasPrefix(slaveLine.FunctionLog, "[#CompareTotalLine Skipped]") {
        slaveLine.LineStatus = "Skipped"
    }
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package azure
import (
"net/url"
"strings"
"yunion.io/x/jsonutils"
"yunion.io/x/log"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
)
// SnapshotSku mirrors the "sku" section of an Azure snapshot resource
// as returned by the Compute API.
type SnapshotSku struct {
	Name string
	Tier string
}
// SSnapshot is the local representation of an Azure managed-disk
// snapshot. Exported field names match the Azure resource JSON.
type SSnapshot struct {
	multicloud.SResourceBase
	multicloud.AzureTags
	// region is the owning region client used for API calls; it is not
	// part of the API payload and is wired up after unmarshaling.
	region     *SRegion
	ID         string
	Name       string
	Location   string
	ManagedBy  string
	Sku        *SnapshotSku
	Properties DiskProperties
	Type       string
}
// GetId returns the full Azure resource ID of the snapshot.
func (self *SSnapshot) GetId() string {
	return self.ID
}
// GetGlobalId returns the lowercased resource ID, used as the
// case-insensitive globally unique identifier.
func (self *SSnapshot) GetGlobalId() string {
	return strings.ToLower(self.ID)
}
// GetName returns the snapshot's display name.
func (self *SSnapshot) GetName() string {
	return self.Name
}
// GetStatus maps the Azure provisioning state onto the cloud-provider
// snapshot status constants. Only "Succeeded" is recognized as ready;
// anything else is logged and reported as unknown.
func (self *SSnapshot) GetStatus() string {
	switch self.Properties.ProvisioningState {
	case "Succeeded":
		return api.SNAPSHOT_READY
	default:
		// Fix: log message typo "Unknow" -> "Unknown".
		log.Errorf("Unknown azure snapshot %s status: %s", self.ID, self.Properties.ProvisioningState)
		return api.SNAPSHOT_UNKNOWN
	}
}
// CreateSnapshot creates a managed snapshot of diskId in this region by
// copying the source disk.
// NOTE(review): the desc parameter is currently unused — confirm whether
// it should be sent to Azure.
func (self *SRegion) CreateSnapshot(diskId, name, desc string) (*SSnapshot, error) {
	params := map[string]interface{}{
		"Name":     name,
		"Location": self.Name,
		"Properties": map[string]interface{}{
			"CreationData": map[string]string{
				"CreateOption":     "Copy",
				"SourceResourceID": diskId,
			},
		},
		"Type": "Microsoft.Compute/snapshots",
	}
	// The snapshot struct is populated in place by self.create before the
	// caller dereferences it, so returning the pointer here is safe.
	snapshot := &SSnapshot{region: self}
	return snapshot, self.create("", jsonutils.Marshal(params), snapshot)
}
// Delete removes the snapshot from Azure via its region client.
func (self *SSnapshot) Delete() error {
	return self.region.DeleteSnapshot(self.ID)
}
// GetSizeMb converts the snapshot's disk size from GB to MB.
func (self *SSnapshot) GetSizeMb() int32 {
	return self.Properties.DiskSizeGB.Int32() * 1024
}
// DeleteSnapshot deletes the snapshot resource identified by snapshotId.
func (self *SRegion) DeleteSnapshot(snapshotId string) error {
	return self.del(snapshotId)
}
// AccessURIOutput carries the SAS URL granted by a beginGetAccess call.
type AccessURIOutput struct {
	AccessSas string
}

// AccessProperties wraps the async operation output.
type AccessProperties struct {
	Output AccessURIOutput
}

// AccessURI is the response body of a snapshot beginGetAccess request.
type AccessURI struct {
	Name       string
	Properties AccessProperties
}
// GrantAccessSnapshot asks Azure for a temporary read-only SAS URI for
// the given snapshot, valid for 24 hours, and returns it.
//
// Fix: the original returned accessURI.Properties.Output.AccessSas in
// the same return statement that unmarshaled the response body. Go
// evaluates return operands left to right, so the field was read while
// still empty and the function always returned "". Unmarshal first,
// then read the field.
func (self *SRegion) GrantAccessSnapshot(snapshotId string) (string, error) {
	params := map[string]interface{}{
		"access":            "Read",
		"durationInSeconds": 3600 * 24,
	}
	body, err := self.perform(snapshotId, "beginGetAccess", jsonutils.Marshal(params))
	if err != nil {
		return "", err
	}
	accessURI := AccessURI{}
	if err := body.Unmarshal(&accessURI); err != nil {
		return "", err
	}
	return accessURI.Properties.Output.AccessSas, nil
}
// Refresh re-fetches the snapshot from Azure and merges the fresh data
// into this struct.
func (self *SSnapshot) Refresh() error {
	snapshot, err := self.region.GetSnapshot(self.ID)
	if err != nil {
		return err
	}
	return jsonutils.Update(self, snapshot)
}
// GetISnapshotById fetches one snapshot and exposes it through the
// cloudprovider.ICloudSnapshot interface.
func (self *SRegion) GetISnapshotById(snapshotId string) (cloudprovider.ICloudSnapshot, error) {
	return self.GetSnapshot(snapshotId)
}
// GetISnapshots lists the region's snapshots and adapts each of them to
// cloudprovider.ICloudSnapshot, wiring the region pointer back in.
func (self *SRegion) GetISnapshots() ([]cloudprovider.ICloudSnapshot, error) {
	snapshots, err := self.ListSnapshots()
	if err != nil {
		return nil, err
	}
	ret := make([]cloudprovider.ICloudSnapshot, 0, len(snapshots))
	// Index-based loop so we take the address of the slice element
	// itself, not of a per-iteration copy.
	for i := range snapshots {
		snapshots[i].region = self
		ret = append(ret, &snapshots[i])
	}
	return ret, nil
}
// GetDiskId returns the lowercased resource ID of the source disk the
// snapshot was created from.
func (self *SSnapshot) GetDiskId() string {
	return strings.ToLower(self.Properties.CreationData.SourceResourceID)
}
// GetDiskType returns an empty string; the source disk type is not
// tracked for Azure snapshots.
func (self *SSnapshot) GetDiskType() string {
	return ""
}
// GetProjectId derives the project (resource group) from the resource ID.
func (self *SSnapshot) GetProjectId() string {
	return getResourceGroup(self.ID)
}
// GetSnapshot fetches a single snapshot by resource ID. The struct is
// filled in place by region.get before the caller sees the pointer.
func (region *SRegion) GetSnapshot(snapshotId string) (*SSnapshot, error) {
	snapshot := SSnapshot{region: region}
	return &snapshot, region.get(snapshotId, url.Values{}, &snapshot)
}
// ListSnapshots returns every managed-disk snapshot in the region.
// On success the slice is non-nil even when empty, matching the
// original behavior.
func (region *SRegion) ListSnapshots() ([]SSnapshot, error) {
	result := []SSnapshot{}
	if err := region.list("Microsoft.Compute/snapshots", url.Values{}, &result); err != nil {
		return nil, err
	}
	return result, nil
}
|
package interfaces
// ErrorResponse is a generic API error payload.
type ErrorResponse struct {
	Data   string `json:"data"`
	Status int    `json:"status"`
}

// SpectraCreatedResponse is returned after a spectra resource is created.
type SpectraCreatedResponse struct {
	Data   SpectraIdResponse `json:"data"`
	Status int               `json:"status"`
}

// SpectraIdResponse carries the id of a created spectra resource.
type SpectraIdResponse struct {
	Id string `json:"id"`
}
|
package autocomplete
import (
"testing"
)
// TestCompletionForBash checks that the autocomplete command executes
// without error with its default (bash) settings.
func TestCompletionForBash(t *testing.T) {
	cmd := NewAutoCompleteCommand()
	err := cmd.Execute()
	if err != nil {
		t.Fatal(err)
	}
}
// TestCompletionForNotBash checks that requesting a non-bash shell type
// (zsh) is rejected with an error.
func TestCompletionForNotBash(t *testing.T) {
	opts := &autocompleteOptions{
		acType: "zsh",
	}
	err := runAutoComplete(nil, opts)
	if err == nil {
		t.Fatal("expected to find error on non bash shell type")
	}
}
|
package constant
// Bus address and topic for order commands.
// NOTE(review): the broker address is a hard-coded docker-machine IP;
// consider making it configurable.
const (
	OrderCommandBusAddr = "192.168.99.100:4150"
	OrderCommandTopic   = "command_order"
)
|
package main
import (
"LeetCodeGo/base"
"LeetCodeGo/utils"
"fmt"
"sort"
)
// NodeInfo pairs a tree node's value with its depth coordinate
// (y decreases by one for each level below the root).
type NodeInfo struct {
	value int
	y     int
}

// NodeInfoSlice orders nodes for vertical-traversal output.
type NodeInfoSlice []NodeInfo

func (slice NodeInfoSlice) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}

func (slice NodeInfoSlice) Len() int {
	return len(slice)
}

// Less sorts by y descending (rows nearer the root first); ties on the
// same row break by value ascending.
func (slice NodeInfoSlice) Less(i, j int) bool {
	if slice[i].y != slice[j].y {
		return slice[i].y > slice[j].y
	}
	return slice[i].value < slice[j].value
}
// recursive walks the tree assigning each node a grid coordinate: left
// children shift x-1, right children x+1, and each level down
// decrements y. Values are grouped by column x in memory.
func recursive(node *base.TreeNode, x, y int, memory *map[int][]NodeInfo) {
	if node == nil {
		return
	}
	// append on a missing key starts from a nil slice, so the original
	// exists/not-exists branches collapse into one statement.
	(*memory)[x] = append((*memory)[x], NodeInfo{value: node.Val, y: y})
	if node.Left != nil {
		recursive(node.Left, x-1, y-1, memory)
	}
	if node.Right != nil {
		recursive(node.Right, x+1, y-1, memory)
	}
}
// verticalTraversal returns the tree's node values grouped by vertical
// column, columns left to right. Within a column, nodes are ordered
// top-to-bottom, with same-row ties ordered by value ascending (see
// NodeInfoSlice.Less). LeetCode 987.
func verticalTraversal(root *base.TreeNode) [][]int {
	result := make([][]int, 0)
	if root == nil {
		return result
	}
	memory := make(map[int][]NodeInfo, 0)
	recursive(root, 0, 0, &memory)
	// Visit columns in ascending x order for deterministic output.
	keys := make([]int, 0)
	for k := range memory {
		keys = append(keys, k)
	}
	sort.Sort(utils.IntList(keys))
	for _, key := range keys {
		values := memory[key]
		sort.Sort(NodeInfoSlice(values))
		ys := make([]int, 0)
		for _, v := range values {
			ys = append(ys, v.value)
		}
		result = append(result, ys)
	}
	return result
}
// main runs verticalTraversal over two sample trees (built from
// level-order string slices, "nil" marking missing nodes) and prints
// the results.
func main() {
	values := []string{"0", "2", "1", "3", "nil", "nil", "nil", "4", "5", "nil", "7", "6", "nil", "10", "8", "11", "9"}
	root := base.GetBinaryTree(values)
	result := verticalTraversal(root)
	fmt.Println(result)
	values = []string{"1", "2", "3", "4", "5", "6", "7"}
	root = base.GetBinaryTree(values)
	result = verticalTraversal(root)
	fmt.Println(result)
}
|
package printout
import (
"fmt"
"time"
"strings"
"strconv"
"../ds"
"github.com/jroimartin/gocui"
)
// Overview renders the filtered entry list into the "overview" gocui
// view. Rows sharing a date are grouped under a single date header with
// box-drawing connectors; when the terminal is narrower than minX
// columns a compact layout is used.
func Overview(g *gocui.Gui) error {
	var yearO, monthO, dayO string // date of the previously printed row
	var year, month, day string
	var yearN, monthN, dayN int
	var maxX int
	var err error
	var v *gocui.View
	var index int
	var minX int = 136 // width threshold for switching to compact layout
	maxX, _ = g.Size()
	// Print Overview Title Also
	overview_Title(g)
	//Update Array before starting
	ds.Update_FilteredEntries()
	if v, err = g.View("overview"); err != nil {
		return err
	}
	v.Clear()
	if ds.FilteredEntries == nil {
		fmt.Fprintf(v, " No Entries :( ")
	} else {
		for index = range ds.FilteredEntries {
			year = ds.FilteredEntries[index].Year
			month = ds.FilteredEntries[index].Month
			day = ds.FilteredEntries[index].Day
			yearN, _ = strconv.Atoi(year)
			monthN, _ = strconv.Atoi(month)
			dayN, _ = strconv.Atoi(day)
			t := time.Date(yearN, time.Month(monthN), dayN, 0, 0, 0, 0, time.Local)
			if year == yearO && month == monthO && day == dayO {
				// Same date as the previous row: indent instead of repeating
				// the date, choosing the connector by whether more rows of
				// this date follow.
				if maxX < minX {
					fmt.Fprintf(v, strings.Repeat(" ", 8))
				} else {
					fmt.Fprintf(v, strings.Repeat(" ", 16))
				}
				if is_Next_Date_The_Same(index, year, month, day) {
					fmt.Fprintf(v, " \x1b[0;34m├─ \x1b[0;37m")
				} else {
					fmt.Fprintf(v, " \x1b[0;34m└─ \x1b[0;37m")
				}
				if ds.FilteredEntries[index].Enabled == false {
					fmt.Fprintf(v, "[H]") // marker for disabled entries
				}
			} else {
				// New date: print the weekday plus a short (day only) or
				// full (mm/dd/yyyy) date column depending on width.
				fmt.Fprintf(v, "\x1b[0;34m%0.15s\x1b[0;37m", t.Format(" Mon: "))
				if maxX < minX {
					fmt.Fprintf(v, "%0.2s", t.Format("02"))
				} else {
					fmt.Fprintf(v, "%0.10s", t.Format("01/02/2006")+strings.Repeat(" ", 10))
				}
				if is_Next_Date_The_Same(index, year, month, day) {
					fmt.Fprintf(v, " \x1b[0;34m┬─ \x1b[0;37m")
				} else {
					fmt.Fprintf(v, " \x1b[0;34m── \x1b[0;37m")
				}
				if ds.FilteredEntries[index].Enabled == false {
					fmt.Fprintf(v, "[H]")
				}
				yearO = year
				monthO = month
				dayO = day
			}
			// Fixed-width columns padded with spaces then truncated by %0.Ns:
			// title, optional AOF, category, topic, optional duration.
			fmt.Fprintf(v, "%0.55s", ds.FilteredEntries[index].Title+strings.Repeat(" ", 50))
			fmt.Fprintf(v, " ")
			if ds.KConfig.Mode == 0 {
				fmt.Fprintf(v, "%0.15s", ds.FilteredEntries[index].AOF+strings.Repeat(" ", 50))
				fmt.Fprintf(v, " ")
			}
			fmt.Fprintf(v, "%0.25s", ds.FilteredEntries[index].Category+strings.Repeat(" ", 25))
			fmt.Fprintf(v, "%0.25s", ds.FilteredEntries[index].Topic+strings.Repeat(" ", 25))
			if ds.FilteredEntries[index].Duration > 0 {
				fmt.Fprintf(v, "%dmin ", ds.FilteredEntries[index].Duration)
			}
			fmt.Fprintf(v, strings.Repeat(" ", maxX))
			fmt.Fprintf(v, "\n")
		}
	}
	return nil
}
// is_Next_Date_The_Same reports whether the entry after index carries
// the same year/month/day; the last entry never has a same-date
// successor.
func is_Next_Date_The_Same(index int, year, month, day string) bool {
	if index >= len(ds.FilteredEntries)-1 {
		return false
	}
	next := ds.FilteredEntries[index+1]
	return year == next.Year && month == next.Month && day == next.Day
}
// overview_Title sets the "overview" view's title, appending the active
// user filter when one is set.
func overview_Title(g *gocui.Gui) error {
	v, err := g.View("overview")
	if err != nil {
		return err
	}
	title := "[ Overview ]"
	if ds.UserFilterString != "" {
		title = "[ Overview (Filter: '" + ds.UserFilterString + "') ]"
	}
	v.Title = title
	return nil
}
|
//+build integration
package collections
import (
"strings"
"github.com/crowleyfelix/star-wars-api/server/database/mongodb/models"
"github.com/crowleyfelix/star-wars-api/server/errors"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/satori/go.uuid"
)
// Integration suite for the Planets Mongo collection: exercises the
// full CRUD cycle (Insert, Find, FindByID, Update, Delete) against a
// real database. A random UUID name keeps runs independent, and the
// specs are order-dependent: Insert records the generated id for the
// later specs via the shared `id` variable.
var _ = Describe("Planets", func() {
	var (
		coll   = NewPlanets()
		planet *models.Planet
		id     int
		name   string
	)
	// One unique planet name for the whole suite.
	BeforeSuite(func() {
		uid, err := uuid.NewV4()
		Expect(err).To(BeNil())
		name = uid.String()
	})
	// Rebuild the candidate planet before every spec, carrying forward
	// the id assigned by the Insert specs.
	BeforeEach(func() {
		planet = &models.Planet{
			ID:      id,
			Name:    name,
			Terrain: "grasslands, mountains",
			Climate: "temperate",
		}
	})
	Describe("Insert(): When inserting a planet", func() {
		var (
			actual *models.Planet
			err    error
		)
		JustBeforeEach(func() {
			actual, err = coll.Insert(planet)
			if actual != nil {
				id = actual.ID
			}
		})
		Context("with unexistent data", func() {
			It("should set id", func() {
				Expect(err).To(BeNil())
				Expect(actual).To(Equal(planet))
			})
		})
		Context("with duplicated name", func() {
			BeforeEach(func() {
				// Lowercasing still collides with the first insert, so
				// this checks case-insensitive name uniqueness.
				planet.Name = strings.ToLower(planet.Name)
			})
			It("should return an error", func() {
				Expect(err).To(BeAssignableToTypeOf(new(errors.UnprocessableEntity)))
			})
		})
	})
	Describe("Find(): When finding planets", func() {
		var (
			pagination *Pagination
		)
		var (
			actual *models.PlanetPage
			expect *models.PlanetPage
			err    error
		)
		BeforeEach(func() {
			pagination = &Pagination{
				Page: 1,
				Size: 1,
			}
			query := &PlanetSearchQuery{
				Name: &planet.Name,
			}
			actual, err = coll.Find(query, pagination)
			// Exactly one page containing only the inserted planet.
			expect = &models.PlanetPage{
				Page: &models.Page{
					MaxSize:  1,
					Size:     1,
					Current:  1,
					Previous: nil,
					Next:     nil,
				},
				Planets: []models.Planet{*planet},
			}
		})
		It("should get planet", func() {
			Expect(err).To(BeNil())
			Expect(actual).To(Equal(expect))
		})
	})
	Describe("FindByID(): When finding planet by id", func() {
		var (
			actual *models.Planet
			err    error
		)
		BeforeEach(func() {
			actual, err = coll.FindByID(planet.ID)
		})
		It("should get planet", func() {
			Expect(err).To(BeNil())
			Expect(actual).To(BeEquivalentTo(planet))
		})
	})
	Describe("Update(): When updating a planet", func() {
		var (
			changed *models.Planet
			err     error
		)
		JustBeforeEach(func() {
			err = coll.Update(planet)
			changed, _ = coll.FindByID(planet.ID)
		})
		Context("with existent entity", func() {
			BeforeEach(func() {
				// A fresh UUID name avoids tripping the uniqueness check.
				uid, err := uuid.NewV4()
				Expect(err).To(BeNil())
				planet.Name = uid.String()
			})
			It("should update planet", func() {
				Expect(err).To(BeNil())
				Expect(changed).To(Equal(planet))
			})
		})
		Context("with unexistent entity", func() {
			BeforeEach(func() {
				planet.ID = planet.ID + 1
			})
			It("should return an error", func() {
				Expect(err).To(BeAssignableToTypeOf(new(errors.NotFound)))
			})
		})
	})
	Describe("Delete(): When deleting a planet", func() {
		var (
			err error
		)
		JustBeforeEach(func() {
			err = coll.Delete(planet.ID)
		})
		Context("with existent entity", func() {
			It("should not return an error", func() {
				Expect(err).To(BeNil())
			})
		})
		Context("with unexistent entity", func() {
			BeforeEach(func() {
				planet.ID = planet.ID + 1
			})
			It("should return an error", func() {
				Expect(err).To(BeAssignableToTypeOf(new(errors.NotFound)))
			})
		})
	})
})
|
package daemon
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/kiyonlin/dawn/config"
)
const envDaemon = "DAWN_DAEMON"
const envDaemonWorker = "DAWN_DAEMON_WORKER"
var stdoutLogFile *os.File
var stderrLogFile *os.File
var osExit = os.Exit
// Run keeps the application alive in daemon mode. The original (master)
// process spawns the daemon and exits; the daemon then repeatedly
// spawns and supervises a worker, restarting it when it exits. A worker
// process returns immediately. After daemon.tries consecutive fast
// failures (a worker that lived longer than 10s resets the counter)
// the daemon logs and exits with status 1 via osExit.
func Run() {
	if isWorker() {
		return
	}
	// Panic if the initial spawned daemon process has error
	if _, err := spawn(true); err != nil {
		panic(fmt.Sprintf("dawn: failed to run in daemon mode: %s", err))
	}
	setupLogFiles()
	defer teardownLogFiles()
	var (
		cmd    *exec.Cmd
		err    error
		count  int
		start  time.Time
		max    = config.GetInt("daemon.tries", 10)
		logger = log.New(stderrLogFile, "", log.LstdFlags)
	)
	for {
		if count++; count > max {
			break
		}
		start = time.Now()
		if cmd, err = spawn(false); err != nil {
			continue
		}
		err = cmd.Wait()
		// Fix: log message typo "exist with err" -> "exited with err".
		logger.Printf("dawn: (pid:%d)%v exited with err: %v", cmd.Process.Pid, cmd.Args, err)
		if time.Since(start) > time.Second*10 {
			// The worker survived a while, so treat it as healthy and
			// reset the consecutive-failure count.
			count = 0
		}
	}
	logger.Printf("dawn: already attempted %d times", max)
	osExit(1)
}
// spawn starts a copy of the current executable. With skip=true it is a
// no-op inside the daemon (used for the initial hand-off). In the
// master process it spawns the daemon and then exits the master via
// osExit(0); in the daemon it spawns a worker wired to the configured
// log files and returns the command so Run can Wait on it.
func spawn(skip bool) (cmd *exec.Cmd, err error) {
	if isDaemon() && skip {
		return
	}
	args, env := setupArgsAndEnv()
	cmd = &exec.Cmd{
		Path:        args[0],
		Args:        args,
		Env:         env,
		SysProcAttr: newSysProcAttr(),
	}
	// Only the daemon redirects its worker's output to the log files.
	if isDaemon() {
		if stdoutLogFile != nil {
			cmd.Stdout = stdoutLogFile
		}
		if stderrLogFile != nil {
			cmd.Stderr = stderrLogFile
		}
	}
	if err = cmd.Start(); err != nil {
		return
	}
	// Exit main process
	if !isDaemon() {
		osExit(0)
	}
	return
}
// setupArgsAndEnv derives argv and the environment for the child
// process: the master appends a cosmetic "master process dawn" argument
// plus the DAWN_DAEMON marker; the daemon rewrites that last argument
// to "worker process" and adds the DAWN_DAEMON_WORKER marker.
// NOTE(review): args aliases os.Args, so the daemon branch mutates the
// process's own os.Args slice in place — confirm this is intended.
func setupArgsAndEnv() ([]string, []string) {
	args, env := os.Args, os.Environ()
	if !isDaemon() {
		args = append(args, "master process dawn")
		env = append(env, envDaemon+"=")
	} else if !isWorker() {
		args[len(args)-1] = "worker process"
		env = append(env, envDaemonWorker+"=")
	}
	return args, env
}
// isDaemon reports whether the DAWN_DAEMON environment marker is set,
// i.e. this process is the daemon (or one of its workers).
func isDaemon() bool {
	_, ok := os.LookupEnv(envDaemon)
	return ok
}
// isWorker reports whether the DAWN_DAEMON_WORKER environment marker is
// set, i.e. this process is a supervised worker.
func isWorker() bool {
	_, ok := os.LookupEnv(envDaemonWorker)
	return ok
}
// setupLogFiles opens the configured stdout/stderr log files in append
// mode (0600), creating them if needed. A missing config key leaves the
// corresponding file nil; an unopenable path panics.
func setupLogFiles() {
	var err error
	if f := config.GetString("daemon.stdoutLogFile"); f != "" {
		if stdoutLogFile, err = os.OpenFile(filepath.Clean(f), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600); err != nil {
			panic(fmt.Sprintf("dawn: failed to open stdout log file %s: %s", f, err))
		}
	}
	if f := config.GetString("daemon.stderrLogFile"); f != "" {
		if stderrLogFile, err = os.OpenFile(filepath.Clean(f), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600); err != nil {
			panic(fmt.Sprintf("dawn: failed to open stderr log file %s: %s", f, err))
		}
	}
}
// teardownLogFiles closes whichever of the two daemon log files were
// opened; close errors are deliberately ignored during shutdown.
func teardownLogFiles() {
	for _, f := range []*os.File{stdoutLogFile, stderrLogFile} {
		if f != nil {
			_ = f.Close()
		}
	}
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dispatch
import (
"testing"
"github.com/uber/kraken/utils/bitsetutil"
"github.com/stretchr/testify/require"
)
// Setting a bit to the value it already holds must not change the
// completion count; only actual transitions are counted.
func TestSyncBitfieldDuplicateSetDoesNotDoubleCount(t *testing.T) {
	require := require.New(t)
	b := newSyncBitfield(bitsetutil.FromBools(false, false))
	require.False(b.Complete())
	b.Set(0, true)
	require.False(b.Complete())
	b.Set(0, true)
	require.False(b.Complete())
	b.Set(1, true)
	require.True(b.Complete())
	b.Set(1, false)
	require.False(b.Complete())
	b.Set(1, false)
	require.False(b.Complete())
	b.Set(1, true)
	require.True(b.Complete())
}
// A bitfield constructed with all bits set must report complete
// immediately, proving the constructor counts pre-set bits.
func TestSyncBitfieldNewCountsNumComplete(t *testing.T) {
	require := require.New(t)
	b := newSyncBitfield(bitsetutil.FromBools(true, true, true))
	require.True(b.Complete())
}
// String renders bits as '1'/'0' in index order.
func TestSyncBitfieldString(t *testing.T) {
	require := require.New(t)
	b := newSyncBitfield(bitsetutil.FromBools(true, false, true, false))
	require.Equal("1010", b.String())
}
|
package main
import(
"fmt"
"io"
"os"
)
// MyReader serves bytes from a fixed string: every Read copies as much
// of Str as fits into the destination buffer, always starting from the
// beginning of the string.
type MyReader struct {
	Str string
}

// Read copies up to len(b) bytes of mr.Str into b and reports how many
// were written. It never returns an error (not even io.EOF), so a
// non-empty Str yields data indefinitely — same contract as before.
func (mr MyReader) Read(b []byte) (count int, err error) {
	// copy already truncates to the shorter of the two lengths,
	// replacing the original's manual min-and-loop.
	count = copy(b, mr.Str)
	return count, nil
}
// Validate reads up to 1 MiB from r (bounded by both bytes and Read
// calls) and checks that every byte is 'A'. It reports the first
// mismatching byte, any read error, or a reader that produced no data
// at all; otherwise it prints OK.
func Validate(r io.Reader) {
	b := make([]byte, 1024, 2048)
	i, o := 0, 0
	for ; i < 1<<20 && o < 1<<20; i++ { // test 1mb
		n, err := r.Read(b)
		for i, v := range b[:n] {
			if v != 'A' {
				fmt.Fprintf(os.Stderr, "got byte %x at offset %v, want 'A'\n", v, o+i)
				return
			}
		}
		o += n
		if err != nil {
			fmt.Fprintf(os.Stderr, "read error: %v\n", err)
			return
		}
	}
	if o == 0 {
		fmt.Fprintf(os.Stderr, "read zero bytes after %d Read calls\n", i)
		return
	}
	fmt.Println("OK!")
}
type rot13Reader struct {
r io.Reader
}
func (rt rot13Reader) Read(b []byte)( int, error){
var tempb []byte = make([] byte, 50)
count, errs := rt.r.Read(tempb);
if errs == nil{
fmt.Println("tempb len",count)
for i:=0; i<count;i++ {
b[i] =rot13(tempb[i])
}
return count, errs
} else{
return 0, io.EOF
}
}
func rot13(p byte) byte {
switch {
case p >= 'A' && p <= 'M':
p += 13
case p >= 'N' && p <= 'Z':
p -= 13
case p >= 'a' && p <= 'm':
p += 13
case p >= 'n' && p <= 'z':
p -= 13
}
return p
} |
package model
import (
"github.com/layer5io/meshkit/errors"
)
// Error codes for this package, following the meshkit errors
// convention of string-valued numeric codes.
const (
	ErrInvalidRequestCode           = "1000"
	ErrNilClientCode                = "1001"
	ErrCreateDataCode               = "1002"
	ErrQueryCode                    = "1003"
	ErrMeshsyncSubscriptionCode     = "1004"
	ErrOperatorSubscriptionCode     = "1005"
	ErrAddonSubscriptionCode        = "1006"
	ErrControlPlaneSubscriptionCode = "1007"
	ErrMesheryClientCode            = "1008"
	ErrSubscribeChannelCode         = "1009"
	ErrPublishBrokerCode            = "1010"
	ErrNoMeshSyncCode               = "1011"
	ErrNoExternalEndpointCode       = "1012"
)
// ErrEmptyHandler is returned when the Meshery database handler has not
// been initialized.
// NOTE(review): it reuses ErrNoMeshSyncCode ("1011") rather than a
// dedicated code — confirm this is intended.
var (
	ErrEmptyHandler = errors.New(ErrNoMeshSyncCode, errors.Alert, []string{"Database handler not initialized"}, []string{"Meshery Database handler is not accessible to perform operations"}, []string{"Meshery Database is crashed or not reachable"}, []string{"Restart Meshery Server", "Please check if Meshery server is accessible to the Database"})
)
// ErrCreateData wraps err for failures while writing meshsync data to
// the Meshery database.
func ErrCreateData(err error) error {
	return errors.New(ErrCreateDataCode, errors.Alert, []string{"Error while writing meshsync data", err.Error()}, []string{"Unable to write MeshSync data to the Meshery Database"}, []string{"Meshery Database is crashed or not reachable"}, []string{"Restart Meshery Server", "Please check if Meshery server is accessible to the Database"})
}
// ErrUpdateData wraps err for failures while updating meshsync data.
// NOTE(review): it reuses ErrCreateDataCode ("1002"); there is no
// dedicated update code — confirm intended.
func ErrUpdateData(err error) error {
	return errors.New(ErrCreateDataCode, errors.Alert, []string{"Error while updating meshsync data", err.Error()}, []string{"Unable to update MeshSync data to the Meshery Database"}, []string{"Meshery Database is crashed or not reachable"}, []string{"Restart Meshery Server", "Please check if Meshery server is accessible to the Database"})
}
// ErrDeleteData wraps err for failures while deleting meshsync data.
//
// Fix: the long description previously read "Unable to read MeshSync
// data to the Meshery Database", copy-pasted from the read/create
// variants; corrected to describe deletion.
// NOTE(review): it still reuses ErrCreateDataCode ("1002"); a dedicated
// delete code would need a new constant.
func ErrDeleteData(err error) error {
	return errors.New(ErrCreateDataCode, errors.Alert, []string{"Error while deleting meshsync data", err.Error()}, []string{"Unable to delete MeshSync data from the Meshery Database"}, []string{"Meshery Database is crashed or not reachable"}, []string{"Restart Meshery Server", "Please check if Meshery server is accessible to the Database"})
}
// ErrQuery wraps err for invalid queries against the Meshery database.
func ErrQuery(err error) error {
	return errors.New(ErrQueryCode, errors.Alert, []string{"Error while querying data", err.Error()}, []string{"Invalid Query performed in Meshery Database"}, []string{}, []string{})
}
// ErrMeshsyncSubscription wraps err when the GraphQL MeshSync
// subscription stops.
func ErrMeshsyncSubscription(err error) error {
	return errors.New(ErrMeshsyncSubscriptionCode, errors.Alert, []string{"MeshSync Subscription failed", err.Error()}, []string{"GraphQL subscription for MeshSync stopped"}, []string{"Could be a network issue"}, []string{"Check if meshery server is reachable from the browser"})
}
// ErrSubscribeChannel wraps err when a broker subscription cannot be
// created.
func ErrSubscribeChannel(err error) error {
	return errors.New(ErrSubscribeChannelCode, errors.Alert, []string{"Unable to subscribe to channel", err.Error()}, []string{"Unable to create a broker subscription"}, []string{"Could be a network issue", "Meshery Broker could have crashed"}, []string{"Check if Meshery Broker is reachable from Meshery Server", "Check if Meshery Broker is up and running inside the configured cluster"})
}
// ErrPublishBroker wraps err when a broker publisher cannot be created.
func ErrPublishBroker(err error) error {
	return errors.New(ErrPublishBrokerCode, errors.Alert, []string{"Unable to publish to broker", err.Error()}, []string{"Unable to create a broker publisher"}, []string{"Could be a network issue", "Meshery Broker could have crashed"}, []string{"Check if Meshery Broker is reachable from Meshery Server", "Check if Meshery Broker is up and running inside the configured cluster"})
}
// ErrMesheryClient reports an uninitialized Meshery kubernetes client,
// including the underlying error detail when err is non-nil.
func ErrMesheryClient(err error) error {
	if err != nil {
		return errors.New(ErrMesheryClientCode, errors.Alert, []string{"Meshery kubernetes client not initialized", err.Error()}, []string{"Kubernetes config is not initialized with Meshery"}, []string{}, []string{"Upload your kubernetes config via the settings dashboard. If uploaded, wait for a minute for it to get initialized"})
	}
	return errors.New(ErrMesheryClientCode, errors.Alert, []string{"Meshery kubernetes client not initialized"}, []string{"Kubernetes config is not initialized with Meshery"}, []string{}, []string{"Upload your kubernetes config via the settings dashboard. If uploaded, wait for a minute for it to get initialized"})
}
|
package bus
import (
"context"
"fmt"
"log"
"strconv"
"sync"
"time"
)
// stderrLogger is the default error logger: it prefixes messages with
// "easy-bus:" and writes them via the standard log package.
type stderrLogger struct{}

func (stderrLogger) Errorf(format string, args ...interface{}) {
	log.Println(fmt.Sprintf("easy-bus: %s", fmt.Sprintf(format, args...)))
}
// nullIdempotent is a no-op idempotency guard: Acquire always reports
// false (never grants the key) and Release does nothing.
type nullIdempotent struct{}

func (ni nullIdempotent) Acquire(key string) (bool, error) { return false, nil }
func (ni nullIdempotent) Release(key string) error         { return nil }
// internalIdempotent is an in-memory idempotency guard backed by a
// mutex-protected key set.
type internalIdempotent struct {
	sync.Mutex
	dataMap map[string]bool
}

// Acquire records key and reports true the first time it is seen;
// further calls return false until the key is released.
func (ii *internalIdempotent) Acquire(key string) (bool, error) {
	ii.Lock()
	defer ii.Unlock()
	if ii.dataMap == nil {
		ii.dataMap = make(map[string]bool)
	}
	if _, ok := ii.dataMap[key]; ok {
		return false, nil
	}
	ii.dataMap[key] = true
	return true, nil
}

// Release forgets key so it can be acquired again.
//
// Fix: the original deleted from dataMap without holding the mutex,
// racing with concurrent Acquire calls on the same map; Release now
// locks like Acquire does.
func (ii *internalIdempotent) Release(key string) error {
	ii.Lock()
	defer ii.Unlock()
	delete(ii.dataMap, key)
	return nil
}
// nullDLStorage is a no-op dead-letter store that drops everything.
type nullDLStorage struct{}

func (nd nullDLStorage) Store(queue string, data []byte) error { return nil }
func (nd nullDLStorage) Fetch(queue string) (map[string][]byte, error) {
	return nil, nil
}
func (nd nullDLStorage) Remove(id string) error { return nil }
// internalDLStorage is an in-memory dead-letter store. index maps a
// message id back to its queue so Remove can locate it; dataMap is
// queue -> id -> payload. Not safe for concurrent use.
type internalDLStorage struct {
	index   map[string]string
	dataMap map[string]map[string][]byte
}

// Store appends data to queue's dead letters under a sequential id.
// NOTE(review): the id is derived from the current map length, so after
// a Remove a later Store can reuse an id and overwrite an entry —
// confirm whether callers rely on unique ids.
func (id *internalDLStorage) Store(queue string, data []byte) error {
	if id.dataMap == nil {
		id.index = make(map[string]string)
		id.dataMap = make(map[string]map[string][]byte)
	}
	if _, ok := id.dataMap[queue]; !ok {
		id.dataMap[queue] = make(map[string][]byte)
	}
	pid := strconv.Itoa(len(id.dataMap[queue]))
	id.index[pid], id.dataMap[queue][pid] = queue, data
	return nil
}

// Fetch returns all dead letters for queue, keyed by id.
func (id *internalDLStorage) Fetch(queue string) (map[string][]byte, error) {
	return id.dataMap[queue], nil
}

// Remove deletes one dead letter by id, using index to find its queue.
func (id *internalDLStorage) Remove(pid string) error {
	queue := id.index[pid]
	delete(id.dataMap[queue], pid)
	return nil
}
// internalTXStorage is an in-memory transaction store keyed by ids from
// generateSeqId (defined elsewhere in the package). Not safe for
// concurrent use.
type internalTXStorage struct {
	dataMap map[string][]byte
}

// Store saves data under a fresh sequential id and returns the id.
func (it *internalTXStorage) Store(data []byte) (string, error) {
	if it.dataMap == nil {
		it.dataMap = make(map[string][]byte)
	}
	id := generateSeqId()
	it.dataMap[id] = data
	return id, nil
}

// Fetch returns the payload stored under id (nil if absent).
func (it *internalTXStorage) Fetch(id string) ([]byte, error) {
	return it.dataMap[id], nil
}

// Remove deletes the payload stored under id.
func (it *internalTXStorage) Remove(id string) error {
	delete(it.dataMap, id)
	return nil
}
// internalDriver is the in-memory bus driver: queues holds every queue
// by name, and relation maps topic -> queue -> routeKey -> queue for
// fan-out routing.
type internalDriver struct {
	queues   map[string]*internalQueue
	relation map[string]map[string]map[string]*internalQueue
}

// internalQueue is an in-memory queue with an optional per-queue
// delivery delay.
type internalQueue struct {
	name    string
	delay   time.Duration
	msgChan chan internalData
}

// internalData is one in-flight message plus its delivery delay.
type internalData struct {
	data  []byte
	delay time.Duration
}
// CreateQueue registers (or replaces) an in-memory queue with the given
// per-queue delivery delay.
// NOTE(review): the channel capacity 9 is a magic number — confirm.
func (id *internalDriver) CreateQueue(name string, delay time.Duration) error {
	if id.queues == nil {
		id.queues = make(map[string]*internalQueue)
	}
	id.queues[name] = &internalQueue{
		name:    name,
		delay:   delay,
		msgChan: make(chan internalData, 9),
	}
	return nil
}
// CreateTopic ensures a (possibly empty) subscription table exists for
// the topic; calling it twice is harmless.
func (id *internalDriver) CreateTopic(name string) error {
	if id.relation == nil {
		id.relation = make(map[string]map[string]map[string]*internalQueue)
	}
	if _, ok := id.relation[name]; !ok {
		id.relation[name] = make(map[string]map[string]*internalQueue)
	}
	return nil
}
// Subscribe binds queue to topic under routeKey. The topic must already
// exist (CreateTopic) and the queue must be registered (CreateQueue).
func (id *internalDriver) Subscribe(topic, queue, routeKey string) error {
	if _, ok := id.relation[topic][queue]; !ok {
		id.relation[topic][queue] = make(map[string]*internalQueue)
	}
	id.relation[topic][queue][routeKey] = id.queues[queue]
	return nil
}
// UnSubscribe removes one routeKey binding of queue from topic.
func (id *internalDriver) UnSubscribe(topic, queue, routeKey string) error {
	delete(id.relation[topic][queue], routeKey)
	return nil
}
// SendToQueue enqueues content on the named queue with the given
// delivery delay; blocks when the queue's channel buffer is full.
func (id *internalDriver) SendToQueue(queue string, content []byte, delay time.Duration) error {
	id.queues[queue].msgChan <- internalData{delay: delay, data: content}
	return nil
}
// SendToTopic fans content out to every queue subscribed to topic whose
// route key matches routeKey exactly; each delivery uses the target
// queue's own configured delay.
func (id *internalDriver) SendToTopic(topic string, content []byte, routeKey string) error {
	for _, queues := range id.relation[topic] {
		for rk, queue := range queues {
			if rk == routeKey {
				queue.msgChan <- internalData{delay: queue.delay, data: content}
			}
		}
	}
	return nil
}
// ReceiveMessage consumes the named queue until ctx is cancelled. Each
// message is handed to handler in its own goroutine (after honoring the
// message delay); a false return requeues the message with the same
// delay. The errChan parameter is currently unused, matching the
// original signature.
func (id *internalDriver) ReceiveMessage(ctx context.Context, queue string, errChan chan error, handler func([]byte) bool) {
	msgs := id.queues[queue].msgChan
	for {
		select {
		case <-ctx.Done():
			return
		case msg := <-msgs:
			goroutine(func() {
				if msg.delay > 0 {
					<-time.NewTimer(msg.delay).C
				}
				if !handler(msg.data) {
					_ = id.SendToQueue(queue, msg.data, msg.delay)
				}
			})
		}
	}
}
|
/*
* @lc app=leetcode id=283 lang=golang
*
* [283] Move Zeroes
*
* https://leetcode.com/problems/move-zeroes/description/
*
* algorithms
* Easy (57.54%)
* Likes: 3731
* Dislikes: 121
* Total Accepted: 825.9K
* Total Submissions: 1.4M
* Testcase Example: '[0,1,0,3,12]'
*
* Given an array nums, write a function to move all 0's to the end of it while
* maintaining the relative order of the non-zero elements.
*
* Example:
*
*
* Input: [0,1,0,3,12]
* Output: [1,3,12,0,0]
*
* Note:
*
*
* You must do this in-place without making a copy of the array.
* Minimize the total number of operations.
*
*/
// @lc code=start
// moveZeroes moves all zeros to the end of nums in place while keeping
// the relative order of the non-zero elements (LeetCode 283); it
// delegates to the swap-based variant.
func moveZeroes(nums []int) {
	moveZeroes2(nums)
}
// moveZeroes2 compacts non-zero values in place by swapping each one
// into the next write slot j; zeros naturally accumulate at the tail.
func moveZeroes2(nums []int) {
	j := 0
	for i := range nums {
		if nums[i] == 0 {
			continue
		}
		// Skip the self-swap when read and write positions coincide.
		if i != j {
			nums[i], nums[j] = nums[j], nums[i]
		}
		j++
	}
}
// moveZeroes1 is the overwrite variant: copy every non-zero value to
// the front of the slice, then zero-fill the remainder.
func moveZeroes1(nums []int) {
	w := 0
	for _, v := range nums {
		if v != 0 {
			nums[w] = v
			w++
		}
	}
	for ; w < len(nums); w++ {
		nums[w] = 0
	}
}
// @lc code=end
|
package complex
import (
"errors"
"log"
"os"
"time"
flutter "github.com/go-flutter-desktop/go-flutter"
"github.com/go-flutter-desktop/go-flutter/plugin"
)
// Example demonstrates how to call a platform-specific API to retrieve
// a complex data structure
type Example struct {
channel *plugin.MethodChannel
}
var _ flutter.Plugin = &Example{}
// InitPlugin creates a MethodChannel and set a HandleFunc to the
// shared 'getData' method.
func (p *Example) InitPlugin(messenger plugin.BinaryMessenger) error {
p.channel = plugin.NewMethodChannel(messenger, "instance.id/go/data", plugin.StandardMethodCodec{})
p.channel.HandleFunc("getData", getRemotesFunc)
p.channel.HandleFunc("mutualCall", p.mutualCall)
p.channel.HandleFunc("getError", getErrorFunc)
p.channel.CatchAllHandleFunc(catchAllTest)
return nil
}
// mutualCall is attached to the plugin struct
func (p *Example) mutualCall(arguments interface{}) (reply interface{}, err error) {
go func() {
time.Sleep(3 * time.Second)
if rep, err := p.channel.InvokeMethodWithReply("InvokeMethodWithReply", "text_from_golang"); err != nil {
log.Println("InvokeMethod error:", err)
} else {
if rep.(string) != "text_from_dart" {
log.Println("InvokeMethod error: rep.(string) != \"text_from_dart\"")
os.Exit(1)
}
}
}()
if arguments.(string) != "hello_from_dart" {
log.Println("InvokeMethod error: arguments.(string) != \"hello_from_dart\"")
os.Exit(1)
}
return "hello_from_go", nil
}
func catchAllTest(methodCall interface{}) (reply interface{}, err error) {
method := methodCall.(plugin.MethodCall)
// return the randomized Method Name
return method.Method, nil
}
func getRemotesFunc(arguments interface{}) (reply interface{}, err error) {
dartMsg := arguments.(string) // reading the string argument
if dartMsg != "HelloFromDart" {
return nil, errors.New("wrong message value, expecting 'HelloFromDart' got '" + dartMsg + "'")
}
var sectionList = make([]interface{}, 4)
sectionList[0] = map[interface{}]interface{}{
"instanceid": int32(1023),
"pcbackup": "test",
"brbackup": "test2",
}
sectionList[1] = map[interface{}]interface{}{
"instanceid": int32(1024),
"pcbackup": "test",
"brbackup": "test2",
}
sectionList[2] = map[interface{}]interface{}{
"instanceid": int32(1056),
"pcbackup": "coucou",
"brbackup": "coucou2",
}
sectionList[3] = map[interface{}]interface{}{
"instanceid": int32(3322),
"pcbackup": "finaly",
"brbackup": "finaly2",
}
return sectionList, nil
}
func getErrorFunc(arguments interface{}) (reply interface{}, err error) {
return nil, plugin.NewError("customErrorCode", errors.New("Some error"))
}
|
// Each new term in the Fibonacci sequence is generated by adding the previous
// two terms. By starting with 1 and 2, the first 10 terms will be:
//
// 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
//
// By considering the terms in the Fibonacci sequence whose values do not
// exceed four million, find the sum of the even-valued terms.
package main
import "fmt"
// main sums the even-valued Fibonacci terms not exceeding four million
// (Project Euler problem 2) and prints the total.
func main() {
	const limit = 4000000
	sum := 0
	// a, b walk the sequence 1, 2, 3, 5, 8, ... with b as the term
	// under consideration, matching the original's prev/term pair.
	for a, b := 1, 2; b <= limit; a, b = b, a+b {
		if b%2 == 0 {
			sum += b
		}
	}
	fmt.Println(sum)
}
|
// Showcase the `binding` feature from gin. based on:
// https://github.com/gin-gonic/gin#model-binding-and-validation
//
// Usage examples:
//
// 1. curl --data '{"user":"andi", "pass":"123", "pin":""}' -X POST 'localhost:8080/login'
//
// results: {"status":"you are logged in"}
//
// 2. curl --data '{"user":"carl", "pin":""}' -X POST 'localhost:8080/login'
//
// results: {"status":"unauthorized"}
//
// 3. curl --data '{"pass":"123"}' -X POST 'localhost:8080/login'
//
// results: {"error":"Key: 'Login.User' Error:Field validation for 'User' failed on the 'required' tag"}
//
// Binding seems to be even more capable. See:
// https://blog.logrocket.com/gin-binding-in-go-a-tutorial-with-examples/
//
// `binding:"required"` fails when you give it zero-values, e.g. 0, "" or false.
// If you want to allow for zero-values, use a pointer type.
//
// Generally, consider to use the package `Validator` as opposed to Gin's own validation:
// https://github.com/go-playground/validator/blob/master/_examples/simple/main.go
// `Validator`` It seems much more capable.
package main
import (
"net/http"
"github.com/gin-gonic/gin"
)
// Login is the JSON request body bound and validated for POST /login.
// Binding from JSON
type Login struct {
	// binding:"required" will also break when the value is a zero-value `""`
	User string `json:"user" binding:"required"`
	// binding:"-" seems to be like not writing it at all
	// see: https://github.com/gin-gonic/gin#:~:text=the%20%27required%27%20tag%22%7D-,Skip%20validate,-When%20running%20the
	Password string `json:"pass" binding:"-"`
	// binding:"required" on a pointer allows for the value to be zero-value `""`
	Pin *string `json:"pin" binding:"required"`
}
// main starts a gin server with a single POST /login endpoint that
// binds and validates the JSON body against the Login struct, then
// checks the hard-coded demo credentials (user "andi" / pass "123").
func main() {
	router := gin.Default()
	router.POST("/login", func(c *gin.Context) {
		var json Login
		// Binding failure (missing/invalid fields) -> 400 with details.
		if err := c.ShouldBindJSON(&json); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		if json.User != "andi" || json.Password != "123" {
			c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
			return
		}
		c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
	})
	router.Run(":8080")
}
|
package gha
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"syscall"
"golang.org/x/crypto/ssh/terminal"
)
// fileExist reports whether fname can be stat'ed (i.e. exists and is reachable).
func fileExist(fname string) bool {
    if _, err := os.Stat(fname); err != nil {
        return false
    }
    return true
}
// CLI returns a GitHub personal access token, caching it in the file fname.
//
// If the cache file already exists its contents are returned directly.
// Otherwise the username and password are read from the terminal, exchanged
// for a token via Auth, and the token is persisted (mode 0600) before being
// returned.
func CLI(fname string, r *Request) (string, error) {
    if fileExist(fname) {
        cached, readErr := ioutil.ReadFile(fname)
        return string(cached), readErr
    }
    user, pass, err := getUserInfo()
    if err != nil {
        return "", err
    }
    token, err := Auth(user, pass, r)
    if err != nil {
        return "", err
    }
    if writeErr := ioutil.WriteFile(fname, []byte(token), 0600); writeErr != nil {
        return "", writeErr
    }
    return token, nil
}
// getUserInfo interactively collects a username and its password.
func getUserInfo() (string, string, error) {
    user, err := readusername()
    if err != nil {
        return "", "", err
    }
    var pass string
    if pass, err = readPassword(user); err != nil {
        return "", "", err
    }
    return user, pass, nil
}
// readusername prompts on the controlling terminal and reads one line as the
// username.
func readusername() (string, error) {
    tty, err := os.Open("/dev/tty")
    if err != nil {
        return "", err
    }
    defer tty.Close()
    fmt.Print("username: ")
    sc := bufio.NewScanner(tty)
    sc.Split(bufio.ScanLines)
    // BUG FIX: the Scan result and sc.Err() were previously ignored, so a
    // read failure or EOF silently returned an empty username as success.
    if !sc.Scan() {
        if err := sc.Err(); err != nil {
            return "", err
        }
        return "", fmt.Errorf("no username entered")
    }
    return sc.Text(), nil
}
// readPassword reads a password for user from the terminal without echoing
// it. Backspace/DEL edits the buffer, Enter finishes, Ctrl-C/Ctrl-D aborts.
func readPassword(user string) (string, error) {
    fmt.Printf("password for %s (never stored): ", user)
    res := make([]byte, 0)
    for {
        v, err := readCharAsPassword()
        if err != nil {
            return "", err
        }
        // Backspace (8) or DEL (127): drop the last buffered byte.
        // BUG FIX: previously this fell through to the append below, so the
        // control byte itself (127/8) was appended to the password.
        if v == 127 || v == 8 {
            if len(res) > 0 {
                res = res[:len(res)-1]
                os.Stdout.Write([]byte("\b \b"))
            }
            continue
        }
        // CR or LF terminates input.
        if v == 13 || v == 10 {
            fmt.Println()
            return string(res), nil
        }
        // C-c or C-d
        if v == 3 || v == 4 {
            return "", fmt.Errorf("Exited by user")
        }
        if v != 0 {
            res = append(res, v)
        }
    }
}
// readCharAsPassword reads a single raw byte from the controlling terminal
// with echo disabled (raw mode is restored before returning).
func readCharAsPassword() (byte, error) {
    tty, err := os.Open("/dev/tty")
    if err != nil {
        // BUG FIX: previously returned (0, nil), silently swallowing the error.
        return 0, err
    }
    defer tty.Close()
    fd := int(tty.Fd())
    if oldState, err := terminal.MakeRaw(fd); err != nil {
        // BUG FIX: previously returned (0, nil) here as well.
        return 0, err
    } else {
        defer terminal.Restore(fd, oldState)
    }
    var buf [1]byte
    if n, err := syscall.Read(fd, buf[:]); n == 0 || err != nil {
        return 0, err
    }
    return buf[0], nil
}
|
package routers
import (
"github.com/astaxie/beego"
"webserver/controllers/account"
"webserver/controllers/auth"
"webserver/controllers/chat"
"webserver/controllers/forum"
"webserver/controllers/home"
"webserver/controllers/message"
//"webserver/controllers/phonecall"
"webserver/controllers/notify"
//"webserver/controllers/service"
//"webserver/controllers/commonservice"
"webserver/controllers/statistic"
"webserver/controllers/thirdpay"
"webserver/controllers/user"
"webserver/controllers/utils"
)
func init() {
beego.Router("/user/register", &user.RegisterController{})
beego.Router("/user/login", &user.LoginController{})
beego.Router("/user/edit", &user.EditController{})
beego.Router("/user/profile", &user.InfoController{})
beego.Router("/user/reset", &user.ResetController{})
beego.Router("/user/modify", &user.ModifyController{})
beego.Router("/user/cellphone", &user.ChangeController{})
beego.Router("/user/logout", &user.LogoutController{})
//beego.Router("/user/avatar", &user.AvatarController{})
//beego.Router("/user/icon", &user.IconController{})
//beego.Router("/user/avatar/remove", &user.AvatarRemoveController{})
beego.Router("/user/comment", &user.AddUserCommentController{})
beego.Router("/user/commentlist", &user.UserCommentListController{})
beego.Router("/user/position", &user.PositionController{})
beego.Router("/user/ststoken", &user.OssStsTokenController{})
beego.Router("/user/black", &user.BlackCellphoneController{})
beego.Router("/chat/user/token", &chat.GetTokenController{})
beego.Router("/chat/user/query", &chat.QuerysController{})
beego.Router("/chat/user/unseen", &chat.UnseenController{})
beego.Router("/chat/user/filter", &chat.UserFilterController{})
beego.Router("/chat/user/operate", &chat.OperateController{})
beego.Router("/chat/user/info", &chat.ChatInfoController{})
beego.Router("/chat/group/info", &chat.GroupInfoController{})
beego.Router("/topic/article/add", &forum.AddTopicController{})
beego.Router("/topic/comment/add", &forum.AddCommentController{})
beego.Router("/topic/comment/detail", &forum.CommentDetailController{})
beego.Router("/topic/article/list", &forum.TopicListController{})
beego.Router("/topic/article/mylist", &forum.MyListController{})
beego.Router("/topic/article/query", &forum.QueryListController{})
//beego.Router("/topic/detail/:id", &forum.DetailController{})
beego.Router("/topic/comment/list", &forum.CommentListController{})
beego.Router("/topic/comment/me", &forum.CommentMeController{})
beego.Router("/topic/handle", &forum.HandlerController{})
beego.Router("/topic/article/detail", &forum.TopicDetailController{})
beego.Router("/sms/verify", &utils.SmsVerifyController{})
beego.Router("/sms/check", &utils.SmsCheckController{})
//beego.Router("/utils/image", &utils.UploadFileController{})
beego.Router("/utils/feedback", &utils.AddFeedBackController{})
beego.Router("/utils/banner", &utils.HomeImageController{})
beego.Router("/utils/bannerinfo", &utils.BannerController{})
beego.Router("/utils/share", &utils.ShareController{})
beego.Router("/utils/appinfo", &utils.AppInfoController{})
beego.Router("/utils/clientlog", &utils.ClientLogController{})
beego.Router("/utils/paper/*", &utils.PaperController{})
//beego.Router("/commonservice/handle", &commonservice.HandleServiceController{})
//beego.Router("/commonservice/list", &commonservice.GetServiceListController{})
//beego.Router("/commonservice/add", &commonservice.AddServiceListController{})
//beego.Router("/commonservice/order", &commonservice.OrderListController{})
//beego.Router("/commonservice/publish", &commonservice.PublishListController{})
//beego.Router("/service/home", &service.GetHomeController{})
//beego.Router("/service/handle", &service.HandleServiceController{})
//beego.Router("/service/list", &service.GetServiceListController{})
//beego.Router("/service/add", &service.AddServiceListController{})
//beego.Router("/service/order", &service.OrderListController{})
//beego.Router("/service/detail", &service.GetServiceItemController{})
//beego.Router("/service/publish", &service.PublishListController{})
beego.Router("/account/recharge", &account.RechargeController{})
beego.Router("/account/withdraw", &account.WithdrawController{})
beego.Router("/account/order/add", &account.AddOrderController{})
beego.Router("/account/order/handle", &account.HandleOrderController{})
beego.Router("/account/info", &account.InfoController{})
beego.Router("/account/withdraw/list", &account.WithdrawListController{})
beego.Router("/account/order/pay", &account.OrderPayController{})
beego.Router("/account/order/extra", &account.OderExtraController{})
beego.Router("/account/recharge/ensure", &account.RechargeEnsureController{})
beego.Router("/account/tradelist", &account.TradeListController{})
//beego.Router("/account/deposit/list", &account.DepositListController{})
beego.Router("/auth/authenticate", &auth.AuthenticateController{})
beego.Router("/auth/select", &auth.SelectController{})
beego.Router("/auth/authenticate/add", &auth.AddAuthenticateController{})
beego.Router("/auth/query", &auth.QueryController{})
beego.Router("/message/list", &message.MessageListController{})
beego.Router("/message/read", &message.MessageHandleController{})
//beego.Router("/phonecall/extension", &phonecall.GetExtensionNumber{})
//beego.Router("/phonecall/getnumber", &phonecall.TianRunController{})
beego.Router("/thirdpay/wepay", &thirdpay.WepayController{})
beego.Router("/thirdpay/alipay", &thirdpay.AliPayController{})
beego.Router("/notify/test", ¬ify.TestNotifyController{})
beego.Router("/notify/rongyun", ¬ify.TestRongyunController{})
beego.Router("/statistic/record", &statistic.ClickStatisController{})
beego.Router("/*", &home.HomeController{})
}
|
package crcind
import (
"github.com/jgolang/config"
"github.com/jgolang/log"
"github.com/jhuygens/searcher-engine"
)
// crcindSearcher is the stateless Searcher implementation registered in init.
var crcindSearcher = Searcher{}

// init registers the crcind searcher under the name taken from configuration.
func init() {
    name := config.GetString("searchers.crcind")
    if err := searcher.RegisterSearcher(name, crcindSearcher); err != nil {
        // log.Fatal terminates the process; the unreachable `return` that
        // followed it has been removed.
        log.Fatal(err)
    }
    // FIX: message grammar ("has been register" -> "has been registered").
    log.Infof("Searcher %v has been registered", name)
}
|
package majiangserver
import (
"logger"
//"sort"
)
// MaJiangPattern is a meld/pattern of mahjong cards.
type MaJiangPattern struct {
    id            int32          // derived identifier: ptype*1000 + cType*100 + sum of card values (see calcID)
    ptype         int32          // pattern type
    cType         int32          // card suit/type shared by all cards, or UnknowCardType when mixed
    cards         []*MaJiangCard // the cards making up the pattern
    isShowPattern bool           // whether the pattern is exposed (shown to other players)
}
// NewPattern builds a pattern of the given type from cards; it logs and
// returns nil when cards is empty. (Original comment: 新建一个模式 — "create a
// new pattern".)
func NewPattern(ptype int32, cards []*MaJiangCard, isShowPattern bool) *MaJiangPattern {
    if len(cards) == 0 {
        logger.Error("NewPattern:selfs is nil.")
        return nil
    }
    p := new(MaJiangPattern)
    p.ptype = ptype
    p.isShowPattern = isShowPattern
    p.Init(cards)
    return p
}
// Init stores cards on the pattern, then derives the shared card type and the
// pattern ID. (Original comment: 初始化一个模式 — "initialize a pattern".)
func (self *MaJiangPattern) Init(cards []*MaJiangCard) {
    // if self.ptype == PTSZ {
    // sort.Sort(CardList(cards))
    // }
    self.cards = cards
    self.checkCardType()
    self.calcID()
}
// calcID computes the pattern's ID as ptype*1000 + cType*100 + sum of card
// values. (Original comment: 计算模式的ID — "compute the pattern's ID".)
func (self *MaJiangPattern) calcID() {
    var id int32 = 0
    for _, v := range self.cards {
        id += v.value
    }
    self.id = self.ptype*1000 + self.cType*100 + id
}
// checkCardType derives the pattern's card type: the common suit of all cards,
// or UnknowCardType when suits are mixed. (Original comment: 检查牌的花色 —
// "check the cards' suit".)
func (self *MaJiangPattern) checkCardType() {
    if self.cards == nil || len(self.cards) <= 0 {
        logger.Error("checkBigCard:using before must init.")
        // BUG FIX: previously execution fell through and self.cards[0]
        // panicked with an index-out-of-range on the empty slice.
        return
    }
    self.cType, _ = self.cards[0].CurValue()
    for _, v := range self.cards {
        cType, _ := v.CurValue()
        if cType != self.cType {
            self.cType = UnknowCardType
            break
        }
    }
}
// IsEqual reports whether p contains the same pattern as self: same pattern
// type, same card type, and the same multiset of cards (红中/hongzhong cards
// compared separately from normal cards). (Original comment: 是否相等 —
// "whether equal".)
func (self *MaJiangPattern) IsEqual(p *MaJiangPattern) bool {
    if p == nil {
        return false
    }
    if self.ptype != p.ptype {
        return false
    }
    if self.cType != p.cType {
        return false
    }
    // Split both hands into normal cards and hongzhong (red dragon) cards.
    nSelfCards, hzSelfCards := SplitCards(self.cards)
    nPCards, hzPCards := SplitCards(p.cards)
    if len(nSelfCards) != len(nPCards) || len(hzSelfCards) != len(hzPCards) {
        return false
    }
    // Multiset comparison: remove each of p's normal cards from a copy of
    // self's normal cards; any failure or leftover means the sets differ.
    temp := make([]*MaJiangCard, len(nSelfCards))
    copy(temp, nSelfCards)
    for _, c := range nPCards {
        removedSuccess := true
        removedSuccess, temp = RemoveCardByType(temp, c.cType, c.value)
        if !removedSuccess {
            return false
        }
    }
    if len(temp) > 0 {
        return false
    }
    return true
}
// IsAllHZ reports whether every card in the pattern is a hongzhong (red
// dragon). (Original comment: 是否是全红中 — "whether all are hongzhong".)
func (self *MaJiangPattern) IsAllHZ() bool {
    for i := range self.cards {
        if !self.cards[i].IsHongZhong() {
            return false
        }
    }
    return true
}
|
package ewkb
import (
"testing"
"github.com/paulmach/orb"
"github.com/paulmach/orb/encoding/internal/wkbcommon"
)
// TestLineString round-trips EWKB line strings (including one larger than the
// decoder's point-allocation guard) through compare.
func TestLineString(t *testing.T) {
    // Build a line string longer than MaxPointsAlloc to exercise the
    // incremental-allocation path of the decoder.
    large := orb.LineString{}
    for i := 0; i < wkbcommon.MaxPointsAlloc+100; i++ {
        large = append(large, orb.Point{float64(i), float64(-i)})
    }
    cases := []struct {
        name     string
        data     []byte
        srid     int
        expected orb.LineString
    }{
        {
            name:     "large line string",
            data:     MustMarshal(large, 4326),
            srid:     4326,
            expected: large,
        },
        {
            // Little-endian EWKB with an SRID flag.
            name:     "line string",
            data:     MustDecodeHex("0102000020E610000002000000CDCCCCCCCC0C5FC00000000000004540713D0AD7A3005EC01F85EB51B8FE4440"),
            srid:     4326,
            expected: orb.LineString{{-124.2, 42}, {-120.01, 41.99}},
        },
        {
            // Big-endian EWKB variant.
            name:     "another line string",
            data:     MustDecodeHex("0020000002000010e6000000023ff0000000000000400000000000000040080000000000004010000000000000"),
            srid:     4326,
            expected: orb.LineString{{1, 2}, {3, 4}},
        },
    }
    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            compare(t, tc.expected, tc.srid, tc.data)
        })
    }
}
// TestMultiLineString round-trips EWKB multi-line-strings (including one with
// more members than the decoder's multi-allocation guard) through compare.
func TestMultiLineString(t *testing.T) {
    // More members than MaxMultiAlloc to exercise incremental allocation.
    large := orb.MultiLineString{}
    for i := 0; i < wkbcommon.MaxMultiAlloc+100; i++ {
        large = append(large, orb.LineString{})
    }
    cases := []struct {
        name     string
        data     []byte
        srid     int
        expected orb.MultiLineString
    }{
        {
            name:     "large",
            srid:     4326,
            data:     MustMarshal(large, 4326),
            expected: large,
        },
        {
            // Little-endian EWKB with SRID flag.
            name:     "one string",
            data:     MustDecodeHex("0105000020e610000001000000010200000002000000000000000000f03f000000000000004000000000000008400000000000001040"),
            srid:     4326,
            expected: orb.MultiLineString{{{1, 2}, {3, 4}}},
        },
        {
            // Big-endian EWKB variant with two member line strings.
            name:     "two strings",
            data:     MustDecodeHex("0020000005000010e6000000020000000002000000023ff000000000000040000000000000004008000000000000401000000000000000000000020000000240140000000000004018000000000000401c0000000000004020000000000000"),
            srid:     4326,
            expected: orb.MultiLineString{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
        },
    }
    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            compare(t, tc.expected, tc.srid, tc.data)
        })
    }
}
|
package sandstormhttpbridge
import (
"context"
"errors"
"net"
"os"
"time"
bridge "zenhack.net/go/tempest/capnp/sandstorm-http-bridge"
"capnproto.org/go/capnp/v3"
"capnproto.org/go/capnp/v3/rpc"
)
// connectSocket connects to the sandstorm API socket, retrying with
// exponential backoff (starting at 10ms, doubling, capped below one second)
// while the socket file does not yet exist.
//
// TODO: it would be nice if the bridge would set this up in a way such that we
// could assume the socket is already there on start; see about sending a patch
// upstream.
func connectSocket() (net.Conn, error) {
    backoff := time.Second / 100
    conn, err := tryConnectSocket()
    for errors.Is(err, os.ErrNotExist) && backoff < time.Second {
        time.Sleep(backoff)
        conn, err = tryConnectSocket()
        backoff *= 2
    }
    return conn, err
}
// tryConnectSocket makes a single attempt to dial the sandstorm API unix socket.
func tryConnectSocket() (net.Conn, error) {
    return net.Dial("unix", "/tmp/sandstorm-api")
}
// connectBridge dials the bridge socket and returns its bootstrap capability.
// A non-null hooks client is offered as our side's bootstrap interface.
func connectBridge(ctx context.Context, hooks capnp.Client) (ret bridge.SandstormHttpBridge, err error) {
    conn, err := connectSocket()
    if err != nil {
        // Explicit return instead of the previous naked return (idiom).
        return ret, err
    }
    // Only set RPC options when a hooks client was actually provided.
    var options *rpc.Options
    if (hooks != capnp.Client{}) {
        options = &rpc.Options{
            BootstrapClient: hooks,
        }
    }
    transport := rpc.NewStreamTransport(conn)
    ret = bridge.SandstormHttpBridge(rpc.NewConn(transport, options).Bootstrap(ctx))
    return ret, nil
}
// ConnectWithHooks connects to the bridge, exposing hooks as this app's
// bootstrap interface.
func ConnectWithHooks(ctx context.Context, hooks bridge.AppHooks) (bridge.SandstormHttpBridge, error) {
    return connectBridge(ctx, capnp.Client(hooks))
}
// Connect connects to the bridge without offering a bootstrap interface.
func Connect(ctx context.Context) (bridge.SandstormHttpBridge, error) {
    return connectBridge(ctx, capnp.Client{})
}
|
package routers
import (
"opscenter/controllers"
"github.com/astaxie/beego"
)
// init registers the opscenter routes. Reformatted to be gofmt-clean
// (missing spaces after commas in the original).
func init() {
    beego.Router("/", &controllers.IndexController{})
    beego.Router("/regist", &controllers.RegistController{})
    beego.Router("/login", &controllers.LoginController{})
    beego.Router("/logout", &controllers.LogoutUserController{})
    beego.Router("/home", &controllers.HomeController{})
    beego.Router("/show/loading", &controllers.ShowloadingController{})
}
|
package sync
import (
"fmt"
"os/exec"
"strings"
"time"
log "github.com/sirupsen/logrus"
)
// StartRsync pushes each configured Sync.Source to the target rsync server on
// the given interval until done is closed. The interval is measured from the
// end of one sync round to the start of the next (same as the original
// time.After behavior).
func StartRsync(done <-chan struct{}, host string, port int, syncs []Sync, interval time.Duration) {
    go func() {
        // PERF FIX: reuse one timer instead of allocating a new one via
        // time.After on every loop iteration.
        timer := time.NewTimer(interval)
        defer timer.Stop()
        for {
            select {
            case <-timer.C:
                for _, sync := range syncs {
                    err := executeRsync(sync.Source, fmt.Sprintf("rsync://%s:%d/%s/", host, port, strings.Replace(sync.Destination, "/", "_", -1)))
                    if err != nil {
                        log.Errorf("Error while running rsync: %s", err)
                    }
                }
                // Re-arm after the work finishes, matching time.After semantics.
                timer.Reset(interval)
            case <-done:
                return
            }
        }
    }()
}
// executeRsync mirrors sourceDir to destination using the system rsync binary.
// TODO: How to display sync completed /failed? Cannot print terminal because
// messing out terminal attachment. Desktop notification?
func executeRsync(sourceDir, destination string) error {
    args := []string{
        "--recursive", "--times", "--links",
        "--devices", "--specials", "--compress",
        sourceDir, destination,
    }
    return exec.Command("/usr/bin/rsync", args...).Run()
}
|
package devices
import (
"math/rand"
"time"
)
// randomizeCollection returns a random duration in [0, 300).
//
// NOTE(review): the result is interpreted as nanoseconds
// (time.Duration of a bare int) — confirm whether seconds were intended.
func randomizeCollection() time.Duration {
    const (
        min = 0
        max = 300
    )
    // FIX: rand.Seed on every call reseeded (and mutated) the global RNG and
    // is deprecated; use a private source instead.
    rng := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
    i := rng.Intn(max-min) + min
    return time.Duration(int64(i))
}
package session
import (
"github.com/garyburd/redigo/redis"
"sync"
)
// RedisSession is a session intended to be backed by a Redis connection pool.
type RedisSession struct {
    // pool is the Redis connection pool.
    // NOTE(review): never initialized by NewRedisSession — confirm whether
    // this type is a work-in-progress stub.
    pool *redis.Pool
    // rwlock guards concurrent access to session state.
    rwlock sync.RWMutex
}

// NewRedisSession creates an empty RedisSession.
// NOTE(review): the id parameter is currently ignored and pool is left nil;
// this looks unfinished — verify before use.
func NewRedisSession(id string) *RedisSession {
    return &RedisSession{}
}
// Set is a stub: it does nothing and returns nil.
func (r *RedisSession) Set(key string, value interface{}) (err error) {
    return
}

// Get is a stub: it returns (nil, nil).
func (r *RedisSession) Get(key string) (value interface{}, err error) {
    return
}

// Del is a stub: it does nothing and returns nil.
func (r *RedisSession) Del(key string) (err error) {
    return
}

// Save is a stub: it does nothing and returns nil.
func (r *RedisSession) Save() (err error) {
    return
}
|
/*
Copyright 2021 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"bytes"
"context"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/graph"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/render"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
const (
    // Raw manifests
    // NOTE(review): the YAML indentation below was reconstructed to
    // conventional two-space nesting; the original raw-string layout was
    // lost in formatting. Verify against the repository copy.
    podYaml = `apiVersion: v1
kind: Pod
metadata:
  name: leeroy-web
spec:
  containers:
  - image: leeroy-web
    name: leeroy-web`
    // manifests with image tags and label
    labeledPodYaml = `apiVersion: v1
kind: Pod
metadata:
  labels:
    run.id: test
  name: leeroy-web
spec:
  containers:
  - image: leeroy-web:v1
    name: leeroy-web`
    // manifests with image tags
    taggedPodYaml = `apiVersion: v1
kind: Pod
metadata:
  name: leeroy-web
spec:
  containers:
  - image: leeroy-web:v1
    name: leeroy-web`
    // manifest with image tag and an explicit namespace
    podWithNamespaceYaml = `apiVersion: v1
kind: Pod
metadata:
  name: leeroy-web
  namespace: mynamespace
spec:
  containers:
  - image: leeroy-web:v1
    name: leeroy-web`
)
// TestRender verifies that the kubectl renderer tags images, applies labels,
// and injects a namespace into raw manifests.
func TestRender(t *testing.T) {
    tests := []struct {
        description   string
        renderConfig  latest.RenderConfig
        labels        map[string]string
        expected      string
        cmpOptions    cmp.Options
        namespaceFlag string
    }{
        {
            description: "single manifest with no labels",
            renderConfig: latest.RenderConfig{
                Generate: latest.Generate{RawK8s: []string{"pod.yaml"}},
            },
            expected:   taggedPodYaml,
            cmpOptions: []cmp.Option{testutil.YamlObj(t)},
        },
        {
            description: "single manifest with labels",
            renderConfig: latest.RenderConfig{
                Generate: latest.Generate{RawK8s: []string{"pod.yaml"}},
            },
            labels:     map[string]string{"run.id": "test"},
            expected:   labeledPodYaml,
            cmpOptions: []cmp.Option{testutil.YamlObj(t)},
        },
        {
            description:   "single manifest with namespace flag",
            namespaceFlag: "mynamespace",
            renderConfig: latest.RenderConfig{
                Generate: latest.Generate{RawK8s: []string{"pod.yaml"}},
            },
            expected:   podWithNamespaceYaml,
            cmpOptions: []cmp.Option{testutil.YamlObj(t)},
        },
    }
    for _, test := range tests {
        testutil.Run(t, test.description, func(t *testutil.T) {
            // Each case renders pod.yaml from a fresh temp working directory.
            tmpDirObj := t.NewTempDir()
            tmpDirObj.Write("pod.yaml", podYaml).
                Touch("empty.ignored").
                Chdir()
            mockCfg := render.MockConfig{
                WorkingDir: tmpDirObj.Root(),
            }
            // The namespace is only injected when the flag is non-empty.
            injectNs := test.namespaceFlag != ""
            r, err := New(mockCfg, test.renderConfig, test.labels, "default", test.namespaceFlag, nil, injectNs)
            t.CheckNoError(err)
            var b bytes.Buffer
            manifestList, errR := r.Render(context.Background(), &b, []graph.Artifact{{ImageName: "leeroy-web", Tag: "leeroy-web:v1"}},
                false)
            t.CheckNoError(errR)
            t.CheckDeepEqual(test.expected, manifestList.String(), test.cmpOptions)
        })
    }
}
// TestDependencies verifies which files ManifestDeps reports for various
// manifest glob configurations (ordering, globs, ignored and remote entries).
func TestDependencies(t *testing.T) {
    tests := []struct {
        description string
        manifests   []string
        expected    []string
    }{
        {
            description: "no manifest",
            manifests:   []string(nil),
            expected:    []string(nil),
        },
        {
            description: "missing manifest file",
            manifests:   []string{"missing.yaml"},
            expected:    []string(nil),
        },
        {
            description: "ignore non-manifest",
            manifests:   []string{"*.ignored"},
            expected:    []string(nil),
        },
        {
            description: "single manifest",
            manifests:   []string{"deployment.yaml"},
            expected:    []string{"deployment.yaml"},
        },
        {
            description: "keep manifests order",
            manifests:   []string{"01_name.yaml", "00_service.yaml"},
            expected:    []string{"01_name.yaml", "00_service.yaml"},
        },
        {
            description: "sort children",
            manifests:   []string{"01/*.yaml", "00/*.yaml"},
            expected:    []string{filepath.Join("01", "a.yaml"), filepath.Join("01", "b.yaml"), filepath.Join("00", "a.yaml"), filepath.Join("00", "b.yaml")},
        },
        {
            description: "http manifest",
            manifests:   []string{"deployment.yaml", "http://remote.yaml"},
            expected:    []string{"deployment.yaml"},
        },
    }
    for _, test := range tests {
        testutil.Run(t, test.description, func(t *testutil.T) {
            // Lay out the fixture files each case's globs resolve against.
            tmpDir := t.NewTempDir()
            tmpDir.Touch("deployment.yaml", "01_name.yaml", "00_service.yaml", "empty.ignored").
                Touch("01/a.yaml", "01/b.yaml").
                Touch("00/b.yaml", "00/a.yaml").
                Chdir()
            mockCfg := render.MockConfig{WorkingDir: tmpDir.Root()}
            rCfg := latest.RenderConfig{
                Generate: latest.Generate{RawK8s: test.manifests},
            }
            r, err := New(mockCfg, rCfg, map[string]string{}, "default", "", nil, false)
            t.CheckNoError(err)
            dependencies, err := r.ManifestDeps()
            t.CheckNoError(err)
            if len(dependencies) == 0 {
                t.CheckDeepEqual(test.expected, dependencies)
            } else {
                // Non-empty results come back rooted at the temp dir.
                expected := make([]string, len(test.expected))
                for i, p := range test.expected {
                    expected[i] = filepath.Join(tmpDir.Root(), p)
                }
                t.CheckDeepEqual(expected, dependencies)
            }
        })
    }
}
|
package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
ctx "golang.org/x/net/context"
"golang.org/x/oauth2/clientcredentials"
)
// UploadConfig represents the basic configuration necessary to connect
// to the database.
type UploadConfig struct {
    SiteURL         *url.URL // base URL of the target site (scheme + host used for token URL)
    ClientID        string   // OAuth2 client credentials; OAuth is used only when ID, secret and TokenURI are all set
    ClientSecret    string
    TokenURI        string // path appended to SiteURL's scheme://host to fetch tokens
    TokenRefreshURI string // NOTE(review): not referenced by the visible code — confirm it is used elsewhere
    UserAgent       string // required; see ConfigValid
    // Ctx is the context used for OAuth token requests.
    // NOTE(review): storing a context in a struct is discouraged by Go
    // guidelines — consider passing it per call.
    Ctx ctx.Context
}
// ConfigValid reports whether the required fields for the upload
// configuration have been populated (currently only UserAgent).
func (up *UploadConfig) ConfigValid() bool {
    return up != nil && len(up.UserAgent) > 0
}
// DefaultUploadConfig returns a new Config instance with defaults populated.
// The default configuration is:
//
//	UserAgent: "datasipper-poc@0.0.1"
//	Ctx:       a background context
func DefaultUploadConfig() UploadConfig {
    var defaultConfig = UploadConfig{
        UserAgent: "datasipper-poc@0.0.1",
    }
    // BUG FIX: the cancel function returned by ctx.WithCancel was discarded
    // (go vet "lostcancel"), leaking the derived context. Since cancel could
    // never be invoked, a plain background context is behaviorally identical.
    defaultConfig.Ctx = ctx.Background()
    return defaultConfig
}
// UploadResults uploads a set of rows represented as a JSON array to a
// REST API endpoint via a POST request; the response body is discarded.
//
// params -keys: rows
func (up *UploadConfig) UploadResults(rows *[]interface{}) error {
    _, err := up.apiRequest("POST", rows)
    return err
}
// apiRequest executes an API request against SiteURL with the given method,
// sending params as a JSON body for POST/PUT. When ClientID, ClientSecret and
// TokenURI are all set, requests go through an OAuth2 client-credentials
// client; otherwise the default HTTP client is used. Non-POST responses are
// JSON-decoded into the returned map.
func (up *UploadConfig) apiRequest(method string, params *[]interface{}) (map[string]interface{}, error) {
    var httpClient *http.Client
    //Check if oAuth authentication should be used
    if len(up.ClientID) > 0 && len(up.ClientSecret) > 0 && len(up.TokenURI) > 0 {
        // Define an oauth configuration to connect to API
        oauth := &clientcredentials.Config{
            ClientID:     up.ClientID,
            ClientSecret: up.ClientSecret,
            TokenURL:     up.SiteURL.Scheme + "://" + up.SiteURL.Host + up.TokenURI,
        }
        // Use base oauth configuration to build an HTTP client which will automatically manage
        // requesting tokens and including tokens in request headers
        httpClient = oauth.Client(up.Ctx)
    } else {
        httpClient = http.DefaultClient
    }
    // Build the API request based on the request method given
    var req *http.Request
    var err error
    if method == "POST" || method == "PUT" {
        // Marshal the map object into JSON format expressed as []byte
        ba, err := json.Marshal(params)
        if err != nil {
            return nil, err
        }
        // Build the POST/PUT request with the body streamed via a bytes.Buffer
        req, err = http.NewRequest(method, up.SiteURL.String(), bytes.NewBuffer(ba))
        if err != nil {
            return nil, err
        }
    } else {
        /**** FUTURE USE: Support GET requests ****/
        // Build all other requests (currently only GET) using a query string and no request body
        /*req, err = http.NewRequest(method, up.SiteURL+up.EndpointURI+formatQueryString(params), nil)
          if err != nil {
              return nil, err
          }*/
        // BUG FIX: previously req stayed nil here and req.Header.Add below
        // panicked for any method other than POST/PUT.
        return nil, fmt.Errorf("unsupported request method %q", method)
    }
    // Add the appropriate request headers
    req.Header.Add("Content-Type", "application/json")
    req.Header.Add("User-Agent", up.UserAgent)
    //fmt.Println("Request: ", req)
    // Execute request to API using http.Client with OAuth transport configured
    resp, err := httpClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    // Check that the API request was successful
    if !strings.Contains(resp.Status, "200") {
        return nil, errors.New(resp.Status)
    }
    // Read the entire JSON response into a []byte
    // BUG FIX: the read error was previously discarded.
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }
    // Unmarshal the JSON response into a map object
    var rtnVal map[string]interface{}
    if method != "POST" {
        if err := json.Unmarshal(body, &rtnVal); err != nil {
            return nil, err
        }
    }
    return rtnVal, nil
}
// formatQueryString converts a set of key/value pairs into a query string
// ("?k=v&k2=v2") that can be appended to URLs. A nil or empty params yields
// the empty string.
//
// NOTE(review): values are not URL-escaped; confirm callers only pass safe
// values, or switch to net/url.Values.
func formatQueryString(params *map[string]interface{}) string {
    var rtnStr = "?"
    if params != nil {
        for key, value := range *params {
            // BUG FIX: %s on non-string values produced "%!s(int=7)"-style
            // output; %v formats any value sensibly.
            rtnStr += fmt.Sprintf("%s=%v&", key, value)
        }
    }
    // Remove the very last character which is either a '&' or a '?'
    rtnStr = rtnStr[:len(rtnStr)-1]
    return rtnStr
}
|
package nopaste
import (
"log"
"net/http"
)
// MsgrRoot is the URL prefix under which the messenger endpoints are mounted.
const MsgrRoot = "/irc-msgr"

// RunMsgr loads the config, starts the configured IRC/Slack agents, and
// serves the HTTP post endpoint.
//
// NOTE(review): log.Fatal exits the process if ListenAndServe fails, so the
// trailing `return nil` is unreachable in practice; callers never see serve
// errors. Consider returning the error instead — confirm callers first.
func RunMsgr(configFile string) error {
    var err error
    // config is a package-level variable shared with the agents.
    config, err = LoadConfig(configFile)
    if err != nil {
        return err
    }
    // One buffered channel per configured backend; messages are fanned out
    // to every channel in chs.
    var chs []MessageChan
    if config.IRC != nil {
        ircCh := make(IRCMessageChan, MsgBufferLen)
        chs = append(chs, ircCh)
        go RunIRCAgent(config, ircCh)
    }
    if config.Slack != nil {
        slackCh := make(SlackMessageChan, MsgBufferLen)
        chs = append(chs, slackCh)
        go RunSlackAgent(config, slackCh)
    }
    http.HandleFunc(MsgrRoot+"/post", func(w http.ResponseWriter, req *http.Request) {
        msgrPostHandler(w, req, chs)
    })
    log.Fatal(http.ListenAndServe(config.Listen, nil))
    return nil
}
// msgrPostHandler accepts POSTs with non-empty "channel" and "msg" form
// values and forwards the request to every registered message channel.
// Responds 400 on validation failure, 201 on success.
func msgrPostHandler(w http.ResponseWriter, req *http.Request, chs []MessageChan) {
    channel := req.FormValue("channel")
    msg := req.FormValue("msg")
    if channel == "" || msg == "" || req.Method != "POST" {
        code := http.StatusBadRequest
        http.Error(w, http.StatusText(code), code)
        return
    }
    // Fan the request out to every backend (IRC, Slack, ...).
    for _, ch := range chs {
        ch.PostMsgr(req)
    }
    w.WriteHeader(http.StatusCreated)
    w.Write([]byte{})
}
|
package cli
import (
"fmt"
"os"
"path"
"github.com/bitrise-io/bitrise-plugins-analytics/configs"
"github.com/bitrise-io/bitrise-plugins-analytics/version"
bitriseConfigs "github.com/bitrise-io/bitrise/configs"
"github.com/bitrise-io/bitrise/plugins"
log "github.com/bitrise-io/go-utils/log"
"github.com/urfave/cli"
)
// commands holds the plugin's CLI subcommands (the on/off switch pair).
var commands = []cli.Command{
    createSwitchCommand(true),
    createSwitchCommand(false),
}

// flags holds the global CLI flags.
var flags = []cli.Flag{
    cli.StringFlag{
        Name:   "loglevel, l",
        Usage:  "Log level (options: debug, info, warn, error, fatal, panic).",
        EnvVar: "LOGLEVEL",
    },
}
// before populates package configuration from the plugin environment prior
// to running any command.
func before(c *cli.Context) error {
    configs.DataDir = os.Getenv(plugins.PluginInputDataDirKey)
    configs.IsCIMode = os.Getenv(bitriseConfigs.CIModeEnvKey) == "true"
    return nil
}
// printVersion prints the app version (used as cli.VersionPrinter).
func printVersion(c *cli.Context) {
    fmt.Println(c.App.Version)
}
// action is the default command: in trigger mode it sends analytics,
// otherwise it shows the app help.
func action(c *cli.Context) {
    if os.Getenv(plugins.PluginInputPluginModeKey) == string(plugins.TriggerMode) {
        sendAnalytics()
        return
    }
    if err := cli.ShowAppHelp(c); err != nil {
        log.Errorf("Failed to show help, error: %s", err)
        os.Exit(1)
    }
}
// createApp assembles the urfave/cli application with its flags, commands,
// and lifecycle hooks.
func createApp() *cli.App {
    app := cli.NewApp()
    app.Name = path.Base(os.Args[0])
    app.Usage = "Bitrise Analytics plugin"
    app.Version = version.VERSION
    app.Author = ""
    app.Email = ""
    app.Before = before
    app.Flags = flags
    app.Commands = commands
    app.Action = action
    return app
}
// Run builds the CLI application and executes it with the process arguments,
// exiting with status 1 on error.
func Run() {
    cli.VersionPrinter = printVersion
    if err := createApp().Run(os.Args); err != nil {
        log.Errorf("Finished with Error: %s", err)
        os.Exit(1)
    }
}
|
package model
// WordCount records how many times a word appears in a summary article.
// (Original comment: まとめ記事へのワードの出現回数を扱うための構造体.)
type WordCount struct {
    Word  string // the word itself
    Count int    // number of occurrences
}
|
package xmppim
import (
"encoding/xml"
"github.com/rez-go/xmpplib/xmppcore"
)
// ClientPresenceElementName is the fully-qualified XML name of a client
// presence stanza.
const ClientPresenceElementName = xmppcore.JabberClientNS + " presence"

// Presence stanza types. RFC 6121 2.2.1 and 4.7.1.
const (
    PresenceTypeUnavailable  = "unavailable"
    PresenceTypeError        = "error"
    PresenceTypeProbe        = "probe"
    PresenceTypeSubscribe    = "subscribe"
    PresenceTypeSubscribed   = "subscribed"
    PresenceTypeUnsubscribe  = "unsubscribe"
    PresenceTypeUnsubscribed = "unsubscribed"
)

// Values for the presence <show/> element.
const (
    ShowValueAway = "away"
    ShowValueChat = "chat"
    ShowValueDND  = "dnd"
    ShowValueXA   = "xa"
)
// ClientPresence models a client presence stanza. RFC 6121 4.7.
type ClientPresence struct {
    XMLName xml.Name              `xml:"jabber:client presence"`
    ID      string                `xml:"id,attr,omitempty"`
    Type    string                `xml:"type,attr,omitempty"` // one of the PresenceType* constants, or empty
    From    string                `xml:"from,attr,omitempty"`
    To      string                `xml:"to,attr,omitempty"`
    Error   *xmppcore.StanzaError `xml:",omitempty"`
    Show    *ClientShow           `xml:"show,omitempty"`
    Status  []ClientStatus        `xml:"status,omitempty"`
    Priority *ClientPriority      `xml:"priority,omitempty"`
    //TODO: 4.7.2.
    CapsC *CapsC `xml:",omitempty"`
    //TODO: X
}

// ClientShow carries the availability sub-state (one of the ShowValue*
// constants) of a presence stanza.
type ClientShow struct {
    XMLName xml.Name `xml:"jabber:client show"`
    Value   string   `xml:",chardata"`
}

// ClientStatus carries a human-readable status line, optionally per language.
type ClientStatus struct {
    XMLName xml.Name `xml:"jabber:client status"`
    Value   string   `xml:",chardata"`
    Lang    string   `xml:"lang,attr,omitempty"`
}

// ClientPriority carries the presence priority as character data.
type ClientPriority struct {
    XMLName xml.Name `xml:"jabber:client priority"`
    Value   int32    `xml:",chardata"`
}
|
package redis
import (
"github.com/go-redis/redis"
"github.com/ypyf/salmon/store"
)
// RedisStore is a Store backed by a Redis client.
type RedisStore struct {
    engine *redis.Client
}

// New creates a Store backed by a Redis connection pool.
// redis.Client represents a connection pool and is safe for concurrent use
// by multiple goroutines.
//
// GENERALIZATION: an optional address may be supplied; it defaults to the
// previously hard-coded instance, so existing New() callers are unaffected.
func New(addr ...string) store.Store {
    address := "10.10.110.191:6379"
    if len(addr) > 0 && addr[0] != "" {
        address = addr[0]
    }
    return &RedisStore{engine: redis.NewClient(&redis.Options{
        Addr:     address,
        Password: "",
        DB:       0, // use default DB
    })}
}
// Get returns the value stored under the "opsbot:"-namespaced key.
// FIX: the receiver was named `store`, shadowing the imported store package;
// renamed to the conventional short receiver `s`.
func (s *RedisStore) Get(key string) (string, error) {
    return s.engine.Get("opsbot:" + key).Result()
}

// Set stores value under the namespaced key with no expiration.
// NOTE(review): the Redis error is discarded — confirm this is intentional.
func (s *RedisStore) Set(key, value string) {
    s.engine.Set("opsbot:"+key, value, 0)
}

// Delete removes the namespaced key.
func (s *RedisStore) Delete(key string) {
    s.engine.Del("opsbot:" + key)
}

// Close releases the underlying Redis client and its connections.
func (s *RedisStore) Close() {
    s.engine.Close()
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package remoteconfig
import (
"testing"
corev1 "k8s.io/api/core/v1"
apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common"
apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1"
"github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature/fake"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature/test"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
)
// Test_rcFeature_Configure checks that the remote-config feature sets the
// DD_REMOTE_CONFIGURATION_ENABLED env var to match the DatadogAgent spec
// (explicitly disabled, explicitly enabled, and the default case).
func Test_rcFeature_Configure(t *testing.T) {
    ddav2RCDisabled := v2alpha1.DatadogAgent{
        Spec: v2alpha1.DatadogAgentSpec{
            Features: &v2alpha1.DatadogFeatures{
                RemoteConfiguration: &v2alpha1.RemoteConfigurationFeatureConfig{
                    Enabled: apiutils.NewBoolPointer(false),
                },
            },
        },
    }
    ddav2RCEnabled := v2alpha1.DatadogAgent{
        Spec: v2alpha1.DatadogAgentSpec{
            Features: &v2alpha1.DatadogFeatures{
                RemoteConfiguration: &v2alpha1.RemoteConfigurationFeatureConfig{
                    Enabled: apiutils.NewBoolPointer(true),
                },
            },
        },
    }
    // No Features set at all: exercises the default path.
    ddav2RCDefault := v2alpha1.DatadogAgent{
        Spec: v2alpha1.DatadogAgentSpec{},
    }
    tests := test.FeatureTestSuite{
        //////////////////////////
        // v2Alpha1.DatadogAgent
        //////////////////////////
        {
            Name:          "v2alpha1 RC not enabled",
            DDAv2:         ddav2RCDisabled.DeepCopy(),
            WantConfigure: true,
            Agent:         rcAgentNodeWantFunc("false"),
        },
        {
            Name:          "v2alpha1 RC enabled",
            DDAv2:         ddav2RCEnabled.DeepCopy(),
            WantConfigure: true,
            Agent:         rcAgentNodeWantFunc("true"),
        },
        {
            Name:          "v2alpha1 RC default",
            DDAv2:         ddav2RCDefault.DeepCopy(),
            WantConfigure: true,
            Agent:         rcAgentNodeWantFunc("false"),
        },
    }
    tests.Run(t, buildRCFeature)
}
// rcAgentNodeWantFunc builds a ComponentTest asserting that all agent
// containers carry DD_REMOTE_CONFIGURATION_ENABLED with the given value.
func rcAgentNodeWantFunc(value string) *test.ComponentTest {
    return test.NewDefaultComponentTest().WithWantFunc(
        func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
            // Check environment variable
            mgr := mgrInterface.(*fake.PodTemplateManagers)
            coreAgentWant := []*corev1.EnvVar{
                {
                    Name:  apicommon.DDRemoteConfigurationEnabled,
                    Value: value,
                },
            }
            coreAgentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.AllContainers]
            assert.True(t, apiutils.IsEqualStruct(coreAgentEnvVars, coreAgentWant), "Core agent env vars \ndiff = %s", cmp.Diff(coreAgentEnvVars, coreAgentWant))
        },
    )
}
|
// MessageReceipt records the outcome of executing a message: its exit code,
// the returned bytes, and the gas consumed.
// NOTE(review): UInt and Bytes are not defined here and there is no package
// clause visible — this looks like spec pseudocode rather than compilable Go;
// confirm the surrounding context before treating it as source.
type MessageReceipt struct {
	exitCode UInt
	returnValue Bytes
	gasUsed UInt
} // representation tuple
|
package main
import (
"flag"
"fmt"
"os"
pb "github.com/azmodb/exp/azmo/azmopb"
"golang.org/x/net/context"
)
// putCmd describes the "put" subcommand, which stores a value under a key.
// Fix: the last sentence of the help text read "It the key exists"; corrected
// to "If the key exists".
var putCmd = command{
	Help: `
Put sets the value for a key. If the key exists and tombstone is true
then its previous versions will be overwritten. Supplied key and
value must remain valid for the life of the database.
If the key exists and the value data type differ, it returns an error.
`,
	Short: "sets the value for a key",
	Args:  "[options] key value",
	Run:   put,
}
// put implements the "put" subcommand: it parses the tombstone flag and the
// key/value arguments, sends a PutRequest to the server, and prints the
// resulting event via encode. It returns any RPC or encoding error.
func put(ctx context.Context, d *dialer, args []string) error {
	flags := flag.FlagSet{}
	flags.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s: put [options] key value\n", self)
		fmt.Fprintf(os.Stderr, "\nOptions:\n")
		flags.PrintDefaults()
		os.Exit(2)
	}
	tombstone := flags.Bool("tombstone", false, "overwrite previous values")
	// Fix: the Parse error was silently discarded. With the zero-value
	// FlagSet (ContinueOnError) a bad flag makes Parse invoke Usage (which
	// exits), but surfacing the error keeps the function correct even if
	// Usage ever stops exiting.
	if err := flags.Parse(args); err != nil {
		return err
	}
	if flags.NArg() != 2 {
		flags.Usage() // exits with status 2
	}
	args = flags.Args()
	req := &pb.PutRequest{
		Key:       []byte(args[0]),
		Value:     []byte(args[1]),
		Tombstone: *tombstone,
	}
	c := d.dial()
	defer c.Close()
	ev, err := c.Put(ctx, req)
	if err != nil {
		return err
	}
	return encode(ev)
}
|
package service
import (
"github.com/zdnscloud/gorest/resource"
common "github.com/zdnscloud/cluster-agent/commonresource"
)
// InnerService is a gorest resource describing a service by name together
// with the workloads that back it.
type InnerService struct {
	resource.ResourceBase `json:",inline"`
	Name string `json:"name"`
	Workloads []*Workload `json:"workloads"`
}
// GetParents declares Namespace as the parent resource kind of InnerService.
func (s InnerService) GetParents() []resource.ResourceKind {
	return []resource.ResourceKind{common.Namespace{}}
}
// Workload identifies a controller (by name and kind) and lists its pods.
type Workload struct {
	Name string `json:"name"`
	Kind string `json:"kind"`
	Pods []Pod `json:"pods"`
}
// Pod is a pod name together with its current state string.
type Pod struct {
	Name string `json:"name"`
	State string `json:"state"`
}
// OuterService is a gorest resource describing an externally reachable entry
// point and the inner services behind it, keyed by name.
type OuterService struct {
	resource.ResourceBase `json:",inline"`
	EntryPoint string `json:"entryPoint"`
	Services map[string]InnerService `json:"services"`
}
// GetParents declares Namespace as the parent resource kind of OuterService.
func (s OuterService) GetParents() []resource.ResourceKind {
	return []resource.ResourceKind{common.Namespace{}}
}
// InnerServiceByName orders InnerService pointers alphabetically by Name;
// its Len/Swap/Less method set satisfies sort.Interface.
type InnerServiceByName []*InnerService

// Len returns the number of services in the slice.
func (a InnerServiceByName) Len() int {
	return len(a)
}

// Swap exchanges the elements at positions i and j.
func (a InnerServiceByName) Swap(i, j int) {
	a[j], a[i] = a[i], a[j]
}

// Less reports whether the service at i sorts before the one at j by Name.
func (a InnerServiceByName) Less(i, j int) bool {
	return a[i].Name < a[j].Name
}
// OuterServiceByEntryPoint orders OuterService pointers alphabetically by
// EntryPoint; its Len/Swap/Less method set satisfies sort.Interface.
type OuterServiceByEntryPoint []*OuterService

// Len returns the number of services in the slice.
func (a OuterServiceByEntryPoint) Len() int {
	return len(a)
}

// Swap exchanges the elements at positions i and j.
func (a OuterServiceByEntryPoint) Swap(i, j int) {
	a[j], a[i] = a[i], a[j]
}

// Less reports whether the service at i sorts before the one at j by
// EntryPoint.
func (a OuterServiceByEntryPoint) Less(i, j int) bool {
	return a[i].EntryPoint < a[j].EntryPoint
}
|
package main
import "fmt"
/*
Given two arrays of length m and n with digits 0-9 representing two numbers. Create the maximum number of length k <= m + n from digits of the two. The relative order of the digits from the same array must be preserved. Return an array of the k digits.
Note: You should try to optimize your time and space complexity.
Example 1:
Input:
nums1 = [3, 4, 6, 5]
nums2 = [9, 1, 2, 5, 8, 3]
k = 5
Output:
[9, 8, 6, 5, 3]
Example 2:
Input:
nums1 = [6, 7]
nums2 = [6, 0, 4]
k = 5
Output:
[6, 7, 6, 0, 4]
Example 3:
Input:
nums1 = [3, 9]
nums2 = [8, 9]
k = 3
Output:
[9, 8, 9]
*/
// maxNumInArryC returns the largest number of length k that can be picked
// from nums while preserving the digits' relative order. It keeps a
// monotonically decreasing stack, popping smaller digits while drops remain;
// any surplus drops are trimmed from the tail at the end.
// If k >= len(nums) the whole input is returned unchanged.
func maxNumInArryC(nums []int, k int) []int {
	stack := make([]int, 0, len(nums))
	remove := len(nums) - k
	for _, d := range nums {
		// Pop while a bigger digit can take an earlier position.
		for remove > 0 && len(stack) > 0 && stack[len(stack)-1] < d {
			stack = stack[:len(stack)-1]
			remove--
		}
		stack = append(stack, d)
	}
	// Input was non-increasing: trim leftovers from the end.
	for remove > 0 && len(stack) > 0 {
		stack = stack[:len(stack)-1]
		remove--
	}
	return stack
}
// maxNumInArryC1 is a brute-force reference implementation of picking the
// largest number of length k from nums with relative order preserved. It
// enumerates all subsequences of length k and keeps the numerically largest.
// Returns nil for k == 0 and nums itself when k >= len(nums).
func maxNumInArryC1(nums []int, k int) []int {
	if k == 0 {
		return nil
	}
	if k >= len(nums) {
		return nums
	}
	max := 0
	res := make([]int, 0)
	ret := make([]int, 0)
	// sum starts at 1 so every length-k candidate has the same magnitude
	// prefix, making the integer comparison behave like a digit-by-digit one
	// even with leading zeros.
	maxNumInArry(nums, 0, 1, k, &max, &res, &ret)
	return ret
}

// maxNumInArry recursively extends the partial selection *res from position
// pos, with k digits still to pick and sum the numeric value so far. The best
// value seen is tracked in *max and its digits copied into *ret.
func maxNumInArry(nums []int, pos, sum int, k int, max *int, res *[]int, ret *[]int) {
	if k == 0 {
		// A complete candidate: record it only if it beats the best so far.
		if sum > *max {
			*max = sum
			*ret = append(make([]int, 0, len(*res)), *res...)
		}
		// Fix: the original fell through into the loop when sum <= *max,
		// recursing with negative k and doing exponential wasted work that
		// could never record a result. Returning here is behavior-identical.
		return
	}
	for i := pos; i < len(nums); i++ {
		*res = append(*res, nums[i])
		maxNumInArry(nums, i+1, 10*sum+nums[i], k-1, max, res, ret)
		*res = (*res)[:len(*res)-1]
	}
}
// isbiggerdifflen reports whether nums1 compares greater than nums2 when the
// two are compared digit by digit; once one side is exhausted the other wins
// (empty nums1 loses, empty nums2 wins). Rewritten iteratively to avoid deep
// recursion on long equal prefixes; results are identical to the recursive
// original.
func isbiggerdifflen(nums1, nums2 []int) bool {
	for {
		if len(nums1) <= 0 {
			return false
		}
		if len(nums2) <= 0 {
			return true
		}
		if nums1[0] != nums2[0] {
			return nums1[0] > nums2[0]
		}
		nums1, nums2 = nums1[1:], nums2[1:]
	}
}

// mergeNum greedily interleaves nums1 and nums2 into the largest possible
// digit sequence while preserving the relative order inside each input. Ties
// on the leading digit are broken by comparing the remaining suffixes via
// isbiggerdifflen, which is what makes the greedy choice correct.
// Fix: removed the counters i and j, which were incremented but never read.
func mergeNum(nums1, nums2 []int) []int {
	ret := make([]int, 0, len(nums1)+len(nums2))
	n1, n2 := nums1, nums2
	for len(n1)+len(n2) > 0 {
		if isbiggerdifflen(n1, n2) {
			ret = append(ret, n1[0])
			n1 = n1[1:]
		} else {
			ret = append(ret, n2[0])
			n2 = n2[1:]
		}
	}
	return ret
}
func maxNumber(nums1 []int, nums2 []int, k int) []int {
m,n := len(nums1),len(nums2)
start := 0
if k-n > 0 {
start = k-n
}
end := k
if m < k {
end=m
}
fmt.Println(start,end)
var max []int
for i:=start;i<=end;i++ {
fmt.Println(i,k-i)
x := mergeNum(maxNumInArryC(nums1,i),maxNumInArryC(nums2,k-i))
fmt.Println(x)
if max == nil || isbigger(x,max) {
max=x
}
}
return max
}
// isbigger reports whether x is greater than or equal to max when the two
// equal-length digit slices are compared element by element. Equal slices
// yield true.
func isbigger(x, max []int) bool {
	for i, v := range x {
		switch {
		case v > max[i]:
			return true
		case v < max[i]:
			return false
		}
	}
	return true
}
// main runs maxNumber on one small example and prints the result; earlier
// experiments are kept as commented-out calls for manual debugging.
func main() {
	//fmt.Println(maxNumber([]int{3, 4, 6, 5},[]int{9, 1, 2, 5, 8, 3},5))
	//fmt.Println(maxNumber([]int{6,7},[]int{6,0,4},5))
	//fmt.Println(maxNumber([]int{2,5,6,4,4,0},[]int{7,3,8,0,6,5,7,6,2},15))
	//fmt.Println(maxNumInArryC([]int{1,2,3},0))
	result := maxNumber([]int{6, 7, 5}, []int{4, 8, 1}, 3)
	fmt.Println(result)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wifi
import (
"bytes"
"context"
"net"
"time"
cip "chromiumos/tast/common/network/ip"
"chromiumos/tast/common/shillconst"
"chromiumos/tast/errors"
"chromiumos/tast/remote/bundles/cros/wifi/wifiutil"
"chromiumos/tast/remote/network/ip"
"chromiumos/tast/remote/wificell"
"chromiumos/tast/remote/wificell/dutcfg"
"chromiumos/tast/remote/wificell/hostapd"
"chromiumos/tast/remote/wificell/router/common/support"
"chromiumos/tast/testing"
)
// init registers the MARSSIDRoam test with the Tast framework, including its
// ownership contacts, attributes, gRPC service dependency, and fixture.
func init() {
	testing.AddTest(&testing.Test{
		Func: MARSSIDRoam,
		Desc: "Tests MAC Address randomization during roam between APs of different SSIDs",
		Contacts: []string{
			"jck@semihalf.com", // Author.
			"chromeos-wifi-champs@google.com", // WiFi oncall rotation; or http://b/new?component=893827
		},
		Attr: []string{"group:wificell", "wificell_func", "wificell_unstable"},
		ServiceDeps: []string{wificell.TFServiceName},
		Fixture: "wificellFixt",
	})
}
// MARSSIDRoam verifies that the DUT's randomized MAC address changes when it
// moves between APs broadcasting different SSIDs, and that the old per-SSID
// MAC is re-adopted (and the other one abandoned) after the second AP goes
// away.
func MARSSIDRoam(ctx context.Context, s *testing.State) {
	// Goal of this test is to verify if MAC address gets updated when we move between different SSIDs.
	// It's not roaming in the full sense, but relies on the similar mechanics as roaming.
	// Steps:
	// * Configure AP1/SSID1,
	// * Connect to AP1,
	// * Check MAC Address (MAC-AP1),
	// * Configure AP2/SSID2,
	// * Connect to AP2,
	// * Check MAC Address (MAC-AP2),
	// * Unconfigure AP2,
	// * Wait until MAC changes to MAC-AP1,
	// * Verify MAC-AP2 is no longer used while sending traffic.
	tf := s.FixtValue().(*wificell.TestFixture)
	// Get the MAC address of WiFi interface.
	iface, err := tf.ClientInterface(ctx)
	if err != nil {
		s.Fatal("Failed to get WiFi interface of DUT: ", err)
	}
	ipr := ip.NewRemoteRunner(s.DUT().Conn())
	hwMAC, err := ipr.MAC(ctx, iface)
	if err != nil {
		s.Fatal("Failed to get MAC of WiFi interface: ", err)
	}
	s.Log("Read HW MAC: ", hwMAC)
	// Restore the original hardware MAC at the end of the test; the link
	// must be down while the address is rewritten.
	defer func(ctx context.Context, iface string, mac net.HardwareAddr) {
		if err := ipr.SetLinkDown(ctx, iface); err != nil {
			s.Error("Failed to set the interface down: ", err)
		}
		if err := ipr.SetMAC(ctx, iface, mac); err != nil {
			s.Error("Failed to revert the original MAC: ", err)
		}
		if err := ipr.SetLinkUp(ctx, iface); err != nil {
			s.Error("Failed to set the interface up: ", err)
		}
	}(ctx, iface, hwMAC)
	// Make sure the device is up.
	link, err := ipr.State(ctx, iface)
	if err != nil {
		s.Fatal("Failed to get link state")
	}
	if link != cip.LinkStateUp {
		if err := ipr.SetLinkUp(ctx, iface); err != nil {
			s.Error("Failed to set the interface up: ", err)
		}
	}
	// AP1 on channel 48 (5 GHz band), AP2 on channel 1 (2.4 GHz band).
	const (
		ap1Channel = 48
		ap2Channel = 1
	)
	// Generate BSSIDs for the two APs.
	mac1, err := hostapd.RandomMAC()
	if err != nil {
		s.Fatal("Failed to generate BSSID: ", err)
	}
	mac2, err := hostapd.RandomMAC()
	if err != nil {
		s.Fatal("Failed to generate BSSID: ", err)
	}
	ap1BSSID := mac1.String()
	ap2BSSID := mac2.String()
	// Configure the initial AP.
	optionsAP1 := []hostapd.Option{hostapd.Mode(hostapd.Mode80211nPure), hostapd.Channel(ap1Channel), hostapd.HTCaps(hostapd.HTCapHT20), hostapd.BSSID(ap1BSSID)}
	ap1, err := tf.ConfigureAP(ctx, optionsAP1, nil)
	if err != nil {
		s.Fatal("Failed to configure the AP: ", err)
	}
	defer func(ctx context.Context) {
		if err := tf.DeconfigAP(ctx, ap1); err != nil {
			s.Error("Failed to deconfig the AP: ", err)
		}
	}(ctx)
	ctx, cancel := tf.ReserveForDeconfigAP(ctx, ap1)
	defer cancel()
	// Connect with PersistentRandom policy.
	configProps := map[string]interface{}{
		shillconst.ServicePropertyWiFiRandomMACPolicy: shillconst.MacPolicyPersistentRandom,
	}
	// We want control over capturer start/stop so we don't use fixture with
	// pcap but spawn it here and use manually.
	pcapDevice, ok := tf.Pcap().(support.Capture)
	if !ok {
		s.Fatal("Device without capture support - device type: ", tf.Pcap().RouterType())
	}
	freqOpts, err := ap1.Config().PcapFreqOptions()
	if err != nil {
		s.Fatal("Failed to get Freq Opts: ", err)
	}
	// Capture frames while connecting to AP1 so we can later prove the
	// hardware MAC never appeared over the air.
	pcapPath, err := wifiutil.CollectPcapForAction(ctx, pcapDevice, "connect", ap1.Config().Channel, freqOpts,
		func(ctx context.Context) error {
			_, err := tf.ConnectWifiAP(ctx, ap1, dutcfg.ConnProperties(configProps))
			if err != nil {
				return errors.Wrap(err, "DUT: failed to connect to WiFi")
			}
			return nil
		})
	if err != nil {
		s.Fatal("Failed to collect pcap or perform action: ", err)
	}
	connMAC, err := ipr.MAC(ctx, iface)
	if err != nil {
		s.Fatal("Failed to get MAC of WiFi interface: ", err)
	}
	s.Log("MAC after connection: ", connMAC)
	// The randomized MAC must differ from the hardware MAC, and the
	// hardware MAC must be absent from the capture.
	if err := wifiutil.VerifyMACIsChanged(ctx, connMAC, pcapPath, []net.HardwareAddr{hwMAC}); err != nil {
		s.Fatal("Failed to randomize MAC during connection: ", err)
	}
	roamSucceeded := false
	defer func(ctx context.Context) {
		if !roamSucceeded {
			if err := tf.CleanDisconnectWifi(ctx); err != nil {
				s.Error("Failed to disconnect WiFi: ", err)
			}
		}
	}(ctx)
	ctx, cancel = tf.ReserveForDisconnect(ctx)
	defer cancel()
	if err := tf.VerifyConnection(ctx, ap1); err != nil {
		s.Fatal("DUT: failed to verify connection: ", err)
	}
	// Set up the second AP on a different band (5 GHz for AP1 and 2.4 GHz
	// for AP2). NOTE(review): no explicit SSID option is passed here, and
	// this test is about roaming between *different* SSIDs — presumably
	// ConfigureAP assigns a fresh SSID per AP; confirm against its
	// implementation (the original comment claimed "the same SSID").
	optionsAP2 := []hostapd.Option{hostapd.Mode(hostapd.Mode80211nPure), hostapd.Channel(ap2Channel), hostapd.HTCaps(hostapd.HTCapHT20), hostapd.BSSID(ap2BSSID)}
	ap2, err := tf.ConfigureAP(ctx, optionsAP2, nil)
	if err != nil {
		s.Fatal("Failed to configure the AP: ", err)
	}
	deconfigured := false
	defer func(ctx context.Context) {
		if !deconfigured {
			if err := tf.DeconfigAP(ctx, ap2); err != nil {
				s.Error("Failed to deconfig the AP: ", err)
			}
		}
	}(ctx)
	ctx, cancel = tf.ReserveForDeconfigAP(ctx, ap2)
	defer cancel()
	freqOpts, err = ap2.Config().PcapFreqOptions()
	if err != nil {
		s.Fatal("Failed to get Freq Opts: ", err)
	}
	// Capture the second connection the same way.
	pcapPath, err = wifiutil.CollectPcapForAction(ctx, pcapDevice, "connect2", ap2.Config().Channel, freqOpts,
		func(ctx context.Context) error {
			_, err := tf.ConnectWifiAP(ctx, ap2, dutcfg.ConnProperties(configProps))
			if err != nil {
				return errors.Wrap(err, "DUT: failed to connect to WiFi")
			}
			return nil
		})
	if err != nil {
		s.Fatal("Failed to collect pcap or perform action: ", err)
	}
	connMAC2, err := ipr.MAC(ctx, iface)
	if err != nil {
		s.Fatal("Failed to get MAC of WiFi interface: ", err)
	}
	s.Log("MAC after 2nd connection: ", connMAC2)
	if err := wifiutil.VerifyMACIsChanged(ctx, connMAC2, pcapPath, []net.HardwareAddr{hwMAC}); err != nil {
		s.Fatal("Failed to randomize MAC during connection: ", err)
	}
	roamSucceeded = true
	defer func(ctx context.Context) {
		if err := tf.CleanDisconnectWifi(ctx); err != nil {
			s.Error("Failed to disconnect WiFi: ", err)
		}
	}(ctx)
	// Trigger roaming.
	if err := tf.DeconfigAP(ctx, ap2); err != nil {
		s.Error("Failed to deconfig the AP: ", err)
	}
	deconfigured = true
	// Wait until the interface falls back to AP1's per-SSID MAC (connMAC).
	if err = testing.Poll(ctx, func(ctx context.Context) error {
		roamMAC, err := ipr.MAC(ctx, iface)
		if err != nil {
			return testing.PollBreak(errors.Wrap(err, "failed to get MAC of WiFi interface"))
		}
		if bytes.Equal(roamMAC, connMAC) {
			return nil
		}
		// NOTE(review): the Sleep error is ignored, and sleeping inside the
		// poll body stacks on top of the poll's own interval — consider
		// setting PollOptions.Interval instead.
		testing.Sleep(ctx, time.Second)
		return errors.Errorf("current MAC is %s", roamMAC)
	}, &testing.PollOptions{Timeout: 60 * time.Second}); err != nil {
		s.Fatal("Failed change MAC address of WiFi interface: ", err)
	}
	s.Log("DUT: roamed")
	roamMAC, err := ipr.MAC(ctx, iface)
	if err != nil {
		s.Fatal("Failed to get MAC of WiFi interface: ", err)
	}
	s.Log("MAC after forced roaming: ", roamMAC)
	// Capture traffic while verifying the connection to AP1; neither the
	// hardware MAC nor AP2's randomized MAC may appear anymore.
	pcapPath, err = wifiutil.CollectPcapForAction(ctx, pcapDevice, "verify", ap1.Config().Channel, freqOpts,
		func(ctx context.Context) error {
			if err := tf.VerifyConnection(ctx, ap1); err != nil {
				return errors.Wrap(err, "DUT: failed to verify connection")
			}
			return nil
		})
	if err != nil {
		s.Fatal("Failed to collect pcap or perform action: ", err)
	}
	if err := wifiutil.VerifyMACIsChanged(ctx, roamMAC, pcapPath, []net.HardwareAddr{hwMAC, connMAC2}); err != nil {
		s.Fatal("Failed to randomize MAC during connection: ", err)
	}
}
|
// Copyright 2019 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package test
import (
"context"
"encoding/json"
"fmt"
"os/exec"
"strings"
"k8s.io/apimachinery/pkg/runtime"
)
// createNamespace creates the given Kubernetes namespace via kubectl and
// returns a cleanup function that deletes it. An already-existing namespace
// counts as success, in which case the returned cleanup is a no-op.
func createNamespace(ns string) (func() error, error) {
	noop := func() error { return nil }
	out, err := kubectl("create", "namespace", ns)
	if err == nil {
		return func() error {
			return deleteNamespace(ns)
		}, nil
	}
	// Tolerate a pre-existing namespace (the marker may appear in either
	// the command output or the error text).
	if strings.Contains(string(out)+err.Error(), "AlreadyExists") {
		return noop, nil
	}
	return noop, fmt.Errorf("failed to create namespace %q: %v - out: %v", ns, err, string(out))
}
// deleteNamespace deletes the given Kubernetes namespace via kubectl.
func deleteNamespace(ns string) error {
	_, err := kubectl("delete", "namespace", ns)
	if err != nil {
		return fmt.Errorf("failed to delete namespace %q: %v", ns, err)
	}
	return nil
}
// apply runs "kubectl apply -f file" against the given namespace.
func apply(file string, ns string) error {
	_, err := kubectl("apply", "-f", file, "--namespace", ns)
	if err != nil {
		return fmt.Errorf("failed to apply %q: %v", file, err)
	}
	return nil
}
// delete runs "kubectl delete -f file" against the given namespace.
// Fix: the error message said "failed to apply" — a copy-paste from apply —
// which misreported delete failures; it now says "failed to delete".
func delete(file string, ns string) error {
	if _, err := kubectl("delete", "-f", file, "--namespace", ns); err != nil {
		return fmt.Errorf("failed to delete %q: %v", file, err)
	}
	return nil
}
// get fetches the named object as JSON via kubectl and unmarshals it into
// obj. The resource kind is taken from obj's own GroupVersionKind.
func get(obj runtime.Object, name, ns string) error {
	kind := obj.GetObjectKind().GroupVersionKind().Kind
	raw, err := kubectl("get", kind, "-o", "json", name, "--namespace", ns)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, obj)
}
// getList fetches, as JSON, all objects of the given resource matching
// labelSelector in the namespace, and unmarshals the list into obj.
func getList(obj runtime.Object, resource, labelSelector, ns string) error {
	raw, err := kubectl("get", resource, "-o", "json", "-l", labelSelector, "--namespace", ns)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, obj)
}
// kubectl runs the kubectl binary with the given arguments and returns its
// combined stdout/stderr. On failure the returned error embeds the command,
// the underlying error, and the captured output.
func kubectl(arg ...string) ([]byte, error) {
	cmd := exec.CommandContext(context.TODO(), "kubectl", arg...)
	output, err := cmd.CombinedOutput()
	if err == nil {
		return output, nil
	}
	return nil, fmt.Errorf("Err running cmd %v: %v. Output: %s", cmd, err, string(output))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.