text stringlengths 11 4.05M |
|---|
/*
Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package controller is used to provide the core functionalities of machine-controller-manager
package controller
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
"github.com/gardener/machine-controller-manager/pkg/util/provider/cache"
"k8s.io/klog/v2"
)
const (
// OverShootingReplicaCount freeze reason when replica count overshoots
OverShootingReplicaCount = "OverShootingReplicaCount"
// MachineDeploymentStateSync freeze reason when machineDeployment was found with inconsistent state
MachineDeploymentStateSync = "MachineDeploymentStateSync"
// UnfreezeAnnotation indicates the controllers to unfreeze this object
UnfreezeAnnotation = "safety.machine.sapcloud.io/unfreeze"
)
// reconcileClusterMachineSafetyOvershooting checks all machineSet/machineDeployment
// if the number of machine objects backing them is way beyond its desired replicas
func (c *controller) reconcileClusterMachineSafetyOvershooting(_ string) error {
	ctx := context.Background()
	stopCh := make(chan struct{})
	defer close(stopCh)
	// Re-enqueue ourselves after the configured overshooting period so this
	// reconcile runs periodically, regardless of how the passes below fare.
	reSyncAfter := c.safetyOptions.MachineSafetyOvershootingPeriod.Duration
	defer c.machineSafetyOvershootingQueue.AddAfter("", reSyncAfter)
	klog.V(4).Infof("reconcileClusterMachineSafetyOvershooting: Start")
	defer klog.V(4).Infof("reconcileClusterMachineSafetyOvershooting: End, reSync-Period: %v", reSyncAfter)
	// Each pass logs its own error and continues: a failure in one safety
	// pass must not block the remaining ones. Cache syncs between passes let
	// each pass observe the updates made by the previous one.
	err := c.checkAndFreezeORUnfreezeMachineSets(ctx)
	if err != nil {
		klog.Errorf("SafetyController: %v", err)
	}
	cache.WaitForCacheSync(stopCh, c.machineSetSynced, c.machineDeploymentSynced)
	err = c.syncMachineDeploymentFreezeState(ctx)
	if err != nil {
		klog.Errorf("SafetyController: %v", err)
	}
	cache.WaitForCacheSync(stopCh, c.machineDeploymentSynced)
	err = c.unfreezeMachineDeploymentsWithUnfreezeAnnotation(ctx)
	if err != nil {
		klog.Errorf("SafetyController: %v", err)
	}
	cache.WaitForCacheSync(stopCh, c.machineSetSynced)
	err = c.unfreezeMachineSetsWithUnfreezeAnnotation(ctx)
	if err != nil {
		klog.Errorf("SafetyController: %v", err)
	}
	// NOTE(review): only the last pass's error is returned; earlier errors
	// are logged and dropped.
	return err
}
// unfreezeMachineDeploymentsWithUnfreezeAnnotation unfreezes machineDeployment with unfreeze annotation
// and propagates the annotation to all machineSets backing it, so that the
// machineSet pass can unfreeze them in turn.
func (c *controller) unfreezeMachineDeploymentsWithUnfreezeAnnotation(ctx context.Context) error {
	machineDeployments, err := c.machineDeploymentLister.List(labels.Everything())
	if err != nil {
		klog.Error("SafetyController: Error while trying to LIST machineDeployments - ", err)
		return err
	}
	for _, machineDeployment := range machineDeployments {
		// Only the presence of the annotation matters; its value is ignored.
		if _, exists := machineDeployment.Annotations[UnfreezeAnnotation]; exists {
			klog.V(2).Infof("SafetyController: UnFreezing MachineDeployment %q due to setting unfreeze annotation", machineDeployment.Name)
			err := c.unfreezeMachineDeployment(ctx, machineDeployment, "UnfreezeAnnotation")
			if err != nil {
				return err
			}
			// Apply UnfreezeAnnotation on all machineSets backed by the machineDeployment
			// NOTE(review): an error from getMachineSetsForMachineDeployment is
			// silently skipped here (best-effort propagation).
			machineSets, err := c.getMachineSetsForMachineDeployment(ctx, machineDeployment)
			if err == nil {
				for _, machineSet := range machineSets {
					// Get the latest version of the machineSet so that we can avoid conflicts
					machineSet, err := c.controlMachineClient.MachineSets(machineSet.Namespace).Get(ctx, machineSet.Name, metav1.GetOptions{})
					if err != nil {
						// Some error occurred while fetching object from API server
						klog.Errorf("SafetyController: Failed to GET machineSet. Error: %s", err)
						return err
					}
					clone := machineSet.DeepCopy()
					if clone.Annotations == nil {
						clone.Annotations = make(map[string]string)
					}
					clone.Annotations[UnfreezeAnnotation] = "True"
					machineSet, err = c.controlMachineClient.MachineSets(clone.Namespace).Update(ctx, clone, metav1.UpdateOptions{})
					if err != nil {
						klog.Errorf("SafetyController: MachineSet %s UPDATE failed. Error: %s", machineSet.Name, err)
						return err
					}
				}
			}
		}
	}
	return nil
}
// unfreezeMachineSetsWithUnfreezeAnnotation scans every machineSet and
// unfreezes each one that carries the UnfreezeAnnotation.
func (c *controller) unfreezeMachineSetsWithUnfreezeAnnotation(ctx context.Context) error {
	allMachineSets, err := c.machineSetLister.List(labels.Everything())
	if err != nil {
		klog.Error("SafetyController: Error while trying to LIST machineSets - ", err)
		return err
	}
	for _, machineSet := range allMachineSets {
		// Presence of the annotation is the trigger; its value is irrelevant.
		_, annotated := machineSet.Annotations[UnfreezeAnnotation]
		if !annotated {
			continue
		}
		klog.V(2).Infof("SafetyController: UnFreezing MachineSet %q due to setting unfreeze annotation", machineSet.Name)
		if err := c.unfreezeMachineSet(ctx, machineSet); err != nil {
			return err
		}
	}
	return nil
}
// syncMachineDeploymentFreezeState syncs freeze labels and conditions to keep it consistent:
// a machineDeployment must be frozen iff at least one of its machineSets is frozen.
func (c *controller) syncMachineDeploymentFreezeState(ctx context.Context) error {
	machineDeployments, err := c.machineDeploymentLister.List(labels.Everything())
	if err != nil {
		klog.Error("SafetyController: Error while trying to LIST machineDeployments - ", err)
		return err
	}
	for _, machineDeployment := range machineDeployments {
		// A deployment is considered frozen when either the "freeze" label or
		// the MachineDeploymentFrozen condition is set; both should agree.
		machineDeploymentFreezeLabelPresent := (machineDeployment.Labels["freeze"] == "True")
		machineDeploymentFrozenConditionPresent := (GetMachineDeploymentCondition(machineDeployment.Status, v1alpha1.MachineDeploymentFrozen) != nil)
		machineDeploymentHasFrozenMachineSet := false
		// NOTE(review): a lister error here silently results in "no frozen
		// machineSets", which can cause an unfreeze on stale information.
		machineSets, err := c.getMachineSetsForMachineDeployment(ctx, machineDeployment)
		if err == nil {
			for _, machineSet := range machineSets {
				machineSetFreezeLabelPresent := (machineSet.Labels["freeze"] == "True")
				machineSetFrozenConditionPresent := (GetCondition(&machineSet.Status, v1alpha1.MachineSetFrozen) != nil)
				if machineSetFreezeLabelPresent || machineSetFrozenConditionPresent {
					machineDeploymentHasFrozenMachineSet = true
					break
				}
			}
		}
		if machineDeploymentHasFrozenMachineSet {
			// If machineDeployment has atleast one frozen machine set backing it
			if !machineDeploymentFreezeLabelPresent || !machineDeploymentFrozenConditionPresent {
				// Either the freeze label or freeze condition is not present on the machineDeployment
				message := "MachineDeployment State was inconsistent, hence safety controller has fixed this and frozen it"
				err := c.freezeMachineDeployment(ctx, machineDeployment, MachineDeploymentStateSync, message)
				if err != nil {
					return err
				}
			}
		} else {
			// If machineDeployment has no frozen machine set backing it
			if machineDeploymentFreezeLabelPresent || machineDeploymentFrozenConditionPresent {
				// Either the freeze label or freeze condition is present on the machineDeployment
				err := c.unfreezeMachineDeployment(ctx, machineDeployment, MachineDeploymentStateSync)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// checkAndFreezeORUnfreezeMachineSets freezes/unfreezes machineSets/machineDeployments
// which have much greater than desired number of replicas of machine objects
func (c *controller) checkAndFreezeORUnfreezeMachineSets(ctx context.Context) error {
	machineSets, err := c.machineSetLister.List(labels.Everything())
	if err != nil {
		klog.Error("SafetyController: Error while trying to LIST machineSets - ", err)
		return err
	}
	for _, machineSet := range machineSets {
		// NOTE(review): all machines are listed afresh on every machineSet
		// iteration, making this O(sets x machines).
		filteredMachines, err := c.machineLister.List(labels.Everything())
		if err != nil {
			klog.Error("SafetyController: Error while trying to LIST machines - ", err)
			return err
		}
		// Count machines that match this machineSet's template labels AND name
		// this machineSet in an owner reference (matched by name only).
		fullyLabeledReplicasCount := int32(0)
		templateLabel := labels.Set(machineSet.Spec.Template.Labels).AsSelectorPreValidated()
		for _, machine := range filteredMachines {
			if templateLabel.Matches(labels.Set(machine.Labels)) &&
				len(machine.OwnerReferences) >= 1 {
				for i := range machine.OwnerReferences {
					if machine.OwnerReferences[i].Name == machineSet.Name {
						fullyLabeledReplicasCount++
					}
				}
			}
		}
		// Freeze machinesets when replica count exceeds by SafetyUP
		higherThreshold := 2*machineSet.Spec.Replicas + c.safetyOptions.SafetyUp
		// Unfreeze machineset when replica count reaches higherThreshold - SafetyDown
		lowerThreshold := higherThreshold - c.safetyOptions.SafetyDown
		machineDeployments := c.getMachineDeploymentsForMachineSet(machineSet)
		// if we have a parent machineDeployment than we use a different higherThreshold and lowerThreshold,
		// keeping in mind the rolling update scenario, as we won't want to freeze during a normal rolling update.
		if len(machineDeployments) >= 1 {
			machineDeployment := machineDeployments[0]
			if machineDeployment != nil {
				// MaxSurge may be an absolute number or a percentage of the
				// deployment's replicas; resolve it to an absolute value.
				surge, err := intstrutil.GetValueFromIntOrPercent(
					machineDeployment.Spec.Strategy.RollingUpdate.MaxSurge,
					int(machineDeployment.Spec.Replicas),
					true,
				)
				if err != nil {
					klog.Error("SafetyController: Error while trying to GET surge value - ", err)
					return err
				}
				higherThreshold = machineDeployment.Spec.Replicas + int32(surge) + c.safetyOptions.SafetyUp
				lowerThreshold = higherThreshold - c.safetyOptions.SafetyDown
			}
		}
		klog.V(4).Infof(
			"checkAndFreezeORUnfreezeMachineSets: MS:%q LowerThreshold:%d FullyLabeledReplicas:%d HigherThreshold:%d",
			machineSet.Name,
			lowerThreshold,
			fullyLabeledReplicasCount,
			higherThreshold,
		)
		machineSetFrozenCondition := GetCondition(&machineSet.Status, v1alpha1.MachineSetFrozen)
		if machineSet.Labels["freeze"] != "True" &&
			fullyLabeledReplicasCount >= higherThreshold {
			message := fmt.Sprintf(
				"The number of machines backing MachineSet: %s is %d >= %d which is the Max-ScaleUp-Limit",
				machineSet.Name,
				fullyLabeledReplicasCount,
				higherThreshold,
			)
			// Returns immediately after acting on one machineSet; the rest are
			// handled on the next periodic resync.
			return c.freezeMachineSetAndDeployment(ctx, machineSet, OverShootingReplicaCount, message)
		} else if fullyLabeledReplicasCount <= lowerThreshold &&
			(machineSet.Labels["freeze"] == "True" || machineSetFrozenCondition != nil) {
			// Unfreeze if number of replicas is less than or equal to lowerThreshold
			// and freeze label or condition exists on machineSet
			return c.unfreezeMachineSetAndDeployment(ctx, machineSet)
		}
	}
	return nil
}
// addMachineToSafetyOvershooting is the add-event handler: it schedules the
// safety-overshooting reconcile whenever a machine object appears.
func (c *controller) addMachineToSafetyOvershooting(obj interface{}) {
	c.enqueueMachineSafetyOvershootingKey(obj.(*v1alpha1.Machine))
}
// enqueueMachineSafetyOvershootingKey enqueues into machineSafetyOvershootingQueue.
// The argument is ignored and a single empty key is used, so concurrent
// enqueues collapse into one pending reconcile.
func (c *controller) enqueueMachineSafetyOvershootingKey(_ interface{}) {
	c.machineSafetyOvershootingQueue.Add("")
}
// freezeMachineSetAndDeployment freezes machineSet and machineDeployment (who is the owner of the machineSet).
// Freezing is two updates: the MachineSetFrozen status condition, then the
// "freeze" label on the object itself.
func (c *controller) freezeMachineSetAndDeployment(ctx context.Context, machineSet *v1alpha1.MachineSet, reason string, message string) error {
	klog.V(2).Infof("SafetyController: Freezing MachineSet %q due to %q", machineSet.Name, reason)
	// Get the latest version of the machineSet so that we can avoid conflicts
	machineSet, err := c.controlMachineClient.MachineSets(machineSet.Namespace).Get(ctx, machineSet.Name, metav1.GetOptions{})
	if err != nil {
		// Some error occurred while fetching object from API server
		klog.Errorf("SafetyController: Failed to GET machineSet. Error: %s", err)
		return err
	}
	clone := machineSet.DeepCopy()
	newStatus := clone.Status
	mscond := NewMachineSetCondition(v1alpha1.MachineSetFrozen, v1alpha1.ConditionTrue, reason, message)
	SetCondition(&newStatus, mscond)
	clone.Status = newStatus
	machineSet, err = c.controlMachineClient.MachineSets(clone.Namespace).UpdateStatus(ctx, clone, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("SafetyController: MachineSet/status UPDATE failed. Error: %s", err)
		return err
	}
	// Re-clone from the object returned by UpdateStatus so the label update
	// is based on the newest resourceVersion.
	clone = machineSet.DeepCopy()
	if clone.Labels == nil {
		clone.Labels = make(map[string]string)
	}
	clone.Labels["freeze"] = "True"
	_, err = c.controlMachineClient.MachineSets(clone.Namespace).Update(ctx, clone, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("SafetyController: MachineSet UPDATE failed. Error: %s", err)
		return err
	}
	// Also freeze the first owning machineDeployment, if any.
	machineDeployments := c.getMachineDeploymentsForMachineSet(machineSet)
	if len(machineDeployments) >= 1 {
		machineDeployment := machineDeployments[0]
		if machineDeployment != nil {
			err := c.freezeMachineDeployment(ctx, machineDeployment, reason, message)
			if err != nil {
				return err
			}
		}
	}
	klog.V(2).Infof("SafetyController: Froze MachineSet %q due to overshooting of replicas", machineSet.Name)
	return nil
}
// unfreezeMachineSetAndDeployment unfreezes the given machineSet and, when
// an owning machineDeployment exists, unfreezes that first.
func (c *controller) unfreezeMachineSetAndDeployment(ctx context.Context, machineSet *v1alpha1.MachineSet) error {
	klog.V(2).Infof("SafetyController: UnFreezing MachineSet %q due to lesser than lower threshold replicas", machineSet.Name)
	if owners := c.getMachineDeploymentsForMachineSet(machineSet); len(owners) >= 1 {
		if err := c.unfreezeMachineDeployment(ctx, owners[0], "UnderShootingReplicaCount"); err != nil {
			return err
		}
	}
	return c.unfreezeMachineSet(ctx, machineSet)
}
// unfreezeMachineSet unfreezes a single machineSet: removes the
// MachineSetFrozen condition, then strips the unfreeze annotation and the
// "freeze" label.
func (c *controller) unfreezeMachineSet(ctx context.Context, machineSet *v1alpha1.MachineSet) error {
	if machineSet == nil {
		err := fmt.Errorf("SafetyController: Machine Set not passed")
		klog.Errorf(err.Error())
		return err
	}
	// Get the latest version of the machineSet so that we can avoid conflicts
	machineSet, err := c.controlMachineClient.MachineSets(machineSet.Namespace).Get(ctx, machineSet.Name, metav1.GetOptions{})
	if err != nil {
		// Some error occurred while fetching object from API server
		klog.Errorf("SafetyController: Failed to GET machineSet. Error: %s", err)
		return err
	}
	clone := machineSet.DeepCopy()
	newStatus := clone.Status
	RemoveCondition(&newStatus, v1alpha1.MachineSetFrozen)
	clone.Status = newStatus
	machineSet, err = c.controlMachineClient.MachineSets(clone.Namespace).UpdateStatus(ctx, clone, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("SafetyController: MachineSet/status UPDATE failed. Error: %s", err)
		return err
	}
	// Second update: drop the annotation/label using the freshest object.
	clone = machineSet.DeepCopy()
	if clone.Annotations == nil {
		clone.Annotations = make(map[string]string)
	}
	delete(clone.Annotations, UnfreezeAnnotation)
	if clone.Labels == nil {
		clone.Labels = make(map[string]string)
	}
	delete(clone.Labels, "freeze")
	machineSet, err = c.controlMachineClient.MachineSets(clone.Namespace).Update(ctx, clone, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("SafetyController: MachineSet UPDATE failed. Error: %s", err)
		return err
	}
	klog.V(2).Infof("SafetyController: Unfroze MachineSet %q", machineSet.Name)
	return nil
}
// freezeMachineDeployment freezes the machineDeployment by setting the
// MachineDeploymentFrozen condition and then the "freeze" label.
func (c *controller) freezeMachineDeployment(ctx context.Context, machineDeployment *v1alpha1.MachineDeployment, reason string, message string) error {
	// Get the latest version of the machineDeployment so that we can avoid conflicts
	machineDeployment, err := c.controlMachineClient.MachineDeployments(machineDeployment.Namespace).Get(ctx, machineDeployment.Name, metav1.GetOptions{})
	if err != nil {
		klog.Errorf("SafetyController: Failed to GET machineDeployment. Error: %s", err)
		return err
	}
	clone := machineDeployment.DeepCopy()
	newStatus := clone.Status
	mdcond := NewMachineDeploymentCondition(v1alpha1.MachineDeploymentFrozen, v1alpha1.ConditionTrue, reason, message)
	SetMachineDeploymentCondition(&newStatus, *mdcond)
	clone.Status = newStatus
	machineDeployment, err = c.controlMachineClient.MachineDeployments(clone.Namespace).UpdateStatus(ctx, clone, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("SafetyController: MachineDeployment/status UPDATE failed. Error: %s", err)
		return err
	}
	// Label update is applied on top of the object returned by UpdateStatus.
	clone = machineDeployment.DeepCopy()
	if clone.Labels == nil {
		clone.Labels = make(map[string]string)
	}
	clone.Labels["freeze"] = "True"
	_, err = c.controlMachineClient.MachineDeployments(clone.Namespace).Update(ctx, clone, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("SafetyController: MachineDeployment UPDATE failed. Error: %s", err)
		return err
	}
	klog.V(2).Infof("SafetyController: Froze MachineDeployment %q due to %s", machineDeployment.Name, reason)
	return nil
}
// unfreezeMachineDeployment unfreezes the machineDeployment: removes the
// MachineDeploymentFrozen condition, the unfreeze annotation, and the
// "freeze" label.
func (c *controller) unfreezeMachineDeployment(ctx context.Context, machineDeployment *v1alpha1.MachineDeployment, reason string) error {
	if machineDeployment == nil {
		err := fmt.Errorf("SafetyController: Machine Deployment not passed")
		klog.Errorf(err.Error())
		return err
	}
	// Get the latest version of the machineDeployment so that we can avoid conflicts
	machineDeployment, err := c.controlMachineClient.MachineDeployments(machineDeployment.Namespace).Get(ctx, machineDeployment.Name, metav1.GetOptions{})
	if err != nil {
		// Some error occurred while fetching object from API server
		klog.Errorf("SafetyController: Failed to GET machineDeployment. Error: %s", err)
		return err
	}
	clone := machineDeployment.DeepCopy()
	newStatus := clone.Status
	RemoveMachineDeploymentCondition(&newStatus, v1alpha1.MachineDeploymentFrozen)
	clone.Status = newStatus
	machineDeployment, err = c.controlMachineClient.MachineDeployments(clone.Namespace).UpdateStatus(ctx, clone, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("SafetyController: MachineDeployment/status UPDATE failed. Error: %s", err)
		return err
	}
	// Second update: strip annotation and label from the freshest object.
	clone = machineDeployment.DeepCopy()
	if clone.Annotations == nil {
		clone.Annotations = make(map[string]string)
	}
	delete(clone.Annotations, UnfreezeAnnotation)
	if clone.Labels == nil {
		clone.Labels = make(map[string]string)
	}
	delete(clone.Labels, "freeze")
	machineDeployment, err = c.controlMachineClient.MachineDeployments(clone.Namespace).Update(ctx, clone, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("SafetyController: MachineDeployment UPDATE failed. Error: %s", err)
		return err
	}
	klog.V(2).Infof("SafetyController: Unfroze MachineDeployment %q due to %s", machineDeployment.Name, reason)
	return nil
}
|
package logger
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
)
const (
	// bufferSize is the capacity of each per-severity bufio.Writer (256 KiB).
	bufferSize = 256 * 1024
)
func getLastCheck(now time.Time) uint64 {
return uint64(now.Year())*1000000 + uint64(now.Month())*10000 + uint64(now.Day())*100 + uint64(now.Hour())
}
// syncBuffer couples a buffered writer with its underlying log file and
// tracks the state needed for size-based rotation.
type syncBuffer struct {
	*bufio.Writer
	file     *os.File
	count    uint64 // bytes written to the current file; reset on rotation
	cur      int    // next numeric suffix for size-based rotation
	filePath string // path of the active log file
	parent   *FileBackend // owning backend; consulted for rotation policy
}
// Sync fsyncs the underlying file. It does NOT flush the bufio.Writer first;
// callers flush explicitly before syncing when needed.
func (self *syncBuffer) Sync() error {
	return self.file.Sync()
}
// close flushes the buffer, fsyncs, and closes the file. All errors are
// ignored (best-effort shutdown).
func (self *syncBuffer) close() {
	self.Flush()
	self.Sync()
	self.file.Close()
}
// write appends b to the buffer, performing size-based rotation first when
// hourly rotation is off, maxSize/rotateNum are configured, and this write
// would reach maxSize. Rotation renames the current file to a ".NNN" suffix
// (suffixes cycle modulo rotateNum); the open handle keeps writing into the
// renamed file until monitorFiles notices the original path is missing and
// reopens it. Callers (FileBackend.Log) hold parent.mu, so this is not
// called concurrently. Write errors are ignored.
func (self *syncBuffer) write(b []byte) {
	if !self.parent.rotateByHour && self.parent.maxSize > 0 && self.parent.rotateNum > 0 && self.count+uint64(len(b)) >= self.parent.maxSize {
		os.Rename(self.filePath, self.filePath+fmt.Sprintf(".%03d", self.cur))
		self.cur++
		if self.cur >= self.parent.rotateNum {
			self.cur = 0
		}
		self.count = 0
	}
	self.count += uint64(len(b))
	self.Writer.Write(b)
}
// FileBackend is a log backend that writes one buffered file per severity
// level into a single directory, with optional size-based or hourly rotation.
type FileBackend struct {
	mu            sync.Mutex // guards files and their buffers during Log/Flush
	dir           string     // directory for log files
	files         [numSeverity]syncBuffer // one buffer per severity
	flushInterval time.Duration // period of the background flush daemon
	rotateNum     int    // number of ".NNN" suffixes for size-based rotation
	maxSize       uint64 // size threshold (bytes) for size-based rotation
	fall          bool   // duplicate messages more severe than INFO into the INFO file
	rotateByHour  bool   // hourly rotation; disables size-based rotation
	lastCheck     uint64 // hour tag (YYYYMMDDHH) of the last hourly rotation
	reg           *regexp.Regexp // for rotatebyhour log del...
	keepHours     uint // keep how many hours old, only make sense when rotatebyhour is T
}
// Flush drains every severity's buffered writer and fsyncs each file,
// serialized under the backend mutex.
func (self *FileBackend) Flush() {
	self.mu.Lock()
	defer self.mu.Unlock()
	for i := range self.files {
		sb := &self.files[i]
		sb.Flush()
		sb.Sync()
	}
}
// Close flushes all buffers. Note it does not close the underlying files or
// stop the background daemons.
func (self *FileBackend) Close() {
	self.Flush()
}
// flushDaemon periodically flushes all buffers every flushInterval.
// It runs forever; there is no stop mechanism.
func (self *FileBackend) flushDaemon() {
	for {
		time.Sleep(self.flushInterval)
		self.Flush()
	}
}
// shouldDel reports whether a rotated log file is older than the retention
// window of `left` hours. Rotated names look like "INFO.log.2016071114":
// the third dot-separated field is an hour-resolution tag produced by
// getLastCheck. Files with an unparsable or missing tag are kept.
func shouldDel(fileName string, left uint) bool {
	parts := strings.Split(fileName, ".")
	// Guard: the original indexed parts[2] unconditionally and panicked on
	// names with fewer than three dot-separated fields.
	if len(parts) < 3 {
		return false
	}
	tagInt, err := strconv.Atoi(parts[2])
	if err != nil {
		return false
	}
	// Cut-off instant: `left` hours before now, encoded as an hour tag.
	point := time.Now().Unix() - int64(left*3600)
	return getLastCheck(time.Unix(point, 0)) > uint64(tagInt)
}
// rotateByHourDaemon polls once per second; when hourly rotation is enabled
// and the hour tag has advanced, it renames every severity file to
// "<name>.<lastCheck>" and then deletes rotated files in the log directory
// older than keepHours. It runs forever; there is no stop mechanism.
func (self *FileBackend) rotateByHourDaemon() {
	for {
		time.Sleep(time.Second * 1)
		if self.rotateByHour {
			check := getLastCheck(time.Now())
			if self.lastCheck < check {
				// Rename without holding mu; the open handles keep writing to
				// the renamed files until monitorFiles reopens the originals.
				for i := 0; i < numSeverity; i++ {
					os.Rename(self.files[i].filePath, self.files[i].filePath+fmt.Sprintf(".%d", self.lastCheck))
				}
				self.lastCheck = check
			}
			// also check log dir to del overtime files
			files, err := ioutil.ReadDir(self.dir)
			if err == nil {
				for _, file := range files {
					// Delete only names that the rotated-file regexp matches
					// exactly and whose hour tag is past the retention window.
					if file.Name() == self.reg.FindString(file.Name()) &&
						shouldDel(file.Name(), self.keepHours) {
						os.Remove(filepath.Join(self.dir, file.Name()))
					}
				}
			}
		}
	}
}
// monitorFiles checks every 5 seconds whether any severity's log file has
// disappeared from disk (e.g. after rotation renamed it, or external
// deletion) and reopens a fresh file at the original path, swapping the
// buffer and handle under the mutex. Runs forever.
func (self *FileBackend) monitorFiles() {
	for range time.NewTicker(time.Second * 5).C {
		for i := 0; i < numSeverity; i++ {
			fileName := path.Join(self.dir, severityName[i]+".log")
			if _, err := os.Stat(fileName); err != nil && os.IsNotExist(err) {
				if f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err == nil {
					self.mu.Lock()
					// Flush/close the old handle before swapping in the new one.
					self.files[i].close()
					self.files[i].Writer = bufio.NewWriterSize(f, bufferSize)
					self.files[i].file = f
					self.mu.Unlock()
				}
			}
		}
	}
}
// Log writes msg into the buffer for severity s. When fallback is enabled
// (see Fall), messages more severe than INFO are duplicated into the INFO
// file. FATAL triggers a full flush (outside the lock) so nothing buffered
// is lost before the process dies.
func (self *FileBackend) Log(s Severity, msg []byte) {
	self.mu.Lock()
	switch s {
	case FATAL, ERROR, WARNING, INFO, DEBUG:
		self.files[s].write(msg)
	}
	if self.fall && s < INFO {
		self.files[INFO].write(msg)
	}
	self.mu.Unlock()
	if s == FATAL {
		self.Flush()
	}
}
// Rotate configures size-based rotation: keep rotateNum1 rotated files and
// rotate when a file reaches maxSize1 bytes.
// NOTE(review): fields are written without holding mu; concurrent Log calls
// may observe a partial update.
func (self *FileBackend) Rotate(rotateNum1 int, maxSize1 uint64) {
	self.rotateNum = rotateNum1
	self.maxSize = maxSize1
}
// SetRotateByHour toggles hourly rotation. Enabling it seeds lastCheck with
// the current hour tag so the daemon rotates on the next hour boundary;
// disabling it clears the tag.
func (self *FileBackend) SetRotateByHour(rotateByHour bool) {
	self.rotateByHour = rotateByHour
	self.lastCheck = 0
	if rotateByHour {
		self.lastCheck = getLastCheck(time.Now())
	}
}
// SetKeepHours sets how many hours of rotated files to retain; only
// meaningful when hourly rotation is enabled.
func (self *FileBackend) SetKeepHours(hours uint) {
	self.keepHours = hours
}
// Fall enables duplicating messages more severe than INFO into the INFO
// file (see Log). There is no way to disable it again.
func (self *FileBackend) Fall() {
	self.fall = true
}
// SetFlushDuration sets the flush daemon's period, clamped to a minimum of
// one second.
func (self *FileBackend) SetFlushDuration(t time.Duration) {
	if t < time.Second {
		t = time.Second
	}
	self.flushInterval = t
}
// NewFileBackend creates the log directory (if needed), opens one append-mode
// file per severity, applies defaults (3s flush, 20 rotations, 1 GiB max
// size, 7-day retention), and starts the flush, file-monitor, and hourly
// rotation daemons. The daemons run forever; the backend cannot be stopped.
func NewFileBackend(dir string) (*FileBackend, error) {
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, err
	}
	var fb FileBackend
	fb.dir = dir
	for i := 0; i < numSeverity; i++ {
		fileName := path.Join(dir, severityName[i]+".log")
		f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return nil, err
		}
		// Seed the rotation byte counter with the existing file size so
		// size-based rotation accounts for prior contents.
		count := uint64(0)
		stat, err := f.Stat()
		if err == nil {
			count = uint64(stat.Size())
		}
		fb.files[i] = syncBuffer{
			Writer:   bufio.NewWriterSize(f, bufferSize),
			file:     f,
			filePath: fileName,
			parent:   &fb,
			count:    count,
		}
	}
	// default
	fb.flushInterval = time.Second * 3
	fb.rotateNum = 20
	fb.maxSize = 1024 * 1024 * 1024
	fb.rotateByHour = false
	fb.lastCheck = 0
	// init reg to match files
	// ONLY cover this century...
	fb.reg = regexp.MustCompile("(INFO|ERROR|WARNING|DEBUG|FATAL)\\.log\\.20[0-9]{8}")
	fb.keepHours = 24 * 7
	go fb.flushDaemon()
	go fb.monitorFiles()
	go fb.rotateByHourDaemon()
	return &fb, nil
}
// Rotate configures size-based rotation on the package-level default file
// backend, if one has been initialized.
func Rotate(rotateNum1 int, maxSize1 uint64) {
	if fileback != nil {
		fileback.Rotate(rotateNum1, maxSize1)
	}
}
// Fall enables severity fallback on the package-level default file backend,
// if one has been initialized.
func Fall() {
	if fileback != nil {
		fileback.Fall()
	}
}
// SetFlushDuration sets the flush period on the package-level default file
// backend, if one has been initialized.
func SetFlushDuration(t time.Duration) {
	if fileback != nil {
		fileback.SetFlushDuration(t)
	}
}
// SetRotateByHour toggles hourly rotation on the package-level default file
// backend, if one has been initialized.
func SetRotateByHour(rotateByHour bool) {
	if fileback != nil {
		fileback.SetRotateByHour(rotateByHour)
	}
}
// SetKeepHours sets rotated-file retention on the package-level default file
// backend, if one has been initialized.
func SetKeepHours(hours uint) {
	if fileback != nil {
		fileback.SetKeepHours(hours)
	}
}
|
package emergencykit
import (
"strings"
"testing"
)
// TestGenerateHTML checks that the English emergency-kit HTML embeds the
// verification code, both encrypted keys, and the output-descriptor markup.
func TestGenerateHTML(t *testing.T) {
	out, err := GenerateHTML(&Input{
		FirstEncryptedKey:  "MyFirstEncryptedKey",
		SecondEncryptedKey: "MySecondEncryptedKey",
	}, "en")
	if err != nil {
		t.Fatal(err)
	}
	if len(out.VerificationCode) != 6 {
		t.Fatal("expected verification code to have length 6")
	}
	if !strings.Contains(out.HTML, out.VerificationCode) {
		t.Fatal("expected output html to contain verification code")
	}
	if !strings.Contains(out.HTML, "MyFirstEncryptedKey") {
		t.Fatal("expected output html to contain first encrypted key")
	}
	if !strings.Contains(out.HTML, "MySecondEncryptedKey") {
		t.Fatal("expected output html to contain second encrypted key")
	}
	if !strings.Contains(out.HTML, `<ul class="descriptors">`) {
		t.Fatal("expected output html to contain output descriptors")
	}
	if !strings.Contains(out.HTML, `<span class="f">wsh</span>`) {
		t.Fatal("expected output html to contain output descriptor scripts")
	}
}
// TestGenerateHTMLWithFingerprints repeats the base HTML checks and
// additionally verifies that both key fingerprints appear in the output.
func TestGenerateHTMLWithFingerprints(t *testing.T) {
	data := &Input{
		FirstEncryptedKey:  "MyFirstEncryptedKey",
		FirstFingerprint:   "abababab",
		SecondEncryptedKey: "MySecondEncryptedKey",
		SecondFingerprint:  "cdcdcdcd",
	}
	out, err := GenerateHTML(data, "en")
	if err != nil {
		t.Fatal(err)
	}
	if len(out.VerificationCode) != 6 {
		t.Fatal("expected verification code to have length 6")
	}
	if !strings.Contains(out.HTML, out.VerificationCode) {
		t.Fatal("expected output html to contain verification code")
	}
	if !strings.Contains(out.HTML, "MyFirstEncryptedKey") {
		t.Fatal("expected output html to contain first encrypted key")
	}
	if !strings.Contains(out.HTML, "MySecondEncryptedKey") {
		t.Fatal("expected output html to contain second encrypted key")
	}
	if !strings.Contains(out.HTML, `<ul class="descriptors">`) {
		t.Fatal("expected output html to contain output descriptors")
	}
	if !strings.Contains(out.HTML, `<span class="f">wsh</span>`) {
		t.Fatal("expected output html to contain output descriptor scripts")
	}
	if !strings.Contains(out.HTML, data.FirstFingerprint) {
		t.Fatal("expected output html to contain FirstFingerprint")
	}
	if !strings.Contains(out.HTML, data.SecondFingerprint) {
		t.Fatal("expected output html to contain SecondFingerprint")
	}
}
// TestGenerateDeterministicCode pins the verification code derived from a
// fixed key pair, guarding against accidental changes to the derivation.
func TestGenerateDeterministicCode(t *testing.T) {
	mockFirstKey := "foo"
	mockSecondKey := "bar"
	expectedCode := "223695"
	mockInputs := &Input{
		FirstEncryptedKey:  mockFirstKey,
		SecondEncryptedKey: mockSecondKey,
	}
	code := generateDeterministicCode(mockInputs)
	if code != expectedCode {
		t.Fatalf("expected code from (%s, %s) to be %s, not %s", mockFirstKey, mockSecondKey, expectedCode, code)
	}
}
|
package main
import (
"sync"
"time"
)
// Demonstrates sync.WaitGroup: main registers one count per worker, each
// worker signals completion via Done, and Wait blocks until the counter
// drops back to zero.
func main() {
	const workers = 10
	var wg sync.WaitGroup
	wg.Add(workers) // reserve one count per worker before spawning
	for n := 0; n < workers; n++ {
		go func(n int) {
			defer wg.Done() // decrement when this worker finishes
			time.Sleep(time.Second)
			println("goroutine", n, "done")
		}(n)
	}
	println("main......")
	wg.Wait() // block until every worker has called Done
	println("main exit")
}
|
package fsm
import (
"fmt"
"sort"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/swf"
. "github.com/sclasen/swfsm/log"
. "github.com/sclasen/swfsm/sugar"
)
const (
FilterStatusAll = "ALL" // open + closed
FilterStatusOpen = "OPEN" // open only
FilterStatusOpenPriority = "OPEN_PRIORITY" // open (+ closed, only if open is totally empty)
FilterStatusOpenPriorityWorkflow = "OPEN_PRIORITY_WORKFLOW" // open (+ closed, only if open not present workflow-by-workflow)
FilterStatusClosed = "CLOSED" // closed only
)
// Finder lists SWF workflow executions in a domain.
type Finder interface {
	// FindAll lists executions matching the given filters, with pagination.
	FindAll(*FindInput) (*FindOutput, error)
	// FindLatestByWorkflowID returns the most recent execution for a workflow ID.
	FindLatestByWorkflowID(workflowID string) (*swf.WorkflowExecution, error)
	// Reset clears internal dedup state accumulated across FindAll calls.
	Reset()
}
// NewFinder constructs a Finder over the given SWF domain and client, with
// an empty workflow-ID dedup index.
func NewFinder(domain string, c ClientSWFOps) Finder {
	return &finder{
		domain:          domain,
		c:               c,
		workflowIdIndex: make(map[string]struct{}),
	}
}
// FindInput parameterizes FindAll. Page tokens are tracked separately for
// the open- and closed-execution lists, since each SWF list API paginates
// independently.
type FindInput struct {
	MaximumPageSize     *int64
	OpenNextPageToken   *string
	ClosedNextPageToken *string
	ReverseOrder        *bool
	StatusFilter        string // one of the FilterStatus* constants; defaulted when empty
	StartTimeFilter     *swf.ExecutionTimeFilter
	CloseTimeFilter     *swf.ExecutionTimeFilter // only closed
	ExecutionFilter     *swf.WorkflowExecutionFilter
	TagFilter           *swf.TagFilter
	TypeFilter          *swf.WorkflowTypeFilter
	CloseStatusFilter   *swf.CloseStatusFilter // only closed
}
// FindOutput carries the combined execution infos plus the continuation
// tokens for each underlying SWF list call.
type FindOutput struct {
	ExecutionInfos      []*swf.WorkflowExecutionInfo
	OpenNextPageToken   *string
	ClosedNextPageToken *string
}
// finder is the default Finder implementation.
type finder struct {
	domain string
	c      ClientSWFOps
	// workflowIdIndex dedups executions by workflow ID across pages for the
	// OPEN_PRIORITY_WORKFLOW status filter; cleared via Reset.
	workflowIdIndex map[string]struct{}
}
// FindAll lists workflow executions in the finder's domain, combining SWF's
// open- and closed-execution list APIs according to input.StatusFilter, then
// re-applies the input's filters locally (SWF accepts only one server-side
// filter per call, so the server-side query may be broader than requested).
func (f *finder) FindAll(input *FindInput) (output *FindOutput, err error) {
	// Default the status filter: explicitly closed-only filters imply CLOSED,
	// otherwise prefer open executions.
	if input.StatusFilter == "" {
		if (input.CloseStatusFilter != nil && input.CloseStatusFilter.Status != nil) ||
			(input.CloseTimeFilter != nil &&
				(input.CloseTimeFilter.OldestDate != nil || input.CloseTimeFilter.LatestDate != nil)) {
			input.StatusFilter = FilterStatusClosed
		} else {
			input.StatusFilter = FilterStatusOpenPriority
		}
	}
	if !stringsContain([]string{
		FilterStatusAll,
		FilterStatusOpen,
		FilterStatusOpenPriority,
		FilterStatusOpenPriorityWorkflow,
		FilterStatusClosed,
	},
		input.StatusFilter) {
		return nil, fmt.Errorf("Invalid status filter")
	}
	// Pick the single most selective metadata filter and time filter, since
	// SWF allows only one of each per request.
	selectiveFilter := f.mostSelectiveFilters(input)
	output = &FindOutput{}
	// Query the open list when continuing open pagination, or when starting
	// fresh (no page token of either kind).
	if input.OpenNextPageToken != nil || (input.OpenNextPageToken == nil && input.ClosedNextPageToken == nil) {
		if stringsContain([]string{
			FilterStatusAll,
			FilterStatusOpen,
			FilterStatusOpenPriority,
			FilterStatusOpenPriorityWorkflow,
		}, input.StatusFilter) {
			openInput := &swf.ListOpenWorkflowExecutionsInput{
				Domain:          &f.domain,
				ReverseOrder:    input.ReverseOrder,
				MaximumPageSize: input.MaximumPageSize,
				NextPageToken:   input.OpenNextPageToken,
				StartTimeFilter: input.StartTimeFilter,
				ExecutionFilter: selectiveFilter.ExecutionFilter,
				TagFilter:       selectiveFilter.TagFilter,
				TypeFilter:      selectiveFilter.TypeFilter,
			}
			resp, err := f.c.ListOpenWorkflowExecutions(openInput)
			if err != nil {
				return nil, err
			}
			f.append(input, output, resp.ExecutionInfos)
			output.OpenNextPageToken = resp.NextPageToken
		}
	}
	// OPEN_PRIORITY: if any open executions survive local filtering, return
	// them without consulting the closed list at all.
	if input.StatusFilter == FilterStatusOpenPriority {
		output.ExecutionInfos = f.applyInputLocally(input, output.ExecutionInfos)
		if len(output.ExecutionInfos) > 0 {
			return output, nil
		}
	}
	// Query the closed list when continuing closed pagination or starting fresh.
	if input.ClosedNextPageToken != nil || (input.OpenNextPageToken == nil && input.ClosedNextPageToken == nil) {
		if stringsContain([]string{
			FilterStatusAll,
			FilterStatusClosed,
			FilterStatusOpenPriority,
			FilterStatusOpenPriorityWorkflow,
		}, input.StatusFilter) {
			closedInput := &swf.ListClosedWorkflowExecutionsInput{
				Domain:            &f.domain,
				ReverseOrder:      input.ReverseOrder,
				MaximumPageSize:   input.MaximumPageSize,
				NextPageToken:     input.ClosedNextPageToken,
				StartTimeFilter:   selectiveFilter.StartTimeFilter,
				CloseTimeFilter:   selectiveFilter.CloseTimeFilter,
				ExecutionFilter:   selectiveFilter.ExecutionFilter,
				TagFilter:         selectiveFilter.TagFilter,
				TypeFilter:        selectiveFilter.TypeFilter,
				CloseStatusFilter: selectiveFilter.CloseStatusFilter,
			}
			resp, err := f.c.ListClosedWorkflowExecutions(closedInput)
			if err != nil {
				return nil, err
			}
			f.append(input, output, resp.ExecutionInfos)
			output.ClosedNextPageToken = resp.NextPageToken
		}
	}
	output.ExecutionInfos = f.applyInputLocally(input, output.ExecutionInfos)
	return output, nil
}
// listAnyWorkflowExecutionsInput is the closed-list input type reused as a
// superset carrier for filters shared by both the open and closed list calls.
type listAnyWorkflowExecutionsInput swf.ListClosedWorkflowExecutionsInput

// mostSelectiveFilters condenses the input's filters down to the one
// metadata filter and one time filter that SWF list calls accept.
func (f *finder) mostSelectiveFilters(input *FindInput) listAnyWorkflowExecutionsInput {
	filter := &listAnyWorkflowExecutionsInput{}
	f.setMostSelectiveMetadataFilter(input, filter)
	f.setMostSelectiveTimeFilter(input, filter)
	return *filter
}
// setMostSelectiveMetadataFilter copies at most one metadata filter from
// input to output, chosen in order of selectivity: execution > tag > type >
// close-status. SWF list calls reject requests with more than one.
func (f *finder) setMostSelectiveMetadataFilter(input *FindInput, output *listAnyWorkflowExecutionsInput) {
	switch {
	case input.ExecutionFilter != nil:
		output.ExecutionFilter = input.ExecutionFilter
	case input.TagFilter != nil:
		output.TagFilter = input.TagFilter
	case input.TypeFilter != nil:
		output.TypeFilter = input.TypeFilter
	case input.CloseStatusFilter != nil:
		output.CloseStatusFilter = input.CloseStatusFilter
	}
}
// setMostSelectiveTimeFilter copies at most one time filter, preferring the
// close-time filter over the start-time filter. The result is only used for
// the closed list; the open list uses input.StartTimeFilter directly.
func (f *finder) setMostSelectiveTimeFilter(input *FindInput, output *listAnyWorkflowExecutionsInput) {
	switch {
	case input.CloseTimeFilter != nil:
		output.CloseTimeFilter = input.CloseTimeFilter
	case input.StartTimeFilter != nil:
		output.StartTimeFilter = input.StartTimeFilter
	}
}
// append copies infos into output. When the open-priority-workflow status
// filter is active, only the first execution seen per workflow id is kept;
// the dedupe index persists across calls until Reset is invoked.
func (f *finder) append(input *FindInput, output *FindOutput, infos []*swf.WorkflowExecutionInfo) {
    dedupe := input.StatusFilter == FilterStatusOpenPriorityWorkflow
    for _, info := range infos {
        if dedupe {
            id := *info.Execution.WorkflowId
            if _, seen := f.workflowIdIndex[id]; seen {
                continue
            }
            f.workflowIdIndex[id] = struct{}{}
        }
        output.ExecutionInfos = append(output.ExecutionInfos, info)
    }
}
// applyInputLocally re-applies every FindInput criterion to infos on the
// client side. The SWF API accepts only a single filter per request, so
// results fetched with the one most selective filter may still contain
// executions the remaining criteria should exclude. Also applies ordering
// and the maximum page size locally.
func (f *finder) applyInputLocally(input *FindInput, infos []*swf.WorkflowExecutionInfo) []*swf.WorkflowExecutionInfo {
applied := []*swf.WorkflowExecutionInfo{}
for _, info := range infos {
// exact workflow-id match when an execution filter is set
if input.ExecutionFilter != nil {
if *input.ExecutionFilter.WorkflowId != *info.Execution.WorkflowId {
continue
}
}
// the execution must carry the filtered tag
if input.TagFilter != nil {
found := false
for _, tag := range info.TagList {
if *input.TagFilter.Tag == *tag {
found = true
break
}
}
if !found {
continue
}
}
// type name must match; version only when the filter specifies one
if input.TypeFilter != nil && input.TypeFilter.Name != nil {
if *input.TypeFilter.Name != *info.WorkflowType.Name {
continue
}
if input.TypeFilter.Version != nil {
if *input.TypeFilter.Version != *info.WorkflowType.Version {
continue
}
}
}
// close status only compared for executions that have one (i.e. closed)
if input.CloseStatusFilter != nil && input.CloseStatusFilter.Status != nil && info.CloseStatus != nil {
if *input.CloseStatusFilter.Status != *info.CloseStatus {
continue
}
}
// start timestamp must fall inside [OldestDate, LatestDate]
if input.StartTimeFilter != nil {
if input.StartTimeFilter.OldestDate != nil && (*input.StartTimeFilter.OldestDate).After(*info.StartTimestamp) {
continue
}
if input.StartTimeFilter.LatestDate != nil && (*input.StartTimeFilter.LatestDate).Before(*info.StartTimestamp) {
continue
}
}
// close timestamp bounds are only enforced for closed executions
if input.CloseTimeFilter != nil && info.CloseTimestamp != nil {
if input.CloseTimeFilter.OldestDate != nil && (*input.CloseTimeFilter.OldestDate).After(*info.CloseTimestamp) {
continue
}
if input.CloseTimeFilter.LatestDate != nil && (*input.CloseTimeFilter.LatestDate).Before(*info.CloseTimestamp) {
continue
}
}
applied = append(applied, info)
}
// swf default order is descending, so reverseOrder is ascending
if input.ReverseOrder != nil && *input.ReverseOrder {
sort.Sort(sortExecutionInfos(applied))
} else {
sort.Sort(sort.Reverse(sortExecutionInfos(applied)))
}
// truncate to the requested page size after merging open + closed results
if input.MaximumPageSize != nil && *input.MaximumPageSize > 0 && int64(len(applied)) > *input.MaximumPageSize {
applied = applied[:*input.MaximumPageSize]
}
return applied
}
// sortExecutionInfos orders workflow executions by ascending start time
// (sort.Interface implementation; wrap in sort.Reverse for descending).
type sortExecutionInfos []*swf.WorkflowExecutionInfo
func (e sortExecutionInfos) Len() int { return len(e) }
func (e sortExecutionInfos) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e sortExecutionInfos) Less(i, j int) bool {
return (*e[i].StartTimestamp).Before(*e[j].StartTimestamp)
}
// FindLatestByWorkflowID returns the execution of the most recently started
// open workflow with the given workflow id, or (nil, nil) when no open
// execution exists. Errors from the underlying list call are logged and
// returned.
// NOTE(review): I and S are presumably int64/string pointer helpers defined
// elsewhere in this package — confirm against their definitions.
func (f *finder) FindLatestByWorkflowID(workflowID string) (exec *swf.WorkflowExecution, err error) {
output, err := f.FindAll(&FindInput{
StatusFilter: FilterStatusOpenPriority,
MaximumPageSize: I(1),
ReverseOrder: aws.Bool(false), // keep descending default
StartTimeFilter: &swf.ExecutionTimeFilter{OldestDate: aws.Time(time.Unix(0, 0))},
ExecutionFilter: &swf.WorkflowExecutionFilter{
WorkflowId: S(workflowID),
},
})
if err != nil {
if ae, ok := err.(awserr.Error); ok {
Log.Printf("component=client fn=findExecution at=list-open error-type=%s message=%s", ae.Code(), ae.Message())
} else {
Log.Printf("component=client fn=findExecution at=list-open error=%q", err)
}
return nil, err
}
// page size 1 + descending order means index 0 is the latest execution
if len(output.ExecutionInfos) == 1 {
return output.ExecutionInfos[0].Execution, nil
}
return nil, nil
}
// Reset discards the workflow-id dedupe index accumulated by append.
func (f *finder) Reset() {
    f.workflowIdIndex = map[string]struct{}{}
}
|
// basic hello world web application just using net/http package
|
package scene
import "time"
// DeltaTime tracks the elapsed wall-clock time between successive screen
// refreshes (frames).
type DeltaTime struct {
    Last time.Time // instant of the previous Update (or of construction)
    Dt   float64   // last computed frame delta, in seconds
}

// NewDT returns a DeltaTime anchored at the current time.
func NewDT() DeltaTime {
    return DeltaTime{Last: time.Now()}
}

// Update computes the seconds elapsed since the previous call (or since
// NewDT), stores it in Dt, and returns it.
//
// Fix: the current instant is sampled exactly once. The original called
// time.Now twice (inside time.Since and again for Last), so the wall-clock
// time between the two calls was dropped from every frame, accumulating
// drift over many updates.
func (d *DeltaTime) Update() float64 {
    now := time.Now()
    dt := now.Sub(d.Last).Seconds()
    d.Dt = dt
    d.Last = now
    return dt
}
|
package dfm
import "fmt"
// token is a single lexical unit produced by the scanner.
type token struct {
    tokenType tokenType
    text      string
    // line and col both start at 1.
    line, col int
}

// tokenType is a rune because single characters are used directly as their
// token type, e.g. ',' '+' or ':'.
type tokenType rune

const (
    tokenIllegal    tokenType = -1
    tokenEOF        tokenType = 0
    tokenWhiteSpace tokenType = 256
    tokenWord       tokenType = 257
    tokenInteger    tokenType = 258
    tokenString     tokenType = 259
    tokenCharacter  tokenType = 260
    tokenFloat      tokenType = 261
)

// String renders the token as "<type>: <quoted text> at <line>:<col>".
func (t token) String() string {
    return fmt.Sprintf("%v: %q at %d:%d", t.tokenType, t.text, t.line, t.col)
}

// String returns a human-readable name for the token type; values without a
// name fall back to showing the raw rune and its numeric code.
func (t tokenType) String() string {
    names := map[tokenType]string{
        tokenIllegal:    "illegal token",
        tokenEOF:        "end of file",
        tokenWhiteSpace: "white space",
        tokenWord:       "word",
        tokenInteger:    "integer",
        tokenString:     "string",
        tokenCharacter:  "character",
        tokenFloat:      "floating point number",
    }
    if name, ok := names[t]; ok {
        return name
    }
    return fmt.Sprintf("token %q (%d)", string(t), int(t))
}
|
package main
import "fmt"
// fibonacci is a function that returns
// a function that returns an int.
//
// Successive calls to the returned closure yield the Fibonacci sequence
// starting at 0: 0, 1, 1, 2, 3, 5, 8, ...
//
// Fix: the original returned the call counter i from the first two calls
// (after incrementing it), so the sequence came out as 1, 2, 1, 1, 2, 3, ...
// instead of the Fibonacci numbers.
func fibonacci() func() int {
    a, b := 0, 1 // a is the next value to emit, b the one after it
    return func() int {
        next := a
        a, b = b, a+b
        return next
    }
}
// main prints the first ten values produced by the fibonacci closure.
func main() {
    next := fibonacci()
    for count := 0; count < 10; count++ {
        fmt.Println(next())
    }
}
|
package i18n
// Translation keys for API errors and user-facing messages. The values are
// message-catalog lookup keys, not display strings.
// NOTE(review): the ALL_CAPS names and the CANCLE_TOO_MUCH spelling violate
// Go naming conventions, but renaming would break every caller — left as-is.
const (
SYSTEM_ERROR = "api.system.error"
PARAM_ERROR = "api.context.invalid_body_param.app_error"
MISSING_DATA_ERROR = "api.sql.missing.data"
MISSING_USER_ERROR = "api.sql.missing.user"
MISSING_INVITE_USER_ERROR = "api.sql.missing.invite_user"
MISSING_BANK_ERROR = "api.sql.missing.bank"
SQL_ERROR = "api.sql.error"
ACCOUNT_ADDRESS_EXIST_ERROR = "api.sql.save_account.address_exists"
ACCOUNT_COIN_SYMBOL_EXIST_ERROR = "api.sql.save_account.coin_symbol_exists"
SESSION_EXPIRED_ERROR = "api.session_expired.app_error"
JWT_PARSE_ERROR = "api.jwt_parse.app_error"
APIKEY_PARSE_ERROR = "api.api_key_parse.app_error"
APIKEY_IPLIMIT_ERROR = "api.api_key_ip_limit.app_error"
DEAL_OVERDUE_ERROR = "api.deal_pay.overdue.error"
DEAL_OPERA_STATUS_ERROR = "api.deal.opera.status.error"
APPEAL_OPERA_STATUS_ERROR = "api.appeal.opera.status.error"
OPERA_STATUS_ERROR = "api.opera.status.error"
OPERA_PERMISSION_DENIED_ERROR = "api.opera.permission.denied"
OPERA_REPEAT_ERROR = "api.opera.repeat.denied"
AVAILABLE_BALANCE_NOT_ENOUGH_ERROR = "api.available_balance.not_enough"
ITEM_ESTABLISH_ERROR = "api.item.establish.error"
ITEM_ESTABLISH_SELF_ERROR = "api.item.establish.self.error"
AMOUNT_OUT_LIMIT_ERROR = "api.amount.out.limit"
VERIFIED_CODE_ERROR = "api.user.verified_code.get_token.error"
USER_PWD_VALID_ERROR = "api.user.is_valid.pwd"
USER_FUND_PWD_VALID_ERROR = "api.user.is_valid.fund_pwd"
USER_LOGIN_BLANK_PWD_ERROR = "api.user.login.blank_pwd.error"
USER_LOGIN_UPDATE_IMTOKEN_ERROR = "api.user.login.update_imtoken.error"
USER_LOGIN_INACTIVE_ERROR = "api.user.login.inactive.app_error"
USER_LOGIN_ATTEMPTS_TOO_MANY_ERROR = "api.user.check_user_login_attempts.too_many.error"
VERIFICATION_CODE_SEND_FREQUENTLY_ERROR = "api.user.send_email_verification_code.frequently.error"
SEND_VERIFY_ERROR = "api.user.send_verify_email_and_forget.failed.error"
SEND_VERIFY_NONE_AVAILABLE_ERROR = "api.user.send_email_verification_code.none_locales_available"
FILE_TOO_LARGE_ERROR = "api.user.upload_profile_user.too_large.error"
FILE_PARSE_ERROR = "api.user.upload_profile_user.parse.error"
FILE_NAME_ERROR = "api.user.upload_profile_user.name.error"
FILE_NO_FIND_ERROR = "api.user.upload_profile_user.no_file.error"
FILE_SAVE_ERROR = "api.user.upload_profile_user.upload_profile.error"
FILE_READ_ERROR = "api.user.create_profile.default_font.error"
EMAIL_EXIST_ERROR = "api.user.email.exist.error"
USERNAME_EXIST_ERROR = "api.user.name.exist.error"
NICKNAME_EXIST_ERROR = "api.nick.name.exist.error"
INVITECODE_PARAM_ERROR = "api.invite.code.param.err"
WHITE_BLACK_ADD_SELF_ERROR = "api.white.black.add.self.error"
PASS_DEAL_MESSAGE = "api.pass.deal.message"
BUY_DEAL_MESSAGE = "api.buy.deal.message"
WITHDRAW_SUCCESS_MESSAGE = "api.withdraw.success.message"
APPEAL_DEAL_MESSAGE = "api.appeal.deal.message"
APPEAL_DEAL_MESSAGE2 = "api.appeal.deal.message2"
NO_COMPLETE_DEAL = "api.item.remove.no.complete.deal"
WALLET_HTTP_ERROR = "api.wallet.http.error"
IM_REGISTER_ERROR = "api.im.register.error"
REQUEST_RATE_LIMIT = "api.request.rate.limit"
FEE_NOT_ENOUGH = "api.fee.not.enough"
AMOUNT_NOT_ENOUGH = "api.amount.not.enough"
USER_USERNAME_ERROR = "model.user.is_valid.username.app_error"
USER_NICKNAME_ERROR = "model.user.is_valid.nickname.app_error"
EMAIL_NOT_VERIFIED = "api.email.not.verified"
ITEM_USE_BANK_ERROR = "api.item.use.bank.error"
BANK_METHOD_LIMIT = "api.bank.method.limit.error"
CANCLE_TOO_MUCH = "api.cancel.too.much.error"
AMOUNT_ERROR = "api.amount.error"
AMOUNT_TOO_MUCH = "api.amount.too.much.error"
SYMBOL_ERROR = "api.symbol.error"
)
// Translation keys for the pieces of transactional notification emails
// (greeting, order fields, status labels, footer notes).
const (
EMAIL_HELLO = "api.email.user"
EMAIL_INTRODUCE1 = "api.email.introduce1"
EMAIL_INTRODUCE2 = "api.email.introduce2"
EMAIL_ORDERT1 = "api.email.order.t.1"
EMAIL_ORDERT2 = "api.email.order.t.2"
EMAIL_ORDER = "api.email.order"
EMAIL_ITEM = "api.email.item"
EMAIL_BUSINESST = "api.email.business.t"
EMAIL_AMOUNTT = "api.email.amount.t"
EMAIL_PRICET = "api.email.price.t"
EMAIL_QUANTITYT = "api.email.quantity.t"
EMAIL_METHODT = "api.email.method.t"
EMAIL_PAYCODET = "api.email.pay.code.t"
EMAIL_CREATEATT = "api.email.create.at.t"
EMAIL_STATUST = "api.email.status.t"
EMAIL_STATUS_SUCCESS = "api.email.status.success"
EMAIL_STATUS_WAITING = "api.email.status.waiting"
EMAIL_STATUS_APPEAL = "api.email.status.appeal"
EMAIL_STATUS_SOLD = "api.email.status.sold"
EMAIL_STATUS_NOTSALE = "api.email.status.notsale"
EMAIL_STATUS_ACTIVE = "api.email.status.active"
EMAIL_STATUS_CANCEL = "api.email.status.cancel"
EMAIL_NOTET = "api.email.note.t"
EMAIL_NOTE1 = "api.email.note.1"
EMAIL_NOTE2 = "api.email.note.2"
EMAIL_NOTE3 = "api.email.note.3"
EMAIL_NOTE4 = "api.email.note.4"
EMAIL_NOTE5 = "api.email.note.5"
EMAIL_TIPS = "api.email.tips"
)
// Translation keys for bank display names.
const (
BANK_1 = "api.bank.1"
BANK_2 = "api.bank.2"
BANK_3 = "api.bank.3"
BANK_4 = "api.bank.4"
BANK_5 = "api.bank.5"
)
// Translation keys for order-filter rejection reasons.
const (
ORDER_FILTER_PRICE_LESS = "api.filter.price.less"
ORDER_FILTER_PRICE_GREATER = "api.filter.price.grater"
ORDER_FILTER_PRICE_NOT_LAWFUL = "api.filter.price.not.lawful"
ORDER_FILTER_PRICE_FLUCTUATE_BIG = "api.filter.price.fluctuate.big"
ORDER_FILTER_QUANTITY_LESS = "api.filter.quantity.less"
ORDER_FILTER_QUANTITY_GREATER = "api.filter.quantity.grater"
ORDER_FILTER_QUANTITY_NOT_LAWFUL = "api.filter.quantity.not.lawful"
ORDER_FILTER_QUOTEASSET_NOT_LAWFUL = "api.filter.quoteasset.not.lawful"
)
|
package service
import (
"context"
"errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"geektime/Go-000/Week04/api/user/v1"
"geektime/Go-000/Week04/internal/biz"
"geektime/Go-000/Week04/internal/data"
)
// UserService implements the v1 User gRPC service, delegating business
// logic to the injected use case.
type UserService struct {
v1.UnimplementedUserServer
uc *biz.UserUseCase
}
// NewUserService wires a UserService to the given user use case.
func NewUserService(uc *biz.UserUseCase) *UserService {
    svc := new(UserService)
    svc.uc = uc
    return svc
}
// GetUserById looks a user up by id and maps the result onto the gRPC reply.
// A missing record becomes codes.NotFound; any other failure is surfaced as
// codes.Internal.
func (srv *UserService) GetUserById(ctx context.Context, r *v1.GetUserByIdRequest) (*v1.GetUserByIdReply, error) {
    // TODO: the request DTO should be converted to a domain object here.
    user, err := srv.uc.GetUserInfoById(ctx, r.Id)
    switch {
    case err == nil:
        return &v1.GetUserByIdReply{Id: user.Id, Name: user.Name, Mobile: user.Mobile}, nil
    case errors.Is(err, data.ErrRecordNotFound):
        return nil, status.Error(codes.NotFound, "user is not found.")
    default:
        return nil, status.Errorf(codes.Internal, "error:%v", err)
    }
}
|
/*
* Copyright 2020-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package core Common Logger initialization
package test
import (
"context"
"testing"
"github.com/opencord/voltha-go/rw_core/config"
"github.com/opencord/voltha-go/rw_core/core/adapter"
cm "github.com/opencord/voltha-go/rw_core/mocks"
"github.com/opencord/voltha-lib-go/v4/pkg/adapters"
com "github.com/opencord/voltha-lib-go/v4/pkg/adapters/common"
"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
"github.com/opencord/voltha-lib-go/v4/pkg/log"
mock_etcd "github.com/opencord/voltha-lib-go/v4/pkg/mocks/etcd"
"github.com/opencord/voltha-lib-go/v4/pkg/version"
"github.com/opencord/voltha-protos/v4/go/voltha"
"github.com/phayes/freeport"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Mock adapter kinds accepted by CreateMockAdapter.
const (
OltAdapter = iota
OnuAdapter
)
// CreateMockAdapter creates a mock OLT or ONU adapter (selected by
// adapterType), starts its inter-container kafka proxy and subscribes the
// adapter's request handler to its topic. An unknown adapterType is fatal.
func CreateMockAdapter(ctx context.Context, adapterType int, kafkaClient kafka.Client, coreInstanceID string, coreName string, adapterName string) (adapters.IAdapter, error) {
var err error
var adapter adapters.IAdapter
// the adapter listens on a topic named after itself
adapterKafkaICProxy := kafka.NewInterContainerProxy(
kafka.MsgClient(kafkaClient),
kafka.DefaultTopic(&kafka.Topic{Name: adapterName}))
adapterCoreProxy := com.NewCoreProxy(ctx, adapterKafkaICProxy, adapterName, coreName)
var adapterReqHandler *com.RequestHandlerProxy
switch adapterType {
case OltAdapter:
adapter = cm.NewOLTAdapter(ctx, adapterCoreProxy)
case OnuAdapter:
adapter = cm.NewONUAdapter(ctx, adapterCoreProxy)
default:
// logger is a package-level logger declared elsewhere in this package
logger.Fatalf(ctx, "invalid-adapter-type-%d", adapterType)
}
adapterReqHandler = com.NewRequestHandlerProxy(coreInstanceID, adapter, adapterCoreProxy)
if err = adapterKafkaICProxy.Start(ctx); err != nil {
logger.Errorw(ctx, "Failure-starting-adapter-intercontainerProxy", log.Fields{"error": err})
return nil, err
}
if err = adapterKafkaICProxy.SubscribeWithRequestHandlerInterface(ctx, kafka.Topic{Name: adapterName}, adapterReqHandler); err != nil {
logger.Errorw(ctx, "Failure-to-subscribe-onu-request-handler", log.Fields{"error": err})
return nil, err
}
return adapter, nil
}
// CreateAndregisterAdapters creates mock ONU and OLT adapters and registers
// them with rw-core through the adapter manager. Registration errors are
// logged and asserted on rather than returned, since this is a test helper.
func CreateAndregisterAdapters(ctx context.Context, t *testing.T, kClient kafka.Client, coreInstanceID string, oltAdapterName string, onuAdapterName string, adapterMgr *adapter.Manager) (*cm.OLTAdapter, *cm.ONUAdapter) {
// Setup the mock OLT adapter
oltAdapter, err := CreateMockAdapter(ctx, OltAdapter, kClient, coreInstanceID, "rw_core", oltAdapterName)
assert.Nil(t, err)
assert.NotNil(t, oltAdapter)
// Register the adapter
registrationData := &voltha.Adapter{
Id: oltAdapterName,
Vendor: "Voltha-olt",
Version: version.VersionInfo.Version,
Type: oltAdapterName,
CurrentReplica: 1,
TotalReplicas: 1,
Endpoint: oltAdapterName,
}
types := []*voltha.DeviceType{{Id: oltAdapterName, Adapter: oltAdapterName, AcceptsAddRemoveFlowUpdates: true}}
deviceTypes := &voltha.DeviceTypes{Items: types}
if _, err := adapterMgr.RegisterAdapter(ctx, registrationData, deviceTypes); err != nil {
logger.Errorw(ctx, "failed-to-register-adapter", log.Fields{"error": err})
assert.NotNil(t, err)
}
// Setup the mock ONU adapter
onuAdapter, err := CreateMockAdapter(ctx, OnuAdapter, kClient, coreInstanceID, "rw_core", onuAdapterName)
assert.Nil(t, err)
assert.NotNil(t, onuAdapter)
// Register the adapter
registrationData = &voltha.Adapter{
Id: onuAdapterName,
Vendor: "Voltha-onu",
Version: version.VersionInfo.Version,
Type: onuAdapterName,
CurrentReplica: 1,
TotalReplicas: 1,
Endpoint: onuAdapterName,
}
types = []*voltha.DeviceType{{Id: onuAdapterName, Adapter: onuAdapterName, AcceptsAddRemoveFlowUpdates: true}}
deviceTypes = &voltha.DeviceTypes{Items: types}
if _, err := adapterMgr.RegisterAdapter(ctx, registrationData, deviceTypes); err != nil {
logger.Errorw(ctx, "failed-to-register-adapter", log.Fields{"error": err})
assert.NotNil(t, err)
}
return oltAdapter.(*cm.OLTAdapter), onuAdapter.(*cm.ONUAdapter)
}
// StartEmbeddedEtcdServer creates and starts an embedded etcd server locally
// on two freshly allocated free ports, returning the server handle and the
// KV client port.
func StartEmbeddedEtcdServer(ctx context.Context, configName, storageDir, logLevel string) (*mock_etcd.EtcdServer, int, error) {
    clientPort, err := freeport.GetFreePort()
    if err != nil {
        return nil, 0, err
    }
    peerPort, err := freeport.GetFreePort()
    if err != nil {
        return nil, 0, err
    }
    cfg := mock_etcd.MKConfig(ctx, configName, clientPort, peerPort, storageDir, logLevel)
    server := mock_etcd.StartEtcdServer(ctx, cfg)
    if server == nil {
        return nil, 0, status.Error(codes.Internal, "Embedded server failed to start")
    }
    return server, clientPort, nil
}
// StopEmbeddedEtcdServer stops the embedded etcd server; a nil server is a
// no-op so teardown paths need not guard the call.
func StopEmbeddedEtcdServer(ctx context.Context, server *mock_etcd.EtcdServer) {
    if server == nil {
        return
    }
    server.Stop(ctx)
}
// SetupKVClient creates a new etcd KV client from the rw-core flags,
// panicking if the client cannot be constructed (acceptable in test setup).
// NOTE(review): the coreInstanceID parameter is accepted but never used —
// confirm whether callers rely on the signature before removing it.
func SetupKVClient(ctx context.Context, cf *config.RWCoreFlags, coreInstanceID string) kvstore.Client {
client, err := kvstore.NewEtcdClient(ctx, cf.KVStoreAddress, cf.KVStoreTimeout, log.FatalLevel)
if err != nil {
panic("no kv client")
}
return client
}
|
// Change root of a tree
// eg.
// +------0------+
// | | |
// +-1-+ +-2-+ +-3-+
// | | | | | |
// 4 5 6 7 8 9
// re-orientate to:
// 6
// |
// +-----2-----+
// | |
// 7 +-----0-----+
// | |
// +-1-+ +-3-+
// | | | |
// 4 5 8 9
package pov
import (
"fmt"
"strings"
)
const testVersion = 2

// Graph maps each node label to the labels its outgoing arcs point at.
type Graph map[string][]string

// New returns a pointer to an empty, ready-to-use Graph.
func New() *Graph {
    graph := Graph{}
    return &graph
}
// AddNode registers nodeLabel with an empty child list; adding an existing
// node is a no-op so its children are never clobbered.
func (g *Graph) AddNode(nodeLabel string) {
    if _, exists := (*g)[nodeLabel]; exists {
        return // repeated node
    }
    (*g)[nodeLabel] = []string{}
}
// AddArc records a directed arc from -> to, first ensuring both endpoints
// exist as nodes (AddNode is a no-op for nodes already present).
func (g *Graph) AddArc(from, to string) {
// do nothing if already added
g.AddNode(to)
g.AddNode(from)
(*g)[from] = append((*g)[from], to)
}
// ArcList returns every arc in the graph, one string per arc,
// formatted as "from -> to". Order is unspecified (map iteration).
func (g *Graph) ArcList() (ret []string) {
    for parent, children := range *g {
        for _, child := range children {
            ret = append(ret, fmt.Sprintf("%s -> %s", parent, child))
        }
    }
    return ret
}
// Change root and return the new graph
// Change the arc direction in the path from oldRoot to newRoot
// BFS/DFS to find the path
// If the graph is a spanning tree, which means |E| = |N| - 1,
// then there should be only 1 path.
func (g *Graph) ChangeRoot(oldRoot, newRoot string) *Graph {
// copy the graph
newg := New()
for k, v := range *g {
(*newg)[k] = make([]string, len(v))
copy((*newg)[k], (*g)[k])
}
// find paths from oldroot to newroot
// paths are space-separated node label sequences produced by findPath
paths := []string{}
g.findPath(oldRoot, newRoot, "", &paths)
// reverse the arcs
for _, p := range paths {
nodes := strings.Fields(p)
if len(nodes) < 2 {
continue // path has only 1 node
}
// flip each consecutive pair: parent->child becomes child->parent
for i, j := 0, 1; j < len(nodes); i, j = i+1, j+1 {
// remove the arc
newg.RemoveArc(nodes[i], nodes[j])
// add arc
newg.AddArc(nodes[j], nodes[i])
}
}
return newg
}
// RemoveArc deletes the arc from -> to, if present. Only the first matching
// arc is removed, and the order of the remaining children is not preserved
// (the last child is swapped into the freed slot).
//
// Fix: the original kept iterating over the stale pre-truncation slice after
// the swap-delete, so a duplicate arc caused a write through an index beyond
// the shrunken slice — an out-of-range panic. Returning after the first
// removal eliminates that path.
func (g *Graph) RemoveArc(from, to string) {
    children, ok := (*g)[from]
    if !ok {
        return // unknown source node
    }
    for i, child := range children {
        if child == to {
            // swap-delete: last element fills the hole, slice shrinks by one
            children[i] = children[len(children)-1]
            (*g)[from] = children[:len(children)-1]
            return
        }
    }
}
// findPath appends to paths every space-separated node sequence leading from
// node1 to node2 by depth-first search; path carries the labels visited so
// far (each followed by a space).
// XXX won't work if there is loop in the graph
func (g *Graph) findPath(node1, node2 string, path string, paths *[]string) {
if node1 == node2 {
*paths = append(*paths, path+node2)
return
}
for _, n := range (*g)[node1] {
(*g).findPath(n, node2, path+node1+" ", paths)
}
}
|
package cmd
import (
"errors"
"fmt"
"os"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// container is the build-target container selected via the --container flag.
var container string
var (
// supported --container values; defaultContainer is the flag default
containers = []string{"dev", "coverage"}
defaultContainer = "dev"
// CI context read from the Buildkite environment
ciBranch = os.Getenv("BUILDKITE_BRANCH")
ciPullRequest = os.Getenv("BUILDKITE_PULL_REQUEST")
ciTag = os.Getenv("BUILDKITE_TAG")
// dockerTags splits a "vMAJOR.MINOR.PATCH..." tag into cumulative groups
dockerTags = regexp.MustCompile(`v(?P<Patch>(?P<Minor>(?P<Major>\d+)\.\d+)\.\d+.*)`)
// pre-release tags (alpha/beta) only get the full version tag published
ignoredSuffixes = regexp.MustCompile("alpha|beta")
publicRepo = regexp.MustCompile(`.*:.*`)
// tags holds the submatches of ciTag; NOTE(review): deployManifest later
// reassigns this same variable to the manifest tag list
tags = dockerTags.FindStringSubmatch(ciTag)
)
// newDockerCmd builds the parent "docker" command and attaches the build and
// push-manifest subcommands. Running it bare behaves like "docker build".
func newDockerCmd() (cmd *cobra.Command) {
    root := &cobra.Command{
        Use:               "docker",
        Short:             cmdDockerShort,
        Long:              cmdDockerLong,
        Example:           cmdDockerExample,
        Args:              cobra.NoArgs,
        Run:               cmdDockerBuildRun,
        DisableAutoGenTag: true,
    }
    root.AddCommand(newDockerBuildCmd())
    root.AddCommand(newDockerPushManifestCmd())
    return root
}
// newDockerBuildCmd builds the "docker build" subcommand, exposing the
// --container flag that selects the build target.
func newDockerBuildCmd() (cmd *cobra.Command) {
    build := &cobra.Command{
        Use:               "build",
        Short:             cmdDockerBuildShort,
        Long:              cmdDockerBuildLong,
        Example:           cmdDockerBuildExample,
        Args:              cobra.NoArgs,
        Run:               cmdDockerBuildRun,
        DisableAutoGenTag: true,
    }
    usage := "target container among: " + strings.Join(containers, ", ")
    build.PersistentFlags().StringVar(&container, "container", defaultContainer, usage)
    return build
}
// newDockerPushManifestCmd builds the "docker push-manifest" subcommand.
func newDockerPushManifestCmd() (cmd *cobra.Command) {
    push := &cobra.Command{
        Use:               "push-manifest",
        Short:             cmdDockerPushManifestShort,
        Long:              cmdDockerPushManifestLong,
        Example:           cmdDockerPushManifestExample,
        Args:              cobra.NoArgs,
        Run:               cmdDockerPushManifestRun,
        DisableAutoGenTag: true,
    }
    return push
}
// cmdDockerBuildRun builds the official image for the selected container
// target, then tags the intermediate image with the final image name.
// Any failure aborts the process via log.Fatal.
func cmdDockerBuildRun(_ *cobra.Command, _ []string) {
    log.Infof("Building Docker image %s...", DockerImageName)
    checkContainerIsSupported(container)
    if err := dockerBuildOfficialImage(container); err != nil {
        log.Fatal(err)
    }
    d := &Docker{}
    if err := d.Tag(IntermediateDockerImageName, DockerImageName); err != nil {
        log.Fatal(err)
    }
}
// cmdDockerPushManifestRun publishes Docker manifests according to the CI
// context: a release tag publishes version tags (plus "latest" for stable
// releases), a non-master branch publishes a branch or PR tag, and master
// publishes "master" plus the README. Anything else publishes nothing.
func cmdDockerPushManifestRun(_ *cobra.Command, _ []string) {
docker := &Docker{}
switch {
case ciTag != "":
// tags holds [full, patch, minor, major] when the CI tag matched
if len(tags) == 4 {
log.Infof("Detected tags: '%s' | '%s' | '%s'", tags[1], tags[2], tags[3])
login(docker, dockerhub)
login(docker, ghcr)
if ignoredSuffixes.MatchString(ciTag) {
// pre-release (alpha/beta): only the exact version tag
deployManifest(docker, tags[1])
} else {
deployManifest(docker, tags[1], tags[2], tags[3], "latest")
}
publishDockerReadme(docker)
} else {
log.Fatal("Docker manifest will not be published, the specified tag does not conform to the standard")
}
case ciBranch != masterTag && !publicRepo.MatchString(ciBranch):
login(docker, dockerhub)
login(docker, ghcr)
deployManifest(docker, ciBranch)
case ciBranch != masterTag && publicRepo.MatchString(ciBranch):
// fork branches (owner:branch) are published under the PR number
login(docker, dockerhub)
login(docker, ghcr)
deployManifest(docker, "PR"+ciPullRequest)
case ciBranch == masterTag && ciPullRequest == stringFalse:
login(docker, dockerhub)
login(docker, ghcr)
deployManifest(docker, "master")
publishDockerReadme(docker)
default:
log.Info("Docker manifest will not be published")
}
}
// checkContainerIsSupported aborts the process unless container is one of
// the supported build targets listed in containers.
func checkContainerIsSupported(container string) {
    supported := false
    for _, candidate := range containers {
        if candidate == container {
            supported = true
            break
        }
    }
    if !supported {
        log.Fatal("Container is not supported. Please select one of " + strings.Join(containers, ", ") + ".")
    }
}
// dockerBuildOfficialImage builds the intermediate image from the
// architecture-specific "Dockerfile.<arch>", injecting build metadata.
// NOTE(review): a getBuild failure calls log.Fatal even though this function
// returns an error — callers never see that failure; confirm this is wanted.
func dockerBuildOfficialImage(arch string) error {
docker := &Docker{}
filename := "Dockerfile"
dockerfile := fmt.Sprintf("%s.%s", filename, arch)
buildMetaData, err := getBuild(ciBranch, os.Getenv("BUILDKITE_BUILD_NUMBER"), "")
if err != nil {
log.Fatal(err)
}
return docker.Build(IntermediateDockerImageName, dockerfile, ".", buildMetaData)
}
// login authenticates the Docker client against the given registry using
// credentials from the environment (DOCKER_* for Docker Hub, GHCR_* for
// GitHub Container Registry). Missing credentials or a failed login abort
// the process.
func login(docker *Docker, registry string) {
    var username, password string
    switch registry {
    case dockerhub:
        username, password = os.Getenv("DOCKER_USERNAME"), os.Getenv("DOCKER_PASSWORD")
    case ghcr:
        username, password = os.Getenv("GHCR_USERNAME"), os.Getenv("GHCR_PASSWORD")
    }
    if username == "" {
        log.Fatal(errors.New("DOCKER_USERNAME/GHCR_USERNAME is empty"))
    }
    if password == "" {
        log.Fatal(errors.New("DOCKER_PASSWORD/GHCR_PASSWORD is empty"))
    }
    log.Infof("Login to %s as %s", registry, username)
    if err := docker.Login(username, password, registry); err != nil {
        log.Fatalf("Login to %s failed: %s", registry, err)
    }
}
// deployManifest pushes one manifest per requested tag to both Docker Hub
// and GHCR, aborting on failure.
// NOTE(review): this reassigns the package-level `tags` variable (which
// until now held the regexp submatches) — callers evaluate tags[...] as
// arguments before the call, so this works, but the shared state is fragile;
// consider a local slice.
func deployManifest(docker *Docker, tag ...string) {
tags = make([]string, 0)
log.Infof("The following Docker manifest(s) will be deployed on %s and %s", dockerhub, ghcr)
for _, t := range tag {
log.Infof("- %s:%s", DockerImageName, t)
tags = append(tags, dockerhub+"/"+DockerImageName+":"+t, ghcr+"/"+DockerImageName+":"+t)
}
if err := docker.Manifest(tags); err != nil {
log.Fatal(err)
}
}
// publishDockerReadme pushes README.md to the image's Docker Hub page,
// aborting the process on failure.
func publishDockerReadme(docker *Docker) {
    log.Info("Docker pushing README.md to Docker Hub")
    err := docker.PublishReadme()
    if err != nil {
        log.Fatal(err)
    }
}
|
package sensu
import (
"github.com/bitly/go-simplejson"
)
// Check models a single Sensu check. Only the tagged fields (name, command,
// issued) are populated from JSON; the remaining exported fields are filled
// in by the executing code, and the unexported fields hold the resolved
// command line and the raw parsed payload.
type Check struct {
Name string `json:"name"`
Command string `json:"command"`
Executed int
Status int
Issued int `json:"issued"`
Output string
Duration float64
Timeout int
commandExecuted string
data *simplejson.Json
}
|
package main
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
"math"
)
var (
// maxNonce bounds the nonce search space
maxNonce = math.MaxInt64
// maxZero is the mining difficulty: number of leading zero BYTES required
maxZero = 2
)
// ProofOfWork wraps a block whose nonce is searched by Mine.
type ProofOfWork struct {
block *Block
}
// Pad serializes the fields that participate in the proof-of-work hash:
// previous hash, transaction hash, timestamp and the current nonce,
// concatenated in that order.
func (pow *ProofOfWork) Pad() []byte {
src := bytes.Join(
[][]byte{
pow.block.PrevHash,
pow.block.HashTransaction(),
IntToByte(pow.block.Timestamp),
IntToByte(pow.block.Nonce),
},
[]byte{})
return src
}
// Mine brute-forces the block's nonce until the SHA-256 digest of Pad()
// begins with maxZero zero bytes, then returns the winning nonce and hash.
// Progress is printed in place (leading \r) for every attempt.
func (pow *ProofOfWork) Mine() (int64, []byte) {
var (
hash [32]byte
nonce int64
)
for nonce < int64(maxNonce) {
pow.block.Nonce = nonce
hash = sha256.Sum256(pow.Pad())
// difficulty check: the first maxZero bytes must all be zero
hashWin := true
for i := 0; i < maxZero; i++ {
if hash[i] != 0 {
hashWin = false
break
}
}
fmt.Printf("\r nonce[%d], hash[%x]", nonce, hash)
if hashWin {
break
} else {
nonce++
}
}
return nonce, hash[:]
}
// IntToByte encodes num as its 8-byte big-endian two's-complement
// representation.
//
// Fix: the original piped the value through binary.Write into a bytes.Buffer
// and silently discarded the returned error; writing the fixed-size word
// directly has no error path and no intermediate buffer.
func IntToByte(num int64) []byte {
    out := make([]byte, 8)
    binary.BigEndian.PutUint64(out, uint64(num))
    return out
}
/*
func main() {
block := NewGenesisBlock()
pow := ProofOfWork{block}
pow.Mine()
return
}
*/
|
package config
// NOTE(review): SECURITY — real-looking OAuth client secrets, mail
// credentials and callback URLs are hard-coded below and committed to source
// control. These should be rotated and loaded from environment variables or
// a secrets manager; values are left untouched here because callers read
// them at runtime.
const HttpPort = ":80"
const Environment = "env"
const GoogleClientKey = "599717309315-c84f5ijm874mu2of1i1g6qm6ufbfvmn4.apps.googleusercontent.com"
const GoogleSecret = "x9XbDukgssGemHHeni_UBckZ"
const GoogleAuthCallbackUrl = "http://localhost:5000/auth/google/callback?provider=google"
const FacebookClientKey = "1262553130621343"
const FacebookSecret = "330deb872bc3dec1438fe30feb74c766" //"edaf03f3aee9dc651f68e3ec50077a88"
const FacebookAuthCallbackUrl = "http://127.0.0.1/auth/facebook/callback?provider=facebook"
const DefaultMail = "mail2193"
//const DefaultMail = "mr.uwaifo@gmail.com"
const DefaultMailPassword = "JbK9mSraq8"
// database / cache targets (mutable so tests can redirect them)
var DatabaseName = "testing"
var ConfirmationDbName = "confirmations"
var RedisDB = 0
var RedirectUrl = "http://localhost:8000"
|
package pie_test
import (
"github.com/elliotchance/pie/v2"
"github.com/stretchr/testify/assert"
"testing"
)
// TestPop asserts that pie.Pop returns a pointer to the first element and
// shrinks the slice in place, down to (and including) the empty slice.
func TestPop(t *testing.T) {
numbers := []float64{42.0, 4.2}
assert.Equal(t, 42.0, *pie.Pop(&numbers))
assert.Equal(t, []float64{4.2}, numbers)
assert.Equal(t, 4.2, *pie.Pop(&numbers))
assert.Equal(t, []float64{}, numbers)
}
|
package controller
import "github.com/therecipe/qt/core"
// StackController is the package-wide singleton instance; it is assigned by
// the qt-invoked init constructor.
var StackController *stackController

// stackController is a Qt object exposing a clicked(string) signal.
//
// Fix: the constructor struct tag was missing its closing quote
// (`constructor:"init` instead of `constructor:"init"`), which makes the tag
// unparseable by reflect.StructTag and silently breaks qt's constructor
// wiring (go vet flags this as a malformed tag).
type stackController struct {
    core.QObject
    _ func()       `constructor:"init"`
    _ func(string) `signal:"clicked"`
}
// init is invoked by qt as the object's constructor and publishes the
// instance through the package-level StackController singleton.
func (c *stackController) init() {
StackController = c
}
|
package service_test
import (
"context"
"crypto/tls"
"net/http"
"strings"
"testing"
authTest "github.com/go-ocf/cloud/authorization/provider"
"github.com/go-ocf/cloud/grpc-gateway/pb"
grpcTest "github.com/go-ocf/cloud/grpc-gateway/test"
"github.com/go-ocf/cloud/http-gateway/test"
"github.com/go-ocf/cloud/http-gateway/uri"
"github.com/go-ocf/kit/codec/json"
kitNetGrpc "github.com/go-ocf/kit/net/grpc"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// TestGetResource onboards a simulated device against a full backend and
// verifies that the HTTP gateway returns the expected representation of its
// light resource.
func TestGetResource(t *testing.T) {
deviceID := grpcTest.MustFindDeviceByName(grpcTest.TestDeviceName)
ctx, cancel := context.WithTimeout(context.Background(), test.TestTimeout)
defer cancel()
ctx = kitNetGrpc.CtxWithToken(ctx, authTest.UserToken)
// spin up the backend services for the duration of the test
tearDown := grpcTest.SetUp(ctx, t)
defer tearDown()
conn, err := grpc.Dial(grpcTest.GRPC_HOST, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{
RootCAs: grpcTest.GetRootCertificatePool(t),
})))
require.NoError(t, err)
c := pb.NewGrpcGatewayClient(conn)
defer conn.Close()
shutdownDevSim := grpcTest.OnboardDevSim(ctx, t, c, deviceID, grpcTest.GW_HOST, grpcTest.GetAllBackendResourceLinks())
defer shutdownDevSim()
// start the HTTP gateway under test
webTearDown := test.NewTestHTTPGW(t, test.NewTestBackendConfig().String())
defer webTearDown()
var response map[string]interface{}
getResource(t, deviceID, uri.Device+"/light/1", &response)
require.Equal(t, map[string]interface{}{"name": "Light", "power": uint64(0), "state": false}, response)
}
// TestGetResourceNotExist verifies that requesting a resource href the
// device does not expose yields 404 with the backend's NotFound error chain
// in the JSON body.
func TestGetResourceNotExist(t *testing.T) {
deviceID := grpcTest.MustFindDeviceByName(grpcTest.TestDeviceName)
ctx, cancel := context.WithTimeout(context.Background(), test.TestTimeout)
defer cancel()
ctx = kitNetGrpc.CtxWithToken(ctx, authTest.UserToken)
// spin up the backend services for the duration of the test
tearDown := grpcTest.SetUp(ctx, t)
defer tearDown()
conn, err := grpc.Dial(grpcTest.GRPC_HOST, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{
RootCAs: grpcTest.GetRootCertificatePool(t),
})))
require.NoError(t, err)
c := pb.NewGrpcGatewayClient(conn)
defer conn.Close()
shutdownDevSim := grpcTest.OnboardDevSim(ctx, t, c, deviceID, grpcTest.GW_HOST, grpcTest.GetAllBackendResourceLinks())
defer shutdownDevSim()
webTearDown := test.NewTestHTTPGW(t, test.NewTestBackendConfig().String())
defer webTearDown()
getReq := test.NewRequest("GET", uri.Device+"/notExist", strings.NewReader("")).
DeviceId(deviceID).AuthToken(authTest.UserToken).Build()
res := test.HTTPDo(t, getReq)
defer res.Body.Close()
var response map[string]string
err = json.ReadFrom(res.Body, &response)
require.NoError(t, err)
require.Equal(t, http.StatusNotFound, res.StatusCode)
exp := map[string]string{
"err": "cannot get resource: rpc error: code = NotFound desc = cannot retrieve resources values: rpc error: code = NotFound desc = cannot retrieve resources values: not found",
}
require.Equal(t, exp, response)
}
// getResource issues an authorized GET for uri against the HTTP gateway,
// requires a 200 response, and decodes the JSON body into data.
func getResource(t *testing.T, deviceID, uri string, data interface{}) {
    req := test.NewRequest("GET", uri, nil).
        DeviceId(deviceID).AuthToken(authTest.UserToken).Build()
    resp := test.HTTPDo(t, req)
    defer resp.Body.Close()
    require.Equal(t, http.StatusOK, resp.StatusCode)
    require.NoError(t, json.ReadFrom(resp.Body, &data))
}
|
package authenticate_test
import (
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"oneday-infrastructure/api/authenticate"
"oneday-infrastructure/internal/pkg/authenticate/domain"
"oneday-infrastructure/tools"
"strings"
"testing"
)
// TestLogin posts a password login command through the gin router and checks
// for a 200 response carrying a non-empty token.
//
// Fix: the token assertion was written as assert.True(t, true, cond) — the
// literal `true` was the asserted value, so the check always passed and the
// real condition was silently treated as a message argument.
func TestLogin(t *testing.T) {
    router := gin.Default()
    authenticate.InitAuthenticateApi(router)
    w := httptest.NewRecorder()
    cmd := domain.LoginCmd{
        Username:         "zzf",
        EffectiveSeconds: 60 * 60,
        PassCode:         "zzf",
        LoginMode:        "PASSWORD",
        EncryptWay:       "MD5",
        UniqueCode:       "code",
    }
    req, _ := http.NewRequest(
        "POST",
        "login",
        strings.NewReader(tools.JsonString(cmd)))
    req.Header.Set("Content-Type", "Application/json")
    req.Header.Set("tenantCode", "tttt")
    router.ServeHTTP(w, req)
    assert.Equal(t, 200, w.Code)
    assert.True(t, (tools.JsonStringToMap(w.Body.String()))["token"].(string) != "")
}
// TestResetPassword posts a password-reset command through the gin router
// and expects a 200 response whose body reports a successful reset.
func TestResetPassword(t *testing.T) {
router := gin.Default()
authenticate.InitAuthenticateApi(router)
w := httptest.NewRecorder()
cmd := domain.ResetPasswordCmd{
Username: "zzf",
EncryptWay: "MD5",
NewPassword: "zzf",
OldPassword: "zzf",
}
req, _ := http.NewRequest(
"POST",
"resetPassword",
strings.NewReader(tools.JsonString(cmd)))
req.Header.Set("Content-Type", "Application/json")
req.Header.Set("tenantCode", "tttt")
router.ServeHTTP(w, req)
assert.Equal(t, 200, w.Code)
assert.Equal(t, domain.ResetPasswordSuccess, (tools.JsonStringToMap(w.Body.String()))["result"].(string))
}
|
package smtp
import (
"crypto/tls"
"io"
"net/smtp"
)
// smtpClient is the subset of *smtp.Client methods this package relies on,
// expressed as an interface — presumably so tests can substitute a fake
// client (NOTE(review): confirm against the package's tests).
type smtpClient interface {
Hello(string) error
Extension(string) (bool, string)
StartTLS(*tls.Config) error
Auth(smtp.Auth) error
Mail(string) error
Rcpt(string) error
Data() (io.WriteCloser, error)
Quit() error
Close() error
}
|
package main
import (
"bufio"
"database/sql"
"encoding/base64"
"flag"
"fmt"
"io/ioutil"
"os"
_ "github.com/mattn/go-sqlite3"
)
// DatabasePath is a constant containing the main sqlite file path
const DatabasePath string = "../persistent/codeImages"
func convertFileToBase64(filePath string) (encoded string, err error) {
// Open file on disk.
f, err := os.Open(filePath)
if err != nil {
fmt.Println("Problems opening image file.", err)
}
// Read entire JPG into byte slice.
reader := bufio.NewReader(f)
content, err := ioutil.ReadAll(reader)
if err != nil {
fmt.Println("Problems converting image file into byte slice.", err)
}
// Encode as base64.
encoded = base64.StdEncoding.EncodeToString(content)
return
}
// checkTable reports whether the code_images table already exists in the
// local sqlite database at DatabasePath.
func checkTable() (existsTable bool, err error) {
	db, err := sql.Open("sqlite3", DatabasePath)
	if err != nil {
		fmt.Println("Error opening database.", err)
		return false, err
	}
	defer db.Close()
	statement := "SELECT CAST(COUNT(*) AS BIT) FROM sqlite_master WHERE type = 'table' AND name = 'code_images'"
	stmt, err := db.Prepare(statement)
	if err != nil {
		fmt.Println("Problems creating the statement that validates the existence of local storage.", err)
		// Returning here also avoids the previous nil-stmt Close panic.
		return false, err
	}
	defer stmt.Close()
	err = stmt.QueryRow().Scan(&existsTable)
	if err != nil {
		fmt.Println("Could not find the tables within local storage.", err)
	}
	return
}
// createTable creates the code_images table (id primary key, base64
// content) in the local sqlite database if it does not exist yet.
func createTable() (err error) {
	db, err := sql.Open("sqlite3", DatabasePath)
	if err != nil {
		fmt.Println("Error opening database.", err)
		return err
	}
	defer db.Close()
	statement := `CREATE TABLE IF NOT EXISTS code_images (
		id INTEGER,
		content TEXT,
		PRIMARY KEY (id))`
	stmt, err := db.Prepare(statement)
	if err != nil {
		fmt.Println("Problems creating the local storage table. ", err)
		// Returning here also avoids deferring Close on a nil statement.
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec()
	return
}
// storeImage inserts one (id, base64 content) row into the code_images
// table of the local sqlite database.
func storeImage(id int, content string) (err error) {
	db, err := sql.Open("sqlite3", DatabasePath)
	if err != nil {
		fmt.Println("Error opening database.", err)
		return err
	}
	defer db.Close()
	statement := `INSERT INTO code_images (id, content) VALUES ($1, $2)`
	stmt, err := db.Prepare(statement)
	if err != nil {
		fmt.Println("Problems building the statement that insert image's data into local storage. ", err)
		// Returning here also avoids deferring Close on a nil statement.
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(id, content)
	if err != nil {
		fmt.Println("An issue occurred while storing images within local storage. ", err)
	}
	return
}
// main encodes the image given by -p as base64 and stores it in the local
// sqlite database under the id given by -i, creating the table on first use.
func main() {
	idImg := flag.Int("i", 0, "image's Id")
	imgPath := flag.String("p", "../", "image's path")
	flag.Parse()
	code, err := convertFileToBase64(*imgPath)
	if err != nil {
		fmt.Println("Problem converting image to base64 code. ", err)
		// Don't store an empty payload when conversion failed.
		return
	}
	exists, err := checkTable()
	if err != nil {
		// Previously this error was silently dropped.
		fmt.Println("Problem checking for the local table of code_images. ", err)
	}
	if !exists {
		if err = createTable(); err != nil {
			fmt.Println("Problem creating local table of code_images. ", err)
			return
		}
	}
	if err = storeImage(*idImg, code); err != nil {
		fmt.Println("Error inserting coded image into local storage. ", err)
	}
}
|
package slug
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestSlug drives groups of input/expected pairs through a slugger; each
// group gets a fresh Slug instance so the duplicate-suffix counters reset
// between groups.
func TestSlug(t *testing.T) {
	groups := [][]struct {
		in, out string
	}{
		{
			{"foo", "foo"},
			{"foo", "foo-1"},
			{"foo bar", "foo-bar"},
		},
		{
			{"foo", "foo"},
			{"fooCamelCase", "foocamelcase"},
		},
		{
			{"foo", "foo"},
			{"foo", "foo-1"},
			// {"foo 1", "foo-1-1"}, // these are too rare for Jsonnet
			// {"foo 1", "foo-1-2"},
			{"foo", "foo-2"},
		},
		{
			{"heading with a - dash", "heading-with-a---dash"},
			{"heading with an _ underscore", "heading-with-an-_-underscore"},
			{"heading with a period.txt", "heading-with-a-periodtxt"},
			{"exchange.bind_headers(exchange, routing [, bindCallback])", "exchangebind_headersexchange-routing--bindcallback"},
		},
	}
	for _, group := range groups {
		slugger := New()
		for _, tc := range group {
			assert.Equal(t, tc.out, slugger.Slug(tc.in))
		}
	}
}
|
package util
import (
"github.com/ActiveState/logyard-apps/common"
"github.com/ActiveState/stackato-go/server"
)
// Config mirrors the subset of the cloud_controller_ng configuration that
// this package reads.
type Config struct {
	Info struct {
		// Name is the product brand name.
		Name string `json:"name"`
	} `json:"info"`
}
// c holds the live cloud_controller_ng config handle; populated by
// loadConfig and read by getConfig.
var c *server.Config
// getConfig returns the currently loaded cloud_controller_ng config.
func getConfig() *Config {
	cfg := c.GetConfig()
	return cfg.(*Config)
}

// loadConfig fetches the cloud_controller_ng config group and aborts the
// process if it cannot be loaded.
func loadConfig() {
	var err error
	if c, err = server.NewConfig("cloud_controller_ng", Config{}); err != nil {
		common.Fatal("Unable to load cc_ng config; %v", err)
	}
}

// GetBrandName returns the product brand name from the cc_ng config.
func GetBrandName() string {
	cfg := getConfig()
	return cfg.Info.Name
}
|
package handler
import (
"fmt"
"github.com/form3tech-oss/jwt-go"
"github.com/gofiber/fiber/v2"
"github.com/serbanmarti/fiber_rest_api/internal"
)
// Restricted is a JWT-protected endpoint that greets the caller using the
// Id claim from their token (placed in locals by the auth middleware).
func (h *Handler) Restricted(c *fiber.Ctx) error {
	token := c.Locals("user").(*jwt.Token)
	claims := token.Claims.(*internal.JWTClaims)
	msg := fmt.Sprintf("Welcome %s", claims.Id)
	return HTTPSuccess(c, fiber.Map{"Msg": msg})
}
|
package size
import "fmt"
// unit is the CSS-style measurement unit attached to a Size value.
type unit string
// Supported units. "none" marks keyword values (e.g. "auto") and "times"
// marks a bare numeric multiplier rendered without a suffix.
const px unit = "px"
const em unit = "em"
const percent unit = "%"
const none unit = "none"
const times unit = "times"
const vw unit = "vw"
const vh unit = "vh"
const vmin unit = "vmin"
const vmax unit = "vmax"
// Auto is the keyword size "auto".
var Auto = Size{unit: none, stringValue: "auto"}
// Size is a value/unit pair; exactly one of the value fields is meaningful
// depending on unit (see String).
type Size struct {
	intValue int64
	floatValue float64
	stringValue string
	unit unit
}
func Times(value float64) Size {
return Size{floatValue: value, unit: times}
}
func Px(value int64) Size {
return Size{intValue: value, unit: px}
}
func Percent(value int64) Size {
return Size{intValue: value, unit: percent}
}
func Em(value float64) Size {
return Size{floatValue: value, unit: em}
}
func Vh(value float64) Size {
return Size{floatValue: value, unit: vh}
}
func Vw(value float64) Size {
return Size{floatValue: value, unit: vw}
}
func Vmin(value float64) Size {
return Size{floatValue: value, unit: vmin}
}
func Vmax(value float64) Size {
return Size{floatValue: value, unit: vmax}
}
// String renders the size as a CSS-style value: integer or float value
// followed by the unit suffix, a bare number for multipliers, or the raw
// keyword for unit "none". It panics on an unknown unit.
func (s Size) String() string {
	var value interface{}
	switch s.unit {
	case px, percent:
		value = s.intValue
	case em, vh, vw, vmin, vmax:
		value = s.floatValue
	case times:
		// Multipliers carry no unit suffix.
		return fmt.Sprintf("%v", s.floatValue)
	case none:
		return fmt.Sprintf("%v", s.stringValue)
	default:
		panic("unsupported unit")
	}
	return fmt.Sprintf("%v%v", value, s.unit)
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package utils
import (
"fmt"
"net" //nolint:goimports
// #nosec
// register HTTP handler for /debug/pprof
"net/http"
// For pprof
_ "net/http/pprof" // #nosec G108
"os"
"sync"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
berrors "github.com/pingcap/tidb/br/pkg/errors"
tidbutils "github.com/pingcap/tidb/util"
"go.uber.org/zap"
)
var (
	// startedPProf records the address the pprof listener is bound to;
	// empty means pprof has not been started. Guarded by mu.
	startedPProf = ""
	mu           sync.Mutex
)
// listen opens the TCP listener that will serve pprof.
// At most one pprof listener may exist per process: startedPProf (guarded
// by mu) records the bound address of the running one, and a second call
// fails with an annotated ErrUnknown.
func listen(statusAddr string) (net.Listener, error) {
	mu.Lock()
	defer mu.Unlock()
	if startedPProf != "" {
		log.Warn("Try to start pprof when it has been started, nothing will happen", zap.String("address", startedPProf))
		return nil, errors.Annotate(berrors.ErrUnknown, "try to start pprof when it has been started at "+startedPProf)
	}
	// Test hook: force a deterministic port under the
	// "determined-pprof-port" failpoint.
	failpoint.Inject("determined-pprof-port", func(v failpoint.Value) {
		port := v.(int)
		statusAddr = fmt.Sprintf(":%d", port)
		log.Info("injecting failpoint, pprof will start at determined port", zap.Int("port", port))
	})
	listener, err := net.Listen("tcp", statusAddr)
	if err != nil {
		log.Warn("failed to start pprof", zap.String("addr", statusAddr), zap.Error(err))
		return nil, errors.Trace(err)
	}
	startedPProf = listener.Addr().String()
	// Announce the bound address both in the log and on stderr.
	log.Info("bound pprof to addr", zap.String("addr", startedPProf))
	_, _ = fmt.Fprintf(os.Stderr, "bound pprof to addr %s\n", startedPProf)
	return listener, nil
}
// StartPProfListener forks a new goroutine listening on specified port and provide pprof info.
// If serving later fails, the goroutine resets startedPProf (under mu) so
// a subsequent StartPProfListener call can bind again.
func StartPProfListener(statusAddr string, wrapper *tidbutils.TLS) error {
	listener, err := listen(statusAddr)
	if err != nil {
		return err
	}
	go func() {
		// The default handler serves the routes registered by the
		// net/http/pprof import.
		if e := http.Serve(wrapper.WrapListener(listener), nil); e != nil {
			log.Warn("failed to serve pprof", zap.String("addr", startedPProf), zap.Error(e))
			mu.Lock()
			startedPProf = ""
			mu.Unlock()
			return
		}
	}()
	return nil
}
|
package marketplace
import (
"github.com/jinzhu/configor"
"github.com/sonm-io/core/accounts"
)
// MarketplaceConfig is the YAML-backed configuration of the marketplace
// service.
type MarketplaceConfig struct {
	// ListenAddr is the address the service listens on.
	ListenAddr string `yaml:"address"`
	// Eth holds the (required) Ethereum account configuration.
	Eth accounts.EthConfig `required:"true" yaml:"ethereum"`
}
// NewConfig loads a MarketplaceConfig from the file at path.
func NewConfig(path string) (*MarketplaceConfig, error) {
	cfg := MarketplaceConfig{}
	if err := configor.Load(&cfg, path); err != nil {
		return nil, err
	}
	return &cfg, nil
}
|
package vtubers
import (
"context"
"fmt"
"google.golang.org/api/option"
"google.golang.org/api/youtube/v3"
"os"
"time"
)
type (
	// YoutubeStruct wraps an authenticated YouTube API client together
	// with pre-built list calls, all scoped to a single channel.
	YoutubeStruct struct {
		ChannelId string
		Client *youtube.Service
		SearchList *youtube.SearchListCall
		VideosList *youtube.VideosListCall
		ChannelList *youtube.ChannelsListCall
	}
)
// New builds a YoutubeStruct bound to channelId, authenticating with the
// credentials file at filename. The file must exist; the pre-built list
// calls request the parts each accessor method relies on.
func New(filename string, channelId string) (*YoutubeStruct, error) {
	if _, err := os.Stat(filename); err != nil {
		return &YoutubeStruct{}, err
	}
	client, err := youtube.NewService(context.Background(), option.WithCredentialsFile(filename))
	if err != nil {
		return &YoutubeStruct{}, err
	}
	return &YoutubeStruct{
		ChannelId:   channelId,
		Client:      client,
		SearchList:  client.Search.List([]string{"snippet"}),
		VideosList:  client.Videos.List([]string{"liveStreamingDetails", "snippet"}),
		ChannelList: client.Channels.List([]string{"snippet", "statistics", "brandingSettings"}),
	}, nil
}
// GetChannelInfo fetches the configured channel's metadata (the snippet,
// statistics and branding parts set up in New).
func (s *YoutubeStruct) GetChannelInfo() (*youtube.ChannelListResponse, error) {
	return s.ChannelList.Id(s.ChannelId).Do()
}
// getLives searches the channel for video broadcasts in the given
// eventType state ("upcoming", "live" or "completed").
func (s *YoutubeStruct) getLives(eventType string) (*youtube.SearchListResponse, error) {
	return s.SearchList.ChannelId(s.ChannelId).EventType(eventType).Type("video").Do()
}

// GetUpcomingLive lists broadcasts that are scheduled but not yet started.
func (s *YoutubeStruct) GetUpcomingLive() (*youtube.SearchListResponse, error) {
	return s.getLives("upcoming")
}

// GetCompletedLive lists broadcasts that have already finished.
func (s *YoutubeStruct) GetCompletedLive() (*youtube.SearchListResponse, error) {
	return s.getLives("completed")
}

// GetNowLive lists broadcasts that are currently on air.
func (s *YoutubeStruct) GetNowLive() (*youtube.SearchListResponse, error) {
	return s.getLives("live")
}
// GetVideo fetches a single video's snippet and live-streaming details.
func (s *YoutubeStruct) GetVideo(videoId string) (*youtube.VideoListResponse, error) {
	response, err := s.VideosList.Id(videoId).Do()
	if err != nil {
		return nil, err
	}
	return response, nil
}
// GetLiveMessages pages through the chat messages of live chat livechatId,
// invoking callback once per response page; paging stops when callback
// returns a non-nil error. Successive page fetches are throttled with a
// short sleep.
func (s *YoutubeStruct) GetLiveMessages(livechatId string, callback func(*youtube.LiveChatMessageListResponse) error) error {
	req := s.Client.LiveChatMessages.List(livechatId, []string{"id", "snippet", "authorDetails"}).MaxResults(100)
	ctx, cancel := context.WithCancel(context.Background())
	// NOTE(review): this goroutine only sleeps and polls ctx; it performs
	// no observable work besides printing the context error, and looks
	// vestigial — confirm whether it can be removed.
	go func() {
		for {
			select {
			case <-ctx.Done():
				time.Sleep(150 * time.Millisecond)
				return
			default:
				if ctx.Err() != nil {
					fmt.Println(ctx.Err())
					return
				}
				time.Sleep(150 * time.Millisecond)
			}
		}
	}()
	err := req.Pages(ctx, func(response *youtube.LiveChatMessageListResponse) error {
		// Throttle between pages before handing the page to the caller.
		time.Sleep(150 * time.Millisecond)
		return callback(response)
	})
	cancel()
	return err
}
|
package do_test
import (
"time"
. "github.com/bryanl/dolb/do"
"github.com/bryanl/dolb/mocks"
"github.com/digitalocean/godo"
"github.com/stretchr/testify/mock"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// oldActionTimeout remembers the package's ActionTimeout so the suite can
// restore it after shortening it for fast tests.
var oldActionTimeout time.Duration
var _ = BeforeSuite(func() {
	oldActionTimeout = ActionTimeout
	// Shrink the timeout so polling paths fail fast during tests.
	ActionTimeout = 10 * time.Millisecond
})
var _ = AfterSuite(func() {
	ActionTimeout = oldActionTimeout
})
// This spec exercises LiveDigitalOcean against mocked godo sub-services.
var _ = Describe("DoClient", func() {
	var (
		// Mocked godo services shared by every example below.
		actionsService  = &mocks.ActionsService{}
		domainsService  = &mocks.DomainsService{}
		dropletsService = &mocks.DropletsService{}
		godoc           = &godo.Client{
			Actions:  actionsService,
			Domains:  domainsService,
			Droplets: dropletsService,
		}
		domain = "lb.example.com"
		ldo    = NewLiveDigitalOcean(godoc, domain)
	)
	Describe("creating an agent", func() {
		BeforeEach(func() {
			droplet := &godo.Droplet{ID: 1}
			dropletsService.On(
				"Create",
				mock.AnythingOfTypeArgument("*godo.DropletCreateRequest"),
			).Return(droplet, nil, nil).Once()
			actions := []godo.Action{
				{ID: 1, Type: "create"},
			}
			dropletsService.On(
				"Actions",
				1,
				mock.Anything,
			).Return(actions, nil, nil).Once()
			// The create action is polled twice: first in-progress, then
			// completed.
			a1 := godo.Action{ID: 1, Type: "created", Status: "in-progress"}
			actionsService.On("Get", 1).Return(&a1, nil, nil).Once()
			a2 := a1
			a2.Status = "completed"
			actionsService.On("Get", 1).Return(&a2, nil, nil).Once()
			// d2 aliases droplet (both are the same *godo.Droplet).
			d2 := droplet
			d2.Networks = &godo.Networks{
				V4: []godo.NetworkV4{
					{Type: "public", IPAddress: "4.4.4.4"},
					{Type: "private", IPAddress: "10.10.10.10"},
				},
			}
			dropletsService.On("Get", 1).Return(d2, nil, nil).Once()
		})
		It("creates an agent using godo", func() {
			dcr := &DropletCreateRequest{
				SSHKeys: []string{"1", "2"},
			}
			_, err := ldo.CreateAgent(dcr)
			Expect(err).ToNot(HaveOccurred())
		})
	})
	Describe("deleting an agent", func() {
		Context("that exists", func() {
			BeforeEach(func() {
				dropletsService.On("Delete", 1).Return(nil, nil).Once()
			})
			AfterEach(func() {
				dropletsService.AssertExpectations(GinkgoT())
			})
			It("deletes the agent", func() {
				err := ldo.DeleteAgent(1)
				Expect(err).ToNot(HaveOccurred())
			})
		})
	})
	Describe("creating a dns entry", func() {
		BeforeEach(func() {
			expectedDrer := &godo.DomainRecordEditRequest{
				Type: "A",
				Name: "foo",
				Data: "192.168.1.1",
			}
			record := &godo.DomainRecord{
				ID:   5,
				Type: "A",
				Name: "foo",
				Data: "192.168.1.1",
			}
			domainsService.On(
				"CreateRecord",
				domain,
				expectedDrer,
			).Return(record, nil, nil).Once()
		})
		It("creates a dns entry", func() {
			de, err := ldo.CreateDNS("foo", "192.168.1.1")
			Expect(err).ToNot(HaveOccurred())
			Expect(de).ToNot(BeNil())
			Expect(de.RecordID).To(Equal(5))
		})
	})
	Describe("deleting a dns entry", func() {
		Context("that exists", func() {
			BeforeEach(func() {
				domainsService.On("DeleteRecord", domain, 1).Return(nil, nil)
			})
			AfterEach(func() {
				domainsService.AssertExpectations(GinkgoT())
			})
			It("deletes the agent", func() {
				err := ldo.DeleteDNS(1)
				Expect(err).ToNot(HaveOccurred())
			})
		})
	})
	// NOTE(review): the floating-ip specs below are empty placeholders.
	Describe("creating a floating ip", func() {
	})
	Describe("deleting a floating ip", func() {
	})
	Describe("assigning a floating ip", func() {
	})
	Describe("unassinging a floating ip", func() {
	})
})
|
package main
import "fmt"
// main runs maxProfit on a small sample price series and prints the result.
func main() {
	prices := []int{6, 5, 9}
	fmt.Println(maxProfit(prices))
}
// maxProfit returns the best profit achievable from one buy followed by a
// later sell of a single share; it returns 0 when no profitable trade
// exists or fewer than two prices are given.
func maxProfit(prices []int) int {
	if len(prices) < 2 {
		return 0
	}
	best := 0
	lowest := prices[0]
	for _, p := range prices {
		switch {
		case p < lowest:
			// New minimum: a future sale would buy at this price.
			lowest = p
		case p-lowest > best:
			best = p - lowest
		}
	}
	return best
}
// max returns the larger of x and y.
func max(x, y int) int {
	if x >= y {
		return x
	}
	return y
}

// min returns the smaller of x and y.
func min(x, y int) int {
	if x < y {
		return x
	}
	return y
}
|
/*
* Copyright (c) 2020 - present Kurtosis Technologies LLC.
* All Rights Reserved.
*/
package services
const (
	// MockServicePort is the fake port MockService pretends to listen on.
	MockServicePort = 1000
)
// Mock service, for testing purposes only: it reports itself available
// only from the Nth IsAvailable call onwards.
type MockService struct {
	serviceId ServiceID
	ipAddr string
	// For testing, the service will report as available on the Nth call to IsAvailable
	becomesAvailableOnCheck int
	// Number of calls to IsAvailable that have happened
	callsToIsAvailable int
}
// NewMockService builds a MockService that starts reporting available on
// the becomesAvailableOnCheck-th call to IsAvailable.
func NewMockService(serviceId ServiceID, ipAddr string, becomesAvailableOnCheck int) *MockService {
	svc := &MockService{
		serviceId:               serviceId,
		ipAddr:                  ipAddr,
		becomesAvailableOnCheck: becomesAvailableOnCheck,
	}
	// callsToIsAvailable starts at its zero value.
	return svc
}
// GetServiceID returns the identifier this mock was created with.
func (m MockService) GetServiceID() ServiceID {
	return m.serviceId
}
// GetIPAddress returns the IP address this mock was created with.
func (m MockService) GetIPAddress() string {
	return m.ipAddr
}
// IsAvailable counts invocations and reports true once the configured
// becomesAvailableOnCheck threshold has been reached.
func (m *MockService) IsAvailable() bool {
	m.callsToIsAvailable++
	return m.callsToIsAvailable >= m.becomesAvailableOnCheck
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"testing"
)
// sayHi prints a greeting for name to stdout.
func sayHi(name string) {
	fmt.Println("hi, " + name + "!")
}
// Example_sayHi demonstrates sayHi; `go test` asserts the Output block.
func Example_sayHi() {
	sayHi("winston")
	sayHi("sadie")
	// Output:
	// hi, winston!
	// hi, sadie!
}
// Test_sayHi captures stdout through an os.Pipe and checks sayHi's exact
// output.
func Test_sayHi(t *testing.T) {
	r, w, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	// Redirect stdout to the pipe's write end for the duration of the call,
	// restoring it on exit.
	stdout := os.Stdout
	os.Stdout = w
	defer func() {
		os.Stdout = stdout
	}()
	sayHi("boy")
	// Close the writer so ReadAll below observes EOF.
	w.Close()
	out, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatal(err)
	}
	expected := "hi, boy!\n"
	if string(out) != expected {
		t.Errorf("expected: %q, got %q", expected, string(out))
	}
}
|
package web
import (
"net/http"
"github.com/sirupsen/logrus"
)
// healthChecker serves liveness/readiness-style HTTP endpoints.
type healthChecker struct {
	logger logrus.FieldLogger
}
// healthCheck reports if the server is ready to process traffic. It does not validate downstream dependencies.
// It always responds 200 with body "OK".
func (hc *healthChecker) healthCheck(w http.ResponseWriter, req *http.Request) {
	hc.logger.Info("healthCheck")
	_, _ = w.Write([]byte("OK"))
}
// deepCheck reports on the status of downstream dependencies. It should be non-blocking (ie, report on the status
// of watchdogs, rather than make roundtrips). For now, it just reports that it's always happy.
func (hc *healthChecker) deepCheck(w http.ResponseWriter, req *http.Request) {
	hc.logger.Info("deepCheck")
	_, _ = w.Write([]byte("OK"))
}
|
package objectstorage
import (
"github.com/go-chi/chi"
"github.com/hardstylez72/bblog/internal/objectstorage"
)
// objectStorageController exposes HTTP endpoints backed by an object
// storage implementation.
type objectStorageController struct {
	objectStorage objectstorage.Storage
}
// NewObjectStorageController wires the given storage backend into a new
// controller.
func NewObjectStorageController(objectStorage objectstorage.Storage) *objectStorageController {
	controller := objectStorageController{objectStorage: objectStorage}
	return &controller
}
// Mount registers the controller's routes on the given chi router.
func (c objectStorageController) Mount(r chi.Router) {
	r.Post("/v1/object-storage/upload", c.UploadObjectHandler)
}
|
package tude
// Circle is a circle described by a center point and a radius.
type Circle struct {
	center *Point
	radius float64
}
// Radius reports the circle's radius.
func (c *Circle) Radius() float64 { return c.radius }

// Center reports the circle's center point.
func (c *Circle) Center() *Point { return c.center }

// Contains reports whether point lies on or inside the circle.
func (c *Circle) Contains(point *Point) bool {
	d := Distance(point, c.center)
	return d <= c.radius
}

// NewCircle builds a Circle from a center point and radius.
func NewCircle(center *Point, radius float64) *Circle {
	return &Circle{center: center, radius: radius}
}
|
package handlers
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/valyala/fasthttp"
"github.com/authelia/authelia/v4/internal/authentication"
"github.com/authelia/authelia/v4/internal/mocks"
"github.com/authelia/authelia/v4/internal/session"
)
// TestAuthzImplementation checks the stringer for a known implementation
// value and for an out-of-range one.
func TestAuthzImplementation(t *testing.T) {
	assert.Equal(t, "Legacy", AuthzImplLegacy.String())
	// An out-of-range value stringifies to the empty string.
	assert.Equal(t, "", AuthzImplementation(-1).String())
}
func TestFriendlyMethod(t *testing.T) {
assert.Equal(t, "unknown", friendlyMethod(""))
assert.Equal(t, "GET", friendlyMethod(fasthttp.MethodGet))
}
// TestGenerateVerifySessionHasUpToDateProfileTraceLogs drives the trace
// logger across combinations of matching and mismatched display names,
// groups and emails; it only checks that logging completes without panic.
func TestGenerateVerifySessionHasUpToDateProfileTraceLogs(t *testing.T) {
	mock := mocks.NewMockAutheliaCtx(t)
	// Everything differs between session and authentication details.
	generateVerifySessionHasUpToDateProfileTraceLogs(mock.Ctx, &session.UserSession{Username: "john", DisplayName: "example", Groups: []string{"abc"}, Emails: []string{"user@example.com", "test@example.com"}}, &authentication.UserDetails{Username: "john", Groups: []string{"123"}, DisplayName: "notexample", Emails: []string{"notuser@example.com"}})
	// Fully matching profiles.
	generateVerifySessionHasUpToDateProfileTraceLogs(mock.Ctx, &session.UserSession{Username: "john", DisplayName: "example"}, &authentication.UserDetails{Username: "john", DisplayName: "example"})
	// Emails present only on one side, in either direction.
	generateVerifySessionHasUpToDateProfileTraceLogs(mock.Ctx, &session.UserSession{Username: "john", DisplayName: "example", Emails: []string{"abc@example.com"}}, &authentication.UserDetails{Username: "john", DisplayName: "example"})
	generateVerifySessionHasUpToDateProfileTraceLogs(mock.Ctx, &session.UserSession{Username: "john", DisplayName: "example"}, &authentication.UserDetails{Username: "john", DisplayName: "example", Emails: []string{"abc@example.com"}})
}
|
package router
import "context"
// Endpoint is the generic handler signature used by the router: it
// receives a decoded request value and returns the response value or an
// error.
type Endpoint func(ctx context.Context, request interface{}) (response interface{}, err error)
|
package app
import (
"context"
)
// shakespeareKey is a custom string type to ensure no collisions with
// other kinds of keys in the shared LRU cache.
type shakespeareKey string
// GetShakespeareText translates text into Shakespearean English, serving
// repeated inputs from the LRU cache to avoid redundant converter calls.
func (a *app) GetShakespeareText(ctx context.Context, text string) (string, error) {
	key := shakespeareKey(text)
	// Cache hit: return the stored translation directly.
	if cached, ok := a.shakespeareLRU.Get(key); ok {
		return cached.(string), nil
	}
	translated, err := a.shakespeare.ConvertText(ctx, text)
	if err != nil {
		return "", err
	}
	a.shakespeareLRU.Add(key, translated)
	return translated, nil
}
|
package hue
import (
"bytes"
"encoding/json"
"github.com/ermos/hue/internal/logger"
)
// Bridge fetches /config from the bridge and decodes the JSON payload
// into the bridge's Config field.
func (b *BridgeFetch) Bridge() error {
	body, err := b.bridge.get("/config")
	if err != nil {
		return logger.Error(err)
	}
	if err := json.NewDecoder(bytes.NewReader(body)).Decode(&b.bridge.Config); err != nil {
		return logger.Error(err)
	}
	return nil
}
|
// SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
// SPDX-License-Identifier: MIT
package rtcp
import (
"testing"
)
// TestPrint checks stringify's human-readable rendering of every RTCP
// packet type against golden strings; a mismatch fails with both values.
func TestPrint(t *testing.T) {
	type Tests struct {
		packet Packet
		expected string
	}
	tests := []Tests{
		// Extended report carrying one block of each XR report type.
		{
			&ExtendedReport{
				SenderSSRC: 0x01020304,
				Reports: []ReportBlock{
					&LossRLEReportBlock{
						XRHeader: XRHeader{
							BlockType: LossRLEReportBlockType,
						},
						SSRC: 0x12345689,
						BeginSeq: 5,
						EndSeq: 12,
						Chunks: []Chunk{
							Chunk(0x4006),
							Chunk(0x0006),
							Chunk(0x8765),
							Chunk(0x0000),
						},
					},
					&DuplicateRLEReportBlock{
						XRHeader: XRHeader{
							BlockType: DuplicateRLEReportBlockType,
						},
						SSRC: 0x12345689,
						BeginSeq: 5,
						EndSeq: 12,
						Chunks: []Chunk{
							Chunk(0x4123),
							Chunk(0x3FFF),
							Chunk(0xFFFF),
							Chunk(0x0000),
						},
					},
					&PacketReceiptTimesReportBlock{
						XRHeader: XRHeader{
							BlockType: PacketReceiptTimesReportBlockType,
						},
						SSRC: 0x98765432,
						BeginSeq: 15432,
						EndSeq: 15577,
						ReceiptTime: []uint32{
							0x11111111,
							0x22222222,
							0x33333333,
							0x44444444,
							0x55555555,
						},
					},
					&ReceiverReferenceTimeReportBlock{
						XRHeader: XRHeader{
							BlockType: ReceiverReferenceTimeReportBlockType,
						},
						NTPTimestamp: 0x0102030405060708,
					},
					&DLRRReportBlock{
						XRHeader: XRHeader{
							BlockType: DLRRReportBlockType,
						},
						Reports: []DLRRReport{
							{
								SSRC: 0x88888888,
								LastRR: 0x12345678,
								DLRR: 0x99999999,
							},
							{
								SSRC: 0x09090909,
								LastRR: 0x12345678,
								DLRR: 0x99999999,
							},
							{
								SSRC: 0x11223344,
								LastRR: 0x12345678,
								DLRR: 0x99999999,
							},
						},
					},
					// Positional (unkeyed) literals below mirror the field
					// order of the block structs.
					&StatisticsSummaryReportBlock{
						XRHeader{
							BlockType: StatisticsSummaryReportBlockType,
						},
						true, true, true, ToHIPv4,
						0xFEDCBA98,
						0x1234, 0x5678,
						0x11111111,
						0x22222222,
						0x33333333,
						0x44444444,
						0x55555555,
						0x66666666,
						0x01, 0x02, 0x03, 0x04,
					},
					&VoIPMetricsReportBlock{
						XRHeader{
							BlockType: VoIPMetricsReportBlockType,
						},
						0x89ABCDEF,
						0x05, 0x06, 0x07, 0x08,
						0x1111, 0x2222, 0x3333, 0x4444,
						0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99,
						0x00,
						0x1122, 0x3344, 0x5566,
					},
				},
			},
			"rtcp.ExtendedReport:\n" +
				"\tSenderSSRC: 0x1020304\n" +
				"\tReports:\n" +
				"\t\t0 (rtcp.LossRLEReportBlock):\n" +
				"\t\t\tXRHeader:\n" +
				"\t\t\t\tBlockType: [LossRLEReportBlockType]\n" +
				"\t\t\t\tTypeSpecific: 0x0\n" +
				"\t\t\t\tBlockLength: 0\n" +
				"\t\t\tT: 0\n" +
				"\t\t\tSSRC: 0x12345689\n" +
				"\t\t\tBeginSeq: 5\n" +
				"\t\t\tEndSeq: 12\n" +
				"\t\t\tChunks:\n" +
				"\t\t\t\t0: [[RunLength type=1, length=6]]\n" +
				"\t\t\t\t1: [[RunLength type=0, length=6]]\n" +
				"\t\t\t\t2: [[BitVector 0b000011101100101]]\n" +
				"\t\t\t\t3: [[TerminatingNull]]\n" +
				"\t\t1 (rtcp.DuplicateRLEReportBlock):\n" +
				"\t\t\tXRHeader:\n" +
				"\t\t\t\tBlockType: [DuplicateRLEReportBlockType]\n" +
				"\t\t\t\tTypeSpecific: 0x0\n" +
				"\t\t\t\tBlockLength: 0\n" +
				"\t\t\tT: 0\n" +
				"\t\t\tSSRC: 0x12345689\n" +
				"\t\t\tBeginSeq: 5\n" +
				"\t\t\tEndSeq: 12\n" +
				"\t\t\tChunks:\n" +
				"\t\t\t\t0: [[RunLength type=1, length=291]]\n" +
				"\t\t\t\t1: [[RunLength type=0, length=16383]]\n" +
				"\t\t\t\t2: [[BitVector 0b111111111111111]]\n" +
				"\t\t\t\t3: [[TerminatingNull]]\n" +
				"\t\t2 (rtcp.PacketReceiptTimesReportBlock):\n" +
				"\t\t\tXRHeader:\n" +
				"\t\t\t\tBlockType: [PacketReceiptTimesReportBlockType]\n" +
				"\t\t\t\tTypeSpecific: 0x0\n" +
				"\t\t\t\tBlockLength: 0\n" +
				"\t\t\tT: 0\n" +
				"\t\t\tSSRC: 0x98765432\n" +
				"\t\t\tBeginSeq: 15432\n" +
				"\t\t\tEndSeq: 15577\n" +
				"\t\t\tReceiptTime: [286331153 572662306 858993459 1145324612 1431655765]\n" +
				"\t\t3 (rtcp.ReceiverReferenceTimeReportBlock):\n" +
				"\t\t\tXRHeader:\n" +
				"\t\t\t\tBlockType: [ReceiverReferenceTimeReportBlockType]\n" +
				"\t\t\t\tTypeSpecific: 0x0\n" +
				"\t\t\t\tBlockLength: 0\n" +
				"\t\t\tNTPTimestamp: 72623859790382856\n" +
				"\t\t4 (rtcp.DLRRReportBlock):\n" +
				"\t\t\tXRHeader:\n" +
				"\t\t\t\tBlockType: [DLRRReportBlockType]\n" +
				"\t\t\t\tTypeSpecific: 0x0\n" +
				"\t\t\t\tBlockLength: 0\n" +
				"\t\t\tReports:\n" +
				"\t\t\t\t0:\n" +
				"\t\t\t\t\tSSRC: 0x88888888\n" +
				"\t\t\t\t\tLastRR: 305419896\n" +
				"\t\t\t\t\tDLRR: 2576980377\n" +
				"\t\t\t\t1:\n" +
				"\t\t\t\t\tSSRC: 0x9090909\n" +
				"\t\t\t\t\tLastRR: 305419896\n" +
				"\t\t\t\t\tDLRR: 2576980377\n" +
				"\t\t\t\t2:\n" +
				"\t\t\t\t\tSSRC: 0x11223344\n" +
				"\t\t\t\t\tLastRR: 305419896\n" +
				"\t\t\t\t\tDLRR: 2576980377\n" +
				"\t\t5 (rtcp.StatisticsSummaryReportBlock):\n" +
				"\t\t\tXRHeader:\n" +
				"\t\t\t\tBlockType: [StatisticsSummaryReportBlockType]\n" +
				"\t\t\t\tTypeSpecific: 0x0\n" +
				"\t\t\t\tBlockLength: 0\n" +
				"\t\t\tLossReports: true\n" +
				"\t\t\tDuplicateReports: true\n" +
				"\t\t\tJitterReports: true\n" +
				"\t\t\tTTLorHopLimit: [[ToH = IPv4]]\n" +
				"\t\t\tSSRC: 0xFEDCBA98\n" +
				"\t\t\tBeginSeq: 4660\n" +
				"\t\t\tEndSeq: 22136\n" +
				"\t\t\tLostPackets: 286331153\n" +
				"\t\t\tDupPackets: 572662306\n" +
				"\t\t\tMinJitter: 858993459\n" +
				"\t\t\tMaxJitter: 1145324612\n" +
				"\t\t\tMeanJitter: 1431655765\n" +
				"\t\t\tDevJitter: 1717986918\n" +
				"\t\t\tMinTTLOrHL: 1\n" +
				"\t\t\tMaxTTLOrHL: 2\n" +
				"\t\t\tMeanTTLOrHL: 3\n" +
				"\t\t\tDevTTLOrHL: 4\n" +
				"\t\t6 (rtcp.VoIPMetricsReportBlock):\n" +
				"\t\t\tXRHeader:\n" +
				"\t\t\t\tBlockType: [VoIPMetricsReportBlockType]\n" +
				"\t\t\t\tTypeSpecific: 0x0\n" +
				"\t\t\t\tBlockLength: 0\n" +
				"\t\t\tSSRC: 0x89ABCDEF\n" +
				"\t\t\tLossRate: 5\n" +
				"\t\t\tDiscardRate: 6\n" +
				"\t\t\tBurstDensity: 7\n" +
				"\t\t\tGapDensity: 8\n" +
				"\t\t\tBurstDuration: 4369\n" +
				"\t\t\tGapDuration: 8738\n" +
				"\t\t\tRoundTripDelay: 13107\n" +
				"\t\t\tEndSystemDelay: 17476\n" +
				"\t\t\tSignalLevel: 17\n" +
				"\t\t\tNoiseLevel: 34\n" +
				"\t\t\tRERL: 51\n" +
				"\t\t\tGmin: 68\n" +
				"\t\t\tRFactor: 85\n" +
				"\t\t\tExtRFactor: 102\n" +
				"\t\t\tMOSLQ: 119\n" +
				"\t\t\tMOSCQ: 136\n" +
				"\t\t\tRXConfig: 153\n" +
				"\t\t\tJBNominal: 4386\n" +
				"\t\t\tJBMaximum: 13124\n" +
				"\t\t\tJBAbsMax: 21862\n",
		},
		// Full intra request with two FIR entries.
		{
			&FullIntraRequest{
				SenderSSRC: 0x0,
				MediaSSRC: 0x4bc4fcb4,
				FIR: []FIREntry{
					{
						SSRC: 0x12345678,
						SequenceNumber: 0x42,
					},
					{
						SSRC: 0x98765432,
						SequenceNumber: 0x57,
					},
				},
			},
			"rtcp.FullIntraRequest:\n" +
				"\tSenderSSRC: 0\n" +
				"\tMediaSSRC: 1271200948\n" +
				"\tFIR:\n" +
				"\t\t0:\n" +
				"\t\t\tSSRC: 305419896\n" +
				"\t\t\tSequenceNumber: 66\n" +
				"\t\t1:\n" +
				"\t\t\tSSRC: 2557891634\n" +
				"\t\t\tSequenceNumber: 87\n",
		},
		{
			&Goodbye{
				Sources: []uint32{
					0x01020304,
					0x05060708,
				},
				Reason: "because",
			},
			"rtcp.Goodbye:\n" +
				"\tSources: [16909060 84281096]\n" +
				"\tReason: because\n",
		},
		{
			&ReceiverReport{
				SSRC: 0x902f9e2e,
				Reports: []ReceptionReport{{
					SSRC: 0xbc5e9a40,
					FractionLost: 0,
					TotalLost: 0,
					LastSequenceNumber: 0x46e1,
					Jitter: 273,
					LastSenderReport: 0x9f36432,
					Delay: 150137,
				}},
				ProfileExtensions: []byte{},
			},
			"rtcp.ReceiverReport:\n" +
				"\tSSRC: 2419039790\n" +
				"\tReports:\n" +
				"\t\t0:\n" +
				"\t\t\tSSRC: 3160316480\n" +
				"\t\t\tFractionLost: 0\n" +
				"\t\t\tTotalLost: 0\n" +
				"\t\t\tLastSequenceNumber: 18145\n" +
				"\t\t\tJitter: 273\n" +
				"\t\t\tLastSenderReport: 166945842\n" +
				"\t\t\tDelay: 150137\n" +
				"\tProfileExtensions: []\n",
		},
		{
			NewCNAMESourceDescription(0x902f9e2e, "{9c00eb92-1afb-9d49-a47d-91f64eee69f5}"),
			"rtcp.SourceDescription:\n" +
				"\tChunks:\n" +
				"\t\t0:\n" +
				"\t\t\tSource: 2419039790\n" +
				"\t\t\tItems:\n" +
				"\t\t\t\t0:\n" +
				"\t\t\t\t\tType: [CNAME]\n" +
				"\t\t\t\t\tText: {9c00eb92-1afb-9d49-a47d-91f64eee69f5}\n",
		},
		{
			&PictureLossIndication{
				SenderSSRC: 0x902f9e2e,
				MediaSSRC: 0x902f9e2e,
			},
			"rtcp.PictureLossIndication:\n" +
				"\tSenderSSRC: 2419039790\n" +
				"\tMediaSSRC: 2419039790\n",
		},
		{
			&RapidResynchronizationRequest{
				SenderSSRC: 0x902f9e2e,
				MediaSSRC: 0x902f9e2e,
			},
			"rtcp.RapidResynchronizationRequest:\n" +
				"\tSenderSSRC: 2419039790\n" +
				"\tMediaSSRC: 2419039790\n",
		},
		{
			&ReceiverEstimatedMaximumBitrate{
				SenderSSRC: 1,
				Bitrate: 8927168,
				SSRCs: []uint32{1215622422},
			},
			"rtcp.ReceiverEstimatedMaximumBitrate:\n" +
				"\tSenderSSRC: 1\n" +
				"\tBitrate: 8.927168e+06\n" +
				"\tSSRCs: [1215622422]\n",
		},
		{
			&SenderReport{
				SSRC: 0x902f9e2e,
				NTPTime: 0xda8bd1fcdddda05a,
				RTPTime: 0xaaf4edd5,
				PacketCount: 1,
				OctetCount: 2,
				Reports: []ReceptionReport{{
					SSRC: 0xbc5e9a40,
					FractionLost: 0,
					TotalLost: 0,
					LastSequenceNumber: 0x46e1,
					Jitter: 273,
					LastSenderReport: 0x9f36432,
					Delay: 150137,
				}},
				ProfileExtensions: []byte{
					0x81, 0xca, 0x0, 0x6,
					0x2b, 0x7e, 0xc0, 0xc5,
					0x1, 0x10, 0x4c, 0x63,
					0x49, 0x66, 0x7a, 0x58,
					0x6f, 0x6e, 0x44, 0x6f,
					0x72, 0x64, 0x53, 0x65,
					0x57, 0x36, 0x0, 0x0,
				},
			},
			"rtcp.SenderReport:\n" +
				"\tSSRC: 2419039790\n" +
				"\tNTPTime: 15747911406015324250\n" +
				"\tRTPTime: 2868178389\n" +
				"\tPacketCount: 1\n" +
				"\tOctetCount: 2\n" +
				"\tReports:\n" +
				"\t\t0:\n" +
				"\t\t\tSSRC: 3160316480\n" +
				"\t\t\tFractionLost: 0\n" +
				"\t\t\tTotalLost: 0\n" +
				"\t\t\tLastSequenceNumber: 18145\n" +
				"\t\t\tJitter: 273\n" +
				"\t\t\tLastSenderReport: 166945842\n" +
				"\t\t\tDelay: 150137\n" +
				"\tProfileExtensions: [129 202 0 6 43 126 192 197 1 16 76 99 73 102 122 88 111 110 68 111 114 100 83 101 87 54 0 0]\n",
		},
		{
			&SliceLossIndication{
				SenderSSRC: 0x902f9e2e,
				MediaSSRC: 0x902f9e2e,
				SLI: []SLIEntry{{0xaaa, 0, 0x2C}},
			},
			"rtcp.SliceLossIndication:\n" +
				"\tSenderSSRC: 2419039790\n" +
				"\tMediaSSRC: 2419039790\n" +
				"\tSLI:\n" +
				"\t\t0:\n" +
				"\t\t\tFirst: 2730\n" +
				"\t\t\tNumber: 0\n" +
				"\t\t\tPicture: 44\n",
		},
		{
			&SourceDescription{
				Chunks: []SourceDescriptionChunk{
					{
						Source: 0x10000000,
						Items: []SourceDescriptionItem{
							{
								Type: SDESCNAME,
								Text: "A",
							},
							{
								Type: SDESPhone,
								Text: "B",
							},
						},
					},
				},
			},
			"rtcp.SourceDescription:\n" +
				"\tChunks:\n" +
				"\t\t0:\n" +
				"\t\t\tSource: 268435456\n" +
				"\t\t\tItems:\n" +
				"\t\t\t\t0:\n" +
				"\t\t\t\t\tType: [CNAME]\n" +
				"\t\t\t\t\tText: A\n" +
				"\t\t\t\t1:\n" +
				"\t\t\t\t\tType: [PHONE]\n" +
				"\t\t\t\t\tText: B\n",
		},
		{
			&TransportLayerCC{
				Header: Header{
					Padding: true,
					Count: FormatTCC,
					Type: TypeTransportSpecificFeedback,
					Length: 5,
				},
				SenderSSRC: 4195875351,
				MediaSSRC: 1124282272,
				BaseSequenceNumber: 153,
				PacketStatusCount: 1,
				ReferenceTime: 4057090,
				FbPktCount: 23,
				// 0b00100000, 0b00000001
				PacketChunks: []PacketStatusChunk{
					&RunLengthChunk{
						Type: TypeTCCRunLengthChunk,
						PacketStatusSymbol: TypeTCCPacketReceivedSmallDelta,
						RunLength: 1,
					},
				},
				// 0b10010100
				RecvDeltas: []*RecvDelta{
					{
						Type: TypeTCCPacketReceivedSmallDelta,
						Delta: 37000,
					},
				},
			},
			"rtcp.TransportLayerCC:\n" +
				"\tHeader:\n" +
				"\t\tPadding: true\n" +
				"\t\tCount: 15\n" +
				"\t\tType: [TSFB]\n" +
				"\t\tLength: 5\n" +
				"\tSenderSSRC: 4195875351\n" +
				"\tMediaSSRC: 1124282272\n" +
				"\tBaseSequenceNumber: 153\n" +
				"\tPacketStatusCount: 1\n" +
				"\tReferenceTime: 4057090\n" +
				"\tFbPktCount: 23\n" +
				"\tPacketChunks:\n" +
				"\t\t0 (rtcp.RunLengthChunk):\n" +
				"\t\t\tPacketStatusChunk: <nil>\n" +
				"\t\t\tType: 0\n" +
				"\t\t\tPacketStatusSymbol: 1\n" +
				"\t\t\tRunLength: 1\n" +
				"\tRecvDeltas:\n" +
				"\t\t0:\n" +
				"\t\t\tType: 1\n" +
				"\t\t\tDelta: 37000\n",
		},
		{
			&TransportLayerNack{
				SenderSSRC: 0x902f9e2e,
				MediaSSRC: 0x902f9e2e,
				Nacks: []NackPair{{1, 0xAA}, {1034, 0x05}},
			},
			"rtcp.TransportLayerNack:\n" +
				"\tSenderSSRC: 2419039790\n" +
				"\tMediaSSRC: 2419039790\n" +
				"\tNacks:\n" +
				"\t\t0:\n" +
				"\t\t\tPacketID: 1\n" +
				"\t\t\tLostPackets: 170\n" +
				"\t\t1:\n" +
				"\t\t\tPacketID: 1034\n" +
				"\t\t\tLostPackets: 5\n",
		},
	}
	for i, test := range tests {
		actual := stringify(test.packet)
		if actual != test.expected {
			t.Fatalf("Error stringifying test %d\nExpected:\n%s\n\nGot:\n%s\n\n", i, test.expected, actual)
		}
	}
}
|
package server
import (
"fmt"
"net"
"net/http"
"sync"
"github.com/appootb/substratum/gateway"
"github.com/appootb/substratum/logger"
"github.com/appootb/substratum/rpc"
"github.com/appootb/substratum/util/iphelper"
prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
)
// ServeMux bundles a gRPC server with its HTTP gateway, including the
// listeners and muxes they serve on.
type ServeMux struct {
	// metrics mounts a Prometheus /metrics handler when true.
	metrics bool
	// connAddr is the host:port peers should dial for gRPC.
	connAddr string
	rpcListener net.Listener
	gatewayListener net.Listener
	rpcSrv *grpc.Server
	httpMux *http.ServeMux
	gatewayMux *runtime.ServeMux
}
// NewServeMux builds a combined gRPC + HTTP-gateway mux, opening TCP
// listeners on rpcPort and gatewayPort. When metrics is true a Prometheus
// /metrics handler is mounted on the HTTP mux; all other routes fall
// through to the gateway.
func NewServeMux(rpcPort, gatewayPort uint16, metrics bool) (*ServeMux, error) {
	var err error
	m := &ServeMux{
		rpcSrv: rpc.New(
			rpc.NewOptions(
				rpc.WithDefaultKeepaliveOption(),
				rpc.WithDefaultUnaryInterceptors(),
				rpc.WithDefaultStreamInterceptors(),
			),
		),
		metrics:    metrics,
		httpMux:    http.NewServeMux(),
		gatewayMux: gateway.New(gateway.DefaultOptions),
	}
	m.connAddr = fmt.Sprintf("%s:%d", iphelper.LocalIP(), rpcPort)
	m.rpcListener, err = net.Listen("tcp", fmt.Sprintf(":%d", rpcPort))
	if err != nil {
		return nil, err
	}
	m.gatewayListener, err = net.Listen("tcp", fmt.Sprintf(":%d", gatewayPort))
	if err != nil {
		// Don't leak the RPC listener opened above (was leaked before).
		_ = m.rpcListener.Close()
		return nil, err
	}
	m.httpMux.Handle("/", m.gatewayMux)
	if metrics {
		m.httpMux.Handle("/metrics", promhttp.Handler())
	}
	return m, nil
}
// RPCServer returns the embedded gRPC server, e.g. for service registration.
func (m *ServeMux) RPCServer() *grpc.Server {
	return m.rpcSrv
}

// HTTPMux returns the root HTTP mux.
func (m *ServeMux) HTTPMux() *http.ServeMux {
	return m.httpMux
}

// GatewayMux returns the grpc-gateway mux, e.g. for handler registration.
func (m *ServeMux) GatewayMux() *runtime.ServeMux {
	return m.gatewayMux
}

// Handle registers handler for pattern on the HTTP mux.
func (m *ServeMux) Handle(pattern string, handler http.Handler) {
	m.httpMux.Handle(pattern, handler)
}

// HandleFunc registers handler for pattern on the HTTP mux.
func (m *ServeMux) HandleFunc(pattern string, handler http.HandlerFunc) {
	m.httpMux.HandleFunc(pattern, handler)
}
// Serve starts the gRPC server and the HTTP gateway on their listeners and
// blocks until both have stopped serving. Serve errors are logged, not returned.
//
// Fix: the original called wg.Done() at the top of each goroutine, so
// wg.Wait() returned as soon as both goroutines started and Serve() exited
// while both servers were still running (letting the process terminate).
// Done is now deferred until each server loop actually finishes.
func (m *ServeMux) Serve() {
	if m.metrics {
		prometheus.Register(m.rpcSrv)
	}
	wg := sync.WaitGroup{}
	wg.Add(2)
	go func() {
		defer wg.Done()
		err := m.rpcSrv.Serve(m.rpcListener)
		if err != nil {
			logger.Error("rpc_server", logger.Content{
				"server": "gRPC",
				"addr":   m.rpcListener.Addr(),
				"err":    err.Error(),
			})
		}
	}()
	go func() {
		defer wg.Done()
		err := http.Serve(m.gatewayListener, m.httpMux)
		if err != nil {
			logger.Error("gateway_server", logger.Content{
				"server": "gateway",
				"addr":   m.gatewayListener.Addr(),
				"err":    err.Error(),
			})
		}
	}()
	wg.Wait()
}
// ConnAddr returns the dialable address (local IP + gRPC port) of this mux.
func (m *ServeMux) ConnAddr() string {
	return m.connAddr
}
|
package mock
import (
"os"
"testing"
"github.com/10gen/realm-cli/internal/cli/user"
"github.com/10gen/realm-cli/internal/cloud/realm"
u "github.com/10gen/realm-cli/internal/utils/test"
"github.com/10gen/realm-cli/internal/utils/test/assert"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// NewProfile returns a new CLI profile with a random name
// NewProfile returns a new CLI profile with a random name
// (a fresh ObjectID hex string), failing the test on error.
func NewProfile(t *testing.T) *user.Profile {
	t.Helper()
	profile, err := user.NewProfile(primitive.NewObjectID().Hex())
	assert.Nil(t, err)
	return profile
}
// NewProfileFromTmpDir returns a new CLI profile with a random name
// and a current working directory based on a temporary directory
// along with the associated cleanup function
// NewProfileFromTmpDir returns a new CLI profile whose working directory is a
// fresh temporary directory (also installed as the home dir), plus a cleanup
// function that restores the home dir and removes the temp dir.
func NewProfileFromTmpDir(t *testing.T, name string) (*user.Profile, func()) {
	t.Helper()
	tmpDir, teardown, err := u.NewTempDir(name)
	assert.Nil(t, err)
	_, resetHomeDir := u.SetupHomeDir(tmpDir)
	profile := NewProfile(t)
	profile.WorkingDirectory = tmpDir
	cleanup := func() {
		resetHomeDir()
		teardown()
	}
	return profile, cleanup
}
// NewProfileFromWd returns a new CLI profile with a random name
// and the current working directory
// NewProfileFromWd returns a new CLI profile with a random name whose working
// directory is the process's current working directory.
func NewProfileFromWd(t *testing.T) *user.Profile {
	t.Helper()
	profile := NewProfile(t)
	wd, err := os.Getwd()
	assert.Nil(t, err)
	profile.WorkingDirectory = wd
	return profile
}
// NewProfileWithSession returns a new CLI profile with a session
// NewProfileWithSession returns a new CLI profile pre-configured with the
// test server's base URL and the given session's tokens.
// Fix: added the t.Helper() call for consistency with the other helpers in
// this file, so failures are attributed to the caller's line.
func NewProfileWithSession(t *testing.T, session realm.Session) *user.Profile {
	t.Helper()
	profile := NewProfile(t)
	profile.SetRealmBaseURL(u.RealmServerURL())
	profile.SetSession(user.Session{session.AccessToken, session.RefreshToken})
	return profile
}
|
package firewall
// ResourceLimiter bounds access to a pool of resources: Acquire takes a
// resource out of the pool and Release puts one back.
type ResourceLimiter interface {
	Acquire() interface{}
	Release(resource interface{})
}
// ChanResourceLimiter implements ResourceLimiter on top of a buffered channel.
// The pool starts empty: a resource must be Release()d before the first
// Acquire() can succeed.
// Fix: receivers renamed from the non-idiomatic `this` to `c`.
type ChanResourceLimiter struct {
	pool chan interface{}
}

// NewChanResourcePool returns a ChanResourceLimiter holding at most cap resources.
func NewChanResourcePool(cap int) *ChanResourceLimiter {
	return &ChanResourceLimiter{pool: make(chan interface{}, cap)}
}

// Acquire blocks until a resource is available and returns it.
func (c *ChanResourceLimiter) Acquire() interface{} {
	return <-c.pool
}

// Release returns a resource to the pool; it blocks if the pool is full.
func (c *ChanResourceLimiter) Release(resource interface{}) {
	c.pool <- resource
}
|
package main
import "testing"
var (
	// sa is the shared StringArray fixture mutated by the tests below.
	// Field layout inferred from usage: {size, capacity, backing slice} —
	// TODO confirm against StringArray's declaration.
	sa = StringArray{0, 10, []string{}}
)
// init resets the shared fixture before the tests run.
// NOTE(review): NewStringArray's exact contract (copy vs. reset) is defined
// elsewhere — confirm.
func init() {
	sa.NewStringArray(&StringArray{})
}
// TestAdd appends a batch of sample course codes and verifies the array
// reports itself as non-empty afterwards.
func TestAdd(t *testing.T) {
	courses := []string{"CST8101", "CST8110", "CST8215", "CST8300", "MAT8001", "empty"}
	for _, c := range courses {
		sa.Add(c)
	}
	if sa.IsEmpty() {
		t.Error("String Array Object Should Not Be Empty ")
	}
}
// TestAddAt rebuilds the fixture (size 10, same capacity and backing slice)
// and inserts at index 29 — presumably exercising insertion beyond the
// current size; TODO confirm AddAt's out-of-range contract.
func TestAddAt(t *testing.T) {
	capacity := sa.Capacity()
	sa = StringArray{10, capacity, sa.stringArray}
	sa.AddAt(29, "CST8108")
	if sa.IsEmpty() {
		t.Error("String Array Object Should Not Be Empty ")
	}
}
// TestClear empties the shared fixture.
// Fix: the *testing.T parameter was named `i`; renamed to the conventional `t`.
func TestClear(t *testing.T) {
	sa.clear()
}
|
package swift
import (
"fmt"
"io"
"github.com/ncw/swift"
"github.com/root-gg/utils"
"github.com/root-gg/plik/server/common"
"github.com/root-gg/plik/server/data"
)
// Ensure Swift Data Backend implements data.Backend interface
var _ data.Backend = (*Backend)(nil)
// Config describes configuration for Swift data backend
// Config describes configuration for Swift data backend.
// It embeds the swift connection parameters (auth URL, credentials, ...).
type Config struct {
	swift.Connection
	Container string // Swift container name
}
// NewConfig instantiate a new default configuration
// and override it with configuration passed as argument
// NewConfig instantiate a new default configuration
// (container "plik") and override it with configuration passed as argument.
func NewConfig(params map[string]interface{}) (config *Config) {
	config = new(Config)
	config.Container = "plik"
	utils.Assign(config, params)
	return
}
// Backend object
// Backend object holds the configuration and the (lazily authenticated)
// Swift connection; see auth().
type Backend struct {
	config *Config
	connection *swift.Connection
}
// NewBackend instantiate a new OpenSwift Data Backend
// from configuration passed as argument
// NewBackend instantiates a new OpenSwift data backend from the given
// configuration. The connection is established lazily on first use.
func NewBackend(config *Config) (b *Backend) {
	return &Backend{config: config}
}
// GetFile implementation for Swift Data Backend
// GetFile implementation for Swift Data Backend.
// The object is streamed through an io.Pipe: the download runs in a
// goroutine and any Swift error is delivered to the reader side via
// CloseWithError, so callers see it when reading.
func (b *Backend) GetFile(file *common.File) (reader io.ReadCloser, err error) {
	err = b.auth()
	if err != nil {
		return nil, err
	}
	reader, pipeWriter := io.Pipe()
	objectID := objectID(file)
	go func() {
		// Fourth argument true presumably enables hash checking — confirm
		// against the ncw/swift ObjectGet documentation.
		_, e := b.connection.ObjectGet(b.config.Container, objectID, pipeWriter, true, nil)
		defer func() { _ = pipeWriter.CloseWithError(e) }()
	}()
	// This does only very basic checking and basically always return nil, error will happen when reading from the reader
	return reader, nil
}
// AddFile implementation for Swift Data Backend
// AddFile implementation for Swift Data Backend.
// It streams fileReader into a new Swift object named after the upload/file IDs.
func (b *Backend) AddFile(file *common.File, fileReader io.Reader) (err error) {
	err = b.auth()
	if err != nil {
		return err
	}
	objectID := objectID(file)
	object, err := b.connection.ObjectCreate(b.config.Container, objectID, true, "", "", nil)
	if err != nil {
		// Fix: the original ignored this error and proceeded to io.Copy
		// on an invalid object handle.
		return err
	}
	_, err = io.Copy(object, fileReader)
	if err != nil {
		return err
	}
	// Close finalizes the upload and reports any remaining transfer error.
	return object.Close()
}
// RemoveFile implementation for Swift Data Backend
// RemoveFile implementation for Swift Data Backend.
// Deleting a file that no longer exists is treated as success.
func (b *Backend) RemoveFile(file *common.File) (err error) {
	if err = b.auth(); err != nil {
		return err
	}
	err = b.connection.ObjectDelete(b.config.Container, objectID(file))
	if err == swift.ObjectNotFound {
		// Ignore "file not found" errors
		return nil
	}
	return err
}
// objectID builds the Swift object name for a file: "<uploadID>.<fileID>".
func objectID(file *common.File) string {
	return fmt.Sprintf("%s.%s", file.UploadID, file.ID)
}
// auth establishes (or reuses) an authenticated Swift connection and makes
// sure the configured container exists. It is cheap when already authenticated.
func (b *Backend) auth() (err error) {
	// Reuse the existing connection if it is still authenticated.
	if b.connection != nil && b.connection.Authenticated() {
		return
	}
	connection := &b.config.Connection
	// Authenticate
	err = connection.Authenticate()
	if err != nil {
		// Fix: corrected the "autenticate" typo in the error message.
		return fmt.Errorf("unable to authenticate : %s", err)
	}
	b.connection = connection
	// Create container (idempotent: succeeds if it already exists).
	err = b.connection.ContainerCreate(b.config.Container, nil)
	if err != nil {
		return err
	}
	return nil
}
|
package targets
import (
"os"
"../effects"
)
import . "../defs"
// Supported output assembler syntaxes.
const (
	SYNTAX_WLA_DX = 0 // WLA-DX assembler
	SYNTAX_GAS_68K = 1 // GNU as, 68k flavor
)
// ICodeGenerator emits the various music-data sections to an output file.
// Int-returning methods presumably report a size or count — confirm at the
// implementations.
type ICodeGenerator interface {
	OutputCallbacks(outFile *os.File) int
	OutputChannelData(outFile *os.File) int
	OutputEffectFlags(outFile *os.File)
	OutputPatterns(outFile *os.File) int
	OutputTable(outFile *os.File, tblName string, effMap *effects.EffectMap, canLoop bool, scaling int, loopDelim int) int
}
// CodeGenerator holds state common to all syntax-specific generators.
type CodeGenerator struct {
	itarget ITarget
}

// CodeGeneratorWla generates WLA-DX syntax output.
type CodeGeneratorWla struct {
	CodeGenerator
}

// CodeGeneratorGas68k generates GNU-as 68k syntax output.
type CodeGeneratorGas68k struct {
	CodeGenerator
}
func NewCodeGenerator(cgID int, itarget ITarget) ICodeGenerator {
var cg ICodeGenerator = ICodeGenerator(nil)
switch cgID {
case SYNTAX_WLA_DX:
cg = &CodeGeneratorWla{CodeGenerator: CodeGenerator{itarget}}
case SYNTAX_GAS_68K:
cg = &CodeGeneratorGas68k{CodeGenerator: CodeGenerator{itarget}}
}
return cg
} |
package resources
// checkConnection panics if the package-level connection has not been
// initialized before use.
func checkConnection() {
	if connection == nil {
		// Fix: corrected the "resouces" typo in the panic message.
		panic("Connection is not initialized in resources package")
	}
}
|
package initDemo
import (
"github.com/cyrilpanicker/golang-snippets/testPackages/package1"
_ "github.com/cyrilpanicker/golang-snippets/testPackages/package2"
)
// Run demonstrates package imports: package2 is imported for its side
// effects only (blank import), package1 is called directly.
func Run(){
	package1.Package1Function()
}
|
package user_characters
import (
"Golang-API-Game/pkg/repository"
"database/sql"
"log"
)
//UserCharacter table data
// UserCharacter mirrors a row of the user_characters table.
type UserCharacter struct {
	UserID string
	UserCharacterID string
	CharacterID string
}

// User pairs a user id with an operation result string.
type User struct {
	UserID string
	Result string
}

// Character holds a character name (unexported field; not scanned from SQL here).
type Character struct {
	name string
}
//Insert table data
// Insert adds a row to user_characters using a prepared statement
// (translated from the original comment: "prepare is slightly faster").
func Insert(userID, userCharacterID, characterID string) error {
	stmt, err := repository.DB.Prepare("INSERT INTO user_characters(user_id, user_character_id, character_id) VALUES(?, ?, ?)")
	if err != nil {
		return err
	}
	// Fix: the prepared statement was never closed, leaking a server-side handle.
	defer stmt.Close()
	_, err = stmt.Exec(userID, userCharacterID, characterID)
	return err
}
// SelectByPrimaryKeuser Get character_id,character_id conditon:userID
// SelectByPrimaryKey returns the user_characters row for userID,
// or (nil, nil) when no row matches.
func SelectByPrimaryKey(userID string) (*UserCharacter, error) {
	// SELECT * relies on the column order matching UserCharacter's field order.
	row := repository.DB.QueryRow("SELECT * FROM user_characters WHERE user_id=?", userID)
	return convertToUserCharacter(row)
}
// convert row data to Usercharacter data
// convertToUserCharacter scans a single row into a UserCharacter.
// It returns (nil, nil) when the row does not exist.
func convertToUserCharacter(row *sql.Row) (*UserCharacter, error) {
	var uc UserCharacter
	// Scan the three user_characters columns in table order (translated from
	// the original comment).
	if err := row.Scan(&uc.UserID, &uc.UserCharacterID, &uc.CharacterID); err != nil {
		if err == sql.ErrNoRows {
			return nil, nil
		}
		log.Println(err)
		return nil, err
	}
	return &uc, nil
}
// UpdateByPrimaryKey
// UpdateByPrimaryKey sets user_character_id for the row whose user_id matches.
// Fix: the prepared statement was never closed (handle leak), and the second
// parameter was misleadingly named `random` although it is bound to the
// WHERE user_id=? placeholder — renamed to userID.
func UpdateByPrimaryKey(userCharacterID string, userID string) error {
	stmt, err := repository.DB.Prepare("UPDATE user_characters SET user_character_id=? WHERE user_id=?")
	if err != nil {
		log.Println(err)
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(userCharacterID, userID)
	return err
}
// SelectByUserID
// SelectByUserID returns all user_characters rows matching userID.
func SelectByUserID(userID string) ([]UserCharacter, error) {
	rows, err := repository.DB.Query("SELECT * FROM user_characters WHERE user_id=?", userID)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	// NOTE(review): ensure convertToUserCharacterID closes rows — leaking the
	// cursor ties up a pooled connection.
	return convertToUserCharacterID(rows)
}
// convert row data to Usercharacter data
// convertToUserCharacterID scans all rows into a slice of UserCharacter.
// Fix: rows was never closed (connection/cursor leak) and iteration errors
// from rows.Err() were silently ignored.
func convertToUserCharacterID(rows *sql.Rows) ([]UserCharacter, error) {
	defer rows.Close()
	var userCharacterList []UserCharacter
	for rows.Next() {
		userCharacter := UserCharacter{}
		// Scan the three user_characters columns in table order.
		err := rows.Scan(&userCharacter.UserID, &userCharacter.UserCharacterID, &userCharacter.CharacterID)
		if err != nil {
			log.Println("failed convert rows to CharacterID ", err)
		}
		userCharacterList = append(userCharacterList, userCharacter)
	}
	if err := rows.Err(); err != nil {
		log.Println(err)
		return nil, err
	}
	return userCharacterList, nil
}
|
package main
import (
"io"
"os"
"time"
log "github.com/sirupsen/logrus"
"github.com/webee/multisocket/address"
"github.com/webee/multisocket/examples"
"github.com/webee/multisocket/protocol/stream"
_ "github.com/webee/multisocket/transport/all"
)
// init configures logrus: debug level, colored full-timestamp text output.
func init() {
	log.SetLevel(log.DebugLevel)
	log.SetFormatter(&log.TextFormatter{
		ForceColors: true,
		FullTimestamp: true,
	})
}
// main wires a stream protocol to one or more transport addresses and runs
// either the server or the client role.
// Args: [1] role ("server", anything else means client); [2:] addresses.
// NOTE(review): missing arguments panic via the slice index — no validation.
func main() {
	x := os.Args[1]
	addrs := os.Args[2:]
	protoStream := stream.New()
	for _, addr := range addrs {
		if err := address.Connect(protoStream, addr); err != nil {
			log.WithField("err", err).Panicf("connect")
		}
	}
	// Keep-alive idle option set to 10s — exact semantics defined by the
	// stream package; confirm there.
	protoStream.SetOption(stream.Options.ConnKeepAliveIdle, 10*time.Second)
	if x == "server" {
		server(protoStream)
	} else {
		client(protoStream)
	}
}
// server accepts stream connections in a loop and bridges each one to a
// dedicated reader over stdin (via MultiplexReader) and to stdout.
// The loop ends on the first Accept error.
func server(protoStream stream.Stream) {
	var (
		err error
		conn stream.Connection
	)
	mr := examples.NewMultiplexReader(os.Stdin)
	for {
		if conn, err = protoStream.Accept(); err != nil {
			log.WithField("err", err).Error("accept")
			break
		}
		// Each connection gets its own stdin sub-reader.
		xb, _ := mr.NewReader()
		go func() {
			handleConn(conn, xb, os.Stdout)
			xb.Close()
		}()
	}
}
// client opens one stream connection (Connect(0) — presumably "no timeout"
// or a default; confirm in the stream package) and bridges it to
// stdin/stdout until either side closes.
func client(protoStream stream.Stream) {
	var (
		err error
		conn io.ReadWriteCloser
	)
	if conn, err = protoStream.Connect(0); err != nil {
		log.WithField("err", err).Panic("connect")
	}
	handleConn(conn, os.Stdin, os.Stdout)
}
func handleConn(conn io.ReadWriteCloser, src io.Reader, dest io.Writer) {
go func() {
io.Copy(conn, src)
conn.Close()
}()
io.Copy(dest, conn)
conn.Close()
}
|
package main
import (
"fmt"
"strings"
)
// main demonstrates strings.Compare and strings.Join.
func main() {
	a := "gopher"
	b := "hello world"
	// strings.Compare reports -1, 0, or 1 for lexicographic order.
	fmt.Println(strings.Compare(a, b)) // a < b  -> -1
	fmt.Println(strings.Compare(a, a)) // a == a -> 0
	fmt.Println(strings.Compare(b, a)) // b > a  -> 1
	// strings.Join concatenates the elements with the given separator.
	parts := []string{a, b}
	fmt.Println(strings.Join(parts, "&"))
}
package main
import (
"runtime"
"time"
log "github.com/ianwoolf/go-logger/new"
)
// main exercises the go-logger package: a buffered, periodically flushed
// file logger under ./log, mirrored to the console, logging at every level
// once per second, then blocking forever.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	logger := log.LogDir{
		Dir: "./log",
		FlushInterval: 2, // seconds
		BufferSize: 256, // KiB
	}
	log.SetFall(true) // print all level to info
	log.SetConsole(true)
	logger.Init()
	for i := 0; i < 10000; i++ {
		// go logger.Backend.Log(log.ERROR, []byte("test"))
		logger.Info("test")
		logger.Debug("test")
		logger.Error("test")
		logger.Warn("test")
		logger.Fatal("test")
		time.Sleep(1000 * time.Millisecond)
	}
	// Block forever so background flushing keeps running.
	select {}
}
|
package sgs
import (
"er"
"sutil"
)
// appConf holds the game/application-level settings nested under conf.App.
type appConf struct {
	Profile string // game profile name, e.g. "2pvp"
	DefaultClients int
	MinimalClients int
	OptimalWaitSecond int
}

//conf sgs web server configuration
type conf struct {
	Port int // HTTP listen port
	WSReadBuff int // websocket read buffer size
	WSWriteBuff int // websocket write buffer size
	BaseTickMs int // base game tick in milliseconds
	AuthSrvURI string // auth service endpoint
	TestEnabled bool
	App appConf
}
// _defaultConf supplies the built-in defaults; loadConf overlays the file's
// values on top of a copy of this.
var _defaultConf = conf{
	Port: 9090,
	WSReadBuff: 1024,
	WSWriteBuff: 1024,
	BaseTickMs: 100,
	AuthSrvURI: "http://127.0.0.1:3115",
	App: appConf{
		Profile: "2pvp",
		DefaultClients: 2,
		MinimalClients: 2,
		OptimalWaitSecond: 30,
	},
}
//loadConf read conf file to get the settings, default values will be used when a field is missing
// loadConf reads the conf file f; default values are used for any field
// missing from the file.
func loadConf(f string) conf {
	// Start from a copy of the defaults and overlay the file's values.
	c := _defaultConf
	e := sutil.LoadConfFile(f, &c)
	if e != nil {
		// NOTE(review): er.Throw(...).To(_log) reports the failure; whether it
		// panics or merely logs (and thus returns the defaults) is defined in
		// the er package — confirm.
		er.Throw(_E_LOAD_CONF_FAIL, er.EInfo{"filename": f, "system error": e.Error()}).To(_log)
	}
	return c
}
|
package etcd
import (
"context"
"encoding/json"
"fmt"
"github.com/hpcloud/tail"
clientv3 "go.etcd.io/etcd/client/v3"
"log_agent/agent"
"log_agent/config"
"time"
)
type LogType struct {
Topic string `json:"topic"`
Filename string `json:"filename"`
}
// Watch watches an etcd key holding the log-collection config. Whenever the
// value changes it decodes a JSON array of LogType entries and starts one
// tail agent per entry, forwarding that file's lines to its Kafka topic.
//
// Fixes: the per-item goroutines previously assigned the outer named return
// `err` (a data race between goroutines and with the watch loop), and a
// useless nil *tail.Tail was passed into each goroutine; each goroutine now
// owns its error and tail handle. The decode target is also a fresh slice per
// event instead of a pre-sized shared one.
func Watch(key string) (err error) {
	var cli *clientv3.Client
	cli, err = clientv3.New(clientv3.Config{
		Endpoints: []string{config.AppConfigObj.EtcdConfig.Address},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		fmt.Printf("connect to etcd failed, err:%v\n", err)
		return
	}
	fmt.Println("connect to etcd success")
	defer cli.Close()
	// Watch for changes on the config key.
	rch := cli.Watch(context.Background(), key) // <-chan WatchResponse
	for wresp := range rch {
		for _, ev := range wresp.Events {
			var logs []LogType
			err = json.Unmarshal(ev.Kv.Value, &logs)
			if err != nil {
				fmt.Println("填入的json不合法:", err)
				return
			}
			if len(logs) <= 0 {
				fmt.Println("填入的json不能为空")
				return
			}
			fmt.Println(logs)
			for _, item := range logs {
				go func(l LogType) {
					t, tailErr := agent.ReadFile(l.Filename)
					if tailErr != nil {
						fmt.Println("读取日志失败")
						return
					}
					agent.Run(t, l.Topic)
				}(item)
			}
		}
	}
	return
}
|
package main
import (
"MovieDatabase/handlers"
"MovieDatabase/repo"
"MovieDatabase/service"
"log"
"net/http"
"path/filepath"
)
// main wires repo -> service -> handler -> router and serves the movie API
// on 127.0.0.1:8080, backed by the moviedb.json data file.
func main() {
	const file = "moviedb.json"
	if filepath.Ext(file) != ".json" {
		log.Fatal("Invalid File Extension")
	}
	repository := repo.NewRepo(file)
	handler := handlers.NewMovieHandler(service.CreateService(repository))
	server := &http.Server{
		Handler: handlers.ConfigRouter(handler),
		Addr: "127.0.0.1:8080",
	}
	log.Fatal(server.ListenAndServe())
}
|
package sol
import "testing"
// TestSlice exercises isMatch (pattern matching with '.' and '*') on one
// sample input.
// NOTE(review): it only logs the result and asserts nothing — consider
// asserting the expected value once isMatch's contract is confirmed.
func TestSlice(t *testing.T) {
	t.Log(isMatch("mississippi", "mis*is*p*."))
}
|
package modals
import (
"net/http"
"encoding/json"
"strconv"
"github.com/gorilla/mux"
)
// Types
// Events
// Event models a single event record. Every field is dropped from the JSON
// encoding when empty (omitempty).
type Event struct {
	ID int `json:"id,omitempty"`
	Name string `json:"name,omitempty"`
	Date string `json:"date,omitempty"`
	Time string `json:"time,omitempty"`
	Description string `json:"description,omitempty"`
	Location string `json:"location,omitempty"`
	Image string `json:"image,omitempty"`
	Story string `json:"story,omitempty"`
	Price float32 `json:"price,omitempty"`
	Contacts []Contact `json:"contacts,omitempty"`
}
// events is the in-memory event store shared by the handlers below.
var events []Event
// Functions
// GetEvents writes the full events list as JSON.
// NOTE(review): the Encode error is ignored; on failure the client simply
// receives a truncated body.
func GetEvents(w http.ResponseWriter, r *http.Request) {
	// Return all the events
	json.NewEncoder(w).Encode(events)
}
func GetEvent(w http.ResponseWriter, r *http.Request) {
// Get the event ID (params["id"])
params := mux.Vars(r)
id, _ := strconv.ParseInt(params["id"], 10, 0)
// Search for the relevent event
for _, player := range squad {
if player.ID == int(id) {
json.NewEncoder(w).Encode(player)
}
}
} |
package items
// Season links a season record to a movie.
type Season struct {
	ID int64
	MovieID int // NOTE(review): int while ID is int64 — confirm intended widths
	Number string // kept as a string — presumably to allow non-numeric values; confirm
}
|
package model
import (
"encoding/json"
)
// Photo is the internal representation of a stored photo, including the raw
// image bytes. Only metadata (never the bytes) is exposed through JSON.
type Photo struct {
	id int64
	gallery int64
	data []byte
	description string
	mimetype string
}

// PhotoJSON is the JSON-serializable projection of Photo (no image bytes).
type PhotoJSON struct {
	Id int64
	Gallery int64
	Description string
	Mimetype string
}

// MarshalJSON serializes the photo's metadata, deliberately omitting data.
func (p *Photo) MarshalJSON() ([]byte, error) {
	view := PhotoJSON{
		Id: p.id,
		Gallery: p.gallery,
		Description: p.description,
		Mimetype: p.mimetype,
	}
	return json.Marshal(view)
}

// NewPhoto builds a Photo with a zero id (assigned later via SetId).
func NewPhoto(gallery int64, data []byte, description string, mimetype string) *Photo {
	return &Photo{
		gallery: gallery,
		data: data,
		description: description,
		mimetype: mimetype,
	}
}
// Id returns the photo's database id.
func (p *Photo) Id() int64 {
	return p.id
}

// SetId sets the photo's database id (typically after insertion).
func (p *Photo) SetId(id int64) {
	p.id = id
}

// Gallery returns the owning gallery's id.
func (p *Photo) Gallery() int64 {
	return p.gallery
}

// SetGallery sets the owning gallery's id.
func (p *Photo) SetGallery(gallery int64) {
	p.gallery = gallery
}

// Data returns the raw image bytes (not copied; callers must not mutate).
func (p *Photo) Data() []byte {
	return p.data
}

// SetData replaces the raw image bytes.
func (p *Photo) SetData(data []byte) {
	p.data = data
}

// Description returns the photo's description text.
func (p *Photo) Description() string {
	return p.description
}

// SetDescription replaces the photo's description text.
func (p *Photo) SetDescription(description string){
	p.description = description
}

// Mimetype returns the photo's MIME type string.
func (p *Photo) Mimetype() string {
	return p.mimetype
}

// SetMimetype replaces the photo's MIME type string.
func (p *Photo) SetMimetype(mimetype string){
	p.mimetype = mimetype
}
/*
Planned persistence helpers (not yet implemented):
func getPhoto(id int) (*Photo, error) {}
func (p *Photo) create(gallery int, data []byte, description string) (int, error) {}
func (p *Photo) remove(id int) error {}
*/
package main
import "fmt"
// main demonstrates a closure: increment captures x and mutates it per call.
func main() {
	x := 0
	// An anonymous function assigned to a variable (a "func expression").
	increment := func() int {
		x++
		return x
	}
	fmt.Println(increment()) // 1
	fmt.Println(increment()) // 2
}
/*
Closures let us limit the scope of a variable shared by several functions.
Without a closure, for two or more functions to access the same variable,
that variable would need package scope.
*/
/*
(+) anonymous function:
    a function without a name; it can be declared inline, even inside
    another function.
(+) func expression:
    assigning a function to a variable.
*/
package beverage
import (
"fmt"
"strings"
)
// Mocha is a Beverage decorator: it adds a mocha surcharge and note to the
// beverage it wraps.
type Mocha struct {
	baseCost Dollar
	description string
	beverage Beverage
}

// Description appends the mocha note unless the wrapped description already
// contains it (guards against double-decorating).
func (m *Mocha) Description() string {
	base := m.beverage.Description()
	if strings.Contains(base, m.description) {
		return base
	}
	return fmt.Sprintf("%s %s", base, m.description)
}

// Cost adds the mocha surcharge to the wrapped beverage's cost.
func (m *Mocha) Cost() Dollar {
	return m.beverage.Cost() + m.baseCost
}

// AddMocha wraps b in a Mocha decorator.
func AddMocha(b Beverage) *Mocha {
	return &Mocha{
		baseCost: 7.99,
		description: "Charged with Mocha.",
		beverage: b,
	}
}
// Whip is a Beverage decorator: it adds a whipped-cream surcharge and note
// to the beverage it wraps.
type Whip struct {
	baseCost Dollar
	description string
	beverage Beverage
}

// Description appends the whip note unless the wrapped description already
// contains it (guards against double-decorating).
func (w *Whip) Description() string {
	base := w.beverage.Description()
	if strings.Contains(base, w.description) {
		return base
	}
	return fmt.Sprintf("%s %s", base, w.description)
}

// Cost adds the whip surcharge to the wrapped beverage's cost.
func (w *Whip) Cost() Dollar {
	return w.beverage.Cost() + w.baseCost
}

// AddWhip wraps b in a Whip decorator.
func AddWhip(b Beverage) *Whip {
	return &Whip{
		baseCost: 7.99,
		description: "Whipped, not whooped.",
		beverage: b,
	}
}
|
// plusOne adds one to the number stored in the list (most significant digit
// first). A dummy head with value 0 absorbs a carry out of the top digit;
// after process runs, dummy.Val is 1 exactly when that carry occurred.
func plusOne(head *ListNode) *ListNode {
	dummy := &ListNode{0, head}
	process(dummy)
	if dummy.Val == 0 {
		// No overflow: drop the dummy node.
		return dummy.Next
	}
	// Overflow: the dummy becomes the new leading digit (1).
	return dummy
}
// process recursively adds one at the tail of the list and propagates the
// carry back toward the head. It rewrites nd.Val to the new digit and
// returns the carry out of this node (0 or 1).
func process(nd *ListNode) int {
	sum := nd.Val
	if nd.Next == nil {
		// Least-significant digit receives the +1.
		sum++
	} else {
		sum += process(nd.Next)
	}
	if sum >= 10 {
		nd.Val = sum - 10
		return 1
	}
	nd.Val = sum
	return 0
}
|
package model
import (
"github.com/jinzhu/gorm"
"wechatvoice/tool/db"
)
// Category is the gorm model for a voice category.
type Category struct {
	gorm.Model
	Uuid string `sql:"size:32;not null"` // primary key (translated: 主键)
	CategoryName string // category display name (translated: 分类名称)
}
// init auto-migrates the Category table when the package is loaded.
// NOTE(review): the connection obtained via GetConn is never returned with
// CloseConn here — confirm the dbpool's expectations.
func init() {
	info := new(Category)
	info.GetConn().AutoMigrate(&Category{})
}
// GetConn opens a connection from the pool scoped to the Category model.
// Callers are expected to return it via CloseConn.
// Fix: receivers renamed from the non-idiomatic `this` to `c`.
func (c *Category) GetConn() *gorm.DB {
	conn := dbpool.OpenConn()
	return conn.Model(&Category{})
}

// CloseConn returns a connection obtained from GetConn to the pool.
func (c *Category) CloseConn(db *gorm.DB) {
	dbpool.CloseConn(db)
}
|
package main
/*
* @lc app=leetcode id=47 lang=golang
*
* [47] Permutations II
*/
/*
这道题花了我一整天Debug,最后在StackOverflow上解决了问题
具体请看: https://stackoverflow.com/questions/56649138
*/
// Solution 2: 交换法
// permuteUnique_1 returns all unique permutations of nums (swap method).
// nums is sorted first so equal values sit next to each other, which the
// duplicate check in helper_47 relies on. Note: nums is sorted in place.
func permuteUnique_1(nums []int) [][]int {
	qsort_47(nums, 0, len(nums)-1)
	result := make([][]int, 0, len(nums))
	helper_47(&result, nums, 0)
	return result
}
// helper_47 appends to res every unique permutation of nums[start:]. Each
// recursion level works on its own copy of nums; sharing one backing array
// across levels corrupts results (see the StackOverflow link above), which
// is also why there is deliberately no swap-back after the recursive call.
func helper_47(res *[][]int, nums []int, start int) {
	snapshot := make([]int, len(nums))
	copy(snapshot, nums)
	nums = snapshot
	if start == len(nums)-1 {
		*res = append(*res, nums)
		return
	}
	for i := start; i < len(nums); i++ {
		// Skip duplicates: only place a value at position start once.
		if i == start || nums[i] != nums[start] {
			nums[i], nums[start] = nums[start], nums[i]
			helper_47(res, nums, start+1)
		}
	}
}
// qsort_47 sorts nums[low..high] in place using a hole-filling quicksort:
// the pivot is lifted out of nums[low] and elements are shuttled into the
// hole from alternating ends until the pointers meet.
func qsort_47(nums []int, low, high int) {
	if low >= high {
		return
	}
	left, right := low, high
	pivot := nums[low]
	for left < right {
		// Scan from the right for an element smaller than the pivot.
		for left < right && nums[right] >= pivot {
			right--
		}
		nums[left] = nums[right]
		// Scan from the left for an element larger than the pivot.
		for left < right && nums[left] <= pivot {
			left++
		}
		nums[right] = nums[left]
	}
	// Drop the pivot into its final slot and recurse on both halves.
	nums[left] = pivot
	qsort_47(nums, low, left-1)
	qsort_47(nums, left+1, high)
}
// Solution 1: used-array backtracking.
// permuteUnique returns all unique permutations of nums. nums is sorted in
// place first so helper_47_2's duplicate-skipping rule applies.
func permuteUnique(nums []int) [][]int {
	qsort_47(nums, 0, len(nums)-1)
	result := make([][]int, 0, len(nums))
	helper_47_2(&result, nums, 0, make([]int, len(nums)), make([]bool, len(nums)))
	return result
}
// helper_47_2 fills path one position at a time, backtracking via the used
// array. For sorted input, a duplicate value is only taken when its equal
// left neighbor is already in use — the classic rule that makes each
// distinct permutation appear exactly once.
func helper_47_2(res *[][]int, nums []int, start int, path []int, used []bool) {
	if start == len(nums) {
		snapshot := make([]int, len(path))
		copy(snapshot, path)
		*res = append(*res, snapshot)
		return
	}
	for i := range nums {
		if used[i] {
			continue
		}
		if i > 0 && !used[i-1] && nums[i] == nums[i-1] {
			// Duplicate whose twin is unused: would repeat a branch.
			continue
		}
		path[start] = nums[i]
		used[i] = true
		helper_47_2(res, nums, start+1, path, used)
		used[i] = false
	}
}
|
// judgeCircle reports whether the robot ends where it started after the
// given sequence of 'U'/'D'/'L'/'R' moves: both axes must net to zero.
func judgeCircle(moves string) bool {
	// An odd number of moves can never balance out.
	if len(moves)%2 != 0 {
		return false
	}
	vertical := strings.Count(moves, "U") - strings.Count(moves, "D")
	horizontal := strings.Count(moves, "L") - strings.Count(moves, "R")
	return vertical == 0 && horizontal == 0
}
|
package inspect
import (
"fmt"
"sort"
"github.com/square/p2/pkg/health"
"github.com/square/p2/pkg/launch"
"github.com/square/p2/pkg/store/consul"
"github.com/square/p2/pkg/types"
)
// Sources a manifest can come from: the intent (desired) store or the
// reality (observed) store.
const (
	INTENT_SOURCE = iota
	REALITY_SOURCE
)
// LaunchableVersion pairs a launchable's artifact location with its version.
type LaunchableVersion struct {
	Location string `json:"location,omitempty"`
	Version launch.LaunchableVersion `json:"version,omitempty"`
}

// NodePodStatus is one row of p2-inspect output: the intent/reality manifest
// state and health of one pod on one node.
type NodePodStatus struct {
	NodeName types.NodeName `json:"node,omitempty"`
	PodId types.PodID `json:"pod,omitempty"`
	IntentManifestSHA string `json:"intent_manifest_sha"`
	RealityManifestSHA string `json:"reality_manifest_sha"`
	IntentVersions map[launch.LaunchableID]LaunchableVersion `json:"intent_versions,omitempty"`
	RealityVersions map[launch.LaunchableID]LaunchableVersion `json:"reality_versions,omitempty"`
	Health health.HealthState `json:"health,omitempty"`
	// These fields are kept for backwards compatibility with tools that
	// parse the output of p2-inspect. intent_versions and reality_versions
	// are preferred since those handle multiple versions of manifest syntax
	IntentLocations []string `json:"intent_locations"`
	RealityLocations []string `json:"reality_locations"`
}
// AddKVPToMap folds one manifest result (from either the intent or reality
// store, per source) into the statuses map, keyed by pod then node. Results
// are skipped for uuid pods and for entries filtered out by filterNode /
// filterPod (empty filter = match all). It errors if the same node/pod pair
// is seen twice from the same source.
func AddKVPToMap(result consul.ManifestResult, source int, filterNode types.NodeName, filterPod types.PodID, statuses map[types.PodID]map[types.NodeName]NodePodStatus) error {
	if result.PodUniqueKey != "" {
		// for now, p2-inspect won't show uuid pods
		return nil
	}
	nodeName := result.PodLocation.Node
	podId := result.Manifest.ID()
	if filterNode != "" && nodeName != filterNode {
		return nil
	}
	if filterPod != "" && podId != filterPod {
		return nil
	}
	if statuses[podId] == nil {
		statuses[podId] = make(map[types.NodeName]NodePodStatus)
	}
	// Read-modify-write of the (value-typed) map entry; written back at the end.
	// Note: the version maps are reset on every call, so for a given source
	// the last manifest processed wins.
	old := statuses[podId][nodeName]
	old.IntentVersions = make(map[launch.LaunchableID]LaunchableVersion)
	old.RealityVersions = make(map[launch.LaunchableID]LaunchableVersion)
	manifestSHA, err := result.Manifest.SHA()
	if err != nil {
		return err
	}
	switch source {
	case INTENT_SOURCE:
		// A non-empty SHA means this node/pod was already seen from intent.
		if old.IntentManifestSHA != "" {
			return fmt.Errorf("Two intent manifests for node %s pod %s", nodeName, podId)
		}
		old.IntentManifestSHA = manifestSHA
		for launchableID, launchable := range result.Manifest.GetLaunchableStanzas() {
			var version launch.LaunchableVersion
			if launchable.Version.ID != "" {
				version = launchable.Version
			}
			old.IntentVersions[launchableID] = LaunchableVersion{
				Location: launchable.Location,
				Version: version,
			}
			// Legacy flat list, kept for backwards compatibility (see struct).
			old.IntentLocations = append(old.IntentLocations, launchable.Location)
		}
		sort.Strings(old.IntentLocations)
	case REALITY_SOURCE:
		if old.RealityManifestSHA != "" {
			return fmt.Errorf("Two reality manifests for node %s pod %s", nodeName, podId)
		}
		old.RealityManifestSHA = manifestSHA
		for launchableID, launchable := range result.Manifest.GetLaunchableStanzas() {
			var version launch.LaunchableVersion
			if launchable.Version.ID != "" {
				version = launchable.Version
			}
			old.RealityVersions[launchableID] = LaunchableVersion{
				Location: launchable.Location,
				Version: version,
			}
			old.RealityLocations = append(old.RealityLocations, launchable.Location)
		}
		sort.Strings(old.RealityLocations)
	}
	statuses[podId][nodeName] = old
	return nil
}
|
/*
* @Description:
* @Author: JiaYe
* @Date: 2021-04-09 17:47:13
* @LastEditTime: 2021-04-12 10:01:04
* @LastEditors: JiaYe
*/
package main
import "fmt"
// main demonstrates the function forms below; all but add are currently
// commented out.
func main() {
	//fmt.Println(calc(10, 5))
	//sayHello()
	//sayHello1("JiaYe")
	fmt.Println(add(1, 2))
}
// calc returns the sum, difference, product, and integer quotient of n1 and n2.
// NOTE(review): panics when n2 == 0 (integer division by zero).
func calc(n1, n2 int) (int, int, int, int) {
	sum := n1 + n2
	diff := n1 - n2
	prod := n1 * n2
	quot := n1 / n2
	return sum, diff, prod, quot
}
// sayHello: no parameters, no return value (translated from 无参无返回值).
func sayHello() {
	fmt.Print("hello goland\n")
}
// sayHello1: one parameter, no return value (translated from 有参无返回值).
func sayHello1(name string) {
	fmt.Printf("Hi, %v\n", name)
}
// add: parameters and a return value (translated from 有参有返回值).
func add(n1, n2 int) int {
	sum := n1 + n2
	return sum
}
|
/*
Copyright 2018 Intel Corporation.
SPDX-License-Identifier: Apache-2.0
*/
package oimcommon
import (
"os"
"os/exec"
)
// CmdMonitor can be used to detect when a command terminates
// unexpectedly. It works by letting the command inherit the write
// end of a pipe, then closing that end in the parent process and then
// watching the read end.
//
// Alternatively one can also block on cmd.Wait() in a goroutine.
// But that might have unintended side effects, like reaping the child.
// The advantage of CmdMonitor is that it doesn't interfere with
// the child lifecycle.
type CmdMonitor struct {
pr *os.File
pw *os.File
}
// AddCmdMonitor prepares the command for watching. Must be
// called before starting the command.
func AddCmdMonitor(cmd *exec.Cmd) (CmdMonitor, error) {
pr, pw, err := os.Pipe()
if err != nil {
return CmdMonitor{}, err
}
cmd.ExtraFiles = append(cmd.ExtraFiles, pw)
return CmdMonitor{pr, pw}, nil
}
// Watch must be called after starting the command.
// The returned channel is closed once the command
// terminates.
func (cm CmdMonitor) Watch() <-chan interface{} {
done := make(chan interface{})
go func() {
defer close(done)
b := make([]byte, 1)
cm.pr.Read(b) // nolint: gosec
}()
cm.pw.Close() // nolint: gosec
return done
}
|
package webcam
import (
"bytes"
"encoding/binary"
"unsafe"
"github.com/stanier/webcam/ioctl"
"golang.org/x/sys/unix"
)
// Capability / buffer constants mirroring linux/videodev2.h.
const (
	V4L2_CAP_VIDEO_CAPTURE uint32 = 0x00000001
	V4L2_CAP_STREAMING uint32 = 0x04000000
	V4L2_BUF_TYPE_VIDEO_CAPTURE uint32 = 1
	V4L2_MEMORY_MMAP uint32 = 1
	V4L2_FIELD_ANY uint32 = 0
)
// Control-class bases mirroring linux/v4l2-controls.h.
const (
	V4L2_CTRL_CLASS_USER uint32 = 0x980000
	V4L2_CID_BASE uint32 = (V4L2_CTRL_CLASS_USER | 0x900)
	V4L2_CTRL_CLASS_CAMERA uint32 = 0x9a0000
	V4L2_CID_CAMERA_CLASS_BASE uint32 = (V4L2_CTRL_CLASS_CAMERA | 0x900)
	V4L2_CID_CAMERA_CLASS uint32 = (V4L2_CTRL_CLASS_CAMERA | 1)
)
// Exposure control ids.
const (
	V4L2_CID_EXPOSURE uint32 = V4L2_CID_BASE + 17
	V4L2_CID_EXPOSURE_AUTO uint32 = V4L2_CID_CAMERA_CLASS_BASE + 1
	V4L2_CID_EXPOSURE_ABSOLUTE uint32 = V4L2_CID_CAMERA_CLASS_BASE + 2
	V4L2_CID_EXPOSURE_AUTO_PRIORITY uint32 = V4L2_CID_CAMERA_CLASS_BASE + 3
)
// Values for the V4L2_CID_EXPOSURE_AUTO control.
const (
	V4L2_EXPOSURE_AUTO int32 = iota
	V4L2_EXPOSURE_MANUAL int32 = iota
	V4L2_EXPOSURE_SHUTTER_PRIORITY int32 = iota
	V4L2_EXPOSURE_APERTURE_PRIORITY int32 = iota
)
// Frame-size enumeration types (v4l2_frmsizeenum._type).
const (
	V4L2_FRMSIZE_TYPE_DISCRETE uint32 = iota
	V4L2_FRMSIZE_TYPE_CONTINUOUS uint32 = iota
	V4L2_FRMSIZE_TYPE_STEPWISE uint32 = iota
)
// ioctl request numbers, computed from the 'V' magic, the command number,
// and the argument struct size (matching the VIDIOC_* macros).
var (
	VIDIOC_QUERYCAP = ioctl.IoR(uintptr('V'), 0, unsafe.Sizeof(v4l2_capability{}))
	VIDIOC_ENUM_FMT = ioctl.IoRW(uintptr('V'), 2, unsafe.Sizeof(v4l2_fmtdesc{}))
	VIDIOC_S_FMT = ioctl.IoRW(uintptr('V'), 5, unsafe.Sizeof(v4l2_format{}))
	VIDIOC_REQBUFS = ioctl.IoRW(uintptr('V'), 8, unsafe.Sizeof(v4l2_requestbuffers{}))
	VIDIOC_QUERYBUF = ioctl.IoRW(uintptr('V'), 9, unsafe.Sizeof(v4l2_buffer{}))
	VIDIOC_QBUF = ioctl.IoRW(uintptr('V'), 15, unsafe.Sizeof(v4l2_buffer{}))
	VIDIOC_DQBUF = ioctl.IoRW(uintptr('V'), 17, unsafe.Sizeof(v4l2_buffer{}))
	//sizeof int32
	VIDIOC_STREAMON = ioctl.IoW(uintptr('V'), 18, 4)
	VIDIOC_ENUM_FRAMESIZES = ioctl.IoRW(uintptr('V'), 74, unsafe.Sizeof(v4l2_frmsizeenum{}))
	// __p exists only so unsafe.Sizeof(__p) yields the platform pointer size.
	__p = unsafe.Pointer(uintptr(0))
	NativeByteOrder = getNativeByteOrder()
)
var (
	VIDIOC_S_EXT_CTRLS = ioctl.IoRW(uintptr('V'), 72, unsafe.Sizeof(v4l2_ext_controls{}))
)
var (
	VIDIOC_S_CTRL = ioctl.IoRW(uintptr('V'), 28, unsafe.Sizeof(v4l2_control{}))
)
// v4l2_ext_control mirrors the kernel's struct v4l2_ext_control (32-bit
// value form). Field order and widths must match the kernel ABI exactly.
type v4l2_ext_control struct {
	id uint32
	size uint32
	reserved2 [1]uint32
	value int32
}
// v4l2_ext_controls mirrors struct v4l2_ext_controls for VIDIOC_S_EXT_CTRLS.
// Do not reorder fields; the layout must match the kernel ABI.
type v4l2_ext_controls struct {
	ctrl_class uint32 // Set to V4L2_CTRL_CLASS_CAMERA
	//which uint32 We shouldn't need this yet
	count uint32
	error_idx uint32
	reserved [2]uint32 // Set to 0
	controls *[1]v4l2_ext_control
	/*controls struct {
	// I only need one of these for the required function, others will
	// be added later
	/*id uint32
	size uint32
	reserved [1]uint32
	value int32
	value64 int64
	_string []byte
	p_u8 *uint8
	p_u16 *uint16
	p_u32 *uint32
	// Anonymous pointer to compound excluded, because that's not how you
	// should be using pointers
	}*/
}
// v4l2_control mirrors struct v4l2_control for VIDIOC_S_CTRL.
type v4l2_control struct {
	id uint32
	value int32
}
// v4l2_capability mirrors struct v4l2_capability (VIDIOC_QUERYCAP result).
type v4l2_capability struct {
	driver [16]uint8
	card [32]uint8
	bus_info [32]uint8
	version uint32
	capabilities uint32
	device_caps uint32
	reserved [3]uint32
}
// v4l2_fmtdesc mirrors struct v4l2_fmtdesc (VIDIOC_ENUM_FMT).
type v4l2_fmtdesc struct {
	index uint32
	_type uint32
	flags uint32
	description [32]uint8
	pixelformat uint32
	reserved [4]uint32
}
// v4l2_frmsizeenum mirrors struct v4l2_frmsizeenum; union holds either a
// discrete or a stepwise frame size, decoded by getFrameSize per _type.
type v4l2_frmsizeenum struct {
	index uint32
	pixel_format uint32
	_type uint32
	union [24]uint8
	reserved [2]uint32
}
// v4l2_frmsize_discrete is the discrete variant of the frmsizeenum union.
type v4l2_frmsize_discrete struct {
	Width uint32
	Height uint32
}
// v4l2_frmsize_stepwise is the stepwise variant of the frmsizeenum union.
type v4l2_frmsize_stepwise struct {
	Min_width uint32
	Max_width uint32
	Step_width uint32
	Min_height uint32
	Max_height uint32
	Step_height uint32
}
//Hack to make go compiler properly align union
type v4l2_format_aligned_union struct {
	data [200 - unsafe.Sizeof(__p)]byte
	_ unsafe.Pointer
}
// v4l2_format mirrors struct v4l2_format (VIDIOC_S_FMT).
type v4l2_format struct {
	_type uint32
	union v4l2_format_aligned_union
}
// v4l2_pix_format mirrors struct v4l2_pix_format (the capture variant of
// v4l2_format's union).
type v4l2_pix_format struct {
	Width uint32
	Height uint32
	Pixelformat uint32
	Field uint32
	Bytesperline uint32
	Sizeimage uint32
	Colorspace uint32
	Priv uint32
	Flags uint32
	Ycbcr_enc uint32
	Quantization uint32
	Xfer_func uint32
}
// v4l2_requestbuffers mirrors struct v4l2_requestbuffers (VIDIOC_REQBUFS).
type v4l2_requestbuffers struct {
	count uint32
	_type uint32
	memory uint32
	reserved [2]uint32
}
// v4l2_buffer mirrors struct v4l2_buffer (VIDIOC_QUERYBUF/QBUF/DQBUF).
// union is sized to a pointer to match the kernel's offset/pointer union.
type v4l2_buffer struct {
	index uint32
	_type uint32
	bytesused uint32
	flags uint32
	field uint32
	timestamp unix.Timeval
	timecode v4l2_timecode
	sequence uint32
	memory uint32
	union [unsafe.Sizeof(__p)]uint8
	length uint32
	reserved2 uint32
	reserved uint32
}
// v4l2_timecode mirrors struct v4l2_timecode.
type v4l2_timecode struct {
	_type uint32
	flags uint32
	frames uint8
	seconds uint8
	minutes uint8
	hours uint8
	userbits [4]uint8
}
// checkCapabilities queries the device (VIDIOC_QUERYCAP) and reports whether
// it supports video capture and streaming I/O, per the capabilities bitmask.
func checkCapabilities(fd uintptr) (supportsVideoCapture bool, supportsVideoStreaming bool, err error) {
	caps := &v4l2_capability{}
	err = ioctl.Ioctl(fd, VIDIOC_QUERYCAP, uintptr(unsafe.Pointer(caps)))
	if err != nil {
		return
	}
	supportsVideoCapture = (caps.capabilities & V4L2_CAP_VIDEO_CAPTURE) != 0
	supportsVideoStreaming = (caps.capabilities & V4L2_CAP_STREAMING) != 0
	return
}
// getPixelFormat enumerates the device's pixel format at the given index
// (VIDIOC_ENUM_FMT) and returns its fourcc code and human-readable
// description. The kernel returns an error past the last index.
func getPixelFormat(fd uintptr, index uint32) (code uint32, description string, err error) {
	fmtdesc := &v4l2_fmtdesc{}
	fmtdesc.index = index
	fmtdesc._type = V4L2_BUF_TYPE_VIDEO_CAPTURE
	err = ioctl.Ioctl(fd, VIDIOC_ENUM_FMT, uintptr(unsafe.Pointer(fmtdesc)))
	if err != nil {
		return
	}
	code = fmtdesc.pixelformat
	// description is a NUL-terminated C string inside a fixed array.
	description = CToGoString(fmtdesc.description[:])
	return
}
// getFrameSize enumerates the frame sizes supported for the given pixel
// format fourcc (VIDIOC_ENUM_FRAMESIZES) and returns them normalized to a
// min/max/step range. A discrete size is reported as min == max with step 0.
//
// Fix: the V4L2_FRMSIZE_TYPE_CONTINUOUS case previously fell through empty
// and returned a zero FrameSize. Per the V4L2 spec, CONTINUOUS is a special
// case of STEPWISE (step values are 1) and the stepwise union member is
// valid, so both cases now decode the stepwise payload.
func getFrameSize(fd uintptr, index uint32, code uint32) (frameSize FrameSize, err error) {
	frmsizeenum := &v4l2_frmsizeenum{}
	frmsizeenum.index = index
	frmsizeenum.pixel_format = code
	err = ioctl.Ioctl(fd, VIDIOC_ENUM_FRAMESIZES, uintptr(unsafe.Pointer(frmsizeenum)))
	if err != nil {
		return
	}
	switch frmsizeenum._type {
	case V4L2_FRMSIZE_TYPE_DISCRETE:
		discrete := &v4l2_frmsize_discrete{}
		err = binary.Read(bytes.NewBuffer(frmsizeenum.union[:]), NativeByteOrder, discrete)
		if err != nil {
			return
		}
		frameSize.MinWidth = discrete.Width
		frameSize.MaxWidth = discrete.Width
		frameSize.StepWidth = 0
		frameSize.MinHeight = discrete.Height
		frameSize.MaxHeight = discrete.Height
		frameSize.StepHeight = 0
	case V4L2_FRMSIZE_TYPE_CONTINUOUS, V4L2_FRMSIZE_TYPE_STEPWISE:
		stepwise := &v4l2_frmsize_stepwise{}
		err = binary.Read(bytes.NewBuffer(frmsizeenum.union[:]), NativeByteOrder, stepwise)
		if err != nil {
			return
		}
		frameSize.MinWidth = stepwise.Min_width
		frameSize.MaxWidth = stepwise.Max_width
		frameSize.StepWidth = stepwise.Step_width
		frameSize.MinHeight = stepwise.Min_height
		frameSize.MaxHeight = stepwise.Max_height
		frameSize.StepHeight = stepwise.Step_height
	}
	return
}
// setExposure sets the camera exposure via the extended-controls API
// (VIDIOC_S_EXT_CTRLS). The commented-out block below is the older
// single-control (VIDIOC_S_CTRL) approach, kept for reference.
//
// NOTE(review): the control id is V4L2_CID_EXPOSURE_AUTO, yet the value
// passed is *exposure_value — usually EXPOSURE_AUTO takes an auto-mode enum
// and EXPOSURE_ABSOLUTE takes the exposure time; confirm this is intended.
// NOTE(review): size is computed as Sizeof(uintptr(...)) i.e. pointer size,
// not the size of the 32-bit value — confirm against the driver's contract.
func setExposure(fd uintptr, exposure_value *int32) error {
	/*exposure := &v4l2_control{
	id: V4L2_CID_EXPOSURE_ABSOLUTE,
	value: *exposure_value,
	}
	err := ioctl.Ioctl(fd, VIDIOC_S_CTRL, uintptr(unsafe.Pointer(exposure)))*/
	controls := &v4l2_ext_controls{
		ctrl_class: V4L2_CTRL_CLASS_CAMERA,
		count:      uint32(1),
		//error_idx: 0,
		reserved: [2]uint32{uint32(0), uint32(0)},
		controls: &[1]v4l2_ext_control{
			v4l2_ext_control{
				id:        V4L2_CID_EXPOSURE_AUTO,
				size:      uint32(unsafe.Sizeof(uintptr(*exposure_value))),
				reserved2: [1]uint32{0},
				value:     *exposure_value,
			},
		},
	}
	err := ioctl.Ioctl(fd, VIDIOC_S_EXT_CTRLS, uintptr(unsafe.Pointer(controls)))
	if err != nil {
		return err
	}
	return nil
}
// setImageFormat negotiates the capture format with the driver
// (VIDIOC_S_FMT). The requested fourcc/width/height are serialized into the
// format union; the driver may adjust them, so the (possibly different)
// accepted values are read back and written through the pointer arguments.
func setImageFormat(fd uintptr, formatcode *uint32, width *uint32, height *uint32) (err error) {
	format := &v4l2_format{
		_type: V4L2_BUF_TYPE_VIDEO_CAPTURE,
	}
	pix := v4l2_pix_format{
		Width:       *width,
		Height:      *height,
		Pixelformat: *formatcode,
		Field:       V4L2_FIELD_ANY,
	}
	// Serialize the pix struct into the union's raw bytes in native order,
	// matching the C union layout.
	pixbytes := &bytes.Buffer{}
	err = binary.Write(pixbytes, NativeByteOrder, pix)
	if err != nil {
		return
	}
	copy(format.union.data[:], pixbytes.Bytes())
	err = ioctl.Ioctl(fd, VIDIOC_S_FMT, uintptr(unsafe.Pointer(format)))
	if err != nil {
		return
	}
	// Decode the driver-adjusted format out of the union again.
	pixReverse := &v4l2_pix_format{}
	err = binary.Read(bytes.NewBuffer(format.union.data[:]), NativeByteOrder, pixReverse)
	if err != nil {
		return
	}
	*width = pixReverse.Width
	*height = pixReverse.Height
	*formatcode = pixReverse.Pixelformat
	return
}
// mmapRequestBuffers asks the driver to allocate buf_count mmap-able capture
// buffers (VIDIOC_REQBUFS). The driver may grant fewer (or more) buffers;
// the actual count is written back through buf_count.
func mmapRequestBuffers(fd uintptr, buf_count *uint32) (err error) {
	req := &v4l2_requestbuffers{}
	req.count = *buf_count
	req._type = V4L2_BUF_TYPE_VIDEO_CAPTURE
	req.memory = V4L2_MEMORY_MMAP
	err = ioctl.Ioctl(fd, VIDIOC_REQBUFS, uintptr(unsafe.Pointer(req)))
	if err != nil {
		return
	}
	*buf_count = req.count
	return
}
// mmapQueryBuffer looks up the driver-side buffer at index
// (VIDIOC_QUERYBUF) and maps it into this process with mmap. The returned
// slice aliases the device buffer and must eventually be released with
// mmapReleaseBuffer; the buffer's byte length is written through length.
func mmapQueryBuffer(fd uintptr, index uint32, length *uint32) (buffer []byte, err error) {
	req := &v4l2_buffer{}
	req._type = V4L2_BUF_TYPE_VIDEO_CAPTURE
	req.memory = V4L2_MEMORY_MMAP
	req.index = index
	err = ioctl.Ioctl(fd, VIDIOC_QUERYBUF, uintptr(unsafe.Pointer(req)))
	if err != nil {
		return
	}
	// For MMAP buffers the first 4 bytes of the union hold the mmap offset.
	var offset uint32
	err = binary.Read(bytes.NewBuffer(req.union[:]), NativeByteOrder, &offset)
	if err != nil {
		return
	}
	*length = req.length
	buffer, err = unix.Mmap(int(fd), int64(offset), int(req.length), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
	return
}
// mmapDequeueBuffer pops a filled buffer off the driver's outgoing queue
// (VIDIOC_DQBUF), blocking until a frame is available. The buffer's index
// and the number of bytes of image data are written through the pointers.
func mmapDequeueBuffer(fd uintptr, index *uint32, length *uint32) (err error) {
	buffer := &v4l2_buffer{}
	buffer._type = V4L2_BUF_TYPE_VIDEO_CAPTURE
	buffer.memory = V4L2_MEMORY_MMAP
	err = ioctl.Ioctl(fd, VIDIOC_DQBUF, uintptr(unsafe.Pointer(buffer)))
	if err != nil {
		return
	}
	*index = buffer.index
	*length = buffer.bytesused
	return
}
// mmapEnqueueBuffer hands the buffer at index back to the driver's incoming
// queue (VIDIOC_QBUF) so it can be refilled with a new frame.
func mmapEnqueueBuffer(fd uintptr, index uint32) (err error) {
	buffer := &v4l2_buffer{}
	buffer._type = V4L2_BUF_TYPE_VIDEO_CAPTURE
	buffer.memory = V4L2_MEMORY_MMAP
	buffer.index = index
	err = ioctl.Ioctl(fd, VIDIOC_QBUF, uintptr(unsafe.Pointer(buffer)))
	return
}
// mmapReleaseBuffer unmaps a buffer previously mapped by mmapQueryBuffer.
func mmapReleaseBuffer(buffer []byte) (err error) {
	err = unix.Munmap(buffer)
	return err
}
// startStreaming enables streaming I/O on the device (VIDIOC_STREAMON).
// The ioctl takes a pointer to the buffer type being started.
func startStreaming(fd uintptr) (err error) {
	bufType := uint32(V4L2_BUF_TYPE_VIDEO_CAPTURE)
	err = ioctl.Ioctl(fd, VIDIOC_STREAMON, uintptr(unsafe.Pointer(&bufType)))
	return
}
// FD_SET sets file descriptor i in the fd set p, mirroring the C FD_SET
// macro for use with select(2).
func FD_SET(p *unix.FdSet, i int) {
	// Bug fix: each element of p.Bits holds 8*sizeof(element) bits (64 on
	// linux/amd64). The previous code divided by len(p.Bits) — the number of
	// array elements — which placed the bit in the wrong word for any
	// fd >= len(p.Bits).
	n := int(unsafe.Sizeof(p.Bits[0]) * 8)
	p.Bits[i/n] |= 1 << uintptr(i%n)
}
// waitForFrame blocks via select(2) until the device fd becomes readable
// (a frame is ready) or the timeout (in seconds) elapses. It retries when
// select is interrupted by a signal (EINTR).
//
// NOTE(review): the timeout is rebuilt from scratch on each EINTR retry, so
// repeated interruptions can extend the total wait beyond `timeout` seconds.
func waitForFrame(fd uintptr, timeout uint32) (count int, err error) {
	for {
		fds := &unix.FdSet{}
		FD_SET(fds, int(fd))
		var oneSecInNsec int64 = 1e9
		timeoutNsec := int64(timeout) * oneSecInNsec
		nativeTimeVal := unix.NsecToTimeval(timeoutNsec)
		tv := &nativeTimeVal
		// select's nfds argument is the highest fd plus one.
		count, err = unix.Select(int(fd+1), fds, nil, nil, tv)
		if count < 0 && err == unix.EINTR {
			continue
		}
		return
	}
}
func getNativeByteOrder() binary.ByteOrder {
var i int32 = 0x01020304
u := unsafe.Pointer(&i)
pb := (*byte)(u)
b := *pb
if b == 0x04 {
return binary.LittleEndian
} else {
return binary.BigEndian
}
}
// CToGoString converts a NUL-terminated C string stored in a byte slice to a
// Go string, truncating at the first zero byte. If no zero byte is present,
// the whole slice is used.
func CToGoString(c []byte) string {
	if end := bytes.IndexByte(c, 0); end >= 0 {
		return string(c[:end])
	}
	return string(c)
}
|
package main
import "fmt"
// main demonstrates Go's untyped-constant arithmetic: dividing by a float
// constant yields a float64 result.
func main() {
	quotient := 7 / 0.5 // untyped constants divide exactly -> 14
	fmt.Println(quotient)
	ratio := 20 / 30.0
	fmt.Println(ratio)
	quotient = 7 / ratio
	fmt.Println(quotient)
}
/**
最低票价
在一个火车旅行很受欢迎的国度,你提前一年计划了一些火车旅行。在接下来的一年里,你要旅行的日子将以一个名为 days 的数组给出。每一项是一个从 1 到 365 的整数。
火车票有三种不同的销售方式:
- 一张为期一天的通行证售价为 costs[0] 美元;
- 一张为期七天的通行证售价为 costs[1] 美元;
- 一张为期三十天的通行证售价为 costs[2] 美元。
通行证允许数天无限制的旅行。 例如,如果我们在第 2 天获得一张为期 7 天的通行证,那么我们可以连着旅行 7 天:第 2 天、第 3 天、第 4 天、第 5 天、第 6 天、第 7 天和第 8 天。
返回你想要完成在给定的列表 days 中列出的每一天的旅行所需要的最低消费。
示例 1:
```
输入:days = [1,4,6,7,8,20], costs = [2,7,15]
输出:11
解释:
例如,这里有一种购买通行证的方法,可以让你完成你的旅行计划:
在第 1 天,你花了 costs[0] = $2 买了一张为期 1 天的通行证,它将在第 1 天生效。
在第 3 天,你花了 costs[1] = $7 买了一张为期 7 天的通行证,它将在第 3, 4, ..., 9 天生效。
在第 20 天,你花了 costs[0] = $2 买了一张为期 1 天的通行证,它将在第 20 天生效。
你总共花了 $11,并完成了你计划的每一天旅行。
```
示例 2:
```
输入:days = [1,2,3,4,5,6,7,8,9,10,30,31], costs = [2,7,15]
输出:17
解释:
例如,这里有一种购买通行证的方法,可以让你完成你的旅行计划:
在第 1 天,你花了 costs[2] = $15 买了一张为期 30 天的通行证,它将在第 1, 2, ..., 30 天生效。
在第 31 天,你花了 costs[0] = $2 买了一张为期 1 天的通行证,它将在第 31 天生效。
你总共花了 $17,并完成了你计划的每一天旅行。
```
提示:
1. 1 <= days.length <= 365
2. 1 <= days[i] <= 365
3. days 按顺序严格递增
4. costs.length == 3
5. 1 <= costs[i] <= 1000
*/
/*
NOTE: This dynamic-programming solution is hard to follow at first read.
TODO: revisit and review the DP recurrence later.
*/
// mincostTickets returns the minimum total cost to cover every travel day in
// days (strictly increasing values in 1..365), where costs holds the prices
// of a 1-day, 7-day and 30-day pass respectively.
//
// Top-down DP over calendar days: solve(day) is the cheapest way to cover
// all travel days from `day` onward; results are memoized per calendar day
// (0 means "not computed yet" — safe because every pass costs at least 1).
func mincostTickets(days []int, costs []int) int {
	travel := make(map[int]bool, len(days))
	for _, d := range days {
		travel[d] = true
	}
	memo := [366]int{}
	var solve func(day int) int
	solve = func(day int) int {
		if day > 365 {
			return 0
		}
		if memo[day] > 0 {
			return memo[day]
		}
		if !travel[day] {
			// Not traveling today: no pass needed, move to tomorrow.
			memo[day] = solve(day + 1)
			return memo[day]
		}
		// Traveling today: buy the cheapest of the three pass options.
		best := solve(day+1) + costs[0]
		if c := solve(day+7) + costs[1]; c < best {
			best = c
		}
		if c := solve(day+30) + costs[2]; c < best {
			best = c
		}
		memo[day] = best
		return best
	}
	return solve(1)
}
// min returns the smaller of x and y.
func min(x, y int) int {
	if y < x {
		return y
	}
	return x
}
|
package codegen
import (
"context"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path/filepath"
"sort"
"strings"
"time"
shellquote "github.com/kballard/go-shellquote"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session/secrets/secretsprovider"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/entitlements"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/openllb/hlb/errdefs"
"github.com/openllb/hlb/parser"
"github.com/openllb/hlb/pkg/llbutil"
"github.com/openllb/hlb/pkg/sockproxy"
"github.com/openllb/hlb/solver"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
// Resolve is an option whose Call is intentionally a no-op.
// NOTE(review): presumably image resolution is handled elsewhere in the
// solve pipeline and this option only exists as a marker — confirm.
type Resolve struct{}

// Call implements the option; it leaves the register unchanged.
func (ir Resolve) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	return nil
}
// Checksum appends llb.Checksum(dgst) to the register's option list,
// verifying fetched content against the given digest.
type Checksum struct{}

// Call appends the checksum option to the accumulated options.
func (c Checksum) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, dgst digest.Digest) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.Checksum(dgst)))
}

// Chmod appends llb.Chmod(mode), setting file permissions.
type Chmod struct{}

// Call appends the chmod option to the accumulated options.
func (c Chmod) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, mode os.FileMode) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.Chmod(mode)))
}

// Filename appends llb.Filename(filename), naming the downloaded file.
type Filename struct{}

// Call appends the filename option to the accumulated options.
func (f Filename) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, filename string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.Filename(filename)))
}

// KeepGitDir appends llb.KeepGitDir(), retaining the .git directory when
// cloning a git source.
type KeepGitDir struct{}

// Call appends the keep-git-dir option to the accumulated options.
func (kgd KeepGitDir) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.KeepGitDir()))
}

// IncludePatterns appends llb.IncludePatterns for a local context.
type IncludePatterns struct{}

// Call appends the include-patterns option to the accumulated options.
func (ip IncludePatterns) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, patterns ...string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.IncludePatterns(patterns)))
}

// ExcludePatterns appends llb.ExcludePatterns for a local context.
type ExcludePatterns struct{}

// Call appends the exclude-patterns option to the accumulated options.
func (ep ExcludePatterns) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, patterns ...string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.ExcludePatterns(patterns)))
}

// FollowPaths appends llb.FollowPaths for a local context.
type FollowPaths struct{}

// Call appends the follow-paths option to the accumulated options.
func (fp FollowPaths) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, paths ...string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.FollowPaths(paths)))
}
// FrontendInput marshals a filesystem and registers it as a named frontend
// input, also propagating the filesystem's solve and session options.
type FrontendInput struct{}

// Call marshals input.State (pinned to linux/amd64) and appends the
// frontend-input option plus the input's solve/session options.
func (fi FrontendInput) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, key string, input Filesystem) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	// NOTE(review): platform is hard-coded to llb.LinuxAmd64 — confirm this
	// is intended rather than the current target platform.
	def, err := input.State.Marshal(ctx, llb.LinuxAmd64)
	if err != nil {
		return err
	}
	retOpts = append(retOpts, llbutil.FrontendInput(key, def))
	for _, opt := range input.SolveOpts {
		retOpts = append(retOpts, opt)
	}
	for _, opt := range input.SessionOpts {
		retOpts = append(retOpts, opt)
	}
	return ret.Set(retOpts)
}

// FrontendOpt appends a key/value option forwarded to a BuildKit frontend.
type FrontendOpt struct{}

// Call appends the frontend key/value option to the accumulated options.
func (fo FrontendOpt) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, key, value string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.FrontendOpt(key, value)))
}
// CreateParents appends llb.WithParents(true) (mkdir -p semantics).
type CreateParents struct{}

// Call appends the create-parents option to the accumulated options.
func (cp CreateParents) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.WithParents(true)))
}

// Chown appends llb.WithUser(owner), setting ownership for file operations.
type Chown struct{}

// Call appends the ownership option to the accumulated options.
func (c Chown) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, owner string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.WithUser(owner)))
}

// CreatedTime appends llb.WithCreatedTime(t) for file operations.
type CreatedTime struct{}

// Call appends the created-time option to the accumulated options.
func (ct CreatedTime) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, t time.Time) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.WithCreatedTime(t)))
}

// AllowNotFound appends llb.WithAllowNotFound(true) (rm of a missing path
// is not an error).
type AllowNotFound struct{}

// Call appends the allow-not-found option to the accumulated options.
func (anf AllowNotFound) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.WithAllowNotFound(true)))
}

// AllowWildcard appends llb.WithAllowWildcard(true) (wildcard expansion in
// rm paths).
type AllowWildcard struct{}

// Call appends the allow-wildcard option to the accumulated options.
func (aw AllowWildcard) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.WithAllowWildcard(true)))
}

// FollowSymlinks appends llbutil.WithFollowSymlinks(true) for copy.
type FollowSymlinks struct{}

// Call appends the follow-symlinks option to the accumulated options.
func (fs FollowSymlinks) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithFollowSymlinks(true)))
}

// ContentsOnly appends llbutil.WithCopyDirContentsOnly(true): copy a
// directory's contents rather than the directory itself.
type ContentsOnly struct{}

// Call appends the contents-only option to the accumulated options.
func (co ContentsOnly) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithCopyDirContentsOnly(true)))
}

// Unpack appends llbutil.WithAttemptUnpack(true): try to unpack archives
// while copying.
type Unpack struct{}

// Call appends the attempt-unpack option to the accumulated options.
func (u Unpack) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithAttemptUnpack(true)))
}

// CreateDestPath appends llbutil.WithCreateDestPath(true): create missing
// destination directories when copying.
type CreateDestPath struct{}

// Call appends the create-dest-path option to the accumulated options.
func (cdp CreateDestPath) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithCreateDestPath(true)))
}
// CopyAllowWildcard appends llbutil.WithAllowWildcard(true) for copy sources.
type CopyAllowWildcard struct{}

// Call appends the copy allow-wildcard option to the accumulated options.
func (caw CopyAllowWildcard) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithAllowWildcard(true)))
}

// AllowEmptyWildcard appends llbutil.WithAllowEmptyWildcard(true): a
// wildcard matching nothing is not an error.
type AllowEmptyWildcard struct{}

// Call appends the allow-empty-wildcard option to the accumulated options.
func (aew AllowEmptyWildcard) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithAllowEmptyWildcard(true)))
}

// UtilChown appends llbutil.WithChown(owner).
type UtilChown struct{}

// Call appends the chown option to the accumulated options.
func (uc UtilChown) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, owner string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithChown(owner)))
}

// UtilChmod appends llbutil.WithChmod(mode).
type UtilChmod struct{}

// Call appends the chmod option to the accumulated options.
func (uc UtilChmod) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, mode os.FileMode) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithChmod(mode)))
}

// UtilCreatedTime appends llbutil.WithCreatedTime(t).
type UtilCreatedTime struct{}

// Call appends the created-time option to the accumulated options.
func (uct UtilCreatedTime) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, t time.Time) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithCreatedTime(t)))
}

// TemplateField is a named value made available to template rendering.
type TemplateField struct {
	Name  string
	Value interface{}
}

// StringField appends a string-valued TemplateField.
type StringField struct{}

// Call appends a TemplateField{name, value} to the accumulated options.
func (sf StringField) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, name, value string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, &TemplateField{name, value}))
}
// LocalRunOption collects flags controlling how a local command is run and
// how its output is captured.
type LocalRunOption struct {
	IgnoreError   bool // do not fail the build on a non-zero exit code
	OnlyStderr    bool // capture stderr instead of stdout
	IncludeStderr bool // capture stderr in addition to stdout
}

// IgnoreError appends a mutator setting LocalRunOption.IgnoreError.
type IgnoreError struct{}

// Call appends the ignore-error mutator to the accumulated options.
func (ie IgnoreError) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, func(o *LocalRunOption) {
		o.IgnoreError = true
	}))
}

// OnlyStderr appends a mutator setting LocalRunOption.OnlyStderr.
type OnlyStderr struct{}

// Call appends the only-stderr mutator to the accumulated options.
func (os OnlyStderr) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, func(o *LocalRunOption) {
		o.OnlyStderr = true
	}))
}

// IncludeStderr appends a mutator setting LocalRunOption.IncludeStderr.
type IncludeStderr struct{}

// Call appends the include-stderr mutator to the accumulated options.
func (is IncludeStderr) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, func(o *LocalRunOption) {
		o.IncludeStderr = true
	}))
}

// Shlex is a marker option requesting shell-style word splitting of a
// single-string command (see ShlexArgs).
type Shlex struct{}

// Call appends a Shlex marker to the accumulated options.
func (s Shlex) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, &Shlex{}))
}
// ShlexArgs normalizes a command argument list. An empty list stays empty.
// A single argument is either shell-word-split (when shlex is true) or
// wrapped as "/bin/sh -c <arg>". Multiple arguments pass through unchanged.
func ShlexArgs(args []string, shlex bool) ([]string, error) {
	switch len(args) {
	case 0:
		return nil, nil
	case 1:
		if !shlex {
			return []string{"/bin/sh", "-c", args[0]}, nil
		}
		return shellquote.Split(args[0])
	default:
		return args, nil
	}
}
// ReadonlyRootfs appends llbutil.WithReadonlyRootFS() for an exec.
type ReadonlyRootfs struct{}

// Call appends the readonly-rootfs option to the accumulated options.
func (rr ReadonlyRootfs) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithReadonlyRootFS()))
}

// RunEnv appends llbutil.WithEnv(key, value) for an exec.
type RunEnv struct{}

// Call appends the environment-variable option to the accumulated options.
func (re RunEnv) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, key, value string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithEnv(key, value)))
}

// RunDir appends llbutil.WithDir(path): the exec's working directory.
type RunDir struct{}

// Call appends the working-directory option to the accumulated options.
func (rd RunDir) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, path string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithDir(path)))
}

// RunUser appends llbutil.WithUser(name): the user the exec runs as.
type RunUser struct{}

// Call appends the user option to the accumulated options.
func (ru RunUser) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, name string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithUser(name)))
}

// RunBreakpoint appends a debugger breakpoint for the given command.
type RunBreakpoint struct{}

// Call appends the breakpoint option to the accumulated options.
func (rb RunBreakpoint) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, command ...string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, breakpointCommand(command)))
}

// IgnoreCache busts BuildKit's cache for this exec by injecting a unique
// environment variable value on every invocation.
type IgnoreCache struct{}

// Call appends the cache-busting env option to the accumulated options.
func (ig IgnoreCache) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llb.AddEnv("HLB_IGNORE_CACHE", identity.NewID())))
}
// Network selects the exec's network mode ("unset", "host" or "none").
// Host networking additionally requires the network.host entitlement.
type Network struct{}

// Call validates the mode string, appends the corresponding pb.NetMode
// option and, for "host", the required entitlement.
func (n Network) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, mode string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	var netMode pb.NetMode
	switch mode {
	case "unset":
		netMode = pb.NetMode_UNSET
	case "host":
		netMode = pb.NetMode_HOST
		retOpts = append(retOpts, solver.WithEntitlement(entitlements.EntitlementNetworkHost))
	case "none":
		netMode = pb.NetMode_NONE
	default:
		return errdefs.WithInvalidNetworkMode(Arg(ctx, 0), mode, []string{"unset", "host", "none"})
	}
	return ret.Set(append(retOpts, llbutil.WithNetwork(netMode)))
}
// Security selects the exec's security mode ("sandbox" or "insecure").
// Insecure mode additionally requires the security.insecure entitlement.
type Security struct{}

// Call validates the mode string, appends the corresponding pb.SecurityMode
// option and, for "insecure", the required entitlement.
func (s Security) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, mode string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	var securityMode pb.SecurityMode
	switch mode {
	case "sandbox":
		securityMode = pb.SecurityMode_SANDBOX
	case "insecure":
		securityMode = pb.SecurityMode_INSECURE
		retOpts = append(retOpts, solver.WithEntitlement(entitlements.EntitlementSecurityInsecure))
	default:
		return errdefs.WithInvalidSecurityMode(Arg(ctx, 0), mode, []string{"sandbox", "insecure"})
	}
	return ret.Set(append(retOpts, llbutil.WithSecurity(securityMode)))
}
// Host appends an /etc/hosts entry (llbutil.WithExtraHost) for an exec.
type Host struct{}

// Call appends the extra-host option to the accumulated options.
func (s Host) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, host string, address net.IP) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithExtraHost(host, address)))
}
// SSH mounts an SSH agent socket into the exec, forwarding the local agent
// (or the given local key paths) via a sockproxy agent.
type SSH struct{}

// Call collects llb.SSHOption values and local key paths from opts, derives
// a stable ID from the sorted paths, registers the agent config, and appends
// the SSH socket mount. The socket is created with mode 0600.
func (s SSH) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	var (
		sshOpts    = []llb.SSHOption{llbutil.WithChmod(0600)}
		localPaths []string
	)
	for _, opt := range opts {
		switch o := opt.(type) {
		case llb.SSHOption:
			sshOpts = append(sshOpts, o)
		case string:
			localPaths = append(localPaths, o)
		}
	}
	// Sort so the derived ID is deterministic regardless of option order.
	sort.Strings(localPaths)
	id := llbutil.SSHID(localPaths...)
	sshOpts = append(sshOpts, llbutil.WithID(id))
	retOpts = append(retOpts, llbutil.WithAgentConfig(id, sockproxy.AgentConfig{
		ID:    id,
		SSH:   true,
		Paths: localPaths,
	}))
	// Empty mountpoint lets the runtime pick the default SSH socket path.
	return ret.Set(append(retOpts, llbutil.WithSSHSocket("", sshOpts...)))
}
// Forward forwards a local endpoint into the exec as a unix socket at dest.
// src may be a local unix socket ("unix://path") or a dialable network
// address; in the latter case a temporary local proxy socket is created and
// a sockproxy goroutine relays traffic for the duration of the solve.
type Forward struct{}

// Call resolves or creates the local socket, registers the agent config for
// it, and appends the socket mount at dest.
func (f Forward) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, src *url.URL, dest string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	var (
		id        string
		localPath string
	)
	if src.Scheme == "unix" {
		// Existing local unix socket: resolve relative to the module dir and
		// verify its parent directory exists.
		localPath, err = parser.ResolvePath(ModuleDir(ctx), src.Path)
		if err != nil {
			return Arg(ctx, 0).WithError(err)
		}
		_, err = os.Stat(filepath.Dir(localPath))
		if err != nil {
			return Arg(ctx, 0).WithError(err)
		}
		id = digest.FromString(localPath).String()
	} else {
		// Network endpoint: listen on a temp unix socket and proxy to it.
		dialerFunc := func() (net.Conn, error) {
			var dialer net.Dialer
			conn, err := dialer.DialContext(ctx, src.Scheme, src.Host)
			if err != nil {
				return nil, Arg(ctx, 0).WithError(fmt.Errorf("cannot dial %s", src))
			}
			return conn, err
		}
		dir, err := ioutil.TempDir("", "hlb-forward")
		if err != nil {
			return errors.Wrap(err, "failed to create tmp dir for forwarding sock")
		}
		localPath = filepath.Join(dir, "proxy.sock")
		id = digest.FromString(src.String()).String()
		var lc net.ListenConfig
		l, err := lc.Listen(ctx, "unix", localPath)
		if err != nil {
			return errors.Wrap(err, "failed to listen on forwarding sock")
		}
		var g errgroup.Group
		// On solve completion: tear down the listener (which unblocks the
		// proxy goroutine), remove the temp dir, and surface proxy errors.
		retOpts = append(retOpts, solver.WithCallback(func(ctx context.Context, resp *client.SolveResponse) error {
			defer os.RemoveAll(dir)
			err := l.Close()
			if err != nil && !isClosedNetworkError(err) {
				return errors.Wrap(err, "failed to close listener")
			}
			return g.Wait()
		}))
		g.Go(func() error {
			err := sockproxy.Run(l, dialerFunc)
			if err != nil && !isClosedNetworkError(err) {
				return err
			}
			return nil
		})
	}
	retOpts = append(retOpts, llbutil.WithAgentConfig(id, sockproxy.AgentConfig{
		ID:    id,
		SSH:   false,
		Paths: []string{localPath},
	}))
	return ret.Set(append(retOpts, llbutil.WithSSHSocket(dest, llbutil.WithID(id))))
}
func isClosedNetworkError(err error) bool {
// ErrNetClosing is hidden in an internal golang package so we can't use
// errors.Is: https://golang.org/src/internal/poll/fd.go
return strings.Contains(err.Error(), "use of closed network connection")
}
// Secret mounts the files under a local path as BuildKit secrets beneath
// mountpoint, preserving the relative directory structure.
type Secret struct{}

// Call filters the local files by include/exclude patterns gathered from
// opts, then appends one secret mount plus its secrets-provider source per
// matching file.
func (s Secret) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, localPath, mountpoint string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	var (
		secretOpts      []llb.SecretOption
		includePatterns []string
		excludePatterns []string
	)
	for _, opt := range opts {
		switch o := opt.(type) {
		case llb.SecretOption:
			secretOpts = append(secretOpts, o)
		case *SecretIncludePatterns:
			includePatterns = append(includePatterns, o.Patterns...)
		case *SecretExcludePatterns:
			excludePatterns = append(excludePatterns, o.Patterns...)
		}
	}
	localPath, err = parser.ResolvePath(ModuleDir(ctx), localPath)
	if err != nil {
		return err
	}
	localFiles, err := llbutil.FilterLocalFiles(localPath, includePatterns, excludePatterns)
	if err != nil {
		return err
	}
	for _, localFile := range localFiles {
		// Mount each file at mountpoint + its path relative to localPath
		// (shadows the outer mountpoint deliberately).
		mountpoint := filepath.Join(
			mountpoint,
			strings.TrimPrefix(localFile, localPath),
		)
		id := llbutil.SecretID(localFile)
		retOpts = append(retOpts,
			llbutil.WithSecret(
				mountpoint,
				append(secretOpts, llbutil.WithID(id))...,
			),
			llbutil.WithSecretSource(id, secretsprovider.Source{
				ID:       id,
				FilePath: localFile,
			}),
		)
	}
	return ret.Set(retOpts)
}
// Mount records a filesystem mounted into an exec. Bind/Image are populated
// when the mount target itself is the binding being solved.
type Mount struct {
	Bind  string
	Image *solver.ImageSpec
}

// Call appends a mount of input at mountpoint, propagating the input's
// solve/session options. When the current binding is the mount target, a
// Mount marker is also appended so the target can be materialized; cache
// mounts are rejected in that case since they have no solvable contents.
func (m Mount) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, input Filesystem, mountpoint string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	// Detect whether a Cache option was supplied alongside this mount.
	var cache *Cache
	for _, opt := range opts {
		var ok bool
		cache, ok = opt.(*Cache)
		if ok {
			break
		}
	}
	if Binding(ctx).Binds() == "target" {
		if cache != nil {
			return errdefs.WithBindCacheMount(Binding(ctx).Bind.As, cache)
		}
		retOpts = append(retOpts, &Mount{Bind: mountpoint, Image: input.Image})
	}
	retOpts = append(retOpts, &llbutil.MountRunOption{
		Source: input.State,
		Target: mountpoint,
		Opts:   opts,
	})
	for _, opt := range input.SolveOpts {
		retOpts = append(retOpts, opt)
	}
	for _, opt := range input.SessionOpts {
		retOpts = append(retOpts, opt)
	}
	return ret.Set(retOpts)
}
// MountTarget appends llbutil.WithTarget(target) for a mount.
type MountTarget struct{}

// Call appends the mount-target option to the accumulated options.
func (mt MountTarget) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, target string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithTarget(target)))
}

// UID appends llbutil.WithUID(uid).
type UID struct{}

// Call appends the uid option to the accumulated options.
func (u UID) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, uid int) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithUID(uid)))
}

// GID appends llbutil.WithGID(gid).
type GID struct{}

// Call appends the gid option to the accumulated options.
func (g GID) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, gid int) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithGID(gid)))
}

// LocalPaths appends local filesystem paths, each resolved relative to the
// current module directory.
type LocalPaths struct{}

// Call resolves each path against ModuleDir(ctx) and appends it.
func (lp LocalPaths) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, localPaths ...string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	for _, localPath := range localPaths {
		resolvedPath, err := parser.ResolvePath(ModuleDir(ctx), localPath)
		if err != nil {
			return err
		}
		retOpts = append(retOpts, resolvedPath)
	}
	return ret.Set(retOpts)
}
// SecretIncludePatterns carries include patterns consumed by Secret.Call
// when filtering local files.
type SecretIncludePatterns struct {
	Patterns []string
}

// Call appends a SecretIncludePatterns carrying the given patterns.
func (iip SecretIncludePatterns) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, patterns ...string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, &SecretIncludePatterns{patterns}))
}

// SecretExcludePatterns carries exclude patterns consumed by Secret.Call
// when filtering local files.
type SecretExcludePatterns struct {
	Patterns []string
}

// Call appends a SecretExcludePatterns carrying the given patterns.
func (sep SecretExcludePatterns) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, patterns ...string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, &SecretExcludePatterns{patterns}))
}

// CopyIncludePatterns appends llbutil.WithIncludePatterns for a copy.
type CopyIncludePatterns struct{}

// Call appends the copy include-patterns option to the accumulated options.
func (iip CopyIncludePatterns) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, patterns ...string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithIncludePatterns(patterns)))
}

// CopyExcludePatterns appends llbutil.WithExcludePatterns for a copy.
type CopyExcludePatterns struct{}

// Call appends the copy exclude-patterns option to the accumulated options.
func (sep CopyExcludePatterns) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, patterns ...string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithExcludePatterns(patterns)))
}

// Readonly appends llbutil.WithReadonlyMount() for a mount.
type Readonly struct{}

// Call appends the readonly-mount option to the accumulated options.
func (r Readonly) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithReadonlyMount()))
}
// Tmpfs appends llbutil.WithTmpfs(): mount a tmpfs at the target.
type Tmpfs struct{}

// Call appends the tmpfs option to the accumulated options.
func (t Tmpfs) Call(ctx context.Context, cln *client.Client, ret Register, opts Option) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithTmpfs()))
}

// SourcePath appends llbutil.WithSourcePath(path): mount only a sub-path of
// the source filesystem.
type SourcePath struct{}

// Call appends the source-path option to the accumulated options.
func (sp SourcePath) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, path string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, llbutil.WithSourcePath(path)))
}

// Cache turns a mount into a persistent cache mount. The embedded parser
// node records where the option was declared, for error reporting.
type Cache struct {
	parser.Node
}

// Call validates the sharing mode ("shared", "private" or "locked") and
// appends both a Cache marker (for diagnostics) and the persistent cache
// directory option.
func (c Cache) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, id, mode string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	var sharing llb.CacheMountSharingMode
	switch mode {
	case "shared":
		sharing = llb.CacheMountShared
	case "private":
		sharing = llb.CacheMountPrivate
	case "locked":
		sharing = llb.CacheMountLocked
	default:
		return errdefs.WithInvalidSharingMode(Arg(ctx, 1), mode, []string{"shared", "private", "locked"})
	}
	retOpts = append(retOpts, &Cache{ProgramCounter(ctx)}, llbutil.WithPersistentCacheDir(id, sharing))
	return ret.Set(retOpts)
}

// Platform appends a target platform (OS + architecture) option.
// NOTE(review): the parameter named `os` shadows the imported os package
// inside this method; harmless here but worth renaming.
type Platform struct{}

// Call appends a specs.Platform built from the given OS and architecture.
func (p Platform) Call(ctx context.Context, cln *client.Client, ret Register, opts Option, os, arch string) error {
	retOpts, err := ret.Option()
	if err != nil {
		return err
	}
	return ret.Set(append(retOpts, &specs.Platform{
		OS:           os,
		Architecture: arch,
	}))
}
|
package neo_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/yggie/github-data-challenge-2014/models"
"github.com/yggie/github-data-challenge-2014/neo"
)
// Ginkgo spec for the neo persistence layer: persisting a PushEvent should
// create the event, repository, user and language nodes.
//
// NOTE(review): neo.Clear runs during spec-tree construction (when this file
// is loaded), not inside BeforeEach/BeforeSuite, so the store is cleared only
// once for the whole file — confirm this is intentional.
var _ = Describe("Persist", func() {
	neo.Clear(neo.ALL)
	var err error
	Context("PersistPushEvent", func() {
		var pushEvent models.PushEvent
		// Build a fully-populated fixture and persist it before each example.
		BeforeEach(func() {
			pushEvent = models.PushEvent{
				Size:         3,
				DistinctSize: 2,
				PushId:       123123123,
				Commits:      []*models.Commit{},
				Event: &models.Event{
					Id:        1,
					EventType: "PushEvent",
					CreatedAt: "Today",
					User: &models.User{
						Id:         1,
						Login:      "samuex",
						GravatarId: "abcdef",
						AvatarUrl:  "http://example.com",
					},
					Repository: &models.Repository{
						Id:   1,
						Name: "my-repo",
						Url:  "http://github.com/randomguy/my-repo",
						Languages: models.Languages{
							models.C:     3000,
							models.RUBY:  6000,
							models.SHELL: 3000,
						},
					},
				},
			}
			err = neo.PersistPushEvent(&pushEvent)
		})
		It("should not have any errors", func() {
			Expect(err).To(BeNil())
		})
		It("should persist the event object", func() {
			Expect(neo.Count(neo.EVENTS)).To(Equal(1))
		})
		It("should persist the repository object", func() {
			Expect(neo.Count(neo.REPOSITORIES)).To(Equal(1))
		})
		It("should persist the user object", func() {
			Expect(neo.Count(neo.USERS)).To(Equal(1))
		})
		// Three language entries in the fixture -> three persisted languages.
		It("should persist the languages object", func() {
			Expect(neo.Count(neo.LANGUAGES)).To(Equal(3))
		})
	})
})
|
package main
import (
"context"
"os"
"github.com/rodrigo-brito/ninjabot"
"github.com/rodrigo-brito/ninjabot/example"
"github.com/rodrigo-brito/ninjabot/pkg/exchange"
"github.com/rodrigo-brito/ninjabot/pkg/model"
"github.com/rodrigo-brito/ninjabot/pkg/notification"
"github.com/rodrigo-brito/ninjabot/pkg/storage"
log "github.com/sirupsen/logrus"
)
// main wires up a paper-trading ninjabot: realtime candles from Binance, a
// simulated USDT wallet, SQLite-backed storage, and Telegram notifications.
// Credentials are read from TELEGRAM_* environment variables.
func main() {
	var (
		ctx             = context.Background()
		telegramKey     = os.Getenv("TELEGRAM_KEY")
		telegramID      = os.Getenv("TELEGRAM_ID")
		telegramChannel = os.Getenv("TELEGRAM_CHANNEL")
	)
	settings := model.Settings{
		Pairs: []string{
			"BTCUSDT",
			"ETHUSDT",
			"BNBUSDT",
			"LTCUSDT",
		},
	}
	// Use binance for realtime data feed
	binance, err := exchange.NewBinance(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// NOTE(review): this variable shadows the imported `storage` package from
	// here on; works because the RHS is evaluated first, but worth renaming.
	storage, err := storage.FromFile("backtest.db")
	if err != nil {
		log.Fatal(err)
	}
	notifier := notification.NewTelegram(telegramID, telegramKey, telegramChannel)
	// Paper wallet simulates order execution with a 0.1% fee on both sides
	// and a starting balance of 10000 USDT, fed by the live Binance stream.
	paperWallet := exchange.NewPaperWallet(
		ctx,
		"USDT",
		exchange.WithPaperFee(0.001, 0.001),
		exchange.WithPaperAsset("USDT", 10000),
		exchange.WithDataFeed(binance),
	)
	strategy := new(example.MyStrategy)
	bot, err := ninjabot.NewBot(
		ctx,
		settings,
		paperWallet,
		strategy,
		ninjabot.WithStorage(storage),
		ninjabot.WithNotifier(notifier),
		ninjabot.WithCandleSubscription(paperWallet),
	)
	if err != nil {
		log.Fatalln(err)
	}
	// Run blocks until the data feed ends or the bot fails.
	err = bot.Run(ctx)
	if err != nil {
		log.Fatalln(err)
	}
}
|
package event
import (
"subd/models"
"time"
)
// Repository abstracts persistent storage for the forum subsystem: users,
// forums, threads, posts and votes.
// NOTE(review): per-method semantics below are inferred from names and
// signatures — confirm against the concrete implementation.
type Repository interface {
	// Existence checks.
	CheckUser(user string) (bool, error)
	CheckUserByEmail(email string) (bool, error)
	CheckUserByNicknameOrEmail(nickname string, email string) (bool, error)
	// Forums.
	AddNewForum(newForum *models.Forum) (error, bool)
	GetForumCounts(slug string) (uint64, uint64, error)
	GetForum(slug string) (models.Forum, int)
	CheckForum(slug string) (bool, error)
	// Threads and posts lookups.
	CheckThread(slug string) (bool, error)
	CheckThreadById(id int) (bool, error)
	CheckPost(id int) (bool, error)
	GetThread(slug string) (models.Thread, error)
	GetThreadStatus(slug string) (models.Thread, int)
	GetThreadById(id int) (models.Thread, int)
	GetPost(id int) (models.Post, int)
	GetUser(name string) (models.User, int)
	AddNewThread(newThread models.Thread) (uint64, error)
	GetForumUsers(slug string, limit int, since string, desc bool) (models.Users, error)
	AddForumUsers(slug string, author string) error
	GetForumThreads(slug string, limit int, since string, desc bool) (models.Threads, error)
	EditMessage(id int, message string) error
	// Service operations.
	Clear() error
	Status() (models.Status, error)
	// Users.
	CreateUser(nickname string, user models.User) error
	GetUserByNicknameOrEmail(nickname string, email string) (models.Users, error)
	UpdateUser(nickname string, user models.User) error
	// Denormalized counters.
	IncrementThreads(forum string) error
	IncrementPosts(forum string) error
	// Posts and thread updates.
	AddPost(newPosts []*models.Post, thread models.Thread, now time.Time) int
	UpdateThread(slugOrId string, thread models.Thread) (models.Thread, error)
	UpdateThreadById(id int, thread models.Thread) (models.Thread, error)
	// Votes.
	CheckVote(id int, nickname string) (bool, error)
	AddVote(id int, vote models.Vote) error
	UpdateVote(id int, vote models.Vote) error
	GetValueVote(id int, nickname string) (int, error)
	// Post listings in the various sort orders (flat / tree / parent-tree,
	// ascending or descending, optionally starting after `since`).
	GetPostsFlat(id int, limit int, since int) (models.Posts, error)
	GetPostsFlatDesc(id int, limit int, since int) (models.Posts, error)
	GetPostsTree(id int, limit int) (models.Posts, error)
	GetPostsTreeDesc(id int, limit int) (models.Posts, error)
	GetPostsTreeSince(id int, limit int, since int) (models.Posts, error)
	GetPostsTreeSinceDesc(id int, limit int, since int) (models.Posts, error)
	GetPostsParentTree(id int, limit int) (models.Posts, error)
	GetPostsParentTreeDesc(id int, limit int) (models.Posts, error)
	GetPostsParentTreeSince(id int, limit int, since int) (models.Posts, error)
	GetPostsParentTreeSinceDesc(id int, limit int, since int) (models.Posts, error)
	GetPostNull(id int) (models.PostNullMessage, int)
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package terraform
// Shared names and descriptions for the Terraform writeConnectionSecretToRef
// field; the Type value is a markdown link, so these presumably feed generated
// markdown documentation — confirm against callers.
const (
	// TerraformWriteConnectionSecretToRefName is the name for Terraform WriteConnectionSecretToRef
	TerraformWriteConnectionSecretToRefName = "writeConnectionSecretToRef"
	// TerraformWriteConnectionSecretToRefType is the type for Terraform WriteConnectionSecretToRef
	TerraformWriteConnectionSecretToRefType = "[writeConnectionSecretToRef](#writeConnectionSecretToRef)"
	// TerraformWriteConnectionSecretToRefDescription is the description for Terraform WriteConnectionSecretToRef
	TerraformWriteConnectionSecretToRefDescription = "The secret which the cloud resource connection will be written to"
	// TerraformSecretNameDescription is the description for the name for Terraform Secret
	TerraformSecretNameDescription = "The secret name which the cloud resource connection will be written to"
	// TerraformSecretNamespaceDescription is the description for the namespace for Terraform Secret
	TerraformSecretNamespaceDescription = "The secret namespace which the cloud resource connection will be written to"
)
|
package apiauth
import (
"bytes"
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"time"
)
// Finder resolves a client's accessID (plus the incoming request, for any
// additional context) to the secret key associated with that accessID.
// It may also return an arbitrary extra result (e.g. a user record), which
// Authentic hands back to its caller on success.
type Finder func(accessID string, req *http.Request) (secretKey string, result interface{}, err error)
// Sentinel errors returned by Authentic for each failed validation step.
var errorMD5 = errors.New("MD5 mismatch occurred")                // Content-Md5 header does not match the body
var errorSignMismatch = errors.New("Signature Mismatch occurred") // HMAC signature check failed
var errorReqOld = errors.New("Request too old")                   // Date header missing, unparseable, or stale
var errorAuthHeader = errors.New("Malformed Auth Header")         // Authorization header could not be parsed

// gmt is the Etc/GMT location used when stamping the Date header; set in init.
var gmt *time.Location
// init loads the Etc/GMT location used for Date headers. A missing timezone
// database entry is unrecoverable for this package, hence the panic at startup.
func init() {
	loc, err := time.LoadLocation("Etc/GMT")
	if err != nil {
		log.Panic("apiauth: Can not load timezone Etc/GMT: ", err)
	}
	gmt = loc
}
// maxTimeOffset is the relaxation allowed for out-of-sync server clocks when
// validating the request's Date header (applied on both sides of the window).
const maxTimeOffset time.Duration = 30 * time.Second
// Authentic determines whether the request is authentic, given a Finder that
// resolves the client's accessID to its secret key. It returns whatever extra
// result the Finder produced on success, or a non-nil error naming the first
// failed check. Checks, in order:
//  1. the Date header is present and inside the accepted window;
//  2. the Authorization header parses as "<scheme> accessID:signature";
//  3. the Content-Md5 header matches the request body (POST/PUT only);
//  4. the transmitted signature matches the recomputed HMAC signature.
func Authentic(request *http.Request, f Finder) (interface{}, error) {
	if requestTooOld(request) {
		return nil, errorReqOld
	}
	accessID, hmacHash, err := parseAuthHeader(request)
	if err != nil {
		return nil, err
	}
	if err = validateMD5(request); err != nil {
		return nil, err
	}
	secretKey, finderInfo, err := f(accessID, request)
	if err != nil {
		return nil, err
	}
	// Compare signatures in constant time so the check does not leak timing
	// information about how many leading bytes of the signature matched.
	if !hmac.Equal([]byte(signRequest(request, secretKey)), []byte(hmacHash)) {
		return nil, errorSignMismatch
	}
	return finderInfo, nil
}
// Sign signs an HTTP request on behalf of the client identified by accessID
// and secretKey, mutating the request by setting the Content-Md5, Date and
// Authorization headers. The only error source is reading the body for MD5.
func Sign(request *http.Request, accessID, secretKey string) error {
	if err := setMD5(request); err != nil {
		return err
	}
	setDate(request)
	setAuthorizationHeader(request, accessID, secretKey)
	return nil
}
// requestTooOld reports whether the request's Date header is missing,
// unparseable, or outside the accepted window: no more than maxTimeOffset in
// the future and no more than 900s+maxTimeOffset in the past (clock-skew
// tolerance on both sides).
func requestTooOld(request *http.Request) bool {
	headerTime, err := time.Parse(time.RFC1123, getDate(request))
	if err != nil {
		// No date or a malformed one counts as too old.
		return true
	}
	age := time.Since(headerTime)
	return age < -maxTimeOffset || age > 900*time.Second+maxTimeOffset
}
// signRequest computes the base64-encoded HMAC-SHA1 digest of the request's
// canonical string under secretKey. This is the value carried in the
// Authorization header.
func signRequest(request *http.Request, secretKey string) string {
	mac := hmac.New(sha1.New, []byte(secretKey))
	mac.Write([]byte(evaluateCanonicalString(request)))
	digest := mac.Sum(nil)
	return base64.StdEncoding.EncodeToString(digest)
}
// parseAuthHeader extracts the client access ID and the transmitted signature
// from the Authorization header, which must have the exact shape
// "<scheme> <accessID>:<signature>" (one space, one colon). Anything else
// yields errorAuthHeader.
func parseAuthHeader(request *http.Request) (string, string, error) {
	header := request.Header.Get("Authorization")
	if header == "" {
		return "", "", errorAuthHeader
	}
	bySpace := strings.Split(header, " ")
	if len(bySpace) != 2 {
		return "", "", errorAuthHeader
	}
	byColon := strings.Split(bySpace[1], ":")
	if len(byColon) != 2 {
		return "", "", errorAuthHeader
	}
	accessID, signature := byColon[0], byColon[1]
	return accessID, signature, nil
}
func evaluateCanonicalString(request *http.Request) string {
var buffer bytes.Buffer
buffer.WriteString(request.Method)
buffer.WriteString(",")
buffer.WriteString(getContentType(request))
buffer.WriteString(",")
buffer.WriteString(getMD5(request))
buffer.WriteString(",")
buffer.WriteString(getPath(request))
buffer.WriteString(",")
buffer.WriteString(getDate(request))
canStr := buffer.String()
return canStr
}
// getContentType returns the request's Content-Type header ("" when unset).
func getContentType(request *http.Request) string {
	return request.Header.Get("Content-Type")
}
// getDate returns the request's Date header ("" when unset).
func getDate(request *http.Request) string {
	return request.Header.Get("Date")
}
// getPath returns the request URI (path plus any query string), which is the
// path component used in the canonical signing string.
func getPath(request *http.Request) string {
	return request.URL.RequestURI()
}
// getMD5 returns the request's Content-Md5 header ("" when unset).
func getMD5(request *http.Request) string {
	return request.Header.Get("Content-Md5")
}
func evaluateMD5Hash(request *http.Request) (string, error) {
if request.Method == "POST" || request.Method == "PUT" {
bodyBytes, err := ioutil.ReadAll(request.Body)
if err != nil {
return "", err
}
// Restore the io.ReadCloser to its original state
request.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
ctMd5 := md5.New()
ctMd5.Write(bodyBytes)
return base64.StdEncoding.EncodeToString(ctMd5.Sum(nil)), nil
}
return "", nil
}
// validateMD5 recomputes the body digest and compares it with the Content-Md5
// header, returning errorMD5 on any mismatch (including an absent header when
// a digest is expected, or vice versa).
func validateMD5(request *http.Request) error {
	computed, err := evaluateMD5Hash(request)
	if err != nil {
		return err
	}
	if computed == getMD5(request) {
		return nil
	}
	return errorMD5
}
// setDate stamps the request with the current time in GMT, RFC1123 format —
// the format requestTooOld parses on the server side.
func setDate(request *http.Request) {
	request.Header.Add("Date", time.Now().In(gmt).Format(time.RFC1123))
}
// setMD5 computes the request body's digest and stores it in the Content-Md5
// header. The only failure mode is an error while reading the body.
func setMD5(request *http.Request) error {
	calculated, err := evaluateMD5Hash(request)
	if err == nil {
		request.Header.Set("Content-Md5", calculated)
	}
	return err
}
// setAuthorizationHeader signs the request and attaches the result as an
// "APIAuth accessID:signature" Authorization header.
func setAuthorizationHeader(request *http.Request, accessID, secretKey string) {
	signature := signRequest(request, secretKey)
	request.Header.Add("Authorization", fmt.Sprintf("APIAuth %s:%s", accessID, signature))
}
|
package lcd
import (
"net/http"
"github.com/gorilla/mux"
"github.com/irisnet/irishub/client/context"
"github.com/irisnet/irishub/codec"
)
// registerQueryRoutes wires the rand module's query endpoints onto r.
func registerQueryRoutes(cliCtx context.CLIContext, r *mux.Router, cdc *codec.Codec) {
	// Get rand by the request id
	r.HandleFunc(
		"/rand/rands/{request-id}",
		queryRandHandlerFn(cliCtx, cdc),
	).Methods("GET")
	// Get the pending rand requests from queue
	r.HandleFunc(
		"/rand/queue",
		queryQueueHandlerFn(cliCtx, cdc),
	).Methods("GET")
}
// queryRandHandlerFn performs rand query by the request id, delegating to the
// "custom/rand/rand/" query route.
func queryRandHandlerFn(cliCtx context.CLIContext, cdc *codec.Codec) http.HandlerFunc {
	return queryRand(cliCtx, cdc, "custom/rand/rand/")
}
// queryQueueHandlerFn performs rand request queue query by an optional height,
// delegating to the "custom/rand/queue" query route.
func queryQueueHandlerFn(cliCtx context.CLIContext, cdc *codec.Codec) http.HandlerFunc {
	return queryQueue(cliCtx, cdc, "custom/rand/queue")
}
|
package redcode
import (
"testing"
"github.com/go-test/deep"
)
// TestSanity checks that a basic MOV with an immediate A operand and a
// relative B operand parses identically regardless of opcode case, leading
// whitespace, and trailing newline.
func TestSanity(t *testing.T) {
	checkInstructions(t, " MOV #7, -1\n", Instruction{
		Opcode: OpMov,
		A:      Operand{Mode: Immediate, Expression: constNum(7)},
		B:      Operand{Mode: Relative, Expression: constNum(-1)},
	})
	checkInstructions(t, "mov #7, -1", Instruction{
		Opcode: OpMov,
		A:      Operand{Mode: Immediate, Expression: constNum(7)},
		B:      Operand{Mode: Relative, Expression: constNum(-1)},
	})
}
// TestComments checks that comments are ignored: a comment-only line yields
// no instructions, a trailing comment does not disturb the instruction, and
// a comment line between instructions separates them cleanly.
func TestComments(t *testing.T) {
	checkInstructions(t, " ; comment")
	checkInstructions(t, "mov #7, -1 ; comment", Instruction{
		Opcode: OpMov,
		A:      Operand{Mode: Immediate, Expression: constNum(7)},
		B:      Operand{Mode: Relative, Expression: constNum(-1)},
	})
	checkInstructions(t, "mov #7, -1\n; comment\nmov #8, 1", Instruction{
		Opcode: OpMov,
		A:      Operand{Mode: Immediate, Expression: constNum(7)},
		B:      Operand{Mode: Relative, Expression: constNum(-1)},
	}, Instruction{
		Opcode: OpMov,
		A:      Operand{Mode: Immediate, Expression: constNum(8)},
		B:      Operand{Mode: Relative, Expression: constNum(1)},
	})
}
// TestDirectives checks that ";name" and ";author" comment directives are
// collected into the Directives map (values trimmed of trailing whitespace).
func TestDirectives(t *testing.T) {
	checkDirectives(t, ";name Imp \n; author A.K. Dewdney", Directives{
		"name":   "Imp",
		"author": "A.K. Dewdney",
	})
}
// TestLabel checks that a label preceding the opcode is recorded on the
// parsed instruction.
func TestLabel(t *testing.T) {
	checkInstructions(t, "imp mov 0, 1", Instruction{
		Label:  "imp",
		Opcode: OpMov,
		A:      Operand{Mode: Relative, Expression: constNum(0)},
		B:      Operand{Mode: Relative, Expression: constNum(1)},
	})
}
// TestImmediate checks that a '#' prefix parses as Immediate addressing mode.
func TestImmediate(t *testing.T) {
	checkInstructions(t, "mov #0, 1", Instruction{
		Opcode: OpMov,
		A:      Operand{Mode: Immediate, Expression: constNum(0)},
		B:      Operand{Mode: Relative, Expression: constNum(1)},
	})
}
// TestDirect checks that a '$' prefix parses with Mode Relative — i.e. this
// implementation treats direct operands the same as bare relative ones.
func TestDirect(t *testing.T) {
	checkInstructions(t, "mov $0, 1", Instruction{
		Opcode: OpMov,
		A:      Operand{Mode: Relative, Expression: constNum(0)},
		B:      Operand{Mode: Relative, Expression: constNum(1)},
	})
}
// TestIndirect checks that an '@' prefix parses as Indirect addressing mode.
func TestIndirect(t *testing.T) {
	checkInstructions(t, "mov @0, 1", Instruction{
		Opcode: OpMov,
		A:      Operand{Mode: Indirect, Expression: constNum(0)},
		B:      Operand{Mode: Relative, Expression: constNum(1)},
	})
}
// TestDecrement checks that a '<' prefix parses as DecrementIndirect mode.
func TestDecrement(t *testing.T) {
	checkInstructions(t, "mov <0, 1", Instruction{
		Opcode: OpMov,
		A:      Operand{Mode: DecrementIndirect, Expression: constNum(0)},
		B:      Operand{Mode: Relative, Expression: constNum(1)},
	})
}
// TestLabelRef checks that an operand naming a label parses into a label
// expression rather than a number.
func TestLabelRef(t *testing.T) {
	checkInstructions(t, "imp mov imp, 1", Instruction{
		Label:  "imp",
		Opcode: OpMov,
		A:      Operand{Mode: Relative, Expression: label("imp")},
		B:      Operand{Mode: Relative, Expression: constNum(1)},
	})
}
// TestExpr checks that an arithmetic operand ("imp + 1") parses into an
// Expression tree with Add, a label on the left and a number on the right.
func TestExpr(t *testing.T) {
	checkInstructions(t, "imp mov imp, imp + 1", Instruction{
		Label:  "imp",
		Opcode: OpMov,
		A:      Operand{Mode: Relative, Expression: label("imp")},
		B:      Operand{Mode: Relative, Expression: &Expression{Operation: Add, Left: label("imp"), Right: constNum(1)}},
	})
}
// TestDjn checks that the DJN opcode parses with two relative operands.
func TestDjn(t *testing.T) {
	checkInstructions(t, "djn 1, 2", Instruction{
		Opcode: OpDjn,
		A:      Operand{Mode: Relative, Expression: constNum(1)},
		B:      Operand{Mode: Relative, Expression: constNum(2)},
	})
}
// TestSpl checks that the single-operand SPL opcode parses (B left zero).
func TestSpl(t *testing.T) {
	checkInstructions(t, "spl 2", Instruction{
		Opcode: OpSpl,
		A:      Operand{Mode: Relative, Expression: constNum(2)},
	})
}
// TestDat checks that DAT with one operand places it in the B field.
func TestDat(t *testing.T) {
	checkInstructions(t, "dat #2", Instruction{
		Opcode: OpDat,
		B:      Operand{Mode: Immediate, Expression: constNum(2)},
	})
}
// TestEnd checks that END produces an OpEnd instruction and that anything
// after it (here, deliberate garbage) is not parsed.
func TestEnd(t *testing.T) {
	checkInstructions(t, "spl 2\nend blah\ngargle gargle gargle", Instruction{
		Opcode: OpSpl,
		A:      Operand{Mode: Relative, Expression: constNum(2)},
	}, Instruction{
		Opcode: OpEnd,
		A:      Operand{Mode: Relative, Expression: label("blah")},
	})
}
// checkInstructions parses text and fails the test unless the parsed
// instruction list deep-equals the expected instructions.
func checkInstructions(t *testing.T, text string, instructions ...Instruction) {
	t.Helper() // report failures at the caller's line
	lines, _, err := ParseString(text, "string")
	if err != nil {
		// Fixed typo: "prsing" -> "parsing".
		t.Errorf("Error parsing '%s': %s", text, err)
		return
	}
	if diff := deep.Equal(lines, instructions); diff != nil {
		t.Errorf("Different parse of '%s' %+v", text, diff)
		return
	}
}
// checkDirectives parses text and fails the test unless the collected
// directives deep-equal the expected values.
func checkDirectives(t *testing.T, text string, values Directives) {
	t.Helper() // report failures at the caller's line
	_, directives, err := ParseString(text, "string")
	if err != nil {
		// Fixed typo: "prsing" -> "parsing".
		t.Errorf("Error parsing '%s': %s", text, err)
		return
	}
	if diff := deep.Equal(directives, values); diff != nil {
		t.Errorf("Different parse of '%s' %+v", text, diff)
		return
	}
}
// constNum builds an Expression holding the literal number n.
func constNum(n int) *Expression {
	e := Expression{Operation: Number, Number: n}
	return &e
}
// label builds an Expression referring to the named label l.
func label(l string) *Expression {
	e := Expression{Operation: Label, Label: l}
	return &e
}
|
package main
import (
"fmt"
)
// main demonstrates twoSum on a small example and prints the index pair.
func main() {
	fmt.Println(twoSum([]int{3, 3, 4}, 6))
}
// twoSum returns the indices of the two entries of nums summing to target,
// or nil when no such pair exists. Single pass: for each value we record the
// complement still needed, keyed to the index that needs it; hitting a key
// means the current value completes an earlier one.
func twoSum(nums []int, target int) []int {
	wanted := make(map[int]int, len(nums))
	for idx, num := range nums {
		if first, ok := wanted[num]; ok {
			return []int{first, idx}
		}
		wanted[target-num] = idx
	}
	return nil
}
|
package main
import "fmt"
// main runs countCompleteComponents on two sample graphs and prints the
// resulting counts.
func main() {
	fmt.Println(countCompleteComponents(3, [][]int{
		{1, 0},
		{2, 1},
	}))
	fmt.Println(countCompleteComponents(6, [][]int{
		{0, 1},
		{0, 2},
		{1, 2},
		{3, 4},
	}))
}
// countCompleteComponents returns the number of connected components of the
// undirected graph on n vertices (labelled 0..n-1) that are complete, i.e.
// in which every pair of distinct vertices is joined by an edge.
//
// The previous version collected only the direct neighbours of each start
// vertex (each one twice, and never the start vertex itself), so it tested
// grid[j][j] for every neighbour and rejected every component containing an
// edge, while also missing vertices more than one hop away. This version
// discovers each full component by BFS; a component with k vertices is
// complete exactly when the sum of its vertices' degrees is k*(k-1)
// (every internal edge contributes 2 to that sum).
func countCompleteComponents(n int, edges [][]int) int {
	// Adjacency lists for the undirected graph.
	adj := make([][]int, n)
	for _, e := range edges {
		adj[e[0]] = append(adj[e[0]], e[1])
		adj[e[1]] = append(adj[e[1]], e[0])
	}
	seen := make([]bool, n)
	ans := 0
	for i := 0; i < n; i++ {
		if seen[i] {
			continue
		}
		// BFS from i; comp doubles as the queue and ends up holding the
		// whole component.
		comp := []int{i}
		seen[i] = true
		degreeSum := 0
		for qi := 0; qi < len(comp); qi++ {
			v := comp[qi]
			degreeSum += len(adj[v])
			for _, w := range adj[v] {
				if !seen[w] {
					seen[w] = true
					comp = append(comp, w)
				}
			}
		}
		k := len(comp)
		if degreeSum == k*(k-1) {
			ans++
		}
	}
	return ans
}
// handle reports whether every pair of distinct entries of jj is connected
// according to the adjacency matrix grid — i.e. whether the listed vertices
// form a clique. An empty list is trivially a clique.
func handle(jj []int, grid [][]int) bool {
	for a := 0; a < len(jj); a++ {
		for b := a + 1; b < len(jj); b++ {
			u, v := jj[a], jj[b]
			if grid[u][v] == 0 {
				return false
			}
		}
	}
	return true
}
|
package env
// Package Constants
//
// Names of the environment variables from which the eventing-kafka
// components read their configuration.
const (
	// Eventing-Kafka Configuration
	ServiceAccountEnvVarKey = "SERVICE_ACCOUNT"
	MetricsPortEnvVarKey    = "METRICS_PORT"
	HealthPortEnvVarKey     = "HEALTH_PORT"

	// Kafka Authorization
	KafkaBrokerEnvVarKey   = "KAFKA_BROKERS"
	KafkaUsernameEnvVarKey = "KAFKA_USERNAME"
	KafkaPasswordEnvVarKey = "KAFKA_PASSWORD"

	// Kafka Configuration
	KafkaProviderEnvVarKey                   = "KAFKA_PROVIDER"
	KafkaOffsetCommitMessageCountEnvVarKey   = "KAFKA_OFFSET_COMMIT_MESSAGE_COUNT"
	KafkaOffsetCommitDurationMillisEnvVarKey = "KAFKA_OFFSET_COMMIT_DURATION_MILLIS"
	KafkaTopicEnvVarKey                      = "KAFKA_TOPIC"

	// Dispatcher Configuration
	ChannelKeyEnvVarKey          = "CHANNEL_KEY"
	ServiceNameEnvVarKey         = "SERVICE_NAME"
	ExponentialBackoffEnvVarKey  = "EXPONENTIAL_BACKOFF"
	InitialRetryIntervalEnvVarKey = "INITIAL_RETRY_INTERVAL"
	MaxRetryTimeEnvVarKey        = "MAX_RETRY_TIME"
)
|
package domain
import "time"
// Asset is the Asset information from Nexpose
type Asset struct {
	// ScanTime is the time of the scan that produced this record.
	ScanTime time.Time
	// ID is the asset identifier (presumably assigned by Nexpose — confirm).
	ID int64
	// IP is the asset's IP address.
	IP string
	// Hostname is the asset's host name.
	Hostname string
}
|
package pgsql
import (
"testing"
"time"
)
// TestTimetz exercises round-tripping of PostgreSQL timetz values: native
// time values in and out, plus the TimetzToString and TimetzToByteSlice
// scanners, which normalize short UTC offsets ("+01") to full ones ("+01:00").
func TestTimetz(t *testing.T) {
	dublin, err := time.LoadLocation("Europe/Dublin")
	if err != nil {
		t.Fatal(err)
	}

	testlist2{{
		data: []testdata{
			{
				input:  timetzval(21, 5, 33, 0, dublin),
				output: timetzval(21, 5, 33, 0, dublin)},
			{
				input:  timetzval(4, 5, 6, 789, time.UTC),
				output: timetzval(4, 5, 6, 789, time.UTC)},
		},
	}, {
		scanner: TimetzToString,
		data: []testdata{
			{
				input:  string("21:05:33+01"),
				output: string("21:05:33+01:00")},
			{
				input:  string("04:05:06.789-08"),
				output: string("04:05:06.789-08:00")},
		},
	}, {
		scanner: TimetzToByteSlice,
		data: []testdata{
			{
				input:  []byte("21:05:33+01"),
				output: []byte("21:05:33+01:00")},
			{
				input:  []byte("04:05:06.789-08"),
				output: []byte("04:05:06.789-08:00")},
		},
	}}.execute(t, "timetz")
}
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/golang/dep"
"github.com/golang/dep/gps"
"github.com/golang/dep/gps/paths"
"github.com/golang/dep/gps/pkgtree"
fb "github.com/golang/dep/internal/feedback"
"github.com/golang/dep/internal/fs"
"github.com/pkg/errors"
)
// gopathScanner supplies manifest/lock data by scanning the contents of GOPATH
// It uses its results to fill-in any missing details left by the rootAnalyzer.
type gopathScanner struct {
	ctx        *dep.Ctx                 // dep invocation context (GOPATH, loggers)
	directDeps map[gps.ProjectRoot]bool // set of the project's direct dependencies
	sm         gps.SourceManager
	pd         projectData   // filled by scanGopathForDependencies
	origM      *dep.Manifest // manifest reconstructed from GOPATH state
	origL      *dep.Lock     // lock reconstructed from GOPATH state
}
// newGopathScanner returns a gopathScanner ready to scan; the remaining
// fields (pd, origM, origL) are populated by InitializeRootManifestAndLock.
func newGopathScanner(ctx *dep.Ctx, directDeps map[gps.ProjectRoot]bool, sm gps.SourceManager) *gopathScanner {
	return &gopathScanner{
		ctx:        ctx,
		directDeps: directDeps,
		sm:         sm,
	}
}
// InitializeRootManifestAndLock performs analysis of the filesystem tree rooted
// at path, with the root import path importRoot, to determine the project's
// constraints. Respect any initial constraints defined in the root manifest and
// lock.
func (g *gopathScanner) InitializeRootManifestAndLock(rootM *dep.Manifest, rootL *dep.Lock) error {
	var err error

	g.ctx.Err.Println("Searching GOPATH for projects...")
	g.pd, err = g.scanGopathForDependencies()
	if err != nil {
		return err
	}

	// Build a manifest/lock pair reflecting only what GOPATH told us;
	// overlay() below merges it into the caller's root manifest/lock.
	g.origM = dep.NewManifest()
	g.origM.Constraints = g.pd.constraints
	g.origL = &dep.Lock{
		P: make([]gps.LockedProject, 0, len(g.pd.ondisk)),
	}

	for pr, v := range g.pd.ondisk {
		// That we have to chop off these path prefixes is a symptom of
		// a problem in gps itself
		pkgs := make([]string, 0, len(g.pd.dependencies[pr]))
		prslash := string(pr) + "/"
		for _, pkg := range g.pd.dependencies[pr] {
			if pkg == string(pr) {
				// The project root itself is recorded as ".".
				pkgs = append(pkgs, ".")
			} else {
				pkgs = append(pkgs, trimPathPrefix(pkg, prslash))
			}
		}

		g.origL.P = append(g.origL.P, gps.NewLockedProject(
			gps.ProjectIdentifier{ProjectRoot: pr}, v, pkgs),
		)
	}

	g.overlay(rootM, rootL)

	return nil
}
// Fill in gaps in the root manifest/lock with data found from the GOPATH.
// Existing entries in rootM/rootL always win; GOPATH-derived data is only
// appended where the root had nothing, with user feedback logged per entry.
func (g *gopathScanner) overlay(rootM *dep.Manifest, rootL *dep.Lock) {
	// Add any constraint derived from GOPATH that the root manifest does
	// not already declare.
	for pkg, prj := range g.origM.Constraints {
		if _, has := rootM.Constraints[pkg]; has {
			continue
		}
		rootM.Constraints[pkg] = prj
		v := g.pd.ondisk[pkg]

		pi := gps.ProjectIdentifier{ProjectRoot: pkg, Source: prj.Source}
		f := fb.NewConstraintFeedback(gps.ProjectConstraint{Ident: pi, Constraint: v}, fb.DepTypeDirect)
		f.LogFeedback(g.ctx.Err)
		f = fb.NewLockedProjectFeedback(gps.NewLockedProject(pi, v, nil), fb.DepTypeDirect)
		f.LogFeedback(g.ctx.Err)
	}

	// Keep track of which projects have been locked
	lockedProjects := map[gps.ProjectRoot]bool{}
	for _, lp := range rootL.P {
		lockedProjects[lp.Ident().ProjectRoot] = true
	}

	// Append GOPATH-derived locked projects that the root lock lacks;
	// transitive dependencies get their own feedback line.
	for _, lp := range g.origL.P {
		pkg := lp.Ident().ProjectRoot
		if _, isLocked := lockedProjects[pkg]; isLocked {
			continue
		}
		rootL.P = append(rootL.P, lp)
		lockedProjects[pkg] = true
		if _, isDirect := g.directDeps[pkg]; !isDirect {
			f := fb.NewLockedProjectFeedback(lp, fb.DepTypeTransitive)
			f.LogFeedback(g.ctx.Err)
		}
	}

	// Identify projects whose version is unknown and will have to be solved for
	var missing []string    // all project roots missing from GOPATH
	var missingVCS []string // all project roots missing VCS information
	// NOTE(review): the field name "invalidSVC" appears to mean invalid
	// VCS data — confirm before renaming.
	for pr := range g.pd.notondisk {
		if _, isLocked := lockedProjects[pr]; isLocked {
			continue
		}
		if g.pd.invalidSVC[pr] {
			missingVCS = append(missingVCS, string(pr))
		} else {
			missing = append(missing, string(pr))
		}
	}

	missingStr := ""
	missingVCSStr := ""
	if len(missing) > 0 {
		missingStr = fmt.Sprintf("The following dependencies were not found in GOPATH:\n %s\n\n",
			strings.Join(missing, "\n "))
	}
	if len(missingVCS) > 0 {
		missingVCSStr = fmt.Sprintf("The following dependencies found in GOPATH were missing VCS information (a remote source is required):\n %s\n\n",
			strings.Join(missingVCS, "\n "))
	}

	if len(missingVCS)+len(missing) > 0 {
		g.ctx.Err.Printf("\n%s%sThe most recent version of these projects will be used.\n\n", missingStr, missingVCSStr)
	}
}
// trimPathPrefix strips p2 from the front of p1 when p2 is a filepath prefix
// of p1 (as decided by fs.HasFilepathPrefix); otherwise p1 is returned
// unchanged.
func trimPathPrefix(p1, p2 string) string {
	isPrefix, _ := fs.HasFilepathPrefix(p1, p2)
	if !isPrefix {
		return p1
	}
	return p1[len(p2):]
}
// contains reports whether b occurs in the string slice a.
func contains(a []string, b string) bool {
	for i := range a {
		if a[i] == b {
			return true
		}
	}
	return false
}
// getProjectPropertiesFromVersion takes a Version and returns a proper
// ProjectProperties with Constraint value based on the provided version.
// Revision-only versions yield empty properties (no usable constraint);
// branch and plain version tags are used directly; semver tags are turned
// into a semver constraint via NewSemverConstraintIC.
func getProjectPropertiesFromVersion(v gps.Version) gps.ProjectProperties {
	pp := gps.ProjectProperties{}

	// extract version and ignore if it's revision only
	switch tv := v.(type) {
	case gps.PairedVersion:
		v = tv.Unpair()
	case gps.Revision:
		return pp
	}

	switch v.Type() {
	case gps.IsBranch, gps.IsVersion:
		pp.Constraint = v
	case gps.IsSemver:
		// A value typed IsSemver should always re-parse; a failure here is
		// a programmer error, hence the panic.
		c, err := gps.NewSemverConstraintIC(v.String())
		if err != nil {
			panic(err)
		}
		pp.Constraint = c
	}

	return pp
}
// projectData aggregates everything scanGopathForDependencies learns about
// the projects found in (or missing from) GOPATH.
type projectData struct {
	constraints  gps.ProjectConstraints          // constraints that could be found
	dependencies map[gps.ProjectRoot][]string    // all dependencies (imports) found by project root
	notondisk    map[gps.ProjectRoot]bool        // projects that were not found on disk
	invalidSVC   map[gps.ProjectRoot]bool        // projects that were found on disk but SVC data could not be read
	ondisk       map[gps.ProjectRoot]gps.Version // projects that were found on disk
}
// scanGopathForDependencies walks the project's direct dependencies and,
// transitively, everything they import, recording for each project root its
// on-disk VCS version (ondisk), a derived constraint (constraints), the
// packages used from it (dependencies), and whether it is absent from GOPATH
// (notondisk) or has unreadable VCS data (invalidSVC). Source-cache warming
// happens concurrently; the WaitGroup is drained before returning.
func (g *gopathScanner) scanGopathForDependencies() (projectData, error) {
	constraints := make(gps.ProjectConstraints)
	dependencies := make(map[gps.ProjectRoot][]string)
	packages := make(map[string]bool)
	notondisk := make(map[gps.ProjectRoot]bool)
	invalidSVC := make(map[gps.ProjectRoot]bool)
	ondisk := make(map[gps.ProjectRoot]gps.Version)

	var syncDepGroup sync.WaitGroup
	// syncDep warms the source cache for pr in the background.
	syncDep := func(pr gps.ProjectRoot, sm gps.SourceManager) {
		if err := sm.SyncSourceFor(gps.ProjectIdentifier{ProjectRoot: pr}); err != nil {
			g.ctx.Err.Printf("%+v", errors.Wrapf(err, "Unable to cache %s", pr))
		}
		syncDepGroup.Done()
	}

	if len(g.directDeps) == 0 {
		return projectData{}, nil
	}

	// First pass: the direct dependencies.
	for ippr := range g.directDeps {
		// TODO(sdboyer) these are not import paths by this point, they've
		// already been worked down to project roots.
		ip := string(ippr)
		pr, err := g.sm.DeduceProjectRoot(ip)
		if err != nil {
			return projectData{}, errors.Wrap(err, "sm.DeduceProjectRoot")
		}

		packages[ip] = true
		if _, has := dependencies[pr]; has {
			dependencies[pr] = append(dependencies[pr], ip)
			continue
		}
		syncDepGroup.Add(1)
		go syncDep(pr, g.sm)

		dependencies[pr] = []string{ip}
		abs, err := g.ctx.AbsForImport(string(pr))
		if err != nil {
			notondisk[pr] = true
			continue
		}
		v, err := gps.VCSVersion(abs)
		if err != nil {
			invalidSVC[pr] = true
			notondisk[pr] = true
			continue
		}

		ondisk[pr] = v
		pp := getProjectPropertiesFromVersion(v)
		if pp.Constraint != nil || pp.Source != "" {
			constraints[pr] = pp
		}
	}

	// Explore the packages we've found for transitive deps, either
	// completing the lock or identifying (more) missing projects that we'll
	// need to ask gps to solve for us.
	colors := make(map[string]uint8)
	const (
		white uint8 = iota
		grey
		black
	)

	// cache of PackageTrees, so we don't parse projects more than once
	ptrees := make(map[gps.ProjectRoot]pkgtree.PackageTree)

	// depth-first traverser
	var dft func(string) error
	dft = func(pkg string) error {
		switch colors[pkg] {
		case white:
			colors[pkg] = grey

			pr, err := g.sm.DeduceProjectRoot(pkg)
			if err != nil {
				return errors.Wrap(err, "could not deduce project root for "+pkg)
			}

			// We already visited this project root earlier via some other
			// pkg within it, and made the decision that it's not on disk.
			// Respect that decision, and pop the stack.
			if notondisk[pr] {
				colors[pkg] = black
				return nil
			}

			ptree, has := ptrees[pr]
			if !has {
				// It's fine if the root does not exist - it indicates that this
				// project is not present in the workspace, and so we need to
				// solve to deal with this dep.
				r := filepath.Join(g.ctx.GOPATH, "src", string(pr))
				fi, err := os.Stat(r)
				if os.IsNotExist(err) || !fi.IsDir() {
					colors[pkg] = black
					notondisk[pr] = true
					return nil
				}

				// We know the project is on disk; the question is whether we're
				// first seeing it here, in the transitive exploration, or if it
				// was found in the initial pass on direct imports. We know it's
				// the former if there's no entry for it in the ondisk map.
				if _, in := ondisk[pr]; !in {
					abs, err := g.ctx.AbsForImport(string(pr))
					if err != nil {
						colors[pkg] = black
						notondisk[pr] = true
						return nil
					}
					v, err := gps.VCSVersion(abs)
					if err != nil {
						// Even if we know it's on disk, errors are still
						// possible when trying to deduce version. If we
						// encounter such an error, just treat the project as
						// not being on disk; the solver will work it out.
						colors[pkg] = black
						notondisk[pr] = true
						return nil
					}
					ondisk[pr] = v
				}

				ptree, err = pkgtree.ListPackages(r, string(pr))
				if err != nil {
					// Any error here other than an a nonexistent dir (which
					// can't happen because we covered that case above) is
					// probably critical, so bail out.
					return errors.Wrap(err, "gps.ListPackages")
				}
				ptrees[pr] = ptree
			}

			// Get a reachmap that includes main pkgs (even though importing
			// them is an error, what we're checking right now is simply whether
			// there's a package with go code present on disk), and does not
			// backpropagate errors (again, because our only concern right now
			// is package existence).
			rm, errmap := ptree.ToReachMap(true, false, false, nil)
			reached, ok := rm[pkg]
			if !ok {
				colors[pkg] = black
				// not on disk...
				notondisk[pr] = true
				return nil
			}
			if _, ok := errmap[pkg]; ok {
				// The package is on disk, but contains some errors.
				colors[pkg] = black
				return nil
			}

			if deps, has := dependencies[pr]; has {
				if !contains(deps, pkg) {
					dependencies[pr] = append(deps, pkg)
				}
			} else {
				dependencies[pr] = []string{pkg}
				syncDepGroup.Add(1)
				go syncDep(pr, g.sm)
			}

			// recurse
			for _, rpkg := range reached.External {
				if paths.IsStandardImportPath(rpkg) {
					continue
				}

				err := dft(rpkg)
				if err != nil {
					// Bubble up any errors we encounter
					return err
				}
			}

			colors[pkg] = black
		case grey:
			return errors.Errorf("Import cycle detected on %s", pkg)
		}
		return nil
	}

	// run the depth-first traversal from the set of immediate external
	// package imports we found in the current project
	for pkg := range packages {
		err := dft(pkg)
		if err != nil {
			return projectData{}, err // already errors.Wrap()'d internally
		}
	}

	syncDepGroup.Wait()

	pd := projectData{
		constraints:  constraints,
		dependencies: dependencies,
		invalidSVC:   invalidSVC,
		notondisk:    notondisk,
		ondisk:       ondisk,
	}
	return pd, nil
}
|
package purchaseorder
import (
"github.com/centrifuge/centrifuge-protobufs/gen/go/purchaseorder"
"github.com/centrifuge/go-centrifuge/documents"
clientpb "github.com/centrifuge/go-centrifuge/protobufs/gen/go/purchaseorder"
"github.com/centrifuge/go-centrifuge/utils/timeutils"
)
// toClientLineItems converts document line items to their client protobuf
// form. The six decimal fields are stringified in one batch; the decs[i]
// indices below must stay in step with the DecimalsToStrings argument order.
func toClientLineItems(items []*LineItem) ([]*clientpb.LineItem, error) {
	var citems []*clientpb.LineItem
	for _, i := range items {
		decs := documents.DecimalsToStrings(i.UnitOfMeasure, i.Quantity, i.PricePerUnit, i.AmountInvoiced, i.AmountTotal, i.ReceivedQuantity)
		pts, err := timeutils.ToProtoTimestamps(i.DateCreated, i.DateUpdated)
		if err != nil {
			return nil, err
		}

		activities, err := toClientActivities(i.Activities)
		if err != nil {
			return nil, err
		}

		citems = append(citems, &clientpb.LineItem{
			Status:            i.Status,
			Description:       i.Description,
			ItemNumber:        i.ItemNumber,
			UnitOfMeasure:     decs[0],
			Quantity:          decs[1],
			PricePerUnit:      decs[2],
			AmountInvoiced:    decs[3],
			AmountTotal:       decs[4],
			ReceivedQuantity:  decs[5],
			DateCreated:       pts[0],
			DateUpdated:       pts[1],
			PartNo:            i.PartNumber,
			RequisitionItem:   i.RequisitionItem,
			RequisitionNumber: i.RequisitionNumber,
			RevisionNumber:    int64(i.RevisionNumber),
			Activities:        activities,
			TaxItems:          toClientTaxItems(i.TaxItems),
		})
	}

	return citems, nil
}
// toClientActivities maps document line-item activities to their client
// protobuf form, stringifying the decimal fields and converting timestamps.
func toClientActivities(activities []*LineItemActivity) ([]*clientpb.LineItemActivity, error) {
	var out []*clientpb.LineItemActivity
	for _, act := range activities {
		stamps, err := timeutils.ToProtoTimestamps(act.Date)
		if err != nil {
			return nil, err
		}
		strs := documents.DecimalsToStrings(act.Quantity, act.Amount)
		converted := &clientpb.LineItemActivity{
			ItemNumber:            act.ItemNumber,
			Status:                act.Status,
			Quantity:              strs[0],
			Amount:                strs[1],
			Date:                  stamps[0],
			ReferenceDocumentId:   act.ReferenceDocumentID,
			ReferenceDocumentItem: act.ReferenceDocumentItem,
		}
		out = append(out, converted)
	}
	return out, nil
}
// toClientTaxItems maps document tax items to their client protobuf form;
// the four decimal fields are rendered as strings in one batch.
func toClientTaxItems(items []*TaxItem) []*clientpb.TaxItem {
	var out []*clientpb.TaxItem
	for _, tax := range items {
		strs := documents.DecimalsToStrings(tax.TaxAmount, tax.TaxBaseAmount, tax.TaxCode, tax.TaxRate)
		out = append(out, &clientpb.TaxItem{
			ItemNumber:              tax.ItemNumber,
			PurchaseOrderItemNumber: tax.PurchaseOrderItemNumber,
			TaxAmount:               strs[0],
			TaxBaseAmount:           strs[1],
			TaxCode:                 strs[2],
			TaxRate:                 strs[3],
		})
	}
	return out
}
// toP2PLineItems converts document line items to their P2P protobuf form.
// Decimals become byte payloads here (vs strings on the client side); the
// decs[i] indices must stay in step with the DecimalsToBytes argument order.
func toP2PLineItems(items []*LineItem) ([]*purchaseorderpb.LineItem, error) {
	var pitems []*purchaseorderpb.LineItem
	for _, i := range items {
		decs, err := documents.DecimalsToBytes(i.UnitOfMeasure, i.Quantity, i.PricePerUnit, i.AmountInvoiced, i.AmountTotal, i.ReceivedQuantity)
		if err != nil {
			return nil, err
		}

		patts, err := toP2PActivities(i.Activities)
		if err != nil {
			return nil, err
		}

		pti, err := toP2PTaxItems(i.TaxItems)
		if err != nil {
			return nil, err
		}

		pts, err := timeutils.ToProtoTimestamps(i.DateCreated, i.DateUpdated)
		if err != nil {
			return nil, err
		}

		pitems = append(pitems, &purchaseorderpb.LineItem{
			Status:            i.Status,
			Description:       i.Description,
			ItemNumber:        i.ItemNumber,
			UnitOfMeasure:     decs[0],
			Quantity:          decs[1],
			PricePerUnit:      decs[2],
			AmountInvoiced:    decs[3],
			AmountTotal:       decs[4],
			ReceivedQuantity:  decs[5],
			DateCreated:       pts[0],
			DateUpdated:       pts[1],
			PartNo:            i.PartNumber,
			RequisitionItem:   i.RequisitionItem,
			RequisitionNumber: i.RequisitionNumber,
			RevisionNumber:    int64(i.RevisionNumber),
			Activities:        patts,
			TaxItems:          pti,
		})
	}

	return pitems, nil
}
// toP2PActivities maps document line-item activities to their P2P protobuf
// form, encoding decimals as bytes and converting timestamps.
func toP2PActivities(activities []*LineItemActivity) ([]*purchaseorderpb.LineItemActivity, error) {
	var out []*purchaseorderpb.LineItemActivity
	for _, act := range activities {
		raw, err := documents.DecimalsToBytes(act.Quantity, act.Amount)
		if err != nil {
			return nil, err
		}
		stamps, err := timeutils.ToProtoTimestamps(act.Date)
		if err != nil {
			return nil, err
		}
		converted := &purchaseorderpb.LineItemActivity{
			ItemNumber:            act.ItemNumber,
			Status:                act.Status,
			Quantity:              raw[0],
			Amount:                raw[1],
			Date:                  stamps[0],
			ReferenceDocumentId:   act.ReferenceDocumentID,
			ReferenceDocumentItem: act.ReferenceDocumentItem,
		}
		out = append(out, converted)
	}
	return out, nil
}
// toP2PTaxItems maps document tax items to their P2P protobuf form, encoding
// the four decimal fields as bytes in one batch.
func toP2PTaxItems(items []*TaxItem) ([]*purchaseorderpb.TaxItem, error) {
	var out []*purchaseorderpb.TaxItem
	for _, tax := range items {
		raw, err := documents.DecimalsToBytes(tax.TaxAmount, tax.TaxBaseAmount, tax.TaxCode, tax.TaxRate)
		if err != nil {
			return nil, err
		}
		out = append(out, &purchaseorderpb.TaxItem{
			ItemNumber:              tax.ItemNumber,
			PurchaseOrderItemNumber: tax.PurchaseOrderItemNumber,
			TaxAmount:               raw[0],
			TaxBaseAmount:           raw[1],
			TaxCode:                 raw[2],
			TaxRate:                 raw[3],
		})
	}
	return out, nil
}
// fromClientLineItems converts client protobuf line items back into document
// line items, parsing decimal strings and proto timestamps. Note the
// StringsToDecimals argument order differs from toClientLineItems'; the
// decs[i] indices below are matched to it and must not be reordered.
func fromClientLineItems(citems []*clientpb.LineItem) ([]*LineItem, error) {
	var items []*LineItem
	for _, ci := range citems {
		decs, err := documents.StringsToDecimals(
			ci.AmountInvoiced,
			ci.AmountTotal,
			ci.PricePerUnit,
			ci.UnitOfMeasure,
			ci.Quantity,
			ci.ReceivedQuantity)
		if err != nil {
			return nil, err
		}

		ti, err := fromClientTaxItems(ci.TaxItems)
		if err != nil {
			return nil, err
		}

		la, err := fromClientLineItemActivities(ci.Activities)
		if err != nil {
			return nil, err
		}

		tms, err := timeutils.FromProtoTimestamps(ci.DateCreated, ci.DateUpdated)
		if err != nil {
			return nil, err
		}

		items = append(items, &LineItem{
			Status:            ci.Status,
			ItemNumber:        ci.ItemNumber,
			Description:       ci.Description,
			AmountInvoiced:    decs[0],
			AmountTotal:       decs[1],
			RequisitionNumber: ci.RequisitionNumber,
			RequisitionItem:   ci.RequisitionItem,
			RevisionNumber:    int(ci.RevisionNumber),
			PricePerUnit:      decs[2],
			UnitOfMeasure:     decs[3],
			Quantity:          decs[4],
			ReceivedQuantity:  decs[5],
			DateCreated:       tms[0],
			DateUpdated:       tms[1],
			PartNumber:        ci.PartNo,
			TaxItems:          ti,
			Activities:        la,
		})
	}
	return items, nil
}
// fromClientTaxItems converts client protobuf tax items back into document
// tax items, parsing the four decimal strings in one batch.
func fromClientTaxItems(citems []*clientpb.TaxItem) ([]*TaxItem, error) {
	var out []*TaxItem
	for _, tax := range citems {
		parsed, err := documents.StringsToDecimals(tax.TaxAmount, tax.TaxRate, tax.TaxCode, tax.TaxBaseAmount)
		if err != nil {
			return nil, err
		}
		out = append(out, &TaxItem{
			ItemNumber:              tax.ItemNumber,
			PurchaseOrderItemNumber: tax.PurchaseOrderItemNumber,
			TaxAmount:               parsed[0],
			TaxRate:                 parsed[1],
			TaxCode:                 parsed[2],
			TaxBaseAmount:           parsed[3],
		})
	}
	return out, nil
}
// fromClientLineItemActivities converts client protobuf line-item activities
// back into document activities, parsing decimal strings and timestamps.
func fromClientLineItemActivities(catts []*clientpb.LineItemActivity) ([]*LineItemActivity, error) {
	var out []*LineItemActivity
	for _, act := range catts {
		parsed, err := documents.StringsToDecimals(act.Quantity, act.Amount)
		if err != nil {
			return nil, err
		}
		stamps, err := timeutils.FromProtoTimestamps(act.Date)
		if err != nil {
			return nil, err
		}
		converted := &LineItemActivity{
			ItemNumber:            act.ItemNumber,
			Status:                act.Status,
			Quantity:              parsed[0],
			Amount:                parsed[1],
			Date:                  stamps[0],
			ReferenceDocumentItem: act.ReferenceDocumentItem,
			ReferenceDocumentID:   act.ReferenceDocumentId,
		}
		out = append(out, converted)
	}
	return out, nil
}
// fromP2PLineItemActivities converts P2P protobuf line-item activities back
// into document activities, decoding byte-encoded decimals and timestamps.
func fromP2PLineItemActivities(patts []*purchaseorderpb.LineItemActivity) ([]*LineItemActivity, error) {
	var out []*LineItemActivity
	for _, act := range patts {
		parsed, err := documents.BytesToDecimals(act.Quantity, act.Amount)
		if err != nil {
			return nil, err
		}
		stamps, err := timeutils.FromProtoTimestamps(act.Date)
		if err != nil {
			return nil, err
		}
		converted := &LineItemActivity{
			ItemNumber:            act.ItemNumber,
			Status:                act.Status,
			Quantity:              parsed[0],
			Amount:                parsed[1],
			Date:                  stamps[0],
			ReferenceDocumentItem: act.ReferenceDocumentItem,
			ReferenceDocumentID:   act.ReferenceDocumentId,
		}
		out = append(out, converted)
	}
	return out, nil
}
// fromP2PTaxItems converts P2P protobuf tax items back into document tax
// items, decoding the four byte-encoded decimals in one batch.
func fromP2PTaxItems(pitems []*purchaseorderpb.TaxItem) ([]*TaxItem, error) {
	var out []*TaxItem
	for _, tax := range pitems {
		parsed, err := documents.BytesToDecimals(tax.TaxAmount, tax.TaxRate, tax.TaxCode, tax.TaxBaseAmount)
		if err != nil {
			return nil, err
		}
		out = append(out, &TaxItem{
			ItemNumber:              tax.ItemNumber,
			PurchaseOrderItemNumber: tax.PurchaseOrderItemNumber,
			TaxAmount:               parsed[0],
			TaxRate:                 parsed[1],
			TaxCode:                 parsed[2],
			TaxBaseAmount:           parsed[3],
		})
	}
	return out, nil
}
// fromP2PLineItems converts purchase-order protobuf line items into the
// internal representation: the six byte-encoded decimal fields are decoded,
// nested tax items and activities are converted, and the created/updated
// timestamps are parsed.
func fromP2PLineItems(pitems []*purchaseorderpb.LineItem) ([]*LineItem, error) {
	var result []*LineItem
	for _, src := range pitems {
		nums, err := documents.BytesToDecimals(
			src.AmountInvoiced,
			src.AmountTotal,
			src.PricePerUnit,
			src.UnitOfMeasure,
			src.Quantity,
			src.ReceivedQuantity)
		if err != nil {
			return nil, err
		}
		taxes, err := fromP2PTaxItems(src.TaxItems)
		if err != nil {
			return nil, err
		}
		activities, err := fromP2PLineItemActivities(src.Activities)
		if err != nil {
			return nil, err
		}
		stamps, err := timeutils.FromProtoTimestamps(src.DateCreated, src.DateUpdated)
		if err != nil {
			return nil, err
		}
		item := &LineItem{
			Status:            src.Status,
			ItemNumber:        src.ItemNumber,
			Description:       src.Description,
			AmountInvoiced:    nums[0],
			AmountTotal:       nums[1],
			RequisitionNumber: src.RequisitionNumber,
			RequisitionItem:   src.RequisitionItem,
			RevisionNumber:    int(src.RevisionNumber),
			PricePerUnit:      nums[2],
			UnitOfMeasure:     nums[3],
			Quantity:          nums[4],
			ReceivedQuantity:  nums[5],
			DateCreated:       stamps[0],
			DateUpdated:       stamps[1],
			PartNumber:        src.PartNo,
			TaxItems:          taxes,
			Activities:        activities,
		}
		result = append(result, item)
	}
	return result, nil
}
|
package helper
import (
"neosmemo/backend/util"
"net/http"
"time"
)
// Session session type
// Session session type: one authenticated login, keyed by its SessionID
// in SessionManager.
type Session struct {
	UserID    string    // owner of this session
	SessionID string    // opaque identifier, also the SessionManager map key
	CreatedAt time.Time // when the session was issued
	ExpiredAt time.Time // intended expiry; NOTE(review): not enforced by GetUserIDFromSession — confirm
}
// SessionManager maps SessionID -> Session (which carries the UserID).
//
// Session Manager design purpose:
//  1. replace the raw user_id with an encrypted session text
//  2. multi-device login (to be backed by redis/sql later)
//  3. and others
//
// NOTE: kept in memory for now, so sessions are lost on restart; this
// plain map is also not safe for concurrent writes — TODO confirm callers
// serialize access.
var SessionManager = map[string]Session{}
// init is intentionally a no-op, kept as a placeholder for future
// session-store initialization.
func init() {
	// do nothing
}
// GetUserIDFromSession GetUserIDFromSession
func GetUserIDFromSession(r *http.Request) (string, bool) {
sessionID, err := util.GetKeyValueFromCookie("session_id", r)
if err != nil {
return "", false
}
session, ok := SessionManager[sessionID]
if !ok {
return "", false
}
return session.UserID, true
}
|
package main
/*
* @lc app=leetcode id=543 lang=golang
*
* [543] Diameter of Binary Tree
*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// best accumulates the longest path (in edges) seen so far while computing
// the diameter; reset by diameterOfBinaryTree before each traversal.
var best int
// diameterOfBinaryTree returns the diameter of the tree (the number of
// edges on the longest path between any two nodes).
// NOTE: relies on the package-level `best` accumulator, so concurrent
// calls are not safe.
func diameterOfBinaryTree(root *TreeNode) int {
	best = 0
	getDepth(root)
	return best
}
// getDepth returns the depth of the subtree rooted at node (0 for nil),
// updating the package-level best with the longest left+right path that
// passes through node.
func getDepth(node *TreeNode) int {
	if node == nil {
		return 0
	}
	l, r := getDepth(node.Left), getDepth(node.Right)
	if d := l + r; d > best {
		best = d
	}
	if l > r {
		return l + 1
	}
	return r + 1
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
/*
The Recamán Sequence is a numeric sequence that starts always with 0. The position of a positive integer in the sequence, or Recamán Index, can be established with the following algorithm:
For every number to find, two variables are considered: the value of the last element of the sequence (last element from now on), and the actual sequence length (length from now on).
If the subtraction of the length from the last element returns a number greater than 0 and not already present in the sequence, it is added to the sequence.
When the conditions of the above statement are not met, will be added always the sum of the last element plus the length (even if it is a number already present in the sequence).
Repeat until the number of interest is found.
Look at the example below for the steps to establish the Recamán Index of the number 2:
Sequence = [0]
Last - Length = 0 - 1 = -1 (lower than zero)
Last + Length = 0 + 1 = 1
Sequence = [0, 1]
Last - Length = 1 - 2 = -1 (lower than 0)
Last + Length = 1 + 2 = 3
Sequence = [0, 1, 3]
Last - Length = 3 - 3 = 0 (already present in sequence)
Last + Length = 3 + 3 = 6
Sequence = [0, 1, 3, 6]
Last - Length = 6 - 4 = 2 (greater than 0 and not already in sequence)
Sequence = [0, 1, 3, 6, 2]
// The Recaman Index of 2 is: 4
Given a positive integer n, implement a function that returns its Recamán Index.
Examples
recamanIndex(2) ➞ 4
recamanIndex(3) ➞ 2
recamanIndex(7) ➞ 5
Notes
The sequence starts always with 0.
The step with the subtraction Last Element - Sequence Length (verifying that it is not already present in the sequence) has precedence over the second step.
Remember: if the number to add is the result of a subtraction it can't be already in the sequence (first step), if it is the result of an addition it can be already present (second step).
Curiosity: the first number to repeat in the sequence is 42...
Curiosity: the first number with a BIG delay in the sequence is 19.
*/
package main
import (
"math"
)
// main sanity-checks recamanindex against precomputed values; assert
// panics on the first mismatch. Order of the checks is preserved because
// the memo caches are shared across calls.
func main() {
	cases := []struct {
		n    uint
		want int
	}{
		{2, 4}, {3, 2}, {7, 5}, {20, 7}, {56, 204},
		{100, 387}, {666, 2292}, {1000, 4419}, {10000, 7215},
	}
	for _, c := range cases {
		assert(recamanindex(c.n) == c.want)
	}
}
// assert panics when x is false; minimal in-process test harness.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
var (
	// terms memoizes n -> the n-th term of the Recamán sequence.
	terms = map[uint]uint{0: 0}
	// vals records every value that has already appeared (value -> term index).
	vals = map[uint]uint{0: 0}
	// inds caches value -> Recamán index, i.e. the position of the value's
	// FIRST appearance in the sequence.
	inds = map[uint]int{}
)

// recaman returns the n-th term of the Recamán sequence, memoizing results
// in terms/vals. Subtraction takes precedence when it stays positive and
// yields a value not yet seen; otherwise the length is added.
func recaman(n uint) uint {
	if cached, ok := terms[n]; ok {
		return cached
	}
	prev := recaman(n - 1)
	next := prev + n
	if prev > n {
		if _, seen := vals[prev-n]; !seen {
			next = prev - n
		}
	}
	terms[n] = next
	vals[next] = n
	return next
}

// recamanindex returns the index at which n FIRST appears in the Recamán
// sequence, or -1 when not found within math.MaxInt32 terms.
func recamanindex(n uint) int {
	if idx, ok := inds[n]; ok {
		return idx
	}
	for i := 0; i < math.MaxInt32; i++ {
		v := recaman(uint(i))
		// Bug fix: values repeat in the sequence (42 is the first repeat);
		// the old code unconditionally overwrote inds[v], so a cached
		// lookup after a longer scan could return the later, wrong index.
		if _, cached := inds[v]; !cached {
			inds[v] = i
		}
		if v == n {
			return i
		}
	}
	return -1
}
|
package routes
import (
//"github.com/adamveld12/goadventure/game"
// "fmt"
// "github.com/adamveld12/goadventure/persistence"
// "github.com/adamveld12/sessionauth"
"github.com/go-martini/martini"
// "github.com/martini-contrib/render"
// "github.com/martini-contrib/sessions"
// "log"
// "net/http"
)
// registerApiRoutes wires the API endpoints onto the given router.
// Currently a stub: no routes are registered yet.
func registerApiRoutes(r martini.Router) {
}
|
package mux
import (
"encoding/json"
"errors"
"github.com/gorilla/mux"
"log"
"net/http"
"os"
"strconv"
)
func RouterStart() {
router := mux.NewRouter()
router.HandleFunc("/", HomeHandler)
// 根据不同的id返回不同的文章对象
router.Path("/article/{id:[0-9]+}").HandlerFunc(ArticleHandler)
// sub router
s := router.Host(defaultHost + ":" + defaultPort).Subrouter()
s.HandleFunc("/books/", BooksHandler)
s.HandleFunc("/books/{id:[0-9]+}", BookHandler)
// 使用装饰器实现日志打印
router.Use(loggingMiddleware)
server := http.Server{
Addr: defaultHost + ":" + defaultPort,
Handler: router,
}
log.Fatal(server.ListenAndServe())
}
// BookHandler serves a single book; books reuse the article lookup logic,
// so this simply delegates to ArticleHandler.
func BookHandler(writer http.ResponseWriter, request *http.Request) {
	ArticleHandler(writer, request)
}
// BooksHandler writes the full article list as JSON.
func BooksHandler(writer http.ResponseWriter, request *http.Request) {
	// The encode error is deliberately discarded, as elsewhere in this file.
	_ = json.NewEncoder(writer).Encode(ArticlesAll())
}
// ArticleHandler writes the article matching the {id} path variable as
// JSON. Ids beyond the known range get a placeholder article echoing the
// id rather than an HTTP error.
func ArticleHandler(w http.ResponseWriter, req *http.Request) {
	id := mux.Vars(req)["id"]
	enc := json.NewEncoder(w)
	// The route pattern restricts id to digits, so the Atoi error is ignored.
	if n, _ := strconv.Atoi(id); n > Articles() {
		_ = enc.Encode(&Article{
			Id:      id,
			Title:   "Default message",
			Content: "this " + id + " is not exists",
		})
		return
	}
	article := GetArticleById(id)
	_ = enc.Encode(&article)
}
// defaultHost and defaultPort are populated from the properties file in init.
var defaultHost string
var defaultPort string

// defaultPath is the location of the server configuration file.
var defaultPath = "properties.json"
// init loads the server configuration at package load time; it panics
// (via loadProperties) when the properties file is missing or malformed.
func init() {
	properties := loadProperties(defaultPath)
	defaultHost = properties.Host
	defaultPort = properties.Port
}
type ServerConfig struct {
Host string `json:"host"`
Port string `json:"port"`
}
func loadProperties(path string) ServerConfig {
file, e := os.Open(path)
defer file.Close()
if e != nil {
panic(errors.New("properties file is not exits"))
}
decoder := json.NewDecoder(file)
conf := ServerConfig{}
e = decoder.Decode(&conf)
if e != nil {
panic(errors.New("read properties file failed"))
}
return conf
}
// Load exposes loadProperties to callers outside this file.
func Load(path string) ServerConfig {
	return loadProperties(path)
}
type HomeMessage struct {
Message string `json:"message"`
}
func HomeHandler(writer http.ResponseWriter, request *http.Request) {
encoder := json.NewEncoder(writer)
msg := &HomeMessage{
Message: "Welcome to home",
}
_ = encoder.Encode(msg)
}
|
package utils
import (
"log"
"os"
)
func DirExists(name string) bool {
info, err := os.Stat(name)
if os.IsNotExist(err) {
return false
}
return info.IsDir()
}
// SliceToSet converts slice into a membership set. A duplicate value is
// treated as a programming error and aborts the process via log.Fatalf.
func SliceToSet(slice []string) map[string]bool {
	set := make(map[string]bool)
	for _, v := range slice {
		if set[v] {
			log.Fatalf("programmer error - repeated value: %s", v)
		}
		set[v] = true
	}
	return set
}
|
package main
import (
"bufio"
"fmt"
"log"
"io"
"os"
"strings"
"strconv"
)
const (
	// ascii_offset maps program indices 0..15 onto lowercase letters 'a'..'p'.
	ascii_offset byte = 97
	// ascii_int_offset implicitly repeats the previous expression, so it is
	// also byte 97; it is not referenced anywhere in this file —
	// TODO confirm it is unused before removing.
	ascii_int_offset
)
// checkErr aborts the program when err is non-nil.
func checkErr(err error) {
	if err == nil {
		return
	}
	log.Fatalf("Error: %v", err)
}
// getIndex returns the position of match within set, or -1 when absent.
func getIndex(set []byte, match byte) int {
	for i := range set {
		if set[i] == match {
			return i
		}
	}
	return -1
}
// genSeq builds the initial program lineup: size consecutive lowercase
// letters starting at 'a'.
func genSeq(size int) []byte {
	const first = byte('a') // same value as ascii_offset (97)
	seq := make([]byte, size)
	for i := range seq {
		seq[i] = first + byte(i)
	}
	return seq
}
// processSpin handles the spin move sX: the last X programs move to the
// front, keeping their order. meta[0] is the decimal spin size.
func processSpin(seq []byte, meta []string) []byte {
	size, err := strconv.Atoi(meta[0])
	checkErr(err)
	// Build the result in a fresh slice. The previous
	// append(seq[len-size:], seq[:len-size]...) appended a slice onto a
	// subslice of itself; it only behaved because the tail subslice was at
	// full capacity and forced a reallocation — with any spare capacity
	// the append would have clobbered its own source.
	spun := make([]byte, 0, len(seq))
	spun = append(spun, seq[len(seq)-size:]...)
	return append(spun, seq[:len(seq)-size]...)
}
// swap exchanges the bytes at positions l and r in place and returns seq
// for call chaining.
func swap(seq []byte, l int, r int) []byte {
	seq[l], seq[r] = seq[r], seq[l]
	return seq
}
// processExchange handles the exchange move xA/B: the programs at
// positions A and B swap places. meta holds the two decimal positions.
func processExchange(seq []byte, meta []string) []byte {
	a, err := strconv.Atoi(meta[0])
	checkErr(err)
	b, err := strconv.Atoi(meta[1])
	checkErr(err)
	return swap(seq, a, b)
}
func processPartner(seq []byte, meta []string) []byte {
l := getIndex(seq, []byte(meta[0])[0])
r := getIndex(seq, []byte(meta[1])[0])
return swap(seq, l, r)
}
// main reads comma-separated dance moves from input.txt, applies them to
// the 16-program lineup "abcdefghijklmnop", and prints the final order.
func main() {
	f, err := os.Open("input.txt")
	checkErr(err)
	defer f.Close()

	reader := bufio.NewReader(f)
	seq := genSeq(16)
	for {
		// Bug fix: the original `move, readErr := ...` inside the loop
		// shadowed the outer readErr, so the `for readErr == nil` condition
		// never observed EOF and termination relied solely on the
		// empty-read break. The loop is now explicit about EOF.
		move, readErr := reader.ReadBytes(',')
		if readErr != nil && readErr != io.EOF {
			checkErr(readErr)
		}
		if len(move) == 0 {
			break
		}
		// Trim the trailing comma (absent on the final move).
		moveEnd := len(move)
		if move[moveEnd-1] == ',' {
			moveEnd--
		}
		meta := strings.Split(string(move[1:moveEnd]), "/")
		switch move[0] {
		case 's':
			seq = processSpin(seq, meta)
		case 'x':
			seq = processExchange(seq, meta)
		case 'p':
			seq = processPartner(seq, meta)
		}
		if readErr == io.EOF {
			break
		}
	}
	fmt.Println(string(seq))
}
|
package drobox
import (
"encoding/json"
"fmt"
"reflect"
"testing"
"time"
)
// TestDocIdListParse verifies that DocIdList decodes doc_ids, the cursor
// (value plus RFC 3339 expiration) and has_more from its JSON wire format.
func TestDocIdListParse(t *testing.T) {
	jsonString := `{
	"doc_ids": [
		"aaaaaaaaaaaaaaaaaaaaa",
		"bbbbbbbbbbbbbbbbbbbbb"
	],
	"cursor": {
		"value": "value_sample",
		"expiration": "2000-01-01T09:00:00Z"
	},
	"has_more": false
}`
	jsonBuf := []byte(jsonString)
	var docIdList DocIdList
	err := json.Unmarshal(jsonBuf, &docIdList)
	if err != nil {
		// Bug fix: the parse error was only printed with fmt.Println and
		// the test kept running against a zero value; fail fast instead.
		t.Fatal(fmt.Sprint("unmarshal failed: ", err))
	}
	expect := DocIdList{
		[]string{"aaaaaaaaaaaaaaaaaaaaa", "bbbbbbbbbbbbbbbbbbbbb"},
		cursor{"value_sample", time.Date(2000, 1, 1, 9, 0, 0, 0, time.UTC)},
		false}
	if !reflect.DeepEqual(expect.DocIds, docIdList.DocIds) {
		t.Errorf("unexpected actual: %v, expect: %v", docIdList.DocIds, expect.DocIds)
	}
	if expect.Cursor != docIdList.Cursor {
		t.Errorf("unexpected actual: %v, expect: %v", docIdList.Cursor, expect.Cursor)
	}
	if expect.HasMore != docIdList.HasMore {
		t.Errorf("unexpected actual: %v, expect: %v", docIdList.HasMore, expect.HasMore)
	}
}
|
package cmd
import (
"github.com/spf13/cobra"
)
var (
	// skipKeyring disables use of the OS keyring; credentials are then
	// persisted in clear text (see the --no-keyring flag registered in init).
	skipKeyring bool
)
// configureCmd interactively establishes a connection to a running Pydio
// Cells server and persists the credentials locally. The default Run
// delegates to the OAuth2 "Authorization Code" sub-command.
var configureCmd = &cobra.Command{
	Use:   "configure",
	Short: "Configure a connection to a running server and locally persist credentials for later use",
	Long: `
Launch an interactive process to configure a connection to a running Pydio Cells server instance.
By default, we use a secure OAuth2 process based on 'Authorization Code' Grant.
If necessary, you might use an alternative authorization process and/or execute this process non-interactively calling one of the defined sub-commands.
Once a connection with the server established, it stores necessary information locally, keeping the sensitive bits encrypted in the local machine keyring.
If you want to forget a connection, the config file can be wiped out by calling the 'clear' subcommand.
*WARNING*
If no keyring is defined in the local machine, all information is stored in *clear text* in a config file of the Cells Client working directory.
In such case, do not use the 'client-auth' process.
`,
	Run: func(cm *cobra.Command, args []string) {
		// Call OAuth grant flow by default
		configureOAuthCmd.Run(cm, args)
		// switch configAuthType {
		// case authTypeClientAuth:
		// 	configureClientAuthCmd.Run(cm, args)
		// 	break
		// case authTypeOAuth:
		// default:
		// 	configureOAuthCmd.Run(cm, args)
		// }
	},
}
// init registers configureCmd on the root command together with its
// persistent --no-keyring flag.
func init() {
	flags := configureCmd.PersistentFlags()
	// Typo fix in the user-facing help text: "what your are doing" ->
	// "what you are doing".
	helpMsg := "Explicitly tell the tool to *NOT* try to use a keyring. Only use this flag if you really know what you are doing: some sensitive information will end up stored on your file system in clear text."
	flags.BoolVar(&skipKeyring, "no-keyring", false, helpMsg)
	RootCmd.AddCommand(configureCmd)
}
|
package transport
import (
"context"
"encoding/json"
"fmt"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/tracing/zipkin"
httptransport "github.com/go-kit/kit/transport/http"
"github.com/gorilla/mux"
"github.com/mashenjun/courier/com"
"github.com/mashenjun/courier/pkg/endpoint"
"github.com/mashenjun/courier/pkg/service"
stdzipkin "github.com/openzipkin/zipkin-go"
"github.com/prometheus/client_golang/prometheus/promhttp"
"html/template"
"net/http"
)
const (
	// responseWriterKey is the context key under which the raw
	// http.ResponseWriter is stashed so the websocket subscribe decoder
	// can retrieve it. NOTE(review): a plain string key risks collisions;
	// a private struct type would be safer — the same literal is also used
	// in NewHTTPHandler.
	responseWriterKey = "rw"
)
// decodeHTTPGenericRequest is a placeholder decoder for endpoints that take
// no request payload (e.g. /ping); it currently returns a nil request.
func decodeHTTPGenericRequest(ctx context.Context, r *http.Request) (interface{}, error) {
	// todo
	// Debug leftover: dumps the context to stdout on every request —
	// consider removing once the decoder is implemented.
	fmt.Printf("%#v\n", ctx)
	return nil, nil
}
func decodeHTTPSubscribeRequest(ctx context.Context, r *http.Request) (interface{}, error) {
var req service.SubscribeReq
req.W = ctx.Value(responseWriterKey).(http.ResponseWriter)
req.R = r
return req, nil
}
// decodeHTTPSendRequest parses a SendReq: the target key comes from the
// {key} path variable and the payload from the JSON request body.
func decodeHTTPSendRequest(_ context.Context, r *http.Request) (interface{}, error) {
	req := service.SendReq{Key: mux.Vars(r)["key"]}
	payload := map[string]interface{}{}
	err := json.NewDecoder(r.Body).Decode(&payload)
	req.Data = payload
	return req, err
}
// decodeHTTPCloseRequest parses a CloseReq carrying only the {key} path
// variable.
func decodeHTTPCloseRequest(_ context.Context, r *http.Request) (interface{}, error) {
	return service.CloseReq{Key: mux.Vars(r)["key"]}, nil
}
func encodeHTTPGenericResponse(_ context.Context, w http.ResponseWriter, resp interface{}) error {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
response := struct {
ErrorCode int `json:"error_code"`
Data interface{} `json:"data"` // data should contain page info
}{}
response.Data = resp
return json.NewEncoder(w).Encode(response)
}
// encodeError maps a service error onto an HTTP status code and writes the
// error as JSON. Non-service errors get a 500 and are reported as a
// parameter error.
func encodeError(_ context.Context, err error, w http.ResponseWriter) {
	// Bug fix: Content-Type must be set BEFORE WriteHeader — headers
	// changed after the status line is written are silently dropped, so
	// error responses previously went out without a JSON content type.
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	// process the given error and set status code
	if sErr, ok := err.(*com.ServiceError); ok {
		w.WriteHeader(sErr.StatusCode)
	} else {
		w.WriteHeader(http.StatusInternalServerError)
		err = com.ParameterError // error here go to parameter invalid
	}
	json.NewEncoder(w).Encode(err)
}
// NewHTTPHandler wires every endpoint into an HTTP router: /ping, the
// websocket /subscribe path, per-key /send and /close, a /home demo page
// and /metrics for Prometheus scraping.
func NewHTTPHandler(endpoints endpoint.Endpoints, logger log.Logger, tracer *stdzipkin.Tracer) http.Handler {
	options := []httptransport.ServerOption{
		httptransport.ServerErrorEncoder(encodeError),
		httptransport.ServerErrorLogger(logger),
		zipkin.HTTPServerTrace(tracer),
	}
	r := mux.NewRouter()
	r.Handle("/ping", httptransport.NewServer(
		endpoints.PingEndpoint,
		decodeHTTPGenericRequest,
		encodeHTTPGenericResponse,
		options...,
	)).Methods(http.MethodGet)
	// Registered as a raw HandlerFunc (not Handle) so the original
	// ResponseWriter can be stashed in the request context — the subscribe
	// endpoint needs it to upgrade the connection to a websocket.
	r.HandleFunc("/subscribe", func(w http.ResponseWriter,
		r *http.Request) {
		httpSrv := httptransport.NewServer(
			endpoints.SubscribeEndpoint,
			decodeHTTPSubscribeRequest,
			encodeHTTPGenericResponse,
			// Cleanup: the previous append(options, ServerBefore()) added
			// an empty, no-op before-hook.
			options...,
		)
		// Consistency fix: use the shared responseWriterKey constant read
		// by decodeHTTPSubscribeRequest instead of duplicating the raw
		// "rw" literal.
		newCtx := context.WithValue(r.Context(), responseWriterKey, w)
		req := r.WithContext(newCtx)
		httpSrv.ServeHTTP(w, req)
	}).Methods(http.MethodGet)
	r.Handle("/send/{key}", httptransport.NewServer(
		endpoints.SendEndpoint,
		decodeHTTPSendRequest,
		encodeHTTPGenericResponse,
		options...,
	)).Methods(http.MethodPost)
	r.Handle("/close/{key}", httptransport.NewServer(
		endpoints.CloseEndpoint,
		decodeHTTPCloseRequest,
		encodeHTTPGenericResponse,
		options...,
	)).Methods(http.MethodPost)
	r.HandleFunc("/home", func(w http.ResponseWriter,
		r *http.Request) {
		homeTemplate.Execute(w, "ws://"+r.Host+"/subscribe")
	}).Methods(http.MethodGet)
	// enable metrics exporter for prometheus
	r.Handle("/metrics", promhttp.Handler())
	return r
}
// homeTemplate renders a minimal in-browser test page for the websocket
// endpoint; the single template argument is the ws:// URL to connect to
// (see the /home route in NewHTTPHandler).
var homeTemplate = template.Must(template.New("").Parse(`
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<script>
window.addEventListener("load", function(evt) {
var output = document.getElementById("output");
var input = document.getElementById("input");
var ws;
var print = function(message) {
var d = document.createElement("div");
d.innerHTML = message;
output.appendChild(d);
};
document.getElementById("open").onclick = function(evt) {
if (ws) {
return false;
}
ws = new WebSocket("{{.}}");
ws.onopen = function(evt) {
print("OPEN");
}
ws.onclose = function(evt) {
print("CLOSE");
ws = null;
}
ws.onmessage = function(evt) {
print("RESPONSE: " + evt.data);
}
ws.onerror = function(evt) {
print("ERROR: " + evt.data);
}
return false;
};
document.getElementById("send").onclick = function(evt) {
if (!ws) {
return false;
}
print("SEND: " + input.value);
ws.send(input.value);
return false;
};
document.getElementById("close").onclick = function(evt) {
if (!ws) {
return false;
}
ws.close();
return false;
};
});
</script>
</head>
<body>
<table>
<tr><td valign="top" width="50%">
<p>Click "Open" to create a connection to the server,
"Send" to send a message to the server and "Close" to close the connection.
You can change the message and send multiple times.
<p>
<form>
<button id="open">Open</button>
<button id="close">Close</button>
<p><input id="input" type="text" value="Hello world!">
<button id="send">Send</button>
</form>
</td><td valign="top" width="50%">
<div id="output"></div>
</td></tr></table>
</body>
</html>
`))
|
package main
/*
给定平面上 n 对不同的点,“回旋镖” 是由点表示的元组 (i, j, k) ,
其中 i 和 j 之间的距离和 i 和 k 之间的距离相等(需要考虑元组的顺序)。
*/
// numberOfBoomerangs counts ordered tuples (i, j, k) of points where j and
// k are equidistant from i — i.e. ordered pairs of equal-distance points,
// not combinations.
func numberOfBoomerangs(points [][]int) int {
	total := 0
	for i := range points {
		// distance -> number of points seen so far at that distance from points[i]
		counts := make(map[int]int)
		for j := range points {
			d := distance(points[i], points[j])
			counts[d]++
			// Each new point at distance d pairs with every earlier one in
			// two orders, adding 2*(counts[d]-1) boomerangs; drop the factor
			// of 2 if tuple order were irrelevant.
			total += 2 * (counts[d] - 1)
		}
	}
	return total
}

// distance returns the squared Euclidean distance between a and b (no
// square root needed, since only equality of distances matters).
func distance(a []int, b []int) int {
	dx, dy := a[0]-b[0], a[1]-b[1]
	return dx*dx + dy*dy
}
/*
题目链接:
https://leetcode-cn.com/problems/number-of-boomerangs/ 回旋镖的数量
*/
/*
总结
1. 题目本质就是找平面中相对于某个点相同距离的点有多少对
*/ |
package csv
import (
"encoding/csv"
"fmt"
"os"
"strconv"
"time"
)
// ethCsv is one price point read from the ETH CSV: a "Y-M-D" formatted
// date and the parsed price value from the row.
type ethCsv struct {
	Date   string  `json:"date"`
	Amount float64 `json:"amount"`
}
// PricesEth loads ETH price rows from ./static/csv/eth.csv, keeping only
// rows whose timestamp (column 1, Unix seconds) is strictly after sdate
// (also Unix seconds). The header row is skipped. Open/read errors are
// returned; malformed rows panic via uxdate, matching prior behavior.
func PricesEth(sdate string) (prices []*ethCsv, err error) {
	dt_start := uxdate(sdate)
	csvFile, err := os.Open("./static/csv/eth.csv")
	if err != nil {
		return
	}
	defer csvFile.Close()
	reader := csv.NewReader(csvFile)
	reader.FieldsPerRecord = -1 // rows may have ragged field counts
	csvData, err := reader.ReadAll()
	if err != nil {
		return
	}
	for ix, each := range csvData {
		if ix == 0 {
			continue // header row
		}
		tm := uxdate(each[1])
		if !tm.After(dt_start) {
			continue
		}
		// Price parse errors fall back to 0 — preserved best-effort
		// behavior. NOTE(review): bitSize 32 truncates precision before
		// storing into a float64 field; confirm that is intentional.
		fl, _ := strconv.ParseFloat(each[2], 32)
		prices = append(prices, &ethCsv{
			// Fix: reuse tm rather than re-parsing each[1] a second time,
			// as the original did with a redundant uxdate call.
			Date:   fmt.Sprintf("%d-%d-%d", tm.Year(), tm.Month(), tm.Day()),
			Amount: fl,
		})
	}
	return
}
func uxdate(ux string) (tm time.Time) {
i, err := strconv.ParseInt(ux, 10, 64)
if err != nil {
panic(err)
}
tm = time.Unix(i, 0)
return
}
|
package core
import "context"
// Request marks any request message; it is an empty interface, so any
// value qualifies.
type Request interface{}

// RequestHandler processes a request under ctx and produces a Result.
type RequestHandler func(
	ctx context.Context,
	request interface{}) Result

// Notification marks any notification message (no reply expected).
type Notification interface{}

// NotificationHandler consumes a notification under ctx, returning any
// processing error.
type NotificationHandler func(
	ctx context.Context,
	notification interface{}) error

// ReplyHandler receives data sent back to the caller.
type ReplyHandler func(receivedData interface{})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.