text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"math"
"github.com/jackytck/projecteuler/tools"
)
// Smallest positive number that is divisible by all of the numbers from 1 to n.
// lcm returns the smallest positive number that is evenly divisible by all
// of the numbers from 1 to n (the least common multiple of 1..n).
//
// For every prime appearing in the factorizations of 2..n it keeps the
// highest exponent seen, then multiplies the prime powers back together.
func lcm(n int) int {
	maxExp := make(map[int]int)
	for i := 2; i <= n; i++ {
		for prime, e := range tools.PrimeFactors(i) {
			if e > maxExp[prime] {
				maxExp[prime] = e
			}
		}
	}
	p := 1.0
	for prime, e := range maxExp {
		p *= math.Pow(float64(prime), float64(e))
	}
	// Round rather than truncate: math.Pow works in floating point, so a
	// result like 232792559.99999997 would otherwise be cut down to the
	// wrong integer by a plain int() conversion.
	return int(math.Round(p))
}
// main prints the smallest numbers evenly divisible by 1..10 and 1..20.
func main() {
	for _, n := range []int{10, 20} {
		fmt.Println(lcm(n))
	}
}
// What is the smallest positive number that is evenly divisible by all of the
// numbers from 1 to 20?
|
// Copyright 2023-2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"context"
"fmt"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/format"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/sqlexec"
)
// onAddCheckConstraint drives the multi-step schema change for
// ALTER TABLE ... ADD CONSTRAINT ... CHECK.
// State transitions: none -> write only -> write reorganization -> public.
// Each transition bumps the schema version so other nodes reload table info.
func (w *worker) onAddCheckConstraint(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
// Handle the rolling back job.
if job.IsRollingback() {
ver, err = onDropCheckConstraint(d, t, job)
if err != nil {
return ver, errors.Trace(err)
}
return ver, nil
}
// Test hook: fail before the job arguments are decoded.
failpoint.Inject("errorBeforeDecodeArgs", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(ver, errors.New("occur an error before decode args"))
}
})
dbInfo, tblInfo, constraintInfoInMeta, constraintInfoInJob, err := checkAddCheckConstraint(t, job)
if err != nil {
return ver, errors.Trace(err)
}
if constraintInfoInMeta == nil {
// It's first time to run add constraint job, so there is no constraint info in meta.
// Use the raw constraint info from job directly and modify table info here.
constraintInfoInJob.ID = allocateConstraintID(tblInfo)
// Reset constraint name according to real-time constraints name at this point.
constrNames := map[string]bool{}
for _, constr := range tblInfo.Constraints {
constrNames[constr.Name.L] = true
}
setNameForConstraintInfo(tblInfo.Name.L, constrNames, []*model.ConstraintInfo{constraintInfoInJob})
// Double check the constraint dependency: every referenced column must
// exist and be public.
existedColsMap := make(map[string]struct{})
cols := tblInfo.Columns
for _, v := range cols {
if v.State == model.StatePublic {
existedColsMap[v.Name.L] = struct{}{}
}
}
dependedCols := constraintInfoInJob.ConstraintCols
for _, k := range dependedCols {
if _, ok := existedColsMap[k.L]; !ok {
// The table constraint depended on a non-existed column.
return ver, dbterror.ErrTableCheckConstraintReferUnknown.GenWithStackByArgs(constraintInfoInJob.Name, k)
}
}
tblInfo.Constraints = append(tblInfo.Constraints, constraintInfoInJob)
constraintInfoInMeta = constraintInfoInJob
}
originalState := constraintInfoInMeta.State
switch constraintInfoInMeta.State {
case model.StateNone:
job.SchemaState = model.StateWriteOnly
constraintInfoInMeta.State = model.StateWriteOnly
ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != constraintInfoInMeta.State)
case model.StateWriteOnly:
job.SchemaState = model.StateWriteReorganization
constraintInfoInMeta.State = model.StateWriteReorganization
ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != constraintInfoInMeta.State)
case model.StateWriteReorganization:
// Before going public, verify that every existing row satisfies the
// new check expression.
err = w.verifyRemainRecordsForCheckConstraint(dbInfo, tblInfo, constraintInfoInMeta, job)
if err != nil {
return ver, errors.Trace(err)
}
constraintInfoInMeta.State = model.StatePublic
ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != constraintInfoInMeta.State)
if err != nil {
return ver, errors.Trace(err)
}
// Finish this job.
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
default:
err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("constraint", constraintInfoInMeta.State)
}
return ver, errors.Trace(err)
}
// checkAddCheckConstraint decodes the add-constraint job and resolves its
// schema and table. It returns both the constraint already recorded in meta
// (nil on the first run of the job) and the raw constraint carried in the
// job arguments.
func checkAddCheckConstraint(t *meta.Meta, job *model.Job) (*model.DBInfo, *model.TableInfo, *model.ConstraintInfo, *model.ConstraintInfo, error) {
schemaID := job.SchemaID
dbInfo, err := t.GetDatabase(job.SchemaID)
if err != nil {
return nil, nil, nil, nil, errors.Trace(err)
}
tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID)
if err != nil {
return nil, nil, nil, nil, errors.Trace(err)
}
constraintInfo1 := &model.ConstraintInfo{}
err = job.DecodeArgs(constraintInfo1)
if err != nil {
job.State = model.JobStateCancelled
return nil, nil, nil, nil, errors.Trace(err)
}
// do the double-check with constraint existence.
constraintInfo2 := tblInfo.FindConstraintInfoByName(constraintInfo1.Name.L)
if constraintInfo2 != nil {
if constraintInfo2.State == model.StatePublic {
// We already have a constraint with the same constraint name.
// NOTE(review): ErrColumnExists looks odd for a duplicate constraint
// name — confirm a constraint-specific error isn't intended.
job.State = model.JobStateCancelled
return nil, nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(constraintInfo1.Name)
}
// if not, that means constraint was in intermediate state.
}
return dbInfo, tblInfo, constraintInfo2, constraintInfo1, nil
}
// onDropCheckConstraint can be called from two cases:
//  1. rollback of an add-constraint job (the rollback rewrites job.Args);
//  2. a user-issued DROP CONSTRAINT DDL.
//
// State transitions: public -> write only -> removed.
func onDropCheckConstraint(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	tblInfo, constraintInfo, err := checkDropCheckConstraint(t, job)
	if err != nil {
		return ver, errors.Trace(err)
	}
	originalState := constraintInfo.State
	switch constraintInfo.State {
	case model.StatePublic:
		job.SchemaState = model.StateWriteOnly
		constraintInfo.State = model.StateWriteOnly
		ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != constraintInfo.State)
	case model.StateWriteOnly:
		// write only state constraint will still take effect to check the newly inserted data.
		// So the dependent column shouldn't be dropped even in this intermediate state.
		constraintInfo.State = model.StateNone
		// Remove the constraint from tableInfo. Constraint names are unique
		// within a table, so stop at the first match (also avoids continuing
		// to range over a slice that was just mutated).
		for i, constr := range tblInfo.Constraints {
			if constr.Name.L == constraintInfo.Name.L {
				tblInfo.Constraints = append(tblInfo.Constraints[0:i], tblInfo.Constraints[i+1:]...)
				break
			}
		}
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != constraintInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Finish this job.
		if job.IsRollingback() {
			job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
		} else {
			job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
		}
	default:
		// Report the constraint's own state — the previous code reported
		// tblInfo.State (the table state), which made the message misleading —
		// and use ErrInvalidDDLState for consistency with the add/alter paths.
		err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("constraint", constraintInfo.State)
	}
	return ver, errors.Trace(err)
}
// checkDropCheckConstraint decodes the drop-constraint job arguments and
// resolves the target table and constraint; the job is cancelled when the
// arguments are malformed or the constraint does not exist.
func checkDropCheckConstraint(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ConstraintInfo, error) {
	tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	var constrName model.CIStr
	if err = job.DecodeArgs(&constrName); err != nil {
		job.State = model.JobStateCancelled
		return nil, nil, errors.Trace(err)
	}
	// Double-check that the named constraint actually exists on the table.
	constraintInfo := tblInfo.FindConstraintInfoByName(constrName.L)
	if constraintInfo == nil {
		job.State = model.JobStateCancelled
		return nil, nil, dbterror.ErrConstraintNotFound.GenWithStackByArgs(constrName)
	}
	return tblInfo, constraintInfo, nil
}
// onAlterCheckConstraint handles ALTER TABLE ... ALTER CONSTRAINT ... [NOT] ENFORCED.
// Turning enforcement ON walks public -> write reorganization -> write only ->
// public and validates existing rows on the way; turning it OFF is a single
// metadata update.
func (w *worker) onAlterCheckConstraint(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
dbInfo, tblInfo, constraintInfo, enforced, err := checkAlterCheckConstraint(t, job)
if err != nil {
return ver, errors.Trace(err)
}
// enforced will fetch table data and check the constraint.
if enforced {
originalState := constraintInfo.State
switch constraintInfo.State {
case model.StatePublic:
job.SchemaState = model.StateWriteReorganization
constraintInfo.State = model.StateWriteReorganization
constraintInfo.Enforced = enforced
ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != constraintInfo.State)
case model.StateWriteReorganization:
job.SchemaState = model.StateWriteOnly
constraintInfo.State = model.StateWriteOnly
ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != constraintInfo.State)
case model.StateWriteOnly:
// Validate existing rows. A check-constraint violation reverts the
// Enforced flag instead of failing the job; any other error aborts.
err = w.verifyRemainRecordsForCheckConstraint(dbInfo, tblInfo, constraintInfo, job)
if err != nil {
if !table.ErrCheckConstraintViolated.Equal(err) {
return ver, errors.Trace(err)
}
constraintInfo.Enforced = !enforced
}
constraintInfo.State = model.StatePublic
ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != constraintInfo.State)
if err != nil {
return ver, errors.Trace(err)
}
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
}
} else {
// NOT ENFORCED: no data needs checking, finish in one step.
constraintInfo.Enforced = enforced
ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, true)
if err != nil {
// update version and tableInfo error will cause retry.
return ver, errors.Trace(err)
}
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
}
return ver, err
}
// checkAlterCheckConstraint decodes the alter-constraint job arguments and
// resolves the schema, table and constraint; the job is cancelled on bad
// arguments or a missing constraint. The boolean result is the requested
// ENFORCED flag.
func checkAlterCheckConstraint(t *meta.Meta, job *model.Job) (*model.DBInfo, *model.TableInfo, *model.ConstraintInfo, bool, error) {
	dbInfo, err := t.GetDatabase(job.SchemaID)
	if err != nil {
		return nil, nil, nil, false, errors.Trace(err)
	}
	tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID)
	if err != nil {
		return nil, nil, nil, false, errors.Trace(err)
	}
	var (
		enforced   bool
		constrName model.CIStr
	)
	if err = job.DecodeArgs(&constrName, &enforced); err != nil {
		job.State = model.JobStateCancelled
		return nil, nil, nil, false, errors.Trace(err)
	}
	// Double-check that the named constraint exists on the table.
	constraintInfo := tblInfo.FindConstraintInfoByName(constrName.L)
	if constraintInfo == nil {
		job.State = model.JobStateCancelled
		return nil, nil, nil, false, dbterror.ErrConstraintNotFound.GenWithStackByArgs(constrName)
	}
	return dbInfo, tblInfo, constraintInfo, enforced, nil
}
// allocateConstraintID hands out the next monotonically increasing constraint
// ID for the table, recording the new high-water mark in MaxConstraintID.
func allocateConstraintID(tblInfo *model.TableInfo) int64 {
tblInfo.MaxConstraintID++
return tblInfo.MaxConstraintID
}
// buildConstraintInfo creates a model.ConstraintInfo from a parsed AST
// constraint node: it validates the identifier length, renders the check
// expression back to its canonical SQL string, and records the dependent
// columns and initial schema state.
func buildConstraintInfo(tblInfo *model.TableInfo, dependedCols []model.CIStr, constr *ast.Constraint, state model.SchemaState) (*model.ConstraintInfo, error) {
	constraintName := model.NewCIStr(constr.Name)
	if err := checkTooLongConstraint(constraintName); err != nil {
		return nil, errors.Trace(err)
	}
	// Restore the check constraint expression to its canonical string form.
	var sb strings.Builder
	restoreFlags := format.RestoreStringSingleQuotes | format.RestoreKeyWordLowercase | format.RestoreNameBackQuotes |
		format.RestoreSpacesAroundBinaryOperation
	restoreCtx := format.NewRestoreCtx(restoreFlags, &sb)
	// (The previous sb.Reset() here was dead code: sb is freshly declared.)
	if err := constr.Expr.Restore(restoreCtx); err != nil {
		return nil, errors.Trace(err)
	}
	// Assemble the constraint info.
	constraintInfo := &model.ConstraintInfo{
		Name:           constraintName,
		Table:          tblInfo.Name,
		ConstraintCols: dependedCols,
		ExprString:     sb.String(),
		Enforced:       constr.Enforced,
		InColumn:       constr.InColumn,
		State:          state,
	}
	return constraintInfo, nil
}
// checkTooLongConstraint rejects constraint identifiers longer than MySQL's
// maximum constraint identifier length.
func checkTooLongConstraint(constr model.CIStr) error {
if len(constr.L) > mysql.MaxConstraintIdentifierLen {
return dbterror.ErrTooLongIdent.GenWithStackByArgs(constr)
}
return nil
}
// findDependentColsInExpr returns the set of lower-cased column names that
// exprNode depends on.
func findDependentColsInExpr(expr ast.ExprNode) map[string]struct{} {
	names := FindColumnNamesInExpr(expr)
	deps := make(map[string]struct{}, len(names))
	for _, name := range names {
		deps[name.Name.L] = struct{}{}
	}
	return deps
}
// verifyRemainRecordsForCheckConstraint checks that every existing row of the
// table satisfies the check expression. A violating row cancels the job and
// returns ErrCheckConstraintIsViolated.
func (w *worker) verifyRemainRecordsForCheckConstraint(dbInfo *model.DBInfo, tableInfo *model.TableInfo, constr *model.ConstraintInfo, job *model.Job) error {
// Inject a fail-point to skip the remaining records check.
failpoint.Inject("mockVerifyRemainDataSuccess", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(nil)
}
})
// Get sessionctx from ddl context resource pool in ddl worker.
var sctx sessionctx.Context
sctx, err := w.sessPool.Get()
if err != nil {
return errors.Trace(err)
}
defer w.sessPool.Put(sctx)
// If there is any row can't pass the check expression, the add constraint action will error.
// It's no need to construct expression node out and pull the chunk rows through it. Here we
// can let the check expression restored string as the filter in where clause directly.
// Prepare internal SQL to fetch data from physical table under this filter.
sql := fmt.Sprintf("select 1 from `%s`.`%s` where not %s limit 1", dbInfo.Name.L, tableInfo.Name.L, constr.ExprString)
ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL)
rows, _, err := sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, sql)
if err != nil {
return errors.Trace(err)
}
rowCount := len(rows)
if rowCount != 0 {
// A violating row exists: cancel the job so it is not retried forever.
job.State = model.JobStateCancelled
return dbterror.ErrCheckConstraintIsViolated.GenWithStackByArgs(constr.Name.L)
}
return nil
}
// setNameForConstraintInfo assigns a generated name ("<table>_chk_<n>") to
// every constraint in infos that does not already have one, skipping names
// already present in namesMap and recording each name it hands out.
func setNameForConstraintInfo(tableLowerName string, namesMap map[string]bool, infos []*model.ConstraintInfo) {
	prefix := tableLowerName + "_chk_"
	seq := 1
	for _, info := range infos {
		if info.Name.O != "" {
			continue // explicit name: leave untouched
		}
		// Advance seq until the candidate name is unused.
		candidate := fmt.Sprintf("%s%d", prefix, seq)
		for namesMap[candidate] {
			seq++
			candidate = fmt.Sprintf("%s%d", prefix, seq)
		}
		namesMap[candidate] = true
		info.Name = model.NewCIStr(candidate)
	}
}
// IsColumnDroppableWithCheckConstraint returns an error when col is referenced
// by a check constraint that also involves other columns; such a column cannot
// be dropped. Single-column constraints do not block the drop.
func IsColumnDroppableWithCheckConstraint(col model.CIStr, tblInfo *model.TableInfo) error {
	for _, cons := range tblInfo.Constraints {
		if len(cons.ConstraintCols) <= 1 {
			continue
		}
		for _, dep := range cons.ConstraintCols {
			if dep.L == col.L {
				return dbterror.ErrCantDropColWithCheckConstraint.GenWithStackByArgs(cons.Name, col)
			}
		}
	}
	return nil
}
// IsColumnRenameableWithCheckConstraint check whether the column is referenced in check-constraint;
// any reference at all (regardless of how many columns the constraint
// involves) blocks the rename.
// NOTE(review): this reuses ErrCantDropColWithCheckConstraint for the rename
// case — confirm a rename-specific error isn't intended.
func IsColumnRenameableWithCheckConstraint(col model.CIStr, tblInfo *model.TableInfo) error {
for _, cons := range tblInfo.Constraints {
for _, colName := range cons.ConstraintCols {
if colName.L == col.L {
return dbterror.ErrCantDropColWithCheckConstraint.GenWithStackByArgs(cons.Name, col)
}
}
}
return nil
}
|
package api_server
import (
"time"
"github.com/emicklei/go-restful"
"github.com/kumahq/kuma/pkg/core"
"github.com/kumahq/kuma/pkg/core/resources/access"
"github.com/kumahq/kuma/pkg/core/resources/apis/mesh"
"github.com/kumahq/kuma/pkg/core/resources/apis/system"
"github.com/kumahq/kuma/pkg/core/resources/manager"
rest_errors "github.com/kumahq/kuma/pkg/core/rest/errors"
)
// globalInsightsEndpoints serves the read-only /global-insights endpoint.
type globalInsightsEndpoints struct {
resManager manager.ResourceManager
resourceAccess access.ResourceAccess
}
// globalInsightsStat carries a single aggregate counter.
type globalInsightsStat struct {
Total uint32 `json:"total"`
}
// globalInsightsResponse is the JSON payload returned by /global-insights.
type globalInsightsResponse struct {
Type string `json:"type"`
CreationTime time.Time `json:"creationTime"`
Meshes globalInsightsStat `json:"meshes"`
Zones globalInsightsStat `json:"zones"`
ZoneIngresses globalInsightsStat `json:"zoneIngresses"`
}
// newGlobalInsightsResponse assembles a globalInsightsResponse stamped with
// the current time.
func newGlobalInsightsResponse(meshes, zones, zoneIngresses globalInsightsStat) *globalInsightsResponse {
	resp := &globalInsightsResponse{
		Type:         "GlobalInsights",
		CreationTime: core.Now(),
	}
	resp.Meshes = meshes
	resp.Zones = zones
	resp.ZoneIngresses = zoneIngresses
	return resp
}
// addEndpoint registers GET /global-insights on the given web service.
func (r *globalInsightsEndpoints) addEndpoint(ws *restful.WebService) {
ws.Route(ws.GET("/global-insights").To(r.inspectGlobalResources).
Doc("Inspect all global resources").
Returns(200, "OK", nil))
}
// inspectGlobalResources handles GET /global-insights: it counts meshes,
// zones and zone ingresses and writes the aggregate counters as JSON.
func (r *globalInsightsEndpoints) inspectGlobalResources(request *restful.Request, response *restful.Response) {
	ctx := request.Request.Context()
	meshList := &mesh.MeshResourceList{}
	if err := r.resManager.List(ctx, meshList); err != nil {
		rest_errors.HandleError(response, err, "Could not retrieve global insights")
		return
	}
	zoneList := &system.ZoneResourceList{}
	if err := r.resManager.List(ctx, zoneList); err != nil {
		rest_errors.HandleError(response, err, "Could not retrieve global insights")
		return
	}
	ingressList := &mesh.ZoneIngressResourceList{}
	if err := r.resManager.List(ctx, ingressList); err != nil {
		rest_errors.HandleError(response, err, "Could not retrieve global insights")
		return
	}
	insights := newGlobalInsightsResponse(
		globalInsightsStat{Total: uint32(len(meshList.Items))},
		globalInsightsStat{Total: uint32(len(zoneList.Items))},
		globalInsightsStat{Total: uint32(len(ingressList.Items))},
	)
	if err := response.WriteAsJson(insights); err != nil {
		rest_errors.HandleError(response, err, "Could not retrieve global insights")
	}
}
|
package service
import (
"bytes"
"fmt"
"gin-vue-admin/global"
"gin-vue-admin/model"
"gin-vue-admin/model/request"
)
// @title CreateTitTrainingInfo
// @description insert a new TitTrainingInfo row into the database
// @param trainingInfo model.TitTrainingInfo
// @auth weiqin
// @return err error (nil on success)
func CreateTitTrainingInfo(trainingInfo model.TitTrainingInfo) (err error) {
err = global.GVA_DB.Create(&trainingInfo).Error
return err
}
// @title DeleteTitTrainingInfo
// @description delete the given TitTrainingInfo row (matched by primary key)
// @auth weiqin
// @param trainingInfo model.TitTrainingInfo
// @return err error
func DeleteTitTrainingInfo(trainingInfo model.TitTrainingInfo) (err error) {
err = global.GVA_DB.Delete(trainingInfo).Error
return err
}
// @title UpdateTitTrainingInfo
// @description save the full TitTrainingInfo record (all fields are written)
// @param trainingInfo *model.TitTrainingInfo
// @auth weiqin
// @return err error
func UpdateTitTrainingInfo(trainingInfo *model.TitTrainingInfo) (err error) {
err = global.GVA_DB.Save(trainingInfo).Error
return err
}
// @title GetTitTrainingInfo
// @description fetch a single TitTrainingInfo by its id
// @auth weiqin
// @param id uint
// @return err error (record-not-found is surfaced as an error)
// @return trainingInfo model.TitTrainingInfo
func GetTitTrainingInfo(id uint) (err error, trainingInfo model.TitTrainingInfo) {
err = global.GVA_DB.Where("id = ?", id).First(&trainingInfo).Error
return
}
// GetTitTrainingInfoInfoList returns one page of TitTrainingInfo rows plus
// the total row count.
// @auth weiqin
// @param info request.PageInfo (page number and page size)
// @return err error, list interface{} ([]model.TitTrainingInfo), total int
func GetTitTrainingInfoInfoList(info request.PageInfo) (err error, list interface{}, total int) {
	limit := info.PageSize
	offset := info.PageSize * (info.Page - 1)
	db := global.GVA_DB
	var trainingInfos []model.TitTrainingInfo
	// Count on the model directly: the previous Find(...).Count(...) form
	// loaded every row just to count them, and its error was silently
	// overwritten by the page query below.
	if err = db.Model(&model.TitTrainingInfo{}).Count(&total).Error; err != nil {
		return err, trainingInfos, total
	}
	err = db.Limit(limit).Offset(offset).Find(&trainingInfos).Error
	return err, trainingInfos, total
}
// BatchAddTrainingInfo bulk-inserts the given training-info rows in a single
// INSERT statement.
// @param trainingInfos the rows to insert; an empty/nil slice is a no-op
// @return err error from the underlying Exec
func BatchAddTrainingInfo(trainingInfos []model.TitTrainingInfo) (err error) {
	// Guard empty batches: the previous code only checked for nil, so a
	// non-nil empty slice executed a malformed "INSERT ... values " statement.
	if len(trainingInfos) == 0 {
		return
	}
	var buffer bytes.Buffer
	buffer.WriteString("INSERT INTO `tit_training_infos` (`tit_user_baseinfo_id`,`training_course`,`begin_time`,`end_time`,`payment_way`) values ")
	// Use bind parameters instead of interpolating values with fmt.Sprintf:
	// fields such as TrainingCourse are user input, and string-built SQL is
	// injectable.
	args := make([]interface{}, 0, len(trainingInfos)*5)
	for i, e := range trainingInfos {
		sep := ","
		if i == len(trainingInfos)-1 {
			sep = ";"
		}
		buffer.WriteString(fmt.Sprintf("(?,?,?,?,?)%s", sep))
		args = append(args, e.TitUserBaseinfoId, e.TrainingCourse, e.BeginTime.Format("2006-01-02 15:04:05"), e.EndTime.Format("2006-01-02 15:04:05"), e.PaymentWay)
	}
	err = global.GVA_DB.Exec(buffer.String(), args...).Error
	return
}
// BatchModifyTrainingInfo replaces all training-info rows belonging to the
// user of trainingInfos[0]: it deletes the user's existing rows, then
// bulk-inserts the given ones.
// @param trainingInfos replacement rows; an empty/nil slice is a no-op
// @return err error from the insert (the delete error is not checked)
func BatchModifyTrainingInfo(trainingInfos []model.TitTrainingInfo) (err error) {
	// Guard the trainingInfos[0] access below (the previous code panicked on
	// an empty slice) and avoid executing a malformed bare INSERT.
	if len(trainingInfos) == 0 {
		return
	}
	db := global.GVA_DB
	// Remove the rows previously associated with this user.
	db.Where("tit_user_baseinfo_id = ? ", trainingInfos[0].TitUserBaseinfoId).Delete(model.TitTrainingInfo{})
	var buffer bytes.Buffer
	buffer.WriteString("INSERT INTO `tit_training_infos` (`tit_user_baseinfo_id`,`training_course`,`begin_time`,`end_time`,`payment_way`) values ")
	// Bind parameters instead of fmt.Sprintf interpolation: the course name
	// is user input and string-built SQL is injectable.
	args := make([]interface{}, 0, len(trainingInfos)*5)
	for i, e := range trainingInfos {
		sep := ","
		if i == len(trainingInfos)-1 {
			sep = ";"
		}
		buffer.WriteString(fmt.Sprintf("(?,?,?,?,?)%s", sep))
		args = append(args, e.TitUserBaseinfoId, e.TrainingCourse, e.BeginTime.Format("2006-01-02 15:04:05"), e.EndTime.Format("2006-01-02 15:04:05"), e.PaymentWay)
	}
	err = db.Exec(buffer.String(), args...).Error
	return
}
// QueryTrainingInfoByBaseId returns every training-info row linked to the
// given user base-info id.
// NOTE(review): the query error is discarded, so a DB failure is
// indistinguishable from "no rows" — confirm that is acceptable to callers.
func QueryTrainingInfoByBaseId(titUserBaseinfoId int) (trainingInfos []model.TitTrainingInfo) {
db := global.GVA_DB
db.Where("tit_user_baseinfo_id = ?", titUserBaseinfoId).Find(&trainingInfos)
return
}
|
package linijka
import (
"fmt"
"io"
"log"
"strings"
"golang.org/x/text/encoding/charmap"
)
// encodeWindows1250 converts a UTF-8 string to its Windows-1250 (Central
// European) encoding, returned as a raw-byte string.
// NOTE(review): the encoder error is deliberately discarded; for input
// containing characters unmappable to Windows-1250 the returned string may be
// incomplete — confirm this best-effort behavior is intended.
func encodeWindows1250(inp string) string {
enc := charmap.Windows1250.NewEncoder()
out, _ := enc.String(inp)
return out
}
// tobytes encodes the string as Windows-1250 and returns the raw bytes.
func tobytes(inp string) []byte {
	encoded := encodeWindows1250(inp)
	return []byte(encoded)
}
// sumxor computes two single-byte checksums over text: the wrapping (mod 256)
// byte sum and the running XOR of all bytes, both widened to int.
func sumxor(text []byte) (int, int) {
	var sum, xor byte
	for i := range text {
		sum += text[i]
		xor ^= text[i]
	}
	return int(sum), int(xor)
}
// addstart prefixes text with the default "<START1>" header unless it already
// begins with a "<START" tag.
func addstart(text string) string {
	if strings.HasPrefix(text, "<START") {
		return text
	}
	return fmt.Sprintf("<START1>%s", text)
}
// Wrapincrc ensures text carries a <START...> header, then appends a
// "<STOP<sum><xor>>" trailer (both checksums in uppercase hex, computed over
// the Windows-1250 encoding of the framed text) followed by CRLF.
func Wrapincrc(text string) string {
	framed := addstart(text)
	sum, xor := sumxor(tobytes(framed))
	return fmt.Sprintf("%s<STOP%X%X>\r\n", framed, sum, xor)
}
// checkspecial reports whether e starts with any of the prefixes in s.
func checkspecial(s []string, e string) bool {
	for _, prefix := range s {
		if strings.HasPrefix(e, prefix) {
			return true
		}
	}
	return false
}
// InjectFlag inserts flag immediately after the <START...> header of text, or
// at the very beginning when no header is present. A malformed header
// ("<START" with no closing '>') aborts the program via log.Fatalf.
func InjectFlag(text string, flag string) string {
	header := ""
	if strings.HasPrefix(text, "<START") {
		parts := strings.SplitN(text, ">", 2)
		if len(parts) != 2 {
			log.Fatalf("Can't split, got :%s, len(split): %v", text, parts)
		}
		header = fmt.Sprintf("%s>", parts[0])
		text = parts[1]
	}
	return fmt.Sprintf("%s%s%s", header, flag, text)
}
// LinijkaWriter frames text (adding the start header and CRC trailer unless
// it is one of the special control commands), normalizes the trailing CRLF,
// and writes the Windows-1250 bytes to w.
func LinijkaWriter(w io.Writer, text string) (n int, err error) {
	specials := []string{"<STATUS>", "<LEDS", "<CLOCK", "<TIME", "<SETP", "<RESETP"}
	// Control commands are sent unframed; everything else gets the CRC frame.
	if !checkspecial(specials, text) {
		text = Wrapincrc(text)
	}
	text = fmt.Sprintf("%s\r\n", strings.TrimSpace(text))
	return w.Write(tobytes(text))
}
|
package serializer
import (
"fmt"
"github.com/allentom/youcomic-api/model"
"github.com/allentom/youcomic-api/services"
"github.com/jinzhu/copier"
"path"
"strconv"
"time"
)
// BaseBookTemplate is the serialized JSON representation of a model.Book,
// including a cache-busted cover URL and the book's typed tags.
type BaseBookTemplate struct {
ID uint `json:"id"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
Name string `json:"name"`
Cover string `json:"cover"` // content URL with "?t=<unix>" appended for cache busting
LibraryId uint `json:"library_id"`
Tags interface{} `json:"tags"` // serialized BaseTagTemplate entries
}
// Serializer fills the template from a model.Book: it copies the scalar
// fields, rewrites Cover into a "/content/book/<id>/<cover>" URL with a
// timestamp query parameter, and attaches the book's
// artist/translator/series/theme tags.
// dataModel must be a model.Book; the type assertion panics otherwise.
func (b *BaseBookTemplate) Serializer(dataModel interface{}, context map[string]interface{}) error {
serializerModel := dataModel.(model.Book)
err := copier.Copy(b, serializerModel)
if err != nil {
return err
}
if len(b.Cover) != 0 {
// Append the current unix time so clients refetch the cover after updates.
b.Cover = fmt.Sprintf("%s?t=%d",
path.Join("/","content", "book", strconv.Itoa(int(serializerModel.ID)), serializerModel.Cover),
time.Now().Unix(),
)
}
tags, err := services.GetBookTagsByTypes(serializerModel.ID, "artist", "translator", "series", "theme")
if err != nil {
return err
}
serializedTags := SerializeMultipleTemplate(tags, &BaseTagTemplate{}, nil)
b.Tags = serializedTags
return nil
}
// BookDailySummaryTemplate is the serialized per-day book count.
type BookDailySummaryTemplate struct {
Date string `json:"date"`
Total int `json:"total"`
}
// Serializer copies the matching fields of dataModel into the template via
// reflection (copier); the context argument is unused.
func (b *BookDailySummaryTemplate) Serializer(dataModel interface{}, context map[string]interface{}) error {
err := copier.Copy(b, dataModel)
return err
}
|
package main
import "fmt"
// main demonstrates basic map operations: insert, print, delete and iterate.
func main() {
	grades := map[string]float64{
		"Timmy":   42,
		"Jessica": 92,
		"Matt":    70,
	}
	fmt.Println(grades)
	// Drop Timmy's entry; delete is a no-op when the key is absent.
	delete(grades, "Timmy")
	fmt.Println(grades)
	// Map iteration order is intentionally randomized in Go.
	for name, grade := range grades {
		fmt.Println(name, grade)
	}
}
package helper
import "fmt"
// SayHello prints a fixed greeting to stdout (no trailing newline).
func SayHello() {
	fmt.Print("Hai danil syah")
}
// func person() {
// fmt.Println("kenalkan")
// }
|
package main
import (
"sync"
)
// Labels classifying the outcome of a response-pair comparison.
const (
diffStatusCodeError = "diff-status-code"
errorUnmarshalLeftError = "error-unmarshal-left"
errorUnmarshalRightError = "error-unmarshal-right"
ok = "ok"
)
// Consumer validates producer output, ignoring the listed Exclude fields when
// comparing JSON bodies.
type Consumer struct {
Exclude []string
}
// NewConsumer builds a Consumer seeded with the exclude list from the
// package-level options (presumably parsed configuration/flags — confirm).
func NewConsumer() *Consumer {
return &Consumer{
Exclude: options.Exclude,
}
}
// Consume fans streamProducer out to options.Currency worker goroutines (the
// field name appears to mean the concurrency level — confirm), validates each
// HostsPair, and funnels the results into the returned channel. The result
// channel is closed once the producer channel is drained and all workers have
// finished.
func (consumer Consumer) Consume(streamProducer <-chan HostsPair) <-chan StatusValidationError {
streamConsumer := make(chan StatusValidationError)
go func() {
// Close only after every worker is done, so receivers see a clean end.
defer close(streamConsumer)
var wg sync.WaitGroup
wg.Add(options.Currency)
for w := 0; w < options.Currency; w++ {
go func() {
defer wg.Done()
for producerValue := range streamProducer {
streamConsumer <- validate(producerValue, consumer.Exclude)
}
}()
}
wg.Wait()
}()
return streamConsumer
}
// validate repeatedly compares the pair of responses: each non-terminal field
// mismatch is recorded, added to the exclusion list, and the comparison is
// retried, until it succeeds or a terminal ("basic") error is hit. All
// failing field labels are accumulated into the result.
func validate(hostsPair HostsPair, fieldsToExclude []string) StatusValidationError {
var fieldErrorArray []string
isOk, fieldError, statusCodes := isComparisonJsonResponseOk(hostsPair, fieldsToExclude)
for !isOk {
if !isFieldErrorBasic(fieldError) {
// Non-terminal mismatch: exclude the offending field and re-compare.
fieldErrorArray = append(fieldErrorArray, fieldError)
fieldsToExclude = append(fieldsToExclude, fieldError)
isOk, fieldError, statusCodes = isComparisonJsonResponseOk(hostsPair, fieldsToExclude)
} else {
// Terminal error (status-code or unmarshal failure): record and stop.
fieldErrorArray = append(fieldErrorArray, fieldError)
break
}
}
// The pair counts as OK only when no real field errors were recorded.
if len(fieldErrorArray) > 0 && fieldErrorArray[0] != ok {
isOk = false
} else {
isOk = true
}
result := StatusValidationError{
RelativePath: hostsPair.RelativeURL,
IsComparisonOk: isOk,
FieldError: fieldErrorArray,
StatusCodes: statusCodes,
}
return result
}
// isFieldErrorBasic reports whether fieldError is one of the terminal labels
// (status-code mismatch or an unmarshal failure) that cannot be worked around
// by excluding a field.
func isFieldErrorBasic(fieldError string) bool {
	return fieldError == diffStatusCodeError ||
		fieldError == errorUnmarshalLeftError ||
		fieldError == errorUnmarshalRightError
}
// isComparisonJsonResponseOk compares the two host responses of hostsPair and
// returns (ok, error-label, status codes). The label is one of the package
// constants above or the name of the first differing JSON field.
// A 401 on either side panics, since comparison is meaningless without
// authorization.
func isComparisonJsonResponseOk(hostsPair HostsPair, excludeFields []string) (bool, string, string) {
	statusCodes := hostsPair.getStatusCodes()
	if hostsPair.Has401() {
		panic("Authorization problem")
	}
	if hostsPair.HasErrors() || !hostsPair.EqualStatusCode() {
		// Use the shared constants instead of repeating the literals.
		fieldErrorCounter.Add(diffStatusCodeError)
		return false, diffStatusCodeError, statusCodes
	}
	// Non-200 pairs with equal status codes are considered equivalent.
	if !hostsPair.HasStatusCode200() {
		return true, ok, statusCodes
	}
	leftJSON, err := unmarshal(hostsPair.Left.Body)
	if err != nil {
		fieldErrorCounter.Add(errorUnmarshalLeftError)
		return false, errorUnmarshalLeftError, statusCodes
	}
	rightJSON, err := unmarshal(hostsPair.Right.Body)
	if err != nil {
		fieldErrorCounter.Add(errorUnmarshalRightError)
		return false, errorUnmarshalRightError, statusCodes
	}
	// Strip excluded fields from both documents. The previous code guarded
	// this loop with len(options.Exclude) > 0, which ignored exclusions that
	// validate() accumulates at runtime whenever no static excludes were
	// configured — causing its retry loop to spin without making progress.
	for _, excludeField := range excludeFields {
		Remove(leftJSON, excludeField)
		Remove(rightJSON, excludeField)
	}
	isEqual, fieldError := Equal(leftJSON, rightJSON)
	if !isEqual {
		fieldErrorCounter.Add(fieldError)
		return false, fieldError, statusCodes
	}
	return true, ok, statusCodes
}
// unmarshal decodes a JSON body via the package-level Unmarshal helper,
// returning nil (not a partial value) on failure.
func unmarshal(b []byte) (interface{}, error) {
	decoded, err := Unmarshal(b)
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// StatusValidationError is the per-URL comparison outcome emitted by Consume.
type StatusValidationError struct {
RelativePath string
IsComparisonOk bool
FieldError []string // accumulated error labels / differing field names
StatusCodes string
}
|
// Copyright (c) 2019 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package atomicfile contains code related to writing to filesystems
// atomically.
//
// This package should be considered internal; its API is not stable.
package atomicfile // import "tailscale.com/atomicfile"
import (
"fmt"
"io/ioutil"
"os"
)
// WriteFile writes data to filename+some suffix, then renames it
// into filename.
func WriteFile(filename string, data []byte, perm os.FileMode) error {
tmpname := filename + ".new.tmp"
if err := ioutil.WriteFile(tmpname, data, perm); err != nil {
return fmt.Errorf("%#v: %v", tmpname, err)
}
if err := os.Rename(tmpname, filename); err != nil {
return fmt.Errorf("%#v->%#v: %v", tmpname, filename, err)
}
return nil
}
|
package vgform
import "github.com/vugu/vugu"
// Textarea corresponds to a textarea HTML element.
type Textarea struct {
Value StringValuer // get/set the currently selected value
// AttrMap carries additional HTML attributes to render onto the element.
AttrMap vugu.AttrMap
}
// handleChange propagates the textarea's new DOM value into the bound
// StringValuer whenever a change event fires; the component keeps no copy of
// the value itself.
func (c *Textarea) handleChange(event vugu.DOMEvent) {
newVal := event.PropString("target", "value")
c.Value.SetStringValue(newVal)
}
|
package version
import (
"errors"
"fmt"
"os"
"regexp"
"sort"
"strings"
)
// Build-time identity; these are expected to be overridden via -ldflags.
var (
// Version stores the version of the current build (e.g. 2.0.0)
Version = "dev"
// PreReleaseIdentifier stores the pre-release identifier of the current build (eg. beta-2)
PreReleaseIdentifier string
// BuildDate stores the timestamp of the build (e.g. 2017-07-31T13:11:15-0700)
BuildDate string
// BuildSHA stores the git sha of the build (e.g. 8673bed0a9705083987b9ecbbc1cc0758df13dd2)
BuildSHA string
)
// Sentinel errors returned by the tag-parsing helpers below.
var (
ErrNoBuildIteration = errors.New("Build iteration could not be found. If running locally you must set SENSU_BUILD_ITERATION.")
TagParseError = errors.New("A build iteration could not be parsed from the tag")
)
// Regular expressions used to pick apart git tags:
// buildNumberRE matches the trailing build-iteration number;
// prereleaseVersionRE captures the numeric prerelease version (the 2 in "...beta.2-1");
// versionRE matches a leading MAJOR.MINOR.PATCH of single digits each.
var (
buildNumberRE = regexp.MustCompile(`[0-9]+$`)
prereleaseVersionRE = regexp.MustCompile(`.*\-.*\.([0-9]+)\-[0-9]+$`)
versionRE = regexp.MustCompile(`^[0-9]\.[0-9]\.[0-9]`)
)
// BuildType classifies a build by its release channel.
type BuildType string
// Recognized build types, roughly from least to most stable.
const (
Nightly BuildType = "nightly"
Alpha BuildType = "alpha"
Beta BuildType = "beta"
RC BuildType = "rc"
Stable BuildType = "stable"
)
// Semver returns the full semantic-versioning compatible identifier.
// Format: VERSION-PRERELEASE+METADATA, where the metadata is the short
// (7-character) git SHA.
func Semver() string {
	version := Version
	if PreReleaseIdentifier != "" {
		version = version + "-" + PreReleaseIdentifier
	}
	gitSHA := BuildSHA
	if len(gitSHA) > 7 {
		gitSHA = gitSHA[:7]
	}
	// Join the build metadata with "+" as the SemVer spec requires; the
	// previous "#" separator contradicted the documented format and produced
	// an identifier that was not valid semver.
	return version + "+" + gitSHA
}
// BuildTypeFromTag discovers the BuildType encoded in a git tag. An empty tag
// means a nightly build; a tag matching none of the known prerelease markers
// is treated as stable.
func BuildTypeFromTag(tag string) BuildType {
	if tag == "" {
		return Nightly
	}
	// Substring matching identifies the release channel.
	for _, candidate := range []BuildType{Alpha, Beta, RC, Stable} {
		if strings.Contains(tag, string(candidate)) {
			return candidate
		}
	}
	return Stable
}
// HighestVersion returns the highest-sorted MAJOR.MINOR.PATCH version among
// the given git tags (ordering defined by byVersion). Tags from which no
// version can be parsed are skipped; an error is returned when none yields
// one.
func HighestVersion(tags []string) (string, error) {
	// Compile once, outside the loop — the original recompiled both
	// patterns for every tag.
	letters := regexp.MustCompile("[a-zA-Z]+")
	versionPrefix := regexp.MustCompile("^([0-9]+.)([0-9]+.)([0-9]+)")
	uniqueTags := make(map[string]struct{})
	for _, tag := range tags {
		stripped := letters.ReplaceAllString(tag, "")
		// Guard the no-match case: the original indexed [0] unconditionally
		// and panicked on tags with no parsable version.
		m := versionPrefix.FindStringSubmatch(stripped)
		if m == nil {
			continue
		}
		uniqueTags[m[0]] = struct{}{}
	}
	// Also guards the previous index-out-of-range panic on empty input.
	if len(uniqueTags) == 0 {
		return "", errors.New("no version could be parsed from the given tags")
	}
	sortedTags := make([]string, 0, len(uniqueTags))
	for tag := range uniqueTags {
		sortedTags = append(sortedTags, tag)
	}
	sort.Sort(byVersion(sortedTags))
	return sortedTags[0], nil
}
// Iteration extracts the build-iteration number for tag. Nightly builds have
// no tag, so the iteration comes from the SENSU_BUILD_ITERATION environment
// variable; its absence yields ErrNoBuildIteration. For tagged builds the
// tag's trailing number is used, and TagParseError is returned when the tag
// has none.
func Iteration(tag string) (string, error) {
	if BuildTypeFromTag(tag) == Nightly {
		bi := os.Getenv("SENSU_BUILD_ITERATION")
		if bi == "" {
			return "", ErrNoBuildIteration
		}
		return bi, nil
	}
	iteration := buildNumberRE.FindString(tag)
	if iteration == "" {
		return iteration, TagParseError
	}
	return iteration, nil
}
// GetPrereleaseVersion extracts the numeric prerelease version from a
// prerelease tag (e.g. the "2" of "2.0.0-beta.2-1"). Only alpha, beta and rc
// tags are supported; any other build type is an error.
func GetPrereleaseVersion(tag string) (string, error) {
	buildType := BuildTypeFromTag(tag)
	switch buildType {
	case Alpha, Beta, RC:
		// Use a distinct name for the captured version instead of the
		// original's shadowing of the build-type variable.
		version := ""
		if matches := prereleaseVersionRE.FindStringSubmatch(tag); len(matches) > 1 {
			version = matches[1]
		}
		if version == "" {
			return version, fmt.Errorf("a prerelease version could not be parsed from %q", tag)
		}
		return version, nil
	default:
		return "", fmt.Errorf("build type not supported for prerelease: %q", buildType)
	}
}
// GetVersion will output the version of the build (without iteration).
// Nightly builds with no version tag fall back to "dev"; all other build
// types require a parseable base version.
func GetVersion(tag string) (string, error) {
	bt := BuildTypeFromTag(tag)
	baseVersion := versionRE.FindString(tag)
	switch bt {
	case Nightly:
		// Only nightlies may lack a version tag entirely.
		if baseVersion == "" {
			baseVersion = "dev"
		}
		return fmt.Sprintf("%s-%s", baseVersion, bt), nil
	case Alpha, Beta, RC:
		// Previously baseVersion had already been defaulted to "dev" for
		// every build type, making this check unreachable and silently
		// producing "dev-..." for unparseable tags; now it is an error.
		if baseVersion == "" {
			return "", fmt.Errorf("invalid tag: %q", tag)
		}
		pre, err := GetPrereleaseVersion(tag)
		if err != nil {
			return "", err
		}
		return fmt.Sprintf("%s-%s.%s", baseVersion, bt, pre), nil
	case Stable:
		if baseVersion == "" {
			return "", fmt.Errorf("invalid tag: %q", tag)
		}
		return baseVersion, nil
	default:
		// BuildTypeFromTag only returns the four cases above.
		panic("unreachable")
	}
}
// FullVersion will output the version of the build (with iteration).
func FullVersion(tag string) (string, error) {
	iteration, err := Iteration(tag)
	if err != nil {
		return "", err
	}
	version, err := GetVersion(tag)
	if err != nil {
		return "", err
	}
	// Join version and iteration with a dash, e.g. "1.2.3-1".
	return version + "-" + iteration, nil
}
|
package util
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// CreateServiceAccountWithToken creates a service account with a given name with a service account token.
// Need to use this function to simulate the actual behavior of Kubernetes API server with the fake client.
func CreateServiceAccountWithToken(clientset kubernetes.Interface, namespace, name, tokenName string) (*corev1.ServiceAccount, error) {
	// Create the service account itself first.
	account, err := clientset.CoreV1().ServiceAccounts(namespace).Create(
		&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name}})
	if err != nil {
		return nil, err
	}
	// Create the token secret, annotated so it is bound to the account.
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: tokenName,
			Annotations: map[string]string{
				corev1.ServiceAccountNameKey: account.Name,
				corev1.ServiceAccountUIDKey:  string(account.UID),
			},
		},
		Type: corev1.SecretTypeServiceAccountToken,
	}
	created, err := clientset.CoreV1().Secrets(namespace).Create(secret)
	if err != nil {
		return nil, err
	}
	// Link the secret back onto the account, as the real API server would.
	account.Secrets = []corev1.ObjectReference{{Name: created.Name}}
	return clientset.CoreV1().ServiceAccounts(namespace).Update(account)
}
|
package main
import (
"context"
"errors"
"fmt"
"golang.org/x/sync/errgroup"
"os"
"os/signal"
"syscall"
)
// main demonstrates coordinated shutdown with errgroup: when any goroutine
// exits (e.g. the signal handler on SIGINT), the shared context is
// cancelled and every other goroutine unwinds.
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Derive the errgroup context from ctx. Previously a fresh
	// context.Background() was passed here, which made the cancel above a
	// no-op for the group's goroutines.
	eg, ctx := errgroup.WithContext(ctx)
	eg.Go(func() error {
		fmt.Println("goroutine1: service start")
		// Block until the group is cancelled.
		<-ctx.Done()
		fmt.Println("goroutine1: service start goroutine cancel")
		return ctx.Err()
	})
	eg.Go(func() error {
		fmt.Println("goroutine2: service stop")
		<-ctx.Done()
		fmt.Println("goroutine2: service stop goroutine cancel")
		return ctx.Err()
	})
	// Signal handler: the first SIGINT fails the group, cancelling ctx and
	// therefore shutting the other goroutines down.
	eg.Go(func() error {
		fmt.Println("goroutine3: signal")
		// signal.Notify requires a buffered channel: a signal delivered
		// while we are not receiving would otherwise be dropped.
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGINT)
		select {
		case <-ctx.Done():
			fmt.Println("goroutine3: signal goroutine cancel")
			return ctx.Err()
		case <-sigs:
			fmt.Println("goroutine3: signal goroutine cancel")
			return errors.New("goroutine3: signal hand exit")
		}
	})
	if err := eg.Wait(); err != nil {
		fmt.Printf("err: %+v", err)
		return
	}
	fmt.Println("done")
}
|
package files
import (
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/quick"
"github.com/therecipe/qt/internal/examples/showcases/wallet/files/controller"
"github.com/therecipe/qt/internal/examples/showcases/wallet/files/dialog"
)
// init registers the filesTemplate type with the QML engine under the name
// "FilesTemplate" (module version 1.0) via the generated registration helper.
func init() { filesTemplate_QmlRegisterType2("FilesTemplate", 1, 0, "FilesTemplate") }

// filesTemplate is the QML-visible backing item for the files view.
// The blank fields' struct tags are presumably processed by the therecipe/qt
// code generator — TODO confirm: `constructor` wires init() below, and
// `property` exposes FilesModel (with generated getter/setter) to QML.
type filesTemplate struct {
	quick.QQuickItem
	// Generated constructor hook: calls init() on construction.
	_ func() `constructor:"init"`
	// Exposed to QML as the "FilesModel" property.
	_ *core.QAbstractItemModel `property:"FilesModel"`
}

// init wires the controller's filtered model into the QML property and
// instantiates the upload dialogs.
func (t *filesTemplate) init() {
	c := controller.NewFilesController(nil)
	t.SetFilesModel(c.Model().Filter)
	//needed here, because those are non qml views
	dialog.NewFilesUploadTemplate(nil)
	dialog.NewFolderUploadTemplate(nil)
}
|
package main
import (
"fmt"
"path"
"strings"
"github.com/go-ini/ini"
"github.com/kjbreil/sil"
"github.com/kjbreil/sil/loc"
"github.com/manifoldco/promptui"
)
// gui interactively builds a CFG SIL file: it prompts for a target,
// terminal group, INI file and section, then writes the resulting SIL file
// to the current directory.
func gui() (err error) {
	// Base CFG record. F1056 (target) and F2846 (terminal group) are filled
	// in from the prompts below; the dead initial values they previously
	// carried in this literal have been removed.
	cfg := loc.CFG{
		F1000: "999901",
		F253:  sil.JulianNow(),
		F940:  999,
		F941:  999,
		F1001: 1,
		F1264: sil.JulianNow(),
	}
	cfg.F1056, err = target()
	if err != nil {
		return err
	}
	cfg.F2846, err = terminalGroup()
	if err != nil {
		return err
	}
	inf, err := iniFile()
	if err != nil {
		return err
	}
	section, s := iniSection(inf, cfg)
	// File base: name plus extension, no directory, upper-cased.
	fb := strings.ToUpper(path.Base(filename))
	// Just the file name without extension.
	f := strings.TrimSuffix(fb, path.Ext(fb))
	// Set the batch # to the julian date; this "should" prevent collision.
	s.Header.F902 = fmt.Sprintf("9%07v", sil.JulianNow())
	// #nosec
	s.Header.F913 = fmt.Sprintf("CONFIGURATOR UPDATE FOR %s %s", fb, section)
	// Write the SIL file using the filename and section.
	return s.Write("CFG_" + f + "_" + section + ".sil")
}
// iniSection prompts for a section of the INI file and builds a SIL file
// containing the CFG rows for that section (or for every section when the
// "ALL" pseudo-entry is chosen). Returns the selected section name and the
// assembled SIL file.
func iniSection(inf *ini.File, cfg loc.CFG) (string, sil.SIL) {
	sections := inf.SectionStrings()
	// Drop the implicit first (DEFAULT) section, then offer "ALL" on top.
	sections = sections[1:]
	sections = append([]string{"ALL"}, sections...)
	prompt := promptui.Select{
		Label: "Select Section: ",
		Items: sections,
	}
	_, result, err := prompt.Run()
	if err != nil {
		// NOTE(review): the error is only printed; execution continues with
		// an empty result. Consider propagating it (would require a
		// signature change affecting gui()).
		fmt.Printf("Prompt failed %v\n", err)
	}
	s := sil.Make("CFG", loc.CFG{})
	if result == "ALL" {
		// sections[1:] skips the "ALL" pseudo-entry itself.
		for _, sec := range sections[1:] {
			s.View.Data = append(s.View.Data, singleSection(sec, cfg, inf)...)
		}
	} else {
		s.View.Data = append(s.View.Data, singleSection(result, cfg, inf)...)
	}
	return result, s
}
// singleSection returns the CFG array of a single section, since sil.View.Data
// is an interface{} the return value also needs to be an interface :-(
func singleSection(name string, cfg loc.CFG, inf *ini.File) []interface{} {
	section := inf.Section(name)
	// Key identifies each entry by file, section and key name.
	key := Key{
		filename: filename,
		section:  section.Name(),
	}
	var rows []interface{}
	for _, entry := range section.Keys() {
		key.key = entry.Name()
		cfg.F2845 = key.String()
		cfg.F2847 = entry.Value()
		rows = append(rows, cfg)
	}
	return rows
}
// iniFile prompts for the path of an INI file, validating that it has an
// .ini extension and can be parsed. The chosen path is stored in the
// package-level filename variable as a side effect.
func iniFile() (inf *ini.File, err error) {
	// Validator: checks the extension, then attempts to load the file
	// (the successful load is captured in the named return inf).
	validate := func(input string) error {
		if path.Ext(input) != ".ini" {
			return fmt.Errorf("%s does not have an INI extension", input)
		}
		inf, err = ini.Load(input)
		if err != nil {
			return fmt.Errorf("%s cannot be opened: %v", input, err)
		}
		return nil
	}
	prompt := promptui.Prompt{
		Label:    "Name of INI file to load",
		Validate: validate,
		Default:  "samples/System.ini",
	}
	filename, err = prompt.Run()
	if err != nil {
		return inf, fmt.Errorf("prompt failed %v", err)
	}
	return inf, nil
}
// terminalGroup prompts for the terminal group, requiring it to be
// all upper-case. Defaults to "GROC_LANE".
func terminalGroup() (string, error) {
	// A plain if replaces the previous single-case switch (idiom).
	validate := func(input string) error {
		if strings.ToUpper(input) != input {
			return fmt.Errorf("group needs to be uppercase")
		}
		return nil
	}
	prompt := promptui.Prompt{
		Label:    "Terminal Group",
		Validate: validate,
		Default:  "GROC_LANE",
	}
	result, err := prompt.Run()
	if err != nil {
		return "", err
	}
	return result, nil
}
// target prompts for the configuration target, requiring it to be
// all upper-case. Defaults to "PAL".
func target() (string, error) {
	// A plain if replaces the previous single-case switch (idiom).
	validate := func(input string) error {
		if strings.ToUpper(input) != input {
			return fmt.Errorf("target needs to be uppercase")
		}
		return nil
	}
	prompt := promptui.Prompt{
		Label:    "Target?",
		Validate: validate,
		Default:  "PAL",
	}
	result, err := prompt.Run()
	if err != nil {
		return "", err
	}
	return result, nil
}
// func directory() string {
// }
|
package multiclient
import (
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
"github.com/iotaledger/wasp/client"
"github.com/iotaledger/wasp/packages/webapi/model"
)
// DKSharesGet retrieves distributed key info with specific ChainID from multiple hosts.
// The i-th slot of the result corresponds to the i-th node.
func (m *MultiClient) DKSharesGet(sharedAddress *address.Address) ([]*model.DKSharesInfo, error) {
	infos := make([]*model.DKSharesInfo, len(m.nodes))
	err := m.Do(func(i int, w *client.WaspClient) error {
		var callErr error
		infos[i], callErr = w.DKSharesGet(sharedAddress)
		return callErr
	})
	return infos, err
}
|
package discovery
import (
"sync"
"github.com/youtube/vitess/go/sync2"
"github.com/youtube/vitess/go/vt/tabletserver/sandboxconn"
"github.com/youtube/vitess/go/vt/tabletserver/tabletconn"
"github.com/youtube/vitess/go/vt/topo"
querypb "github.com/youtube/vitess/go/vt/proto/query"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)
// This file contains the definitions for a FakeHealthCheck class to
// simulate a HealthCheck module. Note it is not in a sub-package because
// otherwise it couldn't be used in this package's tests because of
// circular dependencies.
// NewFakeHealthCheck returns the fake healthcheck object.
func NewFakeHealthCheck() *FakeHealthCheck {
	fhc := &FakeHealthCheck{}
	fhc.items = make(map[string]*fhcItem)
	return fhc
}
// FakeHealthCheck implements discovery.HealthCheck.
type FakeHealthCheck struct {
	// mu protects the items map
	mu sync.RWMutex
	// items is keyed by TabletToMapKey(tablet).
	items map[string]*fhcItem
	// GetStatsFromTargetCounter counts GetTabletStatsFromTarget() being called.
	// (it can be accessed concurrently by 'multiGo', so using atomic)
	GetStatsFromTargetCounter sync2.AtomicInt32
	// GetStatsFromKeyspaceShardCounter counts GetTabletStatsFromKeyspaceShard() being called.
	GetStatsFromKeyspaceShardCounter int
}
// fhcItem is the per-tablet bookkeeping entry: the latest stats plus the
// connection used to talk to the tablet.
type fhcItem struct {
	ts   *TabletStats
	conn tabletconn.TabletConn
}
//
// discovery.HealthCheck interface methods
//
// RegisterStats is not implemented; it exists only to satisfy the
// discovery.HealthCheck interface.
func (fhc *FakeHealthCheck) RegisterStats() {
}
// SetListener is not implemented; it exists only to satisfy the
// discovery.HealthCheck interface.
func (fhc *FakeHealthCheck) SetListener(listener HealthCheckStatsListener) {
}
// AddTablet adds the tablet. Any existing entry with the same map key is
// overwritten. The cell argument is currently unused.
func (fhc *FakeHealthCheck) AddTablet(cell, name string, tablet *topodatapb.Tablet) {
	key := TabletToMapKey(tablet)
	fhc.mu.Lock()
	defer fhc.mu.Unlock()
	fhc.items[key] = &fhcItem{
		ts: &TabletStats{
			Tablet: tablet,
			Name:   name,
		},
	}
}
// RemoveTablet removes the tablet.
func (fhc *FakeHealthCheck) RemoveTablet(tablet *topodatapb.Tablet) {
	key := TabletToMapKey(tablet)
	fhc.mu.Lock()
	defer fhc.mu.Unlock()
	delete(fhc.items, key)
}
// GetTabletStatsFromKeyspaceShard returns all TabletStats for the given keyspace/shard.
func (fhc *FakeHealthCheck) GetTabletStatsFromKeyspaceShard(keyspace, shard string) []*TabletStats {
	// Take the write lock: GetStatsFromKeyspaceShardCounter is a plain int,
	// and incrementing it under RLock() is a data race when multiple
	// readers call this concurrently.
	fhc.mu.Lock()
	defer fhc.mu.Unlock()
	fhc.GetStatsFromKeyspaceShardCounter++
	var res []*TabletStats
	for _, item := range fhc.items {
		if item.ts.Target == nil {
			continue
		}
		if item.ts.Target.Keyspace == keyspace && item.ts.Target.Shard == shard {
			res = append(res, item.ts)
		}
	}
	return res
}
// GetTabletStatsFromTarget returns all TabletStats for the given target.
func (fhc *FakeHealthCheck) GetTabletStatsFromTarget(keyspace, shard string, tabletType topodatapb.TabletType) []*TabletStats {
	fhc.GetStatsFromTargetCounter.Add(1)
	fhc.mu.RLock()
	defer fhc.mu.RUnlock()
	var res []*TabletStats
	for _, item := range fhc.items {
		target := item.ts.Target
		if target == nil {
			continue
		}
		if target.Keyspace == keyspace && target.Shard == shard && target.TabletType == tabletType {
			res = append(res, item.ts)
		}
	}
	return res
}
// GetConnection returns the TabletConn of the given tablet, or nil if the
// tablet is unknown.
func (fhc *FakeHealthCheck) GetConnection(tablet *topodatapb.Tablet) tabletconn.TabletConn {
	key := TabletToMapKey(tablet)
	fhc.mu.RLock()
	defer fhc.mu.RUnlock()
	item, ok := fhc.items[key]
	if !ok || item == nil {
		return nil
	}
	return item.conn
}
// CacheStatus is not implemented; it always returns nil and exists only to
// satisfy the discovery.HealthCheck interface.
func (fhc *FakeHealthCheck) CacheStatus() TabletsCacheStatusList {
	return nil
}
// Close is not implemented; it always returns nil and exists only to
// satisfy the discovery.HealthCheck interface.
func (fhc *FakeHealthCheck) Close() error {
	return nil
}
//
// Management methods
//
// Reset cleans up the internal state: counters are zeroed and all tablets
// are forgotten.
func (fhc *FakeHealthCheck) Reset() {
	fhc.mu.Lock()
	defer fhc.mu.Unlock()
	fhc.GetStatsFromTargetCounter.Set(0)
	fhc.GetStatsFromKeyspaceShardCounter = 0
	fhc.items = map[string]*fhcItem{}
}
// AddTestTablet inserts a fake entry into FakeHealthCheck.
// The Tablet can be talked to using the provided connection.
// Unlike AddTablet, it also populates Target/Serving/Stats so the entry is
// visible to the Get*Stats* lookups, and it returns the sandbox connection.
func (fhc *FakeHealthCheck) AddTestTablet(cell, host string, port int32, keyspace, shard string, tabletType topodatapb.TabletType, serving bool, reparentTS int64, err error) *sandboxconn.SandboxConn {
	t := topo.NewTablet(0, cell, host)
	t.Keyspace = keyspace
	t.Shard = shard
	t.Type = tabletType
	t.PortMap["vt"] = port
	key := TabletToMapKey(t)
	fhc.mu.Lock()
	defer fhc.mu.Unlock()
	// Reuse an existing item for the same key so repeated calls update in
	// place; only create a fresh one the first time.
	item := fhc.items[key]
	if item == nil {
		item = &fhcItem{
			ts: &TabletStats{
				Tablet: t,
			},
		}
		fhc.items[key] = item
	}
	item.ts.Target = &querypb.Target{
		Keyspace:   keyspace,
		Shard:      shard,
		TabletType: tabletType,
	}
	item.ts.Serving = serving
	item.ts.TabletExternallyReparentedTimestamp = reparentTS
	item.ts.Stats = &querypb.RealtimeStats{}
	item.ts.LastError = err
	// The sandbox connection is what tests use to script responses.
	conn := sandboxconn.NewSandboxConn(t)
	item.conn = conn
	return conn
}
// GetAllTablets returns all the tablets we have, keyed by their map key.
func (fhc *FakeHealthCheck) GetAllTablets() map[string]*topodatapb.Tablet {
	fhc.mu.RLock()
	defer fhc.mu.RUnlock()
	all := make(map[string]*topodatapb.Tablet, len(fhc.items))
	for key, item := range fhc.items {
		all[key] = item.ts.Tablet
	}
	return all
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package chainimpl
import (
"bytes"
"github.com/iotaledger/wasp/packages/chain"
"github.com/iotaledger/wasp/packages/peering"
)
// dispatchMessage routes one message from the chain's queue to the state
// manager and/or the consensus operator according to its concrete type.
// The operator can be nil, so it is checked before every use. Messages are
// dropped once the queue has been closed.
func (c *chainObj) dispatchMessage(msg interface{}) {
	if !c.isOpenQueue.Load() {
		return
	}
	switch msgt := msg.(type) {
	case *peering.PeerMessage:
		// receive a message from peer
		c.processPeerMessage(msgt)
	case *chain.StateUpdateMsg:
		// StateUpdateMsg may come from peer and from own consensus operator
		c.stateMgr.EventStateUpdateMsg(msgt)
	case *chain.StateTransitionMsg:
		if c.operator != nil {
			c.operator.EventStateTransitionMsg(msgt)
		}
	case chain.PendingBlockMsg:
		c.stateMgr.EventPendingBlockMsg(msgt)
	case *chain.StateTransactionMsg:
		// receive state transaction message
		c.stateMgr.EventStateTransactionMsg(msgt)
	case *chain.TransactionInclusionLevelMsg:
		if c.operator != nil {
			c.operator.EventTransactionInclusionLevelMsg(msgt)
		}
	case *chain.RequestMsg:
		// receive request message
		if c.operator != nil {
			c.operator.EventRequestMsg(msgt)
		}
	case chain.BalancesMsg:
		if c.operator != nil {
			c.operator.EventBalancesMsg(msgt)
		}
	case *chain.VMResultMsg:
		// VM finished working
		if c.operator != nil {
			c.operator.EventResultCalculated(msgt)
		}
	case chain.TimerTick:
		// Even ticks go to the state manager, odd ticks to the operator;
		// each therefore sees a timer running at half the tick rate.
		if msgt%2 == 0 {
			if c.stateMgr != nil {
				c.stateMgr.EventTimerMsg(msgt / 2)
			}
		} else {
			if c.operator != nil {
				c.operator.EventTimerMsg(msgt / 2)
			}
		}
	}
}
// processPeerMessage decodes a raw peer message according to MsgType and
// dispatches the typed result to the state manager and/or consensus
// operator. On any decode error the message is logged and dropped. Most
// handlers also feed the sender's BlockIndex into EvidenceStateIndex so the
// state manager can track how far peers have progressed.
func (c *chainObj) processPeerMessage(msg *peering.PeerMessage) {
	rdr := bytes.NewReader(msg.MsgData)
	switch msg.MsgType {
	case chain.MsgStateIndexPingPong:
		msgt := &chain.StateIndexPingPongMsg{}
		if err := msgt.Read(rdr); err != nil {
			c.log.Error(err)
			return
		}
		msgt.SenderIndex = msg.SenderIndex
		c.stateMgr.EvidenceStateIndex(msgt.BlockIndex)
		c.stateMgr.EventStateIndexPingPongMsg(msgt)
	case chain.MsgNotifyRequests:
		msgt := &chain.NotifyReqMsg{}
		if err := msgt.Read(rdr); err != nil {
			c.log.Error(err)
			return
		}
		c.stateMgr.EvidenceStateIndex(msgt.BlockIndex)
		msgt.SenderIndex = msg.SenderIndex
		if c.operator != nil {
			c.operator.EventNotifyReqMsg(msgt)
		}
	case chain.MsgNotifyFinalResultPosted:
		msgt := &chain.NotifyFinalResultPostedMsg{}
		if err := msgt.Read(rdr); err != nil {
			c.log.Error(err)
			return
		}
		c.stateMgr.EvidenceStateIndex(msgt.BlockIndex)
		msgt.SenderIndex = msg.SenderIndex
		if c.operator != nil {
			c.operator.EventNotifyFinalResultPostedMsg(msgt)
		}
	case chain.MsgStartProcessingRequest:
		msgt := &chain.StartProcessingBatchMsg{}
		if err := msgt.Read(rdr); err != nil {
			c.log.Error(err)
			return
		}
		c.stateMgr.EvidenceStateIndex(msgt.BlockIndex)
		msgt.SenderIndex = msg.SenderIndex
		// The envelope timestamp is carried over onto the typed message.
		msgt.Timestamp = msg.Timestamp
		if c.operator != nil {
			c.operator.EventStartProcessingBatchMsg(msgt)
		}
	case chain.MsgSignedHash:
		msgt := &chain.SignedHashMsg{}
		if err := msgt.Read(rdr); err != nil {
			c.log.Error(err)
			return
		}
		c.stateMgr.EvidenceStateIndex(msgt.BlockIndex)
		msgt.SenderIndex = msg.SenderIndex
		msgt.Timestamp = msg.Timestamp
		if c.operator != nil {
			c.operator.EventSignedHashMsg(msgt)
		}
	case chain.MsgGetBatch:
		msgt := &chain.GetBlockMsg{}
		if err := msgt.Read(rdr); err != nil {
			c.log.Error(err)
			return
		}
		msgt.SenderIndex = msg.SenderIndex
		c.stateMgr.EventGetBlockMsg(msgt)
	case chain.MsgBatchHeader:
		msgt := &chain.BlockHeaderMsg{}
		if err := msgt.Read(rdr); err != nil {
			c.log.Error(err)
			return
		}
		c.stateMgr.EvidenceStateIndex(msgt.BlockIndex)
		msgt.SenderIndex = msg.SenderIndex
		c.stateMgr.EventBlockHeaderMsg(msgt)
	case chain.MsgStateUpdate:
		msgt := &chain.StateUpdateMsg{}
		if err := msgt.Read(rdr); err != nil {
			c.log.Error(err)
			return
		}
		c.stateMgr.EvidenceStateIndex(msgt.BlockIndex)
		msgt.SenderIndex = msg.SenderIndex
		c.stateMgr.EventStateUpdateMsg(msgt)
	case chain.MsgTestTrace:
		msgt := &chain.TestTraceMsg{}
		if err := msgt.Read(rdr); err != nil {
			c.log.Error(err)
			return
		}
		msgt.SenderIndex = msg.SenderIndex
		c.testTrace(msgt)
	default:
		c.log.Errorf("processPeerMessage: wrong msg type")
	}
}
|
package main
import (
"net/http"
"fmt"
)
// String is a string that serves itself as an HTTP response body.
type String string

// ServeHTTP writes the string itself to the response.
func (s String) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, s)
}
// Struct is a demo handler whose fields are written to the response using
// fmt's default struct formatting.
type Struct struct {
	A string
	B string
	C string
}

// ServeHTTP writes the struct's default fmt representation to the response.
func (s Struct) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, s)
}
// main registers the two demo handlers and serves on localhost:3001.
func main() {
	http.Handle("/string", String("Hoge"))
	http.Handle("/struct", &Struct{"Hello", ":", "Gopher"})
	// ListenAndServe only returns on failure; surface the error instead of
	// silently exiting (previously the return value was discarded).
	if err := http.ListenAndServe("localhost:3001", nil); err != nil {
		fmt.Println(err)
	}
}
|
package display
import (
"github.com/GoAdminGroup/go-admin/template/icon"
"github.com/GoAdminGroup/go-admin/template/types"
)
// Icon is a display generator that renders a field value as an icon.
type Icon struct {
	types.BaseDisplayFnGenerator
}

// init registers the generator under the name "icon".
func init() {
	types.RegisterDisplayFnGenerator("icon", new(Icon))
}
// Get returns a FieldFilterFn that maps a field value to an icon.
// args[0] is a map from field value to icon class; the optional args[1] is
// a default icon class used when no entry matches. A value with neither a
// mapping nor a default is returned unchanged.
func (i *Icon) Get(args ...interface{}) types.FieldFilterFn {
	return func(value types.FieldModel) interface{} {
		icons := args[0].(map[string]string)
		defaultIcon := ""
		if len(args) > 1 {
			defaultIcon = args[1].(string)
		}
		// Direct map lookup replaces the original equality loop.
		if iconClass, ok := icons[value.Value]; ok {
			return icon.Icon(iconClass)
		}
		if defaultIcon != "" {
			return icon.Icon(defaultIcon)
		}
		return value.Value
	}
}
|
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cuegen
import (
goast "go/ast"
"testing"
"github.com/stretchr/testify/assert"
)
// TestWithAnyTypes verifies that WithTypes options merge cumulatively into
// the options' types map.
func TestWithAnyTypes(t *testing.T) {
	tests := []struct {
		name string
		opts []Option
		// extra is the expected contents of opts.types after applying opts.
		extra map[string]Type
	}{
		{
			name:  "default",
			opts:  nil,
			extra: map[string]Type{},
		},
		{
			name: "single",
			opts: []Option{WithTypes(map[string]Type{
				"foo": TypeAny,
				"bar": TypeEllipsis,
			})},
			extra: map[string]Type{"foo": TypeAny, "bar": TypeEllipsis},
		},
		{
			// Two WithTypes calls must both land in the map.
			name: "multiple",
			opts: []Option{WithTypes(map[string]Type{
				"foo": TypeAny,
				"bar": TypeEllipsis,
			}), WithTypes(map[string]Type{
				"baz": TypeEllipsis,
				"qux": TypeAny,
			})},
			extra: map[string]Type{
				"foo": TypeAny,
				"bar": TypeEllipsis,
				"baz": TypeEllipsis,
				"qux": TypeAny,
			},
		},
	}
	for _, tt := range tests {
		opts := options{types: map[string]Type{}}
		for _, opt := range tt.opts {
			opt(&opts)
		}
		assert.Equal(t, opts.types, tt.extra, tt.name)
	}
}
// TestWithNullable verifies that WithNullable flips the nullable flag.
func TestWithNullable(t *testing.T) {
	cases := []struct {
		name string
		opts []Option
		want bool
	}{
		{name: "default", opts: nil, want: false},
		{name: "true", opts: []Option{WithNullable()}, want: true},
	}
	for _, tc := range cases {
		opts := options{nullable: false}
		for _, apply := range tc.opts {
			apply(&opts)
		}
		assert.Equal(t, opts.nullable, tc.want, tc.name)
	}
}
// TestWithTypeFilter verifies that WithTypeFilter replaces (not composes)
// the type filter: the last applied filter wins.
// Note: "true" and "false" are ordinary identifiers in Go, used here as
// field names for the type names expected to pass/fail the filter.
func TestWithTypeFilter(t *testing.T) {
	tests := []struct {
		name  string
		opts  []Option
		true  []string
		false []string
	}{
		{
			// No option: the default accept-all filter stays in place.
			name:  "default",
			opts:  nil,
			true:  []string{"foo", "bar"},
			false: []string{},
		},
		{
			// A nil filter is ignored (see the nil check in the loop below).
			name: "nil",
			opts: []Option{WithTypeFilter(nil)},
			true: []string{"foo", "bar"},
		},
		{
			name:  "single",
			opts:  []Option{WithTypeFilter(func(typ *goast.TypeSpec) bool { return typ.Name.Name == "foo" })},
			true:  []string{"foo"},
			false: []string{"bar", "baz"},
		},
		{
			// The second WithTypeFilter overrides the first.
			name: "multiple",
			opts: []Option{WithTypeFilter(func(typ *goast.TypeSpec) bool { return typ.Name.Name == "foo" }),
				WithTypeFilter(func(typ *goast.TypeSpec) bool { return typ.Name.Name == "bar" })},
			true:  []string{"bar"},
			false: []string{"foo", "baz"},
		},
	}
	for _, tt := range tests {
		opts := options{typeFilter: func(_ *goast.TypeSpec) bool { return true }}
		for _, opt := range tt.opts {
			if opt != nil {
				opt(&opts)
			}
		}
		for _, typ := range tt.true {
			assert.True(t, opts.typeFilter(&goast.TypeSpec{Name: &goast.Ident{Name: typ}}), tt.name)
		}
		for _, typ := range tt.false {
			assert.False(t, opts.typeFilter(&goast.TypeSpec{Name: &goast.Ident{Name: typ}}), tt.name)
		}
	}
}
// TestDefaultOptions verifies the defaults: any/interface{} types map to
// TypeAny, their map forms to TypeEllipsis, nullable off, accept-all filter.
func TestDefaultOptions(t *testing.T) {
	opts := newDefaultOptions()
	expected := map[string]Type{
		"map[string]interface{}": TypeEllipsis,
		"map[string]any":         TypeEllipsis,
		"interface{}":            TypeAny,
		"any":                    TypeAny,
	}
	assert.Equal(t, opts.types, expected)
	assert.Equal(t, opts.nullable, false)
	// assert can't compare functions, so just invoke the filter.
	assert.True(t, opts.typeFilter(nil))
}
|
package hydrasdk
import (
"crypto/rsa"
"net/http"
"net/url"
"github.com/juju/errors"
jose "github.com/square/go-jose"
)
// KeyGetter provides functions to retrieve a key from an hydra set (tipically the first)
type KeyGetter interface {
	// GetRSAPublic returns the first public key of the named set.
	GetRSAPublic(set string) (*rsa.PublicKey, error)
	// GetRSAPrivate returns the first private key of the named set.
	GetRSAPrivate(set string) (*rsa.PrivateKey, error)
}
// CachedKeyManager uses hydra rest api to retrieve keys and cache them for easy access
// NOTE(review): the cache maps are not mutex-guarded; concurrent use of the
// same manager races on them — confirm callers are single-threaded.
type CachedKeyManager struct {
	Endpoint    *url.URL
	Client      *http.Client
	// rsaPublics caches public keys by set name, filled on first fetch.
	rsaPublics  map[string]*rsa.PublicKey
	// rsaPrivates caches private keys by set name, filled on first fetch.
	rsaPrivates map[string]*rsa.PrivateKey
}
// NewCachedKeyManager returns a CachedKeyManager connected to the hydra cluster
// it can fail if the cluster is not a valid url, or if the id and secret don't work
func NewCachedKeyManager(id, secret, cluster string) (*CachedKeyManager, error) {
	endpoint, client, err := authenticate(id, secret, cluster)
	if err != nil {
		return nil, errors.Annotate(err, "Instantiate ClientManager")
	}
	// Start with empty caches; they are filled lazily on first retrieval.
	return &CachedKeyManager{
		Endpoint:    joinURL(endpoint, "keys"),
		Client:      client,
		rsaPublics:  map[string]*rsa.PublicKey{},
		rsaPrivates: map[string]*rsa.PrivateKey{},
	}, nil
}
// GetRSAPublic retrieves the first key of the given set. It caches them forever,
// so hope that they don't change
func (m CachedKeyManager) GetRSAPublic(set string) (*rsa.PublicKey, error) {
	// Try getting from cache
	if key, ok := m.rsaPublics[set]; ok {
		return key, nil
	}
	url := joinURL(m.Endpoint, set).String()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, errors.Annotatef(err, "new request for %s", url)
	}
	var keyset jose.JSONWebKeySet
	err = bind(m.Client, req, &keyset)
	if err != nil {
		return nil, err
	}
	if len(keyset.Keys) == 0 {
		return nil, errors.New("The retrieved keyset is empty")
	}
	key, ok := keyset.Keys[0].Key.(*rsa.PublicKey)
	if !ok {
		// Fixed message: it previously said "RSA Private Key", a
		// copy-paste from GetRSAPrivate.
		return key, errors.New("Could not convert key to RSA Public Key.")
	}
	// Save on cache (works despite the value receiver: the map itself is
	// shared).
	m.rsaPublics[set] = key
	return key, nil
}
// GetRSAPrivate retrieves the first key of the given set. It caches them forever,
// so hope that they don't change
func (m CachedKeyManager) GetRSAPrivate(set string) (*rsa.PrivateKey, error) {
	// Serve from the cache when possible.
	if cached, ok := m.rsaPrivates[set]; ok {
		return cached, nil
	}
	url := joinURL(m.Endpoint, set).String()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, errors.Annotatef(err, "new request for %s", url)
	}
	var keyset jose.JSONWebKeySet
	if err = bind(m.Client, req, &keyset); err != nil {
		return nil, err
	}
	if len(keyset.Keys) == 0 {
		return nil, errors.New("The retrieved keyset is empty")
	}
	key, ok := keyset.Keys[0].Key.(*rsa.PrivateKey)
	if !ok {
		return key, errors.New("Could not convert key to RSA Private Key.")
	}
	// Cache for subsequent calls.
	m.rsaPrivates[set] = key
	return key, nil
}
|
package keycloak
import (
"fmt"
)
// AuthenticationExecutionConfig is the keycloak authenticator config
// attached to an authentication execution. RealmId and ExecutionId are
// request-routing fields only and are excluded from the JSON body.
// https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_authenticatorconfigrepresentation
type AuthenticationExecutionConfig struct {
	RealmId     string            `json:"-"`
	ExecutionId string            `json:"-"`
	Id          string            `json:"id"`
	Alias       string            `json:"alias"`
	Config      map[string]string `json:"config"`
}
// NewAuthenticationExecutionConfig creates the config under its execution;
// the new config id is parsed from the Location header of the response.
// https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_newexecutionconfig
func (keycloakClient *KeycloakClient) NewAuthenticationExecutionConfig(config *AuthenticationExecutionConfig) (string, error) {
	path := fmt.Sprintf("/realms/%s/authentication/executions/%s/config", config.RealmId, config.ExecutionId)
	_, location, err := keycloakClient.post(path, config)
	if err != nil {
		return "", err
	}
	return getIdFromLocationHeader(location), nil
}
// GetAuthenticationExecutionConfig fetches the config by id into config.
// https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_getauthenticatorconfig
func (keycloakClient *KeycloakClient) GetAuthenticationExecutionConfig(config *AuthenticationExecutionConfig) error {
	path := fmt.Sprintf("/realms/%s/authentication/config/%s", config.RealmId, config.Id)
	return keycloakClient.get(path, config, nil)
}
// UpdateAuthenticationExecutionConfig replaces the stored config by id.
// https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_updateauthenticatorconfig
func (keycloakClient *KeycloakClient) UpdateAuthenticationExecutionConfig(config *AuthenticationExecutionConfig) error {
	path := fmt.Sprintf("/realms/%s/authentication/config/%s", config.RealmId, config.Id)
	return keycloakClient.put(path, config)
}
// DeleteAuthenticationExecutionConfig removes the config by id.
// https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_removeauthenticatorconfig
func (keycloakClient *KeycloakClient) DeleteAuthenticationExecutionConfig(config *AuthenticationExecutionConfig) error {
	path := fmt.Sprintf("/realms/%s/authentication/config/%s", config.RealmId, config.Id)
	return keycloakClient.delete(path, nil)
}
|
package efclient
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"sync"
"time"
"syreclabs.com/go/faker"
"syreclabs.com/go/faker/locales"
)
// Label is the API response for a created product label; only the id is
// decoded.
type Label struct {
	ID int `json:"id"`
}
// LabelData is the request payload for creating a product label.
type LabelData struct {
	Title       string `json:"title"`
	Description string `json:"description"`
}
// CreateLabel posts labelData to /product-labels and returns the created
// label.
func (c *Client) CreateLabel(labelData *LabelData) (*Label, error) {
	payload := map[string]interface{}{
		"title":       &labelData.Title,
		"description": &labelData.Description,
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return nil, err
	}
	url := fmt.Sprintf("%s/product-labels", c.BaseURL)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
	if err != nil {
		return nil, err
	}
	var created Label
	if err := c.SendRequest(req, &created); err != nil {
		return nil, err
	}
	return &created, nil
}
// CreateFakeLabels creates count fake labels concurrently and returns the
// number successfully created. The buffered channel acts as a one-slot
// mailbox for the success counter so goroutines never race on it.
func (c *Client) CreateFakeLabels(wg *sync.WaitGroup, count int) int {
	// Guard: with count <= 0 the channel below would be unbuffered
	// (make(chan int, 0)) and the initial send would deadlock.
	if count <= 0 {
		return 0
	}
	faker.Locale = locales.Ru
	ch := make(chan int, count)
	ch <- 0
	for i := 0; i < count; i++ {
		wg.Add(1)
		// Stagger requests slightly so the API is not hammered at once.
		time.Sleep(time.Millisecond * 50)
		go func(wg *sync.WaitGroup) {
			defer wg.Done()
			fakeLabel := LabelData{
				Title:       faker.Commerce().Color() + " " + faker.Name().LastName(),
				Description: faker.Lorem().Sentence(10),
			}
			log.Println(fakeLabel)
			_, err := c.CreateLabel(&fakeLabel)
			if err != nil {
				log.Print(fmt.Errorf("%v", err))
				// log.Fatal(err)
			} else {
				// Take the counter out, increment, put it back.
				counter := <-ch
				ch <- counter + 1
			}
		}(wg)
	}
	wg.Wait()
	close(ch)
	return <-ch
}
|
/*
Copyright (C) 2018 Black Duck Software, Inc.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package freeway
import (
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
"github.com/blackducksoftware/hub-client-go/hubapi"
"github.com/blackducksoftware/hub-client-go/hubclient"
"github.com/juju/errors"
log "github.com/sirupsen/logrus"
)
const (
	// scrapeHubAPIPause is the pause between successive full traversals of
	// the hub API in StartHittingHub.
	scrapeHubAPIPause = 20 * time.Second
)
// PerformanceResults holds per-link-type fetch timings, in milliseconds,
// keyed by the link type's string form.
type PerformanceResults struct {
	LinkTypeTimings map[string][]float64
}
// PerformanceTester repeatedly walks the hub API graph and records how long
// each link takes. DurationsResults is owned by the StartReducer goroutine;
// other goroutines interact with it only through the AddResults and
// GetResults channels.
type PerformanceTester struct {
	HubClient        *hubclient.Client
	HubUsername      string
	HubPassword      string
	DurationsResults []map[LinkType][]*time.Duration
	// AddResults receives one traversal's grouped durations.
	AddResults chan map[LinkType][]*time.Duration
	// GetResults receives a continuation invoked with a snapshot of results.
	GetResults chan func([]*PerformanceResults)
}
// NewPerformanceTester logs in to the hub at hubHost and starts the two
// background goroutines (API hitter and results reducer), plus the HTTP
// results handler. It returns an error if the client cannot be created or
// the login fails.
func NewPerformanceTester(hubHost string, username string, password string) (*PerformanceTester, error) {
	var baseURL = fmt.Sprintf("https://%s", hubHost)
	// NOTE(review): 5000*time.Second is an unusually large client timeout —
	// confirm this is intentional.
	hubClient, err := hubclient.NewWithSession(baseURL, hubclient.HubClientDebugTimings, 5000*time.Second)
	if err != nil {
		return nil, errors.Annotatef(err, "unable to get hub client for %s", hubHost)
	}
	pt := &PerformanceTester{
		HubClient:        hubClient,
		HubUsername:      username,
		HubPassword:      password,
		DurationsResults: []map[LinkType][]*time.Duration{},
		AddResults:       make(chan map[LinkType][]*time.Duration),
		GetResults:       make(chan func([]*PerformanceResults))}
	err = hubClient.Login(username, password)
	if err != nil {
		return nil, errors.Annotatef(err, "unable to log in to hub %s", hubHost)
	}
	// These goroutines run for the life of the process; there is no stop
	// mechanism.
	go pt.StartHittingHub()
	go pt.StartReducer()
	pt.AddFreewayResultsHandler()
	return pt, nil
}
// GetGroupedDurations traverses the hub API graph from the projects root
// and groups the per-link fetch durations by link type. Traversal errors
// are returned alongside the grouped timings.
func (pt *PerformanceTester) GetGroupedDurations() (map[LinkType][]*time.Duration, []error) {
	root := fmt.Sprintf("%s/api/projects", pt.HubClient.BaseURL())
	// Renamed from "errors", which shadowed the imported errors package.
	times, errs := pt.TraverseGraph(root)
	groupedTimes := map[LinkType][]*time.Duration{}
	for link, duration := range times {
		linkType, err := AnalyzeLink(link)
		if err != nil {
			// TraverseGraph only records links AnalyzeLink accepted, so a
			// failure here indicates a programming error.
			panic(err)
		}
		groupedTimes[*linkType] = append(groupedTimes[*linkType], duration)
	}
	return groupedTimes, errs
}
// TraverseGraph does a breadth-first walk over the hub's links starting at
// root, recording how long each link took to fetch. Links that fail
// AnalyzeLink are skipped; fetch and link-extraction errors are collected
// and returned.
func (pt *PerformanceTester) TraverseGraph(root string) (map[string]*time.Duration, []error) {
	timings := map[string]*time.Duration{}
	seen := map[string]bool{root: false}
	queue := []string{root}
	// Renamed from "errors", which shadowed the imported errors package.
	errs := []error{}
	for len(queue) > 0 {
		log.Infof("queue size: %d, errors: %d, timings: %d", len(queue), len(errs), len(timings))
		first := queue[0]
		queue = queue[1:]
		start := time.Now()
		// Renamed from "json", which shadowed the encoding/json package.
		body, err := pt.FetchLink(first)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		// time.Since replaces time.Now().Sub(start) (idiom).
		elapsed := time.Since(start)
		timings[first] = &elapsed
		links, linkErrs := FindLinks(body)
		// links, errs := FindLinksRestricted(body)
		errs = append(errs, linkErrs...)
		for _, link := range links {
			if _, err := AnalyzeLink(link); err != nil {
				log.Infof("skipping link %s: %s", link, err.Error())
				continue
			}
			if _, ok := seen[link]; !ok {
				seen[link] = false
				queue = append(queue, link)
			}
		}
	}
	return timings, errs
}
// func (pt *PerformanceTester) TraverseGraph(root string) (map[string]bool, []error) {
// timings := map[string]*time.Duration{}
// queue := []string{root}
// errors := []error{}
// projectList, err := pt.GetProjects()
// if err != nil {
// errors = append(errors, err)
// }
// for len(queue) > 0 {
// first := queue[0]
// queue = queue[1:]
// json, err := pt.FetchLink(first)
// }
// for _, project := range projectList.Items {
// for _, link := range project.Meta.Links {
// visited, ok := urls[link.Href]
// if !ok {
// urls[link.Href] = false
// } else if !visited {
// err := pt.TraverseLink(link.Href)
// if err == nil {
// urls[link.Href] = true
// }
// }
// }
// }
// for link := range urls {
// linkType, err := pt.AnalyzeLink(link)
// if err != nil {
// log.Errorf("unable to analyze link %s: %s", link, err.Error())
// } else {
// log.Infof("url analysis: %s", linkType.String())
// }
// }
// return urls, errors
// }
// FetchLink GETs the given URL via the Hub client and decodes the JSON body
// into a generic map, expecting HTTP 200. On failure it logs, bumps the
// "failed to fetch link" error metric, and returns the error.
func (pt *PerformanceTester) FetchLink(link string) (map[string]interface{}, error) {
	result := map[string]interface{}{}
	err := pt.HubClient.HttpGetJSON(link, &result, 200)
	//log.Infof("result and error: %+v, %s", result, err)
	if err != nil {
		log.Errorf("failed to fetch link %s: %s", link, err.Error())
		recordError("failed to fetch link")
		return nil, err
	}
	log.Infof("successfully fetched link %s", link)
	return result, nil
}
// GetProjects lists projects from the Hub with a very large page limit so a
// single call effectively returns everything.
func (pt *PerformanceTester) GetProjects() (*hubapi.ProjectList, error) {
	// 35000 is assumed to exceed any realistic project count — TODO confirm.
	limit := 35000
	options := &hubapi.GetListOptions{Limit: &limit}
	return pt.HubClient.ListProjects(options)
}
// StartReducer is the single goroutine that owns DurationsResults. It either
// appends a new batch from AddResults or, on a GetResults request, converts
// the accumulated durations to per-link-type millisecond floats and hands the
// snapshot to the supplied continuation (run on its own goroutine so the
// reducer never blocks on a consumer).
func (pt *PerformanceTester) StartReducer() {
	for {
		select {
		case results := <-pt.AddResults:
			pt.DurationsResults = append(pt.DurationsResults, results)
		case continuation := <-pt.GetResults:
			resultsArray := []*PerformanceResults{}
			for _, results := range pt.DurationsResults {
				times := map[string][]float64{}
				for linkType, durations := range results {
					floats := []float64{}
					for _, d := range durations {
						// Integer division truncates to whole milliseconds.
						floats = append(floats, float64(*d/time.Millisecond))
					}
					times[linkType.String()] = floats
				}
				perfResults := &PerformanceResults{
					LinkTypeTimings: times,
				}
				resultsArray = append(resultsArray, perfResults)
			}
			go continuation(resultsArray)
		}
	}
}
// StartHittingHub loops forever: collect grouped durations from the Hub,
// forward them to the reducer, record per-link-type metrics, log any errors,
// then sleep for scrapeHubAPIPause before the next round.
func (pt *PerformanceTester) StartHittingHub() {
	for {
		groupedDurations, errors := pt.GetGroupedDurations()
		pt.AddResults <- groupedDurations
		for linkType, durations := range groupedDurations {
			log.Infof("durations for %s: %+v", linkType.String(), durations)
			for _, duration := range durations {
				recordLinkTypeDuration(linkType, *duration)
			}
		}
		for _, err := range errors {
			log.Errorf("error: %s", err.Error())
		}
		time.Sleep(scrapeHubAPIPause)
	}
}
// AddFreewayResultsHandler registers the /freewayresults HTTP endpoint, which
// asks the reducer goroutine for a snapshot of the collected performance
// results and serves it as indented JSON.
func (pt *PerformanceTester) AddFreewayResultsHandler() {
	http.HandleFunc("/freewayresults", func(w http.ResponseWriter, r *http.Request) {
		var wg sync.WaitGroup
		wg.Add(1)
		var jsonBytes []byte
		pt.GetResults <- func(results []*PerformanceResults) {
			var err error
			// Bug fix: marshal the snapshot handed to this continuation.
			// Reading pt.DurationsResults here raced with the reducer
			// goroutine and ignored the requested snapshot entirely.
			jsonBytes, err = json.MarshalIndent(results, "", " ")
			if err != nil {
				panic(err)
			}
			wg.Done()
		}
		wg.Wait()
		fmt.Fprint(w, string(jsonBytes))
	})
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
const (
	// TerraformFinalizer is the finalizer added to Terraform objects so the
	// controller can run teardown (e.g. destroy-on-delete) before removal.
	TerraformFinalizer = "finalizer.terraform.bkbcs.tencent.com"
)

// BackendConfigsReference specify where to store backend config
type BackendConfigsReference struct {
	// Kind of the values referent, valid values are ('Secret', 'ConfigMap').
	// +kubebuilder:validation:Enum=Secret;ConfigMap
	// +required
	Kind string `json:"kind"`

	// Name of the configs referent. Should reside in the same namespace as the
	// referring resource.
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:MaxLength=253
	// +required
	Name string `json:"name"`

	// Keys is the data key where a specific value can be found at. Defaults to all keys.
	// +optional
	Keys []string `json:"keys,omitempty"`

	// Optional marks this BackendConfigsReference as optional. When set, a not found error
	// for the values reference is ignored, but any Key or
	// transient error will still result in a reconciliation failure.
	// +optional
	Optional bool `json:"optional,omitempty"`
}

// PlanStatus status of plan
type PlanStatus struct {
	// LastApplied is the name of the plan most recently applied.
	// +optional
	LastApplied string `json:"lastApplied,omitempty"`
	// Pending is the name of a generated plan awaiting approval.
	// +optional
	Pending string `json:"pending,omitempty"`
	// IsDestroyPlan marks the pending plan as a destroy plan.
	// +optional
	IsDestroyPlan bool `json:"isDestroyPlan,omitempty"`
	// IsDriftDetectionPlan marks the pending plan as drift detection.
	// +optional
	IsDriftDetectionPlan bool `json:"isDriftDetectionPlan,omitempty"`
}

// TerraformSpec defines the desired state of Terraform
type TerraformSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// ApprovePlan specifies name of a plan wanted to approve.
	// If its value is "auto", the controller will automatically approve every plan.
	// +optional
	ApprovePlan string `json:"approvePlan,omitempty"`

	// Destroy produces a destroy plan. Applying the plan will destroy all resources.
	// +optional
	Destroy bool `json:"destroy,omitempty"`

	// +optional
	BackendConfigsFrom []BackendConfigsReference `json:"backendConfigsFrom,omitempty"`

	// Create destroy plan and apply it to destroy terraform resources
	// upon deletion of this object. Defaults to false.
	// +kubebuilder:default:=false
	// +optional
	DestroyResourcesOnDeletion bool `json:"destroyResourcesOnDeletion,omitempty"`

	// Targets specify the resource, module or collection of resources to target.
	// +optional
	Targets []string `json:"targets,omitempty"`
}

// TerraformStatus defines the observed state of Terraform
type TerraformStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// ObservedGeneration is the last reconciled generation.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`

	// The last successfully applied revision.
	// The revision format for Git sources is <branch|tag>/<commit-sha>.
	// +optional
	LastAppliedRevision string `json:"lastAppliedRevision,omitempty"`

	// LastAttemptedRevision is the revision of the last reconciliation attempt.
	// +optional
	LastAttemptedRevision string `json:"lastAttemptedRevision,omitempty"`

	// LastPlannedRevision is the revision used by the last planning process.
	// The result could be either no plan change or a new plan generated.
	// +optional
	LastPlannedRevision string `json:"lastPlannedRevision,omitempty"`

	// LastPlanAt is the time when the last terraform plan was performed
	// +optional
	LastPlanAt *metav1.Time `json:"lastPlanAt,omitempty"`

	// LastAppliedAt is the time when the last drift was detected and
	// terraform apply was performed as a result
	// NOTE(review): json tag "LastAppliedAt" breaks the camelCase convention of
	// the other fields; changing it would alter the serialized API, so it is
	// left as-is — confirm whether any consumer depends on this casing.
	// +optional
	LastAppliedAt *metav1.Time `json:"LastAppliedAt,omitempty"`

	// +optional
	Plan PlanStatus `json:"plan,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// Terraform is the Schema for the terraforms API
type Terraform struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   TerraformSpec   `json:"spec,omitempty"`
	Status TerraformStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// TerraformList contains a list of Terraform
type TerraformList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Terraform `json:"items"`
}

// init registers both schema types with the scheme builder so clients can
// encode/decode them.
func init() {
	SchemeBuilder.Register(&Terraform{}, &TerraformList{})
}
|
package test
import (
"fmt"
"gengine/engine"
"testing"
"time"
)
// rp1 is a single-rule gengine ruleset: sleep, then print "hello".
const rp1 = `
rule "1" "1"
begin
sleep()
println("hello")
end
`

// rp2 contains two rules so pool updates can switch between rule counts.
const rp2 = `
rule "1" "1"
begin
sleep()
println()
end
rule "2" "2"
begin
sleep()
println()
end
`

// SleepTime is registered as the "sleep" API for the rules; the actual sleep
// is commented out so tests run fast.
func SleepTime() {
	//println("sleep")
	//time.Sleep(100 * time.Second )
}

// Test_rp1 exercises a gengine pool under concurrent use: one goroutine
// executes selected rules in a loop, one polls rule existence, several race
// to update/clear the pooled rules at staggered delays. The test passes if
// nothing panics within the 20 second window.
func Test_rp1(t *testing.T) {
	apis := make(map[string]interface{})
	apis["println"] = fmt.Println
	apis["sleep"] = SleepTime
	pool, e1 := engine.NewGenginePool(1, 2, 1, rp1, apis)
	if e1 != nil {
		panic(e1)
	}
	// Executor loop: run rules "1" and "2" once a second.
	go func() {
		for {
			data := make(map[string]interface{})
			sid := []string{"1", "2"}
			e, _ := pool.ExecuteSelectedRulesConcurrent(data, sid)
			if e != nil {
				println("execute err", fmt.Sprintf("%+v", e))
			}
			println("执行...")
			time.Sleep(1000 * time.Millisecond)
		}
	}()
	// Poller loop: check which rules currently exist in the pool.
	go func() {
		for {
			isExist := pool.IsExist([]string{"1"})
			println(fmt.Sprintf("exist 1... %+v", isExist))
			isExist = pool.IsExist([]string{"2"})
			println(fmt.Sprintf("exist 2... %+v", isExist))
			time.Sleep(1 * time.Second)
		}
	}()
	// Two goroutines deliberately race to swap the ruleset at t+3s.
	go func() {
		time.Sleep(3 * time.Second)
		e := pool.UpdatePooledRules(rp2)
		if e != nil {
			panic(e)
		}
	}()
	go func() {
		time.Sleep(3 * time.Second)
		e := pool.UpdatePooledRules(rp1)
		if e != nil {
			panic(e)
		}
	}()
	// Clear all rules at t+5s.
	go func() {
		time.Sleep(5 * time.Second)
		println("清空规则....")
		pool.ClearPoolRules()
	}()
	// Restore rules at t+20s (may fire after the test ends).
	go func() {
		time.Sleep(20 * time.Second)
		println("更新规则....")
		e := pool.UpdatePooledRules(rp2)
		if e != nil {
			println("execute err", fmt.Sprintf("%+v", e))
		}
	}()
	time.Sleep(20 * time.Second)
}
|
package main
import (
"log"
"math/rand"
"time"
)
// Response is the payload returned by a simulated fetch.
type Response string

// init seeds math/rand so each run gets different simulated latencies.
func init() {
	rand.Seed(time.Now().UnixNano())
}

// main fans out five concurrent fetches and collects their responses.
func main() {
	GetAllTheThings([]string{"one", "two", "three", "four", "five"})
}
// GetTheThing simulates a network fetch: it sleeps for a random duration
// below one second, then echoes the url back inside a Response.
func GetTheThing(url string) Response {
	latency := time.Duration(rand.Int()%1000) * time.Millisecond
	time.Sleep(latency)
	return Response("response:" + url)
}
// START OMIT
// GetThingConcurrently fetches one url and delivers the result on ch;
// intended to be run as a goroutine. // HL
func GetThingConcurrently(url string, ch chan<- Response) { // HL
	resp := GetTheThing(url) // HL
	log.Printf("thing %s got!", url) // HL
	ch <- resp // HL
}
// GetAllTheThings launches one goroutine per url and gathers exactly one
// response per url, in completion order.
func GetAllTheThings(list []string) (respList []Response) {
	ch := make(chan Response) // HL
	for _, url := range list {
		go GetThingConcurrently(url, ch) // HL
	}
	// Bug fix: receive exactly len(list) values. The previous
	// `for resp := range ch` deadlocked on an empty list, because the
	// length check only ran after a first receive that never came.
	for range list { // HL
		respList = append(respList, <-ch)
	}
	return
}
// END OMIT
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"testing"
"time"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
clientv3 "go.etcd.io/etcd/client/v3"
)
// TestOptions verifies that each ddl.Option setter stores its value in the
// corresponding ddl.Options field after being applied.
func TestOptions(t *testing.T) {
	client, err := clientv3.NewFromURL("test")
	require.NoError(t, err)
	defer func() {
		err := client.Close()
		require.NoError(t, err)
	}()
	callback := &ddl.BaseCallback{}
	lease := time.Second * 3
	store := &mock.Store{}
	infoHandle := infoschema.NewCache(16)

	// Build one option of each kind...
	options := []ddl.Option{
		ddl.WithEtcdClient(client),
		ddl.WithHook(callback),
		ddl.WithLease(lease),
		ddl.WithStore(store),
		ddl.WithInfoCache(infoHandle),
	}

	// ...apply them all to a fresh Options...
	opt := &ddl.Options{}
	for _, o := range options {
		o(opt)
	}

	// ...and assert every field landed where expected.
	require.Equal(t, client, opt.EtcdCli)
	require.Equal(t, callback, opt.Hook)
	require.Equal(t, lease, opt.Lease)
	require.Equal(t, store, opt.Store)
	require.Equal(t, infoHandle, opt.InfoCache)
}
|
package configs
import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"strings"

	"github.com/gabrie30/ghorg/colorlog"
	"github.com/mitchellh/go-homedir"
	"github.com/spf13/viper"
)
// init loads configuration as soon as the package is imported.
// NOTE(review): init with side effects makes ordering implicit; Load() below
// exists to force this import from callers.
func init() {
	initConfig()
}

// initConfig reads $HOME/ghorg/conf.yaml via viper, then seeds the process
// environment with the GitHub token and per-setting defaults.
func initConfig() {
	viper.SetConfigType("yaml")
	viper.AddConfigPath(ghorgDir())
	viper.SetConfigName("conf")

	if err := viper.ReadInConfig(); err != nil {
		if _, ok := err.(viper.ConfigFileNotFoundError); ok {
			// Config file not found; ignore error if desired
			fmt.Println(err)
			fmt.Println("Could not find $HOME/ghorg/conf.yaml file, please add one")
		} else {
			// Config file was found but another error was produced
			fmt.Println(err)
			fmt.Println("Something unexpected happened")
		}
	}

	getOrSetGitHubToken()
	getOrSetDefaults("GHORG_ABSOLUTE_PATH_TO_CLONE_TO")
	getOrSetDefaults("GHORG_BRANCH")
	getOrSetDefaults("GHORG_CLONE_PROTOCOL")
}
// Load triggers the configs to load first, not sure if this is actually needed
func Load() {}

// GetRequiredString verifies env is set; the process exits fatally when the
// key resolves to its zero value.
func GetRequiredString(key string) string {
	value := viper.GetString(key)

	if isZero(value) {
		log.Fatalf("Fatal: '%s' ENV VAR is required", key)
	}

	return value
}
// isZero reports whether value equals the zero value of its dynamic type.
// Note: panics for values of uncomparable types (slices, maps, funcs).
func isZero(value interface{}) bool {
	zero := reflect.Zero(reflect.TypeOf(value)).Interface()
	return value == zero
}
// getOrSetDefaults fills in a default value for the given setting, honoring
// the "get or set" contract: a value already present in the environment (e.g.
// set by the user or by viper) is left untouched.
func getOrSetDefaults(envVar string) {
	// Bug fix: previously the switch below ran unconditionally, clobbering
	// any value the user had already configured.
	if os.Getenv(envVar) != "" {
		return
	}
	switch envVar {
	case "GHORG_ABSOLUTE_PATH_TO_CLONE_TO":
		os.Setenv(envVar, HomeDir()+"/Desktop/")
	case "GHORG_BRANCH":
		os.Setenv(envVar, "master")
	case "GHORG_CLONE_PROTOCOL":
		os.Setenv(envVar, "https")
	}
}
// ghorgDir returns the ghorg configuration directory under the user's home.
// filepath.Join builds an OS-correct path instead of hard-coding "/".
func ghorgDir() string {
	return filepath.Join(HomeDir(), "ghorg")
}
// HomeDir finds the users home directory; exits fatally if it cannot be
// determined.
func HomeDir() string {
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal("Error trying to find users home directory")
	}
	return home
}
func getOrSetGitHubToken() {
var token string
if isZero(os.Getenv("GHORG_GITHUB_TOKEN")) || len(os.Getenv("GHORG_GITHUB_TOKEN")) != 40 {
cmd := `security find-internet-password -s github.com | grep "acct" | awk -F\" '{ print $4 }'`
out, err := exec.Command("bash", "-c", cmd).Output()
if err != nil {
colorlog.PrintError(fmt.Sprintf("Failed to execute command: %s", cmd))
}
token = strings.TrimSuffix(string(out), "\n")
if len(token) != 40 {
log.Fatal("Could not find a GitHub token in keychain. You should create a personal access token from GitHub, then set GITHUB_TOKEN in your $HOME/ghorg/conf.yaml...or swtich to cloning via SSH also done by updating your $HOME/ghorg/conf.yaml. Or read the troubleshooting section of Readme.md https://github.com/gabrie30/ghorg to store your token in your osx keychain.")
}
os.Setenv("GHORG_GITHUB_TOKEN", token)
}
if len(token) != 40 {
log.Fatal("Could not set GHORG_GITHUB_TOKEN")
}
}
|
package filter
import (
"testing"
)
// TestLabelMatchEq verifies Match and EmptyOrMatch for the equality matcher
// across present, mismatched, absent, and nil label sets.
func TestLabelMatchEq(t *testing.T) {
	cases := []struct {
		match  *LabelMatchEq
		labels map[string]string
		ans    bool
		ans2   bool
	}{
		{&LabelMatchEq{"a", "b"}, map[string]string{"a": "b"}, true, true},
		{&LabelMatchEq{"a", "b"}, map[string]string{"a": "c"}, false, false},
		// Key absent: Match fails, EmptyOrMatch treats it as vacuously true.
		{&LabelMatchEq{"a", "b"}, map[string]string{"b": "a"}, false, true},
		{&LabelMatchEq{"a", "b"}, nil, false, true},
	}
	for _, tc := range cases {
		if got := tc.match.Match(tc.labels); got != tc.ans {
			t.Error("Test failed Match", tc.match, tc.labels, tc.ans)
		}
		if got := tc.match.EmptyOrMatch(tc.labels); got != tc.ans2 {
			t.Error("Test failed EmptyOrMatch", tc.match, tc.labels, tc.ans2)
		}
	}
}
// TestLabelMatchNeq verifies the inequality matcher; note that an absent key
// counts as "not equal", so Match succeeds on missing/nil labels.
func TestLabelMatchNeq(t *testing.T) {
	var tests = []struct {
		match  *LabelMatchNeq
		labels map[string]string
		ans    bool
		ans2   bool
	}{
		{
			&LabelMatchNeq{"a", "b"},
			map[string]string{"a": "b"},
			false,
			false,
		},
		{
			&LabelMatchNeq{"a", "b"},
			map[string]string{"a": "c"},
			true,
			true,
		},
		{
			&LabelMatchNeq{"a", "b"},
			map[string]string{"b": "a"},
			true,
			true,
		},
		{
			&LabelMatchNeq{"a", "b"},
			nil,
			true,
			true,
		},
	}
	for _, i := range tests {
		if i.match.Match(i.labels) != i.ans {
			t.Error("Test failed Match", i.match, i.labels, i.ans)
		}
		if i.match.EmptyOrMatch(i.labels) != i.ans2 {
			t.Error("Test failed EmptyOrMatch", i.match, i.labels, i.ans2)
		}
	}
}

// TestLabelMatchSetIn verifies the set-membership matcher: the label's value
// must be one of the listed candidates. An absent key fails Match but
// satisfies EmptyOrMatch.
func TestLabelMatchSetIn(t *testing.T) {
	var tests = []struct {
		match  *LabelMatchSetIn
		labels map[string]string
		ans    bool
		ans2   bool
	}{
		{
			&LabelMatchSetIn{"a", []string{"b"}},
			map[string]string{"a": "b"},
			true,
			true,
		},
		{
			&LabelMatchSetIn{"a", []string{"b", "a"}},
			map[string]string{"a": "c"},
			false,
			false,
		},
		{
			&LabelMatchSetIn{"a", []string{"a", "b", "c"}},
			map[string]string{"a": "b"},
			true,
			true,
		},
		{
			&LabelMatchSetIn{"b", []string{"a", "b", "c"}},
			map[string]string{"a": "b"},
			false,
			true,
		},
		{
			&LabelMatchSetIn{"a", []string{"b"}},
			nil,
			false,
			true,
		},
	}
	for _, i := range tests {
		if i.match.Match(i.labels) != i.ans {
			t.Error("Test failed Match", i.match, i.labels, i.ans)
		}
		if i.match.EmptyOrMatch(i.labels) != i.ans2 {
			t.Error("Test failed EmptyOrMatch", i.match, i.labels, i.ans2)
		}
	}
}

// TestLabelMatchMultiple verifies the conjunction matcher: every child
// matcher must pass. An empty (nil) child list matches everything.
func TestLabelMatchMultiple(t *testing.T) {
	var tests = []struct {
		match  *LabelMatchMultiple
		labels map[string]string
		ans    bool
		ans2   bool
	}{
		{
			&LabelMatchMultiple{[]LabelMatch{&LabelMatchEq{"a", "b"}, &LabelMatchNeq{"c", "k"}}},
			map[string]string{"a": "b"},
			true,
			true,
		},
		{
			&LabelMatchMultiple{[]LabelMatch{&LabelMatchEq{"a", "b"}, &LabelMatchSetIn{"b", []string{"b", "a"}}}},
			map[string]string{"a": "b", "b": "b"},
			true,
			true,
		},
		{
			&LabelMatchMultiple{[]LabelMatch{&LabelMatchEq{"a", "b"}, &LabelMatchSetIn{"a", []string{"b", "a"}}}},
			map[string]string{"a": "a"},
			false,
			false,
		},
		{
			&LabelMatchMultiple{nil},
			map[string]string{"a": "b", "b": "b"},
			true,
			true,
		},
	}
	for _, i := range tests {
		if i.match.Match(i.labels) != i.ans {
			t.Error("Test failed Match", i.match, i.labels, i.ans)
		}
		if i.match.EmptyOrMatch(i.labels) != i.ans2 {
			t.Error("Test failed EmptyOrMatch: ", i.match, i.labels, i.ans2)
		}
	}
}
|
package utils
import (
"errors"
"gopkg.in/mgo.v2/bson"
)
var (
	// ErrInvalidObjectId signals a string that is not a valid hex ObjectId.
	// NOTE(review): Go naming convention would be ErrInvalidObjectID, but the
	// exported name is kept for backward compatibility with callers.
	ErrInvalidObjectId = errors.New("Invalid ObjectID format")
)

// IsObjectId reports whether id is a valid hex representation of an ObjectId.
func IsObjectId(id string) bool {
	return bson.IsObjectIdHex(id)
}
// ObjectId takes either a string or a bson.ObjectId (as interface{}) and
// returns a bson.ObjectId. It panics when any other type is passed, and
// bson.ObjectIdHex panics when the string is not a valid ObjectId hex.
func ObjectId(id interface{}) bson.ObjectId {
	// Idiom fix: a value-binding type switch replaces the old
	// `switch id.(type)` + repeated type assertions, and the redundant
	// `break` statements (Go switch cases never fall through).
	switch v := id.(type) {
	case string:
		return bson.ObjectIdHex(v)
	case bson.ObjectId:
		return v
	default:
		panic("Only accepts types `string` and `bson.ObjectId` accepted as Id")
	}
}
|
package font
import (
"crypto/md5"
"sync"
"github.com/gop9/olt/gio/font/opentype"
"github.com/gop9/olt/gio/text"
"github.com/gop9/olt/gio/unit"
"golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
// Face couples a parsed OpenType font with a text shaper at a fixed pixel
// size, caching the measured font metrics.
type Face struct {
	fnt     *opentype.Font
	shaper  *text.Shaper
	size    int           // requested size in pixels
	fsize   fixed.Int26_6 // size as 26.6 fixed point, for the shaper
	metrics font.Metrics  // ascent/descent/height measured at construction
}

// fontsMu guards fontsMap, a process-wide cache of parsed fonts keyed by the
// MD5 of the raw TTF bytes.
var fontsMu sync.Mutex
var fontsMap = map[[md5.Size]byte]*opentype.Font{}
// NewFace parses ttf (or reuses a previously parsed copy from the cache) and
// builds a Face at the given pixel size, measuring ascent/descent once by
// laying out a probe string.
func NewFace(ttf []byte, size int) (Face, error) {
	key := md5.Sum(ttf)
	fontsMu.Lock()
	defer fontsMu.Unlock()
	fnt := fontsMap[key] // missing keys yield nil; the old `, _` form was redundant
	if fnt == nil {
		var err error
		fnt, err = opentype.Parse(ttf)
		if err != nil {
			return Face{}, err
		}
		// Bug fix: store the parsed font back into the cache. Previously the
		// map was never written, so every NewFace call re-parsed the bytes
		// and the cache (and its mutex) served no purpose.
		fontsMap[key] = fnt
	}
	shaper := &text.Shaper{}
	shaper.Register(text.Font{}, fnt)
	face := Face{fnt, shaper, size, fixed.I(size), font.Metrics{}}
	metricsTxt := face.shaper.Layout(face, text.Font{}, "metrics", text.LayoutOptions{MaxWidth: 1e6})
	face.metrics.Ascent = metricsTxt.Lines[0].Ascent
	face.metrics.Descent = metricsTxt.Lines[0].Descent
	face.metrics.Height = face.metrics.Ascent + face.metrics.Descent
	return face, nil
}
// Px converts a unit value to pixels. NOTE(review): the argument is ignored —
// the face always reports its fixed construction size; confirm this is the
// intended behavior for non-pixel units.
func (face Face) Px(v unit.Value) int {
	return face.size
}

// Metrics returns the font metrics measured when the Face was created.
func (face Face) Metrics() font.Metrics {
	return face.metrics
}
|
package template
import (
"html/template"
"io/ioutil"
"github.com/MihaiBlebea/dog-ceo/dog"
)
// service implements the template Service backed by a dog.Service.
type service struct {
	dogService dog.Service
}

// New returns a new template service
func New(dogService dog.Service) Service {
	return &service{dogService}
}

// Load reads the template file at path, parses it, fetches all dogs from the
// dog service, and returns both bundled into a Page.
func (s *service) Load(path string) (*Page, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	tmp, err := template.New("Template").Parse(string(b))
	if err != nil {
		return nil, err
	}
	// fetch breeds and dogs
	dogs, err := s.dogService.AllDogs()
	if err != nil {
		return nil, err
	}
	return &Page{tmp, dogs}, nil
}
|
package backend
import (
"context"
"encoding/json"
"fmt"
"log"
"math/rand"
"net/http"
"sync"
"time"
)
// The following message types are for communication by channels.

// messageIndex announces their own position on the board.
type messageIndex struct {
	Index int `json:"index"`
}

func (messageIndex) IsMessage() {}

// messageTurn carries the current per-player status slice and the clock time
// at which the current turn started.
type messageTurn struct {
	Status []Status  `json:"status"`
	Time   time.Time `json:"current"`
}

func (messageTurn) IsMessage() {}

// messageSentence contains the information of an occurring next sentence.
type messageSentence struct {
	Sentence `json:"sentence"`
	Pos      int `json:"pos"` // The position in the slice. Maybe it could be sent not in order?
}

func (messageSentence) IsMessage() {}

// messageEnd passes the End indicator.
type messageEnd struct {
	Winner int // Index of the winner.
}

func (messageEnd) IsMessage() {}

// MessageRequest is a player's response.
type MessageRequest struct {
	IsSkip   bool `json:"skip"` // Whether the player has skipped.
	Received time.Time
	Content  string `json:"content"`
}

// Messager is an internal interface, to help type safety with general message-typing.
type Messager interface {
	IsMessage()
}

// Message is the general message type we will use in sending-channels.
type Message struct {
	Type    string   `json:"type"`
	Message Messager `json:"message"`
	done    chan struct{}
}

// pconnMap is a mutex-guarded registry of player connections (by player ID)
// plus spectator ("guest") connections.
type pconnMap struct {
	mu     sync.Mutex
	Conns  map[string]*PlayerConn
	Guests []*PlayerConn
}

// Get returns the connection registered for id, if any.
func (p *pconnMap) Get(id string) (*PlayerConn, bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	c, ok := p.Conns[id]
	return c, ok
}

// Set registers (or replaces) the connection for id.
func (p *pconnMap) Set(id string, conn *PlayerConn) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.Conns[id] = conn
}

// Guest adds a spectator connection.
func (p *pconnMap) Guest(conn *PlayerConn) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.Guests = append(p.Guests, conn)
}

// Send fans m out to every player and guest connection, each on its own
// goroutine so a slow receiver cannot block the map's lock for long.
func (p *pconnMap) Send(m Message) {
	sender := func(conn *PlayerConn) { conn.SendMessage(m) }
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, conn := range p.Conns {
		go sender(conn)
	}
	for _, conn := range p.Guests {
		go sender(conn)
	}
}

// RoomHandler is a Handler that serves players' connections to Room server.
type RoomHandler struct {
	Room Room
	// internal variables
	p         pconnMap
	ctx       context.Context
	TurnTimer *time.Timer // The turn timer.
}

// Broadcast sends the message to all listening PlayerConns.
// It pauses until all messages are scheduled to send.
func (h *RoomHandler) Broadcast(m Message) {
	h.p.Send(m)
}
// addSentence adds a valid sentence into the Room, marks its author active,
// and broadcasts the new sentence with its slice position.
func (h *RoomHandler) addSentence(id int, Content string) {
	h.Room.Status[id] = StatusActive
	sent := Sentence{
		Owner:   id,
		Content: Content,
	}
	h.Room.Sentences = append(h.Room.Sentences, sent)
	h.Broadcast(Message{
		Type: "sentence",
		Message: messageSentence{
			Sentence: sent,
			Pos:      len(h.Room.Sentences) - 1,
		},
	})
}

// addSkip adds a system skip announcement into the Room. isSkip=true means a
// deliberate skip (player marked StatusOut); isSkip=false means a timeout or
// disconnect (player marked StatusDc).
func (h *RoomHandler) addSkip(id int, isSkip bool) {
	sent := Sentence{System: true}
	if !isSkip {
		h.Room.Status[id] = StatusDc
		sent.Content = fmt.Sprintf("Player `%s` has timed out.", h.Room.Members[id].Username)
	} else {
		h.Room.Status[id] = StatusOut
		sent.Content = fmt.Sprintf("Player `%s` has skipped.", h.Room.Members[id].Username)
	}
	h.Room.Sentences = append(h.Room.Sentences, sent)
	h.Broadcast(Message{
		Type: "sentence",
		Message: messageSentence{
			Sentence: sent,
			Pos:      len(h.Room.Sentences) - 1,
		},
	})
}

// announceTurn marks player `turn` as the active turn and broadcasts a copy
// of the status slice (copied so later mutation doesn't race the send).
func (h *RoomHandler) announceTurn(turn int) {
	h.Room.Status[turn] = StatusTurn
	sendStatus := make([]Status, len(h.Room.Status))
	copy(sendStatus, h.Room.Status)
	h.Broadcast(Message{
		Type: "turn",
		Message: messageTurn{
			Status: sendStatus,
			Time:   h.Room.Current,
		},
	})
}

// nextTurn announces the next turn, and, if ended, announces the end.
func (h *RoomHandler) nextTurn(last int) (int, bool) {
	nxt, ended := h.Room.NextTurn(last)
	if ended {
		h.Broadcast(Message{
			Type: "turn",
			Message: messageTurn{
				Status: h.Room.Status,
				Time:   h.Room.Current,
			},
		})
		h.Broadcast(Message{
			Type:    "end",
			Message: messageEnd{Winner: nxt},
		})
		return nxt, true
	}
	// Reset the turn clock for the upcoming turn.
	h.Room.Current = time.Now()
	return nxt, ended
}
// shufflePlayers randomizes the turn order in place with a uniform shuffle.
// rand.Shuffle replaces the hand-rolled Fisher-Yates loop; behavior (a
// uniform permutation using the global rand source) is preserved.
func shufflePlayers(players []User) {
	rand.Shuffle(len(players), func(i, j int) {
		players[i], players[j] = players[j], players[i]
	})
}
// NewRoom creates a new room: shuffles the players into a turn order, sets up
// the connection registry and the room state (seeded with the opening system
// sentence), and starts the game loop on its own goroutine. The returned
// handler's context is cancelled when the game ends.
func NewRoom(roomID int, players []User, timeout time.Duration, openSentence string) (h *RoomHandler) {
	h = new(RoomHandler)
	shufflePlayers(players)
	h.p = pconnMap{
		Conns:  make(map[string]*PlayerConn),
		Guests: make([]*PlayerConn, 0),
	}
	var cancel context.CancelFunc
	h.ctx, cancel = context.WithCancel(context.Background())
	// Set the room up.
	h.Room = Room{
		ID:        roomID,
		Members:   players,
		Status:    make([]Status, len(players)),
		Sentences: []Sentence{Sentence{System: true, Content: openSentence}},
		Start:     time.Now(),
		Timeout:   timeout,
	}
	go h.Play(cancel)
	return
}
// Play starts up the game. It waits for players to connect, then runs turns
// until the room reports the game has ended: each turn it announces the
// active player, waits for that player's sentence, deliberate skip, error, or
// the turn timer, then advances. cancel is invoked when the game finishes so
// ServeHTTP can short-circuit late connections.
func (h *RoomHandler) Play(cancel context.CancelFunc) {
	// Wait a while so that all players are connected.
	<-time.After(10 * time.Second)
	var (
		turn  = 0
		ended = h.Room.Ended()
	)
	h.Room.Current = time.Now()
	for !ended {
		// Resets the timer so that it gives the proper time.
		// NOTE(review): Current.Add(Timeout).Sub(Current) is algebraically
		// just Timeout — presumably the intent was .Sub(time.Now()); confirm.
		h.TurnTimer = time.NewTimer(h.Room.Current.Add(h.Room.Timeout).Sub(h.Room.Current))
		conn, active := h.p.Get(h.Room.Members[turn].ID)
		h.announceTurn(turn)
		log.Printf("Room %d: Turn %d\n", h.Room.ID, turn)
		if !active {
			// User not even connected
			h.addSkip(turn, false)
		} else {
		awaitResp:
			for {
				select {
				case resp := <-conn.Send:
					// Ignore responses composed before this turn started.
					if resp.Received.Sub(h.Room.Current) < 0 {
						continue awaitResp
					}
					if resp.IsSkip {
						h.addSkip(turn, true)
					} else if len(resp.Content) > 0 {
						h.addSentence(turn, resp.Content)
					} else {
						// Empty, non-skip content: keep waiting.
						continue awaitResp
					}
					break awaitResp
				case err := <-conn.ErrChan:
					log.Printf("Room %d, Player %d: %v\n", h.Room.ID, turn, err)
					h.addSkip(turn, false)
					break awaitResp
				case <-h.TurnTimer.C:
					h.addSkip(turn, false)
					break awaitResp
				}
			}
		}
		h.TurnTimer.Stop()
		turn, ended = h.nextTurn(turn)
	}
	log.Printf("Room %d ended\n", h.Room.ID)
	cancel()
}
// serveInfoReqs serves a JSON snapshot of the room state (used for POST
// info requests in ServeHTTP).
func (h *RoomHandler) serveInfoReqs(w http.ResponseWriter, r *http.Request) {
	data, err := json.Marshal(h.Room)
	if err != nil {
		// Named constant instead of the magic 500.
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("{\"error\": \"Server error\"}"))
		return
	}
	// Declare the payload type; the handler always emits JSON.
	w.Header().Set("Content-Type", "application/json")
	w.Write(data)
}
// ServeHTTP handles room traffic. POST requests get a JSON room snapshot;
// anything else is upgraded to a websocket. Finished games answer with an
// immediate "end" message; otherwise players (identified by the "player"
// form value) replace any stale connection and receive their board index,
// while unknown visitors are registered as guests.
func (h *RoomHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method == "POST" {
		h.serveInfoReqs(w, r)
		return
	}
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade already wrote an HTTP error to the client.
		return
	}
	pConn := Prepare(conn)
	// Non-blocking check of whether the game has already finished.
	var ended bool
	select {
	case <-h.ctx.Done():
		ended = true
	default:
	}
	// If ended, immediately quit to save memory.
	if ended {
		pConn.SendMessage(Message{
			Type: "end",
			Message: messageEnd{
				Winner: h.Room.Winner(),
			},
		})
		return
	}
	// If this is a player, announce his index.
	r.ParseForm()
	index, err := h.Room.Index(r.FormValue("player"))
	if err == nil {
		ID := r.FormValue("player")
		// Replace old player connection.
		oldConn, ok := h.p.Get(ID)
		if ok {
			oldConn.Close()
		}
		pConn.SendMessage(Message{
			Type: "index",
			Message: messageIndex{
				Index: index,
			},
		})
		h.p.Set(ID, pConn)
	} else {
		// Guest,
		h.p.Guest(pConn)
	}
}
|
package commands
import (
// HOFSTADTER_START import
// HOFSTADTER_END import
// custom imports
// infered imports
"github.com/spf13/cobra"
"github.com/hofstadter-io/examples/blog/server/tools/db-ops/commands/user"
)
// Tool: serverToolDB
// Name: user
// Usage:
// Parent: serverToolDB
// HOFSTADTER_START const
// HOFSTADTER_END const
// HOFSTADTER_START var
// HOFSTADTER_END var
// HOFSTADTER_START init
// HOFSTADTER_END init
// UserCmd is the parent "user" command grouping all user-resource operations.
var UserCmd = &cobra.Command{
	Use:   "user",
	Short: "work with the user resource",
}

// init attaches the user command to the tool's root command.
func init() {
	RootCmd.AddCommand(UserCmd)
}

func init() {
	// add sub-commands to this command when present
	UserCmd.AddCommand(user.PostCmd)
	UserCmd.AddCommand(user.MigrateCmd)
	UserCmd.AddCommand(user.CreateCmd)
	UserCmd.AddCommand(user.FindCmd)
	UserCmd.AddCommand(user.UpdateCmd)
	UserCmd.AddCommand(user.DeleteCmd)
}
// HOFSTADTER_BELOW
|
package model
//{"user_email":"niconicocsc@gmail.com","roles":["user"],"iat":1609415731,"exp":1609416631,"jti":"e3582954-6b17-4ef2-9bdf-0274da1033f3","iss":"Gnemes"}

// PlayLoad holds the fields extracted from a JWT claims payload like the
// sample above. NOTE(review): the name looks like a typo for "Payload", and
// UserEmail has no `json:"user_email"` tag, so encoding/json would not map
// the sample key to this field — confirm how it is populated before relying
// on it.
type PlayLoad struct {
	UserEmail string
}
|
/*
Copyright 2015 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpmcontainerapi
import (
"bytes"
"github.com/ant0ine/go-json-rest/rest"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/logit"
"io/ioutil"
"net/http"
"os/exec"
)
// Request/response payload pairs for the container API endpoints below.
// Response Status is "OK" on success; Output carries the script's stdout.

// RemoteWritefileRequest asks the agent to write Filecontents to Path.
type RemoteWritefileRequest struct {
	Path         string
	Filecontents string
}

type RemoteWritefileResponse struct {
	Status string
}

// InitdbRequest names the container whose database should be initialized.
type InitdbRequest struct {
	ContainerName string
}

type InitdbResponse struct {
	Output string
	Status string
}

type StartPGRequest struct {
	ContainerName string
}

type StartPGResponse struct {
	Output string
	Status string
}

type StartPGOnStandbyRequest struct {
	ContainerName string
}

type StartPGOnStandbyResponse struct {
	Output string
	Status string
}

type StopPGRequest struct {
	ContainerName string
}

type StopPGResponse struct {
	Output string
	Status string
}

// BasebackupRequest identifies the master to back up from and the standby
// that receives the base backup.
type BasebackupRequest struct {
	MasterHostName  string
	StandbyHostName string
}

type BasebackupResponse struct {
	Output string
	Status string
}

type FailoverRequest struct {
	ContainerName string
}

type FailoverResponse struct {
	Output string
	Status string
}

type SeedRequest struct {
	ContainerName string
}

type SeedResponse struct {
	Output string
	Status string
}

type ControldataRequest struct {
	Path string
}

type ControldataResponse struct {
	Output string
	Status string
}
// RemoteWritefile writes the request's Filecontents to Path (mode 0644) and
// responds with Status "OK". Missing Path/Filecontents or a write failure
// yields an HTTP 500 error response.
func RemoteWritefile(w rest.ResponseWriter, r *rest.Request) {
	logit.Info.Println("RemoteWritefile called")
	req := RemoteWritefileRequest{}
	err := r.DecodeJsonPayload(&req)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if req.Path == "" {
		rest.Error(w, "Path not supplied in request", http.StatusInternalServerError)
		return
	}
	if req.Filecontents == "" {
		rest.Error(w, "Filecontents not supplied in request", http.StatusInternalServerError)
		return
	}
	d1 := []byte(req.Filecontents)
	err = ioutil.WriteFile(req.Path, d1, 0644)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	var response RemoteWritefileResponse
	response.Status = "OK"
	w.WriteJson(&response)
}
// Initdb runs initdb.sh for the requested container and returns the script's
// stdout; on failure it logs the error and the captured stderr (previously
// discarded, losing the script's diagnostics) and answers HTTP 400.
func Initdb(w rest.ResponseWriter, r *rest.Request) {
	logit.Info.Println("Initdb called")
	req := InitdbRequest{}
	err := r.DecodeJsonPayload(&req)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Idiom: declare-and-assign instead of `var cmd *exec.Cmd; cmd = ...`.
	cmd := exec.Command("initdb.sh", req.ContainerName)
	var out bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	err = cmd.Run()
	if err != nil {
		logit.Error.Println(err.Error())
		logit.Error.Println(stderr.String())
		rest.Error(w, err.Error(), 400)
		return
	}

	var response InitdbResponse
	response.Output = out.String()
	response.Status = "OK"
	w.WriteJson(&response)
}
func StartPG(w rest.ResponseWriter, r *rest.Request) {
logit.Info.Println("StartPG called")
req := StartPGRequest{}
err := r.DecodeJsonPayload(&req)
if err != nil {
logit.Error.Println(err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var cmd *exec.Cmd
cmd = exec.Command("startpg.sh", req.ContainerName)
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
logit.Error.Println(err.Error())
rest.Error(w, err.Error(), 400)
return
}
var response StartPGResponse
response.Output = out.String()
response.Status = "OK"
w.WriteJson(&response)
}
func StopPG(w rest.ResponseWriter, r *rest.Request) {
logit.Info.Println("StopPG called")
req := StopPGRequest{}
err := r.DecodeJsonPayload(&req)
if err != nil {
logit.Error.Println(err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var cmd *exec.Cmd
cmd = exec.Command("stoppg.sh", req.ContainerName)
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
logit.Error.Println(err.Error())
rest.Error(w, err.Error(), 400)
return
}
var response StopPGResponse
response.Output = out.String()
response.Status = "OK"
w.WriteJson(&response)
}
// Basebackup handles a REST request to seed a standby from a master by
// shelling out to basebackup.sh. Both host names are required.
func Basebackup(w rest.ResponseWriter, r *rest.Request) {
	logit.Info.Println("Basebackup called")

	req := BasebackupRequest{}
	if err := r.DecodeJsonPayload(&req); err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Validate required request fields before touching the shell.
	if req.MasterHostName == "" {
		rest.Error(w, "MasterHostName not supplied in request", http.StatusInternalServerError)
		return
	}
	if req.StandbyHostName == "" {
		rest.Error(w, "StandbyHostName not supplied in request", http.StatusInternalServerError)
		return
	}

	var out, stderr bytes.Buffer
	cmd := exec.Command("basebackup.sh", req.MasterHostName, req.StandbyHostName)
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}

	response := BasebackupResponse{Output: out.String(), Status: "OK"}
	w.WriteJson(&response)
}
// Failover handles a REST request to promote the named container by
// shelling out to fail-over.sh. ContainerName is required.
func Failover(w rest.ResponseWriter, r *rest.Request) {
	logit.Info.Println("Failover called")
	req := FailoverRequest{}
	err := r.DecodeJsonPayload(&req)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if req.ContainerName == "" {
		// BUG FIX: err is nil on this path, so the original
		// rest.Error(w, err.Error(), ...) panicked with a nil pointer
		// dereference. Report the validation message instead.
		logit.Error.Println("ContainerName not supplied in request")
		rest.Error(w, "ContainerName not supplied in request", http.StatusInternalServerError)
		return
	}
	cmd := exec.Command("fail-over.sh", req.ContainerName)
	var out bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	err = cmd.Run()
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	var response FailoverResponse
	response.Output = out.String()
	response.Status = "OK"
	w.WriteJson(&response)
}
// Seed handles a REST request to load seed data into the named container
// by shelling out to seed.sh.
func Seed(w rest.ResponseWriter, r *rest.Request) {
	logit.Info.Println("Seed called")

	req := SeedRequest{}
	if err := r.DecodeJsonPayload(&req); err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	var out, stderr bytes.Buffer
	cmd := exec.Command("seed.sh", req.ContainerName)
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}

	response := SeedResponse{Output: out.String(), Status: "OK"}
	w.WriteJson(&response)
}
// StartPGOnStandby handles a REST request to start PostgreSQL in standby
// mode in the named container by shelling out to startpgonstandby.sh.
func StartPGOnStandby(w rest.ResponseWriter, r *rest.Request) {
	logit.Info.Println("StartPGOnStandby called")

	req := StartPGOnStandbyRequest{}
	if err := r.DecodeJsonPayload(&req); err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	var out, stderr bytes.Buffer
	cmd := exec.Command("startpgonstandby.sh", req.ContainerName)
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}

	response := StartPGOnStandbyResponse{Output: out.String(), Status: "OK"}
	w.WriteJson(&response)
}
// Controldata handles a REST request to dump pg_controldata output for the
// data directory mounted at /pgdata.
func Controldata(w rest.ResponseWriter, r *rest.Request) {
	logit.Info.Println("Controldata called")

	req := ControldataRequest{}
	if err := r.DecodeJsonPayload(&req); err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// NOTE(review): the decoded request is not consulted further;
	// pg_controldata always runs against the fixed /pgdata path.
	var out, stderr bytes.Buffer
	cmd := exec.Command("pg_controldata", "/pgdata")
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}

	response := ControldataResponse{Output: out.String(), Status: "OK"}
	w.WriteJson(&response)
}
|
package parser
import (
"fmt"
"golang.org/x/net/html"
"io"
"strings"
)
var links []*Link

// ParseLinks parses the HTML read from r and returns one Link per href
// attribute found on anchor elements.
//
// BUG FIX: the result used to be accumulated into the package-level
// `links` slice, so repeated calls returned links from earlier documents
// too. Each call now collects into a fresh local slice. It also returns
// early on a parse error instead of crawling a possibly-nil node.
func ParseLinks(r io.Reader) []*Link {
	var found []*Link
	node, err := html.Parse(r)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	crawl(node, &found)
	return found
}
// crawl walks the node tree rooted at n, appending any links it finds to
// *l. The current node is inspected first, then siblings, then children.
func crawl(n *html.Node, l *[]*Link) {
	checkForATypes(n, l)
	if sib := n.NextSibling; sib != nil {
		crawl(sib, l)
	}
	if child := n.FirstChild; child != nil {
		crawl(child, l)
	}
}
// checkForATypes appends a Link for every href attribute on anchor
// elements. DataAtom value 1 corresponds to atom.A ("a") in
// golang.org/x/net/html/atom.
func checkForATypes(n *html.Node, l *[]*Link) {
	if n.DataAtom == 1 {
		for _, attr := range n.Attr {
			if attr.Key == "href" {
				var link Link
				link.Href = attr.Val
				// BUG FIX: an empty anchor (<a href="x"></a>) has a nil
				// FirstChild; calling aCrawler on it unconditionally
				// dereferenced nil. Only collect text when children exist.
				if n.FirstChild != nil {
					link.Text = aCrawler(n.FirstChild)
				}
				*l = append(*l, &link)
			}
		}
	}
}
// aCrawler collects the text content at and beneath n (current node,
// then siblings, then children), normalizing all whitespace runs to
// single spaces.
func aCrawler(n *html.Node) string {
	// BUG FIX: guard against nil so callers can safely pass n.FirstChild
	// of an element with no children.
	if n == nil {
		return ""
	}
	var tmpText string
	if n.Type == html.TextNode {
		tmpText += n.Data + " "
	}
	if n.NextSibling != nil {
		tmpText += aCrawler(n.NextSibling) + " "
	}
	if n.FirstChild != nil {
		tmpText += aCrawler(n.FirstChild) + " "
	}
	return strings.Join(strings.Fields(tmpText), " ")
}
// address aliases a node pointer.
// NOTE(review): this type is unused in the visible code — candidate for
// removal if nothing else in the package references it.
type address *html.Node

// Link is one extracted hyperlink: its href target and visible text.
type Link struct {
	Href string
	Text string
}
|
package mockexample
import "github.com/cyongxue/magicbox/mockexample/spider"
// GetGoVersion asks the given spider for its page body and returns it
// unchanged.
func GetGoVersion(s spider.Spider) string {
	return s.GetBody()
}
|
// Package osbuild provides primitives for representing and (un)marshalling
// OSBuild types.
package osbuild2
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestPipeline_AddStage verifies that AddStage appends exactly one stage
// and that the resulting pipeline equals a hand-built expected value.
func TestPipeline_AddStage(t *testing.T) {
	expectedPipeline := &Pipeline{
		Build:  "name:build",
		Stages: []*Stage{{Type: "org.osbuild.rpm"}},
	}

	actualPipeline := &Pipeline{Build: "name:build"}
	actualPipeline.AddStage(&Stage{Type: "org.osbuild.rpm"})

	assert.Equal(t, expectedPipeline, actualPipeline)
	assert.Equal(t, 1, len(actualPipeline.Stages))
}
|
// Package reader - save read record
package reader
|
package gauth
import (
"testing"
)
// TestTOTP verifies computeTOTP against a known secret/interval/OTP triple.
func TestTOTP(t *testing.T) {
	secret := "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
	timeIntervals := int64(47156746)
	correctOTP := "109431"
	otp, err := computeTOTP(secret, timeIntervals)
	if err != nil {
		t.Errorf("computeTOTP returns an error for valid input: %s", err.Error())
	}
	if otp != correctOTP {
		// BUG FIX: message said "what %s"; the got/want convention is "want".
		t.Errorf("TOTP(%s, %d) = %s, want %s", secret, timeIntervals, otp, correctOTP)
	}
}
|
package terminfo
import (
"errors"
"fmt"
"strings"
)
// GetBuiltin returns a named builtin Terminfo. An empty term falls back
// to the package default; unknown names are matched against the partial
// compatibility table before giving up.
func GetBuiltin(term string) (*Terminfo, error) {
	if term == "" {
		if defaultTerm == "" {
			return nil, errors.New("no term name given, and no default defined")
		}
		term = defaultTerm
	}
	if ti, found := builtins[term]; found {
		return ti, nil
	}
	// Fall back to substring-based compatibility matching.
	for _, entry := range compatTable {
		if strings.Contains(term, entry.partial) {
			return entry.Terminfo, nil
		}
	}
	return nil, fmt.Errorf("unsupported TERM=%q", term)
}
|
package post
import "github.com/alejogs4/blog/src/post/domain/like"
// PostRepository abstracts persistence for posts together with their
// likes and comments.
type PostRepository interface {
	CreatePost(post Post) error
	AddLike(postID string, like like.Like) error
	RemoveLike(like like.Like) error
	AddComment(comment Comment) error
	RemoveComment(comment Comment) error
	GetPostCommentByID(id string) (Comment, error)
	GetAllPosts() ([]PostsDTO, error)
	GetPostLikes(postID string) ([]like.Like, error)
	GetPostByID(postID string) (Post, error)
}
|
package antlr
// Represents the type of recognizer an ATN applies to.
const (
	ATNTypeLexer  = 0 // ATN built for a lexer
	ATNTypeParser = 1 // ATN built for a parser
)
|
// Package main defines the nectar command line tool.
package main
import (
"os"
"github.com/troubling/nectar"
)
// main forwards the raw command-line arguments to nectar's CLI entry
// point. The three nil arguments presumably select default hooks —
// TODO confirm against nectar.CLI's signature.
func main() {
	nectar.CLI(os.Args, nil, nil, nil)
}
|
package register
import (
"crypto/md5"
"fmt"
"io"
"log"
"net/http"
"github.com/hieutm211/basicweb/config"
"github.com/hieutm211/basicweb/register/regfunc"
)
// Handler processes the registration form: it hashes the submitted
// password, validates the input, inserts the new user row, and redirects
// to /home on success.
func Handler(w http.ResponseWriter, r *http.Request) {
	var err error
	username := r.FormValue("username")
	// SECURITY NOTE: plain MD5 is not an acceptable password hash; a
	// salted adaptive scheme (e.g. bcrypt) should replace this.
	pw := md5.New()
	io.WriteString(pw, r.FormValue("password"))
	password := fmt.Sprintf("%x", pw.Sum(nil))
	fullname := r.FormValue("fullname")
	birthday := r.FormValue("birthday")
	email := r.FormValue("email")
	if !regfunc.RegisterCheck(w, r) {
		return
	}
	db, err := config.InitDB()
	if err != nil {
		// BUG FIX: log.Fatal here terminated the entire server on a
		// transient DB outage; answer this one request with a 500 instead.
		log.Println("Cannot connect to Database", err)
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}
	defer db.Close()
	sqlStmt := `
	INSERT INTO users (username, password, name, birthday, email)
	VALUES($1, $2, $3, $4, $5);`
	_, err = db.Exec(sqlStmt, username, password, fullname, birthday, email)
	if err != nil {
		fmt.Fprintln(w, err)
		return
	}
	http.Redirect(w, r, "/home", http.StatusMovedPermanently)
}
|
package main
import "fmt"
// main2301 demonstrates holding a pointer to a slice and reading/writing
// the slice's elements through that pointer.
func main2301() {
	s := []int{1, 2, 3, 4, 5}
	// Bind a pointer to the slice.
	p := &s
	fmt.Printf("%p\n", p)
	fmt.Printf("%p\n", s)
	//*[]int
	//var p *[]int
	fmt.Printf("%T\n", p)
	// Modify a slice element indirectly through the pointer.
	//p[1] = 222
	(*p)[1] = 222
	fmt.Println(s)
	// Iterate with an index-based for loop.
	for i := 0; i < len(s); i++ {
		fmt.Println((*p)[i])
	}
}
// test23 shows a slice pointer as a function parameter: appending through
// the pointer is visible to the caller even when the backing array grows.
func test23(s *[]int) {
	extra := []int{6, 7, 8, 9}
	*s = append(*s, extra...)
}
func main() {
s := []int{1,2,3,4,5}
//地址传递
test23(&s)
fmt.Println(s)
} |
package processing
import (
"fmt"
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
)
// Logsync is the interface to implement for uploading logs to different
// services.
type Logsync interface {
	// Process performs the sync. The meaning of the bool flag is not
	// visible from this file — TODO confirm (likely a dry-run toggle).
	Process(bool) error
}
// FileMatch is used to keep track of path, matched strings, and rotated Time
type FileMatch struct {
	path        string            // file path the pattern matched
	matches     map[string]string // named subexpression -> matched text
	rotatedTime time.Time         // rotation timestamp derived from matches
}
// NewFileMatch takes the path and the matched map, and parses the rotation
// timestamp out of the matches. On a parse failure the partially built
// FileMatch is returned together with the wrapped error.
func NewFileMatch(path string, matches map[string]string) (*FileMatch, error) {
	fm := &FileMatch{path: path, matches: matches}
	rotated, err := fm.ParseTime()
	if err != nil {
		return fm, errors.Wrapf(err, "Failed to parse rotated time for %s", path)
	}
	fm.rotatedTime = rotated
	return fm, nil
}
// parseInt looks up key in the matches map and converts its value to an
// int, returning -1 plus an error when the key is missing or non-numeric.
func (f *FileMatch) parseInt(key string) (int, error) {
	raw, found := f.matches[key]
	if !found {
		return -1, fmt.Errorf("No field(%s) matched, unexpected filename %s", key, f.path)
	}
	value, err := strconv.Atoi(raw)
	if err != nil {
		return -1, errors.Wrapf(err, "Cannot parse the field(%s) as int", key)
	}
	return value, nil
}
// Path returns the file path this match was created from.
func (f *FileMatch) Path() string {
	return f.path
}
// ParseTime derives the rotation time from the matches map, which must
// contain "year", "month", "day" and "extra" fields.
// If extra is in 0..23 it is treated as an hour; otherwise it is assumed
// to be a Unix timestamp, which must agree with the Y/M/D fields.
func (f *FileMatch) ParseTime() (time.Time, error) {
	extraInt, err := f.parseInt("extra")
	if err != nil {
		return time.Time{}, errors.Wrapf(err, "Failed to parse for extra")
	}
	day, err := f.parseInt("day")
	if err != nil {
		return time.Time{}, errors.Wrapf(err, "Failed to parse for day")
	}
	month, err := f.parseInt("month")
	if err != nil {
		return time.Time{}, errors.Wrapf(err, "Failed to parse for month")
	}
	year, err := f.parseInt("year")
	if err != nil {
		return time.Time{}, errors.Wrapf(err, "Failed to parse for year")
	}
	if extraInt >= 0 && extraInt <= 23 {
		hour := extraInt
		return time.Date(year, time.Month(month), day, hour, 0, 0, 0, time.UTC), nil
	}
	// assume it's unix time
	tm := time.Unix(int64(extraInt), 0)
	if tm.Year() != year || tm.Month() != time.Month(month) || tm.Day() != day {
		// BUG FIX: this used errors.Wrapf(err, ...) with a nil err, which
		// returns nil and silently swallowed the mismatch — callers got a
		// zero time and no error. Construct a real error instead.
		return time.Time{}, errors.Errorf("Unix time doesn't match the Y/M/D, probably invalid file pattern")
	}
	return tm, nil
}
func matchFileNames(name string, matchPattern *regexp.Regexp) map[string]string {
matches := matchPattern.FindAllStringSubmatch(name, -1)
if len(matches) == 0 {
return nil
}
subExps := matchPattern.SubexpNames()
elements := matches[0]
md := map[string]string{}
for i := 1; i < len(elements); i++ {
md[subExps[i]] = elements[i]
}
return md
}
|
package admin_test
import (
"context"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
kuma_cp "github.com/kumahq/kuma/pkg/config/app/kuma-cp"
core_mesh "github.com/kumahq/kuma/pkg/core/resources/apis/mesh"
"github.com/kumahq/kuma/pkg/core/resources/model"
core_store "github.com/kumahq/kuma/pkg/core/resources/store"
"github.com/kumahq/kuma/pkg/defaults/mesh"
"github.com/kumahq/kuma/pkg/envoy/admin"
"github.com/kumahq/kuma/pkg/test"
"github.com/kumahq/kuma/pkg/test/runtime"
)
// TestEnvoyAdmin wires the Ginkgo specs in this package into go test.
func TestEnvoyAdmin(t *testing.T) {
	test.RunSpecs(t, "EnvoyAdmin Suite")
}
// Mesh names used throughout the suite.
const (
	testMesh    = "test-mesh"
	anotherMesh = "another-mesh"
)

// eac is the client under test; it is initialized in BeforeSuite.
var eac admin.EnvoyAdminClient
// BeforeSuite builds a control-plane runtime, creates two meshes with
// their default resources, and constructs the Envoy Admin client under test.
var _ = BeforeSuite(func() {
	// setup the runtime
	cfg := kuma_cp.DefaultConfig()
	builder, err := runtime.BuilderFor(context.Background(), cfg)
	Expect(err).ToNot(HaveOccurred())
	// FIX: the local was previously named "runtime", shadowing the imported
	// runtime package used on the line above; renamed to rt for clarity.
	rt, err := builder.Build()
	Expect(err).ToNot(HaveOccurred())
	resManager := rt.ResourceManager()
	Expect(resManager).ToNot(BeNil())
	// create mesh defaults
	err = resManager.Create(context.Background(), core_mesh.NewMeshResource(), core_store.CreateByKey(testMesh, model.NoMesh))
	Expect(err).ToNot(HaveOccurred())
	err = mesh.EnsureDefaultMeshResources(rt.ResourceManager(), testMesh)
	Expect(err).ToNot(HaveOccurred())
	err = resManager.Create(context.Background(), core_mesh.NewMeshResource(), core_store.CreateByKey(anotherMesh, model.NoMesh))
	Expect(err).ToNot(HaveOccurred())
	err = mesh.EnsureDefaultMeshResources(rt.ResourceManager(), anotherMesh)
	Expect(err).ToNot(HaveOccurred())
	// setup the Envoy Admin Client
	eac = admin.NewEnvoyAdminClient(resManager, rt.Config())
	Expect(eac).ToNot(BeNil())
})
|
package main
import (
"fmt"
"os"
"strings"
"time"
)
// main echoes the full command line (program name included) and reports
// how long the join-and-print took.
func main() {
	begin := time.Now()
	fmt.Println(strings.Join(os.Args, " "))
	elapsed := time.Since(begin).Seconds()
	fmt.Printf("%.8f Seconds\n", elapsed)
}
|
package app
import (
"fmt"
"net/http"
"github.com/devit-tel/goerror/ginresp"
"github.com/devit-tel/gogo-blueprint/app/inout/staff"
serviceStaff "github.com/devit-tel/gogo-blueprint/service/staff"
"github.com/gin-gonic/gin"
)
// GetStaffsByCompany is the HTTP handler returning the staff list for a
// company, honoring the offset/limit paging bound from the request.
func (app *App) GetStaffsByCompany(c *gin.Context) {
	input := &staff.GetStaffsByCompanyInput{}
	if err := c.ShouldBind(input); err != nil {
		fmt.Println(err)
		ginresp.RespValidateError(c, err)
		return
	}

	svcInput := &serviceStaff.GetStaffsByCompanyInput{
		CompanyId: input.CompanyId,
		Offset:    input.Offset,
		Limit:     input.Limit,
	}
	staffs, err := app.staffService.GetStaffsByCompany(c.Request.Context(), svcInput)
	if err != nil {
		ginresp.RespWithError(c, err)
		return
	}

	output := &staff.GetStaffsByCompanyOutput{Staffs: staff.ToStaffsOutput(staffs)}
	c.JSON(http.StatusOK, output)
}
|
package main
import (
"fmt"
)
// main concatenates a first and last name and demonstrates byte slicing
// of the resulting string.
func main() {
	// Build the full name from its two parts.
	firstname := "Artitaya "
	lastname := "Yaemjaraen"
	sum := firstname + lastname
	fmt.Println(sum)
	// Bytes [1:3) of "Artitaya Yaemjaraen" are "rt".
	fmt.Println(sum[1:3])
}
|
package planets
import "context"
// RemovePlanetHandler handles the "remove planet" use case.
type RemovePlanetHandler struct {
	repository Repository // persistence backend the handler delegates to
}

// NewRemovePlanetHandler builds a handler bound to the given repository.
func NewRemovePlanetHandler(repository Repository) RemovePlanetHandler {
	return RemovePlanetHandler{
		repository: repository,
	}
}
// Execute removes the planet identified by id, mapping any repository
// failure to ErrPlanetNotRemoved.
func (h RemovePlanetHandler) Execute(ctx context.Context, id interface{}) error {
	if err := h.repository.Remove(ctx, id); err != nil {
		return ErrPlanetNotRemoved
	}
	return nil
}
|
package main
import "fmt"
// Channels are the pipes that connect concurrent goroutines; this demo
// sends one value across an unbuffered channel.
func main() {
	messages := make(chan string) // new channel - channels are typed
	go func() { messages <- "ping" }() // send value into channel using <-
	// <- channel syntax receives value from the channel
	msg := <-messages
	fmt.Println(msg)
	// sends and receives block until both the sender and receiver are ready.
	// Property allows us to wait at end for the "ping" without having to use synchronization
}
|
package file_common
import "errors"
// fileFormat enumerates the media categories the service recognizes.
type fileFormat int

// Supported file formats.
// NOTE(review): Go convention favors MixedCaps (Image, Video, ...) over
// ALL_CAPS, but renaming exported names would break callers.
const (
	IMAGE = fileFormat(1)
	VIDEO = fileFormat(2)
	AUDIO = fileFormat(3)
	GIF   = fileFormat(4)
)

// Sentinel errors returned by file validation.
var ErrBadReq = errors.New("bad request")
var ErrWrongFileName = errors.New("wrong file name")
var ErrFileNameTooShort = errors.New("file name too short")
var ErrResizeNotAllowed = errors.New("image resize not allowed")
|
package memcache
import (
"bufio"
"bytes"
"errors"
"io"
"io/ioutil"
"log"
"net"
"sync"
"time"
)
// Errors returned by Client operations.
var (
	ErrCacheMiss            = errors.New("memcache.Client: cache miss")
	ErrCasidMismatch        = errors.New("memcache.Client: casid mismatch")
	ErrClientNotRunning     = errors.New("memcache.Client: the client isn't running")
	ErrCommunicationFailure = errors.New("memcache.Client: communication failure")
	ErrMalformedKey         = errors.New("memcache.Client: malformed key")
	ErrNilValue             = errors.New("memcache.Client: nil value")
	ErrNotModified          = errors.New("memcache.Client: item not modified")
	ErrAlreadyExists        = errors.New("memcache.Client: the item already exists")
)

// Defaults applied by Client.init() when the corresponding ClientConfig
// field is left zero.
const (
	defaultConnectionsCount        = 4
	defaultMaxPendingRequestsCount = 1024
)
// Memcache client configuration. Can be passed to Client and DistributedClient.
// Zero-valued fields are replaced with package defaults when the client
// starts (see Client.init()).
type ClientConfig struct {
	// The number of simultaneous TCP connections to establish
	// to memcached server.
	// Optional parameter.
	//
	// The Client is able to squeeze out impossible from a single
	// connection by pipelining a ton of requests on it.
	// Multiple simultaneous connections may be required in the following
	// cases:
	//   * If memcached server delays incoming requests' execution.
	//     Since memcached protocol doesn't allow out-of-order requests'
	//     execution, a single slow request may delay execution of all
	//     the requests pipelined on the connection after it.
	//     Multiple concurrent connections may help in such a situation.
	//   * If memcached server runs on multi-CPU system, but uses a single
	//     CPU (thread) per connection.
	ConnectionsCount int

	// The maximum number of pending requests awaiting to be processed
	// by memcached server.
	// Optional parameter.
	MaxPendingRequestsCount int

	// The size in bytes of buffer used by the Client for reading responses
	// received from memcached per connection.
	// Optional parameter.
	ReadBufferSize int

	// The size in bytes of buffer used by the Client for writing requests
	// to be sent to memcached per connection.
	// Optional parameter.
	WriteBufferSize int

	// The size in bytes of OS-supplied read buffer per TCP connection.
	// Optional parameter.
	OSReadBufferSize int

	// The size in bytes of OS-supplied write buffer per TCP connection.
	// Optional parameter.
	OSWriteBufferSize int
}
// Fast memcache client.
//
// The client is goroutine-safe. It is designed to work fast when hundreds
// concurrent goroutines are calling simultaneously its' methods.
//
// The client works with a single memcached server. Use DistributedClient
// if you want working with multiple servers.
//
// Usage:
//
//   c := Client{
//       ServerAddr: ":11211",
//   }
//   c.Start()
//   defer c.Stop()
//
//   item := Item{
//       Key:   []byte("key"),
//       Value: []byte("value"),
//   }
//   if err := c.Set(&item); err != nil {
//       handleError(err)
//   }
//   if err := c.Get(&item); err != nil {
//       handleError(err)
//   }
//
type Client struct {
	ClientConfig

	// TCP address of memcached server to connect to.
	// Required parameter.
	//
	// The address should be in the form addr:port.
	ServerAddr string

	// requests carries queued tasks to the connection goroutines.
	requests chan tasker
	// done is signalled when run() and its connection handlers exit;
	// it is non-nil exactly while the client is running.
	done *sync.WaitGroup
}
// Memcache item.
type Item struct {
	// Item's key.
	// Required parameter.
	Key []byte

	// Item's value.
	//
	// The Value is required in set()-type requests and isn't required in
	// get()-type requests.
	Value []byte

	// Expiration time for the item.
	// Zero means the item has no expiration time.
	//
	// The Expiration is used only in set()-type requests.
	Expiration time.Duration

	// An opaque value, which is passed to/from memcache.
	// Optional parameter.
	Flags uint32

	// This field is filled by get()-type requests and should be passed
	// to Cas() and Cget*() requests.
	Casid uint64
}

// tasker is the lifecycle every pipelined request implements: Init runs
// before enqueueing, WriteRequest/ReadResponse run on the connection
// goroutines, Done reports completion, and Wait blocks the caller until
// Done fires.
type tasker interface {
	Init()
	WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool
	ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool
	Done(ok bool)
	Wait() bool
}
// requestsSender writes queued tasks to the connection and hands each
// successfully written task to the responses channel in order. The writer
// is flushed only when no request is immediately pending, batching writes
// under load without adding latency when idle.
func requestsSender(w *bufio.Writer, requests <-chan tasker, responses chan<- tasker, c net.Conn, done *sync.WaitGroup) {
	defer done.Done()
	defer w.Flush()
	defer close(responses)
	scratchBuf := make([]byte, 0, 1024)
	for {
		var t tasker
		var ok bool
		// Flush w only if there are no pending requests.
		select {
		case t, ok = <-requests:
		default:
			w.Flush()
			t, ok = <-requests
		}
		if !ok {
			break
		}
		if !t.WriteRequest(w, &scratchBuf) {
			// Serialization failed: fail the task and tear down this send
			// loop (the deferred close(responses) unblocks the receiver).
			t.Done(false)
			break
		}
		responses <- t
	}
}
// responsesReceiver reads one response for each task forwarded by
// requestsSender (same order as written). On a failed read it closes the
// connection and then fails every task still in flight.
func responsesReceiver(r *bufio.Reader, responses <-chan tasker, c net.Conn, done *sync.WaitGroup) {
	defer done.Done()
	line := make([]byte, 0, 1024)
	for t := range responses {
		if !t.ReadResponse(r, &line) {
			t.Done(false)
			c.Close()
			break
		}
		t.Done(true)
	}
	// Drain and fail whatever was still queued when the loop broke.
	for t := range responses {
		t.Done(false)
	}
}
// handleAddr dials the server once and runs a sender/receiver goroutine
// pair over the connection, returning after both finish (i.e. when the
// connection breaks or the client stops).
func handleAddr(c *Client) {
	tcpAddr, err := net.ResolveTCPAddr("tcp", c.ServerAddr)
	if err != nil {
		log.Printf("Cannot resolve ServerAddr=[%s]: [%s]", c.ServerAddr, err)
		return
	}
	conn, err := net.DialTCP("tcp", nil, tcpAddr)
	if err != nil {
		log.Printf("Cannot establish tcp connection to addr=[%s]: [%s]", tcpAddr, err)
		return
	}
	defer conn.Close()
	// NOTE(review): Fatalf aborts the whole process on a socket-option
	// failure; a Printf+return would be gentler — confirm intent.
	if err = conn.SetReadBuffer(c.OSReadBufferSize); err != nil {
		log.Fatalf("Cannot set TCP read buffer size to %d: [%s]", c.OSReadBufferSize, err)
	}
	if err = conn.SetWriteBuffer(c.OSWriteBufferSize); err != nil {
		log.Fatalf("Cannot set TCP write buffer size to %d: [%s]", c.OSWriteBufferSize, err)
	}
	r := bufio.NewReaderSize(conn, c.ReadBufferSize)
	w := bufio.NewWriterSize(conn, c.WriteBufferSize)
	responses := make(chan tasker, c.MaxPendingRequestsCount)
	var sendRecvDone sync.WaitGroup
	defer sendRecvDone.Wait()
	sendRecvDone.Add(2)
	go requestsSender(w, c.requests, responses, conn, &sendRecvDone)
	go responsesReceiver(r, responses, conn, &sendRecvDone)
}
// addrHandler keeps one logical connection alive for the client's
// lifetime: (re)connect via handleAddr, cancel requests queued while
// disconnected, and exit once Stop() closes the requests channel.
func addrHandler(c *Client, done *sync.WaitGroup) {
	defer done.Done()
	for {
		handleAddr(c)
		// cancel all pending requests
		// NOTE(review): this range only terminates when c.requests is
		// closed, so every request arriving after a connection failure is
		// cancelled here until Stop() — the reconnect below is only
		// reachable once the channel closes. Confirm this is intended.
		for t := range c.requests {
			t.Done(false)
		}
		// wait for new incoming requests
		t, ok := <-c.requests
		if !ok {
			// The requests channel is closed.
			return
		}
		c.requests <- t
	}
}
// init fills zero-valued configuration fields with package defaults,
// allocates the request queue sized to MaxPendingRequestsCount, and arms
// the done WaitGroup that Stop() waits on.
func (c *Client) init() {
	orDefault := func(field *int, def int) {
		if *field == 0 {
			*field = def
		}
	}
	orDefault(&c.ConnectionsCount, defaultConnectionsCount)
	orDefault(&c.MaxPendingRequestsCount, defaultMaxPendingRequestsCount)
	orDefault(&c.ReadBufferSize, defaultReadBufferSize)
	orDefault(&c.WriteBufferSize, defaultWriteBufferSize)
	orDefault(&c.OSReadBufferSize, defaultOSReadBufferSize)
	orDefault(&c.OSWriteBufferSize, defaultOSWriteBufferSize)

	c.requests = make(chan tasker, c.MaxPendingRequestsCount)
	c.done = &sync.WaitGroup{}
	c.done.Add(1)
}
// run launches ConnectionsCount connection handlers, waits for them all
// to finish, and then signals c.done.
func (c *Client) run() {
	defer c.done.Done()
	var connsDone sync.WaitGroup
	defer connsDone.Wait()
	connsDone.Add(c.ConnectionsCount)
	for i := 0; i < c.ConnectionsCount; i++ {
		go addrHandler(c, &connsDone)
	}
}
// pushTask enqueues t for a connection goroutine, returning
// ErrClientNotRunning when the client hasn't been started.
func (c *Client) pushTask(t tasker) error {
	// There is a race condition here, when c.requests is closed,
	// but c.done isn't nil yet in Client.Stop().
	// In this an attempt to push task to c.requests will panic.
	//
	// This condition may appear only if clients are dynamically
	// added/removed to/from clients pool such as DistributedClient.
	//
	// Do not use recover() in deferred function as a workaround for this
	// race condition due to performance reasons.
	if c.done == nil {
		return ErrClientNotRunning
	}
	c.requests <- t
	return nil
}
// do runs a single task to completion: initialize it, enqueue it on the
// request pipeline, then block until its result arrives.
func (c *Client) do(t tasker) error {
	if c.requests == nil {
		return ErrClientNotRunning
	}
	t.Init()
	if err := c.pushTask(t); err != nil {
		return err
	}
	if t.Wait() {
		return nil
	}
	return ErrCommunicationFailure
}
// Starts the given client.
//
// No longer needed clients must be stopped via Client.Stop() call.
func (c *Client) Start() {
	if c.done != nil {
		panic("Did you call Client.Stop() before calling Client.Start()?")
	}
	c.init()
	go c.run()
}

// Stops the given client, which has been started via Client.Start() call.
func (c *Client) Stop() {
	if c.done == nil {
		panic("Did you call Client.Start() before calling Client.Stop()?")
	}
	// Closing requests makes the connection handlers exit; done fires
	// once they have all finished.
	close(c.requests)
	c.done.Wait()
	c.done = nil
}
// doneChansPool recycles completion channels so a channel isn't allocated
// for every request.
var doneChansPool = make(chan (chan bool), 1024)

// acquireDoneChan returns a pooled channel, or a fresh buffered one when
// the pool is empty.
func acquireDoneChan() chan bool {
	select {
	case ch := <-doneChansPool:
		return ch
	default:
		return make(chan bool, 1)
	}
}

// releaseDoneChan puts ch back into the pool, silently dropping it when
// the pool is already full.
func releaseDoneChan(done chan bool) {
	select {
	case doneChansPool <- done:
	default:
	}
}
// taskSync supplies the Done/Wait half of the tasker contract via a
// pooled one-slot channel; concrete task types embed it.
type taskSync struct {
	done chan bool
}

// Init obtains a completion channel from the pool.
func (t *taskSync) Init() {
	t.done = acquireDoneChan()
}

// Done publishes the task's outcome; the channel is buffered, so this
// never blocks the connection goroutine.
func (t *taskSync) Done(ok bool) {
	t.done <- ok
}

// Wait blocks until Done fires, then recycles the channel.
func (t *taskSync) Wait() (ok bool) {
	ok = <-t.done
	releaseDoneChan(t.done)
	return
}
// taskGetMulti is the in-flight state of a GetMulti request.
type taskGetMulti struct {
	items []Item
	taskSync
}
// readValueHeader parses a "VALUE <key> <flags> <size> [<casid>]" header
// line. ok is false on any malformed token; casid stays zero when the
// optional token is absent.
func readValueHeader(line []byte) (key []byte, flags uint32, casid uint64, size int, ok bool) {
	ok = false
	if !bytes.HasPrefix(line, strValue) {
		log.Printf("Unexpected line read=[%s]. It should start with [%s]", line, strValue)
		return
	}
	line = line[len(strValue):]
	n := -1
	if key = nextToken(line, &n, "key"); key == nil {
		return
	}
	if flags, ok = parseFlagsToken(line, &n); !ok {
		return
	}
	if size, ok = parseSizeToken(line, &n); !ok {
		return
	}
	// No casid token present; ok is already true from parseSizeToken.
	if n == len(line) {
		return
	}
	if casid, ok = parseUint64Token(line, &n, "casid"); !ok {
		return
	}
	ok = expectEof(line, n)
	return
}
// readValue reads exactly size payload bytes followed by the trailing
// CRLF; ok is false on a short read or a missing terminator.
func readValue(r *bufio.Reader, size int) ([]byte, bool) {
	buf, err := ioutil.ReadAll(io.LimitReader(r, int64(size)))
	if err != nil {
		log.Printf("Error when reading value with size=%d: [%s]", size, err)
		return buf, false
	}
	return buf, matchCrLf(r)
}
// readKeyValue parses a VALUE header line and then reads the payload
// that follows it.
func readKeyValue(r *bufio.Reader, line []byte) (key []byte, flags uint32, casid uint64, value []byte, ok bool) {
	var size int
	key, flags, casid, size, ok = readValueHeader(line)
	if !ok {
		return
	}
	value, ok = readValue(r, size)
	return
}
// readItem reads one response line and, when it is a VALUE header, the
// payload that follows. eof reports terminator lines (END and the
// extension replies); wouldBlock and notModified map the server's
// WOULD_BLOCK / NOT_MODIFIED extension responses.
func readItem(r *bufio.Reader, scratchBuf *[]byte, item *Item) (ok bool, eof bool, wouldBlock bool, notModified bool) {
	if ok = readLine(r, scratchBuf); !ok {
		return
	}
	line := *scratchBuf
	if bytes.Equal(line, strEnd) {
		ok = true
		eof = true
		return
	}
	if bytes.Equal(line, strWouldBlock) {
		ok = true
		eof = true
		wouldBlock = true
		return
	}
	if bytes.Equal(line, strNotModified) {
		ok = true
		eof = true
		notModified = true
		return
	}
	item.Key, item.Flags, item.Casid, item.Value, ok = readKeyValue(r, line)
	return
}
// WriteRequest emits "gets <key1> <key2> ...\r\n" for all requested items.
func (t *taskGetMulti) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	if !writeStr(w, strGets) {
		return false
	}
	itemsCount := len(t.items)
	if itemsCount > 0 {
		if !writeStr(w, t.items[0].Key) {
			return false
		}
	}
	for i := 1; i < itemsCount; i++ {
		// BUG FIX: the condition used to be `writeWs(w) && !writeStr(...)`,
		// so a failed whitespace write was silently ignored and the key was
		// skipped entirely, corrupting the request. Fail on either error.
		if !writeWs(w) || !writeStr(w, t.items[i].Key) {
			return false
		}
	}
	return writeCrLf(w)
}
// updateItemByKey copies the freshly read item's data into every entry of
// items whose key matches. It returns false when no entry matched, i.e.
// the server sent back a key that was never requested.
func updateItemByKey(items []Item, item *Item) bool {
	itemsCount := len(items)
	updatedItemsCount := 0
	// This loop may be quite slow for big itemsCount.
	// TODO(valyala): think how to improve it without incurring additional
	// overhead for small itemsCount.
	for i := 0; i < itemsCount; i++ {
		it := &items[i]
		if bytes.Equal(it.Key, item.Key) {
			it.Value = item.Value
			it.Flags = item.Flags
			// BUG FIX: Casid was not propagated even though GetMulti's
			// documentation promises Item.Casid is set for returned items.
			it.Casid = item.Casid
			updatedItemsCount++
		}
	}
	return updatedItemsCount > 0
}
// ReadResponse consumes VALUE blocks until the END line, copying each
// returned item into the matching entries of t.items. A key the server
// returns that we never requested fails the whole response.
func (t *taskGetMulti) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	var item Item
	for {
		ok, eof, _, _ := readItem(r, scratchBuf, &item)
		if !ok {
			return false
		}
		if eof {
			break
		}
		if !updateItemByKey(t.items, &item) {
			return false
		}
	}
	return true
}
// Obtains multiple items associated with the corresponding keys.
//
// Sets Item.Value, Item.Flags and Item.Casid for each returned item.
// Doesn't modify Item.Value and Item.Flags for items missing on the server.
func (c *Client) GetMulti(items []Item) error {
	if len(items) == 0 {
		return nil
	}
	for i := range items {
		if !validateKey(items[i].Key) {
			return ErrMalformedKey
		}
	}
	t := taskGetMulti{items: items}
	return c.do(&t)
}
// taskGet is the in-flight state of a single-key Get request.
type taskGet struct {
	item  *Item
	found bool // true when the server returned a value for the key
	taskSync
}

// WriteRequest emits "gets <key>\r\n".
func (t *taskGet) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeStr(w, strGets) && writeStr(w, t.item.Key) && writeCrLf(w)
}
// readSingleItem reads one item plus the trailing END line and verifies
// that the returned key matches the requested one. On success item.Key is
// restored to the caller's original backing slice.
func readSingleItem(r *bufio.Reader, scratchBuf *[]byte, item *Item) (ok bool, eof bool, wouldBlock, notModified bool) {
	keyOriginal := item.Key
	ok, eof, wouldBlock, notModified = readItem(r, scratchBuf, item)
	if !ok || eof || wouldBlock || notModified {
		return
	}
	if ok = matchStr(r, strEndCrLf); !ok {
		return
	}
	if ok = bytes.Equal(keyOriginal, item.Key); !ok {
		log.Printf("Key mismatch! Expected [%s], but server returned [%s]", keyOriginal, item.Key)
		return
	}
	item.Key = keyOriginal
	return
}
// ReadResponse parses the single-item reply and records hit or miss.
func (t *taskGet) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	ok, eof, _, _ := readSingleItem(r, scratchBuf, t.item)
	if ok {
		t.found = !eof
	}
	return ok
}
// Obtains item.Value, item.Flags and item.Casid for the given item.Key.
//
// Returns ErrCacheMiss on cache miss.
func (c *Client) Get(item *Item) error {
	if !validateKey(item.Key) {
		return ErrMalformedKey
	}
	t := taskGet{item: item}
	if err := c.do(&t); err != nil {
		return err
	}
	if t.found {
		return nil
	}
	return ErrCacheMiss
}
// taskCget is the in-flight state of a conditional-get (cget) request.
type taskCget struct {
	item        *Item
	found       bool // the server returned a value
	notModified bool // the server reported the casid unchanged
	taskSync
}

// WriteRequest emits "cget <key> <casid>\r\n".
func (t *taskCget) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeStr(w, strCget) && writeStr(w, t.item.Key) && writeWs(w) &&
		writeUint64(w, t.item.Casid, scratchBuf) && writeCrLf(w)
}

// ReadResponse records hit/miss plus the NOT_MODIFIED extension signal.
func (t *taskCget) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	var ok, eof bool
	if ok, eof, _, t.notModified = readSingleItem(r, scratchBuf, t.item); !ok {
		return false
	}
	t.found = !eof
	return true
}
// Performs conditional get request for the given item.Key and item.Casid.
//
// This is an extension to memcache protocol, so it isn't supported
// by the original memcache server.
//
// Fills item.Value, item.Flags and item.Casid only on cache hit and only
// if the given casid doesn't match the casid on the server, i.e. if the
// server contains a new value for the given key.
//
// Returns ErrCacheMiss on cache miss.
// Returns ErrNotModified if the corresponding item on the server has
// the same casid (i.e. the item wasn't modified).
//
// Client.Cget() is intended for reducing network bandwidth consumption
// in multi-level caches. It is modelled after HTTP cache validation with
// entity tags - see
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 .
func (c *Client) Cget(item *Item) error {
	if !validateKey(item.Key) {
		return ErrMalformedKey
	}
	t := taskCget{item: item}
	if err := c.do(&t); err != nil {
		return err
	}
	switch {
	case t.notModified:
		return ErrNotModified
	case !t.found:
		return ErrCacheMiss
	default:
		return nil
	}
}
// taskCgetDe is the in-flight state of a conditional, dogpile-effect-aware
// get request.
type taskCgetDe struct {
	item          *Item
	graceDuration time.Duration
	found         bool // the server returned a value
	wouldBlock    bool // another client is building the item; retry later
	notModified   bool // casid unchanged on the server
	taskSync
}

// WriteRequest emits "cgetde <key> <casid> <graceMs>\r\n".
func (t *taskCgetDe) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeStr(w, strCgetDe) && writeStr(w, t.item.Key) && writeWs(w) &&
		writeUint64(w, t.item.Casid, scratchBuf) && writeWs(w) &&
		writeMilliseconds(w, t.graceDuration, scratchBuf) && writeCrLf(w)
}

// ReadResponse records hit/miss plus the WOULD_BLOCK and NOT_MODIFIED
// extension signals.
func (t *taskCgetDe) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	var ok, eof bool
	if ok, eof, t.wouldBlock, t.notModified = readSingleItem(r, scratchBuf, t.item); !ok {
		return false
	}
	t.found = !eof
	return true
}
// Combines functionality of Client.Cget() and Client.GetDe().
func (c *Client) CgetDe(item *Item, graceDuration time.Duration) error {
	if !validateKey(item.Key) {
		return ErrMalformedKey
	}
	var t taskCgetDe
	for {
		t.item = item
		t.graceDuration = graceDuration
		if err := c.do(&t); err != nil {
			return err
		}
		if t.wouldBlock {
			// Another client is filling the item; back off and retry.
			time.Sleep(100 * time.Millisecond)
			continue
		}
		switch {
		case t.notModified:
			return ErrNotModified
		case !t.found:
			return ErrCacheMiss
		default:
			return nil
		}
	}
}
// taskGetDe is the in-flight state of a dogpile-effect-aware get request.
type taskGetDe struct {
	item          *Item
	graceDuration time.Duration
	found         bool // the server returned a value
	wouldBlock    bool // another client is building the item; retry later
	taskSync
}

// WriteRequest emits "getde <key> <graceMs>\r\n".
func (t *taskGetDe) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeStr(w, strGetDe) && writeStr(w, t.item.Key) && writeWs(w) &&
		writeMilliseconds(w, t.graceDuration, scratchBuf) && writeCrLf(w)
}

// ReadResponse records hit/miss plus the WOULD_BLOCK extension signal.
func (t *taskGetDe) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	ok, eof, wouldBlock, _ := readSingleItem(r, scratchBuf, t.item)
	if !ok {
		return false
	}
	if wouldBlock {
		t.found = true
		t.wouldBlock = true
		return true
	}
	t.found = !eof
	t.wouldBlock = false
	return true
}
// Performs dogpile effect-aware get for the given item.Key.
//
// This is an extension to memcache protocol, so it isn't supported
// by the original memcache server.
//
// Returns ErrCacheMiss on cache miss. It is expected that the caller
// will create and store in the cache an item on cache miss during the
// given graceDuration interval.
func (c *Client) GetDe(item *Item, graceDuration time.Duration) error {
	if !validateKey(item.Key) {
		return ErrMalformedKey
	}
	var t taskGetDe
	for {
		t.item = item
		t.graceDuration = graceDuration
		if err := c.do(&t); err != nil {
			return err
		}
		if t.wouldBlock {
			// Another client is building the item; back off and retry.
			time.Sleep(100 * time.Millisecond)
			continue
		}
		if t.found {
			return nil
		}
		return ErrCacheMiss
	}
}
type taskSet struct {
item *Item
taskSync
}
// writeNoreplyAndValue finishes a storage request: it terminates the header
// line (appending the noreply suffix when requested) and then writes the
// value block followed by CRLF.
func writeNoreplyAndValue(w *bufio.Writer, noreply bool, value []byte) bool {
	var headerOk bool
	if noreply {
		headerOk = writeStr(w, strWsNoreplyCrLf)
	} else {
		headerOk = writeCrLf(w)
	}
	return headerOk && writeStr(w, value) && writeCrLf(w)
}
// writeCommonSetParams writes the header shared by all storage commands:
// command, key, flags, expiration and value size (no trailing CRLF).
func writeCommonSetParams(w *bufio.Writer, cmd []byte, item *Item, scratchBuf *[]byte) bool {
	size := len(item.Value)
	return writeStr(w, cmd) && writeStr(w, item.Key) && writeWs(w) &&
		writeUint32(w, item.Flags, scratchBuf) && writeWs(w) &&
		writeExpiration(w, item.Expiration, scratchBuf) && writeWs(w) &&
		writeInt(w, size, scratchBuf)
}

// writeSetRequest serializes a full 'set' request, including the value block.
func writeSetRequest(w *bufio.Writer, item *Item, noreply bool, scratchBuf *[]byte) bool {
	return writeCommonSetParams(w, strSet, item, scratchBuf) &&
		writeNoreplyAndValue(w, noreply, item.Value)
}

// readSetResponse succeeds only on a STORED confirmation line.
func readSetResponse(r *bufio.Reader) bool {
	return matchStr(r, strStoredCrLf)
}

// WriteRequest serializes a blocking (reply-expected) 'set' request.
func (t *taskSet) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeSetRequest(w, t.item, false, scratchBuf)
}

// ReadResponse checks for the STORED confirmation.
func (t *taskSet) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	return readSetResponse(r)
}
// Stores the given item in the memcache server.
//
// Returns ErrMalformedKey for an invalid key and ErrNilValue when the
// item carries no value.
func (c *Client) Set(item *Item) error {
	switch {
	case !validateKey(item.Key):
		return ErrMalformedKey
	case item.Value == nil:
		return ErrNilValue
	}
	t := taskSet{item: item}
	return c.do(&t)
}
// taskAdd holds the state for a blocking 'add' request issued via
// Client.Add(). notStored is set when the key already exists.
type taskAdd struct {
	item      *Item
	notStored bool
	taskSync
}

// WriteRequest serializes an 'add' request (always reply-expected).
func (t *taskAdd) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeCommonSetParams(w, strAdd, t.item, scratchBuf) &&
		writeNoreplyAndValue(w, false, t.item.Value)
}
// ReadResponse parses the reply to an 'add' request: STORED means success,
// NOT_STORED means the key already exists on the server.
func (t *taskAdd) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	if !readLine(r, scratchBuf) {
		return false
	}
	switch line := *scratchBuf; {
	case bytes.Equal(line, strStored):
		return true
	case bytes.Equal(line, strNotStored):
		t.notStored = true
		return true
	default:
		log.Printf("Unexpected response for add() command: [%s]", line)
		return false
	}
}
// Stores the given item only if the server doesn't already hold data
// under the item.Key.
//
// Returns ErrAlreadyExists error if the server already holds data under
// the item.Key.
func (c *Client) Add(item *Item) error {
	switch {
	case !validateKey(item.Key):
		return ErrMalformedKey
	case item.Value == nil:
		return ErrNilValue
	}
	t := taskAdd{item: item}
	if err := c.do(&t); err != nil {
		return err
	}
	if t.notStored {
		return ErrAlreadyExists
	}
	return nil
}
// taskCas holds the state for a blocking 'cas' request issued via
// Client.Cas(). notFound / casidMismatch record the two failure modes.
type taskCas struct {
	item          *Item
	notFound      bool
	casidMismatch bool
	taskSync
}

// WriteRequest serializes a 'cas' request: the common storage header plus
// the item's casid, then the value block.
func (t *taskCas) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeCommonSetParams(w, strCas, t.item, scratchBuf) && writeWs(w) &&
		writeUint64(w, t.item.Casid, scratchBuf) && writeNoreplyAndValue(w, false, t.item.Value)
}
// ReadResponse parses the reply to a 'cas' request: STORED on success,
// NOT_FOUND when the key is missing, EXISTS on a casid mismatch.
func (t *taskCas) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	if !readLine(r, scratchBuf) {
		return false
	}
	switch line := *scratchBuf; {
	case bytes.Equal(line, strStored):
		return true
	case bytes.Equal(line, strNotFound):
		t.notFound = true
		return true
	case bytes.Equal(line, strExists):
		t.casidMismatch = true
		return true
	default:
		log.Printf("Unexpected response for cas() command: [%s]", line)
		return false
	}
}
// Stores the given item only if item.Casid matches casid for the given item
// on the server.
//
// Returns ErrCacheMiss if the server has no item with such a key.
// Returns ErrCasidMismatch if item on the server has other casid value.
func (c *Client) Cas(item *Item) error {
	switch {
	case !validateKey(item.Key):
		return ErrMalformedKey
	case item.Value == nil:
		return ErrNilValue
	}
	t := taskCas{item: item}
	if err := c.do(&t); err != nil {
		return err
	}
	switch {
	case t.notFound:
		return ErrCacheMiss
	case t.casidMismatch:
		return ErrCasidMismatch
	}
	return nil
}
// taskNowait is the base for fire-and-forget tasks: it never waits and
// expects no server response.
type taskNowait struct{}

func (t *taskNowait) Init() {}

func (t *taskNowait) Done(ok bool) {}

// Wait always reports success since there is no response to wait for.
func (t *taskNowait) Wait() bool {
	return true
}

// ReadResponse is a no-op: 'noreply' requests produce no server response.
func (t *taskNowait) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	return true
}

// taskSetNowait holds a copy of the item (by value) for an asynchronous
// 'set noreply' request issued via Client.SetNowait().
type taskSetNowait struct {
	item Item
	taskNowait
}

// WriteRequest serializes a 'set' request with the noreply flag.
func (t *taskSetNowait) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeSetRequest(w, &t.item, true, scratchBuf)
}
// The same as Client.Set(), but doesn't wait for operation completion.
//
// Do not modify slices pointed by item.Key and item.Value after passing
// to this function - it actually becomes an owner of these slices.
func (c *Client) SetNowait(item *Item) {
	// Invalid input is silently dropped: a fire-and-forget call has no
	// error path.
	if !validateKey(item.Key) || item.Value == nil {
		return
	}
	var t taskSetNowait
	t.item = *item
	// The result of do() is intentionally discarded (noreply semantics).
	c.do(&t)
}
// taskDelete holds the state for a blocking 'delete' request issued via
// Client.Delete(). itemDeleted reports whether the key existed.
type taskDelete struct {
	key         []byte
	itemDeleted bool
	taskSync
}

// writeDeleteRequest serializes a 'delete' request, optionally with the
// noreply flag.
func writeDeleteRequest(w *bufio.Writer, key []byte, noreply bool) bool {
	if !writeStr(w, strDelete) || !writeStr(w, key) {
		return false
	}
	if noreply {
		return writeStr(w, strWsNoreplyCrLf)
	}
	return writeCrLf(w)
}

// WriteRequest serializes a blocking (reply-expected) 'delete' request.
func (t *taskDelete) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeDeleteRequest(w, t.key, false)
}
// ReadResponse parses the reply to a 'delete' request: DELETED marks the
// item as removed, NOT_FOUND leaves itemDeleted false.
func (t *taskDelete) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	if !readLine(r, scratchBuf) {
		return false
	}
	switch line := *scratchBuf; {
	case bytes.Equal(line, strDeleted):
		t.itemDeleted = true
		return true
	case bytes.Equal(line, strNotFound):
		t.itemDeleted = false
		return true
	default:
		log.Printf("Unexpected response for 'delete' request: [%s]", line)
		return false
	}
}
// Deletes an item with the given key from memcache server.
//
// Returns ErrCacheMiss if there were no item with such key
// on the server.
func (c *Client) Delete(key []byte) error {
	if !validateKey(key) {
		return ErrMalformedKey
	}
	t := taskDelete{key: key}
	if err := c.do(&t); err != nil {
		return err
	}
	if t.itemDeleted {
		return nil
	}
	return ErrCacheMiss
}
// taskDeleteNowait holds the key for an asynchronous 'delete noreply'
// request issued via Client.DeleteNowait().
type taskDeleteNowait struct {
	key []byte
	taskNowait
}

// WriteRequest serializes a 'delete' request with the noreply flag.
func (t *taskDeleteNowait) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeDeleteRequest(w, t.key, true)
}

// The same as Client.Delete(), but doesn't wait for operation completion.
//
// Do not modify slice pointed by key after passing to this function -
// it actually becomes an owner of this slice.
func (c *Client) DeleteNowait(key []byte) {
	if !validateKey(key) {
		return
	}
	var t taskDeleteNowait
	t.key = key
	// Fire-and-forget: the result of do() is intentionally discarded.
	c.do(&t)
}
// taskFlushAllDelayed holds the state for a blocking delayed 'flush_all'.
type taskFlushAllDelayed struct {
	expiration time.Duration
	taskSync
}

// WriteRequest serializes a 'flush_all' request with an expiration delay.
func (t *taskFlushAllDelayed) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeStr(w, strFlushAllWs) && writeExpiration(w, t.expiration, scratchBuf) && writeCrLf(w)
}

// ReadResponse succeeds only on an OK reply.
func (t *taskFlushAllDelayed) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	return matchStr(r, strOkCrLf)
}

// Flushes all the items on the server after the given expiration delay.
func (c *Client) FlushAllDelayed(expiration time.Duration) error {
	var t taskFlushAllDelayed
	t.expiration = expiration
	return c.do(&t)
}

// taskFlushAll holds the state for a blocking immediate 'flush_all'.
type taskFlushAll struct {
	taskSync
}

// WriteRequest serializes a plain 'flush_all' request.
func (t *taskFlushAll) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeStr(w, strFlushAllCrLf)
}

// ReadResponse succeeds only on an OK reply.
func (t *taskFlushAll) ReadResponse(r *bufio.Reader, scratchBuf *[]byte) bool {
	return matchStr(r, strOkCrLf)
}

// Flushes all the items on the server.
func (c *Client) FlushAll() error {
	var t taskFlushAll
	return c.do(&t)
}

// taskFlushAllDelayedNowait is the fire-and-forget variant of
// taskFlushAllDelayed.
type taskFlushAllDelayedNowait struct {
	expiration time.Duration
	taskNowait
}

// WriteRequest serializes a delayed 'flush_all' with the noreply flag.
func (t *taskFlushAllDelayedNowait) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeStr(w, strFlushAllWs) && writeExpiration(w, t.expiration, scratchBuf) &&
		writeStr(w, strWsNoreplyCrLf)
}

// The same as Client.FlushAllDelayed(), but doesn't wait for operation
// completion.
func (c *Client) FlushAllDelayedNowait(expiration time.Duration) {
	var t taskFlushAllDelayedNowait
	t.expiration = expiration
	c.do(&t)
}

// taskFlushAllNowait is the fire-and-forget variant of taskFlushAll.
type taskFlushAllNowait struct {
	taskNowait
}

// WriteRequest serializes a 'flush_all' request with the noreply flag.
func (t *taskFlushAllNowait) WriteRequest(w *bufio.Writer, scratchBuf *[]byte) bool {
	return writeStr(w, strFlushAllNoreplyCrLf)
}

// The same as Client.FlushAll(), but doesn't wait for operation completion.
func (c *Client) FlushAllNowait() {
	var t taskFlushAllNowait
	c.do(&t)
}
|
package main
import (
"log"
"time"
_ "github.com/lib/pq"
"github.com/yulyulyharuka/todo2/model"
"github.com/yulyulyharuka/todo2/storage"
)
// type Customer struct {
// ID int
// Name string
// Address string
// Phone string
// Birthdate string
// }
// main seeds the DB-backed todo storage with three items, then prints a
// single todo fetched by ID followed by the full list. Any storage error
// aborts the program via log.Fatal.
func main() {
	// Obtain the database-backed storage implementation.
	memStore := storage.GetStorage(storage.StorageTypeDB)

	// Seed a few todos. Element types inside the slice literal are elided
	// per `gofmt -s` (they were previously written out redundantly).
	todos := []model.Todo{
		{ID: 1, Title: "first", Description: "First Todo", CreatedAt: time.Now()},
		{ID: 2, Title: "second", Description: "Second Todo", CreatedAt: time.Now()},
		{ID: 3, Title: "third", Description: "Third Todo", CreatedAt: time.Now()},
	}
	for _, o := range todos {
		if err := memStore.Create(o); err != nil {
			log.Fatal(err)
		}
	}

	// Fetch and print a single todo by ID.
	todo, err := memStore.Detail(2)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d : %s", todo.ID, todo.Description)

	// Fetch and print the full list.
	list, err := memStore.List()
	if err != nil {
		log.Fatal(err)
	}
	for _, elmt := range list {
		log.Printf("%d : %s", elmt.ID, elmt.Description)
	}
}
// Database section
// func connectDB() *sql.DB {
// db, err := sql.Open("postgres", "host=127.0.0.1 port=5432 user=postgres password=password dbname=db sslmode=disable")
// if err != nil {
// log.Fatal(err)
// }
// err = db.Ping()
// if err != nil {
// log.Fatal(err)
// }
// return db
// }
// func InsertDB(db *sql.DB, customer Customer) error {
// query := `INSERT INTO customers(id, name, address, phone)
// VALUES ($1, $2, $3, $4);`
// _, err := db.Exec(query, customer.ID, customer.Name, customer.Address, customer.Phone)
// log.Println("data added")
// return err
// }
// func List(db *sql.DB) ([]Customer, error) {
// query := "SELECT id,name,address, phone, birthdate FROM customers LIMIT 10;"
// rows, err := db.Query(query)
// if err != nil {
// log.Fatal(err)
// }
// defer rows.Close()
// var customers []Customer
// for rows.Next() {
// var customer Customer
// err := rows.Scan(&customer.ID, &customer.Name, &customer.Address, &customer.Phone, &customer.Birthdate)
// if err != nil {
// log.Fatal(err)
// }
// customers = append(customers, customer)
// }
// return customers, nil
// }
// func Get(db *sql.DB, id int) (Customer, error) {
// var customer Customer
// query := fmt.Sprintf("SELECT * FROM customers WHERE ID=%d;", id)
// err := db.QueryRow(query).Scan(&customer.ID, &customer.Name, &customer.Address, &customer.Phone, &customer.Birthdate)
// if err != nil {
// log.Fatal(err)
// }
// return customer, nil
// }
// func Update(db *sql.DB, customer Customer) error {
// query := "UPDATE customers SET name=$2, address=$3, phone=$4, birthdate=$5 WHERE id=$1;"
// _, err := db.Exec(query, customer.ID, customer.Name, customer.Address, customer.Phone, customer.Birthdate)
// if err != nil {
// log.Fatal(err)
// }
// log.Println("data updated")
// return nil
// }
// func Delete(db *sql.DB, id int) error {
// query := "DELETE FROM customers WHERE id=$1;"
// _, err := db.Exec(query, id)
// if err != nil {
// log.Fatal(err)
// }
// log.Println("data deleted")
// return nil
// }
|
package problem0188
import "testing"
// TestMaxProfit verifies maxProfit against LeetCode 188's sample case:
// with at most k=2 transactions over prices [3,2,6,5,0,3], the optimum is
// buy@2/sell@6 and buy@0/sell@3 for a total profit of 7.
//
// The previous version only logged the result without asserting anything,
// so a regression in maxProfit would never fail the test.
func TestMaxProfit(t *testing.T) {
	if got := maxProfit(2, []int{3, 2, 6, 5, 0, 3}); got != 7 {
		t.Errorf("maxProfit(2, [3 2 6 5 0 3]) = %d, want 7", got)
	}
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package appfile
import (
"context"
"os"
"path/filepath"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/yaml"
coreoam "github.com/oam-dev/kubevela/apis/core.oam.dev"
corev1beta1 "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/system"
// +kubebuilder:scaffold:imports
)
// Shared state for the envtest-based Ginkgo suite below; populated in
// BeforeSuite and cleaned up in AfterSuite.
var cfg *rest.Config
var scheme *runtime.Scheme
var k8sClient client.Client
var testEnv *envtest.Environment
var definitionDir string
var wd corev1beta1.WorkloadDefinition

// addonNamespace is created for the suite and deleted in AfterSuite.
var addonNamespace = "test-addon"

// TestAppFile wires Ginkgo into the standard "go test" runner.
func TestAppFile(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Cli Suite")
}
// BeforeSuite boots a local envtest control plane with the vela-core CRDs,
// builds a typed client, and installs two WorkloadDefinition fixtures into
// a dedicated namespace.
var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
	ctx := context.Background()
	By("bootstrapping test environment")
	// Always run against an envtest control plane, not a real cluster.
	useExistCluster := false
	testEnv = &envtest.Environment{
		ControlPlaneStartTimeout: time.Minute,
		ControlPlaneStopTimeout:  time.Minute,
		CRDDirectoryPaths:        []string{filepath.Join("..", "..", "charts", "vela-core", "crds")},
		UseExistingCluster:       &useExistCluster,
	}
	var err error
	cfg, err = testEnv.Start()
	Expect(err).ToNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())
	// Register core OAM, client-go built-in, and CRD types on the scheme.
	scheme = runtime.NewScheme()
	Expect(coreoam.AddToScheme(scheme)).NotTo(HaveOccurred())
	Expect(clientgoscheme.AddToScheme(scheme)).NotTo(HaveOccurred())
	Expect(crdv1.AddToScheme(scheme)).NotTo(HaveOccurred())
	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
	Expect(err).ToNot(HaveOccurred())
	Expect(k8sClient).ToNot(BeNil())
	definitionDir, err = system.GetCapabilityDir()
	Expect(err).Should(BeNil())
	Expect(os.MkdirAll(definitionDir, 0755)).Should(BeNil())
	// The namespace and definitions may linger from a previous run, so an
	// AlreadyExists result is accepted as success.
	Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: addonNamespace}})).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
	// Install the plain workload definition fixture.
	workloadData, err := os.ReadFile("testdata/workloadDef.yaml")
	Expect(err).Should(BeNil())
	Expect(yaml.Unmarshal(workloadData, &wd)).Should(BeNil())
	wd.Namespace = addonNamespace
	logf.Log.Info("Creating workload definition", "data", wd)
	Expect(k8sClient.Create(ctx, &wd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
	// Install the Terraform-based workload definition fixture.
	def, err := os.ReadFile("testdata/terraform-aliyun-oss-workloadDefinition.yaml")
	Expect(err).Should(BeNil())
	var terraformDefinition corev1beta1.WorkloadDefinition
	Expect(yaml.Unmarshal(def, &terraformDefinition)).Should(BeNil())
	terraformDefinition.Namespace = addonNamespace
	logf.Log.Info("Creating workload definition", "data", terraformDefinition)
	Expect(k8sClient.Create(ctx, &terraformDefinition)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
})
// AfterSuite removes the suite's namespace and workload definition, then
// stops the envtest control plane.
var _ = AfterSuite(func() {
	By("tearing down the test environment")
	// Best-effort cleanup: deletion errors are deliberately ignored since
	// the control plane is torn down immediately afterwards.
	_ = k8sClient.Delete(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: addonNamespace}})
	_ = k8sClient.Delete(context.Background(), &wd)
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
})
|
// Copyright 2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !privileged_tests
package mock
import (
"testing"
"time"
"gopkg.in/check.v1"
)
// Test hooks gocheck into the standard "go test" runner.
func Test(t *testing.T) {
	check.TestingT(t)
}

// MockSuite exercises the mock metrics API.
type MockSuite struct{}

var _ = check.Suite(&MockSuite{})
// TestMock checks every write/read accessor pair of the mock metrics API:
// each Inc/Add/Set/Observe call must be visible through the matching getter.
func (e *MockSuite) TestMock(c *check.C) {
	api := NewMockMetrics()
	api.IncENIAllocationAttempt("foo", "s-1")
	c.Assert(api.ENIAllocationAttempts("foo", "s-1"), check.Equals, int64(1))
	// IP allocations accumulate per subnet.
	api.AddIPAllocation("s-1", 10)
	api.AddIPAllocation("s-1", 20)
	c.Assert(api.IPAllocations("s-1"), check.Equals, int64(30))
	api.SetAllocatedIPs("used", 200)
	c.Assert(api.AllocatedIPs("used"), check.Equals, 200)
	api.SetAvailableENIs(10)
	c.Assert(api.AvailableENIs(), check.Equals, 10)
	api.SetNodes("at-capacity", 5)
	c.Assert(api.Nodes("at-capacity"), check.Equals, 5)
	api.ObserveEC2APICall("DescribeNetworkInterfaces", "success", 2.0)
	c.Assert(api.EC2APICall("DescribeNetworkInterfaces", "success"), check.Equals, 2.0)
	// Rate-limit observations accumulate per operation.
	api.ObserveEC2RateLimit("DescribeNetworkInterfaces", time.Second)
	api.ObserveEC2RateLimit("DescribeNetworkInterfaces", time.Second)
	c.Assert(api.EC2RateLimit("DescribeNetworkInterfaces"), check.Equals, 2*time.Second)
	api.IncResyncCount()
	c.Assert(api.ResyncCount(), check.Equals, int64(1))
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/lib/pq"
"log"
"net/http"
"os"
)
// HelloWorld is a demo HTTP handler: it greets the caller, connects to a
// Postgres database configured via DB_* environment variables, bumps a
// crude per-request counter in the 'counter' table, and dumps the process
// environment plus hostname. Every error is reported to the client and
// aborts the remaining steps.
func HelloWorld(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("Hello World!\n"))
	fmt.Fprintf(w, "os.Args: %#v\n", os.Args)
	// Build "<engine>://<user>:<pass>@<host>:<port>/<db>" from the environment.
	dburl := fmt.Sprintf("%s://%s:%s@%s:%s/%s", os.Getenv("DB_ENGINE"), os.Getenv("DB_USERNAME"), os.Getenv("DB_PASSWORD"), os.Getenv("DB_HOST"), os.Getenv("DB_PORT"), os.Getenv("DB_NAME"))
	fmt.Fprintf(w, "Connect to %s\n", dburl)
	db, err := sql.Open("postgres", dburl)
	if err != nil {
		fmt.Fprintf(w, "Error: %s\n", err)
		return
	}
	defer db.Close()
	fmt.Fprintf(w, "Connected.\n")
	_, err = db.Exec("CREATE TABLE IF NOT EXISTS counter (count integer);")
	if err != nil {
		fmt.Fprintf(w, "Error: %s\n", err)
		return
	}
	// Each request inserts one row holding the previous row count, so the
	// row count below grows by one per request.
	_, err = db.Exec("INSERT INTO counter SELECT count(*) FROM counter")
	if err != nil {
		fmt.Fprintf(w, "Error: %s\n", err)
		return
	}
	// Fix: the Scan error was previously ignored, which silently reported
	// count as 0 whenever the query or scan failed.
	var count int
	if err = db.QueryRow("SELECT count(*) FROM counter").Scan(&count); err != nil {
		fmt.Fprintf(w, "Error: %s\n", err)
		return
	}
	fmt.Fprintf(w, "count: %#v\n", count)
	fmt.Fprintf(w, "\nEnvironment:\n")
	for _, env := range os.Environ() {
		fmt.Fprintf(w, "%s\n", env)
	}
	fmt.Fprintf(w, "\n")
	hostname, err := os.Hostname()
	if err != nil {
		fmt.Fprintf(w, "Error: %s\n", err)
		return
	}
	fmt.Fprintf(w, "Generated on %s\n", hostname)
}
// main registers HelloWorld for every path and serves HTTP on port 80;
// log.Fatal exits the process if the listener fails.
func main() {
	fmt.Println("hello world")
	http.HandleFunc("/", HelloWorld)
	log.Fatal(http.ListenAndServe(":80", nil))
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/networkservices/alpha/networkservices_alpha_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha"
)
// GrpcRouteServer implements the gRPC interface for GrpcRoute.
type GrpcRouteServer struct{}

// ProtoToGrpcRouteRulesMatchesMethodTypeEnum converts a GrpcRouteRulesMatchesMethodTypeEnum enum from its proto representation.
func ProtoToNetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum(e alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum) *alpha.GrpcRouteRulesMatchesMethodTypeEnum {
	// Zero is the proto "unspecified" value; map it to a nil pointer.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum_name[int32(e)]; ok {
		// Strip the proto type-name prefix to recover the bare enum value.
		e := alpha.GrpcRouteRulesMatchesMethodTypeEnum(n[len("NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToGrpcRouteRulesMatchesHeadersTypeEnum converts a GrpcRouteRulesMatchesHeadersTypeEnum enum from its proto representation.
func ProtoToNetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum(e alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum) *alpha.GrpcRouteRulesMatchesHeadersTypeEnum {
	// Zero is the proto "unspecified" value; map it to a nil pointer.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum_name[int32(e)]; ok {
		// Strip the proto type-name prefix to recover the bare enum value.
		e := alpha.GrpcRouteRulesMatchesHeadersTypeEnum(n[len("NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToGrpcRouteRules converts a GrpcRouteRules object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRules(p *alphapb.NetworkservicesAlphaGrpcRouteRules) *alpha.GrpcRouteRules {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRules{
		Action: ProtoToNetworkservicesAlphaGrpcRouteRulesAction(p.GetAction()),
	}
	for _, r := range p.GetMatches() {
		obj.Matches = append(obj.Matches, *ProtoToNetworkservicesAlphaGrpcRouteRulesMatches(r))
	}
	return obj
}

// ProtoToGrpcRouteRulesMatches converts a GrpcRouteRulesMatches object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRulesMatches(p *alphapb.NetworkservicesAlphaGrpcRouteRulesMatches) *alpha.GrpcRouteRulesMatches {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRulesMatches{
		Method: ProtoToNetworkservicesAlphaGrpcRouteRulesMatchesMethod(p.GetMethod()),
	}
	for _, r := range p.GetHeaders() {
		obj.Headers = append(obj.Headers, *ProtoToNetworkservicesAlphaGrpcRouteRulesMatchesHeaders(r))
	}
	return obj
}

// ProtoToGrpcRouteRulesMatchesMethod converts a GrpcRouteRulesMatchesMethod object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRulesMatchesMethod(p *alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethod) *alpha.GrpcRouteRulesMatchesMethod {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRulesMatchesMethod{
		Type:          ProtoToNetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum(p.GetType()),
		GrpcService:   dcl.StringOrNil(p.GetGrpcService()),
		GrpcMethod:    dcl.StringOrNil(p.GetGrpcMethod()),
		CaseSensitive: dcl.Bool(p.GetCaseSensitive()),
	}
	return obj
}

// ProtoToGrpcRouteRulesMatchesHeaders converts a GrpcRouteRulesMatchesHeaders object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRulesMatchesHeaders(p *alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeaders) *alpha.GrpcRouteRulesMatchesHeaders {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRulesMatchesHeaders{
		Type:  ProtoToNetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum(p.GetType()),
		Key:   dcl.StringOrNil(p.GetKey()),
		Value: dcl.StringOrNil(p.GetValue()),
	}
	return obj
}
// ProtoToGrpcRouteRulesAction converts a GrpcRouteRulesAction object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRulesAction(p *alphapb.NetworkservicesAlphaGrpcRouteRulesAction) *alpha.GrpcRouteRulesAction {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRulesAction{
		FaultInjectionPolicy: ProtoToNetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicy(p.GetFaultInjectionPolicy()),
		Timeout:              dcl.StringOrNil(p.GetTimeout()),
		RetryPolicy:          ProtoToNetworkservicesAlphaGrpcRouteRulesActionRetryPolicy(p.GetRetryPolicy()),
	}
	for _, r := range p.GetDestinations() {
		obj.Destinations = append(obj.Destinations, *ProtoToNetworkservicesAlphaGrpcRouteRulesActionDestinations(r))
	}
	return obj
}

// ProtoToGrpcRouteRulesActionDestinations converts a GrpcRouteRulesActionDestinations object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRulesActionDestinations(p *alphapb.NetworkservicesAlphaGrpcRouteRulesActionDestinations) *alpha.GrpcRouteRulesActionDestinations {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRulesActionDestinations{
		Weight:      dcl.Int64OrNil(p.GetWeight()),
		ServiceName: dcl.StringOrNil(p.GetServiceName()),
	}
	return obj
}

// ProtoToGrpcRouteRulesActionFaultInjectionPolicy converts a GrpcRouteRulesActionFaultInjectionPolicy object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicy(p *alphapb.NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicy) *alpha.GrpcRouteRulesActionFaultInjectionPolicy {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRulesActionFaultInjectionPolicy{
		Delay: ProtoToNetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyDelay(p.GetDelay()),
		Abort: ProtoToNetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyAbort(p.GetAbort()),
	}
	return obj
}

// ProtoToGrpcRouteRulesActionFaultInjectionPolicyDelay converts a GrpcRouteRulesActionFaultInjectionPolicyDelay object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyDelay(p *alphapb.NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyDelay) *alpha.GrpcRouteRulesActionFaultInjectionPolicyDelay {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRulesActionFaultInjectionPolicyDelay{
		FixedDelay: dcl.StringOrNil(p.GetFixedDelay()),
		Percentage: dcl.Int64OrNil(p.GetPercentage()),
	}
	return obj
}

// ProtoToGrpcRouteRulesActionFaultInjectionPolicyAbort converts a GrpcRouteRulesActionFaultInjectionPolicyAbort object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyAbort(p *alphapb.NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyAbort) *alpha.GrpcRouteRulesActionFaultInjectionPolicyAbort {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRulesActionFaultInjectionPolicyAbort{
		HttpStatus: dcl.Int64OrNil(p.GetHttpStatus()),
		Percentage: dcl.Int64OrNil(p.GetPercentage()),
	}
	return obj
}

// ProtoToGrpcRouteRulesActionRetryPolicy converts a GrpcRouteRulesActionRetryPolicy object from its proto representation.
// A nil proto yields a nil object.
func ProtoToNetworkservicesAlphaGrpcRouteRulesActionRetryPolicy(p *alphapb.NetworkservicesAlphaGrpcRouteRulesActionRetryPolicy) *alpha.GrpcRouteRulesActionRetryPolicy {
	if p == nil {
		return nil
	}
	obj := &alpha.GrpcRouteRulesActionRetryPolicy{
		NumRetries: dcl.Int64OrNil(p.GetNumRetries()),
	}
	for _, r := range p.GetRetryConditions() {
		obj.RetryConditions = append(obj.RetryConditions, r)
	}
	return obj
}
// ProtoToGrpcRoute converts a GrpcRoute resource from its proto representation.
// Scalar fields become pointers (nil for proto zero values); repeated fields
// are copied element by element.
func ProtoToGrpcRoute(p *alphapb.NetworkservicesAlphaGrpcRoute) *alpha.GrpcRoute {
	obj := &alpha.GrpcRoute{
		Name:        dcl.StringOrNil(p.GetName()),
		CreateTime:  dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:  dcl.StringOrNil(p.GetUpdateTime()),
		Description: dcl.StringOrNil(p.GetDescription()),
		Project:     dcl.StringOrNil(p.GetProject()),
		Location:    dcl.StringOrNil(p.GetLocation()),
		SelfLink:    dcl.StringOrNil(p.GetSelfLink()),
	}
	for _, r := range p.GetHostnames() {
		obj.Hostnames = append(obj.Hostnames, r)
	}
	for _, r := range p.GetMeshes() {
		obj.Meshes = append(obj.Meshes, r)
	}
	for _, r := range p.GetGateways() {
		obj.Gateways = append(obj.Gateways, r)
	}
	for _, r := range p.GetRules() {
		obj.Rules = append(obj.Rules, *ProtoToNetworkservicesAlphaGrpcRouteRules(r))
	}
	return obj
}
// GrpcRouteRulesMatchesMethodTypeEnumToProto converts a GrpcRouteRulesMatchesMethodTypeEnum enum to its proto representation.
// A nil or unknown value maps to the proto zero (unspecified) enum.
func NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnumToProto(e *alpha.GrpcRouteRulesMatchesMethodTypeEnum) alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum {
	if e == nil {
		return alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum(0)
	}
	// Look up the value under its proto name, i.e. with the type-name prefix
	// prepended back.
	if v, ok := alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum_value["GrpcRouteRulesMatchesMethodTypeEnum"+string(*e)]; ok {
		return alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum(v)
	}
	return alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnum(0)
}

// GrpcRouteRulesMatchesHeadersTypeEnumToProto converts a GrpcRouteRulesMatchesHeadersTypeEnum enum to its proto representation.
// A nil or unknown value maps to the proto zero (unspecified) enum.
func NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnumToProto(e *alpha.GrpcRouteRulesMatchesHeadersTypeEnum) alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum {
	if e == nil {
		return alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum(0)
	}
	// Look up the value under its proto name, i.e. with the type-name prefix
	// prepended back.
	if v, ok := alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum_value["GrpcRouteRulesMatchesHeadersTypeEnum"+string(*e)]; ok {
		return alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum(v)
	}
	return alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnum(0)
}
// GrpcRouteRulesToProto converts a GrpcRouteRules object to its proto representation.
// A nil object yields a nil proto.
func NetworkservicesAlphaGrpcRouteRulesToProto(o *alpha.GrpcRouteRules) *alphapb.NetworkservicesAlphaGrpcRouteRules {
	if o == nil {
		return nil
	}
	p := &alphapb.NetworkservicesAlphaGrpcRouteRules{}
	p.SetAction(NetworkservicesAlphaGrpcRouteRulesActionToProto(o.Action))
	sMatches := make([]*alphapb.NetworkservicesAlphaGrpcRouteRulesMatches, len(o.Matches))
	for i, r := range o.Matches {
		sMatches[i] = NetworkservicesAlphaGrpcRouteRulesMatchesToProto(&r)
	}
	p.SetMatches(sMatches)
	return p
}

// GrpcRouteRulesMatchesToProto converts a GrpcRouteRulesMatches object to its proto representation.
// A nil object yields a nil proto.
func NetworkservicesAlphaGrpcRouteRulesMatchesToProto(o *alpha.GrpcRouteRulesMatches) *alphapb.NetworkservicesAlphaGrpcRouteRulesMatches {
	if o == nil {
		return nil
	}
	p := &alphapb.NetworkservicesAlphaGrpcRouteRulesMatches{}
	p.SetMethod(NetworkservicesAlphaGrpcRouteRulesMatchesMethodToProto(o.Method))
	sHeaders := make([]*alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeaders, len(o.Headers))
	for i, r := range o.Headers {
		sHeaders[i] = NetworkservicesAlphaGrpcRouteRulesMatchesHeadersToProto(&r)
	}
	p.SetHeaders(sHeaders)
	return p
}

// GrpcRouteRulesMatchesMethodToProto converts a GrpcRouteRulesMatchesMethod object to its proto representation.
// A nil object yields a nil proto; nil scalar pointers become empty values.
func NetworkservicesAlphaGrpcRouteRulesMatchesMethodToProto(o *alpha.GrpcRouteRulesMatchesMethod) *alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethod {
	if o == nil {
		return nil
	}
	p := &alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesMethod{}
	p.SetType(NetworkservicesAlphaGrpcRouteRulesMatchesMethodTypeEnumToProto(o.Type))
	p.SetGrpcService(dcl.ValueOrEmptyString(o.GrpcService))
	p.SetGrpcMethod(dcl.ValueOrEmptyString(o.GrpcMethod))
	p.SetCaseSensitive(dcl.ValueOrEmptyBool(o.CaseSensitive))
	return p
}

// GrpcRouteRulesMatchesHeadersToProto converts a GrpcRouteRulesMatchesHeaders object to its proto representation.
// A nil object yields a nil proto; nil scalar pointers become empty values.
func NetworkservicesAlphaGrpcRouteRulesMatchesHeadersToProto(o *alpha.GrpcRouteRulesMatchesHeaders) *alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeaders {
	if o == nil {
		return nil
	}
	p := &alphapb.NetworkservicesAlphaGrpcRouteRulesMatchesHeaders{}
	p.SetType(NetworkservicesAlphaGrpcRouteRulesMatchesHeadersTypeEnumToProto(o.Type))
	p.SetKey(dcl.ValueOrEmptyString(o.Key))
	p.SetValue(dcl.ValueOrEmptyString(o.Value))
	return p
}
// GrpcRouteRulesActionToProto converts a GrpcRouteRulesAction object to its proto representation.
func NetworkservicesAlphaGrpcRouteRulesActionToProto(o *alpha.GrpcRouteRulesAction) *alphapb.NetworkservicesAlphaGrpcRouteRulesAction {
if o == nil {
return nil
}
p := &alphapb.NetworkservicesAlphaGrpcRouteRulesAction{}
p.SetFaultInjectionPolicy(NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyToProto(o.FaultInjectionPolicy))
p.SetTimeout(dcl.ValueOrEmptyString(o.Timeout))
p.SetRetryPolicy(NetworkservicesAlphaGrpcRouteRulesActionRetryPolicyToProto(o.RetryPolicy))
sDestinations := make([]*alphapb.NetworkservicesAlphaGrpcRouteRulesActionDestinations, len(o.Destinations))
for i, r := range o.Destinations {
sDestinations[i] = NetworkservicesAlphaGrpcRouteRulesActionDestinationsToProto(&r)
}
p.SetDestinations(sDestinations)
return p
}
// GrpcRouteRulesActionDestinationsToProto converts a GrpcRouteRulesActionDestinations object to its proto representation.
func NetworkservicesAlphaGrpcRouteRulesActionDestinationsToProto(o *alpha.GrpcRouteRulesActionDestinations) *alphapb.NetworkservicesAlphaGrpcRouteRulesActionDestinations {
if o == nil {
return nil
}
p := &alphapb.NetworkservicesAlphaGrpcRouteRulesActionDestinations{}
p.SetWeight(dcl.ValueOrEmptyInt64(o.Weight))
p.SetServiceName(dcl.ValueOrEmptyString(o.ServiceName))
return p
}
// GrpcRouteRulesActionFaultInjectionPolicyToProto converts a GrpcRouteRulesActionFaultInjectionPolicy object to its proto representation.
// A nil input yields a nil proto; nested Delay/Abort propagate their own nils.
func NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyToProto(o *alpha.GrpcRouteRulesActionFaultInjectionPolicy) *alphapb.NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicy {
	if o == nil {
		return nil
	}
	p := &alphapb.NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicy{}
	p.SetDelay(NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyDelayToProto(o.Delay))
	p.SetAbort(NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyAbortToProto(o.Abort))
	return p
}
// GrpcRouteRulesActionFaultInjectionPolicyDelayToProto converts a GrpcRouteRulesActionFaultInjectionPolicyDelay object to its proto representation.
// A nil input yields a nil proto.
func NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyDelayToProto(o *alpha.GrpcRouteRulesActionFaultInjectionPolicyDelay) *alphapb.NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyDelay {
	if o == nil {
		return nil
	}
	p := &alphapb.NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyDelay{}
	p.SetFixedDelay(dcl.ValueOrEmptyString(o.FixedDelay))
	p.SetPercentage(dcl.ValueOrEmptyInt64(o.Percentage))
	return p
}
// GrpcRouteRulesActionFaultInjectionPolicyAbortToProto converts a GrpcRouteRulesActionFaultInjectionPolicyAbort object to its proto representation.
// A nil input yields a nil proto.
func NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyAbortToProto(o *alpha.GrpcRouteRulesActionFaultInjectionPolicyAbort) *alphapb.NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyAbort {
	if o == nil {
		return nil
	}
	p := &alphapb.NetworkservicesAlphaGrpcRouteRulesActionFaultInjectionPolicyAbort{}
	p.SetHttpStatus(dcl.ValueOrEmptyInt64(o.HttpStatus))
	p.SetPercentage(dcl.ValueOrEmptyInt64(o.Percentage))
	return p
}
// GrpcRouteRulesActionRetryPolicyToProto converts a GrpcRouteRulesActionRetryPolicy object to its proto representation.
// A nil input yields a nil proto; an empty RetryConditions slice is emitted as empty, not nil.
func NetworkservicesAlphaGrpcRouteRulesActionRetryPolicyToProto(o *alpha.GrpcRouteRulesActionRetryPolicy) *alphapb.NetworkservicesAlphaGrpcRouteRulesActionRetryPolicy {
	if o == nil {
		return nil
	}
	p := &alphapb.NetworkservicesAlphaGrpcRouteRulesActionRetryPolicy{}
	p.SetNumRetries(dcl.ValueOrEmptyInt64(o.NumRetries))
	sRetryConditions := make([]string, len(o.RetryConditions))
	for i, r := range o.RetryConditions {
		sRetryConditions[i] = r
	}
	p.SetRetryConditions(sRetryConditions)
	return p
}
// GrpcRouteToProto converts a GrpcRoute resource to its proto representation.
// Scalar fields use dcl zero-value defaults; maps and slices are copied so
// the proto does not alias the resource's collections.
func GrpcRouteToProto(resource *alpha.GrpcRoute) *alphapb.NetworkservicesAlphaGrpcRoute {
	p := &alphapb.NetworkservicesAlphaGrpcRoute{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	p.SetSelfLink(dcl.ValueOrEmptyString(resource.SelfLink))
	mLabels := make(map[string]string, len(resource.Labels))
	for k, r := range resource.Labels {
		mLabels[k] = r
	}
	p.SetLabels(mLabels)
	sHostnames := make([]string, len(resource.Hostnames))
	for i, r := range resource.Hostnames {
		sHostnames[i] = r
	}
	p.SetHostnames(sHostnames)
	sMeshes := make([]string, len(resource.Meshes))
	for i, r := range resource.Meshes {
		sMeshes[i] = r
	}
	p.SetMeshes(sMeshes)
	sGateways := make([]string, len(resource.Gateways))
	for i, r := range resource.Gateways {
		sGateways[i] = r
	}
	p.SetGateways(sGateways)
	sRules := make([]*alphapb.NetworkservicesAlphaGrpcRouteRules, len(resource.Rules))
	// &r is safe: the converter consumes the pointer before the next iteration.
	for i, r := range resource.Rules {
		sRules[i] = NetworkservicesAlphaGrpcRouteRulesToProto(&r)
	}
	p.SetRules(sRules)
	return p
}
// applyGrpcRoute handles the gRPC request by passing it to the underlying GrpcRoute Apply() method.
// It decodes the request proto into a DCL resource, applies it, and re-encodes the result.
func (s *GrpcRouteServer) applyGrpcRoute(ctx context.Context, c *alpha.Client, request *alphapb.ApplyNetworkservicesAlphaGrpcRouteRequest) (*alphapb.NetworkservicesAlphaGrpcRoute, error) {
	p := ProtoToGrpcRoute(request.GetResource())
	res, err := c.ApplyGrpcRoute(ctx, p)
	if err != nil {
		return nil, err
	}
	r := GrpcRouteToProto(res)
	return r, nil
}
// ApplyNetworkservicesAlphaGrpcRoute handles the gRPC request by passing it to the underlying GrpcRoute Apply() method.
// It builds a per-request client from the request's service account file.
func (s *GrpcRouteServer) ApplyNetworkservicesAlphaGrpcRoute(ctx context.Context, request *alphapb.ApplyNetworkservicesAlphaGrpcRouteRequest) (*alphapb.NetworkservicesAlphaGrpcRoute, error) {
	cl, err := createConfigGrpcRoute(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyGrpcRoute(ctx, cl, request)
}
// DeleteNetworkservicesAlphaGrpcRoute handles the gRPC request by passing it to the underlying GrpcRoute Delete() method.
// The empty response is returned regardless; the delete error (if any) rides alongside it.
func (s *GrpcRouteServer) DeleteNetworkservicesAlphaGrpcRoute(ctx context.Context, request *alphapb.DeleteNetworkservicesAlphaGrpcRouteRequest) (*emptypb.Empty, error) {
	cl, err := createConfigGrpcRoute(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteGrpcRoute(ctx, ProtoToGrpcRoute(request.GetResource()))
}
// ListNetworkservicesAlphaGrpcRoute handles the gRPC request by passing it to the underlying GrpcRouteList() method.
// Results are converted one by one into protos before being wrapped in the response.
func (s *GrpcRouteServer) ListNetworkservicesAlphaGrpcRoute(ctx context.Context, request *alphapb.ListNetworkservicesAlphaGrpcRouteRequest) (*alphapb.ListNetworkservicesAlphaGrpcRouteResponse, error) {
	cl, err := createConfigGrpcRoute(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListGrpcRoute(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	var protos []*alphapb.NetworkservicesAlphaGrpcRoute
	for _, r := range resources.Items {
		rp := GrpcRouteToProto(r)
		protos = append(protos, rp)
	}
	p := &alphapb.ListNetworkservicesAlphaGrpcRouteResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigGrpcRoute builds an alpha API client authenticated with the
// given service account file. The "dcl-test" user agent tags these calls.
func createConfigGrpcRoute(ctx context.Context, service_account_file string) (*alpha.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return alpha.NewClient(conf), nil
}
|
package base
import (
"sort"
"strconv"
"time"
)
// Tick direction classifications. The blank first value reserves 0, so a
// zero Tick.Type means "unclassified".
// NOTE(review): underscore names (Buy_tick) are non-idiomatic Go, but they
// are exported API of this package and must not be renamed here.
const (
	_ int = iota
	Buy_tick
	Sell_tick
	Eq_tick
)
// Tick is a single trade record.
type Tick struct {
	Time     time.Time
	Price    int // parsed via ParseCent — presumably price in cents; confirm
	Change   int // parsed via ParseCent — presumably change in cents; confirm
	Volume   int // volume in lots (手)
	Turnover int // turnover in yuan (元)
	Type     int // one of Buy_tick / Sell_tick / Eq_tick (0 = unclassified)
}
// RealtimeTick extends Tick with realtime quote fields.
// NOTE(review): HL is an embedded type declared elsewhere in this package.
type RealtimeTick struct {
	Tick
	HL
	Buyone  int // best-bid figure — presumed from the name; confirm with caller
	Sellone int // best-ask figure — presumed from the name; confirm with caller
	Status  int // raw trading-status code; see SetStatus for the mapping
	Name    string
}
// TickSlice implements sort.Interface over Ticks, ordered by timestamp.
type TickSlice []Tick

func (p TickSlice) Len() int           { return len(p) }
func (p TickSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p TickSlice) Less(i, j int) bool { return p[i].Time.Before(p[j].Time) }

// SearchTickSlice returns the smallest index i with a[i].Time >= t, or
// len(a) when every element is earlier; a must already be sorted by time.
func SearchTickSlice(a TickSlice, t time.Time) int {
	return sort.Search(len(a), func(i int) bool {
		return !a[i].Time.Before(t) // equivalent to a[i].Time >= t
	})
}

// Search returns the insertion index for t and whether the element at that
// index has exactly that timestamp.
func (p TickSlice) Search(t time.Time) (int, bool) {
	i := SearchTickSlice(p, t)
	found := i < p.Len() && t.Equal(p[i].Time)
	return i, found
}
// FromString fills the tick from raw byte fields parsed out of a quote line.
// date supplies the calendar day; timestr carries only the HH:MM:SS part.
// Parse failures are deliberately ignored and leave zero values, as before.
func (p *Tick) FromString(date time.Time, timestr, price, change, volume, turnover, typestr []byte) {
	clock, _ := time.Parse("15:04:05", string(timestr))
	p.Time = date.Add(time.Duration(TheSeconds(clock)) * time.Second)
	p.Price = ParseCent(string(price))
	p.Change = ParseCent(string(change))
	p.Volume, _ = strconv.Atoi(string(volume))
	p.Turnover, _ = strconv.Atoi(string(turnover))
	// Classify direction; English and Chinese markers map to the same types
	// (买盘 = buy, 卖盘 = sell, 中性盘 = neutral).
	switch string(typestr) {
	case "UP", "买盘":
		p.Type = Buy_tick
	case "DOWN", "卖盘":
		p.Type = Sell_tick
	case "EQUAL", "中性盘":
		p.Type = Eq_tick
	}
}
func TheSeconds(t time.Time) int {
return t.Hour()*60*60 + t.Minute()*60 + t.Second()
}
// SetStatus parses the raw status code from s. Known codes (per the feed):
// "00" trading, "01" 1h halt, "02"/"03" suspended, "04" temporary halt,
// "05" half-day halt, "07" paused, "-1" no record, "-2" unlisted, "-3" delisted.
// Code 3 is folded into 2 because both mean "suspended".
func (p *RealtimeTick) SetStatus(s []byte) {
	code, _ := strconv.Atoi(string(s))
	if code == 3 {
		code = 2
	}
	p.Status = code
}
|
package linereader
import (
"bufio"
"fmt"
"io"
"strings"
)
type srcError struct {
file string
line int
snippet string
msg string
}
func (e *srcError) Error() string {
return fmt.Sprintf("%s:%d: error: %s\nin:\n%s", e.file, e.line, e.msg, e.snippet)
}
// LineReader read text stream line by line
type LineReader struct {
R *bufio.Reader
FileName string
LineNum int
}
// ReadLine read next line, for example lr.ReadLine('\n', true, true)
func (lr *LineReader) ReadLine(lineEnd byte, trim bool, skipEmpty bool) (s string, err error) {
for {
lr.LineNum++
s, err = lr.R.ReadString(lineEnd)
if err != nil && s == "" {
return "", err
}
err = nil
if trim {
s = strings.TrimSpace(s)
}
if skipEmpty && s == "" {
continue
}
return
}
}
// Error create an error with current file name and line number information
func (lr *LineReader) Error(snippet, msg string) error {
return &srcError{lr.FileName, lr.LineNum, snippet, msg}
}
// New create LineReader from io.Reader,
// note: use &LineReader{br, filename, 0} to create from bufio.Reader
func New(r io.Reader, filename string) *LineReader {
br := bufio.NewReader(r)
return &LineReader{br, filename, 0}
}
|
package hermes
import (
"encoding/base64"
"errors"
"fmt"
"math/rand"
"sync/atomic"
"time"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"gopkg.in/olivere/elastic.v5"
)
var (
	// ErrNilHostParameter defines you cannot have an empty elasticsearch host address
	ErrNilHostParameter = errors.New("missing host parameter")
	// ErrNilIndexParameter defines you cannot have an empty elasticsearch index name
	ErrNilIndexParameter = errors.New("missing index parameter")
	// ErrNilTypeParameter defines you cannot have an empty elasticsearch type name
	ErrNilTypeParameter = errors.New("missing type parameters")
	// ErrNegativeNParameter defines you cannot have a non-positive document count
	// (Store also rejects n == 0 with this error).
	ErrNegativeNParameter = errors.New("n parameter cannot be negative")
)
type (
	// Document struct models a single document that will be ingested into the
	// elasticsearch index/type.
	Document struct {
		ID          string    `json:"id"`
		Title       string    `json:"title"`
		Description string    `json:"description"`
		Content     string    `json:"content"`
		Link        string    `json:"link"`
		Tag         string    `json:"tag"`
		Time        time.Time `json:"time"`
	}
	// IngestionDocument struct to model our ingestion set for multiple types and Documents
	// for our index
	IngestionDocument struct {
		Documents []Document
	}
	// Index struct to model each index ingestion set for our elasticsearch data
	Index struct {
		Host      string
		Index     string
		Documents []Document
	}
	// The Elasticsearch struct type is to model the storage into a single Elasticsearch node.
	// It must have a host, index and type to ingest data to.
	Elasticsearch struct {
		Host, Index, Type string
	}
)
// Store ingests docs into the configured Elasticsearch host/index/type using
// two pipelined goroutines: the first assigns random IDs and emits documents,
// the second bulk-indexes them in batches of 1000. It prints simple progress
// plus final throughput stats and returns the first error encountered.
//
// Note: n only gates execution (it must be positive); the number of ingested
// documents is len(docs).
func (e *Elasticsearch) Store(n int, docs []Document) error {
	rand.Seed(time.Now().UnixNano())
	if e.Host == "" {
		return ErrNilHostParameter
	}
	if e.Index == "" {
		return ErrNilIndexParameter
	}
	if e.Type == "" {
		return ErrNilTypeParameter
	}
	// n == 0 is rejected as well, even though the sentinel speaks of "negative".
	if n <= 0 {
		return ErrNegativeNParameter
	}
	// Create an Elasticsearch client.
	client, err := elastic.NewClient(elastic.SetURL(e.Host), elastic.SetSniff(true))
	if err != nil {
		return err
	}
	// Setup a group of goroutines from the errgroup package.
	g, ctx := errgroup.WithContext(context.TODO())
	// The first goroutine emits documents to the second via docsc; the second
	// bulk-inserts them.
	docsc := make(chan Document)
	begin := time.Now()
	// Goroutine to traverse documents.
	g.Go(func() error {
		defer close(docsc)
		buf := make([]byte, 32)
		for _, v := range docs {
			// v is a copy, so assigning the random ID never mutates the
			// caller's slice.
			_, err := rand.Read(buf)
			if err != nil {
				return err
			}
			v.ID = base64.URLEncoding.EncodeToString(buf)
			fmt.Printf("new ID: %s\n", v.ID)
			// Send over to 2nd goroutine, or cancel.
			select {
			case docsc <- v:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})
	// Second goroutine consumes the documents and bulk inserts into ES.
	var total uint64
	g.Go(func() error {
		bulk := client.Bulk().Index(e.Index).Type(e.Type)
		for d := range docsc {
			// Simple progress.
			current := atomic.AddUint64(&total, 1)
			dur := time.Since(begin).Seconds()
			sec := int(dur)
			pps := int64(float64(current) / dur)
			fmt.Printf("%10d | %6d req/s | %02d:%02d\r", current, pps, sec/60, sec%60)
			// Enqueue the document.
			bulk.Add(elastic.NewBulkIndexRequest().Id(d.ID).Doc(d))
			if bulk.NumberOfActions() >= 1000 {
				// Commit.
				res, err := bulk.Do(ctx)
				if err != nil {
					return err
				}
				if res.Errors {
					// Look up the failed documents with res.Failed(), and e.g. recommit
					return errors.New("bulk commit failed")
				}
				// "bulk" is reset after Do, so it can be reused for the next batch.
			}
			// Non-blocking cancellation check between documents.
			select {
			default:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		// Commit the final (partial) batch before exiting.
		// FIX: use a scoped err (:=) — the previous `_, err = bulk.Do(ctx)`
		// wrote to the enclosing function's shared err variable from inside
		// this goroutine, which is a data race with the caller's goroutine.
		if bulk.NumberOfActions() > 0 {
			if _, err := bulk.Do(ctx); err != nil {
				return err
			}
		}
		return nil
	})
	// Wait until all goroutines are finished.
	if err := g.Wait(); err != nil {
		return err
	}
	// Final results.
	dur := time.Since(begin).Seconds()
	sec := int(dur)
	pps := int64(float64(total) / dur)
	fmt.Printf("\n\n|- %10d -|- %6d req/s -|- %02d:%02d -|\n", total, pps, sec/60, sec%60)
	return nil
}
|
package storage
import (
"time"
"github.com/Tanibox/tania-core/src/tasks/domain"
"github.com/gofrs/uuid"
)
// TaskEvent is a persisted event-sourcing record: the task it belongs to,
// the version at which the event applies, its timestamp and the raw payload.
type TaskEvent struct {
	TaskUID     uuid.UUID
	Version     int
	CreatedDate time.Time
	Event       interface{} // concrete event type lives in the domain package
}
// TaskRead is the query-side (read model) projection of a task.
// Optional timestamps are pointers so absence serializes as null.
type TaskRead struct {
	Title         string            `json:"title"`
	UID           uuid.UUID         `json:"uid"`
	Description   string            `json:"description"`
	CreatedDate   time.Time         `json:"created_date"`
	DueDate       *time.Time        `json:"due_date,omitempty"`
	CompletedDate *time.Time        `json:"completed_date"`
	CancelledDate *time.Time        `json:"cancelled_date"`
	Priority      string            `json:"priority"`
	Status        string            `json:"status"`
	Domain        string            `json:"domain"`
	DomainDetails domain.TaskDomain `json:"domain_details"`
	Category      string            `json:"category"`
	IsDue         bool              `json:"is_due"`
	AssetID       *uuid.UUID        `json:"asset_id"`
}
// Implements TaskDomain interface in domain
// But contains more detailed information of material, area and crop
type TaskDomainDetailedCrop struct {
	Material *TaskDomainCropMaterial `json:"material"`
	Area     *TaskDomainCropArea     `json:"area"`
	Crop     *TaskDomainCropBatch    `json:"crop"`
}

// TaskDomainCropArea identifies the area a crop task applies to.
type TaskDomainCropArea struct {
	AreaID   *uuid.UUID `json:"area_id"`
	AreaName string     `json:"area_name"`
}

// TaskDomainCropBatch identifies the crop batch a task applies to.
type TaskDomainCropBatch struct {
	CropID      *uuid.UUID `json:"crop_id"`
	CropBatchID string     `json:"crop_batch_id"`
}

// TaskDomainCropMaterial identifies the material used by a crop task.
type TaskDomainCropMaterial struct {
	MaterialID           *uuid.UUID `json:"material_id"`
	MaterialName         string     `json:"material_name"`
	MaterialType         string     `json:"material_type"`
	MaterialDetailedType string     `json:"material_detailed_type"`
}

// Code returns the crop domain code, satisfying domain.TaskDomain.
func (d TaskDomainDetailedCrop) Code() string {
	return domain.TaskDomainCropCode
}
// TaskDomainDetailedArea mirrors TaskDomainCropMaterial for area tasks.
type TaskDomainDetailedArea struct {
	MaterialID           *uuid.UUID `json:"material_id"`
	MaterialName         string     `json:"material_name"`
	MaterialType         string     `json:"material_type"`
	MaterialDetailedType string     `json:"material_detailed_type"`
}

// Code satisfies domain.TaskDomain.
// NOTE(review): returns TaskDomainCropCode rather than an area-specific code —
// looks like a copy-paste from TaskDomainDetailedCrop; confirm against the
// domain package's constants before changing.
func (d TaskDomainDetailedArea) Code() string {
	return domain.TaskDomainCropCode
}
// TaskDomainDetailedReservoir mirrors TaskDomainCropMaterial for reservoir tasks.
type TaskDomainDetailedReservoir struct {
	MaterialID           *uuid.UUID `json:"material_id"`
	MaterialName         string     `json:"material_name"`
	MaterialType         string     `json:"material_type"`
	MaterialDetailedType string     `json:"material_detailed_type"`
}

// Code satisfies domain.TaskDomain.
// NOTE(review): returns TaskDomainCropCode rather than a reservoir-specific
// code — suspected copy-paste; confirm against the domain package.
func (d TaskDomainDetailedReservoir) Code() string {
	return domain.TaskDomainCropCode
}
|
package kucoin
import (
"crypto/tls"
"encoding/json"
"fmt"
"math/rand"
"net/http"
"net/url"
"sync"
"time"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// A WebSocketTokenModel contains a token and some servers for WebSocket feed.
// NOTE(review): AcceptUserMessage uses a snake_case json tag
// ("accept_user_message") unlike the camelCase tags elsewhere — confirm
// against the actual API payload before changing.
type WebSocketTokenModel struct {
	Token             string                `json:"token"`
	Servers           WebSocketServersModel `json:"instanceServers"`
	AcceptUserMessage bool                  `json:"accept_user_message"`
}
// A WebSocketServerModel contains some servers for WebSocket feed.
type WebSocketServerModel struct {
	PingInterval int64  `json:"pingInterval"`
	Endpoint     string `json:"endpoint"`
	Protocol     string `json:"protocol"`
	Encrypt      bool   `json:"encrypt"`
	PingTimeout  int64  `json:"pingTimeout"`
}

// A WebSocketServersModel is the set of *WebSocketServerModel.
type WebSocketServersModel []*WebSocketServerModel

// RandomServer returns a server randomly.
// It fails with an error when the set is empty.
func (s WebSocketServersModel) RandomServer() (*WebSocketServerModel, error) {
	if len(s) == 0 {
		return nil, errors.New("No available server ")
	}
	return s[rand.Intn(len(s))], nil
}
// WebSocketPublicToken returns the token for public channel.
// It POSTs to /api/v1/bullet-public with an empty parameter set.
func (as *ApiService) WebSocketPublicToken() (*ApiResponse, error) {
	req := NewRequest(http.MethodPost, "/api/v1/bullet-public", map[string]string{})
	return as.Call(req)
}
// WebSocketPrivateToken returns the token for private channel.
// It POSTs to /api/v1/bullet-private with an empty parameter set.
func (as *ApiService) WebSocketPrivateToken() (*ApiResponse, error) {
	req := NewRequest(http.MethodPost, "/api/v1/bullet-private", map[string]string{})
	return as.Call(req)
}
// All message types of WebSocket.
// These string values travel on the wire in WebSocketMessage.Type.
const (
	WelcomeMessage     = "welcome"
	PingMessage        = "ping"
	PongMessage        = "pong"
	SubscribeMessage   = "subscribe"
	AckMessage         = "ack"
	UnsubscribeMessage = "unsubscribe"
	ErrorMessage       = "error"
	Message            = "message"
	Notice             = "notice"
	Command            = "command"
)
// A WebSocketMessage represents a message between the WebSocket client and server.
// Id correlates requests with acks/pongs; Type is one of the message-type constants.
type WebSocketMessage struct {
	Id   string `json:"id"`
	Type string `json:"type"`
}

// A WebSocketSubscribeMessage represents a message to subscribe the public/private channel.
type WebSocketSubscribeMessage struct {
	*WebSocketMessage
	Topic          string `json:"topic"`
	PrivateChannel bool   `json:"privateChannel"`
	Response       bool   `json:"response"`
}
// NewPingMessage creates a ping message instance.
// The id is the current unix-nano timestamp, letting keepHeartbeat match
// the eventual pong to this ping.
func NewPingMessage() *WebSocketMessage {
	m := &WebSocketMessage{}
	m.Id = IntToString(time.Now().UnixNano())
	m.Type = PingMessage
	return m
}
// NewSubscribeMessage creates a subscribe message instance.
// topic names the channel; privateChannel routes through the private feed.
// Response is always true so Subscribe can wait for the server's ack.
func NewSubscribeMessage(topic string, privateChannel bool) *WebSocketSubscribeMessage {
	base := &WebSocketMessage{
		Id:   IntToString(time.Now().UnixNano()),
		Type: SubscribeMessage,
	}
	return &WebSocketSubscribeMessage{
		WebSocketMessage: base,
		Topic:            topic,
		PrivateChannel:   privateChannel,
		Response:         true,
	}
}
// A WebSocketUnsubscribeMessage represents a message to unsubscribe the public/private channel.
type WebSocketUnsubscribeMessage WebSocketSubscribeMessage

// NewUnsubscribeMessage creates a unsubscribe message instance.
// Mirrors NewSubscribeMessage but with the unsubscribe type tag.
func NewUnsubscribeMessage(topic string, privateChannel bool) *WebSocketUnsubscribeMessage {
	base := &WebSocketMessage{
		Id:   IntToString(time.Now().UnixNano()),
		Type: UnsubscribeMessage,
	}
	return &WebSocketUnsubscribeMessage{
		WebSocketMessage: base,
		Topic:            topic,
		PrivateChannel:   privateChannel,
		Response:         true,
	}
}
// A WebSocketDownstreamMessage represents a message from the WebSocket server to client.
// RawData defers payload decoding until the caller knows the concrete type.
type WebSocketDownstreamMessage struct {
	*WebSocketMessage
	Sn      string          `json:"sn"`
	Topic   string          `json:"topic"`
	Subject string          `json:"subject"`
	RawData json.RawMessage `json:"data"`
}

// ReadData read the data in channel.
// It unmarshals the deferred RawData payload into v.
func (m *WebSocketDownstreamMessage) ReadData(v interface{}) error {
	return json.Unmarshal(m.RawData, v)
}
// A WebSocketClient represents a connection to WebSocket server.
type WebSocketClient struct {
	// Wait all goroutines quit
	wg *sync.WaitGroup
	// Stop subscribing channel
	done chan struct{}
	// Pong channel to check pong message
	pongs chan string
	// ACK channel to check ack message (ids of acknowledged subscriptions)
	acks chan string
	// Error channel
	errors chan error
	// Downstream message channel
	messages chan *WebSocketDownstreamMessage
	conn     *websocket.Conn
	token    *WebSocketTokenModel
	server   *WebSocketServerModel
	// set by keepHeartbeat; gates pong forwarding in read()
	enableHeartbeat bool
	skipVerifyTls   bool
	timeout         time.Duration
}
// defaultTimeout bounds how long Subscribe/Unsubscribe wait for an ack.
var defaultTimeout = time.Second * 5

// WebSocketClientOpts defines the options for the client
// during the websocket connection.
type WebSocketClientOpts struct {
	Token         *WebSocketTokenModel
	TLSSkipVerify bool
	Timeout       time.Duration
}
// NewWebSocketClient creates an instance of WebSocketClient.
// It delegates to NewWebSocketClientOpts with the service's TLS setting and
// the package default timeout.
func (as *ApiService) NewWebSocketClient(token *WebSocketTokenModel) *WebSocketClient {
	return as.NewWebSocketClientOpts(WebSocketClientOpts{
		Token:         token,
		TLSSkipVerify: as.apiSkipVerifyTls,
		Timeout:       defaultTimeout,
	})
}
// NewWebSocketClientOpts creates an instance of WebSocketClient with the parsed options.
// The messages channel is buffered (2048) so a slow consumer does not
// immediately stall the read goroutine; pongs/acks/errors hold one entry each.
func (as *ApiService) NewWebSocketClientOpts(opts WebSocketClientOpts) *WebSocketClient {
	return &WebSocketClient{
		wg:            &sync.WaitGroup{},
		done:          make(chan struct{}),
		errors:        make(chan error, 1),
		pongs:         make(chan string, 1),
		acks:          make(chan string, 1),
		messages:      make(chan *WebSocketDownstreamMessage, 2048),
		token:         opts.Token,
		skipVerifyTls: opts.TLSSkipVerify,
		timeout:       opts.Timeout,
	}
}
// Connect connects the WebSocket server.
// It picks a random server from the token, dials it with the connect token in
// the query string, waits for the server's "welcome" frame, then starts the
// read and heartbeat goroutines. The returned channels deliver downstream
// messages and asynchronous errors for the lifetime of the connection.
func (wc *WebSocketClient) Connect() (<-chan *WebSocketDownstreamMessage, <-chan error, error) {
	// Find out a server
	s, err := wc.token.Servers.RandomServer()
	if err != nil {
		return wc.messages, wc.errors, err
	}
	wc.server = s
	// Concat ws url
	q := url.Values{}
	q.Add("connectId", IntToString(time.Now().UnixNano()))
	q.Add("token", wc.token.Token)
	if wc.token.AcceptUserMessage == true {
		q.Add("acceptUserMessage", "true")
	}
	u := fmt.Sprintf("%s?%s", s.Endpoint, q.Encode())
	// Ignore verify tls
	// NOTE(review): mutating websocket.DefaultDialer changes process-global
	// state — concurrent clients with different TLS settings would race.
	websocket.DefaultDialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: wc.skipVerifyTls}
	// Connect ws server
	websocket.DefaultDialer.ReadBufferSize = 2048000 //2000 kb
	wc.conn, _, err = websocket.DefaultDialer.Dial(u, nil)
	if err != nil {
		return wc.messages, wc.errors, err
	}
	// Must read the first welcome message; any pre-welcome error frame aborts.
	for {
		m := &WebSocketDownstreamMessage{}
		if err := wc.conn.ReadJSON(m); err != nil {
			return wc.messages, wc.errors, err
		}
		if DebugMode {
			logrus.Debugf("Received a WebSocket message: %s", ToJsonString(m))
		}
		if m.Type == ErrorMessage {
			return wc.messages, wc.errors, errors.Errorf("Error message: %s", ToJsonString(m))
		}
		if m.Type == WelcomeMessage {
			break
		}
	}
	wc.wg.Add(2)
	go wc.read()
	go wc.keepHeartbeat()
	return wc.messages, wc.errors, nil
}
// read is the single reader goroutine: it decodes frames and dispatches them
// by type — pongs to the heartbeat, acks to Subscribe/Unsubscribe, data
// frames to the messages channel. It exits on Stop (done closed), on any
// read/decode error, or on a server error frame; pongs/messages are closed
// on the way out so consumers observe the shutdown.
func (wc *WebSocketClient) read() {
	defer func() {
		close(wc.pongs)
		close(wc.messages)
		wc.wg.Done()
	}()
	for {
		select {
		case <-wc.done:
			return
		default:
			m := &WebSocketDownstreamMessage{}
			if err := wc.conn.ReadJSON(m); err != nil {
				wc.errors <- err
				return
			}
			if DebugMode {
				logrus.Debugf("Received a WebSocket message: %s", ToJsonString(m))
			}
			// log.Printf("ReadJSON: %s", ToJsonString(m))
			switch m.Type {
			case WelcomeMessage:
				// already handled during Connect; ignored here
			case PongMessage:
				// only forwarded once keepHeartbeat is running
				if wc.enableHeartbeat {
					wc.pongs <- m.Id
				}
			case AckMessage:
				// log.Printf("Subscribed: %s==%s? %s", channel.Id, m.Id, channel.Topic)
				wc.acks <- m.Id
			case ErrorMessage:
				wc.errors <- errors.Errorf("Error message: %s", ToJsonString(m))
				return
			case Message, Notice, Command:
				wc.messages <- m
			default:
				wc.errors <- errors.Errorf("Unknown message type: %s", m.Type)
			}
		}
	}
}
// keepHeartbeat periodically pings the server and waits for the matching
// pong. The ticker fires 200ms before the server's advertised PingInterval
// (milliseconds) to stay ahead of the server's deadline. A missing or
// mismatched pong is reported on the errors channel and ends the goroutine.
func (wc *WebSocketClient) keepHeartbeat() {
	wc.enableHeartbeat = true
	// New ticker to send ping message
	pt := time.NewTicker(time.Duration(wc.server.PingInterval)*time.Millisecond - time.Millisecond*200)
	defer wc.wg.Done()
	defer pt.Stop()
	for {
		select {
		case <-wc.done:
			return
		case <-pt.C:
			p := NewPingMessage()
			m := ToJsonString(p)
			if DebugMode {
				logrus.Debugf("Sent a WebSocket message: %s", m)
			}
			if err := wc.conn.WriteMessage(websocket.TextMessage, []byte(m)); err != nil {
				wc.errors <- err
				return
			}
			// log.Printf("Ping: %s", ToJsonString(p))
			// Waiting (with timeout) for the server to response pong message
			// If timeout, close this connection
			select {
			case pid := <-wc.pongs:
				if pid != p.Id {
					wc.errors <- errors.Errorf("Invalid pong id %s, expect %s", pid, p.Id)
					return
				}
			case <-time.After(time.Duration(wc.server.PingTimeout) * time.Millisecond):
				wc.errors <- errors.Errorf("Wait pong message timeout in %d ms", wc.server.PingTimeout)
				return
			}
		}
	}
}
// Subscribe subscribes the specified channel.
// For each channel it writes the subscribe frame, then blocks until the
// matching ack arrives, the read goroutine reports an error, or the client
// timeout elapses.
func (wc *WebSocketClient) Subscribe(channels ...*WebSocketSubscribeMessage) error {
	for _, c := range channels {
		m := ToJsonString(c)
		if DebugMode {
			logrus.Debugf("Sent a WebSocket message: %s", m)
		}
		if err := wc.conn.WriteMessage(websocket.TextMessage, []byte(m)); err != nil {
			return err
		}
		//log.Printf("Subscribing: %s, %s", c.Id, c.Topic)
		select {
		case id := <-wc.acks:
			//log.Printf("ack: %s=>%s", id, c.Id)
			// Acks are expected in request order; a mismatched id is fatal.
			if id != c.Id {
				return errors.Errorf("Invalid ack id %s, expect %s", id, c.Id)
			}
		case err := <-wc.errors:
			return errors.Errorf("Subscribe failed, %s", err.Error())
		case <-time.After(wc.timeout):
			return errors.Errorf("Wait ack message timeout in %v", wc.timeout)
		}
	}
	return nil
}
// Unsubscribe unsubscribes the specified channel.
// Mirrors Subscribe: sends the unsubscribe frame, then waits for the matching
// ack, an error from the read goroutine, or the client timeout.
func (wc *WebSocketClient) Unsubscribe(channels ...*WebSocketUnsubscribeMessage) error {
	for _, c := range channels {
		m := ToJsonString(c)
		if DebugMode {
			logrus.Debugf("Sent a WebSocket message: %s", m)
		}
		if err := wc.conn.WriteMessage(websocket.TextMessage, []byte(m)); err != nil {
			return err
		}
		//log.Printf("Unsubscribing: %s, %s", c.Id, c.Topic)
		select {
		case id := <-wc.acks:
			//log.Printf("ack: %s=>%s", id, c.Id)
			if id != c.Id {
				return errors.Errorf("Invalid ack id %s, expect %s", id, c.Id)
			}
		// FIX: consistency with Subscribe — previously a read-goroutine error
		// was not consumed here, so Unsubscribe hung until the full timeout
		// whenever the connection failed mid-request.
		case err := <-wc.errors:
			return errors.Errorf("Unsubscribe failed, %s", err.Error())
		case <-time.After(wc.timeout):
			return errors.Errorf("Wait ack message timeout in %v", wc.timeout)
		}
	}
	return nil
}
// Stop stops subscribing the specified channel, all goroutines quit.
// Closing done signals read/keepHeartbeat to exit; closing the connection
// also unblocks any in-flight ReadJSON. Wait returns once both goroutines
// have finished.
// NOTE(review): Stop is not idempotent — a second call would close(done)
// again and panic.
func (wc *WebSocketClient) Stop() {
	close(wc.done)
	_ = wc.conn.Close()
	wc.wg.Wait()
}
|
package summary
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestSummaryMinInterval feeds 100 increments of 2 over ~0.5s into a
// 10-bucket half-second window and checks value (~200) and count (~100),
// each with a 10% tolerance.
func TestSummaryMinInterval(t *testing.T) {
	count := New(time.Second/2, 10)
	tk1 := time.NewTicker(5 * time.Millisecond)
	defer tk1.Stop()
	for i := 0; i < 100; i++ {
		<-tk1.C
		count.Add(2)
	}
	v, c := count.Value()
	t.Logf("count value: %d, %d\n", v, c)
	// 10% of error when bucket is 10
	// FIX: the message previously claimed the wrong range ([90-110]).
	if v < 190 || v > 210 {
		t.Errorf("expect value in [190-210] get %d", v)
	}
	// 10% of error when bucket is 10
	// FIX: this branch previously printed v instead of the failing c.
	if c < 90 || c > 110 {
		t.Errorf("expect count in [90-110] get %d", c)
	}
}
// TestSummary walks a 1s/10-bucket summary through add, partial expiry and
// full expiry. The sleeps line up with ~100ms bucket boundaries, so this
// test is inherently timing-sensitive.
func TestSummary(t *testing.T) {
	s := New(time.Second, 10)
	t.Run("add", func(t *testing.T) {
		s.Add(1)
		v, c := s.Value()
		assert.Equal(t, v, int64(1))
		assert.Equal(t, c, int64(1))
	})
	time.Sleep(time.Millisecond * 110)
	t.Run("add2", func(t *testing.T) {
		s.Add(1)
		v, c := s.Value()
		assert.Equal(t, v, int64(2))
		assert.Equal(t, c, int64(2))
	})
	time.Sleep(time.Millisecond * 900) // expire one bucket, 110 + 900
	t.Run("expire", func(t *testing.T) {
		v, c := s.Value()
		assert.Equal(t, v, int64(1))
		assert.Equal(t, c, int64(1))
		s.Add(1)
		v, c = s.Value()
		assert.Equal(t, v, int64(2)) // expire one bucket
		assert.Equal(t, c, int64(2)) // expire one bucket
	})
	time.Sleep(time.Millisecond * 1100)
	t.Run("expire_all", func(t *testing.T) {
		v, c := s.Value()
		assert.Equal(t, v, int64(0))
		assert.Equal(t, c, int64(0))
	})
	t.Run("reset", func(t *testing.T) {
		s.Reset()
		v, c := s.Value()
		assert.Equal(t, v, int64(0))
		assert.Equal(t, c, int64(0))
	})
}
|
package utils
import (
"time"
"context"
"strings"
"github.com/satori/go.uuid"
)
var reqID struct{}
var reqStartTime struct{}
// SetReqID stores a freshly generated request ID (a UUIDv4 with the dashes
// stripped) in the returned context under the package's reqID key.
func SetReqID(ctx context.Context) context.Context {
	id := strings.Replace(uuid.NewV4().String(), "-", "", -1)
	return context.WithValue(ctx, reqID, id)
}
// GetReqID returns the request ID stored by SetReqID, or "" when the context
// carries none (or a value of the wrong type).
func GetReqID(ctx context.Context) string {
	if id, ok := ctx.Value(reqID).(string); ok {
		return id
	}
	return ""
}
// SetReqTime stores the current time as unix nanoseconds in the returned
// context under the package's reqStartTime key.
func SetReqTime(ctx context.Context) context.Context {
	return context.WithValue(ctx, reqStartTime, time.Now().UnixNano())
}
// GetReqTime returns the request start time (unix nanoseconds) stored by
// SetReqTime, or 0 when the context carries none.
func GetReqTime(ctx context.Context) int64 {
	// Comma-ok assertion replaces the old type switch; the trailing
	// `return 0` after that switch was unreachable dead code and is removed.
	if ts, ok := ctx.Value(reqStartTime).(int64); ok {
		return ts
	}
	return 0
}
|
package cacheutil
import (
"sync"
)
// Queue tracks in-flight operations keyed by a hashed key. The first caller
// to Set a key owns it; others can fetch the key's WaitGroup via Get and
// block on it until the owner calls Release.
type Queue struct {
	hashMap map[uint64]*sync.WaitGroup
	mux     sync.RWMutex
}

// NewQueue returns an empty, ready-to-use Queue.
func NewQueue() *Queue {
	return &Queue{hashMap: map[uint64]*sync.WaitGroup{}}
}

// Set registers hashedKey and returns true when the caller acquired it;
// false means another caller already holds the key.
func (q *Queue) Set(hashedKey uint64) bool {
	q.mux.Lock()
	defer q.mux.Unlock()
	if _, exists := q.hashMap[hashedKey]; exists {
		return false
	}
	wg := &sync.WaitGroup{}
	wg.Add(1)
	q.hashMap[hashedKey] = wg
	return true
}

// Release completes the transaction for hashedKey — waking any waiters —
// and removes it. Releasing an absent key is a no-op.
func (q *Queue) Release(hashedKey uint64) {
	q.mux.Lock()
	defer q.mux.Unlock()
	wg, exists := q.hashMap[hashedKey]
	if !exists {
		return
	}
	wg.Done()
	delete(q.hashMap, hashedKey)
}

// Get returns the WaitGroup registered for hashedKey, or nil when none is.
func (q *Queue) Get(hashedKey uint64) *sync.WaitGroup {
	q.mux.RLock()
	defer q.mux.RUnlock()
	return q.hashMap[hashedKey]
}
|
package utils
import (
"time"
md "github.com/ebikode/eLearning-core/model"
"github.com/uniplaces/carbon"
"golang.org/x/crypto/bcrypt"
)
// ValidatePincode - validates a user pincode.
// Returns false when the user is nil, has no pincode, has already used it,
// the code is older than its 10-minute validity window, or the bcrypt
// comparison fails for ANY reason.
func ValidatePincode(user *md.User, pincode string) bool {
	if user == nil || user.Pincode == "" || user.IsPincodeUsed {
		return false
	}
	// Check whether the pincode is still inside its validity window.
	now := carbon.NewCarbon(time.Now().UTC())
	// Add 10 minutes to pincode sent date since the validity is 10 minutes
	pincodeSentDate := carbon.NewCarbon(user.PincodeSentAt.UTC().Add(10 * time.Minute))
	// Comparing both time . If now is greater than pincodesentDate
	if now.Gt(pincodeSentDate) {
		return false
	}
	// SECURITY FIX: the old check only rejected
	// bcrypt.ErrMismatchedHashAndPassword, so any OTHER bcrypt error (e.g. a
	// malformed stored hash) fell through and returned true — an
	// authentication bypass. Any non-nil error now fails validation.
	return bcrypt.CompareHashAndPassword([]byte(user.Pincode), []byte(pincode)) == nil
}
// ValidatePassword - validates a user password against its bcrypt hash.
// SECURITY FIX: the old check only rejected
// bcrypt.ErrMismatchedHashAndPassword, so any OTHER bcrypt error (e.g. an
// invalid/corrupt stored hash) fell through and returned true — an
// authentication bypass. Any non-nil error now fails validation.
func ValidatePassword(dbPassword, password string) bool {
	return bcrypt.CompareHashAndPassword([]byte(dbPassword), []byte(password)) == nil
}
// CheckAccountExpiration Check Account Expiration Date
// func CheckAccountExpiration(account *md.Account, clientURL, sendGridKey string) bool {
// if account == nil || account.Status != "active" {
// return true
// }
// // Check Expiration date
// now := carbon.NewCarbon(time.Now().UTC())
// expirationDate := carbon.NewCarbon(account.ExpirationDate.UTC())
// // Comparing both time . If now is greater than expirationDate
// if now.Gt(expirationDate) {
// return true
// }
// Add 7 days to the current expiration date
// if today's date is greater send an email alert to the user for expiration
// letting them know their account expires in 7days
// expirationDate = carbon.NewCarbon(account.ExpirationDate.UTC())
// // Comparing both time . If now is greater than expirationDate
// if now.Gt(expirationDate.AddDays(7)) {
// userName := fmt.Sprintf("%s %s", account.User.FirstName, account.User.LastName)
// // Set up Email Data
// emailText := "The above account expires in less than 7 days. Please visit your account page to renew you Setup/Subscription"
// emailSubject := fmt.Sprintf("%s Expiration", account.Name)
// emailData := EmailData{
// To: []*mail.Email{
// mail.NewEmail(userName, account.User.Email),
// mail.NewEmail(account.Name, account.Email),
// },
// PageTitle: emailSubject,
// Subject: emailSubject,
// Preheader: "in less than 7 days",
// BodyTitle: account.Name,
// FirstBodyText: emailText,
// }
// emailData.Button.Text = "Goto Account"
// emailData.Button.URL = fmt.Sprintf("%s/account/%s", clientURL, account.ID)
// // Send A Welcome/Verification Email to User
// emailBody := ProcessEmail(emailData)
// go SendEmail(emailBody, sendGridKey)
// }
// return false
// }
// IsAccountMoreThanAYear Check if account is more than a year
// this is neccessary to bill startups at normal rates
// after their first year
// func IsAccountMoreThanAYear(account *md.Account) bool {
// if account == nil {
// return true
// }
// // Check Expiration date
// now := carbon.NewCarbon(time.Now().UTC())
// createdAtDate := carbon.NewCarbon(account.CreatedAt.UTC())
// // Comparing both time . If now is greater than expirationDate
// if now.Gt(createdAtDate.AddYear()) {
// return true
// }
// return false
// }
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"os"
"reflect"
"strconv"
"time"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/gui"
"github.com/therecipe/qt/widgets"
)
// data_struct mirrors the JSON payload served by xkcd's info.0.json
// endpoint (one record per comic). It is package-level so the click
// handler can unmarshal into it and the UI code can reflect over its
// fields to build/populate the form.
var data_struct struct {
	Month      string `json:"month"`
	Num        int    `json:"num"`
	Link       string `json:"link"`
	Year       string `json:"year"`
	News       string `json:"news"`
	SafeTitle  string `json:"safe_title"`
	Transcript string `json:"transcript"`
	Alt        string `json:"alt"`
	Img        string `json:"img"`
	Title      string `json:"title"`
	Day        string `json:"day"`
}
// main builds a small Qt form that fetches a random xkcd comic and fills
// one line-edit per JSON field; the "img" field additionally gets a label
// that displays the comic image itself.
func main() {
	widgets.NewQApplication(len(os.Args), os.Args)
	window := widgets.NewQMainWindow(nil, 0)
	widget := widgets.NewQWidget(nil, 0)
	window.SetCentralWidget(widget)
	layout := widgets.NewQFormLayout(widget)
	layout.SetFieldGrowthPolicy(widgets.QFormLayout__AllNonFixedFieldsGrow)
	// One widget per struct field, keyed by the json tag name.
	widgetmap := make(map[string]*widgets.QWidget)
	for i := 0; i < reflect.TypeOf(data_struct).NumField(); i++ {
		name := reflect.TypeOf(data_struct).Field(i).Tag.Get("json")
		// Every field gets a line edit; the original duplicated this in
		// both branches of an if/else.
		widgetmap[name] = widgets.NewQLineEdit(nil).QWidget_PTR()
		layout.AddRow3(name, widgetmap[name])
		if name == "img" {
			widgetmap[name+"_label"] = widgets.NewQLabel(nil, 0).QWidget_PTR()
			layout.AddRow3(name+"_label", widgetmap[name+"_label"])
		}
	}
	button := widgets.NewQPushButton2("random xkcd", nil)
	layout.AddWidget(button)
	button.ConnectClicked(func(bool) {
		rand.Seed(time.Now().UnixNano())
		// Comic numbers start at 1: the original used rand.Intn(614),
		// which can produce 0 and request the nonexistent comic #0.
		resp, err := http.Get(fmt.Sprintf("https://xkcd.com/%v/info.0.json", rand.Intn(614)+1))
		if err != nil {
			return
		}
		defer resp.Body.Close()
		data, _ := ioutil.ReadAll(resp.Body)
		json.Unmarshal(data, &data_struct)
		for i := 0; i < reflect.TypeOf(data_struct).NumField(); i++ {
			name := reflect.TypeOf(data_struct).Field(i).Tag.Get("json")
			if name != "img" {
				switch reflect.ValueOf(data_struct).Field(i).Kind() {
				case reflect.String:
					widgets.NewQLineEditFromPointer(widgetmap[name].Pointer()).SetText(reflect.ValueOf(data_struct).Field(i).String())
				case reflect.Int:
					widgets.NewQLineEditFromPointer(widgetmap[name].Pointer()).SetText(strconv.Itoa(int(reflect.ValueOf(data_struct).Field(i).Int())))
				}
			} else {
				url := reflect.ValueOf(data_struct).Field(i).String()
				widgets.NewQLineEditFromPointer(widgetmap[name].Pointer()).SetText(url)
				imgResp, err := http.Get(url)
				if err != nil {
					return
				}
				img, _ := ioutil.ReadAll(imgResp.Body)
				// Close immediately rather than defer: a defer inside this
				// loop (as in the original) would not run until the whole
				// click handler returns.
				imgResp.Body.Close()
				pix := gui.NewQPixmap()
				pix.LoadFromData(img, uint(len(img)), "", 0)
				widgets.NewQLabelFromPointer(widgetmap[name+"_label"].Pointer()).SetPixmap(pix.Scaled2(400, 400, core.Qt__KeepAspectRatio, core.Qt__SmoothTransformation))
			}
		}
	})
	window.Show()
	widgets.QApplication_Exec()
}
|
package main
import "fmt"
// main demonstrates Go's arithmetic operators: integer vs. floating-point
// division, the modulo operator, and the increment/decrement statements.
func main() {
	// Integer / integer truncates the fractional part: this prints 2, not 2.5.
	fmt.Println(10 / 4)
	var num float64 = 10 / 4
	fmt.Println(num)
	// To keep the fractional part, at least one operand must be a float.
	fmt.Println(10.0 / 4)
	// Modulo:
	// a % b = a - a / b * b
	fmt.Println(10 % 3)
	fmt.Println(-10 % 3) // -10 - (-10) / 3 * 3 = -10 - (-9) = -1
	// Go has only the postfix forms i++ and i--; there is no ++i or --i.
	var i int = 10
	i++ // i = i + 1
	fmt.Println(i)
	i-- // i = i - 1
	fmt.Println(i)
	// i++ / i-- are statements, not expressions, so they cannot appear on
	// the right-hand side of an assignment:
	//var b int
	//b = i--
}
|
package types
// Neo4jQueryResponse is the top-level payload returned by Neo4j's HTTP
// query endpoint: one result per submitted statement plus any errors.
type Neo4jQueryResponse struct {
	Results []Neo4jQueryResult `json:"results"`
	Errors  []Neo4jQueryErr    `json:"errors"`
}

// Neo4jQueryResult holds the column names and row data for one statement.
type Neo4jQueryResult struct {
	Columns []string               `json:"columns"`
	DataArr []Neo4jQueryResultData `json:"data"`
}

// Neo4jQueryErr describes a single server-side error.
type Neo4jQueryErr struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

// String renders the error as "<code> <message>".
// (Receiver renamed from the non-idiomatic `self`; Go convention is a short
// receiver name, never `this`/`self`.)
func (e *Neo4jQueryErr) String() string {
	return e.Code + " " + e.Message
}

// Neo4jQueryResultData wraps the values of one result row.
type Neo4jQueryResultData struct {
	Rows []interface{} `json:"row"`
}
|
package db
import (
"testing"
)
// ds is the package-level DataSource handle shared by all tests below.
var ds *DataSource

// initMysql builds the MySQL configuration used by the tests.
//
// BUG(review): the original declared `mysqlconfig` and never used it —
// which is a compile error in Go — and `ds` is never initialized, so every
// test below operates on a nil *DataSource. Wire the config into `ds` here
// once the constructor's name is confirmed against the db package API.
func initMysql() {
	mysqlconfig := &MysqlConfig{
		MysqlAddr:     "localhost",
		MysqlPort:     "3306",
		MysqlUser:     "root",
		MysqlPassword: "12345678",
		MysqlDB:       "test111",
	}
	// TODO(review): replace with the real constructor, e.g.
	// ds = NewDataSource(mysqlconfig) — name to be confirmed.
	_ = mysqlconfig
}
// User mirrors one row of table1 (name, age) for scanning query results.
type User struct {
	Name string
	Age  int
}
// TestExec verifies that a plain SELECT round-trips the known seed row.
func TestExec(t *testing.T) {
	initMysql()
	res := []User{}
	rows, err := ds.Query("select name,age from table1")
	if err != nil {
		// The original discarded this error and would nil-deref below.
		t.Fatalf("Query failed: %v", err)
	}
	for rows.Next() {
		r := User{}
		rows.Scan(&r.Name, &r.Age)
		res = append(res, r)
	}
	// Guard the index: the original panicked with "index out of range"
	// when the table was empty instead of reporting a test failure.
	if len(res) == 0 {
		t.Fatal("Exec函数测试错误! (no rows returned)")
	}
	if res[0] != (User{Name: "lihao", Age: 25}) {
		t.Error("Exec函数测试错误!")
	}
}
func TestQuery(t *testing.T) {
initMysql()
//for UPDATE operation
ds.Exec(`update table1 set age=? where name=?`, 100, "lihao") //update to check
res := []User{}
rows, _ := ds.Query("select name,age from table1")
for rows.Next() {
r := User{}
rows.Scan(&r.Name, &r.Age)
res = append(res, r)
}
if res[0] != (User{Name: "lihao", Age: 100}) {
t.Error("Query函数测试错误!")
}
ds.Exec(`update table1 set age=25 where name="lihao"`) //restore the data
//for INSERT operation
ds.Exec(`insert into table1 (name,age) values ("tianjun",21)`)
res = []User{}
rows, _ = ds.Query(`select name,age from table1 where name="tianjun"`)
for rows.Next() {
r := User{}
rows.Scan(&r.Name, &r.Age)
res = append(res, r)
}
if res[0] != (User{Name: "tianjun", Age: 21}) {
t.Error("Query函数测试错误!")
}
ds.Exec(`delete from table1 where name="tianjun"`) //delete tianjun
//for DELETE operation, check wether tianjun is deleted
res = []User{}
rows, _ = ds.Query(`select name,age from table1 where name="tianjun"`)
for rows.Next() {
r := User{}
rows.Scan(&r.Name, &r.Age)
res = append(res, r)
}
if len(res) != 0 {
t.Error("Query函数测试错误!")
}
}
|
package yai
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
)
// Installer installs the app according to the contained manifest.
type Installer struct {
	DownloadTo string // directory downloaded resources are written into
	InstallTo  string // root directory installs live under (apps/app/version)
	Shim       Shim   // used to link shims for the manifest's binaries
	Manifest   Manifest
	AppName    string
}
// Install downloads the resources named by the Manifest, unpacks them into
// the per-version directory, and links a shim for every declared binary.
// (Leftover debug fmt.Println calls from the original have been removed,
// and bare error returns now carry context.)
func (i Installer) Install() error {
	resources, err := i.Download()
	if err != nil {
		return errors.Wrap(err, "downloading")
	}
	versionDir := i.Dir()
	if err := os.MkdirAll(versionDir, 0755); err != nil {
		return errors.Wrapf(err, "creating install directory %q", versionDir)
	}
	if err := i.install(versionDir, resources...); err != nil {
		return errors.Wrap(err, "installation")
	}
	// Link a shim for each binary the manifest declares.
	for _, bin := range i.Manifest.Bins() {
		if err := i.Shim.Link(i.ResolvePath(bin)); err != nil {
			return errors.Wrap(err, "linking shim")
		}
	}
	return nil
}
// ResolvePath returns the absolute path to the first file found called name.
// TODO: Does simple root level resolving for now.
func (i Installer) ResolvePath(name string) string {
	base := i.Dir()
	return filepath.Join(base, name)
}
// Dir returns the directory for this particular installed version (apps/app/version).
func (i Installer) Dir() string {
	segments := []string{i.InstallTo, i.AppName, i.Manifest.Version}
	return filepath.Join(segments...)
}
// install unpacks each downloaded resource into dest concurrently,
// dispatching on the resource's file extension (currently only .zip is
// handled; other types are silently skipped, as in the original).
//
// Fixes two concurrency bugs in the original: wg.Done() was called at the
// TOP of each goroutine (so Wait returned before any install finished),
// and the errs channel was never closed (so `for e := range errs` blocked
// forever once the buffered errors were drained).
func (i Installer) install(dest string, resources ...string) error {
	install := func(resource string) error {
		switch filepath.Ext(resource) {
		case ".zip":
			if err := unzip(resource, dest, i.Manifest.ExtractDir); err != nil {
				return err
			}
		}
		return nil
	}
	errs := make(chan error, 256)
	wg := sync.WaitGroup{}
	for _, r := range resources {
		wg.Add(1)
		go func(r string) {
			defer wg.Done() // deferred, so Wait blocks until the work is done
			if err := install(r); err != nil {
				errs <- err
			}
		}(r)
	}
	wg.Wait()
	close(errs) // allow the collection loop below to terminate
	var err error
	for e := range errs {
		err = multierror.Append(err, e)
	}
	if err != nil {
		return err
	}
	return nil
}
// Download fetches every URL listed in the Manifest concurrently and
// returns the local paths of the downloaded resources. On any failure all
// errors are combined and returned. (Leftover debug fmt.Println calls from
// the original have been removed.)
func (i Installer) Download() ([]string, error) {
	resources := make(chan string, 256)
	errs := make(chan error, 256)
	wg := sync.WaitGroup{}
	for _, url := range i.Manifest.URLs() {
		wg.Add(1)
		go func(url string) {
			defer wg.Done()
			resource, err := i.download(url)
			if err != nil {
				errs <- err
				return
			}
			resources <- resource
		}(url)
	}
	wg.Wait()
	// Close both channels so the collection loops below terminate.
	close(resources)
	close(errs)
	var err error
	for e := range errs {
		err = multierror.Append(err, e)
	}
	if err != nil {
		return nil, err
	}
	var list []string
	for r := range resources {
		list = append(list, r)
	}
	return list, nil
}
// download fetches a single URL into the download directory. The target
// file name embeds the app name, version, and a sanitized form of the URL
// so concurrent downloads never collide. (Debug prints removed; an HTTP
// status check was added so error pages are not saved as resources.)
func (i Installer) download(url string) (string, error) {
	if err := os.MkdirAll(i.DownloadTo, 0755); err != nil {
		return "", errors.Wrapf(err, "could not create download directory %q", i.DownloadTo)
	}
	target := filepath.Join(i.DownloadTo, strings.Join([]string{i.AppName, i.Manifest.Version, sanitizeURL(url)}, "#"))
	resp, err := http.Get(url)
	if err != nil {
		return target, err
	}
	defer resp.Body.Close()
	// Reject non-2xx responses: the original silently wrote the error page
	// to disk as if it were the resource.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return target, errors.Errorf("downloading %q: unexpected status %s", url, resp.Status)
	}
	file, err := os.OpenFile(target, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0775)
	if err != nil {
		return target, err
	}
	defer file.Close()
	if _, err := io.Copy(file, resp.Body); err != nil {
		return target, err
	}
	return target, nil
}
// sanitizeURL creates a filepath safe representation of the URL by removing
// special characters and slashes. Replacements are applied sequentially,
// scheme prefixes first, so the scheme's own slashes never become underscores.
func sanitizeURL(url string) string {
	replacements := []struct{ old, new string }{
		{"https://", ""},
		{"http://", ""},
		{"/", "_"},
		{"?", "_"},
		{"&", "_"},
	}
	sanitized := url
	for _, r := range replacements {
		sanitized = strings.Replace(sanitized, r.old, r.new, -1)
	}
	return sanitized
}
|
package gov
import (
"fmt"
"github.com/irisnet/irishub/app/v1/params"
"github.com/irisnet/irishub/codec"
sdk "github.com/irisnet/irishub/types"
"strconv"
"time"
)
// Proposal level names, their default deposits (denominated in iris),
// global deposit bounds, and per-level caps on concurrently active proposals.
const (
	CRITICAL_DEPOSIT  = 4000
	IMPORTANT_DEPOSIT = 2000
	NORMAL_DEPOSIT    = 1000
	CRITICAL          = "Critical"
	IMPORTANT         = "Important"
	NORMAL            = "Normal"
	LOWER_BOUND_AMOUNT = 10
	UPPER_BOUND_AMOUNT = 10000
	// NOTE(review): "CRITIACAL" is a typo for "CRITICAL", kept as-is because
	// code outside this view may reference the misspelled name.
	STABLE_CRITIACAL_NUM  = 1
	DEFAULT_IMPORTANT_NUM = 5
	DEFAULT_NORMAL_NUM    = 7
	MIN_IMPORTANT_NUM     = 1
	MIN_NORMAL_NUM        = 1
)
// Compile-time check that *GovParams implements params.ParamSet.
var _ params.ParamSet = (*GovParams)(nil)

// DefaultParamSpace is the default paramspace for the params keeper.
const (
	DefaultParamSpace = "gov"
)
// Parameter store keys: one slot per governance parameter, grouped by
// proposal level (Critical / Important / Normal) plus the halt period.
var (
	KeyCriticalDepositPeriod  = []byte(CRITICAL + "DepositPeriod")
	KeyCriticalMinDeposit     = []byte(CRITICAL + "MinDeposit")
	KeyCriticalVotingPeriod   = []byte(CRITICAL + "VotingPeriod")
	KeyCriticalMaxNum         = []byte(CRITICAL + "MaxNum")
	KeyCriticalThreshold      = []byte(CRITICAL + "Threshold")
	KeyCriticalVeto           = []byte(CRITICAL + "Veto")
	KeyCriticalParticipation  = []byte(CRITICAL + "Participation")
	KeyCriticalPenalty        = []byte(CRITICAL + "Penalty")
	KeyImportantDepositPeriod = []byte(IMPORTANT + "DepositPeriod")
	KeyImportantMinDeposit    = []byte(IMPORTANT + "MinDeposit")
	KeyImportantVotingPeriod  = []byte(IMPORTANT + "VotingPeriod")
	KeyImportantMaxNum        = []byte(IMPORTANT + "MaxNum")
	KeyImportantThreshold     = []byte(IMPORTANT + "Threshold")
	KeyImportantVeto          = []byte(IMPORTANT + "Veto")
	KeyImportantParticipation = []byte(IMPORTANT + "Participation")
	KeyImportantPenalty       = []byte(IMPORTANT + "Penalty")
	KeyNormalDepositPeriod    = []byte(NORMAL + "DepositPeriod")
	KeyNormalMinDeposit       = []byte(NORMAL + "MinDeposit")
	KeyNormalVotingPeriod     = []byte(NORMAL + "VotingPeriod")
	KeyNormalMaxNum           = []byte(NORMAL + "MaxNum")
	KeyNormalThreshold        = []byte(NORMAL + "Threshold")
	KeyNormalVeto             = []byte(NORMAL + "Veto")
	KeyNormalParticipation    = []byte(NORMAL + "Participation")
	KeyNormalPenalty          = []byte(NORMAL + "Penalty")
	KeySystemHaltPeriod       = []byte("SystemHaltPeriod")
)
// ParamTypeTable returns the param TypeTable for the gov module.
// (The original comment said "mint module" — a copy-paste artifact.)
func ParamTypeTable() params.TypeTable {
	return params.NewTypeTable().RegisterParamSet(&GovParams{})
}
// GovParams holds the governance parameters for all three proposal levels
// (Critical / Important / Normal) plus the system halt period.
// (The original comment said "mint parameters" — a copy-paste artifact.)
type GovParams struct {
	CriticalDepositPeriod time.Duration `json:"critical_deposit_period"` // Maximum period for Atom holders to deposit on a proposal. Initial value: 2 months
	CriticalMinDeposit    sdk.Coins     `json:"critical_min_deposit"`    // Minimum deposit for a critical proposal to enter voting period.
	CriticalVotingPeriod  time.Duration `json:"critical_voting_period"`  // Length of the critical voting period.
	CriticalMaxNum        uint64        `json:"critical_max_num"`        // Max number of concurrently active critical proposals.
	CriticalThreshold     sdk.Dec       `json:"critical_threshold"`      // Minimum propotion of Yes votes for proposal to pass. Initial value: 0.5
	CriticalVeto          sdk.Dec       `json:"critical_veto"`           // Minimum value of Veto votes to Total votes ratio for proposal to be vetoed. Initial value: 1/3
	CriticalParticipation sdk.Dec       `json:"critical_participation"`  // Minimum ratio of voting power that must participate.
	CriticalPenalty       sdk.Dec       `json:"critical_penalty"`        // Penalty if validator does not vote
	ImportantDepositPeriod time.Duration `json:"important_deposit_period"` // Maximum period for Atom holders to deposit on a proposal. Initial value: 2 months
	ImportantMinDeposit    sdk.Coins     `json:"important_min_deposit"`    // Minimum deposit for a important proposal to enter voting period.
	ImportantVotingPeriod  time.Duration `json:"important_voting_period"`  // Length of the important voting period.
	ImportantMaxNum        uint64        `json:"important_max_num"`        // Max number of concurrently active important proposals.
	ImportantThreshold     sdk.Dec       `json:"important_threshold"`      // Minimum propotion of Yes votes for proposal to pass. Initial value: 0.5
	ImportantVeto          sdk.Dec       `json:"important_veto"`           // Minimum value of Veto votes to Total votes ratio for proposal to be vetoed. Initial value: 1/3
	ImportantParticipation sdk.Dec       `json:"important_participation"`  // Minimum ratio of voting power that must participate.
	ImportantPenalty       sdk.Dec       `json:"important_penalty"`        // Penalty if validator does not vote
	NormalDepositPeriod time.Duration `json:"normal_deposit_period"` // Maximum period for Atom holders to deposit on a proposal. Initial value: 2 months
	NormalMinDeposit    sdk.Coins     `json:"normal_min_deposit"`    // Minimum deposit for a normal proposal to enter voting period.
	NormalVotingPeriod  time.Duration `json:"normal_voting_period"`  // Length of the normal voting period.
	NormalMaxNum        uint64        `json:"normal_max_num"`        // Max number of concurrently active normal proposals.
	NormalThreshold     sdk.Dec       `json:"normal_threshold"`      // Minimum propotion of Yes votes for proposal to pass. Initial value: 0.5
	NormalVeto          sdk.Dec       `json:"normal_veto"`           // Minimum value of Veto votes to Total votes ratio for proposal to be vetoed. Initial value: 1/3
	NormalParticipation sdk.Dec       `json:"normal_participation"`  // Minimum ratio of voting power that must participate.
	NormalPenalty       sdk.Dec       `json:"normal_penalty"`        // Penalty if validator does not vote
	SystemHaltPeriod int64 `json:"system_halt_period"` // Number of blocks the system stays halted.
}
// String implements fmt.Stringer, rendering all governance parameters as a
// three-column (Critical / Important / Normal) table.
func (p GovParams) String() string {
	return fmt.Sprintf(`Gov Params:
  System Halt Period:  %v
  Proposal Parameter:   [Critical]    [Important]    [Normal]
  DepositPeriod:        %v    %v    %v
  MinDeposit:           %s    %s    %s
  Voting Period:        %v    %v    %v
  Max Num:              %v    %v    %v
  Threshold:            %s    %s    %s
  Veto:                 %s    %s    %s
  Participation:        %s    %s    %s
  Penalty:              %s    %s    %s
`, p.SystemHaltPeriod,
		p.CriticalDepositPeriod, p.ImportantDepositPeriod, p.NormalDepositPeriod,
		p.CriticalMinDeposit.String(), p.ImportantMinDeposit.String(), p.NormalMinDeposit.String(),
		p.CriticalVotingPeriod, p.ImportantVotingPeriod, p.NormalVotingPeriod,
		p.CriticalMaxNum, p.ImportantMaxNum, p.NormalMaxNum,
		p.CriticalThreshold.String(), p.ImportantThreshold.String(), p.NormalThreshold.String(),
		p.CriticalVeto.String(), p.ImportantVeto.String(), p.NormalVeto.String(),
		p.CriticalParticipation.String(), p.ImportantParticipation.String(), p.NormalParticipation.String(),
		p.CriticalPenalty.String(), p.ImportantPenalty.String(), p.NormalPenalty.String())
}
// GetParamSpace implements params.ParamStruct; gov params live in the
// "gov" paramspace.
func (p *GovParams) GetParamSpace() string {
	return DefaultParamSpace
}
// KeyValuePairs implements params.ParamSet, pairing each store key with a
// pointer to the GovParams field it is persisted from/into.
func (p *GovParams) KeyValuePairs() params.KeyValuePairs {
	return params.KeyValuePairs{
		{KeyCriticalDepositPeriod, &p.CriticalDepositPeriod},
		{KeyCriticalMinDeposit, &p.CriticalMinDeposit},
		{KeyCriticalVotingPeriod, &p.CriticalVotingPeriod},
		{KeyCriticalMaxNum, &p.CriticalMaxNum},
		{KeyCriticalThreshold, &p.CriticalThreshold},
		{KeyCriticalVeto, &p.CriticalVeto},
		{KeyCriticalParticipation, &p.CriticalParticipation},
		{KeyCriticalPenalty, &p.CriticalPenalty},
		{KeyImportantDepositPeriod, &p.ImportantDepositPeriod},
		{KeyImportantMinDeposit, &p.ImportantMinDeposit},
		{KeyImportantVotingPeriod, &p.ImportantVotingPeriod},
		{KeyImportantMaxNum, &p.ImportantMaxNum},
		{KeyImportantThreshold, &p.ImportantThreshold},
		{KeyImportantVeto, &p.ImportantVeto},
		{KeyImportantParticipation, &p.ImportantParticipation},
		{KeyImportantPenalty, &p.ImportantPenalty},
		{KeyNormalDepositPeriod, &p.NormalDepositPeriod},
		{KeyNormalMinDeposit, &p.NormalMinDeposit},
		{KeyNormalVotingPeriod, &p.NormalVotingPeriod},
		{KeyNormalMaxNum, &p.NormalMaxNum},
		{KeyNormalThreshold, &p.NormalThreshold},
		{KeyNormalVeto, &p.NormalVeto},
		{KeyNormalParticipation, &p.NormalParticipation},
		{KeyNormalPenalty, &p.NormalPenalty},
		{KeySystemHaltPeriod, &p.SystemHaltPeriod},
	}
}
// Validate implements params.ParamStruct. It is a stub: every key/value
// pair is accepted and the decoded value is always nil.
func (p *GovParams) Validate(key string, value string) (interface{}, sdk.Error) {
	return nil, nil
}
// StringFromBytes decodes the amino-JSON value for the given param key into
// the matching GovParams field and returns that field's string form.
//
// Fixes six copy-paste bugs in the original: all three *VotingPeriod cases
// returned the corresponding *DepositPeriod string, and all three *Veto
// cases returned the corresponding *Threshold string.
func (p *GovParams) StringFromBytes(cdc *codec.Codec, key string, bytes []byte) (string, error) {
	switch key {
	case string(KeyCriticalDepositPeriod):
		err := cdc.UnmarshalJSON(bytes, &p.CriticalDepositPeriod)
		return p.CriticalDepositPeriod.String(), err
	case string(KeyCriticalMinDeposit):
		err := cdc.UnmarshalJSON(bytes, &p.CriticalMinDeposit)
		return p.CriticalMinDeposit.String(), err
	case string(KeyCriticalVotingPeriod):
		err := cdc.UnmarshalJSON(bytes, &p.CriticalVotingPeriod)
		return p.CriticalVotingPeriod.String(), err
	case string(KeyCriticalMaxNum):
		err := cdc.UnmarshalJSON(bytes, &p.CriticalMaxNum)
		return strconv.FormatUint(p.CriticalMaxNum, 10), err
	case string(KeyCriticalThreshold):
		err := cdc.UnmarshalJSON(bytes, &p.CriticalThreshold)
		return p.CriticalThreshold.String(), err
	case string(KeyCriticalVeto):
		err := cdc.UnmarshalJSON(bytes, &p.CriticalVeto)
		return p.CriticalVeto.String(), err
	case string(KeyCriticalParticipation):
		err := cdc.UnmarshalJSON(bytes, &p.CriticalParticipation)
		return p.CriticalParticipation.String(), err
	case string(KeyCriticalPenalty):
		err := cdc.UnmarshalJSON(bytes, &p.CriticalPenalty)
		return p.CriticalPenalty.String(), err
	case string(KeyImportantDepositPeriod):
		err := cdc.UnmarshalJSON(bytes, &p.ImportantDepositPeriod)
		return p.ImportantDepositPeriod.String(), err
	case string(KeyImportantMinDeposit):
		err := cdc.UnmarshalJSON(bytes, &p.ImportantMinDeposit)
		return p.ImportantMinDeposit.String(), err
	case string(KeyImportantVotingPeriod):
		err := cdc.UnmarshalJSON(bytes, &p.ImportantVotingPeriod)
		return p.ImportantVotingPeriod.String(), err
	case string(KeyImportantMaxNum):
		err := cdc.UnmarshalJSON(bytes, &p.ImportantMaxNum)
		return strconv.FormatUint(p.ImportantMaxNum, 10), err
	case string(KeyImportantThreshold):
		err := cdc.UnmarshalJSON(bytes, &p.ImportantThreshold)
		return p.ImportantThreshold.String(), err
	case string(KeyImportantVeto):
		err := cdc.UnmarshalJSON(bytes, &p.ImportantVeto)
		return p.ImportantVeto.String(), err
	case string(KeyImportantParticipation):
		err := cdc.UnmarshalJSON(bytes, &p.ImportantParticipation)
		return p.ImportantParticipation.String(), err
	case string(KeyImportantPenalty):
		err := cdc.UnmarshalJSON(bytes, &p.ImportantPenalty)
		return p.ImportantPenalty.String(), err
	case string(KeyNormalDepositPeriod):
		err := cdc.UnmarshalJSON(bytes, &p.NormalDepositPeriod)
		return p.NormalDepositPeriod.String(), err
	case string(KeyNormalMinDeposit):
		err := cdc.UnmarshalJSON(bytes, &p.NormalMinDeposit)
		return p.NormalMinDeposit.String(), err
	case string(KeyNormalVotingPeriod):
		err := cdc.UnmarshalJSON(bytes, &p.NormalVotingPeriod)
		return p.NormalVotingPeriod.String(), err
	case string(KeyNormalMaxNum):
		err := cdc.UnmarshalJSON(bytes, &p.NormalMaxNum)
		return strconv.FormatUint(p.NormalMaxNum, 10), err
	case string(KeyNormalThreshold):
		err := cdc.UnmarshalJSON(bytes, &p.NormalThreshold)
		return p.NormalThreshold.String(), err
	case string(KeyNormalVeto):
		err := cdc.UnmarshalJSON(bytes, &p.NormalVeto)
		return p.NormalVeto.String(), err
	case string(KeyNormalParticipation):
		err := cdc.UnmarshalJSON(bytes, &p.NormalParticipation)
		return p.NormalParticipation.String(), err
	case string(KeyNormalPenalty):
		err := cdc.UnmarshalJSON(bytes, &p.NormalPenalty)
		return p.NormalPenalty.String(), err
	case string(KeySystemHaltPeriod):
		err := cdc.UnmarshalJSON(bytes, &p.SystemHaltPeriod)
		return strconv.FormatInt(p.SystemHaltPeriod, 10), err
	default:
		return "", fmt.Errorf("%s is not existed", key)
	}
}
// ReadOnly implements params.ParamStruct: gov params are not writable
// through the generic params interface.
func (p *GovParams) ReadOnly() bool {
	return true
}
// DefaultParams returns the default gov module parameters. Mainnet gets
// five-day voting periods and a 20000-block halt period; every other
// network gets short two-minute voting periods and a 60-block halt period
// for faster testing. (The original comment said "minting module" — a
// copy-paste artifact — and used an else branch after a terminating if.)
func DefaultParams() GovParams {
	var criticalMinDeposit, _ = sdk.IrisCoinType.ConvertToMinDenomCoin(fmt.Sprintf("%d%s", CRITICAL_DEPOSIT, sdk.Iris))
	var importantMinDeposit, _ = sdk.IrisCoinType.ConvertToMinDenomCoin(fmt.Sprintf("%d%s", IMPORTANT_DEPOSIT, sdk.Iris))
	var normalMinDeposit, _ = sdk.IrisCoinType.ConvertToMinDenomCoin(fmt.Sprintf("%d%s", NORMAL_DEPOSIT, sdk.Iris))
	if sdk.NetworkType == sdk.Mainnet {
		return GovParams{
			CriticalDepositPeriod:  time.Duration(sdk.Day),
			CriticalMinDeposit:     sdk.Coins{criticalMinDeposit},
			CriticalVotingPeriod:   time.Duration(sdk.FiveDays),
			CriticalMaxNum:         STABLE_CRITIACAL_NUM,
			CriticalThreshold:      sdk.NewDecWithPrec(75, 2),
			CriticalVeto:           sdk.NewDecWithPrec(33, 2),
			CriticalParticipation:  sdk.NewDecWithPrec(50, 2),
			CriticalPenalty:        sdk.ZeroDec(),
			ImportantDepositPeriod: time.Duration(sdk.Day),
			ImportantMinDeposit:    sdk.Coins{importantMinDeposit},
			ImportantVotingPeriod:  time.Duration(sdk.FiveDays),
			ImportantMaxNum:        DEFAULT_IMPORTANT_NUM,
			ImportantThreshold:     sdk.NewDecWithPrec(67, 2),
			ImportantVeto:          sdk.NewDecWithPrec(33, 2),
			ImportantParticipation: sdk.NewDecWithPrec(50, 2),
			ImportantPenalty:       sdk.ZeroDec(),
			NormalDepositPeriod:    time.Duration(sdk.Day),
			NormalMinDeposit:       sdk.Coins{normalMinDeposit},
			NormalVotingPeriod:     time.Duration(sdk.FiveDays),
			NormalMaxNum:           DEFAULT_NORMAL_NUM,
			NormalThreshold:        sdk.NewDecWithPrec(50, 2),
			NormalVeto:             sdk.NewDecWithPrec(33, 2),
			NormalParticipation:    sdk.NewDecWithPrec(50, 2),
			NormalPenalty:          sdk.ZeroDec(),
			SystemHaltPeriod:       20000,
		}
	}
	// Non-mainnet (testnet/local) defaults: same thresholds, short periods.
	return GovParams{
		CriticalDepositPeriod:  time.Duration(sdk.Day),
		CriticalMinDeposit:     sdk.Coins{criticalMinDeposit},
		CriticalVotingPeriod:   time.Duration(2 * time.Minute),
		CriticalMaxNum:         STABLE_CRITIACAL_NUM,
		CriticalThreshold:      sdk.NewDecWithPrec(75, 2),
		CriticalVeto:           sdk.NewDecWithPrec(33, 2),
		CriticalParticipation:  sdk.NewDecWithPrec(50, 2),
		CriticalPenalty:        sdk.ZeroDec(),
		ImportantDepositPeriod: time.Duration(sdk.Day),
		ImportantMinDeposit:    sdk.Coins{importantMinDeposit},
		ImportantVotingPeriod:  time.Duration(2 * time.Minute),
		ImportantMaxNum:        DEFAULT_IMPORTANT_NUM,
		ImportantThreshold:     sdk.NewDecWithPrec(67, 2),
		ImportantVeto:          sdk.NewDecWithPrec(33, 2),
		ImportantParticipation: sdk.NewDecWithPrec(50, 2),
		ImportantPenalty:       sdk.ZeroDec(),
		NormalDepositPeriod:    time.Duration(sdk.Day),
		NormalMinDeposit:       sdk.Coins{normalMinDeposit},
		NormalVotingPeriod:     time.Duration(2 * time.Minute),
		NormalMaxNum:           DEFAULT_NORMAL_NUM,
		NormalThreshold:        sdk.NewDecWithPrec(50, 2),
		NormalVeto:             sdk.NewDecWithPrec(33, 2),
		NormalParticipation:    sdk.NewDecWithPrec(50, 2),
		NormalPenalty:          sdk.ZeroDec(),
		SystemHaltPeriod:       60,
	}
}
// DefaultParamsForTest returns gov parameters tuned for unit tests: tiny
// 10-iris deposits and 30-second deposit/voting periods across all levels.
func DefaultParamsForTest() GovParams {
	var criticalMinDeposit, _ = sdk.IrisCoinType.ConvertToMinDenomCoin(fmt.Sprintf("%d%s", 10, sdk.Iris))
	var importantMinDeposit, _ = sdk.IrisCoinType.ConvertToMinDenomCoin(fmt.Sprintf("%d%s", 10, sdk.Iris))
	var normalMinDeposit, _ = sdk.IrisCoinType.ConvertToMinDenomCoin(fmt.Sprintf("%d%s", 10, sdk.Iris))
	return GovParams{
		CriticalDepositPeriod:  time.Duration(30 * time.Second),
		CriticalMinDeposit:     sdk.Coins{criticalMinDeposit},
		CriticalVotingPeriod:   time.Duration(30 * time.Second),
		CriticalMaxNum:         STABLE_CRITIACAL_NUM,
		CriticalThreshold:      sdk.NewDecWithPrec(857, 3),
		CriticalVeto:           sdk.NewDecWithPrec(334, 3),
		CriticalParticipation:  sdk.NewDecWithPrec(875, 3),
		CriticalPenalty:        sdk.ZeroDec(),
		ImportantDepositPeriod: time.Duration(30 * time.Second),
		ImportantMinDeposit:    sdk.Coins{importantMinDeposit},
		ImportantVotingPeriod:  time.Duration(30 * time.Second),
		ImportantMaxNum:        DEFAULT_IMPORTANT_NUM,
		ImportantThreshold:     sdk.NewDecWithPrec(8, 1),
		ImportantVeto:          sdk.NewDecWithPrec(334, 3),
		ImportantParticipation: sdk.NewDecWithPrec(834, 3),
		ImportantPenalty:       sdk.ZeroDec(),
		NormalDepositPeriod:    time.Duration(30 * time.Second),
		NormalMinDeposit:       sdk.Coins{normalMinDeposit},
		NormalVotingPeriod:     time.Duration(30 * time.Second),
		NormalMaxNum:           DEFAULT_NORMAL_NUM,
		NormalThreshold:        sdk.NewDecWithPrec(667, 3),
		NormalVeto:             sdk.NewDecWithPrec(334, 3),
		NormalParticipation:    sdk.NewDecWithPrec(75, 2),
		NormalPenalty:          sdk.ZeroDec(),
		SystemHaltPeriod:       60,
	}
}
// validateParams checks the deposit, voting and tallying procedures for all
// three proposal levels, the per-level proposal caps, and the system halt
// period. It returns the first validation failure encountered.
func validateParams(p GovParams) sdk.Error {
	if err := validateDepositProcedure(DepositProcedure{
		MaxDepositPeriod: p.CriticalDepositPeriod,
		MinDeposit:       p.CriticalMinDeposit,
	}, CRITICAL); err != nil {
		return err
	}
	if err := validatorVotingProcedure(VotingProcedure{
		VotingPeriod: p.CriticalVotingPeriod,
	}, CRITICAL); err != nil {
		return err
	}
	if err := validateTallyingProcedure(TallyingProcedure{
		Threshold:     p.CriticalThreshold,
		Veto:          p.CriticalVeto,
		Participation: p.CriticalParticipation,
		Penalty:       p.CriticalPenalty,
	}, CRITICAL); err != nil {
		return err
	}
	if err := validateDepositProcedure(DepositProcedure{
		MaxDepositPeriod: p.ImportantDepositPeriod,
		MinDeposit:       p.ImportantMinDeposit,
	}, IMPORTANT); err != nil {
		return err
	}
	if err := validatorVotingProcedure(VotingProcedure{
		VotingPeriod: p.ImportantVotingPeriod,
	}, IMPORTANT); err != nil {
		return err
	}
	if err := validateTallyingProcedure(TallyingProcedure{
		Threshold:     p.ImportantThreshold,
		Veto:          p.ImportantVeto,
		Participation: p.ImportantParticipation,
		Penalty:       p.ImportantPenalty,
	}, IMPORTANT); err != nil {
		return err
	}
	if err := validateDepositProcedure(DepositProcedure{
		MaxDepositPeriod: p.NormalDepositPeriod,
		MinDeposit:       p.NormalMinDeposit,
	}, NORMAL); err != nil {
		return err
	}
	if err := validatorVotingProcedure(VotingProcedure{
		VotingPeriod: p.NormalVotingPeriod,
	}, NORMAL); err != nil {
		return err
	}
	if err := validateTallyingProcedure(TallyingProcedure{
		Threshold:     p.NormalThreshold,
		Veto:          p.NormalVeto,
		Participation: p.NormalParticipation,
		Penalty:       p.NormalPenalty,
	}, NORMAL); err != nil {
		return err
	}
	if err := validateMaxNum(p); err != nil {
		return err
	}
	if p.SystemHaltPeriod < 0 || p.SystemHaltPeriod > 50000 {
		// The original wrapped this constant string in fmt.Sprintf with no
		// formatting verbs (flagged by go vet's printf check).
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidSystemHaltPeriod, "SystemHaltPeriod should be between [0, 50000]")
	}
	return nil
}
//______________________________________________________________________

// DepositProcedure bundles the deposit parameters of one proposal level
// for validation.
type DepositProcedure struct {
	MinDeposit       sdk.Coins
	MaxDepositPeriod time.Duration
}

// VotingProcedure bundles the voting-period parameter of one proposal level.
// NOTE(review): the json tag says "critical_voting_period" even though this
// struct is used for all three levels — likely a copy-paste artifact; left
// unchanged because altering a tag changes serialization.
type VotingProcedure struct {
	VotingPeriod time.Duration `json:"critical_voting_period"` // Length of the critical voting period.
}

// TallyingProcedure bundles the tallying parameters of one proposal level.
type TallyingProcedure struct {
	Threshold     sdk.Dec `json:"threshold"`     // Minimum propotion of Yes votes for proposal to pass. Initial value: 0.5
	Veto          sdk.Dec `json:"veto"`          // Minimum value of Veto votes to Total votes ratio for proposal to be vetoed. Initial value: 1/3
	Participation sdk.Dec `json:"participation"` // Minimum ratio of voting power that must participate.
	Penalty       sdk.Dec `json:"penalty"`       // Penalty if validator does not vote
}
// validateDepositProcedure checks the minimum deposit's denom and bounds and
// the deposit period's allowed range for the given proposal level.
func validateDepositProcedure(dp DepositProcedure, level string) sdk.Error {
	// Guard: the original indexed MinDeposit[0] unconditionally and
	// panicked on an empty coin list.
	if len(dp.MinDeposit) == 0 {
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidMinDepositDenom, level+"MinDeposit must not be empty")
	}
	if dp.MinDeposit[0].Denom != sdk.IrisAtto {
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidMinDepositDenom, fmt.Sprintf(level+"MinDeposit denom should be %s!", sdk.IrisAtto))
	}
	LowerBound, _ := sdk.IrisCoinType.ConvertToMinDenomCoin(fmt.Sprintf("%d%s", LOWER_BOUND_AMOUNT, sdk.Iris))
	UpperBound, _ := sdk.IrisCoinType.ConvertToMinDenomCoin(fmt.Sprintf("%d%s", UPPER_BOUND_AMOUNT, sdk.Iris))
	if dp.MinDeposit[0].Amount.LT(LowerBound.Amount) || dp.MinDeposit[0].Amount.GT(UpperBound.Amount) {
		// Original wrapped an already-built string in Sprintf with no verbs
		// (go vet printf); the message itself is unchanged.
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidMinDepositAmount, level+"MinDepositAmount"+dp.MinDeposit[0].String()+" should be larger than 10iris and less than 10000iris")
	}
	if dp.MaxDepositPeriod < sdk.TwentySeconds || dp.MaxDepositPeriod > sdk.ThreeDays {
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidDepositPeriod, fmt.Sprintf(level+"MaxDepositPeriod (%s) should be between 20s and %s", dp.MaxDepositPeriod.String(), sdk.ThreeDays.String()))
	}
	return nil
}
// validatorVotingProcedure checks that the voting period falls within the
// allowed [20s, 1 week] window for the given proposal level.
// (Name kept as-is — likely intended to be "validateVotingProcedure", but
// callers in this file reference the current spelling.)
func validatorVotingProcedure(vp VotingProcedure, level string) sdk.Error {
	withinBounds := vp.VotingPeriod >= sdk.TwentySeconds && vp.VotingPeriod <= sdk.Week
	if withinBounds {
		return nil
	}
	return sdk.NewError(params.DefaultCodespace, params.CodeInvalidVotingPeriod, fmt.Sprintf(level+"VotingPeriod (%s) should be between 20s and 1 week", vp.VotingPeriod.String()))
}
// validateTallyingProcedure checks that Threshold, Participation and Veto
// lie in (0,1) and Penalty lies in [0,1) for the given proposal level.
// The original wrapped each already-concatenated message in fmt.Sprintf
// with no formatting verbs (go vet printf); messages are byte-identical.
func validateTallyingProcedure(tp TallyingProcedure, level string) sdk.Error {
	if tp.Threshold.LTE(sdk.ZeroDec()) || tp.Threshold.GTE(sdk.NewDec(1)) {
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidThreshold, "Invalid "+level+" Threshold ( "+tp.Threshold.String()+" ) should be (0,1)")
	}
	if tp.Participation.LTE(sdk.ZeroDec()) || tp.Participation.GTE(sdk.NewDec(1)) {
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidParticipation, "Invalid "+level+" participation ( "+tp.Participation.String()+" ) should be (0,1)")
	}
	if tp.Veto.LTE(sdk.ZeroDec()) || tp.Veto.GTE(sdk.NewDec(1)) {
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidVeto, "Invalid "+level+" Veto ( "+tp.Veto.String()+" ) should be (0,1)")
	}
	if tp.Penalty.LT(sdk.ZeroDec()) || tp.Penalty.GTE(sdk.NewDec(1)) {
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidGovernancePenalty, "Invalid "+level+" GovernancePenalty ( "+tp.Penalty.String()+" ) should be [0,1)")
	}
	return nil
}
// validateMaxNum checks the per-level caps on active proposals: the
// critical cap is fixed, the important and normal caps have minimums.
func validateMaxNum(gp GovParams) sdk.Error {
	if gp.CriticalMaxNum != STABLE_CRITIACAL_NUM {
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidMaxProposalNum, fmt.Sprintf("The num of Max"+CRITICAL+"Proposal [%v] can only be %v.", gp.CriticalMaxNum, STABLE_CRITIACAL_NUM))
	}
	if gp.ImportantMaxNum < MIN_IMPORTANT_NUM {
		// Fix: the original printed gp.CriticalMaxNum here, masking the
		// actual offending Important value in the error message.
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidMaxProposalNum, fmt.Sprintf("The num of Max"+IMPORTANT+"Proposal [%v] should be no less than %v.", gp.ImportantMaxNum, MIN_IMPORTANT_NUM))
	}
	if gp.NormalMaxNum < MIN_NORMAL_NUM {
		return sdk.NewError(params.DefaultCodespace, params.CodeInvalidMaxProposalNum, fmt.Sprintf("The num of Max"+NORMAL+"Proposal [%v] should be no less than %v.", gp.NormalMaxNum, MIN_NORMAL_NUM))
	}
	return nil
}
|
//go:build mom_log
// +build mom_log

package main
import (
"fmt"
"io"
"net"
"os"
)
// main is a small TCP client: it dials :1774, sends one message (formatted
// by FormatMessage from the command and each remaining argument) per
// argument, and prints the 2-byte reply received after each send.
func main() {
	// Guard: the original indexed os.Args[1] unconditionally and panicked
	// when run with no arguments.
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: client <cmd> [args...]")
		os.Exit(1)
	}
	// conn, err := net.Dial("tcp", ":"+*port)
	conn, err := net.Dial("tcp", ":1774")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	cmd := os.Args[1]
	for i := 2; i < len(os.Args); i++ {
		arg := []byte(os.Args[i])
		line, _ := FormatMessage(cmd, arg)
		// The original ignored the Write error; a failed write would make
		// the following ReadFull block or fail confusingly.
		if _, err := conn.Write(line); err != nil {
			panic(err)
		}
		tbuf := make([]byte, 2)
		_, err := io.ReadFull(conn, tbuf)
		PanicOnError(err)
		fmt.Printf("'%s'\n", tbuf)
	}
}
|
package sort
// ShellSort sorts arr in place using Shell's method: insertion sort over
// interleaved sub-sequences whose gap shrinks by a factor of `scale` each
// round until it reaches 1.
//
// scale must be >= 2 for the gap to shrink; the original looped forever for
// scale == 1 (step /= 1 never changes step) and divided by zero for
// scale == 0. Such values are now clamped to 2, which preserves behavior
// for all previously-valid callers.
func ShellSort(arr *[]int, scale int) {
	if scale < 2 {
		scale = 2
	}
	for step := len(*arr) / scale; step > 0; step /= scale {
		// One insertion-sort pass over each gap-separated sub-sequence:
		// arr[n-1], arr[n-1+step], ..., for each start offset n in [1, step].
		for i := step; i < len(*arr); i++ {
			var j = i
			var arrI = (*arr)[i]
			for j-step >= 0 && arrI < (*arr)[j-step] {
				(*arr)[j] = (*arr)[j-step]
				j -= step
			}
			(*arr)[j] = arrI
		}
	}
}
|
package composer
import (
"log"
"encoding/json"
)
// JsonStringToComposerJson decodes raw composer.json bytes into a
// ComposerJson value, aborting the whole program if the payload is
// malformed (log.Fatalln exits with status 1).
func JsonStringToComposerJson(jsonString []byte) (ComposerJson) {
	var parsed ComposerJson
	if err := json.Unmarshal(jsonString, &parsed); err != nil {
		log.Fatalln("Unable to convert json string to struct:", err)
	}
	return parsed
}
|
package shader
import (
"image"
"image/draw"
_ "image/jpeg"
_ "image/png"
wrapper "github.com/akosgarai/opengl_playground/pkg/glwrapper"
"github.com/go-gl/mathgl/mgl32"
)
// Shader wraps a linked GL shader program together with the textures and
// light sources that are bound when drawing with it.
type Shader struct {
	shaderProgramId         uint32                   // handle of the linked GL program
	textures                []texture                // textures registered via AddTexture
	directionalLightSources []DirectionalLightSource // lights added via AddDirectionalLightSource
	pointLightSources       []PointLightSource
	spotLightSources        []SpotLightSource
	viewPosition            mgl32.Vec3
	viewPositionUniformName string
}
// NewShader returns a Shader. It's inputs are the filenames of the shaders.
// It reads the files and compiles them. The shaders are attached to the shader program.
// Any load or compile failure panics, so a returned *Shader always holds a
// linked program with empty texture/light collections.
func NewShader(vertexShaderPath, fragmentShaderPath string) *Shader {
	vertexShaderSource, err := LoadShaderFromFile(vertexShaderPath)
	if err != nil {
		panic(err)
	}
	vertexShader, err := CompileShader(vertexShaderSource, wrapper.VERTEX_SHADER)
	if err != nil {
		panic(err)
	}
	fragmentShaderSource, err := LoadShaderFromFile(fragmentShaderPath)
	if err != nil {
		panic(err)
	}
	fragmentShader, err := CompileShader(fragmentShaderSource, wrapper.FRAGMENT_SHADER)
	if err != nil {
		panic(err)
	}
	// Attach both compiled stages and link them into one program object.
	program := wrapper.CreateProgram()
	wrapper.AttachShader(program, vertexShader)
	wrapper.AttachShader(program, fragmentShader)
	wrapper.LinkProgram(program)
	return &Shader{
		shaderProgramId:         program,
		textures:                []texture{},
		directionalLightSources: []DirectionalLightSource{},
		pointLightSources:       []PointLightSource{},
		spotLightSources:        []SpotLightSource{},
		viewPosition:            mgl32.Vec3{0, 0, 0},
		viewPositionUniformName: "",
	}
}
// AddTexture loads the image at filePath, converts it to RGBA, uploads it as
// a 2D texture with the given wrap (R/S) and min/mag filter parameters,
// generates mipmaps, and registers the texture on the shader under
// uniformName. It panics when the file cannot be loaded or the converted
// image is not 4 bytes per pixel.
func (s *Shader) AddTexture(filePath string, wrapR, wrapS, minificationFilter, magnificationFilter int32, uniformName string) {
	img, err := loadImageFromFile(filePath)
	if err != nil {
		panic(err)
	}
	// re-draw into an RGBA image so the pixel data is in a known layout
	rgba := image.NewRGBA(img.Bounds())
	draw.Draw(rgba, rgba.Bounds(), img, image.Pt(0, 0), draw.Src)
	if rgba.Stride != rgba.Rect.Size().X*4 {
		panic("not 32 bit color")
	}
	// every texture added here is assigned to texture unit 0
	tex := texture{
		textureId: s.genTexture(),
		targetId: wrapper.TEXTURE_2D,
		texUnitId: 0,
		uniformName: uniformName,
	}
	// the texture must be bound before the parameter/upload calls below
	tex.Bind(wrapper.TEXTURE0)
	defer tex.UnBind()
	s.TexParameteri(wrapper.TEXTURE_WRAP_R, wrapR)
	s.TexParameteri(wrapper.TEXTURE_WRAP_S, wrapS)
	s.TexParameteri(wrapper.TEXTURE_MIN_FILTER, minificationFilter)
	s.TexParameteri(wrapper.TEXTURE_MAG_FILTER, magnificationFilter)
	wrapper.TexImage2D(tex.targetId, 0, wrapper.RGBA, int32(rgba.Rect.Size().X), int32(rgba.Rect.Size().Y), 0, wrapper.RGBA, uint32(wrapper.UNSIGNED_BYTE), wrapper.Ptr(rgba.Pix))
	// NOTE(review): core-GL glGenerateMipmap takes a texture *target*, but the
	// texture id is passed here — confirm the wrapper expects the id.
	wrapper.GenerateMipmap(tex.textureId)
	s.textures = append(s.textures, tex)
}
// genTexture asks OpenGL for a single new texture name and returns it.
func (s *Shader) genTexture() uint32 {
	var tex uint32
	wrapper.GenTextures(1, &tex)
	return tex
}
// AddDirectionalLightSource registers a directional light source. It takes
// the DirectionalLight model and a [4]string of shader uniform names, in
// this fixed order: 'DirectionUniformName', 'AmbientUniformName',
// 'DiffuseUniformName', 'SpecularUniformName'.
func (s *Shader) AddDirectionalLightSource(lightSource DirectionalLight, uniformNames [4]string) {
	s.directionalLightSources = append(s.directionalLightSources, DirectionalLightSource{
		LightSource:          lightSource,
		DirectionUniformName: uniformNames[0],
		AmbientUniformName:   uniformNames[1],
		DiffuseUniformName:   uniformNames[2],
		SpecularUniformName:  uniformNames[3],
	})
}
// AddPointLightSource registers a point light source. It takes the
// PointLight model and a [7]string of uniform names, in this fixed order:
// 'PositionUniformName', 'AmbientUniformName', 'DiffuseUniformName',
// 'SpecularUniformName', 'ConstantTermUniformName', 'LinearTermUniformName',
// 'QuadraticTermUniformName'.
func (s *Shader) AddPointLightSource(lightSource PointLight, uniformNames [7]string) {
	s.pointLightSources = append(s.pointLightSources, PointLightSource{
		LightSource:              lightSource,
		PositionUniformName:      uniformNames[0],
		AmbientUniformName:       uniformNames[1],
		DiffuseUniformName:       uniformNames[2],
		SpecularUniformName:      uniformNames[3],
		ConstantTermUniformName:  uniformNames[4],
		LinearTermUniformName:    uniformNames[5],
		QuadraticTermUniformName: uniformNames[6],
	})
}
// AddSpotLightSource sets up a spot light source. It takes a SpotLight input
// that contains the model related info, and a [10]string of uniform names,
// in this fixed order: 'PositionUniformName', 'DirectionUniformName',
// 'AmbientUniformName', 'DiffuseUniformName', 'SpecularUniformName',
// 'ConstantTermUniformName', 'LinearTermUniformName',
// 'QuadraticTermUniformName', 'CutoffUniformName', 'OuterCutoffUniformName'.
func (s *Shader) AddSpotLightSource(lightSource SpotLight, uniformNames [10]string) {
	var sSource SpotLightSource
	sSource.LightSource = lightSource
	sSource.PositionUniformName = uniformNames[0]
	sSource.DirectionUniformName = uniformNames[1]
	sSource.AmbientUniformName = uniformNames[2]
	sSource.DiffuseUniformName = uniformNames[3]
	sSource.SpecularUniformName = uniformNames[4]
	sSource.ConstantTermUniformName = uniformNames[5]
	sSource.LinearTermUniformName = uniformNames[6]
	sSource.QuadraticTermUniformName = uniformNames[7]
	sSource.CutoffUniformName = uniformNames[8]
	// bug fix: OuterCutoff previously re-read index 8 (the Cutoff name),
	// leaving the 10th provided name unused.
	sSource.OuterCutoffUniformName = uniformNames[9]
	s.spotLightSources = append(s.spotLightSources, sSource)
}
// SetViewPosition stores the camera/view position and the name of the shader
// uniform it should be written to by the light handlers.
func (s *Shader) SetViewPosition(position mgl32.Vec3, uniformName string) {
	s.viewPosition = position
	s.viewPositionUniformName = uniformName
}
// HasTexture reports whether at least one texture has been added to the
// shader. (Simplified from an if/return-true/return-false chain.)
func (s *Shader) HasTexture() bool {
	return len(s.textures) > 0
}
// Use is a wrapper for gl.UseProgram: it activates this shader's program.
func (s *Shader) Use() {
	wrapper.UseProgram(s.shaderProgramId)
}
// SetUniformMat4 uploads a 4x4 matrix to the uniform named uniformName via
// gl.UniformMatrix4fv (no transposition).
func (s *Shader) SetUniformMat4(uniformName string, mat mgl32.Mat4) {
	loc := wrapper.GetUniformLocation(s.shaderProgramId, uniformName)
	wrapper.UniformMatrix4fv(loc, 1, false, &mat[0])
}
// SetUniformMat3 uploads a 3x3 matrix to the uniform named uniformName via
// gl.UniformMatrix3fv (no transposition).
func (s *Shader) SetUniformMat3(uniformName string, mat mgl32.Mat3) {
	loc := wrapper.GetUniformLocation(s.shaderProgramId, uniformName)
	wrapper.UniformMatrix3fv(loc, 1, false, &mat[0])
}
// SetUniform3f writes three float components to the uniform named
// uniformName via gl.Uniform3f.
func (s *Shader) SetUniform3f(uniformName string, v1, v2, v3 float32) {
	loc := wrapper.GetUniformLocation(s.shaderProgramId, uniformName)
	wrapper.Uniform3f(loc, v1, v2, v3)
}
// SetUniform1f writes a single float to the uniform named uniformName via
// gl.Uniform1f.
func (s *Shader) SetUniform1f(uniformName string, v1 float32) {
	loc := wrapper.GetUniformLocation(s.shaderProgramId, uniformName)
	wrapper.Uniform1f(loc, v1)
}
// BindBufferData gets a float array as an input, generates a buffer,
// binds it as array buffer, and sets the input as buffer data.
// The generated buffer id is not retained, so the caller cannot delete or
// rebind this specific buffer later.
func (s *Shader) BindBufferData(bufferData []float32) {
	vertexBufferObject := wrapper.GenBuffers()
	wrapper.BindBuffer(wrapper.ARRAY_BUFFER, vertexBufferObject)
	wrapper.ArrayBufferData(bufferData)
}
// BindVertexArray generates a vertex array object and binds it.
// The generated VAO id is not retained by the shader.
func (s *Shader) BindVertexArray() {
	vertexArrayObject := wrapper.GenVertexArrays()
	wrapper.BindVertexArray(vertexArrayObject)
}
// VertexAttribPointer configures vertex attribute `index` as `size` float
// components with the given stride (bytes) and byte offset into the
// currently bound array buffer; values are not normalized.
func (s *Shader) VertexAttribPointer(index uint32, size, stride int32, offset int) {
	wrapper.VertexAttribPointer(index, size, wrapper.FLOAT, false, stride, wrapper.PtrOffset(offset))
}
// Close disables the first numOfVertexAttributes vertex attribute arrays,
// unbinds every texture registered on the shader, and unbinds the vertex
// array.
func (s *Shader) Close(numOfVertexAttributes int) {
	for i := 0; i < numOfVertexAttributes; i++ {
		wrapper.DisableVertexAttribArray(uint32(i))
	}
	// `for index, _ := range` simplified to the idiomatic single-variable form
	for index := range s.textures {
		s.textures[index].UnBind()
	}
	wrapper.BindVertexArray(0)
}
// lightHandler applies all light-related uniforms: directional, point and
// spot light sources, in that order.
func (s *Shader) lightHandler() {
	s.directionalLightHandler()
	s.pointLightHandler()
	s.spotLightHandler()
}
// directionalLightHandler sets up directional-light uniforms. It iterates
// over the directional sources and writes each uniform whose configured
// name is not empty.
func (s *Shader) directionalLightHandler() {
	for _, source := range s.directionalLightSources {
		if source.DirectionUniformName != "" {
			direction := source.LightSource.GetDirection()
			s.SetUniform3f(source.DirectionUniformName, direction.X(), direction.Y(), direction.Z())
		}
		if source.AmbientUniformName != "" {
			ambient := source.LightSource.GetAmbient()
			s.SetUniform3f(source.AmbientUniformName, ambient.X(), ambient.Y(), ambient.Z())
		}
		if source.DiffuseUniformName != "" {
			diffuse := source.LightSource.GetDiffuse()
			s.SetUniform3f(source.DiffuseUniformName, diffuse.X(), diffuse.Y(), diffuse.Z())
		}
		if source.SpecularUniformName != "" {
			specular := source.LightSource.GetSpecular()
			// bug fix: the specular value was previously written to the
			// diffuse uniform, clobbering it and leaving specular unset.
			s.SetUniform3f(source.SpecularUniformName, specular.X(), specular.Y(), specular.Z())
		}
	}
}
// pointLightHandler sets up point-light uniforms. It iterates over the point
// light sources and writes every uniform whose configured name is not empty.
func (s *Shader) pointLightHandler() {
	for _, source := range s.pointLightSources {
		if source.PositionUniformName != "" {
			position := source.LightSource.GetPosition()
			s.SetUniform3f(source.PositionUniformName, position.X(), position.Y(), position.Z())
		}
		if source.AmbientUniformName != "" {
			ambient := source.LightSource.GetAmbient()
			s.SetUniform3f(source.AmbientUniformName, ambient.X(), ambient.Y(), ambient.Z())
		}
		if source.DiffuseUniformName != "" {
			diffuse := source.LightSource.GetDiffuse()
			s.SetUniform3f(source.DiffuseUniformName, diffuse.X(), diffuse.Y(), diffuse.Z())
		}
		if source.SpecularUniformName != "" {
			specular := source.LightSource.GetSpecular()
			// bug fix: specular was previously written to the diffuse uniform
			s.SetUniform3f(source.SpecularUniformName, specular.X(), specular.Y(), specular.Z())
		}
		if source.ConstantTermUniformName != "" {
			s.SetUniform1f(source.ConstantTermUniformName, source.LightSource.GetConstantTerm())
		}
		if source.LinearTermUniformName != "" {
			s.SetUniform1f(source.LinearTermUniformName, source.LightSource.GetLinearTerm())
		}
		if source.QuadraticTermUniformName != "" {
			s.SetUniform1f(source.QuadraticTermUniformName, source.LightSource.GetQuadraticTerm())
		}
	}
}
// spotLightHandler sets up spot-light uniforms. It iterates over the spot
// light sources and writes every uniform whose configured name is not empty.
func (s *Shader) spotLightHandler() {
	for _, source := range s.spotLightSources {
		if source.DirectionUniformName != "" {
			direction := source.LightSource.GetDirection()
			s.SetUniform3f(source.DirectionUniformName, direction.X(), direction.Y(), direction.Z())
		}
		if source.PositionUniformName != "" {
			position := source.LightSource.GetPosition()
			s.SetUniform3f(source.PositionUniformName, position.X(), position.Y(), position.Z())
		}
		if source.AmbientUniformName != "" {
			ambient := source.LightSource.GetAmbient()
			s.SetUniform3f(source.AmbientUniformName, ambient.X(), ambient.Y(), ambient.Z())
		}
		if source.DiffuseUniformName != "" {
			diffuse := source.LightSource.GetDiffuse()
			s.SetUniform3f(source.DiffuseUniformName, diffuse.X(), diffuse.Y(), diffuse.Z())
		}
		if source.SpecularUniformName != "" {
			specular := source.LightSource.GetSpecular()
			// bug fix: specular was previously written to the diffuse uniform
			s.SetUniform3f(source.SpecularUniformName, specular.X(), specular.Y(), specular.Z())
		}
		if source.ConstantTermUniformName != "" {
			s.SetUniform1f(source.ConstantTermUniformName, source.LightSource.GetConstantTerm())
		}
		if source.LinearTermUniformName != "" {
			s.SetUniform1f(source.LinearTermUniformName, source.LightSource.GetLinearTerm())
		}
		if source.QuadraticTermUniformName != "" {
			s.SetUniform1f(source.QuadraticTermUniformName, source.LightSource.GetQuadraticTerm())
		}
		if source.CutoffUniformName != "" {
			s.SetUniform1f(source.CutoffUniformName, source.LightSource.GetCutoff())
		}
		if source.OuterCutoffUniformName != "" {
			s.SetUniform1f(source.OuterCutoffUniformName, source.LightSource.GetOuterCutoff())
		}
	}
}
// DrawPoints applies the light uniforms and draws numberOfPoints vertices
// as points.
func (s *Shader) DrawPoints(numberOfPoints int32) {
	s.lightHandler()
	wrapper.DrawArrays(wrapper.POINTS, 0, numberOfPoints)
}
// DrawTriangles binds each registered texture to its texture unit, wires the
// corresponding sampler uniform, applies the light uniforms, and draws
// numberOfPoints vertices as triangles.
func (s *Shader) DrawTriangles(numberOfPoints int32) {
	// `for index, _ := range` simplified to the idiomatic single-variable form
	for index := range s.textures {
		s.textures[index].Bind(textureMap(index))
		wrapper.Uniform1i(wrapper.GetUniformLocation(s.shaderProgramId, s.textures[index].uniformName), int32(s.textures[index].texUnitId-wrapper.TEXTURE0))
	}
	s.lightHandler()
	wrapper.DrawArrays(wrapper.TRIANGLES, 0, numberOfPoints)
}
// TexParameteri is a wrapper for gl.TexParameteri, always targeting
// TEXTURE_2D (the currently bound 2D texture).
func (s *Shader) TexParameteri(pName uint32, param int32) {
	wrapper.TexParameteri(wrapper.TEXTURE_2D, pName, param)
}
// TextureBorderColor is a wrapper for gl.TexParameterfv with
// TEXTURE_BORDER_COLOR as pname, applied to the bound TEXTURE_2D.
func (s *Shader) TextureBorderColor(color [4]float32) {
	wrapper.TexParameterfv(wrapper.TEXTURE_2D, wrapper.TEXTURE_BORDER_COLOR, &color[0])
}
|
package main
import "fmt"
// main demonstrates an expressionless switch: cases are evaluated top to
// bottom and only the first matching one runs. Since the first case is the
// constant (true && true), it always matches and every other case is
// unreachable.
func main(){
	switch {
	case (true && true): // always true: this branch always runs
		fmt.Println("migliorare in go")
	case (true && false): // unreachable
		fmt.Println("procrastinare")
	case (true || true): // unreachable
		fmt.Println("migliorare in python")
	case (true || false): // unreachable
		fmt.Println("rimandare")
	case (!true): // unreachable
		fmt.Println("studiare pentesting.. daje!")
	}
} |
package main
import (
"fmt"
"strconv"
log "github.com/sirupsen/logrus"
"texas_real_foods/pkg/timeseries-analyser"
"texas_real_foods/pkg/utils"
)
var (
	// cfg is the environment-style configuration map for this service.
	// NOTE(review): the postgres_url default embeds dev credentials and a
	// docker-machine IP — confirm these are overridden outside development.
	cfg = utils.NewConfigMapWithValues(
		map[string]string{
			"postgres_url": "postgres://postgres:postgres-dev@192.168.99.100:5432",
			"analysis_interval_minutes": "1", // minutes between analyser runs
			"trf_api_host": "0.0.0.0",
			"trf_api_port": "10999",
			"notify_api_host": "0.0.0.0",
			"notify_api_port": "10756",
		},
	)
)
// getNotifyAPIConfig assembles the connection settings for the downstream
// notify API from the config map, panicking when the configured port is not
// a valid integer.
func getNotifyAPIConfig() utils.APIDependencyConfig {
	portString := cfg.Get("notify_api_port")
	port, err := strconv.Atoi(portString)
	if err != nil {
		panic(fmt.Sprintf("received invalid api port for notify API '%s'", portString))
	}
	return utils.APIDependencyConfig{
		Host:     cfg.Get("notify_api_host"),
		Port:     &port,
		Protocol: "http",
	}
}
// getTRFAPIConfig assembles the connection settings for the downstream TRF
// API from the config map, panicking when the configured port is not a
// valid integer.
func getTRFAPIConfig() utils.APIDependencyConfig {
	portString := cfg.Get("trf_api_port")
	port, err := strconv.Atoi(portString)
	if err != nil {
		panic(fmt.Sprintf("received invalid api port for trf API '%s'", portString))
	}
	return utils.APIDependencyConfig{
		Host:     cfg.Get("trf_api_host"),
		Port:     &port,
		Protocol: "http",
	}
}
// main reads and validates the analysis interval from configuration, then
// builds and runs the timeseries analyser against the TRF and notify APIs.
func main() {
	log.SetLevel(log.DebugLevel)
	intervalString := cfg.Get("analysis_interval_minutes")
	// convert given interval from string to integer
	interval, err := strconv.Atoi(intervalString)
	if err != nil {
		panic(fmt.Sprintf("received invalid analysis interval '%s'", intervalString))
	}
	// generate new timeseries analyser and run it (Run blocks)
	timeseries_analyser.NewAnalyser(getTRFAPIConfig(),
		getNotifyAPIConfig(), interval).Run()
} |
package types
// TxResponse is the top-level JSON-RPC envelope for a transaction query.
type TxResponse struct {
	Jsonrpc string `json:"jsonrpc"` // JSON-RPC protocol version
	Result TxResult `json:"result"` // the transaction payload
	ID int `json:"id"` // request id echoed back by the server
}
// TxStatus mirrors the transaction status object; Ok is kept untyped since
// its JSON shape is not fixed.
type TxStatus struct {
	Ok interface{} `json:"Ok"`
}
// TxMeta carries execution metadata attached to a confirmed transaction.
type TxMeta struct {
	Err interface{} `json:"err"` // non-nil when the transaction failed
	Fee int `json:"fee"` // fee paid for the transaction
	InnerInstructions []interface{} `json:"innerInstructions"`
	LogMessages []interface{} `json:"logMessages"`
	PostBalances []interface{} `json:"postBalances"` // balances after execution
	PreBalances []interface{} `json:"preBalances"` // balances before execution
	Status TxStatus `json:"status"`
}
// TxHeader describes the account/signature layout of a transaction message.
type TxHeader struct {
	NumReadonlySignedAccounts int `json:"numReadonlySignedAccounts"`
	NumReadonlyUnsignedAccounts int `json:"numReadonlyUnsignedAccounts"`
	NumRequiredSignatures int `json:"numRequiredSignatures"`
}
// TxInstructions is a single instruction inside a transaction message.
type TxInstructions struct {
	Accounts []int `json:"accounts"` // indices into Message.AccountKeys
	Data string `json:"data"` // encoded instruction data
	ProgramIDIndex int `json:"programIdIndex"` // index of the program account
}
// Message is the signed payload of a transaction.
type Message struct {
	AccountKeys []string `json:"accountKeys"` // accounts referenced by the instructions
	Header TxHeader `json:"header"`
	Instructions []TxInstructions `json:"instructions"`
	RecentBlockhash string `json:"recentBlockhash"`
}
// Transaction pairs a message with the signatures authorizing it.
type Transaction struct {
	Message Message `json:"message"`
	Signatures []string `json:"signatures"`
}
// TxResult is the "result" member of a TxResponse.
type TxResult struct {
	Meta TxMeta `json:"meta"` // execution metadata
	Slot int `json:"slot"` // slot the transaction was processed in
	Transaction Transaction `json:"transaction"`
}
|
package ifelse
// AsmIf selects between a and b based on ok. The Go body is intentionally
// absent: the implementation is supplied by an assembly (.s) file in this
// package. (Go-equivalent signature: func If(ok bool, a, b int) int.)
func AsmIf(ok bool, a, b int) int |
package main
import (
"sync"
)
// manager holds the job registry; the embedded RWMutex guards the jobs map
// against concurrent access.
type manager struct {
	sync.RWMutex
	jobs map[string]*Job // keyed by Job.ID
}
// Manager is the process-wide entry point to jobs; all access must go
// through its mutex-guarded methods.
var Manager = manager{jobs: make(map[string]*Job)}
// addJob registers job under its ID, replacing any existing entry.
func (man *manager) addJob(job *Job) {
	man.Lock()
	man.jobs[job.ID] = job
	man.Unlock()
}
// getJob returns the job stored under id, or nil when it is unknown.
func (man *manager) getJob(id string) *Job {
	man.RLock()
	job := man.jobs[id]
	man.RUnlock()
	return job
}
// delJob removes the job stored under id; deleting a missing id is a no-op.
func (man *manager) delJob(id string) {
	man.Lock()
	delete(man.jobs, id)
	man.Unlock()
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// DataframeAnalyticsFieldSelection type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/ml/_types/DataframeAnalytics.ts#L55-L68
type DataframeAnalyticsFieldSelection struct {
	// FeatureType The feature type of this field for the analysis. May be categorical or
	// numerical.
	FeatureType *string `json:"feature_type,omitempty"`
	// IsIncluded Whether the field is selected to be included in the analysis.
	IsIncluded bool `json:"is_included"`
	// IsRequired Whether the field is required.
	IsRequired bool `json:"is_required"`
	// MappingTypes The mapping types of the field.
	MappingTypes []string `json:"mapping_types"`
	// Name The field name.
	Name string `json:"name"`
	// Reason The reason a field is not selected to be included in the analysis.
	Reason *string `json:"reason,omitempty"`
}

// UnmarshalJSON decodes the object token by token so that booleans sent
// either as JSON booleans or as strings ("true"/"false") are both accepted.
func (s *DataframeAnalyticsFieldSelection) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		switch t {
		case "feature_type":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			o := string(tmp[:])
			// values normally arrive quoted; fall back to the raw bytes when not
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.FeatureType = &o
		case "is_included":
			var tmp interface{}
			// bug fix: this decode error used to be silently dropped
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.IsIncluded = value
			case bool:
				s.IsIncluded = v
			}
		case "is_required":
			var tmp interface{}
			// bug fix: this decode error used to be silently dropped
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.IsRequired = value
			case bool:
				s.IsRequired = v
			}
		case "mapping_types":
			if err := dec.Decode(&s.MappingTypes); err != nil {
				return err
			}
		case "name":
			if err := dec.Decode(&s.Name); err != nil {
				return err
			}
		case "reason":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.Reason = &o
		}
	}
	return nil
}

// NewDataframeAnalyticsFieldSelection returns a DataframeAnalyticsFieldSelection.
func NewDataframeAnalyticsFieldSelection() *DataframeAnalyticsFieldSelection {
	r := &DataframeAnalyticsFieldSelection{}
	return r
}
|
package routers
import (
"github.com/astaxie/beego"
"test_proj/chatRoom/controllers"
)
// init registers the chat-room HTTP routes with beego. The third argument,
// where present, maps an HTTP method to a named controller method.
func init() {
	beego.Router("/", &controllers.MainController{})
	beego.Router("/chatRoom", &controllers.ServerController{})
	beego.Router("/logout", &controllers.ServerController{}, "get:Logout")
	beego.Router("/upload", &controllers.ServerController{}, "post:Upload")
	beego.Router("/chatRoom/WS", &controllers.ServerController{}, "get:WS")
	// history endpoint: typed path params (stamp string, num int)
	beego.Router("/hist/:stamp:string/:num:int", &controllers.ServerController{}, "get:Hist")
}
|
package main
import (
"fmt"
"path/filepath"
"github.com/jraams/aoc-2020/helpers"
)
func main() {
// Load input from file
inputPath, _ := filepath.Abs("input")
inputValues := helpers.GetInputValues(inputPath)
gameconsole := loadInstructions(inputValues)
// Part 1
_, accumulator := runInstructions(gameconsole.clone())
fmt.Printf("Solution part 1: value in accumulator right before an instruction is executed a second time is: %d", accumulator)
fmt.Println()
// Part 2
isFixed, accumulator := fixInstructions(gameconsole.clone())
if !isFixed {
fmt.Println("Couldn't fix instructions for part 2, are you sure you copied the input correctly?")
return
}
fmt.Printf("Solution part 2: value in accumulator after finishing all instructions is: %d", accumulator)
fmt.Println()
}
|
package baby
import (
"github.com/babyboy/leveldb"
"github.com/babyboy/log"
"github.com/babyboy/node"
"github.com/babyboy/p2p"
"github.com/babyboy/p2p/nat"
)
// Node wraps a full babyboy node behind a small binding-friendly API that
// only exposes plain strings and integers.
type Node struct {
	node *node.Node
}
// NewNode constructs a full node rooted at dataDir with fixed defaults:
// RPC on 0.0.0.0:8545, p2p listening on :3000 with at most 25 peers, and
// automatic NAT traversal. On failure an empty Node is returned with the
// error.
// NOTE(review): the `babyboy` package qualifier is not among the imports
// visible in this file — confirm it resolves via one of the imported paths.
func NewNode(dataDir string) (stack *Node, _ error) {
	cfg := babyboy.BabyConfig{
		Node: node.Config{
			DataDir: dataDir,
			RpcServer: "0.0.0.0:8545",
			P2P: p2p.Config{
				ListenAddr: ":3000",
				MaxPeers: 25,
				NAT: nat.Any(),
			},
		},
	}
	fullNode, err := node.New(&cfg.Node)
	if err != nil {
		return &Node{}, err
	}
	return &Node{fullNode}, nil
}
// Start launches the wrapped node. StartNode reports no failure here, so
// the returned error is always nil.
func (n *Node) Start() error {
	babyboy.StartNode(n.node)
	return nil
}
// NewAccount creates an account protected by password and returns its
// address string. On failure the error is only logged and the String() of
// the zero-value address is returned — callers cannot distinguish failure.
func (n *Node) NewAccount(password string) string {
	address, err := n.node.NewAccount(password)
	if err != nil {
		log.Error("NewAccount Error", err)
	}
	return address.String()
}
// NewJoint submits a transfer of amount to tx from the account identified by
// address/password and returns the resulting hash string. On failure the
// error is only logged and the String() of the zero-value hash is returned.
func (n *Node) NewJoint(address string, password string, tx string, amount int64) string {
	hash, err := n.node.NewJoint(address, password, tx, amount)
	if err != nil {
		log.Error("SendTx Error: ", err)
	}
	return hash.String()
}
// GetMaxLevel reports the highest level known to the protocol manager.
func (n *Node) GetMaxLevel() int64 {
	return n.node.ProtocolManager.GetMaxLevel()
}
// ListAccounts returns the first known account address as a string, or the
// empty string when the node has no accounts. (Only the first account is
// exposed through this binding.)
func (n *Node) ListAccounts() string {
	accounts := n.node.ListAccounts()
	if len(accounts) == 0 {
		return ""
	}
	return accounts[0].String()
}
// Wallets returns the number of wallets managed by the node.
func (n *Node) Wallets() int {
	return n.node.GetWallets()
}
// WalletBalance pairs an address with an amount.
// NOTE(review): both fields are unexported and the type is unused in this
// file — confirm it is still needed.
type WalletBalance struct {
	address string
	amount int64
}
// GetBalanceS returns the stable balance of address. On lookup failure the
// error is only logged.
// NOTE(review): if GetBalance returns a nil value on error, b.Stable would
// panic — verify the underlying API's error contract.
func (n *Node) GetBalanceS(address string) int64 {
	b, err := n.node.GetBalance(address)
	if err != nil {
		log.Error("GetBalance Error", err)
	}
	return b.Stable
}
// FilterLogsHandler is a callback interface implemented by binding
// consumers to receive error notifications.
type FilterLogsHandler interface {
	OnError(failure string)
}
// CallbackFunc exercises the callback plumbing: it invokes cb.OnError with
// the fixed message "Hello" and always returns 0.
func (n *Node) CallbackFunc(cb FilterLogsHandler) int64 {
	cb.OnError("Hello")
	return 0
}
|
package entity
import (
"github.com/mirzaakhena/danarisan/application/apperror"
"github.com/mirzaakhena/danarisan/domain/vo"
"time"
)
// Arah is the direction of a balance mutation ("direction" in Indonesian).
type Arah string
const (
	ArahBertambah = "BERTAMBAH" // balance increases
	ArahBerkurang = "BERKURANG" // balance decreases
)
// SaldoAkun is one entry in an account's running-balance ledger.
type SaldoAkun struct {
	BaseModel
	ArisanID vo.ArisanID `json:"-"` //
	PesertaID vo.PesertaID // participant the entry belongs to
	JurnalID vo.JurnalID // journal entry this record was derived from
	AkunType vo.AkunType //
	Tanggal time.Time // date taken from the journal entry
	Sequence int // ordering of entries for the same account
	Amount float64 // signed mutation (negative when decreasing)
	Balance float64 // running balance after applying Amount
}
// SaldoAkunRequest carries the inputs for NewSaldoAkun.
type SaldoAkunRequest struct {
	Jurnal *Jurnal // source journal entry (ids, date)
	AkunType vo.AkunType //
	Nominal float64 // unsigned mutation amount; must be > 0
	Arah Arah // direction: ArahBertambah or ArahBerkurang
	SaldoAkunSebelumnya *SaldoAkun // previous ledger entry, nil for the first one
	Sequence int //
}
// NewSaldoAkun creates a ledger entry from the request: it validates the
// nominal, signs the amount according to the direction (Arah), and computes
// the running balance from the previous entry (0 when there is none).
func NewSaldoAkun(req SaldoAkunRequest) (*SaldoAkun, error) {
	// bug fix: the validation was inverted — it rejected positive nominals.
	// The error constant means "nominal must be greater than zero".
	if req.Nominal <= 0 {
		return nil, apperror.NominalHarusLebihBesarDariNol
	}
	var obj SaldoAkun
	obj.ArisanID = req.Jurnal.ArisanID
	obj.PesertaID = req.Jurnal.PesertaID
	obj.JurnalID = req.Jurnal.ID
	obj.Tanggal = req.Jurnal.Tanggal
	obj.AkunType = req.AkunType
	obj.Sequence = req.Sequence
	// sign the amount by direction; an unrecognized Arah leaves Amount at 0
	if req.Arah == ArahBertambah {
		obj.Amount = req.Nominal
	} else if req.Arah == ArahBerkurang {
		obj.Amount = -req.Nominal
	}
	lastSaldoAkunBalance := 0.0
	if req.SaldoAkunSebelumnya != nil {
		lastSaldoAkunBalance = req.SaldoAkunSebelumnya.Balance
	}
	obj.Balance = lastSaldoAkunBalance + obj.Amount
	return &obj, nil
}
|
package server
import (
"context"
"encoding/json"
"fmt"
"strconv"
"github.com/thedevsaddam/gojsonq"
"github.com/volatiletech/sqlboiler/boil"
. "github.com/volatiletech/sqlboiler/queries/qm"
"golang.org/x/crypto/bcrypt"
"orm/models"
"orm/ormpb"
)
// BcryptCost is the bcrypt work factor (2^12 rounds) used when hashing.
const BcryptCost = 12
// ById fetches a single user by primary key and returns it JSON-encoded in
// the response. Returns the database or marshalling error unchanged.
func (s *UserBoilerOrm) ById(ctx context.Context, req *ormpb.UserId) (*ormpb.UserResponse, error) {
	fmt.Printf("UserId function was invoked with %v\n", req)
	id := req.GetId()
	// bug fix: use the caller's ctx (was context.Background()) so request
	// cancellation and deadlines propagate to the database query
	output, err := models.Users(Where("id = ?", id), OrderBy("id asc")).OneG(ctx)
	if err != nil {
		fmt.Printf("There is no entity with id %v\n", id)
		return nil, err
	}
	result, err := json.Marshal(output)
	if err != nil {
		fmt.Printf("error when marshal database call %+v\n", err)
		return nil, err
	}
	res := &ormpb.UserResponse{
		Result: string(result),
	}
	return res, nil
}
// List returns a page of users (limit/offset) JSON-encoded in the response.
func (s *UserBoilerOrm) List(ctx context.Context, req *ormpb.UserList) (*ormpb.UserResponse, error) {
	fmt.Printf("UserList function was invoked with %v\n", req)
	limit := req.GetLimit()
	offset := req.GetOffset()
	// bug fix: use the caller's ctx (was context.Background())
	output, err := models.Users(Limit(int(limit)), Offset(int(offset))).AllG(ctx)
	if err != nil {
		// NOTE(review): the query error is swallowed and an empty result is
		// returned; preserved as-is to keep the existing API behavior.
		res := &ormpb.UserResponse{
			Result: "",
		}
		return res, nil
	}
	result, err := json.Marshal(output)
	if err != nil {
		// bug fix: fmt.Errorf only builds an error value; print it instead
		fmt.Printf("error when marshal database call %+v\n", err)
		return nil, err
	}
	res := &ormpb.UserResponse{
		Result: string(result),
	}
	return res, nil
}
// Create inserts a new user built from the JSON payload in the request and
// returns the new user's ID as a string. The password is bcrypt-hashed
// before storage; a missing password is rejected with an error.
func (s *UserBoilerOrm) Create(ctx context.Context, req *ormpb.UserCreate) (*ormpb.UserResponse, error) {
	fmt.Printf("UserCreate function was invoked with %v\n", req)
	jsonStr := req.GetData()
	var user models.User
	profile := gojsonq.New().JSONString(jsonStr).Find("profile")
	fmt.Printf("profile equals %v\n", profile)
	if profile != nil {
		profileJSON, _ := json.Marshal(profile)
		user.Profile = profileJSON
	}
	role := gojsonq.New().JSONString(jsonStr).Find("role")
	if role != nil {
		// NOTE(review): bcrypt-hashing a role looks like a copy-paste of the
		// password branch — confirm whether the role should be stored verbatim.
		// Behavior kept unchanged here.
		user.Role = hashPassword(role.(string))
	}
	password := gojsonq.New().JSONString(jsonStr).Find("password")
	fmt.Printf("pass equals %v\n", password)
	if password == nil {
		// bug fix: return an error instead of panicking so the gRPC server
		// survives bad input
		return nil, fmt.Errorf("password cannot be empty")
	}
	user.Password = hashPassword(password.(string))
	user.Name = gojsonq.New().JSONString(jsonStr).Find("name").(string)
	user.Email = gojsonq.New().JSONString(jsonStr).Find("email").(string)
	// bug fix: use the caller's ctx (was context.Background())
	if err := user.InsertG(ctx, boil.Infer()); err != nil {
		fmt.Printf("error when database call %+v", err)
		return nil, err
	}
	res := &ormpb.UserResponse{
		Result: strconv.Itoa(user.ID),
	}
	return res, nil
}
// Update loads the user by id, overwrites its fields from the JSON payload,
// and persists the change. The response carries the JSON-encoded number of
// affected rows.
func (s *UserBoilerOrm) Update(ctx context.Context, req *ormpb.UserUpdate) (*ormpb.UserResponse, error) {
	fmt.Printf("UserUpdate function was invoked with %v\n", req)
	jsonStr := req.GetData()
	id := req.GetId()
	// bug fix: use the caller's ctx (was context.Background())
	user, err := models.Users(Where("id = ?", id), OrderBy("id asc")).OneG(ctx)
	if err != nil {
		fmt.Printf("user not found in function UserUpdate %+v", err)
		return nil, err
	}
	profile := gojsonq.New().JSONString(jsonStr).Find("profile")
	fmt.Printf("profile equals %v\n", profile)
	if profile != nil {
		profileJSON, _ := json.Marshal(profile)
		user.Profile = profileJSON
	}
	password := gojsonq.New().JSONString(jsonStr).Find("password")
	fmt.Printf("pass equals %v\n", password)
	if password != nil {
		user.Password = hashPassword(password.(string))
	}
	role := gojsonq.New().JSONString(jsonStr).Find("role")
	if role != nil {
		// NOTE(review): hashing the role mirrors Create — confirm intent.
		user.Role = hashPassword(role.(string))
	}
	user.Name = gojsonq.New().JSONString(jsonStr).Find("name").(string)
	user.Email = gojsonq.New().JSONString(jsonStr).Find("email").(string)
	rowsAff, err := user.UpdateG(ctx, boil.Infer())
	if err != nil {
		// bug fix: fmt.Errorf only builds an error value; print it instead
		fmt.Printf("error when updating user %+v\n", err)
		return nil, err
	}
	result, err := json.Marshal(rowsAff)
	if err != nil {
		fmt.Printf("error when marshal database call %+v\n", err)
		return nil, err
	}
	res := &ormpb.UserResponse{
		Result: string(result),
	}
	return res, nil
}
// Del deletes the user identified by id. The response carries the
// JSON-encoded number of affected rows.
func (s *UserBoilerOrm) Del(ctx context.Context, req *ormpb.UserDelete) (*ormpb.UserResponse, error) {
	fmt.Printf("UserDelete function was invoked with %v\n", req)
	id := req.GetId()
	// bug fix: use the caller's ctx (was context.Background())
	output, err := models.Users(Where("id = ?", id), OrderBy("id asc")).OneG(ctx)
	if err != nil {
		fmt.Printf("user not found in function UserDelete %+v", err)
		return nil, err
	}
	rowsAff, err := output.DeleteG(ctx)
	if err != nil {
		fmt.Printf("error when deleting user from database %+v", err)
		return nil, err
	}
	result, err := json.Marshal(rowsAff)
	if err != nil {
		// bug fix: fmt.Errorf only builds an error value; print it instead
		fmt.Printf("error when marshal database call %+v\n", err)
		return nil, err
	}
	res := &ormpb.UserResponse{
		Result: string(result),
	}
	return res, nil
}
// hashPassword returns the bcrypt hash of password using BcryptCost.
// It panics when hashing fails (bcrypt errors on invalid cost or
// over-long input).
func hashPassword(password string) string {
	hash, err := bcrypt.GenerateFromPassword([]byte(password), BcryptCost)
	if err != nil {
		panic("error when hashing password")
	}
	return string(hash)
}
|
package auth
import (
"fmt"
"log"
"net/http"
"github.com/gorilla/sessions"
)
var (
	// sessionStore is the package-wide cookie store; built once at init.
	sessionStore = createStore()
)
const (
	// DefaultSessionName is the name of the cookie which stores the session.
	DefaultSessionName = "chlorine_session"
)
// Session is a structure for creating handlers with session state.
type Session struct {
	session *sessions.Session // nil until InitSession is called
}
// SessionAuthentication is an interface for authenticating a music service
// from data held in a session.
type SessionAuthentication interface {
	GetAuth(*sessions.Session) (Authenticator, error)
}
// GetSession returns the cached session, or an error when InitSession has
// not been called on this Session yet.
func (s *Session) GetSession() (*sessions.Session, error) {
	if s.session != nil {
		return s.session, nil
	}
	return nil, fmt.Errorf("auth: session: session is not initialized")
}
// InitSession initializes the session object within the handler: it loads
// (or creates) the request's session via the package-level InitSession and
// caches it on the receiver.
func (s *Session) InitSession(r *http.Request) *sessions.Session {
	s.session = InitSession(r)
	return s.session
}
// createStore builds the package-wide cookie store. secretKey is declared
// elsewhere in this package.
// NOTE(review): HttpOnly is explicitly false, leaving the session cookie
// readable from JavaScript (XSS exposure) — confirm this is intentional.
func createStore() *sessions.CookieStore {
	store := sessions.NewCookieStore([]byte(secretKey))
	store.Options.HttpOnly = false
	store.Options.SameSite = http.SameSiteLaxMode
	store.Options.Path = "/"
	store.Options.MaxAge = 0 // session cookie: cleared when the browser closes
	return store
}
// InitSession loads the request's session from the store (creating a new
// one when absent) and returns it.
// NOTE(review): a Get error (e.g. a cookie signed with an old key) kills the
// whole process via log.Fatalf — consider returning a fresh session instead.
func InitSession(r *http.Request) *sessions.Session {
	session, err := sessionStore.Get(r, DefaultSessionName)
	if err != nil {
		log.Fatalf("auth: session: %s", err.Error())
	}
	return session
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package azurestack
import (
"bytes"
"context"
"github.com/Azure/aks-engine/pkg/armhelpers"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
azStorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/to"
)
// AzureStorageClient implements the StorageClient interface and wraps the
// Azure blob storage client.
type AzureStorageClient struct {
	client *azStorage.Client
}
// GetStorageClient returns an authenticated client for the specified
// account: it lists the account's keys and uses the first one to build a
// sovereign-cloud storage client for az.environment.
func (az *AzureClient) GetStorageClient(ctx context.Context, resourceGroup, accountName string) (armhelpers.AKSStorageClient, error) {
	keys, err := az.getStorageKeys(ctx, resourceGroup, accountName)
	if err != nil {
		return nil, err
	}
	client, err := azStorage.NewBasicClientOnSovereignCloud(accountName, to.String(keys[0].Value), az.environment)
	if err != nil {
		return nil, err
	}
	return &AzureStorageClient{
		client: &client,
	}, nil
}
// getStorageKeys fetches the access keys of a storage account.
// NOTE(review): storageKeysResult.Keys is dereferenced without a nil check —
// verify the SDK guarantees a non-nil Keys pointer on success.
func (az *AzureClient) getStorageKeys(ctx context.Context, resourceGroup, accountName string) ([]storage.AccountKey, error) {
	storageKeysResult, err := az.storageAccountsClient.ListKeys(ctx, resourceGroup, accountName)
	if err != nil {
		return nil, err
	}
	return *storageKeysResult.Keys, nil
}
// DeleteBlob deletes the specified blob from the given container.
// TODO(colemick): why doesn't SDK give a way to just delete a blob by URI?
// it's what it ends up doing internally anyway...
func (as *AzureStorageClient) DeleteBlob(vhdContainer, vhdBlob string, options *azStorage.DeleteBlobOptions) error {
	blob := getContainerRef(as.client, vhdContainer).GetBlobReference(vhdBlob)
	return blob.Delete(options)
}
// CreateContainer creates the blob container if it does not exist, reporting
// whether a new container was actually created.
func (as *AzureStorageClient) CreateContainer(containerName string, options *azStorage.CreateContainerOptions) (bool, error) {
	return getContainerRef(as.client, containerName).CreateIfNotExists(options)
}
// SaveBlockBlob writes the given bytes as a block blob named blobName in
// containerName.
func (as *AzureStorageClient) SaveBlockBlob(containerName, blobName string, b []byte, options *azStorage.PutBlobOptions) error {
	ref := getContainerRef(as.client, containerName).GetBlobReference(blobName)
	return ref.CreateBlockBlobFromReader(bytes.NewReader(b), options)
}
// getContainerRef resolves a container reference through the blob service.
func getContainerRef(client *azStorage.Client, containerName string) *azStorage.Container {
	return client.GetBlobService().GetContainerReference(containerName)
}
|
package controllers
import (
"net/http"
"strconv"
"example.com/banking/datalib"
"example.com/banking/helpers"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
)
// Logger is the package-level structured logger. It must be assigned by the
// application before the handlers run: it is used below without a nil check.
var Logger *logrus.Entry
// GetBalance returns the wallet identified by the :id path parameter.
// It responds 400 when the id is not an integer or the datastore lookup
// fails, and 200 with the wallet payload under "data" on success.
func GetBalance(c *gin.Context) {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Error converting walletId: " + c.Param("id")})
		return
	}
	walletJSON, errDb := datalib.GetWalletById(id)
	if errDb != nil {
		// Fixed message: was "Error Redis error occured ..." (duplicated
		// word and misspelling).
		c.JSON(http.StatusBadRequest, gin.H{"error": "Redis error occurred trying to get the data"})
		return
	}
	c.JSON(http.StatusOK, gin.H{"data": walletJSON})
}
// CreditWallet adds the posted "amount" form value to the wallet
// identified by the :id path parameter. It rejects non-numeric or negative
// amounts with 400 and responds 200 with {"data": "OK"} on success.
//
// NOTE(review): the read-modify-write on the wallet balance is not atomic;
// concurrent credits/debits can race — confirm whether the datastore layer
// provides the necessary locking.
func CreditWallet(c *gin.Context) {
	amountStr := c.PostForm("amount")
	amount, errAmount := strconv.ParseFloat(amountStr, 64)
	if errAmount != nil {
		Logger.Errorf("Error converting amount %v", errAmount)
		c.JSON(http.StatusBadRequest, gin.H{"error": "Error converting amount"})
		return
	}
	if amount < 0 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Error: negative amount"})
		return
	}
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Error converting walletId"})
		return
	}
	walletJSON, errDb := datalib.GetWalletById(id)
	if errDb != nil {
		// Fixed message: was "Error Redis error occured ..." (duplicated
		// word and misspelling).
		c.JSON(http.StatusBadRequest, gin.H{"error": "Redis error occurred trying to get the data"})
		return
	}
	wallet := helpers.ConvertJsonToWallet(walletJSON)
	wallet.Balance += amount
	if errDb = datalib.SetWallet(wallet); errDb != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Redis error updating the data"})
		return
	}
	c.JSON(http.StatusOK, gin.H{"data": "OK"})
}
// DebitWallet subtracts the posted "amount" form value from the wallet
// identified by the :id path parameter. It rejects non-numeric or negative
// amounts, refuses to overdraw the wallet, and responds 200 with
// {"data": "OK"} on success.
//
// NOTE(review): the read-modify-write on the wallet balance is not atomic;
// concurrent credits/debits can race — confirm whether the datastore layer
// provides the necessary locking.
func DebitWallet(c *gin.Context) {
	amountStr := c.PostForm("amount")
	amount, errAmount := strconv.ParseFloat(amountStr, 64)
	if errAmount != nil {
		Logger.Errorf("Error converting amount %v", errAmount)
		c.JSON(http.StatusBadRequest, gin.H{"error": "Error converting amount"})
		return
	}
	if amount < 0 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Error: negative amount"})
		return
	}
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Error converting walletId"})
		return
	}
	walletJSON, errDb := datalib.GetWalletById(id)
	if errDb != nil {
		// Fixed message: was "Error Redis error occured ..." (duplicated
		// word and misspelling).
		c.JSON(http.StatusBadRequest, gin.H{"error": "Redis error occurred trying to get the data"})
		return
	}
	wallet := helpers.ConvertJsonToWallet(walletJSON)
	if wallet.Balance < amount {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Balance is less than transaction's request"})
		return
	}
	wallet.Balance -= amount
	if errDb = datalib.SetWallet(wallet); errDb != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Redis error updating the data"})
		return
	}
	c.JSON(http.StatusOK, gin.H{"data": "OK"})
}
|
package main
import (
"encoding/json"
"net/http"
"net/http/httptest"
"net/url"
"path/filepath"
"strings"
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/teris-io/shortid"
"gopkg.in/mgo.v2/bson"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"urlshortener.api/models"
urlsh "urlshortener.api/urlshorten"
"urlshortener.api/urlshorten/db"
httpDelivery "urlshortener.api/urlshorten/delivery/http"
"urlshortener.api/urlshorten/usecase"
)
// HTTPTestSuite exercises the URL-shortener HTTP API end to end against an
// in-memory database.
type HTTPTestSuite struct {
	suite.Suite
	memDB  *db.MemoryDB // in-memory stand-in for the real datastore
	router *gin.Engine  // router under test, wired in SetupSuite
	sids   []string     // short IDs generated while seeding the DB
}
// FeedMemDB seeds the in-memory database with one URLShorten record and
// remembers the generated short ID for later assertions in TestGet.
func (suite *HTTPTestSuite) FeedMemDB() {
	// A throwaway gin context satisfies the Store method's signature.
	ctx, _ := gin.CreateTestContext(nil)

	// Keep the generated short ID so tests can build the success URL.
	sid, _ := shortid.Generate()
	suite.sids = []string{sid}

	record := &models.URLShorten{
		ID:        bson.NewObjectId(),
		LongURL:   "https://stackoverflow.com/questions/1760757/how-to-efficiently-concatenate-strings-in-go",
		ShortURL:  "http://192.168.99.100:8080/" + sid,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if _, err := suite.memDB.Store(ctx, record); err != nil {
		suite.T().Error(err)
	}
}
// SetupSuite wires the in-memory DB, use-case layer, and HTTP router
// together once before any test runs, then seeds test data.
func (suite *HTTPTestSuite) SetupSuite() {
	suite.memDB = db.NewMemoryDB()
	suite.router = SetupRouter(httpDelivery.NewUrlHandler(usecase.NewUrlService(suite.memDB)))
	suite.FeedMemDB()
}
// TearDownSuite releases the suite's resources after every test has run.
func (suite *HTTPTestSuite) TearDownSuite() {
	suite.memDB.Close()
	suite.sids = nil
	suite.router = nil
}
// TestGet replays the GET test cases from the JSON fixture file against the
// router and checks both error and success responses.
func (suite *HTTPTestSuite) TestGet() {
	// load testcases from json file
	path := filepath.Join("urlshorten/test-fixtures", "router_testcases.json")
	tcases := []TestResponse{}
	err := urlsh.ParseFile(path, &tcases)
	if err != nil {
		suite.T().Error(err)
	}
	for _, tc := range tcases {
		var req *http.Request
		if tc.TCID == 3 && len(suite.sids) > 0 { // this TestCase represents the successful response
			// Append the short ID generated in FeedMemDB so the lookup succeeds.
			req, _ = http.NewRequest("GET", tc.TestCase+suite.sids[0], nil)
		} else {
			req, _ = http.NewRequest("GET", tc.TestCase, nil)
		}
		w := httptest.NewRecorder()
		suite.router.ServeHTTP(w, req)
		if w.Code != 200 {
			// Error path: compare status and message against the fixture's
			// expectations.
			var errResp map[string]interface{}
			if err := json.Unmarshal(w.Body.Bytes(), &errResp); err != nil {
				suite.T().Fatal(err)
			}
			assertEqualWithColors(suite, tc.TCID, tc.TestCase, tc.Expected["status"], errResp["status"])
			assertEqualWithColors(suite, tc.TCID, tc.TestCase, tc.Expected["message"], errResp["message"])
		} else {
			// Success path: body is {"data": <URLShorten>}; verify it matches
			// the record seeded in FeedMemDB.
			var successResp map[string]models.URLShorten
			if err := json.Unmarshal(w.Body.Bytes(), &successResp); err != nil {
				suite.T().Fatal(err)
			}
			if len(suite.sids) > 0 {
				assert.Equal(suite.T(), 200, w.Code)
				assert.Equal(suite.T(), "http://192.168.99.100:8080/"+suite.sids[0], successResp["data"].ShortURL)
				assert.True(suite.T(), successResp["data"].ID.Valid())
			}
		}
	}
}
// TestInsert posts a new long URL and verifies that the API responds with
// 201 Created and that the in-memory DB grows to two records (one seeded
// in FeedMemDB plus the one just inserted).
func (suite *HTTPTestSuite) TestInsert() {
	form := url.Values{}
	form.Add("longUrl", "http://example.com/")
	req, _ := http.NewRequest("POST", "/api/v1/url", strings.NewReader(form.Encode()))
	req.PostForm = form
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	w := httptest.NewRecorder()
	suite.router.ServeHTTP(w, req)
	assert.Equal(suite.T(), 201, w.Code)
	// testify's Equal signature is (t, expected, actual); the original had
	// the arguments swapped, which garbles the failure message.
	assert.Equal(suite.T(), 2, suite.memDB.Size()) // now, memoryDB has 2 records
}
func TestRunHTTPTestSuite(t *testing.T) {
httpTestsuite := new(HTTPTestSuite)
suite.Run(t, httpTestsuite)
}
// TestResponse describes one table-driven test case loaded from the JSON
// fixture file.
type TestResponse struct {
	TCID     int                    // test case identifier
	TestCase string                 // request path to exercise
	Expected map[string]interface{} // expected response fields keyed by name
}
// assertEqualWithColors wraps assert.Equalf with an ANSI-colorized failure
// message that includes the test case ID and request path for easier
// diagnosis.
func assertEqualWithColors(s *HTTPTestSuite, tcid int, tc, expected, actual interface{}) {
	assert.Equalf(s.T(), expected, actual, "\n\033[36mTCID\033[0m: %#v \t \033[34mTC\033[0m: \033[35m%#v\033[0m\n\n\033[31m- Expected: %#v \n\033[32m+ Actual: %#v\033[0m \n\n", tcid, tc, expected, actual)
}
|
/*
* @lc app=leetcode.cn id=1880 lang=golang
*
* [1880] 检查某单词是否等于两单词之和
*/
// @lc code=start
// package leetcode
// stoi maps each letter of word to a digit ('a' -> 0 ... 'j' -> 9) and
// interprets the resulting digit sequence as a base-10 integer.
func stoi(word string) int {
	ret := 0
	for i := 0; i < len(word); i++ {
		// Horner's rule: shift the accumulated value first, then add the
		// new digit. The original added first and multiplied after, which
		// left every result 10x too large (e.g. "acb" yielded 210, not 21).
		ret = ret*10 + int(word[i]-'a')
	}
	return ret
}
// isSumEqual reports whether the letter-encoded numbers of firstWord and
// secondWord add up to the letter-encoded number of targetWord.
func isSumEqual(firstWord string, secondWord string, targetWord string) bool {
	sum := stoi(firstWord) + stoi(secondWord)
	return sum == stoi(targetWord)
}
// @lc code=end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.