text stringlengths 11 4.05M |
|---|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"fmt"
"strings"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
"github.com/cockroachdb/errors"
)
// dropIndexNode is the planNode implementing DROP INDEX.
type dropIndexNode struct {
	n *tree.DropIndex // the parsed DROP INDEX statement
	// idxNames holds the (table, index) pairs resolved during planning.
	// With IF EXISTS, entries whose table could not be found are omitted.
	idxNames []fullIndexName
}
// DropIndex drops an index.
// Privileges: CREATE on table.
//   Notes: postgres allows only the index owner to DROP an index.
//          mysql requires the INDEX privilege on the table.
func (p *planner) DropIndex(ctx context.Context, n *tree.DropIndex) (planNode, error) {
	if err := checkSchemaChangeEnabled(
		ctx,
		p.ExecCfg(),
		"DROP INDEX",
	); err != nil {
		return nil, err
	}

	// Keep a track of the indexes that exist to check. When the IF EXISTS
	// options are provided, we will simply not include any indexes that
	// don't exist and continue execution.
	idxNames := make([]fullIndexName, 0, len(n.IndexList))
	for _, index := range n.IndexList {
		// With IF EXISTS a missing table is tolerated (requireTable=false)
		// and surfaces as a nil descriptor rather than an error.
		tn, tableDesc, err := expandMutableIndexName(ctx, p, index, !n.IfExists /* requireTable */)
		if err != nil {
			// Error or table did not exist.
			return nil, err
		}
		if tableDesc == nil {
			// IfExists specified and table did not exist.
			continue
		}

		// Dropping an index requires CREATE on the owning table.
		if err := p.CheckPrivilege(ctx, tableDesc, privilege.CREATE); err != nil {
			return nil, err
		}

		idxNames = append(idxNames, fullIndexName{tn: tn, idxName: index.Index})
	}
	return &dropIndexNode{n: n, idxNames: idxNames}, nil
}
// ReadingOwnWrites implements the planNodeReadingOwnWrites interface.
// This is because DROP INDEX performs multiple KV operations on descriptors
// and expects to see its own writes.
func (n *dropIndexNode) ReadingOwnWrites() {}
// startExec drops each index named in the statement in order. For a
// hash-sharded index it also drops the backing shard column afterwards,
// when no other index still references it.
func (n *dropIndexNode) startExec(params runParams) error {
	telemetry.Inc(sqltelemetry.SchemaChangeDropCounter("index"))
	if n.n.Concurrently {
		// CONCURRENTLY is accepted for Postgres compatibility but is a no-op.
		params.p.BufferClientNotice(
			params.ctx,
			pgnotice.Newf("CONCURRENTLY is not required as all indexes are dropped concurrently"),
		)
	}
	ctx := params.ctx
	for _, index := range n.idxNames {
		// Need to retrieve the descriptor again for each index name in
		// the list: when two or more index names refer to the same table,
		// the mutation list and new version number created by the first
		// drop need to be visible to the second drop.
		tableDesc, err := params.p.ResolveMutableTableDescriptor(
			ctx, index.tn, true /*required*/, tree.ResolveRequireTableOrViewDesc)
		if sqlerrors.IsUndefinedRelationError(err) {
			// Somehow the descriptor we had during planning is not there
			// any more.
			return errors.NewAssertionErrorWithWrappedErrf(err,
				"table descriptor for %q became unavailable within same txn",
				tree.ErrString(index.tn))
		}
		if err != nil {
			return err
		}
		// Plain (non-materialized) views cannot carry indexes.
		if tableDesc.IsView() && !tableDesc.MaterializedView() {
			return pgerror.Newf(pgcode.WrongObjectType, "%q is not a table or materialized view", tableDesc.Name)
		}

		// If we couldn't find the index by name, this is either a legitimate error or
		// this statement contains an 'IF EXISTS' qualifier. Both of these cases are
		// handled by `dropIndexByName()` below so we just ignore the error here.
		idx, _ := tableDesc.FindIndexWithName(string(index.idxName))
		var shardColName string
		// If we're dropping a sharded index, record the name of its shard column to
		// potentially drop it if no other index refers to it.
		if idx != nil && idx.IsSharded() && !idx.Dropped() {
			shardColName = idx.GetShardColumnName()
		}

		if err := params.p.dropIndexByName(
			ctx, index.tn, index.idxName, tableDesc, n.n.IfExists, n.n.DropBehavior, checkIdxConstraint,
			tree.AsStringWithFQNames(n.n, params.Ann()),
		); err != nil {
			return err
		}

		if shardColName != "" {
			if err := n.maybeDropShardColumn(params, tableDesc, shardColName); err != nil {
				return err
			}
		}
	}
	return nil
}
// dropShardColumnAndConstraint drops the given shard column and its associated check
// constraint, queues the column drop as a schema-change mutation, and writes
// the updated descriptor.
func (n *dropIndexNode) dropShardColumnAndConstraint(
	params runParams, tableDesc *tabledesc.Mutable, shardColDesc *descpb.ColumnDescriptor,
) error {
	// Filter out check constraints that reference the shard column.
	// NOTE(review): validChecks shares its backing array with
	// tableDesc.Checks, so entries are overwritten in place while iterating
	// AllActiveAndInactiveChecks() — presumably safe because the kept set is
	// never longer than the original slice; confirm against the semantics of
	// AllActiveAndInactiveChecks.
	validChecks := tableDesc.Checks[:0]
	for _, check := range tableDesc.AllActiveAndInactiveChecks() {
		if used, err := tableDesc.CheckConstraintUsesColumn(check, shardColDesc.ID); err != nil {
			return err
		} else if used {
			// A constraint still being validated cannot be dropped here.
			if check.Validity == descpb.ConstraintValidity_Validating {
				return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState,
					"referencing constraint %q in the middle of being added, try again later", check.Name)
			}
		} else {
			validChecks = append(validChecks, check)
		}
	}

	if len(validChecks) != len(tableDesc.Checks) {
		tableDesc.Checks = validChecks
	}

	// Queue the column drop and remove the column from the public columns.
	tableDesc.AddColumnMutation(shardColDesc, descpb.DescriptorMutation_DROP)
	for i := range tableDesc.Columns {
		if tableDesc.Columns[i].ID == shardColDesc.ID {
			// Note the third slice parameter which will force a copy of the backing
			// array if the column being removed is not the last column.
			tableDesc.Columns = append(tableDesc.Columns[:i:i],
				tableDesc.Columns[i+1:]...)
			break
		}
	}

	if err := tableDesc.AllocateIDs(params.ctx); err != nil {
		return err
	}
	mutationID := tableDesc.ClusterVersion.NextMutationID
	if err := params.p.writeSchemaChange(
		params.ctx, tableDesc, mutationID, tree.AsStringWithFQNames(n.n, params.Ann()),
	); err != nil {
		return err
	}
	return nil
}
// maybeDropShardColumn drops the given shard column, if there aren't any other indexes
// referring to it.
//
// Assumes that the given index is sharded.
func (n *dropIndexNode) maybeDropShardColumn(
	params runParams, tableDesc *tabledesc.Mutable, shardColName string,
) error {
	shardColDesc, err := tableDesc.FindColumnWithName(tree.Name(shardColName))
	if err != nil {
		return err
	}
	// Nothing to do if the column is already being dropped.
	if shardColDesc.Dropped() {
		return nil
	}
	// Keep the column if any non-dropped index still contains it.
	if catalog.FindNonDropIndex(tableDesc, func(otherIdx catalog.Index) bool {
		return otherIdx.ContainsColumnID(shardColDesc.GetID())
	}) != nil {
		return nil
	}
	return n.dropShardColumnAndConstraint(params, tableDesc, shardColDesc.ColumnDesc())
}
// Next implements the planNode interface; DROP INDEX produces no result rows.
func (*dropIndexNode) Next(runParams) (bool, error) { return false, nil }

// Values implements the planNode interface; there are no output values.
func (*dropIndexNode) Values() tree.Datums { return tree.Datums{} }

// Close implements the planNode interface; there is nothing to release.
func (*dropIndexNode) Close(context.Context) {}
// fullIndexName pairs a resolved table name with one index name from the
// statement's index list.
type fullIndexName struct {
	tn      *tree.TableName
	idxName tree.UnrestrictedName
}
// dropIndexConstraintBehavior is used when dropping an index to signal whether
// it is okay to do so even if it is in use as a constraint (outbound FK or
// unique). This is a subset of what is implied by DropBehavior CASCADE, which
// implies dropping *all* dependencies. This is used e.g. when the element
// constrained is being dropped anyway.
type dropIndexConstraintBehavior bool

const (
	// checkIdxConstraint refuses to drop an index still backing a constraint.
	checkIdxConstraint dropIndexConstraintBehavior = true
	// ignoreIdxConstraint drops the index even if it backs a constraint.
	ignoreIdxConstraint dropIndexConstraintBehavior = false
)
// dropIndexByName drops the index named idxName from tableDesc by queueing a
// DROP mutation on the descriptor, after untangling everything that hangs off
// the index: FK constraints and back-references, interleaves, dependent
// views, zone-config subzones, the index comment, and manually split ranges.
//
// ifExists makes a missing index a no-op instead of an error. behavior is
// the statement's CASCADE/RESTRICT clause. constraintBehavior signals
// whether dropping a constraint-backing index is acceptable even without
// CASCADE (see dropIndexConstraintBehavior). jobDesc is the human-readable
// description recorded on the resulting schema-change job.
func (p *planner) dropIndexByName(
	ctx context.Context,
	tn *tree.TableName,
	idxName tree.UnrestrictedName,
	tableDesc *tabledesc.Mutable,
	ifExists bool,
	behavior tree.DropBehavior,
	constraintBehavior dropIndexConstraintBehavior,
	jobDesc string,
) error {
	idxI, err := tableDesc.FindIndexWithName(string(idxName))
	if err != nil {
		// Only index names of the form "table@idx" throw an error here if they
		// don't exist.
		if ifExists {
			// Noop.
			return nil
		}
		// Index does not exist, but we want it to: error out.
		return pgerror.WithCandidateCode(err, pgcode.UndefinedObject)
	}
	if idxI.Dropped() {
		// Already queued for drop in this transaction: nothing left to do.
		return nil
	}
	idx := idxI.IndexDesc()
	// An implicitly-created unique index backs a unique constraint; refuse to
	// drop it without CASCADE (unless the caller opted out of the check).
	if idx.Unique && behavior != tree.DropCascade && constraintBehavior != ignoreIdxConstraint && !idx.CreatedExplicitly {
		return errors.WithHint(
			pgerror.Newf(pgcode.DependentObjectsStillExist,
				"index %q is in use as unique constraint", idx.Name),
			"use CASCADE if you really want to drop it.",
		)
	}

	// Check if requires CCL binary for eventual zone config removal. Only
	// necessary for the system tenant, because secondary tenants do not have
	// zone configs for individual objects.
	if p.ExecCfg().Codec.ForSystemTenant() {
		_, zone, _, err := GetZoneConfigInTxn(ctx, p.txn, config.SystemTenantObjectID(tableDesc.ID), nil, "", false)
		if err != nil {
			return err
		}

		for _, s := range zone.Subzones {
			if s.IndexID != uint32(idx.ID) {
				// Some other index or partition still carries a subzone config,
				// so regenerating subzone spans must be possible; surface a
				// clear error if this (non-CCL) binary cannot do it.
				_, err = GenerateSubzoneSpans(
					p.ExecCfg().Settings,
					p.ExecCfg().ClusterID(),
					p.ExecCfg().Codec,
					tableDesc,
					zone.Subzones,
					false, /* newSubzones */
				)
				if sqlerrors.IsCCLRequiredError(err) {
					return sqlerrors.NewCCLRequiredError(fmt.Errorf("schema change requires a CCL binary "+
						"because table %q has at least one remaining index or partition with a zone config",
						tableDesc.Name))
				}
				break
			}
		}
	}

	// Remove all foreign key references and backreferences from the index.
	// TODO (lucy): This is incorrect for two reasons: The first is that FKs won't
	// be restored if the DROP INDEX is rolled back, and the second is that
	// validated constraints should be dropped in the schema changer in multiple
	// steps to avoid inconsistencies. We should be queuing a mutation to drop the
	// FK instead. The reason why the FK is removed here is to keep the index
	// state consistent with the removal of the reference on the other table
	// involved in the FK, in case of rollbacks (#38733).
	// TODO (rohany): switching all the checks from checking the legacy ID's to
	// checking if the index has a prefix of the columns needed for the foreign
	// key might result in some false positives for this index while it is in
	// a mixed version cluster, but we have to remove all reads of the legacy
	// explicit index fields.

	// Construct a list of all the remaining indexes, so that we can see if there
	// is another index that could replace the one we are deleting for a given
	// foreign key constraint.
	remainingIndexes := make([]*descpb.IndexDescriptor, 1, len(tableDesc.ActiveIndexes()))
	remainingIndexes[0] = tableDesc.GetPrimaryIndex().IndexDesc()
	for _, index := range tableDesc.PublicNonPrimaryIndexes() {
		if index.GetID() != idx.ID {
			remainingIndexes = append(remainingIndexes, index.IndexDesc())
		}
	}

	// indexHasReplacementCandidate runs isValidIndex on each index in remainingIndexes and returns
	// true if at least one index satisfies isValidIndex.
	indexHasReplacementCandidate := func(isValidIndex func(*descpb.IndexDescriptor) bool) bool {
		foundReplacement := false
		for _, index := range remainingIndexes {
			if isValidIndex(index) {
				foundReplacement = true
				break
			}
		}
		return foundReplacement
	}

	// Check for foreign key mutations referencing this index.
	for _, m := range tableDesc.Mutations {
		if c := m.GetConstraint(); c != nil &&
			c.ConstraintType == descpb.ConstraintToUpdate_FOREIGN_KEY &&
			// If the index being deleted could be used as a index for this outbound
			// foreign key mutation, then make sure that we have another index that
			// could be used for this mutation.
			idx.IsValidOriginIndex(c.ForeignKey.OriginColumnIDs) &&
			!indexHasReplacementCandidate(func(idx *descpb.IndexDescriptor) bool {
				return idx.IsValidOriginIndex(c.ForeignKey.OriginColumnIDs)
			}) {
			return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState,
				"referencing constraint %q in the middle of being added, try again later", c.ForeignKey.Name)
		}
	}
	if err := p.MaybeUpgradeDependentOldForeignKeyVersionTables(ctx, tableDesc); err != nil {
		return err
	}

	// If the we aren't at a high enough version to drop indexes on the origin
	// side then we have to attempt to delete them.
	if !p.ExecCfg().Settings.Version.IsActive(ctx, clusterversion.NoOriginFKIndexes) {
		// Index for updating the FK slices in place when removing FKs.
		sliceIdx := 0
		for i := range tableDesc.OutboundFKs {
			tableDesc.OutboundFKs[sliceIdx] = tableDesc.OutboundFKs[i]
			sliceIdx++
			fk := &tableDesc.OutboundFKs[i]
			canReplace := func(idx *descpb.IndexDescriptor) bool {
				return idx.IsValidOriginIndex(fk.OriginColumnIDs)
			}
			// The index being deleted could be used as the origin index for this foreign key.
			if idx.IsValidOriginIndex(fk.OriginColumnIDs) && !indexHasReplacementCandidate(canReplace) {
				if behavior != tree.DropCascade && constraintBehavior != ignoreIdxConstraint {
					return errors.Errorf("index %q is in use as a foreign key constraint", idx.Name)
				}
				// Undo the in-place copy above so this FK is dropped, and
				// remove its back-reference on the referenced table.
				sliceIdx--
				if err := p.removeFKBackReference(ctx, tableDesc, fk); err != nil {
					return err
				}
			}
		}
		tableDesc.OutboundFKs = tableDesc.OutboundFKs[:sliceIdx]
	}

	// If this index is used on the referencing side of any FK constraints, try
	// to remove the references or find an alternate index that will suffice.
	candidateConstraints := make([]descpb.UniqueConstraint, len(remainingIndexes))
	for i := range remainingIndexes {
		// We can't copy directly because of the interface conversion.
		candidateConstraints[i] = remainingIndexes[i]
	}
	if err := p.tryRemoveFKBackReferences(
		ctx, tableDesc, idx, behavior, candidateConstraints,
	); err != nil {
		return err
	}

	// Clean up interleave relationships in both directions.
	if len(idx.Interleave.Ancestors) > 0 {
		if err := p.removeInterleaveBackReference(ctx, tableDesc, idx); err != nil {
			return err
		}
	}
	for _, ref := range idx.InterleavedBy {
		if err := p.removeInterleave(ctx, ref); err != nil {
			return err
		}
	}

	// Drop any views that depend on this index (requires CASCADE), collecting
	// their qualified names for the event log.
	var droppedViews []string
	for _, tableRef := range tableDesc.DependedOnBy {
		if tableRef.IndexID == idx.ID {
			// Ensure that we have DROP privilege on all dependent views
			err := p.canRemoveDependentViewGeneric(
				ctx, "index", idx.Name, tableDesc.ParentID, tableRef, behavior)
			if err != nil {
				return err
			}
			viewDesc, err := p.getViewDescForCascade(
				ctx, "index", idx.Name, tableDesc.ParentID, tableRef.ID, behavior,
			)
			if err != nil {
				return err
			}
			viewJobDesc := fmt.Sprintf("removing view %q dependent on index %q which is being dropped",
				viewDesc.Name, idx.Name)
			cascadedViews, err := p.removeDependentView(ctx, tableDesc, viewDesc, viewJobDesc)
			if err != nil {
				return err
			}
			qualifiedView, err := p.getQualifiedTableName(ctx, viewDesc)
			if err != nil {
				return err
			}

			droppedViews = append(droppedViews, qualifiedView.FQString())
			droppedViews = append(droppedViews, cascadedViews...)
		}
	}

	// Overwriting tableDesc.Index may mess up with the idx object we collected above. Make a copy.
	idxCopy := *idx
	idx = &idxCopy

	// Currently, a replacement primary index must be specified when dropping the primary index,
	// and this cannot be done with DROP INDEX.
	if idx.ID == tableDesc.GetPrimaryIndexID() {
		return errors.WithHint(
			pgerror.Newf(pgcode.FeatureNotSupported, "cannot drop the primary index of a table using DROP INDEX"),
			"instead, use ALTER TABLE ... ALTER PRIMARY KEY or"+
				"use DROP CONSTRAINT ... PRIMARY KEY followed by ADD CONSTRAINT ... PRIMARY KEY in a transaction",
		)
	}

	foundIndex := catalog.FindPublicNonPrimaryIndex(tableDesc, func(idxEntry catalog.Index) bool {
		return idxEntry.GetID() == idx.ID
	})
	if foundIndex == nil {
		// The index exists on the descriptor but is not public: it is still
		// being added by a concurrent schema change.
		return pgerror.Newf(
			pgcode.ObjectNotInPrerequisiteState,
			"index %q in the middle of being added, try again later",
			idxName,
		)
	}

	idxEntry := *foundIndex.IndexDesc()
	idxOrdinal := foundIndex.Ordinal()

	// Unsplit all manually split ranges in the index so they can be
	// automatically merged by the merge queue. Gate this on being the
	// system tenant because secondary tenants aren't allowed to scan
	// the meta ranges directly.
	if p.ExecCfg().Codec.ForSystemTenant() {
		span := tableDesc.IndexSpan(p.ExecCfg().Codec, idxEntry.ID)
		ranges, err := kvclient.ScanMetaKVs(ctx, p.txn, span)
		if err != nil {
			return err
		}
		for _, r := range ranges {
			var desc roachpb.RangeDescriptor
			if err := r.ValueProto(&desc); err != nil {
				return err
			}
			// We have to explicitly check that the range descriptor's start key
			// lies within the span of the index since ScanMetaKVs returns all
			// intersecting spans.
			if !desc.GetStickyBit().IsEmpty() && span.Key.Compare(desc.StartKey.AsRawKey()) <= 0 {
				// Swallow "key is not the start of a range" errors because it would
				// mean that the sticky bit was removed and merged concurrently. DROP
				// INDEX should not fail because of this.
				if err := p.ExecCfg().DB.AdminUnsplit(ctx, desc.StartKey); err != nil && !strings.Contains(err.Error(), "is not the start of a range") {
					return err
				}
			}
		}
	}

	// the idx we picked up with FindIndexByID at the top may not
	// contain the same field any more due to other schema changes
	// intervening since the initial lookup. So we send the recent
	// copy idxEntry for drop instead.
	if err := tableDesc.AddIndexMutation(&idxEntry, descpb.DescriptorMutation_DROP); err != nil {
		return err
	}
	tableDesc.RemovePublicNonPrimaryIndex(idxOrdinal)

	if err := p.removeIndexComment(ctx, tableDesc.ID, idx.ID); err != nil {
		return err
	}

	if err := validateDescriptor(ctx, p, tableDesc); err != nil {
		return err
	}
	mutationID := tableDesc.ClusterVersion.NextMutationID
	if err := p.writeSchemaChange(ctx, tableDesc, mutationID, jobDesc); err != nil {
		return err
	}
	p.BufferClientNotice(
		ctx,
		errors.WithHint(
			pgnotice.Newf("the data for dropped indexes is reclaimed asynchronously"),
			"The reclamation delay can be customized in the zone configuration for the table.",
		),
	)
	// Record index drop in the event log. This is an auditable log event
	// and is recorded in the same transaction as the table descriptor
	// update.
	return p.logEvent(ctx,
		tableDesc.ID,
		&eventpb.DropIndex{
			TableName:           tn.FQString(),
			IndexName:           string(idxName),
			MutationID:          uint32(mutationID),
			CascadeDroppedViews: droppedViews,
		})
}
|
package data
//
// Copyright (c) 2019 ARM Limited.
//
// SPDX-License-Identifier: MIT
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
import (
"encoding/json"
)
// Row is a versioned key/value entry whose value is a set of siblings.
type Row struct {
	Key          string      `json:"key"`
	LocalVersion uint64      `json:"localVersion"` // set to 0 when migrated from format version "0" (see Decode)
	Siblings     *SiblingSet `json:"siblings"`
}
// Encode serializes the row as JSON. The marshal error is deliberately
// discarded: a Row contains only JSON-encodable fields.
func (row *Row) Encode() []byte {
	encoded, _ := json.Marshal(row)

	return encoded
}
func (row *Row) Decode(encodedRow []byte, formatVersion string) error {
if formatVersion == "0" {
var siblingSet SiblingSet
err := siblingSet.Decode(encodedRow)
if err == nil {
row.LocalVersion = 0
row.Siblings = &siblingSet
return nil
}
// if it fails to decode using format vesion
// 0 which was without the row type then it might
// have been converted already to the new format
// version in a partially completed upgrade before.
// in this case, try to decode it as a regular row
// before returning an error
}
return json.Unmarshal(encodedRow, row)
} |
package cmd
import (
"bytes"
"context"
"encoding/json"
"log"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
"github.com/gabrielperezs/goreactor/lib"
"github.com/gabrielperezs/goreactor/reactor"
"github.com/gabrielperezs/goreactor/reactorlog"
"github.com/savaki/jq"
)
const (
	// defaultMaximumCmdTimeLive bounds command execution time when the config
	// does not provide (or fails to parse) "maximumCmdTimeLive".
	defaultMaximumCmdTimeLive = 10 * time.Minute
)
// Cmd is the command struct that will be executed after recive the order
// from the input plugins
type Cmd struct {
	r                  *reactor.Reactor          // owning reactor (provides the log label)
	cmd                string                    // binary to execute
	user               string                    // optional user to run the command as
	workingDirectory   string                    // optional working directory for the command
	environment        []string                  // extra environment variables (used with user switching)
	args               []string                  // raw argument templates, may contain "$.path" placeholders
	cond               map[string]*regexp.Regexp // jq-path -> regexp message filters
	maximumCmdTimeLive time.Duration             // hard timeout for a single command run
}
// NewOrGet create the command struct and fill the parameters needed from the
// config data.
//
// Config keys are matched case-insensitively. NOTE(review): values are read
// with unchecked type assertions, so a misconfigured entry (e.g. a number
// where a string is expected) panics — confirm whether config is validated
// upstream.
func NewOrGet(r *reactor.Reactor, c map[string]interface{}) (*Cmd, error) {
	o := &Cmd{
		r:    r,
		cond: make(map[string]*regexp.Regexp),
	}
	for k, v := range c {
		switch strings.ToLower(k) {
		case "cmd":
			o.cmd = v.(string)
		case "args":
			for _, n := range v.([]interface{}) {
				o.args = append(o.args, n.(string))
			}
		case "user":
			o.user = v.(string)
		case "workingdirectory":
			o.workingDirectory = v.(string)
		case "env":
			for _, n := range v.([]interface{}) {
				o.environment = append(o.environment, n.(string))
			}
		case "cond":
			// Each entry maps a jq path to a regexp the extracted value must match.
			for _, cond := range v.([]interface{}) {
				for nk, nv := range cond.(map[string]interface{}) {
					o.cond[nk] = regexp.MustCompile(nv.(string))
				}
			}
		// Fix: the original computed strings.ToLower("maximumCmdTimeLive")
		// at runtime on every switch evaluation; the case label is a constant.
		case "maximumcmdtimelive":
			var err error
			o.maximumCmdTimeLive, err = time.ParseDuration(v.(string))
			if err != nil {
				// Fall back to the default rather than failing the whole plugin.
				log.Print(err)
				o.maximumCmdTimeLive = defaultMaximumCmdTimeLive
			}
		}
	}

	if o.maximumCmdTimeLive == 0 {
		o.maximumCmdTimeLive = defaultMaximumCmdTimeLive
	}

	return o, nil
}
// MatchConditions is a filter to replace the variables (usually commands arguments)
// that are coming from the Input message
//
// Every condition key of the form "$.path" is evaluated as a jq path against
// the message body; if the extracted value fails its regexp, the message is
// rejected with reactor.ErrInvalidMsgForPlugin. Keys not starting with "$."
// are ignored.
func (o *Cmd) MatchConditions(msg lib.Msg) error {
	for k, v := range o.cond {
		if strings.HasPrefix(k, "$.") {
			// NOTE(review): jq.Parse/Apply errors are swallowed; on failure
			// the regexp is matched against an empty value, which typically
			// rejects the message — confirm this is the intended behavior.
			op, _ := jq.Parse(k[1:]) // create an Op
			value, _ := op.Apply(msg.Body())
			// Strip surrounding JSON quotes before matching.
			nv := bytes.Trim(value, "\"")
			if !v.Match(nv) {
				return reactor.ErrInvalidMsgForPlugin
			}
		}
	}
	return nil
}
// findAndReplaceJsonPaths replaces every "$.path" occurrence in s with the
// value extracted from the message body at that jq path (surrounding JSON
// quotes trimmed).
//
// NOTE(review): splitting on "$." makes each candidate path run to the end
// of its split fragment, and jq errors are ignored (failed lookups replace
// the token with the empty string) — confirm both are acceptable for the
// argument formats in use.
func (o *Cmd) findAndReplaceJsonPaths(msg lib.Msg, s string) string {
	newParse := s
	for _, argValue := range strings.Split(s, "$.") {
		if argValue == "" {
			continue
		}
		op, _ := jq.Parse("." + argValue) // create an Op
		value, _ := op.Apply(msg.Body())
		newParse = strings.Replace(newParse, "$."+argValue, strings.Trim(string(value), "\""), -1)
	}
	return newParse
}
// findReplace expands any "$." jq-path placeholders in s using the message
// body; strings without placeholders are returned unchanged.
func (o *Cmd) findReplace(msg lib.Msg, s string) string {
	if !strings.Contains(s, "$.") {
		return s
	}
	return o.findAndReplaceJsonPaths(msg, s)
}
// findReplaceReturningSlice expands an argument of the exact form "$.path..."
// into the slice of strings found at that jq path in the message body. Any
// other shape falls back to findReplace and yields a single-element slice.
func (o *Cmd) findReplaceReturningSlice(msg lib.Msg, s string) []string {
	if !strings.HasPrefix(s, "$.") || !strings.HasSuffix(s, "...") {
		return []string{o.findReplace(msg, s)} // Fallback to previous function
	}
	cleanArgValue := s[1 : len(s)-3] // Remove initial $ and final ...
	op, err := jq.Parse(cleanArgValue)
	if err != nil {
		return []string{o.findReplace(msg, s)} // Fallback to previous function
	}
	substituted, _ := op.Apply(msg.Body())
	// NOTE(review): the Unmarshal error is ignored; if the extracted value is
	// not a JSON string array this returns nil and the argument silently
	// disappears — confirm that is intended.
	var values []string
	json.Unmarshal(substituted, &values)
	return values
}
// replaceVariablesInArgs substitutes, in place, the built-in timestamp
// placeholders in each argument with values taken from the message.
func (o *Cmd) replaceVariablesInArgs(msg lib.Msg, args []string) {
	const (
		msToken  = "${CreationTimestampMilliseconds}"
		secToken = "${CreationTimestampSeconds}"
	)
	for i := range args {
		if strings.Contains(args[i], msToken) {
			ms := strconv.FormatInt(msg.CreationTimestampMilliseconds(), 10)
			args[i] = strings.Replace(args[i], msToken, ms, -1)
		}
		if strings.Contains(args[i], secToken) {
			var milliSecondsInSecond int64 = 1000
			sec := strconv.FormatInt(msg.CreationTimestampMilliseconds()/milliSecondsInSecond, 10)
			args[i] = strings.Replace(args[i], secToken, sec, -1)
		}
	}
}
// getReplacedArguments produces the final command arguments for msg:
// jq-path placeholders are expanded (possibly into multiple arguments),
// then the built-in timestamp variables are substituted.
func (o *Cmd) getReplacedArguments(msg lib.Msg) []string {
	var expanded []string
	for _, template := range o.args {
		expanded = append(expanded, o.findReplaceReturningSlice(msg, template)...)
	}
	o.replaceVariablesInArgs(msg, expanded)
	return expanded
}
// Run will execute the binary command that was defined in the config.
// In this function we also define the OUT and ERR data destination of
// the command.
//
// The run is bounded by maximumCmdTimeLive via a context timeout, switches
// to o.user when configured, and streams both stdout and stderr into the
// reactor log. Returns the start or wait error, if any.
func (o *Cmd) Run(rl reactorlog.ReactorLog, msg lib.Msg) error {
	args := o.getReplacedArguments(msg)
	logLabel := o.findReplace(msg, o.r.Label)
	rl.SetLabel(logLabel)

	// Kill the process if it outlives the configured maximum run time.
	ctx, cancel := context.WithTimeout(context.Background(), o.maximumCmdTimeLive)
	defer cancel()

	var c *exec.Cmd
	if len(args) > 0 {
		c = exec.CommandContext(ctx, o.cmd, args...)
	} else {
		c = exec.CommandContext(ctx, o.cmd)
	}
	if o.user != "" {
		err := setUserToCmd(o.user, o.environment, c)
		if err != nil {
			return err
		}
	}
	c.Dir = o.workingDirectory
	// Both output streams go to the reactor log.
	c.Stdout = rl
	c.Stderr = rl
	if err := c.Start(); err != nil {
		rl.Write([]byte("error starting process " + o.cmd + " " + strings.Join(args, " ") + ": " + err.Error()))
		return err
	}
	pid := c.Process.Pid // Since Start returned correctly, c.Process is not null.
	rl.Start(pid, o.cmd+" "+strings.Join(args, " "))
	if err := c.Wait(); err != nil {
		rl.Write([]byte("error running process: " + err.Error()))
		return err
	}
	return nil
}
// Exit will finish the command // TODO
// Currently a no-op.
func (o *Cmd) Exit() {
}
|
package problem0022
import "strings"
// generateParenthesis returns all well-formed combinations of n pairs of
// parentheses, in the depth-first order "open before close".
//
// Fixes over the original: the closure's parameter n shadowed the outer n,
// the outer variable p and the trailing bare return in the closure were
// dead weight; all are removed. The recursion itself is unchanged.
func generateParenthesis(n int) []string {
	var res []string
	// dfs extends prefix; open/closed count the parentheses placed so far.
	var dfs func(prefix string, open, closed int)
	dfs = func(prefix string, open, closed int) {
		if open == n {
			// All opens are placed; close whatever remains in one shot.
			res = append(res, prefix+strings.Repeat(")", open-closed))
			return
		}
		dfs(prefix+"(", open+1, closed)
		if closed < open {
			dfs(prefix+")", open, closed+1)
		}
	}
	dfs("", 0, 0)
	return res
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package firmware
import (
"context"
"regexp"
"time"
"github.com/golang/protobuf/ptypes/empty"
"chromiumos/tast/common/servo"
"chromiumos/tast/ctxutil"
"chromiumos/tast/remote/firmware"
"chromiumos/tast/remote/firmware/fixture"
"chromiumos/tast/remote/powercontrol"
"chromiumos/tast/rpc"
"chromiumos/tast/services/cros/ui"
"chromiumos/tast/testing"
)
// powerModeTestParams carries the per-testcase power mode under test.
type powerModeTestParams struct {
	powermode firmware.ResetType // one of coldReset, shutDown, warmReset
}
const (
	// coldReset: cold-reboot the EC at shutdown, then power back on.
	coldReset firmware.ResetType = "coldreset"
	// shutDown: power off, then power back on via servo.
	shutDown firmware.ResetType = "shutdown"
	// warmReset: ordinary reboot of the DUT.
	warmReset firmware.ResetType = "warmreset"
)
// init registers the PowerModes test with one parameterized variant per
// power mode (coldreset, shutdown, warmreset).
func init() {
	testing.AddTest(&testing.Test{
		Func:         PowerModes,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verifies that system comes back after shutdown and coldreset",
		Contacts:     []string{"pathan.jilani@intel.com", "intel-chrome-system-automation-team@intel.com", "cros-fw-engprod@google.com"},
		ServiceDeps:  []string{"tast.cros.ui.ScreenLockService"},
		SoftwareDeps: []string{"chrome", "reboot"},
		Vars: []string{"servo",
			"firmware.mode", // Optional. Expecting "tablet". By default firmware.mode will be "clamshell".
		},
		Attr:    []string{"group:firmware", "firmware_unstable"},
		Fixture: fixture.NormalMode,
		Params: []testing.Param{{
			Name: "coldreset",
			Val:  powerModeTestParams{powermode: coldReset},
		}, {
			Name: "shutdown",
			Val:  powerModeTestParams{powermode: shutDown},
		}, {
			Name: "warmreset",
			Val:  powerModeTestParams{powermode: warmReset},
		},
		},
	})
}
// PowerModes verifies that system comes back after shutdown and coldreset.
//
// Consistency fix: the power-mode dispatch below now compares against the
// typed constants declared in this file (coldReset, shutDown, warmReset)
// instead of repeating the raw string literals.
func PowerModes(ctx context.Context, s *testing.State) {
	h := s.FixtValue().(*fixture.Value).Helper
	dut := s.DUT()
	testOpt := s.Param().(powerModeTestParams)
	// Servo setup.
	if err := h.RequireServo(ctx); err != nil {
		s.Fatal("Failed opening servo: ", err)
	}
	// Get the initial tablet_mode_angle settings to restore at the end of test.
	re := regexp.MustCompile(`tablet_mode_angle=(\d+) hys=(\d+)`)
	out, err := dut.Conn().CommandContext(ctx, "ectool", "motionsense", "tablet_mode_angle").Output()
	if err != nil {
		s.Fatal("Failed to retrieve tablet_mode_angle settings: ", err)
	}
	m := re.FindSubmatch(out)
	if len(m) != 3 {
		s.Fatalf("Failed to get initial tablet_mode_angle settings: got submatches %+v", m)
	}
	initLidAngle := m[1]
	initHys := m[2]
	defaultMode := "clamshell"
	if mode, ok := s.Var("firmware.mode"); ok {
		defaultMode = mode
	}
	if defaultMode == "tablet" {
		// Set tabletModeAngle to 0 to force the DUT into tablet mode.
		testing.ContextLog(ctx, "Put DUT into tablet mode")
		if err := dut.Conn().CommandContext(ctx, "ectool", "motionsense", "tablet_mode_angle", "0", "0").Run(); err != nil {
			s.Fatal("Failed to set DUT into tablet mode: ", err)
		}
	}
	defer func(ctx context.Context) {
		s.Log("Performing Cleanup")
		// Power the DUT back on first if a failing subtest left it off.
		if !dut.Connected(ctx) {
			if err := h.Servo.SetPowerState(ctx, servo.PowerStateOn); err != nil {
				s.Fatal("Failed to set powerstate to ON at cleanup: ", err)
			}
		}
		// Restore the tablet_mode_angle settings captured above.
		if err := dut.Conn().CommandContext(ctx, "ectool", "motionsense", "tablet_mode_angle", string(initLidAngle), string(initHys)).Run(); err != nil {
			s.Fatal("Failed to restore tablet_mode_angle to the original settings: ", err)
		}
	}(ctx)
	cl, err := rpc.Dial(ctx, s.DUT(), s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
	}
	defer cl.Close(ctx)
	screenLockService := ui.NewScreenLockServiceClient(cl.Conn)
	if _, err := screenLockService.NewChrome(ctx, &empty.Empty{}); err != nil {
		s.Fatal("Failed to login chrome: ", err)
	}
	defer screenLockService.CloseChrome(ctx, &empty.Empty{})
	if testOpt.powermode == coldReset {
		s.Log("Performing cold reset")
		// Arm a cold EC reboot to fire at shutdown, then shut down.
		if err := dut.Conn().CommandContext(ctx, "ectool", "reboot_ec", "cold", "at-shutdown").Run(); err != nil {
			s.Fatal("Failed to execute ectool reboot_ec cmd: ", err)
		}
		if err := dut.Conn().CommandContext(ctx, "shutdown", "-h", "now").Run(); err != nil {
			s.Fatal("Failed to execute shutdown command: ", err)
		}
		if err := dut.WaitConnect(ctx); err != nil {
			s.Fatal("Failed to wake up DUT: ", err)
		}
		if err := powercontrol.ValidatePrevSleepState(ctx, dut, 5); err != nil {
			s.Fatal("Previous Sleep state is not 5: ", err)
		}
	}
	if testOpt.powermode == shutDown {
		s.Log("Performing shutdown")
		if err := dut.Conn().CommandContext(ctx, "shutdown", "-h", "now").Run(); err != nil {
			s.Fatal("Failed to run shutdown command: ", err)
		}
		if err := dut.WaitUnreachable(ctx); err != nil {
			s.Fatal("Failed to shutdown DUT: ", err)
		}
		s.Log("Power Normal Pressing")
		if err := h.Servo.SetPowerState(ctx, servo.PowerStateOn); err != nil {
			s.Fatal("Failed to set powerstate to ON: ", err)
		}
		cCtx, cancel := ctxutil.Shorten(ctx, time.Minute)
		defer cancel()
		// Setting power state ON, once again if system fails to boot.
		if err := dut.WaitConnect(cCtx); err != nil {
			if err := h.Servo.SetPowerState(ctx, servo.PowerStateOn); err != nil {
				s.Fatal("Failed to set powerstate to ON: ", err)
			}
			if err := dut.WaitConnect(ctx); err != nil {
				s.Fatal("Failed to wake up DUT: ", err)
			}
		}
		if err := powercontrol.ValidatePrevSleepState(ctx, dut, 5); err != nil {
			s.Fatal("Previous Sleep state is not 5: ", err)
		}
	}
	if testOpt.powermode == warmReset {
		s.Log("Performing warm reset")
		if err := h.DUT.Reboot(ctx); err != nil {
			s.Fatal("Failed to reboot DUT: ", err)
		}
		if err := powercontrol.ValidatePrevSleepState(ctx, dut, 0); err != nil {
			s.Fatal("Previous Sleep state is not 0: ", err)
		}
	}
}
|
package base
// Track : represents a track
type Track struct {
	Name string `json:"name"`
}

// Playlist : represents a playlist
type Playlist []Track

// Genre : type definition to represent a playlist genre
type Genre string

// Genres available
const (
	GenreParty     Genre = "party"
	GenrePop       Genre = "pop"
	GenreRock      Genre = "rock"
	GenreClassical Genre = "classical"
)
|
package server
import (
"net/http"
"io"
"users"
)
// RootHandler greets clients that hit the API root.
func RootHandler(w http.ResponseWriter, req *http.Request) {
	// The write error is deliberately ignored: there is no meaningful
	// recovery for a failed response write in a handler.
	w.Write([]byte("Welcome to the Printox alpha API\n"))
}
// SignUpHandler receives a signup request. A request that fails validation
// is answered with 409 Conflict and the validation error text; otherwise
// 200 OK.
func SignUpHandler(w http.ResponseWriter, req *http.Request) {
	parseErr := users.ParseSignUpRequest(req)
	if parseErr == nil {
		w.WriteHeader(http.StatusOK)
		return
	}
	w.WriteHeader(http.StatusConflict)
	io.WriteString(w, parseErr.Error())
}
// LoginHandler receives login credentials and confirms or denies access
// NOTE(review): not yet implemented — currently a no-op handler.
func LoginHandler(w http.ResponseWriter, req *http.Request) {}
|
package exporter
import (
"encoding/json"
"encoding/xml"
"errors"
"fmt"
week3 "go_feed_export"
"os"
yaml "gopkg.in/yaml.v2"
)
// ScrollFeeds prints all social media feeds to stdout, one entry per line,
// with a separator line after each platform.
func ScrollFeeds(platforms ...week3.SocialMedia) {
	for _, platform := range platforms {
		for _, entry := range platform.Feed() {
			fmt.Println(entry)
		}
		fmt.Println("=================================")
	}
}
// ExportTXT writes all feed entries of u, one per line, into
// ./files_txt/<filename>. It returns an error if the file cannot be
// opened or written.
func ExportTXT(u week3.SocialMedia, filename string) error {
	// NOTE(review): O_CREATE|O_WRONLY without O_TRUNC leaves stale bytes when
	// overwriting a longer previous file — confirm whether O_TRUNC is wanted.
	f, err := os.OpenFile("./files_txt/"+filename, os.O_CREATE|os.O_WRONLY, 0755)
	if err != nil {
		return errors.New("an error occurred opening the file: " + err.Error())
	}
	// Close on every return path; the original leaked the handle.
	defer f.Close()
	for _, fd := range u.Feed() {
		n, err := f.Write([]byte(fd + "\n"))
		if err != nil {
			return errors.New("an error occurred writing to file: " + err.Error())
		}
		fmt.Printf("wrote %d bytes\n", n)
	}
	fmt.Println()
	return nil
}
// FeedPresentation is the serializable envelope used by the JSON/XML/YAML
// exporters: the flattened feed entries plus their count.
type FeedPresentation struct {
	FeedStream []string // one element per feed entry
	FeedCount  int      // set to len(FeedStream) just before marshalling
}
// ExportJSON writes all feed entries of u as indented JSON into
// ./files_json/<filename>. It returns an error if the file cannot be
// opened or written, or if marshalling fails.
func ExportJSON(u week3.SocialMedia, filename string) error {
	f, err := os.OpenFile("./files_json/"+filename, os.O_CREATE|os.O_WRONLY, 0755)
	if err != nil {
		return errors.New("an error occurred opening the file: " + err.Error())
	}
	// Close on every return path; the original leaked the handle.
	defer f.Close()
	data := FeedPresentation{}
	for _, fd := range u.Feed() {
		data.FeedStream = append(data.FeedStream, fd)
	}
	data.FeedCount = len(data.FeedStream)
	// The original silently discarded the marshal error.
	body, err := json.MarshalIndent(data, "", " ")
	if err != nil {
		return errors.New("an error occurred marshalling to json: " + err.Error())
	}
	n, err := f.Write(body)
	if err != nil {
		return errors.New("an error occurred writing to file: " + err.Error())
	}
	fmt.Printf("wrote %d bytes of json\n", n)
	fmt.Println()
	return nil
}
// ExportXML writes all feed entries of u as indented XML into
// ./files_xml/<filename>. It returns an error if the file cannot be
// opened or written, or if marshalling fails.
func ExportXML(u week3.SocialMedia, filename string) error {
	f, err := os.OpenFile("./files_xml/"+filename, os.O_CREATE|os.O_WRONLY, 0755)
	if err != nil {
		return errors.New("an error occurred opening the file: " + err.Error())
	}
	// Close on every return path; the original leaked the handle.
	defer f.Close()
	data := FeedPresentation{}
	for _, fd := range u.Feed() {
		data.FeedStream = append(data.FeedStream, fd)
	}
	data.FeedCount = len(data.FeedStream)
	// The original silently discarded the marshal error.
	body, err := xml.MarshalIndent(data, "", " ")
	if err != nil {
		return errors.New("an error occurred marshalling to xml: " + err.Error())
	}
	n, err := f.Write(body)
	if err != nil {
		return errors.New("an error occurred writing to file: " + err.Error())
	}
	fmt.Printf("wrote %d bytes of xml\n", n)
	fmt.Println()
	return nil
}
// ExportYAML writes all feed entries of u as YAML into
// ./files_yaml/<filename>. (The original header said "xml files".)
// It returns an error if the file cannot be opened or written, or if
// marshalling fails.
func ExportYAML(u week3.SocialMedia, filename string) error {
	f, err := os.OpenFile("./files_yaml/"+filename, os.O_CREATE|os.O_WRONLY, 0755)
	if err != nil {
		return errors.New("an error occurred opening the file: " + err.Error())
	}
	// Close on every return path; the original leaked the handle.
	defer f.Close()
	data := FeedPresentation{}
	for _, fd := range u.Feed() {
		data.FeedStream = append(data.FeedStream, fd+"\n")
	}
	data.FeedCount = len(data.FeedStream)
	// The original silently discarded the marshal error.
	body, err := yaml.Marshal(data)
	if err != nil {
		return errors.New("an error occurred marshalling to yaml: " + err.Error())
	}
	n, err := f.Write(body)
	if err != nil {
		return errors.New("an error occurred writing to file: " + err.Error())
	}
	fmt.Printf("wrote %d bytes of yaml\n", n)
	fmt.Println()
	return nil
}
|
package md5
import (
"testing"
)
// TestComputeFile checks that ComputeFile can hash a file without error and
// logs the resulting digest.
// NOTE(review): the path is machine-specific (a local ISO image), so this
// test errors on any other machine. Consider a small fixture written to
// t.TempDir, or skipping when the file is absent.
func TestComputeFile(t *testing.T) {
	r, err := ComputeFile("/Volumes/Data/软件/deepin-desktop-community-1002-amd64.iso")
	if err != nil {
		t.Error(err.Error())
		return
	}
	t.Log(r)
}
|
package geojson_test
import (
"encoding/json"
"fmt"
"log"
"github.com/paulmach/orb"
"github.com/paulmach/orb/geojson"
"github.com/paulmach/orb/quadtree"
)
// ExampleFeature_Point demonstrates creating a point feature, storing it in a
// quadtree, and recovering it via a type assertion.
func ExampleFeature_Point() {
	f := geojson.NewFeature(orb.Point{1, 1})
	f.Properties["key"] = "value"

	// Index the feature by its bounding box (padded so the query point hits).
	qt := quadtree.New(f.Geometry.Bound().Pad(1))
	err := qt.Add(f) // add the feature to a quadtree
	if err != nil {
		log.Fatalf("unexpected error: %v", err)
	}

	// type assert the feature back into a Feature from
	// the orb.Pointer interface.
	feature := qt.Find(orb.Point{0, 0}).(*geojson.Feature)
	fmt.Printf("key=%s", feature.Properties["key"])

	// Output:
	// key=value
}
// ExampleFeatureCollection_foreignMembers shows that top-level JSON members
// other than "type"/"features" are preserved in ExtraMembers and round-trip
// through Marshal.
func ExampleFeatureCollection_foreignMembers() {
	rawJSON := []byte(`
	  { "type": "FeatureCollection",
	    "features": [
	      { "type": "Feature",
	        "geometry": {"type": "Point", "coordinates": [102.0, 0.5]},
	        "properties": {"prop0": "value0"}
	      }
	    ],
	    "title": "Title as Foreign Member"
	  }`)

	fc := geojson.NewFeatureCollection()
	err := json.Unmarshal(rawJSON, &fc)
	if err != nil {
		log.Fatalf("invalid json: %v", err)
	}

	fmt.Println(fc.Features[0].Geometry)
	// "title" was not a standard member, so it lands in ExtraMembers.
	fmt.Println(fc.ExtraMembers["title"])

	data, _ := json.Marshal(fc)
	fmt.Println(string(data))

	// Output:
	// [102 0.5]
	// Title as Foreign Member
	// {"features":[{"type":"Feature","geometry":{"type":"Point","coordinates":[102,0.5]},"properties":{"prop0":"value0"}}],"title":"Title as Foreign Member","type":"FeatureCollection"}
}
// MyFeatureCollection is a deprecated/no longer supported way to extract
// foreign/extra members from a feature collection. Now an UnmarshalJSON
// method, like below, is required for it to work.
type MyFeatureCollection struct {
	geojson.FeatureCollection
	Title string `json:"title"` // populated from the "title" foreign member
}
// UnmarshalJSON implemented as below is now required for the extra members
// to be decoded directly into the type.
func (fc *MyFeatureCollection) UnmarshalJSON(data []byte) error {
	// Decode the standard members first; foreign members end up in ExtraMembers.
	err := json.Unmarshal(data, &fc.FeatureCollection)
	if err != nil {
		return err
	}
	// Lift the "title" foreign member into the typed field (empty if absent).
	fc.Title = fc.ExtraMembers.MustString("title", "")
	return nil
}
// ExampleFeatureCollection_foreignMembersCustom demonstrates decoding foreign
// members into a custom wrapper type.
//
// Note: this approach to handling foreign/extra members requires
// implementing an `UnmarshalJSON` method on the new type.
// See MyFeatureCollection type and its UnmarshalJSON function above.
func ExampleFeatureCollection_foreignMembersCustom() {
	rawJSON := []byte(`
	  { "type": "FeatureCollection",
	    "features": [
	      { "type": "Feature",
	        "geometry": {"type": "Point", "coordinates": [102.0, 0.5]},
	        "properties": {"prop0": "value0"}
	      }
	    ],
	    "title": "Title as Foreign Member"
	  }`)

	fc := &MyFeatureCollection{}
	// fc is already a pointer; the original passed &fc (a **MyFeatureCollection),
	// an unnecessary extra indirection that json.Unmarshal merely tolerated.
	err := json.Unmarshal(rawJSON, fc)
	if err != nil {
		log.Fatalf("invalid json: %v", err)
	}

	fmt.Println(fc.FeatureCollection.Features[0].Geometry)
	fmt.Println(fc.Features[0].Geometry)
	fmt.Println(fc.Title)

	// Output:
	// [102 0.5]
	// [102 0.5]
	// Title as Foreign Member
}
// ExampleUnmarshalFeatureCollection demonstrates the package-level unmarshal
// helper, which decodes geometries into their concrete orb types.
func ExampleUnmarshalFeatureCollection() {
	rawJSON := []byte(`
	  { "type": "FeatureCollection",
	    "features": [
	      { "type": "Feature",
	        "geometry": {"type": "Point", "coordinates": [102.0, 0.5]},
	        "properties": {"prop0": "value0"}
	      }
	    ]
	  }`)

	// The original discarded this error; a malformed document would have
	// panicked on the nil collection below instead of failing cleanly.
	fc, err := geojson.UnmarshalFeatureCollection(rawJSON)
	if err != nil {
		log.Fatalf("invalid json: %v", err)
	}

	// Geometry will be unmarshalled into the correct geo.Geometry type.
	point := fc.Features[0].Geometry.(orb.Point)
	fmt.Println(point)

	// Output:
	// [102 0.5]
}
// Example_unmarshal demonstrates decoding a feature collection with the
// standard encoding/json package.
func Example_unmarshal() {
	rawJSON := []byte(`
	  { "type": "FeatureCollection",
	    "features": [
	      { "type": "Feature",
	        "geometry": {"type": "Point", "coordinates": [102.0, 0.5]},
	        "properties": {"prop0": "value0"}
	      }
	    ]
	  }`)

	fc := geojson.NewFeatureCollection()
	// fc is already a *FeatureCollection; the original passed &fc
	// (a **FeatureCollection), an unnecessary extra indirection.
	err := json.Unmarshal(rawJSON, fc)
	if err != nil {
		log.Fatalf("invalid json: %v", err)
	}

	// Geometry will be unmarshalled into the correct geo.Geometry type.
	point := fc.Features[0].Geometry.(orb.Point)
	fmt.Println(point)

	// Output:
	// [102 0.5]
}
// ExampleFeatureCollection_MarshalJSON shows that a FeatureCollection can be
// marshalled either via its own MarshalJSON method or with encoding/json.
func ExampleFeatureCollection_MarshalJSON() {
	fc := geojson.NewFeatureCollection()
	fc.Append(geojson.NewFeature(orb.Point{1, 2}))

	_, err := fc.MarshalJSON()
	if err != nil {
		log.Fatalf("marshal error: %v", err)
	}

	// standard lib encoding/json package will also work
	data, err := json.MarshalIndent(fc, "", " ")
	if err != nil {
		log.Fatalf("marshal error: %v", err)
	}

	fmt.Println(string(data))

	// Output:
	// {
	//  "features": [
	//   {
	//    "type": "Feature",
	//    "geometry": {
	//     "type": "Point",
	//     "coordinates": [
	//      1,
	//      2
	//     ]
	//    },
	//    "properties": null
	//   }
	//  ],
	//  "type": "FeatureCollection"
	// }
}
|
package main
import (
"database/sql"
"fmt"
)
// newConnection opens a connection pool to an MS SQL Server instance.
// NOTE(review): the original comment said "mariaDB" and named the function
// "NewConnection"; the driver string is "mssql" and the DSN is in SQL Server
// form, so both were stale. sql.Open does not dial — the first Ping/Query does.
func newConnection(u string, p string, db string, host string, port int) (*sql.DB, error) {
	//dbSource := u + ":" + p + "@tcp(" + host + ":" + strconv.Itoa(port) + ")/" + db + "?parseTime=true"
	dbSource := fmt.Sprintf("server=%s;user id=%s;database=%s;password=%s;port=%d", host, u, db, p, port)
	// fmt.Printf("db source: %s", dbSource)
	con, err := sql.Open("mssql", dbSource)
	if err != nil {
		return nil, err
	}
	return con, nil
}
// closeConnection closes the connection pool and propagates the driver's
// error (the original discarded it and unconditionally returned nil).
func closeConnection(con *sql.DB) error {
	return con.Close()
}
// getTrackingByUserID returns all tracking rows for the given user ID.
//
// Fixes over the original: the ID is a bind parameter instead of being
// concatenated into the SQL text (SQL injection); the Query error is checked
// before rows is used (nil-pointer deref on failure); rows.Close is deferred
// right after the check; Scan errors abort instead of being overwritten;
// rows.Err is reported.
func getTrackingByUserID(t string, con *sql.DB) ([]Tracking, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_TrackingByUserID] ?", t)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var ts []Tracking
	for rows.Next() {
		var tr Tracking
		if err := rows.Scan(&tr.TrackingID, &tr.SessionID, &tr.UserID, &tr.LessionID, &tr.Event, &tr.Note, &tr.LogUTCTime); err != nil {
			return ts, err
		}
		ts = append(ts, tr)
	}
	return ts, rows.Err()
}
// getTrackingByDeviceIDLessionID returns tracking rows for a device/lesson pair.
// Fixes: dropped the no-arg fmt.Sprintf around the constant SQL (go vet);
// Query error checked before iterating; rows closed promptly; Scan/rows errors
// propagated.
func getTrackingByDeviceIDLessionID(deviceID string, lessionID string, con *sql.DB) ([]Tracking, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_TrackingByDeviceIDLessionID] ?,?", deviceID, lessionID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var ts []Tracking
	for rows.Next() {
		var tr Tracking
		if err := rows.Scan(&tr.TrackingID, &tr.SessionID, &tr.UserID, &tr.LessionID, &tr.Event, &tr.Note, &tr.LogUTCTime); err != nil {
			return ts, err
		}
		ts = append(ts, tr)
	}
	return ts, rows.Err()
}
// getTrackingByUserIDSessionID returns tracking rows (including XRequestID)
// for a user/session pair.
// Fixes: dropped the no-arg fmt.Sprintf around the constant SQL (go vet);
// Query error checked before iterating; rows closed promptly; Scan/rows
// errors propagated. The large commented-out sample-row dump was removed.
func getTrackingByUserIDSessionID(userid string, sessionid string, con *sql.DB) ([]Tracking, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_TrackingByUserIDSessionID] ?,?", userid, sessionid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var ts []Tracking
	for rows.Next() {
		var tr Tracking
		if err := rows.Scan(&tr.TrackingID, &tr.XRequestID, &tr.SessionID, &tr.UserID, &tr.LessionID, &tr.Event, &tr.Note, &tr.LogUTCTime); err != nil {
			return ts, err
		}
		ts = append(ts, tr)
	}
	return ts, rows.Err()
}
// getProgressesByUserIDSessionID returns the progress values recorded for a
// user/session pair.
// Fixes: dropped the no-arg fmt.Sprintf (go vet); Query error checked before
// iterating; rows closed promptly; Scan/rows errors propagated.
func getProgressesByUserIDSessionID(userid int, sessionid string, con *sql.DB) ([]int, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_ProgressByUserIDSessionID] ?,?", userid, sessionid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	ps := make([]int, 0)
	for rows.Next() {
		var p int
		if err := rows.Scan(&p); err != nil {
			return ps, err
		}
		ps = append(ps, p)
	}
	return ps, rows.Err()
}
// getQuestionByQuestionGroupID returns the questions in the given group.
// Fixes: the group ID is a bind parameter instead of string concatenation
// (SQL injection); Query error checked before iterating; rows closed
// promptly; Scan/rows errors propagated.
func getQuestionByQuestionGroupID(t string, con *sql.DB) ([]Question, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_QuestionByQuestionGroup] ?", t)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var quizs []Question
	for rows.Next() {
		var qiz Question
		if err := rows.Scan(&qiz.ID, &qiz.QuestionType, &qiz.QuestionTitle, &qiz.QuestionDescription, &qiz.Answer, &qiz.QuestionGroup); err != nil {
			return quizs, err
		}
		quizs = append(quizs, qiz)
	}
	return quizs, rows.Err()
}
// getWordsBySemesterIDFromDB returns the words of the given semester.
// Fixes: the semester ID is a bind parameter instead of string concatenation
// (SQL injection); Query error checked before iterating; rows closed
// promptly; Scan/rows errors propagated.
func getWordsBySemesterIDFromDB(t string, con *sql.DB) ([]Word, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_WordsBySemesterID] ?", t)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var words []Word
	for rows.Next() {
		var wd Word
		if err := rows.Scan(&wd.ID, &wd.SemesterID, &wd.LessionID, &wd.ImagePath, &wd.AudioPath, &wd.Answer, &wd.Options); err != nil {
			return words, err
		}
		words = append(words, wd)
	}
	return words, rows.Err()
}
// getOneWordByWordID returns the word(s) matching the given word ID.
// Fixes: dropped the no-arg fmt.Sprintf (go vet); Query error checked before
// iterating; rows closed promptly; Scan/rows errors propagated.
func getOneWordByWordID(t int, con *sql.DB) ([]Word, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_WordsByWordID] ?", t)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var words []Word
	for rows.Next() {
		var wd Word
		if err := rows.Scan(&wd.ID, &wd.SemesterID, &wd.LessionID, &wd.ImagePath, &wd.AudioPath, &wd.Answer, &wd.Options); err != nil {
			return words, err
		}
		words = append(words, wd)
	}
	return words, rows.Err()
}
// getAnswerByUserIDSessionIDFromDB returns the answers for a user/session pair.
// Fixes: dropped the no-arg fmt.Sprintf (go vet); Query error checked before
// iterating; rows closed promptly; Scan/rows errors propagated.
func getAnswerByUserIDSessionIDFromDB(userid int, sessionid string, con *sql.DB) ([]Answer, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_AnswerByUserIDSessionID] ?,?", userid, sessionid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var as []Answer
	for rows.Next() {
		var a Answer
		if err := rows.Scan(&a.AnswerID, &a.UserID, &a.SessionID, &a.SemesterID, &a.LessionID, &a.UserAnswer, &a.CorrectAnswer); err != nil {
			return as, err
		}
		as = append(as, a)
	}
	return as, rows.Err()
}
// getComboFromDBBySessionID returns the combo leaderboard for a session.
// Fixes: dropped the no-arg fmt.Sprintf (go vet); Query error checked before
// iterating; rows closed promptly; Scan/rows errors propagated.
func getComboFromDBBySessionID(sessionid string, con *sql.DB) ([]ComboRecord, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_ComboBySessionID] ?", sessionid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var cb []ComboRecord
	for rows.Next() {
		var c ComboRecord
		if err := rows.Scan(&c.UserName, &c.UserImgPath, &c.UserCombo); err != nil {
			return cb, err
		}
		cb = append(cb, c)
	}
	return cb, rows.Err()
}
// getPracticeByUserIDSessionIDFromDB returns the practice word for a
// user/session pair; when several rows come back the last one wins
// (original behavior).
// Fixes: dropped the no-arg fmt.Sprintf (go vet); Query error checked before
// iterating; rows closed promptly; Scan/rows errors propagated.
func getPracticeByUserIDSessionIDFromDB(userid int, sessionid string, con *sql.DB) (Word, error) {
	var ww Word
	if err := con.Ping(); err != nil {
		return ww, err
	}
	rows, err := con.Query("exec [dbo].[Select_PracticeWordByUserIDSessionID] ?,?", userid, sessionid)
	if err != nil {
		return ww, err
	}
	defer rows.Close()
	for rows.Next() {
		var wd Word
		if err := rows.Scan(&wd.ID, &wd.SemesterID, &wd.LessionID, &wd.ImagePath, &wd.AudioPath, &wd.Answer, &wd.Options); err != nil {
			return ww, err
		}
		ww = wd
	}
	return ww, rows.Err()
}
// GetStairRecordsFromDBBySessionID returns the stair records for a session.
// Fixes: dropped the no-arg fmt.Sprintf (go vet); Query error checked before
// iterating; rows closed promptly; Scan/rows errors propagated.
func GetStairRecordsFromDBBySessionID(sessionid string, con *sql.DB) ([]StairRecord, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	rows, err := con.Query("exec [dbo].[Select_StairBySessionID] ?", sessionid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var sr []StairRecord
	for rows.Next() {
		var s StairRecord
		if err := rows.Scan(&s.UserName, &s.LocalPhotoPath, &s.TotalCnt); err != nil {
			return sr, err
		}
		sr = append(sr, s)
	}
	return sr, rows.Err()
}
// getUserPracticeResultByUserIDSessionID aggregates a user's practice rows
// for a session into a PracticeResult: it keeps the last row's CorrectCnt and
// CorrectRate, computes the longest streak of correct answers (combo), and
// upserts the combo when it beats the value stored in the DB.
//
// BUG FIX: the original passed pr.ComboInDB to Scan by value; Scan requires a
// pointer, so every scan failed (and the error was silently overwritten).
// Also fixed: Query error checked before iterating, no-arg fmt.Sprintf
// dropped (go vet), Scan/rows/Exec errors propagated.
func getUserPracticeResultByUserIDSessionID(userid int, sessionid string, con *sql.DB) (PracticeResult, error) {
	var rt PracticeResult
	if err := con.Ping(); err != nil {
		return rt, err
	}
	rows, err := con.Query("exec [dbo].[Select_PracticeResultByUserIDSessionID] ?,?", userid, sessionid)
	if err != nil {
		return rt, err
	}
	defer rows.Close()

	comboCount := 0        // longest streak seen so far
	tempComboCnt := 0      // current streak
	previousIsCorrect := true
	cb := 0                // combo value already stored in the DB (last row wins)
	for rows.Next() {
		var pr PracticeRecord
		if err := rows.Scan(&pr.UserPracticeID, &pr.UserID, &pr.SessionID, &pr.SemesterID, &pr.UserAnswer, &pr.CorrectAnswer, &pr.CorrectCnt, &pr.CorrectRate, &pr.ComboInDB); err != nil {
			return rt, err
		}
		rt.CorrectCnt = pr.CorrectCnt
		rt.CorrectRate = pr.CorrectRate
		cb = pr.ComboInDB
		if pr.UserAnswer == pr.CorrectAnswer {
			if previousIsCorrect {
				tempComboCnt++
			} else {
				tempComboCnt = 1
			}
			if tempComboCnt >= comboCount {
				comboCount = tempComboCnt
			}
			previousIsCorrect = true
		} else {
			tempComboCnt = 0
			previousIsCorrect = false
		}
	}
	if err := rows.Err(); err != nil {
		return rt, err
	}

	rt.ComboCnt = comboCount
	if comboCount > cb {
		if _, err := con.Exec("exec [dbo].[Upsert_ComboByUserIDComboCnt] ?,?,?", userid, sessionid, comboCount); err != nil {
			return rt, err
		}
	}
	return rt, nil
}
// insertQuestion inserts a question via the Insert_Question stored procedure.
// Fixed: removed the no-arg fmt.Sprintf around the constant SQL (go vet).
func insertQuestion(q Question, con *sql.DB) (sql.Result, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	return con.Exec("exec [dbo].[Insert_Question] ?,?,?,?,?", q.QuestionType, q.QuestionTitle, q.QuestionDescription, q.Answer, q.QuestionGroup)
}
// upsertAnswerToDB inserts or updates an answer row via the Upsert_Answer
// stored procedure (keyed by user/session; AnswerID is generated by the DB).
// Fixed: removed the no-arg fmt.Sprintf around the constant SQL (go vet) and
// the stale commented-out field list.
func upsertAnswerToDB(a Answer, con *sql.DB) (sql.Result, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	return con.Exec("exec [dbo].[Upsert_Answer] ?,?,?,?,?,?", a.UserID, a.SessionID, a.SemesterID, a.LessionID, a.UserAnswer, a.CorrectAnswer)
}
// insertOneUser inserts a user via the Insert_User stored procedure.
// Fixed: removed the no-arg fmt.Sprintf around the constant SQL (go vet).
func insertOneUser(u User, con *sql.DB) (sql.Result, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	return con.Exec("exec [dbo].[Insert_User] ?,?,?,?", u.UserID, u.UserName, u.PhotoPath, u.LocalPhotoPath)
}
// insertTracking inserts a tracking event via the Insert_Tracking stored
// procedure. Fixed: removed the no-arg fmt.Sprintf (go vet).
func insertTracking(t Tracking, con *sql.DB) (sql.Result, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	return con.Exec("exec [dbo].[Insert_Tracking] ?,?,?,?,?,?", t.XRequestID, t.SessionID, t.UserID, t.LessionID, t.Event, t.Note)
}
// insertProgress inserts a progress row via the Insert_Progress stored
// procedure. Fixed: removed the no-arg fmt.Sprintf (go vet).
func insertProgress(t Progress, con *sql.DB) (sql.Result, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	return con.Exec("exec [dbo].[Insert_Progress] ?,?,?", t.UserID, t.SessionID, t.LessionID)
}
// insertUserPractice inserts a practice record via the Insert_PracticeRecord
// stored procedure. Fixed: removed the no-arg fmt.Sprintf (go vet).
func insertUserPractice(p PracticeRecord, con *sql.DB) (sql.Result, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	return con.Exec("exec [dbo].[Insert_PracticeRecord] ?,?,?,?,?", p.UserID, p.SessionID, p.SemesterID, p.UserAnswer, p.CorrectAnswer)
}
// insertWord inserts a word via the Insert_Word stored procedure.
// Fixed: removed the no-arg fmt.Sprintf (go vet).
func insertWord(wd Word, con *sql.DB) (sql.Result, error) {
	if err := con.Ping(); err != nil {
		return nil, err
	}
	return con.Exec("exec [dbo].[Insert_Word] ?,?,?,?,?,?", wd.SemesterID, wd.LessionID, wd.ImagePath, wd.AudioPath, wd.Answer, wd.Options)
}
// getUserInfoByUserID returns the user row matching the given ID; when several
// rows come back the last one wins (original behavior).
// Fixes: dropped the no-arg fmt.Sprintf (go vet); Query error checked before
// iterating; rows closed promptly; Scan/rows errors propagated.
func getUserInfoByUserID(t string, con *sql.DB) (User, error) {
	var ts User
	if err := con.Ping(); err != nil {
		return ts, err
	}
	rows, err := con.Query("exec [dbo].[Select_UserByUserID] ?", t)
	if err != nil {
		return ts, err
	}
	defer rows.Close()
	for rows.Next() {
		var u User
		if err := rows.Scan(&u.UserID, &u.UserName, &u.PhotoPath); err != nil {
			return ts, err
		}
		ts = u
	}
	return ts, rows.Err()
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
//+build e2e
package pkg
import (
"fmt"
"time"
"github.com/sirupsen/logrus"
"github.com/pkg/errors"
)
// WaitConfig contains configuration for WaitForFunc.
type WaitConfig struct {
	Timeout        time.Duration      // total time to wait before giving up
	Interval       time.Duration      // pause between condition checks
	TolerateErrors int                // consecutive isReady errors tolerated before failing
	Logger         logrus.FieldLogger // optional; nil disables logging
}
// NewWaitConfig creates a new WaitConfig from the given settings.
func NewWaitConfig(timeout, interval time.Duration, tolerateErrs int, log logrus.FieldLogger) WaitConfig {
	return WaitConfig{
		Timeout:        timeout,
		Interval:       interval,
		TolerateErrors: tolerateErrs,
		Logger:         log,
	}
}
// WaitForFunc waits until `isReady` returns `true`, error is returned or timeout reached.
//
// Errors from isReady are tolerated up to cfg.TolerateErrors consecutive
// failures; any successful (even not-ready) check resets the counter. The
// timeout is only observed between checks, so a slow isReady can overrun
// cfg.Timeout by one call plus one interval.
func WaitForFunc(cfg WaitConfig, isReady func() (bool, error)) error {
	done := time.After(cfg.Timeout)
	errsCount := 0
	for {
		ready, err := isReady()
		if err != nil {
			if cfg.Logger != nil {
				cfg.Logger.WithError(err).Error("error while waiting for condition")
			}
			errsCount++
			// Give up once the consecutive-error budget is exhausted.
			if errsCount > cfg.TolerateErrors {
				return errors.Wrap(err, "while checking if condition is ready")
			}
		} else {
			if ready {
				return nil
			}
			if cfg.Logger != nil {
				cfg.Logger.Debug("condition not ready")
			}
			// A clean check resets the consecutive-error counter.
			errsCount = 0
		}
		select {
		case <-done:
			return fmt.Errorf("timeout waiting for condition")
		default:
			time.Sleep(cfg.Interval)
		}
	}
}
|
// Copyright (C) 2018 Minio Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package sio implements the DARE format. It provides an API for secure
// en/decrypting IO operations using io.Reader and io.Writer.
package sio // import "github.com/minio/sio"
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"errors"
"io"
"runtime"
"golang.org/x/crypto/chacha20poly1305"
"golang.org/x/sys/cpu"
)
const (
	// Version20 specifies version 2.0
	Version20 byte = 0x20
	// Version10 specifies version 1.0
	Version10 byte = 0x10
)

const (
	// AES_256_GCM specifies the cipher suite AES-GCM with 256 bit keys.
	AES_256_GCM byte = iota
	// CHACHA20_POLY1305 specifies the cipher suite ChaCha20Poly1305 with 256 bit keys.
	CHACHA20_POLY1305
)

// supportsAES indicates whether the CPU provides hardware support for AES-GCM.
// AES-GCM should only be selected as default cipher if there's hardware support.
var supportsAES = (cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ) || runtime.GOARCH == "s390x"

const (
	keySize = 32 // secret key size in bytes

	headerSize     = 16      // per-package header size in bytes
	maxPayloadSize = 1 << 16 // maximum plaintext payload per package (64 KiB)
	tagSize        = 16      // authentication tag size in bytes

	maxPackageSize   = headerSize + maxPayloadSize + tagSize // largest possible encrypted package
	maxDecryptedSize = 1 << 48                               // largest supported plaintext stream
	maxEncryptedSize = maxDecryptedSize + ((headerSize + tagSize) * 1 << 32) // largest supported ciphertext stream
)
// newAesGcm constructs an AES-256-GCM AEAD from the given key.
var newAesGcm = func(key []byte) (cipher.AEAD, error) {
	aes256, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	return cipher.NewGCM(aes256)
}

// supportedCiphers maps cipher-suite identifiers (AES_256_GCM,
// CHACHA20_POLY1305) to their AEAD constructors.
var supportedCiphers = [...]func([]byte) (cipher.AEAD, error){
	AES_256_GCM:       newAesGcm,
	CHACHA20_POLY1305: chacha20poly1305.New,
}
// Sentinel errors returned by readers/writers when a stream is malformed
// or not authentic.
var (
	errUnsupportedVersion = Error{"sio: unsupported version"}
	errUnsupportedCipher  = Error{"sio: unsupported cipher suite"}
	errInvalidPayloadSize = Error{"sio: invalid payload size"}
	errTagMismatch        = Error{"sio: authentication failed"}
	errUnexpectedSize     = Error{"sio: size is too large for DARE"}

	// Version 1.0 specific
	errPackageOutOfOrder = Error{"sio: sequence number mismatch"}

	// Version 2.0 specific
	errNonceMismatch  = Error{"sio: header nonce mismatch"}
	errUnexpectedEOF  = Error{"sio: unexpected EOF"}
	errUnexpectedData = Error{"sio: unexpected data after final package"}
)

// Error is the error returned by an io.Reader or io.Writer
// if the encrypted data cannot be decrypted because it is
// malformed or not authentic.
type Error struct{ msg string }

// Error returns the error message.
func (e Error) Error() string { return e.msg }
// Config contains the format configuration. The only field
// which must always be set manually is the secret key.
type Config struct {
	// The minimal supported version of the format. If
	// not set the default value - Version10 - is used.
	MinVersion byte

	// The highest supported version of the format. If
	// not set the default value - Version20 - is used.
	MaxVersion byte

	// A list of supported cipher suites. If not set the
	// default value is used.
	CipherSuites []byte

	// The secret encryption key. It must be 32 bytes long.
	Key []byte

	// The first expected sequence number. It should only
	// be set manually when decrypting a range within a
	// stream.
	SequenceNumber uint32

	// The RNG used to generate random values. If not set
	// the default value (crypto/rand.Reader) is used.
	Rand io.Reader

	// The size of the encrypted payload in bytes. The
	// default value is 64KB. It should be used to restrict
	// the size of encrypted packages. The payload size
	// must be between 1 and 64 KB.
	//
	// This field is specific for version 1.0 and is
	// deprecated.
	PayloadSize int
}
// EncryptedSize computes the size of an encrypted data stream
// from the plaintext size. It is the inverse of DecryptedSize().
//
// EncryptedSize returns an error if the provided size is too large.
func EncryptedSize(size uint64) (uint64, error) {
	if size > maxDecryptedSize {
		return 0, errUnexpectedSize
	}
	full, rest := size/maxPayloadSize, size%maxPayloadSize
	encSize := full * maxPackageSize
	if rest > 0 {
		// A trailing partial package still carries a full header and tag.
		encSize += rest + headerSize + tagSize
	}
	return encSize, nil
}
// DecryptedSize computes the size of a decrypted data stream
// from the encrypted stream size. It is the inverse of EncryptedSize().
//
// DecryptedSize returns an error if the provided size is too large
// or if the provided size is an invalid encrypted stream size.
func DecryptedSize(size uint64) (uint64, error) {
	if size > maxEncryptedSize {
		return 0, errUnexpectedSize
	}
	full, rest := size/maxPackageSize, size%maxPackageSize
	if rest > 0 && rest <= headerSize+tagSize {
		// A trailing fragment too small to hold any payload is invalid.
		return 0, errors.New("sio: size is not valid")
	}
	decSize := full * maxPayloadSize
	if rest > 0 {
		decSize += rest - (headerSize + tagSize)
	}
	return decSize, nil
}
// Encrypt reads from src until it encounters an io.EOF and encrypts all received
// data. The encrypted data is written to dst. It returns the number of bytes
// encrypted and the first error encountered while encrypting, if any.
//
// Encrypt returns the number of bytes written to dst.
func Encrypt(dst io.Writer, src io.Reader, config Config) (n int64, err error) {
	encReader, err := EncryptReader(src, config)
	if err != nil {
		return 0, err
	}
	// maxPackageSize == headerSize+maxPayloadSize+tagSize; use the named
	// constant instead of re-deriving it inline.
	return io.CopyBuffer(dst, encReader, make([]byte, maxPackageSize))
}
// Decrypt reads from src until it encounters an io.EOF and decrypts all received
// data. The decrypted data is written to dst. It returns the number of bytes
// decrypted and the first error encountered while decrypting, if any.
//
// Decrypt returns the number of bytes written to dst. Decrypt only writes data to
// dst if the data was decrypted successfully. It returns an error of type sio.Error
// if decryption fails.
func Decrypt(dst io.Writer, src io.Reader, config Config) (n int64, err error) {
	decReader, err := DecryptReader(src, config)
	if err != nil {
		return 0, err
	}
	buf := make([]byte, maxPayloadSize)
	return io.CopyBuffer(dst, decReader, buf)
}
// DecryptBuffer decrypts all received data in src.
// The decrypted data is appended to dst.
// If the number of output bytes is unknown,
// making a dst with capacity of len(src) is reasonable.
//
// DecryptBuffer only returns data if the data was decrypted successfully.
// It returns an error of type sio.Error if decryption fails.
func DecryptBuffer(dst, src []byte, config Config) (output []byte, err error) {
	if err = setConfigDefaults(&config); err != nil {
		return nil, err
	}
	switch {
	case config.MinVersion == Version10 && config.MaxVersion == Version10:
		// No in-memory fast path for the 1.0 format: stream through Decrypt.
		buf := bytes.NewBuffer(dst)
		if _, err = Decrypt(buf, bytes.NewBuffer(src), config); err != nil {
			return nil, err
		}
		return buf.Bytes(), nil
	case config.MinVersion == Version20 && config.MaxVersion == Version20:
		return decryptBufferV20(dst, src, &config)
	default:
		return decryptBuffer(dst, src, &config)
	}
}
// EncryptReader wraps the given src and returns an io.Reader which encrypts
// all received data. EncryptReader returns an error if the provided encryption
// configuration is invalid.
func EncryptReader(src io.Reader, config Config) (io.Reader, error) {
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	// Anything below 2.0 falls back to the 1.0 writer format.
	if config.MaxVersion != Version20 {
		return encryptReaderV10(src, &config)
	}
	return encryptReaderV20(src, &config)
}
// DecryptReader wraps the given src and returns an io.Reader which decrypts
// all received data. DecryptReader returns an error if the provided decryption
// configuration is invalid. The returned io.Reader returns an error of
// type sio.Error if the decryption fails.
func DecryptReader(src io.Reader, config Config) (io.Reader, error) {
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	switch {
	case config.MinVersion == Version10 && config.MaxVersion == Version10:
		return decryptReaderV10(src, &config)
	case config.MinVersion == Version20 && config.MaxVersion == Version20:
		return decryptReaderV20(src, &config)
	default:
		// Mixed version range: use the version-detecting reader.
		return decryptReader(src, &config), nil
	}
}
// DecryptReaderAt wraps the given src and returns an io.ReaderAt which decrypts
// all received data. DecryptReaderAt returns an error if the provided decryption
// configuration is invalid. The returned io.ReaderAt returns an error of
// type sio.Error if the decryption fails.
func DecryptReaderAt(src io.ReaderAt, config Config) (io.ReaderAt, error) {
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	switch {
	case config.MinVersion == Version10 && config.MaxVersion == Version10:
		return decryptReaderAtV10(src, &config)
	case config.MinVersion == Version20 && config.MaxVersion == Version20:
		return decryptReaderAtV20(src, &config)
	default:
		// Mixed version range: use the version-detecting reader.
		return decryptReaderAt(src, &config), nil
	}
}
// EncryptWriter wraps the given dst and returns an io.WriteCloser which
// encrypts all data written to it. EncryptWriter returns an error if the
// provided decryption configuration is invalid.
//
// The returned io.WriteCloser must be closed successfully to finalize the
// encryption process.
func EncryptWriter(dst io.Writer, config Config) (io.WriteCloser, error) {
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	// Anything below 2.0 falls back to the 1.0 writer format.
	if config.MaxVersion != Version20 {
		return encryptWriterV10(dst, &config)
	}
	return encryptWriterV20(dst, &config)
}
// DecryptWriter wraps the given dst and returns an io.WriteCloser which
// decrypts all data written to it. DecryptWriter returns an error if the
// provided decryption configuration is invalid.
//
// The returned io.WriteCloser must be closed successfully to finalize the
// decryption process. The returned io.WriteCloser returns an error of
// type sio.Error if the decryption fails.
func DecryptWriter(dst io.Writer, config Config) (io.WriteCloser, error) {
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	switch {
	case config.MinVersion == Version10 && config.MaxVersion == Version10:
		return decryptWriterV10(dst, &config)
	case config.MinVersion == Version20 && config.MaxVersion == Version20:
		return decryptWriterV20(dst, &config)
	default:
		// Mixed version range: use the version-detecting writer.
		return decryptWriter(dst, &config), nil
	}
}
// defaultCipherSuites returns the cipher-suite preference order: AES-GCM
// first when the CPU accelerates it, ChaCha20-Poly1305 first otherwise.
func defaultCipherSuites() []byte {
	if !supportsAES {
		return []byte{CHACHA20_POLY1305, AES_256_GCM}
	}
	return []byte{AES_256_GCM, CHACHA20_POLY1305}
}
// setConfigDefaults validates config and fills in defaults for every unset
// field (versions, cipher suites, RNG, payload size). It returns an error
// describing the first invalid field found.
func setConfigDefaults(config *Config) error {
	// Validation first, using the caller-provided values.
	if config.MinVersion > Version20 {
		return errors.New("sio: unknown minimum version")
	}
	if config.MaxVersion > Version20 {
		return errors.New("sio: unknown maximum version")
	}
	if len(config.Key) != keySize {
		return errors.New("sio: invalid key size")
	}
	if len(config.CipherSuites) > 2 {
		return errors.New("sio: too many cipher suites")
	}
	for _, c := range config.CipherSuites {
		if int(c) >= len(supportedCiphers) {
			return errors.New("sio: unknown cipher suite")
		}
	}
	if config.PayloadSize > maxPayloadSize {
		return errors.New("sio: payload size is too large")
	}
	// Defaults: unset MinVersion becomes 1.0, unset MaxVersion becomes 2.0.
	if config.MinVersion < Version10 {
		config.MinVersion = Version10
	}
	if config.MaxVersion < Version10 {
		config.MaxVersion = Version20
	}
	// Must run after defaulting so an explicit Min > default Max is caught.
	if config.MinVersion > config.MaxVersion {
		return errors.New("sio: minimum version cannot be larger than maximum version")
	}
	if len(config.CipherSuites) == 0 {
		config.CipherSuites = defaultCipherSuites()
	}
	if config.Rand == nil {
		config.Rand = rand.Reader
	}
	if config.PayloadSize == 0 {
		config.PayloadSize = maxPayloadSize
	}
	return nil
}
|
package kindle_emailer
import (
"fmt"
"kindle_clipping_exporter/kindle"
"log"
"net/smtp"
"strings"
)
// messageHeader is the header block prepended to every email; the single %s
// receives the recipient address.
const messageHeader = "To: %s\r\nSubject: Your most recent kindle clippings\r\n"

// Credentials holds the SMTP sender account and the recipient address.
type Credentials struct {
	FromEmail         string // sender address, also the SMTP login
	FromEmailPassword string // password (or app password) for FromEmail
	ToEmail           string // recipient address
}
// SendEmail emails all new clippings on d to cred.ToEmail via Gmail SMTP.
// It is a no-op (logged) when there are no new clippings.
func SendEmail(d kindle.Device, cred *Credentials) {
	if len(d.NewClippings) == 0 {
		log.Println("No new clippings, skipping email.")
		return
	}

	// Sender data.
	from := cred.FromEmail
	password := cred.FromEmailPassword

	// Receiver email address.
	to := []string{
		cred.ToEmail,
	}

	// smtp server configuration for gmail.
	smtpHost := "smtp.gmail.com"
	smtpPort := "587"

	var msg strings.Builder
	fmt.Fprintf(&msg, messageHeader, cred.ToEmail)
	// BUG FIX: RFC 5322 requires an empty line between the header block and
	// the body; without it the first clipping line was parsed as a header.
	msg.WriteString("\r\n")
	for _, clipping := range d.NewClippings {
		msg.WriteString(clipping.ToString())
		msg.WriteString("\n==========\n")
	}
	msg.WriteString("\r\n")

	// Authentication.
	auth := smtp.PlainAuth("", from, password, smtpHost)

	// Sending email.
	if err := smtp.SendMail(smtpHost+":"+smtpPort, auth, from, to, []byte(msg.String())); err != nil {
		// Report through the log package, consistent with the other paths
		// (the original mixed fmt.Println and log.Println).
		log.Println(err)
		return
	}
	log.Println("Email Sent Successfully!")
}
|
package cache
import (
"sync"
"time"
)
// CacheItem is a single cache entry. The embedded RWMutex guards the entry's
// fields for concurrent access (note: embedding exports Lock/Unlock on the type).
type CacheItem struct {
	sync.RWMutex
	Key       interface{}   // lookup key
	Data      interface{}   // cached payload
	LifeSpan  time.Duration // intended lifetime; zero here means unset
	TimeStamp time.Time     // creation/refresh time; not set by CreateCacheItem
}
// CreateCacheItem builds a cache entry holding the given key and data.
// LifeSpan and TimeStamp are left at their zero values.
func CreateCacheItem(key interface{}, data interface{}) *CacheItem {
	return &CacheItem{Key: key, Data: data}
}
|
package dialect
import (
"context"
"time"
"github.com/phogolabs/log"
)
// Logger represents a logger
type Logger = log.Logger

// LoggerDriver is a driver that logs all driver operations.
type LoggerDriver struct {
	Driver        // underlying driver being decorated
	logger Logger // destination for operation logs
}
// Log gets a driver and an optional logging function, and returns
// a new debugged-driver that prints all outgoing operations.
func Log(d Driver, logger Logger) Driver {
	wrapped := &LoggerDriver{
		Driver: d,
		logger: logger,
	}
	return wrapped
}
// Exec logs its params and calls the underlying driver Exec method.
func (d *LoggerDriver) Exec(ctx context.Context, query string, args, v interface{}) error {
	start := time.Now()
	err := d.Driver.Exec(ctx, query, args, v)
	// Attach the query, its params, and the elapsed time to every log line.
	logger := d.logger.
		WithField("sql.query", query).
		WithField("sql.param", args).
		WithField("sql.duration", time.Since(start).String())
	if err != nil {
		logger.WithError(err).Errorf("query.exec fail")
		return err
	}
	logger.Infof("query.exec success")
	return nil
}
// Query logs its params and calls the underlying driver Query method.
// Query logs its params and calls the underlying driver Query method.
// The query, parameters and duration are attached as fields.
//
// Fix: the outcome messages previously said "query.exec" — a copy/paste
// from Exec — which made Exec and Query failures indistinguishable in
// the logs; they now say "query.query".
func (d *LoggerDriver) Query(ctx context.Context, query string, args, v interface{}) error {
	var (
		start  = time.Now()
		logger = d.logger
	)
	err := d.Driver.Query(ctx, query, args, v)
	logger = logger.WithField("sql.query", query)
	logger = logger.WithField("sql.param", args)
	logger = logger.WithField("sql.duration", time.Since(start).String())
	if err != nil {
		logger.WithError(err).Errorf("query.query fail")
		return err
	}
	logger.Infof("query.query success")
	return nil
}
// Tx adds an log-id for the transaction and calls the underlying driver Tx command.
// Tx starts a transaction on the underlying driver, tagging the logger
// with a per-transaction id (the current unix timestamp) and logging
// the outcome. On success the returned transaction is wrapped in a
// LoggerTx so its operations are logged as well.
func (d *LoggerDriver) Tx(ctx context.Context) (Tx, error) {
	logger := d.logger.WithField("sql.tx", time.Now().Unix())
	tx, err := d.Driver.Tx(ctx)
	if err != nil {
		logger.WithError(err).Errorf("tx.start fail")
		return nil, err
	}
	logger.Infof("tx.start success")
	wrapped := &LoggerTx{Tx: tx, logger: logger, ctx: ctx}
	return wrapped, nil
}
// LoggerTx is a transaction implementation that logs all transaction operations.
type LoggerTx struct {
	Tx                     // underlying transaction.
	logger Logger          // transaction-scoped logger (carries the sql.tx field set by LoggerDriver.Tx).
	ctx    context.Context // underlying transaction context.
}
// Exec logs its params and calls the underlying transaction Exec method.
// Exec runs the underlying transaction's Exec and logs the query text,
// its parameters, its duration, and whether it succeeded.
func (d *LoggerTx) Exec(ctx context.Context, query string, args, v interface{}) error {
	started := time.Now()
	err := d.Tx.Exec(ctx, query, args, v)
	entry := d.logger.
		WithField("sql.query", query).
		WithField("sql.param", args).
		WithField("sql.duration", time.Since(started).String())
	if err != nil {
		entry.WithError(err).Errorf("query.exec fail")
		return err
	}
	entry.Infof("query.exec success")
	return nil
}
// Query logs its params and calls the underlying transaction Query method.
// Query logs its params and calls the underlying transaction Query
// method. The query, parameters and duration are attached as fields.
//
// Fix: the outcome messages previously said "query.exec" — a copy/paste
// from Exec — making the two operations indistinguishable in logs; they
// now say "query.query".
func (d *LoggerTx) Query(ctx context.Context, query string, args, v interface{}) error {
	var (
		start  = time.Now()
		logger = d.logger
	)
	err := d.Tx.Query(ctx, query, args, v)
	logger = logger.WithField("sql.query", query)
	logger = logger.WithField("sql.param", args)
	logger = logger.WithField("sql.duration", time.Since(start).String())
	if err != nil {
		logger.WithError(err).Errorf("query.query fail")
		return err
	}
	logger.Infof("query.query success")
	return nil
}
// Commit logs this step and calls the underlying transaction Commit method.
// Commit commits the underlying transaction and logs the outcome.
func (d *LoggerTx) Commit() error {
	err := d.Tx.Commit()
	if err != nil {
		d.logger.WithError(err).Errorf("tx.commit fail")
		return err
	}
	d.logger.Infof("tx.commit success")
	return nil
}
// Rollback logs this step and calls the underlying transaction Rollback method.
// Rollback rolls back the underlying transaction and logs the outcome.
func (d *LoggerTx) Rollback() error {
	err := d.Tx.Rollback()
	if err != nil {
		d.logger.WithError(err).Errorf("tx.rollback fail")
		return err
	}
	d.logger.Infof("tx.rollback success")
	return nil
}
|
package main
import (
"fmt"
"os"
"github.com/jessevdk/go-flags"
)
// VERSION is the DLite release string.
const VERSION = "2.0.0-beta5"

// Options holds the global command-line flags; currently none are defined.
type Options struct{}

var (
	options Options
	// cmd is the top-level flag parser; subcommands presumably register
	// themselves against it elsewhere in the package — confirm.
	cmd = flags.NewParser(&options, flags.Default)
)
// main parses the command line; when no subcommand was invoked it
// prints the stored DLite configuration, or an installation hint if no
// configuration file exists yet.
func main() {
	cmd.SubcommandsOptional = true
	if _, err := cmd.Parse(); err != nil {
		os.Exit(1)
	}
	if cmd.Command.Active != nil {
		// A subcommand ran; nothing more to do here.
		return
	}
	config, err := ReadConfig()
	if err != nil {
		if os.IsNotExist(err) {
			fmt.Println("DLite has not been installed. Please run 'sudo dlite install'")
		} else {
			fmt.Println(err)
		}
		os.Exit(1)
	}
	fmt.Println("DLite configuration:")
	fmt.Printf(" uuid : %s\n", config.Uuid)
	fmt.Printf(" cpu count : %d\n", config.CpuCount)
	fmt.Printf(" memory : %d GiB\n", config.Memory)
	fmt.Printf(" disk size : %d GiB\n", config.DiskSize)
	fmt.Printf(" hostname : %s\n", config.Hostname)
	fmt.Printf(" dns server : %s\n", config.DNSServer)
	fmt.Printf(" docker version : %s\n", config.DockerVersion)
	if config.Extra != "" {
		fmt.Printf(" docker args : %s\n", config.Extra)
	}
}
|
package main
import (
"fmt"
"time"
)
// in writes msg into channel. The chan<- direction in the parameter
// type restricts this function, at compile time, to sending only —
// reading from the channel here would not compile.
func in(channel chan<- string, msg string) {
	channel <- msg
}
// out receives from channel forever and prints every message. The
// <-chan direction restricts this function to receiving only. It never
// returns, so run it in its own goroutine.
func out(channel <-chan string) {
	for {
		msg := <-channel
		fmt.Println(msg)
	}
}
// main streams ten numbered messages through a buffered channel to a
// printer goroutine, then sleeps so the goroutine can drain the channel
// before the process exits (crude synchronization instead of a proper
// join).
func main() {
	channel := make(chan string, 1)
	go out(channel)
	for i := 0; i < 10; i++ {
		msg := fmt.Sprintf("cloudacademy - %d", i)
		in(channel, msg)
	}
	time.Sleep(time.Second * 10) // the "hacky" way to let the goroutine finish first
}
|
package middleware
import (
"net/http"
"project/packages/authentication/token"
"project/packages/handlers/response"
)
// UserAuthorize is middleware that rejects requests lacking a valid
// token: it extracts the token string from the request, verifies it,
// and only then forwards the request to next. Failures respond with
// HTTP 400 and a JSON {"message": ...} body.
//
// NOTE(review): 401 Unauthorized is the more conventional status for a
// missing/invalid token — confirm clients don't depend on 400 before
// changing it.
func UserAuthorize(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		tokenString, err := token.GetTokenString(r)
		if err != nil {
			response.ResponseWithJson(w, http.StatusBadRequest, map[string]string{"message": err.Error()})
			return
		}
		if _, err := token.VerifyToken(tokenString); err != nil {
			response.ResponseWithJson(w, http.StatusBadRequest, map[string]string{"message": err.Error()})
			return
		}
		next.ServeHTTP(w, r)
	})
}
// func UserAuthorize(next http.HandlerFunc) http.HandlerFunc {
// return func(w http.ResponseWriter, r *http.Request) {
// //getToken
// tokenString, err := token.GetTokenString(r)
// if err != nil {
// response.ResponseWithJson(w, http.StatusBadRequest, map[string]string{"message": err.Error()})
// return
// }
// //verify
// _, err2 := token.VerifyToken(tokenString)
// if err2 != nil {
// response.ResponseWithJson(w, http.StatusBadRequest, map[string]string{"message": err2.Error()})
// return
// }
// next(w, r)
// }
// }
|
package main
import (
"fmt"
"io/ioutil"
"regexp"
"strings"
"strconv"
)
// Disc is one spinning disc (apparently from an Advent of Code day-15
// style puzzle, given the "day15input" file): it has numberOfPositions
// slots and currently sits at currentPosition.
type Disc struct {
	numberOfPositions, currentPosition int
}
func main() {
discs := parseInput("day15input")
solve(discs)
}
// parseInput reads filename and builds one Disc per line. Each line is
// expected to contain at least four integers; the second is taken as
// the disc's position count and the fourth as its starting position.
// The empty trailing element produced by the file's final newline is
// skipped. On a read error the error is printed and an empty slice
// returned; per-number conversion errors are deliberately ignored.
func parseInput(filename string) []Disc {
	var discs []Disc
	numberPattern := regexp.MustCompile("[0-9]+")
	input, err := ioutil.ReadFile(filename)
	if err != nil {
		fmt.Println(err)
		return discs
	}
	lines := strings.Split(string(input), "\n")
	for _, line := range lines[:len(lines)-1] {
		nums := numberPattern.FindAllString(line, -1)
		positions, _ := strconv.Atoi(nums[1])
		start, _ := strconv.Atoi(nums[3])
		discs = append(discs, Disc{positions, start})
	}
	return discs
}
// tick advances the disc one time step, wrapping from the last
// position back to 0.
func (d *Disc) tick() {
	next := d.currentPosition + 1
	d.currentPosition = next % d.numberOfPositions
}
// solve prints the first time t at which pressing the button lets the
// capsule fall through every disc: disc j is reached t+j+1 seconds
// after the press, so (start_j + t + j + 1) mod positions_j must be 0
// for all j.
func solve(discs []Disc) {
	for t := 0; ; t++ {
		ok := true
		for j := range discs {
			if (discs[j].currentPosition+t+j+1)%discs[j].numberOfPositions != 0 {
				ok = false
				break
			}
		}
		if ok {
			fmt.Println(t)
			return
		}
	}
}
|
package middleware
import (
"github.com/gin-gonic/gin"
)
// Common installs the middleware shared by every route: gin's panic
// recovery plus the project's error-handling middleware, in that order.
func Common(r *gin.Engine) {
	r.Use(gin.Recovery(), ErrorMiddleware())
}
|
package canvas
import (
"github.com/gopherjs/gopherjs/js"
"github.com/gopherjs/jquery"
"github.com/platinasystems/weeb/r2"
"fmt"
)
// CompositeOperation enumerates the HTML canvas compositing modes; "A"
// is the source being drawn and "B" the existing destination pixels.
type CompositeOperation int

const (
	SrcOver CompositeOperation = iota // A over B (default)
	SrcAtop                           // A atop B
	SrcIn                             // A in B
	SrcOut                            // A out B
	DstOver                           // B over A
	DstAtop                           // B atop A
	DstIn                             // B in A
	DstOut                            // B out A
	Lighter                           // A plus B
	Copy                              // A (B is ignored)
	Xor                               // A xor B
)

// compositionStrings maps each CompositeOperation to the keyword the
// canvas globalCompositeOperation property expects.
var compositionStrings = []string{
	SrcOver: "source-over",
	SrcAtop: "source-atop",
	SrcIn:   "source-in",
	SrcOut:  "source-out",
	DstOver: "destination-over",
	DstAtop: "destination-atop",
	DstIn:   "destination-in",
	DstOut:  "destination-out",
	Lighter: "lighter",
	Copy:    "copy",
	Xor:     "xor",
}
// Context wraps a canvas 2d rendering context. The tagged fields are
// live views of the underlying JS object's properties (via GopherJS
// `js` struct tags); the untagged fields are Go-side bookkeeping.
type Context struct {
	*js.Object
	Font                     string  `js:"font"`                     // "10px sans-serif"
	GlobalAlpha              float64 `js:"globalAlpha"`              //
	GlobalCompositeOperation string  `js:"globalCompositeOperation"` // "source-over"
	ImageSmoothingEnabled    bool    `js:"imageSmoothingEnabled"`    //
	LineCap                  string  `js:"lineCap"`                  // "butt" round square
	LineDashOffset           float64 `js:"lineDashOffset"`           //
	LineJoin                 string  `js:"lineJoin"`                 // "miter" round miter
	LineWidth                float64 `js:"lineWidth"`                //
	MiterLimit               float64 `js:"miterLimit"`               //
	ShadowBlur               float64 `js:"shadowBlur"`               //
	ShadowColor              string  `js:"shadowColor"`              //
	ShadowOffsetX            float64 `js:"shadowOffsetX"`            //
	ShadowOffsetY            float64 `js:"shadowOffsetY"`            //
	FillStyle                string  `js:"fillStyle"`                // css color, CanvasGradient, CanvasPattern
	StrokeStyle              string  `js:"strokeStyle"`              //
	TextAlign                string  `js:"textAlign"`                // "start" (default), "end", "left", "right", "center"
	TextBaseline             string  `js:"textBaseline"`             // "top", "hanging", "middle", "alphabetic" (default), "ideographic", "bottom"
	// Canvas size in points.
	Size r2.X
	// Screen resolution in pixels per point (and inverse).
	PixelsPerPoint, PointsPerPixel float64
}

// Xy is an (x,y) coordinate as complex number for easy arithmetic.
// X increasing to the right; Y increasing down on screen.
type Xy complex128
// GetContext resolves c with jQuery, takes the first matched element
// (presumably a <canvas> — confirm at call sites), and returns its
// "2d" rendering context wrapped in a Context.
func GetContext(c interface{}) *Context {
	return &Context{Object: jquery.NewJQuery(c).Underlying().Index(0).Call("getContext", "2d")}
}
// Push/pop graphics state on stack.
func (c *Context) Save()    { c.Call("save") }
func (c *Context) Restore() { c.Call("restore") }

// Path construction and painting: thin wrappers over the canvas 2d API.
func (c *Context) BeginPath() { c.Call("beginPath") }
func (c *Context) ClosePath() { c.Call("closePath") }
func (c *Context) Fill()      { c.Call("fill") }
func (c *Context) Stroke()    { c.Call("stroke") }
func (c *Context) Clip()      { c.Call("clip") }

// IsPointInPath invokes the JS method but discards its boolean result —
// NOTE(review): callers cannot use the answer; looks like an oversight,
// but changing the signature would break existing callers.
func (c *Context) IsPointInPath(x r2.X) { c.Call("isPointInPath", x.X(), x.Y()) }

// Rectangle helpers: x is the top-left corner, s the size.
func (c *Context) FillRect(x, s r2.X)   { c.Call("fillRect", x.X(), x.Y(), s.X(), s.Y()) }
func (c *Context) StrokeRect(x, s r2.X) { c.Call("strokeRect", x.X(), x.Y(), s.X(), s.Y()) }
func (c *Context) ClearRect(x, s r2.X)  { c.Call("clearRect", x.X(), x.Y(), s.X(), s.Y()) }
// FillText draws filled text at x; when maxWidth is supplied the canvas
// scales the text to fit within that width.
//
// Fix: the variadic slice itself was previously passed to the JS call,
// so fillText received an array where it expects a number; forward the
// first element instead.
func (c *Context) FillText(text string, x r2.X, maxWidth ...float64) {
	if len(maxWidth) > 0 {
		c.Call("fillText", text, x.X(), x.Y(), maxWidth[0])
	} else {
		c.Call("fillText", text, x.X(), x.Y())
	}
}
// StrokeText draws outlined text at x; when maxWidth is supplied the
// canvas scales the text to fit within that width.
//
// Fix: the variadic slice itself was previously passed to the JS call,
// so strokeText received an array where it expects a number; forward
// the first element instead.
func (c *Context) StrokeText(text string, x r2.X, maxWidth ...float64) {
	if len(maxWidth) > 0 {
		c.Call("strokeText", text, x.X(), x.Y(), maxWidth[0])
	} else {
		c.Call("strokeText", text, x.X(), x.Y())
	}
}
// MeasureText returns the rendered width of text as the real part of an
// r2.X (the canvas API reports width only; the imaginary part is 0).
func (c *Context) MeasureText(text string) r2.X {
	dx := c.Call("measureText", text).Get("width").Float()
	return r2.X(complex(dx, 0))
}

// Path segment helpers; points are r2.X values.
func (c *Context) MoveTo(x r2.X)  { c.Call("moveTo", x.X(), x.Y()) }
func (c *Context) LineTo(x r2.X)  { c.Call("lineTo", x.X(), x.Y()) }
func (c *Context) Rect(x, s r2.X) { c.Call("rect", x.X(), x.Y(), s.X(), s.Y()) }

// QuadraticCurveTo adds a quadratic Bézier segment ending at x1 with
// control point c1.
func (c *Context) QuadraticCurveTo(c1, x1 r2.X) {
	c.Call("quadraticCurveTo", c1.X(), c1.Y(), x1.X(), x1.Y())
}

// BezierCurveTo adds a cubic Bézier segment ending at x1 with control
// points c1 and c2.
func (c *Context) BezierCurveTo(c1, c2, x1 r2.X) {
	c.Call("bezierCurveTo", c1.X(), c1.Y(), c2.X(), c2.Y(), x1.X(), x1.Y())
}
// Arc adds a circular arc centered at x with radius r, sweeping from θ0
// to θ1; pass ccw=true for a counterclockwise sweep (default clockwise).
//
// Fix: the variadic []bool itself was previously forwarded to JS, where
// even an empty array is truthy, so the anticlockwise flag was
// effectively always set; forward a plain boolean instead.
func (c *Context) Arc(x r2.X, r float64, θ0, θ1 r2.Angle, ccw ...bool) {
	counterclockwise := len(ccw) > 0 && ccw[0]
	c.Call("arc", x.X(), x.Y(), r, θ0.Radians(), θ1.Radians(), counterclockwise)
}
// Ellipse adds an elliptical arc centered at x with radii r, rotated by
// rotation, sweeping from θ0 to θ1; pass ccw=true for counterclockwise.
//
// Fix: the variadic []bool itself was previously forwarded to JS, where
// even an empty array is truthy, so the anticlockwise flag was
// effectively always set; forward a plain boolean instead.
func (c *Context) Ellipse(x, r r2.X, θ0, θ1, rotation r2.Angle, ccw ...bool) {
	counterclockwise := len(ccw) > 0 && ccw[0]
	c.Call("ellipse", x.X(), x.Y(), r.X(), r.Y(), rotation.Radians(), θ0.Radians(), θ1.Radians(), counterclockwise)
}
// ArcTo adds an arc of radius r tangent to the lines toward x1 and x2.
func (c *Context) ArcTo(x1, x2 r2.X, r float64) { c.Call("arcTo", x1.X(), x1.Y(), x2.X(), x2.Y(), r) }

// Transforms
func (c *Context) Scale(x r2.X)      { c.Call("scale", x.X(), x.Y()) }
func (c *Context) Translate(x r2.X)  { c.Call("translate", x.X(), x.Y()) }
func (c *Context) Rotate(θ r2.Angle) { c.Call("rotate", θ.Radians()) }

// Applies to current transform: [m00 m01; m10 m11] plus translation dx.
func (c *Context) Transform(m00, m01, m10, m11 float64, dx r2.X) {
	c.Call("transform", m00, m01, m10, m11, dx.X(), dx.Y())
}

// Resets transform to DX 0 and identity matrix then applies given transform.
func (c *Context) SetTransform(m00, m01, m10, m11 float64, dx r2.X) {
	c.Call("setTransform", m00, m01, m10, m11, dx.X(), dx.Y())
}
// RGBA is a color whose channels are expressed in [0,1]; Style converts
// it to a CSS rgba() string (channels scaled to 0-255, alpha kept as a
// float).
type RGBA struct {
	R, G, B, A float32
}
// sat converts a [0,1] channel value to an integer in [0,255],
// clamping out-of-range inputs.
func sat(x float32) int {
	i := int(x * 256)
	if i < 0 {
		return 0
	}
	if i > 255 {
		return 255
	}
	return i
}
// Style converts x to a CSS style string for use as FillStyle or
// StrokeStyle: RGBA values become "rgba(r,g,b,a)" with channels
// saturated to 0-255, strings pass through unchanged, and any other
// type is a programmer error and panics.
func (c *Context) Style(x interface{}) string {
	switch v := x.(type) {
	case RGBA:
		return fmt.Sprintf("rgba(%d,%d,%d,%f)", sat(v.R), sat(v.G), sat(v.B), v.A)
	case string:
		return v
	}
	panic(x)
}
// SetFillStyle sets the fill style from an RGBA or CSS string (see Style).
func (c *Context) SetFillStyle(x interface{}) {
	c.FillStyle = c.Style(x)
}

// SetStrokeStyle sets the stroke style from an RGBA or CSS string (see Style).
func (c *Context) SetStrokeStyle(x interface{}) {
	c.StrokeStyle = c.Style(x)
}
// ImageData mirrors the JS ImageData object: raw pixel bytes plus
// dimensions, exposed through GopherJS struct tags.
type ImageData struct {
	*js.Object
	Width  float64 `js:"width"`
	Height float64 `js:"height"`
	Data   []byte  `js:"data"`
}

// The image operations below are unimplemented stubs: they ignore their
// arguments and return zero values (see the "Not yet" list below).
func (c *Context) DrawImage(elt string, srcX, srcSize, dstX, dstSize r2.X) {}
func (c *Context) CreateImageData(size r2.X) (img *ImageData)              { return }
func (c *Context) GetImageData(x, size r2.X) (img *ImageData)              { return }
func (c *Context) PutImageData(img *ImageData, dx, dirtyX, dirtySize r2.X) { return }
/*
Not yet:
createImageData: createImageData()
drawImage: drawImage()
getImageData: getImageData()
putImageData: putImageData()
createLinearGradient: createLinearGradient()
createPattern: createPattern()
createRadialGradient: createRadialGradient()
drawFocusIfNeeded: drawFocusIfNeeded()
getContextAttributes: getContextAttributes()
*/
// Drawer is an interface for types which know how to draw with a Context.
type Drawer interface {
	Draw(c *Context)
}

// Listener receives an event at point x on the given Context.
type Listener interface {
	Event(c *Context, x r2.X)
}

// Interface resolves Drawers and Listeners by element id.
type Interface interface {
	Drawer(id string) (f Drawer, ok bool)
	Listener(id string) (f Listener, ok bool)
}

// Page implements Interface with simple id-keyed maps; the maps are
// allocated lazily by SetDrawer/SetListener, so the zero Page is usable.
type Page struct {
	Interface
	DrawerById   map[string]Drawer
	ListenerById map[string]Listener
}
// Drawer returns the Drawer registered under id; the boolean reports
// whether one exists. Lookups on the zero Page's nil map simply miss.
func (p *Page) Drawer(id string) (Drawer, bool) {
	drawer, found := p.DrawerById[id]
	return drawer, found
}
// SetDrawer registers d under id, lazily allocating the map on first use.
func (p *Page) SetDrawer(id string, d Drawer) {
	if p.DrawerById == nil {
		p.DrawerById = map[string]Drawer{}
	}
	p.DrawerById[id] = d
}
// Listener returns the Listener registered under id; the boolean
// reports whether one exists.
func (p *Page) Listener(id string) (Listener, bool) {
	listener, found := p.ListenerById[id]
	return listener, found
}
// SetListener registers d under id, lazily allocating the map on first use.
func (p *Page) SetListener(id string, d Listener) {
	if p.ListenerById == nil {
		p.ListenerById = map[string]Listener{}
	}
	p.ListenerById[id] = d
}
|
package util
import (
"os"
"fmt"
"bufio"
"io"
)
// FileReadUtil reads an entire file into memory.
type FileReadUtil interface {
	Read() ([]byte, error)
}

// BufferFileReader is a FileReadUtil that reads the file at path
// through a buffered reader.
type BufferFileReader struct {
	path string
}
// NewBufferFileReader returns a FileReadUtil that reads the file at path.
func NewBufferFileReader(path string) FileReadUtil {
	return &BufferFileReader{path: path}
}
// Read opens the file at reader.path and returns its full contents.
// The open error is wrapped with context; read errors other than
// io.EOF are returned as-is.
//
// Fix: the file handle was never closed, leaking a descriptor on every
// call; it is now closed via defer before returning.
func (reader *BufferFileReader) Read() ([]byte, error) {
	fi, err := os.Open(reader.path)
	if err != nil {
		return nil, fmt.Errorf("read file failed==>%s", err.Error())
	}
	defer fi.Close()
	r := bufio.NewReader(fi)
	buf := make([]byte, 1024)
	dest := make([]byte, 0, 1024)
	for {
		n, err := r.Read(buf)
		if err != nil && err != io.EOF {
			return nil, err
		}
		if n == 0 {
			break
		}
		dest = append(dest, buf[:n]...)
	}
	return dest, nil
}
|
package devpod
import (
"context"
"crypto/tls"
"fmt"
"github.com/loft-sh/devspace/pkg/devspace/kill"
"io"
"net/http"
"os"
syncpkg "sync"
"github.com/loft-sh/devspace/pkg/devspace/deploy"
"github.com/mgutz/ansi"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/loft-sh/devspace/pkg/devspace/config/loader"
"github.com/loft-sh/devspace/pkg/devspace/kubectl/selector"
"github.com/loft-sh/devspace/pkg/devspace/services/attach"
"github.com/loft-sh/devspace/pkg/devspace/services/logs"
"github.com/loft-sh/devspace/pkg/devspace/services/proxycommands"
"github.com/loft-sh/devspace/pkg/devspace/services/ssh"
"github.com/loft-sh/devspace/pkg/devspace/services/terminal"
logpkg "github.com/loft-sh/devspace/pkg/util/log"
"github.com/loft-sh/devspace/pkg/util/tomb"
"github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"
"gopkg.in/yaml.v3"
"time"
runtimevar "github.com/loft-sh/devspace/pkg/devspace/config/loader/variable/runtime"
"github.com/loft-sh/devspace/pkg/devspace/config/versions/latest"
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
"github.com/loft-sh/devspace/pkg/devspace/hook"
"github.com/loft-sh/devspace/pkg/devspace/services/podreplace"
"github.com/loft-sh/devspace/pkg/devspace/services/portforwarding"
"github.com/loft-sh/devspace/pkg/devspace/services/sync"
"github.com/loft-sh/devspace/pkg/devspace/services/targetselector"
"github.com/pkg/errors"
)
var (
	// openMaxWait bounds how long dev.open URLs are polled before giving up.
	openMaxWait = 5 * time.Minute
)

// Default streams wired into terminal/attach sessions; package-level so
// they can be overridden (e.g. in tests).
var (
	DefaultTerminalStdout io.Writer = os.Stdout
	DefaultTerminalStderr io.Writer = os.Stderr
	DefaultTerminalStdin  io.Reader = os.Stdin
)

// devPod tracks one running dev pod. m guards selectedPod, err, cancel
// and cancelCtx; done is closed exactly once when the pod has fully
// stopped (see startWithRetry).
type devPod struct {
	selectedPod *selector.SelectedPodContainer
	m           syncpkg.Mutex
	done        chan struct{}
	err         error
	cancelCtx context.Context
	cancel    context.CancelFunc
}

// newDevPod returns a devPod with its done channel allocated but not
// yet running; call Start to run it.
func newDevPod() *devPod {
	return &devPod{
		done: make(chan struct{}),
	}
}
// Start runs the dev pod described by devPodConfig. It fails fast if
// this devPod is already running (cancel already set). On a startup
// error the pod is stopped — which also waits for full shutdown —
// before the error is returned.
func (d *devPod) Start(ctx devspacecontext.Context, devPodConfig *latest.DevPod, options Options) error {
	d.m.Lock()
	if d.cancel != nil {
		d.m.Unlock()
		return errors.Errorf("dev pod is already running, please stop it before starting")
	}
	// Derive a cancellable context; Stop tears it down via d.cancel.
	d.cancelCtx, d.cancel = context.WithCancel(ctx.Context())
	ctx = ctx.WithContext(d.cancelCtx)
	d.m.Unlock()
	// log devpod to console if debug
	if ctx.Log().GetLevel() == logrus.DebugLevel {
		out, err := yaml.Marshal(devPodConfig)
		if err == nil {
			ctx.Log().Debugf("DevPod Config: \n%s\n", string(out))
		}
	}
	// start the dev pod
	err := d.startWithRetry(ctx, devPodConfig, options)
	if err != nil {
		d.Stop()
		return err
	}
	return nil
}
// Err returns the terminal error recorded when the dev pod stopped, if
// any. Safe for concurrent use.
func (d *devPod) Err() error {
	d.m.Lock()
	err := d.err
	d.m.Unlock()
	return err
}
// Done returns a channel that is closed once the dev pod has fully
// stopped (closed by the watcher goroutine in startWithRetry).
func (d *devPod) Done() <-chan struct{} {
	return d.done
}
// Stop cancels the dev pod's context (if it was started) and blocks
// until d.done is closed. NOTE(review): if Start was never called,
// nothing ever closes d.done and this blocks forever — confirm all
// call sites start the pod first.
func (d *devPod) Stop() {
	d.m.Lock()
	if d.cancel != nil {
		d.cancel()
	}
	d.m.Unlock()
	<-d.done
}
// startWithRetry runs the dev pod inside a tomb and installs a watcher
// goroutine that (a) closes d.done exactly once when everything has
// finished and (b) restarts the whole dev pod if its target pod
// disappears or is being terminated while the surrounding context is
// still alive. It returns the tomb's error if startup failed
// immediately, nil otherwise.
func (d *devPod) startWithRetry(ctx devspacecontext.Context, devPodConfig *latest.DevPod, options Options) error {
	t := &tomb.Tomb{}
	go func(ctx devspacecontext.Context) {
		// wait for parent context cancel
		// or that the DevPod is done
		select {
		case <-ctx.Context().Done():
		case <-t.Dead():
		}
		if ctx.IsDone() {
			// Parent was cancelled: wait for all tomb goroutines to end,
			// then signal completion.
			<-t.Dead()
			ctx.Log().Debugf("Stopped dev %s", devPodConfig.Name)
			close(d.done)
			return
		}
		// check if pod was terminated
		d.m.Lock()
		selectedPod := d.selectedPod
		d.selectedPod = nil
		d.m.Unlock()
		// check if we need to restart
		if selectedPod != nil {
			shouldRestart := false
			// Poll once a second until a decision can be made; transient
			// API errors (e.g. connectivity) keep polling.
			err := wait.PollImmediateUntil(time.Second, func() (bool, error) {
				pod, err := ctx.KubeClient().KubeClient().CoreV1().Pods(selectedPod.Pod.Namespace).Get(ctx.Context(), selectedPod.Pod.Name, metav1.GetOptions{})
				if err != nil {
					if kerrors.IsNotFound(err) {
						ctx.Log().Debugf("Restart dev %s because pod isn't found anymore", devPodConfig.Name)
						shouldRestart = true
						return true, nil
					}
					// this case means there might be problems with internet
					ctx.Log().Debugf("error trying to retrieve pod: %v", err)
					return false, nil
				} else if pod.DeletionTimestamp != nil {
					ctx.Log().Debugf("Restart dev %s because pod is terminating", devPodConfig.Name)
					shouldRestart = true
					return true, nil
				}
				return true, nil
			}, ctx.Context().Done())
			if err != nil {
				if err != wait.ErrWaitTimeout {
					ctx.Log().Errorf("error restarting dev: %v", err)
				}
			} else if shouldRestart {
				// restart re-enters startWithRetry, whose new watcher will
				// be responsible for closing d.done — so don't close it here.
				d.restart(ctx, devPodConfig, options)
				return
			}
		}
		ctx.Log().Debugf("Stopped dev %s", devPodConfig.Name)
		d.m.Lock()
		d.err = t.Err()
		d.m.Unlock()
		close(d.done)
	}(ctx)
	// Create a new tomb and run it
	tombCtx := t.Context(ctx.Context())
	ctx = ctx.WithContext(tombCtx)
	<-t.NotifyGo(func() error {
		return d.start(ctx, devPodConfig, options, t)
	})
	if !t.Alive() {
		return t.Err()
	}
	return nil
}
// restart re-runs startWithRetry until it succeeds, the context ends,
// or the surrounding context is done, waiting ten seconds between
// failed attempts.
func (d *devPod) restart(ctx devspacecontext.Context, devPodConfig *latest.DevPod, options Options) {
	for {
		err := d.startWithRetry(ctx, devPodConfig, options)
		if err == nil {
			return
		}
		if ctx.IsDone() {
			return
		}
		ctx.Log().Infof("Restart dev %s because of: %v", devPodConfig.Name, err)
		select {
		case <-ctx.Context().Done():
			return
		case <-time.After(time.Second * 10):
			// back off, then try again
		}
	}
}
// start performs one full startup of the dev pod: optionally replaces
// (or reverts a previous replacement of) the target pod, waits for a
// matching running pod, validates that every configured dev container
// exists in it, schedules dev.open URL polling, starts the services
// (sync/ports/ssh/proxy), and finally hands off to a terminal, attach
// session, or log streaming — whichever is configured.
func (d *devPod) start(ctx devspacecontext.Context, devPodConfig *latest.DevPod, opts Options, parent *tomb.Tomb) error {
	// check first if we need to replace the pod
	if !opts.DisablePodReplace && needPodReplace(devPodConfig) {
		err := podreplace.NewPodReplacer().ReplacePod(ctx, devPodConfig)
		if err != nil {
			return errors.Wrap(err, "replace pod")
		}
	} else {
		// Replacement not needed (or disabled): revert any replacement
		// left over from a previous run, if the remote cache records one.
		devPodCache, ok := ctx.Config().RemoteCache().GetDevPod(devPodConfig.Name)
		if ok && devPodCache.Deployment != "" {
			_, err := podreplace.NewPodReplacer().RevertReplacePod(ctx, &devPodCache, &deploy.PurgeOptions{ForcePurge: true})
			if err != nil {
				return errors.Wrap(err, "replace pod")
			}
		}
	}
	// Resolve the (possibly runtime-variable) image selector, if any.
	var imageSelector []string
	if devPodConfig.ImageSelector != "" {
		imageSelectorObject, err := runtimevar.NewRuntimeResolver(ctx.WorkingDir(), true).FillRuntimeVariablesAsImageSelector(ctx.Context(), devPodConfig.ImageSelector, ctx.Config(), ctx.Dependencies())
		if err != nil {
			return err
		}
		imageSelector = []string{imageSelectorObject.Image}
	}
	// wait for pod to be ready
	ctx.Log().Infof("Waiting for pod to become ready...")
	options := targetselector.NewEmptyOptions().
		ApplyConfigParameter("", devPodConfig.LabelSelector, imageSelector, devPodConfig.Namespace, "").
		WithWaitingStrategy(targetselector.NewUntilNewestRunningWaitingStrategy(time.Millisecond * 500)).
		WithSkipInitContainers(true)
	var err error
	selectedPod, err := targetselector.NewTargetSelector(options).SelectSingleContainer(ctx.Context(), ctx.KubeClient(), ctx.Log())
	if err != nil {
		return errors.Wrap(err, "waiting for pod to become ready")
	}
	// check if the correct pod is matched: every named dev container must
	// exist in the selected pod; otherwise err is set and iteration stops.
	loader.EachDevContainer(devPodConfig, func(devContainer *latest.DevContainer) bool {
		if devContainer.Container == "" {
			return true
		}
		// check if the container exists in the pod
		for _, container := range selectedPod.Pod.Spec.Containers {
			if container.Name == devContainer.Container {
				return true
			}
		}
		for _, container := range selectedPod.Pod.Spec.InitContainers {
			if container.Name == devContainer.Container {
				return true
			}
		}
		err = fmt.Errorf("selected pod '%s/%s' doesn't include container '%s', please make sure you don't have overlapping label selectors within the namespace and the pod you select contains container '%s'", selectedPod.Pod.Namespace, selectedPod.Pod.Name, devContainer.Container, devContainer.Container)
		return false
	})
	if err != nil {
		return errors.Wrap(err, "select pod")
	}
	ctx.Log().Infof("Selected pod %s", ansi.Color(selectedPod.Pod.Name, "yellow+b"))
	// set selected pod
	d.m.Lock()
	d.selectedPod = selectedPod
	d.m.Unlock()
	// Run dev.open configs
	if !opts.DisableOpen {
		ctx := ctx.WithLogger(ctx.Log().WithPrefixColor("open ", "yellow+b"))
		for _, openConfig := range devPodConfig.Open {
			if openConfig.URL != "" {
				url := openConfig.URL // copy captured by the goroutine below
				ctx.Log().Infof("Opening '%s' as soon as application will be started", openConfig.URL)
				parent.Go(func() error {
					// Poll the URL once per second until it responds or
					// openMaxWait elapses; give up silently either way.
					now := time.Now()
					for time.Since(now) < openMaxWait {
						select {
						case <-ctx.Context().Done():
							return nil
						case <-time.After(time.Second):
							err := tryOpen(ctx.Context(), url, ctx.Log())
							if err == nil {
								return nil
							}
						}
					}
					return nil
				})
			}
		}
	}
	// start sync and port forwarding
	err = d.startServices(ctx, devPodConfig, newTargetSelector(selectedPod.Pod.Name, selectedPod.Pod.Namespace, selectedPod.Container.Name, parent), opts, parent)
	if err != nil {
		return err
	}
	// start logs
	terminalDevContainer := d.getTerminalDevContainer(devPodConfig)
	if terminalDevContainer != nil {
		return d.startTerminal(ctx, terminalDevContainer, opts, selectedPod, parent)
	}
	// start attach if defined
	attachDevContainer := d.getAttachDevContainer(devPodConfig)
	if attachDevContainer != nil {
		return d.startAttach(ctx, attachDevContainer, opts, selectedPod, parent)
	}
	return d.startLogs(ctx, devPodConfig, selectedPod, parent)
}
// tryOpen probes url with a one-second GET (TLS verification disabled,
// as dev endpoints commonly use self-signed certs) and, when the
// service answers with anything other than 502/503, waits a short
// grace period and opens the URL in the local browser. It returns an
// error while the service is not yet reachable so the caller keeps
// polling.
//
// Fixes: resp is never nil when err is nil, so the previous nil-guards
// were dead code — the body is now always closed via a plain defer,
// and drained first so the keep-alive connection can be reused.
func tryOpen(ctx context.Context, url string, log logpkg.Logger) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(timeoutCtx, "GET", url, nil)
	if err != nil {
		return err
	}
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != http.StatusBadGateway && resp.StatusCode != http.StatusServiceUnavailable {
		select {
		case <-ctx.Done():
			return nil
		case <-time.After(time.Second):
		}
		_ = open.Start(url)
		log.Donef("Successfully opened %s", url)
		return nil
	}
	return fmt.Errorf("not reachable")
}
// startLogs streams container logs, under a "logs" prefix, for every
// dev container that has logging enabled — one tomb goroutine each.
func (d *devPod) startLogs(ctx devspacecontext.Context, devPodConfig *latest.DevPod, selectedPod *selector.SelectedPodContainer, parent *tomb.Tomb) error {
	ctx = ctx.WithLogger(ctx.Log().WithPrefixColor("logs ", "yellow+b"))
	loader.EachDevContainer(devPodConfig, func(devContainer *latest.DevContainer) bool {
		disabled := devContainer.Logs == nil || (devContainer.Logs.Enabled != nil && !*devContainer.Logs.Enabled)
		if disabled {
			return true
		}
		parent.Go(func() error {
			return logs.StartLogs(ctx, devContainer, newTargetSelector(selectedPod.Pod.Name, selectedPod.Pod.Namespace, selectedPod.Container.Name, parent))
		})
		return true
	})
	return nil
}
// getAttachDevContainer returns the first dev container with attach
// enabled, or nil when none is configured.
func (d *devPod) getAttachDevContainer(devPodConfig *latest.DevPod) *latest.DevContainer {
	var found *latest.DevContainer
	loader.EachDevContainer(devPodConfig, func(c *latest.DevContainer) bool {
		if c.Attach == nil || (c.Attach.Enabled != nil && !*c.Attach.Enabled) {
			return true // keep searching
		}
		found = c
		return false // stop iterating
	})
	return found
}
// getTerminalDevContainer returns the first dev container with a
// terminal enabled, or nil when none is configured.
func (d *devPod) getTerminalDevContainer(devPodConfig *latest.DevPod) *latest.DevContainer {
	var found *latest.DevContainer
	loader.EachDevContainer(devPodConfig, func(c *latest.DevContainer) bool {
		if c.Terminal == nil || (c.Terminal.Enabled != nil && !*c.Terminal.Enabled) {
			return true // keep searching
		}
		found = c
		return false // stop iterating
	})
	return found
}
// startAttach attaches the user's terminal to the selected container in
// a tomb goroutine. Global log output is silenced while the session
// owns the terminal. After the session ends (with the context still
// alive), DevSpace is stopped entirely unless
// opts.ContinueOnTerminalExit is set, in which case only this dev
// pod's tomb is killed.
func (d *devPod) startAttach(ctx devspacecontext.Context, devContainer *latest.DevContainer, opts Options, selectedPod *selector.SelectedPodContainer, parent *tomb.Tomb) error {
	parent.Go(func() error {
		id, err := logpkg.AcquireGlobalSilence()
		if err != nil {
			return err
		}
		defer logpkg.ReleaseGlobalSilence(id)
		// make sure the global log is silent
		ctx = ctx.WithLogger(ctx.Log().WithPrefixColor("attach ", "yellow+b"))
		err = attach.StartAttach(
			ctx,
			devContainer,
			newTargetSelector(selectedPod.Pod.Name, selectedPod.Pod.Namespace, selectedPod.Container.Name, parent),
			DefaultTerminalStdout,
			DefaultTerminalStderr,
			DefaultTerminalStdin,
			parent,
		)
		if err != nil {
			return errors.Wrap(err, "error in attach")
		}
		// if context is done we just return
		if ctx.IsDone() {
			return nil
		}
		// kill ourselves here
		if !opts.ContinueOnTerminalExit {
			kill.StopDevSpace("")
		} else {
			parent.Kill(nil)
		}
		return nil
	})
	return nil
}
// startTerminal opens an interactive terminal in the selected container
// in a tomb goroutine. Global log output is silenced while the session
// owns the terminal. After the session ends (with the context still
// alive), DevSpace is stopped entirely unless
// opts.ContinueOnTerminalExit is set, in which case only this dev
// pod's tomb is killed.
func (d *devPod) startTerminal(ctx devspacecontext.Context, devContainer *latest.DevContainer, opts Options, selectedPod *selector.SelectedPodContainer, parent *tomb.Tomb) error {
	parent.Go(func() error {
		id, err := logpkg.AcquireGlobalSilence()
		if err != nil {
			return err
		}
		defer logpkg.ReleaseGlobalSilence(id)
		// make sure the global log is silent
		ctx = ctx.WithLogger(ctx.Log().WithPrefixColor("term ", "yellow+b"))
		err = terminal.StartTerminal(
			ctx,
			devContainer,
			newTargetSelector(selectedPod.Pod.Name, selectedPod.Pod.Namespace, selectedPod.Container.Name, parent),
			DefaultTerminalStdout,
			DefaultTerminalStderr,
			DefaultTerminalStdin,
			parent,
		)
		if err != nil {
			return errors.Wrap(err, "error in terminal forwarding")
		}
		// if context is done we just return
		if ctx.IsDone() {
			return nil
		}
		// kill ourselves here
		if !opts.ContinueOnTerminalExit {
			kill.StopDevSpace("")
		} else {
			parent.Kill(nil)
		}
		return nil
	})
	return nil
}
// startServices starts the dev pod's supporting services around the
// before/after hooks: sync and port forwarding are started first and
// waited for, then SSH and reverse-command proxying are started and
// waited for. NOTE(review): the two-phase ordering presumably exists
// because SSH/proxy ride on the forwarded ports — confirm before
// reordering.
func (d *devPod) startServices(ctx devspacecontext.Context, devPod *latest.DevPod, selector targetselector.TargetSelector, opts Options, parent *tomb.Tomb) error {
	pluginErr := hook.ExecuteHooks(ctx, map[string]interface{}{}, "devCommand:before:sync", "dev.beforeSync", "devCommand:before:portForwarding", "dev.beforePortForwarding")
	if pluginErr != nil {
		return pluginErr
	}
	// Start sync
	syncDone := parent.NotifyGo(func() error {
		if opts.DisableSync {
			return nil
		}
		// add prefix
		ctx := ctx.WithLogger(ctx.Log().WithPrefixColor("sync ", "yellow+b"))
		err := sync.StartSync(ctx, devPod, selector, parent)
		return err
	})
	// Start Port Forwarding
	portForwardingDone := parent.NotifyGo(func() error {
		if opts.DisablePortForwarding {
			return nil
		}
		ctx := ctx.WithLogger(ctx.Log().WithPrefixColor("ports ", "yellow+b"))
		return portforwarding.StartPortForwarding(ctx, devPod, selector, parent)
	})
	// wait for both to finish
	<-syncDone
	<-portForwardingDone
	// Start SSH
	sshDone := parent.NotifyGo(func() error {
		// add ssh prefix
		ctx := ctx.WithLogger(ctx.Log().WithPrefixColor("ssh ", "yellow+b"))
		return ssh.StartSSH(ctx, devPod, selector, parent)
	})
	// Start Reverse Commands
	reverseCommandsDone := parent.NotifyGo(func() error {
		// add proxy prefix
		ctx := ctx.WithLogger(ctx.Log().WithPrefixColor("proxy ", "yellow+b"))
		return proxycommands.StartProxyCommands(ctx, devPod, selector, parent)
	})
	// wait for ssh and reverse commands
	<-sshDone
	<-reverseCommandsDone
	// execute hooks
	pluginErr = hook.ExecuteHooks(ctx, map[string]interface{}{}, "devCommand:after:sync", "dev.afterSync", "devCommand:after:portForwarding", "dev.afterPortForwarding")
	if pluginErr != nil {
		return pluginErr
	}
	return nil
}
// needPodReplace reports whether devPodConfig requires replacing the
// target pod: either pod-level patches exist, or some dev container
// needs replacement (see needPodReplaceContainer).
func needPodReplace(devPodConfig *latest.DevPod) bool {
	if len(devPodConfig.Patches) > 0 {
		return true
	}
	replace := false
	loader.EachDevContainer(devPodConfig, func(c *latest.DevContainer) bool {
		if !needPodReplaceContainer(c) {
			return true // keep scanning
		}
		replace = true
		return false // found one; stop
	})
	return replace
}
// needPodReplaceContainer reports whether this dev container's
// configuration can only be realized by replacing the pod: a custom dev
// image, persisted paths, an injected restart helper, a
// replace-enabled terminal or attach session, env/command/args
// overrides, sync-triggered restarts (which need the injected helper),
// a working-dir override, or resource overrides.
func needPodReplaceContainer(c *latest.DevContainer) bool {
	switch {
	case c.DevImage != "":
		return true
	case len(c.PersistPaths) > 0:
		return true
	case c.RestartHelper != nil && c.RestartHelper.Inject != nil && *c.RestartHelper.Inject:
		return true
	case c.Terminal != nil && !c.Terminal.DisableReplace && (c.Terminal.Enabled == nil || *c.Terminal.Enabled):
		return true
	case c.Attach != nil && !c.Attach.DisableReplace && (c.Attach.Enabled == nil || *c.Attach.Enabled):
		return true
	case len(c.Env) > 0:
		return true
	case len(c.Command) > 0:
		return true
	case c.Args != nil:
		return true
	}
	// A restart-on-upload sync rule relies on the restart helper, which
	// must be injected (and hence the pod replaced) unless injection is
	// explicitly disabled.
	if c.RestartHelper == nil || c.RestartHelper.Inject == nil || *c.RestartHelper.Inject {
		for _, s := range c.Sync {
			if s.OnUpload != nil && s.OnUpload.RestartContainer {
				return true
			}
		}
	}
	if c.WorkingDir != "" {
		return true
	}
	if c.Resources != nil {
		return true
	}
	return false
}
|
package store
import (
log "git.ronaksoftware.com/blip/server/internal/logger"
"git.ronaksoftware.com/blip/server/internal/tools"
"git.ronaksoftware.com/blip/server/pkg/config"
"github.com/mailru/easyjson/gen"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.uber.org/zap"
"strings"
"sync"
"time"
)
/*
Creation Time: 2020 - Feb - 02
Created by: (ehsan)
Maintainers:
1. Ehsan N. Moosa (E2)
Auditor: Ehsan N. Moosa (E2)
Copyright Ronak Software Group 2018
*/
//go:generate rm -f *_easyjson.go
//go:generate easyjson store.go messages.go
var (
	// Reference gen so the easyjson import survives for go:generate.
	_ = gen.Generator{}
	// storeCol is the store registry collection in the main database.
	storeCol *mongo.Collection
	// stores and storeConns map store id to its metadata and live
	// client; both are guarded by storesMtx.
	stores     map[int64]*Store
	storeConns map[int64]*mongo.Client
	storesMtx  sync.RWMutex
)

// Object-storage bucket names used by stores.
const (
	BucketSongs  = "songs"
	BucketCovers = "covers"
)
// InitMongo points the package at the store registry collection inside
// the main database; call it before Init.
func InitMongo(c *mongo.Client) {
	storeCol = c.Database(config.DbMain).Collection(config.ColStore)
}
// Init loads all store definitions into the in-memory caches, opens one
// MongoDB connection per store, and then starts the change-stream watcher
// that keeps the caches up to date.
func Init() {
	storesMtx.Lock()
	defer storesMtx.Unlock()
	stores = make(map[int64]*Store)
	storeConns = make(map[int64]*mongo.Client)
	log.Info("Loading Stores ...")
	cur, err := storeCol.Find(nil, bson.D{})
	if err != nil {
		log.Warn("Error On Initializing Stores", zap.Error(err))
		// Fix: the original fell through to cur.Next with a nil cursor,
		// which panics; bail out instead.
		return
	}
	// Fix: release the cursor's server-side resources when done.
	defer cur.Close(nil)
	for cur.Next(nil) {
		storeX := &Store{}
		err = cur.Decode(storeX)
		if err != nil {
			continue
		}
		if err := createStoreConnection(storeX); err != nil {
			log.Warn("Err On Create Store Connection",
				zap.Int64("StoreID", storeX.ID),
				zap.String("Dsn", storeX.Dsn),
				zap.Error(err),
			)
			continue
		}
	}
	log.Info("Stores Loaded.")
	go watchForStores()
}
// createStoreConnection dials the store's DSN as a direct connection,
// verifies it with a ping, and registers both the client and the store in
// the package caches. tools.Try(5, time.Second, ...) presumably retries the
// whole sequence up to 5 times with 1s pauses — confirm against tools.
// NOTE(review): this writes stores/storeConns without locking; callers
// (Init, watchForStores) hold storesMtx around the call.
func createStoreConnection(storeX *Store) error {
	return tools.Try(5, time.Second, func() error {
		mongoClient, err := mongo.Connect(nil, options.Client().ApplyURI(storeX.Dsn).SetDirect(true))
		if err != nil {
			return err
		}
		err = mongoClient.Ping(nil, nil)
		if err != nil {
			return err
		}
		storeConns[storeX.ID] = mongoClient
		stores[storeX.ID] = storeX
		return nil
	})
}
// watchForStores tails a change stream on the store collection and keeps the
// in-memory store/connection caches in sync with inserts and updates. It
// runs forever, resuming after stream errors via the last resume token.
func watchForStores() {
	var resumeToken bson.Raw
	for {
		opts := options.ChangeStream().SetFullDocument(options.UpdateLookup)
		if resumeToken != nil {
			opts.SetStartAfter(resumeToken)
		}
		stream, err := storeCol.Watch(nil, mongo.Pipeline{}, opts)
		if err != nil {
			log.Warn("Error On Watch Stream for Stores", zap.Error(err))
			time.Sleep(time.Second)
			continue
		}
		for stream.Next(nil) {
			storeX := &Store{}
			resumeToken = stream.ResumeToken()
			operationType := strings.Trim(stream.Current.Lookup("operationType").String(), "\"")
			switch operationType {
			case "insert", "update":
				err = stream.Current.Lookup("fullDocument").UnmarshalWithRegistry(bson.DefaultRegistry, storeX)
				if err != nil {
					log.Warn("Error On Decoding Store", zap.Error(err))
					continue
				}
				storesMtx.Lock()
				err := createStoreConnection(storeX)
				// Fix: the original did `continue` on error while still
				// holding storesMtx, deadlocking every later Lock().
				storesMtx.Unlock()
				if err != nil {
					log.Warn("Err On Create Store Connection",
						zap.Int64("StoreID", storeX.ID),
						zap.String("Dsn", storeX.Dsn),
						zap.Error(err),
					)
					continue
				}
				log.Info("Store Added/Updated", zap.Int64("StoreID", storeX.ID))
			}
		}
		_ = stream.Close(nil)
	}
}
|
package main
import (
"fmt"
"strconv"
"time"
)
// sample11 sends 19 strings of the form "select testN" into ch,
// pausing one second after each send.
func sample11(ch chan string) {
	for n := 0; n < 19; n++ {
		msg := "select test" + strconv.Itoa(n)
		ch <- msg
		time.Sleep(time.Second)
	}
}
// sample112 sends the integers 0 through 18 into ch,
// pausing two seconds after each send.
func sample112(ch chan int) {
	for n := 0; n < 19; n++ {
		ch <- n
		time.Sleep(2 * time.Second)
	}
}
// main starts ten producers per channel and multiplexes both channels
// forever with a select loop, printing every received value.
func main() {
	ch1 := make(chan string)
	ch2 := make(chan int)
	for i := 0; i < 10; i++ {
		go sample11(ch1)
		go sample112(ch2)
	}
	// The second value of each receive is the "channel still open" flag.
	// Neither channel is ever closed here, so the failure branches are
	// effectively unreachable; the loop never terminates.
	for {
		select {
		case str, ch1Check := <-ch1:
			if !ch1Check {
				fmt.Println("ch1 failed")
			}
			fmt.Println(str)
		case p, ch2Check := <-ch2:
			if !ch2Check {
				fmt.Println("ch2 failed")
			}
			fmt.Println(p)
		}
	}
	//time.Sleep(60*time.Second)
}
|
package elastic
import (
"github.com/olivere/elastic"
"time"
"context"
)
// Eser is the minimal elasticsearch surface used by this package:
// connect, write a document, and ensure the index exists.
type Eser interface {
	Init() error
	Put(area string, message interface{}) error
	Index() error
}

// es implements Eser on top of the olivere/elastic client.
type es struct {
	client *elastic.Client // set by Init
	addr   string          // elasticsearch base URL
	ctx    context.Context // context passed to all requests
	index  string          // index ensured by Index()
}
// New builds an Eser for the given elasticsearch address and index,
// e.g. addr "http://192.168.2.10:9201". Call Init before use.
func New(addr, index string) Eser {
	instance := &es{
		addr:  addr,
		index: index,
		ctx:   context.Background(),
	}
	return instance
}
// Init connects to the configured address and stores the client on e.
// Health checks run every 10 seconds; node sniffing is disabled.
func (e *es) Init() error {
	client, err := elastic.NewClient(
		elastic.SetURL(e.addr),
		// TODO (translated from original note): confirm what SetSniff(false)
		// implies — presumably it disables cluster node discovery.
		elastic.SetSniff(false),
		elastic.SetHealthcheckInterval(10*time.Second))
	//elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)),
	//elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags)))
	if err != nil {
		return err
		// Handle error
	}
	e.client = client
	return nil
}
/*
// Add a document to the index @ liuliqiang.info
tweet := Tweet{User: "olivere", Message: "Take Five"}
_, err = client.Index().
Index("twitter").
Type("tweet").
Id("1").
BodyJson(tweet).
Do()
if err != nil {
// Handle error
panic(err)
}
*/
// Put indexes message as a document of type "employee" in the given area
// (index name), returning the request error, if any.
func (e *es) Put(area string, message interface{}) error {
	req := e.client.Index().Index(area).Type("employee")
	_, err := req.BodyJson(message).Do(e.ctx)
	return err
}
// Index creates the configured index if it does not already exist.
// (Original comment, translated: "create the index if it doesn't exist".)
func (e *es) Index() error {
	exists, err := e.client.IndexExists(e.index).Do(e.ctx)
	if err != nil {
		return err
	}
	if !exists {
		// Create a new index.
		createIndex, err := e.client.CreateIndex(e.index).Do(e.ctx)
		if err != nil {
			return err
			// Handle error
		}
		if !createIndex.Acknowledged {
			// Not acknowledged.
			// NOTE(review): silently ignored — consider surfacing an error.
		}
	}
	return nil
}
package apis
import (
"fmt"
"trueabc.top/zinx/ziface"
"trueabc.top/zinx/zinx_app_demo/mmo_game/game_server/core"
"trueabc.top/zinx/zinx_app_demo/mmo_game/game_server/pb"
"trueabc.top/zinx/znet"
)
// MoveApi routes position-update messages sent by game clients.
type MoveApi struct {
	znet.BaseRouter
}
// Handler decodes a client's pb.Position message, resolves the sending
// player from the connection's "pid" property, and broadcasts the new
// position via the player object.
func (a *MoveApi) Handler(request ziface.IRequest) {
	// Decode the client protocol message.
	proto_msg := &pb.Position{}
	err := proto_msg.Unmarshal(request.GetData())
	if err != nil {
		fmt.Println("Move: Position Unmarshal error ", err)
		return
	}
	// Resolve the player that sent this position update.
	pid, err := request.GetConnection().GetProperty("pid")
	if err != nil {
		fmt.Println("GetProperty pid error: ", err)
		return
	}
	fmt.Printf("User pid = %d, move(%f, %f, %f, %f)\n", pid, proto_msg.X, proto_msg.Y, proto_msg.Z, proto_msg.V)
	// Fix: guard the type assertion and the lookup; a malformed "pid"
	// property or an unknown player previously panicked the handler.
	pidValue, ok := pid.(int32)
	if !ok {
		fmt.Println("Move: pid property is not an int32:", pid)
		return
	}
	player := core.WManObj.GetPlayerByPid(pidValue)
	if player == nil {
		fmt.Println("Move: no player found for pid:", pidValue)
		return
	}
	// Broadcast the current player's position to other players.
	player.UpdatePos(proto_msg.X, proto_msg.Y, proto_msg.Z, proto_msg.V)
}
|
package main
import "fmt"
// Package-level booleans stay at their zero value (false) until assigned.
var java, python, c bool

// main demonstrates Go zero values: prints "false false false 0".
func main() {
	var i int
	fmt.Println(java, python, c, i)
}
|
package ravendb
// tcpNegotiateParameters bundles the inputs for negotiating a TCP
// connection with a RavenDB node (semantics inferred from field names —
// confirm against callers).
type tcpNegotiateParameters struct {
	operation          operationTypes
	version            int
	database           string
	sourceNodeTag      string
	destinationNodeTag string
	destinationUrl     string

	// readResponseAndGetVersionCallback reads the server's response for the
	// given URL and returns the negotiated version.
	readResponseAndGetVersionCallback func(string) int
}
|
// threeSum returns all unique triplets from nums that sum to zero, using
// sort + two pointers. Triplets are emitted in sorted order; duplicates of
// the first element and of the pair are skipped. Returns nil for empty input.
func threeSum(nums []int) [][]int {
	if len(nums) == 0 {
		return nil
	}
	ans := make([][]int, 0, 1)
	sort.Ints(nums)
	for first := 0; first < len(nums); first++ {
		// skip duplicate choices of the first element
		if first > 0 && nums[first] == nums[first-1] {
			continue
		}
		want := -nums[first]
		lo, hi := first+1, len(nums)-1
		for lo < hi {
			sum := nums[lo] + nums[hi]
			switch {
			case sum < want:
				lo++
			case sum > want:
				hi--
			default:
				ans = append(ans, []int{nums[first], nums[lo], nums[hi]})
				// skip duplicates of the matched pair
				for lo < hi && nums[lo] == nums[lo+1] {
					lo++
				}
				for lo < hi && nums[hi] == nums[hi-1] {
					hi--
				}
				lo++
				hi--
			}
		}
	}
	return ans
}
package main
// Leetcode 1055. (medium)
// shortestWay returns the minimum number of subsequences of source whose
// concatenation equals target, or -1 if some target letter never occurs in
// source. Uses a next-occurrence table: next[i][c] is the smallest index
// >= i where source contains letter c, or len(source) if absent.
func shortestWay(source string, target string) int {
	m := len(source)
	next := make([][]int, m+1)
	for i := range next {
		next[i] = make([]int, 26)
	}
	for c := 0; c < 26; c++ {
		next[m][c] = m
	}
	for i := m - 1; i >= 0; i-- {
		for c := 0; c < 26; c++ {
			next[i][c] = next[i+1][c]
			if int(source[i]-'a') == c {
				next[i][c] = i
			}
		}
	}
	passes := 1
	pos := 0
	for _, ch := range []byte(target) {
		c := int(ch - 'a')
		if next[0][c] == m {
			// letter absent from source: impossible
			return -1
		}
		pos = next[pos][c]
		if pos == m {
			// exhausted this pass through source; start a new one
			passes++
			pos = next[0][c]
		}
		pos++
	}
	return passes
}
|
package main
import (
"encoding/csv"
"fmt"
"io"
"io/ioutil"
"log"
"strings"
)
// alphabet maps letter positions: alphabet[i] is the (i+1)-th uppercase letter.
var alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

// names holds the list read from names.txt; sorted in place by sortNames.
var names []string
// main reads names.txt, sorts the names alphabetically, and prints the sum
// of each name's score (letter values times 1-based position). This appears
// to be Project Euler problem 22.
func main() {
	names = readNamesFile("names.txt")
	sortNames(0, len(names)-1)
	sum := 0
	for i := range names {
		sum += score(i)
	}
	fmt.Println("sum:", sum)
}
// score returns the alphabetical value of names[i] (A=1, B=2, ...)
// multiplied by its 1-based position in the sorted list.
func score(i int) int {
	total := 0
	for pos := range names[i] {
		total += indexOf(names[i][pos]) + 1
	}
	return total * (i + 1)
}
//quicksort
// sortNames sorts the package-level names slice in place over [lo, hi].
func sortNames(lo, hi int) {
	if lo >= hi {
		return
	}
	pivot := partitionNames(lo, hi)
	sortNames(lo, pivot-1)
	sortNames(pivot+1, hi)
}
// partitionNames performs a Lomuto partition of names[lo..hi] around the
// pivot names[hi], returning the pivot's final index.
func partitionNames(lo, hi int) int {
	pivot := names[hi]
	boundary := lo
	for j := lo; j < hi; j++ {
		if comesFirst(names[j], pivot) {
			names[boundary], names[j] = names[j], names[boundary]
			boundary++
		}
	}
	names[boundary], names[hi] = names[hi], names[boundary]
	return boundary
}
// indexOf returns the zero-based position of char in alphabet ('A' -> 0),
// or -1 if char is not an uppercase ASCII letter.
func indexOf(char byte) int {
	// strings.IndexByte returns -1 when the byte is absent, exactly
	// matching the hand-rolled loop it replaces.
	return strings.IndexByte(alphabet, char)
}
// comesFirst reports whether name1 sorts at or before name2, comparing
// alphabet positions character by character; a shorter name1 that is a
// prefix of name2 (or equal) sorts first.
func comesFirst(name1, name2 string) bool {
	for n := range name1 {
		if n >= len(name2) {
			return false
		}
		a := indexOf(name1[n])
		b := indexOf(name2[n])
		switch {
		case a < b:
			return true
		case a > b:
			return false
		}
	}
	return true
}
// readNamesFile reads filename and parses its single CSV record into a list
// of names. On a read failure it reports the error and returns an empty
// list; CSV errors other than EOF are fatal.
func readNamesFile(filename string) []string {
	var names []string
	in, err := ioutil.ReadFile(filename)
	if err != nil {
		// Fix: include the underlying error instead of a bare message.
		fmt.Println("there was an error reading the file:", err)
		return names
	}
	r := csv.NewReader(strings.NewReader(string(in)))
	names, err = r.Read()
	if err == io.EOF {
		return names
	}
	if err != nil {
		// log.Fatal exits the process; the original's `return names`
		// after it was dead code and has been removed.
		log.Fatal(err)
	}
	return names
}
|
package rpc
import (
"bytes"
"github.com/davecheney/nfs/xdr"
"io"
)
// transport moves raw RPC records over some connection.
type transport interface {
	send([]byte) error
	recv() ([]byte, error)
	io.Closer
}

// mismatch_info carries the low/high version range a server reports when it
// rejects the requested RPC version.
type mismatch_info struct {
	low  uint32
	high uint32
}

// Header is the common prefix of an RPC call body: protocol version,
// program, program version, procedure, and the credential/verifier pair.
type Header struct {
	Rpcvers uint32
	Prog    uint32
	Vers    uint32
	Proc    uint32
	Cred    Auth
	Verf    Auth
}

// message is the outermost RPC record: transaction id, message type, and
// the type-specific body.
type message struct {
	Xid     uint32
	Msgtype uint32
	Body    interface{}
}

// Auth is an opaque RPC authentication blob tagged with its flavor.
type Auth struct {
	Flavor uint32
	Body   []byte
}

// AUTH_NULL is the empty authentication credential (flavor 0).
var AUTH_NULL = Auth{
	0,
	[]byte{},
}

// AUTH_UNIX is the AUTH_UNIX (AUTH_SYS) credential body.
// NOTE(review): Gids is a single uint32 here, while the ONC RPC spec allows
// a list of auxiliary gids — confirm against the xdr encoding in use.
type AUTH_UNIX struct {
	Stamp       uint32
	Machinename string
	Uid         uint32
	Gid         uint32
	Gids        uint32
}
// Auth converts a into an Auth opaque struct: the body is the XDR encoding
// of a, tagged with flavor 1 (AUTH_UNIX).
func (a AUTH_UNIX) Auth() Auth {
	w := new(bytes.Buffer)
	// NOTE(review): any error from xdr.Write is silently discarded here.
	xdr.Write(w, a)
	return Auth{
		1,
		w.Bytes(),
	}
}
|
package main
import "fmt"
// Slices in go or maybe even arrays(Check this), will double the memory allocated if full and you want to `append` to it
// NOTE: ^^ in ex4a4b4c.go, the groceryList append function adds to the array BUT the fucntion is pure so I don't know if the statement is true
// arraysAndSlices contrasts a fixed-size array with a slice created by make.
func arraysAndSlices() {
	var arr [5]int
	// For creating a slice of an array, the memory needs to be allocated beforehand, this is what the
	// `make` method does
	var sliceOfArr []int = make([]int, 5 /*,10 --> the upper limit of what the slice can hold*/)
	arr[0] = 1
	sliceOfArr[0] = 1 // This would keeled over if not initialised with `make` is
	fmt.Println(len(sliceOfArr)) // will return 5
	fmt.Println(cap(sliceOfArr)) // will return 5 — cap equals len here because the capacity argument above is commented out
}
// arraysAndSlicesOne demonstrates len and cap of a slice taken from an array.
func arraysAndSlicesOne() {
	fruitArray := [5]string{"apple", "banana", "Orange", "gooseberry", "peach"}
	fruitArraySlice := fruitArray[1:3]
	// Fix: the original printed len/cap of `sliceOfArr`, a variable that
	// only exists inside arraysAndSlices, so this function did not compile.
	fmt.Println(len(fruitArraySlice)) // will return 2
	fmt.Println(cap(fruitArraySlice)) // will return 4 since we are starting at index 1 and the original array has 4 more items we could use
}
// func main() {
// var bag [5]float64 = [5]float64{6, 1.5, 4.5, 7.0, 8}
// // var bag := [5]float64{6, 1.5, 4.5, 7.0, 8}
// // Go can automatically infer how many items (more like how much memory to reserve) you want in the array based off of what is on RHS
// // var bag := [...]float64{6, 1.5, 4.5, 7.0, 8}
// for _, bagItem := range bag {
// fmt.Println(bagItem)
// }
// }
|
package exporter
import (
"context"
"crypto/md5"
"fmt"
"log"
"os"
"os/exec"
"regexp"
"sort"
"strconv"
"strings"
"github.com/databrickslabs/terraform-provider-databricks/common"
"github.com/databrickslabs/terraform-provider-databricks/compute"
"github.com/databrickslabs/terraform-provider-databricks/identity"
"github.com/databrickslabs/terraform-provider-databricks/provider"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/hashicorp/hcl/v2/hclwrite"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
"github.com/zclconf/go-cty/cty"
)
/** High level overview of importer design:
+----------+ Add +--------------------+
| resource +-------> stateApproximation |
+--^-------+ +----|-------------|-+
| | |
+------------------------+ | Emit | |
| "normal provider flow" | +------^-----+ +-----V-----+ +---V---+
+------------^-----------+ | importable +--------> reference | | scope |
| +------^-----+ +--------|--+ +---V---+
+--------------+--------------+ | | |
|terraform-provider-databricks| | | |
+--------------+--------------+ | | |
| | List +--V----------V---+
+----------v---------+ +----^------------+ | |
| | | | | Generated |
| importer command +--------> importContext | | HCL Files |
| | | | | |
+--------------------+ +-----------------+ +-----------------+
*/
// importContext carries all state for one exporter run: the Databricks
// client, the approximated terraform state being built, the generated HCL
// files, and the user-supplied options (services, listing, match, prefix...).
type importContext struct {
	Module      string
	Context     context.Context
	Client      *common.DatabricksClient
	State       stateApproximation // grows via Add()
	Importables map[string]importable
	Resources   map[string]*schema.Resource // provider resource schemas
	Scope       importedResources           // resources to generate; toposorted in single-threaded use
	Files       map[string]*hclwrite.File   // one generated .tf file per service
	Directory   string
	importing   map[string]bool // resources currently mid-import (guards re-entry in Emit/Has)
	nameFixes   []regexFix      // applied by ResourceName
	hclFixes    []regexFix      // applied to formatted HCL output in Run
	allUsers    []identity.ScimUser
	allGroups   []identity.ScimGroup
	mountMap    map[string]mount
	variables   map[string]string // vars.tf variable name -> description

	// command-line driven options
	debug               bool
	mounts              bool
	services            string // services whose resources may be emitted
	listing             string // services whose List() is invoked in Run
	match               string // case-insensitive name filter (MatchesName)
	lastActiveDays      int64
	generateDeclaration bool // write databricks.tf provider declaration
	meAdmin             bool // current user belongs to the "admins" group
	prefix              string // prepended to generated resource names
}
// mount describes a mount point: its source URL plus the instance profile
// or cluster associated with it (semantics inferred from field names —
// confirm against the mount listing code).
type mount struct {
	URL             string
	InstanceProfile string
	ClusterID       string
}
// newImportContext wires the client into a fresh provider instance and
// returns an importContext pre-populated with default name fixes and empty
// collections, ready for option assignment and Run().
func newImportContext(c *common.DatabricksClient) *importContext {
	p := provider.DatabricksProvider()
	p.TerraformVersion = "exporter"
	p.SetMeta(c)
	c.Provider = p
	ctx := context.WithValue(context.Background(), common.Provider, p)
	ctx = context.WithValue(ctx, common.ResourceName, "exporter")
	c.WithCommandExecutor(func(
		ctx context.Context,
		c *common.DatabricksClient) common.CommandExecutor {
		return compute.NewCommandsAPI(ctx, c)
	})
	return &importContext{
		Client:      c,
		Context:     ctx,
		State:       stateApproximation{},
		Importables: resourcesMap,
		Resources:   p.ResourcesMap,
		Files:       map[string]*hclwrite.File{},
		Scope:       []*resource{},
		importing:   map[string]bool{},
		// nameFixes normalize generated names: strip UUID-like prefixes,
		// versioned file suffixes, e-mail domains; collapse separators and
		// non-word characters; squeeze repeated underscores.
		nameFixes: []regexFix{
			{regexp.MustCompile(`[0-9a-f]{8}[_-][0-9a-f]{4}[_-][0-9a-f]{4}` +
				`[_-][0-9a-f]{4}[_-][0-9a-f]{12}[_-]`), ""},
			{regexp.MustCompile(`[_-][0-9]+[\._-][0-9]+[\._-].*\.([a-z0-9]{1,4})`), "_$1"},
			{regexp.MustCompile(`@.*$`), ""},
			{regexp.MustCompile(`[-\s\.\|]`), "_"},
			{regexp.MustCompile(`\W+`), ""},
			{regexp.MustCompile(`[_]{2,}`), "_"},
		},
		hclFixes: []regexFix{ // Be careful with that! it may break working code
		},
		allUsers:  []identity.ScimUser{},
		variables: map[string]string{},
	}
}
// Run executes the whole export: it lists resources for the configured
// services, generates one .tf file per service plus an import.sh script
// (and optionally vars.tf / databricks.tf) into ic.Directory, and finally
// runs `terraform fmt` on the output.
func (ic *importContext) Run() error {
	if len(ic.services) == 0 {
		return fmt.Errorf("no services to import")
	}
	log.Printf("[INFO] Importing %s module into %s directory Databricks resources of %s services",
		ic.Module, ic.Directory, ic.services)
	info, err := os.Stat(ic.Directory)
	if os.IsNotExist(err) {
		err = os.MkdirAll(ic.Directory, 0755)
		if err != nil {
			return fmt.Errorf("can't create directory %s", ic.Directory)
		}
	} else if err != nil {
		// Fix: any non-NotExist Stat error previously fell through to
		// info.IsDir() with a nil info, causing a panic.
		return err
	} else if !info.IsDir() {
		return fmt.Errorf("the path %s is not a directory", ic.Directory)
	}
	usersAPI := identity.NewUsersAPI(ic.Context, ic.Client)
	me, err := usersAPI.Me()
	if err != nil {
		return err
	}
	for _, g := range me.Groups {
		if g.Display == "admins" {
			ic.meAdmin = true
			break
		}
	}
	// Listing phase: importables whose service is part of ic.listing add
	// resources into ic.Scope.
	for resourceName, ir := range ic.Importables {
		if ir.List == nil {
			continue
		}
		if !strings.Contains(ic.listing, ir.Service) {
			log.Printf("[DEBUG] %s (%s service) is not part of listing",
				resourceName, ir.Service)
			continue
		}
		if err := ir.List(ic); err != nil {
			return err
		}
	}
	if len(ic.Scope) == 0 {
		return fmt.Errorf("no resources to import")
	}
	sh, err := os.OpenFile(fmt.Sprintf("%s/import.sh", ic.Directory), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	if err != nil {
		return err
	}
	defer sh.Close()
	// nolint
	sh.WriteString("#!/bin/sh\n\n")
	if ic.generateDeclaration {
		dcfile, err := os.Create(fmt.Sprintf("%s/databricks.tf", ic.Directory))
		if err != nil {
			return err
		}
		// nolint
		dcfile.WriteString(
			`terraform {
required_providers {
databricks = {
source = "databrickslabs/databricks"
version = "` + common.Version() + `"
}
}
}
provider "databricks" {
}
`)
		dcfile.Close()
	}
	// Generation phase: walk the toposorted scope and render each resource
	// into its service's HCL file, appending import commands for managed
	// (non-data) resources.
	sort.Sort(ic.Scope)
	scopeSize := len(ic.Scope)
	log.Printf("[INFO] Generating configuration for %d resources", scopeSize)
	for i, r := range ic.Scope {
		ir := ic.Importables[r.Resource]
		f, ok := ic.Files[ir.Service]
		if !ok {
			f = hclwrite.NewEmptyFile()
			ic.Files[ir.Service] = f
		}
		if ir.Ignore != nil && ir.Ignore(ic, r) {
			continue
		}
		body := f.Body()
		if ir.Body != nil {
			err := ir.Body(ic, body, r)
			if err != nil {
				log.Printf("[ERROR] %s", err.Error())
			}
		} else {
			resourceBlock := body.AppendNewBlock("resource", []string{r.Resource, r.Name})
			err := ic.dataToHcl(ir, []string{}, ic.Resources[r.Resource],
				r.Data, resourceBlock.Body())
			if err != nil {
				log.Printf("[ERROR] %s", err.Error())
			}
		}
		if i%50 == 0 {
			log.Printf("[INFO] Generated %d of %d resources", i, scopeSize)
		}
		if r.Mode != "data" {
			// nolint
			sh.WriteString(r.ImportCommand(ic) + "\n")
		}
	}
	for service, f := range ic.Files {
		formatted := hclwrite.Format(f.Bytes())
		// fix some formatting in a hacky way instead of writing 100 lines
		// of HCL AST writer code
		formatted = []byte(ic.regexFix(string(formatted), ic.hclFixes))
		log.Printf("[DEBUG] %s", formatted)
		generatedFile := fmt.Sprintf("%s/%s.tf", ic.Directory, service)
		tf, err := os.Create(generatedFile)
		if err != nil {
			// Fix: a create failure used to be silently skipped, yet
			// "Created" was still logged for the missing file.
			return err
		}
		_, werr := tf.Write(formatted)
		// Fix: close each file within its iteration; the original deferred
		// Close inside the loop, holding every file open until return.
		cerr := tf.Close()
		if werr != nil {
			return werr
		}
		if cerr != nil {
			return cerr
		}
		log.Printf("[INFO] Created %s", generatedFile)
	}
	if len(ic.variables) > 0 {
		vf, err := os.Create(fmt.Sprintf("%s/vars.tf", ic.Directory))
		if err != nil {
			return err
		}
		defer vf.Close()
		f := hclwrite.NewEmptyFile()
		body := f.Body()
		for k, v := range ic.variables {
			b := body.AppendNewBlock("variable", []string{k}).Body()
			b.SetAttributeValue("description", cty.StringVal(v))
		}
		// nolint
		vf.Write(f.Bytes())
		log.Printf("[INFO] Written %d variables", len(ic.variables))
	}
	cmd := exec.CommandContext(context.Background(), "terraform", "fmt")
	cmd.Dir = ic.Directory
	err = cmd.Run()
	if err != nil {
		return err
	}
	log.Printf("[INFO] Done. Please edit the files and roll out new environment.")
	return nil
}
// MatchesName reports whether n matches the configured name filter; an
// empty filter matches everything. The comparison is a case-insensitive
// substring match.
func (ic *importContext) MatchesName(n string) bool {
	needle := strings.ToLower(ic.match)
	if needle == "" {
		return true
	}
	return strings.Contains(strings.ToLower(n), needle)
}
// Find scans the approximated state for an instance of r's resource type
// whose attribute r.Attribute equals r.Value, and returns an HCL traversal
// referencing that resource's `pick` attribute ("data." prefixed for data
// sources). Returns nil when no instance matches.
func (ic *importContext) Find(r *resource, pick string) hcl.Traversal {
	for _, sr := range ic.State.Resources {
		if sr.Type != r.Resource {
			continue
		}
		for _, i := range sr.Instances {
			v := i.Attributes[r.Attribute]
			if v == nil {
				log.Printf("[WARN] Can't find instance attribute '%v' in resource: '%v' with name '%v', ID: '%v'",
					r.Attribute, r.Resource, r.Name, r.ID)
				continue
			}
			if v.(string) == r.Value {
				if sr.Mode == "data" {
					return hcl.Traversal{
						hcl.TraverseRoot{Name: "data"},
						hcl.TraverseAttr{Name: sr.Type},
						hcl.TraverseAttr{Name: sr.Name},
						hcl.TraverseAttr{Name: pick},
					}
				}
				return hcl.Traversal{
					hcl.TraverseRoot{Name: sr.Type},
					hcl.TraverseAttr{Name: sr.Name},
					hcl.TraverseAttr{Name: pick},
				}
			}
		}
	}
	return nil
}
// Has reports whether r is already tracked: either it is currently being
// imported, or an instance with the same matching attribute value exists in
// the approximated state.
func (ic *importContext) Has(r *resource) bool {
	if _, visiting := ic.importing[r.String()]; visiting {
		return true
	}
	k, v := r.MatchPair()
	for _, sr := range ic.State.Resources {
		if sr.Type != r.Resource {
			continue
		}
		for _, i := range sr.Instances {
			// Fix: the unchecked `.(string)` assertion panicked when the
			// attribute was absent (nil) or not a string.
			if attr, ok := i.Attributes[k].(string); ok && attr == v {
				return true
			}
		}
	}
	return false
}
// Add records r in the approximated state — a single instance built from its
// schema.ResourceData attributes plus the id — and appends it to the
// generation scope. No-op if r is already tracked.
func (ic *importContext) Add(r *resource) {
	if ic.Has(r) {
		return
	}
	state := r.Data.State()
	if state == nil {
		log.Printf("[ERROR] state is nil for %s", r)
		return
	}
	inst := instanceApproximation{
		Attributes: map[string]interface{}{},
	}
	for k, v := range state.Attributes {
		inst.Attributes[k] = v
	}
	// default to a managed resource (as opposed to a data source)
	if r.Mode == "" {
		r.Mode = "managed"
	}
	inst.Attributes["id"] = r.ID
	ic.State.Resources = append(ic.State.Resources, resourceApproximation{
		Mode:      r.Mode,
		Module:    ic.Module,
		Type:      r.Resource,
		Name:      r.Name,
		Instances: []instanceApproximation{inst},
	})
	// in single-threaded scenario scope is toposorted
	ic.Scope = append(ic.Scope, r)
}
// regexFix applies each regex replacement in fixes to s, in order, and
// returns the result.
func (ic *importContext) regexFix(s string, fixes []regexFix) string {
	out := s
	for _, fix := range fixes {
		out = fix.Regex.ReplaceAllString(out, fix.Replacement)
	}
	return out
}
// ResourceName derives a deterministic, HCL-safe name for r: the explicit
// name, the importable-provided name, or the raw ID, normalized via prefix,
// lowercasing and the nameFixes regex list. A name that ends up empty or
// starting with a digit is replaced by an md5-derived "r<hex>" handle.
func (ic *importContext) ResourceName(r *resource) string {
	name := r.Name
	if name == "" && ic.Importables[r.Resource].Name != nil {
		name = ic.Importables[r.Resource].Name(r.Data)
	}
	if name == "" {
		name = r.ID
	}
	name = ic.prefix + name
	name = strings.ToLower(name)
	name = ic.regexFix(name, ic.nameFixes)
	// this is either numeric id or all-non-ascii
	// (direct byte test replaces the original per-call
	// regexp.MustCompile(`^\d`), which recompiled on every invocation)
	if name == "" || (name[0] >= '0' && name[0] <= '9') {
		if name == "" {
			name = r.ID
		}
		name = fmt.Sprintf("r%x", md5.Sum([]byte(name)))[0:12]
	}
	return name
}
// Emit brings resource r into the import scope: it resolves a missing ID via
// the importable's Search hook, reads the resource's state through the
// provider when no data is attached, derives a name, runs the custom Import
// hook (which may recursively Emit dependencies), and finally Add()s it.
func (ic *importContext) Emit(r *resource) {
	// TODO: change into channels, if stack trace depth issues would surface
	_, v := r.MatchPair()
	if v == "" {
		log.Printf("[DEBUG] %s has got empty identifier", r)
		return
	}
	if ic.Has(r) {
		log.Printf("[DEBUG] %s already imported", r)
		return
	}
	// mark as in-flight so Has() short-circuits recursive Emit calls
	ic.importing[r.String()] = true
	pr, ok := ic.Resources[r.Resource]
	if !ok {
		log.Printf("[ERROR] %s is not available in provider", r)
		return
	}
	ir, ok := ic.Importables[r.Resource]
	if !ok {
		log.Printf("[ERROR] %s is not available for import", r)
		return
	}
	if !strings.Contains(ic.services, ir.Service) {
		log.Printf("[DEBUG] %s (%s service) is not part of the import",
			r.Resource, ir.Service)
		return
	}
	if r.ID == "" {
		if ir.Search == nil {
			log.Printf("[ERROR] Searching %s is not available", r)
			return
		}
		if err := ir.Search(ic, r); err != nil {
			log.Printf("[ERROR] Cannot search for a resource %s: %v", err, r)
			return
		}
		if r.ID == "" {
			log.Printf("[INFO] Cannot find %s", r)
			return
		}
	}
	if r.Data == nil {
		// empty data with resource schema
		r.Data = pr.Data(&terraform.InstanceState{
			Attributes: map[string]string{},
			ID:         r.ID,
		})
		r.Data.MarkNewResource()
		resource := strings.ReplaceAll(r.Resource, "databricks_", "")
		ctx := context.WithValue(ic.Context, common.ResourceName, resource)
		if dia := pr.ReadContext(ctx, r.Data, ic.Client); dia != nil {
			log.Printf("[ERROR] Error reading %s#%s: %v", r.Resource, r.ID, dia)
			return
		}
		if r.Data.Id() == "" {
			r.Data.SetId(r.ID)
		}
	}
	r.Name = ic.ResourceName(r)
	if ir.Import != nil {
		if err := ir.Import(ic, r); err != nil {
			log.Printf("[ERROR] Failed custom import of %s: %s", r, err)
			return
		}
	}
	ic.Add(r)
}
// TODO: move to IC
// dependsRe strips numeric list indexes (".0", ".12") from attribute paths
// so they can be matched against dependency declarations.
var dependsRe = regexp.MustCompile(`(\.[\d]+)`)

// reference turns a string attribute into HCL tokens: if the attribute's
// index-free path matches one of the importable's declared dependencies and
// the referenced resource exists in state, emit a traversal to it;
// otherwise emit the plain string literal.
func (ic *importContext) reference(i importable, path []string, value string) hclwrite.Tokens {
	match := dependsRe.ReplaceAllString(strings.Join(path, "."), "")
	for _, d := range i.Depends {
		if d.Path != match {
			continue
		}
		// resources are referenced by their "id" attribute by default
		attr := "id"
		if d.Match != "" {
			attr = d.Match
		}
		traversal := ic.Find(&resource{
			Resource:  d.Resource,
			Attribute: attr,
			Value:     value,
		}, attr)
		if traversal == nil {
			break
		}
		return hclwrite.TokensForTraversal(traversal)
	}
	return hclwrite.TokensForValue(cty.StringVal(value))
}
// variable registers a vars.tf variable with the given description and
// returns HCL tokens referencing var.<name>.
func (ic *importContext) variable(name, desc string) hclwrite.Tokens {
	ic.variables[name] = desc
	traversal := hcl.Traversal{
		hcl.TraverseRoot{Name: "var"},
		hcl.TraverseAttr{Name: name},
	}
	return hclwrite.TokensForTraversal(traversal)
}
// fieldTuple pairs a schema field name with its schema so the fields of a
// resource can be sorted before being rendered.
type fieldTuple struct {
	Field  string
	Schema *schema.Schema
}
// dataToHcl writes every non-computed field of d found at the given dotted
// path into the HCL body, dispatching on the schema type. String values go
// through reference() so cross-resource values become traversals; nested
// sets/lists recurse via readListFromData.
func (ic *importContext) dataToHcl(i importable, path []string,
	pr *schema.Resource, d *schema.ResourceData, body *hclwrite.Body) error {
	ss := []fieldTuple{}
	for a, as := range pr.Schema {
		ss = append(ss, fieldTuple{a, as})
	}
	sort.Slice(ss, func(i, j int) bool {
		// it just happens that reverse field order
		// makes the most beautiful configs
		return ss[i].Field > ss[j].Field
	})
	for _, tuple := range ss {
		a, as := tuple.Field, tuple.Schema
		if as.Computed {
			continue
		}
		raw, ok := d.GetOk(strings.Join(append(path, a), "."))
		if !ok {
			continue
		}
		// NOTE(review): append(path, a) can share the backing array across
		// iterations when path has spare capacity; it appears safe here
		// because each appended slice is consumed before the next append —
		// worth confirming if this is ever parallelized.
		switch as.Type {
		case schema.TypeString:
			body.SetAttributeRaw(a, ic.reference(i, append(path, a), raw.(string)))
		case schema.TypeBool:
			body.SetAttributeValue(a, cty.BoolVal(raw.(bool)))
		case schema.TypeInt:
			switch iv := raw.(type) {
			case int:
				body.SetAttributeValue(a, cty.NumberIntVal(int64(iv)))
			case int32:
				body.SetAttributeValue(a, cty.NumberIntVal(int64(iv)))
			case int64:
				body.SetAttributeValue(a, cty.NumberIntVal(iv))
			}
		case schema.TypeFloat:
			body.SetAttributeValue(a, cty.NumberFloatVal(raw.(float64)))
		case schema.TypeMap:
			ov := map[string]cty.Value{}
			for key, iv := range raw.(map[string]interface{}) {
				v := cty.StringVal(fmt.Sprintf("%v", iv))
				ov[key] = v
			}
			body.SetAttributeValue(a, cty.ObjectVal(ov))
		case schema.TypeSet:
			if rawSet, ok := raw.(*schema.Set); ok {
				rawList := rawSet.List()
				// sets are addressed by their element hash, not position
				err := ic.readListFromData(i, append(path, a), d, rawList, body, as, func(i int) string {
					return strconv.Itoa(rawSet.F(rawList[i]))
				})
				if err != nil {
					return err
				}
			}
		case schema.TypeList:
			if rawList, ok := raw.([]interface{}); ok {
				err := ic.readListFromData(i, append(path, a), d, rawList, body, as, strconv.Itoa)
				if err != nil {
					return err
				}
			}
		default:
			return fmt.Errorf("unsupported schema type: %v", path)
		}
	}
	return nil
}
// readListFromData renders a list/set attribute. Nested resources become
// repeated HCL blocks (or a single block for MaxItems==1); primitive
// elements become a bracketed token list, with strings routed through
// reference(). offsetConverter maps a list position to the key used in the
// ResourceData path (set element hash vs. plain index).
func (ic *importContext) readListFromData(i importable, path []string, d *schema.ResourceData,
	rawList []interface{}, body *hclwrite.Body, as *schema.Schema,
	offsetConverter func(i int) string) error {
	if len(rawList) == 0 {
		return nil
	}
	name := path[len(path)-1]
	switch elem := as.Elem.(type) {
	case *schema.Resource:
		if as.MaxItems == 1 {
			nestedPath := append(path, offsetConverter(0))
			confBlock := body.AppendNewBlock(name, []string{})
			return ic.dataToHcl(i, nestedPath, elem, d, confBlock.Body())
		}
		for offset := range rawList {
			confBlock := body.AppendNewBlock(name, []string{})
			nestedPath := append(path, offsetConverter(offset))
			err := ic.dataToHcl(i, nestedPath, elem, d, confBlock.Body())
			if err != nil {
				return err
			}
		}
	case *schema.Schema:
		toks := hclwrite.Tokens{}
		toks = append(toks, &hclwrite.Token{
			Type:  hclsyntax.TokenOBrack,
			Bytes: []byte{'['},
		})
		for _, raw := range rawList {
			if len(toks) != 1 {
				// comma between elements
				toks = append(toks, &hclwrite.Token{
					Type:  hclsyntax.TokenComma,
					Bytes: []byte{','},
				})
			}
			switch x := raw.(type) {
			case string:
				toks = append(toks, ic.reference(i, path, x)...)
			case int:
				// probably we don't even use integer lists?...
				toks = append(toks, hclwrite.TokensForValue(
					cty.NumberIntVal(int64(x)))...)
			default:
				return fmt.Errorf("unsupported primitive list: %#v", path)
			}
		}
		toks = append(toks, &hclwrite.Token{
			Type:  hclsyntax.TokenCBrack,
			Bytes: []byte{']'},
		})
		body.SetAttributeRaw(name, toks)
	}
	return nil
}
|
package main
import (
"fmt"
"strconv"
)
// main reads an integer from stdin and runs the palindrome search on it.
func main() {
	var n int
	fmt.Scanf("%d", &n)
	palindrome(n)
}
// Task 4
// Input 1234437
// palindrome prints, for each pair of equal adjacent digits in the decimal
// representation of n, the longest even-length palindrome centered on that
// pair; prints 0 if no adjacent pair of equal digits exists.
func palindrome(n int) {
	s := strconv.Itoa(n)
	found := false
	for i := 1; i < len(s); i++ {
		if s[i-1] != s[i] {
			continue
		}
		found = true
		start, end := i-1, i
		// Expand outwards while the digits keep matching.
		for start-1 >= 0 && end+1 < len(s) && s[start-1] == s[end+1] {
			start--
			end++
		}
		// Fix: the original only printed when the expansion stopped on a
		// digit mismatch, so a palindrome reaching either end of the
		// string (e.g. 1221) was silently dropped.
		res, _ := strconv.Atoi(s[start : end+1])
		fmt.Println(res)
	}
	if !found {
		fmt.Println(0)
	}
}
|
package paymentMethods
import (
"outlet/v1/bussiness/paymentMethods"
"gorm.io/gorm"
)
// PaymentMethods is the gorm persistence model for a payment method.
// NOTE(review): ID redeclares gorm.Model's primary key field — confirm
// this shadowing is intentional.
type PaymentMethods struct {
	gorm.Model
	ID   uint `gorm:"primaryKey"`
	Name string
}
// toDomain maps a persistence record to the business-layer Domain value,
// carrying over the gorm-managed timestamps.
func toDomain(record PaymentMethods) paymentMethods.Domain {
	return paymentMethods.Domain{
		ID:        int(record.ID),
		Name:      record.Name,
		CreatedAt: record.CreatedAt,
		UpdatedAt: record.UpdatedAt,
	}
}
// fromDomain maps a business-layer Domain value to a persistence record.
// Timestamps are not copied back — presumably left for gorm to manage.
func fromDomain(domain paymentMethods.Domain) PaymentMethods {
	return PaymentMethods{
		ID:   uint(domain.ID),
		Name: domain.Name,
	}
}
// toDomainArray converts a slice of persistence records to business-layer
// Domain values, preserving order. Returns nil for empty input, matching
// the previous behavior (the original appended into a nil slice).
func toDomainArray(record []PaymentMethods) []paymentMethods.Domain {
	if len(record) == 0 {
		return nil
	}
	// pre-size to avoid repeated growth copies
	res := make([]paymentMethods.Domain, 0, len(record))
	for _, v := range record {
		res = append(res, toDomain(v))
	}
	return res
}
|
package entity
// Group represents a department ("отдел" in the original Russian comments).
type Group struct {
	Meta      *Meta  `json:"meta"`      // metadata
	Id        string `json:"id"`        // department id
	AccountId string `json:"accountId"` // account id
	Name      string `json:"name"`      // department name
	Index     int    `json:"index"`     // ordinal position in the department list
}
|
package main
import (
"fmt"
"github.com/xeb/backq/modules/certgen"
"github.com/xeb/backq/modules/public"
"gopkg.in/alecthomas/kingpin.v2"
"time"
)
// Command-line flags (kingpin); the three ports are required.
var (
	reqport  = kingpin.Flag("request_port", "The 0MQ port for publishing requests to bqprivate, e.g. a value of 20000 means binding to 'tcp://*:20000'").Required().Int()
	repport  = kingpin.Flag("reply_port", "The 0MQ port for listening for replies from bqprivate").Required().Int()
	httpport = kingpin.Flag("http_port", "The HTTP Port to listen on").Required().Int()
	certpath = kingpin.Flag("certpath", "If set, this will Enable SSL and set the Cert Path").String()
	keypath  = kingpin.Flag("keypath", "If set, this will Enable SSL and set the Key Path").String()
	genssl   = kingpin.Flag("gen_ssl", "If set, this will automatically create cert.pem and key.pem and enable SSL").Bool()
)
// main parses flags, optionally generates a self-signed certificate, then
// binds the 0MQ request/reply channels and the public HTTP listener.
func main() {
	kingpin.Parse()
	fmt.Printf("[PUBLIC] Using default port %d\n", *httpport)
	reqaddy := fmt.Sprintf("tcp://*:%d", *reqport)
	repaddy := fmt.Sprintf("tcp://*:%d", *repport)
	fmt.Printf("[PUBLIC] Binding request-0mq channel to '%s'\n", reqaddy)
	fmt.Printf("[PUBLIC] Binding reply-0mq channel to '%s'\n", repaddy)
	fmt.Printf("[PUBLIC] Binding HTTP receiver to ':%d'\n", *httpport)
	if *genssl {
		// Generate cert.pem/key.pem for localhost and use them, overriding
		// any paths passed via --certpath/--keypath.
		certgen.Generate("localhost", "Jan 1 15:04:05 2011", 365*24*time.Hour, false, 2048, "P256")
		*certpath = "cert.pem"
		*keypath = "key.pem"
	}
	public.BindBackQ(reqaddy, repaddy)
	public.BindHTTP(*httpport, *certpath, *keypath)
}
|
package unifi
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strings"
)
// SiteActiveClient defines an active client device.
// Field names mirror the JSON returned by the controller's stat/sta
// endpoint (see SiteActiveClients below).
type SiteActiveClient struct {
	ID                    string `json:"_id"`
	IsGuestByUAP          bool   `json:"_is_guest_by_uap"`
	LastSeenByUAP         int64  `json:"_last_seen_by_uap"`
	UptimeByUAP           int64  `json:"_uptime_by_uap"`
	IsGuestByUGW          bool   `json:"_is_guest_by_ugw"`
	LastSeenByUGW         int64  `json:"_last_seen_by_ugw"`
	UptimeByUGW           int64  `json:"_uptime_by_ugw"`
	Anomalies             int    `json:"anomalies"`
	AccessPointMAC        string `json:"ap_mac"`
	AssociationTime       int64  `json:"assoc_time"`
	Authorized            bool   `json:"authorized"`
	BSSID                 string `json:"bssid"`
	BytesR                int64  `json:"bytes-r"`
	CCQ                   int    `json:"ccq"`
	Channel               int    `json:"channel"`
	DHCPEndTime           int    `json:"dhcpend_time"`
	ESSID                 string `json:"essid"`
	FirstSeen             int64  `json:"first_seen"`
	GatewayMAC            string `json:"gw_mac"`
	HostName              string `json:"hostname"`
	IdleTime              int    `json:"idletime"`
	IP                    string `json:"ip"`
	Is11R                 bool   `json:"is_11r"`
	IsGuest               bool   `json:"is_guest"`
	IsWired               bool   `json:"is_wired"`
	LastSeen              int64  `json:"last_seen"`
	LatestAssociationTime int64  `json:"latest_assoc_time"`
	MAC                   string `json:"mac"`
	Network               string `json:"network"`
	NetworkID             string `json:"network_id"`
	Noise                 int    `json:"noise"`
	OUI                   string `json:"oui"`
	PowerSaveEnabled      bool   `json:"powersave_enabled"`
	QOSPolicyApplied      bool   `json:"qos_policy_applied"`
	Radio                 string `json:"radio"`
	RadioName             string `json:"radio_name"`
	RadioProto            string `json:"radio_proto"`
	RSSI                  int    `json:"rssi"`
	RXBytes               int64  `json:"rx_bytes"`
	RXBytesR              int64  `json:"rx_bytes-r"`
	RXPackets             int64  `json:"rx_packets"`
	RXRate                int64  `json:"rx_rate"`
	Satisfaction          int    `json:"satisfaction"`
	Signal                int    `json:"signal"`
	SiteID                string `json:"site_id"`
	TXBytes               int64  `json:"tx_bytes"`
	TXBytesR              int64  `json:"tx_bytes-r"`
	TXPackets             int64  `json:"tx_packets"`
	TXPower               int    `json:"tx_power"`
	TXRate                int64  `json:"tx_rate"`
	Uptime                int64  `json:"uptime"`
	UserID                string `json:"user_id"`
	VLAN                  int    `json:"vlan"`
}

// SiteActiveClientsResponse contains the active clients response.
type SiteActiveClientsResponse struct {
	Meta CommonMeta         `json:"meta"`
	Data []SiteActiveClient `json:"data"`
}
// SiteActiveClients will list active clients
// site - the site to query
// filterMac - filter to a specific mac, if zero-value, then no filter is applied
func (c *Client) SiteActiveClients(site string, filterMac string) (*SiteActiveClientsResponse, error) {
	path := "stat/sta"
	if filterMac != "" {
		path += "/" + strings.ToLower(filterMac)
	}
	resp := SiteActiveClientsResponse{}
	err := c.doSiteRequest(http.MethodGet, site, path, nil, &resp)
	return &resp, err
}
// ClientDetails gets the details for a single client
// site - the site to query
// mac - the client mac to query
func (c *Client) ClientDetails(site string, mac string) (*GenericResponse, error) {
	if mac == "" {
		return nil, fmt.Errorf("must specify a client MAC")
	}
	extPath := "stat/user/" + strings.ToLower(mac)
	resp := GenericResponse{}
	err := c.doSiteRequest(http.MethodGet, site, extPath, nil, &resp)
	return &resp, err
}
// UpdateClientFixedIP will update a clients fixedIP
// site - the site to modify
// clientID - the ID of the user/client device to be modified
// useFixedIP - true to set a fixedIP, false to unset
// networkID - if useFixedIP set this to the specified value
// fixedIP - if userFixedIP set this to the fixed IP specified
func (c *Client) UpdateClientFixedIP(site string, clientID string, useFixedIP bool, networkID *string, fixedIP *string) (*GenericResponse, error) {
	// normalize once and reuse for both the payload and the URL
	id := strings.TrimSpace(strings.ToLower(clientID))
	payload := map[string]interface{}{
		"_id":         id,
		"use_fixedip": useFixedIP,
	}
	if useFixedIP {
		if networkID != nil {
			payload["network_id"] = *networkID
		}
		if fixedIP != nil {
			payload["fixed_ip"] = *fixedIP
		}
	}
	// Fix: the marshal error was silently discarded with `data, _ :=`.
	data, err := json.Marshal(payload)
	if err != nil {
		return nil, err
	}
	extPath := fmt.Sprintf("rest/user/%s", id)
	var resp GenericResponse
	err = c.doSiteRequest(http.MethodPut, site, extPath, bytes.NewReader(data), &resp)
	return &resp, err
}
|
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Mark LaPerriere
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
package main
// activateTmplSrc is the shell script template rendered into a workspace's
// "activate" file. It mirrors Python virtualenv behavior for Go: it saves the
// current PATH/GOPATH/PS1, points GOPATH at the project, and defines a
// deactivate() that restores everything. Template fields: {{.Name}} (project
// name shown in the prompt), {{.GoPath}} and {{.SrcPath}} (directories).
const activateTmplSrc = `#!/bin/sh
function deactivate() {
    if [ -n "$_OLD_GOPJX_PATH" ]; then
        export PATH="$_OLD_GOPJX_PATH"
        unset _OLD_GOPJX_PATH
    fi
    if [ -n "$_OLD_GOPJX_GOPATH" ]; then
        export GOPATH="$_OLD_GOPJX_GOPATH"
        unset _OLD_GOPJX_GOPATH
    fi
    if [ -n "$_OLD_GOPJX_PS1" ]; then
        export PS1="$_OLD_GOPJX_PS1"
        unset _OLD_GOPJX_PS1
    fi
    if [ -n "$VIRTUAL_ENV" ]; then
        unset VIRTUAL_ENV
    fi
    if [ -n "$GOPJX_SRC_PATH" ]; then
        unset GOPJX_SRC_PATH
    fi
    if [ -n "$BASH" -o -n "$ZSH_VERSION" ]; then
        hash -r 2>/dev/null
    fi
    if [ "$1" != "nondestructive" ]; then
        unset -f deactivate
    fi
}
deactivate nondestructive
if [ -z "$VIRTUAL_ENV_DISABLE_PROMPT" ]; then
    export _OLD_GOPJX_PS1="$PS1"
    export PS1="[go:{{.Name}}] $_OLD_GOPJX_PS1"
fi
export _OLD_GOPJX_PATH="$PATH"
export _OLD_GOPJX_GOPATH="$GOPATH"
export GOPATH={{.GoPath}}
export PATH=$GOPATH/bin:$PATH
export VIRTUAL_ENV=$GOPATH
export GOPJX_SRC_PATH={{.SrcPath}}
`
|
package main
import (
"strconv"
"time"
)
// Reducer runs forever, draining request IDs from reducer_chan. For each ID
// it loads the request record from Redis, groups the request's URLs by HTTP
// status code into Result, marks the request "Complete" and writes it back.
// Between drain passes it sleeps for two seconds.
func Reducer() {
	for {
		for len(reducer_chan) > 0 {
			id := <-reducer_chan
			key := "request:" + strconv.Itoa(id)
			req, _ := RedisGet(key)
			// Rebuild the status-code -> URL-list grouping from scratch.
			req.Result = make(map[int][]string)
			for _, u := range req.Urls {
				req.Result[u.Status_code] = append(req.Result[u.Status_code], u.Url)
			}
			req.Status_msg = "Complete"
			if err := RedisSet(req, key); err != nil {
				panic(err)
			}
		}
		time.Sleep(2 * time.Second)
	}
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package utility
import (
"context"
"strings"
"time"
"github.com/mattermost/mattermost-cloud/k8s"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// getElasticLoadBalancerInfo returns the private load balancer endpoint and
// type of the NGINX service. It scans the namespace for LoadBalancer-type
// services and returns the first non-empty ingress hostname along with the
// AWS load-balancer-type annotation; empty strings (and nil error) when none
// is found.
func getElasticLoadBalancerInfo(namespace string, logger log.FieldLogger, configPath string) (string, string, error) {
	k8sClient, err := k8s.NewFromFile(configPath, logger)
	if err != nil {
		return "", "", err
	}
	services, err := k8sClient.Clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return "", "", err
	}
	for _, service := range services.Items {
		if service.Spec.Type != "LoadBalancer" {
			continue
		}
		// Guard with len(): the original nil check still panicked on a
		// non-nil but empty Ingress slice when indexing [0].
		if len(service.Status.LoadBalancer.Ingress) > 0 {
			endpoint := service.Status.LoadBalancer.Ingress[0].Hostname
			if endpoint != "" {
				return endpoint, service.Annotations["service.beta.kubernetes.io/aws-load-balancer-type"], nil
			}
		}
	}
	return "", "", nil
}
// getPrivateLoadBalancerEndpoint returns the private load balancer endpoint
// of the NGINX service. It polls the namespace every five seconds until a
// matching service (LoadBalancer type, or name ending in "query") exposes an
// ingress hostname, or ctx is cancelled.
func getPrivateLoadBalancerEndpoint(ctx context.Context, namespace string, logger log.FieldLogger, configPath string) (string, error) {
	k8sClient, err := k8s.NewFromFile(configPath, logger)
	if err != nil {
		return "", err
	}
	for {
		// Use the caller's ctx (the original passed context.TODO()) so the
		// List call is cancelled together with the surrounding wait.
		services, err := k8sClient.Clientset.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{})
		if err != nil {
			return "", err
		}
		for _, service := range services.Items {
			if service.Spec.Type == "LoadBalancer" || strings.HasSuffix(service.Name, "query") {
				// len() guard: a non-nil but empty Ingress slice would have
				// panicked on the [0] index below.
				if len(service.Status.LoadBalancer.Ingress) > 0 {
					endpoint := service.Status.LoadBalancer.Ingress[0].Hostname
					if endpoint == "" {
						return "", errors.New("loadbalancer endpoint value is empty")
					}
					return endpoint, nil
				}
			}
		}
		select {
		case <-ctx.Done():
			return "", errors.Wrap(ctx.Err(), "timed out waiting for internal load balancer to become ready")
		case <-time.After(5 * time.Second):
		}
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package opt
import (
"reflect"
"runtime"
"testing"
)
// TestAggregateProperties verifies that the various helper functions for
// various properties of aggregations handle all aggregation operators.
func TestAggregateProperties(t *testing.T) {
check := func(fn func()) bool {
ok := true
func() {
defer func() {
if x := recover(); x != nil {
ok = false
}
}()
fn()
}()
return ok
}
for _, op := range AggregateOperators {
funcs := []func(Operator) bool{
AggregateIgnoresDuplicates,
AggregateIgnoresNulls,
AggregateIsNeverNull,
AggregateIsNeverNullOnNonNullInput,
AggregateIsNullOnEmpty,
}
for _, fn := range funcs {
if !check(func() { fn(op) }) {
fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
t.Errorf("%s not handled by %s", op, fnName)
}
}
for _, op2 := range AggregateOperators {
if !check(func() { AggregatesCanMerge(op, op2) }) {
t.Errorf("%s,%s not handled by AggregatesCanMerge", op, op2)
break
}
}
}
}
|
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"snmpsim-cli-manager/snmpsim/cmd/deleteSubcommands"
)
// deleteCmd represents the deleteSubcommands command. Invoked with no
// subcommand it simply prints its own help text.
var deleteCmd = &cobra.Command{
	Use:   "delete",
	Args:  cobra.ExactArgs(0),
	Short: "Deletes the component with the given id",
	Long:  `Completely deletes a component via the given id.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Help() prints the usage itself and returns only an error; the
		// original fmt.Println(cmd.Help()) also printed that return value
		// (normally "<nil>") after the help text.
		if err := cmd.Help(); err != nil {
			fmt.Println(err)
		}
	},
}
// init registers the delete command group and all of its per-resource
// subcommands with the root command.
func init() {
	rootCmd.AddCommand(deleteCmd)
	deleteCmd.AddCommand(deletesubcommands.DeleteAgentCmd)
	deleteCmd.AddCommand(deletesubcommands.DeleteEndpointCmd)
	deleteCmd.AddCommand(deletesubcommands.DeleteEngineCmd)
	deleteCmd.AddCommand(deletesubcommands.DeleteLabCmd)
	deleteCmd.AddCommand(deletesubcommands.DeleteRecordFileCmd)
	deleteCmd.AddCommand(deletesubcommands.DeleteTagCmd)
	deleteCmd.AddCommand(deletesubcommands.DeleteUserCmd)
	deleteCmd.AddCommand(deletesubcommands.DeleteTaggedObjectsCmd)
}
|
package steam
import (
"github.com/13k/go-steam/kv"
)
const (
	// messageObjectRootKey is the name of the root node of every MessageObject.
	messageObjectRootKey = "MessageObject"
)
// MessageObject is a fluent wrapper around a kv.KeyValue tree rooted at
// messageObjectRootKey. Every Add* method delegates to the embedded KeyValue
// and returns the receiver so calls can be chained.
type MessageObject struct {
	kv.KeyValue
}
// NewMessageObject returns a MessageObject with an empty root node.
func NewMessageObject() *MessageObject {
	return &MessageObject{KeyValue: kv.NewKeyValueRoot(messageObjectRootKey)}
}
// AddObject adds a child object node and returns the receiver for chaining.
func (o *MessageObject) AddObject(key string) *MessageObject {
	o.KeyValue.AddObject(key)
	return o
}
// AddString adds a string-typed child value.
func (o *MessageObject) AddString(key, value string) *MessageObject {
	o.KeyValue.AddString(key, value)
	return o
}
// AddInt32 adds an int32-typed child value. Note: values are passed as
// strings; the underlying kv API performs the conversion.
func (o *MessageObject) AddInt32(key, value string) *MessageObject {
	o.KeyValue.AddInt32(key, value)
	return o
}
// AddInt64 adds an int64-typed child value (string-encoded, as above).
func (o *MessageObject) AddInt64(key, value string) *MessageObject {
	o.KeyValue.AddInt64(key, value)
	return o
}
// AddUint64 adds a uint64-typed child value (string-encoded, as above).
func (o *MessageObject) AddUint64(key, value string) *MessageObject {
	o.KeyValue.AddUint64(key, value)
	return o
}
// AddFloat32 adds a float32-typed child value (string-encoded, as above).
func (o *MessageObject) AddFloat32(key, value string) *MessageObject {
	o.KeyValue.AddFloat32(key, value)
	return o
}
// AddColor adds a color-typed child value (string-encoded, as above).
func (o *MessageObject) AddColor(key, value string) *MessageObject {
	o.KeyValue.AddColor(key, value)
	return o
}
// AddPointer adds a pointer-typed child value (string-encoded, as above).
func (o *MessageObject) AddPointer(key, value string) *MessageObject {
	o.KeyValue.AddPointer(key, value)
	return o
}
|
package middlewares
import (
"net/http"
)
func SimpleCors(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
if r.Method == http.MethodOptions {
w.Header().Set("Allow", "OPTIONS, POST, GET, PUT")
w.Header().Set("Access-Control-Allow-Headers", "X-API-TOKEN, Content-Type")
w.WriteHeader(http.StatusNoContent)
} else {
next.ServeHTTP(w, r)
}
})
} |
package tcp
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
"reflect"
"strconv"
"sync"
"syscall"
"time"
"shadowsocks-go/pkg/config"
connection "shadowsocks-go/pkg/connection/tcp/unmaintained"
encrypt "shadowsocks-go/pkg/connection/tcp/unmaintained"
"shadowsocks-go/pkg/util"
"github.com/golang/glog"
)
// Shadowsocks request-header layout constants and field tags.
const (
	idType  = 0 // address type index
	idIP0   = 1 // ip address start index
	idDmLen = 1 // domain address length index
	idDm0   = 2 // domain address start index
	typeIPv4 = 1 // type is ipv4 address
	typeDm   = 3 // type is domain address
	typeIPv6 = 4 // type is ipv6 address
	lenIPv4   = net.IPv4len + 2 // ipv4 + 2port
	lenIPv6   = net.IPv6len + 2 // ipv6 + 2port
	lenDmBase = 2               // 1addrLen + 2port, plus addrLen
	lenHmacSha1 = 10 // bytes of HMAC-SHA1 tag appended for one-time auth
)
//TCPServer maintain a listener
type TCPServer struct {
	Config *config.ConnectionInfo // per-port connection settings (password, cipher, timeout, ...)
	quit chan struct{} // closed by Stop() to end the accept loop
	UploadTraffic int64 //request upload traffic
	DownloadTraffic int64 //request download traffic
	//ClientDict Mapping from client addresses (as host:port) to connection
	clientDict map[string]*connector
	//mutex Mutex used to serialize access to the dictionary
	mutex *sync.Mutex
}
// accepted carries one result of ln.Accept() through a channel so the accept
// loop can also select on the quit signal.
type accepted struct {
	conn net.Conn
	err error
}
// connector pairs one client connection with the remote connections proxied
// on its behalf, keyed by target host:port.
type connector struct {
	clientConn *connection.Conn
	serverConn map[string]net.Conn //proxy to remote connnection
}
//NewTCPServer create a TCPServer for the given connection settings with an
//empty client dictionary; call Run to start accepting.
func NewTCPServer(cfg *config.ConnectionInfo) *TCPServer {
	return &TCPServer{
		Config: cfg,
		quit: make(chan struct{}),
		clientDict: make(map[string]*connector),
		mutex: new(sync.Mutex),
	}
}
//Stop implement quit go routine; closing quit makes Run's select return.
func (tcpSrv *TCPServer) Stop() {
	glog.V(5).Infof("tcp server close %v\r\n", tcpSrv.Config)
	close(tcpSrv.quit)
}
//Traffic collection traffic for client,return upload traffic and download traffic.
// NOTE(review): the counters are plain int64 fields read without
// synchronization — if they are updated from the pipe goroutines this read
// is racy; confirm the writers use atomics or the same mutex.
func (tcpSrv *TCPServer) Traffic() (int64, int64) {
	return tcpSrv.UploadTraffic, tcpSrv.DownloadTraffic
}
// getRequest reads and parses the shadowsocks request header from conn,
// returning the target as "host:port", whether one-time auth (OTA) applies
// to this request, and any read/verification error.
func getRequest(conn *connection.Conn, auth bool, timeout time.Duration) (host string, ota bool, err error) {
	SetReadTimeout(conn, timeout)
	// buf size should at least have the same size with the largest possible
	// request size (when addrType is 3, domain name has at most 256 bytes)
	// 1(addrType) + 1(lenByte) + 256(max length address) + 2(port) + 10(hmac-sha1)
	buf := make([]byte, 270)
	// read till we get possible domain length field
	if _, err = io.ReadFull(conn, buf[:idType+1]); err != nil {
		glog.Errorln("read buffer from remote connection error:", err)
		return
	}
	var reqStart, reqEnd int
	addrType := buf[idType]
	switch addrType & connection.AddrMask {
	case typeIPv4:
		reqStart, reqEnd = idIP0, idIP0+lenIPv4
	case typeIPv6:
		reqStart, reqEnd = idIP0, idIP0+lenIPv6
	case typeDm:
		// Infof, not Infoln: the message carries format verbs (the original
		// Infoln printed "%v" literally).
		glog.V(5).Infof("Got a Domain Addr Type, read start(%v) end(%v)\r\n", idType+1, idDmLen+1)
		if _, err = io.ReadFull(conn, buf[idType+1:idDmLen+1]); err != nil {
			glog.Errorf("Read from remote err:%v\r\n", err)
			return
		}
		// Convert the length byte to int BEFORE adding: the original summed
		// in byte arithmetic, so domain lengths >= 252 wrapped around 256
		// and produced a bogus (tiny) reqEnd.
		reqStart, reqEnd = idDm0, idDm0+int(buf[idDmLen])+lenDmBase
	default:
		err = fmt.Errorf("addr type %d not supported", addrType&connection.AddrMask)
		return
	}
	if _, err = io.ReadFull(conn, buf[reqStart:reqEnd]); err != nil {
		return
	}
	glog.V(5).Infof("Got string from remote %v \r\n", buf[reqStart:reqEnd])
	// Return string for typeIP is not most efficient, but browsers (Chrome,
	// Safari, Firefox) all seems using typeDm exclusively. So this is not a
	// big problem.
	switch addrType & connection.AddrMask {
	case typeIPv4:
		host = net.IP(buf[idIP0 : idIP0+net.IPv4len]).String()
	case typeIPv6:
		host = net.IP(buf[idIP0 : idIP0+net.IPv6len]).String()
	case typeDm:
		host = string(buf[idDm0 : idDm0+buf[idDmLen]])
	}
	glog.V(5).Infof("Got host from remote: %v\r\n", host)
	// parse port (always the last two header bytes, big endian)
	port := binary.BigEndian.Uint16(buf[reqEnd-2 : reqEnd])
	host = net.JoinHostPort(host, strconv.Itoa(int(port)))
	// if specified one time auth enabled, we should verify this
	if auth || addrType&connection.OneTimeAuthMask > 0 {
		ota = true
		if _, err = io.ReadFull(conn, buf[reqEnd:reqEnd+lenHmacSha1]); err != nil {
			return
		}
		// The OTA tag is HMAC-SHA1 over the header, keyed with IV||key.
		iv := conn.GetIv()
		key := conn.GetKey()
		actualHmacSha1Buf := util.HmacSha1(append(iv, key...), buf[:reqEnd])
		if !bytes.Equal(buf[reqEnd:reqEnd+lenHmacSha1], actualHmacSha1Buf) {
			err = fmt.Errorf("verify one time auth failed, iv=%v key=%v data=%v", iv, key, buf[:reqEnd])
			return
		}
	}
	return
}
// isClosed wraps a closed-flag.
// NOTE(review): this type is not referenced anywhere in this file section —
// confirm it is used elsewhere before removing it.
type isClosed struct {
	isClosed bool
}
// handleConnection serves one accepted client: it parses the shadowsocks
// request header, dials the requested target, and pipes data in both
// directions until either side closes.
func (tcpSrv *TCPServer) handleConnection(clientKey string) {
	// Read the dictionary under the mutex: process() mutates clientDict
	// concurrently from the accept goroutine, so the original unguarded
	// read was a data race.
	tcpSrv.lock()
	connector := tcpSrv.clientDict[clientKey]
	tcpSrv.unlock()
	conn := connector.clientConn
	timeout := time.Duration(tcpSrv.Config.Timeout) * time.Second
	closed := false
	defer func() {
		// Only tear down here if PipeThenClose did not already finish
		// cleanly (closed stays false on the error paths).
		if !closed {
			conn.Close()
			tcpSrv.lock()
			delete(tcpSrv.clientDict, clientKey)
			tcpSrv.unlock()
		}
	}()
	host, ota, err := getRequest(conn, tcpSrv.Config.EnableOTA, timeout)
	if err != nil {
		glog.Errorf("error getting request %v<->%v err:%v", conn.RemoteAddr(), conn.LocalAddr(), err)
		return
	}
	glog.V(5).Infof("connection host:%v ota:%v \r\n", host, ota)
	remote, err := net.Dial("tcp", host)
	if err != nil {
		if ne, ok := err.(*net.OpError); ok && (ne.Err == syscall.EMFILE || ne.Err == syscall.ENFILE) {
			// log too many open file error
			// EMFILE is process reaches open file limits, ENFILE is system limit
			glog.Errorf("dial error:%v\r\n", err)
		} else {
			glog.Errorf(" connecting to:%v occur err:%v", host, err)
		}
		return
	}
	connector.serverConn[host] = remote
	defer func() {
		if !closed {
			remote.Close()
			delete(connector.serverConn, host)
		}
	}()
	glog.V(5).Infof("piping %s<->%s ota=%v connOta=%v", conn.RemoteAddr(), host, ota, conn.IsOta())
	// Upstream direction runs in its own goroutine; this goroutine pumps
	// the downstream direction and blocks until it finishes.
	if ota {
		go tcpSrv.handleRequest(conn, remote, timeout)
	} else {
		go tcpSrv.PipeThenClose(conn, remote, timeout, true)
	}
	tcpSrv.PipeThenClose(remote, conn, timeout, false)
	closed = true
}
// lock acquires the mutex guarding clientDict.
func (tcpSrv *TCPServer) lock() {
	tcpSrv.mutex.Lock()
}
// unlock releases the mutex guarding clientDict.
func (tcpSrv *TCPServer) unlock() {
	tcpSrv.mutex.Unlock()
}
// process handles one result of the accept loop: on success it looks up (or
// creates) the connector entry for the client address and kicks off
// handleConnection in a new goroutine.
func (tcpSrv *TCPServer) process(accept accepted, cipher *encrypt.Cipher) {
	if accept.err != nil {
		glog.V(5).Infof("accept error: %v\n", accept.err)
		return
	}
	reqAddr := accept.conn.RemoteAddr().String()
	// The lock is taken before the lookup and released on both branches
	// below — careful: unlock happens AFTER the insert on the not-found
	// path so the insert is covered by the mutex.
	tcpSrv.lock()
	connnector, found := tcpSrv.clientDict[reqAddr]
	if !found {
		// Each client gets its own cipher copy so stream state is not shared.
		conn := connection.NewConn(accept.conn, cipher.Copy())
		connnector = &connector{
			clientConn: conn,
			serverConn: make(map[string]net.Conn),
		}
		tcpSrv.clientDict[reqAddr] = connnector
		tcpSrv.unlock()
		glog.V(5).Infof("Created new connection for client %s\n", reqAddr)
	} else {
		glog.V(5).Infof("Found connection for client %s\n", reqAddr)
		tcpSrv.unlock()
	}
	go tcpSrv.handleConnection(reqAddr)
}
//Run start a tcp listen for user: it accepts connections in a loop,
//dispatching each to process(), until Stop() closes the quit channel.
func (tcpSrv *TCPServer) Run() {
	password := tcpSrv.Config.Password
	method := tcpSrv.Config.EncryptMethod
	port := tcpSrv.Config.Port
	portStr := strconv.Itoa(port)
	ln, err := net.Listen("tcp", ":"+portStr)
	if err != nil {
		glog.Errorf("tcp server(%v) error: %v\n", port, err)
		// The original fell through here: deferring Close on a nil listener
		// and then calling Accept would panic.
		return
	}
	defer ln.Close()
	cipher, err := encrypt.NewCipher(method, password)
	if err != nil {
		// %d for the int port (the original used %s, printing %!s(int=...)).
		glog.Errorf("Error generating cipher for port: %d %v\n", port, err)
		return
	}
	glog.V(5).Infof("tcp server listening on %v port %v ...\n", ln.Addr().String(), port)
	for {
		// Accept in a helper goroutine so we can also select on quit.
		c := make(chan accepted, 1)
		go func() {
			glog.V(5).Infoln("wait for accept")
			var conn net.Conn
			conn, err = ln.Accept()
			c <- accepted{conn: conn, err: err}
		}()
		select {
		case <-tcpSrv.quit:
			glog.Infof("Receive Quit singal for %s\r\n", port)
			return
		case accept := <-c:
			tcpSrv.process(accept, cipher.Copy())
		}
	}
}
// Compare reports whether this server's configuration is deep-equal to the
// given client configuration.
func (tcpSrv *TCPServer) Compare(client *config.ConnectionInfo) bool {
	return reflect.DeepEqual(*tcpSrv.Config, *client)
}
|
package vision
import (
"bytes"
"encoding/binary"
"fmt"
"io/ioutil"
"os"
)
// fp is a short alias for fmt.Fprintf.
// NOTE(review): unused in this file section — presumably used elsewhere in
// the package; confirm before removing.
var fp = fmt.Fprintf
// Supported pixel formats; the zero value means "no image".
const (
	Image_type_none = uint32(iota)
	Image_type_gray16
	Image_type_rgba
	Image_type_rgb48
	Image_type_gray8
)
// Image is an in-memory raster: a pixel format tag, dimensions, the total
// byte count (n_bytes = Width*Height*bytes-per-pixel), and the raw pixels.
type Image struct {
	Image_type, Width, Height, n_bytes uint32
	Pixels [] byte
}
// New_image allocates an Image of the given pixel format and dimensions,
// with its pixel buffer zeroed and n_bytes set to the buffer length.
func New_image(image_type, width, height uint32) *Image {
	size := width * height * Bytes_per_pixel(image_type)
	return &Image{
		Image_type: image_type,
		Width:      width,
		Height:     height,
		Pixels:     make([]byte, size),
		n_bytes:    size,
	}
}
// Bytes_per_pixel returns the storage size of one pixel for the given image
// type; it panics on an unknown type.
func Bytes_per_pixel(image_type uint32) uint32 {
	switch image_type {
	case Image_type_none:
		return 0
	case Image_type_gray16:
		return 2
	case Image_type_rgba:
		return 4
	case Image_type_rgb48:
		return 6
	case Image_type_gray8:
		return 1
	}
	// Unknown type is a programming error. (The trailing "return 0" after
	// this panic in the original was unreachable and has been removed.)
	panic(fmt.Errorf("Bytes_Per_Pixel error: unknown image type: %d\n", image_type))
}
// Image_type_name returns a human-readable name for the given image type,
// or "unknown" for unrecognized values.
func Image_type_name(imageType uint32) string {
	switch imageType {
	case Image_type_none:
		return "none"
	case Image_type_gray16:
		return "gray16"
	case Image_type_rgba:
		return "rgba"
	case Image_type_rgb48:
		return "rgb48"
	case Image_type_gray8:
		return "gray8"
	default:
		return "unknown"
	}
}
// Read loads an Image from file_name. The on-disk layout is a big-endian
// header of three uint32s (type, width, height) followed by the raw pixel
// bytes. Any I/O or decode error panics via check.
func Read ( file_name string ) ( * Image ) {
	fn := "Read"
	buf, err := ioutil.ReadFile ( file_name )
	check ( err, fn )
	var image_type, width, height uint32
	buf_reader := bytes.NewBuffer ( buf )
	err = binary.Read ( buf_reader, binary.BigEndian, & image_type )
	check ( err, fn )
	err = binary.Read ( buf_reader, binary.BigEndian, & width )
	check ( err, fn )
	err = binary.Read ( buf_reader, binary.BigEndian, & height )
	check ( err, fn )
	// New_image sizes the pixel slice; binary.Read then fills exactly that
	// many bytes from the remainder of the buffer.
	img := New_image ( image_type, width, height )
	err = binary.Read ( buf_reader, binary.BigEndian, & img.Pixels )
	check ( err, fn )
	return img
}
// Write saves the image to file_name in the same layout Read expects: a
// big-endian header of three uint32s (type, width, height) followed by the
// raw pixel bytes. Any error panics via check.
func (img *Image) Write(file_name string) {
	fn := "Write"
	f, err := os.Create(file_name)
	check(err, fn)
	defer f.Close()
	var buf bytes.Buffer
	err = binary.Write(&buf,
		binary.BigEndian,
		[]uint32{img.Image_type, img.Width, img.Height})
	check(err, fn)
	err = binary.Write(&buf,
		binary.BigEndian,
		img.Pixels)
	// The original overwrote this error with the f.Write result without
	// ever checking it; a failed pixel encode was silently ignored.
	check(err, fn)
	_, err = f.Write(buf.Bytes())
	check(err, fn)
}
func check ( err error, fn string ) {
if err != nil {
panic ( fmt.Errorf ( "Image %s error: |%s|\n", fn, err.Error() ) )
}
}
|
package pbevents
import (
"github.com/hyperledger/burrow/binary"
"github.com/hyperledger/burrow/crypto"
"github.com/hyperledger/burrow/execution/events"
"github.com/hyperledger/burrow/txs/payload"
)
// this mostly contains tedious mapping between protobuf and our domain objects, but it may be worth
// the pain to avoid complexity and edge cases using gogoproto or other techniques.
// GetEventDataCall maps a domain EventDataCall to its protobuf counterpart,
// serializing the origin address and converting the nested CallData.
func GetEventDataCall(edt *events.EventDataCall) *EventDataCall {
	return &EventDataCall{
		Origin: edt.Origin.Bytes(),
		CallData: GetCallData(edt.CallData),
		StackDepth: edt.StackDepth,
		Return: edt.Return,
		Exception: edt.Exception,
	}
}
// GetCallData maps a domain CallData to its protobuf counterpart. It is the
// inverse of (*CallData).CallData.
func GetCallData(cd *events.CallData) *CallData {
	return &CallData{
		Caller: cd.Caller.Bytes(),
		Callee: cd.Callee.Bytes(),
		Data:   cd.Data,
		// Value was dropped by the original, so the round trip through
		// (*CallData).CallData (which does map Value) lost it.
		Value: cd.Value,
		Gas:   cd.Gas,
	}
}
// GetExecutionEvent maps a domain Event to its protobuf counterpart,
// converting the header and picking the appropriate event-data variant.
func GetExecutionEvent(event *events.Event) *ExecutionEvent {
	return &ExecutionEvent{
		Header: GetEventHeader(event.Header),
		EventData: GetEventData(event),
	}
}
// GetEventHeader maps a domain event Header to its protobuf counterpart;
// enum-like fields (TxType, EventType) are carried as their string names.
func GetEventHeader(header *events.Header) *EventHeader {
	return &EventHeader{
		TxType: header.TxType.String(),
		TxHash: header.TxHash,
		EventType: header.EventType.String(),
		EventID: header.EventID,
		Height: header.Height,
		Index: header.Index,
	}
}
// GetEventData selects the protobuf event-data variant for ev: Call wins
// over Log, which wins over Tx; nil when none of the three is set.
func GetEventData(ev *events.Event) isExecutionEvent_EventData {
	if ev.Call != nil {
		// Delegate to GetEventDataCall rather than duplicating its field
		// mapping inline (the original re-spelled the same conversion).
		return &ExecutionEvent_EventDataCall{
			EventDataCall: GetEventDataCall(ev.Call),
		}
	}
	if ev.Log != nil {
		return &ExecutionEvent_EventDataLog{
			EventDataLog: &EventDataLog{
				Address: ev.Log.Address.Bytes(),
				Data:    ev.Log.Data,
				Topics:  GetTopic(ev.Log.Topics),
			},
		}
	}
	if ev.Tx != nil {
		return &ExecutionEvent_EventDataTx{
			EventDataTx: &EventDataTx{
				Return:    ev.Tx.Return,
				Exception: ev.Tx.Exception,
			},
		}
	}
	return nil
}
// GetTopic serializes each 256-bit topic word into its raw byte slice,
// preserving order.
func GetTopic(topics []binary.Word256) [][]byte {
	out := make([][]byte, 0, len(topics))
	for _, topic := range topics {
		out = append(out, topic.Bytes())
	}
	return out
}
// Event maps this protobuf ExecutionEvent back to the domain Event. The
// per-variant getters return nil receivers for unset variants, so at most
// one of Tx/Log/Call is non-nil in the result.
func (ee *ExecutionEvent) Event() *events.Event {
	return &events.Event{
		Header: ee.GetHeader().Header(),
		Tx: ee.GetEventDataTx().Tx(),
		Log: ee.GetEventDataLog().Log(ee.Header.Height),
		Call: ee.GetEventDataCall().Call(ee.Header.TxHash),
	}
}
// Key returns the (height, index) ordering key of this event.
func (ee *ExecutionEvent) Key() events.Key {
	return ee.Header.Key()
}
// Key returns the (height, index) ordering key derived from the header.
func (h *EventHeader) Key() events.Key {
	return events.NewKey(h.Height, h.Index)
}
// Header maps this protobuf header back to the domain Header, parsing the
// string-encoded TxType and EventType back into their enum values.
func (h *EventHeader) Header() *events.Header {
	return &events.Header{
		TxType: payload.TxTypeFromString(h.TxType),
		TxHash: h.TxHash,
		EventType: events.EventTypeFromString(h.EventType),
		EventID: h.EventID,
		Height: h.Height,
		Index: h.Index,
	}
}
// Tx maps back to the domain EventDataTx; nil in, nil out.
func (tx *EventDataTx) Tx() *events.EventDataTx {
	if tx == nil {
		return nil
	}
	return &events.EventDataTx{
		Return: tx.Return,
		Exception: tx.Exception,
	}
}
// Log maps back to the domain EventDataLog at the given block height,
// left-padding each raw topic back into a 256-bit word; nil in, nil out.
func (log *EventDataLog) Log(height uint64) *events.EventDataLog {
	if log == nil {
		return nil
	}
	topicWords := make([]binary.Word256, len(log.Topics))
	for i, bs := range log.Topics {
		topicWords[i] = binary.LeftPadWord256(bs)
	}
	return &events.EventDataLog{
		Height: height,
		Topics: topicWords,
		Address: crypto.MustAddressFromBytes(log.Address),
		Data: log.Data,
	}
}
// Call maps back to the domain EventDataCall; nil in, nil out.
// NOTE(review): the txHash parameter is accepted but never used here —
// confirm whether it should be carried on the result.
func (call *EventDataCall) Call(txHash []byte) *events.EventDataCall {
	if call == nil {
		return nil
	}
	return &events.EventDataCall{
		Return: call.Return,
		CallData: call.CallData.CallData(),
		Origin: crypto.MustAddressFromBytes(call.Origin),
		StackDepth: call.StackDepth,
		Exception: call.Exception,
	}
}
// CallData maps back to the domain CallData, deserializing the caller and
// callee addresses (panicking, via MustAddressFromBytes, on bad lengths).
func (cd *CallData) CallData() *events.CallData {
	return &events.CallData{
		Caller: crypto.MustAddressFromBytes(cd.Caller),
		Callee: crypto.MustAddressFromBytes(cd.Callee),
		Value: cd.Value,
		Gas: cd.Gas,
		Data: cd.Data,
	}
}
|
package main
import (
"crypto/rand"
"encoding/base32"
"encoding/hex"
"fmt"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"io"
"os"
"strconv"
"strings"
"syscall"
"time"
)
// GenerateID returns a random base32 string of length l, sourced from
// crypto/rand. Transient entropy-exhaustion errors are retried with linear
// backoff; any other read error panics.
func GenerateID(l int) string {
	const (
		// ensures we backoff for less than 450ms total. Use the following to
		// select new value, in units of 10ms:
		// n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
		maxretries = 9
		backoff    = time.Millisecond * 10
	)
	var (
		totalBackoff time.Duration
		count        int // bytes successfully read so far across retries
		retries      int
		size         = (l*5 + 7) / 8 // bytes needed for l base32 characters
		u            = make([]byte, size)
	)
	// TODO: Include time component, counter component, random component
	for {
		// This should never block but the read may fail. Because of this,
		// we just try to read the random number generator until we get
		// something. This is a very rare condition but may happen.
		b := time.Duration(retries) * backoff
		time.Sleep(b)
		totalBackoff += b
		// Resume where the previous partial read stopped (u[count:]).
		n, err := io.ReadFull(rand.Reader, u[count:])
		if err != nil {
			if retryOnError(err) && retries < maxretries {
				count += n
				retries++
				logrus.Errorf("error generating version 4 uuid, retrying: %v", err)
				continue
			}
			// Any other errors represent a system problem. What did someone
			// do to /dev/urandom?
			panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
		}
		break
	}
	s := base32.StdEncoding.EncodeToString(u)
	return s[:l]
}
// retryOnError reports whether err is a transient random-source failure
// worth retrying: it unwraps os.PathError and treats EPERM (entropy pool
// exhaustion) as retryable; everything else is fatal.
func retryOnError(err error) bool {
	switch err := err.(type) {
	case *os.PathError:
		return retryOnError(err.Err) // unpack the target error
	case syscall.Errno:
		if err == unix.EPERM {
			// EPERM represents an entropy pool exhaustion, a condition under
			// which we backoff and retry.
			return true
		}
	}
	return false
}
// shortLen is the length of the short form of an ID.
const shortLen = 12

// TruncateID returns the short form of id: any "algo:" style prefix up to
// the first colon is stripped, and the remainder is capped at shortLen
// characters.
func TruncateID(id string) string {
	if _, rest, ok := strings.Cut(id, ":"); ok {
		id = rest
	}
	if len(id) > shortLen {
		return id[:shortLen]
	}
	return id
}
// readerFunc adapts a plain read function to the io.Reader interface.
type readerFunc func(p []byte) (int, error)

func (fn readerFunc) Read(p []byte) (int, error) {
	return fn(p)
}

// generateID draws 32 bytes from r and returns them hex-encoded, retrying
// until the result is usable as a hostname.
func generateID(r io.Reader) string {
	buf := make([]byte, 32)
	for {
		if _, err := io.ReadFull(r, buf); err != nil {
			panic(err) // This shouldn't happen
		}
		id := hex.EncodeToString(buf)
		// if we try to parse the truncated for as an int and we don't have
		// an error then the value is all numeric and causes issues when
		// used as a hostname. ref #3869
		if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err != nil {
			return id
		}
	}
}

// GenerateNonCryptoID produces an ID from the non-blocking random source.
func GenerateNonCryptoID() string {
	return generateID(readerFunc(rand.Read))
}
|
package main
import "fmt"
// person demonstrates a basic struct with a slice field (examples 001/002).
type person struct {
	first string
	last string
	favTatli []string // favorite desserts ("tatlı" is Turkish for dessert)
}
// 003 Example struct
// arac ("vehicle") is the embedded base type for the example below.
type arac struct {
	kapi int // number of doors
	renk string // color
}
// kamyon ("truck") embeds arac and adds a four-wheel-drive flag.
type kamyon struct {
	arac
	dortTeker bool
}
// sedan embeds arac and adds a luxury flag.
type sedan struct {
	arac
	luks bool
}
// main walks through four struct demos: construction and iteration (001),
// structs as map values (002), embedding (003), and an anonymous struct (004).
func main() {
	// 001 Example: struct literal with a slice field, then field access and
	// ranging over the slice.
	p1 := person{
		first: "Kamil",
		last: "KAPLAN",
		favTatli: []string{
			"a",
			"b",
			"c",
		},
	}
	fmt.Println(p1)
	fmt.Printf("Name: %v \t LastName: %v \n", p1.first, p1.last)
	for i, v := range p1.favTatli {
		fmt.Println(i, v)
	}
	// 002 Example: a struct stored as a map value, keyed by last name.
	// Note: map iteration order is unspecified in Go.
	m := map[string]person{
		p1.last: p1,
	}
	fmt.Println(m)
	for i, v := range m {
		fmt.Println(i, v)
	}
	for _, v := range m {
		fmt.Println(v.first)
		fmt.Println(v.last)
		for i, val := range v.favTatli {
			fmt.Println(i, val)
		}
	}
	// 003 Example: embedding — kamyon and sedan both embed arac.
	k := kamyon{
		arac: arac{
			kapi: 2,
			renk: "beyaz",
		},
		dortTeker: true,
	}
	s := sedan{
		arac: arac{
			kapi: 4,
			renk: "gümüş",
		},
		luks: false,
	}
	fmt.Println(k)
	fmt.Println(s)
	// 004 Example: anonymous struct declared and initialized in place.
	strct := struct {
		first string
		friends map[string]int
		favDrink []string
	}{
		first: "Kamil",
		friends: map[string]int{
			"a": 5,
			"b": 6,
			"c": 7,
		},
		favDrink: []string{
			"su",
			"ayran",
		},
	}
	for i, v := range strct.favDrink {
		fmt.Println(i, v)
	}
	for _, val := range strct.friends {
		fmt.Println(val)
	}
}
|
package main
import(
"github.com/nsf/termbox-go"
)
var (
	// frame holds the border thickness (in cells) for each side of the
	// playfield. NOTE(review): the key "botton" is a pre-existing typo for
	// "bottom"; it is a runtime map key also looked up elsewhere, so both
	// sites must change together — left as-is here.
	frame = map[string]int { "top": 2, "botton": 3, "right": 2, "left": 2 }
	// wall is the character drawn for border cells.
	wall = "⬜"
	// screan is the composed display buffer; piledBlock holds settled blocks.
	screan [DisplayY][DisplayX] rune
	piledBlock [DisplayY][DisplayX] rune
)
// 表示枠
func mainScrean() {
wallRune := []rune(wall)[0]
for r := 0; r < DisplayY; r++ {
for c := 0; c < frame["top"]; c++ {
screan[r][c] = wallRune
}
}
for r := 0; r < DisplayY; r++ {
for c := DisplayX - frame["right"]; c < DisplayX; c++ {
screan[r][c] = wallRune
}
}
for r := 0; r < frame["left"]; r++ {
for c := 0; c < DisplayX; c++ {
screan[r][c] = wallRune
}
}
for r := DisplayY - frame["botton"]; r < DisplayY; r++ {
for c := 0; c < DisplayX; c++ {
screan[r][c] = wallRune
}
}
}
// drawScrean renders the display buffer to the terminal via termbox: the
// background cell is drawn first, then any settled block in the same cell
// overdraws it. Columns are scaled by StrWidth for wide characters.
func drawScrean() {
	for r := 0; r < DisplayY; r++ {
		for c := 0; c < DisplayX; c++ {
			termbox.SetCell(c*StrWidth, r, screan[r][c], Coldef, Coldef)
			if piledBlock[r][c] != 0 {
				termbox.SetCell(c*StrWidth, r, piledBlock[r][c], Coldef, Coldef)
			}
		}
	}
}
// pile 現在のブロックをpiledBlockに保存する
func pile() {
for r := 0; r < DisplayY; r++ {
for c := 0; c < DisplayX; c++ {
if currentBlock[r][c] != 0 {
piledBlock[r][c] = currentBlock[r][c]
}
}
}
}
// captureBlock 現在のブロック位置をscrean反映
func captureBlock() {
for r := 0; r < DisplayY; r++ {
for c := 0; c < DisplayX; c++ {
if currentBlock[r][c] != 0 {
screan[r][c] = currentBlock[r][c]
}
}
}
}
// deleteLine 埋まった行を削除する
func deleteLine() {
}
|
package main
import (
"bytes"
"flag"
"github.com/ian-kent/go-log/appenders"
"github.com/ian-kent/go-log/layout"
"github.com/ian-kent/go-log/levels"
"github.com/ian-kent/go-log/log"
gotcha "github.com/ian-kent/gotcha/app"
"github.com/ian-kent/gotcha/http"
"net/url"
"os"
"strconv"
"strings"
)
// maxlen caps the in-memory application log buffer (bytes); once exceeded,
// the buffer is trimmed by dropping its first `retain` bytes.
var maxlen = 262144000
var retain = 52428800
// applog accumulates every formatted log line for the /log page.
var applog bytes.Buffer
// Appender wraps another log appender and mirrors each formatted message
// into applog.
type Appender struct {
	a appenders.Appender
}
// Write forwards to the wrapped appender and appends the formatted line to
// applog, trimming the buffer when it grows past maxlen.
// NOTE(review): applog is mutated without a lock — if the logger delivers
// messages from multiple goroutines this is a data race; confirm the logging
// library serializes appender calls.
func (a *Appender) Write(level levels.LogLevel, message string, args ...interface{}) {
	a.a.Write(level, message, args...)
	applog.Write([]byte(a.Layout().Format(level, message, args...) + "\n"))
	if applog.Len() > maxlen {
		// Drop the oldest `retain` bytes and keep the remainder.
		b := applog.Bytes()[retain:]
		applog = *new(bytes.Buffer)
		applog.Write(b)
	}
}
// SetLayout delegates to the wrapped appender.
func (a *Appender) SetLayout(layout layout.Layout) {
	a.a.SetLayout(layout)
}
// Layout delegates to the wrapped appender.
func (a *Appender) Layout() layout.Layout {
	return a.a.Layout()
}
// NewAppender returns an Appender that mirrors the console appender.
func NewAppender() *Appender {
	return &Appender{
		a: appenders.Console(),
	}
}
// main wires up logging, loads the global and per-workspace configuration,
// builds the in-memory workspace/task model, registers the HTTP routes, and
// serves the web UI until the process is killed.
func main() {
	log.Logger().SetAppender(NewAppender())
	global := "websysd.json"
	flag.StringVar(&global, "global", global, "global environment configuration")
	workspaces := make([]string, 0)
	flag.Var((*AppendSliceValue)(&workspaces), "workspace", "websysd workspace file (can be specified multiple times), defaults to './workspace.json'")
	// NOTE(review): flag.Parse() is never called in this function — confirm
	// flags are parsed elsewhere, otherwise -global/-workspace are ignored.
	// Create our Gotcha application
	var app = gotcha.Create(Asset)
	if len(workspaces) == 0 {
		workspaces = append(workspaces, "./workspace.json")
	}
	LoadConfig(global, workspaces)
	GlobalWorkspace = NewWorkspace(GlobalConfigWorkspace.Name, GlobalConfigWorkspace.Environment, make(map[string]map[string][]string), GlobalConfigWorkspace.InheritEnvironment)
	for fn, args := range GlobalConfigWorkspace.Functions {
		log.Info("=> Creating global function: %s", fn)
		GlobalWorkspace.Functions[fn] = &Function{
			Name:     fn,
			Args:     args.Args,
			Command:  args.Command,
			Executor: args.Executor,
		}
	}
	if GlobalWorkspace.InheritEnvironment {
		log.Info("=> Inheriting process environment into global workspace")
		for _, k := range os.Environ() {
			p := strings.SplitN(k, "=", 2)
			log.Info("  %s = %s", p[0], p[1])
			// TODO variable subst for current env vars
			if _, ok := GlobalWorkspace.Environment[p[0]]; !ok {
				GlobalWorkspace.Environment[p[0]] = p[1]
			}
		}
	}
	for _, ws := range ConfigWorkspaces {
		log.Info("=> Creating workspace: %s", ws.Name)
		var workspace *Workspace
		if wks, ok := Workspaces[ws.Name]; ok {
			// FIX: the format string previously had no argument for %s.
			log.Warn("Workspace %s already exists, merging tasks and environment", ws.Name)
			workspace = wks
		} else {
			workspace = NewWorkspace(ws.Name, ws.Environment, ws.Columns, ws.InheritEnvironment)
			Workspaces[ws.Name] = workspace
		}
		workspace.IsLocked = ws.IsLocked
		if workspace.InheritEnvironment && !GlobalWorkspace.InheritEnvironment {
			log.Info("=> Inheriting process environment into workspace")
			for _, k := range os.Environ() {
				p := strings.SplitN(k, "=", 2)
				log.Info("  %s = %s", p[0], p[1])
				// TODO variable subst for current env vars
				// FIX: this previously populated GlobalWorkspace.Environment,
				// leaking per-workspace inheritance into the global scope.
				if _, ok := workspace.Environment[p[0]]; !ok {
					workspace.Environment[p[0]] = p[1]
				}
			}
		}
		for fn, args := range ws.Functions {
			log.Info("=> Creating workspace function: %s", fn)
			workspace.Functions[fn] = &Function{
				Name:     fn,
				Args:     args.Args,
				Command:  args.Command,
				Executor: args.Executor,
			}
		}
		for _, t := range ws.Tasks {
			log.Info("=> Creating task: %s", t.Name)
			if _, ok := workspace.Tasks[t.Name]; ok {
				// FIX: the format string previously had no argument for %s.
				log.Warn("Task %s already exists, overwriting", t.Name)
			}
			// Task environment: global, then workspace, then task-level
			// values, later layers overriding earlier ones.
			env := make(map[string]string)
			for k, v := range GlobalWorkspace.Environment {
				env[k] = v
			}
			for k, v := range ws.Environment {
				env[k] = v
			}
			for k, v := range t.Environment {
				env[k] = v
			}
			task := NewTask(workspace, t.Name, t.Executor, t.Command, env, t.Service, t.Stdout, t.Stderr, t.Metadata, t.Pwd)
			workspace.Tasks[t.Name] = task
		}
	}
	// Get the router
	r := app.Router
	// Create some routes
	r.Get("/", list_workspaces)
	r.Get("/log", show_log)
	r.Get("/workspace/(?P<workspace>[^/]+)", list_tasks)
	// Serve static content (but really use a CDN)
	r.Get("/images/(?P<file>.*)", r.Static("assets/images/{{file}}"))
	r.Get("/css/(?P<file>.*)", r.Static("assets/css/{{file}}"))
	r.Post("/workspace/(?P<workspace>[^/]+)/task/(?P<task>[^/]+)/start", startTask)
	r.Post("/workspace/(?P<workspace>[^/]+)/task/(?P<task>[^/]+)/stop", stopTask)
	r.Post("/workspace/(?P<workspace>[^/]+)/task/(?P<task>[^/]+)/enable", enableServiceTask)
	r.Post("/workspace/(?P<workspace>[^/]+)/task/(?P<task>[^/]+)/disable", disableServiceTask)
	r.Get("/workspace/(?P<workspace>[^/]+)/task/(?P<task>[^/]+)", taskHistory)
	r.Get("/workspace/(?P<workspace>[^/]+)/task/(?P<task>[^/]+)/run/(?P<run>\\d+)", taskRun)
	r.Get("/workspace/(?P<workspace>[^/]+)/task/(?P<task>[^/]+)/run/(?P<run>\\d+)/stdout", taskRunStdout)
	r.Get("/workspace/(?P<workspace>[^/]+)/task/(?P<task>[^/]+)/run/(?P<run>\\d+)/stderr", taskRunStderr)
	// Start our application
	app.Start()
	// Kill any still-running task processes on the way out.
	defer func() {
		for _, ws := range Workspaces {
			for _, t := range ws.Tasks {
				if t.ActiveTask != nil && t.ActiveTask.Cmd != nil && t.ActiveTask.Cmd.Process != nil {
					t.ActiveTask.Cmd.Process.Kill()
				}
			}
		}
	}()
	// Block forever; the app serves from its own goroutines.
	<-make(chan int)
}
// redir sends the client back to the page named in the Referer header,
// falling back to "/" when no referrer was supplied.
func redir(session *http.Session) {
	target := "/"
	if ref := session.Request.Referer(); len(ref) > 0 {
		target = ref
	}
	session.Redirect(&url.URL{Path: target})
}
// startTask starts the task named in the route, then redirects back to
// the referring page.
func startTask(session *http.Session) {
	wsName, _ := session.Stash["workspace"].(string)
	taskName, _ := session.Stash["task"].(string)
	Workspaces[wsName].Tasks[taskName].Start()
	redir(session)
}
// stopTask stops the task named in the route, then redirects back to the
// referring page.
func stopTask(session *http.Session) {
	wsName, _ := session.Stash["workspace"].(string)
	taskName, _ := session.Stash["task"].(string)
	Workspaces[wsName].Tasks[taskName].Stop()
	redir(session)
}
// enableServiceTask marks the task as a service and starts it if it is
// not already running, then redirects back to the referring page.
func enableServiceTask(session *http.Session) {
	wsName, _ := session.Stash["workspace"].(string)
	taskName, _ := session.Stash["task"].(string)
	task := Workspaces[wsName].Tasks[taskName]
	task.Service = true
	if task.ActiveTask == nil {
		task.Start()
	}
	redir(session)
}
// disableServiceTask clears the task's service flag (a running instance
// is left untouched), then redirects back to the referring page.
func disableServiceTask(session *http.Session) {
	wsName, _ := session.Stash["workspace"].(string)
	taskName, _ := session.Stash["task"].(string)
	Workspaces[wsName].Tasks[taskName].Service = false
	redir(session)
}
// taskHistory renders the run-history page for one task.
func taskHistory(session *http.Session) {
	wsName, _ := session.Stash["workspace"].(string)
	taskName, _ := session.Stash["task"].(string)
	workspace := Workspaces[wsName]
	session.Stash["Title"] = "Task"
	session.Stash["Page"] = "History"
	session.Stash["Workspace"] = workspace
	session.Stash["Task"] = workspace.Tasks[taskName]
	session.RenderWithLayout("task.html", "layout.html", "Content")
}
// taskRun renders the detail page for a single run of a task.
func taskRun(session *http.Session) {
	wsName, _ := session.Stash["workspace"].(string)
	taskName, _ := session.Stash["task"].(string)
	// NOTE(review): the Atoi error is ignored; a non-numeric run id is
	// treated as run 0.
	runIdx, _ := strconv.Atoi(session.Stash["run"].(string))
	workspace := Workspaces[wsName]
	task := workspace.Tasks[taskName]
	session.Stash["Title"] = "Task run"
	session.Stash["Page"] = "TaskRun"
	session.Stash["Workspace"] = workspace
	session.Stash["Task"] = task
	session.Stash["TaskRun"] = task.TaskRuns[runIdx]
	session.RenderWithLayout("taskrun.html", "layout.html", "Content")
}
// taskRunStdout renders the captured stdout of a single task run.
func taskRunStdout(session *http.Session) {
	wsName, _ := session.Stash["workspace"].(string)
	taskName, _ := session.Stash["task"].(string)
	// NOTE(review): the Atoi error is ignored; a non-numeric run id is
	// treated as run 0.
	runIdx, _ := strconv.Atoi(session.Stash["run"].(string))
	workspace := Workspaces[wsName]
	task := workspace.Tasks[taskName]
	session.Stash["Title"] = "Task run stdout"
	session.Stash["Page"] = "TaskOutput"
	session.Stash["Type"] = "stdout"
	session.Stash["Workspace"] = workspace
	session.Stash["Task"] = task
	session.Stash["TaskRun"] = task.TaskRuns[runIdx]
	session.Stash["LogOutput"] = task.TaskRuns[runIdx].StdoutBuf.String()
	session.RenderWithLayout("log.html", "layout.html", "Content")
}
// taskRunStderr renders the captured stderr of a single task run.
func taskRunStderr(session *http.Session) {
	wsName, _ := session.Stash["workspace"].(string)
	taskName, _ := session.Stash["task"].(string)
	// NOTE(review): the Atoi error is ignored; a non-numeric run id is
	// treated as run 0.
	runIdx, _ := strconv.Atoi(session.Stash["run"].(string))
	workspace := Workspaces[wsName]
	task := workspace.Tasks[taskName]
	session.Stash["Title"] = "Task run stderr"
	session.Stash["Page"] = "TaskOutput"
	session.Stash["Type"] = "stderr"
	session.Stash["Workspace"] = workspace
	session.Stash["Task"] = task
	session.Stash["TaskRun"] = task.TaskRuns[runIdx]
	session.Stash["LogOutput"] = task.TaskRuns[runIdx].StderrBuf.String()
	session.RenderWithLayout("log.html", "layout.html", "Content")
}
// list_workspaces renders the overview page listing every workspace.
func list_workspaces(session *http.Session) {
	session.Stash["Title"] = "websysd"
	session.Stash["Page"] = "Workspaces"
	session.Stash["Workspaces"] = Workspaces
	session.RenderWithLayout("workspaces.html", "layout.html", "Content")
}
// list_tasks renders the task list for one workspace.
func list_tasks(session *http.Session) {
	wsName, _ := session.Stash["workspace"].(string)
	workspace := Workspaces[wsName]
	session.Stash["Title"] = "websysd"
	session.Stash["Page"] = "Tasks"
	session.Stash["Workspace"] = workspace
	session.Stash["Tasks"] = workspace.Tasks
	session.RenderWithLayout("tasks.html", "layout.html", "Content")
}
// show_log renders the application's own log buffer.
func show_log(session *http.Session) {
	session.Stash["Title"] = "websysd log"
	session.Stash["Page"] = "AppLog"
	session.Stash["LogOutput"] = applog.String()
	session.RenderWithLayout("applog.html", "layout.html", "Content")
}
|
package command
import "fmt"
// isVersionCommand reports whether the CLI invocation asks for version
// information, i.e. the first argument is "version", "-v" or "--version".
//
// An empty argument list is not a version request; the original
// implementation indexed args[0] unconditionally and panicked on it.
func isVersionCommand(args []string) (isVersion bool) {
	if len(args) == 0 {
		return false
	}
	switch args[0] {
	case "version", "-v", "--version":
		return true
	}
	return false
}
// displayVersion prints the dispatcher's version string, or a placeholder
// message when no version information was compiled in.
func (d *Dispatcher) displayVersion() {
	if len(d.version) == 0 {
		fmt.Println("version information is not available")
		return
	}
	fmt.Println(d.version)
}
|
package httpServer
import (
"log"
"net/http"
"vrcdb/httpServer/handlers"
"vrcdb/httpServer/middlewares"
"vrcdb/wsServer"
"github.com/gorilla/mux"
"github.com/justinas/alice"
)
// Init wires up the HTTP routes: a public /config endpoint plus
// authenticated websocket endpoints, all wrapped in the logging and
// panic-recovery middleware chain, and mounts the result on the default
// mux.
func Init() {
	log.Println("Initializing http routes...")
	chain := alice.New(middlewares.Logger, middlewares.Recover)
	mainRouter := mux.NewRouter()

	// Routes registered on authRouter require authentication.
	authRouter := mainRouter.NewRoute().Subrouter()
	authRouter.Use(middlewares.Authenticator)

	// Public: API configuration.
	mainRouter.HandleFunc("/config", handlers.ConfigGet).Methods("GET")

	// Websocket endpoints (with and without a trailing slash).
	authRouter.HandleFunc("/ws", wsServer.Upgrade)
	authRouter.HandleFunc("/ws/", wsServer.Upgrade)

	http.Handle("/", chain.Then(mainRouter))
}
|
package webhandlers
import (
"database/sql"
"encoding/json"
"errors"
"io/ioutil"
"log"
"net/http"
"github.com/dannylesnik/http-inject-context/models"
"github.com/gorilla/mux"
)
// GetPerson looks up the person identified by the {id} route variable and
// writes it (or a structured error payload) as JSON to the response.
func GetPerson(w http.ResponseWriter, r *http.Request) {
	personID := mux.Vars(r)["id"]
	// Fix: log message previously misspelled "Reuqest".
	log.Printf(" Request URI %s", r.RequestURI)
	log.Printf(" Person ID %s", personID)

	db, ok := r.Context().Value(models.SQLKEY).(*models.DB)
	if !ok {
		http.Error(w, "could not get database connection pool from context", 500)
		return
	}

	person, err := db.GetPerson(personID)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		// errors.Is (instead of ==) still matches if the driver wraps the
		// sentinel. Not found is reported as Code 404 in the JSON payload,
		// following this package's existing convention.
		json.NewEncoder(w).Encode(models.Error{Error: "Can't get Person", Message: err.Error(), Code: 404})
	case err != nil:
		json.NewEncoder(w).Encode(models.Error{Error: "Can't get Person", Message: err.Error(), Code: 500})
	default:
		json.NewEncoder(w).Encode(person)
	}
}
// UpdatePerson decodes a Person from the request body and updates the
// matching row, writing the updated person (or a structured error
// payload) as JSON.
func UpdatePerson(w http.ResponseWriter, r *http.Request) {
	var person models.Person
	reqBody, readErr := ioutil.ReadAll(r.Body)

	db, ok := r.Context().Value(models.SQLKEY).(*models.DB)
	if !ok {
		http.Error(w, "could not get database connection pool from context", 500)
		return
	}
	if readErr != nil {
		json.NewEncoder(w).Encode(models.Error{Error: "Can't read Request", Message: readErr.Error(), Code: 400})
		return
	}
	if err := models.Unmarshal(reqBody, &person); err != nil {
		json.NewEncoder(w).Encode(models.Error{Error: "Can't parse JSON Request", Message: err.Error(), Code: 400})
		return
	}
	result, err := db.UpdatePerson(person)
	if err != nil {
		json.NewEncoder(w).Encode(models.Error{Error: "Can't Update Person!!", Message: err.Error(), Code: 500})
		return
	}
	if result == 0 {
		// Zero affected rows means no person with that ID exists.
		json.NewEncoder(w).Encode(models.Error{Error: "Person with such ID doesnt exist!!!", Message: errors.New("Query returned 0 affected records").Error(), Code: 404})
		return
	}
	json.NewEncoder(w).Encode(person)
}
// CreatePerson decodes a Person from the request body and inserts it,
// writing the stored person (or a structured error payload) as JSON.
func CreatePerson(w http.ResponseWriter, r *http.Request) {
	var person models.Person
	reqBody, readErr := ioutil.ReadAll(r.Body)

	db, ok := r.Context().Value(models.SQLKEY).(*models.DB)
	if !ok {
		http.Error(w, "could not get database connection pool from context", 500)
		return
	}
	if readErr != nil {
		json.NewEncoder(w).Encode(models.Error{Error: "Can't read Request", Message: readErr.Error(), Code: 400})
		return
	}
	if err := models.Unmarshal(reqBody, &person); err != nil {
		json.NewEncoder(w).Encode(models.Error{Error: "Can't parse JSON Request", Message: err.Error(), Code: 400})
		return
	}
	if _, err := db.AddPersonToDB(person); err != nil {
		log.Println(err)
		json.NewEncoder(w).Encode(models.Error{Error: "Can't Save to DB!!", Message: err.Error(), Code: 500})
		return
	}
	json.NewEncoder(w).Encode(person)
}
//DeletePerson -
func DeletePerson(w http.ResponseWriter, r *http.Request) {
personID := mux.Vars(r)["id"]
log.Printf(" Event ID %s", personID)
db, ok := r.Context().Value(models.SQLKEY).(*models.DB)
if !ok {
http.Error(w, "could not get database connection pool from context", 500)
return
}
person, err := db.GetPerson(personID)
if err == sql.ErrNoRows {
json.NewEncoder(w).Encode(models.Error{Error: "Can't get Person", Message: err.Error(), Code: 404})
} else if err != nil {
json.NewEncoder(w).Encode(models.Error{Error: "Can't delete Person", Message: err.Error(), Code: 500})
} else {
result, err := db.DeletePerson(personID)
if err != nil {
json.NewEncoder(w).Encode(models.Error{Error: "Can't delete Person", Message: err.Error(), Code: 500})
} else if result == 0 {
json.NewEncoder(w).Encode(models.Error{Error: "Can't delete Person", Message: "Person does not exist!!!", Code: 404})
} else {
json.NewEncoder(w).Encode(person)
}
}
} |
package bst
// Queue is a FIFO queue of BST nodes, backed by a singly linked list with
// head (front) and tail (back) pointers. The zero value is an empty,
// ready-to-use queue.
type Queue struct {
	head *node
	tail *node
}

// node is one linked-list cell of the queue.
type node struct {
	Value *Node // Value is a type `Node` of the tree node of BST
	Next *node
}
// IsEmpty reports whether the queue holds no elements.
func (q *Queue) IsEmpty() bool {
	return q.head == nil
}
// Enqueue appends the BST node v to the back of the queue.
func (q *Queue) Enqueue(v *Node) {
	cell := &node{Value: v}
	if q.head == nil {
		// Empty queue: the new cell is both front and back.
		q.head = cell
		q.tail = cell
		return
	}
	q.tail.Next = cell
	q.tail = cell
}
// Dequeue removes and returns the front element. The boolean result is
// false when the queue is empty.
func (q *Queue) Dequeue() (*Node, bool) {
	if q.head == nil {
		return nil, false
	}
	front := q.head.Value
	if q.head == q.tail {
		// Removing the last element resets the queue to its empty state.
		q.head, q.tail = nil, nil
	} else {
		q.head = q.head.Next
	}
	return front, true
}
// Peek returns the front element without removing it. The boolean result
// is false when the queue is empty.
func (q *Queue) Peek() (*Node, bool) {
	if q.head == nil {
		return nil, false
	}
	return q.head.Value, true
}
|
package entity_config
import (
"encoding/json"
"fmt"
"github.com/brooklyncentral/brooklyn-cli/models"
"github.com/brooklyncentral/brooklyn-cli/net"
)
// ConfigValue fetches the named config entry of an entity and decodes the
// JSON response into a generic value. An empty response body yields
// (nil, nil).
func ConfigValue(network *net.Network, application, entity, config string) (interface{}, error) {
	raw, err := ConfigValueAsBytes(network, application, entity, config)
	if err != nil || len(raw) == 0 {
		return nil, err
	}
	var value interface{}
	if err := json.Unmarshal(raw, &value); err != nil {
		return nil, err
	}
	return value, nil
}
// ConfigValueAsBytes fetches the named config entry of an entity and
// returns the raw response body.
func ConfigValueAsBytes(network *net.Network, application, entity, config string) ([]byte, error) {
	endpoint := fmt.Sprintf("/v1/applications/%s/entities/%s/config/%s", application, entity, config)
	body, err := network.SendGetRequest(endpoint)
	if err != nil {
		return []byte{}, err
	}
	return body, nil
}
// SetConfig posts a new value for the named config entry of an entity and
// returns the response body as a string.
func SetConfig(network *net.Network, application, entity, config, value string) (string, error) {
	endpoint := fmt.Sprintf("/v1/applications/%s/entities/%s/config/%s", application, entity, config)
	body, err := network.SendPostRequest(endpoint, []byte(value))
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// ConfigList retrieves the config summaries of an entity.
func ConfigList(network *net.Network, application, entity string) ([]models.ConfigSummary, error) {
	endpoint := fmt.Sprintf("/v1/applications/%s/entities/%s/config", application, entity)
	var summaries []models.ConfigSummary
	body, err := network.SendGetRequest(endpoint)
	if err != nil {
		return summaries, err
	}
	err = json.Unmarshal(body, &summaries)
	return summaries, err
}
// PostConfig posts a config value to the entity's config collection
// endpoint (as opposed to SetConfig, which targets one named entry) and
// returns the response body as a string.
func PostConfig(network *net.Network, application, entity, config, value string) (string, error) {
	endpoint := fmt.Sprintf("/v1/applications/%s/entities/%s/config", application, entity)
	body, err := network.SendPostRequest(endpoint, []byte(value))
	if err != nil {
		return "", err
	}
	return string(body), nil
}
func ConfigCurrentState(network *net.Network, application, entity string) (map[string]interface{}, error) {
url := fmt.Sprintf("/v1/applications/%s/entities/%s/config/current-state", application, entity)
var currentState map[string]interface{}
body, err := network.SendGetRequest(url)
if err != nil {
return currentState, err
}
err = json.Unmarshal(body, ¤tState)
return currentState, err
}
|
package server
import (
"MORE.Tech/backend/db"
"MORE.Tech/backend/models"
"github.com/gin-gonic/gin"
)
// GetTestQuestions returns every test question with its answers
// preloaded.
func GetTestQuestions(c *gin.Context) {
	var questions []models.TestQuestion
	if err := db.GetDB().Preload("TestAnswers").Find(&questions).Error; err != nil {
		handleInternalError(c, err)
		return
	}
	handleOK(c, questions)
}
// GetUser returns one user by route id, with the invest profile, game
// week and the game week's related data preloaded.
func GetUser(c *gin.Context) {
	var user models.User
	id := c.Param("id")
	query := db.GetDB()
	for _, relation := range []string{
		"InvestProfile",
		"GameWeek",
		"Instruments",
		"GameWeek.InstrumentRateChanges",
		"GameWeek.News",
		"GameWeek.Advices",
		"GameWeek.Instruments",
		"GameWeek.Instruments.InstrumentType",
	} {
		query = query.Preload(relation)
	}
	if err := query.First(&user, id).Error; err != nil {
		handleInternalError(c, err)
		return
	}
	handleOK(c, user)
}
// CreateUser registers a new user from the JSON payload. New users always
// start on invest profile 1, game week 1, with a balance of 100000.
func CreateUser(c *gin.Context) {
	var user models.User
	if err := c.ShouldBindJSON(&user); err != nil {
		handleBadRequest(c, err)
		return
	}
	user.InvestProfileID = 1
	user.GameWeekID = 1
	user.Balance = 100000
	if err := db.GetDB().Create(&user).Error; err != nil {
		handleInternalError(c, err)
		return
	}
	handleOK(c, user)
}
// GetGameWeek returns one game week by route id with its rate changes,
// news, advices and instruments preloaded.
func GetGameWeek(c *gin.Context) {
	var week models.GameWeek
	id := c.Param("id")
	query := db.GetDB()
	for _, relation := range []string{
		"InstrumentRateChanges",
		"News",
		"Advices",
		"Instruments",
		"Instruments.InstrumentType",
	} {
		query = query.Preload(relation)
	}
	if err := query.First(&week, id).Error; err != nil {
		handleInternalError(c, err)
		return
	}
	handleOK(c, week)
}
func NextWeek(c *gin.Context) {
var result models.User
id := c.Param("id")
err := db.GetDB().First(&result, id).Error
if err != nil {
handleInternalError(c, err)
return
}
result.GameWeekID += 1
// TODO: make balance changes
err = db.GetDB().Model(&result).Updates(result).Error
if err != nil {
handleInternalError(c, err)
return
}
handleOK(c, result)
} |
package main
import (
"fmt"
"os"
"golang.org/x/net/html"
)
// forEachNode walks the tree rooted at n depth-first, calling pre before
// visiting n's children and post afterwards. Either callback may be nil.
func forEachNode(n *html.Node, pre, post func(n *html.Node)) {
	if pre != nil {
		pre(n)
	}
	for child := n.FirstChild; child != nil; child = child.NextSibling {
		forEachNode(child, pre, post)
	}
	if post != nil {
		post(n)
	}
}
// depth is the current element nesting level, used to indent the output.
var depth int
// nodeHasChildren reports whether n has at least one element or text
// child, i.e. whether it needs a separate closing tag rather than a
// self-closing one.
func nodeHasChildren(n *html.Node) bool {
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		if c.Type == html.ElementNode || c.Type == html.TextNode {
			return true
		}
		// (a redundant trailing `continue` was removed here)
	}
	return false
}
// startElement prints the opening representation of n at the current
// indentation: comments as //-lines, text verbatim, and elements as a
// start tag with attributes (self-closed when the element has no element
// or text children). Element nodes also deepen the indentation.
func startElement(n *html.Node) {
	switch n.Type {
	case html.CommentNode:
		fmt.Printf("%*s//%s\n", depth*2, "", n.Data)
	case html.TextNode:
		fmt.Printf("%*s %s\n", depth*2, "", n.Data)
	case html.ElementNode:
		fmt.Printf("%*s<%s ", depth*2, "", n.Data)
		for _, attr := range n.Attr {
			fmt.Printf("%s=%s ", attr.Key, attr.Val)
		}
		if nodeHasChildren(n) {
			fmt.Printf(">\n")
		} else {
			fmt.Printf("/> \n")
		}
		depth++
	}
}
// endElement unwinds the indentation for element nodes and prints a
// closing tag for those that were opened with a separate start tag.
func endElement(n *html.Node) {
	if n.Type != html.ElementNode {
		return
	}
	depth--
	if nodeHasChildren(n) {
		fmt.Printf("%*s</%s>\n", depth*2, "", n.Data)
	}
}
// main parses an HTML document from stdin and pretty-prints its outline
// to stdout; parse failures are reported on stderr with exit code 1.
func main() {
	doc, err := html.Parse(os.Stdin)
	if err != nil {
		fmt.Fprintf(os.Stderr, "outline2: parse: %v", err)
		os.Exit(1)
	}
	forEachNode(doc, startElement, endElement)
}
|
package main
import "fmt"
// main prints byte value 65 ('A') as a character and byte value 0x61
// ('a') as a decimal number in a single formatted line.
func main() {
	var i byte = 65
	var j byte
	j = 0x61
	// Fix: fmt.Println does not interpret format verbs, so the original
	// printed the literal "%c"/"%d" followed by the raw values. Printf is
	// required for substitution; a trailing newline is added to match
	// Println's line-ending behavior.
	fmt.Printf("소문자 %c 10진수로 %d 입니다\n", i, j)
}
|
package cmd
import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"os"
"testing"
"time"
"github.com/gridscale/gsclient-go/v3"
"github.com/gridscale/gscloud/render"
"github.com/gridscale/gscloud/runtime"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
)
// changeTime is the fixed timestamp shared by the storage fixtures below.
var changeTime, _ = time.Parse(time.RFC3339, "2020-07-02T16:15:00+02:00")

// mockStorage is the single storage object the mock client serves.
var mockStorage = gsclient.Storage{
	Properties: gsclient.StorageProperties{
		ObjectUUID: "xxx-xxx-xxx",
		Name: "test",
		Capacity: 10,
		Status: "active",
		ChangeTime: gsclient.GSTime{
			Time: changeTime,
		},
	},
}

// mockStorageList wraps mockStorage as the list returned by
// GetStorageList.
var mockStorageList = []gsclient.Storage{
	mockStorage,
}
// mockClient is a stub storage operator for the command tests:
// GetStorageList returns the fixtures above, and every other operation
// succeeds immediately with a zero value.
type mockClient struct{}
func (g mockClient) GetStorageList(ctx context.Context) ([]gsclient.Storage, error) {
	return mockStorageList, nil
}
func (g mockClient) DeleteStorage(ctx context.Context, id string) error {
	return nil
}
func (g mockClient) CloneStorage(ctx context.Context, id string) (gsclient.CreateResponse, error) {
	return gsclient.CreateResponse{}, nil
}
func (g mockClient) CreateStorage(ctx context.Context, body gsclient.StorageCreateRequest) (gsclient.CreateResponse, error) {
	return gsclient.CreateResponse{}, nil
}
func (g mockClient) GetDeletedStorages(ctx context.Context) ([]gsclient.Storage, error) {
	return []gsclient.Storage{}, nil
}
func (g mockClient) GetStorage(ctx context.Context, id string) (gsclient.Storage, error) {
	return gsclient.Storage{}, nil
}
func (g mockClient) GetStorageEventList(ctx context.Context, id string) ([]gsclient.Event, error) {
	return []gsclient.Event{}, nil
}
func (g mockClient) GetStoragesByLocation(ctx context.Context, id string) ([]gsclient.Storage, error) {
	return []gsclient.Storage{}, nil
}
func (g mockClient) UpdateStorage(ctx context.Context, id string, body gsclient.StorageUpdateRequest) error {
	return nil
}
func (g mockClient) CreateStorageFromBackup(ctx context.Context, backupID, storageName string) (gsclient.CreateResponse, error) {
	return gsclient.CreateResponse{}, nil
}
// Test_StorageListCmd runs `storage ls` against the mock client in table,
// JSON and quiet modes, capturing stdout through a pipe and comparing it
// to the expected rendering of the fixtures.
func Test_StorageListCmd(t *testing.T) {
	marshalledMockStorage, _ := json.Marshal(mockStorageList)
	// Each case selects one output mode and the exact stdout text expected.
	type testCase struct {
		expectedOutput string
		jsonFlag bool
		quietFlag bool
	}
	// Pre-render the expected table using the same renderer the command
	// uses, so the expectation tracks the renderer's formatting.
	buf := new(bytes.Buffer)
	headers := []string{"id", "name", "capacity", "changed", "status"}
	rows := [][]string{
		{
			"xxx-xxx-xxx",
			"test",
			"10",
			changeTime.Local().Format(time.RFC3339),
			"active",
		},
	}
	render.AsTable(buf, headers, rows, render.Options{})
	testCases := []testCase{
		{
			expectedOutput: buf.String(),
		},
		{
			jsonFlag: true,
			expectedOutput: string(marshalledMockStorage) + "\n",
		},
		{
			quietFlag: true,
			expectedOutput: mockStorage.Properties.ObjectUUID + "\n",
		},
	}
	for _, test := range testCases {
		// Redirect stdout into a pipe so the command's output can be read
		// back. NOTE(review): os.Stdout is never restored afterwards.
		r, w, _ := os.Pipe()
		os.Stdout = w
		rootFlags.json = test.jsonFlag
		rootFlags.quiet = test.quietFlag
		mockClient := mockClient{}
		rt, _ = runtime.NewTestRuntime()
		rt.SetStorageOperator(mockClient)
		cmd := storageLsCmd.RunE
		cmd(new(cobra.Command), []string{})
		resetFlags()
		// Close the write end so ReadAll below sees EOF.
		w.Close()
		out, _ := ioutil.ReadAll(r)
		assert.Equal(t, test.expectedOutput, string(out))
	}
}
// Test_StorageCmdDelete runs `storage rm` against the mock client and
// asserts that a successful delete prints nothing to stdout.
func Test_StorageCmdDelete(t *testing.T) {
	type testCase struct {
		expectedOutput string
	}
	err := testCase{expectedOutput: ""}
	// Capture stdout through a pipe. NOTE(review): os.Stdout is never
	// restored afterwards.
	r, w, _ := os.Pipe()
	os.Stdout = w
	mockClient := mockClient{}
	rt, _ = runtime.NewTestRuntime()
	rt.SetStorageOperator(mockClient)
	cmd := storageRmCmd.RunE
	cmd(new(cobra.Command), []string{"rm", mockStorage.Properties.ObjectUUID})
	// Close the write end so ReadAll below sees EOF.
	w.Close()
	out, _ := ioutil.ReadAll(r)
	assert.Equal(t, err.expectedOutput, string(out))
}
|
package serve
// User represents a principal with credentials and per-namespace
// application roles.
type User struct {
	ID         string
	Password   string
	Namespaces map[string]*UserNamespace
}

// UserNamespace groups the applications a user can access within one
// namespace.
type UserNamespace struct {
	Apps map[string]*UserApplication
}

// UserApplication holds the roles a user has for a single application.
type UserApplication struct {
	Roles []string
}

// NewUser creates a User with the given id and an initialized (empty)
// Namespaces map.
//
// Fixes two defects in the original: the id parameter was silently
// ignored (User.ID was left empty), and Namespaces was left nil, so any
// later write to it would panic.
func NewUser(id string) *User {
	return &User{
		ID:         id,
		Namespaces: map[string]*UserNamespace{},
	}
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ipv4
import (
"fmt"
"math"
"time"
"gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/network/internal/ip"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
// IGMP timing constants, taken from RFC 2236 (IGMPv2).
const (
	// v1RouterPresentTimeout from RFC 2236 Section 8.11, Page 18
	// See note on igmpState.igmpV1Present for more detail.
	v1RouterPresentTimeout = 400 * time.Second

	// v1MaxRespTime from RFC 2236 Section 4, Page 5. "The IGMPv1 router
	// will send General Queries with the Max Response Time set to 0. This MUST
	// be interpreted as a value of 100 (10 seconds)."
	//
	// Note that the Max Response Time field is a value in units of deciseconds.
	v1MaxRespTime = 10 * time.Second

	// UnsolicitedReportIntervalMax is the maximum delay between sending
	// unsolicited IGMP reports.
	//
	// Obtained from RFC 2236 Section 8.10, Page 19.
	UnsolicitedReportIntervalMax = 10 * time.Second
)

// protocolMode is the version of IGMP an interface currently performs.
type protocolMode int

const (
	// protocolModeV2OrV3 is the default operating mode (IGMPv2/IGMPv3
	// queriers on the link).
	protocolModeV2OrV3 protocolMode = iota

	// protocolModeV1 indicates IGMPv1 operation (as opposed to the timed
	// compatibility mode below).
	protocolModeV1

	// protocolModeV1Compatibility is for maintaining compatibility with IGMPv1
	// Routers.
	//
	// Per RFC 2236 Section 4 Page 6: "The IGMPv1 router expects Version 1
	// Membership Reports in response to its Queries, and will not pay
	// attention to Version 2 Membership Reports. Therefore, a state variable
	// MUST be kept for each interface, describing whether the multicast
	// Querier on that interface is running IGMPv1 or IGMPv2. This variable
	// MUST be based upon whether or not an IGMPv1 query was heard in the last
	// [Version 1 Router Present Timeout] seconds".
	protocolModeV1Compatibility
)

// IGMPVersion is the forced version of IGMP.
type IGMPVersion int

const (
	// The zero value is deliberately invalid: the blank identifier consumes
	// iota 0 so versions start at 1.
	_ IGMPVersion = iota
	// IGMPVersion1 indicates IGMPv1.
	IGMPVersion1
	// IGMPVersion2 indicates IGMPv2. Note that IGMP may still fallback to V1
	// compatibility mode as required by IGMPv2.
	IGMPVersion2
	// IGMPVersion3 indicates IGMPv3. Note that IGMP may still fallback to V2
	// compatibility mode as required by IGMPv3.
	IGMPVersion3
)
// IGMPEndpoint is a network endpoint that supports IGMP.
type IGMPEndpoint interface {
	// SetIGMPVersion sets the IGMP version.
	//
	// Returns the previous IGMP version.
	SetIGMPVersion(IGMPVersion) IGMPVersion

	// GetIGMPVersion returns the IGMP version.
	GetIGMPVersion() IGMPVersion
}

// IGMPOptions holds options for IGMP.
type IGMPOptions struct {
	// Enabled indicates whether IGMP will be performed.
	//
	// When enabled, IGMP may transmit IGMP report and leave messages when
	// joining and leaving multicast groups respectively, and handle incoming
	// IGMP packets.
	//
	// This field is ignored and is always assumed to be false for interfaces
	// without neighbouring nodes (e.g. loopback).
	Enabled bool
}

// Compile-time check that igmpState implements ip.MulticastGroupProtocol.
var _ ip.MulticastGroupProtocol = (*igmpState)(nil)

// igmpState is the per-interface IGMP state.
//
// igmpState.init() MUST be called after creating an IGMP state.
type igmpState struct {
	// The IPv4 endpoint this igmpState is for.
	ep *endpoint

	// genericMulticastProtocol holds the version-independent multicast
	// group protocol state machine driven by this type's callbacks.
	genericMulticastProtocol ip.GenericMulticastProtocolState

	// mode is used to configure the version of IGMP to perform.
	mode protocolMode

	// igmpV1Job is scheduled when this interface receives an IGMPv1 style
	// message, upon expiration the igmpV1Present flag is cleared.
	// igmpV1Job may not be nil once igmpState is initialized.
	igmpV1Job *tcpip.Job
}
// Enabled implements ip.MulticastGroupProtocol.
//
// IGMP is performed only when enabled in the protocol options, on a
// non-loopback NIC (loopback has no neighbouring nodes), and while the
// endpoint itself is enabled.
func (igmp *igmpState) Enabled() bool {
	if !igmp.ep.protocol.options.IGMP.Enabled {
		return false
	}
	if igmp.ep.nic.IsLoopback() {
		return false
	}
	return igmp.ep.Enabled()
}
// SendReport implements ip.MulticastGroupProtocol.
//
// The report type tracks the negotiated mode: an IGMPv1 membership report
// when operating in V1 or V1-compatibility mode, otherwise an IGMPv2
// membership report.
//
// +checklocksread:igmp.ep.mu
func (igmp *igmpState) SendReport(groupAddress tcpip.Address) (bool, tcpip.Error) {
	igmpType := header.IGMPv2MembershipReport
	switch igmp.mode {
	case protocolModeV2OrV3:
	case protocolModeV1, protocolModeV1Compatibility:
		igmpType = header.IGMPv1MembershipReport
	default:
		panic(fmt.Sprintf("unrecognized mode = %d", igmp.mode))
	}
	// Reports are addressed to the group being reported.
	return igmp.writePacket(groupAddress, groupAddress, igmpType)
}
// SendLeave implements ip.MulticastGroupProtocol.
//
// Leave messages are addressed to the all-routers group; they are not
// sent at all while an IGMPv1 querier is (or is assumed) present.
//
// +checklocksread:igmp.ep.mu
func (igmp *igmpState) SendLeave(groupAddress tcpip.Address) tcpip.Error {
	// As per RFC 2236 Section 6, Page 8: "If the interface state says the
	// Querier is running IGMPv1, this action SHOULD be skipped. If the flag
	// saying we were the last host to report is cleared, this action MAY be
	// skipped."
	switch igmp.mode {
	case protocolModeV2OrV3:
		_, err := igmp.writePacket(header.IPv4AllRoutersGroup, groupAddress, header.IGMPLeaveGroup)
		return err
	case protocolModeV1, protocolModeV1Compatibility:
		return nil
	default:
		panic(fmt.Sprintf("unrecognized mode = %d", igmp.mode))
	}
}
// ShouldPerformProtocol implements ip.MulticastGroupProtocol.
//
// It returns false only for the all-systems group, for which no reports
// are ever sent.
func (igmp *igmpState) ShouldPerformProtocol(groupAddress tcpip.Address) bool {
	// As per RFC 2236 section 6 page 10,
	//
	//   The all-systems group (address 224.0.0.1) is handled as a special
	//   case. The host starts in Idle Member state for that group on every
	//   interface, never transitions to another state, and never sends a
	//   report for that group.
	return groupAddress != header.IPv4AllSystems
}
// igmpv3ReportBuilder accumulates IGMPv3 report records (via AddRecord)
// until they are serialized and transmitted by Send.
type igmpv3ReportBuilder struct {
	igmp *igmpState

	// records is the set of group records added so far.
	records []header.IGMPv3ReportGroupAddressRecordSerializer
}
// AddRecord implements ip.MulticastGroupProtocolV2ReportBuilder.
//
// It translates the generic record type to its IGMPv3 wire value and
// appends a record (with no sources) for groupAddress.
func (b *igmpv3ReportBuilder) AddRecord(genericRecordType ip.MulticastGroupProtocolV2ReportRecordType, groupAddress tcpip.Address) {
	var recordType header.IGMPv3ReportRecordType
	switch genericRecordType {
	case ip.MulticastGroupProtocolV2ReportRecordModeIsInclude:
		recordType = header.IGMPv3ReportRecordModeIsInclude
	case ip.MulticastGroupProtocolV2ReportRecordModeIsExclude:
		recordType = header.IGMPv3ReportRecordModeIsExclude
	case ip.MulticastGroupProtocolV2ReportRecordChangeToIncludeMode:
		recordType = header.IGMPv3ReportRecordChangeToIncludeMode
	case ip.MulticastGroupProtocolV2ReportRecordChangeToExcludeMode:
		recordType = header.IGMPv3ReportRecordChangeToExcludeMode
	case ip.MulticastGroupProtocolV2ReportRecordAllowNewSources:
		recordType = header.IGMPv3ReportRecordAllowNewSources
	case ip.MulticastGroupProtocolV2ReportRecordBlockOldSources:
		recordType = header.IGMPv3ReportRecordBlockOldSources
	default:
		// Fix: the panic message previously misspelled "unrecognied"; it now
		// matches the "unrecognized ..." panics used elsewhere in this file.
		panic(fmt.Sprintf("unrecognized genericRecordType = %d", genericRecordType))
	}
	b.records = append(b.records, header.IGMPv3ReportGroupAddressRecordSerializer{
		RecordType:   recordType,
		GroupAddress: groupAddress,
		Sources:      nil,
	})
}
// Send implements ip.MulticastGroupProtocolV2ReportBuilder.
//
// The accumulated records are packed into as few IGMPv3 reports as fit
// within the endpoint's MTU (less the Router Alert IP option) and sent to
// the IGMPv3 routers address. The first write error encountered is
// returned after all packets have been attempted.
//
// +checklocksread:b.igmp.ep.mu
func (b *igmpv3ReportBuilder) Send() (sent bool, err tcpip.Error) {
	if len(b.records) == 0 {
		return false, err
	}
	options := header.IPv4OptionsSerializer{
		&header.IPv4SerializableRouterAlertOption{},
	}
	mtu := int(b.igmp.ep.MTU()) - int(options.Length())
	allSentWithSpecifiedAddress := true
	var firstErr tcpip.Error
	for records := b.records; len(records) != 0; {
		spaceLeft := mtu
		maxRecords := 0
		// Greedily take as many records as fit in one packet.
		for ; maxRecords < len(records); maxRecords++ {
			tmp := spaceLeft - records[maxRecords].Length()
			if tmp > 0 {
				spaceLeft = tmp
			} else {
				break
			}
		}
		serializer := header.IGMPv3ReportSerializer{Records: records[:maxRecords]}
		records = records[maxRecords:]
		icmpView := buffer.NewViewSize(serializer.Length())
		serializer.SerializeInto(icmpView.AsSlice())
		if sentWithSpecifiedAddress, err := b.igmp.writePacketInner(
			icmpView,
			b.igmp.ep.stats.igmp.packetsSent.v3MembershipReport,
			options,
			header.IGMPv3RoutersAddress,
		); err != nil {
			// Remember only the first error. The original code had the
			// condition inverted (`if firstErr != nil { firstErr = nil }`),
			// which meant firstErr was always nil and every send failure was
			// silently dropped.
			if firstErr == nil {
				firstErr = err
			}
			allSentWithSpecifiedAddress = false
		} else if !sentWithSpecifiedAddress {
			allSentWithSpecifiedAddress = false
		}
	}
	return allSentWithSpecifiedAddress, firstErr
}
// NewReportV2Builder implements ip.MulticastGroupProtocol.
//
// It returns an empty IGMPv3 report builder bound to this state.
func (igmp *igmpState) NewReportV2Builder() ip.MulticastGroupProtocolV2ReportBuilder {
	return &igmpv3ReportBuilder{igmp: igmp}
}
// V2QueryMaxRespCodeToV2Delay implements ip.MulticastGroupProtocol.
//
// code must fit in the 8-bit IGMPv3 Max Resp Code field; a larger value
// indicates a programming error in the caller and panics.
func (*igmpState) V2QueryMaxRespCodeToV2Delay(code uint16) time.Duration {
	if code > math.MaxUint8 {
		panic(fmt.Sprintf("got IGMPv3 MaxRespCode = %d, want <= %d", code, math.MaxUint8))
	}
	return header.IGMPv3MaximumResponseDelay(uint8(code))
}
// V2QueryMaxRespCodeToV1Delay implements ip.MulticastGroupProtocol.
//
// NOTE(review): the code is scaled by time.Millisecond here; confirm
// against the generic protocol's contract for the unit of `code` (IGMPv2
// Max Resp Time on the wire is in deciseconds).
func (*igmpState) V2QueryMaxRespCodeToV1Delay(code uint16) time.Duration {
	return time.Duration(code) * time.Millisecond
}
// init sets up an igmpState struct, and is required to be called before using
// a new igmpState.
//
// Must only be called once for the lifetime of igmp.
func (igmp *igmpState) init(ep *endpoint) {
	igmp.ep = ep
	igmp.genericMulticastProtocol.Init(&ep.mu, ip.GenericMulticastProtocolOptions{
		Rand:                      ep.protocol.stack.Rand(),
		Clock:                     ep.protocol.stack.Clock(),
		Protocol:                  igmp,
		MaxUnsolicitedReportDelay: UnsolicitedReportIntervalMax,
	})
	// As per RFC 2236 Page 9 says "No IGMPv1 Router Present ... is
	// the initial state.
	igmp.mode = protocolModeV2OrV3
	// When the IGMPv1-present timeout fires, drop back out of V1
	// compatibility mode. The job locks ep.mu before running.
	igmp.igmpV1Job = tcpip.NewJob(ep.protocol.stack.Clock(), &ep.mu, func() {
		igmp.mode = protocolModeV2OrV3
	})
}
// isSourceIPValidLocked reports whether src is an acceptable source
// address for a received IGMP message of the given type: queries are
// always accepted, while reports and leaves must originate from a subnet
// assigned to this interface.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) isSourceIPValidLocked(src tcpip.Address, messageType header.IGMPType) bool {
	if messageType == header.IGMPMembershipQuery {
		// RFC 2236 does not require the IGMP implementation to check the source IP
		// for Membership Query messages.
		return true
	}

	// As per RFC 2236 section 10,
	//
	//   Ignore the Report if you cannot identify the source address of the
	//   packet as belonging to a subnet assigned to the interface on which the
	//   packet was received.
	//
	//   Ignore the Leave message if you cannot identify the source address of
	//   the packet as belonging to a subnet assigned to the interface on which
	//   the packet was received.
	//
	// Note: this rule applies to both V1 and V2 Membership Reports.
	var isSourceIPValid bool
	// Stop iterating (return false from the visitor) on the first match.
	igmp.ep.addressableEndpointState.ForEachPrimaryEndpoint(func(addressEndpoint stack.AddressEndpoint) bool {
		if subnet := addressEndpoint.Subnet(); subnet.Contains(src) {
			isSourceIPValid = true
			return false
		}
		return true
	})

	return isSourceIPValid
}
// isPacketValidLocked reports whether a received IGMP packet satisfies
// the IP-level requirements (TTL of 1 and the Router Alert option) and
// has an acceptable source address for its message type.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) isPacketValidLocked(pkt stack.PacketBufferPtr, messageType header.IGMPType, hasRouterAlertOption bool) bool {
	// We can safely assume that the IP header is valid if we got this far.
	iph := header.IPv4(pkt.NetworkHeader().Slice())

	// As per RFC 2236 section 2,
	//
	//   All IGMP messages described in this document are sent with IP TTL 1, and
	//   contain the IP Router Alert option [RFC 2113] in their IP header.
	if !hasRouterAlertOption || iph.TTL() != header.IGMPTTL {
		return false
	}
	return igmp.isSourceIPValidLocked(iph.SourceAddress(), messageType)
}
// handleIGMP handles an IGMP packet.
//
// The packet is validated (length, checksum, TTL/Router Alert, source
// address), per-type statistics are updated, and the message is then
// dispatched to the appropriate query/report handler.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) handleIGMP(pkt stack.PacketBufferPtr, hasRouterAlertOption bool) {
	received := igmp.ep.stats.igmp.packetsReceived
	// Linearize the whole payload so it can be parsed as one header.
	hdr, ok := pkt.Data().PullUp(pkt.Data().Size())
	if !ok {
		received.invalid.Increment()
		return
	}
	h := header.IGMP(hdr)
	if len(h) < header.IGMPMinimumSize {
		received.invalid.Increment()
		return
	}

	// As per RFC 1071 section 1.3,
	//
	//   To check a checksum, the 1's complement sum is computed over the
	//   same set of octets, including the checksum field. If the result
	//   is all 1 bits (-0 in 1's complement arithmetic), the check
	//   succeeds.
	if pkt.Data().Checksum() != 0xFFFF {
		received.checksumErrors.Increment()
		return
	}

	// isValid combines the per-message minimum-length check with the
	// IP-level and source-address validation.
	isValid := func(minimumSize int) bool {
		return len(hdr) >= minimumSize && igmp.isPacketValidLocked(pkt, h.Type(), hasRouterAlertOption)
	}

	switch h.Type() {
	case header.IGMPMembershipQuery:
		received.membershipQuery.Increment()
		// Large-enough queries are treated as IGMPv3; shorter ones as
		// IGMPv1/v2.
		if len(h) >= header.IGMPv3QueryMinimumSize {
			if isValid(header.IGMPv3QueryMinimumSize) {
				igmp.handleMembershipQueryV3(header.IGMPv3Query(h))
			} else {
				received.invalid.Increment()
			}
			return
		} else if !isValid(header.IGMPQueryMinimumSize) {
			received.invalid.Increment()
			return
		}
		igmp.handleMembershipQuery(h.GroupAddress(), h.MaxRespTime())
	case header.IGMPv1MembershipReport:
		received.v1MembershipReport.Increment()
		if !isValid(header.IGMPReportMinimumSize) {
			received.invalid.Increment()
			return
		}
		igmp.handleMembershipReport(h.GroupAddress())
	case header.IGMPv2MembershipReport:
		received.v2MembershipReport.Increment()
		if !isValid(header.IGMPReportMinimumSize) {
			received.invalid.Increment()
			return
		}
		igmp.handleMembershipReport(h.GroupAddress())
	case header.IGMPLeaveGroup:
		received.leaveGroup.Increment()
		if !isValid(header.IGMPLeaveMessageMinimumSize) {
			received.invalid.Increment()
			return
		}
		// As per RFC 2236 Section 6, Page 7: "IGMP messages other than Query or
		// Report, are ignored in all states"

	default:
		// As per RFC 2236 Section 2.1 Page 3: "Unrecognized message types should
		// be silently ignored. New message types may be used by newer versions of
		// IGMP, by multicast routing protocols, or other uses."
		received.unrecognized.Increment()
	}
}
// resetV1Present cancels any pending IGMPv1-present timeout and, if the
// interface was in IGMPv1 compatibility mode, returns it to V2/V3 mode.
// A forced V1 mode (protocolModeV1) is left untouched.
func (igmp *igmpState) resetV1Present() {
	igmp.igmpV1Job.Cancel()
	switch igmp.mode {
	case protocolModeV2OrV3, protocolModeV1:
	case protocolModeV1Compatibility:
		igmp.mode = protocolModeV2OrV3
	default:
		panic(fmt.Sprintf("unrecognized mode = %d", igmp.mode))
	}
}
// handleMembershipQuery handles a membership query.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) handleMembershipQuery(groupAddress tcpip.Address, maxRespTime time.Duration) {
	// As per RFC 2236 Section 6, Page 10: a zero maximum response time means
	// an IGMPv1 router is present; record that fact and (re)schedule the
	// compatibility-mode expiry job.
	if maxRespTime == 0 && igmp.Enabled() {
		switch igmp.mode {
		case protocolModeV1:
			// Already configured as IGMPv1; nothing to track.
		case protocolModeV2OrV3, protocolModeV1Compatibility:
			igmp.igmpV1Job.Cancel()
			igmp.igmpV1Job.Schedule(v1RouterPresentTimeout)
			igmp.mode = protocolModeV1Compatibility
		default:
			panic(fmt.Sprintf("unrecognized mode = %d", igmp.mode))
		}
		maxRespTime = v1MaxRespTime
	}
	igmp.genericMulticastProtocol.HandleQueryLocked(groupAddress, maxRespTime)
}
// handleMembershipQueryV3 handles an IGMPv3 membership query.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) handleMembershipQueryV3(igmpHdr header.IGMPv3Query) {
	// Drop the query when the source list cannot be extracted from the
	// header (Sources reports !ok).
	sources, ok := igmpHdr.Sources()
	if !ok {
		return
	}
	igmp.genericMulticastProtocol.HandleQueryV2Locked(
		igmpHdr.GroupAddress(),
		uint16(igmpHdr.MaximumResponseCode()),
		sources,
		igmpHdr.QuerierRobustnessVariable(),
		igmpHdr.QuerierQueryInterval(),
	)
}
// handleMembershipReport handles a membership report by delegating to the
// generic multicast protocol state machine.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) handleMembershipReport(groupAddress tcpip.Address) {
	igmp.genericMulticastProtocol.HandleReportLocked(groupAddress)
}
// writePacket assembles and sends an IGMP packet of the given type for
// groupAddress to destAddress. The returned bool reports whether the packet
// was sent.
//
// +checklocksread:igmp.ep.mu
func (igmp *igmpState) writePacket(destAddress tcpip.Address, groupAddress tcpip.Address, igmpType header.IGMPType) (bool, tcpip.Error) {
	view := buffer.NewViewSize(header.IGMPReportMinimumSize)
	msg := header.IGMP(view.AsSlice())
	msg.SetType(igmpType)
	msg.SetGroupAddress(groupAddress)
	msg.SetChecksum(header.IGMPCalculateChecksum(msg))

	// Choose which per-type sent-packet counter to bump on success.
	sent := igmp.ep.stats.igmp.packetsSent
	var reportStat tcpip.MultiCounterStat
	switch igmpType {
	case header.IGMPv1MembershipReport:
		reportStat = sent.v1MembershipReport
	case header.IGMPv2MembershipReport:
		reportStat = sent.v2MembershipReport
	case header.IGMPLeaveGroup:
		reportStat = sent.leaveGroup
	default:
		panic(fmt.Sprintf("unrecognized igmp type = %d", igmpType))
	}

	// All IGMP packets carry the IPv4 Router Alert option.
	return igmp.writePacketInner(
		view,
		reportStat,
		header.IPv4OptionsSerializer{
			&header.IPv4SerializableRouterAlertOption{},
		},
		destAddress,
	)
}
// writePacketInner sends buf as an IGMP packet to destAddress, incrementing
// reportStat on success and the dropped counter on a send failure. The
// returned bool reports whether the packet was sent.
//
// +checklocksread:igmp.ep.mu
func (igmp *igmpState) writePacketInner(buf *buffer.View, reportStat tcpip.MultiCounterStat, options header.IPv4OptionsSerializer, destAddress tcpip.Address) (bool, tcpip.Error) {
	pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
		ReserveHeaderBytes: int(igmp.ep.MaxHeaderLength()),
		Payload:            buffer.MakeWithView(buf),
	})
	defer pkt.DecRef()
	// A usable source address is required; if the endpoint has none for this
	// destination, quietly report the packet as not sent (no error).
	addressEndpoint := igmp.ep.acquireOutgoingPrimaryAddressRLocked(destAddress, false /* allowExpired */)
	if addressEndpoint == nil {
		return false, nil
	}
	localAddr := addressEndpoint.AddressWithPrefix().Address
	// Only the address value is needed; release the endpoint reference
	// immediately and clear the pointer so it cannot be reused below.
	addressEndpoint.DecRef()
	addressEndpoint = nil
	if err := igmp.ep.addIPHeader(localAddr, destAddress, pkt, stack.NetworkHeaderParams{
		Protocol: header.IGMPProtocolNumber,
		TTL:      header.IGMPTTL,
		TOS:      stack.DefaultTOS,
	}, options); err != nil {
		// Header construction failing here indicates a programming error,
		// not a runtime condition.
		panic(fmt.Sprintf("failed to add IP header: %s", err))
	}
	sentStats := igmp.ep.stats.igmp.packetsSent
	if err := igmp.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv4Address(destAddress), pkt); err != nil {
		sentStats.dropped.Increment()
		return false, err
	}
	reportStat.Increment()
	return true, nil
}
// joinGroup handles adding a new group to the membership map, setting up the
// IGMP state for the group, and sending and scheduling the required
// messages.
//
// NOTE(review): an earlier version of this comment claimed the function
// returns *tcpip.ErrDuplicateAddress for already-joined groups, but it has
// no return value; duplicate handling is delegated to JoinGroupLocked.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) joinGroup(groupAddress tcpip.Address) {
	igmp.genericMulticastProtocol.JoinGroupLocked(groupAddress)
}
// isInGroup returns true if the specified group has been joined locally.
// Delegates to the generic multicast protocol's locally-joined set.
//
// +checklocksread:igmp.ep.mu
func (igmp *igmpState) isInGroup(groupAddress tcpip.Address) bool {
	return igmp.genericMulticastProtocol.IsLocallyJoinedRLocked(groupAddress)
}
// leaveGroup handles removing the group from the membership map, cancels any
// delay timers associated with that group, and sends the Leave Group message
// if required.
//
// Returns *tcpip.ErrBadLocalAddress when the group was never joined.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) leaveGroup(groupAddress tcpip.Address) tcpip.Error {
	// LeaveGroupLocked reports false only when the group was not joined.
	if !igmp.genericMulticastProtocol.LeaveGroupLocked(groupAddress) {
		return &tcpip.ErrBadLocalAddress{}
	}
	return nil
}
// softLeaveAll leaves all groups from the perspective of IGMP, but remains
// joined locally.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) softLeaveAll() {
	// MakeAllNonMemberLocked stops IGMP activity for every group without
	// removing local membership — presumably used when the endpoint is
	// disabled; confirm against callers.
	igmp.genericMulticastProtocol.MakeAllNonMemberLocked()
}
// initializeAll attempts to initialize the IGMP state for each group that
// has been joined locally.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) initializeAll() {
	igmp.genericMulticastProtocol.InitializeGroupsLocked()
}
// sendQueuedReports attempts to send any reports that are queued for
// sending, delegating to the generic multicast protocol state machine.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) sendQueuedReports() {
	igmp.genericMulticastProtocol.SendQueuedReportsLocked()
}
// setVersion sets the IGMP version and returns the previously configured
// version.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) setVersion(v IGMPVersion) IGMPVersion {
	// Capture the outgoing mode before mutating anything so the previous
	// version can be reported back.
	prevMode := igmp.mode
	igmp.igmpV1Job.Cancel()

	var wasGenericV1 bool
	switch v {
	case IGMPVersion3:
		wasGenericV1 = igmp.genericMulticastProtocol.SetV1ModeLocked(false)
		igmp.mode = protocolModeV2OrV3
	case IGMPVersion2:
		// IGMPv1 and IGMPv2 both map to V1 of the generic multicast protocol.
		wasGenericV1 = igmp.genericMulticastProtocol.SetV1ModeLocked(true)
		igmp.mode = protocolModeV2OrV3
	case IGMPVersion1:
		// IGMPv1 and IGMPv2 both map to V1 of the generic multicast protocol.
		wasGenericV1 = igmp.genericMulticastProtocol.SetV1ModeLocked(true)
		igmp.mode = protocolModeV1
	default:
		panic(fmt.Sprintf("unrecognized version = %d", v))
	}

	return toIGMPVersion(prevMode, wasGenericV1)
}
// toIGMPVersion maps the internal (mode, generic-protocol-V1) pair to the
// externally visible IGMP version.
func toIGMPVersion(mode protocolMode, genericV1 bool) IGMPVersion {
	switch mode {
	case protocolModeV1:
		return IGMPVersion1
	case protocolModeV2OrV3, protocolModeV1Compatibility:
		// In V2/V3 modes, the generic protocol's V1 flag distinguishes
		// IGMPv2 from IGMPv3.
		if genericV1 {
			return IGMPVersion2
		}
		return IGMPVersion3
	}
	panic(fmt.Sprintf("unrecognized mode = %d", mode))
}
// getVersion returns the currently configured IGMP version.
//
// +checklocksread:igmp.ep.mu
func (igmp *igmpState) getVersion() IGMPVersion {
	genericV1 := igmp.genericMulticastProtocol.GetV1ModeLocked()
	return toIGMPVersion(igmp.mode, genericV1)
}
|
package invoice
import "github.com/mpdroog/invoiced/db"
// InvoiceMail holds the e-mail envelope and body associated with an invoice.
type InvoiceMail struct {
	From    string
	Subject string
	To      string
	Body    string
}
// InvoiceEntity identifies the issuing entity (sender) on an invoice.
type InvoiceEntity struct {
	Name    string `validate:"nonzero"`
	Street1 string `validate:"nonzero"`
	Street2 string `validate:"nonzero"`
}
// InvoiceCustomer identifies the customer (recipient) on an invoice.
type InvoiceCustomer struct {
	Name    string `validate:"nonzero"`
	Street1 string `validate:"nonzero"`
	Street2 string `validate:"nonzero"`
	Vat     string // VAT registration number (optional — no validation tag)
	Coc     string // presumably chamber-of-commerce number — TODO confirm
}
// InvoiceMeta carries the identifiers, status, and lifecycle dates of an
// invoice. All dates are validated strings, not time values.
type InvoiceMeta struct {
	Conceptid string `validate:"slug"` // identifier while the invoice is a draft/concept — TODO confirm
	Status    string `validate:"slug"`
	Invoiceid string `validate:"slug"`
	Issuedate string `validate:"date"`
	Ponumber  string `validate:"slug"` // presumably purchase-order number — TODO confirm
	Duedate   string `validate:"nonzero,date"` // only date required to be non-empty
	Paydate   string `validate:"date"`
	Freefield string // free-form text, unvalidated
	HourFile  string // reference to an hours file — TODO confirm semantics
}
// InvoiceLine is a single billable line on an invoice. Quantity and money
// amounts are kept as validated strings rather than numeric types.
type InvoiceLine struct {
	Description string `validate:"nonzero"`
	Quantity    string `validate:"nonzero,qty"`
	Price       string `validate:"nonzero,price"`
	Total       string `validate:"nonzero,price"`
}
// InvoiceTotal holds the invoice totals as validated price strings.
type InvoiceTotal struct {
	Ex    string `validate:"nonzero,price"` // presumably total excluding tax — TODO confirm
	Tax   string `validate:"nonzero,price"`
	Total string `validate:"nonzero,price"`
}
// InvoiceBank holds the payee's registration numbers and bank details as
// printed on the invoice.
type InvoiceBank struct {
	Vat  string `validate:"nonzero"`
	Coc  string `validate:"nonzero"`
	Iban string `validate:"nonzero,iban"`
	Bic  string `validate:"nonzero,bic"`
}
// Invoice is the aggregate invoice document: issuing entity, customer,
// metadata, line items, totals, bank details, and the associated mail.
type Invoice struct {
	Company  string `validate:"nonzero"`
	Entity   InvoiceEntity
	Customer InvoiceCustomer
	Meta     InvoiceMeta
	Lines    []InvoiceLine
	Notes    string
	Total    InvoiceTotal
	Bank     InvoiceBank
	Mail     InvoiceMail
}
// ListReply is the response payload for listing invoices: invoices grouped
// by a string key (presumably status or period — confirm against caller),
// plus the repository commit messages.
type ListReply struct {
	Invoices map[string][]*Invoice
	Commits  []*db.CommitMessage
}
|
package main
import (
"gopkg.in/alecthomas/kingpin.v2"
"os"
)
// main wires up the cloudkey command tree (config, encrypt, decrypt and
// their re-* variants, each with GCP and AWS backends) and dispatches on
// the process arguments.
func main() {
	app := kingpin.New("cloudkey", "Encrypt and decrypt files with key in the cloud.")
	app.Version("0.0.1")

	configCmd := app.Command("config", "Create configuration file").Alias("c")
	gcpConfig := configCmd.Command("gcp", "Create configuration file for GCP").Alias("g")
	configGcpGcloudAccount(gcpConfig)
	configGcpServiceAccount(gcpConfig)
	awsConfig := configCmd.Command("aws", "Create configuration file for AWS").Alias("a")
	configAwsStaticCreds(awsConfig)
	configAwsSharedCreds(awsConfig)

	encryptCmd := app.Command("encrypt", "Encrypt file").Alias("en")
	encryptGcp(encryptCmd)
	encryptAws(encryptCmd)

	decryptCmd := app.Command("decrypt", "Decrypt file").Alias("de")
	decryptGcp(decryptCmd)
	decryptAws(decryptCmd)

	reencryptCmd := app.Command("re-encrypt", "Re-encrypt file, if encrypted file exists").Alias("ren")
	reEncryptGcp(reencryptCmd)
	reEncryptAws(reencryptCmd)

	redecryptCmd := app.Command("re-decrypt", "Re-decrypt file, if encrypted file exists").Alias("rde")
	reDecryptGcp(redecryptCmd)
	reDecryptAws(redecryptCmd)

	kingpin.MustParse(app.Parse(os.Args[1:]))
}
// configGcpGcloudAccount registers the "config gcp gcloud-account"
// subcommand (alias "g"), which writes a cloudkey configuration file for
// GCP KMS using the active gcloud account credentials.
func configGcpGcloudAccount(parent *kingpin.CmdClause) {
	cmd := parent.Command("gcloud-account", "Create configuration file for GCP using gcloud account").Alias("g")
	// Positional argument: path of the configuration file to create.
	config := cmd.Arg("config", "config file").Required().String()
	project := cmd.Flag("project", "project name").Short('p').Required().String()
	location := cmd.Flag("location", "keyring location").Short('l').Required().String()
	keyring := cmd.Flag("keyring", "keyring name").Short('r').Required().String()
	key := cmd.Flag("key", "key name").Short('k').Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		return createGCPConfigForGcloudAccount(*config, *project, *location, *keyring, *key)
	})
}
// configGcpServiceAccount registers the "config gcp service-account"
// subcommand (alias "s"), which writes a cloudkey configuration file for
// GCP KMS using a service-account JSON key.
func configGcpServiceAccount(parent *kingpin.CmdClause) {
	// The alias used to be "g", colliding with the sibling "gcloud-account"
	// command's alias under the same parent; use "s" instead.
	cmd := parent.Command("service-account", "Create configuration file for GCP using service account").Alias("s")
	// Positional argument: path of the configuration file to create.
	config := cmd.Arg("config", "config file").Required().String()
	project := cmd.Flag("project", "project name").Short('p').Required().String()
	location := cmd.Flag("location", "keyring location").Short('l').Required().String()
	keyring := cmd.Flag("keyring", "keyring name").Short('r').Required().String()
	key := cmd.Flag("key", "key name").Short('k').Required().String()
	serviceAccountKey := cmd.Flag("service-account-key", "service account json key name").Short('s').String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		return createGCPConfigForServiceAccount(*config, *project, *location, *keyring, *key, *serviceAccountKey)
	})
}
// configAwsStaticCreds registers the "config aws static-creds" subcommand
// (alias "st"), which writes a cloudkey configuration file for AWS KMS
// using static credentials.
func configAwsStaticCreds(parent *kingpin.CmdClause) {
	cmd := parent.Command("static-creds", "Create configuration file for AWS using static credentials").Alias("st")
	// Positional argument: path of the configuration file to create.
	config := cmd.Arg("config", "config file").Required().String()
	accessKeyID := cmd.Flag("access-key-id", "access key id").Short('a').Required().String()
	secretAccessKey := cmd.Flag("secret-access-key", "secret access key").Short('s').Required().String()
	accessToken := cmd.Flag("access-token", "access token").Short('t').Required().String()
	region := cmd.Flag("region", "region name").Short('r').Required().String()
	key := cmd.Flag("key", "crypto key id").Short('k').Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		return createAWSConfigForStaticCredentials(*config, *accessKeyID, *secretAccessKey, *accessToken, *region, *key)
	})
}
// configAwsSharedCreds registers the "config aws shared-creds" subcommand
// (alias "sh"), which writes a cloudkey configuration file for AWS KMS
// using a shared credentials file and profile.
func configAwsSharedCreds(parent *kingpin.CmdClause) {
	// The alias used to be "st", colliding with the sibling "static-creds"
	// command's alias under the same parent; use "sh" instead.
	cmd := parent.Command("shared-creds", "Create configuration file for AWS using shared credentials").Alias("sh")
	// Positional argument: path of the configuration file to create.
	config := cmd.Arg("config", "config file").Required().String()
	file := cmd.Flag("cred-file", "credential file").Short('f').Required().String()
	profile := cmd.Flag("profile", "profile name").Short('p').Required().String()
	region := cmd.Flag("region", "region name").Short('r').Required().String()
	key := cmd.Flag("key", "crypto key id").Short('k').Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		return createAWSConfigForSharedCredentials(*config, *file, *profile, *region, *key)
	})
}
// encryptGcp registers the "encrypt gcp" subcommand, which encrypts a file
// with a key held in GCP KMS.
func encryptGcp(parent *kingpin.CmdClause) {
	cmd := parent.Command("gcp", "Encrypt file using GCP").Alias("g")
	config := cmd.Flag("config", "config file").Short('c').Required().String()
	extension := cmd.Flag("extension", "encrypted files extension name").Short('e').Default(".crypted").String()
	file := cmd.Arg("file", "target file").Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		ckey, err := NewCloudKey(*config)
		if err != nil {
			return err
		}
		return ckey.EncryptGCP(*file, *extension)
	})
}
// encryptAws registers the "encrypt aws" subcommand, which encrypts a file
// with a key held in AWS KMS.
func encryptAws(parent *kingpin.CmdClause) {
	cmd := parent.Command("aws", "Encrypt file using AWS").Alias("a")
	config := cmd.Flag("config", "config file").Short('c').Required().String()
	extension := cmd.Flag("extension", "encrypted files extension name").Short('e').Default(".crypted").String()
	file := cmd.Arg("file", "target file").Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		ckey, err := NewCloudKey(*config)
		if err != nil {
			return err
		}
		return ckey.EncryptAWS(*file, *extension)
	})
}
// decryptGcp registers the "decrypt gcp" subcommand, which decrypts a file
// with a key held in GCP KMS.
func decryptGcp(parent *kingpin.CmdClause) {
	cmd := parent.Command("gcp", "Decrypt file using GCP").Alias("g")
	config := cmd.Flag("config", "config file").Short('c').Required().String()
	extension := cmd.Flag("extension", "encrypted files extension name").Short('e').Default(".crypted").String()
	file := cmd.Arg("file", "target file").Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		ckey, err := NewCloudKey(*config)
		if err != nil {
			return err
		}
		return ckey.DecryptGCP(*file, *extension)
	})
}
// decryptAws registers the "decrypt aws" subcommand, which decrypts a file
// with a key held in AWS KMS.
func decryptAws(parent *kingpin.CmdClause) {
	cmd := parent.Command("aws", "Decrypt file using AWS").Alias("a")
	config := cmd.Flag("config", "config file").Short('c').Required().String()
	extension := cmd.Flag("extension", "encrypted files extension name").Short('e').Default(".crypted").String()
	file := cmd.Arg("file", "target file").Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		ckey, err := NewCloudKey(*config)
		if err != nil {
			return err
		}
		return ckey.DecryptAWS(*file, *extension)
	})
}
// reEncryptGcp registers the "re-encrypt gcp" subcommand, which walks a
// directory and re-encrypts each file that already has an encrypted
// counterpart, using GCP KMS.
func reEncryptGcp(parent *kingpin.CmdClause) {
	// Help text grammar fixed ("if encrypted file is exists" was broken).
	cmd := parent.Command("gcp", "Encrypt files recursively, if an encrypted file exists, using GCP KMS").Alias("g")
	config := cmd.Flag("config", "config file").Short('c').Required().String()
	extension := cmd.Flag("extension", "encrypted files extension name").Short('e').Default(".crypted").String()
	dir := cmd.Arg("dir", "target dir").Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		ckey, err := NewCloudKey(*config)
		if err != nil {
			return err
		}
		return ckey.ReEncryptGCP(*dir, *extension)
	})
}
// reEncryptAws registers the "re-encrypt aws" subcommand, which walks a
// directory and re-encrypts each file that already has an encrypted
// counterpart, using AWS KMS.
func reEncryptAws(parent *kingpin.CmdClause) {
	// Help text grammar fixed ("if encrypted file is exists" was broken).
	cmd := parent.Command("aws", "Encrypt files recursively, if an encrypted file exists, using AWS KMS").Alias("a")
	config := cmd.Flag("config", "config file").Short('c').Required().String()
	extension := cmd.Flag("extension", "encrypted files extension name").Short('e').Default(".crypted").String()
	dir := cmd.Arg("dir", "target dir").Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		ckey, err := NewCloudKey(*config)
		if err != nil {
			return err
		}
		return ckey.ReEncryptAWS(*dir, *extension)
	})
}
// reDecryptGcp registers the "re-decrypt gcp" subcommand, which walks a
// directory and re-decrypts each file that has an encrypted counterpart,
// using GCP KMS.
func reDecryptGcp(parent *kingpin.CmdClause) {
	// Help text grammar fixed ("if encrypted file is exists" was broken).
	cmd := parent.Command("gcp", "Decrypt files recursively, if an encrypted file exists, using GCP KMS").Alias("g")
	config := cmd.Flag("config", "config file").Short('c').Required().String()
	extension := cmd.Flag("extension", "encrypted files extension name").Short('e').Default(".crypted").String()
	dir := cmd.Arg("dir", "target dir").Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		ckey, err := NewCloudKey(*config)
		if err != nil {
			return err
		}
		return ckey.ReDecryptGCP(*dir, *extension)
	})
}
// reDecryptAws registers the "re-decrypt aws" subcommand, which walks a
// directory and re-decrypts each file that has an encrypted counterpart,
// using AWS KMS.
func reDecryptAws(parent *kingpin.CmdClause) {
	// Help text grammar fixed ("if encrypted file is exists" was broken).
	cmd := parent.Command("aws", "Decrypt files recursively, if an encrypted file exists, using AWS KMS").Alias("a")
	config := cmd.Flag("config", "config file").Short('c').Required().String()
	extension := cmd.Flag("extension", "encrypted files extension name").Short('e').Default(".crypted").String()
	dir := cmd.Arg("dir", "target dir").Required().String()
	cmd.Action(func(context *kingpin.ParseContext) error {
		ckey, err := NewCloudKey(*config)
		if err != nil {
			return err
		}
		return ckey.ReDecryptAWS(*dir, *extension)
	})
}
|
// Copyright © 2020 Weald Technology Trading
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mock
import (
"bytes"
"errors"
"github.com/google/uuid"
e2types "github.com/wealdtech/go-eth2-types/v2"
)
// Account is a mock account backed by a randomly generated BLS private key,
// with passphrase-guarded lock/unlock semantics.
type Account struct {
	id         uuid.UUID
	name       string
	privateKey *e2types.BLSPrivateKey
	unlocked   bool   // true while the account is allowed to sign
	passphrase []byte // compared against the passphrase supplied to Unlock
}
// NewAccount creates a new account with a random ID and a freshly generated
// BLS private key, protected by the given passphrase. The account starts
// locked. Panics if randomness or key generation fails.
func NewAccount(name string, passphrase []byte) *Account {
	id, err := uuid.NewRandom()
	if err != nil {
		panic(err)
	}
	key, err := e2types.GenerateBLSPrivateKey()
	if err != nil {
		panic(err)
	}
	return &Account{
		id:         id,
		name:       name,
		privateKey: key,
		passphrase: passphrase,
	}
}
// ID provides the unique identifier for the account.
func (a *Account) ID() uuid.UUID {
	return a.id
}
// Name provides the human-readable name for the account.
func (a *Account) Name() string {
	return a.name
}
// PublicKey provides the BLS public key derived from the account's private
// key.
func (a *Account) PublicKey() e2types.PublicKey {
	return a.privateKey.PublicKey()
}
// Path provides the derivation path for the account. This mock account is
// not derived from a path, so the result is always empty.
func (a *Account) Path() string {
	return ""
}
// Lock locks the account. A locked account cannot sign.
func (a *Account) Lock() {
	a.unlocked = false
}
// Unlock unlocks the account when the supplied passphrase matches the one
// it was created with; an unlocked account can sign. A mismatch leaves the
// account locked and returns an error.
func (a *Account) Unlock(passphrase []byte) error {
	if !bytes.Equal(a.passphrase, passphrase) {
		return errors.New("invalid passphrase")
	}
	a.unlocked = true
	return nil
}
// IsUnlocked returns true if the account is unlocked (i.e. may sign).
func (a *Account) IsUnlocked() bool {
	return a.unlocked
}
// Sign signs data with the account's private key. The account must be
// unlocked first.
func (a *Account) Sign(data []byte) (e2types.Signature, error) {
	if !a.IsUnlocked() {
		return nil, errors.New("account is locked")
	}
	// The previous implementation called a.Sign(data) here, recursing
	// unconditionally until stack overflow once the account was unlocked.
	// Delegate to the BLS private key instead.
	return a.privateKey.Sign(data), nil
}
|
package models
import (
"github.com/fatih/structs"
)
// Teacher holds information for a teacher
type Teacher struct {
ID string `json:"id" structs:"id" bson:"_id" db:"id"`
Name string `json:"name" structs:"name" bson:"name" db:"name"`
Age string `json:"age" structs:"age" bson:"age" db:"age"`
Department string `json:"department" structs:"department" bson:"department" db:"department"`
Salary int `json:"salary" structs:"salary" bson:"salary" db:"salary"`
}
// Map converts the Teacher struct to a map representation using the
// `structs` tag names as keys (via structs.Map).
func (t *Teacher) Map() map[string]interface{} {
	return structs.Map(t)
}
// Names returns the field names of the Teacher model, preferring each
// field's `structs` tag over its Go field name when a tag is present.
func (t *Teacher) Names() []string {
	fields := structs.Fields(t)
	names := make([]string, 0, len(fields))
	for _, field := range fields {
		if tag := field.Tag(structs.DefaultTagName); tag != "" {
			names = append(names, tag)
			continue
		}
		names = append(names, field.Name())
	}
	return names
}
|
// Package config contains the flags and defaults for Worker configuration.
package config
import (
"fmt"
"io"
"os"
"reflect"
"sort"
"strings"
"time"
"gopkg.in/urfave/cli.v1"
)
var (
defaultAmqpURI = "amqp://"
defaultBaseDir = "."
defaultFilePollingInterval, _ = time.ParseDuration("5s")
defaultHTTPPollingInterval, _ = time.ParseDuration("3s")
defaultHTTPRefreshClaimInterval, _ = time.ParseDuration("5s")
defaultPoolSize = 1
defaultProviderName = "docker"
defaultQueueType = "amqp"
defaultHardTimeout, _ = time.ParseDuration("50m")
defaultInitialSleep, _ = time.ParseDuration("1s")
defaultLogTimeout, _ = time.ParseDuration("10m")
defaultMaxLogLength = 4500000
defaultScriptUploadTimeout, _ = time.ParseDuration("3m30s")
defaultStartupTimeout, _ = time.ParseDuration("4m")
defaultBuildCacheFetchTimeout, _ = time.ParseDuration("5m")
defaultBuildCachePushTimeout, _ = time.ParseDuration("5m")
defaultHostname, _ = os.Hostname()
defaultLanguage = "default"
defaultDist = "xenial"
defaultArch = "amd64"
defaultGroup = "stable"
defaultOS = "linux"
configType = reflect.ValueOf(Config{}).Type()
defs = []*ConfigDef{
NewConfigDef("ProviderName", &cli.StringFlag{
Value: defaultProviderName,
Usage: "The name of the provider to use. See below for provider-specific configuration",
}),
NewConfigDef("QueueType", &cli.StringFlag{
Value: defaultQueueType,
Usage: `The name of the queue type to use ("amqp", "http", or "file")`,
}),
NewConfigDef("AmqpHeartbeat", &cli.DurationFlag{
Value: 10 * time.Second,
Usage: "The heartbeat timeout value defines after what time the peer TCP connection should be considered unreachable",
}),
NewConfigDef("AmqpConsumerPriority", &cli.IntFlag{
Value: 0,
Usage: "The consumer priority to set when consuming jobs",
}),
NewConfigDef("AmqpURI", &cli.StringFlag{
Value: defaultAmqpURI,
Usage: `The URI to the AMQP server to connect to (only valid for "amqp" queue type)`,
}),
NewConfigDef("LogsAmqpURI", &cli.StringFlag{
Usage: `The URI to the Logs AMQP server to connect to (only valid for "amqp" queue type)`,
}),
NewConfigDef("AmqpInsecure", &cli.BoolFlag{
Usage: `Whether to connect to the AMQP server without verifying TLS certificates (only valid for "amqp" queue type)`,
}),
NewConfigDef("AmqpTlsCert", &cli.StringFlag{
Usage: `The TLS certificate used to connet to the jobs AMQP server`,
}),
NewConfigDef("AmqpTlsCertPath", &cli.StringFlag{
Usage: `Path to the TLS certificate used to connet to the jobs AMQP server`,
}),
NewConfigDef("LogsAmqpTlsCert", &cli.StringFlag{
Usage: `The TLS certificate used to connet to the logs AMQP server`,
}),
NewConfigDef("LogsAmqpTlsCertPath", &cli.StringFlag{
Usage: `Path to the TLS certificate used to connet to the logs AMQP server`,
}),
NewConfigDef("BaseDir", &cli.StringFlag{
Value: defaultBaseDir,
Usage: `The base directory for file-based queues (only valid for "file" queue type)`,
}),
NewConfigDef("FilePollingInterval", &cli.DurationFlag{
Value: defaultFilePollingInterval,
Usage: `The interval at which file-based queues are checked (only valid for "file" queue type)`,
}),
NewConfigDef("PoolSize", &cli.IntFlag{
Value: defaultPoolSize,
Usage: "The size of the processor pool, affecting the number of jobs this worker can run in parallel",
}),
NewConfigDef("BuildAPIURI", &cli.StringFlag{
Usage: "The full URL to the build API endpoint to use. Note that this also requires the path of the URL. If a username is included in the URL, this will be translated to a token passed in the Authorization header",
}),
NewConfigDef("QueueName", &cli.StringFlag{
Usage: "The AMQP queue to subscribe to for jobs",
}),
NewConfigDef("HTTPPollingInterval", &cli.DurationFlag{
Value: defaultHTTPPollingInterval,
Usage: `Sleep interval between new job requests (only valid for "http" queue type)`,
}),
NewConfigDef("HTTPRefreshClaimInterval", &cli.DurationFlag{
Value: defaultHTTPRefreshClaimInterval,
Usage: `Sleep interval between job claim refresh requests (only valid for "http" queue type)`,
}),
NewConfigDef("LibratoEmail", &cli.StringFlag{
Usage: "Librato metrics account email",
}),
NewConfigDef("LibratoToken", &cli.StringFlag{
Usage: "Librato metrics account token",
}),
NewConfigDef("LibratoSource", &cli.StringFlag{
Value: defaultHostname,
Usage: "Librato metrics source name",
}),
NewConfigDef("SentryDSN", &cli.StringFlag{
Usage: "The DSN to send Sentry events to",
}),
NewConfigDef("SentryHookErrors", &cli.BoolFlag{
Usage: "Add logrus.ErrorLevel to logrus sentry hook",
}),
NewConfigDef("Hostname", &cli.StringFlag{
Value: defaultHostname,
Usage: "Host name used in log output to identify the source of a job",
}),
NewConfigDef("DefaultLanguage", &cli.StringFlag{
Value: defaultLanguage,
Usage: "Default \"language\" value for each job",
}),
NewConfigDef("DefaultDist", &cli.StringFlag{
Value: defaultDist,
Usage: "Default \"dist\" value for each job",
}),
NewConfigDef("DefaultArch", &cli.StringFlag{
Value: defaultArch,
Usage: "Default \"arch\" value for each job",
}),
NewConfigDef("DefaultGroup", &cli.StringFlag{
Value: defaultGroup,
Usage: "Default \"group\" value for each job",
}),
NewConfigDef("DefaultOS", &cli.StringFlag{
Value: defaultOS,
Usage: "Default \"os\" value for each job",
}),
NewConfigDef("HardTimeout", &cli.DurationFlag{
Value: defaultHardTimeout,
Usage: "The outermost (maximum) timeout for a given job, at which time the job is cancelled",
}),
NewConfigDef("InitialSleep", &cli.DurationFlag{
Value: defaultInitialSleep,
Usage: "The time to sleep prior to opening log and starting job",
}),
NewConfigDef("LogTimeout", &cli.DurationFlag{
Value: defaultLogTimeout,
Usage: "The timeout for a job that's not outputting anything",
}),
NewConfigDef("ScriptUploadTimeout", &cli.DurationFlag{
Value: defaultScriptUploadTimeout,
Usage: "The timeout for the script upload step",
}),
NewConfigDef("StartupTimeout", &cli.DurationFlag{
Value: defaultStartupTimeout,
Usage: "The timeout for execution environment to be ready",
}),
NewConfigDef("MaxLogLength", &cli.IntFlag{
Value: defaultMaxLogLength,
Usage: "The maximum length of a log in bytes",
}),
NewConfigDef("JobBoardURL", &cli.StringFlag{
Usage: "The base URL for job-board used with http queue",
}),
NewConfigDef("TravisSite", &cli.StringFlag{
Usage: "Either 'org' or 'com', used for job-board",
}),
NewConfigDef("StateUpdatePoolSize", &cli.IntFlag{
Usage: "The pool size for state update workers",
Value: 3,
}),
NewConfigDef("LogPoolSize", &cli.IntFlag{
Usage: "The pool size for log workers",
Value: 3,
}),
NewConfigDef("RabbitMQSharding", &cli.BoolFlag{
Usage: "Enable sharding for the logs AMQP queue",
}),
// build script generator flags
NewConfigDef("BuildCacheFetchTimeout", &cli.DurationFlag{
Value: defaultBuildCacheFetchTimeout,
}),
NewConfigDef("BuildCachePushTimeout", &cli.DurationFlag{
Value: defaultBuildCachePushTimeout,
}),
NewConfigDef("BuildAptCache", &cli.StringFlag{}),
NewConfigDef("BuildNpmCache", &cli.StringFlag{}),
NewConfigDef("BuildParanoid", &cli.BoolFlag{}),
NewConfigDef("BuildFixResolvConf", &cli.BoolFlag{}),
NewConfigDef("BuildFixEtcHosts", &cli.BoolFlag{}),
NewConfigDef("BuildCacheType", &cli.StringFlag{}),
NewConfigDef("BuildCacheS3Scheme", &cli.StringFlag{}),
NewConfigDef("BuildCacheS3Region", &cli.StringFlag{}),
NewConfigDef("BuildCacheS3Bucket", &cli.StringFlag{}),
NewConfigDef("BuildCacheS3AccessKeyID", &cli.StringFlag{}),
NewConfigDef("BuildCacheS3SecretAccessKey", &cli.StringFlag{}),
NewConfigDef("BuildTraceEnabled", &cli.BoolFlag{
Usage: "Enable downloading build traces",
}),
NewConfigDef("BuildTraceS3Bucket", &cli.StringFlag{}),
NewConfigDef("BuildTraceS3KeyPrefix", &cli.StringFlag{}),
NewConfigDef("BuildTraceS3Region", &cli.StringFlag{}),
// non-config and special case flags
NewConfigDef("PayloadFilterExecutable", &cli.StringFlag{
Usage: "External executable which will be called to filter the json to be sent to the build script generator",
}),
NewConfigDef("SkipShutdownOnLogTimeout", &cli.BoolFlag{
Usage: "Special-case mode to aid with debugging timed out jobs",
}),
NewConfigDef("BuildAPIInsecureSkipVerify", &cli.BoolFlag{
Usage: "Skip build API TLS verification (useful for Enterprise and testing)",
}),
NewConfigDef("ProgressType", &cli.StringFlag{
Usage: "Report progress for supported backends (valid values \"text\" or unset)",
}),
NewConfigDef("remote-controller-addr", &cli.StringFlag{
Usage: "enable remote controller http api (and pprof) at address",
}),
NewConfigDef("remote-controller-auth", &cli.StringFlag{
Usage: "username:password for http api basic auth for remote controller",
}),
NewConfigDef("silence-metrics", &cli.BoolFlag{
Usage: "deprecated flag",
}),
NewConfigDef("log-metrics", &cli.BoolFlag{
Usage: "periodically print metrics to the stdout",
}),
NewConfigDef("echo-config", &cli.BoolFlag{
Usage: "echo parsed config and exit",
}),
NewConfigDef("list-backend-providers", &cli.BoolFlag{
Usage: "echo backend provider list and exit",
}),
NewConfigDef("update-images", &cli.BoolFlag{
Usage: "update images for LXD backend from job-board",
}),
NewConfigDef("debug", &cli.BoolFlag{
Usage: "set log level to debug",
}),
NewConfigDef("start-hook", &cli.StringFlag{
Usage: "executable to run just before starting",
}),
NewConfigDef("stop-hook", &cli.StringFlag{
Usage: "executable to run just before exiting",
}),
NewConfigDef("heartbeat-url", &cli.StringFlag{
Usage: "health check and/or supervisor check URL (expects response: {\"state\": \"(up|down)\"})",
}),
NewConfigDef("heartbeat-url-auth-token", &cli.StringFlag{
Usage: "auth token for health check and/or supervisor check URL (may be \"file://path/to/file\")",
}),
NewConfigDef("Infra", &cli.StringFlag{
Usage: "infra tag, e.g. gce or ec2",
}),
NewConfigDef("StackdriverTraceAccountJSON", &cli.StringFlag{
Usage: "file path or JSON to stackdriver trace on Google Cloud",
}),
NewConfigDef("StackdriverProjectID", &cli.StringFlag{
Usage: "google cloud project ID where where traces are exported and viewed",
}),
NewConfigDef("OpencensusTracingEnabled", &cli.BoolFlag{
Usage: "enable tracing for worker with google stackdriver client",
}),
NewConfigDef("OpencensusSamplingRate", &cli.IntFlag{
Usage: "sample rate for trace as an inverse fraction - for sample rate n, every nth event will be sampled",
Value: 1,
}),
}
// Flags is the list of all CLI flags accepted by travis-worker
Flags = defFlags(defs)
)
// twEnvVars returns the comma-joined, upper-cased list of environment
// variable names consulted for key (the TRAVIS_WORKER_-prefixed name first,
// then the bare key).
func twEnvVars(key string) string {
	names := twEnvVarsSlice(key)
	return strings.ToUpper(strings.Join(names, ","))
}
// twEnvVarsSlice returns the environment variable names consulted for key:
// the TRAVIS_WORKER_-prefixed name first, then the bare key itself.
func twEnvVarsSlice(key string) []string {
	prefixed := fmt.Sprintf("TRAVIS_WORKER_%s", key)
	return []string{prefixed, key}
}
// init seeds defaultBaseDir with the current working directory.
func init() {
	wd, err := os.Getwd()
	if err != nil {
		// Best effort: keep the compiled-in default (".") when the working
		// directory cannot be determined.
		return
	}
	defaultBaseDir = wd
}
// defFlags extracts the cli.Flag from each ConfigDef, preserving order.
func defFlags(defs []*ConfigDef) []cli.Flag {
	flags := make([]cli.Flag, 0, len(defs))
	for _, d := range defs {
		flags = append(flags, d.Flag)
	}
	return flags
}
// ConfigDef couples a Config struct field (or a raw flag name) with the CLI
// flag that populates it.
type ConfigDef struct {
	FieldName string   // Config struct field name, or the flag name itself
	Name      string   // CLI flag name (kebab-case)
	EnvVar    string   // environment variable name (upper-case, underscores)
	Flag      cli.Flag // flag definition handed to the CLI parser
	HasField  bool     // true when FieldName names an actual Config field (FieldName != Name)
}
// NewConfigDef builds a ConfigDef for the given Config field name (or raw
// flag name, when fieldName starts lower-case) and CLI flag. The flag's
// Name and EnvVar are filled in from the derived flag name.
func NewConfigDef(fieldName string, flag cli.Flag) *ConfigDef {
	if fieldName == "" {
		panic("empty field name")
	}

	// A lower-case first letter means fieldName is already the flag name and
	// has no corresponding Config struct field; otherwise the flag name comes
	// from the field's `config` struct tag.
	var name string
	if string(fieldName[0]) == strings.ToLower(string(fieldName[0])) {
		name = fieldName
	} else {
		field, _ := configType.FieldByName(fieldName)
		name = field.Tag.Get("config")
	}

	def := &ConfigDef{
		FieldName: fieldName,
		Name:      name,
		EnvVar:    strings.ToUpper(strings.Replace(name, "-", "_", -1)),
		HasField:  fieldName != name,
	}

	// For the known flag kinds, stamp the flag name and prefixed env vars
	// onto the flag itself; unknown kinds leave def.Flag nil, as before.
	envPrefixed := twEnvVars(def.EnvVar)
	switch f := flag.(type) {
	case *cli.BoolFlag:
		f.Name, f.EnvVar = name, envPrefixed
		def.Flag = f
	case *cli.StringFlag:
		f.Name, f.EnvVar = name, envPrefixed
		def.Flag = f
	case *cli.IntFlag:
		f.Name, f.EnvVar = name, envPrefixed
		def.Flag = f
	case *cli.DurationFlag:
		f.Name, f.EnvVar = name, envPrefixed
		def.Flag = f
	}
	return def
}
// Config contains all the configuration needed to run the worker.
// Each field's `config` tag is the CLI flag name; the matching environment
// variable is the tag upper-cased with dashes replaced by underscores and
// prefixed with TRAVIS_WORKER_ (see NewConfigDef / twEnvVars).
type Config struct {
	ProviderName               string        `config:"provider-name"`
	QueueType                  string        `config:"queue-type"`
	AmqpURI                    string        `config:"amqp-uri"`
	AmqpInsecure               bool          `config:"amqp-insecure"`
	AmqpTlsCert                string        `config:"amqp-tls-cert"`
	AmqpTlsCertPath            string        `config:"amqp-tls-cert-path"`
	AmqpHeartbeat              time.Duration `config:"amqp-heartbeat"`
	AmqpConsumerPriority       int           `config:"amqp-consumer-priority"`
	BaseDir                    string        `config:"base-dir"`
	PoolSize                   int           `config:"pool-size"`
	BuildAPIURI                string        `config:"build-api-uri"`
	QueueName                  string        `config:"queue-name"`
	LibratoEmail               string        `config:"librato-email"`
	LibratoToken               string        `config:"librato-token"`
	LibratoSource              string        `config:"librato-source"`
	LogsAmqpURI                string        `config:"logs-amqp-uri"`
	LogsAmqpTlsCert            string        `config:"logs-amqp-tls-cert"`
	LogsAmqpTlsCertPath        string        `config:"logs-amqp-tls-cert-path"`
	SentryDSN                  string        `config:"sentry-dsn"`
	Hostname                   string        `config:"hostname"`
	DefaultLanguage            string        `config:"default-language"`
	DefaultDist                string        `config:"default-dist"`
	DefaultArch                string        `config:"default-arch"`
	DefaultGroup               string        `config:"default-group"`
	DefaultOS                  string        `config:"default-os"`
	JobBoardURL                string        `config:"job-board-url"`
	TravisSite                 string        `config:"travis-site"`
	RabbitMQSharding           bool          `config:"rabbitmq-sharding"`
	StateUpdatePoolSize        int           `config:"state-update-pool-size"`
	LogPoolSize                int           `config:"log-pool-size"`
	FilePollingInterval        time.Duration `config:"file-polling-interval"`
	HTTPPollingInterval        time.Duration `config:"http-polling-interval"`
	HTTPRefreshClaimInterval   time.Duration `config:"http-refresh-claim-interval"`
	HardTimeout                time.Duration `config:"hard-timeout"`
	InitialSleep               time.Duration `config:"initial-sleep"`
	LogTimeout                 time.Duration `config:"log-timeout"`
	MaxLogLength               int           `config:"max-log-length"`
	ScriptUploadTimeout        time.Duration `config:"script-upload-timeout"`
	StartupTimeout             time.Duration `config:"startup-timeout"`
	BuildTraceEnabled          bool          `config:"build-trace-enabled"`
	BuildTraceS3Bucket         string        `config:"build-trace-s3-bucket"`
	BuildTraceS3KeyPrefix      string        `config:"build-trace-s3-key-prefix"`
	BuildTraceS3Region         string        `config:"build-trace-s3-region"`
	SentryHookErrors           bool          `config:"sentry-hook-errors"`
	BuildAPIInsecureSkipVerify bool          `config:"build-api-insecure-skip-verify"`
	SkipShutdownOnLogTimeout   bool          `config:"skip-shutdown-on-log-timeout"`

	// build script generator options
	BuildCacheFetchTimeout      time.Duration `config:"build-cache-fetch-timeout"`
	BuildCachePushTimeout       time.Duration `config:"build-cache-push-timeout"`
	BuildParanoid               bool          `config:"build-paranoid"`
	BuildFixResolvConf          bool          `config:"build-fix-resolv-conf"`
	BuildFixEtcHosts            bool          `config:"build-fix-etc-hosts"`
	BuildAptCache               string        `config:"build-apt-cache"`
	BuildNpmCache               string        `config:"build-npm-cache"`
	BuildCacheType              string        `config:"build-cache-type"`
	BuildCacheS3Scheme          string        `config:"build-cache-s3-scheme"`
	BuildCacheS3Region          string        `config:"build-cache-s3-region"`
	BuildCacheS3Bucket          string        `config:"build-cache-s3-bucket"`
	BuildCacheS3AccessKeyID     string        `config:"build-cache-s3-access-key-id"`
	BuildCacheS3SecretAccessKey string        `config:"build-cache-s3-secret-access-key"`

	PayloadFilterExecutable     string `config:"payload-filter-executable"`
	ProgressType                string `config:"progress-type"`
	Infra                       string `config:"infra"`
	StackdriverTraceAccountJSON string `config:"stackdriver-trace-account-json"`
	StackdriverProjectID        string `config:"stackdriver-project-id"`
	OpencensusTracingEnabled    bool   `config:"opencensus-tracing-enabled"`
	OpencensusSamplingRate      int    `config:"opencensus-sampling-rate"`

	// ProviderConfig has no `config` tag; it is populated separately from
	// the environment by FromCLIContext.
	ProviderConfig *ProviderConfig
}
// FromCLIContext creates a Config using a cli.Context by pulling configuration
// from the flags in the context. The provider configuration is then read from
// the environment based on the selected provider name.
func FromCLIContext(c *cli.Context) *Config {
	cfg := &Config{}
	cfgVal := reflect.ValueOf(cfg).Elem()
	for _, def := range defs {
		if !def.HasField {
			continue
		}
		field := cfgVal.FieldByName(def.FieldName)
		// Dispatch on the concrete flag type to pick the matching accessor.
		switch def.Flag.(type) {
		case *cli.BoolFlag:
			field.SetBool(c.Bool(def.Name))
		case *cli.DurationFlag:
			field.Set(reflect.ValueOf(c.Duration(def.Name)))
		case *cli.IntFlag:
			field.SetInt(int64(c.Int(def.Name)))
		case *cli.StringFlag:
			field.SetString(c.String(def.Name))
		}
	}
	cfg.ProviderConfig = ProviderConfigFromEnviron(cfg.ProviderName)
	return cfg
}
// WriteEnvConfig writes the given configuration to out. The format of the
// output is a list of environment variables settings suitable to be sourced
// by a Bourne-like shell.
//
// Keys are emitted in sorted order so the output is deterministic, followed
// by the provider-specific configuration.
func WriteEnvConfig(cfg *Config, out io.Writer) {
	// Collect flag-name → current-value pairs via reflection over the
	// Config fields registered in defs.
	cfgMap := map[string]interface{}{}
	cfgElem := reflect.ValueOf(cfg).Elem()
	for _, def := range defs {
		if !def.HasField {
			continue
		}
		field := cfgElem.FieldByName(def.FieldName)
		cfgMap[def.Name] = field.Interface()
	}
	// Sort keys for stable output.
	sortedCfgMapKeys := []string{}
	for key := range cfgMap {
		sortedCfgMapKeys = append(sortedCfgMapKeys, key)
	}
	sort.Strings(sortedCfgMapKeys)
	fmt.Fprintf(out, "# travis-worker env config generated %s\n", time.Now().UTC())
	for _, key := range sortedCfgMapKeys {
		envKey := fmt.Sprintf("TRAVIS_WORKER_%s", strings.ToUpper(strings.Replace(key, "-", "_", -1)))
		fmt.Fprintf(out, "export %s=%q\n", envKey, fmt.Sprintf("%v", cfgMap[key]))
	}
	// Provider-specific settings use a TRAVIS_WORKER_<PROVIDER>_ prefix.
	fmt.Fprintf(out, "\n# travis-worker provider config:\n")
	cfg.ProviderConfig.Each(func(key, value string) {
		envKey := strings.ToUpper(fmt.Sprintf("TRAVIS_WORKER_%s_%s", cfg.ProviderName, strings.Replace(key, "-", "_", -1)))
		fmt.Fprintf(out, "export %s=%q\n", envKey, value)
	})
	fmt.Fprintf(out, "# end travis-worker env config\n")
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// smithcmp is a tool to execute random queries on a database. A TOML
// file provides configuration for which databases to connect to. If there
// is more than one, only non-mutating statements are generated, and the
// output is compared, exiting if there is a difference. If there is only
// one database, mutating and non-mutating statements are generated. A
// flag in the TOML controls whether Postgres-compatible output is generated.
//
// Explicit SQL statements can be specified (skipping sqlsmith generation)
// using the top-level SQL array. Placeholders (`$1`, etc.) are
// supported. Random datums of the correct type will be filled in.
package main
import (
"context"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/cockroachdb/cockroach/pkg/cmd/cmpconn"
"github.com/cockroachdb/cockroach/pkg/internal/sqlsmith"
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/lib/pq/oid"
)
// usage prints a usage message naming the binary and exits with a non-zero
// status.
func usage() {
	const use = `Usage of %s:
	%[1]s config.toml
`
	fmt.Printf(use, os.Args[0])
	os.Exit(1)
}
// options is the TOML-decoded configuration for a smithcmp run.
type options struct {
	Postgres        bool     // generate Postgres-compatible output
	InitSQL         string   // SQL executed once on every connection
	Smither         string   // name of the database whose connection seeds sqlsmith
	Seed            int64    // RNG seed; a negative value selects a time-based seed
	TimeoutMins     int      // total run timeout in minutes (defaults to 15 in main)
	StmtTimeoutSecs int      // per-statement timeout in seconds (defaults to 60 in main)
	SQL             []string // explicit statements; when set, sqlsmith generation is skipped
	// Databases maps a connection name to its address, per-connection init
	// SQL, and whether mutating statements are allowed against it.
	Databases map[string]struct {
		Addr           string
		InitSQL        string
		AllowMutations bool
	}
}
// sqlMutators are the statement mutations applied when a database allows
// mutations (see enableMutations).
var sqlMutators = []randgen.Mutator{randgen.ColumnFamilyMutator}
// enableMutations returns mutations unchanged when shouldEnable is true,
// and nil otherwise.
func enableMutations(shouldEnable bool, mutations []randgen.Mutator) []randgen.Mutator {
	if !shouldEnable {
		return nil
	}
	return mutations
}
// main reads the TOML config named on the command line, connects to the
// configured databases, then loops generating (or picking) statements and
// either comparing results across connections or executing them on each
// connection, pinging (and reconnecting) the servers after every iteration.
func main() {
	args := os.Args[1:]
	if len(args) != 1 {
		usage()
	}
	tomlData, err := ioutil.ReadFile(args[0])
	if err != nil {
		log.Fatal(err)
	}
	var opts options
	if err := toml.Unmarshal(tomlData, &opts); err != nil {
		log.Fatal(err)
	}
	timeout := time.Duration(opts.TimeoutMins) * time.Minute
	if timeout <= 0 {
		timeout = 15 * time.Minute
	}
	stmtTimeout := time.Duration(opts.StmtTimeoutSecs) * time.Second
	if stmtTimeout <= 0 {
		stmtTimeout = time.Minute
	}
	// BUGFIX: choose the effective seed *before* seeding the RNG. Previously
	// the RNG was created from the raw configured seed and only afterwards
	// was a negative seed replaced by a time-based one, so the printed seed
	// did not match the seed actually driving the run.
	if opts.Seed < 0 {
		opts.Seed = timeutil.Now().UnixNano()
		fmt.Println("seed:", opts.Seed)
	}
	rng := rand.New(rand.NewSource(opts.Seed))
	conns := map[string]cmpconn.Conn{}
	for name, db := range opts.Databases {
		var err error
		mutators := enableMutations(db.AllowMutations, sqlMutators)
		if opts.Postgres {
			mutators = append(mutators, randgen.PostgresMutator)
		}
		conns[name], err = cmpconn.NewConnWithMutators(
			db.Addr, rng, mutators, db.InitSQL, opts.InitSQL)
		if err != nil {
			log.Fatalf("%s (%s): %+v", name, db.Addr, err)
		}
	}
	// With more than one connection we compare outputs instead of just
	// executing statements.
	compare := len(conns) > 1
	smithOpts := []sqlsmith.SmitherOption{
		sqlsmith.AvoidConsts(),
	}
	if opts.Postgres {
		smithOpts = append(smithOpts, sqlsmith.PostgresMode())
	} else if compare {
		smithOpts = append(smithOpts,
			sqlsmith.CompareMode(),
			sqlsmith.DisableCRDBFns(),
		)
	}
	if _, ok := conns[opts.Smither]; !ok {
		log.Fatalf("Smither option not present in databases: %s", opts.Smither)
	}
	var smither *sqlsmith.Smither
	var stmts []statement
	if len(opts.SQL) == 0 {
		// No explicit SQL: generate statements with sqlsmith.
		smither, err = sqlsmith.NewSmither(conns[opts.Smither].DB(), rng, smithOpts...)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		// Explicit SQL: prepare each statement once to learn its placeholder
		// types so random datums can be generated per execution.
		stmts = make([]statement, len(opts.SQL))
		for i, stmt := range opts.SQL {
			ps, err := conns[opts.Smither].PGX().Prepare("", stmt)
			if err != nil {
				// BUGFIX: the error and the statement were previously passed
				// in reversed order relative to the %v / %s verbs.
				log.Fatalf("bad SQL statement on %s: %v\nSQL:\n%s", opts.Smither, err, stmt)
			}
			var placeholders []*types.T
			for _, param := range ps.ParameterOIDs {
				typ, ok := types.OidToType[oid.Oid(param)]
				if !ok {
					log.Fatalf("unknown oid: %v", param)
				}
				placeholders = append(placeholders, typ)
			}
			stmts[i] = statement{
				stmt:         stmt,
				placeholders: placeholders,
			}
		}
	}
	var prep, exec string
	ctx := context.Background()
	done := time.After(timeout)
	for i := 0; true; i++ {
		select {
		case <-done:
			return
		default:
		}
		fmt.Printf("stmt: %d\n", i)
		if smither != nil {
			exec = smither.Generate()
		} else {
			// Build "PREPARE sN AS <stmt>" and an EXECUTE with one random
			// datum per placeholder.
			randStatement := stmts[rng.Intn(len(stmts))]
			name := fmt.Sprintf("s%d", i)
			prep = fmt.Sprintf("PREPARE %s AS\n%s;", name, randStatement.stmt)
			var sb strings.Builder
			fmt.Fprintf(&sb, "EXECUTE %s", name)
			for i, typ := range randStatement.placeholders {
				if i > 0 {
					sb.WriteString(", ")
				} else {
					sb.WriteString(" (")
				}
				d := randgen.RandDatum(rng, typ, true)
				fmt.Println(i, typ, d, tree.Serialize(d))
				sb.WriteString(tree.Serialize(d))
			}
			if len(randStatement.placeholders) > 0 {
				fmt.Fprintf(&sb, ")")
			}
			fmt.Fprintf(&sb, ";")
			exec = sb.String()
			fmt.Println(exec)
		}
		if compare {
			if err := cmpconn.CompareConns(
				ctx, stmtTimeout, conns, prep, exec, true, /* ignoreSQLErrors */
			); err != nil {
				fmt.Printf("prep:\n%s;\nexec:\n%s;\nERR: %s\n\n", prep, exec, err)
				os.Exit(1)
			}
		} else {
			for _, conn := range conns {
				if err := conn.Exec(ctx, prep+exec); err != nil {
					fmt.Println(err)
				}
			}
		}
		// Make sure the servers are alive.
		for name, conn := range conns {
			start := timeutil.Now()
			fmt.Printf("pinging %s...", name)
			if err := conn.Ping(); err != nil {
				fmt.Printf("\n%s: ping failure: %v\nprevious SQL:\n%s;\n%s;\n", name, err, prep, exec)
				// Try to reconnect.
				db := opts.Databases[name]
				newConn, err := cmpconn.NewConnWithMutators(
					db.Addr, rng, enableMutations(db.AllowMutations, sqlMutators),
					db.InitSQL, opts.InitSQL,
				)
				if err != nil {
					log.Fatalf("tried to reconnect: %v\n", err)
				}
				conns[name] = newConn
			}
			fmt.Printf(" %s\n", timeutil.Since(start))
		}
	}
}
// statement is an explicit SQL statement plus the types of its placeholders,
// used in main to build PREPARE/EXECUTE pairs with random datums.
type statement struct {
	stmt         string
	placeholders []*types.T
}
|
package keyboard
// activeMediaKeys tracks which media keys are currently held down, so that a
// key-up event is emitted exactly once when a key leaves the pressed bitmask.
var activeMediaKeys = map[Key]bool{}
// HandleMediaKey dispatches key-down / key-up events for the media keys
// encoded in the keys bitmask. For each bit that is set, a key-down is sent
// and the key is recorded as active; for each bit that is clear but was
// previously active, a key-up is sent and the key is cleared.
//
// The seven previously copy-pasted branches are replaced by a single
// table-driven loop with identical behavior and ordering.
func (k Keyboard) HandleMediaKey(keys byte) {
	mediaKeyBits := []struct {
		mask byte
		key  Key
	}{
		{1, MediaNext},
		{2, MediaBack},
		{4, MediaStop},
		{8, MediaPlayPause},
		{16, MediaMute},
		{32, MediaVolUp},
		{64, MediaVolDown},
	}
	for _, mk := range mediaKeyBits {
		if keys&mk.mask != 0 {
			k.CurrentMode.DoKeyDown(mk.key)
			activeMediaKeys[mk.key] = true
		} else if activeMediaKeys[mk.key] {
			k.CurrentMode.DoKeyUp(mk.key)
			activeMediaKeys[mk.key] = false
		}
	}
}
|
package test
const (
	// Endpoint is the slack endpoint which can be used for testing calling code.
	// NOTE(review): api.test is presumably Slack's echo/test method — confirm
	// against the Slack Web API docs before relying on response shape.
	Endpoint = "https://slack.com/api/api.test"
)
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package macaroon_test
import (
"testing"
assert "github.com/stretchr/testify/require"
"storj.io/common/macaroon"
)
// TestNilMacaroon verifies that an unrestricted macaroon built from a nil
// secret serializes, parses back equal, and accepts a first-party caveat.
func TestNilMacaroon(t *testing.T) {
	mac, err := macaroon.NewUnrestricted(nil)
	assert.NoError(t, err)
	assert.NotNil(t, mac)
	data := mac.Serialize()
	assert.NotNil(t, data)
	assert.NotEmpty(t, data)
	mac2, err := macaroon.ParseMacaroon(data)
	assert.NoError(t, err)
	assert.NotNil(t, mac2)
	assert.Equal(t, mac, mac2)
	t.Run("Successful add Caveat", func(t *testing.T) {
		mac, err = mac.AddFirstPartyCaveat([]byte("cav1"))
		assert.NotNil(t, mac)
		assert.NoError(t, err)
		// FIX: use Len; the original passed (actual, expected) to Equal in
		// reversed order, which garbles failure messages.
		assert.Len(t, mac.Caveats(), 1)
	})
	t.Run("Successful serialization", func(t *testing.T) {
		data := mac.Serialize()
		assert.NotNil(t, data)
		assert.NotEmpty(t, data)
		mac2, err := macaroon.ParseMacaroon(data)
		assert.NotNil(t, mac2)
		assert.NoError(t, err)
		assert.Equal(t, mac, mac2)
	})
}
// TestMacaroon exercises the full macaroon lifecycle with a real secret:
// creation, caveat addition, serialization round-trip, and validation.
func TestMacaroon(t *testing.T) {
	secret, err := macaroon.NewSecret()
	assert.NoError(t, err)
	assert.NotNil(t, secret)
	// FIX: use Len; the original passed (actual, expected) to Equal in
	// reversed order, which garbles failure messages.
	assert.Len(t, secret, 32)
	mac, err := macaroon.NewUnrestricted(secret)
	assert.NoError(t, err)
	assert.NotNil(t, mac)
	nonce := mac.Head()
	assert.NotNil(t, nonce)
	assert.Len(t, nonce, 32)
	t.Run("Successful add Caveat", func(t *testing.T) {
		mac, err = mac.AddFirstPartyCaveat([]byte("cav1"))
		assert.NotNil(t, mac)
		assert.NoError(t, err)
		assert.Len(t, mac.Caveats(), 1)
	})
	t.Run("Successful serialization", func(t *testing.T) {
		data := mac.Serialize()
		assert.NotNil(t, data)
		assert.NotEmpty(t, data)
		mac2, err := macaroon.ParseMacaroon(data)
		assert.NotNil(t, mac2)
		assert.NoError(t, err)
		assert.Equal(t, mac, mac2)
	})
	t.Run("Successful Unpack", func(t *testing.T) {
		ok := mac.Validate(secret)
		assert.True(t, ok)
		expectedTails := mac.Tails(secret)
		ok, tails := mac.ValidateAndTails(secret)
		assert.True(t, ok)
		assert.EqualValues(t, expectedTails, tails)
		c := mac.Caveats()
		assert.NotNil(t, c)
		assert.NotEmpty(t, c)
	})
}
|
package handler
import (
"log"
"net/http"
"strconv"
"crud-api-class/domain"
"github.com/gin-gonic/gin"
)
// Create handles a form submission that creates a new element from the
// "url" form value and redirects back to the index page.
func Create(c *gin.Context) {
	// BUGFIX: FormValue replaces ParseForm + Form["url"][0], which panicked
	// when the field was absent.
	url := c.Request.FormValue("url")
	if err := domain.CreateNewElement(url); err != nil {
		// BUGFIX: log.Fatal on a request error terminated the whole server
		// process; log and return an HTTP error instead.
		log.Println("fail create: ", err)
		c.String(http.StatusInternalServerError, "fail create")
		return
	}
	c.Redirect(http.StatusFound, "/")
}
// Update handles a form submission that updates the element identified by
// the "id" query parameter with the "url" form value, then redirects back to
// the index page.
func Update(c *gin.Context) {
	// BUGFIX: FormValue replaces ParseForm + Form["url"][0], which panicked
	// when the field was absent.
	url := c.Request.FormValue("url")
	id, err := strconv.ParseUint(c.Query("id"), 10, 64)
	if err != nil {
		// BUGFIX: log.Fatal on a malformed request id terminated the whole
		// server process; report a client error instead.
		log.Println("fail convert id: ", err)
		c.String(http.StatusBadRequest, "fail convert id")
		return
	}
	if err := domain.UpdateElement(id, url); err != nil {
		log.Println("fail update: ", err)
		c.String(http.StatusInternalServerError, "fail update")
		return
	}
	c.Redirect(http.StatusFound, "/")
}
// Delete removes the element identified by the "id" query parameter and
// redirects back to the index page.
func Delete(c *gin.Context) {
	id, err := strconv.ParseUint(c.Query("id"), 10, 64)
	if err != nil {
		// BUGFIX: log.Fatal on a malformed request id terminated the whole
		// server process; report a client error instead.
		log.Println("fail convert id: ", err)
		c.String(http.StatusBadRequest, "fail convert id")
		return
	}
	if err := domain.DeleteElement(id); err != nil {
		log.Println("fail delete: ", err)
		c.String(http.StatusInternalServerError, "fail delete")
		return
	}
	c.Redirect(http.StatusFound, "/")
}
|
package hud
import (
"io"
"time"
"github.com/tilt-dev/tilt/pkg/model/logstore"
)
// backoffInit is the wait applied before the first reprint of a given
// progress line (see Print).
var backoffInit = 5 * time.Second

// backoffMultiplier scales the reprint wait after each print of the same
// progress line, yielding exponential backoff.
var backoffMultiplier = time.Duration(2)

// Stdout is the writer that receives printed log lines.
type Stdout io.Writer
// IncrementalPrinter writes log lines to stdout, rate-limiting repeated
// progress lines with a per-key exponential backoff.
type IncrementalPrinter struct {
	progress map[progressKey]progressStatus // last-print state per progress line
	stdout   Stdout
}
// NewIncrementalPrinter returns a printer that writes to stdout with an
// empty progress-tracking table.
func NewIncrementalPrinter(stdout Stdout) *IncrementalPrinter {
	p := &IncrementalPrinter{stdout: stdout}
	p.progress = map[progressKey]progressStatus{}
	return p
}
// PrintNewline emits a single newline to the output stream, ignoring any
// write error.
func (p *IncrementalPrinter) PrintNewline() {
	_, _ = p.stdout.Write([]byte("\n"))
}
// Print writes the given log lines to stdout. Lines carrying a progress ID
// are rate-limited per (span, progress ID) key: a repeat is skipped unless
// it is marked must-print, has never been printed, or its backoff interval
// has elapsed since the last print.
func (p *IncrementalPrinter) Print(lines []logstore.LogLine) {
	for _, line := range lines {
		// Naive progress implementation: skip lines that have already been printed
		// recently. This works with any output stream.
		//
		// TODO(nick): Use ANSI codes to overwrite previous lines. It requires
		// a little extra bookkeeping about where to find the progress line,
		// and only works on terminals.
		progressID := line.ProgressID
		key := progressKey{spanID: line.SpanID, progressID: progressID}
		if progressID != "" {
			status, hasBeenPrinted := p.progress[key]
			shouldPrint := line.ProgressMustPrint ||
				!hasBeenPrinted ||
				line.Time.Sub(status.lastPrinted) > status.printWait
			if !shouldPrint {
				continue
			}
		}
		_, _ = io.WriteString(p.stdout, line.Text)
		if progressID != "" {
			// Exponential backoff: the first reprint waits backoffInit, and
			// each subsequent print of this line doubles the wait.
			status := p.progress[key]
			newWait := backoffInit
			if status.printWait > 0 {
				newWait = backoffMultiplier * status.printWait
			}
			p.progress[key] = progressStatus{
				lastPrinted: line.Time,
				printWait:   newWait,
			}
		}
	}
}
// progressKey identifies a progress line by its span and progress ID.
type progressKey struct {
	spanID     logstore.SpanID
	progressID string
}

// progressStatus records when a progress line was last printed and how long
// to wait before printing it again.
type progressStatus struct {
	lastPrinted time.Time
	printWait   time.Duration
}
|
package src;
import (
"os/user"
"io/ioutil"
"github.com/BurntSushi/toml"
"github.com/pkg/errors"
"log"
)
// IdentityOrNameFromCli resolves arg against the ~/.devguard/named.toml
// alias table. When the file exists and maps arg to a string, that string is
// returned; in every other non-fatal case arg is returned unchanged. A file
// that exists but cannot be decoded is fatal.
//
// FIX: removed the stray trailing semicolons (not gofmt-clean) and renamed
// the local variable that shadowed the os/user package name.
func IdentityOrNameFromCli(arg string) string {
	u, err := user.Current()
	if err != nil {
		return arg
	}
	content, err := ioutil.ReadFile(u.HomeDir + "/.devguard/named.toml")
	if err != nil {
		return arg
	}
	var f map[string]interface{}
	if _, err := toml.Decode(string(content), &f); err != nil {
		log.Fatal(errors.Wrap(err, "cannot decode ~/.devguard/named.toml"))
	}
	if f[arg] == nil {
		return arg
	}
	if sm, ok := f[arg].(string); ok {
		return sm
	}
	return arg
}
|
package gohaystack
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
)
// Kind is a supported type for a haystack value
type Kind int

// Unit is the measurement unit attached to a haystack number; nil means the
// number carries no unit.
type Unit *string

// NewUnit returns a Unit wrapping a copy of u.
func NewUnit(u string) Unit {
	unit := u
	return &unit
}
// NewNumber builds a haystack number Value from a float and an optional
// unit (nil for unit-less numbers).
func NewNumber(value float32, unit Unit) *Value {
	v := &Value{kind: HaystackTypeNumber}
	v.number.value = value
	v.number.unit = unit
	return v
}
// NewURL wraps u as a haystack URI Value.
func NewURL(u *url.URL) *Value {
	v := &Value{}
	v.kind = HaystackTypeURI
	v.u = u
	return v
}
// GetKind returns the Kind of the underlying value.
func (v *Value) GetKind() Kind {
	return v.kind
}
// Value is an haystack value. Exactly one of the payload fields below is
// meaningful, selected by kind; the rest keep their zero values.
type Value struct {
	kind Kind
	str  *string // HaystackTypeStr
	// number holds the value and optional unit for HaystackTypeNumber.
	number struct {
		value float32
		unit  Unit
	}
	b    bool              // HaystackTypeBool
	t    time.Time         // date, time, and datetime kinds
	u    *url.URL          // HaystackTypeURI
	ref  *HaystackID       // HaystackTypeRef
	g    *Grid             // HaystackTypeGrid
	dict map[string]*Value // HaystackTypeDict
	list []*Value          // HaystackTypeList
	// coord holds latitude/longitude for HaystackTypeCoord.
	coord struct {
		long float32
		lat  float32
	}
}
// unmarshalJSONNotString decodes a non-string JSON value (list, grid, dict,
// or bool) into v by attempting each decoding in turn and keeping the first
// that succeeds.
//
// NOTE(review): the trial order matters — a JSON object decodes both as a
// Grid struct and as a dict map, so grid must stay ahead of dict; verify
// before reordering.
func (v *Value) unmarshalJSONNotString(b []byte) error {
	var list []*Value
	err := json.Unmarshal(b, &list)
	if err == nil {
		v.kind = HaystackTypeList
		v.list = list
		return nil
	}
	var g Grid
	err = json.Unmarshal(b, &g)
	if err == nil {
		v.kind = HaystackTypeGrid
		v.g = &g
		return nil
	}
	var dict map[string]*Value
	err = json.Unmarshal(b, &dict)
	if err == nil {
		v.kind = HaystackTypeDict
		v.dict = dict
		return nil
	}
	var boolean bool
	err = json.Unmarshal(b, &boolean)
	if err == nil {
		v.kind = HaystackTypeBool
		v.b = boolean
		return nil
	}
	return errors.New("not implemented")
}
// unmarshalJSONString decodes a haystack-encoded JSON string (already
// stripped of its surrounding double quotes) into v. A leading single-letter
// "<type>:" prefix selects the haystack type; no prefix means a plain
// string.
func (v *Value) unmarshalJSONString(b []byte) error {
	// Group 2 captures the optional one-letter type prefix, group 3 the
	// payload after the colon (or the whole string when there is no prefix).
	re := regexp.MustCompile(`^(([m\-znrsdhtucx]):)?(.*)$`)
	res := re.FindStringSubmatch(string(b))
	switch res[2] {
	case ``:
		// No prefix: plain string.
		v.kind = HaystackTypeStr
		val := res[3]
		v.str = &val
	case `m`:
		v.kind = HaystackTypeMarker
	case `-`:
		v.kind = HaystackTypeRemove
	case `z`:
		v.kind = HaystackTypeNA
	case "n":
		// Number: "<float> [unit]".
		elements := strings.Fields(res[3])
		f, err := strconv.ParseFloat(elements[0], 32)
		if err != nil {
			return err
		}
		var unit string
		if len(elements) > 2 {
			return errors.New("wrong entry for number, too many elements")
		}
		if len(elements) > 1 {
			unit = elements[1]
			v.number.unit = &unit
		}
		v.kind = HaystackTypeNumber
		v.number.value = float32(f)
	case `d`:
		// Date: "YYYY-MM-DD".
		t, err := time.Parse("2006-01-02", res[3])
		if err != nil {
			return err
		}
		v.kind = HaystackTypeDate
		v.t = t
	case `h`:
		// Time of day, anchored to 1970-01-01 UTC.
		t, err := time.Parse("2006-01-02 15:04:05 +0000 UTC", "1970-01-01 "+res[3]+" +0000 UTC")
		if err != nil {
			return err
		}
		v.kind = HaystackTypeTime
		v.t = t
	case `t`:
		// TODO: handle extra location
		// DateTime: RFC3339 timestamp, optionally followed by a tz name.
		elements := strings.Fields(res[3])
		if len(elements) == 2 {
			loc, err := time.LoadLocation(elements[1])
			if err != nil {
				return err
			}
			t, err := time.ParseInLocation(time.RFC3339, elements[0], loc)
			if err != nil {
				return err
			}
			v.kind = HaystackTypeDateTime
			v.t = t.In(loc)
			return nil
		}
		// NOTE(review): an empty payload would make elements[0] panic here —
		// TODO confirm callers never produce a bare "t:".
		t, err := time.Parse(time.RFC3339, elements[0])
		if err != nil {
			return err
		}
		v.kind = HaystackTypeDateTime
		v.t = t
	case `u`:
		u, err := url.Parse(res[3])
		if err != nil {
			return err
		}
		v.kind = HaystackTypeURI
		v.u = u
	case `c`:
		// Coordinates: "lat,long".
		elements := strings.Split(res[3], ",")
		if len(elements) != 2 {
			return errors.New("bad coordinates, expected lat,long")
		}
		lat, err := strconv.ParseFloat(elements[0], 32)
		if err != nil {
			return err
		}
		long, err := strconv.ParseFloat(elements[1], 32)
		if err != nil {
			return err
		}
		v.kind = HaystackTypeCoord
		v.coord.lat = float32(lat)
		v.coord.long = float32(long)
	/*
		case `x`:
	*/
	case `s`:
		v.kind = HaystackTypeStr
		val := res[3]
		v.str = &val
	case `r`:
		v.kind = HaystackTypeRef
		val := res[3]
		id := HaystackID(val)
		v.ref = &id
	default:
		// The `x` (XStr) prefix is matched by the regexp but has no case
		// above, so it lands here.
		return errors.New("not implemented")
	}
	return nil
}
// UnmarshalJSON extract a value from b. Quoted JSON strings carry the
// haystack type prefix (e.g. "n:", "r:") and are handled by
// unmarshalJSONString; every other JSON value is handled by
// unmarshalJSONNotString.
func (v *Value) UnmarshalJSON(b []byte) error {
	// len() of a nil slice is 0, so a single length check covers both the
	// nil and the empty case (the original redundantly tested b == nil too).
	if len(b) == 0 {
		return errors.New("Cannot unmarshal nil or empty value")
	}
	if isValidString(b) {
		return v.unmarshalJSONString(trimDoubleQuote(b))
	}
	return v.unmarshalJSONNotString(b)
}
// MarshalJSON encode the value in format compatible with haystack's JSON:
// https://www.project-haystack.org/doc/Json
//
// NOTE(review): HaystackTypeNull and HaystackTypeXStr have no case below and
// fall through to the "type not handled" error — TODO confirm intended.
func (v *Value) MarshalJSON() ([]byte, error) {
	var output string
	switch v.kind {
	case HaystackTypeBool:
		output = fmt.Sprintf("%v", v.b)
	case HaystackTypeDict:
		return json.Marshal(v.dict)
	case HaystackTypeList:
		return json.Marshal(v.list)
	case HaystackTypeGrid:
		return json.Marshal(v.g)
	case HaystackTypeStr:
		output = `"s:` + *v.str + `"`
	case HaystackTypeRef:
		output = `"r:` + string(*v.ref) + `"`
	case HaystackTypeRemove:
		output = `"-:"`
	case HaystackTypeMarker:
		output = `"m:"`
	case HaystackTypeNA:
		output = `"z:"`
	case HaystackTypeCoord:
		output = fmt.Sprintf(`"c:%v,%v"`, v.coord.lat, v.coord.long)
	case HaystackTypeDate:
		output = `"d:` + v.t.Format("2006-01-02") + `"`
	case HaystackTypeTime:
		output = `"h:` + v.t.Format("15:04:05") + `"`
	case HaystackTypeDateTime:
		output = `"t:` + v.t.Format(time.RFC3339) + `"`
	case HaystackTypeURI:
		output = `"u:` + (*v.u).String() + `"`
	case HaystackTypeNumber:
		// The unit, when present, is separated from the value by a space.
		var unit string
		if v.number.unit != nil {
			unit = ` ` + *v.number.unit
		}
		output = `"n:` + strconv.FormatFloat(float64(v.number.value), 'f', -1, 32) + unit + `"`
	default:
		return nil, errors.New("type not handled")
	}
	return []byte(output), nil
}
// NewRef wraps the haystack ID r as a reference Value.
func NewRef(r *HaystackID) *Value {
	v := &Value{}
	v.kind = HaystackTypeRef
	v.ref = r
	return v
}
// NewStr wraps a copy of s as a haystack string Value.
func NewStr(s string) *Value {
	str := s
	return &Value{
		kind: HaystackTypeStr,
		str:  &str,
	}
}
// MarkerValue is a shared Value representing the haystack marker type
// (serialized as "m:" by MarshalJSON).
var MarkerValue = &Value{
	kind: HaystackTypeMarker,
}
// GetHaystackID returns the underlying ID when the value is a reference;
// otherwise it returns an error.
func (v *Value) GetHaystackID() (*HaystackID, error) {
	if v.kind == HaystackTypeRef {
		return v.ref, nil
	}
	return nil, errors.New("value type is not a ref")
}
// GetString returns the underlying string; it returns an error when the
// value is not a haystack string.
func (v *Value) GetString() (string, error) {
	if v.kind == HaystackTypeStr {
		return *v.str, nil
	}
	return "", errors.New("value type is not a string")
}
// The Kind enumeration of haystack value types. The string shown for each
// kind is its JSON encoding prefix (see MarshalJSON / unmarshalJSONString).
const (
	// HaystackTypeUndefined ...
	HaystackTypeUndefined Kind = iota
	// HaystackTypeGrid is a Grid object
	HaystackTypeGrid
	// HaystackTypeList Array
	HaystackTypeList
	// HaystackTypeDict Object
	HaystackTypeDict
	// HaystackTypeNull null
	HaystackTypeNull
	// HaystackTypeBool Boolean
	HaystackTypeBool
	// HaystackTypeMarker "m:"
	HaystackTypeMarker
	// HaystackTypeRemove "-:"
	HaystackTypeRemove
	// HaystackTypeNA "z:"
	HaystackTypeNA
	// HaystackTypeNumber "n:<float> [unit]" "n:45.5" "n:73.2 °F" "n:-INF"
	HaystackTypeNumber
	// HaystackTypeRef "r:<id> [dis]" "r:abc-123" "r:abc-123 RTU #3"
	HaystackTypeRef
	// HaystackTypeStr "hello" "s:hello"
	HaystackTypeStr
	// HaystackTypeDate "d:2014-01-03"
	HaystackTypeDate
	// HaystackTypeTime "h:23:59:00"
	HaystackTypeTime
	// HaystackTypeDateTime "t:2015-06-08T15:47:41-04:00 New_York"
	HaystackTypeDateTime
	// HaystackTypeURI "u:http://project-haystack.org/"
	HaystackTypeURI
	// HaystackTypeCoord "c:<lat>,<lng>" "c:37.545,-77.449"
	HaystackTypeCoord
	// HaystackTypeXStr "x:Type:value"
	HaystackTypeXStr
	// HaystackLastType ...
	HaystackLastType
)
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package identity_test
import (
"bytes"
"context"
"crypto"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"fmt"
"os"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/identity"
"storj.io/common/identity/testidentity"
"storj.io/common/peertls"
"storj.io/common/peertls/extensions"
"storj.io/common/peertls/testpeertls"
"storj.io/common/peertls/tlsopts"
"storj.io/common/pkcrypto"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
)
// TestPeerIdentityFromCertChain builds a CA cert and a leaf cert signed by
// it, then verifies PeerIdentityFromChain maps them to the CA/Leaf fields
// and derives a non-empty node ID.
func TestPeerIdentityFromCertChain(t *testing.T) {
	caKey, err := pkcrypto.GeneratePrivateKey()
	require.NoError(t, err)
	caTemplate, err := peertls.CATemplate()
	require.NoError(t, err)
	caCert, err := peertls.CreateSelfSignedCertificate(caKey, caTemplate)
	require.NoError(t, err)
	leafTemplate, err := peertls.LeafTemplate()
	require.NoError(t, err)
	leafKey, err := pkcrypto.GeneratePrivateKey()
	require.NoError(t, err)
	pubKey, err := pkcrypto.PublicKeyFromPrivate(leafKey)
	require.NoError(t, err)
	leafCert, err := peertls.CreateCertificate(pubKey, caKey, leafTemplate, caTemplate)
	require.NoError(t, err)
	// Chain order is leaf first, then CA.
	peerIdent, err := identity.PeerIdentityFromChain([]*x509.Certificate{leafCert, caCert})
	require.NoError(t, err)
	assert.Equal(t, caCert, peerIdent.CA)
	assert.Equal(t, leafCert, peerIdent.Leaf)
	assert.NotEmpty(t, peerIdent.ID)
}
// TestFullIdentityFromPEM round-trips a generated CA/leaf chain and leaf key
// through PEM encoding and verifies FullIdentityFromPEM reconstructs them.
func TestFullIdentityFromPEM(t *testing.T) {
	caKey, err := pkcrypto.GeneratePrivateKey()
	require.NoError(t, err)
	caTemplate, err := peertls.CATemplate()
	require.NoError(t, err)
	caCert, err := peertls.CreateSelfSignedCertificate(caKey, caTemplate)
	// FIX: removed a duplicated require.NoError(t, err) that followed here.
	require.NoError(t, err)
	require.NotEmpty(t, caCert)
	leafTemplate, err := peertls.LeafTemplate()
	require.NoError(t, err)
	leafKey, err := pkcrypto.GeneratePrivateKey()
	require.NoError(t, err)
	pubKey, err := pkcrypto.PublicKeyFromPrivate(leafKey)
	require.NoError(t, err)
	leafCert, err := peertls.CreateCertificate(pubKey, caKey, leafTemplate, caTemplate)
	require.NoError(t, err)
	require.NotEmpty(t, leafCert)
	// PEM chain is leaf first, then CA.
	chainPEM := bytes.NewBuffer([]byte{})
	require.NoError(t, pkcrypto.WriteCertPEM(chainPEM, leafCert))
	require.NoError(t, pkcrypto.WriteCertPEM(chainPEM, caCert))
	keyPEM := bytes.NewBuffer([]byte{})
	require.NoError(t, pkcrypto.WritePrivateKeyPEM(keyPEM, leafKey))
	fullIdent, err := identity.FullIdentityFromPEM(chainPEM.Bytes(), keyPEM.Bytes())
	assert.NoError(t, err)
	assert.Equal(t, leafCert.Raw, fullIdent.Leaf.Raw)
	assert.Equal(t, caCert.Raw, fullIdent.CA.Raw)
	assert.Equal(t, leafKey, fullIdent.Key)
}
// TestConfig_Save_with_extension saves and reloads identities across all
// identity versions, checking the identity-version certificate extension,
// saved file permissions, and that loading restores the same identity.
func TestConfig_Save_with_extension(t *testing.T) {
	ctx := testcontext.New(t)
	testidentity.CompleteIdentityVersionsTest(t, func(t *testing.T, version storj.IDVersion, ident *identity.FullIdentity) {
		identCfg := &identity.Config{
			CertPath: ctx.File("chain.pem"),
			KeyPath:  ctx.File("key.pem"),
		}
		{ // pre-save version assertions
			assert.Equal(t, version.Number, ident.ID.Version().Number)
			caVersion, err := storj.IDVersionFromCert(ident.CA)
			require.NoError(t, err)
			assert.Equal(t, version.Number, caVersion.Number)
			// Version 0 identities carry the version extension; others don't.
			versionExt := tlsopts.NewExtensionsMap(ident.CA)[extensions.IdentityVersionExtID.String()]
			if ident.ID.Version().Number == 0 {
				require.NotEmpty(t, versionExt)
				assert.Equal(t, ident.ID.Version().Number, storj.IDVersionNumber(versionExt.Value[0]))
			} else {
				assert.Empty(t, versionExt)
			}
		}
		{ // test saving
			err := identCfg.Save(ident)
			assert.NoError(t, err)
			certInfo, err := os.Stat(identCfg.CertPath)
			assert.NoError(t, err)
			keyInfo, err := os.Stat(identCfg.KeyPath)
			assert.NoError(t, err)
			// TODO (windows): ignoring for windows due to different default permissions
			if runtime.GOOS != "windows" {
				assert.Equal(t, os.FileMode(0644), certInfo.Mode())
				assert.Equal(t, os.FileMode(0600), keyInfo.Mode())
			}
		}
		{ // test loading
			loadedFi, err := identCfg.Load()
			require.NoError(t, err)
			assert.Equal(t, ident.Key, loadedFi.Key)
			assert.Equal(t, ident.Leaf, loadedFi.Leaf)
			assert.Equal(t, ident.CA, loadedFi.CA)
			assert.Equal(t, ident.ID, loadedFi.ID)
			versionExt := tlsopts.NewExtensionsMap(ident.CA)[extensions.IdentityVersionExtID.String()]
			if ident.ID.Version().Number == 0 {
				require.NotEmpty(t, versionExt)
				assert.Equal(t, ident.ID.Version().Number, storj.IDVersionNumber(versionExt.Value[0]))
			} else {
				assert.Empty(t, versionExt)
			}
		}
	})
}
// TestConfig_Save saves and reloads identities across identity versions,
// checking saved file permissions and that loading restores the same
// identity.
func TestConfig_Save(t *testing.T) {
	ctx := testcontext.New(t)
	testidentity.IdentityVersionsTest(t, func(t *testing.T, version storj.IDVersion, ident *identity.FullIdentity) {
		identCfg := &identity.Config{
			CertPath: ctx.File("chain.pem"),
			KeyPath:  ctx.File("key.pem"),
		}
		// The PEM encodes below are exercised only for their error results;
		// the buffers themselves are not consumed afterwards.
		chainPEM := bytes.NewBuffer([]byte{})
		require.NoError(t, pkcrypto.WriteCertPEM(chainPEM, ident.Leaf))
		require.NoError(t, pkcrypto.WriteCertPEM(chainPEM, ident.CA))
		privateKey := ident.Key
		require.NotEmpty(t, privateKey)
		keyPEM := bytes.NewBuffer([]byte{})
		require.NoError(t, pkcrypto.WritePrivateKeyPEM(keyPEM, privateKey))
		{ // test saving
			err := identCfg.Save(ident)
			assert.NoError(t, err)
			certInfo, err := os.Stat(identCfg.CertPath)
			assert.NoError(t, err)
			keyInfo, err := os.Stat(identCfg.KeyPath)
			assert.NoError(t, err)
			// TODO (windows): ignoring for windows due to different default permissions
			if runtime.GOOS != "windows" {
				assert.Equal(t, os.FileMode(0644), certInfo.Mode())
				assert.Equal(t, os.FileMode(0600), keyInfo.Mode())
			}
		}
		{ // test loading
			loadedFi, err := identCfg.Load()
			assert.NoError(t, err)
			assert.Equal(t, ident.Key, loadedFi.Key)
			assert.Equal(t, ident.Leaf, loadedFi.Leaf)
			assert.Equal(t, ident.CA, loadedFi.CA)
			assert.Equal(t, ident.ID, loadedFi.ID)
		}
	})
}
// TestVersionedNodeIDFromKey checks that NodeIDFromKey derives an ID whose
// embedded version matches each supported identity version.
func TestVersionedNodeIDFromKey(t *testing.T) {
	_, chain, err := testpeertls.NewCertChain(1, storj.LatestIDVersion().Number)
	require.NoError(t, err)
	pubKey, ok := chain[peertls.LeafIndex].PublicKey.(crypto.PublicKey)
	require.True(t, ok)
	for _, v := range storj.IDVersions {
		// Copy the loop variable so each subtest closure sees its own value.
		version := v
		t.Run(fmt.Sprintf("IdentityV%d", version.Number), func(t *testing.T) {
			id, err := identity.NodeIDFromKey(pubKey, version)
			require.NoError(t, err)
			assert.Equal(t, version.Number, id.Version().Number)
		})
	}
}
// TestVerifyPeer generates a fresh CA and identity, then checks that the
// raw leaf+CA chain passes peertls chain verification.
func TestVerifyPeer(t *testing.T) {
	ca, err := identity.NewCA(context.Background(), identity.NewCAOptions{
		Difficulty:  12,
		Concurrency: 4,
	})
	require.NoError(t, err)
	require.NotNil(t, ca)
	fi, err := ca.NewIdentity()
	require.NoError(t, err)
	require.NotNil(t, fi)
	err = peertls.VerifyPeerFunc(peertls.VerifyPeerCertChains)([][]byte{fi.Leaf.Raw, fi.CA.Raw}, nil)
	assert.NoError(t, err)
}
// TestManageablePeerIdentity_AddExtension verifies that adding a custom
// extension to a peer identity re-signs the leaf certificate: the new
// extension appears in Leaf.Extensions, identifying fields are kept,
// and the certificate's raw bytes and signature change.
func TestManageablePeerIdentity_AddExtension(t *testing.T) {
	ctx := testcontext.New(t)
	manageablePeerIdentity, err := testidentity.NewTestManageablePeerIdentity(ctx)
	require.NoError(t, err)
	oldLeaf := manageablePeerIdentity.Leaf
	assert.Len(t, manageablePeerIdentity.CA.Cert.ExtraExtensions, 0)
	// Build a random extension under the 2.999 arc; the first random
	// byte makes the OID itself vary between runs.
	randBytes := testrand.Bytes(10)
	randExt := pkix.Extension{
		Id:    asn1.ObjectIdentifier{2, 999, int(randBytes[0])},
		Value: randBytes,
	}
	err = manageablePeerIdentity.AddExtension(randExt)
	require.NoError(t, err)
	// The extension lands in the parsed Extensions view, not in the
	// ExtraExtensions template field.
	assert.Len(t, manageablePeerIdentity.Leaf.ExtraExtensions, 0)
	assert.Len(t, manageablePeerIdentity.Leaf.Extensions, len(oldLeaf.Extensions)+1)
	// Identity-relevant fields must be unchanged by re-signing.
	assert.Equal(t, oldLeaf.SerialNumber, manageablePeerIdentity.Leaf.SerialNumber)
	assert.Equal(t, oldLeaf.IsCA, manageablePeerIdentity.Leaf.IsCA)
	assert.Equal(t, oldLeaf.PublicKey, manageablePeerIdentity.Leaf.PublicKey)
	ext := tlsopts.NewExtensionsMap(manageablePeerIdentity.Leaf)[randExt.Id.String()]
	assert.Equal(t, randExt, ext)
	assert.Equal(t, randExt, tlsopts.NewExtensionsMap(manageablePeerIdentity.Leaf)[randExt.Id.String()])
	// The certificate bytes, TBS section, and signature were all
	// regenerated.
	assert.NotEqual(t, oldLeaf.Raw, manageablePeerIdentity.Leaf.Raw)
	assert.NotEqual(t, oldLeaf.RawTBSCertificate, manageablePeerIdentity.Leaf.RawTBSCertificate)
	assert.NotEqual(t, oldLeaf.Signature, manageablePeerIdentity.Leaf.Signature)
}
// TestManageableFullIdentity_Revoke verifies that revoking a full
// identity replaces the leaf (new key, serial number, and bytes) and
// attaches a revocation extension that verifies against the CA cert.
func TestManageableFullIdentity_Revoke(t *testing.T) {
	ctx := testcontext.New(t)
	manageableFullIdentity, err := testidentity.NewTestManageableFullIdentity(ctx)
	require.NoError(t, err)
	oldLeaf := manageableFullIdentity.Leaf
	assert.Len(t, manageableFullIdentity.CA.Cert.ExtraExtensions, 0)
	err = manageableFullIdentity.Revoke()
	require.NoError(t, err)
	// Revocation adds exactly one parsed extension.
	assert.Len(t, manageableFullIdentity.Leaf.ExtraExtensions, 0)
	assert.Len(t, manageableFullIdentity.Leaf.Extensions, len(oldLeaf.Extensions)+1)
	assert.Equal(t, oldLeaf.IsCA, manageableFullIdentity.Leaf.IsCA)
	// Unlike AddExtension, Revoke generates a brand-new leaf: public
	// key and serial number change along with the raw bytes.
	assert.NotEqual(t, oldLeaf.PublicKey, manageableFullIdentity.Leaf.PublicKey)
	assert.NotEqual(t, oldLeaf.SerialNumber, manageableFullIdentity.Leaf.SerialNumber)
	assert.NotEqual(t, oldLeaf.Raw, manageableFullIdentity.Leaf.Raw)
	assert.NotEqual(t, oldLeaf.RawTBSCertificate, manageableFullIdentity.Leaf.RawTBSCertificate)
	assert.NotEqual(t, oldLeaf.Signature, manageableFullIdentity.Leaf.Signature)
	// The revocation extension must carry the expected OID and verify
	// against the CA certificate that signed it.
	revocationExt := tlsopts.NewExtensionsMap(manageableFullIdentity.Leaf)[extensions.RevocationExtID.String()]
	assert.True(t, extensions.RevocationExtID.Equal(revocationExt.Id))
	var rev extensions.Revocation
	err = rev.Unmarshal(revocationExt.Value)
	require.NoError(t, err)
	err = rev.Verify(manageableFullIdentity.CA.Cert)
	require.NoError(t, err)
}
// TestEncodeDecodePeerIdentity round-trips a peer identity through
// EncodePeerIdentity/DecodePeerIdentity and checks the encoding is
// stable across the round trip.
func TestEncodeDecodePeerIdentity(t *testing.T) {
	ctx := testcontext.New(t)
	fullIdent, err := testidentity.NewTestIdentity(ctx)
	require.NoError(t, err)
	peerIdent := fullIdent.PeerIdentity()
	// Encode the peer identity.
	encoded := identity.EncodePeerIdentity(peerIdent)
	assert.NotNil(t, encoded)
	// Decode it back.
	decoded, err := identity.DecodePeerIdentity(ctx, encoded)
	assert.NoError(t, err)
	// Re-encoding the decoded identity must reproduce the same bytes.
	assert.Equal(t, encoded, identity.EncodePeerIdentity(decoded))
}
|
package tcp
import "time"
const (
	// ReadTimeout is how long the server waits without receiving any
	// data from a client before disconnecting it.
	// NOTE(review): the value is five minutes, but the original
	// (Chinese) comment claimed one minute — confirm which is intended.
	ReadTimeout = time.Minute * 5
)
/*
// Packet layout (historical, currently unused)
const (
	TypeLen       = 2                         // byte length of the message-type field
	LenLen        = 2                         // byte length of the message-length field
	SeqLen        = 4                         // byte length of the message-seq field
	ContentMaxLen = 1024 * 16                 // maximum message-body length
	HeadLen       = TypeLen + LenLen + SeqLen // header length (type + length + seq fields)
	BufLen        = ContentMaxLen + HeadLen   // read-buffer length
)
*/
const (
	// TcpProtocolLen is the byte length of the uint16 length prefix.
	TcpProtocolLen = 2
	// ContentMaxLen is the maximum message-body length in bytes.
	ContentMaxLen = 1024 * 16
	// BufLen is the total read-buffer size: body plus length prefix.
	BufLen = ContentMaxLen + TcpProtocolLen
)
// Packet type codes.
const (
	CodeSignIn      = 1 // device sign-in
	CodeEventSend   = 2 // client event send
	CodeHeartbeat   = 3 // heartbeat
	CodeMessageSend = 4 // client message send
	CodeMessagePush = 5 // server message push
	CodeEventPush   = 6 // server event push
	// CodeLogout: client logout — either the user explicitly logged
	// out, or the login was superseded by another device on the same
	// platform.
	CodeLogout = 7
)
|
// +build !windows,!darwin
// 4 november 2014
package ui
import (
"fmt"
"unsafe"
)
// #include "gtk_unix.h"
import "C"
// progressbar wraps a GTK progress bar widget; controlSingleWidget
// supplies the common single-widget control behavior.
type progressbar struct {
	*controlSingleWidget
	pbar *C.GtkProgressBar
}

// newProgressBar creates a new GTK progress bar control.
// (Fix: dropped the stray trailing semicolon — Go code should be
// gofmt-clean and never uses explicit statement terminators.)
func newProgressBar() ProgressBar {
	widget := C.gtk_progress_bar_new()
	p := &progressbar{
		controlSingleWidget: newControlSingleWidget(widget),
		// The same widget pointer, viewed as a GtkProgressBar for the
		// progress-bar-specific calls below.
		pbar: (*C.GtkProgressBar)(unsafe.Pointer(widget)),
	}
	return p
}
// Percent reports the current progress as an integer percentage,
// truncating any fractional part.
func (p *progressbar) Percent() int {
	fraction := C.gtk_progress_bar_get_fraction(p.pbar)
	return int(fraction * 100)
}

// SetPercent sets the progress to the given percentage; values outside
// [0, 100] are programmer errors and panic.
func (p *progressbar) SetPercent(percent int) {
	if 0 <= percent && percent <= 100 {
		C.gtk_progress_bar_set_fraction(p.pbar, C.gdouble(percent)/100)
		return
	}
	panic(fmt.Errorf("given ProgressBar percentage %d out of range", percent))
}
|
package utils
import (
"os"
"github.com/BenLubar/dwarfocr"
)
// ReadTilesetFromFile opens the named file and parses it as a dwarfocr
// tileset, closing the file before returning.
func ReadTilesetFromFile(name string) (*dwarfocr.Tileset, error) {
	file, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	return dwarfocr.ReadTileset(file)
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/alexcesaro/log"
"github.com/alexcesaro/log/stdlog"
"github.com/awalterschulze/gographviz"
"gopkg.in/yaml.v2"
)
var logger log.Logger
// abort logs msg at critical level and terminates the process with a
// non-zero exit status.
func abort(msg string) {
	logger.Critical(msg)
	os.Exit(1)
}
type service struct {
Links []string
VolumesFrom []string "volumes_from"
Volumes []string
Ports []string
}
// main reads docker-compose.yml from the current directory and prints
// a Graphviz digraph of its services: links become solid edges,
// volumes_from relations become dotted edges.
func main() {
	var (
		raw     []byte
		data    map[string]service
		err     error
		graph   *gographviz.Graph
		project string
	)
	logger = stdlog.GetFromFlags()
	project = ""
	// Load docker-compose.yml
	raw, err = ioutil.ReadFile("docker-compose.yml")
	if err != nil {
		abort(err.Error())
	}
	// Parse it as YAML.
	// Fix: the original discarded yaml.Unmarshal's error and then
	// re-checked the stale error from ReadFile, so parse failures were
	// silently ignored.
	data = make(map[string]service, 5)
	err = yaml.Unmarshal(raw, &data)
	if err != nil {
		abort(err.Error())
	}
	// Create the directed graph.
	graph = gographviz.NewGraph()
	graph.SetName(project)
	graph.SetDir(true)
	// Add a legend subgraph explaining the node shapes and edge styles.
	graph.AddSubGraph(project, "cluster_legend", map[string]string{"label": "Legend"})
	graph.AddNode("cluster_legend", "legend_service", map[string]string{"label": "service"})
	graph.AddNode("cluster_legend", "legend_service_with_ports",
		map[string]string{
			"label": "\"service with exposed ports\\n80:80 443:443\\n--volume1[:host_dir1]\\n--volume2[:host_dir2]\"",
			"shape": "box"})
	graph.AddEdge("legend_service", "legend_service_with_ports", true, map[string]string{"label": "links"})
	graph.AddEdge("legend_service_with_ports", "legend_service", true, map[string]string{"label": "volumes_from", "style": "dashed"})
	// Round one: populate nodes. Services with exposed ports render as
	// boxes; volumes are appended to the label.
	for name, service := range data {
		var attrs = map[string]string{"label": name}
		if service.Ports != nil {
			attrs["label"] += "\\n" + strings.Join(service.Ports, " ")
			attrs["shape"] = "box"
		}
		if service.Volumes != nil {
			attrs["label"] += "\\n--" + strings.Join(service.Volumes, "\\n--")
		}
		attrs["label"] = fmt.Sprintf("\"%s\"", attrs["label"])
		graph.AddNode(project, name, attrs)
	}
	// Round two: populate connections.
	for name, service := range data {
		// links; "name:alias" entries keep only the service name.
		if service.Links != nil {
			for _, linkTo := range service.Links {
				if strings.Contains(linkTo, ":") {
					linkTo = strings.Split(linkTo, ":")[0]
				}
				graph.AddEdge(name, linkTo, true, nil)
			}
		}
		// volumes_from
		if service.VolumesFrom != nil {
			for _, linkTo := range service.VolumesFrom {
				graph.AddEdge(name, linkTo, true, map[string]string{"style": "dotted"})
			}
		}
	}
	fmt.Print(graph)
}
|
package main
import (
"flag"
"fmt"
"math"
"math/rand"
"os"
"strconv"
"strings"
"time"
)
const (
WALL = " "
ROAD = "#"
START = "S"
FINISH = "F"
)
// Point is an (x, y) grid coordinate.
type Point struct {
	x int
	y int
}

// is reports whether both coordinates of p1 and p2 are equal.
func (p1 Point) is(p2 Point) bool {
	return p1.x == p2.x && p1.y == p2.y
}

// opposite returns the mirror image of p2 reflected through p1.
func (p2 Point) opposite(p1 Point) Point {
	return Point{2*p1.x - p2.x, 2*p1.y - p2.y}
}

// getPointAtRandom returns a random permutation of in: it removes one
// element at a random index, permutes the remainder recursively, and
// appends the removed element last.
func getPointAtRandom(in []Point) []Point {
	if len(in) <= 1 {
		return in
	}
	i := rand.Intn(len(in))
	rest := make([]Point, 0, len(in)-1)
	rest = append(rest, in[:i]...)
	rest = append(rest, in[i+1:]...)
	return append(getPointAtRandom(rest), in[i])
}
// Edge is a corridor segment between two junction points of the maze
// graph.
type Edge struct {
	start    Point   // junction where the segment begins
	end      Point   // junction (or current tip) where it ends
	p        []Point // every cell along the segment, start first
	distance int     // number of steps from start to end
	//next []*Edge
}

// print writes a one-line description of the edge, indented by the
// given depth.
func (e *Edge) print(indent int) {
	fmt.Printf("%v%v->%v (distance:%v)(p:%v)\n", strings.Repeat(" ", indent), e.start, e.end, e.distance, e.p)
	/*
		for _, e := range e.next {
			e.print(indent + 1)
		}
	*/
}

// hasPoint reports whether p is one of the cells along the edge.
func (e *Edge) hasPoint(p Point) bool {
	for _, ep := range e.p {
		if p.is(ep) {
			return true
		}
	}
	return false
}

// nextTo reports whether p is NOT the second cell from either end of
// the edge — presumably "moving to p would not immediately double back
// along this edge". TODO(review): confirm intent; edges with fewer
// than two cells always pass.
func (e *Edge) nextTo(p Point) bool {
	nextTo := len(e.p) < 2 || (!p.is(e.p[1]) && !p.is(e.p[len(e.p)-2]))
	return nextTo
}
// Graph holds the junction graph extracted from a maze: vertices are
// junction points, edges are corridor segments between them.
type Graph struct {
	edgelist   []Edge
	vertexlist []Point
}

// print dumps every edge of the graph to stdout.
func (g *Graph) print() {
	for _, e := range g.edgelist {
		e.print(0)
	}
}

// addVertex records p as a junction vertex (no deduplication).
func (g *Graph) addVertex(p Point) {
	g.vertexlist = append(g.vertexlist, p)
}

// addEdge records e (no deduplication; see hasEdge).
func (g *Graph) addEdge(e Edge) {
	g.edgelist = append(g.edgelist, e)
}

// hasVertex reports whether p is already a vertex, by linear scan.
func (g *Graph) hasVertex(p Point) bool {
	for _, v := range g.vertexlist {
		if v.is(p) {
			return true
		}
	}
	return false
}

// hasEdge reports whether an edge with the same endpoints (in either
// direction) passing through e's second cell already exists.
// NOTE(review): e.p[1] is indexed unchecked, so callers must only pass
// edges with at least two cells — extendGraph appears to guarantee
// this; confirm.
func (g *Graph) hasEdge(e Edge) bool {
	for _, e2 := range g.edgelist {
		if e2.start.is(e.start) && e2.end.is(e.end) || e2.start.is(e.end) && e2.end.is(e.start) {
			if e2.hasPoint(e.p[1]) {
				return true
			}
		}
	}
	return false
}
// Maze is a rectangular grid of cells; data[x][y] == true marks a
// carved road cell, false a wall.
type Maze struct {
	width  int
	height int
	data   [][]bool
	start  Point
	finish Point
}

// NewMaze allocates a w×h maze whose cells all start out as walls.
func NewMaze(w, h int) *Maze {
	cells := make([][]bool, w)
	for x := 0; x < w; x++ {
		cells[x] = make([]bool, h)
	}
	return &Maze{
		width:  w,
		height: h,
		data:   cells,
	}
}
// isInside reports whether p lies within the maze bounds.
func (m *Maze) isInside(p Point) bool {
	return p.x >= 0 && p.x < m.width && p.y >= 0 && p.y < m.height
}

// setRoad carves p as road and redraws it. p must be inside the maze.
func (m *Maze) setRoad(p Point) {
	m.data[p.x][p.y] = true
	m.printPoint(p)
}

// isRoad reports whether p is a road cell. p must be inside the maze.
func (m *Maze) isRoad(p Point) bool {
	return m.data[p.x][p.y]
}

// setWall reverts p to a wall and redraws it. p must be inside the maze.
func (m *Maze) setWall(p Point) {
	m.data[p.x][p.y] = false
	m.printPoint(p)
}

// point reports whether p is a road cell, treating out-of-bounds
// coordinates as walls. (Fix: reuses isInside/isRoad instead of
// duplicating the bounds check inline, keeping the type's accessors
// consistent.)
func (m *Maze) point(p Point) bool {
	return m.isInside(p) && m.isRoad(p)
}
// printRoad draws one outer-wall cell at screen position (x, y):
// "\x1b[row;colH" positions the cursor, then a precision of %.1s or
// %.2s (chosen by x parity) prints one or two copies of the symbol —
// presumably so each maze column renders 1–2 characters wide inside a
// 3-column band; confirm against printPoint.
func (m *Maze) printRoad(x, y int) {
	sym := ROAD
	fmt.Printf("\x1b[%v;%vH%."+strconv.Itoa(x%2+1)+"s", y, x+1, strings.Repeat(sym, 3))
}

// printInit clears the terminal (cursor home + erase display).
func (m *Maze) printInit() {
	// Clear Screen
	fmt.Print("\x1b[H\x1b[2J")
}

// printFinish parks the cursor below the maze so later shell output
// does not overwrite the drawing.
func (m *Maze) printFinish() {
	fmt.Printf("\x1b[%v;%vH", m.height+3, 1)
}

// print clears the screen, draws the surrounding wall frame, then
// renders every interior cell.
func (m *Maze) print() {
	// Clear Screen
	fmt.Print("\x1b[H\x1b[2J")
	// Print Outer Wall: top and bottom rows...
	for x := 0; x < (m.width+1)/2*3; x++ {
		m.printRoad(x, 0)
		m.printRoad(x, m.height+2)
	}
	// ...then left and right columns.
	for y := 0; y < m.height+2; y++ {
		m.printRoad(0, y)
		m.printRoad((m.width+1)/2*3-1, y)
	}
	// Interior cells, drawn top screen row first (y is flipped).
	for y := 0; y < m.height; y++ {
		for x := 0; x < m.width; x++ {
			m.printPoint(Point{x, m.height - y - 1})
		}
	}
}

// printPoint renders the cell at p. NOTE(review): the symbol constants
// read swapped here — ROAD ("#") is printed when the cell is NOT a
// road — so the net on-screen effect is '#' walls and blank corridors.
func (m *Maze) printPoint(p Point) {
	sym := WALL
	if !m.point(p) {
		sym = ROAD
	}
	// Row is flipped (origin bottom-left); the column maps each pair of
	// maze columns into a 3-character screen band, 1 or 2 chars wide.
	fmt.Printf("\x1b[%v;%vH%."+strconv.Itoa(p.x%2+1)+"s", m.height-p.y+1, p.x/2*3+p.x%2+2, strings.Repeat(sym, 3))
}
// drawLine carves a straight horizontal or vertical run of road cells
// between p1 and p2 (inclusive). Diagonal requests are reported on
// stderr and ignored.
func (m *Maze) drawLine(p1, p2 Point) {
	x1, y1, x2, y2 := p1.x, p1.y, p2.x, p2.y
	if x1 > x2 {
		x1, x2 = x2, x1
	}
	if y1 > y2 {
		y1, y2 = y2, y1
	}
	switch {
	case x1 == x2:
		for y := y1; y <= y2; y++ {
			m.setRoad(Point{x1, y})
		}
	case y1 == y2:
		for x := x1; x <= x2; x++ {
			m.setRoad(Point{x, y1})
		}
	default:
		fmt.Fprintf(os.Stderr, "drawLine error\n")
	}
}

// drawFrame carves the rectangle whose opposite corners are p1 and p2,
// one side at a time.
func (m *Maze) drawFrame(p1, p2 Point) {
	corners := []Point{
		{p1.x, p1.y},
		{p2.x, p1.y},
		{p2.x, p2.y},
		{p1.x, p2.y},
	}
	for i, c := range corners {
		m.drawLine(c, corners[(i+1)%4])
	}
}
// nextTo returns the up-to-8 neighbours of p (including diagonals)
// that lie inside the maze.
// (Fix: uses isInside instead of repeating the bounds check inline,
// consistent with the other Maze accessors.)
func (m *Maze) nextTo(p Point) []Point {
	candidates := []Point{
		{p.x - 1, p.y - 1},
		{p.x - 1, p.y},
		{p.x - 1, p.y + 1},
		{p.x, p.y + 1},
		{p.x + 1, p.y + 1},
		{p.x + 1, p.y},
		{p.x + 1, p.y - 1},
		{p.x, p.y - 1},
	}
	var inside []Point
	for _, n := range candidates {
		if m.isInside(n) {
			inside = append(inside, n)
		}
	}
	return inside
}

// canPlot reports whether a road may be carved at p without merging
// two corridors.
func (m *Maze) canPlot(p Point) bool {
	// Cells with both coordinates odd are never carved; they keep the
	// walls between corridors intact.
	if p.x%2 == 1 && p.y%2 == 1 {
		return false
	}
	// Reject p when some neighbouring road continues straight across
	// p: carving would join two separate corridors.
	for _, n := range m.nextTo(p) {
		if m.point(n) && m.point(n.opposite(p)) {
			return false
		}
	}
	return true
}

// randomPoint returns a uniformly random cell with both coordinates
// even (a corridor cell candidate).
// (Fix: dropped the redundant int() conversions — m.width/2 is
// already an int.)
func (m *Maze) randomPoint() Point {
	return Point{rand.Intn(m.width/2) * 2, rand.Intn(m.height/2) * 2}
}

// getRoadCandidate returns the 4-neighbours of p that are inside the
// maze and not yet carved as road.
func (m *Maze) getRoadCandidate(p Point) []Point {
	var result []Point
	for _, n := range []Point{{p.x - 1, p.y}, {p.x, p.y + 1}, {p.x + 1, p.y}, {p.x, p.y - 1}} {
		if m.isInside(n) && !m.isRoad(n) {
			result = append(result, n)
		}
	}
	return result
}
// extendRoad carves roads outward from p depth-first: each candidate
// 4-neighbour is visited in random order and carved whenever canPlot
// allows it, then recursively extended.
func (m *Maze) extendRoad(p Point) {
	for _, wc := range getPointAtRandom(m.getRoadCandidate(p)) {
		if m.canPlot(wc) {
			m.setRoad(wc)
			m.extendRoad(wc)
		}
	}
}

// makeMaze carves the whole maze starting from one random
// even-coordinate cell.
func (m *Maze) makeMaze() {
	var list []Point
	list = []Point{m.randomPoint()}
	for _, p := range getPointAtRandom(list) {
		m.extendRoad(p)
	}
}

// getNextRoad returns the 4-neighbours of p that are inside the maze
// and already carved as road.
func (m *Maze) getNextRoad(p Point) []Point {
	var result []Point
	var list []Point = []Point{Point{p.x - 1, p.y}, Point{p.x, p.y + 1}, Point{p.x + 1, p.y}, Point{p.x, p.y - 1}}
	for _, p := range list {
		if m.isInside(p) && m.isRoad(p) {
			result = append(result, p)
		}
	}
	return result
}
// extendGraph walks the carved road network depth-first from cpoint,
// growing cedge one cell at a time, and records junctions as vertices
// and corridor runs as edges of g. Returns g for chaining.
func (m *Maze) extendGraph(cpoint Point, cedge Edge, g *Graph) *Graph {
	// Reached an already-visited junction: record the closing edge
	// (loops) unless an equivalent one exists, then stop.
	if g.hasVertex(cpoint) {
		if !g.hasEdge(cedge) {
			g.addEdge(cedge)
		}
		return g
	}
	// Road cells adjacent to cpoint, excluding the cell we came from
	// (filtered by cedge.nextTo).
	var nextRoads []Point
	for _, road := range m.getNextRoad(cpoint) {
		if cedge.nextTo(road) {
			nextRoads = append(nextRoads, road)
		}
	}
	if len(nextRoads) == 1 { //One way to go
		// Corridor continues: extend the current edge by one cell.
		for _, next := range nextRoads {
			cedge = Edge{start: cedge.start, end: next, p: append(cedge.p, next), distance: cedge.distance + 1}
			g = m.extendGraph(next, cedge, g)
		}
	} else {
		// Junction or dead end: close out the current edge...
		if cedge.distance > 0 {
			g.addVertex(cpoint)
			g.addEdge(cedge)
		}
		// ...and start a fresh edge toward each branch.
		for _, next := range nextRoads {
			ne := Edge{start: cpoint, end: next, distance: 1, p: []Point{cpoint, next}} //Next Edge List
			g = m.extendGraph(next, ne, g)
		}
	}
	return g
}

// makeGraph builds the junction graph of the maze, starting the walk
// at (0, 0), which is also added as a vertex.
func (m *Maze) makeGraph() *Graph {
	start := Point{x: 0, y: 0}
	e := Edge{start: start, end: start, distance: 0, p: []Point{start}}
	g := &Graph{}
	g = m.extendGraph(start, e, g)
	g.addVertex(start)
	return g
}

// Vertex colors for graph searches (white = unvisited, gray =
// frontier, black = finished). Currently unused by dijkstraSearch,
// which keeps its own lists.
const (
	White = iota
	Gray
	Black
)
//func (g *Graph) dearch(black, gray []Point) {
//}
//func (g *Graph) depthFirstSearch(s Point)
// dijkstraSearch runs Dijkstra's shortest-path algorithm from s over
// g.edgelist, printing each vertex's distance as it is finalized.
//
// Fixes: the relaxation test was inverted ("<" instead of ">"), so
// with every distance initialized to the sentinel maximum no distance
// was ever updated. Vertices are now enqueued only when their
// tentative distance improves and are finalized at most once, which
// also guarantees termination on graphs containing cycles.
func (g *Graph) dijkstraSearch(s Point) {
	dist := make(map[Point]int)
	HIGH := int(math.Pow(2, 31) - 1) // sentinel "infinite" distance
	for _, v := range g.vertexlist {
		dist[v] = HIGH
	}
	done := make(map[Point]bool) // finalized ("black") vertices
	dist[s] = 0
	gray := []Point{s} // frontier of tentatively-reached vertices
	for len(gray) > 0 {
		// Pick the frontier vertex with the smallest tentative distance.
		shortest := gray[0]
		for _, v := range gray {
			if dist[v] < dist[shortest] {
				shortest = v
			}
		}
		// Remove every occurrence of it from the frontier (in place).
		next := gray[:0]
		for _, v := range gray {
			if !v.is(shortest) {
				next = append(next, v)
			}
		}
		gray = next
		if done[shortest] {
			continue // stale frontier entry; already finalized
		}
		done[shortest] = true
		fmt.Printf("dist[%v] => %v\n", shortest, dist[shortest])
		// Relax outgoing edges; enqueue endpoints whose distance improved.
		for _, e := range g.edgelist {
			if e.start.is(shortest) && dist[e.end] > dist[e.start]+e.distance {
				dist[e.end] = dist[e.start] + e.distance
				gray = append(gray, e.end)
			}
		}
	}
}
//func (g *Graph) makeRoute(start Point, end Point) {
//for _, e := range g.searchEdge(start) {
//}
//}
// main generates and draws a random maze of the requested size, builds
// its junction graph, prints the graph, and runs a shortest-path
// search from the origin.
func main() {
	var width *int = flag.Int("width", 30, "Width of the maze.")
	var height *int = flag.Int("height", 20, "Height of the maze.")
	flag.Parse()
	rand.Seed(time.Now().UnixNano())
	// Dimensions are doubled (+1) so walls occupy the odd rows/columns.
	m := NewMaze(*width*2+1, *height*2+1)
	m.print() // print Init Maze
	m.makeMaze()
	m.printFinish()
	g := m.makeGraph()
	g.print()
	start := Point{x: 0, y: 0}
	g.dijkstraSearch(start)
}
|
package jumphelper
import (
"fmt"
"log"
"net/http"
"strings"
"time"
)
import (
"github.com/LarryBattle/nonce-golang"
"github.com/bwesterb/go-pow"
"github.com/eyedeekay/gosam"
"golang.org/x/time/rate"
)
// Server is a TCP service that responds to addressbook requests.
type Server struct {
	host             string // address to listen on
	port             string // port to listen on
	samHost          string // SAM bridge host used for I2P connectivity
	samPort          string // SAM bridge port
	pusher           *goSam.Client    // SAM client used to push listings (only when listing is enabled)
	transport        *http.Transport  // transport dialing through the SAM client
	client           *http.Client     // HTTP client used for pushes
	addressBookPath  string           // path to the addressbook CSV
	jumpHelper       *JumpHelper      // backing addressbook/lookup helper
	localService     *http.ServeMux   // request mux built by NewMux
	ext              bool             // whether to use external subscriptions — TODO confirm
	verbose          bool
	subscriptionURLs []string // remote addressbook subscription URLs
	listing          bool     // enables listing/push/pow endpoints
	base32           string
	base64           string
	difficulty       int // proof-of-work difficulty for /pow/ and /acct/
	rate             int // seconds between allowed requests (see Rate)
	burst            int // rate-limiter burst size
	limiter          *rate.Limiter
	err              error // sticky error captured across setup steps
}
// address returns the host:port string the server listens on.
func (s *Server) address() string {
	return s.host + ":" + s.port
}

// limit wraps next with the server's rate limiter, answering
// 429 Too Many Requests when the limiter rejects the request.
// (Fix: replaced the "== false" anti-idiom with "!", and the magic 429
// literal with http.StatusTooManyRequests — same value, self-documenting.)
func (s *Server) limit(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !s.limiter.Allow() {
			http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests)
			return
		}
		next.ServeHTTP(w, r)
	})
}
// Serve sets up a listening server on the configured address and
// blocks serving it; any setup or serve error is fatal.
func (s *Server) Serve() {
	log.Println("Serving new Mux")
	s.localService, s.err = s.NewMux()
	if s.err != nil {
		log.Fatal(s.err)
	}
	// All requests pass through the rate limiter first.
	s.err = http.ListenAndServe(s.address(), s.limit(s.localService))
	if s.err != nil {
		log.Fatal(s.err)
	}
}
// HandleExists prints "TRUE <name>" if the queried hostname exists in
// the addressbook, "FALSE <name>" if not.
func (s *Server) HandleExists(w http.ResponseWriter, r *http.Request) {
	p := strings.TrimPrefix(strings.Replace(r.URL.Path, "check/", "", 1), "/")
	if s.jumpHelper.CheckAddressBook(p) {
		fmt.Fprintln(w, "TRUE", p)
		return
	}
	fmt.Fprintln(w, "FALSE", p)
}

// HandleLookup redirects to a b32.i2p URL instead of behaving like a
// traditional jump service.
// (Fix: the addressbook is now searched once per request instead of
// twice.)
func (s *Server) HandleLookup(w http.ResponseWriter, r *http.Request) {
	p := strings.TrimPrefix(strings.Replace(r.URL.Path, "request/", "", 1), "/")
	// NOTE(review): entry[1] is indexed without a length check, unlike
	// HandleJump below — confirm SearchAddressBook guarantees >= 2
	// elements on a hit.
	if entry := s.jumpHelper.SearchAddressBook(p); entry != nil {
		line := "http://" + entry[1] + ".b32.i2p"
		w.Header().Set("Location", line)
		w.WriteHeader(301)
		fmt.Fprintln(w, line)
		return
	}
	fmt.Fprintln(w, "FALSE")
}

// HandleJump redirects to a base64 URL like a traditional jump service.
// (Fix: single addressbook lookup instead of two.)
func (s *Server) HandleJump(w http.ResponseWriter, r *http.Request) {
	p := strings.TrimPrefix(strings.Replace(r.URL.Path, "jump/", "", 1), "/")
	if array := s.jumpHelper.SearchAddressBook(p); array != nil {
		if len(array) == 3 {
			line := "http://" + s.host + "/?i2paddresshelper=" + array[2]
			w.Header().Set("Location", line)
			w.WriteHeader(301)
			fmt.Fprintln(w, line)
			return
		}
		fmt.Fprintln(w, "no, it's me, dave, man. let me up")
		return
	}
	fmt.Fprintln(w, "FALSE")
}
// HandleListing lists all synced remote jumphelper urls, one per line,
// when listing is enabled.
func (s *Server) HandleListing(w http.ResponseWriter, r *http.Request) {
	if !s.listing {
		fmt.Fprintln(w, "Listings disabled for this server")
		return
	}
	// Renamed the loop variable: the original shadowed the receiver s.
	for _, sub := range s.jumpHelper.Subs() {
		fmt.Fprintln(w, sub)
	}
}

// HandleBase32 prints this server's base32 address when listing is
// enabled.
func (s *Server) HandleBase32(w http.ResponseWriter, r *http.Request) {
	if !s.listing {
		fmt.Fprintln(w, "Listings disabled for this server")
		return
	}
	fmt.Fprintln(w, s.base32)
}

// HandleBase64 prints this server's base64 address when listing is
// enabled.
func (s *Server) HandleBase64(w http.ResponseWriter, r *http.Request) {
	if !s.listing {
		fmt.Fprintln(w, "Listings disabled for this server")
		return
	}
	fmt.Fprintln(w, s.base64)
}
// HandleReflectHeaders logs and echoes back every request header.
func (s *Server) HandleReflectHeaders(w http.ResponseWriter, r *http.Request) {
	for key, value := range r.Header {
		log.Println(key, value)
		fmt.Fprintf(w, "Header: %s, Value: %s\n", key, value)
	}
}

// HandleReflect32 replies with the base32 of the client requesting it,
// as reported by the I2P router header.
func (s *Server) HandleReflect32(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, r.Header.Get("X-I2p-Destb32"))
}

// HandleReflect64 replies with the base64 of the client requesting it.
func (s *Server) HandleReflect64(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, r.Header.Get("X-I2p-Destb64"))
}

// HandleReflectBoth replies with both the base32 and base64 of the
// client requesting it.
func (s *Server) HandleReflectBoth(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, r.Header.Get("X-I2p-Destb32"), ",", r.Header.Get("X-I2p-Destb64"))
}
// HandlePush creates a signed list of addresses and pushes it to the
// URL named by the request path.
// (Fix: the original ignored the error from client.Do and leaked the
// response body, which blocks HTTP connection reuse.)
func (s *Server) HandlePush(w http.ResponseWriter, r *http.Request) {
	if s.listing {
		p := strings.TrimPrefix(strings.Replace(r.URL.Path, "push/", "", 1), "/")
		if p != "" {
			send, err := http.NewRequest("POST", p, strings.NewReader(strings.Join(s.jumpHelper.Subs(), ",")))
			if err != nil {
				return
			}
			resp, err := s.client.Do(send)
			if err != nil {
				fmt.Fprintln(w, "Push failed:", err)
				return
			}
			defer resp.Body.Close()
			fmt.Fprintln(w, "Your push was sent to", p)
			return
		}
		fmt.Fprintln(w, "FALSE")
		return
	}
	fmt.Fprintln(w, "Listings disabled for this server")
}

// HandleRecv receives a pushed list of URLs from another server's
// HandlePush. The payload is currently acknowledged but not stored.
// NOTE(review): "recieved" typo is in the runtime message; left as-is.
func (s *Server) HandleRecv(w http.ResponseWriter, r *http.Request) {
	if s.listing {
		p := strings.TrimPrefix(strings.Replace(r.URL.Path, "recv/", "", 1), "/")
		if p != "" {
			fmt.Fprintln(w, "I recieved a push from:",
				r.Header.Get("X-I2p-Destb32"),
				"And for now, I did nothing with it because I am dumb)")
			return
		}
		fmt.Fprintln(w, "FALSE")
		return
	}
	fmt.Fprintln(w, "Listings disabled for this server")
}
// HandleProof emits a proof-of-work problem for the client at the
// configured difficulty, using a fresh nonce as the challenge data.
func (s *Server) HandleProof(w http.ResponseWriter, r *http.Request) {
	if s.listing {
		fmt.Fprintln(w, pow.NewRequest(uint32(s.difficulty), []byte(nonce.NewToken())))
		return
	}
	fmt.Fprintln(w, "Listings disabled for this server")
	return
}

// HandleAccount validates a submitted proof-of-work and, on success,
// adds the submitted address to the trusted addressbook.
// Request path payload is comma-separated:
//   4 fields: nonce, proof, address, destination — verify and register;
//   2 fields: a ".i2p" hostname — respond with a new (double
//   difficulty) proof-of-work request.
func (s *Server) HandleAccount(w http.ResponseWriter, r *http.Request) {
	if s.listing {
		p := strings.TrimPrefix(strings.Replace(r.URL.Path, "acct/", "", 1), "/")
		if p != "" {
			reqproof := strings.Split(p, ",")
			if len(reqproof) == 4 {
				ok, err := pow.Check(reqproof[0], reqproof[1], []byte(reqproof[2]))
				if err != nil {
					fmt.Fprintln(w, err.Error())
					return
				}
				if ok {
					s.jumpHelper.TrustedAddressBook.AddAddress(reqproof[2], reqproof[3])
					fmt.Fprintln(w, "proof-of-work valid")
					return
				}
				fmt.Fprintln(w, "proof-of-work invalid")
				return
			} else if len(reqproof) == 2 {
				if strings.HasSuffix(reqproof[0], ".i2p") {
					fmt.Fprintln(w, pow.NewRequest(uint32(s.difficulty*2), []byte(nonce.NewToken())))
					return
				}
			}
			// NOTE(review): "accound" typo is in the runtime message;
			// left unchanged here.
			fmt.Fprintln(w, "Invalid request length to accound handler", len(reqproof))
			return
		}
		fmt.Fprintln(w, "That basketball was just like a basketball to me.")
		return
	}
	fmt.Fprintln(w, "Listings disabled for this server")
	return
}
// NewMux sets up a new ServeMux with every handler registered and a
// catch-all root handler, logging each registration.
func (s *Server) NewMux() (*http.ServeMux, error) {
	s.localService = http.NewServeMux()
	s.localService.HandleFunc("/acct/", s.HandleAccount)
	log.Println("registering /acct/")
	s.localService.HandleFunc("/check/", s.HandleExists)
	log.Println("registering /check/")
	s.localService.HandleFunc("/jump/", s.HandleJump)
	log.Println("registering /jump/")
	s.localService.HandleFunc("/push/", s.HandlePush)
	log.Println("registering /push/")
	s.localService.HandleFunc("/recv/", s.HandleRecv)
	log.Println("registering /recv/")
	s.localService.HandleFunc("/request/", s.HandleLookup)
	log.Println("registering /request/")
	s.localService.HandleFunc("/addr/", s.HandleBase32)
	log.Println("registering /addr/")
	s.localService.HandleFunc("/addr64/", s.HandleBase64)
	log.Println("registering /addr64/")
	s.localService.HandleFunc("/reflect32/", s.HandleReflect32)
	log.Println("registering /reflect32/")
	s.localService.HandleFunc("/reflect64/", s.HandleReflect64)
	log.Println("registering /reflect64/")
	s.localService.HandleFunc("/reflect/", s.HandleReflectBoth)
	log.Println("registering /reflect/")
	s.localService.HandleFunc("/headers/", s.HandleReflectHeaders)
	log.Println("registering /headers/")
	s.localService.HandleFunc("/pow/", s.HandleProof)
	log.Println("registering /pow/")
	s.localService.HandleFunc("/sub/", s.HandleListing)
	log.Println("registering /sub/")
	// Catch-all for any other path.
	s.localService.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "Dave's not here man.")
	})
	// NOTE(review): nothing above sets s.err, so this check only fires
	// if an earlier step left a sticky error on the server.
	if s.err != nil {
		return nil, fmt.Errorf("Local mux configuration error: %s", s.err)
	}
	return s.localService, nil
}
// Rate converts the configured per-request interval (s.rate, in
// seconds) into a rate.Limit allowing one event per interval.
func (s *Server) Rate() rate.Limit {
	interval := time.Duration(s.rate) * time.Second
	return rate.Every(interval)
}
// NewServer creates a new Server that answers jump-related queries.
// It is a positional-argument convenience wrapper around
// NewServerFromOptions.
func NewServer(host, port, book, samhost, samport string, subs []string, useh, verbose, share bool, base32 string) (*Server, error) {
	return NewServerFromOptions(
		SetServerHost(host),
		SetServerPort(port),
		SetServerAddressBookPath(book),
		SetServerJumpHelperHost(samhost),
		SetServerJumpHelperPort(samport),
		SetServerUseHelper(useh),
		SetServerSubscription(subs),
		SetServerJumpHelperVerbosity(verbose),
		SetServerEnableListing(share),
		SetServerBase32(base32),
	)
}
// NewServerFromOptions creates a new Server that answers jump-related
// queries. Defaults are applied first, then each option in order; when
// listing is enabled a SAM client, transport, and HTTP client are also
// built so the server can push its listings over I2P.
func NewServerFromOptions(opts ...func(*Server) error) (*Server, error) {
	var s Server
	// Defaults; any option below may override them.
	s.host = "127.0.0.1"
	s.port = "7854"
	s.samHost = "127.0.0.1"
	s.samPort = "7656"
	s.addressBookPath = "/var/lib/i2pd/addressbook/addresses.csv"
	s.rate = 1
	s.burst = 1
	s.ext = true
	s.verbose = false
	s.listing = false
	s.base32 = ""
	s.base64 = ""
	s.difficulty = 1
	s.subscriptionURLs = []string{"http://joajgazyztfssty4w2on5oaqksz6tqoxbduy553y34mf4byv6gpq.b32.i2p/export/alive-hosts.txt"}
	for _, o := range opts {
		if err := o(&s); err != nil {
			return nil, fmt.Errorf("Service configuration error: %s", err)
		}
	}
	s.limiter = rate.NewLimiter(s.Rate(), s.burst)
	log.Println("Configured Rate Limiter")
	if s.listing {
		// Listing servers need an outbound I2P tunnel for pushes.
		s.pusher, s.err = goSam.NewClientFromOptions(
			goSam.SetHost(s.samHost),
			goSam.SetPort(s.samPort),
			goSam.SetUnpublished(true),
			goSam.SetInLength(uint(3)),
			goSam.SetOutLength(uint(3)),
			goSam.SetInQuantity(uint(6)),
			goSam.SetOutQuantity(uint(6)),
			goSam.SetInBackups(uint(2)),
			goSam.SetOutBackups(uint(2)),
			goSam.SetCloseIdle(true),
			goSam.SetCloseIdleTime(uint(300000)),
		)
		if s.err != nil {
			return nil, s.err
		}
		s.transport = &http.Transport{
			Dial: s.pusher.Dial,
		}
		s.client = &http.Client{
			Transport: s.transport,
		}
	}
	s.jumpHelper, s.err = NewJumpHelperFromOptions(
		SetJumpHelperAddressBookPath(s.addressBookPath),
		SetJumpHelperHost(s.samHost),
		SetJumpHelperPort(s.samPort),
		SetJumpHelperUseHelper(s.ext),
		SetJumpHelperVerbosity(s.verbose),
	)
	// With no subscriptions there is nothing external to consult.
	if len(s.subscriptionURLs) < 1 {
		s.ext = false
	}
	if s.err != nil {
		return nil, fmt.Errorf("Jump helper load error: %s", s.err)
	}
	return &s, s.err
}
// Service quickly starts a server with the defaults, serving on a
// background goroutine. NOTE(review): the goroutine is fire-and-forget;
// the caller has no handle to stop it or observe serve errors.
func Service() {
	s, err := NewServerFromOptions()
	if err != nil {
		log.Fatal(err, "Error starting server")
	}
	go s.Serve()
}

// NewService quickly starts a server configured with the given host,
// port, addressbook path, SAM endpoint, and subscriptions, serving on
// a background goroutine.
func NewService(host, port, book, samhost, samport string, subs []string, useh bool) {
	s, err := NewServerFromOptions(
		SetServerHost(host),
		SetServerPort(port),
		SetServerAddressBookPath(book),
		SetServerUseHelper(useh),
		SetServerJumpHelperHost(samhost),
		SetServerJumpHelperPort(samport),
		SetServerSubscription(subs),
	)
	if err != nil {
		log.Fatal(err, "Error starting server")
	}
	go s.Serve()
}

// service starts a hard-coded development configuration (listens on
// all interfaces, generous rate limits) on a background goroutine.
func service() {
	s, err := NewServerFromOptions(
		SetServerHost("0.0.0.0"),
		SetServerPort("7854"),
		SetServerAddressBookPath("../addresses.csv"),
		SetServerRate(1000),
		SetServerBurst(1000),
		SetServerUseHelper(false),
		SetServerJumpHelperHost("127.0.0.1"),
		SetServerJumpHelperPort("7656"),
		SetServerSubscription([]string{"http://joajgazyztfssty4w2on5oaqksz6tqoxbduy553y34mf4byv6gpq.b32.i2p/export/alive-hosts.txt"}),
	)
	if err != nil {
		log.Fatal(err, "Error starting server")
	}
	go s.Serve()
}
|
// Copyright 2022 The ChromiumOS Authors.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package quickanswers contains helper functions for the local Tast tests
// that exercise ChromeOS Quick answers feature.
package quickanswers
import (
"context"
"chromiumos/tast/local/chrome"
)
// SetPrefValue is a helper function to set the value of a Quick
// answers related pref via the settingsPrivate extension API.
// Note that the pref needs to be allowlisted here:
// https://cs.chromium.org/chromium/src/chrome/browser/extensions/api/settings_private/prefs_util.cc
func SetPrefValue(ctx context.Context, tconn *chrome.TestConn, prefName string, value interface{}) error {
	return tconn.Call(ctx, nil, `tast.promisify(chrome.settingsPrivate.setPref)`, prefName, value)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package constants contains values used across wallpaper tests.
package constants
import (
"image/color"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
)
// FillButton is a finder for the "Fill" toggle button node.
var FillButton = nodewith.Name("Fill").Role(role.ToggleButton)

// CenterButton is a finder for the "Center" toggle button node.
var CenterButton = nodewith.Name("Center").Role(role.ToggleButton)

// ChangeDailyButton is a finder for the "Change Daily" toggle button node.
var ChangeDailyButton = nodewith.Name("Change wallpaper image daily").Role(role.ToggleButton)

// GooglePhotosWallpaperAlbumsButton is a finder for the Google Photos "Albums" toggle button node.
var GooglePhotosWallpaperAlbumsButton = nodewith.Name("Albums").Role(role.ToggleButton)

// GooglePhotosWallpaperAlbum is the name of an album in the GooglePhotosWallpaperCollection.
const GooglePhotosWallpaperAlbum = "Album 01"

// GooglePhotosWallpaperCollection is the name of the Google Photos wallpaper collection.
const GooglePhotosWallpaperCollection = "Google Photos"

// GooglePhotosWallpaperPhoto is the name of a photo in the GooglePhotosWallpaperAlbum.
const GooglePhotosWallpaperPhoto = "Photo 01"

// GooglePhotosWallpaperColor is the color of the GooglePhotosWallpaperPhoto (pure blue).
var GooglePhotosWallpaperColor = color.RGBA{0, 0, 255, 255}

// RefreshButton is a finder for the "Refresh" button node.
var RefreshButton = nodewith.Name("Refresh the current wallpaper image").Role(role.Button)

// SolidColorsCollection is the name of a wallpaper collection of solid colors.
const SolidColorsCollection = "Solid colors"

// ElementCollection is the name of a wallpaper collection of elements.
const ElementCollection = "Element"

// DarkElementImage and LightElementImage are two images in Element collection.
const (
	DarkElementImage  = "Wind Dark Digital Art by Rutger Paulusse"
	LightElementImage = "Wind Light Digital Art by Rutger Paulusse"
)

// YellowWallpaperName is the name of a solid yellow wallpaper in the solid colors collection.
const YellowWallpaperName = "Yellow"

// YellowWallpaperColor is the color of the Solid Colors / Yellow wallpaper image.
var YellowWallpaperColor = color.RGBA{255, 235, 60, 255}

// LocalWallpaperCollection is the wallpaper collection of images stored in the device's Downloads folder.
const LocalWallpaperCollection = "My Images"

// LocalWallpaperFilename is the filename of the image in the Downloads folder.
const LocalWallpaperFilename = "set_local_wallpaper_light_pink_20210929.jpg"

// LocalWallpaperColor is the color of LocalWallpaperFilename (light pink).
var LocalWallpaperColor = color.RGBA{255, 203, 198, 255}

// WhiteWallpaperName is the name of a solid white wallpaper in the solid colors collection.
const WhiteWallpaperName = "White"
|
package lennox
import "strconv"
import "fmt"
import "io/ioutil"
import "os"
import "io"
import "os/exec"
// IR pulse timings — presumably in microseconds; TODO(review) confirm
// against the transmitter code that consumes encode()'s output.
const TIME_SHORT = 550
const TIME_LONG = 1550
const TIME_4000 = 4350
const TIME_5000 = 5150

// NO_TEMP is the temperature field value used when no temperature
// applies (off and fan-only states).
const NO_TEMP = 14

// Operating mode codes as encoded into the payload's mode field.
const COOL_MODE = 0
const DRY_MODE = 1
const AUTO_MODE = 2
const HEAT_MODE = 3
const FAN_MODE = 4
// IState is implemented by every remote-control state; Data yields the
// bit-string payload encoding that state.
type IState interface {
	Data() string
}

// CoolState is cooling mode at a target temperature and fan speed.
type CoolState struct {
	Temperature int
	FanSpeed    FanSpeed
}

// HeatState is heating mode at a target temperature and fan speed.
type HeatState struct {
	Temperature int
	FanSpeed    FanSpeed
}

// OffState turns the unit off.
type OffState struct {
}

// FanState is fan-only mode at a given fan speed (no temperature).
type FanState struct {
	FanSpeed FanSpeed
}

// DryState is dehumidify mode at a target temperature (fan speed is
// fixed by the unit).
type DryState struct {
	Temperature int
}
func (f FanSpeed) Data() string {
return fmt.Sprintf("%03s", strconv.FormatInt(int64(f), 2))
}
// Data encodes the "off" command: no fan, cool-mode code and the NO_TEMP
// placeholder. The header is 1010000100 versus 1010000110 for the on
// states — presumably the differing bit is the power flag; confirm
// against the protocol notes.
func (s OffState) Data() string {
	f := FAN_NONE.Data()
	m := strconv.FormatInt(int64(COOL_MODE), 2)
	t := strconv.FormatInt(NO_TEMP, 2)
	d := fmt.Sprintf("1010000100%03s%03s0100%04s1111111111111111", f, m, t)
	return d
}
// Data encodes the cooling command: powered-on header, the selected fan
// speed, the cool-mode code and the temperature as a 4-bit offset from 17.
func (s CoolState) Data() string {
	speed := s.FanSpeed.Data()
	mode := strconv.FormatInt(int64(COOL_MODE), 2)
	temp := strconv.FormatInt(int64(s.Temperature-17), 2)
	return fmt.Sprintf("1010000110%03s%03s0100%04s1111111111111111", speed, mode, temp)
}
// Data encodes the heating command: powered-on header, fan speed,
// heat-mode code and the temperature as a 4-bit offset from 17.
func (s HeatState) Data() string {
	f := s.FanSpeed.Data()
	m := strconv.FormatInt(int64(HEAT_MODE), 2)
	t := strconv.FormatInt(int64(s.Temperature-17), 2)
	d := fmt.Sprintf("1010000110%03s%03s0100%04s1111111111111111", f, m, t)
	return d
}
// Data encodes the dehumidify command: powered-on header, no fan speed
// selection, dry-mode code and the temperature as a 4-bit offset from 17.
func (s DryState) Data() string {
	f := FAN_NONE.Data()
	m := strconv.FormatInt(int64(DRY_MODE), 2)
	t := strconv.FormatInt(int64(s.Temperature-17), 2)
	d := fmt.Sprintf("1010000110%03s%03s0100%04s1111111111111111", f, m, t)
	return d
}
// Data encodes the fan-only command: powered-on header, the selected fan
// speed, fan-mode code and the NO_TEMP placeholder temperature.
func (s FanState) Data() string {
	f := s.FanSpeed.Data()
	m := strconv.FormatInt(int64(FAN_MODE), 2)
	t := strconv.FormatInt(NO_TEMP, 2)
	d := fmt.Sprintf("1010000110%03s%03s0100%04s1111111111111111", f, m, t)
	return d
}
/*func flip(s string) string {
newString := strings.Replace(s, "0", "2", -1)
newString = strings.Replace(newString, "1", "0", -1)
newString = strings.Replace(newString, "2", "1", -1)
return newString
}*/
// reverse returns s with its runes in reverse order.
func reverse(s string) string {
	out := []rune(s)
	for l, r := 0, len(out)-1; l < r; l, r = l+1, r-1 {
		out[l], out[r] = out[r], out[l]
	}
	return string(out)
}
// checksum computes the 8-bit frame checksum of the 40-bit payload s.
// Each of the five 8-bit groups is bit-reversed (the protocol transmits
// LSB first), the reversed bytes are summed, and the two's complement of
// the low byte is returned as an 8-character binary string, again
// bit-reversed for transmission.
//
// Fixes over the original: the inner parse result no longer shadows the
// loop index i, and the complement is reduced mod 256 — previously a sum
// that was an exact multiple of 256 produced 256, i.e. a 9-character
// checksum string.
func checksum(s string) (string, error) {
	var sum uint64
	for i := 0; i < 5; i++ {
		b := reverse(s[8*i : 8*i+8])
		v, err := strconv.ParseUint(b, 2, 8)
		if err != nil {
			return "", err
		}
		sum += v
	}
	sum = (256 - sum%256) % 256
	out := fmt.Sprintf("%08s", strconv.FormatUint(sum, 2))
	return reverse(out), nil
}
// encode expands a bit string into the IR pulse/space timing sequence
// (alternating values, in microseconds). The frame is emitted twice:
// first with '1' encoded as a long space, then — after an inter-frame
// gap — inverted, with '0' encoded as the long space.
func encode(data string) []uint {
	timings := make([]uint, 0, 4*len(data)+8)
	// First copy: header, then each bit as a short pulse plus a
	// short ('0') or long ('1') space.
	timings = append(timings, TIME_4000, TIME_4000)
	for _, bit := range data {
		timings = append(timings, TIME_SHORT)
		switch bit {
		case '0':
			timings = append(timings, TIME_SHORT)
		case '1':
			timings = append(timings, TIME_LONG)
		}
	}
	timings = append(timings, TIME_SHORT)
	// Second copy: gap, header, then the same bits with the space
	// lengths swapped.
	timings = append(timings, TIME_5000, TIME_4000, TIME_4000)
	for _, bit := range data {
		timings = append(timings, TIME_SHORT)
		switch bit {
		case '0':
			timings = append(timings, TIME_LONG)
		case '1':
			timings = append(timings, TIME_SHORT)
		}
	}
	timings = append(timings, TIME_SHORT)
	return timings
}
// Apply transmits the given state to the A/C unit: it appends the frame
// checksum, expands the bits into pulse/space timings, writes them in
// ir-ctl's "pulse N"/"space N" text format to a temporary file, and
// invokes `ir-ctl --send` on it.
//
// Fix: the original never closed the temporary file, leaking the file
// descriptor for the life of the process; it is now closed before the
// file is handed to ir-ctl.
func Apply(state IState) error {
	data := state.Data()
	chk, err := checksum(data)
	if err != nil {
		return err
	}
	data += chk
	encodedData := encode(data)
	tmpfile, err := ioutil.TempFile("", "lennox")
	if err != nil {
		return err
	}
	defer os.Remove(tmpfile.Name())
	// Timings alternate pulse/space, starting with a pulse.
	on := false
	for _, t := range encodedData {
		var s string
		if on {
			s = fmt.Sprintf("space %d\n", t)
		} else {
			s = fmt.Sprintf("pulse %d\n", t)
		}
		if _, err = io.WriteString(tmpfile, s); err != nil {
			tmpfile.Close()
			return err
		}
		on = !on
	}
	if err = tmpfile.Close(); err != nil {
		return err
	}
	return exec.Command("ir-ctl", "--send", tmpfile.Name()).Run()
}
|
package authors
// berthaAuthor mirrors an author record as returned by the Bertha source
// (all-lowercase JSON keys).
type berthaAuthor struct {
	Name            string `json:"name"`
	Email           string `json:"email"`
	ImageURL        string `json:"imageurl"`
	Biography       string `json:"biography"`
	TwitterHandle   string `json:"twitterhandle"`
	FacebookProfile string `json:"facebookprofile"`
	LinkedinProfile string `json:"linkedinprofile"`
	TmeIdentifier   string `json:"tmeidentifier"`
}

// author is the outgoing author representation (camelCase JSON keys;
// optional fields are omitted when empty).
type author struct {
	UUID                   string                 `json:"uuid"`
	PrefLabel              string                 `json:"prefLabel"`
	Type                   string                 `json:"type"`
	AlternativeIdentifiers alternativeIdentifiers `json:"alternativeIdentifiers,omitempty"`
	Aliases                []string               `json:"aliases,omitempty"`
	BirthYear              int                    `json:"birthYear,omitempty"`
	Name                   string                 `json:"name"`
	Salutation             string                 `json:"salutation,omitempty"`
	EmailAddress           string                 `json:"emailAddress,omitempty"`
	TwitterHandle          string                 `json:"twitterHandle,omitempty"`
	FacebookProfile        string                 `json:"facebookProfile,omitempty"`
	LinkedinProfile        string                 `json:"linkedinProfile,omitempty"`
	Description            string                 `json:"description,omitempty"`
	DescriptionXML         string                 `json:"descriptionXML,omitempty"`
	ImageURL               string                 `json:"_imageUrl,omitempty"`
}

// alternativeIdentifiers groups the author's TME identifiers and UUIDs.
type alternativeIdentifiers struct {
	TME   []string `json:"TME,omitempty"`
	UUIDs []string `json:"uuids,omitempty"`
}

// authorLink carries the API URL of an author resource.
type authorLink struct {
	APIURL string `json:"apiUrl"`
}

// authorUUID wraps a single author identifier (serialized as "ID").
type authorUUID struct {
	UUID string `json:"ID"`
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package sync2_test
import (
"context"
"testing"
"time"
"storj.io/common/sync2"
"storj.io/common/time2"
)
// TestSleep verifies that sync2.Sleep waits roughly the requested
// duration and reports true when the context is not canceled, both
// against the real clock and against a time2 fake clock.
func TestSleep(t *testing.T) {
	t.Parallel()
	t.Run("against the real clock", func(t *testing.T) {
		const sleepError = time.Second / 2 // should be larger than most system error with regards to sleep
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		start := time.Now()
		if !sync2.Sleep(ctx, time.Second) {
			t.Error("expected true as result")
		}
		// Allow for scheduler slop: only require that at least
		// (1s - sleepError) elapsed.
		if time.Since(start) < time.Second-sleepError {
			t.Error("sleep took too little time")
		}
	})
	t.Run("against a fake clock", func(t *testing.T) {
		ctx, timeMachine := time2.WithNewMachine(context.Background())
		// Advance the fake clock by exactly one second once Sleep has
		// blocked; the deferred call waits for that goroutine to finish.
		defer sync2.Go(func() { timeMachine.BlockThenAdvance(ctx, 1, time.Second) })()
		start := timeMachine.Now()
		if !sync2.Sleep(ctx, time.Second) {
			t.Error("expected true as result")
		}
		// With a fake clock the elapsed time must be exact.
		if timeMachine.Since(start) != time.Second {
			t.Error("sleep took too little time")
		}
	})
}
// TestSleep_Cancel verifies that sync2.Sleep returns false immediately
// (well before the requested 5s) when its context is already canceled.
func TestSleep_Cancel(t *testing.T) {
	t.Parallel()
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	start := time.Now()
	if sync2.Sleep(ctx, 5*time.Second) {
		t.Error("expected false as result")
	}
	if time.Since(start) > time.Second {
		t.Error("sleep took too long")
	}
}
|
package LeetCode
import (
"fmt"
)
// Code124 builds the sample tree from LeetCode 124
// ([-10,9,20,null,null,15,7]) and prints its maximum path sum
// (expected output: 42).
func Code124() {
	// NOTE(review): the InitTree result is immediately overwritten by the
	// hand-built tree below — the call looks like leftover scaffolding;
	// confirm InitTree has no needed side effects before removing it.
	head := InitTree()
	head = &TreeNode{-10, nil, nil}
	head.Left = &TreeNode{9, nil, nil}
	head.Right = &TreeNode{20, nil, nil}
	head.Right.Left = &TreeNode{15, nil, nil}
	head.Right.Right = &TreeNode{7, nil, nil}
	fmt.Println(maxPathSum(head))
}
/**
给定一个非空二叉树,返回其最大路径和。
本题中,路径被定义为一条从树中任意节点出发,达到任意节点的序列。该路径至少包含一个节点,且不一定经过根节点。
示例 1:
输入: [1,2,3]
1
/ \
2 3
输出: 6
示例 2:
输入: [-10,9,20,null,null,15,7]
-10
/ \
9 20
/ \
15 7
输出: 42
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/binary-tree-maximum-path-sum
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
// maxPathSum returns the maximum path sum of the tree, where a path may
// start and end at any node (LeetCode 124).
//
// Cleaned up from the original: the debug fmt.Println calls and the
// redundant ^uint computation are gone; the accumulator is simply seeded
// with the smallest 32-bit int so a single all-negative node still wins.
func maxPathSum(root *TreeNode) int {
	best := -1 << 31
	dfs3(root, &best)
	return best
}
// dfs3 returns the maximum "downward" path sum that starts at root and
// descends into one subtree (never negative — an unprofitable branch is
// simply not taken), and updates *best with the best path that passes
// through root.
//
// Bug fix: the original returned early when a child contribution was
// negative, skipping both the *best update for this node and the node's
// own value — e.g. for the tree [5,-2] it reported -2 instead of 5.
// Negative child sums are now clamped to zero instead. The stray debug
// fmt.Println is removed.
func dfs3(root *TreeNode, best *int) int {
	if root == nil {
		return 0
	}
	left := dfs3(root.Left, best)
	right := dfs3(root.Right, best)
	if left < 0 {
		left = 0
	}
	if right < 0 {
		right = 0
	}
	if sum := root.Val + left + right; sum > *best {
		*best = sum
	}
	if left > right {
		return root.Val + left
	}
	return root.Val + right
}
|
package main
import "fmt"
// Monkey is the base type; it only knows how to climb.
type Monkey struct {
	Name string
}

// Climbing prints that the monkey can climb.
func (m *Monkey) Climbing() {
	fmt.Println(m.Name, "can climbing")
}

// Inheritance via struct embedding: LittleMonkey gets Monkey's fields
// and methods.
type LittleMonkey struct {
	Monkey
}

// BirdAble is the interface of bird abilities.
type BirdAble interface {
	Fly()
}

// FishAble is the interface of fish abilities.
type FishAble interface {
	Swim()
}

// Implement both interfaces on LittleMonkey.
func (m *LittleMonkey) Fly() {
	fmt.Println(m.Name, "can fly")
}

// Swim implements FishAble.
func (m *LittleMonkey) Swim() {
	fmt.Println(m.Name, "can swim")
}
// main demonstrates embedding plus interface implementation: a
// LittleMonkey climbs like its embedded Monkey and additionally flies
// and swims.
func main() {
	wukong := &LittleMonkey{Monkey{"wukong"}}
	wukong.Climbing()
	wukong.Fly()
	wukong.Swim()
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adb_test
import (
"context"
"fmt"
"github.com/google/gapid/core/assert"
"github.com/google/gapid/core/os/android/adb"
"github.com/google/gapid/core/os/file"
"github.com/google/gapid/core/os/shell"
"github.com/google/gapid/core/os/shell/stub"
)
var (
adbPath = file.Abs("/adb")
validDevices = stub.RespondTo(adbPath.System()+` devices`, `
List of devices attached
adb server version (36) doesn't match this client (35); killing...
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
debug_device unknown
debug_device2 unknown
dumpsys_device offline
error_device device
install_device unauthorized
invalid_device unknown
logcat_device unauthorized
no_pgrep_no_ps_device unknown
no_pgrep_ok_ps_device offline
ok_pgrep_no_ps_device device
ok_pgrep_ok_ps_device unauthorized
production_device unknown
pull_device offline
push_device device
rooted_device unauthorized
run_device unknown
screen_off_locked_device offline
screen_off_unlocked_device offline
screen_on_locked_device offline
screen_on_unlocked_device device
`)
emptyDevices = stub.RespondTo(adbPath.System()+` devices`, `
List of devices attached
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
`)
invalidDevices = stub.RespondTo(adbPath.System()+` devices`, `
List of devices attached
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
production_device unauthorized invalid
`)
invalidStatus = stub.RespondTo(adbPath.System()+` devices`, `
List of devices attached
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
production_device invalid
`)
notDevices = stub.RespondTo(adbPath.System()+` devices`, ``)
devices = &stub.Delegate{Handlers: []shell.Target{validDevices}}
)
func init() {
adb.ADB = file.Abs("/adb")
shell.LocalTarget = stub.OneOf(
devices,
stub.RespondTo(adbPath.System()+` -s dumpsys_device shell dumpsys package`, `
Activity Resolver Table:
Non-Data Actions:
android.intent.action.MAIN:
43178558 com.google.foo/.FooActivity filter 4327f110
12345678 com.google.qux/.QuxActivity filter 1256e899
com.google.android.FOO:
43178558 com.google.foo/.FooActivity filter 431d7db8
android.intent.action.SEARCH:
43178558 com.google.foo/.FooActivity filter 4327cc40
Packages:
Package [com.google.foo] (ffffffc):
userId=12345
primaryCpuAbi=armeabi-v7a
secondaryCpuAbi=null
versionCode=902107 minSdk=14 targetSdk=15
flags=[ HAS_CODE ALLOW_CLEAR_USER_DATA ALLOW_BACKUP ]
Package [com.google.qux] (cafe0000):
userId=34567
primaryCpuAbi=armeabi-v7a
secondaryCpuAbi=null
versionCode=123456 targetSdk=15
flags=[ DEBUGGABLE HAS_CODE ALLOW_CLEAR_USER_DATA ALLOW_BACKUP ]
`),
// Screen state queries
stub.RespondTo(adbPath.System()+` -s screen_off_locked_device shell dumpsys window`, `
mHasSoftInput=true
mAwake=false
mScreenOnEarly=true mScreenOnFully=true
mDockLayer=268435456 mStatusBarLayer=-1
mShowingDream=false mDreamingLockscreen=true mDreamingSleepToken=null
mFocusedWindow=Window{7f1fdc4 u0 com.google.gapid.gltests/com.google.gapid.gltests.MainActivity}
...`),
stub.RespondTo(adbPath.System()+` -s screen_off_unlocked_device shell dumpsys window`, `
mHomePressed=false
mAwake=falsemScreenOnEarly=true mScreenOnFully=true
mKeyguardDrawComplete=true mWindowManagerDrawComplete=true
mDockLayer=268435456 mStatusBarLayer=0
mShowingDream=false mShowingLockscreen=false mDreamingSleepToken=null
mStatusBar=Window{5033a83 u0 StatusBar} isStatusBarKeyguard=false
...`),
stub.RespondTo(adbPath.System()+` -s screen_on_locked_device shell dumpsys window`, `
mHasSoftInput=true
mAwake=true
mScreenOnEarly=true mScreenOnFully=true
mDockLayer=268435456 mStatusBarLayer=-1
mShowingDream=false mDreamingLockscreen=true mDreamingSleepToken=null
mFocusedWindow=Window{7f1fdc4 u0 com.google.gapid.gltests/com.google.gapid.gltests.MainActivity}
...`),
stub.RespondTo(adbPath.System()+` -s screen_on_unlocked_device shell dumpsys window`, `
mHomePressed=false
mAwake=truemScreenOnEarly=true mScreenOnFully=true
mKeyguardDrawComplete=true mWindowManagerDrawComplete=true
mDockLayer=268435456 mStatusBarLayer=0
mShowingDream=false mShowingLockscreen=false mDreamingSleepToken=null
mStatusBar=Window{5033a83 u0 StatusBar} isStatusBarKeyguard=false
...`),
// Pid queries.
stub.Regex(`adb -s ok_pgrep_\S*device shell pgrep .* com.google.foo`, stub.Respond("")),
stub.Regex(`adb -s ok_pgrep\S*device shell pgrep -n -f com.google.bar`, stub.Respond("2778")),
stub.RespondTo(adbPath.System()+` -s no_pgrep_ok_ps_device shell ps`, `
u0_a11 21926 5061 1976096 42524 SyS_epoll_ 0000000000 S com.google.android.gms
u0_a111 2778 5062 1990796 59268 SyS_epoll_ 0000000000 S com.google.bar
u0_a69 22841 5062 1255788 88672 SyS_epoll_ 0000000000 S com.example.meh`),
stub.Regex(`adb -s \S*no_ps\S*device shell ps`, stub.Respond("/system/bin/sh: ps: not found")),
stub.Regex(`adb -s \S*no_pgrep\S*device shell pgrep \S+`, stub.Respond("/system/bin/sh: pgrep: not found")),
stub.RespondTo(adbPath.System()+` -s invalid_device shell dumpsys window`, `not a normal response`),
// Root command responses
stub.RespondTo(adbPath.System()+` -s production_device root`, `adbd cannot run as root in production builds`),
&stub.Sequence{
stub.RespondTo(adbPath.System()+` -s debug_device root`, `restarting adbd as root`),
stub.RespondTo(adbPath.System()+` -s debug_device root`, `some random output`),
stub.RespondTo(adbPath.System()+` -s debug_device root`, `adbd is already running as root`),
},
stub.RespondTo(adbPath.System()+` -s debug_device2 root`, `* daemon not running. starting it now at tcp:5036 *
* daemon started successfully *`),
stub.RespondTo(adbPath.System()+` -s rooted_device root`, `adbd is already running as root`),
stub.RespondTo(adbPath.System()+` -s invalid_device root`, `not a normal response`),
stub.Match(adbPath.System()+` -s error_device root`, &stub.Response{WaitErr: fmt.Errorf(`not a normal response`)}),
// SELinuxEnforcing command responses
stub.RespondTo(adbPath.System()+` -s production_device shell getenforce`, `Enforcing`),
stub.RespondTo(adbPath.System()+` -s debug_device shell getenforce`, `Permissive`),
stub.Match(adbPath.System()+` -s error_device shell getenforce`, &stub.Response{WaitErr: fmt.Errorf(`not a normal response`)}),
// Logcat command responses
stub.RespondTo(adbPath.System()+` -s logcat_device logcat -v long -T 0 GAPID:V *:W`, `
[ 03-29 15:16:29.514 24153:24153 V/AndroidRuntime ]
>>>>>> START com.android.internal.os.RuntimeInit uid 0 <<<<<<
[ 03-29 15:16:29.518 24153:24153 D/AndroidRuntime ]
CheckJNI is OFF
[ 03-29 15:16:29.761 31608:31608 I/Finsky ]
[1] PackageVerificationReceiver.onReceive: Verification requested, id = 331
[ 03-29 15:16:32.205 31608:31655 W/qtaguid ]
Failed write_ctrl(u 48) res=-1 errno=22
[ 03-29 15:16:32.205 31608:31655 E/NetworkManagementSocketTagger ]
untagSocket(48) failed with errno -22
[ 03-29 15:16:32.219 31608:31608 F/Finsky ]
[1] PackageVerificationReceiver.onReceive: Verification requested, id = 331
`),
// Common responses to all devices
stub.Regex(`adb -s .* shell getprop ro\.build\.product`, stub.Respond("flame")),
stub.Regex(`adb -s .* shell getprop ro\.build\.version\.release`, stub.Respond("10")),
stub.Regex(`adb -s .* shell getprop ro\.build\.description`, stub.Respond("flame-user 10 QQ1A.191003.005 5926727 release-keys")),
stub.Regex(`adb -s .* shell getprop ro\.product\.cpu\.abi`, stub.Respond("arm64-v8a")),
stub.Regex(`adb -s .* shell getprop ro\.build\.version\.sdk`, stub.Respond("29")),
stub.Regex(`adb -s .* shell setprop persist\.traced\.enable 1`, stub.Respond("")),
stub.Regex(`adb -s .* shell input .*`, stub.Respond("")),
)
}
// expectedCommand uses the standard response for an unexpected command to the stub in order to check the command itself
// was as expected.
func expectedCommand(ctx context.Context, expect string, err error) {
	// The stub target reports any command it has no handler for as
	// "unmatched:<command>", so matching that message proves the exact
	// command line that was issued.
	assert.For(ctx, "Expected an unmatched command").
		ThatError(err).HasMessage(fmt.Sprintf(`Failed to start process
   Cause: unmatched:%s`, expect))
}
|
package main
import (
"fmt"
"sync"
"time"
)
func fibonacci(id int, c chan<- int, quit <-chan int, wg *sync.WaitGroup) {
defer wg.Done()
x, y := 0, 1
for {
select {
case c <- x:
fmt.Println("Sent next value!", id)
x, y = y, x+y
case <-quit:
fmt.Println("!!!quiting...!!! ->", id)
return
default:
fmt.Println("---Waiting for receiver...", id)
time.Sleep(time.Millisecond * 900)
}
}
}
// main runs two competing Fibonacci producers feeding one consumer that
// takes ten values, then shuts both producers down and waits for them.
func main() {
	c := make(chan int)    // Data channel
	quit := make(chan int) // Signal channel
	go func() {
		defer fmt.Println("Terminating consumer...")
		for i := 0; i < 10; i++ {
			fmt.Println("Received", i, "th value: ", <-c)
			time.Sleep(1 * time.Second)
		}
		// Bug fix: the original sent a single value on quit, which
		// stopped only one of the two producers; the other spun in its
		// default branch forever, so wg.Wait() never returned. Closing
		// the channel makes the quit case ready for every producer.
		close(quit)
	}()
	var wg sync.WaitGroup
	wg.Add(2)
	go fibonacci(1, c, quit, &wg) // Producer
	go fibonacci(2, c, quit, &wg) // Producer
	wg.Wait()
}
|
package main
import (
"flag"
)
// input holds the command-line options for the crawler.
type input struct {
	Url string // start URL to crawl
	Max int    // maximum number of requests to make
}

// Parse registers the -url and -max flags, parses the command line, and
// stores the results in inp. It calls flag.Parse, so it must run before
// any other flag registration/parsing.
func (inp *input) Parse() {
	url := flag.String("url", "http://www.google.com", "the url of the site to be crawled")
	max := flag.Int("max", 1, "max number of requests to make")
	flag.Parse()
	inp.Url = *url
	inp.Max = *max
}

// Success reports whether the parsed options are usable: a non-empty URL
// and a positive request budget.
func (inp input) Success() bool {
	return inp.Url != "" && inp.Max > 0
}
|
package sneat
// genome is a NEAT-style genotype: link genes and neuron genes keyed by
// innovation id, plus the fitness scores and species assignment.
type genome struct {
	numInputs, numOutputs, species int
	l                              map[int]linkGene   // link genes by innovation id
	n                              map[int]neuronGene // neuron genes by innovation id
	fitness, fitnessAdjusted       float64
}
// isLinkDuplicated reports whether the genome already has a link gene
// connecting neuron in to neuron out.
func (g *genome) isLinkDuplicated(in, out int) bool {
	for _, link := range g.l {
		if link.from == in && link.to == out {
			return true
		}
	}
	return false
}
// length returns the total gene count: links plus neurons.
func (g *genome) length() int {
	return len(g.l) + len(g.n)
}
// newLink inserts a link gene under innovation id connecting neuron in
// to neuron out with the given enabled/recurrent flags, mutation step
// and weight. An existing gene with the same id is overwritten.
func (g *genome) newLink(id, in, out int, enabled, recurrent bool, mutstep, weight float64) {
	var l linkGene
	l.from = in
	l.to = out
	l.enabled = enabled
	l.mutstep = mutstep
	l.recurrent = recurrent
	l.weight = weight
	g.l[id] = l
}
// newNeuron inserts a neuron gene under innovation id with the given
// kind, recurrence flag and layout position (x, y). An existing gene
// with the same id is overwritten.
// (gofmt fix: the receiver was written "(g* genome)".)
func (g *genome) newNeuron(id int, kind neuronKind, recurrent bool, x, y float64) {
	var n neuronGene
	n.kind = kind
	n.recurrent = recurrent
	n.x, n.y = x, y
	g.n[id] = n
}
// newMinimalGenome builds a genome with the minimal architecture: one
// bias neuron, an input row at y=0 and an output row at y=1, then lets
// addLink wire it up. Fitness starts at -1 and the genome belongs to no
// species (-1). (gofmt fix: loop clauses were unspaced, "i:=0; i<n".)
func newMinimalGenome(inputs, outputs int) *genome {
	g := new(genome)
	g.fitness = -1.0
	g.numInputs = inputs
	g.numOutputs = outputs
	g.species = -1
	g.l = make(map[int]linkGene)
	g.n = make(map[int]neuronGene)
	// Minimal architecture:
	// bias neuron, placed left of the input row.
	g.newNeuron(getNeuronInnovation(-3, -1), nBias, false, -1.0, 0.0)
	// input neurons, evenly spaced on row y=0.
	for i := 0; i < inputs; i++ {
		g.newNeuron(getNeuronInnovation(-1, -i), nInput, false, posx(i, inputs), 0.0)
	}
	// output neurons, evenly spaced on row y=1.
	for i := 0; i < outputs; i++ {
		g.newNeuron(getNeuronInnovation(-2, -i), nOutput, false, posx(i, outputs), 1.0)
	}
	//TODO - beautify positions
	g.addLink()
	return g
}
// posx returns the horizontal position for item i of n: the midpoint of
// the i-th of n equal slots spanning [0, 1).
func posx(i, n int) float64 {
	lo := float64(i) / float64(n)
	hi := (float64(i) + 1.0) / float64(n)
	return (lo + hi) * 0.5
}
package lfsapi
import (
"encoding/json"
"fmt"
"io"
"net/http"
"regexp"
"strconv"
"strings"
"sync"
"github.com/ThomsonReutersEikon/go-ntlm/ntlm"
"github.com/git-lfs/git-lfs/config"
"github.com/git-lfs/git-lfs/errors"
)
var (
lfsMediaTypeRE = regexp.MustCompile(`\Aapplication/vnd\.git\-lfs\+json(;|\z)`)
jsonMediaTypeRE = regexp.MustCompile(`\Aapplication/json(;|\z)`)
)
type Client struct {
Endpoints EndpointFinder
Credentials CredentialHelper
SSH SSHResolver
Netrc NetrcFinder
DialTimeout int
KeepaliveTimeout int
TLSTimeout int
ConcurrentTransfers int
HTTPSProxy string
HTTPProxy string
NoProxy string
SkipSSLVerify bool
Verbose bool
DebuggingVerbose bool
VerboseOut io.Writer
hostClients map[string]*http.Client
clientMu sync.Mutex
ntlmSessions map[string]ntlm.ClientSession
ntlmMu sync.Mutex
httpLogger *syncLogger
LoggingStats bool // DEPRECATED
// only used for per-host ssl certs
gitEnv Env
osEnv Env
uc *config.URLConfig
}
// NewClient builds a Client from the OS and git environments. Nil
// environments are replaced with empty TestEnv maps so the zero
// configuration still works (used by tests). It wires up the netrc
// parser, proxy settings, credential helper and SSH resolver, and reads
// the various lfs.* timeout/transfer settings from the git environment.
func NewClient(osEnv Env, gitEnv Env) (*Client, error) {
	if osEnv == nil {
		osEnv = make(TestEnv)
	}
	if gitEnv == nil {
		gitEnv = make(TestEnv)
	}
	netrc, err := ParseNetrc(osEnv)
	if err != nil {
		return nil, err
	}
	httpsProxy, httpProxy, noProxy := getProxyServers(osEnv, gitEnv)
	creds, err := getCredentialHelper(&config.Configuration{
		Os: osEnv, Git: gitEnv})
	if err != nil {
		return nil, errors.Wrap(err, "cannot find credential helper(s)")
	}
	// SSH credential lookups are cached unless lfs.cachecredentials is
	// explicitly disabled.
	var sshResolver SSHResolver = &sshAuthClient{os: osEnv}
	if gitEnv.Bool("lfs.cachecredentials", true) {
		sshResolver = withSSHCache(sshResolver)
	}
	c := &Client{
		Endpoints:           NewEndpointFinder(gitEnv),
		Credentials:         creds,
		SSH:                 sshResolver,
		Netrc:               netrc,
		DialTimeout:         gitEnv.Int("lfs.dialtimeout", 0),
		KeepaliveTimeout:    gitEnv.Int("lfs.keepalive", 0),
		TLSTimeout:          gitEnv.Int("lfs.tlstimeout", 0),
		ConcurrentTransfers: gitEnv.Int("lfs.concurrenttransfers", 3),
		SkipSSLVerify:       !gitEnv.Bool("http.sslverify", true) || osEnv.Bool("GIT_SSL_NO_VERIFY", false),
		Verbose:             osEnv.Bool("GIT_CURL_VERBOSE", false),
		DebuggingVerbose:    osEnv.Bool("LFS_DEBUG_HTTP", false),
		HTTPSProxy:          httpsProxy,
		HTTPProxy:           httpProxy,
		NoProxy:             noProxy,
		gitEnv:              gitEnv,
		osEnv:               osEnv,
		uc:                  config.NewURLConfig(gitEnv),
	}
	return c, nil
}
// GitEnv returns the git environment the client was built with.
func (c *Client) GitEnv() Env {
	return c.gitEnv
}

// OSEnv returns the OS environment the client was built with.
func (c *Client) OSEnv() Env {
	return c.osEnv
}
// IsDecodeTypeError reports whether err is a *decodeTypeError, i.e. an
// HTTP response whose Content-Type was not a JSON media type.
// NOTE(review): this is a direct type assertion and does not unwrap
// wrapped errors.
func IsDecodeTypeError(err error) bool {
	_, ok := err.(*decodeTypeError)
	return ok
}

// decodeTypeError records the unexpected Content-Type of a response.
type decodeTypeError struct {
	Type string
}

// TypeError is a marker method (no behavior).
func (e *decodeTypeError) TypeError() {}

// Error implements the error interface.
func (e *decodeTypeError) Error() string {
	return fmt.Sprintf("Expected json type, got: %q", e.Type)
}
// DecodeJSON decodes the JSON body of res into obj. If the response
// Content-Type is neither the git-lfs JSON media type nor plain JSON it
// returns a *decodeTypeError without reading the body.
// NOTE(review): on the type-error path the body is deliberately left
// open (presumably so the caller can still read the raw payload) —
// callers are then responsible for closing it; confirm.
func DecodeJSON(res *http.Response, obj interface{}) error {
	ctype := res.Header.Get("Content-Type")
	if !(lfsMediaTypeRE.MatchString(ctype) || jsonMediaTypeRE.MatchString(ctype)) {
		return &decodeTypeError{Type: ctype}
	}
	// On the JSON path the body is always closed, whether or not the
	// decode succeeds.
	err := json.NewDecoder(res.Body).Decode(obj)
	res.Body.Close()
	if err != nil {
		return errors.Wrapf(err, "Unable to parse HTTP response for %s %s", res.Request.Method, res.Request.URL)
	}
	return nil
}
// Env is an interface for the config.Environment methods that this package
// relies on.
type Env interface {
	Get(string) (string, bool)      // single value lookup
	GetAll(string) []string         // all values for a key
	Int(string, int) int            // integer value with default
	Bool(string, bool) bool         // boolean value with default
	All() map[string][]string       // full key -> values map
}
// UniqTestEnv is an Env backed by a map with a single value per key.
type UniqTestEnv map[string]string

// Get returns the value for key and whether it was present.
func (e UniqTestEnv) Get(key string) (v string, ok bool) {
	v, ok = e[key]
	return
}

// GetAll returns the value for key as a zero- or one-element slice.
func (e UniqTestEnv) GetAll(key string) []string {
	if v, ok := e.Get(key); ok {
		return []string{v}
	}
	return make([]string, 0)
}

// Int returns the value for key parsed as an int, or def when the key is
// missing, empty, or not numeric.
func (e UniqTestEnv) Int(key string, def int) (val int) {
	s, _ := e.Get(key)
	if len(s) == 0 {
		return def
	}
	i, err := strconv.Atoi(s)
	if err != nil {
		return def
	}
	return i
}

// Bool returns the value for key interpreted as a boolean; a missing or
// empty value yields def, any unrecognized value yields false.
func (e UniqTestEnv) Bool(key string, def bool) (val bool) {
	s, _ := e.Get(key)
	if len(s) == 0 {
		return def
	}
	switch strings.ToLower(s) {
	case "true", "1", "on", "yes", "t":
		return true
	default:
		// "false", "0", "off", "no", "f" and anything else.
		return false
	}
}

// All returns the environment as a key -> values map.
// (Idiom fix: the original ranged with "for k, _ := range".)
func (e UniqTestEnv) All() map[string][]string {
	m := make(map[string][]string, len(e))
	for k := range e {
		m[k] = e.GetAll(k)
	}
	return m
}
// TestEnv is a basic config.Environment implementation. Only used in tests, or
// as a zero value to NewClient().
// TestEnv is a basic config.Environment implementation. Only used in
// tests, or as a zero value to NewClient().
type TestEnv map[string][]string

// Get returns the most recently set value for key, if any.
func (e TestEnv) Get(key string) (string, bool) {
	values := e.GetAll(key)
	if len(values) == 0 {
		return "", false
	}
	return values[len(values)-1], true
}

// GetAll returns every value recorded for key.
func (e TestEnv) GetAll(key string) []string {
	return e[key]
}

// Int returns key's value parsed as an int, or def when the key is
// missing, empty, or not numeric.
func (e TestEnv) Int(key string, def int) (val int) {
	s, _ := e.Get(key)
	if len(s) == 0 {
		return def
	}
	n, err := strconv.Atoi(s)
	if err != nil {
		return def
	}
	return n
}

// Bool interprets key's value as a boolean; a missing or empty value
// yields def, any unrecognized value yields false.
func (e TestEnv) Bool(key string, def bool) (val bool) {
	s, _ := e.Get(key)
	if len(s) == 0 {
		return def
	}
	switch strings.ToLower(s) {
	case "true", "1", "on", "yes", "t":
		return true
	default:
		// "false", "0", "off", "no", "f" and anything else.
		return false
	}
}

// All returns the underlying map itself.
func (e TestEnv) All() map[string][]string {
	return e
}
|
package state_system
import (
"encoding/json"
"testing"
)
// TestNewStateTree verifies the zero state of a freshly constructed
// tree: no pending or active state, and an allocated state map.
func TestNewStateTree(t *testing.T) {
	tree := NewStateTree()
	if tree.pendingState != nil {
		t.Error("Pending state was not nil!")
	}
	if tree.activeState != nil {
		t.Error("Active state was not nil!")
	}
	if tree.stateMap == nil {
		t.Error("Tree did not contain a valid Map.")
	}
}
// TestReadJson verifies that a StateTreeDesc unmarshals the name, main
// entry point and (empty) state list from a JSON description.
func TestReadJson(t *testing.T) {
	var jsonBlob = []byte(`{"name": "game", "main": "game_start", "states": []}`)
	var stateDesc StateTreeDesc
	// Bug fix: the original discarded the Unmarshal error, so a
	// malformed fixture surfaced only as confusing field mismatches.
	if err := json.Unmarshal(jsonBlob, &stateDesc); err != nil {
		t.Fatalf("failed to unmarshal state tree JSON: %v", err)
	}
	if stateDesc.Name != "game" {
		t.Error("State tree name was incorrect.")
	}
	if stateDesc.Main != "game_start" {
		t.Error("State tree main entry was incorrect.")
	}
	if len(stateDesc.States) != 0 {
		t.Error("State tree did not contain the expected states.")
	}
}
|
package config
import (
"github.com/spf13/viper"
"fmt"
_ "github.com/go-sql-driver/mysql"
"os"
"path"
)
// Config aggregates database and application settings.
type Config struct {
	DB  *DBConfig
	App *AppConfig
}

// DBConfig holds the database driver name, DSN and charset.
type DBConfig struct {
	Server  string
	DSN     string
	Charset string
}

// AppConfig holds the runtime environment name and the migrations path.
type AppConfig struct {
	Env          string
	MigrationDir string
}
const configPath = ".config/github.com/MetalRex101/auth-server"
const projectPath = "src/github.com/MetalRex101/auth-server"
const configName = "config"
const migrationDir = "migrations"
// GetConfig loads the viper configuration for appEnv from either
// $HOME/<configPath> or $GOPATH/<projectPath>, falling back to defaults
// when no config file is found, and returns the assembled Config.
func GetConfig(appEnv string) *Config {
	configDir := path.Join(os.Getenv("HOME"), configPath)
	projectDir := path.Join(os.Getenv("GOPATH"), projectPath)
	// Local migrationDir shadows the package constant; the RHS reads the
	// constant before the new variable exists, so this is well-defined.
	migrationDir := path.Join(projectDir, migrationDir)
	// Bug fix: the MkdirAll error was silently discarded; report it so a
	// missing/unwritable config dir is diagnosable (the project dir and
	// defaults below still act as fallbacks).
	if err := os.MkdirAll(configDir, os.ModePerm); err != nil {
		fmt.Printf("Could not create config dir %s: %v\n", configDir, err)
	}
	viper.SetConfigName(configName)
	viper.AddConfigPath(configDir)
	viper.AddConfigPath(projectDir)
	if err := viper.ReadInConfig(); err != nil {
		fmt.Println("No configuration file loaded - using defaults")
		setDefaults()
	}
	return &Config{
		DB: &DBConfig{
			Server:  "mysql",
			DSN:     viper.GetString(fmt.Sprintf("%s.datasource", appEnv)),
			Charset: "utf8",
		},
		App: &AppConfig{
			Env:          appEnv,
			MigrationDir: migrationDir,
		},
	}
}
// If no config is found, set the default(s)
func setDefaults() {
	// Mysql defaults
	viper.SetDefault("db_host", "127.0.0.1")
	viper.SetDefault("db_database", "api-server")
	viper.SetDefault("db_username", "root")
	// NOTE(review): the password default is the integer 123456, not the
	// string "123456" — viper will stringify it on Get, but confirm this
	// is intentional.
	viper.SetDefault("db_password", 123456)
	viper.SetDefault("db_port", 3306)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.