text stringlengths 11 4.05M |
|---|
package main
import (
"net/http"
"log"
"fmt"
"html"
)
// handleIndex is the root HTTP handler; it writes the lesson welcome
// message to the response.
func handleIndex(w http.ResponseWriter, r *http.Request) {
	const welcome = "Welcome to lesson 1: RethinkDB and Go"
	fmt.Fprint(w, welcome)
}
// main registers the index handler and serves HTTP on port 4567.
// log.Fatal only fires if ListenAndServe fails (e.g. the port is taken).
func main() {
	// Bug fix: http.Handle expects an http.Handler value; passing a bare
	// function does not compile. http.HandleFunc accepts the function
	// directly (equivalent to wrapping it in http.HandlerFunc).
	http.HandleFunc("/", handleIndex)
	log.Fatal(http.ListenAndServe(":4567", nil))
}
|
package main
// isTaggable reports whether the given AWS resource type token supports
// tags, i.e. whether it appears in taggableResourceTypes (exact match).
func isTaggable(t string) bool {
	for i := range taggableResourceTypes {
		if taggableResourceTypes[i] == t {
			return true
		}
	}
	return false
}
// taggableResourceTypes is a list of known AWS type tokens that are taggable.
// Tokens follow the "aws:module/resource:Resource" naming scheme and are
// matched exactly (case-sensitive) by isTaggable.
// NOTE(review): presumably curated from the AWS provider schema — keep the
// list alphabetically sorted when adding entries; verify against the
// provider version in use.
var taggableResourceTypes = []string{
	"aws:accessanalyzer/analyzer:Analyzer",
	"aws:acm/certificate:Certificate",
	"aws:acmpca/certificateAuthority:CertificateAuthority",
	"aws:alb/loadBalancer:LoadBalancer",
	"aws:alb/targetGroup:TargetGroup",
	"aws:apigateway/apiKey:ApiKey",
	"aws:apigateway/clientCertificate:ClientCertificate",
	"aws:apigateway/domainName:DomainName",
	"aws:apigateway/restApi:RestApi",
	"aws:apigateway/stage:Stage",
	"aws:apigateway/usagePlan:UsagePlan",
	"aws:apigateway/vpcLink:VpcLink",
	"aws:applicationloadbalancing/loadBalancer:LoadBalancer",
	"aws:applicationloadbalancing/targetGroup:TargetGroup",
	"aws:appmesh/mesh:Mesh",
	"aws:appmesh/route:Route",
	"aws:appmesh/virtualNode:VirtualNode",
	"aws:appmesh/virtualRouter:VirtualRouter",
	"aws:appmesh/virtualService:VirtualService",
	"aws:appsync/graphQLApi:GraphQLApi",
	"aws:athena/workgroup:Workgroup",
	"aws:autoscaling/group:Group",
	"aws:backup/plan:Plan",
	"aws:backup/vault:Vault",
	"aws:cfg/aggregateAuthorization:AggregateAuthorization",
	"aws:cfg/configurationAggregator:ConfigurationAggregator",
	"aws:cfg/rule:Rule",
	"aws:cloudformation/stack:Stack",
	"aws:cloudformation/stackSet:StackSet",
	"aws:cloudfront/distribution:Distribution",
	"aws:cloudhsmv2/cluster:Cluster",
	"aws:cloudtrail/trail:Trail",
	"aws:cloudwatch/eventRule:EventRule",
	"aws:cloudwatch/logGroup:LogGroup",
	"aws:cloudwatch/metricAlarm:MetricAlarm",
	"aws:codebuild/project:Project",
	"aws:codecommit/repository:Repository",
	"aws:codepipeline/pipeline:Pipeline",
	"aws:codepipeline/webhook:Webhook",
	"aws:codestarnotifications/notificationRule:NotificationRule",
	"aws:cognito/identityPool:IdentityPool",
	"aws:cognito/userPool:UserPool",
	"aws:datapipeline/pipeline:Pipeline",
	"aws:datasync/agent:Agent",
	"aws:datasync/efsLocation:EfsLocation",
	"aws:datasync/locationSmb:LocationSmb",
	"aws:datasync/nfsLocation:NfsLocation",
	"aws:datasync/s3Location:S3Location",
	"aws:datasync/task:Task",
	"aws:dax/cluster:Cluster",
	"aws:directconnect/connection:Connection",
	"aws:directconnect/hostedPrivateVirtualInterfaceAccepter:HostedPrivateVirtualInterfaceAccepter",
	"aws:directconnect/hostedPublicVirtualInterfaceAccepter:HostedPublicVirtualInterfaceAccepter",
	"aws:directconnect/hostedTransitVirtualInterfaceAcceptor:HostedTransitVirtualInterfaceAcceptor",
	"aws:directconnect/linkAggregationGroup:LinkAggregationGroup",
	"aws:directconnect/privateVirtualInterface:PrivateVirtualInterface",
	"aws:directconnect/publicVirtualInterface:PublicVirtualInterface",
	"aws:directconnect/transitVirtualInterface:TransitVirtualInterface",
	"aws:directoryservice/directory:Directory",
	"aws:dlm/lifecyclePolicy:LifecyclePolicy",
	"aws:dms/endpoint:Endpoint",
	"aws:dms/replicationInstance:ReplicationInstance",
	"aws:dms/replicationSubnetGroup:ReplicationSubnetGroup",
	"aws:dms/replicationTask:ReplicationTask",
	"aws:docdb/cluster:Cluster",
	"aws:docdb/clusterInstance:ClusterInstance",
	"aws:docdb/clusterParameterGroup:ClusterParameterGroup",
	"aws:docdb/subnetGroup:SubnetGroup",
	"aws:dynamodb/table:Table",
	"aws:ebs/snapshot:Snapshot",
	"aws:ebs/snapshotCopy:SnapshotCopy",
	"aws:ebs/volume:Volume",
	"aws:ec2/ami:Ami",
	"aws:ec2/amiCopy:AmiCopy",
	"aws:ec2/amiFromInstance:AmiFromInstance",
	"aws:ec2/capacityReservation:CapacityReservation",
	"aws:ec2/customerGateway:CustomerGateway",
	"aws:ec2/defaultNetworkAcl:DefaultNetworkAcl",
	"aws:ec2/defaultRouteTable:DefaultRouteTable",
	"aws:ec2/defaultSecurityGroup:DefaultSecurityGroup",
	"aws:ec2/defaultSubnet:DefaultSubnet",
	"aws:ec2/defaultVpc:DefaultVpc",
	"aws:ec2/defaultVpcDhcpOptions:DefaultVpcDhcpOptions",
	"aws:ec2/eip:Eip",
	"aws:ec2/fleet:Fleet",
	"aws:ec2/instance:Instance",
	"aws:ec2/internetGateway:InternetGateway",
	"aws:ec2/keyPair:KeyPair",
	"aws:ec2/launchTemplate:LaunchTemplate",
	"aws:ec2/natGateway:NatGateway",
	"aws:ec2/networkAcl:NetworkAcl",
	"aws:ec2/networkInterface:NetworkInterface",
	"aws:ec2/placementGroup:PlacementGroup",
	"aws:ec2/routeTable:RouteTable",
	"aws:ec2/securityGroup:SecurityGroup",
	"aws:ec2/spotInstanceRequest:SpotInstanceRequest",
	"aws:ec2/subnet:Subnet",
	"aws:ec2/vpc:Vpc",
	"aws:ec2/vpcDhcpOptions:VpcDhcpOptions",
	"aws:ec2/vpcEndpoint:VpcEndpoint",
	"aws:ec2/vpcEndpointService:VpcEndpointService",
	"aws:ec2/vpcPeeringConnection:VpcPeeringConnection",
	"aws:ec2/vpcPeeringConnectionAccepter:VpcPeeringConnectionAccepter",
	"aws:ec2/vpnConnection:VpnConnection",
	"aws:ec2/vpnGateway:VpnGateway",
	"aws:ec2clientvpn/endpoint:Endpoint",
	"aws:ec2transitgateway/routeTable:RouteTable",
	"aws:ec2transitgateway/transitGateway:TransitGateway",
	"aws:ec2transitgateway/vpcAttachment:VpcAttachment",
	"aws:ec2transitgateway/vpcAttachmentAccepter:VpcAttachmentAccepter",
	"aws:ecr/repository:Repository",
	"aws:ecs/capacityProvider:CapacityProvider",
	"aws:ecs/cluster:Cluster",
	"aws:ecs/service:Service",
	"aws:ecs/taskDefinition:TaskDefinition",
	"aws:efs/fileSystem:FileSystem",
	"aws:eks/cluster:Cluster",
	"aws:eks/fargateProfile:FargateProfile",
	"aws:eks/nodeGroup:NodeGroup",
	"aws:elasticache/cluster:Cluster",
	"aws:elasticache/replicationGroup:ReplicationGroup",
	"aws:elasticbeanstalk/application:Application",
	"aws:elasticbeanstalk/applicationVersion:ApplicationVersion",
	"aws:elasticbeanstalk/environment:Environment",
	"aws:elasticloadbalancing/loadBalancer:LoadBalancer",
	"aws:elasticloadbalancingv2/loadBalancer:LoadBalancer",
	"aws:elasticloadbalancingv2/targetGroup:TargetGroup",
	"aws:elasticsearch/domain:Domain",
	"aws:elb/loadBalancer:LoadBalancer",
	"aws:emr/cluster:Cluster",
	"aws:fsx/lustreFileSystem:LustreFileSystem",
	"aws:fsx/windowsFileSystem:WindowsFileSystem",
	"aws:gamelift/alias:Alias",
	"aws:gamelift/build:Build",
	"aws:gamelift/fleet:Fleet",
	"aws:gamelift/gameSessionQueue:GameSessionQueue",
	"aws:glacier/vault:Vault",
	"aws:glue/crawler:Crawler",
	"aws:glue/job:Job",
	"aws:glue/trigger:Trigger",
	"aws:iam/role:Role",
	"aws:iam/user:User",
	"aws:inspector/resourceGroup:ResourceGroup",
	"aws:kinesis/analyticsApplication:AnalyticsApplication",
	"aws:kinesis/firehoseDeliveryStream:FirehoseDeliveryStream",
	"aws:kinesis/stream:Stream",
	"aws:kms/externalKey:ExternalKey",
	"aws:kms/key:Key",
	"aws:lambda/function:Function",
	"aws:lb/loadBalancer:LoadBalancer",
	"aws:lb/targetGroup:TargetGroup",
	"aws:licensemanager/licenseConfiguration:LicenseConfiguration",
	"aws:lightsail/instance:Instance",
	"aws:mediaconvert/queue:Queue",
	"aws:mediapackage/channel:Channel",
	"aws:mediastore/container:Container",
	"aws:mq/broker:Broker",
	"aws:mq/configuration:Configuration",
	"aws:msk/cluster:Cluster",
	"aws:neptune/cluster:Cluster",
	"aws:neptune/clusterInstance:ClusterInstance",
	"aws:neptune/clusterParameterGroup:ClusterParameterGroup",
	"aws:neptune/eventSubscription:EventSubscription",
	"aws:neptune/parameterGroup:ParameterGroup",
	"aws:neptune/subnetGroup:SubnetGroup",
	"aws:opsworks/stack:Stack",
	"aws:organizations/account:Account",
	"aws:pinpoint/app:App",
	"aws:qldb/ledger:Ledger",
	"aws:ram/resourceShare:ResourceShare",
	"aws:rds/cluster:Cluster",
	"aws:rds/clusterEndpoint:ClusterEndpoint",
	"aws:rds/clusterInstance:ClusterInstance",
	"aws:rds/clusterParameterGroup:ClusterParameterGroup",
	"aws:rds/clusterSnapshot:ClusterSnapshot",
	"aws:rds/eventSubscription:EventSubscription",
	"aws:rds/instance:Instance",
	"aws:rds/optionGroup:OptionGroup",
	"aws:rds/parameterGroup:ParameterGroup",
	"aws:rds/securityGroup:SecurityGroup",
	"aws:rds/snapshot:Snapshot",
	"aws:rds/subnetGroup:SubnetGroup",
	"aws:redshift/cluster:Cluster",
	"aws:redshift/eventSubscription:EventSubscription",
	"aws:redshift/parameterGroup:ParameterGroup",
	"aws:redshift/snapshotCopyGrant:SnapshotCopyGrant",
	"aws:redshift/snapshotSchedule:SnapshotSchedule",
	"aws:redshift/subnetGroup:SubnetGroup",
	"aws:resourcegroups/group:Group",
	"aws:route53/healthCheck:HealthCheck",
	"aws:route53/resolverEndpoint:ResolverEndpoint",
	"aws:route53/resolverRule:ResolverRule",
	"aws:route53/zone:Zone",
	"aws:s3/bucket:Bucket",
	"aws:s3/bucketObject:BucketObject",
	"aws:sagemaker/endpoint:Endpoint",
	"aws:sagemaker/endpointConfiguration:EndpointConfiguration",
	"aws:sagemaker/model:Model",
	"aws:sagemaker/notebookInstance:NotebookInstance",
	"aws:secretsmanager/secret:Secret",
	"aws:servicecatalog/portfolio:Portfolio",
	"aws:sfn/activity:Activity",
	"aws:sfn/stateMachine:StateMachine",
	"aws:sns/topic:Topic",
	"aws:sqs/queue:Queue",
	"aws:ssm/activation:Activation",
	"aws:ssm/document:Document",
	"aws:ssm/maintenanceWindow:MaintenanceWindow",
	"aws:ssm/parameter:Parameter",
	"aws:ssm/patchBaseline:PatchBaseline",
	"aws:storagegateway/cachesIscsiVolume:CachesIscsiVolume",
	"aws:storagegateway/gateway:Gateway",
	"aws:storagegateway/nfsFileShare:NfsFileShare",
	"aws:storagegateway/smbFileShare:SmbFileShare",
	"aws:swf/domain:Domain",
	"aws:transfer/server:Server",
	"aws:transfer/user:User",
	"aws:waf/rateBasedRule:RateBasedRule",
	"aws:waf/rule:Rule",
	"aws:waf/ruleGroup:RuleGroup",
	"aws:waf/webAcl:WebAcl",
	"aws:wafregional/rateBasedRule:RateBasedRule",
	"aws:wafregional/rule:Rule",
	"aws:wafregional/ruleGroup:RuleGroup",
	"aws:wafregional/webAcl:WebAcl",
	"aws:workspaces/directory:Directory",
	"aws:workspaces/ipGroup:IpGroup",
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This is a base virtual connector, meaning that this connector
// does not(!) connect to any real hardware.
package virtual
import (
"github.com/dirkjabl/bricker/connector"
"github.com/dirkjabl/bricker/event"
"github.com/dirkjabl/bricker/util/hash"
)
// GeneratorFunc is the generator type: a function that consumes an event
// and produces a response event. A nil result means "no result" — nothing
// is delivered to the connector's receive channel.
type GeneratorFunc func(*event.Event) *event.Event
// Virtual is the virtual connector. To use it, create one with New and
// attach generators. The connector accepts any event handed to Send and
// invokes the generator whose hash matches the incoming packet; matching
// works with hashes (hash.Hash). A fallback generator handles unmatched
// events and can be overwritten via AttachFallbackGenerator.
type Virtual struct {
	receive   chan *event.Event           // buffered queue of generated result events
	generator map[hash.Hash]GeneratorFunc // hash -> generator lookup table
	fallback  GeneratorFunc               // invoked when no generator matches
	serial    *connector.Sequence         // sequence-number source for outgoing packets
}
// New creates a virtual connector with a buffered receive channel and an
// empty generator table; the fallback starts out as the no-op Fallback.
func New() *Virtual {
	conn := &Virtual{
		receive:   make(chan *event.Event, 20),
		generator: make(map[hash.Hash]GeneratorFunc),
		serial:    new(connector.Sequence),
	}
	conn.DetachFallbackGenerator()
	return conn
}
// AttachGenerator adds a new generator to the connector.
// If a generator already exists for the same hash, it is overwritten.
func (v *Virtual) AttachGenerator(h hash.Hash, f GeneratorFunc) {
	v.generator[h] = f
}
// AttachFallbackGenerator replaces the current fallback generator with
// the given one. The fallback handles events no registered generator matches.
func (v *Virtual) AttachFallbackGenerator(f GeneratorFunc) {
	v.fallback = f
}
// DetachGenerator removes the generator registered under the given hash.
// Removing a hash that is not registered is a no-op.
func (v *Virtual) DetachGenerator(h hash.Hash) {
	delete(v.generator, h)
}
// DetachFallbackGenerator resets the fallback generator to the base
// fallback generator (Fallback), which has no functionality.
func (v *Virtual) DetachFallbackGenerator() {
	v.AttachFallbackGenerator(Fallback)
}
// Send hands the event to the generator found for it. Before dispatch,
// an attached packet gets a fresh sequence number and a recomputed
// length. A non-nil generator result is queued on the receive channel.
func (v *Virtual) Send(e *event.Event) {
	if e == nil {
		return // nothing to process
	}
	if p := e.Packet; p != nil {
		p.Head.SetSequence(v.serial.GetSequence())
		p.Head.Length = p.ComputeLength()
	}
	if result := v.getGen(e)(e); result != nil {
		v.receive <- result
	}
}
// Receive blocks until an event is available (synchronous read). It
// returns nil once the connector is done and the channel is closed.
func (v *Virtual) Receive() *event.Event {
	if e, ok := <-v.receive; ok {
		return e
	}
	return nil // channel closed: connector finished
}
// Done detaches all generators, resets the fallback generator and closes
// the receive channel. The virtual connector must not be used afterwards.
func (v *Virtual) Done() {
	// Idiom fix: `for h := range` instead of `for h, _ := range`
	// (staticcheck S1005). Deleting map entries while ranging is safe in Go.
	for h := range v.generator {
		v.DetachGenerator(h)
	}
	v.DetachFallbackGenerator()
	close(v.receive)
}
// Fallback is the basic fallback generator: it ignores the incoming
// event and yields no result (nil), so unmatched events are dropped.
func Fallback(_ *event.Event) *event.Event {
	return nil
}
// getGen finds the generator to run for this event by probing every hash
// variant; when none matches it returns the connector's fallback.
func (v *Virtual) getGen(e *event.Event) GeneratorFunc {
	// Bug fix 1: Send tolerates events without a packet, but this method
	// previously dereferenced e.Packet unconditionally and would panic.
	if e == nil || e.Packet == nil {
		return v.fallback
	}
	for _, c := range hash.All() {
		h := hash.New(c, e.Packet.Head.Uid, e.Packet.Head.FunctionID)
		if f, ok := v.generator[h]; ok {
			return f
		}
	}
	// Bug fix 2: return the configured fallback instead of the package-level
	// Fallback; otherwise AttachFallbackGenerator had no effect.
	return v.fallback
}
|
/*Project Euler - Problem 20
n! means n × (n−1) × ... × 3 × 2 × 1
For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
*/
package euler
import (
"fmt"
"math/big"
"strconv"
)
// Euler020 computes the sum of the decimal digits of 100! and returns the
// sum formatted as a string.
func Euler020() string {
	answer := BigFactorial(big.NewInt(100))
	// Sum the digits via the decimal string representation.
	var digitSum int64
	for _, d := range answer.String() {
		// Idiom fixes: Go naming (digitSum, not dig_sum) and an explicit
		// base 10 instead of base 0 (prefix-sniffing is pointless for a
		// single decimal digit). Every rune of a big.Int's decimal string
		// is '0'..'9', so the parse error is impossible here.
		n, _ := strconv.ParseInt(string(d), 10, 64)
		digitSum += n
	}
	return fmt.Sprint(digitSum)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package perfboot
import (
"bufio"
"context"
"io/ioutil"
"regexp"
"strconv"
"strings"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/chrome"
"chromiumos/tast/shutil"
"chromiumos/tast/testing"
)
// GetPerfValues parses ARC logcat event entries and extracts performance
// metrics for the Android boot flow. The returned map is keyed by logcat
// event tag (boot_progress_*) and holds each event's elapsed time relative
// to the ARC start time.
func GetPerfValues(ctx context.Context, tconn *chrome.TestConn, a *arc.ARC) (map[string]time.Duration, error) {
	const (
		logcatTimeout = 30 * time.Second
		// logcatLastEventTag is the last event tag to be processed.
		// The test should read logcat until this tag appears.
		logcatLastEventTag = "boot_progress_enable_screen"
		// logcatIgnoreEventTag is a logcat event tag to be ignored.
		// TODO(niwa): Clean this up after making PerfBoot reboot DUT.
		// (Using time of boot_progress_system_run makes sense only after rebooting DUT.)
		logcatIgnoreEventTag = "boot_progress_system_run"
	)
	// logcatEventEntryRegexp extracts the boot progress event name and time
	// (milliseconds) from a logcat entry.
	// NOTE(review): compiled on every call; could be hoisted to package scope.
	logcatEventEntryRegexp := regexp.MustCompile(`\d+ I (boot_progress_[^:]+): (\d+)`)
	var arcStartTimeMS float64
	if err := tconn.Eval(ctx, "tast.promisify(chrome.autotestPrivate.getArcStartTime)()", &arcStartTimeMS); err != nil {
		return nil, errors.Wrap(err, "failed to run getArcStartTime()")
	}
	adjustedArcStartTime := time.Duration(arcStartTimeMS * float64(time.Millisecond))
	testing.ContextLogf(ctx, "ARC start time in host clock: %fs", adjustedArcStartTime.Seconds())
	vmEnabled, err := arc.VMEnabled()
	if err != nil {
		return nil, errors.Wrap(err, "failed to check whether ARCVM is enabled")
	}
	if vmEnabled {
		clockDelta, err := clockDelta(ctx)
		if err != nil {
			return nil, errors.Wrap(err, "failed to obtain clock delta")
		}
		// Guest clock and host clock are different on ARCVM, so we adjust ARC start time.
		// adjustedArcStartTime is expected to be a negative value.
		adjustedArcStartTime -= clockDelta
		testing.ContextLogf(ctx, "ARC start time in guest clock: %fs", adjustedArcStartTime.Seconds())
	}
	// Set timeout for the logcat command below.
	ctx, cancel := context.WithTimeout(ctx, logcatTimeout)
	defer cancel()
	cmd := a.Command(ctx, "logcat", "-b", "events", "-v", "threadtime")
	cmdStr := shutil.EscapeSlice(cmd.Args)
	pipe, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.Wrapf(err, "failed to obtain a pipe for %s", cmdStr)
	}
	if err := cmd.Start(); err != nil {
		return nil, errors.Wrapf(err, "failed to start %s", cmdStr)
	}
	// Ensure logcat does not outlive this function; Wait reaps the process.
	defer func() {
		cmd.Kill()
		cmd.Wait()
	}()
	p := make(map[string]time.Duration)
	lastEventSeen := false
	testing.ContextLog(ctx, "Scanning logcat output")
	scanner := bufio.NewScanner(pipe)
	for scanner.Scan() {
		l := scanner.Text()
		m := logcatEventEntryRegexp.FindStringSubmatch(l)
		if m == nil {
			continue // not a boot_progress event entry
		}
		eventTag := m[1]
		if eventTag == logcatIgnoreEventTag {
			continue
		}
		eventTimeMs, err := strconv.ParseInt(m[2], 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to extract event time from %q", l)
		}
		// Event time is in guest milliseconds; normalize to ARC start.
		p[eventTag] = time.Duration(eventTimeMs*int64(time.Millisecond)) - adjustedArcStartTime
		if eventTag == logcatLastEventTag {
			lastEventSeen = true
			break
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, errors.Wrap(err, "error while scanning logcat")
	}
	if !lastEventSeen {
		// The context timeout (logcatTimeout) terminates logcat, which ends
		// the scan without the final tag having been observed.
		return nil, errors.Errorf("timeout while waiting for event %q to appear in logcat",
			logcatLastEventTag)
	}
	return p, nil
}
// clockDelta returns (the host's CLOCK_MONOTONIC - the guest's
// CLOCK_MONOTONIC) as time.Duration.
func clockDelta(ctx context.Context) (time.Duration, error) {
	// /proc/timer_list contains a line which says "now at %Ld nsecs".
	// This clock value comes from CLOCK_MONOTONIC (see the kernel's kernel/time/timer_list.c).
	parse := func(output string) (int64, error) {
		for _, line := range strings.Split(output, "\n") {
			tokens := strings.Split(line, " ")
			if len(tokens) == 4 && tokens[0] == "now" && tokens[1] == "at" && tokens[3] == "nsecs" {
				return strconv.ParseInt(tokens[2], 10, 64)
			}
		}
		return 0, errors.Errorf("unexpected format of /proc/timer_list: %q", output)
	}
	// Use android-sh to read /proc/timer_list which only root can read.
	out, err := arc.BootstrapCommand(ctx, "/system/bin/cat", "/proc/timer_list").Output(testexec.DumpLogOnError)
	if err != nil {
		return 0, errors.Wrap(err, "failed to read guest's /proc/timer_list")
	}
	guestClockNS, err := parse(string(out))
	if err != nil {
		// Bug fix: error message typo "prase" -> "parse".
		return 0, errors.Wrap(err, "failed to parse guest's /proc/timer_list")
	}
	out, err = ioutil.ReadFile("/proc/timer_list")
	if err != nil {
		return 0, errors.Wrap(err, "failed to read host's /proc/timer_list")
	}
	hostClockNS, err := parse(string(out))
	if err != nil {
		// Bug fix: error message typo "prase" -> "parse".
		return 0, errors.Wrap(err, "failed to parse host's /proc/timer_list")
	}
	testing.ContextLogf(ctx, "Host clock: %d ns, Guest clock: %d ns", hostClockNS, guestClockNS)
	return time.Duration((hostClockNS - guestClockNS) * int64(time.Nanosecond)), nil
}
|
package toolbox
import (
"fmt"
"github.com/stretchr/testify/assert"
"strings"
"testing"
)
// TestIsASCIIText checks IsASCIIText against plain text, JSON-like text,
// multi-line JSON, and binary (NUL-containing) input.
func TestIsASCIIText(t *testing.T) {
	var useCases = []struct {
		Description string
		Candidate   string
		Expected    bool
	}{
		{
			Description: "basic text",
			Candidate:   `abc`,
			Expected:    true,
		},
		{
			Description: "JSON object like text",
			Candidate:   `{"k1"}`,
			Expected:    true,
		},
		{
			Description: "JSON array like text",
			Candidate:   `["$k1"]`,
			Expected:    true,
		},
		{
			// A NUL byte is ASCII but not text; expected to be rejected.
			Description: "bin data",
			Candidate:   "\u0000",
			Expected:    false,
		},
		{
			Description: "JSON text",
			Candidate: `{
	"RepositoryDatastore":"db1",
	"Db": [
		{
			"Name": "db1",
			"Config": {
				"PoolSize": 3,
				"MaxPoolSize": 5,
				"DriverName": "mysql",
				"Descriptor": "[username]:[password]@tcp(127.0.0.1:3306)/db1?parseTime=true",
				"Credentials": "$mysqlCredentials"
			}
		}
	]
}
`,
			Expected: true,
		},
	}
	for _, useCase := range useCases {
		assert.EqualValues(t, useCase.Expected, IsASCIIText(useCase.Candidate), useCase.Description)
	}
}
// TestIsPrintText checks IsPrintText with the same classes of input as
// TestIsASCIIText: printable text passes, NUL-containing data fails.
func TestIsPrintText(t *testing.T) {
	var useCases = []struct {
		Description string
		Candidate   string
		Expected    bool
	}{
		{
			Description: "basic text",
			Candidate:   `abc`,
			Expected:    true,
		},
		{
			Description: "JSON object like text",
			Candidate:   `{"k1"}`,
			Expected:    true,
		},
		{
			Description: "JSON array like text",
			Candidate:   `["$k1"]`,
			Expected:    true,
		},
		{
			// A NUL byte is not printable; expected to be rejected.
			Description: "bin data",
			Candidate:   "\u0000",
			Expected:    false,
		},
		{
			Description: "JSON text",
			Candidate: `{
	"RepositoryDatastore":"db1",
	"Db": [
		{
			"Name": "db1",
			"Config": {
				"PoolSize": 3,
				"MaxPoolSize": 5,
				"DriverName": "mysql",
				"Descriptor": "[username]:[password]@tcp(127.0.0.1:3306)/db1?parseTime=true",
				"Credentials": "mysql"
			}
		}
	]
}
`,
			Expected: true,
		},
	}
	for _, useCase := range useCases {
		assert.EqualValues(t, useCase.Expected, IsPrintText(useCase.Candidate), useCase.Description)
	}
}
// TestTerminatedSplitN builds 9 newline-terminated lines of 35 bytes each
// (32 'x', a space, a digit, '\n' = 315 bytes total) and verifies that
// TerminatedSplitN only cuts at line terminators for various fragment counts.
func TestTerminatedSplitN(t *testing.T) {
	var data = make([]byte, 0)
	for i := 0; i < 9; i++ {
		data = append(data, []byte(fmt.Sprintf("%v %v\n", strings.Repeat("x", 32), i))...)
	}
	text := string(data)
	useCases := []struct {
		description           string
		fragmentCount         int
		expectedFragmentSizes []int
	}{
		{
			description:           "one fragment case",
			fragmentCount:         1,
			expectedFragmentSizes: []int{len(data)},
		},
		{
			description:           "two fragments case",
			fragmentCount:         2,
			expectedFragmentSizes: []int{175, 140},
		},
		{
			description:           "3 fragments case",
			fragmentCount:         3,
			expectedFragmentSizes: []int{140, 140, 35},
		},
		{
			// NOTE(review): only 5 sizes are expected for 7 requested
			// fragments — presumably rounding to whole lines collapses the
			// tail; verify against TerminatedSplitN's contract.
			description:           "7 fragments case",
			fragmentCount:         7,
			expectedFragmentSizes: []int{70, 70, 70, 70, 35},
		},
		{
			description:           "10 fragments case", //no more fragments then lines, so only 9 fragments here
			fragmentCount:         10,
			expectedFragmentSizes: []int{35, 35, 35, 35, 35, 35, 35, 35, 35},
		},
	}
	for _, useCase := range useCases {
		fragments := TerminatedSplitN(text, useCase.fragmentCount, "\n")
		var actualFragmentSizes = make([]int, len(fragments))
		for i, fragment := range fragments {
			actualFragmentSizes[i] = len(fragment)
		}
		assert.EqualValues(t, useCase.expectedFragmentSizes, actualFragmentSizes, useCase.description)
	}
}
|
/*
A handful of examples using the standard builtin types.
*/
package types
|
package handlers
import (
"net/http"
"strconv"
"github.com/abhinavdwivedi440/microservices/data"
"github.com/gorilla/mux"
)
// DeleteProduct handles DELETE requests: it removes the product whose id
// appears in the URL path. Responds 404 when the product does not exist
// and 500 on any other data-layer failure.
func (p *Product) DeleteProduct(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// NOTE(review): Atoi error ignored — presumably the mux route pattern
	// constrains "id" to digits; confirm the route registration.
	id, _ := strconv.Atoi(vars["id"])
	p.l.Println("Handle delete product", id)

	err := data.DeleteProduct(id)
	if err == data.ErrProductNotFound {
		http.Error(w, "product not found", http.StatusNotFound)
		return
	}
	if err != nil {
		// Bug fix: this branch is an unexpected failure, not a missing
		// product — the message previously said "product not found".
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}
}
|
package conf
import "testing"
// testConfigName is the path of the JSON config fixture loaded by TestNew.
const (
	testConfigName = "/tmp/config.example.json"
)
// TestNew exercises config loading: a bad path must fail, the example
// config must load, expose a non-empty address, and close cleanly.
func TestNew(t *testing.T) {
	if _, err := New("/bad_file_path.json"); err == nil {
		t.Error("unexpected behavior")
	}
	cfg, err := New(testConfigName)
	if err != nil {
		t.Fatal(err)
	}
	if addr := cfg.Addr(); addr == "" {
		t.Error("empty address")
	}
	if err := cfg.Close(); err != nil {
		t.Errorf("close error: %v", err)
	}
}
|
//go:build !windows
package platform
import (
"errors"
"os"
"strings"
"time"
"github.com/shirou/gopsutil/v3/host"
terminal "github.com/wayneashleyberry/terminal-dimensions"
"golang.org/x/sys/unix"
)
// Root reports whether the process runs as root (effective UID 0).
func (env *Shell) Root() bool {
	defer env.Trace(time.Now(), "Root")
	return os.Geteuid() == 0
}
// Home returns the user's home directory as given by $HOME
// (empty when the variable is unset).
func (env *Shell) Home() string {
	return os.Getenv("HOME")
}
// QueryWindowTitles is a Windows-only feature; on this (non-Windows)
// build it always returns NotImplemented.
func (env *Shell) QueryWindowTitles(processName, windowTitleRegex string) (string, error) {
	return "", &NotImplemented{}
}
// IsWsl reports whether we run inside the Windows Subsystem for Linux,
// detected via the WSL_DISTRO_NAME environment variable.
// (An alternative check is "microsoft" in /proc/version, but the env
// variable is cheaper to read.)
func (env *Shell) IsWsl() bool {
	defer env.Trace(time.Now(), "IsWsl")
	return len(env.Getenv("WSL_DISTRO_NAME")) != 0
}
// IsWsl2 reports whether the environment is WSL version 2, by checking
// the kernel release string for the "WSL2" marker.
func (env *Shell) IsWsl2() bool {
	defer env.Trace(time.Now(), "IsWsl2")
	if env.IsWsl() {
		release := env.FileContent("/proc/sys/kernel/osrelease")
		return strings.Contains(release, "WSL2")
	}
	return false
}
// TerminalWidth returns the terminal width in columns, preferring an
// explicit width passed via command-line flags over querying the
// terminal itself. The error (if any) is returned alongside the width.
func (env *Shell) TerminalWidth() (int, error) {
	defer env.Trace(time.Now(), "TerminalWidth")
	if w := env.CmdFlags.TerminalWidth; w != 0 {
		return w, nil
	}
	width, err := terminal.Width()
	if err != nil {
		env.Log(Error, "TerminalWidth", err.Error())
	}
	return int(width), err
}
// Platform identifies the current OS platform (e.g. "ubuntu", "arch",
// "manjaro"). The result is cached forever under a fixed cache key; the
// deferred Set stores whatever value is eventually returned.
func (env *Shell) Platform() string {
	const key = "environment_platform"
	if cached, ok := env.Cache().Get(key); ok {
		return cached
	}
	var platform string
	defer func() {
		env.Cache().Set(key, platform, -1)
	}()
	// Inside WSL the distro name (e.g. "Ubuntu-22.04") identifies the platform.
	if wsl := env.Getenv("WSL_DISTRO_NAME"); len(wsl) != 0 {
		platform = strings.Split(strings.ToLower(wsl), "-")[0]
		return platform
	}
	platform, _, _, _ = host.PlatformInformation()
	if platform == "arch" {
		// Manjaro reports itself as arch; inspect lsb-release to tell them apart.
		lsbInfo := env.FileContent("/etc/lsb-release")
		if strings.Contains(strings.ToLower(lsbInfo), "manjaro") {
			platform = "manjaro"
		}
	}
	return platform
}
// CachePath picks the cache directory: $XDG_CACHE_HOME first, then
// ~/.cache, falling back to the home directory itself.
func (env *Shell) CachePath() string {
	defer env.Trace(time.Now(), "CachePath")
	candidates := []string{
		env.Getenv("XDG_CACHE_HOME"), // XDG spec location, if set
		env.Home() + "/.cache",       // conventional HOME cache folder
	}
	for _, candidate := range candidates {
		if cachePath := returnOrBuildCachePath(candidate); len(cachePath) != 0 {
			return cachePath
		}
	}
	return env.Home()
}
// WindowsRegistryKeyValue is a Windows-only feature; on this
// (non-Windows) build it always returns NotImplemented.
func (env *Shell) WindowsRegistryKeyValue(path string) (*WindowsRegistryValue, error) {
	return nil, &NotImplemented{}
}
// InWSLSharedDrive reports whether the working directory lives on a
// Windows drive shared into WSL2 (i.e. its Windows form is not a
// \\wsl.localhost or \\wsl$ path). Always false outside WSL2.
func (env *Shell) InWSLSharedDrive() bool {
	if !env.IsWsl2() {
		return false
	}
	winPath := env.ConvertToWindowsPath(env.Pwd())
	if strings.HasPrefix(winPath, `//wsl.localhost/`) || strings.HasPrefix(winPath, `//wsl$/`) {
		return false
	}
	return true
}
// ConvertToWindowsPath translates a Linux path to its Windows form via
// `wslpath -m`; on failure the input path is returned unchanged.
func (env *Shell) ConvertToWindowsPath(path string) string {
	if converted, err := env.RunCommand("wslpath", "-m", path); err == nil {
		return converted
	}
	return path
}
// ConvertToLinuxPath translates a Windows path to its Linux form via
// `wslpath -u`; on failure the input path is returned unchanged.
func (env *Shell) ConvertToLinuxPath(path string) string {
	converted, err := env.RunCommand("wslpath", "-u", path)
	if err != nil {
		return path
	}
	return converted
}
// LookWinAppPath is a Windows-only feature and is not relevant on this
// (non-Windows) build; it always returns an error.
func (env *Shell) LookWinAppPath(file string) (string, error) {
	return "", errors.New("not relevant")
}
// DirIsWritable reports whether path is writable by the current user,
// using access(2) with W_OK.
func (env *Shell) DirIsWritable(path string) bool {
	defer env.Trace(time.Now(), "DirIsWritable", path)
	return unix.Access(path, unix.W_OK) == nil
}
// Connection is not implemented on this platform; both branches
// intentionally return NotImplemented. The len check exists only to
// reference env.networks and silence an "unused" lint warning.
func (env *Shell) Connection(connectionType ConnectionType) (*Connection, error) {
	// added to disable the linting error, we can implement this later
	if len(env.networks) == 0 {
		return nil, &NotImplemented{}
	}
	return nil, &NotImplemented{}
}
|
package deleteduplicates
import (
"reflect"
"testing"
)
// TestDeleteDuplicates verifies duplicate removal from a sorted linked
// list for the zero-value, single-node, and multi-duplicate cases.
func TestDeleteDuplicates(t *testing.T) {
	// build turns a non-empty slice of values into a linked list.
	build := func(vals []int) *ListNode {
		head := &ListNode{Val: vals[0]}
		cur := head
		for _, v := range vals[1:] {
			cur.Next = &ListNode{Val: v}
			cur = cur.Next
		}
		return head
	}
	cases := []struct {
		input  []int
		expect []int
	}{
		{[]int{0}, []int{0}}, // zero-value single node
		{[]int{1}, []int{1}},
		{[]int{1, 1, 2}, []int{1, 2}},
		{[]int{1, 1, 2, 3, 3}, []int{1, 2, 3}},
	}
	for _, c := range cases {
		head := deleteDuplicates(build(c.input))
		result := head.Print()
		if !reflect.DeepEqual(result, c.expect) {
			t.Errorf("Get %v, Expect %v", result, c.expect)
		}
	}
}
|
package s3httpfile
import (
"github.com/aws/aws-sdk-go/service/s3"
"os"
"path/filepath"
"time"
)
// s3PrefixFileInfo adapts an S3 common prefix to the os.FileInfo
// interface, presenting the prefix as a read-only directory.
type s3PrefixFileInfo struct {
	*s3.CommonPrefix
}

// Name returns the last path element of the prefix.
func (fi *s3PrefixFileInfo) Name() string {
	return filepath.Base(*fi.CommonPrefix.Prefix)
}

// Size returns -1: a prefix is a virtual directory with no byte size.
func (fi *s3PrefixFileInfo) Size() int64 {
	return -1
}

// Mode marks the entry owner-read-only (0400).
func (fi *s3PrefixFileInfo) Mode() os.FileMode {
	return 0400
}

// ModTime has no S3 equivalent for a prefix, so the current time is reported.
func (fi *s3PrefixFileInfo) ModTime() time.Time {
	return time.Now()
}

// IsDir is always true: a common prefix represents a directory.
func (fi *s3PrefixFileInfo) IsDir() bool {
	return true
}

// Sys returns nil: no underlying data source is exposed.
func (fi *s3PrefixFileInfo) Sys() interface{} {
	return nil
}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package broker_test
import (
"errors"
"log"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pivotal-cf/on-demand-service-broker/boshdirector"
"github.com/pivotal-cf/on-demand-service-broker/service"
)
var _ = Describe("Orphan Deployments", func() {
var (
logger *log.Logger
orphans []string
orphanDeploymentsErr error
)
BeforeEach(func() {
logger = loggerFactory.NewWithRequestID()
b = createDefaultBroker()
})
It("returns an empty list when there are no instances or deployments", func() {
orphans, orphanDeploymentsErr = b.OrphanDeployments(logger)
Expect(orphanDeploymentsErr).NotTo(HaveOccurred())
Expect(orphans).To(BeEmpty())
})
It("returns an empty list when there is no orphan instances", func() {
fakeInstanceLister.InstancesReturns([]service.Instance{{GUID: "one"}}, nil)
boshClient.GetDeploymentsReturns([]boshdirector.Deployment{{Name: "service-instance_one"}}, nil)
orphans, orphanDeploymentsErr = b.OrphanDeployments(logger)
Expect(orphanDeploymentsErr).NotTo(HaveOccurred())
Expect(orphans).To(BeEmpty())
})
It("returns a list of one orphan when there are no instances but one deployment", func() {
boshClient.GetDeploymentsReturns([]boshdirector.Deployment{{Name: "service-instance_one"}}, nil)
orphans, orphanDeploymentsErr = b.OrphanDeployments(logger)
Expect(orphanDeploymentsErr).NotTo(HaveOccurred())
Expect(orphans).To(ConsistOf("service-instance_one"))
})
It("ignores non-odb deployments", func() {
deployments := []boshdirector.Deployment{{Name: "not-a-service-instance"}, {Name: "acme-deployment"}}
boshClient.GetDeploymentsReturns(deployments, nil)
orphans, orphanDeploymentsErr = b.OrphanDeployments(logger)
Expect(orphanDeploymentsErr).NotTo(HaveOccurred())
Expect(orphans).To(BeEmpty())
})
It("logs an error when getting the list of instances fails", func() {
fakeInstanceLister.InstancesReturns([]service.Instance{}, errors.New("error listing instances: listing error"))
orphans, orphanDeploymentsErr = b.OrphanDeployments(logger)
Expect(orphanDeploymentsErr).To(HaveOccurred())
Expect(logBuffer.String()).To(ContainSubstring("error listing instances: listing error"))
})
It("logs an error when getting the list of deployments fails", func() {
boshClient.GetDeploymentsReturns([]boshdirector.Deployment{}, errors.New("error getting deployments: get deployment error"))
orphans, orphanDeploymentsErr = b.OrphanDeployments(logger)
Expect(orphanDeploymentsErr).To(HaveOccurred())
Expect(logBuffer.String()).To(ContainSubstring("error getting deployments: get deployment error"))
})
})
|
package identityservice
import (
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/itsyouonline/identityserver/db"
"github.com/itsyouonline/identityserver/identityservice/company"
"github.com/itsyouonline/identityserver/identityservice/globalconfig"
"github.com/itsyouonline/identityserver/identityservice/organization"
"github.com/itsyouonline/identityserver/identityservice/user"
"github.com/itsyouonline/identityserver/identityservice/userorganization"
"crypto/rand"
"encoding/base64"
log "github.com/Sirupsen/logrus"
)
//Service is the identityserver http service. It is stateless; all routing
//and persistence is delegated to the sub-packages wired up in AddRoutes.
type Service struct {
}
//NewService creates and initializes a Service.
func NewService() *Service {
	service := &Service{}
	return service
}
//AddRoutes registers the http routes with the router and initializes the
//backing models (indexes/collections) for each sub-API.
func (service *Service) AddRoutes(router *mux.Router) {
	// User API
	user.UsersInterfaceRoutes(router, user.UsersAPI{})
	user.InitModels()
	// Company API
	company.CompaniesInterfaceRoutes(router, company.CompaniesAPI{})
	company.InitModels()
	// Organization API (includes the per-user organization routes)
	organization.OrganizationsInterfaceRoutes(router, organization.OrganizationsAPI{})
	userorganization.UsersusernameorganizationsInterfaceRoutes(router, userorganization.UsersusernameorganizationsAPI{})
	organization.InitModels()
}
// generateRandomBytes returns n cryptographically secure random bytes.
// An error is returned only when the system's secure random source fails,
// in which case no bytes are returned at all.
func generateRandomBytes(n int) ([]byte, error) {
	buf := make([]byte, n)
	if _, err := rand.Read(buf); err != nil {
		return nil, err
	}
	// rand.Read guarantees len(buf) bytes were read when err == nil.
	return buf, nil
}
// generateCookieSecret returns a URL-safe base64 string derived from s
// cryptographically random bytes, used as the secret for signed cookies.
func generateCookieSecret(s int) (string, error) {
	raw, err := generateRandomBytes(s)
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(raw), nil
}
// GetCookieSecret gets the cookie secret from mongodb if it exists;
// otherwise it generates a new one and saves it. When a concurrent instance
// wins the insert race, that instance's secret is used instead, so all
// instances end up with the same secret. Any unrecoverable failure panics
// via log.Panic, since the service cannot run without a cookie secret.
func GetCookieSecret() string {
	session := db.GetSession()
	defer session.Close()
	config := globalconfig.NewManager()
	globalconfig.InitModels()
	cookie, err := config.GetByKey("cookieSecret")
	if err != nil {
		log.Debug("No cookie secret found, generating a new one")
		secret, err := generateCookieSecret(32)
		if err != nil {
			log.Panic("Cannot generate secret cookie")
		}
		cookie.Key = "cookieSecret"
		cookie.Value = secret
		err = config.Insert(cookie)
		if db.IsDup(err) {
			// Key was inserted by another instance in the meantime; adopt it.
			cookie, err = config.GetByKey("cookieSecret")
			if err != nil {
				log.Panic("Cannot retreive cookie secret")
			}
		} else if err != nil {
			// BUG fix: a non-duplicate insert error was previously ignored,
			// leaving each instance with a different, unpersisted secret.
			log.Panic("Cannot store cookie secret")
		}
	}
	log.Debug("Cookie secret: ", cookie.Value)
	return cookie.Value
}
// ValidAuthorizationForScopes checks if there is a valid authorization for the requested scopes.
// It reports true only when an authorization of `username` granted to `grantedTo` exists and
// covers every requested scope; lookup failures return valid == false with the error.
func (service *Service) ValidAuthorizationForScopes(r *http.Request, username string, grantedTo string, scopes string) (valid bool, err error) {
	authorization, err := user.NewManager(r).GetAuthorization(username, grantedTo)
	if authorization == nil || err != nil {
		valid = false
		return
	}
	valid = authorization.ScopesAreAuthorized(scopes)
	return
}
// FilterPossibleScopes filters the comma-separated requestedScopes down to
// those that can actually be granted. A `user:memberof:<orgid>` scope is
// only possible when the user is a member or owner of that organization;
// all other scopes pass through unchanged.
func (service *Service) FilterPossibleScopes(r *http.Request, username string, clientID string, requestedScopes string) (possibleScopes string, err error) {
	var granted []string
	orgmgr := organization.NewManager(r)
	for _, raw := range strings.Split(requestedScopes, ",") {
		scope := strings.TrimSpace(raw)
		if !strings.HasPrefix(scope, "user:memberof:") {
			granted = append(granted, scope)
			continue
		}
		orgid := strings.TrimPrefix(scope, "user:memberof:")
		isMember, memberErr := orgmgr.IsMember(orgid, username)
		if memberErr != nil {
			return "", memberErr
		}
		if isMember {
			granted = append(granted, scope)
			continue
		}
		isOwner, ownerErr := orgmgr.IsOwner(orgid, username)
		if ownerErr != nil {
			return "", ownerErr
		}
		if isOwner {
			granted = append(granted, scope)
		}
	}
	possibleScopes = strings.Join(granted, ",")
	return
}
// GetOauthSecret gets the oauth secret from mongodb for a specified service. If it doesn't exist, an error gets logged.
// The stored value (possibly empty) is returned together with the lookup error.
func GetOauthSecret(service string) (string, error) {
	session := db.GetSession()
	defer session.Close()
	config := globalconfig.NewManager()
	globalconfig.InitModels()
	// Secrets live in the globalconfig collection under key "<service>-secret".
	secretModel, err := config.GetByKey(service + "-secret")
	if err != nil {
		log.Errorf("No Oauth secret found for %s. Please insert it into the collection globalconfig with key %s-secret",
			service, service)
	}
	return secretModel.Value, err
}
// GetOauthClientID gets the oauth client id from mongodb for a specified
// service. If it doesn't exist, an error gets logged. The stored value
// (possibly empty) is returned together with the lookup error.
func GetOauthClientID(service string) (string, error) {
	session := db.GetSession()
	defer session.Close()
	config := globalconfig.NewManager()
	globalconfig.InitModels()
	// Client ids live in the globalconfig collection under "<service>-clientid".
	clientIDModel, err := config.GetByKey(service + "-clientid")
	// BUG fix: a leftover log.Warn(clientIDModel.Value) logged the client id
	// on every call; also the doc comment wrongly said "oauth secret".
	if err != nil {
		log.Errorf("No Oauth client id found for %s. Please insert it into the collection globalconfig with key %s-clientid",
			service, service)
	}
	return clientIDModel.Value, err
}
|
package main
import "fmt"
// test receives a copy of the caller's array (Go arrays are value types):
// the printed address is the local copy's, and the mutation below never
// reaches the caller.
func test(x [2]int) {
	fmt.Printf("x: %p\n", &x)
	x[1] = 1000 // mutates only the local copy
}
// main demonstrates Go array value semantics (passing an array copies it)
// and traversal of a multi-dimensional array.
func main() {
	var a [2]int
	fmt.Printf("a: %p\n", &a)
	test(a)
	fmt.Println(a)
	println(len(a), cap(a))
	fmt.Println("多维数组遍历")
	f := [...][3]int{{1, 2, 3}, {7, 8, 9}}
	for row, cols := range f {
		for col, v := range cols {
			fmt.Printf("(%d,%d)=%d ", row, col, v)
		}
		fmt.Println()
	}
}
|
package models
import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kiali/kiali/kubernetes"
)
// QuotaSpecs is a list of QuotaSpec models.
type QuotaSpecs []QuotaSpec
// QuotaSpec models an Istio QuotaSpec resource for the API; Spec.Rules is
// kept untyped, exactly as delivered by the Kubernetes API.
type QuotaSpec struct {
	meta_v1.TypeMeta
	Metadata meta_v1.ObjectMeta `json:"metadata"`
	Spec struct {
		Rules interface{} `json:"rules"`
	} `json:"spec"`
}
// Parse converts each raw Istio object into a QuotaSpec and appends it to
// the receiver list.
func (qss *QuotaSpecs) Parse(quotaSpecs []kubernetes.IstioObject) {
	for _, qs := range quotaSpecs {
		quotaSpec := QuotaSpec{}
		quotaSpec.Parse(qs)
		*qss = append(*qss, quotaSpec)
	}
}
// Parse copies the type meta, object meta and the "rules" entry of the spec
// from the raw Istio object into this QuotaSpec.
func (qs *QuotaSpec) Parse(quotaSpec kubernetes.IstioObject) {
	qs.TypeMeta = quotaSpec.GetTypeMeta()
	qs.Metadata = quotaSpec.GetObjectMeta()
	qs.Spec.Rules = quotaSpec.GetSpec()["rules"]
}
|
package fmap
import (
"fmt"
"log"
"strings"
"github.com/lleo/go-functional-collections/key/hash"
)
// fixedTable is an uncompressed hash-trie table: it has one slot for every
// possible hash index at its depth (hash.IndexLimit slots), unlike the
// compressed sparseTable representation.
type fixedTable struct {
	nodes [hash.IndexLimit]nodeI // nil slot == empty
	depth uint // depth of this table in the trie
	usedSlots uint //numEnts uint
	hashPath hash.Val // hash path down to this table's position
}
// newFixedTable returns an empty fixedTable at the given depth whose
// hashPath is hashVal truncated to that depth.
func newFixedTable(depth uint, hashVal hash.Val) *fixedTable {
	return &fixedTable{
		depth:    depth,
		hashPath: hashVal.HashPath(depth),
	}
}
// copy returns a shallow copy of this table; the slot contents (nodes) are
// shared with the original.
func (t *fixedTable) copy() tableI {
	nt := *t
	return &nt
}
// deepCopy returns a copy of this table in which every sub-table is itself
// deep-copied. Leaves are functional (immutable) and therefore shared, and
// nil slots copy as-is.
func (t *fixedTable) deepCopy() tableI {
	var nt = new(fixedTable)
	nt.hashPath = t.hashPath
	nt.depth = t.depth
	nt.usedSlots = t.usedSlots
	// Cleanup: the commented-out predecessor of this loop was removed; the
	// type switch below is the authoritative implementation.
	for i, n := range t.nodes {
		switch x := n.(type) {
		case tableI:
			nt.nodes[i] = x.deepCopy()
		case leafI:
			// leafI's are functional, so no need to copy them.
			nt.nodes[i] = x
		case nil:
			// nil slots can be copied just fine.
			nt.nodes[i] = x
		default:
			panic("unknown entry in table")
		}
	}
	return nt
}
// equiv compares the *fixedTable to another node by value. This ultimately
// becomes a deep comparison of tables. Every detected mismatch is logged
// before returning false, which makes equiv useful for debugging failures.
func (t *fixedTable) equiv(other nodeI) bool {
	var ot, ok = other.(*fixedTable)
	if !ok {
		log.Println("other is not a *fixedTable")
		return false
	}
	if t.depth != ot.depth {
		log.Printf("t.depth,%d != ot.depth,%d", t.depth, ot.depth)
		return false
	}
	if t.usedSlots != ot.usedSlots {
		log.Printf("t.usedSlots,%d != ot.usedSlots,%d", t.usedSlots, ot.usedSlots)
		return false
	}
	if t.hashPath != ot.hashPath {
		log.Printf("t.hashPath,%s != ot.hashPath,%s", t.hashPath, ot.hashPath)
		return false
	}
	//ok = ok && t.depth == ot.depth
	//ok = ok && t.usedSlots == ot.usedSlots
	//ok = ok && t.hashPath == ot.hashPath
	//if !ok {
	//	log.Printf("t,%s != ot,%s", t, ot)
	//	return false
	//}
	// Slot-by-slot comparison: a nil slot must match a nil slot; non-nil
	// nodes are compared recursively via their own equiv.
	for i, n := range t.nodes {
		if n == nil && n != ot.nodes[i] {
			log.Printf("n == nil && n != ot.nodes[%d],%s", i, ot.nodes[i])
			return false
		}
		if n != nil && !n.equiv(ot.nodes[i]) {
			log.Printf("!n.equiv(ot.nodes[%d])", i)
			return false
		}
	}
	return true
}
// createFixedTable builds a table at the given depth holding the two given
// leaves, which are known to share the hash path down to this depth. When
// both leaves also collide on the index at this depth, the collision is
// pushed one level deeper, or merged into a collision leaf at max depth.
func createFixedTable(depth uint, leaf1 leafI, leaf2 *flatLeaf) tableI {
	if assertOn {
		assertf(depth > 0, "createFixedTable(): depth,%d < 1", depth)
		assertf(leaf1.hash().HashPath(depth) == leaf2.hash().HashPath(depth),
			"createFixedTable(): hp1,%s != hp2,%s",
			leaf1.hash().HashPath(depth),
			leaf2.hash().HashPath(depth))
	}
	var retTable = newFixedTable(depth, leaf1.hash())
	var idx1 = leaf1.hash().Index(depth)
	var idx2 = leaf2.hash().Index(depth)
	if idx1 != idx2 {
		retTable.insertInplace(idx1, leaf1)
		retTable.insertInplace(idx2, leaf2)
	} else { // idx1 == idx2
		var node nodeI
		if depth == hash.MaxDepth {
			// No hash bits remain to distinguish the keys: fall back to a
			// collision leaf holding both key/value lists.
			node = newCollisionLeaf(append(leaf1.keyVals(), leaf2.keyVals()...))
		} else {
			node = createFixedTable(depth+1, leaf1, leaf2)
		}
		retTable.insertInplace(idx1, node)
	}
	return retTable
}
// upgradeToFixedTable builds a fixedTable at the given hashPath/depth,
// populated with the entries of a (formerly sparse) table.
func upgradeToFixedTable(
	hashPath hash.Val,
	depth uint,
	ents []tableEntry,
) *fixedTable {
	ft := &fixedTable{
		hashPath:  hashPath,
		depth:     depth,
		usedSlots: uint(len(ents)),
	}
	for _, ent := range ents {
		ft.nodes[ent.idx] = ent.node
	}
	return ft
}
// hash returns the (incomplete) hash of this table: only the path down to
// this table's depth is meaningful; any levels past its current depth are
// zero.
func (t *fixedTable) hash() hash.Val {
	return t.hashPath
}
// String returns a one-line description of this table with its hashPath,
// depth and number of occupied slots.
func (t *fixedTable) String() string {
	hp := t.hashPath.HashPathString(t.depth)
	return fmt.Sprintf("fixedTable{hashPath=%s, depth=%d, slotsUsed()=%d}", hp, t.depth, t.slotsUsed())
}
// treeString returns a string representation of this table and all the tables
// contained herein recursively.
func (t *fixedTable) treeString(indent string, depth uint) string {
	// Layout: two header lines, one line per occupied slot, closing brace.
	var strs = make([]string, 3+t.slotsUsed())
	strs[0] = indent + "fixedTable{"
	strs[1] = indent + fmt.Sprintf("\thashPath=%s, depth=%d, slotsUsed()=%d,",
		t.hashPath.HashPathString(depth), t.depth, t.slotsUsed())
	var j = 0
	for i, n := range t.nodes {
		if t.nodes[i] != nil {
			// NOTE: the inner `t` deliberately shadows the receiver; inside
			// this if it is the sub-table stored in slot i.
			if t, isTable := t.nodes[i].(tableI); isTable {
				strs[2+j] = indent + fmt.Sprintf("\tnodes[%d]:\n", i) +
					t.treeString(indent+"\t", depth+1)
			} else {
				strs[2+j] = indent + fmt.Sprintf("\tnodes[%d]: %s", i, n)
			}
			j++
		}
	}
	strs[len(strs)-1] = indent + "}"
	return strings.Join(strs, "\n")
}
// slotsUsed returns the number of occupied slots. It is safe to call on a
// nil receiver, which logs the fact and reports zero.
func (t *fixedTable) slotsUsed() uint {
	if t == nil {
		log.Printf("t,%#p.slotsUsed()=0", t)
		return 0
	}
	return t.usedSlots
}
// entries returns the occupied slots of this table, in index order, as
// tableEntry values; the result has exactly slotsUsed() elements.
func (t *fixedTable) entries() []tableEntry {
	ents := make([]tableEntry, 0, t.slotsUsed())
	for idx := uint(0); idx < hash.IndexLimit; idx++ {
		if node := t.nodes[idx]; node != nil {
			ents = append(ents, tableEntry{idx, node})
		}
	}
	return ents
}
// get returns the node stored in slot idx; nil means the slot is empty.
func (t *fixedTable) get(idx uint) nodeI {
	return t.nodes[idx]
}
// needsUpgrade always reports false: a fixedTable is already the largest
// table representation.
func (t *fixedTable) needsUpgrade() bool {
	return false
}
// needsDowngrade reports whether the table has shrunk to the threshold at
// which the compressed sparseTable representation becomes preferable.
func (t *fixedTable) needsDowngrade() bool {
	return t.slotsUsed() == downgradeThreshold
}
// upgrade is a no-op for a fixedTable; the receiver is returned unchanged.
func (t *fixedTable) upgrade() tableI {
	//panic("upgrade() invalid op")
	return t
}
// downgrade converts this fixedTable into an equivalent compressed
// sparseTable containing the same nodes at the same indices.
func (t *fixedTable) downgrade() tableI {
	var nt = newSparseTable(t.depth, t.hashPath, t.slotsUsed())
	for idx := uint(0); idx < hash.IndexLimit; idx++ {
		if t.nodes[idx] != nil {
			nt.insertInplace(idx, t.nodes[idx])
		}
	}
	return nt
}
// insertInplace destructively stores n in the (empty) slot idx and bumps
// the used-slot counter.
func (t *fixedTable) insertInplace(idx uint, n nodeI) {
	t.nodes[idx] = n
	t.usedSlots++
}
// insert returns a copy of this table with n stored in slot idx, leaving
// the receiver untouched (persistent/functional update).
func (t *fixedTable) insert(idx uint, n nodeI) tableI {
	_ = assertOn && assert(t.nodes[idx] == nil,
		"t.insert(idx, n) where idx slot is NOT empty; this should be a replace")
	var nt = t.copy().(*fixedTable)
	nt.nodes[idx] = n
	nt.usedSlots++
	return nt
}
// replaceInplace destructively overwrites slot idx; the used-slot counter
// is unchanged because the slot was already occupied.
func (t *fixedTable) replaceInplace(idx uint, n nodeI) {
	t.nodes[idx] = n
}
// replace returns a copy of this table with slot idx overwritten by n,
// leaving the receiver untouched.
func (t *fixedTable) replace(idx uint, n nodeI) tableI {
	_ = assertOn && assert(t.nodes[idx] != nil,
		"t.replace(idx, n) where idx slot is empty; this should be an insert")
	var nt = t.copy().(*fixedTable)
	nt.replaceInplace(idx, n)
	//nt.nodes[idx] = n
	return nt
}
// removeInplace destructively clears slot idx and decrements the used-slot
// counter.
func (t *fixedTable) removeInplace(idx uint) {
	t.nodes[idx] = nil
	t.usedSlots--
}
// remove returns a table with slot idx cleared, leaving the receiver
// untouched. A non-root table that would become empty collapses to nil
// (signalling the parent to drop it), and one that shrinks to the downgrade
// threshold is converted to a sparseTable first.
func (t *fixedTable) remove(idx uint) tableI {
	_ = assertOn && assert(t.nodes[idx] != nil,
		"t.remove(idx) where idx slot is already empty")
	if t.depth > 0 { //non-root table
		if t.slotsUsed() == 1 {
			return nil
		}
		if t.slotsUsed()-1 == downgradeThreshold {
			// downgrade() builds a fresh table, so the in-place removal below
			// mutates only the new sparse table, not the receiver.
			var nt = t.downgrade()
			nt.removeInplace(idx)
			return nt
		}
	}
	var nt = t.copy().(*fixedTable)
	nt.removeInplace(idx)
	//nt.nodes[idx] = nil
	//nt.usedSlots--
	return nt
}
// walkPreOrder executes the visitFunc in pre-order traversal. If there is no
// node for a given idx, walkPreOrder skips that idx.
//
// The traversal stops if the visitFunc function returns false.
func (t *fixedTable) walkPreOrder(fn visitFunc, depth uint) bool {
	_ = assertOn && assertf(depth == t.depth, "depth,%d != t.depth=%d; t=%s", depth, t.depth, t)
	// depth is incremented before every fn call, including the one for this
	// table itself.
	depth++
	if !fn(t, depth) {
		return false
	}
	for _, n := range t.nodes {
		if n != nil {
			if !n.walkPreOrder(fn, depth) {
				return false
			}
		}
	}
	return true
}
// iter returns a stateful iterator over the occupied slots of this table in
// index order: each call yields the next non-nil node, and once the slots
// are exhausted every further call returns nil.
func (t *fixedTable) iter() tableIterFunc {
	var i = -1 // slot index consumed by the previous call
	return func() nodeI {
		for i < int(hash.IndexLimit-1) {
			i++
			if t.nodes[i] != nil {
				return t.nodes[i]
			}
		}
		return nil
	}
}
// count returns the total number of key/value pairs stored in this table
// and, recursively, in all its sub-tables.
func (t *fixedTable) count() int {
	var total int
	for _, node := range t.nodes {
		if node == nil {
			continue
		}
		total += node.count()
	}
	return total
}
|
package main
import (
"context"
"os"
"os/signal"
exporter "producer/metrics"
"strconv"
"time"
cmap "github.com/orcaman/concurrent-map"
log "github.com/sirupsen/logrus"
"github.com/Shopify/sarama"
)
// init sets the global logrus level before main runs, using logLevel()
// (defined elsewhere in this package) to determine it.
func init() {
	log.SetLevel(logLevel())
}
// main wires the producer demo together: it resolves the target topic from
// $TOPIC (default "input"), starts the prometheus exporter, turns SIGINT
// into context cancellation, and produces epoch-timestamp messages until
// interrupted.
func main() {
	topic := "input"
	if value, ok := os.LookupEnv("TOPIC"); ok {
		topic = value
	}
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	ctx, cancel := context.WithCancel(context.Background())
	go exporter.Exporter()
	// Translate Ctrl-C into cancellation of sendEpochMessage.
	go func() {
		oscall := <-c
		log.Debug("system call: ", oscall)
		cancel()
	}()
	producer, err := newProducer()
	if err != nil {
		log.Error("Could not create producer: ", err)
		os.Exit(1)
	}
	if err := sendEpochMessage(ctx, producer, topic); err != nil {
		log.Error("failed to produce: ", err)
	}
}
// sendEpochMessage produces one message per millisecond containing the
// current epoch time (ms) to the given topic until ctx is cancelled. It
// tracks a per-partition count of produced messages and mirrors it into the
// exported prometheus counter.
func sendEpochMessage(ctx context.Context, producer sarama.SyncProducer, topic string) error {
	log.Info("Server Start Producing")
	var partitionProduced = cmap.New()
	go func() {
		for {
			time.Sleep(time.Millisecond)
			epochTime := strconv.FormatInt(time.Now().UnixNano()/int64(time.Millisecond), 10)
			msg := prepareMessage(topic, epochTime)
			partition, offset, err := producer.SendMessage(msg)
			if err != nil {
				log.Info(err)
				time.Sleep(1 * time.Second)
				// BUG fix: a failed send must not be counted as produced.
				continue
			}
			// BUG fix: the map key was string(partition), which converts the
			// int32 to a unicode rune (e.g. 65 -> "A"), not its decimal form.
			key := strconv.Itoa(int(partition))
			counter, _ := partitionProduced.Get(key)
			if counter == nil {
				partitionProduced.Set(key, 1)
			} else {
				counted := counter.(int)
				counted++
				partitionProduced.Set(key, counted)
				log.Trace("Counter: ", " Message: ", epochTime, " topic: ", topic, " partition: ", partition, " offset: ", offset)
			}
			// BUG fix: the exported metric previously skipped the first
			// message seen on each partition (Inc was only in the else arm).
			M := exporter.ProducedMessageCounter.WithLabelValues(key, topic)
			M.Inc()
		}
	}()
	<-ctx.Done()
	log.Info("Server Stopped")
	_, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	log.Info("Server Exited Properly")
	return nil
}
// prepareMessage wraps message in a sarama ProducerMessage for topic,
// leaving partition choice to the configured partitioner (-1).
func prepareMessage(topic, message string) *sarama.ProducerMessage {
	return &sarama.ProducerMessage{
		Topic:     topic,
		Partition: -1,
		Value:     sarama.StringEncoder(message),
	}
}
|
// Package tvdb provides go bindings for the TVDB API at https://api.thetvdb.com/swagger.
package tvdb
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"time"
)
const BaseURL = "https://api.thetvdb.com/"
// client is the TVDB client struct.
type client struct {
	client *http.Client
	apiKey string
	baseURL string
	token tokenResponse
	// tokenFromDate records when the current token was obtained — presumably
	// used for expiry checks elsewhere in the package; TODO confirm.
	tokenFromDate time.Time
}
// Client is the public TVDB API surface; currently only search is exposed.
type Client interface {
	Search(query string) (*SearchResponse, error)
}
// NewClient creates a new TVDB client for the given base URL and API key.
// (The previous comment said "TMDB" — a copy/paste slip.) The token starts
// empty and is populated by authentication elsewhere in the package.
func NewClient(baseURL string, apiKey string) Client {
	tvdbService := &client{
		apiKey: apiKey,
		baseURL: baseURL,
		client: &http.Client{},
		token: tokenResponse{},
	}
	return tvdbService
}
// addHeaders decorates req with the bearer token and the JSON content type
// the TVDB API expects.
func (s *client) addHeaders(req *http.Request) {
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Authorization", "Bearer "+s.token.JWTToken)
}
type apiError struct {
Error string `json:"Error"`
}
func unmarshalResponse(resp *http.Response, success interface{}) error {
if code := resp.StatusCode; 200 <= code && code <= 299 {
if success != nil && resp.StatusCode != 204 {
return json.NewDecoder(resp.Body).Decode(success)
}
} else {
var apiErr apiError
if err := json.NewDecoder(resp.Body).Decode(&apiErr); err != nil {
return fmt.Errorf("unmarshal of response failed: %v", err)
}
return errors.New(apiErr.Error)
}
return nil
}
|
package main
import (
"log"
"fmt"
"time"
"os"
"io/ioutil"
"net/url"
"net/http"
"strings"
"strconv"
"github.com/ChimeraCoder/anaconda"
"github.com/sanear/eightBallBot/questionAnalyzer"
"github.com/sanear/eightBallBot/eightBall"
)
// main runs the eight-ball twitter bot: a tiny web service that serves
// random answers, plus a polling loop that answers yes/no questions and
// replies to mentions, persisting the last seen mention id between runs.
// Usage: eightBallBot [port] [postLimit]
func main() {
	port := "8080"
	if len(os.Args) > 1 {
		port = os.Args[1]
	}
	postLimit := 4
	if len(os.Args) > 2 {
		postLimit, _ = strconv.Atoi(os.Args[2])
	}
	go runDumbWebService(":" + port)
	// SECURITY NOTE(review): twitter API credentials are hard-coded and
	// committed to source control below; they should be rotated and moved to
	// environment variables or a secrets store.
	anaconda.SetConsumerKey("VhSUdN4bdnVcXvrUcWCK8lFp6")
	anaconda.SetConsumerSecret("WNBwa0gv6Daadu4hOhL3fVK7mFZUlo4aoK67Dpgj7tNWiO30ia")
	api := anaconda.NewTwitterApi("2879998777-QT8IG6Ncr038vFs6MbITTixf85TXhacoqtmXAFX", "LcMWzd9hgXa6DgZFtwDKHdcSDVabClXTN5w4F6mEIQ43k")
	lastIdFile := "lastId.txt"
	lastId, _ := readLastIdFromFile(lastIdFile)
	// Poll forever: answer questions, reply to mentions, persist progress.
	for {
		postCount := 0
		postCount, _ = answerQuestions(api, postCount, postLimit)
		postCount, lastId, _ = respondToMentions(api, postCount, postLimit, lastId)
		writeLastIdToFile(lastId, lastIdFile)
		time.Sleep(20 * time.Second)
	}
}
// runDumbWebService serves eight-ball answers over HTTP on the given
// address (e.g. ":8080"); it blocks until the server stops.
func runDumbWebService(port string) {
	log.Printf("Starting Eight Ball webservice on port %s...\n", port)
	http.HandleFunc("/", handler)
	// BUG fix: the ListenAndServe error (e.g. port already in use) was
	// previously discarded, making startup failures invisible.
	if err := http.ListenAndServe(port, nil); err != nil {
		log.Printf("ERROR: Eight Ball webservice failed: %v\n", err)
	}
	log.Println("Stopping Eight Ball webservice...")
}
// handler answers every HTTP request with a single random eight-ball answer.
func handler(writer http.ResponseWriter, request *http.Request) {
	fmt.Fprintf(writer, "%s\n", eightBall.EightBallAnswer())
}
// answerQuestions searches twitter for yes/no questions and replies to each
// with an eight-ball answer, stopping once postLimit replies have been made
// this cycle. It returns the updated post count.
func answerQuestions(api *anaconda.TwitterApi, postCount int, postLimit int) (newPostCount int, err error) {
	questions, err := findQuestions(api)
	if err != nil {
		log.Println("ERROR: Failed to get questions!", err)
		return postCount, err
	}
	for _, question := range questions {
		if postCount >= postLimit {
			log.Printf("Too many requests have been made this cycle; sleeping for a bit.")
			return postCount, nil
		}
		log.Printf("@%s asked, '%s'\n", question.User.ScreenName, question.Text)
		reply, err := answerQuestion(api, question)
		if err != nil {
			log.Printf("ERROR: Unable to post reply: %s\n", err)
			return postCount, err
		}
		log.Printf("Replied to tweet with: '%s'\n", reply.Text)
		postCount++
	}
	return postCount, nil
}
// respondToMentions replies to mentions newer than lastId with eight-ball
// answers and favorites mentions containing "love" or "funny". It returns
// the updated post count and the id of the last mention replied to.
func respondToMentions(api *anaconda.TwitterApi, postCount int, postLimit int, lastId string) (newPostCount int, newLastId string, err error) {
	v := url.Values{}
	v.Set("since_id", lastId) // lastId is already a string; the old string(lastId) was a no-op
	mentions, err := api.GetMentionsTimeline(v)
	if err != nil {
		log.Println("ERROR: Failed to get mentions!", err)
		return postCount, lastId, err
	}
	for _, mention := range mentions {
		// Regardless of rate, favorite mentions containing "love" and "funny"
		lower := strings.ToLower(mention.Text)
		if strings.Contains(lower, "love") || strings.Contains(lower, "funny") {
			if _, err = api.Favorite(mention.Id); err != nil {
				log.Println("ERROR: Failed to favorite tweet!", err)
			} else {
				// BUG fix: mention.Id is an int64; the old "%s" verb printed a
				// %!s(int64=...) artifact in the log.
				log.Printf("Favorited tweet %d: '%s'\n", mention.Id, mention.Text)
			}
		}
		if postCount >= postLimit {
			log.Printf("Too many requests have been made this cycle; sleeping for a bit.")
			return postCount, lastId, nil
		}
		log.Printf("@%s mentioned me, saying '%s'\n", mention.User.ScreenName, mention.Text)
		reply, err := answerQuestion(api, mention)
		if err != nil {
			log.Println("ERROR: Unable to post reply!", err)
			return postCount, lastId, err
		}
		lastId = strconv.FormatInt(mention.Id, 10)
		postCount++
		log.Printf("Replied to tweet %s, saying '%s'\n", lastId, reply.Text)
	}
	return postCount, lastId, nil
}
// findQuestions searches the most recent english tweets containing "?" and
// keeps the ones the questionAnalyzer classifies as yes/no questions.
func findQuestions(api *anaconda.TwitterApi) (questions []anaconda.Tweet, err error) {
	v := url.Values{}
	v.Set("lang", "en")
	v.Set("count", "100")
	searchResult, err := api.GetSearch("?", v)
	if err != nil {
		return nil, err
	}
	for _, tweet := range searchResult.Statuses {
		if questionAnalyzer.IsYesNoQuestion(tweet.Text) {
			questions = append(questions, tweet)
		}
	}
	return questions, nil
}
// answerQuestion posts a reply to question, mentioning its author and
// containing a random eight-ball answer, and returns the posted tweet.
func answerQuestion(api *anaconda.TwitterApi, question anaconda.Tweet) (reply anaconda.Tweet, err error) {
	params := url.Values{}
	params.Set("in_reply_to_status_id", strconv.FormatInt(question.Id, 10))
	text := "@" + question.User.ScreenName + " " + eightBall.EightBallAnswer()
	return api.PostTweet(text, params)
}
func writeLastIdToFile(lastId string, file string) (err error) {
err = ioutil.WriteFile(file, []byte(lastId), 0644)
if err != nil {
log.Println("ERROR: failed to write to lastId file.", err)
return err
}
return err
}
func readLastIdFromFile(file string) (lastId string, err error) {
var lastIdBytes, e = ioutil.ReadFile(file)
lastId = string(lastIdBytes)
if e != nil {
log.Println("ERROR: failed to read from lastId file.", err)
return "1", e
}
return lastId, nil
}
|
package references
import (
"JVM-GO/ch07/instructions/base"
"JVM-GO/ch07/rtda"
"JVM-GO/ch07/rtda/heap"
)
// NEW implements the JVM `new` instruction: it creates a new, uninitialized
// object of the class referenced by a 16-bit constant-pool index.
type NEW struct{ base.Index16Instruction }
// Execute resolves the target class from the run-time constant pool,
// triggers class initialization first when needed (re-running this
// instruction afterwards), then allocates an instance and pushes its
// reference onto the operand stack.
func (self *NEW) Execute(frame *rtda.Frame) {
	cp := frame.Method().Class().ConstantPool()
	classRef := cp.GetConstant(self.Index).(*heap.ClassRef)
	class := classRef.ResolvedClass()
	// If the resolved class has not started initialization, rewind the PC so
	// this instruction is re-executed after <clinit> has run.
	if !class.InitStarted() {
		frame.RevertNextPC()
		base.InitClass(frame.Thread(), class)
		return
	}
	// Interfaces and abstract classes cannot be instantiated; per the JVM
	// specification an InstantiationError must be thrown in that case.
	if class.IsInterface() || class.IsAbstract() {
		panic("java.lang.InstantiationError")
	}
	ref := class.NewObject()
	frame.OperandStack().PushRef(ref)
}
|
package marsmedia
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
)
// MarsmediaAdapter is a prebid-server bidder adapter for Mars Media; URI is
// the endpoint bid requests are POSTed to (a zone parameter is appended).
type MarsmediaAdapter struct {
	URI string
}
// MakeRequests builds a single outgoing request for the whole bid request:
// it validates the first impression's bidder ext (zoneId), normalizes
// banner sizes from the first Format entry, forces first-price auction and
// POSTs the serialized request to the zone-specific endpoint.
func (a *MarsmediaAdapter) MakeRequests(requestIn *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
	// Shallow copy so mutations below do not touch the caller's request.
	request := *requestIn
	if len(request.Imp) == 0 {
		return nil, []error{&errortypes.BadInput{
			Message: "No impression in the bid request",
		}}
	}
	// The zone is read from the FIRST impression only and applied to all.
	var bidderExt adapters.ExtImpBidder
	if err := json.Unmarshal(request.Imp[0].Ext, &bidderExt); err != nil {
		return nil, []error{&errortypes.BadInput{
			Message: "ext.bidder not provided",
		}}
	}
	var marsmediaExt openrtb_ext.ExtImpMarsmedia
	if err := json.Unmarshal(bidderExt.Bidder, &marsmediaExt); err != nil {
		return nil, []error{&errortypes.BadInput{
			Message: "ext.bidder.zoneId not provided",
		}}
	}
	if marsmediaExt.ZoneID == "" {
		return nil, []error{&errortypes.BadInput{
			Message: "zoneId is empty",
		}}
	}
	validImpExists := false
	for i := 0; i < len(request.Imp); i++ {
		if request.Imp[i].Banner != nil {
			// Copy the banner so the caller's impression is not mutated when
			// W/H are overwritten from the first format entry.
			bannerCopy := *requestIn.Imp[i].Banner
			if len(bannerCopy.Format) > 0 {
				firstFormat := bannerCopy.Format[0]
				bannerCopy.W = &(firstFormat.W)
				bannerCopy.H = &(firstFormat.H)
				request.Imp[i].Banner = &bannerCopy
				validImpExists = true
			} else if bannerCopy.W != nil && bannerCopy.H != nil {
				validImpExists = true
			} else {
				return nil, []error{&errortypes.BadInput{
					Message: "No valid banner format in the bid request",
				}}
			}
		} else if request.Imp[i].Video != nil {
			validImpExists = true
		}
	}
	if !validImpExists {
		return nil, []error{&errortypes.BadInput{
			Message: "No valid impression in the bid request",
		}}
	}
	request.AT = 1 //Defaulting to first price auction for all prebid requests
	reqJSON, err := json.Marshal(request)
	if err != nil {
		return nil, []error{&errortypes.BadInput{
			Message: fmt.Sprintf("Json not encoded. err: %s", err),
		}}
	}
	uri := a.URI + "&zone=" + marsmediaExt.ZoneID.String()
	headers := http.Header{}
	headers.Add("Content-Type", "application/json;charset=utf-8")
	headers.Add("Accept", "application/json")
	headers.Add("x-openrtb-version", "2.5")
	// Forward device hints so the endpoint sees the original client context.
	if request.Device != nil {
		addHeaderIfNonEmpty(headers, "User-Agent", request.Device.UA)
		addHeaderIfNonEmpty(headers, "X-Forwarded-For", request.Device.IP)
		addHeaderIfNonEmpty(headers, "Accept-Language", request.Device.Language)
		if request.Device.DNT != nil {
			addHeaderIfNonEmpty(headers, "DNT", strconv.Itoa(int(*request.Device.DNT)))
		}
	}
	return []*adapters.RequestData{{
		Method: "POST",
		Uri: uri,
		Body: reqJSON,
		Headers: headers,
	}}, []error{}
}
// MakeBids unpacks the server response into typed bids: no bids on 204, a
// BadInput error on 400, and a BadServerResponse error for any other
// non-200 status, an unparseable body, or a response without seat bids.
// Only the first seat bid is consumed.
func (a *MarsmediaAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
	if response.StatusCode == http.StatusNoContent {
		return nil, nil
	}
	if response.StatusCode == http.StatusBadRequest {
		return nil, []error{&errortypes.BadInput{
			Message: fmt.Sprintf("Unexpected status code: %d. ", response.StatusCode),
		}}
	}
	if response.StatusCode != http.StatusOK {
		return nil, []error{&errortypes.BadServerResponse{
			Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
		}}
	}
	var bidResp openrtb2.BidResponse
	if err := json.Unmarshal(response.Body, &bidResp); err != nil {
		// BUG fix: the old message used the %d verb for an error value,
		// producing a %!d(...) artifact.
		return nil, []error{&errortypes.BadServerResponse{
			Message: fmt.Sprintf("Bad server response: %s. ", err),
		}}
	}
	// BUG fix: indexing SeatBid[0] without a length check used to panic on
	// well-formed responses containing no seat bids.
	if len(bidResp.SeatBid) == 0 {
		return nil, []error{&errortypes.BadServerResponse{
			Message: "Empty SeatBid in the bid response",
		}}
	}
	sb := bidResp.SeatBid[0]
	bidResponse := adapters.NewBidderResponseWithBidsCapacity(len(sb.Bid))
	for i := 0; i < len(sb.Bid); i++ {
		bid := sb.Bid[i]
		bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
			Bid:     &bid,
			BidType: getMediaTypeForImp(bid.ImpID, internalRequest.Imp),
		})
	}
	return bidResponse, nil
}
func addHeaderIfNonEmpty(headers http.Header, headerName string, headerValue string) {
if len(headerValue) > 0 {
headers.Add(headerName, headerValue)
}
}
// getMediaTypeForImp figures out which media type this bid is for by looking
// up the matching impression: video when the imp carries a Video object,
// banner otherwise (and banner when no impression matches the id).
func getMediaTypeForImp(impId string, imps []openrtb2.Imp) openrtb_ext.BidType {
	for _, imp := range imps {
		if imp.ID != impId {
			continue
		}
		if imp.Video != nil {
			return openrtb_ext.BidTypeVideo
		}
		return openrtb_ext.BidTypeBanner
	}
	return openrtb_ext.BidTypeBanner
}
// Builder builds a new instance of the Marsmedia adapter for the given bidder with the given config.
// The configured endpoint becomes the base URI; a zone parameter is appended per request.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
	bidder := &MarsmediaAdapter{
		URI: config.Endpoint,
	}
	return bidder, nil
}
|
package kontena
import (
"fmt"
"io/ioutil"
"os"
"strings"
yaml "gopkg.in/yaml.v2"
"github.com/inloop/goclitools"
"github.com/jakubknejzlik/kontena-git-cli/model"
"github.com/jakubknejzlik/kontena-git-cli/utils"
)
// CreateSecretsImport builds the secret map to import for a stack: entries
// from the yaml file at path are prefixed with "<stack>_", while secrets
// currently present for the stack but absent from the file map to "" so the
// import removes them. A missing file is treated as empty.
func (c *Client) CreateSecretsImport(stack, path string, currentSecrets []model.Secret) (map[string]string, error) {
	result := map[string]string{}
	var secrets map[string]string
	data, err := ioutil.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		return result, err
	}
	// BUG fix: the unmarshal error used to be discarded, silently turning a
	// malformed secrets file into "remove every secret of this stack".
	if err := yaml.Unmarshal(data, &secrets); err != nil {
		return result, err
	}
	secretsToRemove := []string{}
	for _, secret := range currentSecrets {
		secretKey := strings.Replace(secret.Name, stack+"_", "", 1)
		if strings.HasPrefix(secret.Name, stack+"_") && secrets[secretKey] == "" {
			secretsToRemove = append(secretsToRemove, secret.Name)
		}
	}
	for _, key := range secretsToRemove {
		result[key] = ""
	}
	for key, value := range secrets {
		result[stack+"_"+key] = value
	}
	return result, nil
}
// SecretExists reports whether the stack-scoped secret "<stack>_<name>" has
// a non-empty value in the default grid's vault.
func (c *Client) SecretExists(name, stack string) bool {
	value, _ := c.SecretValue(stack + "_" + name)
	return value != ""
}
// SecretExistsInGrid reports whether the stack-scoped secret
// "<stack>_<name>" has a non-empty value in the given grid's vault.
func (c *Client) SecretExistsInGrid(grid, name, stack string) bool {
	value, _ := c.SecretValueInGrid(grid, stack+"_"+name)
	return value != ""
}
// SecretWrite creates or updates (-u) the secret in the default grid's
// vault, passing the value on stdin so it never appears on the command line.
func (c *Client) SecretWrite(secret, value string) error {
	cmd := fmt.Sprintf("kontena vault update -u %s", secret)
	_, err := goclitools.RunWithInput(cmd, []byte(value))
	return err
}
// SecretWriteToGrid creates or updates (-u) the secret in the given grid's
// vault, passing the value on stdin.
func (c *Client) SecretWriteToGrid(grid, secret, value string) error {
	cmd := fmt.Sprintf("kontena vault update --grid %s -u %s", grid, secret)
	_, err := goclitools.RunWithInput(cmd, []byte(value))
	return err
}
// SecretRemove deletes the secret from the default grid's vault without
// prompting (--force).
func (c *Client) SecretRemove(secret string) error {
	return goclitools.RunInteractive(fmt.Sprintf("kontena vault rm --force %s", secret))
}
// SecretRemoveFromGrid deletes the secret from the given grid's vault
// without prompting (--force).
func (c *Client) SecretRemoveFromGrid(grid, secret string) error {
	return goclitools.RunInteractive(fmt.Sprintf("kontena vault rm --grid %s --force %s", grid, secret))
}
// SecretList returns all secrets of the default grid's vault, parsed from
// the CLI's long listing output (one secret per line).
func (c *Client) SecretList() ([]model.Secret, error) {
	data, err := goclitools.Run("kontena vault ls -l")
	if err != nil {
		return []model.Secret{}, err
	}
	rows := utils.SplitString(string(data), "\n")
	return model.SecretParseList(rows)
}
// SecretListInGrid returns all secrets of the given grid's vault, parsed
// from the CLI's long listing output.
func (c *Client) SecretListInGrid(grid string) ([]model.Secret, error) {
	data, err := goclitools.Run(fmt.Sprintf("kontena vault ls -l --grid %s", grid))
	if err != nil {
		return []model.Secret{}, err
	}
	rows := utils.SplitString(string(data), "\n")
	return model.SecretParseList(rows)
}
// SecretValue reads the secret's raw value from the default grid's vault.
// NOTE(review): the CLI output is returned as-is and may carry a trailing
// newline — confirm against goclitools.Run before comparing values exactly.
func (c *Client) SecretValue(name string) (string, error) {
	value, err := goclitools.Run(fmt.Sprintf("kontena vault read --value %s", name))
	return string(value), err
}
// SecretValueInGrid reads the secret's raw value from the given grid's
// vault; the same trailing-newline caveat as SecretValue applies.
func (c *Client) SecretValueInGrid(grid, name string) (string, error) {
	value, err := goclitools.Run(fmt.Sprintf("kontena vault read --grid %s --value %s", grid, name))
	return string(value), err
}
// SecretsImportInGrid bulk-imports the given name->value map into the vault
// via `kontena vault import`: existing secrets are overwritten (--force)
// and empty values remove the secret (--empty-is-null). Despite its name it
// operates on the CLI's currently selected grid.
func (c *Client) SecretsImportInGrid(secrets map[string]string) error {
	secretsInput, err := yaml.Marshal(secrets)
	if err != nil {
		return err
	}
	// S1039 fix: fmt.Sprintf with no formatting arguments was a pointless call.
	cmd := "kontena vault import --force --empty-is-null"
	_, err = goclitools.RunWithInput(cmd, secretsInput)
	return err
}
|
package main
import (
"flag"
"fmt"
"os"
"time"
"io/ioutil"
"net"
"crypto/tls"
"crypto/x509"
"github.com/bshuster-repo/logrus-logstash-hook"
log "github.com/sirupsen/logrus"
)
// main streams random quotes to a logstash server as structured JSON log
// entries, optionally over TLS with a caller-supplied CA certificate.
// Flags: -sleep seconds between quotes, -logstash host:port (required),
// -tls to enable TLS, -cert PEM CA file (required together with -tls).
func main() {
	sleep := flag.Int("sleep", 5, "time to sleep in seconds")
	logstash := flag.String("logstash", "", "logstash server")
	secure := flag.Bool("tls", false, "use tls for communications")
	cert := flag.String("cert", "", "pem cert to user for tls")
	flag.Parse()
	q := NewQuotes()
	if *logstash == "" {
		fmt.Println("logstash server to use not provided")
		os.Exit(1)
	}
	if *secure && *cert == "" {
		fmt.Println("tls wanted, but no cert name supplied")
		os.Exit(1)
	}
	// Load the CA certificate when TLS is requested.
	var c []byte
	if *secure && *cert != "" {
		var err error
		if c, err = ioutil.ReadFile(*cert); err != nil {
			fmt.Printf("error reading certificat file : %s\n", *cert)
			fmt.Printf("error : %v\n", err)
			os.Exit(1)
		}
	}
	// Dial logstash over plain TCP or TLS, depending on the flags.
	var conn net.Conn
	if !*secure {
		var err error
		if conn, err = net.Dial("tcp", *logstash); err != nil {
			fmt.Printf("error connecting to logstash : %s\n", *logstash)
			fmt.Printf("error : %v\n", err)
			os.Exit(1)
		}
	} else {
		roots := x509.NewCertPool()
		if ok := roots.AppendCertsFromPEM(c); !ok {
			fmt.Println("failed to parse certificate")
			os.Exit(1)
		}
		var err error
		conn, err = tls.Dial("tcp", *logstash, &tls.Config{
			RootCAs: roots,
		})
		if err != nil {
			fmt.Printf("failed to connect to logstash : %s\n", *logstash)
			fmt.Printf("error : %v\n", err)
			os.Exit(1)
		}
	}
	defer conn.Close()
	hook, err := logrustash.NewHookWithConn(conn, "test-quotes")
	if err != nil {
		fmt.Printf("error creating logstash logging hook : %s\n", *logstash)
		fmt.Printf("%v\n", err)
		// BUG fix: execution previously continued and registered the invalid
		// hook, failing later inside logrus instead of failing fast here.
		os.Exit(1)
	}
	log.SetFormatter(&log.JSONFormatter{})
	log.SetLevel(log.DebugLevel) // default is Info
	log.AddHook(hook)
	// Emit one quote per interval, tagged with an increasing sequence number.
	x := 0
	for {
		log.WithFields(log.Fields{
			"sequence": x,
		}).Debug(q.RandomQuote())
		x++
		time.Sleep(time.Duration(*sleep) * time.Second)
	}
}
|
package piscine
// StrRev returns s with its characters in reverse order. The string is
// reversed rune-wise, so multi-byte UTF-8 characters survive intact.
func StrRev(s string) string {
	runes := []rune(s)
	for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 {
		runes[i], runes[j] = runes[j], runes[i]
	}
	return string(runes)
}
|
package lang
// mnil is the nil expression of the language; it carries no state.
type mnil struct {
}
// Nil is the shared nil value, usable wherever a nil expression is needed.
var Nil mnil = mnil{}
// IsNil reports whether e is the nil expression.
func IsNil(e Expr) bool {
	_, ok := e.(mnil)
	return ok
}
// String renders the nil expression as the literal "nil".
func (n mnil) String() string {
	return "nil"
}
// Equal reports whether o is also the nil expression; all mnil values
// compare equal to each other.
func (n mnil) Equal(o Expr) bool {
	_, ok := o.(mnil)
	return ok
}
|
package main
import (
//"unicode/utf8"
"image/color"
//"math"
"math/rand"
//"fmt"
"gonum.org/v1/gonum/stat/distuv"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/vg"
)
// Model parameters for the sexual-conflict recursion in sexConflict.
const (
	// m is the male advantage of the A allele.
	m = 5.
	// N, B and b weight the offspring counts nA and na.
	N = 10.
	B = 20.
	b = 1
)

// RangeInt returns n pseudo-random float64 values drawn uniformly from the
// half-open interval [min, max).
func RangeInt(min float64, max float64, n int) []float64 {
	values := make([]float64, n)
	for i := range values {
		values[i] = min + rand.Float64()*(max-min)
	}
	return values
}

// sexConflict maps the current frequency x of the A allele to its frequency
// in the next generation, given male advantage m and the brood parameters
// N, B and b declared above.
func sexConflict(x float64) float64 {
	// Frequency of A among fathers after sexual selection.
	y := m * x / (m*x + 1 - x)
	// Offspring genotype frequencies (mother allele x father allele).
	pAA := x * y
	pAa := x * (1 - y)
	paA := (1 - x) * y
	paa := (1 - x) * (1 - y)
	// Offspring counts carrying A versus a.
	nA := N * (pAA*b*B + 0.5*pAa*B + 0.5*paA*b*B)
	na := N * (0.5*pAa*B + 0.5*paA*b*B + paa*B)
	return nA / (nA + na)
}
// main renders the sexConflict recursion, a Laplace density and the identity
// line on the unit square and saves the figure as functions.png.
func main() {
	//Plot
	p, err := plot.New()
	if err != nil {
		panic(err)
	}
	p.Title.Text = "Functions"
	p.X.Label.Text = "Current frequency of A allele, x"
	p.Y.Label.Text = "Future frequency of A, x'"
	// The recursion curve, drawn in red.
	n := plotter.NewFunction(sexConflict)
	//n.Dashes = []vg.Length{vg.Points(2), vg.Points(2)}
	n.Width = vg.Points(1)
	n.Color = color.RGBA{R: 255, A: 255}
	//var DistriGamma = distuv.Gamma{Alpha: 0.2, Beta: 0.4}
	//g := plotter.NewFunction(DistriGamma.Prob)
	// A Laplace density for comparison, drawn in green.
	var DistriLaplace = distuv.Laplace{Mu: 0.5, Scale: 0.8}
	l := plotter.NewFunction(DistriLaplace.Prob)
	l.Width = vg.Points(1)
	l.Color = color.RGBA{G: 255, A: 255}
	//var DistriGamma = distuv.Gamma{Alpha: 0.2, Beta: 0.4}
	// The identity line x' = x (dashed); its crossings with the recursion
	// curve are the fixed points.
	e := plotter.NewFunction(func (x float64) float64 { return x })
	e.Dashes = []vg.Length{vg.Points(2), vg.Points(2)}
	e.Width = vg.Points(1)
	p.Add(n, e, l)
	p.Legend.Add("sex-conflict", n)
	p.Legend.Add("Laplace(mu:0.5, scale:0.8)", l)
	p.Legend.ThumbnailWidth = 0.5 * vg.Inch
	p.X.Min = 0
	p.X.Max = 1
	p.Y.Min = 0
	p.Y.Max = 1
	// Save the plot to a PNG file.
	if err := p.Save(4*vg.Inch, 4*vg.Inch, "functions.png"); err != nil {
		panic(err)
	}
}
|
package bconf_test
import (
"github.com/art4711/bconf"
"testing"
"fmt"
"strings"
)
// testjson is a canned bconf tree in JSON form covering attributes,
// conf values, counters, operators, state and stopwords.
var testjson = `{"attr": {"name": {"attrind": "4","attronly": "3","body": "5","id": "0","order": "1","status": "6","suborder": "2"},"order": {"0": "id","1": "order","2": "suborder","3": "attronly","4": "attrind","5": "body","6": "status"}},"conf": {"foo.bar": "17","test.conf": "4711"},"counters": {"attr": {"HEJ": "hej_counter"},"word": {"attr": "attr_counter"}},"opers": {"en": {"AND": "0","NOT": "2","OR": "1","and": "0","not": "2","or": "1"}},"state": {"to_id": "4711"},"stopwords": {"ONE": "","one": ""}}`

// testdata exercises the plain key=value conf format: comment lines
// and dotted-key assignments.
var testdata = `
#comment
#whitespace before comment
node.1=foo
node.2=bar
a.a.a.a.a=b`
// lj loads the canned JSON fixture into a fresh Bconf, failing the
// test immediately on parse errors.
func lj(t *testing.T) bconf.Bconf {
	conf := make(bconf.Bconf)
	err := conf.LoadJson([]byte(testjson))
	if err != nil {
		t.Fatalf("LoadJson: %v", err)
	}
	return conf
}
// ld loads the plain conf-format fixture into a fresh Bconf, failing
// the test immediately on parse errors.
func ld(t *testing.T) bconf.Bconf {
	conf := make(bconf.Bconf)
	err := conf.LoadConfData(strings.NewReader(testdata))
	if err != nil {
		t.Fatalf("LoadConfData: %v", err)
	}
	return conf
}
// TestLoadJson verifies that the JSON fixture parses without error.
func TestLoadJson(t *testing.T) {
	lj(t)
}
// TestLoadData verifies that the conf-format fixture parses without error.
func TestLoadData(t *testing.T) {
	ld(t)
}
// TestGet checks that a deep key lookup returns the stored leaf value.
func TestGet(t *testing.T) {
	bc := lj(t)
	got := bc.GetString("attr", "name", "attrind")
	if got != "4" {
		t.Errorf("attr.name.attrind != 4 (%v)", got)
	}
}
// TestForeachVal checks that ForeachVal visits every key of the "conf"
// node exactly once with the expected value.
func TestForeachVal(t *testing.T) {
	bc := lj(t)
	node := bc.GetNode("conf")
	expected := map[string]string{
		"foo.bar":   "17",
		"test.conf": "4711",
	}
	node.ForeachVal(func(k, v string) {
		if expected[k] != v {
			t.Errorf("wrong/missing/repeated k: %v v: %v x: %v", k, v, expected[k])
		}
		// Poison the entry so a repeated visit is caught above.
		expected[k] = "!"
	})
}
// TestForeachSorted checks that ForeachSorted visits the "attr.order"
// keys in ascending numeric order and that all 7 keys are seen.
func TestForeachSorted(t *testing.T) {
	bc := lj(t)
	node := bc.GetNode("attr", "order")
	count := 0
	node.ForeachSorted(func(k, v string) {
		if k != fmt.Sprint(count) {
			t.Errorf("out of order keys: %v != %v", count, k)
		}
		count++
	})
	if count != 7 {
		t.Errorf("too few keys: %v", count)
	}
}
// TestData checks that a deeply nested key from the conf-format fixture
// is retrievable.
func TestData(t *testing.T) {
	bc := ld(t)
	got := bc.GetString("a", "a", "a", "a", "a")
	if got != "b" {
		t.Errorf("a.a.a.a.a != b (%v)", got)
	}
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package movr
import (
"encoding/json"
"golang.org/x/exp/rand"
)
// numerals are the characters used for generated numeric strings.
const numerals = `1234567890`

// Value pools used by the random vehicle generators below.
var vehicleTypes = [...]string{`skateboard`, `bike`, `scooter`}
var vehicleColors = [...]string{`red`, `yellow`, `blue`, `green`, `black`}
var bikeBrands = [...]string{
	`Merida`, `Fuji`, `Cervelo`, `Pinarello`, `Santa Cruz`, `Kona`, `Schwinn`}
// randString returns a string of the given length whose characters are
// drawn uniformly from alphabet.
func randString(rng *rand.Rand, length int, alphabet string) string {
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = alphabet[rng.Intn(len(alphabet))]
	}
	return string(out)
}
// randCreditCard returns a fake 10-digit credit-card number.
func randCreditCard(rng *rand.Rand) string {
	const digits = 10
	return randString(rng, digits, numerals)
}
// randVehicleType picks one of the known vehicle types uniformly.
func randVehicleType(rng *rand.Rand) string {
	return vehicleTypes[rng.Intn(len(vehicleTypes))]
}
// randVehicleStatus picks a status with fixed weights:
// 40% available, 55% in_use, 5% lost.
func randVehicleStatus(rng *rand.Rand) string {
	r := rng.Intn(100)
	if r < 40 {
		return `available`
	}
	if r < 95 {
		return `in_use`
	}
	return `lost`
}
// randLatLong returns a uniformly random (latitude, longitude) pair.
// Latitude lies in [-90, 90) and longitude in [-180, 180).
// The previous code had the two ranges swapped, producing latitudes of
// up to ±180 and longitudes limited to ±90.
func randLatLong(rng *rand.Rand) (float64, float64) {
	lat := float64(-90 + rng.Intn(180))
	long := float64(-180 + rng.Intn(360))
	return lat, long
}
// randCity picks a uniformly random city name from the package-level
// cities table (defined elsewhere in this package).
func randCity(rng *rand.Rand) string {
	idx := rng.Int31n(int32(len(cities)))
	return cities[idx].city
}
// randVehicleMetadata returns a JSON object describing the vehicle:
// always a random color, plus a brand when the vehicle is a bike.
func randVehicleMetadata(rng *rand.Rand, vehicleType string) string {
	meta := map[string]string{
		`color`: vehicleColors[rng.Intn(len(vehicleColors))],
	}
	if vehicleType == `bike` {
		meta[`brand`] = bikeBrands[rng.Intn(len(bikeBrands))]
	}
	encoded, err := json.Marshal(meta)
	if err != nil {
		// A map[string]string always marshals; failure is a programmer bug.
		panic(err)
	}
	return string(encoded)
}
|
package main
import (
"log"
"midCtrl/devices"
"midCtrl/httpServ"
"midCtrl/serv"
"os"
"time"
)
// serviceConn 和主服务器的连接
// var serviceConn net.Conn
// serviceAddr 主服务器地址
// var serviceAddr string
// InitLoger 初始化log配置
// InitLoger configures the standard logger. If logPath is non-empty,
// log output is appended to that file (creating it when missing);
// otherwise output stays on stderr. Timestamps and short file names
// are always enabled. With an empty path it always returns nil.
func InitLoger(logPath string) error {
	if logPath != "" {
		// 0644 gives the usual rw-r--r-- permissions. The previous code
		// passed os.ModeAppend here, which carries no permission bits and
		// therefore created the log file with mode 000.
		file, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
		if err != nil {
			return err
		}
		log.SetOutput(file)
	}
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	return nil
}
// Init performs process-wide initialisation: logging (to stderr, since
// the path is empty) and device setup.
func Init() {
	//config.InitConfig()
	// With an empty path InitLoger only sets flags and returns nil, so
	// the error is safely ignored here.
	InitLoger("")
	// NOTE(review): IntiDevice looks like a typo for InitDevice — the
	// name lives in midCtrl/devices; confirm before renaming.
	devices.IntiDevice()
}
/*func transServMsg() {
for {
data := serv.GetData()
fmt.Printf("转发到设备侧的数据:%v %s \n", data, string(data))
devices.SendData(data)
}
}*/
// main wires the controller together: after Init it starts the HTTP
// server and the server-message forwarder on their own goroutines,
// then parks the main goroutine in a once-a-minute heartbeat loop.
func main() {
	Init()
	// Start the HTTP server.
	go httpServ.ServStart()
	// Forward server messages to the device side.
	go serv.StartMsgToServer()
	for {
		//data := devices.GetData()
		//fmt.Printf("data forwarded to the server: %v %s \n", data, string(data))
		//serv.SendData(data)
		// Heartbeat: log once a minute so the process shows liveness.
		time.Sleep(time.Minute * 1)
		log.Println("run main")
	}
}
|
package utils
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"log"
"time"
"cloud.google.com/go/storage"
)
// UploadFile uploads data to the "bills_upload" GCS bucket under
// "<path>/<name>.xlsx" using application-default credentials.
// Errors are logged and abort the upload; nothing is returned
// (matching the original fire-and-forget contract).
func UploadFile(path, name string, data []byte) {
	// Prevent log from printing out time information
	log.SetFlags(0)
	const bucket = "bills_upload"
	object := path + "/" + name + ".xlsx"
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// Bail out: the previous code kept going here and dereferenced a
		// nil client, panicking at NewWriter/Close.
		log.Printf("storage.NewClient: %v", err)
		return
	}
	defer client.Close()
	// Upload an object with storage.Writer.
	wc := client.Bucket(bucket).Object(object).NewWriter(ctx)
	if _, err := io.Copy(wc, bytes.NewReader(data)); err != nil {
		log.Printf("io.Copy: %v", err)
	}
	// Close flushes the write; its error is the one that reports most
	// upload failures.
	if err := wc.Close(); err != nil {
		log.Printf("Writer.Close: %v", err)
	}
}
// uploadFile is an unimplemented stub.
// NOTE(review): dead code — implement or delete; left in place here to
// avoid changing the package surface during review.
func uploadFile(bucket, object string) {
}
// DownloadFile reads the entire contents of object from bucket in
// Google Cloud Storage and returns the raw bytes. The read is bounded
// by a 50-second timeout.
func DownloadFile(bucket, object string) ([]byte, error) {
	// bucket := "bills_upload"
	// object := "RB-TT.xlsx"
	ctx := context.Background()
	// sa := option.WithCredentialsFile("/home/stingray/dropshop/tools/dropshop-5cbbf-ecc067181e26.json")
	client, err := storage.NewClient(ctx)
	if err != nil {
		return nil, fmt.Errorf("storage.NewClient: %v", err)
	}
	defer client.Close()
	// The timeout covers both reader creation and the full body read.
	ctx, cancel := context.WithTimeout(ctx, time.Second*50)
	defer cancel()
	rc, err := client.Bucket(bucket).Object(object).NewReader(ctx)
	if err != nil {
		return nil, fmt.Errorf("Object(%q).NewReader: %v", object, err)
	}
	defer rc.Close()
	data, err := ioutil.ReadAll(rc)
	if err != nil {
		return nil, fmt.Errorf("ioutil.ReadAll: %v", err)
	}
	return data, nil
}
|
package service
import (
"fmt"
"intelliq/app/common"
utility "intelliq/app/common"
"intelliq/app/dto"
"intelliq/app/enums"
"intelliq/app/model"
"intelliq/app/repo"
"strings"
"time"
)
// isQuestionInfoValid reports whether the question carries a plausible
// group code and valid primary IDs for its reviewer, owner and school.
func isQuestionInfoValid(question *model.Question) bool {
	if !strings.HasPrefix(question.GroupCode, common.GROUP_CODE_PREFIX) {
		return false
	}
	if !utility.IsPrimaryIDValid(question.Reviewer.UserID) {
		return false
	}
	if !utility.IsPrimaryIDValid(question.Owner.UserID) {
		return false
	}
	return utility.IsPrimaryIDValid(question.School.SchoolID)
}
// RequestAddNewQuestion stores a teacher's new question with status NEW,
// or re-submits a previously rejected one when the question already has
// a valid id. Returns an app-level error response on validation or
// repository failure, a success response otherwise.
func RequestAddNewQuestion(question *model.Question) *dto.AppResponseDto {
	if !isQuestionInfoValid(question) {
		return utility.GetErrorResponse(common.MSG_BAD_INPUT)
	}
	// NEW status; clear any stale reject reason and origin pointer.
	updateQuestionAttributes(question, enums.CurrentQuestionStatus.NEW, true, true)
	question.FormatTopicTags()
	question.CreateDate = time.Now().UTC()
	quesRepo := repo.NewQuestionRepository(question.GroupCode)
	if quesRepo == nil { // panic - recover can be used here .....
		return utility.GetErrorResponse(common.MSG_UNATHORIZED_ACCESS)
	}
	var err error
	if utility.IsPrimaryIDValid(question.QuestionID) {
		err = quesRepo.Update(question) // update resubmitted rejected ques as request
	} else {
		err = quesRepo.Save(question) // save new ques request
	}
	if err != nil {
		fmt.Println(err.Error())
		// Prefer a specific repo error message when one is available.
		errorMsg := utility.GetErrorMsg(err)
		if len(errorMsg) > 0 {
			return utility.GetErrorResponse(errorMsg)
		}
		return utility.GetErrorResponse(common.MSG_REQUEST_FAILED)
	}
	return utility.GetSuccessResponse(common.MSG_QUES_SUBMIT_SUCCESS)
}
// RequestApprovedQuestionUpdate creates an updated version of an
// approved question raised by a teacher, or updates the duplicate copy
// when the update is resubmitted after a rejection.
// Fixes over the previous version: redundant `break` statements removed
// (Go switch cases do not fall through) and the OriginID dereference is
// nil-guarded instead of panicking on malformed input.
func RequestApprovedQuestionUpdate(question *model.Question) *dto.AppResponseDto {
	if !isQuestionInfoValid(question) || !utility.IsPrimaryIDValid(question.QuestionID) {
		return utility.GetErrorResponse(common.MSG_BAD_INPUT)
	}
	createCopyQues := true
	switch question.Status {
	case enums.CurrentQuestionStatus.APPROVED: // update request on approved ques by teacher
		_id := question.QuestionID // create copy of original ques
		question.OriginID = &_id
		question.QuestionID = ""
		updateQuestionAttributes(question, enums.CurrentQuestionStatus.TRANSIT, true, false)
	case enums.CurrentQuestionStatus.REJECTED: // resubmit of approved ques which was rejected before
		// Validate the pointer back to the original question; previously a
		// nil OriginID panicked on the dereference here.
		if question.OriginID == nil || !utility.IsPrimaryIDValid(*question.OriginID) {
			return utility.GetErrorResponse(common.MSG_BAD_INPUT)
		}
		updateQuestionAttributes(question, enums.CurrentQuestionStatus.TRANSIT, false, false)
		createCopyQues = false
	default: // no other status permitted
		return utility.GetErrorResponse(common.MSG_INVALID_STATE)
	}
	question.FormatTopicTags()
	quesRepo := repo.NewQuestionRepository(question.GroupCode)
	if quesRepo == nil {
		return utility.GetErrorResponse(common.MSG_UNATHORIZED_ACCESS)
	}
	var err error
	if createCopyQues {
		err = quesRepo.Save(question) // creates a duplicate copy of original ques with original id tagged
	} else {
		err = quesRepo.Update(question) // updates the existing dup copy (rejected once, resubmitted with changes)
	}
	if err != nil {
		fmt.Println(err.Error())
		errorMsg := utility.GetErrorMsg(err)
		if len(errorMsg) > 0 {
			return utility.GetErrorResponse(errorMsg)
		}
		return utility.GetErrorResponse(common.MSG_REQUEST_FAILED)
	}
	return utility.GetSuccessResponse(common.MSG_QUES_SUBMIT_SUCCESS)
}
// RequestApprovedQuesRemoval flags an APPROVED question with the REMOVE
// status so an approver can confirm its deletion.
func RequestApprovedQuesRemoval(question *model.Question) *dto.AppResponseDto {
	if !utility.IsPrimaryIDValid(question.QuestionID) {
		return utility.GetErrorResponse(common.MSG_BAD_INPUT)
	}
	if question.Status != enums.CurrentQuestionStatus.APPROVED {
		return utility.GetErrorResponse(common.MSG_INVALID_STATE)
	}
	quesRepo := repo.NewQuestionRepository(question.GroupCode)
	if quesRepo == nil {
		return utility.GetErrorResponse(common.MSG_UNATHORIZED_ACCESS)
	}
	// Keep the reject reason and origin pointer intact; only the status
	// and modification time change.
	updateQuestionAttributes(question, enums.CurrentQuestionStatus.REMOVE, false, false)
	if err := quesRepo.UpdateLimitedCols(question); err != nil {
		fmt.Println(err.Error())
		if msg := utility.GetErrorMsg(err); len(msg) > 0 {
			return utility.GetErrorResponse(msg)
		}
		return utility.GetErrorResponse(common.MSG_REQUEST_FAILED)
	}
	return utility.GetSuccessResponse(common.MSG_QUES_SUBMIT_SUCCESS)
}
// ApproveRequest applies an approver's decision to a pending question:
// REMOVE requests are deleted outright, NEW requests are approved in
// place, and TRANSIT requests additionally mark the original question
// OBSOLETE (cleaned up later by a scheduler). All affected rows are
// written in a single bulk update.
// Fixes over the previous version: redundant `break` statements removed
// and the OriginID dereference is nil-guarded instead of panicking.
func ApproveRequest(question *model.Question) *dto.AppResponseDto {
	var quesList model.Questions
	switch question.Status {
	case enums.CurrentQuestionStatus.REMOVE: // remove request raised by teacher
		return RemoveQuestion(question)
	case enums.CurrentQuestionStatus.NEW: // add new ques by teacher
		// Nothing extra: only this question is updated below.
	case enums.CurrentQuestionStatus.TRANSIT: // update of an existing approved question
		// Validate the pointer back to the original question; previously a
		// nil OriginID panicked on the dereference here.
		if question.OriginID == nil || !utility.IsPrimaryIDValid(*question.OriginID) {
			return utility.GetErrorResponse(common.MSG_BAD_INPUT)
		}
		ques := model.Question{
			QuestionID: *question.OriginID,
			Status:     enums.CurrentQuestionStatus.OBSOLETE, // original ques becomes OBSOLETE; cleaned up by scheduler
		}
		quesList = append(quesList, ques)
	default: // no other status processed
		return utility.GetErrorResponse(common.MSG_INVALID_STATE)
	}
	updateQuestionAttributes(question, enums.CurrentQuestionStatus.APPROVED, true, true)
	quesList = append(quesList, *question)
	quesRepo := repo.NewQuestionRepository(question.GroupCode)
	if quesRepo == nil {
		return utility.GetErrorResponse(common.MSG_UNATHORIZED_ACCESS)
	}
	err := quesRepo.BulkUpdate(quesList)
	if err != nil {
		fmt.Println(err.Error())
		errorMsg := utility.GetErrorMsg(err)
		if len(errorMsg) > 0 {
			return utility.GetErrorResponse(errorMsg)
		}
		return utility.GetErrorResponse(common.MSG_QUES_STATUS_ERROR)
	}
	AddSubjectTopicTags(question) // inserts topic/tags in group based on subject title
	return utility.GetSuccessResponse(common.MSG_QUES_STATUS_SUCCESS)
}
// RejectRequest applies an approver's rejection: REMOVE requests revert
// to APPROVED, NEW requests become REJECTED, and TRANSIT requests
// become REJECTED while keeping their origin id and reject reason so
// the teacher can resubmit.
// Fix over the previous version: redundant `break` statements removed
// (Go switch cases do not fall through).
func RejectRequest(question *model.Question) *dto.AppResponseDto {
	switch question.Status {
	case enums.CurrentQuestionStatus.REMOVE: // remove request raised by teacher
		updateQuestionAttributes(question, enums.CurrentQuestionStatus.APPROVED, true, true) // switch status back to APPROVED
	case enums.CurrentQuestionStatus.NEW: // add new ques by teacher
		updateQuestionAttributes(question, enums.CurrentQuestionStatus.REJECTED, false, true) // reject status
	case enums.CurrentQuestionStatus.TRANSIT: // update of an existing approved question
		updateQuestionAttributes(question,
			enums.CurrentQuestionStatus.REJECTED, false, false) // reject status ; retain originID & reject reason
	default: // no other status processed
		return utility.GetErrorResponse(common.MSG_INVALID_STATE)
	}
	quesRepo := repo.NewQuestionRepository(question.GroupCode)
	if quesRepo == nil {
		return utility.GetErrorResponse(common.MSG_UNATHORIZED_ACCESS)
	}
	err := quesRepo.UpdateLimitedCols(question)
	if err != nil {
		fmt.Println(err.Error())
		errorMsg := utility.GetErrorMsg(err)
		if len(errorMsg) > 0 {
			return utility.GetErrorResponse(errorMsg)
		}
		return utility.GetErrorResponse(common.MSG_QUES_STATUS_ERROR)
	}
	return utility.GetSuccessResponse(common.MSG_QUES_STATUS_SUCCESS)
}
// updateQuestionAttributes stamps the question with the new status and
// the current UTC time, optionally clearing the rejection reason and
// the origin-question pointer.
func updateQuestionAttributes(question *model.Question,
	status enums.QuestionStatus, clearRejectReason bool, clearOriginID bool) {
	question.Status = status
	question.LastModifiedDate = time.Now().UTC()
	if clearRejectReason {
		question.RejectDesc = ""
	}
	if clearOriginID {
		question.OriginID = nil
	}
}
// FetchReviewerRequests returns every question awaiting reviewer action
// (status NEW, TRANSIT or REMOVE) for the given school and reviewer.
func FetchReviewerRequests(requestDto *dto.QuesRequestDto) *dto.AppResponseDto {
	if !utility.IsValidGroupCode(requestDto.GroupCode) {
		return utility.GetErrorResponse(common.MSG_INVALID_GROUP)
	}
	if !utility.IsPrimaryIDValid(requestDto.SchoolID) ||
		!utility.IsPrimaryIDValid(requestDto.UserID) {
		return utility.GetErrorResponse(common.MSG_INVALID_ID)
	}
	pending := []enums.QuestionStatus{
		enums.CurrentQuestionStatus.NEW,
		enums.CurrentQuestionStatus.TRANSIT,
		enums.CurrentQuestionStatus.REMOVE,
	}
	quesRepo := repo.NewQuestionRepository(requestDto.GroupCode)
	if quesRepo == nil {
		return utility.GetErrorResponse(common.MSG_UNATHORIZED_ACCESS)
	}
	result, err := quesRepo.FindReviewersRequests(requestDto, pending)
	if err != nil {
		fmt.Println(err.Error())
		if msg := utility.GetErrorMsg(err); len(msg) > 0 {
			return utility.GetErrorResponse(msg)
		}
		return utility.GetErrorResponse(common.MSG_REQUEST_FAILED)
	}
	return utility.GetSuccessResponse(result)
}
// FetchTeacherRequests fetches a teacher's questions filtered by the
// requested status: APPROVED, REJECTED, or PENDING (which expands to
// NEW/TRANSIT/REMOVE).
// Fix over the previous version: redundant `break` statements removed
// (Go switch cases do not fall through).
func FetchTeacherRequests(requestDto *dto.QuesRequestDto) *dto.AppResponseDto {
	if !utility.IsValidGroupCode(requestDto.GroupCode) {
		return utility.GetErrorResponse(common.MSG_INVALID_GROUP)
	}
	if !utility.IsPrimaryIDValid(requestDto.SchoolID) ||
		!utility.IsPrimaryIDValid(requestDto.UserID) {
		return utility.GetErrorResponse(common.MSG_INVALID_ID)
	}
	var status []enums.QuestionStatus
	switch requestDto.Status {
	case enums.CurrentQuestionStatus.APPROVED:
		status = []enums.QuestionStatus{enums.CurrentQuestionStatus.APPROVED}
	case enums.CurrentQuestionStatus.REJECTED:
		status = []enums.QuestionStatus{enums.CurrentQuestionStatus.REJECTED}
	case enums.CurrentQuestionStatus.PENDING:
		// PENDING is a virtual status covering everything awaiting review.
		status = []enums.QuestionStatus{enums.CurrentQuestionStatus.NEW,
			enums.CurrentQuestionStatus.TRANSIT, enums.CurrentQuestionStatus.REMOVE}
	default:
		return utility.GetErrorResponse(common.MSG_NO_STATUS)
	}
	quesRepo := repo.NewQuestionRepository(requestDto.GroupCode)
	if quesRepo == nil {
		return utility.GetErrorResponse(common.MSG_UNATHORIZED_ACCESS)
	}
	responseDto, err := quesRepo.FindTeachersRequests(requestDto, status)
	if err != nil {
		fmt.Println(err.Error())
		errorMsg := utility.GetErrorMsg(err)
		if len(errorMsg) > 0 {
			return utility.GetErrorResponse(errorMsg)
		}
		return utility.GetErrorResponse(common.MSG_REQUEST_FAILED)
	}
	return utility.GetSuccessResponse(responseDto)
}
|
package main
import (
"fmt"
"github.com/xinxuwang/gevloop"
"log"
"net"
"syscall"
)
// session accumulates the bytes read from one client connection.
type session struct {
	bytes []byte // fixed 5-byte reassembly buffer (see make([]byte, 5) in main)
	pos   int    // bytes of the current message received so far
}
// main runs a toy epoll-based echo-ish server on 0.0.0.0:2000 using
// gevloop: an accept watcher registers a per-connection read watcher,
// which reassembles fixed 5-byte messages and logs them.
func main() {
	// Non-blocking IPv4 TCP listening socket. (On common Linux targets
	// O_NONBLOCK has the same value as SOCK_NONBLOCK, so the flag is
	// accepted as part of the socket type; SetNonblock below makes the
	// intent explicit either way.)
	accept, err := syscall.Socket(syscall.AF_INET, syscall.O_NONBLOCK|syscall.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal("err:", err)
	}
	defer syscall.Close(accept)
	if err = syscall.SetNonblock(accept, true); err != nil {
		log.Fatal("Set noblock err:", err)
	}
	// Bind 0.0.0.0:2000 and listen with a backlog of 10.
	addr := syscall.SockaddrInet4{Port: 2000}
	copy(addr.Addr[:], net.ParseIP("0.0.0.0").To4())
	if err := syscall.Bind(accept, &addr); err != nil {
		log.Fatal("Bind err:", err)
	}
	if err := syscall.Listen(accept, 10); err != nil {
		log.Fatal("Listen err:", err)
	}
	el, err := gevloop.Init()
	if err != nil {
		log.Fatal("err:", err)
	}
	log.Println("Accept fd:", accept)
	// Accept watcher: fires whenever the listening fd is readable.
	acceptIO := gevloop.EvIO{}
	acceptIO.Init(el, func(evLoop *gevloop.EvLoop, event gevloop.Event, revent uint32) {
		log.Println("AcceptIO Called")
		connFd, _, err := syscall.Accept(event.Fd())
		if err != nil {
			log.Println("accept: ", err)
			return
		}
		syscall.SetNonblock(connFd, true)
		connFdIO := gevloop.EvIO{}
		// Per-connection reassembly state; messages are assumed to be
		// exactly 5 bytes ("HELLO" below).
		sess := session{
			bytes: make([]byte, 5),
			pos:   0,
		}
		connFdIO.Init(el, func(evLoop *gevloop.EvLoop, event gevloop.Event, revent uint32) {
			log.Println("connFdIO Called")
			//assume `HELLO`
			for {
				buf := make([]byte, 5)
				nbytes, err := syscall.Read(event.Fd(), buf)
				// se aliases &sess, which was passed as the watcher data below.
				se := event.Data().(*session)
				if err != nil {
					log.Println("Read Error:", err)
					return
				}
				if nbytes > 0 {
					fmt.Println("Read n:", nbytes)
					copy(se.bytes[se.pos:], buf)
					se.pos += nbytes
					// NOTE(review): len(se.bytes) is always 5 (fixed by the
					// make above), so this condition is always true; it was
					// probably meant to be `se.pos == 5` — confirm.
					if 5 == len(se.bytes) {
						log.Println(string(se.bytes))
						sess.pos = 0
						return
					}
				}
				if nbytes == 0 {
					// Peer closed the connection. Note the fd is never closed
					// nor the watcher stopped (both are commented out).
					log.Println("nbytes == 0")
					//syscall.Close(event.Fd())
					//event.Stop()
					return
				}
				fmt.Println("Read < 0")
				return
			}
		}, connFd, syscall.EPOLLIN, &sess)
		connFdIO.Start()
		// The mask folds EPOLLET's sign bit into a 32-bit flag value;
		// NOTE(review): confirm gevloop expects the raw uint32 bit pattern.
	}, accept, syscall.EPOLLIN|syscall.EPOLLET&0xffffffff, nil)
	acceptIO.Start()
	err = el.Run()
	if err != nil {
		log.Println("error:", err)
	}
}
|
package main
import "fmt"
// list is a circular doubly linked list guarded by a sentinel node:
// sentinel.next is the first element, sentinel.prev the last, and an
// empty list has the sentinel linked to itself.
type list struct {
	sentinel *node
}

// node is a single element of the list.
type node struct {
	data string
	prev *node
	next *node
}

// newList returns an empty list whose sentinel is self-linked.
func newList() *list {
	s := new(node)
	s.next, s.prev = s, s
	return &list{sentinel: s}
}

// insert adds the element e at index i in the list l.
func (l *list) insert(i int, e string) {
	// Walk to the node after which the new element goes (index i-1,
	// or the sentinel itself for i == 0).
	cur := l.sentinel
	for ; i > 0; i-- {
		cur = cur.next
	}
	n := &node{data: e, prev: cur, next: cur.next}
	cur.next = n
	n.next.prev = n
}

// delete removes the element at index i in the list l.
func (l *list) delete(i int) {
	// Walk i+1 steps from the sentinel to land on the node at index i.
	cur := l.sentinel
	for ; i >= 0; i-- {
		cur = cur.next
	}
	cur.prev.next = cur.next
	cur.next.prev = cur.prev
}

// printList writes the elements in order, space-separated, then a newline.
func (l *list) printList() {
	for cur := l.sentinel.next; cur != l.sentinel; cur = cur.next {
		fmt.Print(cur.data + " ")
	}
	fmt.Println()
}
// main exercises insert/delete/printList; each printList call is
// annotated with the expected output.
func main() {
	l := newList()
	l.insert(0, "a")
	l.insert(1, "c")
	l.insert(2, "d")
	l.insert(3, "e")
	l.insert(1, "b")
	l.insert(0, "0")
	l.insert(3, "-")
	l.printList() // 0ab-cde
	l.delete(3)
	l.printList() // 0abcde
	l.delete(0)
	l.printList() // abcde
	l.delete(2)
	l.printList() // abde
	l.delete(0)
	l.delete(0)
	l.delete(0)
	l.delete(0)
	l.printList() // <empty>
}
|
package main
import (
"fmt"
"mustard/base/container"
)
// E is unused in this file.
// NOTE(review): dead code — delete unless another file in the package
// refers to it.
type E struct {
	name string
	age  int
}
// main demonstrates the trie: inserts one key, dumps the root's
// children, then runs three prefix probes.
func main() {
	trie := container.NewTrie()
	trie.Insert([]byte("01234"))
	fmt.Println(trie.Root.DumpChild())
	fmt.Println(trie.IsPrefix([]byte("01234")))
	// NOTE(review): the two calls below pass string where the call above
	// passes []byte — confirm IsPrefix actually accepts both (it would
	// need an interface{} parameter for this to compile).
	fmt.Println(trie.IsPrefix("012345"))
	fmt.Println(trie.IsPrefix("0123"))
}
|
package AuthMiddleware
import (
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"mytweet/middlewares/aws/dynamodb"
"net/http"
)
// AuthMiddleware returns a gin middleware that gates requests on a
// "uuid" session cookie. A missing cookie redirects to /login and
// aborts; otherwise the session record is fetched from the DynamoDB
// "sessions" table, logged for debugging, and stored on the context
// under "sessions" before the chain continues.
// Fixes over the previous version: the variable `error` (which shadowed
// the builtin type) is renamed to err, the Unmarshal error is checked,
// and the type assertions use the ,ok form so a malformed record no
// longer panics the handler.
func AuthMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		fmt.Println("-----AuthMiddleware-----")
		uuid, err := c.Cookie("uuid")
		if err != nil {
			fmt.Println("this user is not login")
			c.Redirect(http.StatusTemporaryRedirect, "/login")
			c.AbortWithStatus(http.StatusTemporaryRedirect)
			return
		}
		fmt.Println("uuid is:" + uuid)
		// NOTE(review): the lookup error is still ignored; a failed fetch
		// yields an empty session rather than a rejected request.
		output, _ := dynamodb.GetItemById("sessions", uuid)
		fmt.Println("--- output -> item -> data -> Value -> login || username -> Value ---")
		var result map[string]interface{}
		if err := json.Unmarshal(output, &result); err != nil {
			fmt.Println("could not decode session record:", err)
		}
		// output -> item -> data -> Value
		if data, ok := result["data"].(map[string]interface{}); ok {
			if fish, ok := data["Value"].(map[string]interface{}); ok {
				for key, value := range fish {
					// login || username -> Value (probably not a practical
					// approach, per the original author's note)
					if obj, ok := value.(map[string]interface{}); ok {
						fmt.Println(key, " : ", obj["Value"])
					}
				}
			}
		}
		c.Set("sessions", string(output))
		c.Next()
	}
}
|
package packages
// Graph is a directed weighted graph stored as one adjacency map per node.
type Graph struct {
	nodes []*Node
}

// Node is a graph vertex; edges maps destination node id -> edge weight.
type Node struct {
	id    int
	edges map[int]int
}

// InitGraph returns an empty graph.
func InitGraph() *Graph {
	return &Graph{nodes: []*Node{}}
}

// InitNode returns a node with the given id and no edges.
func InitNode(id int) *Node {
	return &Node{id: id, edges: make(map[int]int)}
}

// GetId returns the node's id.
func (n *Node) GetId() int {
	return n.id
}

// GetEdges returns the node's adjacency map (destination id -> weight).
func (n *Node) GetEdges() map[int]int {
	return n.edges
}

// AddNode appends a fresh node to the graph and returns its id, which
// is simply its index in the node slice.
func (g *Graph) AddNode() (id int) {
	id = len(g.nodes)
	g.nodes = append(g.nodes, InitNode(id))
	return id
}

// AddEdges records a directed edge nodeId -> dstId with the given weight.
func (g *Graph) AddEdges(nodeId, dstId, weight int) {
	// The adjacency map holds the destination id as key, weight as value.
	g.nodes[nodeId].edges[dstId] = weight
}

// NeighBors returns every node connected to the node with the given id
// in either direction: targets of its outgoing edges and sources of
// edges pointing at it.
func (g *Graph) NeighBors(id int) (result []*Node) {
	for _, n := range g.nodes {
		for dst := range n.edges {
			// Outgoing edge from the requested node: collect its target.
			if n.id == id {
				result = append(result, g.nodes[dst])
			}
			// Incoming edge into the requested node: collect its source.
			if dst == id {
				result = append(result, n)
			}
		}
	}
	return result
}

// GetNodes returns all nodes in the graph.
func (g *Graph) GetNodes() (result []*Node) {
	return g.nodes
}

// GetEdges returns every edge as [source id, destination id, weight].
func (g *Graph) GetEdges() (result [][3]int) {
	for _, n := range g.nodes {
		for dst, w := range n.edges {
			result = append(result, [3]int{n.id, dst, w})
		}
	}
	return result
}
|
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
corev1 "k8s.io/api/core/v1"
)
// makeTektonResultsFolderWritable returns an init-style Container that
// runs `chmod 777` on the Tekton results directory so any user in later
// step containers can write to it.
func makeTektonResultsFolderWritable(shellImage string, volumeMounts []corev1.VolumeMount) *corev1.Container {
	c := corev1.Container{
		Name:         "tekton-results-folder-writable",
		Image:        shellImage,
		Command:      []string{"sh"},
		Args:         []string{"-c", "chmod 777 " + ResultsDir},
		VolumeMounts: volumeMounts,
	}
	return &c
}
|
package conn
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/go-xorm/xorm"
)
// DBClient wraps a shared xorm engine for MySQL access.
type DBClient struct {
	Engine *xorm.Engine
}
// NewDBClient opens a MySQL connection through xorm and verifies it
// with a ping. The DSN follows go-sql-driver's
// user:password@tcp(host:port)/dbname format.
func NewDBClient(user, host, port, pwd, dbName string) (db *DBClient, err error) {
	db = &DBClient{}
	// go-sql-driver requires the network and address as tcp(host:port);
	// the previous format string ("user:pwd@host:port/db") omitted it
	// and is not a valid DSN.
	dataSourceName := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8", user, pwd, host, port, dbName)
	db.Engine, err = xorm.NewEngine("mysql", dataSourceName)
	if err != nil {
		// Previously this error was silently overwritten by Ping, which
		// would then dereference a nil Engine and panic.
		return nil, err
	}
	err = db.Engine.Ping()
	return
}
// Close shuts down the engine's connection pool; the error is
// deliberately discarded (best-effort cleanup).
func (d *DBClient) Close() {
	_ = d.Engine.Close()
}
// NewSession returns a fresh session created from the underlying xorm
// engine. The previous implementation called itself (d.NewSession()),
// recursing until stack overflow on first use; it must delegate to the
// Engine. The value return type is preserved for callers, hence the
// dereference of Engine.NewSession's *Session result.
func (d *DBClient) NewSession() xorm.Session {
	return *d.Engine.NewSession()
}
|
package client
import (
"crypto/sha1"
"encoding/hex"
"hash"
"io"
)
// hasherReader wraps an io.Reader so that a SHA-1 digest of everything
// read through it is maintained as a side effect.
type hasherReader struct {
	hash   hash.Hash
	reader io.Reader
}

// newHasherReader wraps r: every byte subsequently read via the result
// is also fed into an internal SHA-1 hash by a TeeReader.
func newHasherReader(r io.Reader) hasherReader {
	h := sha1.New()
	return hasherReader{
		hash:   h,
		reader: io.TeeReader(r, h),
	}
}

// Hash returns the hex-encoded SHA-1 digest of all bytes read so far.
// Make sure the underlying reader has been fully consumed first.
func (h hasherReader) Hash() string {
	return hex.EncodeToString(h.hash.Sum(nil))
}

// Read satisfies io.Reader by delegating to the wrapped reader, which
// updates the hash as it goes.
func (h hasherReader) Read(p []byte) (n int, err error) {
	return h.reader.Read(p)
}
|
package main
import (
"time"
"github.com/jacmba/desclock/model"
"github.com/jacmba/desclock/view"
)
// main builds the clock model and view, starts the periodic refresh on
// a background goroutine, and hands the main goroutine to the view's
// event loop via Init.
func main() {
	tm := model.NewTime()
	v := view.NewView(tm)
	go run(v)
	v.Init()
}
// run refreshes the view roughly ten times a second, forever. It runs
// on its own goroutine and has no shutdown signal, so it lives for the
// life of the process.
// Fix: the previous version called v.Update() once before entering the
// loop, which was redundant — the loop's first iteration updates
// immediately anyway.
func run(v *view.View) {
	for {
		v.Update()
		time.Sleep(100 * time.Millisecond)
	}
}
|
package observer
import (
"sync"
"sync/atomic"
)
// Batch appears intended to coalesce concurrent Exec calls.
// NOTE(review): mu and cond are never used anywhere in this file, and
// count is only ever incremented — the type looks unfinished.
type Batch struct {
	mu    sync.Mutex
	cond  sync.Cond
	count uint32
}

// Exec atomically bumps the call counter.
// NOTE(review): f is never invoked and both conditional branches are
// empty, so this method currently has no observable effect beyond the
// counter — the batching logic seems to be missing.
func (b *Batch) Exec(f func()) {
	i := atomic.AddUint32(&b.count, 1)
	if i > 8 {
	}
	if i > 1 {
	}
}
|
package middleware
import (
"net/http"
"peribahasa/app/models"
"github.com/gorilla/context"
)
// Xclaim is the gorilla/context key under which the parsed token claim
// is stored for each request.
var Xclaim = &models.Token{}
// JwtAuthentication parses the Authorization bearer token and stores
// the resulting claim on the request context. Whitelisted paths skip
// authentication entirely.
var JwtAuthentication = func(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		noAuth := []string{"/api/register", "/api/login", "/"}
		requestPath := r.URL.Path
		//skip authentication for request path that in whitelist
		for _, value := range noAuth {
			if value == requestPath {
				next.ServeHTTP(w, r)
				return
			}
		}
		bearerToken := r.Header.Get("Authorization")
		claim := &models.Token{}
		// NOTE(review): a parse failure only short-circuits for "/api";
		// for every other path the request continues with an EMPTY claim
		// stored below — i.e. invalid tokens are never actually rejected
		// here. Confirm this is intentional (AllowAccess may be the real
		// gate).
		if err := claim.Parse(bearerToken); err != nil && requestPath == "/api" {
			next.ServeHTTP(w, r)
			return
		}
		context.Set(r, Xclaim, claim)
		next.ServeHTTP(w, r)
	})
}
// AllowAccess returns nil when the token claim stored on the request
// grants one of the AllowedRoles, and the underlying role error
// otherwise. It panics if JwtAuthentication has not populated Xclaim.
func AllowAccess(r *http.Request, AllowedRoles models.RoleTypes) error {
	claim := context.Get(r, Xclaim).(*models.Token)
	var roles models.Roles = claim.Roles
	return roles.IsAllowed(AllowedRoles)
}
|
package main
// trailingZeroes counts the trailing zeros of n! without computing the
// factorial: each zero comes from a factor 10 = 2*5 and factors of 5
// are the scarce ones, so it suffices to count the multiples of 5, 25,
// 125, ... among 1..n.
func trailingZeroes(n int) int {
	count := 0
	for p := n / 5; p > 0; p /= 5 {
		count += p
	}
	return count
}
// trailingZeroes2 is a brute-force cross-check: it computes n!
// explicitly and counts its trailing decimal zeros.
// NOTE(review): int64 overflows for n > 20 (21! exceeds 2^63-1), so
// results are wrong beyond that point; use math/big or restrict n.
func trailingZeroes2(n int) int {
	if n < 5 {
		return 0
	}
	var total = int64(1)
	for i := 1; i <= n; i++ {
		total = total * int64(i)
	}
	var ret = 0
	for total%10 == 0 {
		ret += 1
		total /= 10
	}
	return ret
}
|
//go:generate fyne bundle -o bundled.go Icon.png
package main
import (
"log"
"fyne.io/fyne/v2"
"fyne.io/fyne/v2/app"
"fyne.io/fyne/v2/dialog"
"fyne.io/fyne/v2/widget"
"github.com/diamondburned/arikawa/v2/session"
)
// prefTokenKey is the preferences key under which the Discord auth
// token is cached between runs.
const prefTokenKey = "auth.token"

// main builds the window, kicks off login on a goroutine so the UI can
// appear immediately, and runs the event loop. ShowAndRun blocks until
// the app quits, after which the Discord connection is closed.
func main() {
	a := app.NewWithID("xyz.andy.fibro")
	a.SetIcon(resourceIconPng)
	w := a.NewWindow("Fibro: Discord")
	u := &ui{}
	w.SetContent(u.makeUI())
	w.Resize(fyne.NewSize(480, 320))
	go login(w, a.Preferences(), u)
	w.ShowAndRun()
	// after app quits
	if u.conn != nil {
		_ = u.conn.Close()
	}
}
// login connects with a cached token when one is available; otherwise
// it shows an email/password form whose cancel button ("Use Token
// instead") routes to the raw-token form.
func login(w fyne.Window, p fyne.Preferences, u *ui) {
	tok := p.String(prefTokenKey)
	if tok != "" {
		sess, err := session.New(tok)
		if err == nil {
			loadServers(sess, u)
			return
		} else {
			// Fall through to the interactive form on a bad cached token.
			log.Println("Error connecting with token", err)
		}
	}
	email := widget.NewEntry()
	pass := widget.NewPasswordEntry()
	dialog.ShowForm("Log in to Discord", "Log in", "Use Token instead",
		[]*widget.FormItem{
			{Text: "Email", Widget: email},
			{Text: "Password", Widget: pass},
		}, func(ok bool) {
			if ok {
				doLogin(email.Text, pass.Text, w, p, u)
			} else {
				showTokenForm(w, p, u)
			}
		}, w)
}
// showTokenForm prompts for a raw Discord token; on success the token
// is cached in preferences and the server list is loaded.
func showTokenForm(w fyne.Window, p fyne.Preferences, u *ui) {
	token := widget.NewPasswordEntry()
	dialog.ShowForm("Log in to Discord using a token", "Log in", "Cancel",
		[]*widget.FormItem{
			{Text: "Token", Widget: token},
		}, func(ok bool) {
			if ok {
				s, err := session.New(token.Text)
				if err != nil {
					log.Println("Login Err", err)
					return
				}
				p.SetString(prefTokenKey, token.Text)
				loadServers(s, u)
			}
		}, w)
}
// doLogin performs an email/password login, falling back to an MFA
// prompt when the server demands a second factor. Successful logins
// cache the session token in preferences and load the server list.
func doLogin(email, pass string, w fyne.Window, p fyne.Preferences, u *ui) {
	sess, err := session.Login(email, pass, "")
	if err == nil {
		p.SetString(prefTokenKey, sess.Token)
		loadServers(sess, u)
		return
	}
	// Any error other than "MFA required" is terminal for this attempt.
	if err != session.ErrMFA {
		log.Println("Login Err", err)
		return
	}
	// MFA required: retry the login with the one-time token the user enters.
	mfa := widget.NewEntry()
	dialog.ShowForm("Multi-Factor required", "Confirm", "Cancel",
		[]*widget.FormItem{
			{Text: "Please enter your MFA token", Widget: mfa},
		},
		func(ok bool) {
			if !ok {
				return
			}
			sess, err := session.Login(email, pass, mfa.Text)
			if err != nil {
				log.Println("Failure in MFA verification")
				return
			}
			p.SetString(prefTokenKey, sess.Token)
			loadServers(sess, u)
		}, w)
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package owner
import (
"golang.org/x/time/rate"
"github.com/bitmark-inc/bitmarkd/account"
"github.com/bitmark-inc/bitmarkd/fault"
"github.com/bitmark-inc/bitmarkd/merkle"
"github.com/bitmark-inc/bitmarkd/mode"
"github.com/bitmark-inc/bitmarkd/ownership"
"github.com/bitmark-inc/bitmarkd/reservoir"
"github.com/bitmark-inc/bitmarkd/rpc/ratelimit"
"github.com/bitmark-inc/bitmarkd/storage"
"github.com/bitmark-inc/bitmarkd/transactionrecord"
"github.com/bitmark-inc/logger"
)
// Owner
// -----

// Owner - type for the RPC. Bundles the logger, rate limiter, storage
// pools and ownership index that the owner RPCs operate on.
type Owner struct {
	Log              *logger.L
	Limiter          *rate.Limiter
	PoolTransactions storage.Handle
	PoolAssets       storage.Handle
	Ownership        ownership.Ownership
}
// Owner bitmarks
// --------------

const (
	// MaximumBitmarksCount caps the Count argument accepted by Bitmarks.
	MaximumBitmarksCount = 100
	// Rate limiter parameters for the Owner RPCs.
	rateLimitOwner = 200
	rateBurstOwner = 100
)
// BitmarksArguments - arguments for RPC
type BitmarksArguments struct {
	Owner *account.Account `json:"owner"` // base58
	// NOTE(review): the capitalised "Start" JSON key is inconsistent with
	// the lowercase keys elsewhere, but changing it would break existing
	// clients, so the wire format is left as-is.
	Start uint64 `json:"Start,string"` // first record number
	Count int    `json:"count"`        // number of records
}
// BitmarksReply - result of owner RPC
type BitmarksReply struct {
	Next uint64                    `json:"next,string"` // Start value for the next call
	Data []ownership.Record        `json:"data"`        // list of bitmarks either issue or transfer
	Tx   map[string]BitmarksRecord `json:"tx"`          // table of tx records keyed by tx id
}
// BitmarksRecord - can be any of the transaction records
type BitmarksRecord struct {
	Record  string      `json:"record"`
	TxId    interface{} `json:"txId,omitempty"`
	InBlock uint64      `json:"inBlock"`
	AssetId interface{} `json:"assetId,omitempty"`
	Data    interface{} `json:"data"`
}
// BlockAsset - special record for owned blocks
type BlockAsset struct {
	Number uint64 `json:"number"`
}
// New constructs an Owner RPC handler wired to the given logger,
// transaction/asset pools and ownership index, with a fresh rate
// limiter using the package's owner-RPC limits.
func New(log *logger.L, pools reservoir.Handles, os ownership.Ownership) *Owner {
	o := &Owner{
		Log:              log,
		Limiter:          rate.NewLimiter(rateLimitOwner, rateBurstOwner),
		PoolTransactions: pools.Transactions,
		PoolAssets:       pools.Assets,
		Ownership:        os,
	}
	return o
}
// Bitmarks - list bitmarks belonging to an account
//
// Returns up to arguments.Count ownership records starting at record
// number arguments.Start, plus a lookup table (reply.Tx) of the
// transaction and asset records they reference.  reply.Next holds the
// Start value for the next page, or zero when no records were found.
func (owner *Owner) Bitmarks(arguments *BitmarksArguments, reply *BitmarksReply) error {
	// charge the rate limiter by the requested record count
	if err := ratelimit.LimitN(owner.Limiter, arguments.Count, MaximumBitmarksCount); nil != err {
		return err
	}
	log := owner.Log
	log.Infof("Owner.Bitmarks: %+v", arguments)
	ownershipData, err := owner.Ownership.ListBitmarksFor(arguments.Owner, arguments.Start, arguments.Count)
	if nil != err {
		return err
	}
	log.Debugf("ownership: %+v", ownershipData)
	// extract unique TxIds
	// issues TxId == IssueTxId
	// assets could be duplicates
	txIds := make(map[merkle.Digest]struct{})
	assetIds := make(map[transactionrecord.AssetIdentifier]struct{})
	current := uint64(0) // highest record number seen so far
	for _, r := range ownershipData {
		txIds[r.TxId] = struct{}{}
		txIds[r.IssueTxId] = struct{}{}
		switch r.Item {
		case ownership.OwnedAsset:
			// an owned asset must carry an asset id; a nil here is data
			// corruption, so panic rather than return bad data
			ai := r.AssetId
			if nil == ai {
				log.Criticalf("asset id is nil: %+v", r)
				logger.Panicf("asset id is nil: %+v", r)
			}
			assetIds[*r.AssetId] = struct{}{}
		case ownership.OwnedBlock:
			if nil == r.BlockNumber {
				log.Criticalf("block number is nil: %+v", r)
				logger.Panicf("blockNumber is nil: %+v", r)
			}
		case ownership.OwnedShare:
			ai := r.AssetId
			if nil == ai {
				log.Criticalf("asset id is nil: %+v", r)
				logger.Panicf("asset id is nil: %+v", r)
			}
			assetIds[*r.AssetId] = struct{}{}
		default:
			log.Criticalf("unsupported item type: %d", r.Item)
			logger.Panicf("unsupported item type: %d", r.Item)
		}
		current = r.N
	}
	// resolve each referenced transaction to its unpacked record
	records := make(map[string]BitmarksRecord)
	for txId := range txIds {
		log.Debugf("txId: %v", txId)
		inBlock, transaction := owner.PoolTransactions.GetNB(txId[:])
		if nil == transaction {
			return fault.LinkToInvalidOrUnconfirmedTransaction
		}
		tx, _, err := transactionrecord.Packed(transaction).Unpack(mode.IsTesting())
		if nil != err {
			return err
		}
		record, ok := transactionrecord.RecordName(tx)
		if !ok {
			log.Errorf("problem tx: %+v", tx)
			return fault.LinkToInvalidOrUnconfirmedTransaction
		}
		textTxId, err := txId.MarshalText()
		if nil != err {
			return err
		}
		records[string(textTxId)] = BitmarksRecord{
			Record:  record,
			TxId:    txId,
			InBlock: inBlock,
			Data:    tx,
		}
	}
	// resolve each referenced asset; a zero asset id denotes the special
	// "owned block" pseudo-asset which has no stored record
assetsLoop:
	for assetId := range assetIds {
		log.Debugf("asset id: %v", assetId)
		var nnn transactionrecord.AssetIdentifier
		if nnn == assetId {
			// zero asset id: emit a synthetic Block record keyed by the
			// all-zero hex identifier
			records["00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"] = BitmarksRecord{
				Record: "Block",
				//AssetId: assetId,
				Data: BlockAsset{
					Number: 0,
				},
			}
			continue assetsLoop
		}
		inBlock, transaction := owner.PoolAssets.GetNB(assetId[:])
		if nil == transaction {
			return fault.AssetNotFound
		}
		tx, _, err := transactionrecord.Packed(transaction).Unpack(mode.IsTesting())
		if nil != err {
			return err
		}
		record, ok := transactionrecord.RecordName(tx)
		if !ok {
			return fault.AssetNotFound
		}
		textAssetId, err := assetId.MarshalText()
		if nil != err {
			return err
		}
		records[string(textAssetId)] = BitmarksRecord{
			Record:  record,
			InBlock: inBlock,
			AssetId: assetId,
			Data:    tx,
		}
	}
	reply.Data = ownershipData
	reply.Tx = records
	// if no records were found then just return Next as zero,
	// otherwise the next possible record number
	if 0 == current {
		reply.Next = 0
	} else {
		reply.Next = current + 1
	}
	return nil
}
|
package cmd
import (
"encoding/json"
"os"
"strings"
chimeralib "github.com/chimera-kube/chimera-admission-library/pkg/chimera"
"github.com/chimera-kube/chimera-admission/internal/pkg/chimera"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
admissionv1 "k8s.io/api/admission/v1"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)
// startServer is the cli action that boots the admission webhook: it
// resolves (and, for remote sources, downloads) the Wasm module, builds
// the package-level Wasm worker, translates the --operations flags into
// Kubernetes operation types and starts the chimera admission server.
//
// All lowercase identifiers not declared here (wasmUri, failurePolicy,
// operations, admissionName, ...) are package-level cli flag variables.
func startServer(c *cli.Context) error {
	if wasmUri == "" {
		return errors.New("Please, provide a Wasm URI to load")
	}
	// only "Ignore" and "Fail" (case-insensitive) are accepted; the raw
	// flag value is used later when building the webhook config
	if fp := strings.ToLower(failurePolicy); fp != "ignore" && fp != "fail" {
		return errors.New("FailurePolicy must be \"Ignore\" or \"Fail\"")
	}
	var wasmModulePath string
	moduleSource, modulePath, err := chimera.WasmModuleSource(wasmUri)
	if err != nil {
		return err
	}
	switch moduleSource {
	case chimera.FileSource:
		// local file: use it in place
		wasmModulePath = modulePath
	case chimera.HTTPSource, chimera.RegistrySource:
		// remote module: download to a temporary file, removed on return
		var err error
		wasmModulePath, err = chimera.FetchRemoteWasmModule(
			moduleSource,
			modulePath,
			wasmRemoteInsecure,
			wasmRemoteNonTLS,
			wasmRemoteCA)
		if err != nil {
			return errors.Wrap(err, "Cannot download remote Wasm module from OCI registry")
		}
		defer os.Remove(wasmModulePath)
	}
	wasmEnvKeys, wasmEnvValues := computeWasmEnv()
	// package-level worker, consumed by processRequest for every admission review
	wasmWorker, err = chimera.NewWasmWorker(wasmModulePath, wasmEnvKeys, wasmEnvValues)
	if err != nil {
		return err
	}
	// map the --operations strings onto admissionregistration operation
	// types; unrecognised values are silently skipped
	operationTypes := []admissionregistrationv1.OperationType{}
	for _, operation := range operations.Value() {
		switch strings.ToUpper(operation) {
		case "*":
			operationTypes = append(
				operationTypes,
				admissionregistrationv1.OperationAll,
			)
		case "CREATE":
			operationTypes = append(
				operationTypes,
				admissionregistrationv1.Create,
			)
		case "UPDATE":
			operationTypes = append(
				operationTypes,
				admissionregistrationv1.Update,
			)
		case "DELETE":
			operationTypes = append(
				operationTypes,
				admissionregistrationv1.Delete,
			)
		case "CONNECT":
			operationTypes = append(
				operationTypes,
				admissionregistrationv1.Connect,
			)
		default:
			continue
		}
	}
	log := chimera.NewLogger(c.Bool("debug"))
	// single-webhook configuration assembled from the cli flags
	config := chimeralib.AdmissionConfig{
		Name:          admissionName,
		CallbackHost:  admissionHost,
		CallbackPort:  admissionPort,
		KubeNamespace: kubeNamespace,
		KubeService:   kubeService,
		Webhooks: []chimeralib.Webhook{
			{
				Rules: []admissionregistrationv1.RuleWithOperations{
					{
						Operations: operationTypes,
						Rule: admissionregistrationv1.Rule{
							APIGroups:   apiGroups.Value(),
							APIVersions: apiVersions.Value(),
							Resources:   resources.Value(),
						},
					},
				},
				Callback:      processRequest,
				Path:          validatePath,
				FailurePolicy: admissionregistrationv1.FailurePolicyType(failurePolicy),
			},
		},
		TLSExtraSANs:              tlsExtraSANs.Value(),
		CertFile:                  certFile,
		KeyFile:                   keyFile,
		CaFile:                    caFile,
		SkipAdmissionRegistration: skipAdmissionRegistration,
		Log:                       &log,
	}
	return chimeralib.StartServer(&config, insecureServer)
}
// processRequest is the admission webhook callback: it serialises the
// admission request, hands it to the Wasm worker and converts the
// worker's verdict into an allow or reject response.
func processRequest(admissionReviewRequest *admissionv1.AdmissionRequest) (chimeralib.WebhookResponse, error) {
	payload, err := json.Marshal(admissionReviewRequest)
	if err != nil {
		return chimeralib.WebhookResponse{}, err
	}
	verdict, err := wasmWorker.ProcessRequest(payload)
	if err != nil {
		return chimeralib.WebhookResponse{}, err
	}
	if verdict.Accepted {
		return chimeralib.NewAllowRequest(), nil
	}
	return chimeralib.NewRejectRequest().WithMessage(verdict.Message), nil
}
|
package journal
import (
"context"
"fmt"
"time"
"github.com/rareinator/Svendeprove/Backend/packages/mssql"
. "github.com/rareinator/Svendeprove/Backend/packages/protocol"
)
// JournalServer implements the journal gRPC service backed by an MSSQL
// database.
type JournalServer struct {
	UnimplementedJournalServiceServer
	DB            *mssql.MSSQL // database access layer
	ListenAddress string       // address reported by GetHealth
}
// GetJournal looks up a single journal by id and converts it into the
// protocol representation (creation time formatted as dd/mm/yyyy hh:mm:ss).
func (j *JournalServer) GetJournal(ctx context.Context, journal *JournalRequest) (*Journal, error) {
	record, err := j.DB.GetJournal(journal.JournalId)
	if err != nil {
		return nil, err
	}
	return &Journal{
		JournalId:    record.JournalId,
		CreationTime: record.CreationTime.Format("02/01/2006 15:04:05"),
		Intro:        record.Intro,
		Patient:      record.Patient,
		CreatedBy:    record.CreatedBy,
	}, nil
}
// GetHealth reports service liveness together with the listen address.
func (j *JournalServer) GetHealth(ctx context.Context, e *Empty) (*Health, error) {
	message := fmt.Sprintf("Journal service is up and running on: %v 🚀", j.ListenAddress)
	return &Health{Message: message}, nil
}
// GetJournalsByPatient returns every journal belonging to the given user.
func (j *JournalServer) GetJournalsByPatient(ctx context.Context, ur *UserRequest) (*Journals, error) {
	result := Journals{}
	result.Journals = make([]*Journal, 0)
	dbJournals, err := j.DB.GetJournalsByPatient(ur.UserId)
	if err != nil {
		return nil, err
	}
	for _, record := range dbJournals {
		converted := &Journal{
			JournalId:    record.JournalId,
			CreationTime: record.CreationTime.Format("02/01/2006 15:04:05"),
			Intro:        record.Intro,
			Patient:      record.Patient,
			CreatedBy:    record.CreatedBy,
		}
		result.Journals = append(result.Journals, converted)
	}
	return &result, nil
}
// CreateJournal persists a new journal, stamping it with the current
// time, and returns the input message with its generated id and
// formatted creation time filled in.
func (j *JournalServer) CreateJournal(ctx context.Context, journal *Journal) (*Journal, error) {
	now := time.Now()
	record := mssql.DBJournal{
		CreationTime: now,
		Intro:        journal.Intro,
		Patient:      journal.Patient,
		CreatedBy:    journal.CreatedBy,
	}
	journal.CreationTime = now.Format("02/01/2006 15:04:05")
	if err := j.DB.CreateJournal(&record); err != nil {
		return nil, err
	}
	journal.JournalId = record.JournalId
	return journal, nil
}
// UpdateJournal parses the supplied creation time string and persists
// the changed journal record, echoing the input back on success.
func (j *JournalServer) UpdateJournal(ctx context.Context, journal *Journal) (*Journal, error) {
	created, err := time.Parse("02/01/2006 15:04:05", journal.CreationTime)
	if err != nil {
		return nil, err
	}
	record := mssql.DBJournal{
		JournalId:    journal.JournalId,
		CreationTime: created,
		Intro:        journal.Intro,
		Patient:      journal.Patient,
		CreatedBy:    journal.CreatedBy,
	}
	if err := j.DB.UpdateJournal(&record); err != nil {
		return nil, err
	}
	return journal, nil
}
// DeleteJournal removes the journal identified by the request.
func (j *JournalServer) DeleteJournal(ctx context.Context, jr *JournalRequest) (*Status, error) {
	target := mssql.DBJournal{JournalId: jr.JournalId}
	if err := j.DB.DeleteJournal(&target); err != nil {
		return &Status{Success: false}, err
	}
	return &Status{Success: true}, nil
}
// DeleteJournalDocument removes a journal document together with all of
// its attachments; the attachments are deleted first.
func (j *JournalServer) DeleteJournalDocument(ctx context.Context, jdr *JournalDocumentRequest) (*Status, error) {
	dbJournalDocument := mssql.DBJournalDocument{
		DocumentId: jdr.JournalDocumentId,
	}
	// fetch the document so its attachment list is available
	document, err := j.DB.GetJournalDocument(dbJournalDocument.DocumentId)
	if err != nil {
		return &Status{Success: false}, err
	}
	// Needs to make sure we delete the attachments first
	for _, attachment := range document.Attachments {
		// NOTE(review): &attachment aliases the loop variable; safe only if
		// DeleteAttachment consumes the pointer before the next iteration —
		// confirm (pre-Go-1.22 loop-variable sharing).
		if err := j.DB.DeleteAttachment(&attachment); err != nil {
			return &Status{Success: false}, err
		}
	}
	if err := j.DB.DeleteJournalDocument(&dbJournalDocument); err != nil {
		return &Status{Success: false}, err
	}
	return &Status{Success: true}, nil
}
// UpdateJournalDocument persists the editable fields of a journal
// document and echoes the input back on success.
func (j *JournalServer) UpdateJournalDocument(ctx context.Context, jdr *JournalDocument) (*JournalDocument, error) {
	record := mssql.DBJournalDocument{
		DocumentId: jdr.DocumentId,
		Content:    jdr.Content,
		JournalId:  jdr.JournalId,
		CreatedBy:  jdr.CreatedBy,
		Title:      jdr.Title,
		Summary:    jdr.Summary,
	}
	if err := j.DB.UpdateJournalDocument(&record); err != nil {
		return nil, err
	}
	return jdr, nil
}
// CreateJournalDocument stores a new journal document and its
// attachments, returning the document with the generated id, the
// formatted creation time and each attachment's public download path
// filled in.
//
// Fix: removed leftover debug fmt.Println/Printf calls that wrote to
// stdout on every request, and the redundant len() guard around the
// attachment loop (range over an empty slice is a no-op).
func (j *JournalServer) CreateJournalDocument(ctx context.Context, jd *JournalDocument) (*JournalDocument, error) {
	dbJD := mssql.DBJournalDocument{
		Content:      jd.Content,
		JournalId:    jd.JournalId,
		CreatedBy:    jd.CreatedBy,
		Title:        jd.Title,
		Summary:      jd.Summary,
		CreationTime: time.Now(),
	}
	jd.CreationTime = dbJD.CreationTime.Format("02/01/2006 15:04:05")
	if err := j.DB.CreateJournalDocument(&dbJD); err != nil {
		return nil, err
	}
	for _, attachment := range jd.Attachments {
		fileType, err := j.DB.GetOrCreateFileTypeByName(*attachment.FileType)
		if err != nil {
			return nil, err
		}
		// all attachments of a document share one store path
		storeName := fmt.Sprintf("/journal/document/%v", dbJD.DocumentId)
		store, err := j.DB.GetOrCreateFileStoreByPath(storeName)
		if err != nil {
			return nil, err
		}
		dbAttachment := mssql.DBAttachment{
			FileName:    attachment.FileName,
			FileStoreId: store.FileStoreId,
			DocumentId:  dbJD.DocumentId,
			FileTypeId:  fileType.FileTypeId,
		}
		if err := j.DB.CreateAttachment(&dbAttachment); err != nil {
			return nil, err
		}
		// public URL under which the stored file can be fetched
		path := fmt.Sprintf("https://school.m9ssen.me/static%v/%v.%v", store.Path, attachment.FileName, *attachment.FileType)
		attachment.Path = &path
	}
	jd.DocumentId = dbJD.DocumentId
	return jd, nil
}
// CreateAttachment resolves the attachment's file type and file store
// (creating either if missing), then persists the attachment record.
func (j *JournalServer) CreateAttachment(ctx context.Context, attachment *Attachment) (*Attachment, error) {
	fileType, err := j.DB.GetOrCreateFileTypeByName(*attachment.FileType)
	if err != nil {
		return nil, err
	}
	store, err := j.DB.GetOrCreateFileStoreByPath(*attachment.Path)
	if err != nil {
		return nil, err
	}
	record := mssql.DBAttachment{
		FileName:    attachment.FileName,
		FileStoreId: store.FileStoreId,
		DocumentId:  attachment.DocumentId,
		FileTypeId:  fileType.FileTypeId,
	}
	if err := j.DB.CreateAttachment(&record); err != nil {
		return nil, err
	}
	return attachment, nil
}
// GetJournalDocumentsByJournal lists all documents of a journal,
// converting each stored attachment into its public download URL.
//
// Fix: removed leftover debug fmt.Println/Printf calls and the
// `new(string)` placeholder allocations that were immediately
// overwritten; dropped the redundant len() guard around the range loop.
func (j *JournalServer) GetJournalDocumentsByJournal(ctx context.Context, jr *JournalRequest) (*JournalDocuments, error) {
	journalDocuments := JournalDocuments{
		JournalDocuments: make([]*JournalDocument, 0),
	}
	dbJournalDocuments, err := j.DB.GetJournalDocumentsByJournal(jr.JournalId)
	if err != nil {
		return nil, err
	}
	for _, dbJournalDocument := range dbJournalDocuments {
		var attachments []*Attachment
		for _, attachment := range dbJournalDocument.Attachments {
			// public URL under which the stored file can be fetched
			path := fmt.Sprintf("https://school.m9ssen.me/static%v/%v.%v", attachment.FileStore.Path, attachment.FileName, attachment.FileType.Name)
			fileType := attachment.FileType.Name
			attachments = append(attachments, &Attachment{
				AttachmentId: attachment.AttachmentId,
				FileName:     attachment.FileName,
				FileType:     &fileType,
				Path:         &path,
			})
		}
		journalDocument := JournalDocument{
			DocumentId:   dbJournalDocument.DocumentId,
			Content:      dbJournalDocument.Content,
			JournalId:    dbJournalDocument.JournalId,
			CreatedBy:    dbJournalDocument.CreatedBy,
			Title:        dbJournalDocument.Title,
			Summary:      dbJournalDocument.Summary,
			CreationTime: dbJournalDocument.CreationTime.Format("02/01/2006 15:04:05"),
			Attachments:  attachments,
		}
		journalDocuments.JournalDocuments = append(journalDocuments.JournalDocuments, &journalDocument)
	}
	return &journalDocuments, nil
}
// GetJournalDocument fetches a single journal document, converting each
// stored attachment into its public download URL.
//
// Fix: removed leftover debug fmt.Println/Printf calls and the
// `new(string)` placeholder allocations that were immediately
// overwritten; dropped the redundant len() guard around the range loop.
func (j *JournalServer) GetJournalDocument(ctx context.Context, jdr *JournalDocumentRequest) (*JournalDocument, error) {
	dbJournalDocument, err := j.DB.GetJournalDocument(jdr.JournalDocumentId)
	if err != nil {
		return nil, err
	}
	var attachments []*Attachment
	for _, attachment := range dbJournalDocument.Attachments {
		// public URL under which the stored file can be fetched
		path := fmt.Sprintf("https://school.m9ssen.me/static%v/%v.%v", attachment.FileStore.Path, attachment.FileName, attachment.FileType.Name)
		fileType := attachment.FileType.Name
		attachments = append(attachments, &Attachment{
			AttachmentId: attachment.AttachmentId,
			FileName:     attachment.FileName,
			FileType:     &fileType,
			Path:         &path,
		})
	}
	result := JournalDocument{
		DocumentId:   dbJournalDocument.DocumentId,
		Content:      dbJournalDocument.Content,
		JournalId:    dbJournalDocument.JournalId,
		CreatedBy:    dbJournalDocument.CreatedBy,
		Title:        dbJournalDocument.Title,
		Summary:      dbJournalDocument.Summary,
		CreationTime: dbJournalDocument.CreationTime.Format("02/01/2006 15:04:05"),
		Attachments:  attachments,
	}
	return &result, nil
}
|
package keepassrpc
import "testing"
// TestVersion checks that the packed protocol version {1,2,3} encodes to
// the expected integer (1<<16 | 2<<8 | 3 == 66051).
func TestVersion(t *testing.T) {
	protocolVersion = []uint8{1, 2, 3}
	got := ProtocolVersion()
	if got != 66051 {
		t.Error("ProtocolVersion() returned invalid version")
	}
}
// TestGenKey verifies that 32-byte key generation succeeds and that two
// consecutive keys differ.
func TestGenKey(t *testing.T) {
	first, err := GenKey(32)
	if err != nil {
		t.Error("GenKey failed:", err)
	}
	second, err := GenKey(32)
	if err != nil {
		t.Error("GenKey failed:", err)
	}
	if first.Cmp(second) == 0 {
		t.Error("GenKey returned the same key twice")
	}
}
|
package vox
import (
"encoding/json"
"testing"
"github.com/prebid/prebid-server/openrtb_ext"
)
// TestValidParams asserts that the vox bidder-params schema accepts
// every entry in validParams.
func TestValidParams(t *testing.T) {
	validator, err := openrtb_ext.NewBidderParamsValidator("../../static/bidder-params")
	if err != nil {
		t.Fatalf("Failed to fetch the json schema. %v", err)
	}
	for _, params := range validParams {
		validationErr := validator.Validate(openrtb_ext.BidderVox, json.RawMessage(params))
		if validationErr != nil {
			t.Errorf("Schema rejected valid params: %s", params)
		}
	}
}
// TestInvalidParams asserts that the vox bidder-params schema rejects
// every entry in invalidParams.
func TestInvalidParams(t *testing.T) {
	validator, err := openrtb_ext.NewBidderParamsValidator("../../static/bidder-params")
	if err != nil {
		t.Fatalf("Failed to fetch the json schema. %v", err)
	}
	for _, params := range invalidParams {
		validationErr := validator.Validate(openrtb_ext.BidderVox, json.RawMessage(params))
		if validationErr == nil {
			t.Errorf("Schema allowed invalid params: %s", params)
		}
	}
}
// validParams are JSON payloads the bidder-params schema must accept.
var validParams = []string{
	`{"placementId": "64be6fe6685a271d37e900d2"}`,
	`{"placementId": "Any String Basically"}`,
	`{"placementId":""}`,
	`{"placementId":"id", "imageUrl":"http://site.com/img1.png"}`,
	`{"placementId":"id", "imageUrl":"http://site.com/img1.png", "displaySizes":["123x90", "1x1", "987x1111"]}`,
}
// invalidParams are JSON payloads the bidder-params schema must reject
// (wrong types for placementId, imageUrl and displaySizes).
var invalidParams = []string{
	`{"placementId": 42}`,
	`{"placementId": null}`,
	`{"placementId": 3.1415}`,
	`{"placementId": true}`,
	`{"placementId": false}`,
	`{"placementId":"id", "imageUrl": null}`,
	`{"placementId":"id", "imageUrl": true}`,
	`{"placementId":"id", "imageUrl": []}`,
	`{"placementId":"id", "imageUrl": "http://some.url", "displaySizes": null}`,
	`{"placementId":"id", "imageUrl": "http://some.url", "displaySizes": {}}`,
	`{"placementId":"id", "imageUrl": "http://some.url", "displaySizes": "String"}`,
}
|
package middleware
import (
"net/http"
"log"
)
// Log returns a middleware that logs "Before" when a request starts and
// "After" when the wrapped handler has finished (via defer, so it also
// fires on panic).
//
// Fix: removed the redundant http.HandlerFunc(...) conversion — the
// function literal already has the right type — and gofmt spacing.
func Log() Middleware {
	return func(h http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			log.Println("Before")
			defer log.Println("After")
			h.ServeHTTP(w, r)
		}
	}
}
|
package main
import (
_ "github.com/acrossmounation/redpack/apis"
_ "github.com/acrossmounation/redpack/core/accounts"
"github.com/go-spring/spring-boot"
"github.com/go-spring/spring-web"
_ "github.com/go-spring/starter-echo"
_ "github.com/go-spring/starter-gorm/mysql"
"github.com/jinzhu/gorm"
)
// main wires up the application: configures GORM to use singular table
// names, installs the default web validator and runs the go-spring
// application loop.
func main() {
	SpringBoot.Config(func(db *gorm.DB) {
		// use singular table names for all models
		db.SingularTable(true)
	})
	SpringWeb.Validator = SpringWeb.NewDefaultValidator()
	SpringBoot.RunApplication()
}
|
package models
import (
"fmt"
"github.com/BurntSushi/toml"
)
// Config mirrors the TOML configuration file layout: a single [Data]
// table with organization, user and ticket entries.
type Config struct {
	Data struct {
		Organization string
		User         string
		Ticket       string
	}
}
// ReadConfig populates obj from the TOML file at pathFile.
// NOTE(review): decode errors are only printed, not returned — callers
// cannot distinguish success from failure; confirm this is intended.
func (obj *Config) ReadConfig(pathFile string) {
	if _, err := toml.DecodeFile(pathFile, obj); err != nil {
		fmt.Println(err)
	}
}
|
/*
Copyright 2011-2017 Frederic Langlet
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
you may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package entropy
// This file is a port from the code of the dcs-bwt-compressor project
// http://code.google.com/p/dcs-bwt-compressor/(itself based on PAQ coders)
//// It was originally written by Matt Mahoney as
//// bbb.cpp - big block BWT compressor version 1, Aug. 31, 2006.
//// http://cs.fit.edu/~mmahoney/compression/bbb.cpp
////
//// ENTROPY CODING
////
//// BWT data is best coded with an order 0 model. The transformed text tends
//// to have long runs of identical bytes (e.g. "nnbaaa"). The BWT data is
//// modeled with a modified PAQ with just one context (no mixing) followed
//// by a 5 stage SSE (APM) and bitwise arithmetic coding. Modeling typically
//// takes about as much time as sorting and unsorting in slow mode.
//// The model uses about 5 MB memory.
//// [ Now reduced to about 256KB of memory. ]
////
//// The order 0 model consists of a mapping:
////
//// order 1, 2, 3 contexts ----------+
//// V
//// order 0 context -> bit history -> p -> APM chain -> arithmetic coder
//// t1 sm
////
//// Bits are coded one at a time. The arithmetic coder maintains a range
//// [lo, hi), initially [0, 1) and repeatedly subdivides the range in proportion
//// to p(0), p(1), the next bit probabilities predicted by the model. The final
//// output is the shortest base 256 number x such that lo <= x < hi. As the
//// leading bytes of x become known, they are output. To decompress, the model
//// predictions are repeated as during compression, then the actual bit is
//// determined by which half of the subrange contains x.
////
//// The model inputs a bytewise order 0 context consisting of the last 0 to 7
//// bits of the current byte, plus the number of bits. There are a total of
//// 255 possible bitwise contexts. For each context, a table (t1) maintains
//// an 8 bit state representing the history of 0 and 1 bits previously seen.
//// This history is mapped by another table (a StateMap sm) to a probability,
//// p, that the next bit will be 1. This table is adaptive: after each
//// prediction, the mapping (state -> p) is adjusted to improve the last
//// prediction.
////
//// The output of the StateMap is passed through a series of 6 more adaptive
//// tables, (Adaptive Probability Maps, or APM) each of which maps a context
//// and the input probability to an output probability. The input probability
//// is interpolated between 33 bins on a nonlinear scale with smaller bins
//// near 0 and 1. After each prediction, the corresponding table entries
//// on both sides of p are adjusted to improve the last prediction.
//// The APM chain is like this:
////
//// + A11 ->+ +--->---+ +--->---+
//// | | | | | |
//// p ->+ +-> A2 -> A3 +-> A4 -+-+-> A5 -+-> Encoder
//// | |
//// + A12 ->+
////
//// [ The APM chain has been modified into:
////
//// p --> A2 -> A3 --> A4 --> Encoder
////
//// ]
////
//// A11 and A12 both take c0 (the preceding bits of the current byte) as
//// additional context, but one is fast adapting and the other is slow
//// adapting. Their outputs are averaged.
////
//// A2 is an order 1 context (previous byte and current partial byte).
//// [ A2 has been modified so that it uses only two bits of information
//// from the previous byte: what is the bit in the current bit position
//// and whether the preceding bits are same or different from c0. ]
////
//// A3 takes the previous (but not current) byte as context, plus 2 bits
//// that depend on the current run length (0, 1, 2-3, or 4+), the number
//// of times the last byte was repeated.
//// [ A3 now only takes the two bits on run length. ]
////
//// A4 takes the current byte and the low 5 bits of the second byte back.
//// The output is averaged with 3/4 weight to the A3 output with 1/4 weight.
//// [ A4 has been moved after A5, it takes only the current byte (not the
//// 5 additional bits), and the averaging weights are 1/2 and 1/2. ]
////
//// A5 takes a 14 bit hash of an order 3 context (last 3 bytes plus
//// current partial byte) and is averaged with 1/2 weight to the A4 output.
//// [ A5 takes now 11 bit hash of an order 4 context. ]
////
//// The StateMap, state table, APM, Encoder, and associated code (Array,
//// squash(), stretch()) are taken from PAQ8 with minor non-functional
//// changes (e.g. removing global context).
///////////////////////// state table ////////////////////////
// STATE_TABLE[state,0] = next state if bit is 0, 0 <= state < 256
// STATE_TABLE[state,1] = next state if bit is 1
// STATE_TABLE[state,2] = number of zeros in bit history represented by state
// STATE_TABLE[state,3] = number of ones represented
// States represent a bit history within some context.
// State 0 is the starting state (no bits seen).
// States 1-30 represent all possible sequences of 1-4 bits.
// States 31-252 represent a pair of counts, (n0,n1), the number
// of 0 and 1 bits respectively. If n0+n1 < 16 then there are
// two states for each pair, depending on if a 0 or 1 was the last
// bit seen.
// If n0 and n1 are too large, then there is no state to represent this
// pair, so another state with about the same ratio of n0/n1 is substituted.
// Also, when a bit is observed and the count of the opposite bit is large,
// then part of this count is discarded to favor newer data over old.
// PAQ_STATE_TABLE is the flattened 256x4 state transition table described
// above: entry (state<<2)+0 is the next state on a 0 bit, +1 the next
// state on a 1 bit, +2 the zero count and +3 the one count of the bit
// history the state represents.  Data table — do not edit by hand.
var PAQ_STATE_TABLE = []int{
	1, 2, 0, 0, 3, 5, 1, 0, 4, 6, 0, 1, 7, 10, 2, 0, // 0-3
	8, 12, 1, 1, 9, 13, 1, 1, 11, 14, 0, 2, 15, 19, 3, 0, // 4-7
	16, 23, 2, 1, 17, 24, 2, 1, 18, 25, 2, 1, 20, 27, 1, 2, // 8-11
	21, 28, 1, 2, 22, 29, 1, 2, 26, 30, 0, 3, 31, 33, 4, 0, // 12-15
	32, 35, 3, 1, 32, 35, 3, 1, 32, 35, 3, 1, 32, 35, 3, 1, // 16-19
	34, 37, 2, 2, 34, 37, 2, 2, 34, 37, 2, 2, 34, 37, 2, 2, // 20-23
	34, 37, 2, 2, 34, 37, 2, 2, 36, 39, 1, 3, 36, 39, 1, 3, // 24-27
	36, 39, 1, 3, 36, 39, 1, 3, 38, 40, 0, 4, 41, 43, 5, 0, // 28-31
	42, 45, 4, 1, 42, 45, 4, 1, 44, 47, 3, 2, 44, 47, 3, 2, // 32-35
	46, 49, 2, 3, 46, 49, 2, 3, 48, 51, 1, 4, 48, 51, 1, 4, // 36-39
	50, 52, 0, 5, 53, 43, 6, 0, 54, 57, 5, 1, 54, 57, 5, 1, // 40-43
	56, 59, 4, 2, 56, 59, 4, 2, 58, 61, 3, 3, 58, 61, 3, 3, // 44-47
	60, 63, 2, 4, 60, 63, 2, 4, 62, 65, 1, 5, 62, 65, 1, 5, // 48-51
	50, 66, 0, 6, 67, 55, 7, 0, 68, 57, 6, 1, 68, 57, 6, 1, // 52-55
	70, 73, 5, 2, 70, 73, 5, 2, 72, 75, 4, 3, 72, 75, 4, 3, // 56-59
	74, 77, 3, 4, 74, 77, 3, 4, 76, 79, 2, 5, 76, 79, 2, 5, // 60-63
	62, 81, 1, 6, 62, 81, 1, 6, 64, 82, 0, 7, 83, 69, 8, 0, // 64-67
	84, 71, 7, 1, 84, 71, 7, 1, 86, 73, 6, 2, 86, 73, 6, 2, // 68-71
	44, 59, 5, 3, 44, 59, 5, 3, 58, 61, 4, 4, 58, 61, 4, 4, // 72-75
	60, 49, 3, 5, 60, 49, 3, 5, 76, 89, 2, 6, 76, 89, 2, 6, // 76-79
	78, 91, 1, 7, 78, 91, 1, 7, 80, 92, 0, 8, 93, 69, 9, 0, // 80-83
	94, 87, 8, 1, 94, 87, 8, 1, 96, 45, 7, 2, 96, 45, 7, 2, // 84-87
	48, 99, 2, 7, 48, 99, 2, 7, 88, 101, 1, 8, 88, 101, 1, 8, // 88-91
	80, 102, 0, 9, 103, 69, 10, 0, 104, 87, 9, 1, 104, 87, 9, 1, // 92-95
	106, 57, 8, 2, 106, 57, 8, 2, 62, 109, 2, 8, 62, 109, 2, 8, // 96-99
	88, 111, 1, 9, 88, 111, 1, 9, 80, 112, 0, 10, 113, 85, 11, 0, // 100-103
	114, 87, 10, 1, 114, 87, 10, 1, 116, 57, 9, 2, 116, 57, 9, 2, // 104-107
	62, 119, 2, 9, 62, 119, 2, 9, 88, 121, 1, 10, 88, 121, 1, 10, // 108-111
	90, 122, 0, 11, 123, 85, 12, 0, 124, 97, 11, 1, 124, 97, 11, 1, // 112-115
	126, 57, 10, 2, 126, 57, 10, 2, 62, 129, 2, 10, 62, 129, 2, 10, // 116-119
	98, 131, 1, 11, 98, 131, 1, 11, 90, 132, 0, 12, 133, 85, 13, 0, // 120-123
	134, 97, 12, 1, 134, 97, 12, 1, 136, 57, 11, 2, 136, 57, 11, 2, // 124-127
	62, 139, 2, 11, 62, 139, 2, 11, 98, 141, 1, 12, 98, 141, 1, 12, // 128-131
	90, 142, 0, 13, 143, 95, 14, 0, 144, 97, 13, 1, 144, 97, 13, 1, // 132-135
	68, 57, 12, 2, 68, 57, 12, 2, 62, 81, 2, 12, 62, 81, 2, 12, // 136-139
	98, 147, 1, 13, 98, 147, 1, 13, 100, 148, 0, 14, 149, 95, 15, 0, // 140-143
	150, 107, 14, 1, 150, 107, 14, 1, 108, 151, 1, 14, 108, 151, 1, 14, // 144-147
	100, 152, 0, 15, 153, 95, 16, 0, 154, 107, 15, 1, 108, 155, 1, 15, // 148-151
	100, 156, 0, 16, 157, 95, 17, 0, 158, 107, 16, 1, 108, 159, 1, 16, // 152-155
	100, 160, 0, 17, 161, 105, 18, 0, 162, 107, 17, 1, 108, 163, 1, 17, // 156-159
	110, 164, 0, 18, 165, 105, 19, 0, 166, 117, 18, 1, 118, 167, 1, 18, // 160-163
	110, 168, 0, 19, 169, 105, 20, 0, 170, 117, 19, 1, 118, 171, 1, 19, // 164-167
	110, 172, 0, 20, 173, 105, 21, 0, 174, 117, 20, 1, 118, 175, 1, 20, // 168-171
	110, 176, 0, 21, 177, 105, 22, 0, 178, 117, 21, 1, 118, 179, 1, 21, // 172-175
	110, 180, 0, 22, 181, 115, 23, 0, 182, 117, 22, 1, 118, 183, 1, 22, // 176-179
	120, 184, 0, 23, 185, 115, 24, 0, 186, 127, 23, 1, 128, 187, 1, 23, // 180-183
	120, 188, 0, 24, 189, 115, 25, 0, 190, 127, 24, 1, 128, 191, 1, 24, // 184-187
	120, 192, 0, 25, 193, 115, 26, 0, 194, 127, 25, 1, 128, 195, 1, 25, // 188-191
	120, 196, 0, 26, 197, 115, 27, 0, 198, 127, 26, 1, 128, 199, 1, 26, // 192-195
	120, 200, 0, 27, 201, 115, 28, 0, 202, 127, 27, 1, 128, 203, 1, 27, // 196-199
	120, 204, 0, 28, 205, 115, 29, 0, 206, 127, 28, 1, 128, 207, 1, 28, // 200-203
	120, 208, 0, 29, 209, 125, 30, 0, 210, 127, 29, 1, 128, 211, 1, 29, // 204-207
	130, 212, 0, 30, 213, 125, 31, 0, 214, 137, 30, 1, 138, 215, 1, 30, // 208-211
	130, 216, 0, 31, 217, 125, 32, 0, 218, 137, 31, 1, 138, 219, 1, 31, // 212-215
	130, 220, 0, 32, 221, 125, 33, 0, 222, 137, 32, 1, 138, 223, 1, 32, // 216-219
	130, 224, 0, 33, 225, 125, 34, 0, 226, 137, 33, 1, 138, 227, 1, 33, // 220-223
	130, 228, 0, 34, 229, 125, 35, 0, 230, 137, 34, 1, 138, 231, 1, 34, // 224-227
	130, 232, 0, 35, 233, 125, 36, 0, 234, 137, 35, 1, 138, 235, 1, 35, // 228-231
	130, 236, 0, 36, 237, 125, 37, 0, 238, 137, 36, 1, 138, 239, 1, 36, // 232-235
	130, 240, 0, 37, 241, 125, 38, 0, 242, 137, 37, 1, 138, 243, 1, 37, // 236-239
	130, 244, 0, 38, 245, 135, 39, 0, 246, 137, 38, 1, 138, 247, 1, 38, // 240-243
	140, 248, 0, 39, 249, 135, 40, 0, 250, 69, 39, 1, 80, 251, 1, 39, // 244-247
	140, 252, 0, 40, 249, 135, 41, 0, 250, 69, 40, 1, 80, 251, 1, 40, // 248-251
	140, 252, 0, 41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 253-255 are reserved
}
// PAQPredictor is the order-0 bit model with SSE stages used by the
// entropy coder described in the file header.
type PAQPredictor struct {
	// Removed apm11, apm12 and apm5 from original
	pr     int          // next predicted value (0-4095)
	c0     int          // bitwise context: last 0-7 bits with a leading 1 (1-255)
	c4     int          // last 4 whole bytes, last is in low 8 bits
	bpos   uint         // number of bits in c0 (0-7)
	states []int        // context -> state
	sm     *PAQStateMap // state -> pr
	run    uint         // count of consecutive identical bytes (0-65535)
	runCtx int          // (0-3) if run is 0, 1, 2-3, 4+
	apm2   *AdaptiveProbMap
	apm3   *AdaptiveProbMap
	apm4   *AdaptiveProbMap
}
// NewPAQPredictor creates a predictor initialised to an even guess
// (pr=2048), an empty partial byte (c0=1) and fresh bit histories.
// On a sub-component error the partially built predictor is returned
// together with that error.
func NewPAQPredictor() (*PAQPredictor, error) {
	p := new(PAQPredictor)
	p.pr = 2048
	p.c0 = 1
	p.states = make([]int, 256)
	p.bpos = 8
	var err error
	if p.apm2, err = newAdaptiveProbMap(1024, 6); err != nil {
		return p, err
	}
	if p.apm3, err = newAdaptiveProbMap(1024, 7); err != nil {
		return p, err
	}
	if p.apm4, err = newAdaptiveProbMap(65536, 8); err != nil {
		return p, err
	}
	p.sm, err = newPAQStateMap()
	return p, err
}
// Update the probability model: feed the actually observed bit back in
// and compute the prediction for the next bit (stored in this.pr).
func (this *PAQPredictor) Update(bit byte) {
	y := int(bit)
	// advance the bit-history state of the current bitwise context
	this.states[this.c0] = PAQ_STATE_TABLE[(this.states[this.c0]<<2)+y]
	// append the bit to the partial byte (c0 carries a leading 1 sentinel)
	this.c0 = (this.c0 << 1) | y
	if this.c0 > 255 {
		// a whole byte has been assembled
		if this.c0&0xFF == this.c4&0xFF {
			// same byte as the previous one: extend the run
			if this.run < 4 && this.run != 2 {
				// move to the next run-length context bucket (0, 1, 2-3, 4+)
				this.runCtx += 256
			}
			this.run++
		} else {
			this.run = 0
			this.runCtx = 0
		}
		this.bpos = 8
		this.c4 = (this.c4 << 8) | (this.c0 & 0xFF)
		this.c0 = 1
	}
	// c1d: bit 1 set if the previous byte's leading bits match c0,
	// bit 0 is the previous byte's bit at the current position
	var c1d int
	if ((this.c4&0xFF)|256)>>this.bpos == this.c0 {
		c1d = 2
	} else {
		c1d = 0
	}
	this.bpos--
	c1d += ((this.c4 >> this.bpos) & 1)
	// Get prediction from state map
	p := this.sm.get(y, this.states[this.c0])
	// SSE (Secondary Symbol Estimation): chain of adaptive probability
	// maps, each averaged 3:1 with its input
	p = this.apm2.get(y, p, this.c0|(c1d<<8))
	p = (3*this.apm3.get(y, p, (this.c4&0xFF)|this.runCtx) + p + 2) >> 2
	p = (3*this.apm4.get(y, p, this.c0|(this.c4&0xFF00)) + p + 2) >> 2
	// unsigned-shift trick: (p32-2048)>>31 is 1 when p < 2048, else 0,
	// so pr = p+1 for p below the midpoint and pr = p otherwise
	p32 := uint32(p)
	this.pr = p + int((p32-2048)>>31)
}
// Return the split value representing the probability of 1 in the [0..4095] range.
// (pr is recomputed by every call to Update.)
func (this *PAQPredictor) Get() int {
	return this.pr
}
//////////////////////////////////////////////////////////////////
// A StateMap maps a nonstationary counter state to a probability.
// After each mapping, the mapping is adjusted to improve future
// predictions. Methods:
//
// get(y, cx) converts state cx (0-255) to a probability (0-4095),
// and trains by updating the previous prediction with y (0-1).
//
// Counter state -> probability * 256
//////////////////////////////////////////////////////////////////
// PAQStateMap holds the adaptive state->probability table together with
// the context used by the most recent prediction (trained on the next call).
type PAQStateMap struct {
	ctx  int   // context supplied to the previous get() call
	data []int // per-state probability * 65536 (scaled down on read)
}
// PAQ_STATEMAP_DATA is the shared initial probability table, copied into
// every new PAQStateMap instance.
var PAQ_STATEMAP_DATA = initPAQStateMapData()
// initPAQStateMapData derives an initial probability for each of the 256
// bit-history states from the zero/one counts stored in PAQ_STATE_TABLE.
func initPAQStateMapData() []int {
	result := make([]int, 256)
	for i := 0; i < 256; i++ {
		zeros := PAQ_STATE_TABLE[(i<<2)+2]
		ones := PAQ_STATE_TABLE[(i<<2)+3]
		value := ((ones + 5) << 16) / (zeros + ones + 10)
		// Boost low probabilities (typically under estimated by above formula)
		if value < 128 {
			value <<= 5
		}
		result[i] = value
	}
	return result
}
// newPAQStateMap creates a state map seeded with a private copy of the
// shared initial probability table (ctx starts at state 0).
func newPAQStateMap() (*PAQStateMap, error) {
	sm := &PAQStateMap{
		data: make([]int, 256),
	}
	copy(sm.data, PAQ_STATEMAP_DATA)
	return sm, nil
}
// get trains the entry for the previously supplied context using the bit
// actually observed, then switches to context nctx and returns its
// prediction scaled to the 0..4095 range.
func (this *PAQStateMap) get(bit int, nctx int) int {
	// move the previous context's probability towards the observed bit
	// (fixed learning rate of 1/512, +256 for rounding)
	this.data[this.ctx] += (((bit << 16) - this.data[this.ctx] + 256) >> 9)
	this.ctx = nctx
	return this.data[nctx] >> 4
}
|
package smaevcharger
// SMA EV Charger 22 - json Responses
const (
	// MinAcceptedVersion is the oldest charger firmware version supported.
	MinAcceptedVersion = "1.2.23"
	// TimestampFormat is the time layout used by the charger's JSON API.
	TimestampFormat = "2006-01-02T15:04:05.000Z"
	// Charging status values reported by the charger.
	StatusA = float64(200111) // Not connected
	StatusB = float64(200112) // Connected and not charging
	StatusC = float64(200113) // Connected and charging
	ChargerLocked = float64(5169) // Charger locked
	SwitchOeko = float64(4950) // Switch in PV Loading (Optimized or Planned PV loading)
	SwitchFast = float64(4718) // Switch in Fast Charge Mode
	// Charge-mode parameter values (sent as strings).
	FastCharge = "4718" // Fast charging ("Schnellladen") - 4718
	OptiCharge = "4719" // Optimized charging ("Optimiertes Laden") - 4719
	PlanCharge = "4720" // Charging with a target ("Laden mit Vorgabe") - 4720
	StopCharge = "4721" // Charging stop ("Ladestopp") - 4721
	// App- and manual-lock parameter values.
	ChargerAppLockEnabled = "1129"
	ChargerAppLockDisabled = "1130"
	ChargerManualLockEnabled = "5171"
	ChargerManualLockDisabled = "5172"
)
// Measurements is the JSON response structure for measurement data: a
// channel/component pair with a series of timestamped values.
type Measurements struct {
	ChannelId string `json:"channelId"`
	ComponentId string `json:"componentId"`
	Values []struct {
		Time string `json:"time"`
		Value float64 `json:"value"`
	} `json:"values"`
}
// Parameters is the JSON response structure for parameter data: a
// component with its current parameter values and metadata.
type Parameters struct {
	ComponentId string `json:"componentId"`
	Values []struct {
		ChannelId string `json:"channelId"`
		Editable bool `json:"editable"`
		PossibleValues []string `json:"possibleValues,omitempty"`
		State string `json:"state"`
		Timestamp string `json:"timestamp"`
		Value string `json:"value"`
	} `json:"values"`
}
// SendParameter is the JSON request structure used to write parameters.
type SendParameter struct {
	Values []Value `json:"values"`
}
// Value is one parameter assignment inside a SendParameter request.
type Value struct {
	Timestamp string `json:"timestamp"`
	ChannelId string `json:"channelId"`
	Value string `json:"value"`
}
|
// In this example we'll see how to implement a worker pool using goroutines and channels.
package main
import (
"fmt"
"time"
)
// worker is run concurrently in several goroutines. It receives work on the
// jobs channel and sends the corresponding result (job * 2) on results,
// sleeping one second per job to simulate an expensive task.
func worker(id int, jobs <-chan int, results chan<- int) {
	for job := range jobs {
		fmt.Println("worker", id, "start job", job)
		time.Sleep(time.Second)
		fmt.Println("worker", id, "finished jobs", job)
		results <- job * 2
	}
}
func main() {
	const numJobs = 5
	// Two channels are needed to use the pool and collect its results:
	// one to hand out jobs and one to receive results.
	jobs := make(chan int, numJobs)
	results := make(chan int, numJobs)
	// Start 3 workers; they block immediately since no jobs exist yet.
	for w := 1; w <= 3; w++ {
		go worker(w, jobs, results)
	}
	// Send the 5 jobs, then close the channel to signal that this is all
	// the work there is.
	for j := 1; j <= numJobs; j++ {
		jobs <- j
	}
	close(jobs)
	// Finally, collect every result. This also guarantees all worker
	// goroutines have finished. (A WaitGroup is another way to wait for
	// multiple goroutines.)
	for a := 1; a <= numJobs; a++ {
		<-results
	}
	// Running the program shows 5 jobs executed by multiple workers: about
	// 5 seconds of total work completes in roughly 2 seconds because the
	// 3 workers operate in parallel.
}
|
package wallet
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"log"
"golang.org/x/crypto/ripemd160"
b58 "github.com/jbenet/go-base58"
)
// version is the version byte prepended to the hashed public key when
// building an address.
const version = byte(0x00)
// addressChecksumLen is the number of checksum bytes appended to an address.
const addressChecksumLen = 4
// Wallet holds an ECDSA private key and the raw concatenated X||Y bytes of
// the corresponding public key.
type Wallet struct {
	PrivateKey ecdsa.PrivateKey
	PublicKey []byte
}
// NewWallet generates a fresh key pair and wraps it in a Wallet.
func NewWallet() *Wallet {
	priv, pub := newKeyPair()
	return &Wallet{PrivateKey: priv, PublicKey: pub}
}
// GetPrivateKey returns the wallet's private key serialized with x509 and
// encoded as base58.
func (w Wallet) GetPrivateKey() string {
	return encodePvk(&w.PrivateKey)
}
// GetPlbAddress derives the public (base58) wallet address from a private
// key previously encoded by GetPrivateKey: it reconstructs the X||Y public
// key bytes, hashes them, prepends the version byte, appends the checksum,
// and base58-encodes the result.
func GetPlbAddress(pvk string) string {
	pbk := decodePvk(pvk)
	if pbk == nil {
		// decodePvk yields nil for malformed input; fail loudly instead of
		// dereferencing a nil key below.
		log.Panic("wallet: invalid private key encoding")
	}
	pubKey := append(pbk.X.Bytes(), pbk.Y.Bytes()...)
	pubKeyHash := HashPubKey(pubKey)
	versionedPayload := append([]byte{version}, pubKeyHash...)
	fullPayload := append(versionedPayload, checksum(versionedPayload)...)
	// fullPayload is already a []byte; no conversion needed before encoding.
	return b58.Encode(fullPayload)
}
// GetAddress returns the base58-encoded wallet address:
// version byte || RIPEMD160(SHA256(pubkey)) || 4-byte checksum.
func (w Wallet) GetAddress() string {
	pubKeyHash := HashPubKey(w.PublicKey)
	versionedPayload := append([]byte{version}, pubKeyHash...)
	fullPayload := append(versionedPayload, checksum(versionedPayload)...)
	// fullPayload is already a []byte; the previous []byte(...) conversion
	// was redundant.
	return b58.Encode(fullPayload)
}
// HashPubKey returns RIPEMD160(SHA256(pubKey)), the standard public-key
// hash used when deriving an address. Panics if the RIPEMD160 hasher
// rejects the write.
func HashPubKey(pubKey []byte) []byte {
	sha := sha256.Sum256(pubKey)
	hasher := ripemd160.New()
	if _, err := hasher.Write(sha[:]); err != nil {
		log.Panic(err)
	}
	return hasher.Sum(nil)
}
// ValidateAddress reports whether address is a well-formed base58 address
// whose embedded 4-byte checksum matches its version byte and payload.
func ValidateAddress(address string) bool {
	pubKeyHash := b58.Decode(address)
	// Guard against inputs too short to contain a version byte plus the
	// checksum suffix; slicing below would panic on them.
	if len(pubKeyHash) <= addressChecksumLen {
		return false
	}
	actualChecksum := pubKeyHash[len(pubKeyHash)-addressChecksumLen:]
	version := pubKeyHash[0]
	pubKeyHash = pubKeyHash[1 : len(pubKeyHash)-addressChecksumLen]
	targetChecksum := checksum(append([]byte{version}, pubKeyHash...))
	// bytes.Equal is the idiomatic equality test (was bytes.Compare == 0).
	return bytes.Equal(actualChecksum, targetChecksum)
}
// checksum returns the first addressChecksumLen bytes of the double-SHA256
// digest of payload.
func checksum(payload []byte) []byte {
	digest := sha256.Sum256(payload)
	digest = sha256.Sum256(digest[:])
	return digest[:addressChecksumLen]
}
// encodePvk serializes privateKey with x509 and encodes it as base58.
// Panics (via log.Panic, matching the rest of the package) if the key
// cannot be marshaled; the error was previously ignored, which would have
// silently encoded an empty key.
func encodePvk(privateKey *ecdsa.PrivateKey) string {
	x509Encoded, err := x509.MarshalECPrivateKey(privateKey)
	if err != nil {
		log.Panic(err)
	}
	return b58.Encode(x509Encoded)
}
// decodePvk parses a base58-encoded x509 EC private key (the inverse of
// encodePvk). The parse error was previously discarded, making the function
// return nil for malformed input and pushing a nil-pointer dereference onto
// callers; panic loudly instead, matching the package's error style.
func decodePvk(pemEncoded string) *ecdsa.PrivateKey {
	x509Encoded := b58.Decode(pemEncoded)
	privateKey, err := x509.ParseECPrivateKey(x509Encoded)
	if err != nil {
		log.Panic(err)
	}
	return privateKey
}
// newKeyPair generates a P-256 ECDSA key and returns it together with the
// raw concatenation of the public key's X and Y coordinate bytes.
// NOTE(review): big.Int.Bytes() drops leading zero bytes, so the
// concatenation is not always fixed-length — confirm downstream consumers
// tolerate that.
func newKeyPair() (ecdsa.PrivateKey, []byte) {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Panic(err)
	}
	pub := append(priv.PublicKey.X.Bytes(), priv.PublicKey.Y.Bytes()...)
	return *priv, pub
}
|
package protologlogrus
import (
"bytes"
"encoding/json"
"io"
"strings"
"unicode"
"github.com/Sirupsen/logrus"
"github.com/golang/protobuf/proto"
"github.com/sr/operator/protolog"
)
var (
	// levelToLogrusLevel maps protolog levels onto their logrus
	// equivalents; levels absent from the map fall back to the logrus
	// zero Level value.
	levelToLogrusLevel = map[protolog.Level]logrus.Level{
		protolog.LevelDebug: logrus.DebugLevel,
		protolog.LevelInfo: logrus.InfoLevel,
		protolog.LevelError: logrus.ErrorLevel,
	}
)
// pusher adapts protolog entries onto a logrus.Logger configured from the
// supplied options.
type pusher struct {
	logger *logrus.Logger
	options PusherOptions // retained for Flush, which inspects options.Out
}
// newPusher constructs a pusher backed by a fresh logrus.Logger, applying
// the optional output writer and formatter from options when set.
func newPusher(options PusherOptions) *pusher {
	logger := logrus.New()
	if out := options.Out; out != nil {
		logger.Out = out
	}
	if formatter := options.Formatter; formatter != nil {
		logger.Formatter = formatter
	}
	return &pusher{logger: logger, options: options}
}
// Push converts entry into a logrus entry and emits it through the
// underlying logger, returning any conversion or write error.
func (p *pusher) Push(entry *protolog.Entry) error {
	converted, err := p.getLogrusEntry(entry)
	if err != nil {
		return err
	}
	return p.logLogrusEntry(converted)
}
// flusher is satisfied by writers that can flush buffered data (e.g. a
// bufio.Writer).
type flusher interface {
	Flush() error
}
// syncer is satisfied by writers that can sync to stable storage (e.g. an
// *os.File).
type syncer interface {
	Sync() error
}
// Flush pushes any buffered output to its destination. If the configured
// output writer implements Sync it is preferred; otherwise Flush is used if
// available. With no output writer (or neither method) this is a no-op.
func (p *pusher) Flush() error {
	switch out := p.options.Out.(type) {
	case syncer:
		return out.Sync()
	case flusher:
		return out.Flush()
	default:
		return nil
	}
}
// getLogrusEntry translates a protolog.Entry into a logrus.Entry bound to
// the pusher's logger. Data fields are populated, in order, from the entry
// ID, each non-nil context message, the entry's non-empty string fields,
// and the event message (if any). The human-readable message comes from
// entry.Message or entry.WriterOutput; when both are set, WriterOutput
// wins because it is assigned last.
func (p *pusher) getLogrusEntry(entry *protolog.Entry) (*logrus.Entry, error) {
	logrusEntry := logrus.NewEntry(p.logger)
	logrusEntry.Time = entry.Time
	// Levels missing from the map yield the logrus zero Level value.
	logrusEntry.Level = levelToLogrusLevel[entry.Level]
	if entry.ID != "" {
		logrusEntry.Data["_id"] = entry.ID
	}
	for _, context := range entry.Contexts {
		if context == nil {
			continue
		}
		if err := addProtoMessage(logrusEntry, context); err != nil {
			return nil, err
		}
	}
	// Only non-empty field values are carried over.
	for key, value := range entry.Fields {
		if value != "" {
			logrusEntry.Data[key] = value
		}
	}
	// TODO(pedge): verify only one of Event, Message, WriterOutput?
	if entry.Event != nil {
		logrusEntry.Data["_event"] = proto.MessageName(entry.Event)
		if err := addProtoMessage(logrusEntry, entry.Event); err != nil {
			return nil, err
		}
	}
	if entry.Message != "" {
		logrusEntry.Message = trimRightSpace(entry.Message)
	}
	if entry.WriterOutput != nil {
		// Overwrites any Message assigned just above.
		logrusEntry.Message = trimRightSpace(string(entry.WriterOutput))
	}
	return logrusEntry, nil
}
// logLogrusEntry emits a pre-built logrus entry: it fires the logger's
// hooks for the entry's level, serializes the entry via entry.Reader(),
// and copies the bytes to the logger's output writer.
// NOTE(review): this bypasses logrus's own log methods (and any locking
// they perform) — confirm concurrent Push calls are safe for the
// configured output writer.
func (p *pusher) logLogrusEntry(entry *logrus.Entry) error {
	if err := entry.Logger.Hooks.Fire(entry.Level, entry); err != nil {
		return err
	}
	reader, err := entry.Reader()
	if err != nil {
		return err
	}
	_, err = io.Copy(entry.Logger.Out, reader)
	return err
}
// addProtoMessage flattens message into key/value logging fields and
// merges them into logrusEntry.Data, overwriting keys already present.
func addProtoMessage(logrusEntry *logrus.Entry, message proto.Message) error {
	fields, err := getFieldsForProtoMessage(message)
	if err != nil {
		return err
	}
	for key, value := range fields {
		logrusEntry.Data[key] = value
	}
	return nil
}
// getFieldsForProtoMessage converts a proto message into a flat map of
// logging fields by round-tripping it through JSON. Nested JSON objects
// are re-serialized into strings so that every resulting value is either a
// scalar or a string.
//
// The previous implementation copied the marshaled bytes into an
// intermediate bytes.Buffer before unmarshaling; the decoder now reads the
// bytes directly with no extra copy.
func getFieldsForProtoMessage(message proto.Message) (map[string]interface{}, error) {
	data, err := json.Marshal(message)
	if err != nil {
		return nil, err
	}
	var m map[string]interface{}
	if err := json.NewDecoder(bytes.NewReader(data)).Decode(&m); err != nil {
		return nil, err
	}
	n := make(map[string]interface{}, len(m))
	for key, value := range m {
		switch value.(type) {
		case map[string]interface{}:
			// Collapse nested objects into their JSON text.
			nested, err := json.Marshal(value)
			if err != nil {
				return nil, err
			}
			n[key] = string(nested)
		default:
			n[key] = value
		}
	}
	return n, nil
}
// trimRightSpace returns s with all trailing Unicode whitespace removed;
// leading and interior whitespace is preserved.
func trimRightSpace(s string) string {
	isSpace := func(r rune) bool { return unicode.IsSpace(r) }
	return strings.TrimRightFunc(s, isSpace)
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
import (
"context"
gosql "database/sql"
"fmt"
"regexp"
"testing"
"github.com/cockroachdb/cockroach-go/crdb"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestDescriptorRepairOrphanedDescriptors exercises cases where corruptions to
// the catalog could exist due to bugs and can now be repaired programmatically
// from SQL.
//
// We manually create the corruption by injecting values into namespace and
// descriptor, ensure that the doctor would detect these problems, repair them
// with sql queries, and show that no invalid objects remain.
func TestDescriptorRepairOrphanedDescriptors(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	// setup starts a fresh single-node test server; the returned func stops it.
	setup := func(t *testing.T) (serverutils.TestServerInterface, *gosql.DB, func()) {
		s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
		return s, db, func() {
			s.Stopper().Stop(ctx)
		}
	}
	// The below descriptors were created by performing the following on a
	// 20.1.1 cluster:
	//
	//   SET experimental_serial_normalization = 'sql_sequence';
	//   CREATE DATABASE db;
	//   USE db;
	//   CREATE TABLE foo(i SERIAL PRIMARY KEY);
	//   USE defaultdb;
	//   DROP DATABASE db CASCADE;
	//
	// This, due to #51782, leads to the table remaining public but with no
	// parent database (52).
	const (
		// orphanedTable is the hex-encoded protobuf of the orphaned table
		// descriptor captured from that cluster.
		orphanedTable = `0aeb010a03666f6f1835203428013a0042380a016910011a0c08011040180030005014600020002a1d6e65787476616c2827666f6f5f695f736571273a3a3a535452494e472930005036480252440a077072696d61727910011801220169300140004a10080010001a00200028003000380040005a007a020800800100880100900101980100a20106080012001800a8010060026a150a090a0561646d696e10020a080a04726f6f741002800101880103980100b201120a077072696d61727910001a016920012800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021d`
	)
	// We want to inject a descriptor that has no parent. This will block
	// backups among other things.
	const (
		parentID = 52
		schemaID = 29
		descID = 53
		tableName = "foo"
	)
	// This test will inject the table and demonstrate
	// that there are problems. It will then repair it by just dropping the
	// descriptor and namespace entry. This would normally be unsafe because
	// it would leave table data around.
	t.Run("orphaned view - 51782", func(t *testing.T) {
		s, db, cleanup := setup(t)
		defer cleanup()
		// Disable write-time validation so the corrupt descriptor can be injected.
		descs.ValidateOnWriteEnabled.Override(&s.ClusterSettings().SV, false)
		require.NoError(t, crdb.ExecuteTx(ctx, db, nil, func(tx *gosql.Tx) error {
			if _, err := tx.Exec(
				"SELECT crdb_internal.unsafe_upsert_descriptor($1, decode($2, 'hex'));",
				descID, orphanedTable); err != nil {
				return err
			}
			_, err := tx.Exec("SELECT crdb_internal.unsafe_upsert_namespace_entry($1, $2, $3, $4, true);",
				parentID, schemaID, tableName, descID)
			return err
		}))
		descs.ValidateOnWriteEnabled.Override(&s.ClusterSettings().SV, true)
		// Ideally we should be able to query `crdb_internal.invalid_objects` but it
		// does not do enough validation. Instead we'll just observe the issue that
		// the parent descriptor cannot be found.
		_, err := db.Exec(
			"SELECT count(*) FROM \"\".crdb_internal.tables WHERE table_id = $1",
			descID)
		require.Regexp(t, `pq: relation "foo" \(53\): referenced database ID 52: descriptor not found`, err)
		// In this case, we're treating the injected descriptor as having no data
		// so we can clean it up by just deleting the erroneous descriptor and
		// namespace entry that was introduced. In the next case we'll go through
		// the dance of adding back a parent database in order to drop the table.
		require.NoError(t, crdb.ExecuteTx(ctx, db, nil, func(tx *gosql.Tx) error {
			if _, err := tx.Exec(
				"SELECT crdb_internal.unsafe_delete_descriptor($1, true);",
				descID); err != nil {
				return err
			}
			_, err := tx.Exec("SELECT crdb_internal.unsafe_delete_namespace_entry($1, $2, $3, $4);",
				parentID, schemaID, tableName, descID)
			return err
		}))
		// The table should no longer be visible anywhere.
		rows, err := db.Query(
			"SELECT count(*) FROM \"\".crdb_internal.tables WHERE table_id = $1",
			descID)
		require.NoError(t, err)
		rowMat, err := sqlutils.RowsToStrMatrix(rows)
		require.NoError(t, err)
		require.EqualValues(t, [][]string{{"0"}}, rowMat)
	})
	// This test will inject the table and demonstrate that there are problems. It
	// will then repair it by injecting a new database descriptor and namespace
	// entry and then demonstrate the problem is resolved.
	t.Run("orphaned table with data - 51782", func(t *testing.T) {
		s, db, cleanup := setup(t)
		defer cleanup()
		// Disable write-time validation so the corrupt descriptor can be injected.
		descs.ValidateOnWriteEnabled.Override(&s.ClusterSettings().SV, false)
		require.NoError(t, crdb.ExecuteTx(ctx, db, nil, func(tx *gosql.Tx) error {
			if _, err := tx.Exec(
				"SELECT crdb_internal.unsafe_upsert_descriptor($1, decode($2, 'hex'));",
				descID, orphanedTable); err != nil {
				return err
			}
			_, err := tx.Exec("SELECT crdb_internal.unsafe_upsert_namespace_entry($1, $2, $3, $4, true);",
				parentID, schemaID, tableName, descID)
			return err
		}))
		descs.ValidateOnWriteEnabled.Override(&s.ClusterSettings().SV, true)
		// Ideally we should be able to query `crdb_internal.invalid_objects` but it
		// does not do enough validation. Instead we'll just observe the issue that
		// the parent descriptor cannot be found.
		_, err := db.Exec(
			"SELECT count(*) FROM \"\".crdb_internal.tables WHERE table_id = $1",
			descID)
		require.Regexp(t, `pq: relation "foo" \(53\): referenced database ID 52: descriptor not found`, err)
		// In this case, we're going to inject a parent database
		require.NoError(t, crdb.ExecuteTx(ctx, db, nil, func(tx *gosql.Tx) error {
			if _, err := tx.Exec(
				"SELECT crdb_internal.unsafe_upsert_descriptor($1, crdb_internal.json_to_pb('cockroach.sql.sqlbase.Descriptor', $2))",
				parentID, `{
  "database": {
    "id": 52,
    "name": "to_drop",
    "privileges": {
      "owner_proto": "root",
      "users": [
        {
          "privileges": 2,
          "user_proto": "admin"
        },
        {
          "privileges": 2,
          "user_proto": "root"
        }
      ],
      "version": 1
    },
    "state": "PUBLIC",
    "version": 1
  }
}
`,
			); err != nil {
				return err
			}
			if _, err := tx.Exec(
				"SELECT crdb_internal.unsafe_upsert_namespace_entry($1, $2, $3, $4);",
				0, 0, "to_drop", parentID,
			); err != nil {
				return err
			}
			if _, err := tx.Exec("SELECT crdb_internal.unsafe_upsert_namespace_entry($1, $2, $3, $4);",
				parentID, 0, "public", schemaID); err != nil {
				return err
			}
			// We also need to remove the reference to the sequence.
			if _, err := tx.Exec(`
SELECT crdb_internal.unsafe_upsert_descriptor(
        $1,
        crdb_internal.json_to_pb(
            'cockroach.sql.sqlbase.Descriptor',
            jsonb_set(
                jsonb_set(
                    crdb_internal.pb_to_json(
                        'cockroach.sql.sqlbase.Descriptor',
                        descriptor
                    ),
                    ARRAY['table', 'columns', '0', 'default_expr'],
                    '"unique_rowid()"'
                ),
                ARRAY['table', 'columns', '0', 'usesSequenceIds'],
                '[]'
            )
        )
    )
FROM system.descriptor
WHERE id = $1;`,
				descID); err != nil {
				return err
			}
			return nil
		}))
		// Dump the repaired descriptor as JSON for debugging.
		{
			rows, err := db.Query(
				"SELECT crdb_internal.pb_to_json('cockroach.sql.sqlbase.Descriptor', descriptor) FROM system.descriptor WHERE id = 53")
			require.NoError(t, err)
			mat, err := sqlutils.RowsToStrMatrix(rows)
			require.NoError(t, err)
			fmt.Println(sqlutils.MatrixToStr(mat))
		}
		// The table is now resolvable again...
		rows, err := db.Query(
			"SELECT count(*) FROM \"\".crdb_internal.tables WHERE table_id = $1",
			descID)
		require.NoError(t, err)
		rowMat, err := sqlutils.RowsToStrMatrix(rows)
		require.NoError(t, err)
		require.EqualValues(t, [][]string{{"1"}}, rowMat)
		// ...and can be dropped along with its injected parent database.
		_, err = db.Exec("DROP DATABASE to_drop CASCADE")
		require.NoError(t, err)
	})
}
// TestDescriptorRepair runs a table of unsafe-repair scenarios. Each case
// executes its `before` statements with descriptor validation disabled,
// re-enables validation, runs `op`, and then checks either the expected
// error or the expected system.eventlog entries. Fix applied: the `after`
// statements (used by case 9 to verify the repaired table is usable) were
// declared and populated but never executed by the loop body; they are now
// run at the end of each case.
func TestDescriptorRepair(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	// setup starts a fresh single-node test server; the returned func stops it.
	setup := func(t *testing.T) (serverutils.TestServerInterface, *gosql.DB, func()) {
		s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
		return s, db, func() {
			s.Stopper().Stop(ctx)
		}
	}
	// eventLogPattern describes one expected system.eventlog row: an event
	// type plus a regexp that must match the event's info payload.
	type eventLogPattern struct {
		typ string
		info string
	}
	for caseIdx, tc := range []struct {
		before []string // statements run with validation disabled
		op string // statement under test, run with validation enabled
		expErrRE string // when non-empty, op must fail with a matching error
		expEventLogEntries []eventLogPattern // event log rows op must produce
		after []string // follow-up statements verifying the repair
	}{
		{ // 1
			before: []string{
				`CREATE DATABASE test`,
				`SELECT crdb_internal.unsafe_upsert_namespace_entry(52, 29, 'foo', 59, true)`,
			},
			op: upsertRepair,
			expEventLogEntries: []eventLogPattern{
				{
					typ: "unsafe_upsert_descriptor",
					info: `"DescriptorID":59`,
				},
				{
					typ: "alter_table_owner",
					info: `"DescriptorID":59,"TableName":"foo","Owner":"root"`,
				},
				{
					typ: "change_table_privilege",
					info: `"DescriptorID":59,"Grantee":"root","GrantedPrivileges":\["ALL"\]`,
				},
				{
					typ: "change_table_privilege",
					info: `"DescriptorID":59,"Grantee":"admin","GrantedPrivileges":\["ALL"\]`,
				},
				{
					typ: "change_table_privilege",
					info: `"DescriptorID":59,"Grantee":"newuser1","GrantedPrivileges":\["ALL"\]`,
				},
				{
					typ: "change_table_privilege",
					info: `"DescriptorID":59,"Grantee":"newuser2","GrantedPrivileges":\["ALL"\]`,
				},
			},
		},
		{ // 2
			before: []string{
				`CREATE DATABASE test`,
				`SELECT crdb_internal.unsafe_upsert_namespace_entry(52, 29, 'foo', 59, true)`,
				upsertRepair,
			},
			op: upsertUpdatePrivileges,
			expEventLogEntries: []eventLogPattern{
				{
					typ: "alter_table_owner",
					info: `"DescriptorID":59,"TableName":"foo","Owner":"admin"`,
				},
				{
					typ: "change_table_privilege",
					info: `"DescriptorID":59,"Grantee":"newuser1","GrantedPrivileges":\["DROP"\],"RevokedPrivileges":\["ALL"\]`,
				},
				{
					typ: "change_table_privilege",
					info: `"DescriptorID":59,"Grantee":"newuser2","RevokedPrivileges":\["ALL"\]`,
				},
			},
		},
		{ // 3
			before: []string{
				`CREATE SCHEMA foo`,
			},
			op: `
SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", 0, 'foo', id)
  FROM system.namespace WHERE name = 'foo';
`,
			expErrRE: `crdb_internal.unsafe_delete_namespace_entry\(\): refusing to delete namespace entry for non-dropped descriptor`,
		},
		{ // 4
			// Upsert a descriptor which is invalid, then try to upsert a namespace
			// entry for it and show that it fails.
			before: []string{
				upsertInvalidateDuplicateColumnDescriptor,
			},
			op: `SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52);`,
			expErrRE: `relation "foo" \(52\): duplicate column name: "i"`,
		},
		{ // 5
			// Upsert a descriptor which is invalid, then try to upsert a namespace
			// entry for it and show that it succeeds with the force flag.
			before: []string{
				upsertInvalidateDuplicateColumnDescriptor,
			},
			op: `SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`,
			expEventLogEntries: []eventLogPattern{
				{
					typ: "unsafe_upsert_namespace_entry",
					info: `"Force":true,"FailedValidation":true,"ValidationErrors":".*duplicate column name: \\"i\\""`,
				},
			},
		},
		{ // 6
			// Upsert a descriptor which is invalid, upsert a namespace entry for it,
			// then show that deleting the descriptor fails without the force flag.
			before: []string{
				upsertInvalidateDuplicateColumnDescriptor,
				`SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`,
			},
			op: `SELECT crdb_internal.unsafe_delete_descriptor(52);`,
			expErrRE: `pq: crdb_internal.unsafe_delete_descriptor\(\): relation "foo" \(52\): duplicate column name: "i"`,
		},
		{ // 7
			// Upsert a descriptor which is invalid, upsert a namespace entry for it,
			// then show that deleting the descriptor succeeds with the force flag.
			before: []string{
				upsertInvalidateDuplicateColumnDescriptor,
				`SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`,
			},
			op: `SELECT crdb_internal.unsafe_delete_descriptor(52, true);`,
			expEventLogEntries: []eventLogPattern{
				{
					typ: "unsafe_delete_descriptor",
					info: `"Force":true,"ForceNotice":".*duplicate column name: \\"i\\""`,
				},
			},
		},
		{ // 8
			// Upsert a descriptor which is invalid, upsert a namespace entry for it,
			// then show that updating the descriptor fails without the force flag.
			before: []string{
				upsertInvalidateDuplicateColumnDescriptor,
				`SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`,
			},
			op: updateInvalidateDuplicateColumnDescriptorNoForce,
			expErrRE: `pq: crdb_internal.unsafe_upsert_descriptor\(\): relation "foo" \(52\): duplicate column name: "i"`,
		},
		{ // 9
			// Upsert a descriptor which is invalid, upsert a namespace entry for it,
			// then show that updating the descriptor succeeds the force flag.
			before: []string{
				upsertInvalidateDuplicateColumnDescriptor,
				`SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`,
			},
			op: updateInvalidateDuplicateColumnDescriptorForce,
			expEventLogEntries: []eventLogPattern{
				{
					typ: "unsafe_upsert_descriptor",
					info: `"Force":true,"ForceNotice":".*duplicate column name: \\"i\\""`,
				},
			},
			after: []string{
				// Ensure that the table is usable.
				`INSERT INTO [52 as t] VALUES (1), (2)`,
			},
		},
		{ // 10
			// Upsert a descriptor which is invalid, upsert a namespace entry for it,
			// then show that deleting the namespace entry fails without the force flag.
			before: []string{
				upsertInvalidateDuplicateColumnDescriptor,
				`SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`,
			},
			op: `SELECT crdb_internal.unsafe_delete_namespace_entry(50, 29, 'foo', 52);`,
			expErrRE: `pq: crdb_internal.unsafe_delete_namespace_entry\(\): failed to retrieve descriptor 52: relation "foo" \(52\): duplicate column name: "i"`,
		},
		{ // 11
			// Upsert a descriptor which is invalid, upsert a namespace entry for it,
			// then show that deleting the namespace entry succeeds with the force flag.
			before: []string{
				upsertInvalidateDuplicateColumnDescriptor,
				`SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`,
			},
			op: `SELECT crdb_internal.unsafe_delete_namespace_entry(50, 29, 'foo', 52, true);`,
			expEventLogEntries: []eventLogPattern{
				{
					typ: "unsafe_delete_namespace_entry",
					info: `"Force":true,"ForceNotice":".*duplicate column name: \\"i\\""`,
				},
			},
		},
	} {
		t.Run(fmt.Sprintf("case #%d: %s", caseIdx+1, tc.op), func(t *testing.T) {
			s, db, cleanup := setup(t)
			now := s.Clock().Now().GoTime()
			defer cleanup()
			tdb := sqlutils.MakeSQLRunner(db)
			// Run the setup statements with validation disabled so invalid
			// descriptors can be injected.
			descs.ValidateOnWriteEnabled.Override(&s.ClusterSettings().SV, false)
			for _, op := range tc.before {
				tdb.Exec(t, op)
			}
			descs.ValidateOnWriteEnabled.Override(&s.ClusterSettings().SV, true)
			_, err := db.Exec(tc.op)
			if tc.expErrRE == "" {
				require.NoError(t, err)
			} else {
				require.Regexp(t, tc.expErrRE, err)
			}
			// Every expected event log pattern must match at least one row
			// logged since the test server started.
			rows := tdb.Query(t, `SELECT "eventType", info FROM system.eventlog WHERE timestamp > $1`,
				now)
			mat, err := sqlutils.RowsToStrMatrix(rows)
			require.NoError(t, err)
		outer:
			for _, exp := range tc.expEventLogEntries {
				for _, e := range mat {
					if e[0] != exp.typ {
						continue
					}
					if matched, err := regexp.MatchString(exp.info, e[1]); err != nil {
						t.Fatal(err)
					} else if matched {
						continue outer
					}
				}
				t.Errorf("failed to find log entry matching %+v in:\n%s", exp, sqlutils.MatrixToStr(mat))
			}
			// Run the follow-up statements; previously tc.after was populated
			// by case 9 but never executed.
			for _, op := range tc.after {
				tdb.Exec(t, op)
			}
		})
	}
}
const (
	// invalidDuplicateColumnDescriptor is the json representation of a
	// table descriptor which has two columns both named "i" (duplicate ids
	// too) and will subsequently fail validation. It is written as a
	// SQL-quoted string so it can be concatenated directly into statements.
	invalidDuplicateColumnDescriptor = `'{
  "table": {
    "auditMode": "DISABLED",
    "columns": [
      {
        "id": 1,
        "name": "i",
        "type": {"family": "IntFamily", "oid": 20, "width": 64}
      },
      {
        "id": 1,
        "name": "i",
        "type": {"family": "IntFamily", "oid": 20, "width": 64}
      }
    ],
    "families": [
      {
        "columnIds": [
          1
        ],
        "columnNames": [
          "i"
        ],
        "defaultColumnId": 0,
        "id": 0,
        "name": "primary"
      }
    ],
    "formatVersion": 3,
    "id": 52,
    "name": "foo",
    "nextColumnId": 2,
    "nextFamilyId": 1,
    "nextIndexId": 2,
    "nextMutationId": 2,
    "parentId": 50,
    "primaryIndex": {
      "columnDirections": [
        "ASC"
      ],
      "columnIds": [
        1
      ],
      "columnNames": [
        "i"
      ],
      "compositeColumnIds": [],
      "createdExplicitly": false,
      "encodingType": 0,
      "id": 1,
      "name": "primary",
      "type": "FORWARD",
      "unique": true,
      "version": 2
    },
    "privileges": {
      "ownerProto": "root",
      "users": [
        {
          "privileges": 2,
          "userProto": "admin"
        },
        {
          "privileges": 2,
          "userProto": "root"
        }
      ],
      "version": 1
    },
    "state": "PUBLIC",
    "unexposedParentSchemaId": 29,
    "version": 1
  }
}'`
	// This is a statement to insert the invalid descriptor above using
	// crdb_internal.unsafe_upsert_descriptor.
	upsertInvalidateDuplicateColumnDescriptor = `
SELECT crdb_internal.unsafe_upsert_descriptor(52,
    crdb_internal.json_to_pb('cockroach.sql.sqlbase.Descriptor', ` +
		invalidDuplicateColumnDescriptor + `))`
	// These are CTEs for the below statements to update the above descriptor
	// and fix its validation problems (by dropping the duplicate column).
	updateInvalidateDuplicateColumnDescriptorCTEs = `
WITH as_json AS (
    SELECT crdb_internal.pb_to_json(
        'cockroach.sql.sqlbase.Descriptor',
        descriptor,
        false -- emit_defaults
    ) AS descriptor
    FROM system.descriptor
    WHERE id = 52
),
updated AS (
    SELECT crdb_internal.json_to_pb(
        'cockroach.sql.sqlbase.Descriptor',
        json_set(
            descriptor,
            ARRAY['table', 'columns'],
            json_build_array(
                descriptor->'table'->'columns'->0
            )
        )
    ) AS descriptor
    FROM as_json
)
`
	// This is a statement to update the above descriptor fixing its validity
	// problems without the force flag.
	updateInvalidateDuplicateColumnDescriptorNoForce = `` +
		updateInvalidateDuplicateColumnDescriptorCTEs + `
SELECT crdb_internal.unsafe_upsert_descriptor(52, descriptor)
FROM updated;
`
	// This is a statement to update the above descriptor fixing its validity
	// problems with the force flag.
	updateInvalidateDuplicateColumnDescriptorForce = `` +
		updateInvalidateDuplicateColumnDescriptorCTEs + `
SELECT crdb_internal.unsafe_upsert_descriptor(52, descriptor, true)
FROM updated;
`
	// This is a statement to repair an invalid descriptor using
	// crdb_internal.unsafe_upsert_descriptor.
	upsertRepair = `
SELECT crdb_internal.unsafe_upsert_descriptor(59, crdb_internal.json_to_pb('cockroach.sql.sqlbase.Descriptor',
'{
  "table": {
    "columns": [ { "id": 1, "name": "i", "type": { "family": "IntFamily", "oid": 20, "width": 64 } } ],
    "families": [
      {
        "columnIds": [ 1 ],
        "columnNames": [ "i" ],
        "defaultColumnId": 0,
        "id": 0,
        "name": "primary"
      }
    ],
    "formatVersion": 3,
    "id": 59,
    "name": "foo",
    "nextColumnId": 2,
    "nextFamilyId": 1,
    "nextIndexId": 2,
    "nextMutationId": 1,
    "parentId": 52,
    "primaryIndex": {
      "columnDirections": [ "ASC" ],
      "columnIds": [ 1 ],
      "columnNames": [ "i" ],
      "id": 1,
      "name": "primary",
      "type": "FORWARD",
      "unique": true,
      "version": 1
    },
    "privileges": {
      "owner_proto": "root",
      "users": [
        { "privileges": 2, "user_proto": "admin" },
        { "privileges": 2, "user_proto": "newuser1" },
        { "privileges": 2, "user_proto": "newuser2" },
        { "privileges": 2, "user_proto": "root" }
      ],
      "version": 1
    },
    "state": "PUBLIC",
    "unexposedParentSchemaId": 29,
    "version": 1
  }
}
'))
`
	// This is a statement to update the above descriptor's privileges.
	// It will change the table owner, add privileges for a new user,
	// alter the privilege of an existing user, and revoke all privileges for an old user.
	upsertUpdatePrivileges = `
SELECT crdb_internal.unsafe_upsert_descriptor(59, crdb_internal.json_to_pb('cockroach.sql.sqlbase.Descriptor',
'{
  "table": {
    "columns": [ { "id": 1, "name": "i", "type": { "family": "IntFamily", "oid": 20, "width": 64 } } ],
    "families": [
      {
        "columnIds": [ 1 ],
        "columnNames": [ "i" ],
        "defaultColumnId": 0,
        "id": 0,
        "name": "primary"
      }
    ],
    "formatVersion": 3,
    "id": 59,
    "name": "foo",
    "nextColumnId": 2,
    "nextFamilyId": 1,
    "nextIndexId": 2,
    "nextMutationId": 1,
    "parentId": 52,
    "primaryIndex": {
      "columnDirections": [ "ASC" ],
      "columnIds": [ 1 ],
      "columnNames": [ "i" ],
      "id": 1,
      "name": "primary",
      "type": "FORWARD",
      "unique": true,
      "version": 1
    },
    "privileges": {
      "owner_proto": "admin",
      "users": [
        { "privileges": 2, "user_proto": "admin" },
        { "privileges": 2, "user_proto": "root" },
        { "privileges": 8, "user_proto": "newuser1" }
      ],
      "version": 1
    },
    "state": "PUBLIC",
    "unexposedParentSchemaId": 29,
    "version": 1
  }
}
'))
`
)
|
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package pain_v01
import (
"encoding/xml"
"github.com/moov-io/iso20022/pkg/common"
"github.com/moov-io/iso20022/pkg/utils"
)
// AccountIdentification4Choice is a choice between an IBAN and a generic
// (proprietary) account identification.
type AccountIdentification4Choice struct {
	IBAN common.IBAN2007Identifier `xml:"IBAN"`
	Othr GenericAccountIdentification1 `xml:"Othr"`
}
// Validate checks r against its ISO 20022 constraints.
func (r AccountIdentification4Choice) Validate() error {
	return utils.Validate(&r)
}
// AccountSchemeName1Choice names an account identification scheme, either
// by external code or proprietary text.
type AccountSchemeName1Choice struct {
	Cd ExternalAccountIdentification1Code `xml:"Cd"`
	Prtry common.Max35Text `xml:"Prtry"`
}
// Validate checks r against its ISO 20022 constraints.
func (r AccountSchemeName1Choice) Validate() error {
	return utils.Validate(&r)
}
// ActiveCurrencyAndAmount is an amount with an active currency code
// attribute.
type ActiveCurrencyAndAmount struct {
	Value float64 `xml:",chardata"`
	Ccy common.ActiveCurrencyCode `xml:"Ccy,attr"`
}
// Validate checks r against its ISO 20022 constraints.
func (r ActiveCurrencyAndAmount) Validate() error {
	return utils.Validate(&r)
}
// ActiveOrHistoricCurrencyAndAmount is an amount with an active-or-historic
// currency code attribute.
type ActiveOrHistoricCurrencyAndAmount struct {
	Value float64 `xml:",chardata"`
	Ccy common.ActiveOrHistoricCurrencyCode `xml:"Ccy,attr"`
}
// Validate checks r against its ISO 20022 constraints.
func (r ActiveOrHistoricCurrencyAndAmount) Validate() error {
	return utils.Validate(&r)
}
// AuthenticationChannel1Choice names an authentication channel, either by
// external code or proprietary text.
type AuthenticationChannel1Choice struct {
	Cd ExternalAuthenticationChannel1Code `xml:"Cd"`
	Prtry common.Max35Text `xml:"Prtry"`
}
// Validate checks r against its ISO 20022 constraints.
func (r AuthenticationChannel1Choice) Validate() error {
	return utils.Validate(&r)
}
// Authorisation1Choice is a choice between a coded and a proprietary
// authorisation.
type Authorisation1Choice struct {
	Cd common.Authorisation1Code `xml:"Cd"`
	Prtry common.Max128Text `xml:"Prtry"`
}
// Validate checks r against its ISO 20022 constraints.
func (r Authorisation1Choice) Validate() error {
	return utils.Validate(&r)
}
// BranchAndFinancialInstitutionIdentification5 identifies a financial
// institution and, optionally, a specific branch of it.
type BranchAndFinancialInstitutionIdentification5 struct {
	FinInstnId FinancialInstitutionIdentification8 `xml:"FinInstnId"`
	BrnchId *BranchData2 `xml:"BrnchId,omitempty" json:",omitempty"`
}
// Validate checks r against its ISO 20022 constraints.
func (r BranchAndFinancialInstitutionIdentification5) Validate() error {
	return utils.Validate(&r)
}
// BranchData2 describes a branch by identifier, name, and postal address;
// all fields are optional.
type BranchData2 struct {
	Id *common.Max35Text `xml:"Id,omitempty" json:",omitempty"`
	Nm *common.Max140Text `xml:"Nm,omitempty" json:",omitempty"`
	PstlAdr *PostalAddress6 `xml:"PstlAdr,omitempty" json:",omitempty"`
}
// Validate checks r against its ISO 20022 constraints.
func (r BranchData2) Validate() error {
	return utils.Validate(&r)
}
// CashAccount24 identifies a cash account together with optional type,
// currency, and name.
type CashAccount24 struct {
	Id AccountIdentification4Choice `xml:"Id"`
	Tp *CashAccountType2Choice `xml:"Tp,omitempty" json:",omitempty"`
	Ccy *common.ActiveOrHistoricCurrencyCode `xml:"Ccy,omitempty" json:",omitempty"`
	Nm *common.Max70Text `xml:"Nm,omitempty" json:",omitempty"`
}
// Validate checks r against its ISO 20022 constraints.
func (r CashAccount24) Validate() error {
	return utils.Validate(&r)
}
// CashAccountType2Choice names a cash account type, either by external
// code or proprietary text.
type CashAccountType2Choice struct {
	Cd ExternalCashAccountType1Code `xml:"Cd"`
	Prtry common.Max35Text `xml:"Prtry"`
}
// Validate checks r against its ISO 20022 constraints.
func (r CashAccountType2Choice) Validate() error {
	return utils.Validate(&r)
}
// CategoryPurpose1Choice names a payment category purpose, either by
// external code or proprietary text.
type CategoryPurpose1Choice struct {
	Cd ExternalCategoryPurpose1Code `xml:"Cd"`
	Prtry common.Max35Text `xml:"Prtry"`
}
// Validate checks r against its ISO 20022 constraints.
func (r CategoryPurpose1Choice) Validate() error {
	return utils.Validate(&r)
}
// ClearingSystemIdentification2Choice names a clearing system, either by
// external code or proprietary text.
type ClearingSystemIdentification2Choice struct {
	Cd ExternalClearingSystemIdentification1Code `xml:"Cd"`
	Prtry common.Max35Text `xml:"Prtry"`
}
// Validate checks r against its ISO 20022 constraints.
func (r ClearingSystemIdentification2Choice) Validate() error {
	return utils.Validate(&r)
}
// ClearingSystemMemberIdentification2 identifies a member of a clearing
// system by member ID, optionally qualified by the clearing system itself.
type ClearingSystemMemberIdentification2 struct {
	ClrSysId *ClearingSystemIdentification2Choice `xml:"ClrSysId,omitempty" json:",omitempty"`
	MmbId common.Max35Text `xml:"MmbId"`
}
// Validate checks r against its ISO 20022 constraints.
func (r ClearingSystemMemberIdentification2) Validate() error {
	return utils.Validate(&r)
}
// ContactDetails2 carries optional contact information for a party.
type ContactDetails2 struct {
	NmPrfx *common.NamePrefix1Code `xml:"NmPrfx,omitempty" json:",omitempty"`
	Nm *common.Max140Text `xml:"Nm,omitempty" json:",omitempty"`
	PhneNb *common.PhoneNumber `xml:"PhneNb,omitempty" json:",omitempty"`
	MobNb *common.PhoneNumber `xml:"MobNb,omitempty" json:",omitempty"`
	FaxNb *common.PhoneNumber `xml:"FaxNb,omitempty" json:",omitempty"`
	EmailAdr *common.Max2048Text `xml:"EmailAdr,omitempty" json:",omitempty"`
	Othr *common.Max35Text `xml:"Othr,omitempty" json:",omitempty"`
}
// Validate checks r against its ISO 20022 constraints.
func (r ContactDetails2) Validate() error {
	return utils.Validate(&r)
}
// DateAndPlaceOfBirth records a birth date plus city/country (and optional
// province) of birth.
type DateAndPlaceOfBirth struct {
	BirthDt common.ISODate `xml:"BirthDt"`
	PrvcOfBirth *common.Max35Text `xml:"PrvcOfBirth,omitempty" json:",omitempty"`
	CityOfBirth common.Max35Text `xml:"CityOfBirth"`
	CtryOfBirth common.CountryCode `xml:"CtryOfBirth"`
}
// Validate checks r against its ISO 20022 constraints.
func (r DateAndPlaceOfBirth) Validate() error {
	return utils.Validate(&r)
}
// DatePeriodDetails1 is a date range with a required start and an optional
// end.
type DatePeriodDetails1 struct {
	FrDt common.ISODate `xml:"FrDt"`
	ToDt *common.ISODate `xml:"ToDt,omitempty" json:",omitempty"`
}
// Validate checks r against its ISO 20022 constraints.
func (r DatePeriodDetails1) Validate() error {
	return utils.Validate(&r)
}
type FinancialIdentificationSchemeName1Choice struct {
Cd ExternalFinancialInstitutionIdentification1Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r FinancialIdentificationSchemeName1Choice) Validate() error {
return utils.Validate(&r)
}
type FinancialInstitutionIdentification8 struct {
BICFI *common.BICFIIdentifier `xml:"BICFI,omitempty" json:",omitempty"`
ClrSysMmbId *ClearingSystemMemberIdentification2 `xml:"ClrSysMmbId,omitempty" json:",omitempty"`
Nm *common.Max140Text `xml:"Nm,omitempty" json:",omitempty"`
PstlAdr *PostalAddress6 `xml:"PstlAdr,omitempty" json:",omitempty"`
Othr *GenericFinancialIdentification1 `xml:"Othr,omitempty" json:",omitempty"`
}
func (r FinancialInstitutionIdentification8) Validate() error {
return utils.Validate(&r)
}
type Frequency36Choice struct {
Tp Frequency6Code `xml:"Tp"`
Prd FrequencyPeriod1 `xml:"Prd"`
PtInTm FrequencyAndMoment1 `xml:"PtInTm"`
}
func (r Frequency36Choice) Validate() error {
return utils.Validate(&r)
}
type Frequency37Choice struct {
Cd Frequency10Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r Frequency37Choice) Validate() error {
return utils.Validate(&r)
}
type FrequencyAndMoment1 struct {
Tp Frequency6Code `xml:"Tp"`
PtInTm common.Exact2NumericText `xml:"PtInTm"`
}
func (r FrequencyAndMoment1) Validate() error {
return utils.Validate(&r)
}
type FrequencyPeriod1 struct {
Tp Frequency6Code `xml:"Tp"`
CntPerPrd float64 `xml:"CntPerPrd"`
}
func (r FrequencyPeriod1) Validate() error {
return utils.Validate(&r)
}
type GenericAccountIdentification1 struct {
Id common.Max34Text `xml:"Id"`
SchmeNm *AccountSchemeName1Choice `xml:"SchmeNm,omitempty" json:",omitempty"`
Issr *common.Max35Text `xml:"Issr,omitempty" json:",omitempty"`
}
func (r GenericAccountIdentification1) Validate() error {
return utils.Validate(&r)
}
type GenericFinancialIdentification1 struct {
Id common.Max35Text `xml:"Id"`
SchmeNm *FinancialIdentificationSchemeName1Choice `xml:"SchmeNm,omitempty" json:",omitempty"`
Issr *common.Max35Text `xml:"Issr,omitempty" json:",omitempty"`
}
func (r GenericFinancialIdentification1) Validate() error {
return utils.Validate(&r)
}
type GenericOrganisationIdentification1 struct {
Id common.Max35Text `xml:"Id"`
SchmeNm *OrganisationIdentificationSchemeName1Choice `xml:"SchmeNm,omitempty" json:",omitempty"`
Issr *common.Max35Text `xml:"Issr,omitempty" json:",omitempty"`
}
func (r GenericOrganisationIdentification1) Validate() error {
return utils.Validate(&r)
}
type GenericPersonIdentification1 struct {
Id common.Max35Text `xml:"Id"`
SchmeNm *PersonIdentificationSchemeName1Choice `xml:"SchmeNm,omitempty" json:",omitempty"`
Issr *common.Max35Text `xml:"Issr,omitempty" json:",omitempty"`
}
func (r GenericPersonIdentification1) Validate() error {
return utils.Validate(&r)
}
// GroupHeader47 is the message-level header: message id, creation time and optional authorisations, initiating party and agents.
type GroupHeader47 struct {
MsgId common.Max35Text `xml:"MsgId"`
CreDtTm common.ISODateTime `xml:"CreDtTm"`
Authstn []Authorisation1Choice `xml:"Authstn,omitempty" json:",omitempty"`
InitgPty *PartyIdentification43 `xml:"InitgPty,omitempty" json:",omitempty"`
InstgAgt *BranchAndFinancialInstitutionIdentification5 `xml:"InstgAgt,omitempty" json:",omitempty"`
InstdAgt *BranchAndFinancialInstitutionIdentification5 `xml:"InstdAgt,omitempty" json:",omitempty"`
}
func (r GroupHeader47) Validate() error {
return utils.Validate(&r)
}
// LocalInstrument2Choice is a choice between an external local-instrument code (Cd) and a proprietary text (Prtry).
type LocalInstrument2Choice struct {
Cd ExternalLocalInstrument1Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r LocalInstrument2Choice) Validate() error {
return utils.Validate(&r)
}
// Mandate9 is a full mandate record: identification, authentication, type,
// occurrence schedule, amounts/adjustment, and the creditor/debtor parties,
// accounts and agents, plus referenced documents.
type Mandate9 struct {
MndtId common.Max35Text `xml:"MndtId"`
MndtReqId *common.Max35Text `xml:"MndtReqId,omitempty" json:",omitempty"`
Authntcn *MandateAuthentication1 `xml:"Authntcn,omitempty" json:",omitempty"`
Tp *MandateTypeInformation2 `xml:"Tp,omitempty" json:",omitempty"`
Ocrncs *MandateOccurrences4 `xml:"Ocrncs,omitempty" json:",omitempty"`
TrckgInd bool `xml:"TrckgInd"`
FrstColltnAmt *ActiveOrHistoricCurrencyAndAmount `xml:"FrstColltnAmt,omitempty" json:",omitempty"`
ColltnAmt *ActiveOrHistoricCurrencyAndAmount `xml:"ColltnAmt,omitempty" json:",omitempty"`
MaxAmt *ActiveOrHistoricCurrencyAndAmount `xml:"MaxAmt,omitempty" json:",omitempty"`
Adjstmnt *MandateAdjustment1 `xml:"Adjstmnt,omitempty" json:",omitempty"`
Rsn *MandateSetupReason1Choice `xml:"Rsn,omitempty" json:",omitempty"`
CdtrSchmeId *PartyIdentification43 `xml:"CdtrSchmeId,omitempty" json:",omitempty"`
Cdtr PartyIdentification43 `xml:"Cdtr"`
CdtrAcct *CashAccount24 `xml:"CdtrAcct,omitempty" json:",omitempty"`
CdtrAgt *BranchAndFinancialInstitutionIdentification5 `xml:"CdtrAgt,omitempty" json:",omitempty"`
UltmtCdtr *PartyIdentification43 `xml:"UltmtCdtr,omitempty" json:",omitempty"`
Dbtr PartyIdentification43 `xml:"Dbtr"`
DbtrAcct *CashAccount24 `xml:"DbtrAcct,omitempty" json:",omitempty"`
DbtrAgt BranchAndFinancialInstitutionIdentification5 `xml:"DbtrAgt"`
UltmtDbtr *PartyIdentification43 `xml:"UltmtDbtr,omitempty" json:",omitempty"`
MndtRef *common.Max35Text `xml:"MndtRef,omitempty" json:",omitempty"`
RfrdDoc []ReferredMandateDocument1 `xml:"RfrdDoc,omitempty" json:",omitempty"`
}
func (r Mandate9) Validate() error {
return utils.Validate(&r)
}
// MandateAdjustment1 describes how collection amounts adjust over time: rule indicator, category, amount and rate.
type MandateAdjustment1 struct {
DtAdjstmntRuleInd bool `xml:"DtAdjstmntRuleInd"`
Ctgy *Frequency37Choice `xml:"Ctgy,omitempty" json:",omitempty"`
Amt *ActiveCurrencyAndAmount `xml:"Amt,omitempty" json:",omitempty"`
Rate float64 `xml:"Rate,omitempty" json:",omitempty"`
}
func (r MandateAdjustment1) Validate() error {
return utils.Validate(&r)
}
// MandateAuthentication1 records how the mandate was authenticated: code, date and channel.
type MandateAuthentication1 struct {
MsgAuthntcnCd *common.Max16Text `xml:"MsgAuthntcnCd,omitempty" json:",omitempty"`
Dt *common.ISODate `xml:"Dt,omitempty" json:",omitempty"`
Chanl *AuthenticationChannel1Choice `xml:"Chanl,omitempty" json:",omitempty"`
}
func (r MandateAuthentication1) Validate() error {
return utils.Validate(&r)
}
// MandateClassification1Choice is a choice between a mandate-classification code (Cd) and a proprietary text (Prtry).
type MandateClassification1Choice struct {
Cd common.MandateClassification1Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r MandateClassification1Choice) Validate() error {
return utils.Validate(&r)
}
// MandateCopy1 is one copy-request entry: original message info, the original mandate, its status and supplementary data.
type MandateCopy1 struct {
OrgnlMsgInf *OriginalMessageInformation1 `xml:"OrgnlMsgInf,omitempty" json:",omitempty"`
OrgnlMndt *OriginalMandate4Choice `xml:"OrgnlMndt"`
MndtSts *MandateStatus1Choice `xml:"MndtSts,omitempty" json:",omitempty"`
SplmtryData []SupplementaryData1 `xml:"SplmtryData,omitempty" json:",omitempty"`
}
func (r MandateCopy1) Validate() error {
return utils.Validate(&r)
}
// MandateCopyRequestV01 is the MndtCpyReq message root: group header plus the underlying copy-request details.
type MandateCopyRequestV01 struct {
XMLName xml.Name `xml:"MndtCpyReq"`
GrpHdr GroupHeader47 `xml:"GrpHdr"`
UndrlygCpyReqDtls []MandateCopy1 `xml:"UndrlygCpyReqDtls" json:",omitempty"`
SplmtryData []SupplementaryData1 `xml:"SplmtryData,omitempty" json:",omitempty"`
}
func (r MandateCopyRequestV01) Validate() error {
return utils.Validate(&r)
}
// MandateOccurrences4 schedules the collections: sequence type, frequency, duration and first/final collection dates.
type MandateOccurrences4 struct {
SeqTp SequenceType2Code `xml:"SeqTp"`
Frqcy *Frequency36Choice `xml:"Frqcy,omitempty" json:",omitempty"`
Drtn *DatePeriodDetails1 `xml:"Drtn,omitempty" json:",omitempty"`
FrstColltnDt *common.ISODate `xml:"FrstColltnDt,omitempty" json:",omitempty"`
FnlColltnDt *common.ISODate `xml:"FnlColltnDt,omitempty" json:",omitempty"`
}
func (r MandateOccurrences4) Validate() error {
return utils.Validate(&r)
}
// MandateSetupReason1Choice is a choice between an external setup-reason code (Cd) and a proprietary text (Prtry).
type MandateSetupReason1Choice struct {
Cd ExternalMandateSetupReason1Code `xml:"Cd"`
Prtry common.Max70Text `xml:"Prtry"`
}
func (r MandateSetupReason1Choice) Validate() error {
return utils.Validate(&r)
}
// MandateStatus1Choice is a choice between an external mandate-status code (Cd) and a proprietary text (Prtry).
type MandateStatus1Choice struct {
Cd ExternalMandateStatus1Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r MandateStatus1Choice) Validate() error {
return utils.Validate(&r)
}
// MandateTypeInformation2 qualifies the mandate: service level, local instrument, category purpose and classification.
type MandateTypeInformation2 struct {
SvcLvl *ServiceLevel8Choice `xml:"SvcLvl,omitempty" json:",omitempty"`
LclInstrm *LocalInstrument2Choice `xml:"LclInstrm,omitempty" json:",omitempty"`
CtgyPurp *CategoryPurpose1Choice `xml:"CtgyPurp,omitempty" json:",omitempty"`
Clssfctn *MandateClassification1Choice `xml:"Clssfctn,omitempty" json:",omitempty"`
}
func (r MandateTypeInformation2) Validate() error {
return utils.Validate(&r)
}
// OrganisationIdentification8 identifies an organisation by BIC and/or generic identifiers.
type OrganisationIdentification8 struct {
AnyBIC *common.AnyBICIdentifier `xml:"AnyBIC,omitempty" json:",omitempty"`
Othr []GenericOrganisationIdentification1 `xml:"Othr,omitempty" json:",omitempty"`
}
func (r OrganisationIdentification8) Validate() error {
return utils.Validate(&r)
}
// OrganisationIdentificationSchemeName1Choice is a choice between an external organisation-id code (Cd) and a proprietary text (Prtry).
type OrganisationIdentificationSchemeName1Choice struct {
Cd ExternalOrganisationIdentification1Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r OrganisationIdentificationSchemeName1Choice) Validate() error {
return utils.Validate(&r)
}
// OriginalMandate4Choice references the original mandate either by id or by the full mandate record.
type OriginalMandate4Choice struct {
OrgnlMndtId common.Max35Text `xml:"OrgnlMndtId"`
OrgnlMndt Mandate9 `xml:"OrgnlMndt"`
}
func (r OriginalMandate4Choice) Validate() error {
return utils.Validate(&r)
}
// OriginalMessageInformation1 references a previously sent message by id, name and optional creation time.
type OriginalMessageInformation1 struct {
MsgId common.Max35Text `xml:"MsgId"`
MsgNmId common.Max35Text `xml:"MsgNmId"`
CreDtTm *common.ISODateTime `xml:"CreDtTm,omitempty" json:",omitempty"`
}
func (r OriginalMessageInformation1) Validate() error {
return utils.Validate(&r)
}
// Party11Choice is a choice between an organisation identification and a private-person identification.
type Party11Choice struct {
OrgId OrganisationIdentification8 `xml:"OrgId"`
PrvtId PersonIdentification5 `xml:"PrvtId"`
}
func (r Party11Choice) Validate() error {
return utils.Validate(&r)
}
// PartyIdentification43 identifies a party by name, postal address, id, country of residence and contact details.
type PartyIdentification43 struct {
Nm *common.Max140Text `xml:"Nm,omitempty" json:",omitempty"`
PstlAdr *PostalAddress6 `xml:"PstlAdr,omitempty" json:",omitempty"`
Id *Party11Choice `xml:"Id,omitempty" json:",omitempty"`
CtryOfRes *common.CountryCode `xml:"CtryOfRes,omitempty" json:",omitempty"`
CtctDtls *ContactDetails2 `xml:"CtctDtls,omitempty" json:",omitempty"`
}
func (r PartyIdentification43) Validate() error {
return utils.Validate(&r)
}
// PersonIdentification5 identifies a person by date/place of birth and/or generic identifiers.
type PersonIdentification5 struct {
DtAndPlcOfBirth *DateAndPlaceOfBirth `xml:"DtAndPlcOfBirth,omitempty" json:",omitempty"`
Othr []GenericPersonIdentification1 `xml:"Othr,omitempty" json:",omitempty"`
}
func (r PersonIdentification5) Validate() error {
return utils.Validate(&r)
}
// PersonIdentificationSchemeName1Choice is a choice between an external person-id code (Cd) and a proprietary text (Prtry).
type PersonIdentificationSchemeName1Choice struct {
Cd ExternalPersonIdentification1Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r PersonIdentificationSchemeName1Choice) Validate() error {
return utils.Validate(&r)
}
// PostalAddress6 is a structured postal address; AdrLine carries unstructured address lines.
type PostalAddress6 struct {
AdrTp *common.AddressType2Code `xml:"AdrTp,omitempty" json:",omitempty"`
Dept *common.Max70Text `xml:"Dept,omitempty" json:",omitempty"`
SubDept *common.Max70Text `xml:"SubDept,omitempty" json:",omitempty"`
StrtNm *common.Max70Text `xml:"StrtNm,omitempty" json:",omitempty"`
BldgNb *common.Max16Text `xml:"BldgNb,omitempty" json:",omitempty"`
PstCd *common.Max16Text `xml:"PstCd,omitempty" json:",omitempty"`
TwnNm *common.Max35Text `xml:"TwnNm,omitempty" json:",omitempty"`
CtrySubDvsn *common.Max35Text `xml:"CtrySubDvsn,omitempty" json:",omitempty"`
Ctry *common.CountryCode `xml:"Ctry,omitempty" json:",omitempty"`
AdrLine []common.Max70Text `xml:"AdrLine,omitempty" json:",omitempty"`
}
func (r PostalAddress6) Validate() error {
return utils.Validate(&r)
}
// ReferredDocumentType3Choice is a choice between a document-type code (Cd) and a proprietary text (Prtry).
type ReferredDocumentType3Choice struct {
Cd DocumentType6Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r ReferredDocumentType3Choice) Validate() error {
return utils.Validate(&r)
}
// ReferredDocumentType4 is a referred document type with optional issuer.
type ReferredDocumentType4 struct {
CdOrPrtry ReferredDocumentType3Choice `xml:"CdOrPrtry"`
Issr *common.Max35Text `xml:"Issr,omitempty" json:",omitempty"`
}
func (r ReferredDocumentType4) Validate() error {
return utils.Validate(&r)
}
// ReferredMandateDocument1 references a document related to the mandate: type, number, creditor reference and date.
type ReferredMandateDocument1 struct {
Tp *ReferredDocumentType4 `xml:"Tp,omitempty" json:",omitempty"`
Nb *common.Max35Text `xml:"Nb,omitempty" json:",omitempty"`
CdtrRef *common.Max35Text `xml:"CdtrRef,omitempty" json:",omitempty"`
RltdDt *common.ISODate `xml:"RltdDt,omitempty" json:",omitempty"`
}
func (r ReferredMandateDocument1) Validate() error {
return utils.Validate(&r)
}
// ServiceLevel8Choice is a choice between an external service-level code (Cd) and a proprietary text (Prtry).
type ServiceLevel8Choice struct {
Cd ExternalServiceLevel1Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r ServiceLevel8Choice) Validate() error {
return utils.Validate(&r)
}
// SupplementaryData1 wraps additional, schema-defined data in an envelope, with an optional place-and-name locator.
type SupplementaryData1 struct {
PlcAndNm *common.Max350Text `xml:"PlcAndNm,omitempty" json:",omitempty"`
Envlp SupplementaryDataEnvelope1 `xml:"Envlp"`
}
func (r SupplementaryData1) Validate() error {
return utils.Validate(&r)
}
// SupplementaryDataEnvelope1 captures any XML content of the envelope as a raw string.
type SupplementaryDataEnvelope1 struct {
Item string `xml:",any"`
}
func (r SupplementaryDataEnvelope1) Validate() error {
return utils.Validate(&r)
}
// MandateSuspension1 is one suspension-request entry: request id, original message info, reason and the original mandate.
type MandateSuspension1 struct {
SspnsnReqId common.Max35Text `xml:"SspnsnReqId"`
OrgnlMsgInf *OriginalMessageInformation1 `xml:"OrgnlMsgInf,omitempty" json:",omitempty"`
SspnsnRsn MandateSuspensionReason1 `xml:"SspnsnRsn"`
OrgnlMndt OriginalMandate4Choice `xml:"OrgnlMndt"`
SplmtryData []SupplementaryData1 `xml:"SplmtryData,omitempty" json:",omitempty"`
}
func (r MandateSuspension1) Validate() error {
return utils.Validate(&r)
}
// MandateSuspensionReason1 gives the suspension reason, its originator and additional free text.
type MandateSuspensionReason1 struct {
Orgtr *PartyIdentification43 `xml:"Orgtr,omitempty" json:",omitempty"`
Rsn MandateSuspensionReason1Choice `xml:"Rsn"`
AddtlInf []common.Max105Text `xml:"AddtlInf,omitempty" json:",omitempty"`
}
func (r MandateSuspensionReason1) Validate() error {
return utils.Validate(&r)
}
// MandateSuspensionReason1Choice is a choice between an external suspension-reason code (Cd) and a proprietary text (Prtry).
type MandateSuspensionReason1Choice struct {
Cd ExternalMandateSuspensionReason1Code `xml:"Cd"`
Prtry common.Max35Text `xml:"Prtry"`
}
func (r MandateSuspensionReason1Choice) Validate() error {
return utils.Validate(&r)
}
// MandateSuspensionRequestV01 is the MndtSspnsnReq message root: group header plus the underlying suspension details.
type MandateSuspensionRequestV01 struct {
XMLName xml.Name `xml:"MndtSspnsnReq"`
GrpHdr GroupHeader47 `xml:"GrpHdr"`
UndrlygSspnsnDtls []MandateSuspension1 `xml:"UndrlygSspnsnDtls" json:",omitempty"`
SplmtryData []SupplementaryData1 `xml:"SplmtryData,omitempty" json:",omitempty"`
}
func (r MandateSuspensionRequestV01) Validate() error {
return utils.Validate(&r)
}
|
// Copyright 2017 Alem Abreha <alem.abreha@gmail.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
package ccm
import (
"bytes"
"encoding/json"
"gopkg.in/yaml.v2"
"io/ioutil"
"log"
"os"
"text/template"
)
// ERRHandler logs err together with a short context message; it is a no-op
// when err is nil. Errors are only logged, never fatal, so callers continue.
func ERRHandler(err error, message string) {
	if err != nil {
		// fixed "eRROR" capitalisation typo in the log prefix
		log.Println("ERROR @ : ", message, " :", err)
	}
}
// TenantInfo loads <CCM_REPO_PATH>/tenant.yml and decodes it into a
// circonusTenant. Read/decode failures are logged via ERRHandler and the
// (possibly zero-valued) tenant is returned regardless.
func TenantInfo() circonusTenant {
	repoPath := os.Getenv("CCM_REPO_PATH")
	tenantFile := repoPath + "/tenant.yml"
	raw, err := ioutil.ReadFile(tenantFile)
	ERRHandler(err, "read tenant.yml")
	var tenant circonusTenant
	ERRHandler(yaml.Unmarshal(raw, &tenant), "unmarshal tenant.yml")
	return tenant
}
// HostGroups reads every YAML file under <CCM_REPO_PATH>/ccm_hosts and
// returns the decoded HostGroup structs, one per file.
func HostGroups() ([]HostGroup, error) {
	repoPath := os.Getenv("CCM_REPO_PATH")
	hostDir := repoPath + "/ccm_hosts/"
	entries, err := ioutil.ReadDir(hostDir)
	ERRHandler(err, "read ccm_hosts dir")
	if err != nil {
		return nil, err
	}
	var groups []HostGroup
	for _, entry := range entries {
		hostFile := hostDir + entry.Name()
		raw, err := ioutil.ReadFile(hostFile)
		ERRHandler(err, "read host_file in ccm_hosts "+hostFile)
		if err != nil {
			return nil, err
		}
		var group HostGroup
		ERRHandler(yaml.Unmarshal(raw, &group), "unmarshal host_file "+hostFile)
		groups = append(groups, group)
	}
	return groups, nil
}
// GroupHostList returns the member hosts of the named host group. When no
// group matches, the result is an empty (non-nil) list.
func GroupHostList(group string) ([]string, error) {
	groups, err := HostGroups()
	if err != nil {
		return nil, err
	}
	// default result when the group is not found
	hosts := []string{}
	for _, hg := range groups {
		if hg.GroupName == group {
			hosts = hg.Members
		}
	}
	return hosts, nil
}
// ConfigFileList returns the names of the files found under
// <CCM_REPO_PATH>/ccm_configs.
func ConfigFileList() ([]string, error) {
	repoPath := os.Getenv("CCM_REPO_PATH")
	configDir := repoPath + "/ccm_configs/"
	entries, err := ioutil.ReadDir(configDir)
	ERRHandler(err, "read ccm_configs dir")
	if err != nil {
		return nil, err
	}
	var names []string
	for _, entry := range entries {
		names = append(names, entry.Name())
	}
	return names, nil
}
// CCMRead decodes the named JSON file under <CCM_REPO_PATH>/ccm_configs into
// a CCMConf. On read failure the zero CCMConf is returned; decode failures
// are only logged.
func CCMRead(conf_file string) CCMConf {
	repoPath := os.Getenv("CCM_REPO_PATH")
	configPath := repoPath + "/ccm_configs/" + conf_file
	raw, err := ioutil.ReadFile(configPath)
	ERRHandler(err, "read ccm_config file ")
	if err != nil {
		return CCMConf{}
	}
	var conf CCMConf
	ERRHandler(json.Unmarshal(raw, &conf), "CcmConf unmarshal : ")
	return conf
}
// Zipper renders template_file against the values from config_file (decoded
// into a CcmTemplate whose Target field is overridden with host) and
// unmarshals the rendered JSON into a CheckInput check-bundle definition.
// Errors are logged via ERRHandler rather than returned, so on failure the
// result may be partially filled.
func Zipper(template_file string, config_file string, host string) CheckInput {
template_data, err := ioutil.ReadFile(template_file)
ERRHandler(err, "template_file_read ")
config_data, err := ioutil.ReadFile(config_file)
ERRHandler(err, "config_file_read ")
var check_config CcmTemplate
ERRHandler(json.Unmarshal(config_data, &check_config), "check_config unmarshal ")
//update target field
check_config.Target = host
// template.Must panics on an invalid template, aborting the program
template_parser := template.Must(template.New("check_config").Parse(string(template_data)))
zipped_data := new(bytes.Buffer)
ERRHandler(template_parser.Execute(zipped_data, check_config), "template execute : ")
//fmt.Println("\n",zipped_data.String(),"\n")
var parsed_check CheckInput
ERRHandler(json.Unmarshal(zipped_data.Bytes(), &parsed_check), "parsed_check template unmarshal : ")
return parsed_check
}
// ZipRoutin launches a goroutine that maps the configuration onto the
// template for one host and returns a receive-only channel that yields the
// single resulting CheckInput before being closed.
func ZipRoutin(template_file string, configuration_file string, host string) <-chan CheckInput {
check_holder := make(chan CheckInput)
go func() {
check_input := Zipper(template_file, configuration_file, host)
// Zipper already sets Target; set it again defensively.
check_input.Target = host
log.Println("inspecting check configuration\n", check_input)
check_holder <- check_input
close(check_holder)
}()
return check_holder
}
|
package models
// Metric is the GORM model for a sensor reading with temperature and
// moisture values.
type Metric struct {
ID uint `gorm:"primary_key" json:"id"`
Temp float64 `gorm:"not null" json:"temp" binding:"required"`
Moisture float64 `json:"moisture" binding:"required"`
}
// List carries the ids of metrics targeted by a bulk-delete request.
type List struct {
ListID []int `json:"listID" binding:"required"`
}
|
package usecase
import (
"context"
"github.com/pkg/errors"
"github.com/utahta/momoclo-channel/crawler"
"github.com/utahta/momoclo-channel/entity"
"github.com/utahta/momoclo-channel/event"
"github.com/utahta/momoclo-channel/event/eventtask"
"github.com/utahta/momoclo-channel/log"
"github.com/utahta/momoclo-channel/validator"
)
type (
// CrawlFeed is the "crawl a feed" use case: it fetches feed items, records
// the latest entry seen and enqueues notification tasks.
CrawlFeed struct {
log log.Logger
feed crawler.FeedFetcher
taskQueue event.TaskQueue
repo entity.LatestEntryRepository
}
// CrawlFeedParams is the input to CrawlFeed.Do.
CrawlFeedParams struct {
Code crawler.FeedCode // identifies the target feed to crawl
}
)
// NewCrawlFeed wires a CrawlFeed use case with its logger, feed fetcher,
// task queue and latest-entry repository dependencies.
func NewCrawlFeed(
log log.Logger,
feed crawler.FeedFetcher,
taskQueue event.TaskQueue,
repo entity.LatestEntryRepository) *CrawlFeed {
return &CrawlFeed{
log: log,
feed: feed,
taskQueue: taskQueue,
repo: repo,
}
}
// Do crawls the feed identified by params.Code and, when new entries are
// found, stores the newest entry and enqueues tweet and line notification
// tasks for every fetched item.
//
// It is a no-op when the feed returns no items or when the newest item
// matches the stored latest entry (same URL and published time).
func (use *CrawlFeed) Do(ctx context.Context, params CrawlFeedParams) error {
	const errTag = "CrawlFeed.Do failed"

	items, err := use.feed.Fetch(ctx, params.Code, 1, use.repo.GetURL(ctx, params.Code.String()))
	if err != nil {
		return errors.Wrap(err, errTag)
	}
	if len(items) == 0 {
		return nil
	}

	for i := range items {
		if err := validator.Validate(items[i]); err != nil {
			use.log.Errorf(ctx, "%v: validate error i:%v items:%v err:%v", errTag, i, items, err)
			return errors.Wrap(err, errTag)
		}
	}

	// update latest entry
	item := items[0] // first item is the latest entry
	l, err := use.repo.FindOrNewByURL(ctx, item.FeedCode().String(), item.EntryURL)
	if err != nil {
		return errors.Wrapf(err, "%v: url:%v", errTag, item.EntryURL)
	}
	if l.URL == item.EntryURL && l.PublishedAt.Equal(item.PublishedAt) {
		return nil // already got these feed items; nothing to do
	}
	l.URL = item.EntryURL
	l.PublishedAt = item.PublishedAt
	if err := use.repo.Save(ctx, l); err != nil {
		// was errors.Wrapf with no format arguments; Wrap is the right call
		return errors.Wrap(err, errTag)
	}

	// push notification events for every fetched item
	var tasks []event.Task
	for _, item := range items {
		tasks = append(tasks,
			eventtask.NewEnqueueTweets(item),
			eventtask.NewEnqueueLines(item),
		)
	}
	if err := use.taskQueue.PushMulti(ctx, tasks); err != nil {
		return errors.Wrap(err, errTag)
	}

	use.log.Infof(ctx, "crawl feed items:%v", items)
	return nil
}
|
package cmd
import (
"errors"
"fmt"
"github.com/NodeFactoryIo/vedran/internal/ui/prompts"
"strconv"
)
// ValidatePayoutFlags validates the payout CLI flags and returns the total
// reward amount.
//
// payoutReward == "-1" means "distribute the entire wallet balance": this
// requires payoutAddress (where the lb fee is sent) and, when showPrompts is
// set, interactive confirmation; the returned amount is then 0 because the
// actual amount is resolved from the wallet at payout time. Any other value
// must parse as a float >= -1.
func ValidatePayoutFlags(
	payoutReward string,
	payoutAddress string,
	showPrompts bool,
) (float64, error) {
	// explicit numeric reward: parse and range-check, then return early
	if payoutReward != "-1" {
		rewardAsFloat64, err := strconv.ParseFloat(payoutReward, 64)
		if err != nil || rewardAsFloat64 < -1 {
			return 0, errors.New("invalid total reward value")
		}
		return rewardAsFloat64, nil
	}
	// reward defined as the entire wallet balance
	if payoutAddress == "" {
		return 0, errors.New("Unable to set reward amount to entire wallet balance if fee address not provided")
	}
	if showPrompts {
		confirmed, err := prompts.ShowConfirmationPrompt(
			// fixed missing separator between the two concatenated sentences
			fmt.Sprintf("You choose that reward amount is defined as entire balance on lb wallet! "+
				"On payout entire balance will be distributed as reward and lb fee will be sent to address %s",
				payoutAddress),
		)
		if err != nil {
			return 0, err
		}
		if !confirmed {
			return 0, errors.New("Payout configuration canceled")
		}
	}
	return 0, nil
}
|
package crawler
import (
"crypto/tls"
"net/http"
"time"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/transform"
)
// Novel represents a novel: a display name plus the URL of its index
// (chapter list) page.
type Novel struct {
Name string
IndexURL string
}
// ICrawler crawls novels: it resolves the latest chapter of a novel and
// manages the tracked novel set.
type ICrawler interface {
// LatestChapter returns two strings for the novel's newest chapter
// (presumably title and URL — confirm with an implementation).
LatestChapter(Novel) (string, string)
AddNovels([]Novel)
GetNovels() []Novel
}
// gbkToUTF8 decodes a GBK-encoded string to UTF-8. Decoding errors are
// ignored (best effort) and the partially decoded text is returned.
func gbkToUTF8(gbk string) string {
	decoder := simplifiedchinese.GBK.NewDecoder()
	decoded, _, _ := transform.String(decoder, gbk)
	return decoded
}
// getClient builds an HTTP client with a 10-second overall request timeout.
//
// NOTE(review): InsecureSkipVerify disables TLS certificate verification,
// exposing requests to man-in-the-middle attacks — presumably needed for
// sites with broken certificates; confirm before reuse elsewhere.
func getClient() *http.Client {
	// was time.Duration(10 * time.Second): a redundant conversion, since
	// 10 * time.Second is already a time.Duration
	const timeout = 10 * time.Second
	return &http.Client{
		Timeout: timeout,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
}
|
package leetcode
import (
"strconv"
"strings"
)
// areNumbersAscending reports whether every number token in the
// space-separated sentence s is strictly greater than the number before it.
// Tokens that do not start with a digit are ignored.
//
// Fixes over the previous version: strings.Fields skips runs of spaces, so
// an empty token no longer panics on v[0]; and the digit test now checks the
// lower bound too, so tokens starting with characters below '0' are not
// misread as numbers.
func areNumbersAscending(s string) bool {
	prev := 0
	for _, tok := range strings.Fields(s) {
		if tok[0] < '0' || tok[0] > '9' {
			continue
		}
		cur, err := strconv.Atoi(tok)
		if err != nil {
			// starts with a digit but is not a valid number
			return false
		}
		if cur <= prev {
			return false
		}
		prev = cur
	}
	return true
}
|
package service
import (
"errors"
"testing"
"time"
"github.com/jinzhu/gorm"
meetupmanager "github.com/lucas-dev-it/62252aee-9d11-4149-a0ea-de587cbcd233"
"github.com/lucas-dev-it/62252aee-9d11-4149-a0ea-de587cbcd233/business/model"
"github.com/lucas-dev-it/62252aee-9d11-4149-a0ea-de587cbcd233/weather"
"github.com/stretchr/testify/assert"
)
// mRepo is a stub meetup repository; behavior is keyed off the meetup id:
// id < 100 -> a meetup happening now, id < 200 -> same but with the
// "argentinaCached" country (drives the cache path), id < 300 -> a meetup
// one year in the future, anything else -> an error.
type mRepo struct{}
// CountMeetupAttendees echoes the meetup id as the attendee count.
func (m mRepo) CountMeetupAttendees(meetupID uint) int {
return int(meetupID)
}
// FindMeetupByID fabricates a meetup per the id ranges described on mRepo.
func (m mRepo) FindMeetupByID(meetupID uint) (*model.MeetUp, error) {
if meetupID < 100 {
now := time.Now()
// one nanosecond later — only needs EndDate > StartDate
after := now.Add(1)
return &model.MeetUp{
Model: gorm.Model{ID: meetupID},
Name: "test",
Description: "test",
StartDate: &now,
EndDate: &after,
Country: "argentina",
State: "cordoba",
City: "cordoba",
}, nil
} else if meetupID < 200 {
now := time.Now()
after := now.Add(1)
return &model.MeetUp{
Model: gorm.Model{ID: meetupID},
Name: "test",
Description: "test",
StartDate: &now,
EndDate: &after,
Country: "argentinaCached",
State: "cordoba",
City: "cordoba",
}, nil
} else if meetupID < 300 {
// one year in the future
now := time.Now().AddDate(1, 0, 0)
after := now.Add(1)
return &model.MeetUp{
Model: gorm.Model{ID: meetupID},
Name: "test future",
Description: "test future",
StartDate: &now,
EndDate: &after,
Country: "argentina",
State: "cordoba",
City: "cordoba",
}, nil
}
return nil, errors.New("intended error")
}
// cacheRepo is a stub forecast cache: storing under "wrongKey" fails, and
// only "argentinacached-cordoba" counts as a cache hit.
type cacheRepo struct{}
func (c cacheRepo) StoreForecast(key string, forecast *weather.Forecast) error {
if key == "wrongKey" {
return errors.New("error from cache layer")
}
return nil
}
func (c cacheRepo) RetrieveForecast(key string) (*weather.Forecast, error) {
if key == "argentinacached-cordoba" {
return getValidForecast()
}
return nil, errors.New("no present in cache")
}
// wService is a stub weather provider that only succeeds for "argentina".
type wService struct{}
func (w wService) GetForecast(country, state, city string, forecastDays uint) (*weather.Forecast, error) {
if country == "argentina" {
return getValidForecast()
}
return nil, errors.New("weather not available from provider")
}
// getValidForecast returns a fixed two-day forecast (today 25-35, tomorrow
// 26-36) keyed by the unix time of each day's midnight.
func getValidForecast() (*weather.Forecast, error) {
now := time.Now()
tomorrow := now.AddDate(0, 0, 1)
nowUnix := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Unix()
tomorrowUnix := time.Date(tomorrow.Year(), tomorrow.Month(), tomorrow.Day(), 0, 0, 0, 0, tomorrow.Location()).Unix()
return &weather.Forecast{DateTempMap: map[int64]*weather.DailyForecast{
nowUnix: {
MinTemp: 25,
MaxTemp: 35,
},
tomorrowUnix: {
MinTemp: 26,
MaxTemp: 36,
},
}}, nil
}
// getService builds the MeetUpService under test with all stubs wired in.
func getService() *MeetUpService {
return NewMeetUpService(mRepo{}, cacheRepo{}, wService{})
}
// TestMeetUpService_CalculateBeerPacksForMeetup: happy path for a current
// meetup (id < 100) with the forecast coming from the provider stub.
func TestMeetUpService_CalculateBeerPacksForMeetup(t *testing.T) {
	s := getService()
	data, err := s.CalculateBeerPacksForMeetup(99)
	if err != nil {
		// Fatalf (was t.Error): Error continues the test, and the nil data
		// would then panic on *data.BeerPacks, masking the real failure.
		t.Fatalf("unexpected error, got %+v", err)
	}
	assert.Equal(t, 49.5, *data.BeerPacks)
	assert.Equal(t, float64(25), data.MinTemperature)
	assert.Equal(t, float64(35), data.MaxTemperature)
}

// TestMeetUpService_CalculateBeerPacksForMeetup_FromCache: ids in [100,200)
// resolve the forecast through the cache stub.
func TestMeetUpService_CalculateBeerPacksForMeetup_FromCache(t *testing.T) {
	s := getService()
	data, err := s.CalculateBeerPacksForMeetup(199)
	if err != nil {
		t.Fatalf("unexpected error, got %+v", err)
	}
	assert.Equal(t, 99.5, *data.BeerPacks)
	assert.Equal(t, float64(25), data.MinTemperature)
	assert.Equal(t, float64(35), data.MaxTemperature)
}

// TestMeetUpService_CalculateBeerPacksForMeetup_NotExistingMeeting: unknown
// ids must yield an error.
func TestMeetUpService_CalculateBeerPacksForMeetup_NotExistingMeeting(t *testing.T) {
	s := getService()
	data, err := s.CalculateBeerPacksForMeetup(9999)
	if err == nil && data != nil {
		t.Fatalf("expected error, got %+v", data)
	}
}

// TestMeetUpService_CalculateBeerPacksForMeetup_FutureMeeting: meetups a
// year out (ids in [200,300)) are rejected with a CustomError.
func TestMeetUpService_CalculateBeerPacksForMeetup_FutureMeeting(t *testing.T) {
	s := getService()
	data, err := s.CalculateBeerPacksForMeetup(299)
	if err == nil && data != nil {
		t.Fatalf("expected error, got %+v", data)
	}
	assert.IsType(t, meetupmanager.CustomError{}, err)
}

// TestMeetUpService_GetMeetupWeather: happy path, provider-backed forecast.
func TestMeetUpService_GetMeetupWeather(t *testing.T) {
	s := getService()
	data, err := s.GetMeetupWeather(99)
	if err != nil {
		// Fatalf (was t.Error): see the note in the first test above.
		t.Fatalf("unexpected error, got %+v", err)
	}
	assert.Equal(t, float64(25), data.MinTemperature)
	assert.Equal(t, float64(35), data.MaxTemperature)
}

// TestMeetUpService_GetMeetupWeather_FromCache: cache-backed forecast.
func TestMeetUpService_GetMeetupWeather_FromCache(t *testing.T) {
	s := getService()
	data, err := s.GetMeetupWeather(199)
	if err != nil {
		t.Fatalf("unexpected error, got %+v", err)
	}
	assert.Equal(t, float64(25), data.MinTemperature)
	assert.Equal(t, float64(35), data.MaxTemperature)
}

// TestMeetUpService_GetMeetupWeather_NotExistingMeeting: unknown ids error.
func TestMeetUpService_GetMeetupWeather_NotExistingMeeting(t *testing.T) {
	s := getService()
	data, err := s.GetMeetupWeather(9999)
	if err == nil && data != nil {
		t.Fatalf("expected error, got %+v", data)
	}
}

// TestMeetUpService_GetMeetupWeather_FutureMeeting: future meetups are
// rejected with a CustomError.
func TestMeetUpService_GetMeetupWeather_FutureMeeting(t *testing.T) {
	s := getService()
	data, err := s.GetMeetupWeather(299)
	if err == nil && data != nil {
		t.Fatalf("expected error, got %+v", data)
	}
	assert.IsType(t, meetupmanager.CustomError{}, err)
}
|
package extractors
import (
"testing"
"github.com/iawia002/annie/config"
"github.com/iawia002/annie/test"
)
// TestTumblr runs the Tumblr extractor against live post URLs in info-only
// mode and checks title/size via the shared test helper.
// NOTE(review): these cases hit the network and depend on the remote posts
// staying unchanged; expect flakiness if posts are edited or removed.
func TestTumblr(t *testing.T) {
config.InfoOnly = true
tests := []struct {
name string
args test.Args
}{
{
name: "image test",
args: test.Args{
URL: "http://fuckyeah-fx.tumblr.com/post/170392654141/180202-%E5%AE%8B%E8%8C%9C",
Title: "f(x)",
Size: 1690025,
},
},
{
name: "image test",
args: test.Args{
URL: "http://therealautoblog.tumblr.com/post/171623222197/paganis-new-projects-huayra-successor-with",
Title: "Autoblog • Pagani’s new projects:Huayra successor with...",
Size: 154722,
},
},
{
name: "image test",
args: test.Args{
URL: "https://outdoorspastelnature.tumblr.com/post/170380315768/feel-at-peace",
Title: "Pastel Nature",
Size: 514444,
},
},
{
name: "video test",
args: test.Args{
URL: "https://vernot-today.tumblr.com/post/171963191024/ten-aint-playin-around-anymore",
Title: "Some Random K-Pop Blog",
Size: 5758939,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
data := Tumblr(tt.args.URL)
test.Check(t, tt.args, data)
})
}
}
|
package main
import (
"sync"
)
// Hub maintains the set of active clients and their auth tokens.
type Hub struct {
// Registered clients, keyed by user id; guarded by mutex_client.
clients map[int]*Client
// Auth tokens keyed by user id; guarded by mutex_token.
tokens map[int]string
mutex_client sync.RWMutex
mutex_token sync.RWMutex
}
// InitHub constructs an empty Hub with initialised client and token maps.
func InitHub() *Hub {
	h := &Hub{}
	h.clients = make(map[int]*Client)
	h.tokens = make(map[int]string)
	return h
}
// Register adds the client to the hub, keyed by its user id, replacing any
// previously registered client for that id.
// (Receiver renamed from the non-idiomatic `self` to `h`.)
func (h *Hub) Register(c *Client) {
	h.mutex_client.Lock()
	h.clients[c.userid] = c
	h.mutex_client.Unlock()
}
// UnRegister removes the client from the hub and closes its send channel,
// signalling the client's writer to stop.
// NOTE(review): calling UnRegister twice for the same client double-closes
// c.send and panics — confirm callers guarantee a single call.
// (Receiver renamed from the non-idiomatic `self` to `h`.)
func (h *Hub) UnRegister(c *Client) {
	h.mutex_client.Lock()
	delete(h.clients, c.userid)
	close(c.send)
	h.mutex_client.Unlock()
}
// GetClient returns the client registered for userid, or nil if absent.
// (Receiver renamed from the non-idiomatic `self` to `h`; the temp-variable
// dance replaced with an early return under defer-unlocked read lock.)
func (h *Hub) GetClient(userid int) *Client {
	h.mutex_client.RLock()
	defer h.mutex_client.RUnlock()
	if c, ok := h.clients[userid]; ok {
		return c
	}
	return nil
}
// BindToken stores (or replaces) the auth token for the given user id.
// (Receiver renamed from the non-idiomatic `self` to `h`.)
func (h *Hub) BindToken(uid int, token string) {
	h.mutex_token.Lock()
	h.tokens[uid] = token
	h.mutex_token.Unlock()
}
// isValidToken reports whether token matches the one bound to uid.
// Note: an unbound uid maps to the empty string, so it matches an empty
// token — same as the original behavior.
// (Receiver renamed from `self` to `h`; `if t == token { return true }`
// collapsed to a direct comparison return.)
func (h *Hub) isValidToken(uid int, token string) bool {
	h.mutex_token.RLock()
	t := h.tokens[uid]
	h.mutex_token.RUnlock()
	return t == token
}
|
package main
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"../model"
_ "github.com/go-sql-driver/mysql"
)
// db is the shared MySQL handle, opened in main and used by all queries.
var db *sql.DB
// err is a package-level error holder reused across calls.
// NOTE(review): package-level err is easy to clobber accidentally; prefer
// function-local errors when refactoring.
var err error
// main opens the shared MySQL handle (package-level db), verifies the
// connection with a ping, then runs the demo queries.
// NOTE(review): credentials are hard-coded in the DSN — move to config.
func main() {
db, err = sql.Open("mysql", "root:root@tcp(127.0.0.1:3306)/test")
// Check if db is nil
if db == nil {
panic("db is nil")
}
// Check if err connection
if err != nil {
panic(err.Error())
}
// Check if connection successful
if err = db.Ping(); err != nil {
panic(err.Error())
}
fmt.Println("Connection Successful")
defer db.Close()
GetById("1")
GetAll()
}
// GetById looks up and prints the email of the user with the given id.
// Preparation, query and missing-row failures are all fatal.
func GetById(id string) {
	stmt, err := db.Prepare("select email from users where id = ?")
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()
	var email string
	if err := stmt.QueryRow(id).Scan(&email); err != nil {
		log.Fatal(err)
	}
	fmt.Println(email)
}
// GetAll loads every row from the users table, prints each user, then runs
// the dynamic-columns demo (GetUnknownColumns). Failures are fatal.
func GetAll() {
	var users = make([]model.User, 0)
	stmt, err := db.Prepare("select * from users")
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()
	rows, err := stmt.Query()
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var u model.User
		if err := rows.Scan(&u.ID, &u.Email, &u.Mobile); err != nil {
			log.Fatal(err)
		}
		users = append(users, u)
	}
	// was missing: surface errors that silently terminated the iteration
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
	for _, u := range users {
		fmt.Println(u)
	}
	//var u = user{
	//	id:     3,
	//	email:  "3@test.com",
	//	mobile: "mobile3",
	//}
	//InsertOne(u)
	GetUnknownColumns()
}
// InsertOne inserts the given user and prints the affected-row count.
// Failures are fatal.
func InsertOne(u model.User) {
	stmt, err := db.Prepare("INSERT INTO users(id, email, mobile) VALUES (?,?,?)")
	if err != nil {
		log.Fatal(err)
	}
	// was missing: release the prepared statement
	defer stmt.Close()
	res, err := stmt.Exec(u.ID, u.Email, u.Mobile)
	if err != nil {
		log.Fatal(err)
	}
	rowCnt, err := res.RowsAffected()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Affected ", rowCnt)
}
// GetUnknownColumns demonstrates scanning a result set whose column
// list is not known at compile time by targeting sql.RawBytes slots.
//
// NOTE(review): only the values of the LAST row survive the scan loop,
// and each individual column is then passed to json.Unmarshal as if it
// were a complete JSON document — that only succeeds if every column
// stores JSON. Looks questionable; confirm intent with the author.
func GetUnknownColumns() {
	stmt, err := db.Prepare("select * from users")
	if err != nil {
		log.Fatal(err)
	}
	// Previously neither stmt nor rows were closed (resource leak).
	defer stmt.Close()
	rows, err := stmt.Query()
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	cols, err := rows.Columns()
	if err != nil {
		log.Fatal(err)
	}
	// One RawBytes slot per column lets Scan succeed without knowing types.
	vals := make([]interface{}, len(cols))
	for i := range cols {
		vals[i] = new(sql.RawBytes)
	}
	for rows.Next() {
		if err := rows.Scan(vals...); err != nil {
			log.Fatal(err)
		}
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
	for _, v := range vals {
		var u model.User
		fmt.Println(*(v.(*sql.RawBytes)))
		if err := json.Unmarshal(*(v.(*sql.RawBytes)), &u); err != nil {
			log.Fatal(err)
		}
		fmt.Println(u)
	}
}
|
package sql
import (
"database/sql"
"reflect"
)
// executor abstracts the query/exec surface shared by *sql.DB, *sql.Tx
// and *sql.Conn, so helpers can run against any of them.
type executor interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
}
func getStructValue(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
if !v.IsValid() {
panic("invalid")
}
for v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
panic("not struct: " + v.Kind().String())
}
return v
}
|
package main
// Lcm returns the least common multiple of input; the empty LCM is 1.
//
// The previous product-reduction algorithm looped forever when the
// first element was 1 (candidate == result on every pass), divided by
// zero when input contained 0, and mutated the caller's slice. This
// pairwise GCD formulation has none of those defects: lcm with 0 is 0,
// and the input slice is left untouched.
func Lcm(input []uint64) uint64 {
	result := uint64(1)
	for _, v := range input {
		if v == 0 {
			return 0
		}
		// Divide before multiplying to delay overflow.
		result = result / gcdU64(result, v) * v
	}
	return result
}

// gcdU64 returns the greatest common divisor of a and b (Euclid).
func gcdU64(a, b uint64) uint64 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}
// Product returns the product of all values in input; the empty
// product is 1.
func Product(input []uint64) uint64 {
	acc := uint64(1)
	for i := range input {
		acc *= input[i]
	}
	return acc
}
|
package deeplinks
import (
"net/url"
"strconv"
)
// most of concepts stolen from tdesktop app https://git.io/JtYos
// also, some more info gathered from @deeplink channel at https://t.me/DeepLink
const (
	// ReservedSchema is the URL scheme used by Telegram deep links.
	ReservedSchema = "tg"
)
// ReservedHosts lists the well-known Telegram web hosts that should be
// treated as deep-link sources.
func ReservedHosts() []string {
	hosts := []string{
		"telegram.me",
		"telegram.dog",
		"t.me",
		"tx.me", // a few official durov's projects redirects to tx.me. looks like simple link mirror
		"telesco.pe",
	}
	return hosts
}
// https://t.me/DeepLink/16 converting to -> tg://resolve?domain=deeplink&post=16
//
// ResolveParameters carries the query parameters of a tg://resolve deep link.
type ResolveParameters struct {
	Domain string `schema:"domain"`
	Start  string `schema:"start"` // looks like not working
	//StartGroup string `schema:"startgroup"` // looks like not working
	//Game string `schema:"game"` // looks like not working
	Post    int `schema:"post"`    // if you copy some post link, use this param with domain, example in desc
	Thread  int `schema:"thread"`  // we don't know what does it mean
	Comment int `schema:"comment"` // we don't know what does it mean
}
// String renders the parameters as a tg://resolve URL, omitting every
// unset (zero-valued) field from the query string.
func (p *ResolveParameters) String() string {
	q := url.Values{}
	addStr := func(key, val string) {
		if val != "" {
			q.Add(key, val)
		}
	}
	addInt := func(key string, val int) {
		if val != 0 {
			q.Add(key, strconv.Itoa(val))
		}
	}
	addStr("domain", p.Domain)
	addStr("start", p.Start)
	addInt("post", p.Post)
	addInt("thread", p.Thread)
	addInt("comment", p.Comment)
	u := url.URL{
		Scheme:   ReservedSchema,
		Path:     "resolve",
		RawQuery: q.Encode(),
	}
	return u.String()
}
// ResolvePassportParameters models a Telegram Passport resolve link.
// These parameters work ONLY when the resolve?domain parameter is
// 'telegrampassport': tg://resolve?domain=telegrampassport&...
type ResolvePassportParameters struct {
	ResolveParameters
	// next parameters wasn't tested yet, need to ask telegram developers of full guide how to use these params
	// BotID string `schema:"bot_id"`
	// Scope string `schema:"scope"`
	// PublicKey string `schema:"public_key"`
	// CallbackURL string `schema:"callback_url"`
	// Nonce string `schema:"nonce"`
	// Payload string `schema:"payload"`
	// Scope string `schema:"scope"`
}
// JoinParameters carries the invite hash of a chat-join deep link:
// https://t.me/joinchat/abcdefg
// tg://join?invite=abcdefg
type JoinParameters struct {
	Invite string `schema:"invite,required"`
}
// String renders the parameters as a tg://join?invite=... URL.
func (p *JoinParameters) String() string {
	q := url.Values{}
	q.Set("invite", p.Invite)
	u := url.URL{
		Scheme:   ReservedSchema,
		Path:     "join",
		RawQuery: q.Encode(),
	}
	return u.String()
}
// AddstickersParameters carries the sticker-set slug of an
// add-stickers deep link: tg://addstickers?set=abcd
type AddstickersParameters struct {
	Set string `schema:"set,required"`
}
// MsgParameters carries the payload of a share link. All of the
// pseudohosts tg://msg, tg://share and tg://msg_url serve the same
// purpose and accept these parameters.
type MsgParameters struct {
	URL  string `schema:"url,required"` // link to some resource
	Text string `schema:"text"`         // text in share message
}
// ConfirmPhoneParameters carries a phone-confirmation deep link:
// tg://confirmphone?phone=88005553535&hash=hash+used+by+telegram+api
type ConfirmPhoneParameters struct {
	Phone string `schema:"phone"` // phone with + sign like +79161234567
	Hash  string `schema:"hash"`  // confirm hash which is used by the account.confirmPhone method
}
// PassportParameters is a placeholder for tg://passport.
// TODO: figure out what this link actually carries.
type PassportParameters struct {
	// Scope string `schema:"scope`
	// Nonce string `schema:"nonce`
	// Payload string `schema:"payload`
	// BotID string `schema:"bot_id`
	// PublicKey string `schema:"public_key`
	// CallbackURL string `schema:"callback_url`
}
// tg://proxy and tg://socks
// server (address)
// port (port)
// user (user)
// pass (password)
// secret (secret)
// UserParameters carries the id of a tg://user?id=1234 link.
// It is unclear whether the value is a numeric id or a username.
type UserParameters struct {
	ID string `schema:"id"` // not sure how this works
}
// tg:// filename (?)
// filename = dc_id + _ + document_id (?)
// filename = volume_id + _ + local_id + . + jpg (?)
// filename = md5(url) + . + extension (?)
// filename = "" (?)
// filename = dc_id + _ + document_id + _ + document_version + extension (?)
//
// id (document id)
// hash (access hash)
// dc (dc id)
// size (size)
// mime (mime type)
// name (document file name)
// tg:bg
// tg://bg
//
// slug (wallpaper)
// mode (blur+motion)
// color
// bg_color
// rotation
// intensity
// tg://search_hashtag
//
// hashtag
//
// (used internally by Telegram Web/Telegram React, you can use it by editing a href)
// tg://bot_command
//
// command
// bot
//
// (used internally by Telegram Web/Telegram React, you can use it by editing a href)
// tg://unsafe_url
//
// url
//
// (used internally by Telegram Web, you can use it by editing a href)
// tg://setlanguage
//
// lang
// tg://statsrefresh
//
// (something related to getStatsURL, probably not implemented yet)
// tg://openmessage
//
// user_id
// chat_id
// message_id
//
// (used internally by Android Stock (and fork), do not use, use tg://privatepost)
// tg://privatepost
//
// channel (channelId)
// post (messageId)
// thread (messageId)
// comment (messageId)
// links to theme i hope
//tg://addtheme
//
//slug
// TON stuff. Left here just for reference; we'll never implement it here
//ton://test/test?test=test&test=test
//
//ton://<domain>/<method>?<field1>=<value1>&<field2>=. . .
//
//ton://transfer/WALLET?amount=123&text=test
// tg://login
//
// token
// code
// da fuck is this???
// tg://settings
//
// themes
// devices
// folders
// language
// change_number
// tg://call
//
// format
// name
// phone
// REALLY specific link, used for login in telegram desktop, link works only on mobile apps
// tg://scanqr
// add contact, u no
// tg://addcontact
//
// name
// phone
// tg://search
// query
// The following work as http(s) links, but generally all of them are covered by tg://; left here for reference
//
//
// this is really specific link, works on ios
// https://t.me/@id1234
//
// joinchat/
//
// addstickers/
//
// addtheme/
//
// iv/
// url
// rhash
//
// msg/
// share/
// share/url
// url
// text
// (Only android)
//
// confirmphone
// phone
// hash
//
// start
//
// startgroup
//
// game
//
// socks
// proxy
// server (address)
// port (port)
// user (user)
// pass (password)
// secret (secret)
//
// setlanguage/
// (12char max)
//
// bg
// slug
// mode
// intensity
// bg_color
//
// c/
// (/chatid/messageid/ t.me/tgbeta/3539)
// threadId
// comment
//
// s/
// (channel username/messageid)
// q (search query)
//
// ?comment=
|
package main
import (
"sync"
"context"
"time"
)
// --------------------- WAIT pattern -------------------------
// main demonstrates the two classic shutdown-coordination patterns:
// WaitGroup (wait for workers) and context cancellation.
func main() {
	wg := sync.WaitGroup{}
	wg.Add(3)
	go func() {
		defer wg.Done()
		//do...
	}()
	go func() {
		defer wg.Done()
		//do...
	}()
	go func() {
		defer wg.Done()
		//do...
	}()
	// Bug fix: without Wait the WaitGroup above was pointless — main
	// could fall through before the three workers finished.
	wg.Wait()
	//--------------------------- CANCEL pattern -------------------------
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	go Proc(ctx)
	go Proc(ctx)
	go Proc(ctx)
	// cancel after 1s
	time.Sleep(time.Second)
	cancel()
}
// Proc loops until ctx is cancelled, then returns.
//
// NOTE(review): with an empty default branch this is a busy-spin loop
// that burns a full CPU core while waiting; real work belongs in the
// default case, otherwise a plain <-ctx.Done() would be preferable.
func Proc(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		default:
			//do... cancellation-aware work goes here
		}
	}
}
|
package main
import (
"encoding/gob"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"github.com/docker/go-plugins-helpers/volume"
)
// TODO: Separate the filesystems into libraries
// Supported union filesystem back ends.
const (
	fsAUFS    = iota // aufs multi-branch union mount
	fsOverlay        // overlayfs; Create rejects more than one layer for it
)
// unionMountVolume is the state of one volume: which filesystem to
// use, the layer paths, where it is mounted, and how many containers
// currently have it mounted.
type unionMountVolume struct {
	Filesystem int
	Layers     []string
	MountPoint string
	RefCount   uint
	m          sync.Mutex // guards RefCount and mount/unmount transitions
}
// unionMountDriver implements the docker volume plugin API for
// union-filesystem mounts rooted at RootDir.
type unionMountDriver struct {
	RootDir   string
	DefaultFS int
	Volumes   map[string]*unionMountVolume
	m         sync.Mutex // guards Volumes and state persistence
}
// newUnionMountDriver constructs a driver rooted at rootDir with the
// given default filesystem and an empty volume table.
func newUnionMountDriver(rootDir string, defaultFS int) *unionMountDriver {
	d := unionMountDriver{
		RootDir:   rootDir,
		DefaultFS: defaultFS,
		Volumes:   map[string]*unionMountVolume{},
	}
	return &d
}
// saveState serializes the driver (exported fields only — gob skips
// the unexported mutexes) to <RootDir>/state.gob. Errors are still
// swallowed (see TODOs).
//
// Fix: the file is now opened with O_TRUNC; previously a state blob
// shorter than the previous one left stale trailing bytes, corrupting
// the gob stream on the next load.
func (d *unionMountDriver) saveState() {
	d.m.Lock()
	defer d.m.Unlock()
	path := filepath.Join(d.RootDir, "state.gob")
	file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		// TODO: Log error
		return
	}
	defer file.Close()
	enc := gob.NewEncoder(file)
	if err := enc.Encode(d); err != nil {
		// TODO: Log error
		_ = err
	}
}
// mountPoint returns the canonical mount path for volName:
// <RootDir>/volumes/<volName>.
func (d *unionMountDriver) mountPoint(volName string) string {
	return filepath.Join(d.RootDir, "volumes", volName)
}
// Create registers a new volume from the request options.
// Required option "layers" is a colon-separated list of absolute,
// existing paths; optional "filesystem" overrides the driver default.
//
// Fix: the layer-path error message said "is not relative" while the
// check rejects non-absolute paths — it now says "is not absolute".
func (d *unionMountDriver) Create(r volume.Request) volume.Response {
	d.m.Lock()
	defer d.m.Unlock()
	// Try to read the layers option
	layers := make([]string, 0)
	if str, ok := r.Options["layers"]; ok && len(str) > 0 {
		layers = strings.Split(str, ":")
		// Check if paths are absolute
		// REVIEW: the possibility of layering docker's named volumes
		for _, path := range layers {
			if !filepath.IsAbs(path) {
				return volume.Response{Err: fmt.Sprintf("layer path \"%s\" is not absolute", path)}
			}
			if _, err := os.Stat(path); os.IsNotExist(err) {
				return volume.Response{Err: fmt.Sprintf("layer path \"%s\" does not exist", path)}
			}
		}
	} else {
		return volume.Response{Err: "no layers defined"}
	}
	// Try to read the filesystem option
	filesystem := d.DefaultFS
	if str, ok := r.Options["filesystem"]; ok {
		if fs, err := fsFromString(str); err == nil {
			filesystem = fs
		} else {
			return volume.Response{Err: err.Error()}
		}
	}
	// FIXME: Support multiple layers for overlay
	if filesystem == fsOverlay && len(layers) > 1 {
		return volume.Response{Err: "multiple layers with the overlay filesystem is not implemented"}
	}
	// Check for duplicate volume name before touching the filesystem.
	if _, ok := d.Volumes[r.Name]; ok {
		return volume.Response{Err: fmt.Sprintf("volume \"%s\" already exists", r.Name)}
	}
	// Create Mount Point
	mountPoint := d.mountPoint(r.Name)
	if err := os.MkdirAll(mountPoint, 0755); err != nil {
		return volume.Response{Err: err.Error()}
	}
	d.Volumes[r.Name] = &unionMountVolume{
		Filesystem: filesystem,
		Layers:     layers,
		MountPoint: mountPoint,
	}
	go d.saveState()
	return volume.Response{}
}
// Remove deletes the named volume's mount point and forgets the
// volume, but only when no container holds a reference to it.
//
// NOTE(review): when RefCount > 0 this silently returns success
// without removing anything — callers cannot distinguish "removed"
// from "still in use". Returning an error there may be more correct;
// confirm against the volume plugin contract.
func (d *unionMountDriver) Remove(r volume.Request) volume.Response {
	d.m.Lock()
	defer d.m.Unlock()
	// Check if volume exists
	vol, ok := d.Volumes[r.Name]
	if !ok {
		return volume.Response{Err: fmt.Sprintf("volume (%s) does not exist", r.Name)}
	}
	vol.m.Lock()
	defer vol.m.Unlock()
	if vol.RefCount == 0 {
		if err := os.RemoveAll(vol.MountPoint); err != nil {
			return volume.Response{Err: fmt.Sprintf("error removing volume path '%s'", vol.MountPoint)}
		}
		delete(d.Volumes, r.Name)
	}
	go d.saveState()
	return volume.Response{}
}
// Mount mounts the volume on first use and bumps its reference count.
//
// Fix: the error from mountCmd was previously discarded, so an
// unsupported filesystem silently ran `sh -c ""` (which succeeds) and
// the volume was reported mounted without being mounted.
func (d *unionMountDriver) Mount(r volume.MountRequest) volume.Response {
	// Check if volume exists
	d.m.Lock()
	vol, ok := d.Volumes[r.Name]
	d.m.Unlock()
	if !ok {
		return volume.Response{Err: fmt.Sprintf("volume (%s) does not exist", r.Name)}
	}
	vol.m.Lock()
	defer vol.m.Unlock()
	// Mount Volume if not already mounted
	if vol.RefCount == 0 {
		cmd, err := mountCmd(vol)
		if err != nil {
			return volume.Response{Err: err.Error()}
		}
		if err := exec.Command("sh", "-c", cmd).Run(); err != nil {
			return volume.Response{Err: err.Error()}
		}
	}
	vol.RefCount++
	return volume.Response{Mountpoint: vol.MountPoint}
}
// Path reports the mount point of the named volume.
func (d *unionMountDriver) Path(r volume.Request) volume.Response {
	d.m.Lock()
	vol, found := d.Volumes[r.Name]
	d.m.Unlock()
	if !found {
		return volume.Response{Err: fmt.Sprintf("volume (%s) does not exist", r.Name)}
	}
	return volume.Response{Mountpoint: vol.MountPoint}
}
// Unmount drops one reference to the volume, actually unmounting when
// the last user releases it.
//
// Fixes: the not-mounted case is rejected before any decrement, and
// the umount error is now checked — previously it was ignored and the
// reference count was decremented even when the unmount failed,
// leaving the bookkeeping inconsistent with the kernel state.
func (d *unionMountDriver) Unmount(r volume.UnmountRequest) volume.Response {
	// Check if volume exists
	d.m.Lock()
	vol, ok := d.Volumes[r.Name]
	d.m.Unlock()
	if !ok {
		return volume.Response{Err: fmt.Sprintf("volume (%s) does not exist", r.Name)}
	}
	vol.m.Lock()
	defer vol.m.Unlock()
	if vol.RefCount == 0 {
		return volume.Response{Err: fmt.Sprintf("volume (%s) is not mounted", r.Name)}
	}
	if vol.RefCount == 1 {
		if err := exec.Command("sh", "-c", fmt.Sprintf("umount -f %s", vol.MountPoint)).Run(); err != nil {
			return volume.Response{Err: err.Error()}
		}
	}
	vol.RefCount--
	return volume.Response{}
}
// Get returns the name and mount point of the named volume.
func (d *unionMountDriver) Get(r volume.Request) volume.Response {
	d.m.Lock()
	vol, found := d.Volumes[r.Name]
	d.m.Unlock()
	if !found {
		return volume.Response{Err: fmt.Sprintf("volume (%s) does not exist", r.Name)}
	}
	info := &volume.Volume{Name: r.Name, Mountpoint: vol.MountPoint}
	return volume.Response{Volume: info}
}
// List enumerates all known volumes with their mount points.
func (d *unionMountDriver) List(r volume.Request) volume.Response {
	d.m.Lock()
	defer d.m.Unlock()
	out := make([]*volume.Volume, 0, len(d.Volumes))
	for name, vol := range d.Volumes {
		out = append(out, &volume.Volume{Name: name, Mountpoint: vol.MountPoint})
	}
	return volume.Response{Volumes: out}
}
// Capabilities advertises this driver as local-scoped.
func (d *unionMountDriver) Capabilities(r volume.Request) volume.Response {
	caps := volume.Capability{Scope: "local"}
	return volume.Response{Capabilities: caps}
}
// fsFromString maps a user-supplied filesystem name (case-insensitive)
// to an fs constant. Unknown names return fsAUFS plus an error that
// now names the rejected value, making misconfiguration diagnosable.
func fsFromString(fs string) (int, error) {
	switch strings.ToLower(fs) {
	case "aufs":
		return fsAUFS, nil
	case "overlay", "overlayfs":
		return fsOverlay, nil
	default:
		return fsAUFS, fmt.Errorf("unsupported filesystem %q", fs)
	}
}
// mountCmd builds the shell command that mounts v at v.MountPoint.
//
// NOTE(review): for aufs the mount point itself is listed as the first
// branch (br=<mountpoint>:<layers>) — presumably to act as the
// writable top branch; confirm this is intentional. For overlay only
// Layers[0] is used as lowerdir and the mount point doubles as
// upperdir; modern overlayfs normally also requires workdir= — verify
// on the target kernels.
func mountCmd(v *unionMountVolume) (string, error) {
	switch v.Filesystem {
	case fsAUFS:
		return fmt.Sprintf("mount -t aufs -o br=%s:%s none %s", v.MountPoint, strings.Join(v.Layers, ":"), v.MountPoint), nil
	case fsOverlay:
		return fmt.Sprintf("mount -t overlay overlay -o lowerdir=%s,upperdir=%s %s", v.Layers[0], v.MountPoint, v.MountPoint), nil
	default:
		return "", fmt.Errorf("undefined or unsupported filesystem")
	}
}
|
package main
import (
"fmt"
"github.com/arxanchain/sdk-go-common/crypto/ecc"
"encoding/base64"
)
func main() {
keyfile := "../../certs/ecc/prime256v1/server.key"
certfile := "../../certs/ecc/prime256v1/server.crt"
eccLib,err := ecc.NewECCCryptoLib(keyfile, certfile)
if err!=nil {
fmt.Println(err)
}
fmt.Println(eccLib)
plaintext := []byte("hello")
ciphertext,_ := eccLib.Encrypt(plaintext)
fmt.Println(base64.StdEncoding.EncodeToString(ciphertext))
detext,_ := eccLib.Decrypt(ciphertext)
fmt.Println(string(detext))
// signed, err := eccLib.Sign(plaintext)
// fmt.Println(signed)
// fmt.Println(err)
// err := eccLib.Verify(plaintext, signature)
// fmt.Println(err)
} |
package constants
const (
	// LabelEdgeWorker is used to identify if a node is a edge node ("true")
	// or a cloud node ("false")
	LabelEdgeWorker = "alibabacloud.com/is-edge-worker"
	// AnnotationAutonomy is used to identify if a node is autonomous
	AnnotationAutonomy = "node.beta.alibabacloud.com/autonomy"
	// YurtControllerManagerDeployment defines the yurt controller manager
	// deployment in yaml format
	YurtControllerManagerDeployment = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: yurt-controller-manager
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: yurt-controller-manager
  template:
    metadata:
      labels:
        app: yurt-controller-manager
    spec:
      affinity:
        nodeAffinity:
          # we prefer allocating ecm on cloud node
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            preference:
              matchExpressions:
              - key: alibabacloud.com/is-edge-worker
                operator: In
                values:
                - "false"
      containers:
      - name: yurt-controller-manager
        image: openyurt/yurt-controller-manager:latest
        command:
        - yurt-controller-manager
`
	// ServantJobTemplate defines the servant job in yaml format
	ServantJobTemplate = `
apiVersion: batch/v1
kind: Job
metadata:
  name: {{.jobName}}
  namespace: kube-system
spec:
  template:
    spec:
      hostPID: true
      restartPolicy: OnFailure
      nodeName: {{.nodeName}}
      volumes:
      - name: host-var-tmp
        hostPath:
          path: /var/tmp
          type: Directory
      containers:
      - name: yurtctl-servant
        image: openyurt/yurtctl-servant:latest
        command:
        - /bin/sh
        - -c
        args:
        - "sed -i 's|__kubernetes_service_host__|$(KUBERNETES_SERVICE_HOST)|g;s|__kubernetes_service_port_https__|$(KUBERNETES_SERVICE_PORT_HTTPS)|g;s|__node_name__|$(NODE_NAME)|g' /var/lib/openyurt/setup_edgenode && cp /var/lib/openyurt/setup_edgenode /tmp && nsenter -t 1 -m -u -n -i /var/tmp/setup_edgenode {{.action}} {{.provider}}"
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /tmp
          name: host-var-tmp
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
`
)
|
package main
import (
"fmt"
"database/sql"
_ "github.com/godror/godror"
)
// main connects to an Oracle database via godror and prints the row
// count of ani.ani_airports_t.
//
// Fixes: Scan and Rows iteration errors were previously ignored, so a
// failed scan silently printed an empty/stale value.
func main() {
	db, err := sql.Open("godror", "ani/ani5@192.168.12.215:1521/aspwdm")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()
	rows, err := db.Query("select count(*) from ani.ani_airports_t")
	if err != nil {
		fmt.Println("Error running query")
		fmt.Println(err)
		return
	}
	defer rows.Close()
	var thedate string
	for rows.Next() {
		if err := rows.Scan(&thedate); err != nil {
			fmt.Println(err)
			return
		}
	}
	if err := rows.Err(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("airports count is: %s\n", thedate)
}
|
package command
import (
"fmt"
"github.com/kenlabs/pando/pkg/system"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"os"
"path/filepath"
)
// InitCmd builds the `init` sub-command, which prepares the Pando root
// directory, determines the bandwidth setting, generates a peer
// identity and writes the initial config file.
func InitCmd() *cobra.Command {
	run := func(cmd *cobra.Command, args []string) error {
		if err := checkPandoRoot(); err != nil {
			return err
		}
		configFile := filepath.Join(Opt.PandoRoot, Opt.ConfigFile)
		fmt.Println("Init pando-server configs at ", configFile)
		if err := checkConfigExists(configFile); err != nil {
			return err
		}
		if err := setBandwidth(); err != nil {
			return err
		}
		if err := setIdentity(); err != nil {
			return err
		}
		if err := saveConfig(configFile); err != nil {
			return err
		}
		fmt.Printf("init complete.\n")
		return nil
	}
	return &cobra.Command{
		Use:   "init",
		Short: "Initialize server config file.",
		RunE:  run,
	}
}
// checkPandoRoot ensures Opt.PandoRoot exists (creating it when
// missing) and is writable.
func checkPandoRoot() error {
	const failedError = "check pando root failed:\n\t%v\n"
	exists, err := system.IsDirExists(Opt.PandoRoot)
	if err != nil {
		return fmt.Errorf(failedError, err)
	}
	if !exists {
		fmt.Printf("pando root %s does not exist, try to create...\n", Opt.PandoRoot)
		if mkErr := os.MkdirAll(Opt.PandoRoot, 0755); mkErr != nil {
			return fmt.Errorf("create pando root %s failed: %v", Opt.PandoRoot, mkErr)
		}
	}
	writable, err := system.IsDirWritable(Opt.PandoRoot)
	if err != nil {
		return fmt.Errorf(failedError, err)
	}
	if !writable {
		return fmt.Errorf("pando root %s is not writable\n", Opt.PandoRoot)
	}
	return nil
}
// checkConfigExists fails when configFile already exists, so init
// never silently overwrites a previous configuration.
func checkConfigExists(configFile string) error {
	exists, err := system.IsFileExists(configFile)
	if err != nil {
		return fmt.Errorf("init config failed: %v", err)
	}
	if exists {
		return fmt.Errorf("config file exists: %s", configFile)
	}
	return nil
}
// setBandwidth fills Opt.RateLimit.Bandwidth, either from a live speed
// test or with the 10.0 fallback when speed testing is disabled.
func setBandwidth() error {
	if Opt.DisableSpeedTest {
		Opt.RateLimit.Bandwidth = 10.0
		return nil
	}
	var err error
	Opt.RateLimit.Bandwidth, err = system.TestInternetSpeed(false)
	if err != nil {
		return err
	}
	return nil
}
// setIdentity generates a fresh libp2p-style peer identity and stores
// it in Opt.Identity.
func setIdentity() error {
	var err error
	Opt.Identity.PeerID, Opt.Identity.PrivateKey, err = system.CreateIdentity()
	if err != nil {
		return err
	}
	return nil
}
// saveConfig marshals Opt to YAML and writes it to configFile.
//
// Fixes over the previous version: the OpenFile error is checked
// BEFORE deferring Close (previously a failed open still deferred
// Close on a nil file), and the file is opened with O_TRUNC so a
// shorter config cannot leave stale trailing bytes from an earlier,
// longer write.
func saveConfig(configFile string) error {
	buff, err := yaml.Marshal(Opt)
	if err != nil {
		return err
	}
	file, err := os.OpenFile(configFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		return err
	}
	defer file.Close()
	_, err = file.Write(buff)
	return err
}
|
package service
import (
"errors"
"net/http"
"github.com/yerlan-tleubekov/go-redis/internal/models"
"github.com/yerlan-tleubekov/go-redis/pkg/jwt"
)
// Authenticator describes the authentication operations offered by
// Service.
//
// SignIn's signature now matches the concrete implementation in this
// file (previously declared as SignIn(userID int) with no results, so
// *Service never actually satisfied this interface).
type Authenticator interface {
	SignUp(user *models.User) error
	SignIn(userID int) (string, error, int)
}
// SignUp persists the new user via the repository layer, propagating
// any creation error unchanged.
func (service *Service) SignUp(user *models.User) error {
	return service.repository.CreateUser(user)
}
// SignIn issues a JWT for userID and records it in the repository,
// returning the token together with an error and an HTTP status code.
func (service *Service) SignIn(userID int) (string, error, int) {
	token, err := jwt.CreateToken(uint64(userID))
	if err != nil {
		return "", errors.New("Server error"), http.StatusInternalServerError
	}
	err = service.repository.SignIn(userID, token)
	if err != nil {
		return "", err, http.StatusInternalServerError
	}
	return token, nil, http.StatusOK
}
|
package core
import (
"log"
"github.com/gorilla/websocket"
)
// The Connection type represents a websocket connection.
// It pairs the raw websocket with a Handler for message
// encoding/decoding and event callbacks, and serializes outbound
// traffic through sendCh.
type Connection struct {
	conn    *websocket.Conn
	handler Handler
	sendCh  chan Message // outbound queue drained by send()
}
// serve pumps the connection: reads run on their own goroutine while
// this goroutine drains the outbound queue; the socket is closed when
// the send loop returns.
func (c *Connection) serve() {
	defer c.conn.Close()
	go c.receive()
	c.send()
}
// receive reads frames until the socket errors, decoding each one and
// raising EventRecv. On exit it closes sendCh (stopping the send loop)
// and then the socket.
func (c *Connection) receive() {
	defer c.conn.Close()
	defer close(c.sendCh)
	for {
		_, raw, err := c.conn.ReadMessage()
		if err != nil {
			log.Printf("ReadMessage error : %v", err)
			return
		}
		decoded := c.handler.Decode(raw)
		c.handler.OnEvent(EventRecv, c, decoded)
	}
}
// send drains sendCh, encoding and writing each message as a text
// frame and raising EventSend. It returns when the channel is closed
// or a write fails, closing the socket either way.
func (c *Connection) send() {
	defer c.conn.Close()
	for {
		msg, ok := <-c.sendCh
		if !ok {
			log.Println("SendCh closed")
			return
		}
		payload := c.handler.Encode(msg)
		if err := c.conn.WriteMessage(websocket.TextMessage, payload); err != nil {
			log.Println("WriteMessage error: ", err)
			return
		}
		c.handler.OnEvent(EventSend, c, msg)
	}
}
// Send sends a message to the connection.
// It blocks until the send loop picks the message up (sendCh is
// unbuffered) and panics if called after receive() has closed the
// channel.
func (c *Connection) Send(m Message) {
	c.sendCh <- m
}
// Close closes a connection.
// It shuts the underlying websocket directly; the receive/send loops
// then terminate through their own error paths.
func (c *Connection) Close() {
	c.conn.Close()
}
// NewConnection creates a websocket connection wrapping c and using h
// for message handling. sendCh is intentionally unbuffered so Send
// blocks until the frame is handed to the writer.
func NewConnection(c *websocket.Conn, h Handler) *Connection {
	conn := Connection{conn: c, handler: h, sendCh: make(chan Message)}
	return &conn
}
|
package main
// main is a placeholder entry point.
//
// The previous body was a bare `continue`, which is a compile error
// outside of a for loop ("continue is not in a loop").
func main() {}
|
package quiz
import (
"fmt"
"time"
"github.com/fedepaol/quiz/interaction"
)
// Question represents a single quiz question with answer.
type Question struct {
	Question string // the prompt shown to the player
	Answer   string // the exact expected reply (compared with ==)
}
// QuestionService implements all the methods related to a single quiz
// question. Ask reports whether the question was answered correctly.
type QuestionService interface {
	Ask() bool
}
// Quiz represents a quiz run with all the questions and answers.
type Quiz struct {
	questions []Question
	Asker     interaction.Asker // prompts the player and returns the reply
	// questionsnum is not referenced by any method in this file;
	// NOTE(review): possibly dead — confirm before removing.
	questionsnum int
}
// Result summarizes one quiz run: how many questions were asked before
// the timeout and how many were answered correctly.
type Result struct {
	goodreplies    int
	questionsasked int
}
// QuizService holds all the methods that can be applied to a Quiz.
type QuizService interface {
Run()
AddQuestion(Question)
}
// Run runs an iteration of a quiz, stopping early when timeout fires.
//
// Questions are asked on a separate goroutine so the select below can
// cut the quiz short. Fix: replies is now buffered to len(questions);
// previously it was unbuffered, so after a timeout the asking
// goroutine stayed blocked on its next send forever (goroutine leak).
// It may still block inside Asker.Ask waiting for input — that is
// inherent to the blocking Asker interface.
func (q *Quiz) Run(timeout <-chan time.Time) (res Result) {
	replies := make(chan bool, len(q.questions))
	go func() {
		defer close(replies)
		for _, qq := range q.questions {
			reply := q.Asker.Ask(qq.Question)
			replies <- reply == qq.Answer
		}
	}()
T:
	for {
		select {
		case success, ok := <-replies:
			if !ok {
				break T // all questions answered
			}
			if success {
				res.goodreplies++
			}
			res.questionsasked++
		case <-timeout:
			break T
		}
	}
	return
}
// AddQuestion adds a question to the quiz.
// Questions are asked in insertion order by Run.
func (q *Quiz) AddQuestion(question Question) {
	q.questions = append(q.questions, question)
}
// String formats the result as
// "<good> good replies out of <asked> questions asked".
func (r Result) String() (res string) {
	return fmt.Sprintf("%d good replies out of %d questions asked", r.goodreplies, r.questionsasked)
}
|
package gotezos
import "github.com/pkg/errors"
// CycleService is a struct wrapper for cycle functions.
type CycleService struct {
	gt *GoTezos // parent client used for all RPC calls
}
// newCycleService returns a new CycleService bound to gt.
// (The previous comment named the exported spelling "NewCycleService",
// which does not match this unexported constructor.)
func (gt *GoTezos) newCycleService() *CycleService {
	return &CycleService{gt: gt}
}
// GetCurrent gets the current cycle of the chain by fetching the head
// block and reading the cycle from its metadata level.
func (s *CycleService) GetCurrent() (int, error) {
	head, err := s.gt.Block.GetHead()
	if err != nil {
		return 0, errors.Wrap(err, "could not get current cycle")
	}
	return head.Metadata.Level.Cycle, nil
}
|
// Copyright (C) 2019 Michael J. Fromberger. All Rights Reserved.
package otp_test
import (
"crypto/sha256"
"encoding/base64"
"fmt"
"log"
"github.com/creachadair/otp"
)
func fixedTime(z uint64) func() uint64 { return func() uint64 { return z } }
// Example demonstrates HOTP and TOTP generation with a non-default
// hash (SHA-256) and 8 digits; the fixed time step pins the TOTP
// output so the // Output block stays stable.
func Example() {
	cfg := otp.Config{
		Hash:   sha256.New, // default is sha1.New
		Digits: 8,          // default is 6
		// By default, time-based OTP generation uses time.Now. You can plug in
		// your own function to control how time steps are generated.
		// This example uses a fixed time step so the output will be consistent.
		TimeStep: fixedTime(1),
	}
	// 2FA setup tools often present the shared secret as a base32 string.
	// ParseKey decodes this format.
	if err := cfg.ParseKey("MFYH A3DF EB2G C4TU"); err != nil {
		log.Fatalf("Parsing key: %v", err)
	}
	fmt.Println("HOTP", 0, cfg.HOTP(0))
	fmt.Println("HOTP", 1, cfg.HOTP(1))
	fmt.Println()
	fmt.Println("TOTP", cfg.TOTP())
	// Output:
	// HOTP 0 59590364
	// HOTP 1 86761489
	//
	// TOTP 86761489
}
// ExampleConfig_customFormat shows a custom output alphabet.
func ExampleConfig_customFormat() {
	// Use settings compatible with Steam Guard: 5 characters and a custom alphabet.
	cfg := otp.Config{
		Digits:   5,
		Format:   otp.FormatAlphabet("23456789BCDFGHJKMNPQRTVWXY"),
		TimeStep: fixedTime(9876543210),
	}
	if err := cfg.ParseKey("CQKQ QEQR AAR7 77X5"); err != nil {
		log.Fatalf("Parsing key: %v", err)
	}
	fmt.Println(cfg.TOTP())
	// Output:
	// FKNK3
}
// ExampleConfig_rawFormat shows a fully custom formatting function.
func ExampleConfig_rawFormat() {
	// The default formatting functions use the RFC 4226 truncation rules, but a
	// custom formatter may do whatever it likes with the HMAC value.
	// This example converts to base64.
	cfg := otp.Config{
		Digits: 10,
		Format: func(hash []byte, nb int) string {
			return base64.StdEncoding.EncodeToString(hash)[:nb]
		},
	}
	if err := cfg.ParseKey("MNQWE YTBM5 SAYTS MVQXI"); err != nil {
		log.Fatalf("Parsing key: %v", err)
	}
	fmt.Println(cfg.HOTP(17))
	// Output:
	// j0fLbXLh1Z
}
|
/**
Adjacency list.
Supports directed and undirected graphs, weighted or unweighted
(all four combinations).
The vertex array is used starting from index 1.
*/
package graph
import (
"fmt"
"github.com/kakiezhang/Algo/geekTime/linkedlist"
)
// Graph is an adjacency-list graph: Vtx[i] holds the edge list of
// vertex i (slot 0 is unused; valid indices start at 1).
type Graph struct {
	Vtx []*linkedlist.DoublyLinkedList // vertex array: one edge list per vertex
	Max int                            // len(Vtx), i.e. highest vertex index + 1
}
// Vertex is one adjacency-list entry: the target vertex plus the
// weight of the edge leading to it.
type Vertex struct {
	data   interface{} // target vertex id (stored as int by newVertex)
	weight int         // edge weight
}
// String renders the adjacency list, one "[index] => list" line per
// vertex slot (including the unused slot 0).
func (g *Graph) String() string {
	out := ""
	for i, lst := range g.Vtx {
		out += fmt.Sprintf("[%d] => %v\n", i, lst)
	}
	return out
}
// String renders the entry as "[v:<target> w:<weight>]".
func (v *Vertex) String() string {
	return fmt.Sprintf("[v:%v w:%d]", v.data, v.weight)
}
// GetData exposes the target vertex id stored in this entry.
func (v *Vertex) GetData() interface{} {
	return v.data
}
// NewGraph creates a graph able to hold vertices 1..max.
// Vtx gets max+1 slots because slot 0 is unused; Max records that
// slice length for bounds checks in AddEdge.
func NewGraph(max int) *Graph {
	return &Graph{
		Vtx: make([]*linkedlist.DoublyLinkedList, max+1),
		Max: max + 1,
	}
}
// newVertex builds an adjacency entry pointing at vertex v with edge
// weight w.
func newVertex(v, w int) *Vertex {
	return &Vertex{
		data:   v,
		weight: w,
	}
}
// AddEdge appends a directed edge from vertex s to entry v, lazily
// creating s's edge list.
//
// Bounds fix: Max == len(Vtx), so valid indices are 1..Max-1. The old
// check (s >= g.Max+1) let s == Max through and then panicked with an
// index-out-of-range on g.Vtx[s].
func (g *Graph) AddEdge(s int, v *Vertex) {
	if s <= 0 {
		panic("vertex index has to be gt zero")
	}
	if s >= g.Max {
		panic("vertex index has to be le max")
	}
	if g.Vtx[s] == nil {
		g.Vtx[s] = linkedlist.NewDoublyLinkedList()
	}
	g.Vtx[s].Insert(v)
}
// AddOneEdge adds a single directed edge s -> t with weight w.
func (g *Graph) AddOneEdge(s, t, w int) {
	g.AddEdge(s, newVertex(t, w))
}
// AddTwoEdge adds an undirected edge between s and t (both s -> t and
// t -> s) with weight w.
func (g *Graph) AddTwoEdge(s, t, w int) {
	g.AddEdge(s, newVertex(t, w))
	g.AddEdge(t, newVertex(s, w))
}
// FindEdge reports whether a directed edge s -> t exists.
//
// Fixes: out-of-range s and a vertex with no edge list yet now return
// false instead of indexing/dereferencing a nil entry (the old code
// called FindNode on g.Vtx[s] unconditionally), and the redundant
// if/return-true/return-false is collapsed into a direct comparison.
func (g *Graph) FindEdge(s int, t interface{}) bool {
	if s <= 0 || s >= g.Max || g.Vtx[s] == nil {
		return false
	}
	key := func(v interface{}) interface{} {
		return v.(*Vertex).data
	}
	return g.Vtx[s].FindNode(t, key) != nil
}
|
package utils
import (
"context"
"github.com/azak-azkaran/goproxy"
"net"
"net/http"
"os"
"testing"
"time"
)
// TestGetResponse exercises GetResponse twice: once directly against
// google.de and once through a locally started goproxy instance on
// :8082, then shuts the proxy down.
//
// NOTE(review): this test needs outbound internet access and a free
// port 8082; it will fail in sandboxed CI.
func TestGetResponse(t *testing.T) {
	Init(os.Stdout, os.Stdout, os.Stderr)
	resp, err := GetResponse("", "https://www.google.de")
	if err != nil {
		t.Error("Error while requesting without proxy, ", err)
	}
	if resp.StatusCode != 200 {
		t.Error("Google could not be requested, ", resp.Status)
	}
	proxy := goproxy.NewProxyHttpServer()
	proxy.ConnectDial = func(network, address string) (net.Conn, error) {
		return net.DialTimeout(network, address, 5*time.Second)
	}
	var server *http.Server
	go func() {
		Init(os.Stdout, os.Stdout, os.Stderr)
		Info.Println("serving end proxy server at localhost:8082")
		server = &http.Server{
			Addr:    "localhost:8082",
			Handler: proxy,
		}
		err := server.ListenAndServe()
		if err != http.ErrServerClosed {
			t.Error("Other Error then ServerClose", err)
		}
	}()
	// Give the proxy goroutine time to bind before using it.
	time.Sleep(1 * time.Second)
	resp, err = GetResponse("http://localhost:8082", "https://www.google.de")
	if err != nil {
		t.Error("Error while requesting without proxy, ", err)
	}
	if resp.StatusCode != 200 {
		t.Error("Google could not be requested, ", resp.Status)
	}
	err = server.Shutdown(context.TODO())
	if err != nil {
		t.Error("Error while shutting down server")
	}
}
// TestGetResponseDump verifies that a proxyless GetResponseDump of
// google.de yields a non-empty dump. Requires outbound internet access.
func TestGetResponseDump(t *testing.T) {
	Init(os.Stdout, os.Stdout, os.Stderr)
	dump, err := GetResponseDump("", "https://www.google.de")
	if err != nil {
		t.Error("Error while requesting without proxy, ", err)
	}
	if len(dump) == 0 {
		t.Error("Google response was empty")
	}
}
|
// Package sr implements an Ingest for Standard Release Legacy foods
package sr
import (
"encoding/csv"
"fmt"
"io"
"log"
"os"
"strconv"
"time"
"github.com/littlebunch/gnutdata-bfpd-api/admin/ingest"
"github.com/littlebunch/gnutdata-bfpd-api/admin/ingest/dictionaries"
"github.com/littlebunch/gnutdata-bfpd-api/ds"
fdc "github.com/littlebunch/gnutdata-bfpd-api/model"
)
var (
	// cnts accumulates per-record ingest counts across the goroutines
	// in this file.
	cnts ingest.Counts
	// err is shared package-level error state.
	// NOTE(review): sharing one err across concurrent ingest paths is
	// race-prone; consider per-function error values.
	err error
)
// Sr implements the ingest interface for Standard Release Legacy foods.
type Sr struct {
	Doctype string // source tag written into each ingested food document
}
// ProcessFiles loads a set of Standard Release csv files.
// foods is ingested synchronously first; servings, nutrients and the
// ndbno crosswalk then run concurrently, each reporting completion or
// failure on its own channel.
//
// Bug fix: the nutrients case previously tested the package-level err
// instead of errn, so nutrient-ingest errors were reported as success.
func (p Sr) ProcessFiles(path string, dc ds.DataSource) error {
	var errs, errn, errndb error
	rcndb, rcs, rcn := make(chan error), make(chan error), make(chan error)
	c1, c2, c3 := true, true, true
	cnts.Foods, err = foods(path, dc, p.Doctype)
	if err != nil {
		log.Fatal(err)
	}
	go ndbnoCw(path, dc, rcndb)
	go servings(path, dc, rcs)
	go nutrients(path, dc, rcn)
	for c1 || c2 || c3 {
		select {
		case errs, c1 = <-rcs:
			if c1 {
				if errs != nil {
					fmt.Printf("Error from servings: %v\n", errs)
				} else {
					fmt.Printf("Servings ingest complete.\n")
				}
			}
		case errn, c2 = <-rcn:
			if c2 {
				if errn != nil {
					fmt.Printf("Error from nutrients: %v\n", errn)
				} else {
					fmt.Printf("Nutrient ingest complete.\n")
				}
			}
		case errndb, c3 = <-rcndb:
			if c3 {
				if errndb != nil {
					fmt.Printf("Error from ndbno crosswalk: %v\n", errndb)
				} else {
					fmt.Printf("ndbno crosswalk complete.\n")
				}
			}
		}
	}
	log.Printf("Finished. Counts: %d Foods %d Servings %d Nutrients\n", cnts.Foods, cnts.Servings, cnts.Nutrients)
	return err
}
// foods ingests path/food.csv into dc, attaching food-group info from
// the dictionary. Returns the running food count.
//
// Fix: the opened csv file is now closed (previously leaked).
func foods(path string, dc ds.DataSource, t string) (int, error) {
	var il interface{}
	// NOTE(review): dt is a nil *fdc.DocType; ToString is presumably
	// safe on a nil receiver — confirm in the model package.
	var dt *fdc.DocType
	dtype := dt.ToString(fdc.FGSR)
	fn := path + "food.csv"
	cnt := 0
	_ = cnt
	f, err := os.Open(fn)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	err = dc.GetDictionary("gnutdata", dtype, 0, 500, &il)
	if err != nil {
		fmt.Printf("Cannot load food group dictionary")
		return 0, err
	}
	fgmap := dictionaries.InitFoodGroupInfoMap(il)
	r := csv.NewReader(f)
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return cnts.Foods, err
		}
		cnts.Foods++
		if cnts.Foods%1000 == 0 {
			log.Println("Count = ", cnts.Foods)
		}
		pubdate, err := time.Parse("2006-01-02", record[4])
		if err != nil {
			log.Println(err)
		}
		fg64, _ := strconv.ParseInt(record[3], 0, 32)
		var fg *fdc.FoodGroup
		if fgmap[uint(fg64)].Code != "" {
			fg = &fdc.FoodGroup{ID: fgmap[uint(fg64)].ID, Code: fgmap[uint(fg64)].Code, Description: fgmap[uint(fg64)].Description, Type: dtype}
		} else {
			fg = nil
		}
		dc.Update(record[0],
			fdc.Food{
				FdcID:           record[0],
				Description:     record[2],
				PublicationDate: pubdate,
				Source:          t,
				Group:           fg,
				Type:            dt.ToString(fdc.FOOD),
			})
	}
	return cnts.Foods, err
}
// servings ingests path/food_portion.csv, grouping serving rows by
// food id and writing each group back to its food document. Completion
// or failure is reported on rc, which is closed on return.
//
// Fixes: the csv file is now closed, and the FINAL food's accumulated
// servings are flushed after the loop — previously the last group was
// silently dropped because updates only happened on id change.
func servings(path string, dc ds.DataSource, rc chan error) {
	defer close(rc)
	fn := path + "food_portion.csv"
	f, err := os.Open(fn)
	if err != nil {
		rc <- err
		return
	}
	defer f.Close()
	r := csv.NewReader(f)
	cid := ""
	var (
		food fdc.Food
		s    []fdc.Serving
	)
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			rc <- err
			return
		}
		id := record[1]
		if cid != id {
			if cid != "" {
				food.Servings = s
				dc.Update(cid, food)
			}
			cid = id
			dc.Get(id, &food)
			s = nil
		}
		cnts.Servings++
		if cnts.Servings%10000 == 0 {
			log.Println("Servings Count = ", cnts.Servings)
		}
		a, err := strconv.ParseFloat(record[3], 32)
		if err != nil {
			log.Println(record[0] + ": can't parse serving amount " + record[3])
		}
		w, err := strconv.ParseFloat(record[7], 32)
		if err != nil {
			log.Println(record[0] + ": can't parse serving weight " + record[7])
		}
		p, _ := strconv.ParseInt(record[8], 0, 32)
		s = append(s, fdc.Serving{
			Nutrientbasis: record[5],
			Description:   record[5],
			Servingamount: float32(a),
			Weight:        float32(w),
			Datapoints:    int32(p),
		})
	}
	// Flush the servings accumulated for the last food id.
	if cid != "" {
		food.Servings = s
		dc.Update(cid, food)
	}
	rc <- err
	return
}
// ndbnoCw walks sr_legacy_food.csv under path and copies each row's legacy
// NDB number (column 1) onto the food document keyed by fdcId (column 0).
// The result (nil on success) is sent on rc, which is closed on return.
func ndbnoCw(path string, dc ds.DataSource, rc chan error) {
	defer close(rc)
	fn := path + "sr_legacy_food.csv"
	f, err := os.Open(fn)
	if err != nil {
		rc <- err
		return
	}
	// Close the CSV file when finished; the original leaked the handle.
	defer f.Close()
	var food fdc.Food
	r := csv.NewReader(f)
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			rc <- err
			return
		}
		dc.Get(record[0], &food)
		food.NdbNo = record[1]
		dc.Update(record[0], food)
	}
	rc <- nil
}
// nutrients loads nutrient rows from food_nutrient.csv under path and
// attaches them to the corresponding food documents in dc, resolving
// nutrient numbers and derivation codes through dictionaries fetched from
// dc. The result (nil on success) is sent on rc, which is closed on return.
func nutrients(path string, dc ds.DataSource, rc chan error) {
	defer close(rc)
	// dt is only used for its ToString method; presumably ToString is
	// nil-receiver safe — NOTE(review): confirm fdc.DocType.ToString.
	var dt *fdc.DocType
	fn := path + "food_nutrient.csv"
	f, err := os.Open(fn)
	if err != nil {
		rc <- err
		return
	}
	// Close the CSV file when finished; the original leaked the handle.
	defer f.Close()
	r := csv.NewReader(f)
	cid := ""
	var (
		food fdc.Food
		n    []fdc.NutrientData
		il   interface{}
	)
	// Load the nutrient and derivation dictionaries once up front.
	if err := dc.GetDictionary("gnutdata", dt.ToString(fdc.NUT), 0, 500, &il); err != nil {
		rc <- err
		return
	}
	nutmap := dictionaries.InitNutrientInfoMap(il)
	if err := dc.GetDictionary("gnutdata", dt.ToString(fdc.DERV), 0, 500, &il); err != nil {
		rc <- err
		return
	}
	dlmap := dictionaries.InitDerivationInfoMap(il)
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			rc <- err
			return
		}
		// Rows are grouped by food id (column 1); on a change of id flush
		// the accumulated nutrients to the previous food.
		id := record[1]
		if cid != id {
			if cid != "" {
				food.Nutrients = n
				dc.Update(cid, food)
			}
			cid = id
			dc.Get(id, &food)
			n = nil
		}
		cnts.Nutrients++
		w, err := strconv.ParseFloat(record[3], 32)
		if err != nil {
			// Print the value column actually parsed (record[3]); the
			// original incorrectly logged record[4].
			log.Println(record[0] + ": can't parse value " + record[3])
		}
		min, _ := strconv.ParseFloat(record[6], 32)
		max, _ := strconv.ParseFloat(record[7], 32)
		v, err := strconv.ParseInt(record[2], 0, 32)
		if err != nil {
			// Print the nutrient-number column actually parsed (record[2]);
			// the original incorrectly logged record[1].
			log.Println(record[0] + ": can't parse nutrient no " + record[2])
		}
		d, _ := strconv.ParseInt(record[5], 0, 32)
		p, _ := strconv.ParseInt(record[4], 0, 32)
		// Attach a derivation only when the code resolves in the dictionary;
		// dv's zero value is already nil otherwise.
		var dv *fdc.Derivation
		if dlmap[uint(d)].Code != "" {
			dv = &fdc.Derivation{ID: dlmap[uint(d)].ID, Code: dlmap[uint(d)].Code, Type: dt.ToString(fdc.DERV), Description: dlmap[uint(d)].Description}
		}
		n = append(n, fdc.NutrientData{
			Nutrientno: nutmap[uint(v)].Nutrientno,
			Value:      float32(w),
			Nutrient:   nutmap[uint(v)].Name,
			Unit:       nutmap[uint(v)].Unit,
			Derivation: dv,
			Datapoints: int(p),
			Min:        float32(min),
			Max:        float32(max),
		})
		if cnts.Nutrients%30000 == 0 {
			log.Println("Nutrients Count = ", cnts.Nutrients)
		}
	}
	// Flush the final food's nutrients; the original dropped the last group.
	if cid != "" {
		food.Nutrients = n
		dc.Update(cid, food)
	}
	rc <- nil
}
|
package handlers
import (
controller "github.com/Brickchain/go-controller.v2"
httphandler "github.com/Brickchain/go-httphandler.v2"
"github.com/julienschmidt/httprouter"
)
// ControllerWrapper is a wrapper that adds some WithBinding request types
// on top of the base httphandler wrapper.
type ControllerWrapper struct {
	w    *httphandler.Wrapper      // underlying request/response wrapper, used for all dispatch
	bsvc controller.BindingService // passed to the add*Binding decorators in Wrap
}
// NewControllerWrapper returns a new ControllerWrapper instance backed by
// the given httphandler wrapper and binding service.
func NewControllerWrapper(w *httphandler.Wrapper, bsvc controller.BindingService) *ControllerWrapper {
	wrapper := new(ControllerWrapper)
	wrapper.w = w
	wrapper.bsvc = bsvc
	return wrapper
}
// Wrap is the main wrapper for making the regular httprouter.Handle type in to our Request/Response types.
// Handler functions that take one of the *WithBinding request types are first
// decorated with the wrapper's binding service; any other signature is passed
// straight through to the underlying httphandler wrapper.
func (wrapper *ControllerWrapper) Wrap(h interface{}) httprouter.Handle {
	switch x := h.(type) {
	case func(RequestWithBinding) httphandler.Response:
		return wrapper.w.Wrap(addBinding(wrapper.bsvc, x))
	case func(AuthenticatedRequestWithBinding) httphandler.Response:
		return wrapper.w.Wrap(addAuthenticatedBinding(wrapper.bsvc, x))
	case func(ActionRequestWithBinding) httphandler.Response:
		return wrapper.w.Wrap(addActionBinding(wrapper.bsvc, x))
	}
	// Fallback: let the base wrapper handle every other supported signature.
	return wrapper.w.Wrap(h)
}
|
package unimatrix
// NewActivitiesSchedulesOperation returns an Operation scoped to the given
// realm for the "activities_schedules" resource.
func NewActivitiesSchedulesOperation(realm string) *Operation {
	return NewRealmOperation(realm, "activities_schedules")
}
|
package config
import (
"io/ioutil"
"log"
"os"
"path"
"gopkg.in/yaml.v2"
)
// SingleHost describes one SSH target loaded from config.yaml.
type SingleHost struct {
	Host string `yaml:"host"`       // host address
	User string `yaml:"user"`       // login user
	Key  string `yaml:"privateKey"` // path to the private key
}

// HostList is the top-level YAML document: a list of hosts.
type HostList struct {
	List []SingleHost `yaml:"list"`
}
// GetConfig reads utils/config/config.yaml relative to the current working
// directory and returns the configured host list. Any failure (cwd lookup,
// file read, or strict YAML decode) is fatal.
func GetConfig() []SingleHost {
	config := HostList{}
	// The original discarded the Getwd error and then used pwd anyway;
	// fail fast instead, consistent with the other error paths here.
	pwd, err := os.Getwd()
	if err != nil {
		log.Fatalf("getwd error: %v", err)
	}
	cfg, err := ioutil.ReadFile(path.Join(pwd, `/utils/config/config.yaml`))
	if err != nil {
		log.Fatalf("io error: %v", err)
	}
	// UnmarshalStrict rejects unknown keys, catching config typos early.
	err = yaml.UnmarshalStrict(cfg, &config)
	if err != nil {
		log.Fatalf("decode error: %v", err)
	}
	return config.List
}
|
package main
import (
"log"
"os"
"os/exec"
"strconv"
"testing"
"github.com/blankon/irgsh-go/internal/config"
"github.com/stretchr/testify/assert"
)
// TestMain configures logging and loads the service config before running
// the tests in this package.
// NOTE(review): the LoadConfig and Getwd errors are silently discarded —
// consider failing fast if either errors.
func TestMain(m *testing.M) {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	irgshConfig, _ = config.LoadConfig()
	dir, _ := os.Getwd()
	// Point the builder workdir at a throwaway tmp dir next to the test dir.
	// NOTE(review): TestBaseInitRepo below reads irgshConfig.Repo.Workdir —
	// confirm Builder.Workdir is the intended field to override here.
	irgshConfig.Builder.Workdir = dir + "/../tmp"
	m.Run()
}
// TestBaseInitRepo initializes the repository and then checks, via a shell
// pipeline, that the created "verbeek" tree has a non-zero disk footprint.
func TestBaseInitRepo(t *testing.T) {
	err := InitRepo()
	if err != nil {
		log.Println(err.Error())
		assert.Equal(t, true, false, "Should not reach here")
	}
	// du prints "<size>\t<path>"; the pipeline strips everything but the
	// bare size so it can be parsed as an integer.
	cmdStr := "du -s " + irgshConfig.Repo.Workdir + "/verbeek | cut -d '/' -f1 | head -n 1 | sed 's/ //g' | tr -d '\n' | tr -d '\t' "
	cmd := exec.Command("bash", "-c", cmdStr)
	// CombinedOutput already runs the command; the original additionally
	// called cmd.Run() on the started command (always an error, discarded)
	// and ignored the CombinedOutput error itself.
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Println(err.Error())
		assert.Equal(t, true, false, "Should not reach here")
	}
	size, err := strconv.Atoi(string(out))
	if err != nil {
		log.Println(err.Error())
		assert.Equal(t, true, false, "Should not reach here")
	}
	assert.NotEqual(t, size, int(0))
}
// TestBaseInitRepoConfigCheck is a placeholder, skipped until the config
// validation path has a test harness.
func TestBaseInitRepoConfigCheck(t *testing.T) {
	t.Skip()
}
|
package types
import (
"fmt"
"reflect"
)
// PackageMeta represents metadata included with a package.
type PackageMeta struct {
	// The version of this manifest, only v1 currently
	MetaVersion string `json:"apiVersion,omitempty"`
	// The name of the package
	Name string `json:"name,omitempty"`
	// The version of the package
	Version string `json:"version,omitempty"`
	// The K3s version inside the package
	K3sVersion string `json:"k3sVersion,omitempty"`
	// The architecture the package was built for
	Arch string `json:"arch,omitempty"`
	// The format with which images were bundled in the archive.
	ImageBundleFormat ImageBundleFormat `json:"imageBundleFormat,omitempty"`
	// A listing of the contents of the package
	Manifest *Manifest `json:"manifest,omitempty"`
	// A configuration containing installation variables
	PackageConfig *PackageConfig `json:"config,omitempty"`
	// The raw, untemplated package config
	PackageConfigRaw []byte `json:"configRaw,omitempty"`
}
// DeepCopy creates a copy of this PackageMeta instance. Scalar fields are
// copied by value, PackageConfigRaw gets its own backing array, and the
// Manifest/PackageConfig pointers are deep-copied when present.
// TODO: DeepCopy functions need to be generated
func (p *PackageMeta) DeepCopy() *PackageMeta {
	out := &PackageMeta{
		MetaVersion:       p.MetaVersion,
		Name:              p.Name,
		Version:           p.Version,
		K3sVersion:        p.K3sVersion,
		Arch:              p.Arch,
		ImageBundleFormat: p.ImageBundleFormat,
		PackageConfigRaw:  make([]byte, len(p.PackageConfigRaw)),
	}
	copy(out.PackageConfigRaw, p.PackageConfigRaw)
	if m := p.Manifest; m != nil {
		out.Manifest = m.DeepCopy()
	}
	if c := p.PackageConfig; c != nil {
		out.PackageConfig = c.DeepCopy()
	}
	return out
}
// Sanitize will iterate the PackageConfig and convert any `map[interface{}]interface{}`
// to `map[string]interface{}`. This is required for serializing meta until I find a better
// way to deal with helm values. For convenience, the pointer to the PackageMeta is returned.
func (p *PackageMeta) Sanitize() *PackageMeta {
	cfg := p.PackageConfig
	if cfg == nil {
		return p
	}
	sanitized := make(map[string]interface{})
	for key, value := range cfg.HelmValues {
		sanitized[key] = sanitizeValue(value)
	}
	cfg.HelmValues = sanitized
	return p
}
// sanitizeValue recursively converts any map[interface{}]interface{} found
// in val (as produced by YAML decoding) into map[string]interface{} so the
// value can be serialized; keys are stringified with fmt.Sprintf("%v", k).
// Non-map values are returned unchanged.
func sanitizeValue(val interface{}) interface{} {
	// reflect.TypeOf(nil) returns a nil Type and calling Kind on it panics,
	// so nil values (e.g. an empty YAML node) must pass through explicitly.
	if val == nil {
		return nil
	}
	switch reflect.TypeOf(val).Kind() {
	case reflect.Map:
		if m, ok := val.(map[interface{}]interface{}); ok {
			newMap := make(map[string]interface{}, len(m))
			for k, v := range m {
				kStr := fmt.Sprintf("%v", k)
				newMap[kStr] = sanitizeValue(v)
			}
			return newMap
		}
		if m, ok := val.(map[string]interface{}); ok {
			// if the keys are already strings, we still need to descend
			newMap := make(map[string]interface{}, len(m))
			for k, v := range m {
				newMap[k] = sanitizeValue(v)
			}
			return newMap
		}
		// otherwise just return the regular map, but this may not catch
		// all cases yet
		return val
	default:
		return val
	}
}
// GetName returns the name of the package.
func (p *PackageMeta) GetName() string { return p.Name }

// GetVersion returns the version of the package.
func (p *PackageMeta) GetVersion() string { return p.Version }

// GetK3sVersion returns the K3s version for the package.
func (p *PackageMeta) GetK3sVersion() string { return p.K3sVersion }

// GetArch returns the CPU architecture for the package.
func (p *PackageMeta) GetArch() string { return p.Arch }

// GetManifest returns the manifest of the package.
func (p *PackageMeta) GetManifest() *Manifest { return p.Manifest }

// GetPackageConfig returns the package config of the package, or nil if there is none.
func (p *PackageMeta) GetPackageConfig() *PackageConfig { return p.PackageConfig }
// GetRegistryImageName returns the name that would have been used for a
// container image containing the registry contents, in the form
// "<name>-private-registry-data:<version>".
// TODO: Needing to keep this logic here and BuildRegistryOptions is not a good design probably.
func (p *PackageMeta) GetRegistryImageName() string {
	return p.Name + "-private-registry-data:" + p.Version
}
// NewEmptyMeta returns a new empty PackageMeta instance with an initialized,
// empty Manifest.
func NewEmptyMeta() *PackageMeta {
	return &PackageMeta{Manifest: NewEmptyManifest()}
}
|
package httpx_test
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/socialpoint-labs/bsk/httpx"
)
func TestAddHeaderDecorator(t *testing.T) {
assert := assert.New(t)
h := httpx.AddHeaderDecorator("key", "value1")(
httpx.AddHeaderDecorator("key", "value2")(
httpx.NoopHandler()))
w := httptest.NewRecorder()
r := &http.Request{}
h.ServeHTTP(w, r)
headers := w.Result().Header[http.CanonicalHeaderKey("key")]
assert.Equal("value1", headers[0])
assert.Equal("value2", headers[1])
}
func TestSetHeaderDecorator(t *testing.T) {
assert := assert.New(t)
h := httpx.SetHeaderDecorator("key", "value1")(
httpx.SetHeaderDecorator("key", "value2")(
httpx.NoopHandler()))
w := httptest.NewRecorder()
r := &http.Request{}
h.ServeHTTP(w, r)
assert.Equal("value2", w.Header().Get("key"))
}
func TestCheckHeaderDecorator(t *testing.T) {
assert := assert.New(t)
header := "foo"
value := "bar"
code := http.StatusInternalServerError
w := httptest.NewRecorder()
r := &http.Request{}
handler := httpx.CheckHeaderDecorator(header, value, code)(httpx.NoopHandler())
handler.ServeHTTP(w, r)
assert.Equal(w.Code, code)
content, err := io.ReadAll(w.Body)
assert.NoError(err)
assert.Equal(string(content), "Internal Server Error")
// now try the same thing but with the header in the request
r, err = http.NewRequest("GET", "http://foo.bar", nil)
assert.NoError(err)
r.Header.Set(header, value)
w = httptest.NewRecorder()
handler.ServeHTTP(w, r)
assert.Equal(w.Code, http.StatusOK)
content, err = io.ReadAll(w.Body)
assert.NoError(err)
assert.Equal(string(content), "")
}
func TestRootDecorator(t *testing.T) {
assert := assert.New(t)
h := httpx.RootDecorator()(httpx.NoopHandler())
// Test a request to the root path
w := httptest.NewRecorder()
req, err := http.NewRequest("GET", "/", nil)
assert.NoError(err)
h.ServeHTTP(w, req)
assert.Equal(http.StatusOK, w.Code)
// Test a request to a random non-root path
w = httptest.NewRecorder()
req, err = http.NewRequest("GET", "/whatever", nil)
assert.NoError(err)
h.ServeHTTP(w, req)
assert.Equal(http.StatusNotFound, w.Code)
}
func TestEnableCORSDecorator(t *testing.T) {
assert := assert.New(t)
h := httpx.EnableCORSDecorator()(httpx.NoopHandler())
w := httptest.NewRecorder()
r := &http.Request{}
r.Method = "OPTIONS"
h.ServeHTTP(w, r)
assert.Equal("*", w.Result().Header.Get("Access-Control-Allow-Origin"))
assert.Equal("GET,POST,PUT,PATCH,DELETE,HEAD,OPTIONS", w.Result().Header.Get("Access-Control-Allow-Methods"))
assert.Equal("Origin,Accept,Content-Type,Authorization", w.Result().Header.Get("Access-Control-Allow-Headers"))
assert.Equal(http.StatusOK, w.Code)
}
// TestIfDecorator checks that IfDecorator runs the conditional handler only
// when the predicate matches the request.
func TestIfDecorator(t *testing.T) {
	trueHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("cond", "true")
	})

	cond := func(w http.ResponseWriter, r *http.Request) bool {
		return r.URL.Path == "/true"
	}

	h := httpx.IfDecorator(cond, trueHandler)(httpx.NoopHandler())

	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "http://example.com/true", nil)
	h.ServeHTTP(w, r)
	assert.Equal(t, "true", w.Header().Get("cond"))

	w = httptest.NewRecorder()
	r, _ = http.NewRequest("GET", "http://example.com/false", nil)
	// Bug fix: serve into the recorder (the original passed nil as the
	// ResponseWriter), so the assertion below actually exercises the
	// non-matching branch instead of trivially passing on a fresh recorder.
	h.ServeHTTP(w, r)
	assert.Equal(t, "", w.Header().Get("cond"))
}
// TestTimeoutDecorator checks that the decorator installs a deadline on the
// request context before invoking the wrapped handler.
func TestTimeoutDecorator(t *testing.T) {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if _, hasDeadline := r.Context().Deadline(); hasDeadline {
			w.Header().Set("status", "deadline")
		}
	})

	decorated := httpx.TimeoutDecorator(time.Second)(inner)

	rec := httptest.NewRecorder()
	req, err := http.NewRequest("GET", "http://example.com/foo", nil)
	assert.NoError(t, err)
	decorated.ServeHTTP(rec, req)
	assert.Equal(t, "deadline", rec.Header().Get("status"))
}
// writerMock captures everything written to it so tests can inspect the
// bytes emitted by the logging decorator.
type writerMock struct {
	io.Writer
	loggedBytes []byte // accumulated log output; see Write below
}
// TestLogging routes a request through the logging decorator and checks
// both the response and the access-log line written to the sink.
func TestLogging(t *testing.T) {
	sink := &writerMock{}
	text := "Test OK"

	router := httpx.NewRouter()
	router.Route(
		"/test",
		http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusOK)
			_, _ = fmt.Fprint(w, text)
		}),
		httpx.LoggingDecorator(sink),
	)

	rec := httptest.NewRecorder()
	req, _ := http.NewRequest("GET", "http://localhost:8080/test", nil)
	router.ServeHTTP(rec, req)

	assert.Equal(t, http.StatusOK, rec.Code)
	assert.Equal(t, text, rec.Body.String())
	assert.Contains(t, string(sink.loggedBytes), "GET /test HTTP/1.1 200")
}
// Write appends p to the captured log output and never fails.
func (w *writerMock) Write(p []byte) (n int, err error) {
	w.loggedBytes = append(w.loggedBytes, p...)
	return len(p), nil
}
|
package main
import (
"context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/coldog/tool-ecs/internal/kv"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"testing"
"time"
)
// init pins GetTime to a fixed instant so the scheduler tests below are
// deterministic.
func init() {
	// fixed time to "2017-05-05T00:00:00Z"
	GetTime = func() time.Time {
		return time.Date(2017, 05, 05, 0, 0, 0, 0, time.UTC)
	}
}
// MockECS is a testify mock of the ECS client used by the scheduler.
type MockECS struct {
	mock.Mock
}

// Open is a no-op for the mock; the real client presumably connects here.
func (m *MockECS) Open(ctx context.Context) error { return nil }

// RunTask records the call and returns whatever the test configured via On.
func (m *MockECS) RunTask(ctx context.Context, input *ecs.RunTaskInput) error {
	return m.Called(input).Error(0)
}
// TestCronJob_Next verifies that Next computes the following on-the-hour
// run after LastRun.
func TestCronJob_Next(t *testing.T) {
	cronJob := &CronJob{
		LastRun:          time.Date(2017, 05, 05, 0, 0, 0, 0, time.UTC),
		Cluster:          "default",
		TaskDefinitionID: "test",
		Schedule:         "0 * * * *", // at minute zero
	}
	nextRun, err := cronJob.Next()
	assert.Nil(t, err)
	assert.Equal(t, "2017-05-05 01:00:00 +0000 UTC", nextRun.String())
}
// TestCronJob_ShouldRun verifies that a job last run a full day before the
// fixed "now" is due again.
func TestCronJob_ShouldRun(t *testing.T) {
	cronJob := &CronJob{
		LastRun:          time.Date(2017, 05, 04, 0, 0, 0, 0, time.UTC),
		Cluster:          "default",
		TaskDefinitionID: "test",
		Schedule:         "0 * * * *", // at minute zero
	}
	due, err := cronJob.ShouldRun()
	assert.Nil(t, err)
	assert.True(t, due)
}
// TestCronJob_ShouldNotRun verifies that a freshly-run job is not due.
func TestCronJob_ShouldNotRun(t *testing.T) {
	cronJob := &CronJob{
		LastRun:          time.Now(), // already run
		Cluster:          "default",
		TaskDefinitionID: "test",
		Schedule:         "0 * * * *", // at minute zero
	}
	due, err := cronJob.ShouldRun()
	assert.Nil(t, err)
	assert.False(t, due)
}
// TestScheduler_Start verifies that an overdue job causes evaluate() to
// launch the task with the job's replica count, cluster, and definition.
// NOTE(review): the kv.Put error is discarded — confirm LocalDB cannot fail.
func TestScheduler_Start(t *testing.T) {
	mockEcs := &MockECS{}
	ctx := context.Background()
	sched := &scheduler{
		ctx: ctx,
		ecs: mockEcs,
		kv:  kv.NewLocalDB(),
	}
	// Seed a job whose LastRun is one day before the fixed "now".
	sched.kv.Put(context.Background(), CronJobType, "job1", &CronJob{
		LastRun:          time.Date(2017, 05, 04, 0, 0, 0, 0, time.UTC),
		TaskDefinitionID: "testTask",
		Cluster:          "testCluster",
		Schedule:         "0 * * * *",
		Replicas:         5,
	})
	// The RunTask input evaluate() is expected to build from the job above.
	input := &ecs.RunTaskInput{
		Count:          aws.Int64(5),
		StartedBy:      aws.String("CronScheduler"),
		TaskDefinition: aws.String("testTask"),
		Cluster:        aws.String("testCluster"),
		Overrides:      &ecs.TaskOverride{},
	}
	mockEcs.On("RunTask", input).Return(nil)
	sched.evaluate()
	mockEcs.AssertCalled(t, "RunTask", input)
}
// TestScheduler_DoNotStart verifies that a job whose LastRun equals the
// (fixed) current time is not launched by an evaluation pass.
func TestScheduler_DoNotStart(t *testing.T) {
	mockEcs := &MockECS{}
	ctx := context.Background()
	sched := &scheduler{
		ctx: ctx,
		ecs: mockEcs,
		kv:  kv.NewLocalDB(),
	}
	sched.kv.Put(context.Background(), CronJobType, "job1", &CronJob{
		LastRun:          GetTime(),
		TaskDefinitionID: "testTask",
		Cluster:          "testCluster",
		Schedule:         "0 * * * *",
	})
	// Intentionally no expectation is registered: the original set one up
	// with two string arguments, which can never match the real
	// RunTask(*ecs.RunTaskInput) call, so it was dead code. If RunTask
	// were unexpectedly invoked, mock.Called would fail loudly, and
	// AssertNotCalled below verifies it was not invoked at all.
	sched.evaluate()
	mockEcs.AssertNotCalled(t, "RunTask")
}
|
package main
// main compares an untyped constant against a value of a locally defined
// integer type and discards the result. Presumably a compiler/vet
// regression case for mixed-type comparisons — NOTE(review): confirm intent
// before modifying.
func main() {
	type num int
	var a num
	_ = (5 != a)
}
|
/*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"crypto/x509"
"reflect"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
cmfake "github.com/jetstack/cert-manager/pkg/client/clientset/versioned/fake"
informers "github.com/jetstack/cert-manager/pkg/client/informers/externalversions"
"github.com/jetstack/cert-manager/test/unit/gen"
)
// TestCalculateDurationUntilRenew table-tests the renew-before-expiry
// calculation against certificates with various durations and renewBefore
// settings. `now` is pinned so the arithmetic is deterministic.
func TestCalculateDurationUntilRenew(t *testing.T) {
	c := IssuerOptions{
		RenewBeforeExpiryDuration: v1alpha1.DefaultRenewBefore,
	}
	currentTime := time.Now()
	now = func() time.Time { return currentTime }
	defer func() { now = time.Now }()
	// Composite literals below use keyed fields (Duration: ...) so the
	// file passes `go vet`'s unkeyed-fields check.
	tests := []struct {
		desc           string
		notBefore      time.Time
		notAfter       time.Time
		duration       *metav1.Duration
		renewBefore    *metav1.Duration
		expectedExpiry time.Duration
	}{
		{
			desc:           "generate an event if certificate duration is lower than requested duration",
			notBefore:      now(),
			notAfter:       now().Add(time.Hour * 24 * 90),
			duration:       &metav1.Duration{Duration: time.Hour * 24 * 120},
			renewBefore:    nil,
			expectedExpiry: time.Hour * 24 * 60,
		},
		{
			desc:           "default expiry to 30 days",
			notBefore:      now(),
			notAfter:       now().Add(time.Hour * 24 * 120),
			duration:       nil,
			renewBefore:    nil,
			expectedExpiry: (time.Hour * 24 * 120) - (time.Hour * 24 * 30),
		},
		{
			desc:           "default expiry to 2/3 of total duration if duration < 30 days",
			notBefore:      now(),
			notAfter:       now().Add(time.Hour * 24 * 20),
			duration:       nil,
			renewBefore:    nil,
			expectedExpiry: time.Hour * 24 * 20 * 2 / 3,
		},
		{
			desc:           "expiry of 2/3 of certificate duration when duration < 30 minutes",
			notBefore:      now(),
			notAfter:       now().Add(time.Hour),
			duration:       &metav1.Duration{Duration: time.Hour},
			renewBefore:    &metav1.Duration{Duration: time.Hour / 3},
			expectedExpiry: time.Hour * 2 / 3,
		},
		{
			desc:           "expiry of 60 days of certificate duration",
			notBefore:      now(),
			notAfter:       now().Add(time.Hour * 24 * 365),
			duration:       &metav1.Duration{Duration: time.Hour * 24 * 365},
			renewBefore:    &metav1.Duration{Duration: time.Hour * 24 * 60},
			expectedExpiry: (time.Hour * 24 * 365) - (time.Hour * 24 * 60),
		},
		{
			desc:           "expiry of 2/3 of certificate duration when renewBefore greater than certificate duration",
			notBefore:      now(),
			notAfter:       now().Add(time.Hour * 24 * 35),
			duration:       &metav1.Duration{Duration: time.Hour * 24 * 35},
			renewBefore:    &metav1.Duration{Duration: time.Hour * 24 * 40},
			expectedExpiry: time.Hour * 24 * 35 * 2 / 3,
		},
	}
	for k, v := range tests {
		cert := &v1alpha1.Certificate{
			Spec: v1alpha1.CertificateSpec{
				Duration:    v.duration,
				RenewBefore: v.renewBefore,
			},
		}
		x509Cert := &x509.Certificate{NotBefore: v.notBefore, NotAfter: v.notAfter}
		duration := c.CalculateDurationUntilRenew(x509Cert, cert)
		if duration != v.expectedExpiry {
			t.Errorf("test # %d - %s: got %v, expected %v", k, v.desc, duration, v.expectedExpiry)
		}
	}
}
// TestGetGenericIssuer table-tests issuer resolution by name/kind against a
// fake clientset, covering Issuer, ClusterIssuer, missing resources, a
// missing kind, and a nil cluster-issuer lister.
func TestGetGenericIssuer(t *testing.T) {
	// Typed nils: a lookup miss returns the typed nil for the requested kind.
	var nilIssuer *v1alpha1.Issuer
	var nilClusterIssuer *v1alpha1.ClusterIssuer
	type testT struct {
		Name                   string
		Kind                   string
		Namespace              string
		CMObjects              []runtime.Object
		NilClusterIssuerLister bool
		Err                    bool
		Expected               v1alpha1.GenericIssuer
	}
	tests := map[string]testT{
		"get a named Issuer resource": {
			Name:      "name-of-issuer",
			Kind:      "Issuer",
			Namespace: gen.DefaultTestNamespace,
			CMObjects: []runtime.Object{gen.Issuer("name-of-issuer")},
			Expected:  gen.Issuer("name-of-issuer"),
		},
		"get a named ClusterIssuer resource": {
			Name:      "name-of-clusterissuer",
			Kind:      "ClusterIssuer",
			CMObjects: []runtime.Object{gen.ClusterIssuer("name-of-clusterissuer")},
			Expected:  gen.ClusterIssuer("name-of-clusterissuer"),
		},
		"fail to get a Issuer": {
			Name:     "name",
			Kind:     "Issuer",
			Err:      true,
			Expected: nilIssuer,
		},
		"fail to get a ClusterIssuer": {
			Name:     "name",
			Kind:     "ClusterIssuer",
			Err:      true,
			Expected: nilClusterIssuer,
		},
		"fail when no kind is specified": {
			Name:     "name",
			Err:      true,
			Expected: nilIssuer,
		},
		"fail to get clusterissuer when clusterissuer lister is nil": {
			Name:                   "name",
			Kind:                   "ClusterIssuer",
			NilClusterIssuerLister: true,
			Err:                    true,
		},
	}
	for n, row := range tests {
		t.Run(n, func(t *testing.T) {
			// Build a helper over informers seeded with the row's objects.
			cl := cmfake.NewSimpleClientset(row.CMObjects...)
			f := informers.NewSharedInformerFactory(cl, 0)
			h := &helperImpl{
				issuerLister:        f.Certmanager().V1alpha1().Issuers().Lister(),
				clusterIssuerLister: f.Certmanager().V1alpha1().ClusterIssuers().Lister(),
			}
			if row.NilClusterIssuerLister {
				h.clusterIssuerLister = nil
			}
			stopCh := make(chan struct{})
			defer close(stopCh)
			f.Start(stopCh)
			// Wait for caches before querying, or lookups would race the sync.
			f.WaitForCacheSync(stopCh)
			actual, err := h.GetGenericIssuer(v1alpha1.ObjectReference{Name: row.Name, Kind: row.Kind}, row.Namespace)
			if err != nil && !row.Err {
				t.Errorf("Expected no error, but got: %s", err)
			}
			if !reflect.DeepEqual(actual, row.Expected) {
				t.Errorf("Expected %#v but got %#v", row.Expected, actual)
			}
		})
	}
}
|
package main
import (
"github.com/backstage/my-go-service/cmd"
)
// main hands control to the service's command entry point (cmd.Execute).
func main() {
	cmd.Execute()
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"github.com/dirkjabl/bricker/net/head"
"github.com/dirkjabl/bricker/net/optionaldata"
"github.com/dirkjabl/bricker/net/payload"
"testing"
)
// TestComputeLength exercises ComputeLength over combinations of head
// (8 bytes), payload, and optional data being present or absent, mutating
// one Packet step by step.
func TestComputeLength(t *testing.T) {
	pkt := &Packet{nil, nil, nil}
	if got := pkt.ComputeLength(); got != 0 {
		t.Fatalf("Error TestComputeLength: Length = %d should 0", got)
	}
	pkt.Head = &head.Head{}
	if got := pkt.ComputeLength(); got != 8 {
		t.Fatalf("Error TestComputeLength: Length = %d should 8", got)
	}
	pkt.Payload = payload.New([]byte("123")) // 3 bytes
	if got := pkt.ComputeLength(); got != (8 + 3) {
		t.Fatalf("Error TestComputeLength: Length = %d should %d", got, (8 + 3))
	}
	pkt.OptionalData = optionaldata.New([]byte("1234")) // 4 bytes
	if got := pkt.ComputeLength(); got != (8 + 3 + 4) {
		t.Fatalf("Error TestComputeLength: Length = %d should %d", got, (8 + 3 + 4))
	}
	pkt.Head = nil
	if got := pkt.ComputeLength(); got != (3 + 4) {
		t.Fatalf("Error TestComputeLength: Length = %d should %d", got, (3 + 4))
	}
	pkt.Payload = nil
	if got := pkt.ComputeLength(); got != 4 {
		t.Fatalf("Error TestComputeLength: Length = %d should %d", got, 4)
	}
	pkt.OptionalData = nil
	pkt.Payload = payload.New([]byte("123")) // 3 bytes
	if got := pkt.ComputeLength(); got != 3 {
		t.Fatalf("Error TestComputeLength: Length = %d should %d", got, 3)
	}
	pkt.Payload = nil
	pkt.OptionalData = optionaldata.New([]byte("1234")) // 4 bytes
	pkt.Head = &head.Head{}
	if got := pkt.ComputeLength(); got != (8 + 4) {
		t.Fatalf("Error TestComputeLength: Length = %d should %d", got, (8 + 4))
	}
}
// t.Fatalf
|
package midec
/*
Based on image/format.go in standard library.
---
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import (
"bufio"
"errors"
"io"
"sync"
"sync/atomic"
)
// ErrFormat indicates that detecting encountered an unknown format.
var ErrFormat = errors.New("midec: unknown format")

// format pairs a registered format name and its magic prefix with the
// detector that reports whether a stream in that format is animated.
type format struct {
	name, magic string
	isAnimated  func(io.Reader) (bool, error)
}

var (
	formatsMu     sync.Mutex   // serializes RegisterFormat writers
	atomicFormats atomic.Value // holds the current []format; readers load it lock-free
)
// RegisterFormat registers an image format for use by IsAnimated.
// name is the format's name, magic is its magic prefix (which may contain
// "?" wildcards; see match), and isAnimated reports whether a stream in
// that format is animated. Writers append under formatsMu and publish the
// grown slice atomically, so sniff can read without locking.
func RegisterFormat(name, magic string, isAnimated func(io.Reader) (bool, error)) {
	formatsMu.Lock()
	formats, _ := atomicFormats.Load().([]format)
	atomicFormats.Store(append(formats, format{name, magic, isAnimated}))
	formatsMu.Unlock()
}
// A reader is an io.Reader that can also peek ahead without consuming input.
type reader interface {
	io.Reader
	Peek(int) ([]byte, error)
}
// asReader converts an io.Reader to a reader, wrapping it in a bufio.Reader
// only when it cannot already peek.
func asReader(r io.Reader) reader {
	rr, ok := r.(reader)
	if !ok {
		return bufio.NewReader(r)
	}
	return rr
}
// match reports whether magic matches b byte for byte; a '?' in magic
// matches any byte. The lengths must be equal.
func match(magic string, b []byte) bool {
	if len(b) != len(magic) {
		return false
	}
	for i := 0; i < len(b); i++ {
		if magic[i] == '?' {
			continue
		}
		if magic[i] != b[i] {
			return false
		}
	}
	return true
}
// sniff returns the first registered format whose magic prefix matches the
// start of r, or the zero format if none match.
func sniff(r reader) format {
	registered, _ := atomicFormats.Load().([]format)
	for _, candidate := range registered {
		prefix, err := r.Peek(len(candidate.magic))
		if err != nil {
			continue
		}
		if match(candidate.magic, prefix) {
			return candidate
		}
	}
	return format{}
}
// IsAnimated detects whether r holds an animated image encoded in a
// registered format. It returns ErrFormat when no registered magic matches.
func IsAnimated(r io.Reader) (bool, error) {
	// Wrap once so bytes buffered during sniffing are not lost.
	rr := asReader(r)
	f := sniff(rr)
	if f.isAnimated == nil {
		return false, ErrFormat
	}
	return f.isAnimated(rr)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.