text
stringlengths 11
4.05M
|
|---|
package ocrsdk
import (
"./src/parametrs"
"./src/responses"
"./src/utils"
"bytes"
"encoding/base64"
"errors"
"fmt"
"strings"
"time"
)
// OCRSDK is a client for the ABBYY Cloud OCR SDK HTTP API.
// Create instances with CreateOCRSDK; the zero value carries no credentials.
type OCRSDK struct {
	// authorizationKey is the base64-encoded "name:password" pair used for
	// HTTP Basic authorization against the OCR SDK service.
	authorizationKey string
}

// CreateOCRSDK builds an OCRSDK client from the application name and password
// issued by the OCR SDK service. It returns an error when either credential
// is empty.
func CreateOCRSDK(name string, pwd string) (ocrsdk *OCRSDK, err error) {
	if len(name) == 0 || len(pwd) == 0 {
		// Error strings are lowercase and unpunctuated per Go convention.
		err = errors.New("wrong name or password of the ocrsdk application")
		return
	}
	// Pre-compute the Basic-auth token: base64("name:password").
	var writer bytes.Buffer
	encoder := base64.NewEncoder(base64.StdEncoding, &writer)
	fmt.Fprintf(encoder, "%s:%s", name, pwd)
	// Close flushes the final, possibly padded, base64 quantum.
	encoder.Close()
	ocrsdk = &OCRSDK{
		authorizationKey: writer.String(),
	}
	return
}
// ProcessImage sends a single image for processing.
func (o *OCRSDK) ProcessImage(params *parametrs.ProcessImage, data []byte) (result *responses.Success, err error) {
	return o.invoke("processImage", params.ToMap(), data)
}
// ProcessImageWait sends a single image for processing and then blocks,
// polling the resulting task until it reaches a terminal state. Observed
// status strings are sent on the status channel; exactly one ComplexResult
// (the final outcome or an error) is sent on the result channel.
func (o *OCRSDK) ProcessImageWait(params *parametrs.ProcessImage, data []byte,
	status chan<- string, result chan<- responses.ComplexResult) {
	var err error
	var resp *responses.Success
	resp, err = o.invoke("processImage", params.ToMap(), data)
	if err != nil {
		// Submission itself failed: report through the result channel.
		result <- *responses.CreateErrorComplexResult(err)
		return
	}
	task := resp.GetTask()
	o.Wait(&task, status, result)
}
// SubmitImage adds an image to an existing task, or creates a new task.
// The task is not executed until processDocument or processFields is called.
func (o *OCRSDK) SubmitImage(params *parametrs.SubmitImage, data []byte) (result *responses.Success, err error) {
	return o.invoke("submitImage", params.ToMap(), data)
}
// ProcessDocument processes several previously submitted images and returns
// the recognition result as a multi-page document.
//
// Note: may only be called after SubmitImage.
func (o *OCRSDK) ProcessDocument(params *parametrs.ProcessDocument) (result *responses.Success, err error) {
	return o.invoke("processDocument", params.ToMap(), []byte{})
}
// ProcessBusinessCard recognizes business cards on the image.
func (o *OCRSDK) ProcessBusinessCard(params *parametrs.ProcessBusinessCard, data []byte) (result *responses.Success, err error) {
	return o.invoke("processBusinessCard", params.ToMap(), data)
}
// ProcessTextField extracts the value of a text field on the image.
func (o *OCRSDK) ProcessTextField(params *parametrs.ProcessTextField, data []byte) (result *responses.Success, err error) {
	return o.invoke("processTextField", params.ToMap(), data)
}
// ProcessBarcodeField extracts the value of a barcode on the image.
func (o *OCRSDK) ProcessBarcodeField(params *parametrs.ProcessBarcodeField, data []byte) (result *responses.Success, err error) {
	return o.invoke("processBarcodeField", params.ToMap(), data)
}
// ProcessCheckmarkField extracts the value of a checkmark on the image.
func (o *OCRSDK) ProcessCheckmarkField(params *parametrs.ProcessCheckmarkField, data []byte) (result *responses.Success, err error) {
	return o.invoke("processCheckmarkField", params.ToMap(), data)
}
// ProcessFields recognizes several fields in a document.
//
// Note: may only be called after SubmitImage.
func (o *OCRSDK) ProcessFields(params *parametrs.ProcessFields, data []byte) (result *responses.Success, err error) {
	return o.invoke("processFields", params.ToMap(), data)
}
// ProcessMRZ extracts information from official travel documents
// (passports, etc.) via their machine-readable zone.
func (o *OCRSDK) ProcessMRZ(data []byte) (result *responses.Success, err error) {
	return o.invoke("processMRZ", map[string]string{}, data)
}
// ProcessReceipt recognizes a receipt image.
// Only US documents are fully supported; other countries are in beta.
func (o *OCRSDK) ProcessReceipt(params *parametrs.ProcessReceipt, data []byte) (result *responses.Success, err error) {
	return o.invoke("processReceipt", params.ToMap(), data)
}
// GetTaskStatus returns the current state of a task and, for completed
// tasks, the URL of the processing result.
func (o *OCRSDK) GetTaskStatus(taskId string) (result *responses.Success, err error) {
	return o.invoke("getTaskStatus", map[string]string{"taskId": taskId}, []byte{})
}
// DeleteTask deletes a task and the images associated with it.
func (o *OCRSDK) DeleteTask(taskId string) (result *responses.Success, err error) {
	return o.invoke("deleteTask", map[string]string{"taskId": taskId}, []byte{})
}
// ListTasks returns the list of tasks (deleted tasks are hidden by default,
// but a parameter can be set to include them).
func (o *OCRSDK) ListTasks(params *parametrs.ListTasks) (result *responses.Success, err error) {
	return o.invoke("listTasks", params.ToMap(), []byte{})
}
// ListFinishedTasks returns the list of finished tasks.
func (o *OCRSDK) ListFinishedTasks() (result *responses.Success, err error) {
	return o.invoke("listFinishedTasks", map[string]string{}, []byte{})
}
// GetApplicationInfo returns the application type, its current balance and
// expiry date. The corresponding setting must be enabled in the account
// profile for this call to be allowed.
func (o *OCRSDK) GetApplicationInfo() (result *responses.Success, err error) {
	return o.invoke("getApplicationInfo", map[string]string{}, []byte{})
}
// invoke issues an API call to the server: it builds a request for the given
// method with the stored authorization key, query parameters and optional
// body, then sends it.
func (o *OCRSDK) invoke(method string, params map[string]string, body []byte) (result *responses.Success, err error) {
	req, reqErr := utils.CreateRequest(method, o.authorizationKey, params, body)
	if reqErr != nil {
		return nil, reqErr
	}
	return req.Send()
}
// Wait polls the task status every two seconds until the task reaches a
// terminal state. Each observed status is sent on the status channel;
// exactly one ComplexResult (with downloaded result files or an error) is
// sent on the result channel before the method returns.
func (o *OCRSDK) Wait(sourceTask *responses.Task, status chan<- string, result chan<- responses.ComplexResult) {
	retval := *responses.CreateComplexResult(sourceTask)
	// Publish the initial status before the first poll.
	status <- retval.ProccesTask.Status
	for {
		time.Sleep(time.Second * 2)
		resp, err := o.GetTaskStatus(retval.ProccesTask.Id)
		if err != nil {
			retval.Error = err
			result <- retval
			break
		}
		task := resp.GetTask()
		retval.ProccesTask.Update(&task)
		status <- retval.ProccesTask.Status
		if strings.Compare(retval.ProccesTask.Status, responses.TaskStatus_Completed) == 0 {
			// Completed: download the result files before reporting success.
			if err = loadResults(&retval.Files, retval.ProccesTask); err != nil {
				retval.Error = err
			}
			result <- retval
			break
		} else if strings.Compare(retval.ProccesTask.Status, responses.TaskStatus_ProcessingFailed) == 0 ||
			strings.Compare(retval.ProccesTask.Status, responses.TaskStatus_Deleted) == 0 ||
			strings.Compare(retval.ProccesTask.Status, responses.TaskStatus_NotEnoughCredits) == 0 {
			// Terminal failure state: prefer the server-provided error text.
			if len(retval.ProccesTask.Error) == 0 {
				retval.Error = errors.New("Process is " + retval.ProccesTask.Status)
			} else {
				retval.Error = errors.New(retval.ProccesTask.Error)
			}
			result <- retval
			break
		}
	}
}
// loadResults downloads every result URL present on the task and stores the
// raw payloads in collect, keyed by URL. It stops and returns the first
// download error encountered.
func loadResults(collect *map[string][]byte, task *responses.Task) error {
	// A completed task exposes up to three result URLs; the original code
	// duplicated the download logic three times — treat them uniformly.
	for _, u := range []string{task.ResultUrl, task.ResultUrl2, task.ResultUrl3} {
		if len(u) == 0 {
			continue
		}
		data, err := utils.DownloadData(u)
		if err != nil {
			return err
		}
		(*collect)[u] = data
	}
	return nil
}
|
package ir
import "sort"
// ===================== Functions implementing a sort interface ======================
// By is the type of a "less" function that defines an ordering for SearchResult.
type By func(r1, r2 *SearchResult) bool

// Sort sorts the search results according to the ordering defined by the
// By closure that acts as the receiver.
func (by By) Sort(results []SearchResult) {
	rs := &resultsSorter{
		results: results,
		by:      by, // The Sort method's receiver is the function (closure) that defines the sort order.
	}
	sort.Sort(rs)
}
// resultsSorter joins a By function and a slice of SearchResult to be sorted,
// implementing sort.Interface.
type resultsSorter struct {
	results []SearchResult
	by      func(r1, r2 *SearchResult) bool // Closure used in the Less method.
}
// Len is part of sort.Interface.
func (s *resultsSorter) Len() int {
	return len(s.results)
}

// Swap is part of sort.Interface.
func (s *resultsSorter) Swap(i, j int) {
	// gofmt: a space follows the comma in the parallel assignment.
	s.results[i], s.results[j] = s.results[j], s.results[i]
}

// Less is part of sort.Interface. It is implemented by calling the "by"
// closure held by the sorter.
func (s *resultsSorter) Less(i, j int) bool {
	return s.by(&s.results[i], &s.results[j])
}
|
package main
import (
"fmt"
"github.com/sihendra/go-msgbuzz"
"time"
)
func main() {
	// Create the message-bus client (4 consumer threads).
	msgBus := msgbuzz.NewRabbitMqClient("amqp://127.0.0.1:5672", 4)

	// Register a consumer for the topic. The payload parameter is renamed
	// from "bytes" so it no longer shadows the well-known package name.
	msgBus.On("profile.created", "reco_engine", func(confirm msgbuzz.MessageConfirm, payload []byte) error {
		defer confirm.Ack()
		fmt.Printf("Incoming message: %s", string(payload))
		return nil
	})

	go func(client *msgbuzz.RabbitMqClient) {
		// Wait for the consumer to start; without a consumer no message
		// will be saved by RabbitMQ.
		time.Sleep(time.Second * 1)
		// Publish to the topic. The original ignored the parameter and the
		// error: use the passed-in client and report publish failures.
		if err := client.Publish("profile.created", []byte(`{"name":"Dodo"}`)); err != nil {
			fmt.Printf("Publish failed: %v\n", err)
		}
		// Wait for the consumer to pick up the message before stopping.
		time.Sleep(time.Second * 1)
		client.Close()
	}(msgBus)

	// Will block until msgbuzz is closed.
	fmt.Println("Start Consuming")
	msgBus.StartConsuming()
	fmt.Println("Finish Consuming")
}
|
package main
import (
"fmt"
"os"
"os/signal"
"github.com/siggy/bbox/beatboxer/render/web"
log "github.com/sirupsen/logrus"
)
func main() {
	sig := make(chan os.Signal, 1)
	// os.Kill (SIGKILL) can never be caught or ignored, so subscribing to it
	// was a no-op; listen for Interrupt only.
	signal.Notify(sig, os.Interrupt)
	w := web.InitWeb()
	for {
		select {
		case <-sig:
			// Println: the message is plain text and should end with a newline.
			fmt.Println("Received OS signal, exiting")
			return
		case p := <-w.Phone():
			log.Infof("Phone event: %+v", p)
		}
	}
}
// 255,0,214
// 0,255,9
// 255,255,0
// 0,0,255
// 255,0,0
// 0,255,255
// 128,0,255
// 128,255,0
// 166,0,255
// 58,255,0
|
package main
import (
"fmt"
"math/rand"
"sync"
"time"
)
// Shared state mutated concurrently by the worker goroutines started in main.
var arraySize = 4
var array1 = []int{3, 6, 3, 2}
var array2 = []int{2, 9, 4, 1}
var array3 = []int{8, 3, 2, 0}

// CyclicBarrier is a reusable synchronization barrier: parties goroutines
// call await, and all of them are released together once the last arrives.
type CyclicBarrier struct {
	phase   int        // generation counter, incremented each time the barrier trips
	count   int        // parties still to arrive in the current generation
	parties int        // total number of participants per generation
	trigger *sync.Cond // guards the fields above and wakes the waiters
}
// nextGeneration trips the barrier: it wakes all waiters, resets the arrival
// count and advances the phase. Callers must hold b.trigger.L (await does).
func (b *CyclicBarrier) nextGeneration() {
	b.trigger.Broadcast()
	b.count = b.parties
	b.phase++
}
// await blocks the caller until all parties have reached the barrier. The
// last arriving party trips the barrier, releasing everyone else.
func (b *CyclicBarrier) await() {
	b.trigger.L.Lock()
	defer b.trigger.L.Unlock()
	// Remember the generation we arrived in; a phase change signals release.
	phase := b.phase
	b.count--
	if b.count == 0 {
		b.nextGeneration()
	} else {
		// Re-check the phase on every wakeup to tolerate spurious wakeups.
		for phase == b.phase {
			b.trigger.Wait()
		}
	}
}
// calculateSums returns the element-wise totals of the three shared arrays.
func calculateSums() (int, int, int) {
	var s1, s2, s3 int
	for i := 0; i < arraySize; i++ {
		s1, s2, s3 = s1+array1[i], s2+array2[i], s3+array3[i]
	}
	return s1, s2, s3
}
// start repeatedly perturbs one random element of arr by ±1, then waits on
// the barrier and checks whether the three array sums have converged.
func start(arr []int, b *CyclicBarrier) {
	// Seed once: re-seeding with the current time on every iteration is
	// wasteful and can repeat sequences when iterations land on the same tick.
	rand.Seed(time.Now().UnixNano())
	for {
		// Add either -1 or +1 to a random element.
		if rand.Intn(2) == 0 {
			arr[rand.Intn(arraySize)] += -1
		} else {
			arr[rand.Intn(arraySize)] += 1
		}
		b.await()
		var sum1, sum2, sum3 = calculateSums()
		if sum1 == sum2 && sum2 == sum3 {
			fmt.Printf("Finished, sum = %d\n", sum1)
			break
		} else {
			fmt.Printf("Current sums: %d,%d,%d\n", sum1, sum2, sum3)
		}
	}
}
func main() {
	// One barrier shared by the three array-mutating workers.
	barrier := &CyclicBarrier{
		count:   3,
		parties: 3,
		trigger: sync.NewCond(&sync.Mutex{}),
	}
	go start(array1, barrier)
	go start(array2, barrier)
	go start(array3, barrier)
	// Keep the process alive until the user presses Enter.
	_, _ = fmt.Scanln()
}
|
package mira
import (
"bytes"
b64 "encoding/base64"
"encoding/json"
"net/http"
"net/url"
"strings"
"time"
)
// Credentials holds everything needed for a Reddit script-app password grant:
// the app's client id/secret pair, the account login, and the User-Agent
// string sent with the auth request.
type Credentials struct {
	ClientID     string
	ClientSecret string
	Username     string
	Password     string
	UserAgent    string
}
// Authenticate exchanges the given credentials for an access token and
// returns a *Reddit object that has been authed.
func Authenticate(c *Credentials) (*Reddit, error) {
	// URL to get access_token
	authURL := RedditBase + "api/v1/access_token"
	// Password-grant form body.
	form := url.Values{}
	form.Add("grant_type", "password")
	form.Add("username", c.Username)
	form.Add("password", c.Password)
	// Encode the Authorization header (HTTP Basic from id:secret).
	raw := c.ClientID + ":" + c.ClientSecret
	encoded := b64.StdEncoding.EncodeToString([]byte(raw))
	// Create a request to allow customised headers.
	r, err := http.NewRequest("POST", authURL, strings.NewReader(form.Encode()))
	if err != nil {
		return nil, err
	}
	// NOTE(review): no Content-Type header is set for the form body —
	// presumably the endpoint tolerates this; confirm before changing.
	r.Header.Set("User-Agent", c.UserAgent)
	r.Header.Set("Authorization", "Basic "+encoded)
	client := &http.Client{}
	response, err := client.Do(r)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()
	// Read the whole body; a failed read must not be silently ignored.
	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(response.Body); err != nil {
		return nil, err
	}
	data := buf.Bytes()
	if err := findRedditError(data); err != nil {
		return nil, err
	}
	auth := Reddit{}
	auth.Chain = make(chan *ChainVals, 32)
	// The unmarshal error was previously dropped, which could hand back a
	// client with an empty token; surface it instead.
	if err := json.Unmarshal(data, &auth); err != nil {
		return nil, err
	}
	auth.Creds = *c
	return &auth, nil
}
// autoRefresh reauthenticates the user every 45 minutes. It should be run
// with the go statement.
// NOTE(review): the loop has no cancellation mechanism, so the goroutine
// lives for the remainder of the process — confirm this is intended.
func (c *Reddit) autoRefresh() {
	for {
		time.Sleep(45 * time.Minute)
		c.updateCredentials()
	}
}
// updateCredentials reauthenticates and refreshes the stored token in place.
// On failure the old token is kept: the original code ignored the error and
// dereferenced the nil result, panicking whenever Authenticate failed.
func (c *Reddit) updateCredentials() {
	temp, err := Authenticate(&c.Creds)
	if err != nil || temp == nil {
		// Keep the current token; the next refresh cycle will retry.
		return
	}
	// Just update the token.
	c.Token = temp.Token
}
// SetDefault sets all default values: polling intervals (in seconds) and
// slice sizes for streaming, plus retry counts for lookups.
func (c *Reddit) SetDefault() {
	c.Stream = Streaming{
		CommentListInterval: 8,
		PostListInterval:    10,
		ReportsInterval:     15,
		ModQueueInterval:    15,
		PostListSlice:       25,
	}
	c.Values = RedditVals{
		GetSubmissionFromCommentTries: 32,
	}
}
// SetClient sets the *http.Client mira uses to make requests.
func (c *Reddit) SetClient(client *http.Client) {
	c.Client = client
}
|
package dao
// NewDao constructs a Dao with the requested debug setting.
func NewDao(debug bool) *Dao {
	// The debug argument was previously ignored, leaving Debug always false.
	return &Dao{Debug: debug}
}

// Dao holds request-handling configuration.
type Dao struct {
	Debug      bool   // enable debug behaviour
	IgnoreSign bool   // skip signature checking when true
	SecretKey  string // key used for signing
}
|
package loadbalancer
import (
"context"
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/giantswarm/apiextensions/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/micrologger/microloggertest"
awsclient "github.com/giantswarm/aws-operator/client/aws"
"github.com/giantswarm/aws-operator/service/controller/legacy/v30/controllercontext"
)
// Test_clusterLoadBalancers verifies that clusterLoadBalancers returns only
// the ELBs tagged as owned by the test cluster AND carrying a
// kubernetes.io/service-name tag, across a table of tag combinations.
func Test_clusterLoadBalancers(t *testing.T) {
	t.Parallel()
	// All cases query the same cluster ID; only the mocked ELBs vary.
	customObject := v1alpha1.AWSConfig{
		Spec: v1alpha1.AWSConfigSpec{
			Cluster: v1alpha1.Cluster{
				ID: "test-cluster",
			},
		},
	}
	testCases := []struct {
		description   string
		obj           v1alpha1.AWSConfig
		expectedState *LoadBalancerState
		loadBalancers []LoadBalancerMock
	}{
		{
			description: "basic match with no load balancers",
			obj:         customObject,
			expectedState: &LoadBalancerState{
				LoadBalancerNames: []string{},
			},
		},
		{
			description: "basic match with load balancer",
			obj:         customObject,
			expectedState: &LoadBalancerState{
				LoadBalancerNames: []string{
					"test-elb",
				},
			},
			loadBalancers: []LoadBalancerMock{
				{
					loadBalancerName: "test-elb",
					loadBalancerTags: []*elb.Tag{
						{
							Key:   aws.String("kubernetes.io/cluster/test-cluster"),
							Value: aws.String("owned"),
						},
						{
							Key:   aws.String("kubernetes.io/service-name"),
							Value: aws.String("hello-world"),
						},
					},
				},
			},
		},
		{
			description: "no matching load balancer",
			obj:         customObject,
			expectedState: &LoadBalancerState{
				LoadBalancerNames: []string{},
			},
			loadBalancers: []LoadBalancerMock{
				{
					loadBalancerName: "test-elb",
					loadBalancerTags: []*elb.Tag{
						{
							Key:   aws.String("kubernetes.io/cluster/other-cluster"),
							Value: aws.String("owned"),
						},
						{
							Key:   aws.String("kubernetes.io/service-name"),
							Value: aws.String("hello-world"),
						},
					},
				},
			},
		},
		{
			description: "multiple load balancers",
			obj:         customObject,
			expectedState: &LoadBalancerState{
				LoadBalancerNames: []string{
					"test-elb",
					"test-elb-2",
				},
			},
			loadBalancers: []LoadBalancerMock{
				{
					loadBalancerName: "test-elb",
					loadBalancerTags: []*elb.Tag{
						{
							Key:   aws.String("kubernetes.io/cluster/test-cluster"),
							Value: aws.String("owned"),
						},
						{
							Key:   aws.String("kubernetes.io/service-name"),
							Value: aws.String("hello-world"),
						},
					},
				},
				{
					loadBalancerName: "test-elb-2",
					loadBalancerTags: []*elb.Tag{
						{
							Key:   aws.String("kubernetes.io/cluster/test-cluster"),
							Value: aws.String("owned"),
						},
						{
							Key:   aws.String("kubernetes.io/service-name"),
							Value: aws.String("hello-world-2"),
						},
					},
				},
			},
		},
		{
			description: "multiple load balancers some not matching",
			obj:         customObject,
			expectedState: &LoadBalancerState{
				LoadBalancerNames: []string{
					"test-elb",
					"test-elb-2",
				},
			},
			loadBalancers: []LoadBalancerMock{
				{
					loadBalancerName: "test-elb",
					loadBalancerTags: []*elb.Tag{
						{
							Key:   aws.String("kubernetes.io/cluster/test-cluster"),
							Value: aws.String("owned"),
						},
						{
							Key:   aws.String("kubernetes.io/service-name"),
							Value: aws.String("hello-world"),
						},
					},
				},
				{
					loadBalancerName: "test-elb-2",
					loadBalancerTags: []*elb.Tag{
						{
							Key:   aws.String("kubernetes.io/cluster/test-cluster"),
							Value: aws.String("owned"),
						},
						{
							Key:   aws.String("kubernetes.io/service-name"),
							Value: aws.String("hello-world-2"),
						},
					},
				},
				{
					loadBalancerName: "test-elb-3",
					loadBalancerTags: []*elb.Tag{
						{
							Key:   aws.String("kubernetes.io/cluster/another-cluster"),
							Value: aws.String("owned"),
						},
						{
							Key:   aws.String("kubernetes.io/service-name"),
							Value: aws.String("hello-world-2"),
						},
					},
				},
			},
		},
		{
			description: "missing service tag",
			obj:         customObject,
			expectedState: &LoadBalancerState{
				LoadBalancerNames: []string{},
			},
			loadBalancers: []LoadBalancerMock{
				{
					loadBalancerName: "test-elb",
					loadBalancerTags: []*elb.Tag{
						{
							Key:   aws.String("kubernetes.io/cluster/test-cluster"),
							Value: aws.String("owned"),
						},
					},
				},
			},
		},
	}
	var err error
	var newResource *Resource
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			c := Config{
				Logger: microloggertest.New(),
			}
			newResource, err = New(c)
			if err != nil {
				t.Error("expected", nil, "got", err)
			}
			// Wire the mocked ELB client into the controller context the
			// resource reads its AWS clients from.
			awsClients := awsclient.Clients{
				ELB: &ELBClientMock{
					loadBalancers: tc.loadBalancers,
				},
			}
			ctx := context.TODO()
			cc := controllercontext.Context{
				Client: controllercontext.ContextClient{
					TenantCluster: controllercontext.ContextClientTenantCluster{
						AWS: awsClients,
					},
				},
			}
			ctx = controllercontext.NewContext(ctx, cc)
			result, err := newResource.clusterLoadBalancers(ctx, tc.obj)
			if err != nil {
				t.Errorf("unexpected error %v", err)
			}
			if !reflect.DeepEqual(result, tc.expectedState) {
				t.Errorf("expected current state '%#v', got '%#v'", tc.expectedState, result)
			}
		})
	}
}
// Test_splitLoadBalancers verifies that splitLoadBalancers partitions a list
// of ELB names into chunks of at most chunkSize, covering empty input, a
// single under-sized batch, even splits, and a trailing partial chunk.
func Test_splitLoadBalancers(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name              string
		loadBalancerNames []*string
		chunkSize         int
		expectedChunks    [][]*string
	}{
		{
			name:              "case 0: empty lb names returns empty chunks",
			loadBalancerNames: []*string{},
			chunkSize:         20,
			expectedChunks:    [][]*string{},
		},
		{
			name: "case 1: single batch",
			loadBalancerNames: []*string{
				aws.String("lb-1"),
				aws.String("lb-2"),
			},
			chunkSize: 20,
			expectedChunks: [][]*string{{
				aws.String("lb-1"),
				aws.String("lb-2"),
			}},
		},
		{
			name: "case 2: multiple even chunks",
			loadBalancerNames: []*string{
				aws.String("lb-1"),
				aws.String("lb-2"),
				aws.String("lb-3"),
				aws.String("lb-4"),
				aws.String("lb-5"),
				aws.String("lb-6"),
			},
			chunkSize: 2,
			expectedChunks: [][]*string{
				{
					aws.String("lb-1"),
					aws.String("lb-2"),
				},
				{
					aws.String("lb-3"),
					aws.String("lb-4"),
				},
				{
					aws.String("lb-5"),
					aws.String("lb-6"),
				},
			},
		},
		{
			name: "case 3: multiple chunks of different sizes",
			loadBalancerNames: []*string{
				aws.String("lb-1"),
				aws.String("lb-2"),
				aws.String("lb-3"),
				aws.String("lb-4"),
				aws.String("lb-5"),
				aws.String("lb-6"),
				aws.String("lb-7"),
			},
			chunkSize: 3,
			expectedChunks: [][]*string{
				{
					aws.String("lb-1"),
					aws.String("lb-2"),
					aws.String("lb-3"),
				},
				{
					aws.String("lb-4"),
					aws.String("lb-5"),
					aws.String("lb-6"),
				},
				{
					aws.String("lb-7"),
				},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// DeepEqual compares the pointed-to strings, so fresh
			// aws.String pointers in the expectation are fine.
			result := splitLoadBalancers(tc.loadBalancerNames, tc.chunkSize)
			if !reflect.DeepEqual(result, tc.expectedChunks) {
				t.Fatalf("chunks == %#v, want %#v", result, tc.expectedChunks)
			}
		})
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
)
func main() {
	res, err := http.Get("http://127.0.0.1:9090/query?name=zansan&age=10")
	if err != nil {
		fmt.Println(err)
		return
	}
	// The response body must be closed so the connection can be released;
	// the original leaked it.
	defer res.Body.Close()
	// Read the data the server returned from res.
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(b))
	// Build a second request URL with properly encoded query parameters.
	data := url.Values{}
	urlObj, err := url.Parse("http://127.0.0.1:9090/query")
	if err != nil {
		// Previously ignored; a parse failure would have caused a nil
		// dereference below.
		fmt.Println(err)
		return
	}
	data.Set("name", "周林")
	data.Set("age", "100")
	// Encode the request parameters.
	queryStr := data.Encode()
	urlObj.RawQuery = queryStr
	req, err := http.NewRequest("GET", urlObj.String(), nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	// NOTE(review): the request is only printed, never sent — presumably
	// demo code; confirm before wiring it to a client.
	fmt.Println(req)
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/parser/mysql"
"github.com/prometheus/client_golang/prometheus"
)
// server metrics vars: label-bound views of the shared TiDB metrics,
// pre-resolved once in InitMetricsVars so hot paths avoid WithLabelValues.
var (
	QueryTotalCountOk                 []prometheus.Counter // indexed by mysql command byte, "OK" outcome
	QueryTotalCountErr                []prometheus.Counter // indexed by mysql command byte, "Error" outcome
	DisconnectNormal                  prometheus.Counter
	DisconnectByClientWithError       prometheus.Counter
	DisconnectErrorUndetermined       prometheus.Counter
	ConnIdleDurationHistogramNotInTxn prometheus.Observer
	ConnIdleDurationHistogramInTxn    prometheus.Observer
	AffectedRowsCounterInsert         prometheus.Counter
	AffectedRowsCounterUpdate         prometheus.Counter
	AffectedRowsCounterDelete         prometheus.Counter
	AffectedRowsCounterReplace        prometheus.Counter
	ReadPacketBytes                   prometheus.Counter
	WritePacketBytes                  prometheus.Counter
)
// init resolves the metric views at package load time.
func init() {
	InitMetricsVars()
}
// InitMetricsVars init server metrics vars.
func InitMetricsVars() {
	// Index-keyed slice literals: each slice is sized by the largest mysql
	// command constant used, with the counter stored at that command's index.
	QueryTotalCountOk = []prometheus.Counter{
		mysql.ComSleep:            metrics.QueryTotalCounter.WithLabelValues("Sleep", "OK"),
		mysql.ComQuit:             metrics.QueryTotalCounter.WithLabelValues("Quit", "OK"),
		mysql.ComInitDB:           metrics.QueryTotalCounter.WithLabelValues("InitDB", "OK"),
		mysql.ComQuery:            metrics.QueryTotalCounter.WithLabelValues("Query", "OK"),
		mysql.ComPing:             metrics.QueryTotalCounter.WithLabelValues("Ping", "OK"),
		mysql.ComFieldList:        metrics.QueryTotalCounter.WithLabelValues("FieldList", "OK"),
		mysql.ComStmtPrepare:      metrics.QueryTotalCounter.WithLabelValues("StmtPrepare", "OK"),
		mysql.ComStmtExecute:      metrics.QueryTotalCounter.WithLabelValues("StmtExecute", "OK"),
		mysql.ComStmtFetch:        metrics.QueryTotalCounter.WithLabelValues("StmtFetch", "OK"),
		mysql.ComStmtClose:        metrics.QueryTotalCounter.WithLabelValues("StmtClose", "OK"),
		mysql.ComStmtSendLongData: metrics.QueryTotalCounter.WithLabelValues("StmtSendLongData", "OK"),
		mysql.ComStmtReset:        metrics.QueryTotalCounter.WithLabelValues("StmtReset", "OK"),
		mysql.ComSetOption:        metrics.QueryTotalCounter.WithLabelValues("SetOption", "OK"),
	}
	QueryTotalCountErr = []prometheus.Counter{
		mysql.ComSleep:            metrics.QueryTotalCounter.WithLabelValues("Sleep", "Error"),
		mysql.ComQuit:             metrics.QueryTotalCounter.WithLabelValues("Quit", "Error"),
		mysql.ComInitDB:           metrics.QueryTotalCounter.WithLabelValues("InitDB", "Error"),
		mysql.ComQuery:            metrics.QueryTotalCounter.WithLabelValues("Query", "Error"),
		mysql.ComPing:             metrics.QueryTotalCounter.WithLabelValues("Ping", "Error"),
		mysql.ComFieldList:        metrics.QueryTotalCounter.WithLabelValues("FieldList", "Error"),
		mysql.ComStmtPrepare:      metrics.QueryTotalCounter.WithLabelValues("StmtPrepare", "Error"),
		mysql.ComStmtExecute:      metrics.QueryTotalCounter.WithLabelValues("StmtExecute", "Error"),
		mysql.ComStmtFetch:        metrics.QueryTotalCounter.WithLabelValues("StmtFetch", "Error"),
		mysql.ComStmtClose:        metrics.QueryTotalCounter.WithLabelValues("StmtClose", "Error"),
		mysql.ComStmtSendLongData: metrics.QueryTotalCounter.WithLabelValues("StmtSendLongData", "Error"),
		mysql.ComStmtReset:        metrics.QueryTotalCounter.WithLabelValues("StmtReset", "Error"),
		mysql.ComSetOption:        metrics.QueryTotalCounter.WithLabelValues("SetOption", "Error"),
	}
	DisconnectNormal = metrics.DisconnectionCounter.WithLabelValues(metrics.LblOK)
	DisconnectByClientWithError = metrics.DisconnectionCounter.WithLabelValues(metrics.LblError)
	DisconnectErrorUndetermined = metrics.DisconnectionCounter.WithLabelValues("undetermined")
	ConnIdleDurationHistogramNotInTxn = metrics.ConnIdleDurationHistogram.WithLabelValues("0")
	ConnIdleDurationHistogramInTxn = metrics.ConnIdleDurationHistogram.WithLabelValues("1")
	AffectedRowsCounterInsert = metrics.AffectedRowsCounter.WithLabelValues("Insert")
	AffectedRowsCounterUpdate = metrics.AffectedRowsCounter.WithLabelValues("Update")
	AffectedRowsCounterDelete = metrics.AffectedRowsCounter.WithLabelValues("Delete")
	AffectedRowsCounterReplace = metrics.AffectedRowsCounter.WithLabelValues("Replace")
	ReadPacketBytes = metrics.PacketIOCounter.WithLabelValues("read")
	WritePacketBytes = metrics.PacketIOCounter.WithLabelValues("write")
}
|
// Package snapshot Amazon Seller Utilities API Responses
package snapshot
import (
"encoding/json"
"log"
"net/http"
)
// DownloadSnapshotError writes an error response from the API to w. It logs
// the serialized response and returns any error from encoding it to w.
func DownloadSnapshotError(w http.ResponseWriter, version int, code int, err error) error {
	apiResponse := DownloadSnapshotAPIResponse{
		Version: version,
		// An error response must not report success; the original set true.
		Success: false,
		Status:  code,
		// Results: DownloadSnapshotResponse{},
		Error: err.Error(),
	}
	apiResponseJSON, err := json.Marshal(apiResponse)
	if err != nil {
		// log.Panic logs and then panics; the explicit panic(err) that
		// followed in the original was unreachable duplication.
		log.Panic(err)
	}
	log.Output(1, string(apiResponseJSON))
	return json.NewEncoder(w).Encode(apiResponse)
}
|
package routers
import (
"github.com/genshen/ssh-web-console/src/controllers"
"github.com/genshen/ssh-web-console/src/controllers/files"
"github.com/genshen/ssh-web-console/src/utils"
_ "github.com/genshen/ssh-web-console/statik"
"github.com/rakyll/statik/fs"
"log"
"net/http"
"os"
)
// Run modes recognized in the site configuration.
const (
	RunModeDev  = "dev"  // development: static files and views served separately
	RunModeProd = "prod" // production: everything served from the embedded statik FS
)
// Register wires all HTTP handlers onto the default mux: static assets,
// views, and the API/WebSocket endpoints (optionally under a configured
// API prefix).
func Register() {
	// serve static files
	// In dev mode, resource files (for example /static/*) and views (for example /index.html) are served separately.
	// In production mode, resource files and views are served by statikFS (for example /*).
	if utils.Config.Site.RunMode == RunModeDev {
		if utils.Config.Dev.StaticPrefix == utils.Config.Dev.ViewsPrefix {
			log.Fatal(`static prefix and views prefix can not be the same, check your config.`)
			return
		}
		// serve resource files
		if utils.Config.Dev.StaticRedirect == "" {
			// serve locally, with directory listings disabled
			localFile := justFilesFilesystem{http.Dir(utils.Config.Dev.StaticDir)}
			http.Handle(utils.Config.Dev.StaticPrefix, http.StripPrefix(utils.Config.Dev.StaticPrefix, http.FileServer(localFile)))
		} else {
			// serve by redirection to an external static host
			http.HandleFunc(utils.Config.Dev.StaticPrefix, func(writer http.ResponseWriter, req *http.Request) {
				http.Redirect(writer, req, utils.Config.Dev.StaticRedirect+req.URL.Path, http.StatusMovedPermanently)
			})
		}
		// serve views files.
		utils.MemStatic(utils.Config.Dev.ViewsDir)
		http.HandleFunc(utils.Config.Dev.ViewsPrefix, func(w http.ResponseWriter, r *http.Request) {
			utils.ServeHTTP(w, r) // serve soft static files.
		})
	} else {
		statikFS, err := fs.New()
		if err != nil {
			log.Fatal(err)
		}
		http.Handle(utils.Config.Prod.StaticPrefix, http.StripPrefix(utils.Config.Prod.StaticPrefix, http.FileServer(statikFS)))
	}
	// set api prefix depending on the run mode
	apiPrefix := ""
	if utils.Config.Site.RunMode == RunModeDev && utils.Config.Dev.ApiPrefix != "" {
		apiPrefix = utils.Config.Dev.ApiPrefix
	}
	if utils.Config.Site.RunMode == RunModeProd && utils.Config.Prod.ApiPrefix != "" {
		apiPrefix = utils.Config.Prod.ApiPrefix
	}
	if apiPrefix == "" {
		log.Println("api serving at endpoint `/`")
	} else {
		log.Printf("api serving at endpoint `%s`", apiPrefix)
	}
	bct := utils.Config.SSH.BufferCheckerCycleTime
	// api endpoints; all but signin go through the auth pre-checker
	http.HandleFunc(apiPrefix+"/api/signin", controllers.SignIn)
	http.HandleFunc(apiPrefix+"/api/sftp/upload", controllers.AuthPreChecker(files.FileUpload{}))
	http.HandleFunc(apiPrefix+"/api/sftp/ls", controllers.AuthPreChecker(files.List{}))
	http.HandleFunc(apiPrefix+"/api/sftp/dl", controllers.AuthPreChecker(files.Download{}))
	http.HandleFunc(apiPrefix+"/ws/ssh", controllers.AuthPreChecker(controllers.NewSSHWSHandle(bct)))
	http.HandleFunc(apiPrefix+"/ws/sftp", controllers.AuthPreChecker(files.SftpEstablish{}))
}
/*
 * justFilesFilesystem wraps an http.FileSystem to disable directory index
 * listings; code from https://groups.google.com/forum/#!topic/golang-nuts/bStLPdIVM6w
 */
type justFilesFilesystem struct {
	fs http.FileSystem
}

// Open opens the named file and wraps it so directory reads return nothing.
func (fs justFilesFilesystem) Open(name string) (http.File, error) {
	file, err := fs.fs.Open(name)
	if err != nil {
		return nil, err
	}
	return neuteredReaddirFile{file}, nil
}
// neuteredReaddirFile is an http.File whose directory listing is always
// empty, which stops http.FileServer from rendering an index page.
type neuteredReaddirFile struct {
	http.File
}

// Readdir reports no directory entries regardless of count.
func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) {
	return nil, nil
}
|
package redis
import (
"encoding/json"
"fmt"
"log"
"time"
"github.com/hailongz/kk-lib/dynamic"
"github.com/hailongz/kk-logic/logic"
"gopkg.in/redis.v5"
)
// Redis bundles a connected client with the key prefix configured for it.
type Redis struct {
	client *redis.Client
	prefix string
}

// RedisOpenLogic opens (and caches) a Redis connection and publishes it into
// the context under a configurable name.
type RedisOpenLogic struct {
	logic.Logic
	redis *Redis // lazily created on first Exec, reused afterwards
}
// Recycle closes the cached Redis client, if one was opened.
func (L *RedisOpenLogic) Recycle() {
	if L.redis != nil {
		L.redis.client.Close()
	}
}
// Exec opens a Redis connection on first use (reading addr/password/db/prefix
// from the logic parameters), verifies it with PING, caches it on the logic,
// and publishes it as a context global under the configured name.
func (L *RedisOpenLogic) Exec(ctx logic.IContext, app logic.IApp) error {
	L.Logic.Exec(ctx, app)
	name := dynamic.StringValue(L.Get(ctx, app, "name"), "redis")
	if L.redis == nil {
		log.Println("[REDIS] [OPEN]")
		prefix := dynamic.StringValue(L.Get(ctx, app, "prefix"), "")
		addr := dynamic.StringValue(L.Get(ctx, app, "addr"), "127.0.0.1:6379")
		pwd := dynamic.StringValue(L.Get(ctx, app, "password"), "")
		db := dynamic.IntValue(L.Get(ctx, app, "db"), 0)
		v := redis.NewClient(&redis.Options{
			Addr:     addr,
			Password: pwd,     // empty means no password set
			DB:       int(db), // 0 selects the default DB
		})
		// Verify connectivity before caching the client.
		_, err := v.Ping().Result()
		if err != nil {
			if L.Has("error") {
				// An "error" branch is wired up: route the failure there.
				ctx.Set(logic.ErrorKeys, logic.GetErrorObject(err))
				return L.Done(ctx, app, "error")
			}
			return err
		}
		L.redis = &Redis{v, prefix}
	}
	ctx.SetGlobal(name, L.redis)
	return L.Done(ctx, app, "done")
}
// RedisGetLogic reads a key from a previously opened Redis connection and
// stores the value (optionally JSON-decoded) in the context result.
type RedisGetLogic struct {
	logic.Logic
}

// Exec looks up the named Redis handle in the context, GETs the configured
// key, and sets the result — decoded from JSON when type is "json", raw text
// otherwise. A missing or mistyped handle is reported as an error.
func (L *RedisGetLogic) Exec(ctx logic.IContext, app logic.IApp) error {
	L.Logic.Exec(ctx, app)
	name := dynamic.StringValue(L.Get(ctx, app, "name"), "redis")
	ttype := dynamic.StringValue(L.Get(ctx, app, "type"), "text")
	key := dynamic.StringValue(L.Get(ctx, app, "key"), "")
	v := ctx.Get([]string{name})
	if v == nil {
		return L.Error(ctx, app, logic.NewError(logic.ERROR_UNKNOWN, fmt.Sprintf("未找到 Redis [%s]", name)))
	}
	r, ok := v.(*Redis)
	if !ok {
		return L.Error(ctx, app, logic.NewError(logic.ERROR_UNKNOWN, fmt.Sprintf("未找到 Redis [%s]", name)))
	}
	vv, err := r.client.Get(key).Result()
	if err != nil {
		return L.Error(ctx, app, err)
	}
	if ttype == "json" {
		// Decode the stored JSON into a generic value for the result.
		v = nil
		err = json.Unmarshal([]byte(vv), &v)
		if err != nil {
			return L.Error(ctx, app, err)
		}
		ctx.Set(logic.ResultKeys, v)
	} else {
		ctx.Set(logic.ResultKeys, vv)
	}
	return L.Done(ctx, app, "done")
}
// RedisSetLogic writes a value (optionally JSON-encoded) to a key on a
// previously opened Redis connection, with an optional expiry in seconds.
type RedisSetLogic struct {
	logic.Logic
}

// Exec looks up the named Redis handle and SETs the configured key. A missing
// or mistyped handle is treated as a silent no-op ("done"), unlike Get which
// reports an error.
func (L *RedisSetLogic) Exec(ctx logic.IContext, app logic.IApp) error {
	L.Logic.Exec(ctx, app)
	name := dynamic.StringValue(L.Get(ctx, app, "name"), "redis")
	ttype := dynamic.StringValue(L.Get(ctx, app, "type"), "text")
	key := dynamic.StringValue(L.Get(ctx, app, "key"), "")
	expires := dynamic.IntValue(L.Get(ctx, app, "expires"), 0)
	value := L.Get(ctx, app, "value")
	v := ctx.Get([]string{name})
	if v == nil {
		return L.Done(ctx, app, "done")
	}
	r, ok := v.(*Redis)
	if !ok {
		return L.Done(ctx, app, "done")
	}
	var vv string = ""
	if ttype == "json" {
		// NOTE(review): the Marshal error is deliberately(?) ignored — an
		// unmarshalable value stores an empty string. Confirm intended.
		b, _ := json.Marshal(value)
		vv = string(b)
	} else {
		vv = dynamic.StringValue(value, "")
	}
	_, err := r.client.Set(key, vv, time.Duration(expires)*time.Second).Result()
	if err != nil {
		return L.Error(ctx, app, err)
	}
	return L.Done(ctx, app, "done")
}
// RedisDelLogic deletes a key on a previously opened Redis connection.
type RedisDelLogic struct {
	logic.Logic
}

// Exec looks up the named Redis handle and DELs the configured key. A missing
// or mistyped handle is treated as a silent no-op ("done").
func (L *RedisDelLogic) Exec(ctx logic.IContext, app logic.IApp) error {
	L.Logic.Exec(ctx, app)
	name := dynamic.StringValue(L.Get(ctx, app, "name"), "redis")
	key := dynamic.StringValue(L.Get(ctx, app, "key"), "")
	v := ctx.Get([]string{name})
	if v == nil {
		return L.Done(ctx, app, "done")
	}
	r, ok := v.(*Redis)
	if !ok {
		return L.Done(ctx, app, "done")
	}
	_, err := r.client.Del(key).Result()
	if err != nil {
		return L.Error(ctx, app, err)
	}
	return L.Done(ctx, app, "done")
}
// init registers the four Redis logic nodes with the logic library under
// their "kk.Logic.Redis.*" type names.
func init() {
	logic.Openlib("kk.Logic.Redis.Open", func(object interface{}) logic.ILogic {
		v := RedisOpenLogic{}
		v.Init(object)
		return &v
	})
	logic.Openlib("kk.Logic.Redis.Get", func(object interface{}) logic.ILogic {
		v := RedisGetLogic{}
		v.Init(object)
		return &v
	})
	logic.Openlib("kk.Logic.Redis.Set", func(object interface{}) logic.ILogic {
		v := RedisSetLogic{}
		v.Init(object)
		return &v
	})
	logic.Openlib("kk.Logic.Redis.Del", func(object interface{}) logic.ILogic {
		v := RedisDelLogic{}
		v.Init(object)
		return &v
	})
}
|
package main
import "fmt"
// main demonstrates integer literals in different bases and the fmt verbs
// that print an int in decimal, binary, octal and hexadecimal.
func main() {
	// Decimal literal, printed in four bases.
	dec := 101
	fmt.Printf("%d\n", dec)
	fmt.Printf("%b\n", dec) // binary
	fmt.Printf("%o\n", dec) // octal
	fmt.Printf("%x\n", dec) // hexadecimal
	// Octal literal (leading 0); %d prints its decimal value.
	oct := 077
	fmt.Printf("%d\n", oct)
	// Hexadecimal literal (leading 0x); %d prints its decimal value.
	hex := 0x123
	fmt.Printf("%d\n", hex)
	// %T reports the variable's type.
	fmt.Printf("i3 type is:%T\n", hex)
	// Explicitly sized integer type.
	narrow := int8(9)
	fmt.Printf("i4 type is:%T\n", narrow)
}
|
package channelserver
import (
"fmt"
"strings"
"github.com/Andoryuuta/Erupe/network/binpacket"
"github.com/Andoryuuta/Erupe/network/mhfpacket"
"github.com/Andoryuuta/byteframe"
)
// MSG_SYS_CAST[ED]_BINARY types enum (values of the packet's Type1 field).
const (
	BinaryMessageTypeState = 0 // state updates (used below for the !tele position packet)
	BinaryMessageTypeChat  = 1 // chat messages
	BinaryMessageTypeEmote = 6 // emotes
)
// MSG_SYS_CAST[ED]_BINARY broadcast types enum (values of the packet's Type0 field).
const (
	BroadcastTypeTargeted = 0x01 // sent only to the character IDs listed in the payload
	BroadcastTypeStage    = 0x03 // sent to everyone in the sender's stage
	BroadcastTypeWorld    = 0x0a // sent to everyone on the server
)
// sendServerChatMessage sends a chat message to the given session as the
// server user "Erupe", wrapped in a casted-binary chat packet.
func sendServerChatMessage(s *Session, message string) {
	// Make the inside of the casted binary
	bf := byteframe.NewByteFrame()
	// Casted binaries are little-endian on the wire (see handleMsgSysCastBinary).
	bf.SetLE()
	msgBinChat := &binpacket.MsgBinChat{
		Unk0:       0,
		Type:       5,
		Flags:      0x80,
		Message:    message,
		SenderName: "Erupe",
	}
	msgBinChat.Build(bf)
	castedBin := &mhfpacket.MsgSysCastedBinary{
		CharID:         s.charID,
		Type1:          BinaryMessageTypeChat,
		RawDataPayload: bf.Data(),
	}
	s.QueueSendMHF(castedBin)
}
// handleMsgSysCastBinary relays a client's casted-binary packet to the
// recipients selected by its broadcast type (Type0), then, for chat
// payloads, handles server-side chat features (Discord mirroring and the
// "!tele" teleport command).
func handleMsgSysCastBinary(s *Session, p mhfpacket.MHFPacket) {
	pkt := p.(*mhfpacket.MsgSysCastBinary)
	// Parse out the real casted binary payload
	var realPayload []byte
	var msgBinTargeted *binpacket.MsgBinTargeted
	if pkt.Type0 == BroadcastTypeTargeted {
		// Targeted casts wrap the payload with a list of target character IDs.
		bf := byteframe.NewByteFrameFromBytes(pkt.RawDataPayload)
		msgBinTargeted = &binpacket.MsgBinTargeted{}
		err := msgBinTargeted.Parse(bf)
		if err != nil {
			s.logger.Warn("Failed to parse targeted cast binary")
			return
		}
		realPayload = msgBinTargeted.RawDataPayload
	} else {
		realPayload = pkt.RawDataPayload
	}
	// Make the response to forward to the other client(s).
	resp := &mhfpacket.MsgSysCastedBinary{
		CharID:         s.charID,
		Type0:          pkt.Type0, // (The client never uses Type0 upon receiving)
		Type1:          pkt.Type1,
		RawDataPayload: realPayload,
	}
	// Send to the proper recipients.
	switch pkt.Type0 {
	case BroadcastTypeWorld:
		s.server.BroadcastMHF(resp, s)
	case BroadcastTypeStage:
		s.stage.BroadcastMHF(resp, s)
	case BroadcastTypeTargeted:
		for _, targetID := range (*msgBinTargeted).TargetCharIDs {
			char := s.server.FindSessionByCharID(targetID)
			if char != nil {
				char.QueueSendMHF(resp)
			}
		}
	default:
		// Unknown broadcast type: fall back to the sender's stage, if any.
		// The session lock guards the s.stage read.
		s.Lock()
		haveStage := s.stage != nil
		if haveStage {
			s.stage.BroadcastMHF(resp, s)
		}
		s.Unlock()
	}
	// Handle chat
	if pkt.Type1 == BinaryMessageTypeChat {
		bf := byteframe.NewByteFrameFromBytes(realPayload)
		// IMPORTANT! Casted binary objects are sent _as they are in memory_,
		// this means little endian for LE CPUs, might be different for PS3/PS4/PSP/XBOX.
		bf.SetLE()
		chatMessage := &binpacket.MsgBinChat{}
		chatMessage.Parse(bf)
		fmt.Printf("Got chat message: %+v\n", chatMessage)
		// Discord integration
		if s.server.erupeConfig.Discord.Enabled {
			message := fmt.Sprintf("%s: %s", chatMessage.SenderName, chatMessage.Message)
			s.server.discordSession.ChannelMessageSend(s.server.erupeConfig.Discord.ChannelID, message)
		}
		// "!tele X Y" chat command: send the sender a state packet moving them.
		if strings.HasPrefix(chatMessage.Message, "!tele ") {
			var x, y int16
			n, err := fmt.Sscanf(chatMessage.Message, "!tele %d %d", &x, &y)
			if err != nil || n != 2 {
				sendServerChatMessage(s, "Invalid command. Usage:\"!tele 500 500\"")
			} else {
				sendServerChatMessage(s, fmt.Sprintf("Teleporting to %d %d", x, y))
				// Make the inside of the casted binary
				payload := byteframe.NewByteFrame()
				payload.SetLE()
				payload.WriteUint8(2)  // SetState type(position == 2)
				payload.WriteInt16(x)  // X
				payload.WriteInt16(y)  // Y
				payloadBytes := payload.Data()
				s.QueueSendMHF(&mhfpacket.MsgSysCastedBinary{
					CharID:         s.charID,
					Type1:          BinaryMessageTypeState,
					RawDataPayload: payloadBytes,
				})
			}
		}
	}
}
func handleMsgSysCastedBinary(s *Session, p mhfpacket.MHFPacket) {}
|
// Implement the dining philosopher’s problem with the following constraints/modifications.
//
// 1. There should be 5 philosophers sharing chopsticks, with one chopstick between each adjacent pair of philosophers.
// 2. Each philosopher should eat only 3 times (not in an infinite loop as we did in lecture)
// 3. The philosophers pick up the chopsticks in any order, not lowest-numbered first (which we did in lecture).
// 4. In order to eat, a philosopher must get permission from a host which executes in its own goroutine.
// 5. The host allows no more than 2 philosophers to eat concurrently.
// 6. Each philosopher is numbered, 1 through 5.
// 7. When a philosopher starts eating (after it has obtained necessary locks) it prints “starting to eat <number>”
// on a line by itself, where <number> is the number of the philosopher.
// 8. When a philosopher finishes eating (before it has released its locks) it prints “finishing eating <number>”
// on a line by itself, where <number> is the number of the philosopher.
package main
import (
"fmt"
"sync"
)
// Chopstick is a shared utensil placed between adjacent philosophers.
type Chopstick struct {
}

// Each philosopher is numbered, 1 through 5
type Philosopher struct {
	num int
}

// pool holds the shared chopsticks.
// NOTE(review): sync.Pool.Get falls back to New when the pool is empty, so
// the number of chopsticks is not strictly capped at 5 — confirm this
// satisfies the assignment's "one chopstick between each pair" constraint.
var pool = sync.Pool{
	New: func() interface{} {
		return new(Chopstick)
	},
}

// wg waits for all 15 eat invocations (5 philosophers x 3 meals) to finish.
var wg sync.WaitGroup
// eat performs one meal: it blocks until the host grants a permission
// token, picks up two chopsticks from the pool, prints the required
// start/finish lines, and returns the token so another philosopher may eat.
func (p Philosopher) eat(host chan int) {
	defer wg.Done()
	// get permission from the host
	<-host
	fmt.Printf("starting to eat %d\n", p.num)
	// pick up chopsticks in any order
	left := pool.Get()
	right := pool.Get()
	// then return the chopsticks
	pool.Put(left)
	pool.Put(right)
	fmt.Printf("finishing eating %d\n", p.num)
	// Hand the permission token back to the host channel.
	host <- 1
}
// main wires up the dining philosophers: a host channel with two permission
// tokens bounds concurrent eaters at 2, five philosophers each eat 3 times.
func main() {
	// host allows no more than 2 philosophers to eat concurrently
	host := make(chan int, 2)
	// initialize chopsticks pool
	for i := 0; i < 5; i++ {
		pool.Put(new(Chopstick))
	}
	// initialize philosophers
	philosophers := make([]*Philosopher, 5)
	for i := 0; i < 5; i++ {
		// each philosopher is numbered, 1 through 5
		philosophers[i] = &Philosopher{i + 1}
	}
	for i := 0; i < 5; i++ {
		for j := 0; j < 3; j++ { // only eat 3 times
			wg.Add(1)
			go philosophers[i].eat(host)
		}
	}
	// Seed the two permission tokens; each eater returns its token when done.
	host <- 1
	host <- 1
	wg.Wait()
}
|
package main
import (
"github.com/beego/beego/v2/client/orm/migration"
)
// DO NOT MODIFY
// User_20210628_221252 is an auto-generated beego migration (timestamp 2021-06-28 22:12:52).
type User_20210628_221252 struct {
	migration.Migration
}

// DO NOT MODIFY
// init registers this migration with the beego migration runner under its timestamped name.
func init() {
	m := &User_20210628_221252{}
	m.Created = "20210628_221252"
	migration.Register("User_20210628_221252", m)
}
// Run the migrations
// Up widens user.username to varchar(100) with a utf8mb4 charset.
func (m *User_20210628_221252) Up() {
	// use m.SQL("CREATE TABLE ...") to make schema update
	m.SQL("ALTER TABLE nomadiclife.`user` MODIFY COLUMN username varchar(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NULL;")
}
// Reverse the migrations
// Down is intentionally empty: the column-type change made in Up is not reverted.
func (m *User_20210628_221252) Down() {
	// use m.SQL("DROP TABLE ...") to reverse schema update
}
|
package spatial
import (
"bytes"
"encoding/binary"
)
type MultiPolygonData []PolygonData
// decodeFrom reads a uint32 polygon count followed by that many polygons
// (each without its own SRID), replacing the receiver's contents.
func (mpd *MultiPolygonData) decodeFrom(data *bytes.Reader) error {
	var length uint32
	if err := binary.Read(data, byteOrder, &length); nil != err {
		return err
	}
	*mpd = make([]PolygonData, length)
	// A scratch Polygon decodes each member; only its Data is retained.
	var p Polygon
	for i := uint32(0); i < length; i++ {
		if err := p.decodeFrom(data, false); nil != err {
			return err
		}
		(*mpd)[i] = p.Data
	}
	return nil
}
// encodeTo appends the polygon count followed by each member polygon,
// each preceded by a POLYGON sub-geometry header without an SRID.
func (mpd *MultiPolygonData) encodeTo(data *bytes.Buffer) {
	length := uint32(len(*mpd))
	binary.Write(data, byteOrder, length)
	for i := uint32(0); i < length; i++ {
		// NOTE(review): method expression invoked with a nil receiver —
		// presumably encodeHeaderTo does not touch its receiver when the
		// SRID is not encoded; confirm before changing this call.
		(*baseGeometry).encodeHeaderTo(nil, data, false, GEOMETRY_TYPE_POLYGON)
		(*mpd)[i].encodeTo(data)
	}
}
// MultiPolygon is a geometry consisting of zero or more polygons.
type MultiPolygon struct {
	baseGeometry
	Data MultiPolygonData
}

// NewMultiPolygon returns an empty MultiPolygon with the given SRID.
func NewMultiPolygon(srid Srid) *MultiPolygon {
	return &MultiPolygon{baseGeometry: baseGeometry{srid: srid}}
}
// Decode parses a serialized multi-polygon (including its SRID) into mp.
func (mp *MultiPolygon) Decode(data []byte) error {
	return mp.decodeFrom(bytes.NewReader(data), true)
}

// Encode serializes mp, including its SRID, and returns the raw bytes.
func (mp *MultiPolygon) Encode() []byte {
	data := newEncodeBuffer()
	mp.encodeTo(data, true)
	return data.Bytes()
}
// decodeFrom validates the MULTI_POLYGON header (optionally with SRID)
// and then decodes the member polygons into mp.Data.
func (mp *MultiPolygon) decodeFrom(data *bytes.Reader, decodeSrid bool) error {
	if _, err := mp.decodeHeaderFrom(data, decodeSrid, GEOMETRY_TYPE_MULTI_POLYGON); nil != err {
		return err
	}
	return mp.Data.decodeFrom(data)
}

// encodeTo writes the MULTI_POLYGON header (optionally with SRID) and the member polygons.
func (mp *MultiPolygon) encodeTo(data *bytes.Buffer, encodeSrid bool) {
	mp.encodeHeaderTo(data, encodeSrid, GEOMETRY_TYPE_MULTI_POLYGON)
	mp.Data.encodeTo(data)
}
|
package handler
import (
"net/http"
"github.com/chonla/oddsvr-api/jwt"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/labstack/echo"
)
// Me returns the authenticated athlete's profile, augmented with their
// activity summary. The athlete ID comes from the JWT claims attached to
// the request context by the auth middleware.
func (h *Handler) Me(c echo.Context) error {
	user := c.Get("user").(*jwtgo.Token)
	claims := user.Claims.(*jwt.Claims)
	uid := claims.ID
	me, e := h.vr.Profile(uid)
	if e != nil {
		return c.JSON(http.StatusInternalServerError, e)
	}
	stats, e := h.vr.AthleteSummary(me.ID)
	if e != nil {
		return c.JSON(http.StatusInternalServerError, e)
	}
	me.Stats = stats
	return c.JSON(http.StatusOK, me)
}
|
package ordersprices
import (
"database/sql"
"fmt"
. "go-sugar/config"
. "go-sugar/db"
)
// OrderPrice main struct: a row of the orders_prices join table linking an
// order to a user and a price (both nullable).
type OrderPrice struct {
	OrderID int
	UserID  NullInt64
	PriceID NullInt64
}

// Repo entity
var Repo = Repository{tableName: Config.DB.Schema + ".orders_prices"}
// parseRows scans every row of the result set into OrderPrice values.
// Rows that fail to scan are logged and skipped rather than aborting.
func parseRows(rows *sql.Rows) []OrderPrice {
	var prices []OrderPrice
	for rows.Next() {
		price, err := parseRow(rows)
		if err != nil {
			fmt.Println("Parse Error")
			fmt.Println(err)
			continue
		}
		prices = append(prices, price)
	}
	return prices
}
// parseRow scans the current row into an OrderPrice.
// Column order must be: order_id, user_id, price_id.
func parseRow(row *sql.Rows) (OrderPrice, error) {
	p := OrderPrice{}
	err := row.Scan(&p.OrderID, &p.UserID, &p.PriceID)
	return p, err
}
|
package main
import (
	"io"
	"os"
	"path/filepath"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)
// sugarLogger is the process-wide sugared zap logger, built in init.
var sugarLogger *zap.SugaredLogger

// NOTE(review): this global is never assigned in this file — getWriter
// shadows it with a local — so main's `defer file.Close()` closes a
// zero-value os.File. Confirm whether it is still needed.
var file os.File
// init builds the global sugared logger: console encoding, a date-stamped
// file sink, and a level filter admitting Debug and above.
func init() {
	encoder := getEncoder()
	defaultWriter := getLogWriter("E:\\Projects\\project-github\\little-go\\practices\\log\\logs")
	core := zapcore.NewCore(
		encoder, defaultWriter, zap.LevelEnablerFunc(
			func(l zapcore.Level) bool {
				return l >= zapcore.DebugLevel
			}),
	)
	// AddCaller annotates each entry with the calling file and line.
	logger := zap.New(core, zap.AddCaller())
	sugarLogger = logger.Sugar()
}
// main logs a URL repeatedly for one minute, then flushes the logger.
func main() {
	defer sugarLogger.Sync()
	// NOTE(review): `file` is the never-assigned package global; this Close
	// operates on a zero-value os.File.
	defer file.Close()
	url := "https://azusachino.cn"
	write(&url)
}
// write logs the URL in a tight loop until a goroutine signals completion
// after one minute.
// NOTE(review): the `default` branch makes this a busy loop that logs as
// fast as possible for the full minute — presumably intentional for a
// logging stress demo; confirm.
func write(url *string) {
	done := make(chan bool)
	go func() {
		time.Sleep(time.Minute)
		done <- true
	}()
	for {
		select {
		case <-done:
			return
		default:
			sugarLogger.Infof("failed to fetch URL %s", *url)
		}
	}
}
// getEncoder returns a console encoder using ISO-8601 timestamps and
// capitalized level names (e.g. INFO) on top of zap's production defaults.
func getEncoder() zapcore.Encoder {
	cfg := zap.NewProductionEncoderConfig()
	cfg.EncodeTime = zapcore.ISO8601TimeEncoder
	cfg.EncodeLevel = zapcore.CapitalLevelEncoder
	return zapcore.NewConsoleEncoder(cfg)
}
// getLogWriter wraps the date-stamped log file at filePath as a zap WriteSyncer.
func getLogWriter(filePath string) zapcore.WriteSyncer {
	writer := getWriter(filePath)
	return zapcore.AddSync(writer)
}
// getWriter creates (truncating if present) a date-stamped log file inside
// the filePath directory and returns it as an io.Writer.
// It panics if the file cannot be created, since logging would be impossible.
func getWriter(filePath string) io.Writer {
	today := time.Now().Format("2006-01-02")
	// filepath.Join builds an OS-correct path; the previous hard-coded
	// "\\" separator produced a single mangled file name on non-Windows
	// systems instead of a file inside the directory.
	file, err := os.Create(filepath.Join(filePath, today+".log"))
	if err != nil {
		panic(err)
	}
	return file
}
// sample demonstrates zap's production logger: sugared logging (Infow/Infof)
// versus the strongly-typed structured API (zap.String, zap.Int, ...).
func sample() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	url := "https://azusachino.cn"
	sugar := logger.Sugar()
	sugar.Infow("failed to fetch URL",
		"url", "http://azusachino.cn",
		"attempt", 3)
	sugar.Infof("Failed again: %s", "abba")
	logger.Info("failed to fetch URL",
		// Structured context as strongly typed Field values.
		zap.String("url", url),
		zap.Int("attempt", 3),
		zap.Duration("backoff", time.Second),
	)
}
|
/*
Package clouddatastore provides a Cloud Datastore implementation of datastore.Client.
This package wraps the cloud.google.com/go/datastore package.
*/
package clouddatastore // import "go.mercari.io/datastore/clouddatastore"
|
package iteration
import (
"fmt"
"testing"
)
// TestRepeat verifies Repeat produces the character repeated count times.
func TestRepeat(t *testing.T) {
	repeated := Repeat("a", 3)
	expected := "aaa"
	if repeated != expected {
		t.Errorf("expected %q but got %q", expected, repeated)
	}
}
// BenchmarkRepeat measures the cost of a single small Repeat call.
func BenchmarkRepeat(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Repeat("a", 3)
	}
}
// ExampleRepeat demonstrates Repeat. The "// Output:" directive below makes
// `go test` verify the printed result; the previous `// output "aaaaaa"`
// form is not recognized by the testing package, so the example compiled
// but its output was never checked.
func ExampleRepeat() {
	repeated := Repeat("a", 6)
	fmt.Println(repeated)
	// Output: aaaaaa
}
// TestRepeatString checks that every RepeatString* implementation produces
// the same repeated string.
func TestRepeatString(t *testing.T) {
	// assertCorrectMessage fails the enclosing subtest when got != want.
	assertCorrectMessage := func(t *testing.T, got, want string) {
		t.Helper()
		if got != want {
			t.Errorf("got %q want %q", got, want)
		}
	}
	// Distinct subtest names (previously all five were "RepeatString") so a
	// failure report identifies which implementation broke.
	t.Run("RepeatString", func(t *testing.T) {
		got := RepeatString("a", 5)
		want := "aaaaa"
		assertCorrectMessage(t, got, want)
	})
	t.Run("RepeatString1", func(t *testing.T) {
		got := RepeatString1("a", 5)
		want := "aaaaa"
		assertCorrectMessage(t, got, want)
	})
	t.Run("RepeatString2", func(t *testing.T) {
		got := RepeatString2("a", 5)
		want := "aaaaa"
		assertCorrectMessage(t, got, want)
	})
	t.Run("RepeatString3", func(t *testing.T) {
		got := RepeatString3("a", 5)
		want := "aaaaa"
		assertCorrectMessage(t, got, want)
	})
	t.Run("RepeatString4", func(t *testing.T) {
		got := RepeatString4("a", 5)
		want := "aaaaa"
		assertCorrectMessage(t, got, want)
	})
}
|
package config
import (
"fmt"
"github.com/spf13/viper"
)
// Configurations is the root of the application's YAML configuration.
type Configurations struct {
	Multi  bool // whether multiple instances are run — TODO confirm exact semantics
	Broker BrokerConfig
	Actor  ActorConfig
}

// BrokerConfig is the message broker endpoint.
type BrokerConfig struct {
	Hostname string
	Port     int
}

// ActorConfig is the actor endpoint and the services it exposes.
type ActorConfig struct {
	Hostname string
	Port     int
	Services []string `yaml:"services"`
}
// LoadConfig reads a YAML configuration file via viper and unmarshals it
// into a Configurations value.
//
// configPath[0] is the directory to search; the optional configPath[1]
// overrides the default config file base name ("config").
func LoadConfig(configPath ...string) (configuration Configurations, err error) {
	if len(configPath) == 0 {
		// Previously configPath[0] was indexed unconditionally, panicking
		// with "index out of range" when called with no arguments.
		err = fmt.Errorf("LoadConfig: no config path provided")
		return
	}
	configName := "config"
	if len(configPath) > 1 {
		configName = configPath[1]
	}
	viper.SetConfigName(configName)
	viper.SetConfigType("yml")
	viper.AddConfigPath(configPath[0])
	if err = viper.ReadInConfig(); err != nil {
		return
	}
	if err = viper.Unmarshal(&configuration); err != nil {
		fmt.Printf("Unable to decode into struct, %v", err)
	}
	return
}
|
package order
import (
"context"
"errors"
)
// Sentinel errors returned by Service implementations.
var (
	ErrOrderNotFound   = errors.New("order not found")
	ErrCmdRepository   = errors.New("unable to command repository")
	ErrQueryRepository = errors.New("unable to query repository")
)

// Service describes the Order service.
type Service interface {
	// Create stores a new order and returns its generated ID.
	Create(ctx context.Context, order Order) (string, error)
	// GetByID fetches a single order by its ID.
	GetByID(ctx context.Context, id string) (Order, error)
	// ChangeStatus updates the status of an existing order.
	ChangeStatus(ctx context.Context, id string, status string) error
}
|
package streak
import (
"errors"
"net/http"
"regexp"
"strconv"
"time"
"github.com/PuerkitoBio/goquery"
)
// Streak with count and the dates of the streak
type Streak struct {
	From  time.Time `json:"from"` // first day of the streak
	To    time.Time `json:"to"`   // last day of the streak
	Count int       `json:"count"` // number of consecutive contribution days
}

// Client with http Client for dependency injection
type Client struct {
	Client  *http.Client
	BaseURL string // e.g. GitHub's base URL; overridable for tests
}
// getStreakFromCalendar walks the day cells of a GitHub contribution
// calendar and returns (current streak, longest streak, error).
// Each ".day" node is expected to carry data-count and data-date attributes.
func getStreakFromCalendar(doc *goquery.Document) (Streak, Streak, error) {
	curStreak := Streak{Count: 0}
	longestStreak := Streak{Count: 0}
	isStreak := false
	allDays := doc.Find(".day")
	allDays.Each(func(i int, days *goquery.Selection) {
		count, existsCount := days.Attr("data-count")
		date, existsDate := days.Attr("data-date")
		if !existsCount || !existsDate {
			// Malformed cell: skip it.
			return
		}
		parsedDate, err := time.Parse("2006-1-2", date)
		if count != "0" && !isStreak {
			// New streak
			curStreak.Count++
			if err == nil {
				curStreak.From = parsedDate
			}
			isStreak = true
		} else if count != "0" && isStreak {
			// Still has a streak
			curStreak.Count++
		} else if count == "0" && isStreak && allDays.Length()-1 != i {
			// Lost streak (the final day is handled separately below)
			if err == nil {
				curStreak.To = parsedDate.AddDate(0, 0, -1)
			}
			if longestStreak.Count < curStreak.Count {
				longestStreak = curStreak
			}
			curStreak = Streak{Count: 0}
			isStreak = false
		}
	})
	// Check last day
	last := allDays.Last()
	count, existsCount := last.Attr("data-count")
	lastDate, existsDate := last.Attr("data-date")
	if existsCount && existsDate {
		parsedLastDate, err := time.Parse("2006-1-2", lastDate)
		if isStreak && err == nil && count != "0" {
			// If the last day has a streak
			curStreak.To = parsedLastDate
		} else if isStreak && err == nil && count == "0" {
			// If the user hasn't commited on the last day, set current streak to the day before,
			// since it doesn't mean that the user lost that streak if the user didn't commit yet on the current day
			curStreak.To = parsedLastDate.AddDate(0, 0, -1)
		}
	}
	// If the current streak is the longest streak, then return that for longest
	if curStreak.Count > longestStreak.Count {
		return curStreak, curStreak, nil
	}
	return curStreak, longestStreak, nil
}
// getCalendarFromGitHub fetches the contribution-calendar HTML for a user
// up to (and including) the given date. It returns an error for transport
// failures and for any non-200 response.
func getCalendarFromGitHub(client Client, username string, date time.Time) (*http.Response, error) {
	resp, err := client.Client.Get(client.BaseURL + "/users/" + username + "/contributions?to=" + date.Format("2006-1-2"))
	if err != nil {
		// Bug fix: the original read resp.StatusCode before checking err,
		// dereferencing a nil *http.Response on transport errors.
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return resp, errors.New("Cannot get calendar")
	}
	return resp, nil
}
// getContributions extracts the yearly contribution total from the
// "<n> contributions" text in the calendar header; returns 0 when absent.
func getContributions(doc *goquery.Document) int {
	reg, _ := regexp.Compile(`([\d]*) contributions`)
	matchArr := reg.FindStringSubmatch(doc.Find(".f4").Text())
	if len(matchArr) < 1 {
		return 0
	}
	numOfContributionsStr := matchArr[1]
	// Reg exp match should only have numbers at this point
	numOfContributions, _ := strconv.Atoi(numOfContributionsStr)
	return numOfContributions
}
// FindStreakInPastYear returns the Current streak as the first return and the Longest streak in the second return as well as a potential error.
// It fetches the user's contribution calendar ending today, and returns an
// error when the calendar cannot be fetched, parsed, or shows no contributions.
func FindStreakInPastYear(client Client, username string) (Streak, Streak, error) {
	now := time.Now()
	resp, err := getCalendarFromGitHub(client, username, now)
	if err != nil {
		return Streak{}, Streak{}, errors.New("Cannot get calendar")
	}
	defer resp.Body.Close()
	// Load the HTML document
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return Streak{}, Streak{}, errors.New("Could not load calendar")
	}
	numOfContributions := getContributions(doc)
	if numOfContributions != 0 {
		return getStreakFromCalendar(doc)
	}
	return Streak{}, Streak{}, errors.New("No contributions")
}
|
package cmds
import (
"testing"
)
// TestReqLog exercises the request log's lifecycle: entries stay in the
// report while active, and ClearInactive removes only finished/inactive ones.
func TestReqLog(t *testing.T) {
	l := &ReqLog{}
	req1, err := NewEmptyRequest()
	if err != nil {
		t.Fatal(err)
	}
	req2, err := NewEmptyRequest()
	if err != nil {
		t.Fatal(err)
	}
	req1.Values()["foo"] = 42
	req2.Values()["bar"] = 23
	rle1 := l.Add(req1)
	rle2 := l.Add(req2)
	// Both entries are still active: nothing may be cleared.
	l.ClearInactive()
	if len(l.Report()) != 2 {
		t.Fatal("cleaned up too much")
	}
	// Deactivate one entry and finish the other.
	rle1.Active = false
	l.ClearInactive()
	l.Finish(rle2)
	if len(l.Report()) != 1 {
		t.Fatal("cleaned up too much")
	}
	// Now the finished entry must be cleared as well.
	l.ClearInactive()
	if len(l.Report()) != 0 {
		t.Fatal("cleaned up too little")
	}
}
|
//Generated TestNew
//Generated TestScanner_Next
package lex
import (
//"bufio"
"io"
//"os"
"reflect"
"testing"
)
// TestNew is a table-driven test for the Scanner constructor.
// All cases are currently commented out (they depended on a local fixture
// file), so the loop body does not run.
func TestNew(t *testing.T) {
	//file, _ := os.Open("../red/dwarf.red")
	//reader := bufio.NewReader(file)
	type args struct {
		name string
		r    io.ByteReader
	}
	tests := []struct {
		name string
		args args
		want *Scanner
	}{
		/*
			{
				name: "Test New()",
				args: args{
					name: "My Scanner",
					r:    reader,
				},
				want: &Scanner{
					r:      reader,
					name:   "My Scanner",
					line:   1,
					tokens: make(chan Token, 2),
					state:  lexAny,
				},
			},
		*/
	}
	for _, tt := range tests {
		if got := New(tt.args.name, tt.args.r); !reflect.DeepEqual(got, tt.want) {
			t.Errorf("%q. New() = %v, want %v", tt.name, got, tt.want)
		}
	}
}
// TestScanner_Next is a table-driven test for Scanner.Next.
// The case table is empty (TODO below), so the loop body does not run yet.
func TestScanner_Next(t *testing.T) {
	type fields struct {
		tokens     chan Token
		r          io.ByteReader
		done       bool
		name       string
		buf        []byte
		input      string
		leftDelim  string
		rightDelim string
		state      stateFn
		line       int
		pos        int
		start      int
		width      int
	}
	tests := []struct {
		name   string
		fields fields
		want   Token
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		// Build a Scanner directly from the field values under test.
		l := &Scanner{
			tokens:     tt.fields.tokens,
			r:          tt.fields.r,
			done:       tt.fields.done,
			name:       tt.fields.name,
			buf:        tt.fields.buf,
			input:      tt.fields.input,
			leftDelim:  tt.fields.leftDelim,
			rightDelim: tt.fields.rightDelim,
			state:      tt.fields.state,
			line:       tt.fields.line,
			pos:        tt.fields.pos,
			start:      tt.fields.start,
			width:      tt.fields.width,
		}
		if got := l.Next(); !reflect.DeepEqual(got, tt.want) {
			t.Errorf("%q. Scanner.Next() = %v, want %v", tt.name, got, tt.want)
		}
	}
}
|
package gen
import (
"io/ioutil"
"os"
"path/filepath"
"regexp"
"testing"
)
// TestMerge verifies mergeGoFiles combines several Go source files into one
// output file, preserving comments and de-duplicating imports. Each case
// writes infiles into a temp dir, merges them into out.go, then checks the
// output against positive (out) and negative (outNot) regexps.
func TestMerge(t *testing.T) {
	// NOTE(review): debug=true keeps temp dirs on disk and disables
	// t.Parallel(); presumably left on intentionally during development.
	debug := true
	type tcase struct {
		name    string
		infiles map[string]string   // file structure to start with
		out     map[string][]string // regexps to match in output files
		outNot  map[string][]string // regexps to NOT match in output files
	}
	tcList := []tcase{
		{
			name: "simple",
			infiles: map[string]string{
				"file1.go": "package main\nfunc main(){}",
				"file2.go": "package main\nvar a string",
			},
			out: map[string][]string{
				"out.go": {`func main`, `var a string`},
			},
		},
		{
			name: "comments",
			infiles: map[string]string{
				"file1.go": "package main\n// main comment here\nfunc main(){}",
				"file2.go": "package main\nvar a string // a comment here\n",
			},
			out: map[string][]string{
				"out.go": {`func main`, `// main comment here`, `var a string`, `// a comment here`},
			},
		},
		{
			name: "import-dedup",
			infiles: map[string]string{
				"file1.go": "package main\nimport \"fmt\"\n// main comment here\nfunc main(){}",
				"file2.go": "package main\nimport \"fmt\"\nvar a string // a comment here\n",
			},
			out: map[string][]string{
				"out.go": {`import "fmt"`},
			},
			outNot: map[string][]string{
				"out.go": {`(?ms)import "fmt".*import "fmt"`},
			},
		},
		{
			name: "import-dedup-2",
			infiles: map[string]string{
				"file1.go": "package main\nimport \"fmt\"\n// main comment here\nfunc main(){}",
				"file2.go": "package main\nimport \"fmt\"\nimport \"log\"\nvar a string // a comment here\n",
			},
			out: map[string][]string{
				"out.go": {`import "fmt"`, `import "log"`},
			},
			outNot: map[string][]string{
				"out.go": {`(?ms)\}.*import "log"`},
			},
		},
	}
	for _, tc := range tcList {
		tc := tc // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			tmpDir, err := ioutil.TempDir("", "TestMerge")
			if err != nil {
				t.Fatal(err)
			}
			if debug {
				t.Logf("Test %q using tmpDir: %s", tc.name, tmpDir)
			} else {
				defer os.RemoveAll(tmpDir)
				t.Parallel()
			}
			tstWriteFiles(tmpDir, tc.infiles)
			// mergeGoFiles takes paths relative to tmpDir.
			var in []string
			for k := range tc.infiles {
				// in = append(in, filepath.Join(tmpDir, k))
				in = append(in, k)
			}
			err = mergeGoFiles(tmpDir, "out.go", in...)
			if err != nil {
				t.Fatal(err)
			}
			// Positive matches: every pattern must appear in the output file.
			for fname, patterns := range tc.out {
				b, err := ioutil.ReadFile(filepath.Join(tmpDir, fname))
				if err != nil {
					t.Errorf("failed to read file %q after Run: %v", fname, err)
					continue
				}
				for _, pattern := range patterns {
					re := regexp.MustCompile(pattern)
					if !re.Match(b) {
						t.Errorf("failed to match regexp on file %q: %s", fname, pattern)
					}
				}
			}
			// Negative matches: none of these patterns may appear.
			for fname, patterns := range tc.outNot {
				b, err := ioutil.ReadFile(filepath.Join(tmpDir, fname))
				if err != nil {
					t.Errorf("failed to read file %q after Run: %v", fname, err)
					continue
				}
				for _, pattern := range patterns {
					re := regexp.MustCompile(pattern)
					if re.Match(b) {
						t.Errorf("incorrectly matched regexp on file %q: %s", fname, pattern)
					}
				}
			}
			if debug {
				outb, _ := ioutil.ReadFile(filepath.Join(tmpDir, "out.go"))
				t.Logf("OUTPUT:\n%s", outb)
			}
		})
	}
}
|
package dao
import "git.dustess.com/mk-training/mk-blog-svc/pkg/tags/model"
// FindOne returns the single tag document matching filter.
func (m *TagDao) FindOne(filter interface{}) (result model.Tag, err error) {
	err = m.dao.FindOne(m.ctx, filter).Decode(&result)
	return
}
// FindMany returns every tag document matching filter.
func (m *TagDao) FindMany(filter interface{}) (result []model.Tag, err error) {
	cursor, err := m.dao.Find(m.ctx, filter)
	if err != nil {
		// Bug fix: the original captured the Find error in a separate
		// variable `e` but returned the (still nil) named `err`, silently
		// swallowing query failures.
		return nil, err
	}
	if err = cursor.All(m.ctx, &result); err != nil {
		return nil, err
	}
	return result, nil
}
|
package executors
import (
"context"
"io"
"github.com/pkg/errors"
"github.com/projecteru2/phistage/common"
"github.com/sirupsen/logrus"
)
// PhistageExecutor executes a whole phistage (pipeline).
type PhistageExecutor interface {
	// Execute runs the phistage until completion or error.
	Execute(ctx context.Context) error
}
// JobExecutor is an executor to execute the job.
// It is designed stateful, so job is not a parameter to any methods.
// Each job is executed by an independent JobExecutor.
// The expected call order is Prepare, Execute, Cleanup.
type JobExecutor interface {
	// Prepare does the preparation phase.
	// Usually creates container / virtual machine runtime,
	// setups the environment for job to run.
	Prepare(ctx context.Context) error
	// Execute does the execution phase.
	// Usually actually executes all the steps in the job.
	Execute(ctx context.Context) error
	// Cleanup does the clean up phase.
	// Usually it does cleaning work, collects necessary artifacts,
	// and remove the container / virtual machine runtime.
	Cleanup(ctx context.Context) error
}
// ExecutorProvider is basically the factory of JobExecutor
// It can be registered with its name, which identifies the type.
type ExecutorProvider interface {
	// GetName returns the name of this ExecutorProvider.
	GetName() string
	// GetJobExecutor returns a JobExecutor with the given job and phistage,
	// all job executors in use should be generated from this method.
	// Job output is streamed to the given writer.
	GetJobExecutor(job *common.Job, phistage *common.Phistage, output io.Writer) (JobExecutor, error)
}
// executorProviders is the process-wide provider registry, keyed by name.
var executorProviders = make(map[string]ExecutorProvider)

// ErrorExecuteProviderNotFound is returned when fail to find executor provider
var ErrorExecuteProviderNotFound = errors.New("ExecutorProvider not found")
// RegisterExecutorProvider registers the executor provider with its name.
// Executor Providers with the same name can be registered for multiple times,
// latter registration will override former ones.
// Not safe for concurrent use: the registry map is unsynchronized.
func RegisterExecutorProvider(ep ExecutorProvider) {
	executorProviders[ep.GetName()] = ep
	logrus.WithField("executor", ep.GetName()).Info("ExecutorProvider registered")
}
// GetExecutorProvider gets executor provider by the given name.
// Returns nil when no provider is registered under that name
// (callers may then use ErrorExecuteProviderNotFound).
func GetExecutorProvider(name string) ExecutorProvider {
	return executorProviders[name]
}
|
package main
import "flag"
import "fmt"
import "os"
// main demonstrates the flag package: pointer-returning definitions,
// binding to an existing variable, and access to positional arguments.
func main() {
	word := flag.String("word", "foo", "a string")
	num := flag.Int("numb", 11, "an int")
	var fork bool
	flag.BoolVar(&fork, "fork", false, "a bool")
	flag.Parse()
	fmt.Println("word : ", *word)
	fmt.Println("num : ", *num)
	fmt.Println("bool : ", fork)
	fmt.Println("bool var : ", fork)
	// Remaining positional arguments, then the raw process arguments.
	fmt.Println("tail : ", flag.Args())
	fmt.Println("os args : ", os.Args)
}
|
package main
import (
"crypto/sha1"
"fmt"
)
// main hashes a fixed string with SHA-1 and prints the input followed by
// the digest in lowercase hex.
func main() {
	s := "my name is ankita"
	h := sha1.New()
	h.Write([]byte(s))
	bs := h.Sum(nil)
	fmt.Print(s)
	// Bug fix: fmt.Print has no format-string parameter — the original
	// printed the literal "%x" followed by the raw digest bytes.
	fmt.Printf("%x", bs)
}
|
package chme
import (
"net/http"
)
// DefaultInputName is used as the key of the value to which request method is changed.
const DefaultInputName = "_method"

// defaultChme backs the package-level ChangePostToHiddenMethod wrapper.
var defaultChme = NewChme(DefaultInputName)

// changeableMethods whitelists the methods a POST may be rewritten to.
var changeableMethods = map[string]bool{
	http.MethodPut:    true,
	http.MethodPatch:  true,
	http.MethodDelete: true,
}
// Chme provides methods which change request method to others.
type Chme interface {
	ChangePostToHiddenMethod(next http.Handler) http.Handler
}

// chme is the default Chme implementation; inputName is the form field
// holding the desired method.
type chme struct {
	inputName string
}

// NewChme returns new instance which implements Chme interface.
func NewChme(name string) Chme {
	return &chme{
		inputName: name,
	}
}
// ChangePostToHiddenMethod changes POST to the method set in FormValue named "_method".
// It delegates to the package-level default Chme instance.
func ChangePostToHiddenMethod(next http.Handler) http.Handler {
	return defaultChme.ChangePostToHiddenMethod(next)
}
// ChangePostToHiddenMethod rewrites an incoming POST's method to the value
// of the form field configured at NewChme time, provided that value is one
// of the whitelisted methods (PUT/PATCH/DELETE). Other requests pass through
// untouched.
func (c chme) ChangePostToHiddenMethod(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			if m := r.FormValue(c.inputName); changeableMethods[m] {
				r.Method = m
			}
		}
		next.ServeHTTP(w, r)
	})
}
|
package eventlog
import (
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/kv"
"github.com/iotaledger/wasp/packages/kv/kvdecoder"
"github.com/iotaledger/wasp/packages/kv/codec"
"github.com/iotaledger/wasp/packages/kv/collections"
"github.com/iotaledger/wasp/packages/kv/dict"
)
// initialize is mandatory
// It only logs that the contract was deployed; no state is written.
func initialize(ctx coretypes.Sandbox) (dict.Dict, error) {
	ctx.Log().Debugf("eventlog.initialize.success hname = %s", Interface.Hname().String())
	return nil, nil
}
// getNumRecords gets the number of eventlog records for a contract
// Parameters:
//  - ParamContractHname Hname of the contract to view the logs
// Returns ParamNumRecords, the record count as an encoded int64.
func getNumRecords(ctx coretypes.SandboxView) (dict.Dict, error) {
	params := kvdecoder.New(ctx.Params())
	contractHname, err := params.GetHname(ParamContractHname)
	if err != nil {
		return nil, err
	}
	ret := dict.New()
	// Records are kept in a timestamped log keyed by the contract's hname.
	thelog := collections.NewTimestampedLogReadOnly(ctx.State(), kv.Key(contractHname.Bytes()))
	ret.Set(ParamNumRecords, codec.EncodeInt64(int64(thelog.MustLen())))
	return ret, nil
}
// getRecords returns records between timestamp interval for the hname
// In time descending order
// Parameters:
//  - ParamContractHname Filter param, Hname of the contract to view the logs
//  - ParamFromTs From interval. Defaults to 0
//  - ParamToTs To Interval. Defaults to now (if both are missing means all)
//  - ParamMaxLastRecords Max amount of records that you want to return. Defaults to 50
func getRecords(ctx coretypes.SandboxView) (dict.Dict, error) {
	params := kvdecoder.New(ctx.Params())
	contractHname, err := params.GetHname(ParamContractHname)
	if err != nil {
		return nil, err
	}
	maxLast, err := params.GetInt64(ParamMaxLastRecords, DefaultMaxNumberOfRecords)
	if err != nil {
		return nil, err
	}
	fromTs, err := params.GetInt64(ParamFromTs, 0)
	if err != nil {
		return nil, err
	}
	toTs, err := params.GetInt64(ParamToTs, ctx.GetTimestamp())
	if err != nil {
		return nil, err
	}
	theLog := collections.NewTimestampedLogReadOnly(ctx.State(), kv.Key(contractHname.Bytes()))
	// Narrow the log to the [fromTs, toTs] window first.
	tts := theLog.MustTakeTimeSlice(fromTs, toTs)
	if tts.IsEmpty() {
		// empty time slice
		return nil, nil
	}
	ret := dict.New()
	// Cap the slice to at most maxLast records, then load them newest-first.
	first, last := tts.FromToIndicesCapped(uint32(maxLast))
	data := theLog.MustLoadRecordsRaw(first, last, true) // descending
	a := collections.NewArray(ret, ParamRecords)
	for _, s := range data {
		a.MustPush(s)
	}
	return ret, nil
}
|
package signal_test
import (
"os"
"syscall"
"github.com/flexi-cache/pkg/signal"
)
// ExampleHandlers demonstrates registering termination cleanup and custom
// signal handlers with the signal package.
func ExampleHandlers() {
	signals := signal.NewHandlers()
	stop := signals.StartListen()
	defer stop()
	var cleanup = func(os.Signal) error {
		// This will be called when termination signals received. e.g. SIGINT
		return nil
	}
	signals.RegisterTerminationProcedure(cleanup, "cleaning something up")
	var doSomething = func(os.Signal) {
		// This will be called when SIGUSR1 or SIGUSR2 received.
	}
	signals.RegisterSignalHandler(doSomething, syscall.SIGUSR1, syscall.SIGUSR2)
	//Output:
}
|
package commonMiddleware
import (
"net/http"
"time"
"github.com/MiteshSharma/gateway/common"
"github.com/felixge/httpsnoop"
"go.uber.org/zap"
)
// LoggerMiddleware struct: logs every request passing through the gateway.
type LoggerMiddleware struct {
}

// NewLoggerMiddleware function returns instance of logger middleware
func NewLoggerMiddleware() *LoggerMiddleware {
	loggerMiddleware := &LoggerMiddleware{}
	loggerMiddleware.Init()
	return loggerMiddleware
}

// Init function to init anything required for middleware
// (currently a no-op, kept for interface symmetry with other middlewares).
func (lm *LoggerMiddleware) Init() {
}
// GetMiddlewareHandler function returns middleware used to log requests.
// It runs the next handler while capturing response metrics (status code,
// duration), then logs one structured entry per request.
func (lm *LoggerMiddleware) GetMiddlewareHandler() func(http.ResponseWriter, *http.Request, http.HandlerFunc) {
	return func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
		metrix := httpsnoop.CaptureMetrics(next, rw, r)
		// Duration is reported in whole milliseconds.
		common.ServerObj.Logger.Info("Request handling completed from logger middleware ", zap.String("Host", r.Host),
			zap.String("Method", r.Method), zap.String("Request", r.RequestURI), zap.String("RemoteAddress", r.RemoteAddr),
			zap.String("Referer", r.Referer()), zap.String("UserAgent", r.UserAgent()), zap.Int("StatusCode", metrix.Code),
			zap.Int("Duration", int(metrix.Duration/time.Millisecond)))
	}
}
|
package htlc
import (
sdk "github.com/irisnet/irishub/types"
)
// NewHandler handles all "htlc" messages
// dispatching each message type to its dedicated handler.
func NewHandler(k Keeper) sdk.Handler {
	return func(ctx sdk.Context, msg sdk.Msg) sdk.Result {
		switch msg := msg.(type) {
		case MsgCreateHTLC:
			return handleMsgCreateHTLC(ctx, k, msg)
		case MsgClaimHTLC:
			return handleMsgClaimHTLC(ctx, k, msg)
		case MsgRefundHTLC:
			return handleMsgRefundHTLC(ctx, k, msg)
		default:
			return sdk.ErrTxDecode("invalid message parsed in HTLC module").Result()
		}
	}
}
// handleMsgCreateHTLC handles MsgCreateHTLC:
// builds an HTLC in the OPEN state with an empty secret, expiring
// TimeLock blocks after the current block height.
func handleMsgCreateHTLC(ctx sdk.Context, k Keeper, msg MsgCreateHTLC) sdk.Result {
	// The secret is only revealed later, on claim.
	secret := make([]byte, 0)
	expireHeight := msg.TimeLock + uint64(ctx.BlockHeight())
	state := OPEN
	htlc := NewHTLC(
		msg.Sender,
		msg.To,
		msg.ReceiverOnOtherChain,
		msg.Amount,
		secret,
		msg.Timestamp,
		expireHeight,
		state,
	)
	tags, err := k.CreateHTLC(ctx, htlc, msg.HashLock)
	if err != nil {
		return err.Result()
	}
	return sdk.Result{
		Tags: tags,
	}
}
// handleMsgClaimHTLC handles MsgClaimHTLC:
// the keeper validates the secret against the hash lock and releases funds.
func handleMsgClaimHTLC(ctx sdk.Context, k Keeper, msg MsgClaimHTLC) sdk.Result {
	tags, err := k.ClaimHTLC(ctx, msg.HashLock, msg.Secret)
	if err != nil {
		return err.Result()
	}
	return sdk.Result{
		Tags: tags,
	}
}
// handleMsgRefundHTLC handles MsgRefundHTLC:
// the keeper returns the locked funds to the sender of an expired HTLC.
func handleMsgRefundHTLC(ctx sdk.Context, k Keeper, msg MsgRefundHTLC) sdk.Result {
	tags, err := k.RefundHTLC(ctx, msg.HashLock)
	if err != nil {
		return err.Result()
	}
	return sdk.Result{
		Tags: tags,
	}
}
|
/*
* List intra-datacenter firewall policies associated with a given account.
* Optionally filter results to policies associated with a second "destination" account.
*/
package main
import (
"flag"
"fmt"
"os"
"path"
"strings"
"github.com/grrtrr/clcv2/clcv2cli"
"github.com/grrtrr/exit"
"github.com/kr/pretty"
"github.com/olekukonko/tablewriter"
)
// main lists intra-datacenter firewall policies for the location named
// on the command line, optionally filtered to a destination account, and
// prints them either via pretty-print (-simple) or as a table.
func main() {
	var (
		simple = flag.Bool("simple", false, "Use simple (debugging) output format")
		dst    = flag.String("dst", "", "Destination account to filter policies by")
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s [options] <Location>\n", path.Base(os.Args[0]))
		flag.PrintDefaults()
	}
	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}

	client, err := clcv2cli.NewCLIClient()
	if err != nil {
		exit.Fatal(err.Error())
	}

	location := flag.Arg(0)
	policies, err := client.GetIntraDataCenterFirewallPolicyList(location, *dst)
	if err != nil {
		exit.Fatalf("failed to list intra-datacenter firewall policies at %s: %s", location, err)
	}

	switch {
	case len(policies) == 0:
		fmt.Printf("Empty result - nothing listed at %s.\n", location)
	case *simple:
		pretty.Println(policies)
	default:
		fmt.Printf("Intra-Datacenter Firewall Policies for %s at %s:\n", client.AccountAlias, strings.ToUpper(location))
		tbl := tablewriter.NewWriter(os.Stdout)
		tbl.SetAutoFormatHeaders(false)
		tbl.SetAlignment(tablewriter.ALIGN_CENTER)
		tbl.SetAutoWrapText(true)
		tbl.SetHeader([]string{"Source", "Destination", "Ports",
			"Dst Account", "Enabled", "State", "Id",
		})
		for _, p := range policies {
			tbl.Append([]string{
				strings.Join(p.Source, ", "),
				strings.Join(p.Destination, ", "),
				strings.Join(p.Ports, ", "),
				strings.ToUpper(p.DestinationAccount),
				fmt.Sprint(p.Enabled), p.Status, p.ID,
			})
		}
		tbl.Render()
	}
}
|
/*
* VMaaS Webapp
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* API version: 1.3.2
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package vmaas
import (
_context "context"
_ioutil "io/ioutil"
_nethttp "net/http"
_neturl "net/url"
"strings"
"github.com/antihax/optional"
)
// Linger please
var (
	// Keeps the _context import referenced even when no generated code
	// in this file happens to use it.
	_ _context.Context
)
// CvesApiService handles communication with the CVE-related endpoints
// of the VMaaS API (generated by OpenAPI Generator).
type CvesApiService service
/*
AppCVEHandlerGetGet Method for AppCVEHandlerGetGet
Get details about CVEs. It is possible to use POSIX regular expression as a pattern for CVE names.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param cve CVE name or POSIX regular expression pattern
 @return CvesResponse
*/
func (a *CvesApiService) AppCVEHandlerGetGet(ctx _context.Context, cve string) (CvesResponse, *_nethttp.Response, error) {
	var (
		localVarHTTPMethod   = _nethttp.MethodGet
		localVarPostBody     interface{}
		localVarFormFileName string
		localVarFileName     string
		localVarFileBytes    []byte
		localVarReturnValue  CvesResponse
	)

	// create path and map variables; the path parameter is URL-escaped
	// before substitution into the template.
	localVarPath := a.client.cfg.BasePath + "/v1/cves/{cve}"
	localVarPath = strings.Replace(localVarPath, "{"+"cve"+"}", _neturl.QueryEscape(parameterToString(cve, "")), -1)

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := _neturl.Values{}
	localVarFormParams := _neturl.Values{}

	// to determine the Content-Type header (GET sends no body, so the
	// candidate list is empty)
	localVarHTTPContentTypes := []string{}

	// set Content-Type header
	localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
	if localVarHTTPContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHTTPContentType
	}

	// to determine the Accept header
	localVarHTTPHeaderAccepts := []string{"application/json"}

	// set Accept header
	localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
	if localVarHTTPHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
	}

	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
	if err != nil {
		return localVarReturnValue, nil, err
	}

	localVarHTTPResponse, err := a.client.callAPI(r)
	if err != nil || localVarHTTPResponse == nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	// The body is fully read and closed so the connection can be reused.
	localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
	localVarHTTPResponse.Body.Close()
	if err != nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	if localVarHTTPResponse.StatusCode >= 300 {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: localVarHTTPResponse.Status,
		}
		// NOTE(review): generated code — this 200 branch is unreachable
		// inside the >= 300 guard; it is kept as generated.
		if localVarHTTPResponse.StatusCode == 200 {
			var v CvesResponse
			err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
			if err != nil {
				newErr.error = err.Error()
				return localVarReturnValue, localVarHTTPResponse, newErr
			}
			newErr.model = v
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
	if err != nil {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: err.Error(),
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	return localVarReturnValue, localVarHTTPResponse, nil
}
// AppCVEHandlerPostPostOpts Optional parameters for the method 'AppCVEHandlerPostPost'
type AppCVEHandlerPostPostOpts struct {
	// CvesRequest, when set, is sent as the JSON request body; its value
	// must be of type CvesRequest.
	CvesRequest optional.Interface
}
/*
AppCVEHandlerPostPost Method for AppCVEHandlerPostPost
Get details about CVEs with additional parameters. As a \"cve_list\" parameter a complete list of CVE names can be provided OR one POSIX regular expression.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param optional nil or *AppCVEHandlerPostPostOpts - Optional Parameters:
 * @param "CvesRequest" (optional.Interface of CvesRequest) -
 @return CvesResponse
*/
func (a *CvesApiService) AppCVEHandlerPostPost(ctx _context.Context, localVarOptionals *AppCVEHandlerPostPostOpts) (CvesResponse, *_nethttp.Response, error) {
	var (
		localVarHTTPMethod   = _nethttp.MethodPost
		localVarPostBody     interface{}
		localVarFormFileName string
		localVarFileName     string
		localVarFileBytes    []byte
		localVarReturnValue  CvesResponse
	)

	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/v1/cves"

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := _neturl.Values{}
	localVarFormParams := _neturl.Values{}

	// to determine the Content-Type header
	localVarHTTPContentTypes := []string{"application/json"}

	// set Content-Type header
	localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
	if localVarHTTPContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHTTPContentType
	}

	// to determine the Accept header
	localVarHTTPHeaderAccepts := []string{"application/json"}

	// set Accept header
	localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
	if localVarHTTPHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
	}

	// body params: the optional CvesRequest is type-asserted before use so
	// a wrong value produces a clear error instead of a marshaling failure.
	if localVarOptionals != nil && localVarOptionals.CvesRequest.IsSet() {
		localVarOptionalCvesRequest, localVarOptionalCvesRequestok := localVarOptionals.CvesRequest.Value().(CvesRequest)
		if !localVarOptionalCvesRequestok {
			return localVarReturnValue, nil, reportError("cvesRequest should be CvesRequest")
		}
		localVarPostBody = &localVarOptionalCvesRequest
	}

	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
	if err != nil {
		return localVarReturnValue, nil, err
	}

	localVarHTTPResponse, err := a.client.callAPI(r)
	if err != nil || localVarHTTPResponse == nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
	localVarHTTPResponse.Body.Close()
	if err != nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	if localVarHTTPResponse.StatusCode >= 300 {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: localVarHTTPResponse.Status,
		}
		// NOTE(review): generated code — this 200 branch is unreachable
		// inside the >= 300 guard; it is kept as generated.
		if localVarHTTPResponse.StatusCode == 200 {
			var v CvesResponse
			err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
			if err != nil {
				newErr.error = err.Error()
				return localVarReturnValue, localVarHTTPResponse, newErr
			}
			newErr.model = v
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
	if err != nil {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: err.Error(),
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	return localVarReturnValue, localVarHTTPResponse, nil
}
|
package main
import (
"database/sql"
"fmt"
"github.com/exasol/exasol-driver-go"
"log"
)
// main demonstrates the exasol-driver-go workflow: connect, create a
// schema and table, bulk-import a local CSV file, and read the rows back.
//
// Fixes: the errors from the CREATE SCHEMA and OPEN SCHEMA statements
// were silently discarded (a failure there made every later statement
// fail confusingly), and a constant SQL string was needlessly wrapped in
// fmt.Sprintf.
func main() {
	fmt.Printf("Drivers=%#v\n", sql.Drivers())

	// NOTE(review): credentials/host are hard-coded for the demo and
	// server-certificate validation is disabled — do not copy into
	// production code.
	database, err := sql.Open("exasol", exasol.NewConfig("sys", "exasol").
		Host("localhost").
		Port(8563).
		ValidateServerCertificate(false).
		String())
	onError(err)
	defer database.Close()

	err = database.Ping()
	onError(err)

	_, err = database.Exec("CREATE SCHEMA IF NOT EXISTS my_schema")
	onError(err)
	_, err = database.Exec("OPEN SCHEMA my_schema")
	onError(err)

	_, err = database.Exec("CREATE OR REPLACE TABLE my_schema.CUSTOMERS (ref_id int , b VARCHAR(20)) ")
	onError(err)

	result, err := database.Exec(`IMPORT INTO my_schema.CUSTOMERS FROM LOCAL CSV FILE './data.csv' COLUMN SEPARATOR = ';' ENCODING = 'UTF-8'
ROW SEPARATOR = 'LF'`)
	onError(err)
	log.Println(result.RowsAffected())

	rows, err := database.Query("SELECT * FROM my_schema.CUSTOMERS")
	onError(err)
	defer rows.Close()
	printColumns(rows)
	printRows(rows)
}
|
package bundler
import (
"encoding/json"
"fmt"
"log"
"strconv"
)
// The file in an app directory where we expect to find the metadata
const MetadataName string = "Siphonfile"
type PlatformMetadata struct {
Language string `json:"language"`
StoreName string `json:"store_name"`
}
// Metadata represents the parsed content of a Siphonfile and Publish
// directory
type Metadata struct {
BaseVersion string `json:"base_version"`
DisplayName string `json:"display_name"`
FacebookAppID string `json:"facebook_app_id"`
IOS PlatformMetadata `json:"ios"`
Android PlatformMetadata `json:"android"`
}
// ParseMetadata loads a raw Siphonfile, parses it from JSON and checks
// its values. It raises an error if (a) the JSON was malformed, (b) there
// are unknown keys or (c) one of the values is invalid
func ParseMetadata(b []byte) (metadata *Metadata, err error) {
var m Metadata
if err := json.Unmarshal(b, &m); err != nil {
log.Printf("[ParseMetadata() error] %v", err)
return nil, fmt.Errorf("The format of your %s is invalid. "+
"Please check the documentation.", MetadataName)
}
// Validate the "base_version" key
if m.BaseVersion == "" {
return nil, fmt.Errorf(`The "base_version" key in your %s is empty, `+
`it should be a string like "0.1".`,
MetadataName)
}
if _, err := strconv.ParseFloat(m.BaseVersion, 64); err != nil {
return nil, fmt.Errorf(`The "base_version" key in your %s is not in `+
`the correct format, it should be a string like "0.1".`,
MetadataName)
}
// Validate the optional "display_name" key
if len(m.DisplayName) > 32 {
return nil, fmt.Errorf(`The "display_name" key in your %s is too `+
`long. The maximum is 32 characters.`,
MetadataName)
}
// Validate the optional "facebook_app_id" key. Anything longer than
// 32 chars is definitely erroneous (although it doesn't seem like there's
// a specific length).
if len(m.FacebookAppID) > 32 {
return nil, fmt.Errorf(`The "facebook_app_id" key in your %s is too `+
`long. You can find the app ID in your app's dashboard at `+
`https://developers.facebook.com`,
MetadataName)
}
// Validate the optional iOS metadata
if len(m.IOS.StoreName) > 255 {
return nil, fmt.Errorf(`The iOS "store_name" key in your %s is too `+
`long. The maximum is 255 characters.`,
MetadataName)
}
if len(m.IOS.Language) > 7 {
return nil, fmt.Errorf(`The iOS "language" key in your %s is too `+
`long. The maximum is 7 characters.`,
MetadataName)
}
// Validate the optional Android metadata
if len(m.Android.StoreName) > 30 {
return nil, fmt.Errorf(`The Android "store_name" key in your %s is too `+
`long. The maximum is 30 characters.`,
MetadataName)
}
if len(m.Android.Language) > 7 {
return nil, fmt.Errorf(`The Android "language" key in your %s is too `+
`long. The maximum is 7 characters.`,
MetadataName)
}
return &m, nil
}
|
package LeetCode
// FairCandySwap returns a pair {a, b} with a from A and b from B such
// that swapping them equalizes the two sums. Given totals sumA and sumB,
// the candidate b must equal a + (sumB-sumA)/2, so one set lookup over B
// per element of A suffices. An empty (non-nil) slice is returned when no
// such pair exists.
//
// Fix: the previous version also built a set of A's values that was never
// read; that dead work has been removed.
func FairCandySwap(A []int, B []int) []int {
	sumA, sumB := 0, 0
	inB := map[int]bool{}
	for _, v := range A {
		sumA += v
	}
	for _, v := range B {
		sumB += v
		inB[v] = true
	}
	diff := (sumB - sumA) / 2
	for _, v := range A {
		if inB[v+diff] {
			return []int{v, v + diff}
		}
	}
	return []int{}
}
|
package practice
// Brute Force: Recursion
// Runtime: 628 ms
// Memory Usage: 2.1 MB
//
// findTargetSumWays counts the ways of assigning +/- signs to nums so
// that the expression sums to S, by trying both signs for the first
// element and recursing on the remainder.
func findTargetSumWays(nums []int, S int) int {
	if len(nums) != 0 {
		rest := nums[1:]
		return findTargetSumWays(rest, S+nums[0]) + findTargetSumWays(rest, S-nums[0])
	}
	if S == 0 {
		return 1
	}
	return 0
}
// Recursion with Memoization
// Runtime: 96 ms
// Memory Usage: 7 MB
//
// findTargetSumWays1 delegates to dfs with a fresh memoization table
// keyed by (remaining length, remaining target).
func findTargetSumWays1(nums []int, S int) int {
	return dfs(nums, S, make(map[[2]int]int))
}
// DFS with memoization.
//
// dfs counts the sign assignments of nums summing to S. Results are
// cached in mem under the key {len(nums), S} so identical subproblems
// are solved only once.
func dfs(nums []int, S int, mem map[[2]int]int) int {
	if len(nums) == 0 {
		if S == 0 {
			return 1
		}
		return 0
	}
	key := [2]int{len(nums), S}
	if cached, ok := mem[key]; ok {
		return cached
	}
	rest, head := nums[1:], nums[0]
	total := dfs(rest, S-head, mem) + dfs(rest, S+head, mem)
	mem[key] = total
	return total
}
// 2D Dynamic Programming.
// dp[i][j] is the number of ways to reach total j using the first i+1
// numbers, via the recurrences:
//   dp[i][sum+nums[i]] += dp[i-1][sum]
//   dp[i][sum-nums[i]] += dp[i-1][sum]
// The problem bounds the total sum to [-1000, 1000], so sums are shifted
// by +1000 to serve as array indices.
// Runtime: 8 ms
// Memory Usage: 6.6 MB
//
// Fix: an empty nums slice previously returned 1 for any in-range S;
// the empty expression sums to 0, so it is one way only when S == 0.
func findTargetSumWays2(nums []int, S int) int {
	if S > 1000 || S < -1000 {
		return 0
	}
	if len(nums) == 0 {
		if S == 0 {
			return 1
		}
		return 0
	}
	dp := make([][2001]int, len(nums))
	for i, num := range nums {
		if i == 0 {
			dp[i][num+1000]++
			dp[i][-num+1000]++
			continue
		}
		for sum := -1000; sum <= 1000; sum++ {
			if dp[i-1][sum+1000] > 0 {
				// assumes |sum±num| <= 1000, guaranteed by the problem's
				// bound on the total of nums — TODO confirm for other callers
				dp[i][sum+1000+num] += dp[i-1][sum+1000]
				dp[i][sum+1000-num] += dp[i-1][sum+1000]
			}
		}
	}
	return dp[len(nums)-1][S+1000]
}
// 1D Dynamic Programming.
// Only the last row of the 2D table is ever read, so a single dp array
// is kept and rebuilt once per element; the only difference is that the
// same array is replaced on every pass.
// Runtime: 8 ms
// Memory Usage: 2.3 MB
//
// Fix: as with findTargetSumWays2, the empty-input case now returns 1
// only when S == 0 (previously it returned 1 for any in-range S).
func findTargetSumWays31(nums []int, S int) int {
	if S > 1000 || S < -1000 {
		return 0
	}
	if len(nums) == 0 {
		if S == 0 {
			return 1
		}
		return 0
	}
	var dp [2001]int
	for i, num := range nums {
		if i == 0 {
			dp[num+1000]++
			dp[-num+1000]++
			continue
		}
		var next [2001]int
		for sum := -1000; sum <= 1000; sum++ {
			if dp[sum+1000] > 0 {
				// assumes |sum±num| <= 1000 per the problem's total bound
				next[sum+num+1000] += dp[sum+1000]
				next[sum-num+1000] += dp[sum+1000]
			}
		}
		dp = next
	}
	return dp[S+1000]
}
// Runtime: 60 ms
// Memory Usage: 6.5 MB
//
// findTargetSumWays32 keeps a sparse map from each reachable sum to the
// number of ways to reach it, rebuilding the map once per element.
func findTargetSumWays32(nums []int, S int) int {
	ways := map[int]int{0: 1}
	for _, num := range nums {
		updated := make(map[int]int)
		for sum, cnt := range ways {
			if cnt <= 0 {
				continue
			}
			updated[sum+num] += cnt
			updated[sum-num] += cnt
		}
		ways = updated
	}
	return ways[S]
}
// Runtime: 0 ms
// Memory Usage: 2.4 MB
//
// findTargetSumWays33 reduces the problem to counting subsets with sum
// (s+S)/2, where s is the total of nums: if P is the set of elements
// given '+', then sum(P) - (s - sum(P)) = S, i.e. sum(P) = (s+S)/2.
//
// Fix: for S < -s the old code computed a negative target and panicked
// indexing dp; that impossible case now returns 0.
func findTargetSumWays33(nums []int, S int) int {
	// s is the total with every element taken as positive.
	var s int
	for _, num := range nums {
		s += num
	}
	// No assignment can reach S outside [-s, s], and s+S must be even.
	if s < S || s < -S || (s+S)%2 != 0 {
		return 0
	}
	target := (s + S) / 2
	// dp[i] counts subsets of the processed prefix that sum to i.
	dp := make([]int, target+1)
	dp[0] = 1
	for _, num := range nums {
		for i := target; i >= num; i-- {
			dp[i] += dp[i-num]
		}
	}
	return dp[target]
}
// Runtime: 0 ms
// Memory Usage: 2.4 MB
//
// findTargetSumWays34 is the same subset-sum reduction as
// findTargetSumWays33, written with s = S + total and target = s/2.
//
// Fix: when S was more negative than the total of nums, s went negative
// and make([]int, target+1) / dp[0] panicked; that case now returns 0.
func findTargetSumWays34(nums []int, S int) int {
	s := S
	for _, num := range nums {
		s += num
	}
	// s < 0 means |S| exceeds the reachable range; s must also be even.
	if s < 0 || s%2 != 0 {
		return 0
	}
	target := s / 2
	if target < S {
		return 0
	}
	// dp[i] counts subsets of the processed prefix that sum to i.
	dp := make([]int, target+1)
	dp[0] = 1
	for _, num := range nums {
		for i := target; i >= num; i-- {
			dp[i] += dp[i-num]
		}
	}
	return dp[target]
}
|
package github
import (
"errors"
"fmt"
"os"
"github.com/FINTLabs/fint-consumer/common/utils"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// Clone performs a shallow (depth 1) clone of cloneUrl at the given
// reference into the working directory derived from name, streaming
// progress to stdout. It returns an error when the clone fails.
//
// Fix: the error returned by git.PlainClone was previously discarded, so
// a failed clone left r nil and the subsequent r.Head() call panicked.
// Both errors are now checked before the repository is used.
func Clone(name string, cloneUrl string, reference string) error {
	ref := plumbing.ReferenceName(reference)
	r, err := git.PlainClone(utils.GetWorkingDir(name), false, &git.CloneOptions{
		URL:           cloneUrl,
		Progress:      os.Stdout,
		Depth:         1,
		ReferenceName: ref,
	})
	if err != nil {
		return errors.New(fmt.Sprintf("Failed to clone %s", cloneUrl))
	}
	if _, err = r.Head(); err != nil {
		return errors.New(fmt.Sprintf("Failed to clone %s", cloneUrl))
	}
	return nil
}
|
package gorms
import (
"gorm.io/driver/sqlite"
)
// NewSqliteAdapter builds a gorm adapter backed by the SQLite driver,
// opened with the connection string taken from the given settings.
func NewSqliteAdapter(settings GormSettings) *adapter {
	return &adapter{
		dialector: sqlite.Open(settings.ConnectionString),
		settings:  settings,
	}
}
|
package main
import (
"fmt"
)
// printRankings prints, one per line, the leaderboard rank of each score
// in scores, using dense ranking (tied scores share a rank).
//
// board maps each leaderboard score to its rank, leaders holds the
// leaderboard scores (presumably sorted descending — see main), and
// lowest is one past the worst leaderboard rank.
func printRankings(board map[int]int, leaders []int, scores []int, lowest int) {
	max := leaders[0]
	min := leaders[len(leaders)-1]
	// idx narrows the inner scan window across iterations; this relies on
	// scores arriving in ascending order — NOTE(review): confirm the input
	// format guarantees this.
	idx := len(leaders) - 1
	for _, i := range scores {
		if val, ok := board[i]; ok {
			// Exact match with a leaderboard score: reuse its rank.
			fmt.Println(val)
		} else if i > max {
			// Better than the current leader.
			fmt.Println(1)
		} else if i < min {
			// Worse than every leaderboard entry.
			fmt.Println(lowest)
		} else {
			// Strictly between two leaderboard scores: print the rank of the
			// next leaderboard score below i (board[leaders[j+1]]).
			for j := idx; j >= 0; j-- {
				if leaders[j] > i {
					idx = j + 1
					fmt.Println(board[leaders[j+1]])
					break
				}
			}
		}
	}
}
// main reads a leaderboard followed by a list of player scores from
// stdin (count then values in each case; the leaderboard is presumably
// given in descending order — TODO confirm the input contract) and prints
// the player's dense rank after each score via printRankings.
func main() {
	numScores, leader, levels, score := 0, 0, 0, 0
	var leaders, scores []int
	fmt.Scanf("%d", &numScores)
	// Dense ranking: equal leaderboard scores share a rank, so only the
	// first occurrence of a value is assigned a new rank.
	lowest := 1
	board := make(map[int]int)
	for i := 0; i < numScores; i++ {
		fmt.Scanf("%d", &leader)
		if _, ok := board[leader]; !ok {
			board[leader] = lowest
			lowest++
		}
		leaders = append(leaders, leader)
	}
	fmt.Scanf("%d", &levels)
	for i := 0; i < levels; i++ {
		fmt.Scanf("%d", &score)
		scores = append(scores, score)
	}
	printRankings(board, leaders, scores, lowest)
}
|
// Package multipart to handle MultipartForm
package multipart
import (
"fmt"
"io"
"mime"
"mime/multipart"
"net/http"
)
type MultipartForm struct {
r *http.Request
}
type FormFile struct {
part *multipart.Part
name string // form name
filename string // file name
// size int64 // readed size
}
var _ io.Reader = (*FormFile)(nil)
func New(r *http.Request) *MultipartForm {
return &MultipartForm{r: r}
}
func (m *MultipartForm) GetFormFile(targetName string) (*FormFile, error) {
mr, err := m.multipartReader(false)
if err != nil {
return nil, err
}
p, err := mr.NextPart()
if err != nil {
return nil, err
}
name := p.FormName()
if name != targetName {
return nil, fmt.Errorf("want %s got %s", targetName, name)
}
filename := p.FileName()
return &FormFile{
part: p,
name: name,
filename: filename,
}, nil
}
// code copy from http/request.go:447
func (m *MultipartForm) multipartReader(allowMixed bool) (*multipart.Reader, error) {
r := m.r
v := r.Header.Get("Content-Type")
if v == "" {
return nil, http.ErrNotMultipart
}
d, params, err := mime.ParseMediaType(v)
if err != nil || !(d == "multipart/form-data" || allowMixed && d == "multipart/mixed") {
return nil, http.ErrNotMultipart
}
boundary, ok := params["boundary"]
if !ok {
return nil, http.ErrMissingBoundary
}
return multipart.NewReader(r.Body, boundary), nil
}
func (f *FormFile) Read(p []byte) (n int, err error) {
return f.part.Read(p)
}
func (f *FormFile) FileName() string {
return f.filename
}
func (f *FormFile) Name() string {
return f.name
}
|
package azure
import (
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
azmigrate "github.com/openshift/installer/pkg/migrate/azure"
"github.com/openshift/installer/pkg/types/azure"
)
var (
	// azureMigrateOpts collects the command-line flag values for the
	// azure-privatedns migrate command (registered in
	// NewMigrateAzurePrivateDNSMigrateCmd).
	azureMigrateOpts struct {
		cloudName         string // Azure cloud environment name
		zone              string // private DNS zone to migrate
		resourceGroup     string // resource group containing the zone
		virtualNetwork    string // virtual network for the new private zone
		vnetResourceGroup string // resource group of the virtual network
		link              bool   // whether to link the new zone to the vnet
	}
)
// runMigrateAzurePrivateDNSMigrateCmd validates the azure-privatedns
// flags and then runs the private DNS zone migration.
func runMigrateAzurePrivateDNSMigrateCmd(cmd *cobra.Command, args []string) error {
	opts := &azureMigrateOpts
	cloud := azure.CloudEnvironment(opts.cloudName)
	switch cloud {
	case azure.PublicCloud, azure.USGovernmentCloud, azure.ChinaCloud, azure.GermanCloud:
		// recognized cloud environment
	default:
		return errors.Errorf("cloud-name must be one of %s, %s, %s, %s", azure.PublicCloud, azure.USGovernmentCloud, azure.ChinaCloud, azure.GermanCloud)
	}
	switch {
	case opts.zone == "":
		return errors.New("zone is a required argument")
	case opts.resourceGroup == "":
		return errors.New("resource-group is a required argument")
	case opts.link && opts.virtualNetwork == "":
		return errors.New("link requires virtual-network to be set")
	case opts.virtualNetwork != "" && opts.vnetResourceGroup == "":
		return errors.New("virtual-network requires virtual-network-resource-group to be set")
	}
	return azmigrate.Migrate(
		cloud,
		opts.resourceGroup,
		opts.zone,
		opts.virtualNetwork,
		opts.vnetResourceGroup,
		opts.link,
	)
}
// NewMigrateAzurePrivateDNSMigrateCmd adds the migrate command to openshift-install
func NewMigrateAzurePrivateDNSMigrateCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "azure-privatedns",
		Short: "Migrate a legacy Azure zone",
		Long:  "This will migrate a legacy Azure private zone to a new style private zone.",
		RunE:  runMigrateAzurePrivateDNSMigrateCmd,
	}
	// Flag values land in the package-level azureMigrateOpts struct, which
	// runMigrateAzurePrivateDNSMigrateCmd validates before migrating.
	cmd.PersistentFlags().StringVar(
		&azureMigrateOpts.cloudName,
		"cloud-name",
		string(azure.PublicCloud),
		fmt.Sprintf("cloud environment name, one of: %s, %s, %s, %s", azure.PublicCloud, azure.USGovernmentCloud, azure.ChinaCloud, azure.GermanCloud),
	)
	cmd.PersistentFlags().StringVar(&azureMigrateOpts.zone, "zone", "", "The zone to migrate")
	cmd.PersistentFlags().StringVar(&azureMigrateOpts.resourceGroup, "resource-group", "", "The resource group of the zone")
	cmd.PersistentFlags().StringVar(&azureMigrateOpts.virtualNetwork, "virtual-network", "", "The virtual network to create the private zone in")
	cmd.PersistentFlags().StringVar(&azureMigrateOpts.vnetResourceGroup, "virtual-network-resource-group", "", "The resource group the virtual network is in")
	cmd.PersistentFlags().BoolVar(&azureMigrateOpts.link, "link", false, "Link the newly created private zone to the virtual network")
	return cmd
}
|
package drivers
import (
"crypto/tls"
"fmt"
"log"
"net/smtp"
"os"
"github.com/mitchdennett/flameframework/contracts"
)
// MailSmtpDriver sends mail over an implicit-TLS SMTP connection, with
// the server and credentials drawn from MAIL_* environment variables.
type MailSmtpDriver struct {
	to      string // recipient address
	from    string // sender address; Send falls back to MAIL_FROM when empty
	message string // plain-text body
	subject string // subject line
}
// To sets the recipient address. The setters use value receivers, so
// each returns an updated copy of the driver rather than mutating in place.
func (m MailSmtpDriver) To(email string) contracts.MailContract {
	m.to = email
	return m
}

// Subject sets the subject line and returns the updated copy.
func (m MailSmtpDriver) Subject(subject string) contracts.MailContract {
	m.subject = subject
	return m
}

// From sets the sender address and returns the updated copy.
func (m MailSmtpDriver) From(email string) contracts.MailContract {
	m.from = email
	return m
}
// Send builds minimal RFC 5322 headers around message and delivers it
// over an implicit-TLS SMTP connection (servers that expect TLS from the
// first byte, e.g. port 465). Host, port, credentials, and the fallback
// sender come from MAIL_HOST, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD
// and MAIL_FROM. Any failure panics via log.Panic.
func (m MailSmtpDriver) Send(message string) {
	m.message = message
	// Debug output of recipient and body; consider removing in production.
	fmt.Println("sending smtp")
	fmt.Println(m.to)
	fmt.Println(m.message)
	if m.from == "" {
		m.from = os.Getenv("MAIL_FROM")
	}
	host := os.Getenv("MAIL_HOST")
	port := os.Getenv("MAIL_PORT")
	// Set up authentication information.
	auth := smtp.PlainAuth(
		"",
		os.Getenv("MAIL_USERNAME"),
		os.Getenv("MAIL_PASSWORD"),
		host,
	)
	// TLS config
	// SECURITY(review): InsecureSkipVerify disables server-certificate
	// verification, enabling man-in-the-middle attacks; confirm whether
	// this is truly required for the target server.
	tlsconfig := &tls.Config{
		InsecureSkipVerify: true,
		ServerName:         host,
	}
	// Here is the key, you need to call tls.Dial instead of smtp.Dial
	// for smtp servers running on 465 that require an ssl connection
	// from the very beginning (no starttls)
	conn, err := tls.Dial("tcp", host+":"+port, tlsconfig)
	if err != nil {
		log.Panic(err)
	}
	c, err := smtp.NewClient(conn, host)
	if err != nil {
		log.Panic(err)
	}
	// Auth
	if err = c.Auth(auth); err != nil {
		log.Panic(err)
	}
	// To && From
	if err = c.Mail(m.from); err != nil {
		log.Panic(err)
	}
	if err = c.Rcpt(m.to); err != nil {
		log.Panic(err)
	}
	// Data
	w, err := c.Data()
	if err != nil {
		log.Panic(err)
	}
	// Minimal headers followed by a blank line and the body.
	msg := []byte("To: " + m.to + "\r\n" +
		"Subject: " + m.subject + "\r\n" +
		"\r\n" + m.message +
		"\r\n")
	_, err = w.Write([]byte(msg))
	if err != nil {
		log.Panic(err)
	}
	err = w.Close()
	if err != nil {
		log.Panic(err)
	}
	// NOTE(review): the error from Quit is ignored; the message has
	// already been accepted by the server at this point.
	c.Quit()
}
|
package main
import "fmt"
// Stu is a minimal student record used to demonstrate the fmt.Stringer
// interface.
type Stu struct {
	Name string
	Age  int
}

// String implements fmt.Stringer so that printing a *Stu yields
// "name=[...] age=[...]".
func (stu *Stu) String() string {
	return fmt.Sprintf("name=[%v] age=[%v]", stu.Name, stu.Age)
}
// main builds a Stu and prints it; fmt.Println invokes the String method
// because *Stu implements fmt.Stringer.
func main() {
	s := &Stu{
		Name: "zcr",
		Age:  12,
	}
	fmt.Println(s)
}
|
package hydrator
import (
"context"
"github.com/asecurityteam/nexpose-vuln-hydrator/pkg/domain"
)
// HydratorConfig contains items to be configured for the HydratorComponent.
type HydratorConfig struct {
	Nexpose *NexposeConfig // settings for the underlying Nexpose client
}
// Name is used by the settings library and will add a "HYDRATOR_"
// prefix to all app environment variables
func (*HydratorConfig) Name() string {
	return "hydrator"
}
// HydratorComponent contains other components that need to be configured.
type HydratorComponent struct {
	Nexpose *NexposeComponent // component that builds the Nexpose client
}
// NewHydratorComponent generates and returns a HydratorComponent with a
// freshly constructed Nexpose sub-component.
func NewHydratorComponent() *HydratorComponent {
	return &HydratorComponent{
		Nexpose: NewNexposeComponent(),
	}
}
// Settings can be used to populate default values if there are any;
// it delegates to the Nexpose sub-component's defaults.
func (c *HydratorComponent) Settings() *HydratorConfig {
	return &HydratorConfig{
		Nexpose: c.Nexpose.Settings(),
	}
}
// New configures and returns a new Hydrator with all default configs set.
// A single Nexpose client backs every fetcher role of the hydrator.
func (c *HydratorComponent) New(ctx context.Context, config *HydratorConfig) (*Hydrator, error) {
	nexposeClient, err := c.Nexpose.New(ctx, config.Nexpose)
	if err != nil {
		return nil, err
	}
	perVuln := &assetVulnerabilityHydrator{
		VulnerabilityDetailsFetcher:   nexposeClient,
		VulnerabilitySolutionsFetcher: nexposeClient,
		BatchSolutionFetcher: &batchSolutionFetcher{
			SolutionFetcher: nexposeClient,
		},
		VulnerabilityChecksFetcher: nexposeClient,
		BatchCheckFetcher: &batchCheckFetcher{
			CheckFetcher: nexposeClient,
		},
	}
	return &Hydrator{
		AssetVulnerabilitiesFetcher: nexposeClient,
		BatchAssetVulnerabilityHydrator: &batchAssetVulnerabilityHydrator{
			AssetVulnerabilityHydrator: perVuln,
		},
		DependencyChecker: nexposeClient,
	}, nil
}
// Hydrator implements the domain.Hydrator interface
type Hydrator struct {
	AssetVulnerabilitiesFetcher     AssetVulnerabilitiesFetcher     // lists an asset's vulnerabilities
	BatchAssetVulnerabilityHydrator BatchAssetVulnerabilityHydrator // enriches those vulnerabilities in bulk
	DependencyChecker               domain.DependencyChecker        // health check for the backing Nexpose client
}
// HydrateVulnerabilities accepts a domain.Asset and fetches the
// information necessary to populate its vulnerabilities.
func (h *Hydrator) HydrateVulnerabilities(ctx context.Context, a domain.Asset) (domain.AssetVulnerabilityDetails, error) {
	var details domain.AssetVulnerabilityDetails
	vulns, err := h.AssetVulnerabilitiesFetcher.FetchAssetVulnerabilities(ctx, a.ID)
	if err != nil {
		return details, err
	}
	hydrated, err := h.BatchAssetVulnerabilityHydrator.BatchHydrateAssetVulnerabilities(ctx, vulns)
	if err != nil {
		return details, err
	}
	details.Asset = a
	details.Vulnerabilities = hydrated
	return details, nil
}
// CheckDependencies makes a call to the nexpose endppoint "/api/3".
// Because asset producer endpoints vary user to user, we want to hit an endpoint
// that is consistent for any Nexpose user
func (h *Hydrator) CheckDependencies(ctx context.Context) error {
	// There is no need to check dependencies for h.BatchAssetVulnerabilityHydrator, for
	// they share the same NexposeClient
	return h.DependencyChecker.CheckDependencies(ctx)
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package implementation
import (
"math"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/planner/memo"
"github.com/pingcap/tidb/statistics"
)
// TableDualImpl implementation of PhysicalTableDual.
type TableDualImpl struct {
	baseImpl
}
// NewTableDualImpl creates a new table dual Implementation.
func NewTableDualImpl(dual *plannercore.PhysicalTableDual) *TableDualImpl {
	return &TableDualImpl{baseImpl{plan: dual}}
}
// CalcCost calculates the cost of the table dual Implementation.
// A table dual produces its rows without touching storage, so the cost
// is zero regardless of row count or children.
func (*TableDualImpl) CalcCost(_ float64, _ ...memo.Implementation) float64 {
	return 0
}
// MemTableScanImpl implementation of PhysicalMemTable.
type MemTableScanImpl struct {
	baseImpl
}
// NewMemTableScanImpl creates a new memory table scan Implementation.
func NewMemTableScanImpl(dual *plannercore.PhysicalMemTable) *MemTableScanImpl {
	return &MemTableScanImpl{baseImpl{plan: dual}}
}
// CalcCost calculates the cost of the memory table scan Implementation.
// Memory tables are read from in-memory data only, so the cost is zero.
func (*MemTableScanImpl) CalcCost(_ float64, _ ...memo.Implementation) float64 {
	return 0
}
// TableReaderImpl implementation of PhysicalTableReader.
type TableReaderImpl struct {
	baseImpl
	tblInfo     *model.TableInfo     // table metadata, used for cost factors
	tblColHists *statistics.HistColl // column histograms, used for row-width estimates
}
// NewTableReaderImpl creates a new table reader Implementation backed by
// the data source's table info and column histograms.
func NewTableReaderImpl(reader *plannercore.PhysicalTableReader, source *plannercore.DataSource) *TableReaderImpl {
	return &TableReaderImpl{
		baseImpl:    baseImpl{plan: reader},
		tblInfo:     source.TableInfo(),
		tblColHists: source.TblColHists,
	}
}
// CalcCost calculates the cost of the table reader Implementation as the
// network transfer cost of outCount rows plus the child (cop task) cost,
// amortized over the cop iterator workers.
func (impl *TableReaderImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 {
	reader := impl.plan.(*plannercore.PhysicalTableReader)
	// Average row width drives the per-row network cost.
	width := impl.tblColHists.GetAvgRowSize(impl.plan.SCtx(), reader.Schema().Columns, false, false)
	sessVars := reader.SCtx().GetSessionVars()
	// TableReaderImpl don't have tableInfo property, so using nil to replace it.
	// Todo add the tableInfo property for the TableReaderImpl.
	networkCost := outCount * sessVars.GetNetworkFactor(impl.tblInfo) * width
	// copTasks are run in parallel, to make the estimated cost closer to execution time, we amortize
	// the cost to cop iterator workers. According to `CopClient::Send`, the concurrency
	// is Min(DistSQLScanConcurrency, numRegionsInvolvedInScan), since we cannot infer
	// the number of regions involved, we simply use DistSQLScanConcurrency.
	copIterWorkers := float64(sessVars.DistSQLScanConcurrency())
	impl.cost = (networkCost + children[0].GetCost()) / copIterWorkers
	return impl.cost
}
// GetCostLimit implements Implementation interface. Because CalcCost
// divides by the cop worker count, the limit handed down to children is
// scaled back up by it, guarding against float overflow.
func (impl *TableReaderImpl) GetCostLimit(costLimit float64, _ ...memo.Implementation) float64 {
	sessVars := impl.plan.(*plannercore.PhysicalTableReader).SCtx().GetSessionVars()
	workers := float64(sessVars.DistSQLScanConcurrency())
	if costLimit > math.MaxFloat64/workers {
		return math.MaxFloat64
	}
	return costLimit * workers
}
// TableScanImpl implementation of PhysicalTableScan.
type TableScanImpl struct {
	baseImpl
	tblColHists *statistics.HistColl // column histograms for row-width estimates
	tblCols     []*expression.Column // full column set of the table
}
// NewTableScanImpl creates a new table scan Implementation carrying the
// column set and histograms needed for cost estimation.
func NewTableScanImpl(ts *plannercore.PhysicalTableScan, cols []*expression.Column,
	hists *statistics.HistColl) *TableScanImpl {
	return &TableScanImpl{
		baseImpl:    baseImpl{plan: ts},
		tblColHists: hists,
		tblCols:     cols,
	}
}
// CalcCost calculates the cost of the table scan Implementation as
// rows * scan-factor * average row width, using the descending-scan
// factor when the scan runs in reverse order.
func (impl *TableScanImpl) CalcCost(outCount float64, _ ...memo.Implementation) float64 {
	ts := impl.plan.(*plannercore.PhysicalTableScan)
	sessVars := ts.SCtx().GetSessionVars()
	factor := sessVars.GetScanFactor(ts.Table)
	if ts.Desc {
		factor = sessVars.GetDescScanFactor(ts.Table)
	}
	width := impl.tblColHists.GetTableAvgRowSize(impl.plan.SCtx(), impl.tblCols, kv.TiKV, true)
	impl.cost = outCount * factor * width
	return impl.cost
}
// IndexReaderImpl is the implementation of PhysicalIndexReader.
type IndexReaderImpl struct {
	baseImpl
	tblInfo     *model.TableInfo     // table metadata, used for cost factors
	tblColHists *statistics.HistColl // column histograms for row-width estimates
}
// GetCostLimit implements Implementation interface. As in CalcCost the
// total is divided by the cop worker count, so the child limit is scaled
// up by it, with an overflow guard.
func (impl *IndexReaderImpl) GetCostLimit(costLimit float64, _ ...memo.Implementation) float64 {
	sessVars := impl.plan.(*plannercore.PhysicalIndexReader).SCtx().GetSessionVars()
	workers := float64(sessVars.DistSQLScanConcurrency())
	if costLimit > math.MaxFloat64/workers {
		return math.MaxFloat64
	}
	return costLimit * workers
}
// CalcCost implements Implementation interface.
func (impl *IndexReaderImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 {
	reader := impl.plan.(*plannercore.PhysicalIndexReader)
	sessVars := reader.SCtx().GetSessionVars()
	rowSize := impl.tblColHists.GetAvgRowSize(reader.SCtx(),
		children[0].GetPlan().Schema().Columns, true, false)
	networkCost := outCount * sessVars.GetNetworkFactor(impl.tblInfo) * rowSize
	// Amortize over the cop iterator workers, mirroring TableReaderImpl.
	impl.cost = (networkCost + children[0].GetCost()) / float64(sessVars.DistSQLScanConcurrency())
	return impl.cost
}
// NewIndexReaderImpl creates a new IndexReader Implementation.
func NewIndexReaderImpl(reader *plannercore.PhysicalIndexReader, source *plannercore.DataSource) *IndexReaderImpl {
	impl := &IndexReaderImpl{}
	impl.plan = reader
	impl.tblInfo = source.TableInfo()
	impl.tblColHists = source.TblColHists
	return impl
}
// IndexScanImpl is the Implementation of PhysicalIndexScan.
type IndexScanImpl struct {
	baseImpl
	// tblColHists is used to estimate the average index row size.
	tblColHists *statistics.HistColl
}
// CalcCost implements Implementation interface.
func (impl *IndexScanImpl) CalcCost(outCount float64, _ ...memo.Implementation) float64 {
	is := impl.plan.(*plannercore.PhysicalIndexScan)
	sessVars := is.SCtx().GetSessionVars()
	rowSize := impl.tblColHists.GetIndexAvgRowSize(is.SCtx(), is.Schema().Columns, is.Index.Unique)
	scanFactor := sessVars.GetScanFactor(is.Table)
	if is.Desc {
		scanFactor = sessVars.GetDescScanFactor(is.Table)
	}
	// Scan cost plus one seek per range.
	impl.cost = outCount*rowSize*scanFactor +
		float64(len(is.Ranges))*sessVars.GetSeekFactor(is.Table)
	return impl.cost
}
// NewIndexScanImpl creates a new IndexScan Implementation.
func NewIndexScanImpl(scan *plannercore.PhysicalIndexScan, tblColHists *statistics.HistColl) *IndexScanImpl {
	impl := &IndexScanImpl{tblColHists: tblColHists}
	impl.plan = scan
	return impl
}
|
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
// rootCmd is the top-level cobra command for the todoapp CLI.
var rootCmd = &cobra.Command{
	Use:   "todoapp",
	Short: "Todo App is file base command line todo list",
}
func init() {
rootCmd.AddCommand(listCmd)
rootCmd.AddCommand(showCmd)
rootCmd.AddCommand(newCmd)
rootCmd.AddCommand(addCmd)
rootCmd.AddCommand(editCmd)
rootCmd.AddCommand(deleteCmd)
}
// Execute runs the root command and exits with status 1 on failure.
func Execute() {
	err := rootCmd.Execute()
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(1)
}
|
// Copyright 2019 Stefan Prisca
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"log"
"regexp"
"time"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/core/chaincode/shim"
pb "github.com/hyperledger/fabric/protos/peer"
tttPb "github.com/stefanprisca/strategy-protobufs/tictactoe"
)
// GameContract is the stateless receiver implementing the tic-tac-toe
// chaincode (Init/Invoke); all game state lives in the ledger.
type GameContract struct {
}
// CONTRACT_STATE_KEY is the ledger key under which the serialized contract is stored.
const CONTRACT_STATE_KEY = "contract.tictactoe"

// BOARD_SIZE is the number of cells on the tic-tac-toe board.
const BOARD_SIZE = 9
// Init initializes the ledger with a fresh game: all positions empty,
// X to move, and default player names. Returns the serialized contract.
func (gc *GameContract) Init(APIstub shim.ChaincodeStubInterface) pb.Response {
	positions := make([]tttPb.Mark, BOARD_SIZE)
	for i := 0; i < BOARD_SIZE; i++ {
		positions[i] = tttPb.Mark_E
	}
	tttContract := &tttPb.TttContract{
		State:     tttPb.TttContract_XTURN,
		Positions: positions,
		XPlayer:   "player1",
		OPlayer:   "player2",
	}
	tttState, err := proto.Marshal(tttContract)
	if err != nil {
		errMsg := fmt.Sprintf("Could not marshal the contract. Error: %s", err.Error())
		return shim.Error(errMsg)
	}
	// BUG FIX: the PutState error was previously dropped, so a failed write
	// still reported success.
	if err := APIstub.PutState(CONTRACT_STATE_KEY, tttState); err != nil {
		return shim.Error(fmt.Sprintf("Could not store the contract. Error: %s", err.Error()))
	}
	return shim.Success(tttState)
}
// Invoke dispatches a transaction. The first argument is the function name,
// the second is a marshaled tttPb.TrxArgs payload.
func (gc *GameContract) Invoke(APIstub shim.ChaincodeStubInterface) pb.Response {
	// creator, errc := APIstub.GetCreator()
	// if errc == nil {
	// 	fmt.Println("Creator: ", string(creator))
	// }
	invokeST := time.Now()
	defer func() {
		invokeDuration := time.Since(invokeST).Seconds()
		log.Printf("#############\n\t FINISHED INVOKE FUNCTION IN < %v > seconds", invokeDuration)
	}()
	args := APIstub.GetArgs()
	// Guard against a malformed invocation: GetArgs()[1] would panic the
	// chaincode when fewer than two arguments are supplied.
	if len(args) < 2 {
		return shim.Error("Expected at least 2 arguments: function name and transaction payload.")
	}
	trxArgs := &tttPb.TrxArgs{}
	if err := proto.Unmarshal(args[1], trxArgs); err != nil {
		errMsg := fmt.Sprintf("Could not parse transaction args %v. Error %s", trxArgs, err.Error())
		return shim.Error(errMsg)
	}
	switch trxArgs.Type {
	case tttPb.TrxType_MOVE:
		return move(APIstub, trxArgs.MovePayload)
	}
	// Typo fix: "Unkown" -> "Unknown".
	return shim.Error(fmt.Sprintf("Unknown transaction type < %v >", trxArgs.Type))
}
// move validates and applies one move, persisting the resulting contract and
// returning it on success.
func move(APIstub shim.ChaincodeStubInterface, payload *tttPb.MoveTrxPayload) pb.Response {
	if payload == nil {
		return shim.Error("Unexpected empty payload. Failed to do move.")
	}
	current, err := getLedgerContract(APIstub)
	if err != nil {
		return shim.Error(err.Error())
	}
	if contractTerminated(*current) {
		return shim.Error(fmt.Sprintf("Contract already terminated with state %v", current.State))
	}
	if validationErr := validateMoveArgs(APIstub, *current, *payload); validationErr != nil {
		return shim.Error(validationErr.Error())
	}
	updated, err := applyMove(*current, *payload)
	if err != nil {
		return shim.Error(err.Error())
	}
	serialized, err := proto.Marshal(&updated)
	if err != nil {
		return shim.Error(err.Error())
	}
	if err := APIstub.PutState(CONTRACT_STATE_KEY, serialized); err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success(serialized)
}
// terminatedRegexp matches the terminal contract states (X won, O won, tie).
// BUG FIX: the alternation is now grouped. The original pattern "^a|b|c$"
// anchored only the first and last alternatives, so "b" could match anywhere
// inside a longer state string.
var terminatedRegexp = regexp.MustCompile(
	fmt.Sprintf("^(%v|%v|%v)$",
		tttPb.TttContract_XWON, tttPb.TttContract_OWON, tttPb.TttContract_TIE))
// contractTerminated reports whether the contract has reached a final state.
func contractTerminated(contract tttPb.TttContract) bool {
	stateString := fmt.Sprintf("%v", contract.State)
	return terminatedRegexp.MatchString(stateString)
}
// positionRegexp matches "<position><mark>" where position is a board index (0-8) and the mark at that cell is still empty (Mark_E).
var positionRegexp = regexp.MustCompile(fmt.Sprintf("^[0-8]%v$", tttPb.Mark_E))
// positionValidationString encodes a position and its current mark for
// validation against positionRegexp.
func positionValidationString(position int32, mark tttPb.Mark) string {
	return fmt.Sprint(position) + fmt.Sprint(mark)
}
// turnRegexp accepts only the two legal (mark, turn) pairings: X on X's turn
// or O on O's turn.
var turnRegexp = regexp.MustCompile(
	fmt.Sprintf("^(%s|%s)$",
		turnValidationString(tttPb.Mark_X, tttPb.TttContract_XTURN),
		turnValidationString(tttPb.Mark_O, tttPb.TttContract_OTURN)))
// turnValidationString encodes a (mark, state) pair for validation against
// turnRegexp.
func turnValidationString(mark tttPb.Mark, state tttPb.TttContract_State) string {
	return fmt.Sprint(mark) + fmt.Sprint(state)
}
// validateMoveArgs checks that the move targets an empty, in-range cell and
// that the submitted mark matches whose turn it is.
func validateMoveArgs(APIstub shim.ChaincodeStubInterface, contract tttPb.TttContract, payload tttPb.MoveTrxPayload) error {
	// BUG FIX: bounds-check before indexing. A hostile payload with an
	// out-of-range Position previously panicked the chaincode here.
	if payload.Position < 0 || payload.Position >= int32(len(contract.Positions)) {
		return fmt.Errorf("Invalid position < %d >: out of board range", payload.Position)
	}
	pvs := positionValidationString(payload.Position,
		contract.Positions[payload.Position])
	if !positionRegexp.MatchString(pvs) {
		return fmt.Errorf("Invalid position or position not empty. Position < %d >, mark < %v >",
			payload.Position, contract.Positions[payload.Position])
	}
	tvs := turnValidationString(payload.Mark, contract.State)
	if !turnRegexp.MatchString(tvs) {
		return fmt.Errorf("Invalid turn. Got mark < %v >, expected < %v >", payload.Mark, contract.State)
	}
	return nil
}
// getLedgerContract loads and unmarshals the game contract from the ledger.
func getLedgerContract(APIstub shim.ChaincodeStubInterface) (*tttPb.TttContract, error) {
	raw, err := APIstub.GetState(CONTRACT_STATE_KEY)
	if err != nil {
		return nil, fmt.Errorf("Could not get the contract from state. Error: %s", err.Error())
	}
	contract := &tttPb.TttContract{}
	if err := proto.Unmarshal(raw, contract); err != nil {
		return nil, fmt.Errorf("Could not unmarshal the proto contract. Error: %s", err.Error())
	}
	return contract, nil
}
// applyMove returns a new contract with the move applied and the next state
// computed.
func applyMove(contract tttPb.TttContract, payload tttPb.MoveTrxPayload) (tttPb.TttContract, error) {
	// BUG FIX: the struct is passed by value, but the Positions slice header
	// still aliased the caller's backing array, so the mutation below leaked
	// into the caller's contract. Copy before mutating.
	newPositions := make([]tttPb.Mark, len(contract.Positions))
	copy(newPositions, contract.Positions)
	newPositions[payload.Position] = payload.Mark
	nextState, err := computeNextState(newPositions, contract.State)
	if err != nil {
		return tttPb.TttContract{}, err
	}
	return tttPb.TttContract{
		Positions: newPositions,
		State:     nextState,
		XPlayer:   contract.XPlayer,
		OPlayer:   contract.OPlayer,
	}, nil
}
// computeNextState decides the follow-up state after a move: a win for the
// mover, a tie on a full board, or the other player's turn.
func computeNextState(positions []tttPb.Mark, state tttPb.TttContract_State) (tttPb.TttContract_State, error) {
	posString, err := winValidationString(positions)
	if err != nil {
		return state, err
	}
	var mark tttPb.Mark
	var winState, nextTurn tttPb.TttContract_State
	switch state {
	case tttPb.TttContract_XTURN:
		mark, winState, nextTurn = tttPb.Mark_X, tttPb.TttContract_XWON, tttPb.TttContract_OTURN
	case tttPb.TttContract_OTURN:
		mark, winState, nextTurn = tttPb.Mark_O, tttPb.TttContract_OWON, tttPb.TttContract_XTURN
	default:
		return state, fmt.Errorf("Could not determine next state")
	}
	switch {
	case won(mark, posString):
		return winState, nil
	case boardFull(posString):
		return tttPb.TttContract_TIE, nil
	default:
		return nextTurn, nil
	}
}
// winValidationString flattens the board into a 9-character string of mark
// names, erroring when the board has the wrong size.
func winValidationString(positions []tttPb.Mark) (string, error) {
	if len(positions) != BOARD_SIZE {
		return "", fmt.Errorf(
			"Invalid number of positions detected. Expected %d, got %d",
			BOARD_SIZE, len(positions))
	}
	out := ""
	for _, p := range positions {
		out += p.String()
	}
	return out, nil
}
// won reports whether mark m completed a row, column, or diagonal.
func won(m tttPb.Mark, positions string) bool {
	checks := []*regexp.Regexp{threeInRowRegex(m), threeInColRegex(m), threeInDiagRegex(m)}
	for _, re := range checks {
		if re.MatchString(positions) {
			return true
		}
	}
	return false
}
// threeInRowRegex matches three consecutive m marks aligned on one board row.
func threeInRowRegex(m tttPb.Mark) *regexp.Regexp {
	s := m.String()
	return regexp.MustCompile("(^|...)(" + s + s + s + ")(...|$)")
}
// threeInColRegex matches three m marks aligned on one board column.
func threeInColRegex(m tttPb.Mark) *regexp.Regexp {
	return regexp.MustCompile(
		fmt.Sprintf("^(((%[1]s..){3})|((.%[1]s.){3})|((..%[1]s){3}))$", m.String()))
}
// threeInDiagRegex matches three m marks on either board diagonal.
func threeInDiagRegex(m tttPb.Mark) *regexp.Regexp {
	return regexp.MustCompile(
		fmt.Sprintf("^(%[1]s...%[1]s...%[1]s|..%[1]s.%[1]s.%[1]s..)$", m.String()))
}
// anyEmptyRegex matches any board string that still contains an empty cell.
var anyEmptyRegex = regexp.MustCompile(fmt.Sprintf("^.*%s.*$", tttPb.Mark_E.String()))
// boardFull reports whether no empty cell remains on the board.
func boardFull(positions string) bool {
	if anyEmptyRegex.MatchString(positions) {
		return false
	}
	return true
}
// The main function is only relevant in unit test mode. Only included here for completeness.
func main() {
	// Create a new Smart Contract
	if err := shim.Start(new(GameContract)); err != nil {
		fmt.Printf("Error creating new Smart Contract: %s", err)
	}
}
|
package models
import (
"encoding/json"
"fmt"
"io/ioutil"
"time"
log "github.com/sirupsen/logrus"
)
// Post holds the details of a single Weibo post.
type Post struct {
	URL            string `json:"url"`
	UID            uint64 `json:"uid"`
	PostCreatedAt  string `json:"created_at"`
	ID             string `json:"id" gorm:"primaryKey"`
	MID            string `json:"mid"`
	Text           string `json:"text"`
	RepostsCount   int32  `json:"reposts_count"`
	CommentsCount  int32  `json:"comments_count"`
	AttitudesCount int32  `json:"attitudes_count"`
	IsLongText     bool   `json:"isLongText"`
	PicNum         int32  `json:"pic_num"`
	// CreatedAt/UpdatedAt are record timestamps managed by gorm conventions.
	CreatedAt time.Time
	UpdatedAt time.Time
}
// Save writes the post as indented JSON to ./output/<UID>/<ID>.json, creating
// directories as needed. Existing files are left untouched.
func (p Post) Save() error {
	if err := CreateDirIfNotExist("./output"); err != nil {
		log.WithFields(log.Fields{"dir": "./output", "err": err.Error()}).Error("failed to create directory")
		return err
	}
	dir := fmt.Sprintf("./output/%v", p.UID)
	if err := CreateDirIfNotExist(dir); err != nil {
		log.WithFields(log.Fields{"dir": dir, "err": err.Error()}).Error("failed to create directory")
		return err
	}
	filename := fmt.Sprintf("./output/%v/%v.json", p.UID, p.ID)
	if FileExist(filename) {
		return nil
	}
	// BUG FIX: marshal and write errors were previously discarded with `_`,
	// so callers could not tell that a post was never persisted.
	jstr, err := json.MarshalIndent(p, "", "")
	if err != nil {
		log.WithFields(log.Fields{"file": filename, "err": err.Error()}).Error("failed to marshal post")
		return err
	}
	if err := ioutil.WriteFile(filename, jstr, 0644); err != nil {
		log.WithFields(log.Fields{"file": filename, "err": err.Error()}).Error("failed to write file")
		return err
	}
	return nil
}
|
package model
import (
"fmt"
"log"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/vhaoran/vchat/lib"
"github.com/vhaoran/vchat/lib/ypg"
)
// Abc is a minimal gorm model used to exercise the postgres helpers in these tests.
type Abc struct {
	ypg.BaseModel
	ID    int `gorm:"PRIMARY_KEY;AUTO_INCREMENT" json:"id"`
	CName string
}
// TableName tells gorm which table backs the Abc model.
func (r *Abc) TableName() string {
	const name = "abc"
	return name
}
// prepare loads only the postgres module so the tests below can use ypg.X.
func prepare() {
	opt := &lib.LoadOption{
		LoadMicroService: false,
		LoadEtcd:         false,
		//-----------attention here------------
		LoadPg: true,
		//-----------attention here------------
		LoadRedis: false,
		LoadMongo: false,
		LoadMq:    false,
		LoadJwt:   false,
	}
	if _, err := lib.InitModulesOfOptions(opt); err != nil {
		log.Println(err)
		panic(err.Error())
	}
}
// Test_pg_insert drops and recreates the abc table, inserts ten rows, then
// lists and dumps them.
func Test_pg_insert(t *testing.T) {
	var err error
	prepare()
	if ypg.X.HasTable(new(Abc)) {
		err := ypg.X.DropTable(new(Abc)).Error
		if err != nil {
			log.Println(err)
			return
		}
	}
	if !ypg.X.HasTable(new(Abc)) {
		er := ypg.X.CreateTable(new(Abc)).Error
		if er != nil {
			// BUG FIX: the original printed the outer (nil) `err` here
			// instead of the create-table error `er`.
			fmt.Println("---create table err---", er, "-----------")
			return
		}
	}
	//
	for i := 0; i < 10; i++ {
		bean := &Abc{
			ID: i,
			CName: "whr_test" +
				"",
		}
		err = ypg.X.Save(bean).Error
		if err != nil {
			log.Println(err)
			return
		}
		fmt.Println("------", "ok", "-----------")
	}
	fmt.Println("------", "demo find", "-----------")
	l := make([]*Abc, 0)
	err = ypg.X.Find(&l).Error
	if err != nil {
		log.Println(err)
		return
	}
	spew.Dump(l)
}
// Test_pg_update updates CName for ids 0-9, then lists all rows.
func Test_pg_update(t *testing.T) {
	prepare()
	for i := 0; i < 10; i++ {
		bean := &Abc{ID: i}
		//err = ypg.X.Model(bean).Update("CName", "hello").Error
		if err := ypg.X.Model(bean).Updates(Abc{CName: "test "}).Error; err != nil {
			log.Println(err)
			return
		}
		fmt.Println("------", "ok", "-----------")
	}
	fmt.Println("------", "demo find", "-----------")
	rows := make([]*Abc, 0)
	if err := ypg.X.Find(&rows).Error; err != nil {
		log.Println(err)
		return
	}
	spew.Dump(rows)
}
|
package errs
import (
"errors"
)
// fnWithError always fails with a static error.
func fnWithError() error {
	err := errors.New("some error")
	return err
}
// fnNoError always succeeds.
func fnNoError() error {
	var err error
	return err
}
|
package cacheutil
// DistributeShards is intended to rebalance cached items when the cluster's
// shard count changes. The rebalancing logic below is currently commented
// out, so the function is a stub that returns the new cluster's shard slice
// unchanged.
// NOTE(review): the commented code mutates `cluster` while iterating its
// shards and relies on jump consistent hashing — confirm locking and index
// arithmetic before re-enabling it.
func DistributeShards(newCluster *CacheCluster, cluster *CacheCluster, args *CacheClusterArgs) []*Shard {
	// cluster.Mux.Lock()
	// defer cluster.Mux.Unlock()
	// if cluster.ShardsAmount < args.ShardsAmount {
	// diff := args.ShardsAmount - cluster.ShardsAmount
	// for addingShardIdx := 0; addingShardIdx < diff; addingShardIdx++ {
	// newShard := CreateShard(args.ShardSize*mbBytes, args.CachePolicy)
	// for shardIdx := range cluster.shards {
	// for hashedKey := range cluster.shards[shardIdx].Hashmap {
	// newShardIdx := jumpConsistentHash(hashedKey, args.ShardsAmount)
	// if newShardIdx != int64(args.ShardsAmount-addingShardIdx-1) {
	// continue
	// }
	// shardItem := cluster.shards[shardIdx].Hashmap[hashedKey]
	// TTL := cluster.shards[shardIdx].Policy.HashMap[hashedKey].TTL
	// newShard.set(hashedKey, cluster.shards[shardIdx].Items[shardItem.Index], TTL)
	// cluster.shards[shardIdx].delete(hashedKey, shardItem.Index, shardItem.Length)
	// }
	// }
	// newCluster.shards = append(newCluster.shards, newShard)
	// }
	// }
	// if cluster.ShardsAmount > args.ShardsAmount {
	// diff := cluster.ShardsAmount - args.ShardsAmount
	// for removingShardIdx := 1; removingShardIdx <= diff; removingShardIdx++ {
	// for hashedKey := range cluster.shards[cluster.ShardsAmount-removingShardIdx].Hashmap {
	// newShardIdx := jumpConsistentHash(hashedKey, cluster.ShardsAmount-removingShardIdx-1)
	// shardItem := cluster.shards[cluster.ShardsAmount-removingShardIdx].Hashmap[hashedKey]
	// TTL := cluster.shards[cluster.ShardsAmount-removingShardIdx].Policy.HashMap[hashedKey].TTL
	// newCluster.shards[newShardIdx].set(hashedKey, cluster.shards[newCluster.ShardsAmount-removingShardIdx].Items[shardItem.Index], TTL)
	// }
	// cluster.shards[newCluster.ShardsAmount-removingShardIdx] = nil
	// cluster.shards = cluster.shards[:newCluster.ShardsAmount-removingShardIdx]
	// }
	// }
	// // TODO: resize shards as well
	// newCluster.ShardSize = args.ShardSize
	// newCluster.ShardsAmount = len(newCluster.shards)
	// debug.SetGCPercent(GCPercentRatio(args.ShardsAmount, args.ShardSize))
	return newCluster.shards
}
|
package base
import "math/rand"
// RandomGenerator is the random generator for gorse. It embeds *rand.Rand,
// so all of its methods (Intn, Float64, NormFloat64, ...) are promoted.
type RandomGenerator struct {
	*rand.Rand
}
// NewRandomGenerator creates a RandomGenerator deterministically seeded with
// the given seed.
func NewRandomGenerator(seed int64) RandomGenerator {
	// seed is already int64; the original redundant int64(seed) conversion
	// is removed.
	return RandomGenerator{rand.New(rand.NewSource(seed))}
}
// NewUniformVectorInt makes a vec filled with uniform random integers in
// [low, high).
func (rng RandomGenerator) NewUniformVectorInt(size, low, high int) []int {
	ret := make([]int, size)
	span := high - low
	for i := range ret {
		ret[i] = low + rng.Intn(span)
	}
	return ret
}
// NewUniformVector makes a vec filled with uniform random floats in [low, high).
func (rng RandomGenerator) NewUniformVector(size int, low, high float64) []float64 {
	ret := make([]float64, size)
	span := high - low
	for i := range ret {
		ret[i] = low + rng.Float64()*span
	}
	return ret
}
// NewNormalVector makes a vec filled with normal random floats with the given
// mean and standard deviation.
func (rng RandomGenerator) NewNormalVector(size int, mean, stdDev float64) []float64 {
	ret := make([]float64, size)
	for i := range ret {
		ret[i] = mean + rng.NormFloat64()*stdDev
	}
	return ret
}
// NewNormalMatrix makes a row-by-col matrix of normal random floats.
func (rng RandomGenerator) NewNormalMatrix(row, col int, mean, stdDev float64) [][]float64 {
	ret := make([][]float64, row)
	for i := 0; i < row; i++ {
		ret[i] = rng.NewNormalVector(col, mean, stdDev)
	}
	return ret
}
// NewUniformMatrix makes a row-by-col matrix of uniform random floats.
func (rng RandomGenerator) NewUniformMatrix(row, col int, low, high float64) [][]float64 {
	ret := make([][]float64, row)
	for i := 0; i < row; i++ {
		ret[i] = rng.NewUniformVector(col, low, high)
	}
	return ret
}
|
package storage
import (
"bytes"
"context"
"io"
"github.com/klauspost/compress/gzip"
"github.com/klauspost/compress/snappy"
"github.com/klauspost/compress/zstd"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"go.uber.org/zap"
)
// CompressType represents the type of compression applied to chunk data.
type CompressType uint8

const (
	// NoCompression won't compress given bytes.
	NoCompression CompressType = iota
	// Gzip will compress given bytes in gzip format.
	Gzip
	// Snappy will compress given bytes in snappy format.
	Snappy
	// Zstd will compress given bytes in zstd format.
	Zstd
)
// flusher is the subset of writers that can flush buffered data downstream.
type flusher interface {
	Flush() error
}
// emptyFlusher is a no-op flusher for writers that buffer nothing.
type emptyFlusher struct{}

// Flush implements flusher; it does nothing.
func (*emptyFlusher) Flush() error {
	return nil
}
// interceptBuffer is an in-memory chunk buffer that may transparently
// compress everything written into it.
type interceptBuffer interface {
	io.WriteCloser
	flusher
	Len() int         // number of buffered (possibly compressed) bytes
	Cap() int         // chunk capacity
	Bytes() []byte    // buffered bytes
	Reset()           // drop buffered bytes, keep capacity
	Compressed() bool // whether writes are compressed
}
// createSuffixString returns the file suffix (".txt" plus the compression
// extension) for the given compress type, or "" for unrecognized types
// (including NoCompression).
func createSuffixString(compressType CompressType) string {
	switch compressType {
	case Gzip:
		return ".txt" + ".gz"
	case Snappy:
		return ".txt" + ".snappy"
	case Zstd:
		return ".txt" + ".zst"
	}
	return ""
}
// newInterceptBuffer builds a raw or compressing chunk buffer depending on
// the compress type.
func newInterceptBuffer(chunkSize int, compressType CompressType) interceptBuffer {
	if compressType != NoCompression {
		return newSimpleCompressBuffer(chunkSize, compressType)
	}
	return newNoCompressionBuffer(chunkSize)
}
// newCompressWriter wraps w with a compressor for the given type, or returns
// nil for unknown types.
func newCompressWriter(compressType CompressType, w io.Writer) simpleCompressWriter {
	switch compressType {
	case Gzip:
		return gzip.NewWriter(w)
	case Snappy:
		return snappy.NewBufferedWriter(w)
	case Zstd:
		zw, err := zstd.NewWriter(w)
		if err != nil {
			log.Warn("Met error when creating new writer for Zstd type file", zap.Error(err))
		}
		return zw
	}
	return nil
}
// newCompressReader wraps r with a decompressor for the given type, or
// returns (nil, nil) for unknown types.
func newCompressReader(compressType CompressType, r io.Reader) (io.Reader, error) {
	switch compressType {
	case Snappy:
		return snappy.NewReader(r), nil
	case Gzip:
		return gzip.NewReader(r)
	case Zstd:
		return zstd.NewReader(r)
	}
	return nil, nil
}
// noCompressionBuffer is an interceptBuffer that stores bytes verbatim.
type noCompressionBuffer struct {
	*bytes.Buffer
}

// Flush implements flusher; nothing is buffered beyond bytes.Buffer itself.
func (*noCompressionBuffer) Flush() error {
	return nil
}

// Close implements io.Closer as a no-op.
func (*noCompressionBuffer) Close() error {
	return nil
}

// Compressed reports that this buffer stores raw bytes.
func (*noCompressionBuffer) Compressed() bool {
	return false
}

// newNoCompressionBuffer creates a raw buffer with the given chunk capacity.
func newNoCompressionBuffer(chunkSize int) *noCompressionBuffer {
	return &noCompressionBuffer{bytes.NewBuffer(make([]byte, 0, chunkSize))}
}
// simpleCompressWriter is a compressing writer that can be flushed and closed.
type simpleCompressWriter interface {
	io.WriteCloser
	flusher
}

// simpleCompressBuffer routes writes through compressWriter, which emits
// compressed bytes into the embedded bytes.Buffer.
type simpleCompressBuffer struct {
	*bytes.Buffer
	compressWriter simpleCompressWriter
	cap            int
}

// Write compresses p into the underlying buffer.
func (b *simpleCompressBuffer) Write(p []byte) (int, error) {
	written, err := b.compressWriter.Write(p)
	return written, errors.Trace(err)
}

// Len returns the number of compressed bytes currently buffered.
func (b *simpleCompressBuffer) Len() int {
	return b.Buffer.Len()
}

// Cap returns the configured chunk capacity (not the buffer's actual capacity).
func (b *simpleCompressBuffer) Cap() int {
	return b.cap
}

// Reset drops buffered bytes; the compressor's own state is untouched.
func (b *simpleCompressBuffer) Reset() {
	b.Buffer.Reset()
}

// Flush forces the compressor to emit pending data into the buffer.
func (b *simpleCompressBuffer) Flush() error {
	return b.compressWriter.Flush()
}

// Close finalizes the compressed stream (e.g. writes the trailer).
func (b *simpleCompressBuffer) Close() error {
	return b.compressWriter.Close()
}

// Compressed reports that writes are compressed.
func (*simpleCompressBuffer) Compressed() bool {
	return true
}
// newSimpleCompressBuffer creates a compressing chunk buffer of the given
// capacity and compression type.
func newSimpleCompressBuffer(chunkSize int, compressType CompressType) *simpleCompressBuffer {
	buf := bytes.NewBuffer(make([]byte, 0, chunkSize))
	b := &simpleCompressBuffer{}
	b.Buffer = buf
	b.cap = chunkSize
	b.compressWriter = newCompressWriter(compressType, buf)
	return b
}
// bufferedWriter accumulates writes into fixed-size chunks and uploads each
// full chunk to the wrapped ExternalFileWriter.
type bufferedWriter struct {
	buf    interceptBuffer
	writer ExternalFileWriter
}
// Write buffers p, uploading full chunks to the underlying writer as needed.
// It returns how many bytes of p were accepted and the first error hit.
func (u *bufferedWriter) Write(ctx context.Context, p []byte) (int, error) {
	bytesWritten := 0
	for u.buf.Len()+len(p) > u.buf.Cap() {
		// We won't fit p in this chunk
		// Is this chunk full?
		chunkToFill := u.buf.Cap() - u.buf.Len()
		if chunkToFill > 0 {
			// It's not full so we write enough of p to fill it
			prewrite := p[0:chunkToFill]
			w, err := u.buf.Write(prewrite)
			bytesWritten += w
			if err != nil {
				return bytesWritten, errors.Trace(err)
			}
			p = p[w:]
			// continue buf because compressed data size may be less than Cap - Len
			if u.buf.Compressed() {
				continue
			}
		}
		// BUG FIX: the Flush error was previously discarded, silently losing
		// buffered compressed data on failure.
		if err := u.buf.Flush(); err != nil {
			return bytesWritten, errors.Trace(err)
		}
		if err := u.uploadChunk(ctx); err != nil {
			// BUG FIX: report the bytes actually consumed (was hard-coded 0).
			return bytesWritten, errors.Trace(err)
		}
	}
	w, err := u.buf.Write(p)
	bytesWritten += w
	return bytesWritten, errors.Trace(err)
}
// uploadChunk sends the buffered bytes to the wrapped writer and resets the
// buffer; it is a no-op when nothing is buffered.
func (u *bufferedWriter) uploadChunk(ctx context.Context) error {
	if u.buf.Len() == 0 {
		return nil
	}
	chunk := u.buf.Bytes()
	u.buf.Reset()
	if _, err := u.writer.Write(ctx, chunk); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// Close finalizes the compressor, uploads the remaining chunk, and closes the
// underlying writer. The first error encountered is returned, but the writer
// is still closed to avoid leaking it.
func (u *bufferedWriter) Close(ctx context.Context) error {
	// BUG FIX: the buffer's Close error was previously ignored; a failed
	// finalization could truncate the compressed stream silently.
	closeErr := u.buf.Close()
	if err := u.uploadChunk(ctx); err != nil {
		return errors.Trace(err)
	}
	if err := u.writer.Close(ctx); err != nil {
		return errors.Trace(err)
	}
	return errors.Trace(closeErr)
}
// NewUploaderWriter wraps the Writer interface over an uploader.
func NewUploaderWriter(writer ExternalFileWriter, chunkSize int, compressType CompressType) ExternalFileWriter {
	var w ExternalFileWriter = newBufferedWriter(writer, chunkSize, compressType)
	return w
}
// newBufferedWriter is used to build a buffered writer.
func newBufferedWriter(writer ExternalFileWriter, chunkSize int, compressType CompressType) *bufferedWriter {
	bw := &bufferedWriter{}
	bw.writer = writer
	bw.buf = newInterceptBuffer(chunkSize, compressType)
	return bw
}
// BytesWriter is a Writer implementation on top of bytes.Buffer that is useful for testing.
type BytesWriter struct {
	buf *bytes.Buffer
}

// Write delegates to bytes.Buffer; the context is ignored.
func (u *BytesWriter) Write(_ context.Context, p []byte) (int, error) {
	return u.buf.Write(p)
}

// Close delegates to bytes.Buffer.
func (*BytesWriter) Close(_ context.Context) error {
	// noop
	return nil
}

// Bytes delegates to bytes.Buffer.
func (u *BytesWriter) Bytes() []byte {
	return u.buf.Bytes()
}

// String delegates to bytes.Buffer.
func (u *BytesWriter) String() string {
	return u.buf.String()
}

// Reset delegates to bytes.Buffer.
func (u *BytesWriter) Reset() {
	u.buf.Reset()
}

// NewBufferWriter creates a Writer that simply writes to a buffer (useful for testing).
func NewBufferWriter() *BytesWriter {
	return &BytesWriter{buf: &bytes.Buffer{}}
}
|
package main
import "fmt"
// main demonstrates fmt.Printf verbs for strings, ints, and floats.
func main() {
	fmt.Printf("My name is %s, I am %d years old\n", "Alan", 3)
	cupcakes := 3
	calories := 320.5
	total := float64(cupcakes) * calories
	fmt.Printf("One cupcake has %f calories. I ate %d, so in total I consumed %.2f calories", calories, cupcakes, total)
}
|
package main
import (
"fmt"
"github.com/graphql-go/graphql"
"github.com/graphql-go/handler"
"go_graphql/petstore/mutations"
"go_graphql/petstore/queries"
"log"
"net/http"
"os"
)
// main wires the petstore GraphQL schema into an HTTP handler on :8080,
// logging to development.log.
func main() {
	f, err := os.OpenFile("development.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		fmt.Println("Unable to open file for logging", err)
	} else {
		// BUG FIX: only redirect logging when the file actually opened;
		// log.SetOutput on a nil file would panic on the first write.
		defer f.Close()
		log.SetOutput(f)
	}
	schemaConfig := graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name:   "RootQuery",
			Fields: queries.GetRootFields(),
		}),
		Mutation: graphql.NewObject(graphql.ObjectConfig{
			Name:   "RootMutation",
			Fields: mutations.GetRootFields(),
		}),
	}
	schema, err := graphql.NewSchema(schemaConfig)
	if err != nil {
		log.Fatal("Failed to create new schema ", err, err.Error())
	}
	httpHandler := handler.New(&handler.Config{
		Schema: &schema,
	})
	http.Handle("/", httpHandler)
	// BUG FIX: the ListenAndServe error was previously discarded, so a
	// bind failure exited silently with status 0.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Fatal("Server stopped: ", err)
	}
}
|
package entity
import "github.com/google/uuid"
// Todo is a single todo-list entry.
type Todo struct {
	Id          uuid.UUID // unique identifier
	Description string    // human-readable task text
	Finished    bool      // whether the task is done
}
|
package gateway
import (
"crypto/rand"
"errors"
"fmt"
"github.com/adonese/noebs/utils"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/go-redis/redis"
"github.com/jinzhu/gorm"
log "github.com/sirupsen/logrus"
"golang.org/x/crypto/bcrypt"
"net/http"
"os"
"strconv"
"strings"
"time"
)
// apiKey is a 16-byte buffer reserved for API-key material.
var apiKey = make([]byte, 16)

// jwtKey is the JWT signing key, sourced from the environment.
var jwtKey = keyFromEnv()
//GenerateAPIKey An Admin-only endpoint that is used to generate api key for our clients
// the user must submit their email to generate a unique token per email.
func GenerateAPIKey(c *gin.Context) {
	var m map[string]string
	// BUG FIX: the original branched on `err != nil` and then used the
	// (unbound) map inside that branch, so every well-formed request was
	// rejected and only malformed ones reached the key-generation path.
	if err := c.ShouldBindJSON(&m); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"message": "error in email"})
		return
	}
	if _, ok := m["email"]; !ok {
		c.JSON(http.StatusBadRequest, gin.H{"message": "missing_field"})
		return
	}
	k, _ := generateApiKey()
	getRedis := utils.GetRedis()
	getRedis.SAdd("apikeys", k)
	c.JSON(http.StatusOK, gin.H{"result": k})
}
//ApiKeyMiddleware used to authenticate clients using X-Email and X-API-Key headers
func ApiKeyMiddleware(c *gin.Context) {
	email := c.GetHeader("X-Email")
	key := c.GetHeader("X-API-Key")
	if email == "" || key == "" {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"message": "unauthorized"})
		return
	}
	redisClient := utils.GetRedis()
	res, err := redisClient.HGet("api_keys", email).Result()
	// BUG FIX: the original tested `err != redis.Nil`, which rejected the
	// success case (err == nil) and let only the missing-key error through.
	// Any lookup failure — including a missing key (redis.Nil) — is
	// unauthorized.
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"message": "unauthorized"})
		return
	}
	if key != res {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"message": "unauthorized"})
		return
	}
	// NOTE(review): GenerateAPIKey stores keys with SAdd into the "apikeys"
	// set, but this reads the "api_keys" hash — confirm which store is
	// canonical.
	c.Next()
}
// IpFilterMiddleware counts requests per client IP for the authenticated
// username; unauthenticated requests are rejected.
func IpFilterMiddleware(c *gin.Context) {
	username := c.GetString("username")
	if username == "" {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"message": "unauthorized_access"})
		return
	}
	utils.GetRedis().HIncrBy(username+":ips_count", c.ClientIP(), 1)
	c.Next()
}
// LoginHandler authenticates a user: binds the JSON credentials, loads the
// user from sqlite, rate-limits failed attempts via redis (max 5 per hour),
// compares the bcrypt password hash, and on success issues a JWT and records
// the active session.
func LoginHandler(c *gin.Context) {
	var req UserLogin
	if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
		// The request is wrong
		log.Printf("The request is wrong. %v", err)
		c.JSON(http.StatusBadRequest, gin.H{"message": err.Error(), "code": "bad_request"})
		return
	}
	//db connection. Not good.
	db, err := gorm.Open("sqlite3", "test.db")
	if err != nil {
		log.Fatalf("There's an error in DB connection, %v", err)
	}
	defer db.Close()
	// do the Models migrations here. The ones you will be using
	db.AutoMigrate(&Service{}, &JWT{}, &UserModel{}, &UserLogin{})
	log.Printf("the processed request is: %v\n", req)
	var u UserModel
	// Usernames are stored lowercase; normalize before the lookup.
	if notFound := db.Preload("jwt").Where("username = ?", strings.ToLower(req.Username)).First(&u).RecordNotFound(); notFound {
		// service id is not found
		log.Printf("User with service_id %s is not found.", req.Username)
		c.JSON(http.StatusBadRequest, gin.H{"message": notFound, "code": "not_found"})
		return
	}
	// Make sure the user doesn't have any active sessions!
	redisClient := utils.GetRedis()
	lCount, err := redisClient.Get(req.Username + ":logged_in_devices").Result()
	num, _ := strconv.Atoi(lCount)
	// Allow for the user to be logged in -- add allowance through someway
	if err != redis.Nil && num > 1 {
		// The user is already logged in somewhere else. Communicate that to them, clearly!
		// NOTE(review): concurrent sessions are only logged, not rejected —
		// the rejection below is intentionally commented out.
		//c.JSON(http.StatusBadRequest, gin.H{"code": "user_logged_elsewhere",
		// "error": "You are logging from another device. You can only have one valid session"})
		//return
		log.Print("The user is logging from a different location")
	}
	// make sure number of failed logged_in counts doesn't exceed the allowed threshold.
	res, err := redisClient.Get(req.Username + ":login_counts").Result()
	if err == redis.Nil {
		// the has just logged in
		// First attempt in this window: start a counter with a 1-hour TTL.
		redisClient.Set(req.Username+":login_counts", 0, time.Hour)
	} else if err == nil {
		count, _ := strconv.Atoi(res)
		if count >= 5 {
			// Allow users to use another login method (e.g., totp, or they should reset their password)
			// Lock their account
			//redisClient.HSet(req.Username, "suspecious_behavior", 1)
			redisClient.HIncrBy(req.Username, "suspicious_behavior", 1)
			ttl, _ := redisClient.TTL(req.Username + ":login_counts").Result()
			c.JSON(http.StatusBadRequest, gin.H{"message": "Too many wrong login attempts",
				"code": "maximum_login", "ttl_minutes": ttl.Minutes()})
			return
		}
	}
	if err := bcrypt.CompareHashAndPassword([]byte(u.Password), []byte(req.Password)); err != nil {
		log.Printf("there is an error in the password %v", err)
		// Count this failed attempt toward the 5-per-hour threshold.
		redisClient.Incr(req.Username + ":login_counts")
		c.JSON(http.StatusBadRequest, gin.H{"message": "wrong password entered", "code": "wrong_password"})
		return
	}
	// it is a successful login attempt
	redisClient.Del(req.Username + ":login_counts")
	token, err := GenerateJWT(u.Username, jwtKey)
	if err != nil {
		c.JSON(http.StatusInternalServerError, err)
		return
	}
	u.jwt.SecretKey = string(jwtKey)
	u.jwt.CreatedAt = time.Now().UTC()
	err = db.Save(&u).Error
	if err != nil {
		c.JSON(http.StatusInternalServerError, err.Error())
		return
	}
	c.Writer.Header().Set("Authorization", token)
	// Redis add The user is now logged in -- and has active session
	redisClient.Incr(req.Username + ":logged_in_devices")
	c.JSON(http.StatusOK, gin.H{"authorization": token, "user": u})
}
// RefreshHandler re-issues a JWT for a valid or expired token. Malformed or
// otherwise invalid tokens are rejected.
func RefreshHandler(c *gin.Context) {
	// just handle the simplest case, authorization is not provided.
	h := c.GetHeader("Authorization")
	if h == "" {
		c.JSON(http.StatusUnauthorized, gin.H{"message": "empty header was sent", "code": "unauthorized"})
		return
	}
	claims, err := VerifyJWT(h, jwtKey)
	if err == nil {
		// FIXME it is better to let the endpoint explicitly Get the claim off the user
		// as we will assume the auth server will reside in a different domain!
		log.Printf("the username is: %s", claims.Username)
		auth, _ := GenerateJWT(claims.Username, jwtKey)
		c.Writer.Header().Set("Authorization", auth)
		c.JSON(http.StatusOK, gin.H{"authorization": auth})
		return
	}
	if e, ok := err.(*jwt.ValidationError); ok {
		if e.Errors&jwt.ValidationErrorExpired != 0 {
			// An expired-but-otherwise-valid token may be refreshed.
			log.Printf("the username is: %s", claims.Username)
			auth, _ := GenerateJWT(claims.Username, jwtKey)
			c.Writer.Header().Set("Authorization", auth)
			c.JSON(http.StatusOK, gin.H{"authorization": auth})
			return
		}
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"message": "Malformed token", "code": "jwt_malformed"})
		return
	}
	// BUG FIX: the original wrote no response at all for non-validation
	// errors, leaving the client with an empty 200.
	c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"message": err.Error(), "code": "unauthorized"})
}
// LogOut decrements the user's logged-in device counter after verifying the
// supplied JWT.
func LogOut(c *gin.Context) {
	//TODO implement logout API to limit the number of currently logged in devices
	// just handle the simplest case, authorization is not provided.
	h := c.GetHeader("Authorization")
	if h == "" {
		c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"message": "empty header was sent",
			"code": "unauthorized"})
		return
	}
	claims, err := VerifyJWT(h, jwtKey)
	if err != nil {
		c.JSON(http.StatusUnauthorized, gin.H{"message": err.Error(), "code": "malformed_jwt_token"})
		return
	}
	if claims.Username == "" {
		c.JSON(http.StatusUnauthorized, gin.H{"message": "Unauthorized", "code": "unauthorized"})
		return
	}
	utils.GetRedis().Decr(claims.Username + ":logged_in_devices")
	c.JSON(http.StatusNoContent, gin.H{"message": "Device Successfully Logged Out"})
}
// CreateUser registers a new user: migrates the model, binds the request,
// hashes the password, stores the user, and records their mobile and first IP.
func CreateUser(c *gin.Context) {
	db, err := gorm.Open("sqlite3", "test.db")
	if err != nil {
		c.AbortWithStatusJSON(500, gin.H{"message": serverError.Error()})
		return
	}
	defer db.Close()
	var u UserModel
	if err := db.AutoMigrate(&UserModel{}).Error; err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
		return
	}
	err = c.ShouldBindBodyWith(&u, binding.JSON)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
		return
	}
	err = u.hashPassword()
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
		// BUG FIX: without this return the handler continued and stored the
		// user with an unhashed password.
		return
	}
	// make the user capital - small
	u.SanitizeName()
	if err := db.Create(&u).Error; err != nil {
		// unable to create this user; see possible reasons
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"message": err.Error(), "code": "duplicate_username"})
		return
	}
	redisClient := utils.GetRedis()
	redisClient.Set(u.Mobile, u.Username, 0)
	ip := c.ClientIP()
	redisClient.HSet(u.Username+":ips_count", "first_ip", ip)
	c.JSON(http.StatusCreated, gin.H{"ok": "object was successfully created", "details": u})
}
// GetServiceID looks up a Service row whose username matches the "id" query
// parameter and reports whether it exists.
func GetServiceID(c *gin.Context) {
	db, err := gorm.Open("sqlite3", "test.db")
	if err != nil {
		// BUG FIX: missing return let execution continue and dereference a nil db.
		c.AbortWithStatusJSON(500, gin.H{"message": err.Error()})
		return
	}
	defer db.Close()
	db.AutoMigrate(&Service{})
	id := c.Query("id")
	if id == "" {
		// BUG FIX: missing return allowed the query to run with an empty id.
		c.AbortWithStatusJSON(400, gin.H{"message": errNoServiceID.Error()})
		return
	}
	// (leftover debug fmt.Printf of the query param removed)
	var res Service
	if err := db.Where("username = ?", id).First(&res).Error; err != nil {
		c.AbortWithStatusJSON(404, gin.H{"message": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"ok": "this object is available"})
}
// Package-level sentinel errors shared by the HTTP handlers in this file.
var (
	serverError = errors.New("unable to connect to the DB") // generic DB connection failure
	ErrCreateDbRow = errors.New("unable to create a new db row/column") // exported: row/column creation failure
	errNoServiceID = errors.New("empty Service ID was entered") // missing "id" query parameter
	errObjectNotFound = errors.New("object not found") // lookup miss
)
//AuthMiddleware is a JWT authorization middleware. It is used in our consumer services
//to get a username from the payload (maybe change it to mobile number at somepoint)
func AuthMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		// The Authorization header is mandatory.
		h := c.GetHeader("Authorization")
		if h == "" {
			c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"message": "empty header was sent",
				"code": "unauthorized"})
			return
		}
		claims, err := VerifyJWT(h, jwtKey)
		if e, ok := err.(*jwt.ValidationError); ok {
			if e.Errors&jwt.ValidationErrorExpired != 0 {
				// in this case you might need to give it another spin
				c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"message": "Token has expired", "code": "jwt_expired"})
				return
			}
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"message": "Malformed token", "code": "jwt_malformed"})
			return
		}
		if err != nil {
			// BUG FIX: errors that were not *jwt.ValidationError previously
			// matched neither branch, so the handler returned without aborting
			// and gin continued the chain — unauthenticated requests reached
			// the protected endpoints with no "username" set.
			c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"message": err.Error(), "code": "unauthorized"})
			return
		}
		// FIXME it is better to let the endpoint explicitly Get the claim off the user
		// as we will assume the auth server will reside in a different domain!
		c.Set("username", claims.Username)
		log.Printf("the username is: %s", claims.Username)
		c.Next()
	}
}
//ApiAuth API-Key middleware. Currently is used by consumer services
func ApiAuth() gin.HandlerFunc {
	r := utils.GetRedis()
	return func(c *gin.Context) {
		// BUG FIX: requests that omitted the api-key header previously skipped
		// the membership check entirely and were let through; the key is now
		// required and validated against the "apikeys" redis set.
		key := c.GetHeader("api-key")
		if key == "" || !isMember("apikeys", key, r) {
			c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"code": "wrong_api_key",
				"message": "visit https://soluspay.net/contact for a key"})
			return
		}
		c.Next()
	}
}
// GenerateSecretKey returns n cryptographically secure random bytes.
func GenerateSecretKey(n int) ([]byte, error) {
	buf := make([]byte, n)
	_, err := rand.Read(buf)
	if err != nil {
		return nil, err
	}
	return buf, nil
}
// keyFromEnv either generates or retrieves the JWT signing key.
// Lookup order: 1) the noebs_jwt environment variable, 2) the "jwt" key in
// redis, 3) a freshly generated 50-byte secret, which is then persisted to
// both redis and the environment.
func keyFromEnv() []byte {
	if key := os.Getenv("noebs_jwt"); key != "" {
		return []byte(key)
	}
	redisClient := utils.GetRedis()
	// BUG FIX: StringCmd.String() renders the whole redis command
	// ("get jwt: ..."), so it is never empty and the stored value was never
	// returned correctly. Val() yields just the stored value ("" on a miss).
	if key := redisClient.Get("jwt").Val(); key != "" {
		return []byte(key)
	}
	key, _ := GenerateSecretKey(50)
	redisClient.Set("jwt", key, 0)
	err := os.Setenv("noebs_jwt", string(key))
	log.Printf("the error in env is: %v", err)
	return key
}
// OptionsMiddleware answers CORS preflight (OPTIONS) requests directly and
// stamps Access-Control-Allow-Origin on every other request before passing
// it down the chain.
func OptionsMiddleware(c *gin.Context) {
	// Common to both paths.
	c.Header("Access-Control-Allow-Origin", "*")
	if c.Request.Method != "OPTIONS" {
		c.Next()
		return
	}
	c.Header("Access-Control-Allow-Methods", "GET,POST,PUT,PATCH,DELETE,OPTIONS")
	c.Header("Access-Control-Allow-Headers", "authorization, origin, content-type, accept")
	c.Header("Allow", "HEAD,GET,POST,PUT,PATCH,DELETE,OPTIONS")
	c.Header("Content-Type", "application/json")
	c.AbortWithStatus(http.StatusOK)
}
// generateApiKey fills the package-level apiKey buffer with random bytes and
// returns its hex encoding together with any error from the RNG.
func generateApiKey() (string, error) {
	_, err := rand.Read(apiKey)
	return fmt.Sprintf("%x", apiKey), err
}
// isMember reports whether val belongs to the redis set named key.
// Redis errors are deliberately treated as "not a member".
func isMember(key, val string, r *redis.Client) bool {
	ok, _ := r.SIsMember(key, val).Result()
	return ok
}
// getMap checks that the hash field key inside "apikeys" holds exactly val.
// It returns (false, err) on redis failure, (false, "wrong_key") on mismatch.
func getMap(key, val string, r *redis.Client) (bool, error) {
	stored, err := r.HGet("apikeys", key).Result()
	switch {
	case err != nil:
		return false, err
	case stored != val:
		return false, errors.New("wrong_key")
	default:
		return true, nil
	}
}
|
package user
import (
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"net/http"
"strconv"
)
// Route patterns served by userHandler.
const (
	userURL = "/user/{id}"
	searchURL = "/user/search/"
)
// compile-time check that userHandler satisfies Handler
var _ Handler = &userHandler{}
// userHandler serves the user lookup endpoints, delegating to UserService.
type userHandler struct {
	UserService Service
}
// AppError is the JSON error envelope returned to clients.
type AppError struct {
	Message string `json:"error"`
}
// Handler is anything that can register its routes on a mux router.
type Handler interface {
	Register(router *mux.Router)
}
// Register wires the user routes onto the given router (GET only).
func (h *userHandler) Register(router *mux.Router) {
	byID := router.HandleFunc(userURL, h.getUserById)
	byID.Methods(http.MethodGet)
	byNickname := router.HandleFunc(searchURL, h.getUserByNickname)
	byNickname.Methods(http.MethodGet)
}
// getUserById serves GET /user/{id}: validates that the id is numeric, asks
// the service layer (cache first, then storage) and renders the result,
// recording prometheus counters for every outcome.
func (h *userHandler) getUserById(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	id := mux.Vars(r)["id"]
	// total-request counter fires after the response is written
	defer getUserRequestsTotal.Inc()
	if _, convErr := strconv.Atoi(id); convErr != nil {
		// non-numeric id: count the error and answer 418
		defer getUserRequestsError.Inc()
		defer httpStatusCodes.WithLabelValues(strconv.Itoa(http.StatusTeapot), http.MethodGet).Inc()
		renderJSON(w, &AppError{Message: "nothing interresing"}, http.StatusTeapot)
		h.UserService.error(convErr)
		return
	}
	// cache-through lookup via the service layer
	user, err := h.UserService.getByID(id)
	if err != nil {
		defer getUserRequestsError.Inc()
		defer httpStatusCodes.WithLabelValues(strconv.Itoa(http.StatusNotFound), http.MethodGet).Inc()
		renderJSON(w, &AppError{Message: "not found"}, http.StatusNotFound)
		h.UserService.error(err)
		return
	}
	defer getUserRequestsSuccess.Inc()
	defer httpStatusCodes.WithLabelValues(strconv.Itoa(http.StatusOK), http.MethodGet).Inc()
	renderJSON(w, &user, http.StatusOK)
}
// getUserByNickname serves GET /user/search/?nickname=... It resolves the
// user via the service layer (cache first, then storage) and renders JSON,
// recording prometheus counters for every outcome.
//
// BUG FIX: removed a leftover fmt.Println(nickname) debug print to stdout.
func (h *userHandler) getUserByNickname(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	nickname := r.FormValue("nickname")
	// after response increment prometheus metrics
	defer getUserRequestsTotal.Inc()
	// call user service to get requested user from cache, if not found get from storage and place to cache
	user, err := h.UserService.findByNickname(nickname)
	if err != nil {
		defer getUserRequestsError.Inc()
		defer httpStatusCodes.WithLabelValues(strconv.Itoa(http.StatusNotFound), http.MethodGet).Inc()
		renderJSON(w, &AppError{Message: "not found"}, http.StatusNotFound)
		h.UserService.error(err)
		return
	}
	defer getUserRequestsSuccess.Inc()
	defer httpStatusCodes.WithLabelValues(strconv.Itoa(http.StatusOK), http.MethodGet).Inc()
	renderJSON(w, &user, http.StatusOK)
}
// GetHandler builds a Handler backed by the given user Service.
func GetHandler(userService Service) Handler {
	return &userHandler{UserService: userService}
}
// renderJSON writes statusCode and then the JSON encoding of val.
// The encode error is deliberately discarded: the status line has already
// been sent, so there is nothing useful left to do with it.
func renderJSON(w http.ResponseWriter, val interface{}, statusCode int) {
	w.WriteHeader(statusCode)
	enc := json.NewEncoder(w)
	_ = enc.Encode(val)
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/alpha/compute_alpha_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/alpha"
)
// NetworkServer implements the gRPC interface for Network.
// It is stateless: every RPC builds a fresh DCL client from the request's
// service account file via createConfigNetwork.
type NetworkServer struct{}
// ProtoToComputeAlphaNetworkRoutingConfigRoutingModeEnum converts a
// NetworkRoutingConfigRoutingModeEnum enum from its proto representation.
func ProtoToComputeAlphaNetworkRoutingConfigRoutingModeEnum(e alphapb.ComputeAlphaNetworkRoutingConfigRoutingModeEnum) *alpha.NetworkRoutingConfigRoutingModeEnum {
	// zero is the proto "unspecified" value
	if e == 0 {
		return nil
	}
	name, ok := alphapb.ComputeAlphaNetworkRoutingConfigRoutingModeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// strip the proto-specific prefix to recover the bare enum value
	v := alpha.NetworkRoutingConfigRoutingModeEnum(name[len("ComputeAlphaNetworkRoutingConfigRoutingModeEnum"):])
	return &v
}
// ProtoToComputeAlphaNetworkRoutingConfig converts a NetworkRoutingConfig
// object from its proto representation.
func ProtoToComputeAlphaNetworkRoutingConfig(p *alphapb.ComputeAlphaNetworkRoutingConfig) *alpha.NetworkRoutingConfig {
	if p == nil {
		return nil
	}
	out := alpha.NetworkRoutingConfig{}
	out.RoutingMode = ProtoToComputeAlphaNetworkRoutingConfigRoutingModeEnum(p.GetRoutingMode())
	return &out
}
// ProtoToNetwork converts a Network resource from its proto representation.
func ProtoToNetwork(p *alphapb.ComputeAlphaNetwork) *alpha.Network {
	n := &alpha.Network{}
	n.Description = dcl.StringOrNil(p.GetDescription())
	n.GatewayIPv4 = dcl.StringOrNil(p.GetGatewayIpv4())
	n.Name = dcl.StringOrNil(p.GetName())
	n.AutoCreateSubnetworks = dcl.Bool(p.GetAutoCreateSubnetworks())
	n.RoutingConfig = ProtoToComputeAlphaNetworkRoutingConfig(p.GetRoutingConfig())
	n.Mtu = dcl.Int64OrNil(p.GetMtu())
	n.Project = dcl.StringOrNil(p.GetProject())
	n.SelfLink = dcl.StringOrNil(p.GetSelfLink())
	n.SelfLinkWithId = dcl.StringOrNil(p.GetSelfLinkWithId())
	return n
}
// ComputeAlphaNetworkRoutingConfigRoutingModeEnumToProto converts a
// NetworkRoutingConfigRoutingModeEnum enum to its proto representation.
func ComputeAlphaNetworkRoutingConfigRoutingModeEnumToProto(e *alpha.NetworkRoutingConfigRoutingModeEnum) alphapb.ComputeAlphaNetworkRoutingConfigRoutingModeEnum {
	// proto zero value means "unspecified"
	unspecified := alphapb.ComputeAlphaNetworkRoutingConfigRoutingModeEnum(0)
	if e == nil {
		return unspecified
	}
	v, ok := alphapb.ComputeAlphaNetworkRoutingConfigRoutingModeEnum_value["NetworkRoutingConfigRoutingModeEnum"+string(*e)]
	if !ok {
		return unspecified
	}
	return alphapb.ComputeAlphaNetworkRoutingConfigRoutingModeEnum(v)
}
// ComputeAlphaNetworkRoutingConfigToProto converts a NetworkRoutingConfig
// object to its proto representation.
func ComputeAlphaNetworkRoutingConfigToProto(o *alpha.NetworkRoutingConfig) *alphapb.ComputeAlphaNetworkRoutingConfig {
	if o == nil {
		return nil
	}
	out := &alphapb.ComputeAlphaNetworkRoutingConfig{}
	out.SetRoutingMode(ComputeAlphaNetworkRoutingConfigRoutingModeEnumToProto(o.RoutingMode))
	return out
}
// NetworkToProto converts a Network resource to its proto representation.
func NetworkToProto(resource *alpha.Network) *alphapb.ComputeAlphaNetwork {
	out := &alphapb.ComputeAlphaNetwork{}
	out.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	out.SetGatewayIpv4(dcl.ValueOrEmptyString(resource.GatewayIPv4))
	out.SetName(dcl.ValueOrEmptyString(resource.Name))
	out.SetAutoCreateSubnetworks(dcl.ValueOrEmptyBool(resource.AutoCreateSubnetworks))
	out.SetRoutingConfig(ComputeAlphaNetworkRoutingConfigToProto(resource.RoutingConfig))
	out.SetMtu(dcl.ValueOrEmptyInt64(resource.Mtu))
	out.SetProject(dcl.ValueOrEmptyString(resource.Project))
	out.SetSelfLink(dcl.ValueOrEmptyString(resource.SelfLink))
	out.SetSelfLinkWithId(dcl.ValueOrEmptyString(resource.SelfLinkWithId))
	return out
}
// applyNetwork handles the gRPC request by passing it to the underlying
// Network Apply() method and converting the result back to proto.
func (s *NetworkServer) applyNetwork(ctx context.Context, c *alpha.Client, request *alphapb.ApplyComputeAlphaNetworkRequest) (*alphapb.ComputeAlphaNetwork, error) {
	applied, err := c.ApplyNetwork(ctx, ProtoToNetwork(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return NetworkToProto(applied), nil
}
// ApplyComputeAlphaNetwork handles the gRPC request by passing it to the
// underlying Network Apply() method.
func (s *NetworkServer) ApplyComputeAlphaNetwork(ctx context.Context, request *alphapb.ApplyComputeAlphaNetworkRequest) (*alphapb.ComputeAlphaNetwork, error) {
	client, err := createConfigNetwork(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyNetwork(ctx, client, request)
}
// DeleteComputeAlphaNetwork handles the gRPC request by passing it to the
// underlying Network Delete() method.
func (s *NetworkServer) DeleteComputeAlphaNetwork(ctx context.Context, request *alphapb.DeleteComputeAlphaNetworkRequest) (*emptypb.Empty, error) {
	client, err := createConfigNetwork(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	err = client.DeleteNetwork(ctx, ProtoToNetwork(request.GetResource()))
	return &emptypb.Empty{}, err
}
// ListComputeAlphaNetwork handles the gRPC request by passing it to the
// underlying NetworkList() method and converting each item back to proto.
func (s *NetworkServer) ListComputeAlphaNetwork(ctx context.Context, request *alphapb.ListComputeAlphaNetworkRequest) (*alphapb.ListComputeAlphaNetworkResponse, error) {
	client, err := createConfigNetwork(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	list, err := client.ListNetwork(ctx, request.GetProject())
	if err != nil {
		return nil, err
	}
	var protos []*alphapb.ComputeAlphaNetwork
	for _, item := range list.Items {
		protos = append(protos, NetworkToProto(item))
	}
	resp := &alphapb.ListComputeAlphaNetworkResponse{}
	resp.SetItems(protos)
	return resp, nil
}
// createConfigNetwork builds an alpha compute client authenticated with the
// given service account file.
func createConfigNetwork(ctx context.Context, service_account_file string) (*alpha.Client, error) {
	conf := dcl.NewConfig(
		dcl.WithUserAgent("dcl-test"),
		dcl.WithCredentialsFile(service_account_file),
	)
	return alpha.NewClient(conf), nil
}
|
package main
import "testing"
// TestCheckResponseCode verifies that checkResponseCode accepts a success
// payload (code 201) and rejects a server-error payload (code 500).
func TestCheckResponseCode(t *testing.T) {
	cidr := CIDR{
		Name: "Test",
		Mask: 24,
	}
	okPayload := map[string]interface{}{"code": float64(201)}
	badPayload := map[string]interface{}{"code": float64(500)}
	if _, err := checkResponseCode(cidr, okPayload); err != nil {
		t.Errorf("Expected nil, but received %v\n", err)
	}
	if _, err := checkResponseCode(cidr, badPayload); err == nil {
		t.Errorf("Expected error, but received %v\n", err)
	}
}
|
package sync
import (
"strings"
"sync"
)
// fileIndex is a mutex-guarded map from file path to its metadata.
// Callers must hold the lock (via Lock/Unlock) around map mutations.
type fileIndex struct {
	fileMap map[string]*FileInformation // keyed by FileInformation.Name (a slash-separated path)
	fileMapMutex sync.Mutex // guards fileMap
}
// newFileIndex returns an empty, ready-to-use index.
func newFileIndex() *fileIndex {
	idx := &fileIndex{}
	idx.fileMap = make(map[string]*FileInformation)
	return idx
}
// Lock acquires the index mutex; pair with Unlock.
func (f *fileIndex) Lock() {
	f.fileMapMutex.Lock()
}
// Unlock releases the index mutex.
func (f *fileIndex) Unlock() {
	f.fileMapMutex.Unlock()
}
// Set stores file under its Name; the caller must hold the lock.
func (f *fileIndex) Set(file *FileInformation) {
	f.fileMap[file.Name] = file
}
// CreateDirInFileMap registers dirpath and every ancestor directory (except
// the root "/") in the file map.
// Function assumes that fileMap is locked for access
func (f *fileIndex) CreateDirInFileMap(dirpath string) {
	if dirpath == "/" {
		return
	}
	parts := strings.Split(dirpath, "/")
	// walk from the deepest path up to (but excluding) the first component
	for n := len(parts); n > 1; n-- {
		prefix := strings.Join(parts[:n], "/")
		if prefix == "" || f.fileMap[prefix] != nil {
			continue
		}
		f.fileMap[prefix] = &FileInformation{
			Name:        prefix,
			IsDirectory: true,
		}
	}
}
// RemoveDirInFileMap deletes dirpath and every entry underneath it.
// Function assumes that fileMap is locked for access.
// (Deleting from a map while ranging over it is safe in Go.)
// TODO: This function is very expensive O(n), is there a better solution?
func (f *fileIndex) RemoveDirInFileMap(dirpath string) {
	if f.fileMap[dirpath] == nil {
		return
	}
	delete(f.fileMap, dirpath)
	// Idiom fix: strings.HasPrefix replaces the old strings.Index(...) == 0.
	prefix := dirpath + "/"
	for key := range f.fileMap {
		if strings.HasPrefix(key, prefix) {
			delete(f.fileMap, key)
		}
	}
}
|
/*
Copyright (C) 2016 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"io/ioutil"
"os"
"strings"
"testing"
)
// TestDisplayConsoleInMachineReadable redirects stdout into a temp file,
// calls displayConsoleInMachineReadable, and checks the machine-readable
// output line by line.
func TestDisplayConsoleInMachineReadable(t *testing.T) {
	testDir, err := ioutil.TempDir("", "minishift-config-")
	if err != nil {
		// BUG FIX: a bare t.Error() carried no message and let the test keep
		// running with an unusable directory; fail fast with context instead.
		t.Fatal("Error creating temp directory", err)
	}
	defer os.RemoveAll(testDir)
	// BUG FIX: testDir+"out.txt" was missing the path separator, creating a
	// sibling file outside testDir that os.RemoveAll never cleaned up.
	f, err := os.Create(testDir + "/out.txt")
	if err != nil {
		t.Fatal("Error creating test file", err)
	}
	defer f.Close()
	// Redirect stdout into the temp file and restore it afterwards so later
	// tests in the package are unaffected.
	origStdout := os.Stdout
	os.Stdout = f
	defer func() { os.Stdout = origStdout }()
	expectedStdout := `HOST=192.168.1.1
PORT=8443
CONSOLE_URL=https://192.168.99.103:8443`
	displayConsoleInMachineReadable("192.168.1.1", "https://192.168.99.103:8443")
	if _, err := f.Seek(0, 0); err != nil {
		t.Fatal("Error setting offset back", err)
	}
	data, err := ioutil.ReadAll(f)
	if err != nil {
		t.Fatal("Error reading file", err)
	}
	actualStdout := string(data)
	if strings.TrimSpace(actualStdout) != expectedStdout {
		t.Fatalf("Expected:\n '%s' \nGot\n '%s'", expectedStdout, actualStdout)
	}
}
|
package client
import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
)
// certPool holds the test CA used to verify TLS connections in these tests.
var certPool = x509.NewCertPool()

// init loads the test CA certificate into certPool; the tests cannot run
// without it, so any failure is fatal.
func init() {
	pem, err := ioutil.ReadFile("testdata/ca.crt")
	if err != nil {
		panic(err)
	}
	if ok := certPool.AppendCertsFromPEM(pem); !ok {
		panic("could not append certificate data")
	}
}
// Get performs an HTTP GET against url, verifying the server certificate
// with the test CA pool, and returns the status code and body.
func Get(url string) (int, string, error) {
	client := http.Client{
		Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: certPool}},
		// Robustness: a hung server should fail the test instead of blocking
		// it forever (the client previously had no timeout at all).
		Timeout: 30 * time.Second,
	}
	resp, err := client.Get(url)
	if err != nil {
		return 0, "", fmt.Errorf("could not send a request: %w", err)
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return resp.StatusCode, "", fmt.Errorf("could not read response body: %w", err)
	}
	return resp.StatusCode, string(b), nil
}
// GetAndVerify fetches url and fails the test when the status code or body
// differ from the expected values.
func GetAndVerify(t *testing.T, url string, code int, body string) {
	gotCode, gotBody, err := Get(url)
	if err != nil {
		t.Errorf("could not open browser request: %s", err)
		return
	}
	if code != gotCode {
		t.Errorf("status wants %d but %d", code, gotCode)
	}
	if body != gotBody {
		t.Errorf("response body did not match: %s", cmp.Diff(gotBody, body))
	}
}
|
package main
import (
"fmt"
"github.com/jBugman/go-pocket/pocket"
"os"
"pocket/analysis"
)
// Pocket API credentials; replace the placeholders before running.
const CONSUMER_KEY = "<my-key>"
const ACCESS_TOKEN = "<access-token>"
// dump writes the per-tag token counts of model.Source to filename, keeping
// only items whose count passes a threshold of 1.
func dump(model analysis.Model, filename string) {
	fmt.Println("Dumping model to " + filename)
	out, err := os.Create(filename)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	for tag, counter := range model.Source {
		out.WriteString(tag + "\n")
		for _, item := range counter.ItemsWithThreshold(1) {
			out.WriteString(fmt.Sprintf("\t%s: %d\n", item.Key, item.Count))
		}
		out.WriteString("\n")
	}
}
// main retrieves all saved Pocket articles, trains a tag-prediction model,
// dumps it to disk, and prints predictions for every untagged item.
func main() {
	api := pocket.Api{CONSUMER_KEY, ACCESS_TOKEN}
	items, err := api.RetrieveAllArticles()
	if err != nil {
		panic(err)
	}
	fmt.Printf("Total items: %d\n", len(items))
	corpus := analysis.ConvertItems(items)
	model := analysis.TrainModel(corpus)
	dump(model, "model-src.txt")
	model.Dump("model.txt")
	// Predict tags only for the items that have none yet.
	for _, item := range corpus {
		if len(item.Tags) != 0 {
			continue
		}
		fmt.Println(item)
		fmt.Println(model.Predict(item))
		println()
	}
}
|
package main
import (
"bufio"
"fmt"
"github.com/Knetic/govaluate"
"os"
"sort"
"strconv"
"strings"
"unicode/utf8"
)
// Log line formats parsed below:
//User_To_Server(UID, ORDER, ELEMENT) Examp: User_To_Server(30, 1, "6")
//Server_To_User(UID, RESULT) Examp: Server_To_User(27, "= 8")

// strNum is the 1-based number of the current log line; it is stripped from
// the front of each scanned line.
var strNum = 1
// main parses a chat-style log of arithmetic fragments. Each User_To_Server
// line contributes one ordered fragment of a user's expression; each
// Server_To_User line carries the server's claimed answer. The fragments are
// reassembled per user, cleaned up (sign folding, "*-" bracketing), evaluated
// with govaluate, and printed next to the server's answer for comparison.
func main() {
	// userHashmap: user ID -> (fragment order -> fragment text)
	userHashmap := make(map[int64]map[int64]string)
	// result: user ID -> answer string reported by the server
	result := make(map[int64]string)
	file, err := os.Open("workshop-3-logPARSER/IKB_2_10.txt")
	if err != nil {
		panic(err)
	}
	defer file.Close()
	myScan := bufio.NewScanner(file)
	for myScan.Scan() {
		line := myScan.Text()
		// Drop the leading line number and the ": " separator.
		line = strings.Replace(strings.Replace(line, strconv.Itoa(strNum), "", 1), ": ", "", 1)
		strNum++
		if strings.Contains(line, "User_To_Server") {
			// Strip the call syntax, spaces and quotes, leaving "UID,ORDER,ELEMENT".
			userInfo := strings.Replace(strings.Replace(line, "User_To_Server(", "", 1), ")", "", 1)
			userInfo = strings.ReplaceAll(userInfo, " ", "")
			userInfo = strings.ReplaceAll(userInfo, "\"", "")
			userInfoArr := strings.Split(userInfo, ",")
			// NOTE(review): bitSize 10 caps parsed values at ~511; presumably
			// IDs/orders in this log fit — confirm against the input file.
			userID, _ := strconv.ParseInt(userInfoArr[0], 10, 10)
			userOpOrd, _ := strconv.ParseInt(userInfoArr[1], 10, 10)
			// Create the nested map on first sight of this user.
			data, ok := userHashmap[userID]
			if !ok {
				data = make(map[int64]string)
				data[userOpOrd] = userInfoArr[2]
				userHashmap[userID] = data
			} else {
				userHashmap[userID][userOpOrd] = userInfoArr[2]
			}
		} else {
			// Server_To_User line: strip syntax and record "UID,RESULT".
			userInfo := strings.Replace(strings.Replace(line, "Server_To_User(", "", 1), ")", "", 1)
			userInfo = strings.ReplaceAll(userInfo, " ", "")
			userInfo = strings.ReplaceAll(userInfo, "\"", "")
			userInfoArr := strings.Split(userInfo, ",")
			userID, _ := strconv.ParseInt(userInfoArr[0], 10, 10)
			result[userID] = userInfoArr[1]
		}
	}
	// Sort user IDs for deterministic output (map iteration order is random).
	keys := make([]int, 0, len(userHashmap))
	for k := range userHashmap {
		keys = append(keys, int(k))
	}
	sort.Ints(keys)
	for _, k := range keys {
		resultStringSlice := []string(nil)
		fmt.Println(k, userHashmap[int64(k)])
		// Reassemble fragments 1..8 in order (missing orders contribute "").
		for kk := 1; kk < 9; kk++ {
			resultStringSlice = append(resultStringSlice, userHashmap[int64(k)][int64(kk)])
		}
		resultString := strings.Join(resultStringSlice, "")
		// Fold doubled signs: "--" -> "+", "+-" -> "-".
		resultString = strings.ReplaceAll(resultString, "--", "+")
		resultString = strings.ReplaceAll(resultString, "+-", "-")
		// Rewrite each "*-x" into "*(-x)" so the evaluator accepts it.
		replaceCounter := strings.Count(resultString, "*-")
		for z := 0; z < replaceCounter; z++ {
			myRune := []rune(resultString)
			tmpRune := []rune("*(-")
			for i := 1; i < utf8.RuneCountInString(resultString); i++ {
				if string(myRune[i]) == "-" && string(myRune[i-1]) == "*" {
					// e.g. -9*-5*-5--8: wrap the negated operand in parens
					tmp := myRune[i+1]
					myRune[i+1] = ')'
					i++
					tmpRune = append(tmpRune, tmp)
					resultString = strings.Replace(string(myRune), "*-", string(tmpRune), 1)
					break
				}
			}
		}
		expression, err := govaluate.NewEvaluableExpression(resultString)
		if err != nil {
			panic(err)
		}
		res, err := expression.Evaluate(nil)
		fmt.Println("U>", resultString)
		fmt.Println("Gotten result>", res)
		fmt.Println("Serv to UID >", k, result[int64(k)])
		fmt.Println("~~~~~~~~~~~~~~~~~~~~~~")
	}
}
|
package build
import (
cref "github.com/pip-services3-go/pip-services3-commons-go/refer"
cbuild "github.com/pip-services3-go/pip-services3-components-go/build"
conn "github.com/pip-services3-go/pip-services3-mongodb-go/connect"
)
//DefaultMongoDbFactory helps creates MongoDb components by their descriptors.
//It pre-registers the MongoDbConnection component type.
//See Factory
//See MongoDbConnection
type DefaultMongoDbFactory struct {
	cbuild.Factory
}
// NewDefaultMongoDbFactory creates a new instance of the factory with the
// MongoDb connection type registered.
// Return *DefaultMongoDbFactory
func NewDefaultMongoDbFactory() *DefaultMongoDbFactory {
	factory := &DefaultMongoDbFactory{}
	descriptor := cref.NewDescriptor("pip-services", "connection", "mongodb", "*", "1.0")
	factory.RegisterType(descriptor, conn.NewMongoDbConnection)
	return factory
}
|
package main
import "fmt"
// main reads an m×n area and a tile size a from stdin, then prints the
// minimum number of a×a tiles needed to cover the whole area (each side is
// an integer division rounded up).
func main() {
	var m, n, a uint64
	fmt.Scanf("%d %d %d", &m, &n, &a)
	cols := m / a
	if m%a != 0 {
		cols++
	}
	rows := n / a
	if n%a != 0 {
		rows++
	}
	fmt.Printf("%d\n", cols*rows)
}
|
package main
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net/http"
)
// newClient creates an http.Client configured for certificate authorization and
// verification against cert-api.access.redhat.com. certFile/keyFile identify
// the client certificate presented to the server.
func newClient(certFile, keyFile string) (*http.Client, error) {
	caCert, err := ioutil.ReadFile("/etc/insights-client/cert-api.access.redhat.com.pem")
	if err != nil {
		return nil, err
	}
	pool, err := x509.SystemCertPool()
	if err != nil {
		return nil, err
	}
	pool.AppendCertsFromPEM(caCert)
	clientCert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	tlsConfig := tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{clientCert},
		// cloud.redhat.com appears to exhibit this openssl bug
		// https://github.com/openssl/openssl/issues/9767
		MaxVersion: tls.VersionTLS12,
	}
	// NOTE(review): BuildNameToCertificate is deprecated since Go 1.14 (the
	// tls package selects certificates automatically); kept for identical
	// behavior on older toolchains.
	tlsConfig.BuildNameToCertificate()
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: &tlsConfig},
	}, nil
}
|
package postgres
import (
"database/sql"
"fmt"
"log"
"path"
"runtime"
"github.com/BurntSushi/toml"
_ "github.com/lib/pq"
)
// DatabaseConfig holds the settings needed to reach a postgres database.
type DatabaseConfig struct {
	Username string
	Password string
	Database string
	Hostname string
}

// IsValid reports whether every connection field has been provided.
func (d *DatabaseConfig) IsValid() bool {
	if d.Username == "" || d.Password == "" {
		return false
	}
	return d.Database != "" && d.Hostname != ""
}
// Connect opens a postgres handle built from every field of conf.
//
// BUG FIX: the DSN previously contained only user and dbname, silently
// ignoring conf.Password and conf.Hostname, so any remote or
// password-protected database was unreachable.
func Connect(conf DatabaseConfig) *sql.DB {
	dsn := fmt.Sprintf("user=%s password=%s host=%s dbname=%s sslmode=%s",
		conf.Username, conf.Password, conf.Hostname, conf.Database, "disable")
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	return db
}
// GetConfig loads the DatabaseConfig for env from the TOML file at filepath,
// resolved relative to the *caller's* source file (runtime.Caller(1)).
// It aborts the process when the file cannot be parsed and panics when env
// has no entry.
func GetConfig(filepath string, env string) DatabaseConfig {
	_, currentfile, _, _ := runtime.Caller(1) // caller's file, so paths are relative to the call site
	abspath := path.Join(path.Dir(currentfile), filepath)
	tmpconf := map[string]DatabaseConfig{}
	if _, err := toml.DecodeFile(abspath, &tmpconf); err != nil {
		log.Fatal(err)
	}
	conf, ok := tmpconf[env]
	if !ok {
		panic(env + " configuration is not present in " + filepath)
	}
	// BUG FIX: removed the unreachable trailing `return tmpconf[env]` that
	// followed the exhaustive if/else (flagged by go vet as dead code).
	return conf
}
|
package command
import (
"github.com/fatih/structs"
"github.com/flosch/pongo2"
"mix/plugins/micro"
)
// makeConfig renders every project config template with the data taken from
// the project's render config.
func (p *Handler) makeConfig() {
	data := pongo2.Context(structs.Map(micro.ReadRenderConfig(p.project)))
	targets := []string{
		ProjectConfigGlobalPath,
		ProjectConfigMakePath,
		ProjectConfigGetterPath,
		ProjectConfigInitPath,
		ProjectConfigMergePath,
		ProjectConfigValidatorPath,
	}
	for _, target := range targets {
		p.renderDynamic(target, data)
	}
}
|
// Package user 业务
package user
import (
"github.com/thoohv5/template/internal/service/user"
"github.com/thoohv5/template/internal/service/user/entity"
"github.com/thoohv5/template/pkg/http"
"github.com/gin-gonic/gin"
"github.com/thoohv5/template/internal/pkg/config"
"github.com/thoohv5/template/pkg/log"
)
// server implements IServer on top of the user business service.
type server struct {
	// configuration
	cf config.IConfig
	// logger
	log log.ILog
	// user business service
	svr user.IService
}
// IServer is the standard interface of the user HTTP server.
type IServer interface {
	// Register creates a new user account.
	Register(gtx *gin.Context)
	// Detail returns one user's details.
	Detail(gtx *gin.Context)
	// List returns a page of users.
	List(gtx *gin.Context)
	// Edit updates an existing user.
	Edit(gtx *gin.Context)
}
// New builds an IServer from its dependencies.
func New(cf config.IConfig, log log.ILog, svr user.IService) IServer {
	s := &server{}
	s.cf = cf
	s.log = log
	s.svr = svr
	return s
}
// Register godoc
// @Summary User registration
// @Description Endpoint for registering a new user
// @Tags user
// @Accept json
// @Produce json
// @Param req body RegisterReq true "request payload"
// @Success 200 {object} http.ResponseEntity{data=RegisterResp}
// @Router /register [post]
func (s *server) Register(gtx *gin.Context) {
	resp := http.NewResponse(gtx)
	req := new(RegisterReq)
	if err := gtx.Bind(req); err != nil {
		resp.Error(err)
		return
	}
	// delegate the creation to the business service
	userIdentity, err := s.svr.Create(gtx, req.AccountType, req.Identity, req.Extra)
	if err != nil {
		resp.Error(err)
		return
	}
	resp.DefaultSuccess(&RegisterResp{UserIdentity: userIdentity})
}
// Detail godoc
// @Summary User detail
// @Description Endpoint returning one user's details
// @Tags user
// @Accept json
// @Produce json
// @Param req body DetailReq true "request payload"
// @Success 200 {object} http.ResponseEntity{data=DetailResp}
// @Router /detail [get]
func (s *server) Detail(gtx *gin.Context) {
	resp := http.NewResponse(gtx)
	req := new(DetailReq)
	if err := gtx.Bind(req); err != nil {
		resp.Error(err)
		return
	}
	// fetch the detail from the business service
	detail, err := s.svr.Detail(gtx, req.AccountType, req.Identity)
	if err != nil {
		resp.Error(err)
		return
	}
	resp.DefaultSuccess(&DetailResp{Detail: detail})
}
// List godoc
// @Summary User list
// @Description Endpoint returning a page of users
// @Tags user
// @Accept json
// @Produce json
// @Param req body ListReq true "request payload"
// @Success 200 {object} http.ResponseEntity{data=ListResp}
// @Router /list [get]
func (s *server) List(gtx *gin.Context) {
	resp := http.NewResponse(gtx)
	req := new(ListReq)
	if err := gtx.Bind(req); err != nil {
		resp.Error(err)
		return
	}
	// translate the request into the service-layer list parameters
	param := &entity.ListParam{
		BasePage: http.BasePage{
			Start: req.Start,
			Limit: req.Limit,
		},
		Type: req.Type,
	}
	list, err := s.svr.List(gtx, param)
	if err != nil {
		resp.Error(err)
		return
	}
	resp.DefaultSuccess(&ListResp{List: list})
}
// Edit godoc
// @Summary User edit
// @Description Endpoint for editing a user
// @Tags user
// @Accept json
// @Produce json
// @Param req body EditReq true "request payload"
// @Success 200 {object} http.ResponseEntity
// @Router /edit [post]
func (s *server) Edit(gtx *gin.Context) {
	resp := http.NewResponse(gtx)
	req := new(EditReq)
	if err := gtx.Bind(req); err != nil {
		resp.Error(err)
		return
	}
	// apply the edit through the business service
	param := &entity.EditParam{
		AccountType: req.AccountType,
		Identity:    req.Identity,
		Extra:       req.Extra,
	}
	if err := s.svr.Edit(gtx, req.UserIdentity, param); err != nil {
		resp.Error(err)
		return
	}
	resp.DefaultSuccess(nil)
}
|
// Copyright 2021 BoCloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"context"
"fmt"
"net"
"github.com/go-logr/logr"
"github.com/jjeffery/stringset"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/fabedge/fabedge/pkg/common/constants"
"github.com/fabedge/fabedge/pkg/common/netconf"
"github.com/fabedge/fabedge/pkg/operator/allocator"
"github.com/fabedge/fabedge/pkg/operator/predicates"
storepkg "github.com/fabedge/fabedge/pkg/operator/store"
"github.com/fabedge/fabedge/pkg/operator/types"
)
const (
	controllerName = "agent-controller"
	// file names of the agent configuration documents
	agentConfigTunnelFileName = "tunnels.yaml"
	agentConfigServicesFileName = "services.yaml"
	// paths where those files are mounted inside the agent pod
	agentConfigTunnelsFilepath = "/etc/fabedge/tunnels.yaml"
	agentConfigServicesFilepath = "/etc/fabedge/services.yaml"
)
// ObjectKey identifies a kubernetes object by namespace/name.
type ObjectKey = client.ObjectKey
// compile-time check that agentController implements reconcile.Reconciler
var _ reconcile.Reconciler = &agentController{}
// agentController reconciles edge nodes: it allocates pod subnets, keeps the
// per-node agent configuration in sync, and manages the agent pod lifecycle.
type agentController struct {
	client client.Client
	alloc allocator.Interface // allocator of edge pod subnets
	store storepkg.Interface // shared endpoint store
	newEndpoint types.NewEndpointFunc // builds an endpoint from a corev1.Node
	log logr.Logger
	namespace string // namespace where agent pods are created
	agentImage string
	strongswanImage string
	edgePodCIRD string // NOTE(review): name looks like a typo of "CIDR"; renaming touches other files
	masqOutgoing bool
}
// Config carries the dependencies and settings needed to register the agent
// controller with a controller-runtime manager (see AddToManager).
type Config struct {
	Allocator allocator.Interface
	Store storepkg.Interface
	Manager manager.Manager
	Namespace string // namespace for agent pods
	AgentImage string
	StrongswanImage string
	MasqOutgoing bool
	EdgePodCIDR string
	ConnectorConfig string
	NewEndpoint types.NewEndpointFunc
}
// AddToManager registers a new agent controller with cnf.Manager and sets it
// up to watch edge nodes.
func AddToManager(cnf Config) error {
	mgr := cnf.Manager
	r := &agentController{
		client:          mgr.GetClient(),
		log:             mgr.GetLogger().WithName(controllerName),
		alloc:           cnf.Allocator,
		store:           cnf.Store,
		newEndpoint:     cnf.NewEndpoint,
		namespace:       cnf.Namespace,
		agentImage:      cnf.AgentImage,
		strongswanImage: cnf.StrongswanImage,
		edgePodCIRD:     cnf.EdgePodCIDR,
		masqOutgoing:    cnf.MasqOutgoing,
	}
	ctrl, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Only edge nodes are interesting to this controller.
	return ctrl.Watch(
		&source.Kind{Type: &corev1.Node{}},
		&handler.EnqueueRequestForObject{},
		predicates.EdgeNodePredicate(),
	)
}
// Reconcile drives one edge node toward its desired state: allocate a pod
// subnet if the node has none (or an invalid one), sync the agent configmap
// and make sure an agent pod exists. When the node is deleted or
// terminating, every resource allocated to it is released.
//
// Fix: the terminating branch logged through ctl.log instead of the
// request-scoped logger used by every other branch.
func (ctl *agentController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
	var node corev1.Node

	log := ctl.log.WithValues("key", request)

	if err := ctl.client.Get(ctx, request.NamespacedName, &node); err != nil {
		if errors.IsNotFound(err) {
			log.Info("edge node is deleted, clear resources allocated to this node")
			return reconcile.Result{}, ctl.clearAllocatedResourcesForEdgeNode(ctx, request.Name)
		}

		log.Error(err, "unable to get edge node")
		return reconcile.Result{}, err
	}

	if node.DeletionTimestamp != nil {
		log.Info("edge node is terminating, clear resources allocated to this node")
		return reconcile.Result{}, ctl.clearAllocatedResourcesForEdgeNode(ctx, request.Name)
	}

	currentEndpoint := ctl.newEndpoint(node)
	if currentEndpoint.IP == "" {
		log.V(5).Info("This node has no ip, skip reconciling")
		return reconcile.Result{}, nil
	}

	if !ctl.isValidSubnets(currentEndpoint.Subnets) {
		if err := ctl.allocateSubnet(ctx, node); err != nil {
			return reconcile.Result{}, err
		}
	} else {
		ctl.store.SaveEndpoint(currentEndpoint)
	}

	if err := ctl.syncAgentConfig(ctx, node); err != nil {
		return reconcile.Result{}, err
	}

	if err := ctl.createAgentPodIfNeeded(ctx, &node); err != nil {
		return reconcile.Result{}, err
	}

	return reconcile.Result{}, nil
}
// isValidSubnets reports whether every entry in cidrs parses as a CIDR and
// lies inside the allocator's pool. An empty slice is trivially valid.
func (ctl *agentController) isValidSubnets(cidrs []string) bool {
	for _, c := range cidrs {
		_, parsed, err := net.ParseCIDR(c)
		if err != nil || !ctl.alloc.Contains(*parsed) {
			return false
		}
	}
	return true
}
// clearAllocatedResourcesForEdgeNode tears down everything this controller
// created for a node: agent pod, agent configmap, then its subnets.
func (ctl *agentController) clearAllocatedResourcesForEdgeNode(ctx context.Context, nodeName string) error {
	err := ctl.deleteAgentPodIfNeeded(ctx, nodeName)
	if err != nil {
		return err
	}
	err = ctl.deleteAgentConfigIfNeeded(ctx, nodeName)
	if err != nil {
		return err
	}
	return ctl.reclaimSubnets(nodeName)
}
// reclaimSubnets removes the node's endpoint from the store and returns each
// of its parseable subnets to the allocator. Unparseable subnets are logged
// and skipped so one bad entry does not block reclaiming the rest.
//
// Fix: corrected log message grammar ("is delete" -> "is deleted").
func (ctl *agentController) reclaimSubnets(nodeName string) error {
	log := ctl.log.WithValues("nodeName", nodeName)

	ep, ok := ctl.store.GetEndpoint(nodeName)
	if !ok {
		// Nothing recorded for this node, nothing to reclaim.
		return nil
	}
	ctl.store.DeleteEndpoint(nodeName)
	log.V(5).Info("endpoint is deleted from store", "endpoint", ep)

	for _, sn := range ep.Subnets {
		_, subnet, err := net.ParseCIDR(sn)
		if err != nil {
			log.Error(err, "invalid subnet, skip reclaiming subnets")
			continue
		}

		ctl.alloc.Reclaim(*subnet)
		log.V(5).Info("subnet is reclaimed", "subnet", subnet)
	}

	return nil
}
// deleteAgentPodIfNeeded deletes the node's agent pod if it exists. A pod
// that is already gone is not an error — the desired state is "no pod".
//
// Fix: the original returned the NotFound error from Get, which made
// clearAllocatedResourcesForEdgeNode fail (and the reconcile requeue)
// whenever the pod had already been deleted. NotFound is now treated as
// success on both Get and Delete.
func (ctl *agentController) deleteAgentPodIfNeeded(ctx context.Context, nodeName string) error {
	agentPodName := getAgentPodName(nodeName)
	key := ObjectKey{
		Name:      agentPodName,
		Namespace: ctl.namespace,
	}

	var pod corev1.Pod
	if err := ctl.client.Get(ctx, key, &pod); err != nil {
		if errors.IsNotFound(err) {
			// Already gone; nothing to delete.
			return nil
		}
		return err
	}

	ctl.log.V(5).Info("Agent pod is found, delete it now", "nodeName", nodeName, "podName", agentPodName, "namespace", ctl.namespace)

	if err := ctl.client.Delete(ctx, &pod); err != nil && !errors.IsNotFound(err) {
		ctl.log.Error(err, "failed to delete agent pod")
		return err
	}
	return nil
}
// allocateSubnet grabs a free subnet block for the node, records it in the
// node's annotations and saves the resulting endpoint in the store. If the
// annotation update fails the subnet is returned to the allocator so it is
// not leaked.
func (ctl *agentController) allocateSubnet(ctx context.Context, node corev1.Node) error {
	log := ctl.log.WithValues("nodeName", node.Name)

	log.V(5).Info("this node need subnet allocation")
	subnet, err := ctl.alloc.GetFreeSubnetBlock(node.Name)
	if err != nil {
		log.Error(err, "failed to allocate subnet for node")
		return err
	}

	log = log.WithValues("subnet", subnet.String())
	log.V(5).Info("an subnet is allocated to node")

	if node.Annotations == nil {
		node.Annotations = map[string]string{}
	}
	// for now, we just supply one subnet allocation
	node.Annotations[constants.KeyNodeSubnets] = subnet.String()

	err = ctl.client.Update(ctx, &node)
	if err != nil {
		log.Error(err, "failed to record node subnet allocation")

		// Roll back the allocation so the subnet can be handed out again.
		ctl.alloc.Reclaim(*subnet)
		log.V(5).Info("subnet is reclaimed")
		return err
	}

	ctl.store.SaveEndpoint(ctl.newEndpoint(node))

	return nil
}
// syncAgentConfig renders the node's tunnel configuration and stores it in
// the per-node agent configmap, creating the configmap on first sight and
// updating it only when the rendered content actually changed.
//
// Fix: some branches logged through ctl.log while others used the
// request-scoped logger; all logging now goes through the scoped logger so
// nodeName/configName/namespace appear on every message.
func (ctl *agentController) syncAgentConfig(ctx context.Context, node corev1.Node) error {
	configName := getAgentConfigMapName(node.Name)
	log := ctl.log.WithValues("nodeName", node.Name, "configName", configName, "namespace", ctl.namespace)

	log.V(5).Info("Sync agent config")

	var agentConfig corev1.ConfigMap
	err := ctl.client.Get(ctx, ObjectKey{Name: configName, Namespace: ctl.namespace}, &agentConfig)
	if err != nil && !errors.IsNotFound(err) {
		log.Error(err, "failed to get agent configmap")
		return err
	}
	isConfigNotFound := errors.IsNotFound(err)

	networkConf := ctl.buildNetworkConf(node.Name)
	configDataBytes, err := yaml.Marshal(networkConf)
	if err != nil {
		log.Error(err, "not able to marshal NetworkConf")
		return err
	}

	configData := string(configDataBytes)

	if isConfigNotFound {
		log.V(5).Info("Agent configMap is not found, create it now")
		configMap := &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      configName,
				Namespace: ctl.namespace,
				Labels: map[string]string{
					constants.KeyFabedgeAPP: constants.AppAgent,
					constants.KeyCreatedBy:  constants.AppOperator,
				},
			},
			Data: map[string]string{
				agentConfigTunnelFileName: configData,
				// agent controller just create configmap, the load balance rules is kept by proxy controller
				agentConfigServicesFileName: "",
			},
		}
		return ctl.client.Create(ctx, configMap)
	}

	if configData == agentConfig.Data[agentConfigTunnelFileName] {
		log.V(5).Info("agent config is not changed, skip updating")
		return nil
	}

	agentConfig.Data[agentConfigTunnelFileName] = configData
	err = ctl.client.Update(ctx, &agentConfig)
	if err != nil {
		log.Error(err, "failed to update agent configmap")
	}
	return err
}
// createAgentPodIfNeeded creates the node's agent pod when it is missing.
// If the pod already exists, or the Get fails for any reason other than
// NotFound, nothing is created.
func (ctl *agentController) createAgentPodIfNeeded(ctx context.Context, node *corev1.Node) error {
	agentPodName := getAgentPodName(node.Name)
	key := ObjectKey{
		Name:      agentPodName,
		Namespace: ctl.namespace,
	}

	var pod corev1.Pod
	err := ctl.client.Get(ctx, key, &pod)
	// err == nil: pod exists, return nil. Any error other than NotFound is
	// returned as-is. Only NotFound falls through to creation.
	if err == nil || !errors.IsNotFound(err) {
		return err
	}

	ctl.log.V(5).Info("Agent pod not found, create it now", "nodeName", node.Name, "podName", agentPodName, "namespace", ctl.namespace)

	agentPod := ctl.buildAgentPod(ctl.namespace, node.Name, agentPodName)

	err = ctl.client.Create(ctx, agentPod)
	if err != nil {
		ctl.log.Error(err, "failed to create agent pod")
	}
	return err
}
// buildAgentPod assembles the pod spec for a node's agent: an "agent"
// container plus a "strongswan" sidecar, both privileged and sharing
// /var/run via an emptyDir, with the agent's tunnel/services config mounted
// from the per-node configmap and IPsec material mounted from the host.
// The pod is pinned to nodeName, runs on the host network and tolerates the
// edge-node taint.
func (ctl *agentController) buildAgentPod(namespace, nodeName, podName string) *corev1.Pod {
	hostPathDirectory := corev1.HostPathDirectory
	hostPathFile := corev1.HostPathFile
	privileged := true
	// 420 decimal == 0644 octal: default file mode for the configmap volume.
	defaultMode := int32(420)
	agentConfigName := getAgentConfigMapName(nodeName)

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
			Labels: map[string]string{
				constants.KeyFabedgeAPP: constants.AppAgent,
				constants.KeyCreatedBy:  constants.AppOperator,
			},
		},
		Spec: corev1.PodSpec{
			// Pin the pod to its edge node; no scheduler placement involved.
			NodeName:      nodeName,
			HostNetwork:   true,
			RestartPolicy: corev1.RestartPolicyAlways,
			Tolerations: []corev1.Toleration{
				{
					Key:    "node-role.kubernetes.io/edge",
					Effect: corev1.TaintEffectNoSchedule,
				},
			},
			Containers: []corev1.Container{
				{
					// Main agent container: reads tunnel/services config and
					// programs the edge node.
					Name:            "agent",
					Image:           ctl.agentImage,
					ImagePullPolicy: corev1.PullIfNotPresent,
					Args: []string{
						"-tunnels-conf",
						agentConfigTunnelsFilepath,
						"-services-conf",
						agentConfigServicesFilepath,
						"-edge-pod-cidr",
						ctl.edgePodCIRD,
						fmt.Sprintf("-masq-outgoing=%t", ctl.masqOutgoing),
					},
					SecurityContext: &corev1.SecurityContext{
						Privileged: &privileged,
					},
					Resources: corev1.ResourceRequirements{},
					VolumeMounts: []corev1.VolumeMount{
						{
							// Per-node configmap with tunnels.yaml/services.yaml.
							Name:      "netconf",
							MountPath: "/etc/fabedge",
						},
						{
							// Shared with the strongswan sidecar (control sockets).
							Name:      "var-run",
							MountPath: "/var/run/",
						},
						{
							Name:      "cni",
							MountPath: "/etc/cni",
						},
						{
							Name:      "lib-modules",
							MountPath: "/lib/modules",
							ReadOnly:  true,
						},
						{
							Name:      "ipsec-d",
							MountPath: "/etc/ipsec.d",
							ReadOnly:  true,
						},
					},
				},
				{
					// IPsec daemon sidecar.
					Name:            "strongswan",
					Image:           ctl.strongswanImage,
					ImagePullPolicy: corev1.PullIfNotPresent,
					SecurityContext: &corev1.SecurityContext{
						Privileged: &privileged,
					},
					Resources: corev1.ResourceRequirements{},
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "var-run",
							MountPath: "/var/run/",
						},
						{
							Name:      "ipsec-d",
							MountPath: "/etc/ipsec.d",
							ReadOnly:  true,
						},
						{
							Name:      "ipsec-secrets",
							MountPath: "/etc/ipsec.secrets",
							ReadOnly:  true,
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					Name: "var-run",
					VolumeSource: corev1.VolumeSource{
						EmptyDir: &corev1.EmptyDirVolumeSource{},
					},
				},
				{
					Name: "cni",
					VolumeSource: corev1.VolumeSource{
						HostPath: &corev1.HostPathVolumeSource{
							Path: "/etc/cni",
							Type: &hostPathDirectory,
						},
					},
				},
				{
					Name: "lib-modules",
					VolumeSource: corev1.VolumeSource{
						HostPath: &corev1.HostPathVolumeSource{
							Path: "/lib/modules",
							Type: &hostPathDirectory,
						},
					},
				},
				{
					// The per-node agent configmap created by syncAgentConfig.
					Name: "netconf",
					VolumeSource: corev1.VolumeSource{
						ConfigMap: &corev1.ConfigMapVolumeSource{
							LocalObjectReference: corev1.LocalObjectReference{
								Name: agentConfigName,
							},
							DefaultMode: &defaultMode,
						},
					},
				},
				{
					// IPsec certificates/keys provisioned on the host.
					Name: "ipsec-d",
					VolumeSource: corev1.VolumeSource{
						HostPath: &corev1.HostPathVolumeSource{
							Path: "/etc/fabedge/ipsec",
							Type: &hostPathDirectory,
						},
					},
				},
				{
					Name: "ipsec-secrets",
					VolumeSource: corev1.VolumeSource{
						HostPath: &corev1.HostPathVolumeSource{
							Path: "/etc/fabedge/ipsec/ipsec.secrets",
							Type: &hostPathFile,
						},
					},
				},
			},
		},
	}

	return pod
}
// deleteAgentConfigIfNeeded removes the node's agent configmap. A configmap
// that does not exist (or disappears between Get and Delete) is treated as
// success.
func (ctl *agentController) deleteAgentConfigIfNeeded(ctx context.Context, nodeName string) error {
	configName := getAgentConfigMapName(nodeName)
	log := ctl.log.WithValues("nodeName", nodeName, "configName", configName, "namespace", ctl.namespace)

	var cm corev1.ConfigMap
	err := ctl.client.Get(ctx, ObjectKey{Name: configName, Namespace: ctl.namespace}, &cm)
	switch {
	case errors.IsNotFound(err):
		return nil
	case err != nil:
		log.Error(err, "failed to get configmap")
		return err
	}

	log.V(5).Info("Agent configmap is found, delete it now")
	err = ctl.client.Delete(ctx, &cm)
	if err == nil || errors.IsNotFound(err) {
		return nil
	}
	log.Error(err, "failed to delete agent configmap")
	return err
}
// getNetworkConfig to parse network config from connector configmap or agent configmap.
// It fetches the configmap identified by namespace/cmName and unmarshals the
// entry stored under configFile into a NetworkConf. The configmap itself is
// returned alongside the parsed config so callers can update it.
func (ctl *agentController) getNetworkConfig(ctx context.Context, namespace, cmName, configFile string) (cm corev1.ConfigMap, conf netconf.NetworkConf, err error) {
	key := client.ObjectKey{
		Namespace: namespace,
		Name:      cmName,
	}
	if err = ctl.client.Get(ctx, key, &cm); err != nil {
		return
	}

	// Missing key yields an empty string, which unmarshals to a zero conf.
	tmp := cm.Data[configFile]
	if err = yaml.Unmarshal([]byte(tmp), &conf); err != nil {
		return
	}

	return
}
// buildNetworkConf renders the network configuration for the named endpoint:
// its own tunnel endpoint plus one peer entry per community member.
func (ctl *agentController) buildNetworkConf(name string) netconf.NetworkConf {
	self, _ := ctl.store.GetEndpoint(name)
	peers := ctl.getPeers(name)

	tunnels := make([]netconf.TunnelEndpoint, 0, len(peers))
	for _, peer := range peers {
		tunnels = append(tunnels, peer.ConvertToTunnelEndpoint())
	}

	return netconf.NetworkConf{
		TunnelEndpoint: self.ConvertToTunnelEndpoint(),
		Peers:          tunnels,
	}
}
// getPeers returns the endpoints the named endpoint should tunnel to: the
// connector plus every member of every community it belongs to, minus itself.
func (ctl *agentController) getPeers(name string) []types.Endpoint {
	peerNames := stringset.New(constants.ConnectorEndpointName)

	for _, community := range ctl.store.GetCommunitiesByEndpoint(name) {
		peerNames.Add(community.Members.Values()...)
	}
	// An endpoint is never its own peer.
	peerNames.Remove(name)

	return ctl.store.GetEndpoints(peerNames.Values()...)
}
// getAgentConfigMapName returns the name of the configmap that stores the
// agent configuration for the given node.
func getAgentConfigMapName(nodeName string) string {
	return "fabedge-agent-config-" + nodeName
}
// getAgentPodName returns the name of the agent pod for the given node.
func getAgentPodName(nodeName string) string {
	return "fabedge-agent-" + nodeName
}
|
package game
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestShuffleDeck verifies a shuffled deck has exactly 24 cards and no
// duplicate (suit, rank) pairs.
//
// Improvement: duplicate detection now uses a single pass with a map
// instead of the original O(n^2) pairwise scan.
func TestShuffleDeck(t *testing.T) {
	deck := ShuffleDeck()

	t.Run("Contains 24 Cards", func(t *testing.T) {
		require.Len(t, deck, 24)
	})

	t.Run("Contains No Duplicates", func(t *testing.T) {
		// Remember the first index of each (suit, rank); a second sighting
		// is a duplicate.
		seen := make(map[string]int, len(deck))
		for i, card := range deck {
			key := fmt.Sprintf("%v/%v", card.Suit, card.Rank)
			if j, ok := seen[key]; ok {
				assert.Fail(
					t,
					fmt.Sprintf(
						"Expected no duplicates but found a duplicate %d=%s %d=(%s)",
						j, deck[j].String(),
						i, deck[i].String(),
					),
				)
				continue
			}
			seen[key] = i
		}
	})
}
|
package server
import (
"net"
)
// ServerOption holds the configurable settings for a server.
type ServerOption struct {
	// Addr is the TCP address the server listens on.
	Addr net.TCPAddr
}
|
package encoding
import (
"encoding/binary"
"testing"
)
// BenchmarkBinaryBigEndianPutUint16 measures encoding a uint16 into a
// 2-byte buffer in big-endian order.
func BenchmarkBinaryBigEndianPutUint16(b *testing.B) {
	b.ReportAllocs()
	buf := make([]byte, 2)
	b.SetBytes(int64(len(buf)))
	for n := 0; n < b.N; n++ {
		binary.BigEndian.PutUint16(buf, 12)
	}
}
// BenchmarkBinaryBigEndianPutUint64 measures encoding a uint64 into an
// 8-byte buffer in big-endian order.
func BenchmarkBinaryBigEndianPutUint64(b *testing.B) {
	b.ReportAllocs()
	buf := make([]byte, 8)
	b.SetBytes(int64(len(buf)))
	for n := 0; n < b.N; n++ {
		binary.BigEndian.PutUint64(buf, 12)
	}
}
|
package storage
import (
"github.com/biezhi/gorm-paginator/pagination"
md "github.com/ebikode/eLearning-core/model"
)
// DBActivityLogStorage provides database-backed storage for activity logs.
type DBActivityLogStorage struct {
	*MDatabase
}
// NewDBActivityLogStorage initializes an ActivityLog storage backed by db.
func NewDBActivityLogStorage(db *MDatabase) *DBActivityLogStorage {
	return &DBActivityLogStorage{db}
}
// GetAll returns one page of activity logs (newest first) with the related
// Admin preloaded.
// NOTE(review): the paginator returned by pagination.Paging is discarded, so
// total-count/page metadata is lost — confirm callers don't need it.
func (aldb *DBActivityLogStorage) GetAll(page, limit int) []*md.ActivityLog {
	var activityLogs []*md.ActivityLog
	q := aldb.db.Preload("Admin").Order("created_at desc")

	pagination.Paging(&pagination.Param{
		DB:      q.Find(&activityLogs),
		Page:    page,
		Limit:   limit,
		OrderBy: []string{"created_at desc"},
	}, &activityLogs)

	return activityLogs
}
// Store persists a new activity log record.
func (aldb *DBActivityLogStorage) Store(c md.ActivityLog) error {
	// c is already a copy (value receiver parameter); gorm may set its ID,
	// which is discarded, matching the original behavior.
	record := c
	if err := aldb.db.Create(&record).Error; err != nil {
		return err
	}
	return nil
}
// Delete removes an activity log. When isPermarnant is true the row is
// hard-deleted (Unscoped), otherwise gorm's soft delete applies. Returns
// true on success.
func (aldb *DBActivityLogStorage) Delete(c md.ActivityLog, isPermarnant bool) (bool, error) {
	var err error
	if isPermarnant {
		err = aldb.db.Unscoped().Delete(c).Error
	} else {
		err = aldb.db.Delete(c).Error
	}

	if err != nil {
		return false, err
	}
	return true, nil
}
|
package controllers
import "sharemusic/models"
// LeftRoom handles a user leaving a room: the room is deleted when the
// leaving user was the last one, otherwise its user count is decremented.
// Responds with {"code": 200} on success, {"code": 500} on failure.
//
// Fix: the original called SetStatus(200) again AFTER the error branch,
// which overwrote the 500 status on failures; the status is now set once.
func (c *MusicController) LeftRoom() {
	id, _ := c.GetInt64("id")
	res, _ := models.GetOneById(id)

	var err error
	if res.UserNum == 1 {
		err = models.DelRoomInfo(id)
	} else {
		err = models.UpdateRoomInfoUserName(id, res.UserNum-1)
	}

	status := 200
	if err != nil {
		status = 500
	}

	c.Ctx.Output.SetStatus(status)
	c.Data["json"] = map[string]interface{}{
		"code": status,
	}
	c.ServeJSON()
}
|
package fingerprint
import (
"fmt"
"os"
"os/exec"
"path"
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
)
// Fixture paths used by the in-memory filesystem in these tests.
const (
	testDataDir = "/test/audio/"
	testFile1   = "sample1.mp3"
	testFile2   = "sample2.mp3"
	testFile3   = "textfile.txt" // non-audio file, used for format-error cases
)
// Mock command runners: instead of invoking a real binary they re-run this
// test binary with -test.run targeting a helper "test" that plays the role
// of the external process (GO_TEST_PROCESS=1 activates the helper).
var (
	// mockExec simulates a successful fingerprint process.
	mockExec = func(command string, args ...string) *exec.Cmd {
		cs := []string{"-test.run=TestShellProcessSuccess", "--", command}
		cs = append(cs, args...)
		cmd := exec.Command(os.Args[0], cs...)
		cmd.Env = []string{"GO_TEST_PROCESS=1"}
		return cmd
	}
	// mockFailExec simulates a process exiting with a non-zero status.
	mockFailExec = func(command string, args ...string) *exec.Cmd {
		cs := []string{"-test.run=TestShellProcessError", "--", command}
		cs = append(cs, args...)
		cmd := exec.Command(os.Args[0], cs...)
		cmd.Env = []string{"GO_TEST_PROCESS=1"}
		return cmd
	}
)
// TestShellProcessSuccess is not a real test: it acts as the external
// fingerprint process when re-executed via mockExec, printing a canned JSON
// result and exiting 0. It is a no-op in a normal test run.
func TestShellProcessSuccess(t *testing.T) {
	if os.Getenv("GO_TEST_PROCESS") != "1" {
		return
	}
	// Print out the test value to stdout
	fmt.Fprintf(os.Stdout, `{"duration": 10.5, "fingerprint": "the-fingerprint"}`)
	os.Exit(0)
}
// TestShellProcessError is the failing counterpart of
// TestShellProcessSuccess: when re-executed via mockFailExec it exits with
// status 2. It is a no-op in a normal test run.
func TestShellProcessError(t *testing.T) {
	if os.Getenv("GO_TEST_PROCESS") != "1" {
		return
	}
	os.Exit(2)
}
// mustSetupFS builds an in-memory filesystem pre-populated with the two
// audio fixtures and one text file; it panics on any setup failure.
func mustSetupFS() afero.Fs {
	mockFS := afero.NewMemMapFs()
	if err := mockFS.MkdirAll(testDataDir, 0755); err != nil {
		panic(err)
	}

	fixtures := []struct {
		name    string
		content string
	}{
		{testFile1, "file 1"},
		{testFile2, "file 2"},
		{testFile3, "text file"},
	}
	for _, fx := range fixtures {
		target := path.Join(testDataDir, fx.name)
		if err := afero.WriteFile(mockFS, target, []byte(fx.content), 0644); err != nil {
			panic(err)
		}
	}

	return mockFS
}
// TestFingerprintFromFile checks that fingerprinting a single file yields
// exactly one result carrying the mocked duration/fingerprint and the
// file's FileInfo.
func TestFingerprintFromFile(t *testing.T) {
	mockFS := mustSetupFS()
	chromap := NewChromaPrint(mockExec, mockFS)

	inputFile := path.Join(testDataDir, testFile1)
	fInfo, err := mockFS.Stat(inputFile)
	assert.NoError(t, err)

	got, err := chromap.CalcFingerprint(inputFile)
	assert.NoError(t, err)
	assert.Len(t, got, 1)
	assert.Equal(t, got[0], &Fingerprint{
		Duration:  10.5,
		Value:     "the-fingerprint",
		InputFile: fInfo,
	})
}
// TestFingerprintFromDir checks that fingerprinting a directory processes
// only the two audio files (the text file is skipped), in any order.
func TestFingerprintFromDir(t *testing.T) {
	mockFS := mustSetupFS()
	chromap := NewChromaPrint(mockExec, mockFS)

	got, err := chromap.CalcFingerprint(testDataDir)
	assert.NoError(t, err)
	assert.Len(t, got, 2)

	fInfo1, err := mockFS.Stat(path.Join(testDataDir, testFile1))
	assert.NoError(t, err)
	fInfo2, err := mockFS.Stat(path.Join(testDataDir, testFile2))
	assert.NoError(t, err)

	// Order is not guaranteed, so compare as sets.
	assert.ElementsMatch(t, []*Fingerprint{
		{
			Duration:  10.5,
			Value:     "the-fingerprint",
			InputFile: fInfo1,
		},
		{
			Duration:  10.5,
			Value:     "the-fingerprint",
			InputFile: fInfo2,
		},
	}, got)
}
// TestInputErrors table-tests the error paths of CalcFingerprint: a path
// that does not exist and a file with an unsupported format.
func TestInputErrors(t *testing.T) {
	mockFS := mustSetupFS()
	chromap := NewChromaPrint(mockExec, mockFS)

	testcases := []struct {
		name        string
		inputPath   string
		expectedErr error
	}{
		{
			name:        "invalid path",
			inputPath:   "some/other/dir",
			expectedErr: ErrInvalidPath,
		},
		{
			name:        "invalid file format",
			inputPath:   path.Join(testDataDir, testFile3),
			expectedErr: ErrInvalidFormat,
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			_, err := chromap.CalcFingerprint(testcase.inputPath)
			assert.EqualError(t, err, testcase.expectedErr.Error())
		})
	}
}
// TestHandleExecCmdError checks that a failing external process (non-zero
// exit via mockFailExec) surfaces as an error from CalcFingerprint.
func TestHandleExecCmdError(t *testing.T) {
	mockFS := mustSetupFS()
	chromap := NewChromaPrint(mockFailExec, mockFS)

	_, err := chromap.CalcFingerprint(testDataDir)
	assert.NotNil(t, err)
}
|
package repository
import (
"context"
"time"
"github.com/google/uuid"
)
// Guest represents the repository guest object.
// NOTE(review): the json tag "time_leaved" and field TimeLeaved are
// grammatically off ("time_left"/"TimeLeft") but are part of the persisted
// schema and API — renaming would be a breaking change.
type Guest struct {
	ID                       uuid.UUID  `json:"id" gorm:"primaryKey,not null"`
	Name                     string     `json:"name" gorm:"index:idx_guest_phone,unique,not null"`
	Table                    int        `json:"table"`
	AccompanyingGuests       int        `json:"accompanying_guests"`        // declared at reservation time
	ActualAccompanyingGuests int        `json:"actual_accompanying_guests"` // counted on arrival
	TimeArrived              *time.Time `json:"time_arrived"`               // nil until the guest arrives
	TimeLeaved               *time.Time `json:"time_leaved"`                // nil until the guest leaves
	CreatedAt                time.Time  `json:"created_at" gorm:"not null"`
	UpdatedAt                time.Time  `json:"updated_at" gorm:"not null"`
	DeletedAt                *time.Time `json:"deleted_at"` // soft-delete marker
}
// GuestRepository represents the guest repository interface.
type GuestRepository interface {
	// FindAll lists guests; when isArrivedOnly is true only guests that
	// have arrived are returned.
	FindAll(ctx context.Context, isArrivedOnly bool) ([]*Guest, error)
	FindByID(ctx context.Context, guestID uuid.UUID) (*Guest, error)
	FindByName(ctx context.Context, name string) (*Guest, error)
	Insert(ctx context.Context, guest *Guest) (*Guest, error)
	Update(ctx context.Context, guest *Guest) (*Guest, error)
	Delete(ctx context.Context, guest *Guest) error
}
|
package utils
import (
"io"
"math/rand"
"os"
"strings"
"time"
)
// randchars holds the 94 printable ASCII characters ('!' (33) through
// '~' (126)) used by the random-string helpers.
var randchars [94]byte

func init() {
	// Fill randchars with ASCII codes 33..126.
	var i byte
	for i = 33; i <= 126; i++ {
		randchars[i-33] = i
	}
	// Seed the global PRNG so each process produces different strings.
	// NOTE(review): rand.Seed is deprecated since Go 1.20 — confirm the
	// toolchain version before modernizing.
	rand.Seed(time.Now().UnixNano())
}
// Randstr returns a random string of length strlen drawn from the printable
// ASCII characters in randchars.
func Randstr(strlen int) string {
	buf := make([]byte, strlen)
	for i := range buf {
		buf[i] = randchars[rand.Intn(len(randchars))]
	}
	return string(buf)
}
//Randstr20 Randstr20 > uuid
func Randstr20() string {
data := make([]byte, 20)
for i := 0; i < 20; i++ {
data[i] = randchars[rand.Intn(94)]
}
return string(data)
}
// RandstrLETTERletterNUMBER returns a random string of length strlen made of
// ASCII letters (both cases) and digits.
func RandstrLETTERletterNUMBER(strlen int) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz" +
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
		"0123456789"
	out := make([]byte, strlen)
	for i := range out {
		out[i] = alphabet[rand.Intn(len(alphabet))]
	}
	return string(out)
}
// RandstrLETTERletterNUMBER22 returns a 22-character random string of ASCII
// letters (both cases) and digits.
func RandstrLETTERletterNUMBER22() string {
	return RandstrLETTERletterNUMBER(22)
}
//Copyfile Copyfile
func Copyfile(dst, src string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
cerr := out.Close()
if err != nil {
return err
}
return cerr
}
// GetExt returns the text after the last '.' in f (the extension without
// the dot). When f contains no '.', f itself is returned — LastIndex yields
// -1 and the slice starts at 0, matching the original byte-by-byte loop.
func GetExt(f string) (ext string) {
	return f[strings.LastIndex(f, ".")+1:]
}
// PathExist reports whether _path exists. Stat errors other than
// "not exist" (e.g. permission problems) are treated as existing, matching
// the original behavior.
func PathExist(_path string) bool {
	_, err := os.Stat(_path)
	return !os.IsNotExist(err)
}
// Substr returns the substring of s starting at rune index pos with at most
// length runes. The end is clamped to the string length (as before), and —
// fix — pos is now clamped to [0, len] and non-positive lengths yield ""
// instead of panicking with an out-of-range slice index.
func Substr(s string, pos, length int) string {
	runes := []rune(s)
	if pos < 0 {
		pos = 0
	}
	if pos > len(runes) {
		pos = len(runes)
	}
	end := pos + length
	if end > len(runes) {
		end = len(runes)
	}
	if end < pos {
		end = pos
	}
	return string(runes[pos:end])
}
// GetParentDirectory returns the text after the final '/' in dirctory.
// NOTE(review): despite its name this returns the LAST path component (the
// base name), not the parent directory — confirm callers depend on the
// current behavior before renaming or fixing.
func GetParentDirectory(dirctory string) string {
	return Substr(dirctory, strings.LastIndex(dirctory, "/")+1, len(dirctory))
}
// DriveLetter2LowerCase lowercases a leading Windows drive letter, e.g.
// "C:\x" becomes "c:\x"; any other string is returned unchanged.
//
// Fix: the original indexed dir[1] unconditionally and panicked on strings
// shorter than two bytes; a length guard is added.
func DriveLetter2LowerCase(dir string) string {
	b := []byte(dir)
	if len(b) >= 2 && b[1] == ':' && 'A' <= b[0] && b[0] <= 'Z' {
		b[0] += 'a' - 'A'
	}
	return string(b)
}
|
package main
import (
"net"
"fmt"
"bufio"
"strings"
)
func main() {
l,err:=net.Listen("tcp",":8080")
if err!=nil{
fmt.Println(err)
}
for{
c,err:=l.Accept() //here we accept the tcp connection in c and now we can read and write on this connection
if err!=nil{
fmt.Println(err)
continue
}
go handleconn(c)
}
}
// handleconn serves one client connection and closes it when done.
func handleconn(c net.Conn) {
	defer c.Close()
	request(c)
}
// request reads the client's HTTP request line by line. The first line (the
// request line, e.g. "GET / HTTP/1.1") is routed via mux; reading stops at
// the empty line that terminates the header section.
func request(c net.Conn) {
	var i int
	scanner := bufio.NewScanner(c)
	//scanner.Split(bufio.ScanWords) //to print each word in new line
	for scanner.Scan() {
		data := scanner.Text()
		fmt.Println(data)
		//fmt.Fprintln(c,"Received : ",data) // video no. 023
		if i == 0 {
			// Only the request line carries method and URI.
			mux(c, data)
		}
		if data == "" {
			// Blank line marks the end of the request headers.
			break
		}
		i++
	}
	//defer c.Close()
	fmt.Println("End Of Prog") //program reaches here when we close the connection i.e close localhost 8080
}
// mux routes an HTTP request line (e.g. "GET /about HTTP/1.1") to the
// matching handler. Unknown method/URI pairs are ignored.
//
// Fixes: the original indexed strings.Fields(data)[1] unconditionally and
// panicked on an empty or one-field request line; it also logged "methos".
func mux(c net.Conn, data string) {
	fields := strings.Fields(data)
	if len(fields) < 2 {
		// Malformed request line: nothing to route.
		return
	}
	method, uri := fields[0], fields[1]
	fmt.Println("method is : ", method, "uri is : ", uri)

	switch {
	case method == "GET" && uri == "/":
		index(c)
	case method == "GET" && uri == "/about":
		about(c)
	case method == "GET" && uri == "/contact":
		contact(c)
	case method == "GET" && uri == "/apply":
		apply(c)
	case method == "POST" && uri == "/apply":
		applyProcess(c)
	}
}
func index(conn net.Conn) {
body := `<!DOCTYPE html><html lang="en"><head><meta charet="UTF-8"><title></title></head><body>
<strong>INDEX</strong><br>
<a href="/">index</a><br>
<a href="/about">about</a><br>
<a href="/contact">contact</a><br>
<a href="/apply">apply</a><br>
</body></html>`
fmt.Fprint(conn, "HTTP/1.1 200 OK\r\n")
fmt.Fprintf(conn, "Content-Length: %d\r\n", len(body))
fmt.Fprint(conn, "Content-Type: text/html\r\n")
fmt.Fprint(conn, "\r\n")
fmt.Fprint(conn, body)
}
func about(conn net.Conn) {
body := `<!DOCTYPE html><html lang="en"><head><meta charet="UTF-8"><title></title></head><body>
<strong>ABOUT</strong><br>
<a href="/">index</a><br>
<a href="/about">about</a><br>
<a href="/contact">contact</a><br>
<a href="/apply">apply</a><br>
</body></html>`
fmt.Fprint(conn, "HTTP/1.1 200 OK\r\n")
fmt.Fprintf(conn, "Content-Length: %d\r\n", len(body))
fmt.Fprint(conn, "Content-Type: text/html\r\n")
fmt.Fprint(conn, "\r\n")
fmt.Fprint(conn, body)
}
func contact(conn net.Conn) {
body := `<!DOCTYPE html><html lang="en"><head><meta charet="UTF-8"><title></title></head><body>
<strong>CONTACT</strong><br>
<a href="/">index</a><br>
<a href="/about">about</a><br>
<a href="/contact">contact</a><br>
<a href="/apply">apply</a><br>
</body></html>`
fmt.Fprint(conn, "HTTP/1.1 200 OK\r\n")
fmt.Fprintf(conn, "Content-Length: %d\r\n", len(body))
fmt.Fprint(conn, "Content-Type: text/html\r\n")
fmt.Fprint(conn, "\r\n")
fmt.Fprint(conn, body)
}
func apply(c net.Conn) {
body := `<!DOCTYPE html><html lang="en"><head><meta charet="UTF-8"><title></title></head><body>
<strong>APPLY</strong><br>
<a href="/">index</a><br>
<a href="/about">about</a><br>
<a href="/contact">contact</a><br>
<a href="/apply">apply</a><br>
<form method="POST" action="/apply">
<input type="text" name="fname" placeholder="enter first name">
<input type="submit" value="apply">
</form>
</body></html>`
fmt.Fprint(c, "HTTP/1.1 200 OK\r\n")
fmt.Fprintf(c, "Content-Length: %d\r\n", len(body))
fmt.Fprint(c, "Content-Type: text/html\r\n")
fmt.Fprint(c, "\r\n")
fmt.Fprint(c, body)
}
func applyProcess(c net.Conn) {
body := `<!DOCTYPE html><html lang="en"><head><meta charet="UTF-8"><title></title></head><body>
<strong>APPLY PROCESS</strong><br>
<a href="/">index</a><br>
<a href="/about">about</a><br>
<a href="/contact">contact</a><br>
<a href="/apply">apply</a><br>
</body></html>`
fmt.Fprint(c, "HTTP/1.1 200 OK\r\n")
fmt.Fprintf(c, "Content-Length: %d\r\n", len(body))
fmt.Fprint(c, "Content-Type: text/html\r\n")
fmt.Fprint(c, "\r\n")
fmt.Fprint(c, body)
}
|
package hacks
import (
"net/http"
"net/http/httputil"
)
// WorkingKey forwards the incoming request through a reverse proxy.
// NOTE(review): the zero-value httputil.ReverseProxy has no Director (or
// Rewrite) configured, so it has no upstream target — ServeHTTP will fail
// at runtime for real requests. Confirm whether a Director should be set
// or whether this stub is intentional.
func WorkingKey(w http.ResponseWriter, r *http.Request) {
	rev := httputil.ReverseProxy{}
	rev.ServeHTTP(w, r)
}
|
/////////////////////////////////////////////////////////////////////
// arataca89@gmail.com
// 20210423
//
// Implementa uma fila de strings usando slice
//
package main
import (
"fmt"
"os"
)
// queue holds the FIFO contents. It starts with one empty pre-allocated
// slot that the very first push overwrites (see push).
var queue = make([]string, 1)

// start is true until the first item has been pushed.
var start = true
// push appends item to the tail of the queue; the very first push reuses
// the pre-allocated slot instead of appending.
func push(item string) {
	if !start {
		queue = append(queue, item)
		return
	}
	queue[0] = item
	start = false
}
// pop removes the oldest item (the head) from the queue. Before the first
// push it only reports that the queue is empty.
func pop() {
	if start {
		fmt.Println("Fila vazia!")
		return
	}
	if len(queue) > 0 {
		queue = queue[1:]
	}
}
// printQueue prints the queue from newest to oldest, "-"-separated, or an
// empty-queue message when there is nothing to show.
func printQueue() {
	if start || len(queue) == 0 {
		fmt.Println("Fila vazia!")
		return
	}
	for i := len(queue) - 1; i >= 0; i-- {
		fmt.Print(queue[i], "-")
	}
}
// main runs an interactive menu loop: 1 enqueues a string, 2 dequeues the
// head, 3 prints the queue, 0 exits. Menu text is in Portuguese.
func main() {
	var i string
	for {
		fmt.Println("<< 1 >> Inserir item na fila")
		fmt.Println("<< 2 >> Retirar item da fila")
		fmt.Println("<< 3 >> Exibir fila")
		fmt.Println("<< 0 >> Sair")
		fmt.Print("Entre com sua opção: ")
		// "%s\r" consumes the carriage return left by Windows-style input.
		fmt.Scanf("%s\r", &i)

		if i == "0" {
			os.Exit(0)
		} else if i == "1" {
			fmt.Print("Entre com a string a ser inserida: ")
			// i is reused as the input buffer for the item to insert.
			fmt.Scanf("%s\r", &i)
			push(i)
			fmt.Println()
		} else if i == "2" {
			if len(queue) == 0 {
				fmt.Printf("\nFila vazia\n\n")
			} else {
				// Capture the head before pop discards it.
				retirado := queue[0]
				pop()
				fmt.Println("\nItem retirado", retirado)
				fmt.Println()
			}
		} else if i == "3" {
			fmt.Println("\nExibindo a fila")
			fmt.Println("----------------")
			printQueue()
			fmt.Printf("\n\n")
		} else {
			fmt.Printf("\nOpção inválida!\n\n")
		}
	}
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package system
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestCreateIfNotExist checks that CreateIfNotExist creates both a plain
// directory and a nested directory path, and that the results are dirs.
func TestCreateIfNotExist(t *testing.T) {
	testDir := "TestCreateIfNotExist"
	defer os.RemoveAll(testDir)

	normalCreate := filepath.Join(testDir, "normalCase")
	_, err := CreateIfNotExist(normalCreate)
	assert.NoError(t, err)
	fi, err := os.Stat(normalCreate)
	assert.NoError(t, err)
	assert.Equal(t, true, fi.IsDir())

	// Nested path: intermediate directories must be created as well.
	normalNestCreate := filepath.Join(testDir, "nested", "normalCase")
	_, err = CreateIfNotExist(normalNestCreate)
	assert.NoError(t, err)
	fi, err = os.Stat(normalNestCreate)
	assert.NoError(t, err)
	assert.Equal(t, true, fi.IsDir())
}
// TestGetVelaHomeDir table-tests GetVelaHomeDir: the env override wins when
// VelaHomeEnv is set, otherwise the default under $HOME is used.
//
// Fix: tt.postFun was declared and populated but never invoked, so the
// default-home case never cleaned up the directory it created; it is now
// deferred alongside the env cleanup.
func TestGetVelaHomeDir(t *testing.T) {
	tests := []struct {
		name    string
		want    string
		preFunc func()
		postFun func()
		wantErr assert.ErrorAssertionFunc
	}{
		{
			name: "test get vela home dir from env",
			preFunc: func() {
				_ = os.Setenv(VelaHomeEnv, "/tmp")
			},
			want:    "/tmp",
			wantErr: assert.NoError,
		},
		{
			name: "test use default vela home dir",
			preFunc: func() {
				_ = os.Unsetenv(VelaHomeEnv)
			},
			want:    filepath.Join(os.Getenv("HOME"), defaultVelaHome),
			wantErr: assert.NoError,
			postFun: func() {
				_ = os.RemoveAll(filepath.Join(os.Getenv("HOME"), defaultVelaHome))
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.preFunc != nil {
				tt.preFunc()
			}
			defer func() {
				_ = os.Unsetenv(VelaHomeEnv)
			}()
			if tt.postFun != nil {
				// Per-case cleanup (previously declared but never called).
				defer tt.postFun()
			}
			got, err := GetVelaHomeDir()
			if !tt.wantErr(t, err, "GetVelaHomeDir()") {
				return
			}
			assert.Equalf(t, tt.want, got, "GetVelaHomeDir()")
		})
	}
}
|
/*
Copyright 2018-2020, Arm Limited and affiliates.
Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package offlinemanager
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sync"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/storage"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/cache"
)
// Cache file naming, persistence cadence and informer settings.
const (
	FormatVersion        = "1"               // bump when the on-disk cache format changes
	FileComplete         = "cache.current"   // fully written cache snapshot
	FilePartial          = "cache.partial"   // in-progress snapshot, renamed on completion
	CacheSaveInterval    = time.Second * 30
	InformerResyncPeriod = 0 // Don't resync
)
// LocalCache serves reads of cached Kubernetes resources by resource name,
// namespace and object name.
type LocalCache interface {
	Get(resource string, namespace string, name string, opts metav1.GetOptions) (runtime.Object, error)
	List(resource string, namespace string, opts metav1.ListOptions) (*unstructured.UnstructuredList, error)
}
// subsetCache caches one (resource, field selector) subset of objects,
// backed by an informer. It can be seeded from persisted contents so reads
// work before the informer's first sync.
type subsetCache struct {
	// immutable
	resource        string
	fieldset        fields.Set // field selector defining this subset
	resourceKind    string
	initialContents bool   // true when seeded from persisted contents
	initialVersion  string // resource version of the seeded contents
	getObjDeps      DependenciesFunc

	// Mutable
	parent     *localCache
	store      cache.Store
	controller cache.Controller
	ctx        context.Context
	cancel     context.CancelFunc
	wg         *sync.WaitGroup // shared with parent; tracks spawned goroutines
}
// newSubsetCache builds a cache for one (resource, fieldset) subset. When
// contents is non-nil the store is seeded with it (at resourceVersion) so
// the cache is usable before the informer's first sync. The informer is
// created but not started; call start() to run it.
//
// Fix: the delete event handler was held in a local variable named
// `delete`, shadowing the builtin; the handlers are renamed on*/.
func newSubsetCache(parent *localCache, resourceInfo ResourceInfo, fieldset fields.Set, resourceVersion string, contents []unstructured.Unstructured) *subsetCache {
	getDeps := resourceInfo.GetDependencies
	if getDeps == nil {
		// Default: objects of this resource declare no dependencies.
		getDeps = func(obj runtime.Object) ([]Subset, error) {
			return []Subset{}, nil
		}
	}
	c := &subsetCache{
		resource:       resourceInfo.Name,
		resourceKind:   resourceInfo.Kind,
		fieldset:       fieldset,
		initialVersion: resourceVersion,
		getObjDeps:     getDeps,
		parent:         parent,
		wg:             &parent.wg,
	}
	c.ctx, c.cancel = context.WithCancel(parent.ctx)

	// Event handlers propagate dependency changes to the parent cache.
	onAdd := func(obj interface{}) {
		us := obj.(*unstructured.Unstructured)
		glog.V(8).Infof("Store Add '%v/%v' to '%v'", us.GetNamespace(), us.GetName(), c.String())
		c.updateDepsInNewThread(nil, obj)
	}
	onUpdate := func(oldObj, newObj interface{}) {
		us := newObj.(*unstructured.Unstructured)
		glog.V(8).Infof("Store Update '%v/%v' to '%v'", us.GetNamespace(), us.GetName(), c.String())
		c.updateDepsInNewThread(oldObj, newObj)
	}
	onDelete := func(obj interface{}) {
		us := obj.(*unstructured.Unstructured)
		glog.V(8).Infof("Store Delete '%v/%v' to '%v'", us.GetNamespace(), us.GetName(), c.String())
		c.updateDepsInNewThread(obj, nil)
	}
	lw := NewListWatcher(c.ctx, parent.clientset, resourceInfo, fieldset.AsSelector())
	store, controller := cache.NewInformer(lw, &unstructured.Unstructured{}, InformerResyncPeriod, cache.ResourceEventHandlerFuncs{
		AddFunc:    onAdd,
		UpdateFunc: onUpdate,
		DeleteFunc: onDelete,
	})
	if contents != nil {
		// Add initial contents to the store
		newContents := make([]interface{}, 0, len(contents))
		for _, content := range contents {
			contentsCopy := content
			newContents = append(newContents, &contentsCopy)
		}
		store.Replace(newContents, resourceVersion)

		// Mark this cache as having initial contents so it can be used right away
		c.initialContents = true
	}
	c.store = store
	c.controller = controller
	return c
}
// updateDepsInNewThread computes the dependency subsets of the old and new
// versions of an object and, in a fresh goroutine, registers the new
// dependencies and unregisters the old ones on the parent cache. The
// goroutine avoids deadlocking against locks the caller may hold.
func (c *subsetCache) updateDepsInNewThread(oldObj, newObj interface{}) {
	oldDeps := []Subset{}
	if us, ok := oldObj.(*unstructured.Unstructured); ok {
		if deps, err := c.getObjDeps(us); err == nil {
			oldDeps = deps
		} else {
			glog.Errorf("Error getting old dependencies: '%v'", err)
		}
	}
	newDeps := []Subset{}
	if us, ok := newObj.(*unstructured.Unstructured); ok {
		if deps, err := c.getObjDeps(us); err == nil {
			newDeps = deps
		} else {
			glog.Errorf("Error getting new dependencies: '%v'", err)
		}
	}

	// Call into parent in a new go routine to prevent deadlock
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		// Add before remove so shared dependencies never drop to zero refs.
		for _, dep := range newDeps {
			c.parent.addSubsetDependency(dep.Name, dep.Set, true)
		}
		for _, dep := range oldDeps {
			c.parent.removeSubsetDependency(dep.Name, dep.Set, true)
		}
	}()
}
// list returns the cached objects matching the selection predicate as an
// UnstructuredList. It fails when the cache has not yet loaded its initial
// contents. Returned items are deep copies so callers cannot mutate the store.
func (c *subsetCache) list(sp storage.SelectionPredicate) (*unstructured.UnstructuredList, error) {
	if !c.getReady() {
		return nil, fmt.Errorf("Subset cache not ready")
	}
	ul := unstructured.UnstructuredList{
		Object: map[string]interface{}{
			"kind":       c.resourceKind + "List",
			"apiVersion": "v1",
		},
	}
	ul.SetResourceVersion(c.getResourceVersion())
	for _, rawObj := range c.store.List() {
		obj := rawObj.(*unstructured.Unstructured)
		match, err := sp.Matches(obj)
		if err != nil {
			// FIX: previously logged a bare "Matching error!" and dropped
			// the error detail; include it so failures are diagnosable.
			glog.Errorf("Matching error: '%v'", err)
			continue
		}
		if match {
			ul.Items = append(ul.Items, *obj.DeepCopy())
		}
	}
	return &ul, nil
}
// getReady reports whether this cache can serve reads: either the informer
// has completed its initial sync, or the cache was seeded with contents
// restored from disk.
func (c *subsetCache) getReady() bool {
	// Idiom: a bool-returning if/else-if chain collapses to one expression.
	return c.controller.HasSynced() || c.initialContents
}
// getResourceVersion returns the version to report for list results: the
// informer's last-synced version once available, otherwise the version the
// cache was created with.
func (c *subsetCache) getResourceVersion() string {
	if !c.controller.HasSynced() {
		return c.initialVersion
	}
	return c.controller.LastSyncResourceVersion()
}
// getSubset returns the Subset (resource name + field set) this cache serves.
func (c *subsetCache) getSubset() Subset {
	return Subset{Name: c.resource, Set: c.fieldset}
}
// getDeps returns the dependencies implied by every object currently in the
// store.
func (c *subsetCache) getDeps() []Subset {
	ret := []Subset{}
	for _, obj := range c.store.List() {
		deps, err := c.getObjDeps(obj.(runtime.Object))
		if err != nil {
			glog.Errorf("Error getting deps '%v'", err)
			// FIX: skip this object instead of appending the result of a
			// failed extraction.
			continue
		}
		ret = append(ret, deps...)
	}
	return ret
}
// start runs the informer in a background goroutine tracked by c.wg.
// controller.Run blocks until c.ctx is cancelled; afterwards the
// dependencies contributed by this cache's stored objects are released
// from the parent.
func (c *subsetCache) start() {
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		c.controller.Run(c.ctx.Done())
		for _, dep := range c.getDeps() {
			c.parent.removeSubsetDependency(dep.Name, dep.Set, true)
		}
	}()
}
// String renders the cache identity as "<resource>/<field selector>".
func (c *subsetCache) String() string {
	return fmt.Sprintf("%s/%s", c.resource, c.fieldset.String())
}
// localCache maintains one subsetCache per (resource, field-set) dependency
// and serves Get/List requests from them, periodically persisting contents
// to disk.
type localCache struct {
	mux sync.Mutex // guards the fields below
	wg sync.WaitGroup // tracks informer and background-save goroutines
	ctx context.Context // base context for informers and background work
	caches map[string]map[string]*subsetCache // resource -> field-set key -> cache
	depsAuto Dependencies // dependencies derived from cached object contents
	depsManual Dependencies // dependencies registered by external callers
	resources map[string]ResourceInfo // supported resource types, by name
	storeDir string // directory holding the persisted cache files
	clientset dynamic.Interface // dynamic client used by the informers
	active bool // set once run() has started the caches
}
// NewLocalCache builds a localCache rooted at storeDir for the given
// resources and restores any previously saved cache contents from disk.
// Call with SupportedResources.
func NewLocalCache(ctx context.Context, storeDir string, resources []ResourceInfo, dynamicClientset dynamic.Interface) (*localCache, error) {
	lc := localCache{
		ctx:       ctx,
		clientset: dynamicClientset,
	}
	// Resolve the storage location to an absolute path.
	absDir, err := filepath.Abs(storeDir)
	if err != nil {
		return nil, err
	}
	lc.storeDir = absDir
	// Verify the directory exists and that we can write to it by creating
	// (then removing) a probe file.
	probe := path.Join(absDir, FilePartial)
	if err := ioutil.WriteFile(probe, []byte(""), 0600); err != nil {
		return nil, fmt.Errorf("Unable to access cache dir: %v", err)
	}
	os.Remove(probe)
	// Index the supported resources by name.
	byName := map[string]ResourceInfo{}
	for _, resource := range resources {
		byName[resource.Name] = resource
	}
	lc.resources = byName
	// Restore persisted caches and register the dependencies their stored
	// objects imply.
	caches := lc.loadCaches()
	for _, cacheType := range caches {
		for _, cache := range cacheType {
			for _, deps := range cache.getDeps() {
				lc.depsAuto.Add(deps)
			}
		}
	}
	lc.caches = caches
	return &lc, nil
}
// Run starts the cache machinery and blocks until all of its goroutines
// have finished. It panics if the cache is already running.
func (c *localCache) Run() {
	err := c.run()
	if err != nil {
		panic(err)
	}
	c.wg.Wait()
}
// pruneCaches drops caches that no manual or automatic dependency still
// references. Removing a cache also removes the auto dependencies its
// contents contributed, which may orphan further caches, so the scan is
// repeated until a full pass removes nothing (a fixpoint).
// Presumably must be called with the lock held — matches the other
// unexported helpers here.
func (c *localCache) pruneCaches() {
	for done := false; !done; {
		done = true
		for _, cacheType := range c.caches {
			for set, cache := range cacheType {
				if !c.depsManual.Has(cache.getSubset()) && !c.depsAuto.Has(cache.getSubset()) {
					for _, deps := range cache.getDeps() {
						c.depsAuto.Remove(deps)
						// re-run processing since the dependencies changed
						done = false
					}
					// Deleting the current key while ranging a map is safe in Go.
					delete(cacheType, set)
				}
			}
		}
	}
}
// run prunes unreferenced caches, creates and starts a subsetCache for each
// registered dependency, and launches a background goroutine that
// periodically persists the caches until c.ctx is cancelled. It fails if
// the cache is already active.
func (c *localCache) run() error {
	c.mux.Lock()
	defer c.mux.Unlock()
	if c.active {
		return fmt.Errorf("LocalCache is already running")
	}
	// Prune caches which have not been referenced
	c.pruneCaches()
	// Add missing caches
	allDeps := append([]Subset{}, c.depsAuto.GetAll()...)
	allDeps = append(allDeps, c.depsManual.GetAll()...)
	for _, subset := range allDeps {
		resource := subset.Name
		set := subset.Set
		_, found := c.caches[resource][set.String()]
		if found {
			continue
		}
		c.caches[resource][set.String()] = newSubsetCache(c, c.resources[resource], set, "0", nil)
	}
	// Start all caches
	for _, cacheType := range c.caches {
		for _, cache := range cacheType {
			cache.start()
		}
	}
	// Mark this object as active
	c.active = true
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		// FIX: the ticker was previously created (and its Stop deferred)
		// inside the for loop, leaking one ticker per iteration since the
		// deferred Stops only run when the goroutine exits. Create it once
		// for the goroutine's lifetime.
		ticker := time.NewTicker(CacheSaveInterval)
		defer ticker.Stop()
		for {
			select {
			case <-c.ctx.Done():
				return
			case <-ticker.C:
				func() {
					c.mux.Lock()
					defer c.mux.Unlock()
					c.saveCaches(c.caches)
				}()
			}
		}
	}()
	return nil
}
// localCacheClean is the JSON-serializable snapshot of the whole cache as
// written to disk by saveCaches and read back by loadCaches.
type localCacheClean struct {
	Version string `json:"version"` // on-disk format version (FormatVersion)
	Caches []*subsetCacheClean `json:"caches"`
}

// subsetCacheClean is the serialized form of one subsetCache: its identity
// (resource + field set), the resource version it was synced to, and its
// stored objects.
type subsetCacheClean struct {
	Resource string `json:"resource"`
	Set fields.Set `json:"set"`
	ResourceVersion string `json:"resource-version"`
	Items []unstructured.Unstructured `json:"items"`
}
// loadCaches restores the saved caches from disk. On any failure (missing
// file, corrupt JSON, format-version mismatch) it logs a warning and
// returns an empty cache map so the caller proceeds with a cold cache.
func (c *localCache) loadCaches() map[string]map[string]*subsetCache {
	caches := map[string]map[string]*subsetCache{}
	for _, resource := range c.resources {
		caches[resource.Name] = map[string]*subsetCache{}
	}
	data, err := ioutil.ReadFile(path.Join(c.storeDir, FileComplete))
	if err != nil {
		glog.Warningf("Unable to load caches from disk: '%v'", err)
		return caches
	}
	var contents localCacheClean
	if err := json.Unmarshal(data, &contents); err != nil {
		glog.Warningf("Cache unmarshalling failed: '%v'", err)
		return caches
	}
	if contents.Version != FormatVersion {
		glog.Warningf("Cache format '%v' is older than current '%v'. Ignoring contents", contents.Version, FormatVersion)
		return caches
	}
	glog.Infof("Loading Offline cache")
	for _, saved := range contents.Caches {
		info, found := c.resources[saved.Resource]
		if !found {
			glog.Warningf("Unsupported resource type '%v' found in cache", saved.Resource)
			continue
		}
		sc := newSubsetCache(c, info, saved.Set, saved.ResourceVersion, saved.Items)
		caches[saved.Resource][saved.Set.String()] = sc
		glog.Infof(" %v - %v items", sc.String(), len(saved.Items))
	}
	glog.Infof("Cache loaded successfully")
	return caches
}
// saveCaches serializes every ready cache to JSON and writes it to disk.
// The write is atomic: data goes to the partial file first, which is then
// renamed over the complete file, so a reader never observes a torn file.
// Caches that have not finished their initial load are skipped.
func (c *localCache) saveCaches(caches map[string]map[string]*subsetCache) {
	contents := &localCacheClean{
		Version: FormatVersion,
	}
	glog.Infof("Saving Offline cache")
	for _, cacheType := range caches {
		for _, cache := range cacheType {
			if !cache.getReady() {
				// This cache hasn't loaded initial contents so skip it
				continue
			}
			version := cache.getResourceVersion()
			items := InterfaceToUnstructuredSlice(cache.store.List())
			cleanCache := &subsetCacheClean{
				Resource: cache.resource,
				Set: cache.fieldset,
				ResourceVersion: version,
				Items: items,
			}
			contents.Caches = append(contents.Caches, cleanCache)
			glog.V(1).Infof(" %v - %v items", cache.String(), len(items))
		}
	}
	data, err := json.Marshal(contents)
	if err != nil {
		glog.Errorf("Could not marshal cache: '%v'", err)
		return
	}
	// Write to the partial file, then atomically rename into place.
	tmpFile := filepath.Join(c.storeDir, FilePartial)
	err = ioutil.WriteFile(tmpFile, data, 0600)
	if err != nil {
		glog.Errorf("Could not write cache: '%v'", err)
		return
	}
	completeFile := filepath.Join(c.storeDir, FileComplete)
	err = os.Rename(tmpFile, completeFile)
	if err != nil {
		glog.Errorf("Could not rename '%v' to '%v': '%v'", tmpFile, completeFile, err)
		return
	}
}
// errorIfUnsupported returns an error when resource is not one of the
// configured resource types.
// Must be called with the lock held
func (c *localCache) errorIfUnsupported(resource string) error {
	if _, ok := c.resources[resource]; !ok {
		return fmt.Errorf("Unsupported resource type '%s'", resource)
	}
	return nil
}
// Get looks up a single object by resource/namespace/name from the cached
// subsets by translating the request into a list filtered on metadata.name.
func (c *localCache) Get(resource string, namespace string, name string, opts metav1.GetOptions) (runtime.Object, error) {
	c.mux.Lock()
	defer c.mux.Unlock()
	if err := c.errorIfUnsupported(resource); err != nil {
		return nil, err
	}
	if namespace == "" && c.resources[resource].Namespaced {
		glog.Errorf("Get missing namespace for resource '%v'", resource)
	}
	listOpts := metav1.ListOptions{
		IncludeUninitialized: opts.IncludeUninitialized,
		ResourceVersion:      opts.ResourceVersion,
		FieldSelector:        fields.Set{"metadata.name": name}.AsSelector().String(),
	}
	list, err := c.listLocked(resource, namespace, listOpts)
	switch {
	case err != nil:
		return nil, fmt.Errorf("Resource '%v/%v/%v' not cached", resource, namespace, name)
	case len(list.Items) > 1:
		glog.Errorf("Multiple matches for a unique object")
	case len(list.Items) == 1:
		return &list.Items[0], nil
	}
	// Item is covered by cache but not found
	return nil, fmt.Errorf("Resource '%v/%v/%v' not found", resource, namespace, name)
}
// List returns the cached objects of the given resource matching opts.
func (c *localCache) List(resource string, namespace string, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
	c.mux.Lock()
	defer c.mux.Unlock()
	err := c.errorIfUnsupported(resource)
	if err != nil {
		return nil, err
	}
	return c.listLocked(resource, namespace, opts)
}
// listLocked answers a list request from whichever subset cache can fully
// satisfy it. Must be called with the lock held.
//
// A cache can answer when either (a) its field set is a superset of the
// selection, so it necessarily holds every object the selector could match,
// or (b) the selector uniquely identifies a single object and that object
// is present in the cache.
func (c *localCache) listLocked(resource string, namespace string, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
	fieldSelector, err := fields.ParseSelector(opts.FieldSelector)
	if err != nil {
		return nil, err
	}
	if namespace != "" {
		// Restrict the field selection to the requested namespace.
		fieldSelector = fields.AndSelectors(
			fieldSelector,
			fields.Set{"metadata.namespace": namespace}.AsSelector(),
		)
	}
	labelSelector, err := labels.Parse(opts.LabelSelector)
	if err != nil {
		return nil, err
	}
	sp := storage.SelectionPredicate{
		Label: labelSelector,
		Field: fieldSelector,
		GetAttrs: c.resources[resource].GetAttr,
		IncludeUninitialized: opts.IncludeUninitialized,
		Limit: opts.Limit,
		Continue: opts.Continue,
	}
	objectIsUnique := SelectorUniquelyIdentifiesObject(sp, c.resources[resource].Namespaced)
	for _, cache := range c.caches[resource] {
		// Skip caches that have not loaded their initial contents yet.
		if !cache.getReady() {
			continue
		}
		// If this cache has everything the selector could match then use it
		if SetContainsSelection(cache.fieldset, sp) {
			return cache.list(sp)
		}
		glog.Infof("List '%v/%v' is not a subset of cache '%v'", resource, fieldSelector.String(), cache)
		// If there is only one object and this list has it then we know
		// this is the complete list
		if objectIsUnique {
			if list, err := cache.list(sp); err != nil {
				glog.Errorf("Call to sublist failed")
			} else if len(list.Items) > 1 {
				glog.Errorf("Multiple matches for a unique object")
			} else if len(list.Items) == 1 {
				glog.Infof("List (Unique) '%v/%v' found in cache '%v'", resource, fieldSelector, cache)
				return list, nil
			}
			glog.Infof("List (Unique) '%v/%v' is not in cache '%v'", resource, fieldSelector, cache)
		}
	}
	// No cache covers this selection.
	return nil, fmt.Errorf("Resource '%v' subgroup '%v' not cached", resource, fieldSelector)
}
// Count returns the total number of objects held across all subset caches.
func (c *localCache) Count() int {
	c.mux.Lock()
	defer c.mux.Unlock()
	total := 0
	for _, cacheType := range c.caches {
		for _, cache := range cacheType {
			total += len(cache.store.List())
		}
	}
	return total
}
// AddSubsetDependency registers an external (manual) dependency on the
// given resource subgroup; when the cache is active a backing subsetCache
// is created and started for a newly referenced subset.
func (c *localCache) AddSubsetDependency(resource string, subgroup fields.Set) error {
	return c.addSubsetDependency(resource, subgroup, false)
}
// addSubsetDependency increments the reference count for (resource,
// subgroup). auto marks dependencies discovered from cached object
// contents; manual ones come from external callers. When the cache is
// active, a new subsetCache is created and started for a subset that has
// no backing cache yet.
func (c *localCache) addSubsetDependency(resource string, subgroup fields.Set, auto bool) error {
	c.mux.Lock()
	defer c.mux.Unlock()
	if err := c.errorIfUnsupported(resource); err != nil {
		return err
	}
	subset := Subset{Name: resource, Set: subgroup}
	if auto {
		c.depsAuto.Add(subset)
		if !c.active {
			// Auto dependencies come from running informers, so one arriving
			// before activation indicates an internal ordering problem.
			glog.Errorf("Internal dependency added before activated")
		}
	} else {
		c.depsManual.Add(subset)
	}
	depType := "manual"
	if auto {
		depType = "auto"
	}
	glog.V(3).Infof("Dependency - Increment reference '%v' (auto=%v manual=%v) of '%v'", depType, c.depsAuto.Count(subset), c.depsManual.Count(subset), subset.String())
	if !c.active {
		// Before activation, run() will create the caches for all
		// registered dependencies.
		return nil
	}
	// Create a cache if this set doesn't exist
	key := subgroup.String()
	_, ok := c.caches[resource][key]
	if !ok {
		cache := newSubsetCache(c, c.resources[resource], subgroup, "0", nil)
		c.caches[resource][key] = cache
		glog.V(3).Infof(" Adding Cache for '%v'", cache.String())
		cache.start()
	}
	return nil
}
// RemoveSubsetDependency drops an external (manual) dependency on the given
// resource subgroup, tearing down the backing cache once unreferenced.
func (c *localCache) RemoveSubsetDependency(resource string, subgroup fields.Set) error {
	return c.removeSubsetDependency(resource, subgroup, false)
}
// removeSubsetDependency decrements the reference count for (resource,
// subgroup) and, when the cache is active and no manual or auto references
// remain, deletes and cancels the backing subsetCache.
func (c *localCache) removeSubsetDependency(resource string, subgroup fields.Set, auto bool) error {
	c.mux.Lock()
	defer c.mux.Unlock()
	if err := c.errorIfUnsupported(resource); err != nil {
		return err
	}
	subset := Subset{Name: resource, Set: subgroup}
	if auto {
		if !c.depsAuto.Remove(subset) {
			// NOTE(review): this branch both logs and returns the same
			// error, unlike the manual branch below — consider unifying.
			glog.Errorf("Subset does not exist")
			return fmt.Errorf("Subset does not exist")
		}
	} else {
		if !c.depsManual.Remove(subset) {
			return fmt.Errorf("Subset does not exist")
		}
	}
	depType := "manual"
	if auto {
		depType = "auto"
	}
	glog.V(3).Infof("Dependency - Decrement reference '%v' (auto=%v manual=%v) of '%v'", depType, c.depsAuto.Count(subset), c.depsManual.Count(subset), subset.String())
	if !c.active {
		return nil
	}
	key := subgroup.String()
	cache, ok := c.caches[resource][key]
	if !ok {
		return fmt.Errorf("Subset does not exist")
	}
	// Delete this cache if it is no longer in use
	if c.depsManual.Count(subset)+c.depsAuto.Count(subset) == 0 {
		delete(c.caches[resource], key)
		glog.V(3).Infof(" Removing Cache for '%v'", cache.String())
		cache.cancel()
	}
	return nil
}
|
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package executor
import (
"github.com/33cn/chain33/types"
pty "github.com/GM-Publicchain/gm/plugin/dapp/valnode/types"
)
// Query_GetValNodeByHeight returns the validator-node records stored in the
// local DB under the update key for the given block height.
func (val *ValNode) Query_GetValNodeByHeight(in *pty.ReqNodeInfo) (types.Message, error) {
	height := in.GetHeight()
	if height <= 0 {
		return nil, types.ErrInvalidParam
	}
	key := CalcValNodeUpdateHeightKey(height)
	values, err := val.GetLocalDB().List(key, nil, 0, 1)
	if err != nil {
		return nil, err
	}
	if len(values) == 0 {
		return nil, types.ErrNotFound
	}
	nodes := &pty.ValNodes{}
	for _, raw := range values {
		node := &pty.ValNode{}
		if err := types.Decode(raw, node); err != nil {
			return nil, err
		}
		nodes.Nodes = append(nodes.Nodes, node)
	}
	return nodes, nil
}
// Query_GetBlockInfoByHeight returns the Tendermint block info stored in
// the local DB for the given block height.
func (val *ValNode) Query_GetBlockInfoByHeight(in *pty.ReqBlockInfo) (types.Message, error) {
	height := in.GetHeight()
	if height <= 0 {
		return nil, types.ErrInvalidParam
	}
	value, err := val.GetLocalDB().Get(CalcValNodeBlockInfoHeightKey(height))
	if err != nil {
		return nil, err
	}
	if len(value) == 0 {
		return nil, types.ErrNotFound
	}
	info := &pty.TendermintBlockInfo{}
	if err := types.Decode(value, info); err != nil {
		return nil, err
	}
	return info, nil
}
|
package message
import (
"context"
"time"
)
// Table holds arbitrary string-keyed arguments passed through to the broker.
type Table map[string]interface{}

// ExchangeOptions collects the parameters of an exchange declaration.
type ExchangeOptions struct {
	Name       string
	Kind       string
	Durable    bool
	AutoDelete bool
	Internal   bool
	NoWait     bool
	Args       Table
	// Other options
	Context context.Context
}

// ExchangeOption mutates an ExchangeOptions value.
type ExchangeOption func(*ExchangeOptions)

// ExName sets the exchange name.
func ExName(name string) ExchangeOption {
	return func(opts *ExchangeOptions) { opts.Name = name }
}

// ExKind sets the exchange kind.
func ExKind(kind string) ExchangeOption {
	return func(opts *ExchangeOptions) { opts.Kind = kind }
}

// ExDurable sets the Durable flag.
func ExDurable(durable bool) ExchangeOption {
	return func(opts *ExchangeOptions) { opts.Durable = durable }
}

// ExAutoDelete sets the AutoDelete flag.
func ExAutoDelete(delete bool) ExchangeOption {
	return func(opts *ExchangeOptions) { opts.AutoDelete = delete }
}

// ExInternal sets the Internal flag.
func ExInternal(internal bool) ExchangeOption {
	return func(opts *ExchangeOptions) { opts.Internal = internal }
}

// ExNoWait sets the NoWait flag.
func ExNoWait(noWait bool) ExchangeOption {
	return func(opts *ExchangeOptions) { opts.NoWait = noWait }
}
// QueueOptions collects the parameters of a queue declaration.
type QueueOptions struct {
	Name       string
	Durable    bool
	AutoDelete bool
	Exclusive  bool
	NoWait     bool
	Args       Table
	// Other options
	Context context.Context
}

// QueueOption mutates a QueueOptions value.
type QueueOption func(*QueueOptions)

// QueueName sets the queue name.
func QueueName(name string) QueueOption {
	return func(opts *QueueOptions) { opts.Name = name }
}

// QueueDurable sets the Durable flag.
func QueueDurable(durable bool) QueueOption {
	return func(opts *QueueOptions) { opts.Durable = durable }
}

// QueueAutoDelete sets the AutoDelete flag.
func QueueAutoDelete(delete bool) QueueOption {
	return func(opts *QueueOptions) { opts.AutoDelete = delete }
}

// QueueExclusive sets the Exclusive flag.
func QueueExclusive(exclusive bool) QueueOption {
	return func(opts *QueueOptions) { opts.Exclusive = exclusive }
}

// QueueNoWait sets the NoWait flag.
func QueueNoWait(noWait bool) QueueOption {
	return func(opts *QueueOptions) { opts.NoWait = noWait }
}
// PublishOptions collects the parameters of a message publication.
type PublishOptions struct {
	Exchange  string
	Queue     string
	Key       string
	Mandatory bool
	Immediate bool
	// Message properties
	Headers         Table
	ContentType     string    // MIME content type
	ContentEncoding string    // MIME content encoding
	DeliveryMode    uint8     // Transient (0 or 1) or Persistent (2)
	Priority        uint8     // 0 to 9
	CorrelationId   string    // correlation identifier
	ReplyTo         string    // address to reply to (ex: RPC)
	Expiration      string    // message expiration spec
	MessageId       string    // message identifier
	Timestamp       time.Time // message timestamp
	Type            string    // message type name
	UserId          string    // creating user id - ex: "guest"
	AppId           string    // creating application id
	// The application specific payload of the message
	Body []byte
	// Other options
	Context context.Context
}

// PublishOption mutates a PublishOptions value.
type PublishOption func(*PublishOptions)

// PubExchange sets the target exchange.
func PubExchange(name string) PublishOption {
	return func(opts *PublishOptions) { opts.Exchange = name }
}

// PubQueue sets the target queue.
func PubQueue(queue string) PublishOption {
	return func(opts *PublishOptions) { opts.Queue = queue }
}

// RoutKey sets the routing key.
func RoutKey(key string) PublishOption {
	return func(opts *PublishOptions) { opts.Key = key }
}

// PubBody sets the message payload.
func PubBody(body []byte) PublishOption {
	return func(opts *PublishOptions) { opts.Body = body }
}
// ConsumeOptions collects the parameters of a consumer subscription.
type ConsumeOptions struct {
	Queue     string
	Consumer  string
	AutoAck   bool
	Exclusive bool
	// Other options
	Context context.Context
}

// ConsumeOption mutates a ConsumeOptions value.
type ConsumeOption func(*ConsumeOptions)

// ConsumeQueue sets the queue to consume from.
func ConsumeQueue(queue string) ConsumeOption {
	return func(opts *ConsumeOptions) { opts.Queue = queue }
}
|
package main
import (
"io"
"log"
"net/http"
)
// main registers the index and picture routes and serves on port 8080.
func main() {
	http.HandleFunc("/", serveIndexHTML)
	http.HandleFunc("/tody.jpg", serveTodyPicture)
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Fatalln(err)
	}
}
// serveIndexHTML responds with a minimal HTML page that embeds tody.jpg.
func serveIndexHTML(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Content-Type", "text/html; charset=utf-8")
	io.WriteString(w, `<img src="tody.jpg">`)
}
// serveTodyPicture serves the picture file from the parent directory.
// NOTE(review): "../tody.jpg" is resolved relative to the process working
// directory — confirm the binary is always launched from the expected place.
func serveTodyPicture(res http.ResponseWriter, req *http.Request) {
	http.ServeFile(res, req, "../tody.jpg")
}
|
package 数学
// isBoomerang reports whether the three given points form a "boomerang":
// they must not all lie on a single straight line. Two direction vectors
// from points[0] are parallel exactly when the points are collinear.
func isBoomerang(points [][]int) bool {
	a := NewVector(points[0][0]-points[1][0], points[0][1]-points[1][1])
	b := NewVector(points[0][0]-points[2][0], points[0][1]-points[2][1])
	return !isParallel(a, b)
}

// ----------- Vector ----------

// Vector is a 2D integer vector.
type Vector struct {
	X int
	Y int
}

// NewVector returns the vector (x, y).
func NewVector(x, y int) *Vector {
	return &Vector{x, y}
}

// isParallel reports whether two vectors are parallel, i.e. their 2D cross
// product is zero.
func isParallel(v1, v2 *Vector) bool {
	return getCross(v1, v2) == 0
}

// getCross returns the scalar 2D cross product of v1 and v2.
func getCross(v1, v2 *Vector) int {
	return v1.X*v2.Y - v1.Y*v2.X
}
|
package main
import (
"encoding/json"
"net/http"
"strconv"
auth "github.com/ahmedash95/authSDK"
"github.com/gorilla/mux"
)
// PostsCreateHandler decodes a Post from the request body, validates it,
// stamps it with the authenticated user's ID and persists it.
func PostsCreateHandler(w http.ResponseWriter, r *http.Request) {
	var post Post
	if err := json.NewDecoder(r.Body).Decode(&post); err != nil {
		jsonResponse(w, err, 400)
		return
	}
	if errs := post.Validate(); errs != nil {
		jsonResponse(w, errs, 422)
		return
	}
	post.UserID = auth.GetUser(r).ID
	GetDB().Create(&post)
	jsonResponse(w, post, 200)
}
// PostsUpdateHandler updates an existing post from a JSON request body.
// It validates the payload, verifies the post exists and belongs to the
// authenticated user, then copies the editable fields and saves the record.
func PostsUpdateHandler(w http.ResponseWriter, r *http.Request) {
	decoder := json.NewDecoder(r.Body)
	var newPost Post
	err := decoder.Decode(&newPost)
	if err != nil {
		jsonResponse(w, err, 400)
		return
	}
	validation_errors := newPost.Validate()
	if validation_errors != nil {
		jsonResponse(w, validation_errors, 422)
		return
	}
	vars := mux.Vars(r)
	postID, _ := strconv.Atoi(vars["id"])
	var post Post
	GetDB().Find(&post, postID)
	// A missing record leaves post.ID at its zero value, so comparing the
	// loaded ID with the requested one detects "not found".
	if post.ID != uint(postID) {
		jsonResponse(w, H{
			"message": "Invalid post id",
		}, 404)
		return
	}
	user := auth.GetUser(r)
	// NOTE(review): an ownership failure responds 404; 403 Forbidden would
	// be more conventional — confirm the API contract before changing.
	if post.UserID != user.ID {
		jsonResponse(w, H{
			"message": "This post doesn't belong to you",
		}, 404)
		return
	}
	// Copy only the editable fields onto the loaded record before saving.
	post.Title = newPost.Title
	post.Content = newPost.Content
	post.PublishDate = newPost.PublishDate
	post.IsDraft = newPost.IsDraft
	post.ImageURL = newPost.ImageURL
	GetDB().Save(&post)
	jsonResponse(w, post, 200)
}
// ShowPostHandler returns the post identified by the {id} route variable,
// or a 404 payload when no such post exists.
func ShowPostHandler(w http.ResponseWriter, r *http.Request) {
	postID, _ := strconv.Atoi(mux.Vars(r)["id"])
	var post Post
	GetDB().Find(&post, postID)
	if post.ID != uint(postID) {
		jsonResponse(w, H{"message": "Post not found"}, 404)
		return
	}
	jsonResponse(w, post, 200)
}
// ShowUserPosts lists a user's posts, paginated 20 per page, newest first.
// The owner sees drafts and published posts; other viewers only see
// published posts. Sets X-Posts-Total and X-Last-Page response headers.
func ShowUserPosts(w http.ResponseWriter, r *http.Request) {
	user := auth.GetUser(r)
	vars := mux.Vars(r)
	userID, _ := strconv.Atoi(vars["id"])
	var posts []Post
	var count int
	GetDB().Model(&Post{}).Where("user_id = ?", userID).Count(&count)
	page, _ := strconv.Atoi(r.URL.Query().Get("page"))
	perpage := 20
	// FIX: pages below 2 were mapped to page 0, producing offset -20 and
	// breaking the X-Last-Page computation for the first page. Clamp to
	// page 1 so the first page has offset 0.
	if page < 1 {
		page = 1
	}
	offset := perpage * (page - 1)
	isLastPage := 0
	if offset+perpage >= count {
		isLastPage = 1
	}
	var query = GetDB()
	if user.ID == userID {
		// Owners see all of their posts, drafts included.
		query = query.Where("user_id = ?", userID)
	} else {
		// FIX: other viewers were previously shown drafts (is_draft = 1).
		// Show only published posts, matching GetLastPosts' convention.
		query = query.Where("user_id = ? and is_draft = ?", userID, 0)
	}
	query.
		Select("id,title,SUBSTRING(content,0,150) as content,is_draft,image_url,publish_date,user_id,created_at,updated_at,deleted_at").
		Order("publish_date desc").
		Offset(offset).
		Limit(perpage).
		Find(&posts)
	w.Header().Add("X-Posts-Total", strconv.Itoa(count))
	w.Header().Add("X-Last-Page", strconv.Itoa(isLastPage))
	jsonResponse(w, posts, 200)
}
// GetLastPosts lists the most recent published posts, paginated 20 per
// page, newest first. Sets X-Posts-Total and X-Last-Page response headers.
// NOTE(review): count includes drafts while the listing filters them out —
// confirm whether X-Posts-Total should count only published posts.
func GetLastPosts(w http.ResponseWriter, r *http.Request) {
	var posts []Post
	var count int
	GetDB().Model(&Post{}).Count(&count)
	page, _ := strconv.Atoi(r.URL.Query().Get("page"))
	perpage := 20
	// FIX: pages below 2 were mapped to page 0, producing offset -20 and
	// breaking the X-Last-Page computation for the first page. Clamp to
	// page 1 so the first page has offset 0.
	if page < 1 {
		page = 1
	}
	offset := perpage * (page - 1)
	isLastPage := 0
	if offset+perpage >= count {
		isLastPage = 1
	}
	GetDB().
		Select("id,title,SUBSTRING(content,0,150) as content,is_draft,image_url,publish_date,user_id,created_at,updated_at,deleted_at").
		Order("publish_date desc").
		Where("is_draft = ?", 0).
		Offset(offset).
		Limit(perpage).
		Find(&posts)
	w.Header().Add("X-Posts-Total", strconv.Itoa(count))
	w.Header().Add("X-Last-Page", strconv.Itoa(isLastPage))
	jsonResponse(w, posts, 200)
}
|
/******************************************************************************
Blogo's main package
/*****************************************************************************/
package main
import (
"fmt"
"github.com/eligiobz/blogo/handlers"
"net/http"
)
// Unpack assets built with go-resources
// github.com/omeid/go-resources
var Assets http.FileSystem
// main mounts the blog handlers and serves them on port 3000.
func main() {
	Assets = http.Dir("./assets")
	// NOTE(review): http.Dir is a string type, so this interface value can
	// never be nil and the check below can never fire — consider checking
	// that the directory actually exists instead.
	if Assets == nil {
		panic("No Assets")
	}
	r := handlers.BuildHandelers()
	fmt.Println("Listening @ port 3000")
	// FIX: the error from ListenAndServe was silently discarded, so a
	// failure to bind the port previously exited with no diagnostics.
	if err := http.ListenAndServe(":3000", r); err != nil {
		panic(err)
	}
}
|
// Written in 2014 by Petar Maymounkov.
//
// It helps future understanding of past knowledge to save
// this notice, so peers of other times and backgrounds can
// see history clearly.
// Package circuit provides Escher gates
// for building dynamic cloud applications
// using the circuit runtime of http://gocircuit.org
// The following excludes this file from defualt compilation.
// To include, you would have to use this command:
// go build -tags=plugin_faculty_gocircuit ./...
// But We actually compile this into a Go plugin with:
// go build -v -buildmode=plugin -o bin/plugins/faculty/gocircuit.so ./pkg/faculty/gocircuit/
// +build plugin_faculty_gocircuit
package main
import (
"math/rand"
"strconv"
"time"
"github.com/hoijui/circuit/client"
"github.com/hoijui/escher/pkg/be"
"github.com/hoijui/escher/pkg/faculty"
)
// client *client.Client
// Init prepares the plugin: it optionally dials a circuit cluster using the
// first discovery address, seeds the package PRNG, and registers the
// Process and Docker materializers with the faculty registry.
func Init(discover []string) {
	program = &Program{}
	if len(discover) > 0 && discover[0] != "" {
		// Only dial when a non-empty discovery address is provided.
		program.Client = client.DialDiscover(discover[0], nil)
	}
	rand.Seed(time.Now().UnixNano())
	faculty.Register(be.NewMaterializer(&Process{}), "element", "Process")
	faculty.Register(be.NewMaterializer(&Docker{}), "element", "Docker")
}
// Program wraps the process-wide circuit client. The embedded Client is nil
// when Init was given no discovery address.
type Program struct {
	*client.Client
}

// program is the package-level singleton initialized by Init.
var program *Program
// ChooseID returns a random textual ID rendered in base 20. IDs are drawn
// from a 63-bit space, so collisions are improbable but not impossible.
func ChooseID() string {
	id := rand.Int63()
	return strconv.FormatUint(uint64(id), 20)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.