text stringlengths 11 4.05M |
|---|
package testbed
import (
"net/http"
"time"
"appengine"
ds "appengine/datastore"
"appengine/memcache"
"github.com/vmihailenco/appengine/context"
"launchpad.net/gocheck"
)
var (
	// kinds lists the datastore kinds registered via RegisterDsKind;
	// FlushDs deletes all entities of these kinds between test runs.
	kinds = make([]string, 0)
	// ctx holds the context captured by the most recent TestsHandler
	// request; exposed to tests through NewContext.
	ctx context.Context
)
// RegisterDsKind records a datastore kind so that FlushDs will delete
// all of its entities when flushing test state.
func RegisterDsKind(k string) {
	kinds = append(kinds, k)
}
// FlushDs deletes every entity of each registered kind from the
// datastore, resetting state between test runs.
//
// Bug fix: the query must be keys-only — Iterator.Next(nil) is only
// valid for keys-only queries; on a full query the nil destination
// cannot receive the entity and Next returns an error.
func FlushDs(c appengine.Context) error {
	for _, kind := range kinds {
		it := ds.NewQuery(kind).KeysOnly().Run(c)
		var keys []*ds.Key
		for {
			key, err := it.Next(nil)
			if err == ds.Done {
				break
			} else if err != nil {
				return err
			}
			keys = append(keys, key)
		}
		if len(keys) == 0 {
			continue // nothing to delete for this kind
		}
		if err := ds.DeleteMulti(c, keys); err != nil {
			return err
		}
	}
	return nil
}
// NewContext returns the context captured by the most recent
// TestsHandler request.
// NOTE(review): ctx is nil until TestsHandler has run at least once —
// presumably callers only use this from within a test request; confirm.
func NewContext() context.Context {
	return ctx
}
// TestsHandler is an HTTP endpoint that resets application state
// (memcache plus every registered datastore kind) and then runs the
// gocheck test suite, writing the report to the response.
// The "f" form value filters which tests run (gocheck RunConf.Filter).
func TestsHandler(w http.ResponseWriter, r *http.Request) {
	// Capture the request context so tests can retrieve it via NewContext.
	ctx = context.New(r)
	if err := memcache.Flush(ctx); err != nil {
		panic(err)
	}
	if err := FlushDs(ctx); err != nil {
		panic(err)
	}
	conf := &gocheck.RunConf{
		Output:        w,
		Stream:        false,
		Filter:        r.FormValue("f"),
		Verbose:       true,
		Benchmark:     false,
		BenchmarkTime: time.Second,
	}
	result := gocheck.RunAll(conf)
	w.Write([]byte(result.String()))
}
|
package main
import (
"encoding/json"
"fmt"
"testing"
"github.com/aws/aws-lambda-go/events"
)
// testSum invokes the Lambda handler with a JSON body {"x": a, "y": b}
// and asserts that it responds 200 with a JSON Answer whose Sum equals
// expected.
// Fixes: t.Fatal(fmt.Sprintf(...)) replaced with t.Fatalf; the
// duplicated "should be should be" message corrected; the underlying
// errors are now included in the failure messages instead of dropped.
func testSum(t *testing.T, a, b, expected int) {
	res, err := handler(events.APIGatewayV2HTTPRequest{
		Body:            fmt.Sprintf(`{"x": %d, "y": %d}`, a, b),
		IsBase64Encoded: false,
	})
	if err != nil {
		t.Fatalf("handler returned unexpected error: %v", err)
	}
	if res.StatusCode != 200 {
		t.Fatalf("StatusCode should be 200, got %d", res.StatusCode)
	}
	var sum Answer
	if err := json.Unmarshal([]byte(res.Body), &sum); err != nil {
		t.Fatalf("couldn't unmarshal json body %q: %v", res.Body, err)
	}
	if sum.Sum != expected {
		t.Fatalf("Sum should be %d, got %d", expected, sum.Sum)
	}
	fmt.Printf("%d + %d = %d\n", a, b, sum.Sum)
}
// TestHandler exercises the handler with a table of input pairs and
// their expected sums.
func TestHandler(t *testing.T) {
	t.Run("Successful Request", func(t *testing.T) {
		cases := []struct{ a, b, want int }{
			{0, 0, 0},
			{1, 0, 1},
			{100, 200, 300},
			{500, 400, 900},
			{1000, 2000, 3000},
			{100, 900, 1000},
			{99, 1, 100},
		}
		for _, tc := range cases {
			testSum(t, tc.a, tc.b, tc.want)
		}
	})
}
|
package main
import (
	"fmt"
	"os"

	"github.com/jrapoport/gothic/cmd/cli/root"
	"github.com/jrapoport/gothic/cmd/cli/user"
)
// init registers the CLI subcommands on the root command.
// NOTE(review): codeCmd and migrateCmd are defined elsewhere in this
// package.
func init() {
	root.AddCommand(user.Cmd)
	root.AddCommand(codeCmd)
	root.AddCommand(migrateCmd)
}
// main runs the root CLI command.
// Bug fix: exit with a non-zero status on failure so shell callers and
// scripts can detect that the command failed (previously errors were
// printed but the process still exited 0).
func main() {
	if err := root.Execute(); err != nil {
		fmt.Printf("Error: %s\n\n", err)
		os.Exit(1)
	}
}
|
package main
import (
"fmt"
mathematic "golang-book-tasks/chapter-11/math"
)
// main demonstrates the chapter-11 math helpers on a fixed sample slice.
func main() {
	slice := []float64{231.213, 123.213, 4522.2, 2.44}
	fmt.Println("Our slice: ", slice)
	// Bug fix: Average was called with an empty slice literal
	// ([]float64{}) instead of the sample slice, so it never reported
	// the average of the data being demonstrated.
	fmt.Println("Average value: ", mathematic.Average(slice))
	fmt.Println("Max value: ", mathematic.Max(slice))
	fmt.Println("Min value: ", mathematic.Min(slice))
}
|
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package physicalplan
import (
"context"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/physicalplan/replicaoracle"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
// SpanResolver resolves key spans to their respective ranges and lease holders.
// Used for planning physical execution of distributed SQL queries.
//
// Sample usage for resolving a bunch of spans:
//
// func resolveSpans(
// ctx context.Context,
// it *execinfra.SpanResolverIterator,
// spans ...spanWithDir,
// ) ([][]kv.ReplicaInfo, error) {
// lr := execinfra.NewSpanResolver(
// distSender, nodeDescs, nodeDescriptor,
// execinfra.BinPackingLeaseHolderChoice)
// it := lr.NewSpanResolverIterator(nil)
// res := make([][]kv.ReplicaInfo, 0)
// for _, span := range spans {
// repls := make([]kv.ReplicaInfo, 0)
// for it.Seek(ctx, span.Span, span.dir); ; it.Next(ctx) {
// if !it.Valid() {
// return nil, it.Error()
// }
// repl, err := it.ReplicaInfo(ctx)
// if err != nil {
// return nil, err
// }
// repls = append(repls, repl)
// if !it.NeedAnother() {
// break
// }
// }
// res = append(res, repls)
// }
// return res, nil
// }
//
//
type SpanResolver interface {
	// NewSpanResolverIterator creates a new SpanResolverIterator.
	// Txn is used for testing and for determining if follower reads are possible.
	// The sample usage above passes nil, so a nil txn is accepted.
	NewSpanResolverIterator(txn *kv.Txn) SpanResolverIterator
}
// SpanResolverIterator is used to iterate over the ranges composing a key span.
// Obtain one from SpanResolver.NewSpanResolverIterator.
type SpanResolverIterator interface {
	// Seek positions the iterator on the start of a span (span.Key or
	// span.EndKey, depending on ScanDir). Note that span.EndKey is exclusive,
	// regardless of scanDir.
	//
	// After calling this, ReplicaInfo() will return information about the range
	// containing the start key of the span (or the end key, if the direction is
	// Descending).
	//
	// NeedAnother() will return true until the iterator is positioned on or after
	// the end of the span. Possible errors encountered should be checked for
	// with Valid().
	//
	// Seek can be called repeatedly on the same iterator. To make optimal uses of
	// caches, Seek()s should be performed on spans sorted according to the
	// scanDir (if Descending, then the span with the highest keys should be
	// Seek()ed first).
	//
	// scanDir changes the direction in which Next() will advance the iterator.
	Seek(ctx context.Context, span roachpb.Span, scanDir kvcoord.ScanDirection)
	// NeedAnother returns true if the current range is not the last for the span
	// that was last Seek()ed.
	NeedAnother() bool
	// Next advances the iterator to the next range. The next range contains the
	// last range's end key (but it does not necessarily start there, because of
	// asynchronous range splits and caching effects).
	// Possible errors encountered should be checked for with Valid().
	Next(ctx context.Context)
	// Valid returns false if an error was encountered by the last Seek() or Next().
	Valid() bool
	// Error returns any error encountered by the last Seek() or Next().
	Error() error
	// Desc returns the current RangeDescriptor.
	Desc() roachpb.RangeDescriptor
	// ReplicaInfo returns information about the replica that has been picked for
	// the current range.
	// A RangeUnavailableError is returned if there's no information in nodeDescs
	// about any of the replicas.
	ReplicaInfo(ctx context.Context) (roachpb.ReplicaDescriptor, error)
}
// spanResolver implements SpanResolver.
type spanResolver struct {
	// st carries cluster settings; forwarded into the replica oracle.
	st *cluster.Settings
	// distSender backs the RangeIterators created for span iterators.
	distSender *kvcoord.DistSender
	// nodeDesc describes the planning (gateway) node.
	nodeDesc roachpb.NodeDescriptor
	// oracle chooses replicas for ranges (see ReplicaInfo).
	oracle replicaoracle.Oracle
}

var _ SpanResolver = &spanResolver{}
// NewSpanResolver creates a new spanResolver.
func NewSpanResolver(
	st *cluster.Settings,
	distSender *kvcoord.DistSender,
	nodeDescs kvcoord.NodeDescStore,
	nodeDesc roachpb.NodeDescriptor,
	rpcCtx *rpc.Context,
	policy replicaoracle.Policy,
) SpanResolver {
	// Build the oracle configuration first, then assemble the resolver.
	oracleCfg := replicaoracle.Config{
		NodeDescs:  nodeDescs,
		NodeDesc:   nodeDesc,
		Settings:   st,
		RPCContext: rpcCtx,
	}
	return &spanResolver{
		st:         st,
		distSender: distSender,
		nodeDesc:   nodeDesc,
		oracle:     replicaoracle.NewOracle(policy, oracleCfg),
	}
}
// spanResolverIterator implements the SpanResolverIterator interface.
type spanResolverIterator struct {
	// txn is the transaction using the iterator.
	txn *kv.Txn
	// it is a wrapped RangeIterator.
	it *kvcoord.RangeIterator
	// oracle is used to choose a lease holder for ranges when one isn't present
	// in the cache.
	oracle replicaoracle.Oracle
	// curSpan is the resolved span of the last Seek(); used by NeedAnother.
	curSpan roachpb.RSpan
	// dir is the direction set by the last Seek()
	dir kvcoord.ScanDirection
	// queryState memoizes replica assignments so a range keeps the same
	// replica across calls (see ReplicaInfo).
	queryState replicaoracle.QueryState
	// err records a span-address resolution failure from Seek().
	err error
}

var _ SpanResolverIterator = &spanResolverIterator{}
// NewSpanResolverIterator creates a new SpanResolverIterator.
func (sr *spanResolver) NewSpanResolverIterator(txn *kv.Txn) SpanResolverIterator {
	iter := &spanResolverIterator{
		txn:        txn,
		oracle:     sr.oracle,
		it:         kvcoord.NewRangeIterator(sr.distSender),
		queryState: replicaoracle.MakeQueryState(),
	}
	return iter
}
// Valid is part of the SpanResolverIterator interface. The iterator is
// valid only when no resolution error is pending and the underlying
// range iterator is itself valid.
func (it *spanResolverIterator) Valid() bool {
	if it.err != nil {
		return false
	}
	return it.it.Valid()
}
// Error is part of the SpanResolverIterator interface. A pending
// resolution error takes precedence over the wrapped iterator's error.
func (it *spanResolverIterator) Error() error {
	if err := it.err; err != nil {
		return err
	}
	return it.it.Error()
}
// Seek is part of the SpanResolverIterator interface. It positions the
// iterator at span's boundary key implied by scanDir, skipping the
// underlying seek entirely when the key already falls inside the
// currently cached range descriptor and the direction is unchanged.
func (it *spanResolverIterator) Seek(
	ctx context.Context, span roachpb.Span, scanDir kvcoord.ScanDirection,
) {
	rSpan, err := keys.SpanAddr(span)
	if err != nil {
		it.err = err
		return
	}
	oldDir := it.dir
	it.curSpan = rSpan
	it.dir = scanDir
	// Ascending scans start at the span's start key; descending scans
	// start at its (exclusive) end key.
	var seekKey roachpb.RKey
	if scanDir == kvcoord.Ascending {
		seekKey = it.curSpan.Key
	} else {
		seekKey = it.curSpan.EndKey
	}
	// Check if the start of the span falls within the descriptor on which we're
	// already positioned. If so, and if the direction also corresponds, there's
	// no need to change the underlying iterator's state.
	if it.dir == oldDir && it.it.Valid() {
		reverse := (it.dir == kvcoord.Descending)
		desc := it.it.Desc()
		if (reverse && desc.ContainsKeyInverted(seekKey)) ||
			(!reverse && desc.ContainsKey(seekKey)) {
			if log.V(1) {
				log.Infof(ctx, "not seeking (key=%s); existing descriptor %s", seekKey, desc)
			}
			return
		}
	}
	if log.V(1) {
		log.Infof(ctx, "seeking (key=%s)", seekKey)
	}
	it.it.Seek(ctx, seekKey, scanDir)
}
// Next is part of the SpanResolverIterator interface. Calling Next on
// an invalid iterator is a programming error and panics.
func (it *spanResolverIterator) Next(ctx context.Context) {
	if it.Valid() {
		it.it.Next(ctx)
		return
	}
	panic(it.Error())
}
// NeedAnother is part of the SpanResolverIterator interface. It reports
// whether the current range is not the last one covering the span set
// by the last Seek().
func (it *spanResolverIterator) NeedAnother() bool {
	return it.it.NeedAnother(it.curSpan)
}
// Desc is part of the SpanResolverIterator interface. It returns a copy
// of the descriptor of the range the iterator is positioned on.
func (it *spanResolverIterator) Desc() roachpb.RangeDescriptor {
	return *it.it.Desc()
}
// ReplicaInfo is part of the SpanResolverIterator interface. It returns
// the replica chosen for the current range, memoizing the choice in
// queryState so the same range maps to the same replica for the whole
// query.
func (it *spanResolverIterator) ReplicaInfo(
	ctx context.Context,
) (roachpb.ReplicaDescriptor, error) {
	// Calling this on an invalid iterator is a programming error.
	if !it.Valid() {
		panic(it.Error())
	}
	// If we've assigned the range before, return that assignment.
	rngID := it.it.Desc().RangeID
	if repl, ok := it.queryState.AssignedRanges[rngID]; ok {
		return repl, nil
	}
	// Otherwise ask the oracle, passing the cached leaseholder and
	// closed-timestamp policy as hints.
	repl, err := it.oracle.ChoosePreferredReplica(
		ctx, it.txn, it.it.Desc(), it.it.Leaseholder(), it.it.ClosedTimestampPolicy(), it.queryState)
	if err != nil {
		return roachpb.ReplicaDescriptor{}, err
	}
	it.queryState.RangesPerNode[repl.NodeID]++
	it.queryState.AssignedRanges[rngID] = repl
	return repl, nil
}
|
package structs
// VideoDomains is a collection of VideoDomain rows.
type VideoDomains []VideoDomain

// FindAll loads every row of the video_domains table into v.
func (v *VideoDomains) FindAll() error {
	// Bug fix: v is already a *VideoDomains; `&v` handed gorm a
	// **VideoDomains (pointer to pointer) instead of the expected
	// pointer-to-slice destination.
	return BroadvidDB.Table("video_domains").Find(v).Error
}
// VideoDomain is one row of the video_domains table; form tags map the
// fields for request binding.
type VideoDomain struct {
	ID              int64  `form:"id"`
	Host            string `form:"host"`
	GaID            string `form:"ga_id"`
	ThemeID         int64  `form:"theme_id"`
	DefaultRedirect int64  `form:"default_redirect"`
}
// Find loads the video domain with the given primary key into v.
func (v *VideoDomain) Find(id string) error {
	// Bug fix: `&v` handed gorm a **VideoDomain; pass the struct
	// pointer itself as the destination.
	return BroadvidDB.Find(v, id).Error
}
// Save persists v, inserting or updating by primary key.
func (v *VideoDomain) Save() error {
	// Bug fix: `&v` handed gorm a **VideoDomain; pass the struct
	// pointer itself.
	return BroadvidDB.Save(v).Error
}
|
package main
import (
"container/list"
"fmt"
"os"
"strconv"
"strings"
)
// Point is an input coordinate labelled with a single-character id.
type Point struct {
	x  int
	y  int
	id string
}

// Location is one board cell: the currently closest point, its
// Manhattan distance, and whether that distance is tied between points.
type Location struct {
	closest  *Point
	distance int
	tie      bool
}
// main flood-fills each input point's closest-by-Manhattan-distance
// region onto the board, then reports region sizes, excluding regions
// that touch the 0..349 window border (those extend infinitely).
func main() {
	points := read()
	// The board is 400x400 but only the 0..349 window is scored below.
	board := [400][400]Location{}
	// fillFrom(&points[0], &board)
	// fillFrom(&points[27], &board)
	// print(&board)
	for i, _ := range points {
		fillFrom(&points[i], &board)
		// Debug: render the board after each point's fill.
		print(&board)
	}
	count := map[string]int{}
	edgesstr := map[string]int{}
	for i := 0; i <= 349; i++ {
		for j := 0; j <= 349; j++ {
			if board[i][j].closest == nil {
				continue
			}
			// Regions reaching the window border are infinite — record
			// their owners so they can be excluded.
			if i == 0 || j == 0 || i == 349 || j == 349 {
				edgesstr[board[i][j].closest.id] = 1
			}
			// Tied cells belong to no region.
			if !board[i][j].tie {
				count[board[i][j].closest.id]++
			}
		}
	}
	for k, _ := range edgesstr {
		fmt.Println("edgestr: ", k)
	}
	for k, _ := range count {
		if edgesstr[k] != 1 {
			fmt.Printf("%d %s\n", count[k], k)
		}
	}
	fmt.Println(count)
}
// read parses "input.txt" — one "x, y" pair per line, at most 100 —
// labelling each point with successive characters starting at 'A'.
func read() []Point {
	f, err := os.Open("input.txt")
	if err != nil {
		// Bug fix: the open error was silently discarded, so a missing
		// input file caused a nil-file panic inside fmt.Fscanf.
		fmt.Println("cannot open input.txt:", err)
		return nil
	}
	defer f.Close()
	id := 'A'
	points := []Point{}
	for i := 0; i < 100; i++ {
		var x, y int
		if _, err := fmt.Fscanf(f, "%d, %d\n", &x, &y); err != nil {
			break
		}
		points = append(points, Point{x: x, y: y, id: string(id)})
		fmt.Printf("%d,%d %s\n", x, y, string(id))
		id = id + 1
	}
	return points
}
// print renders the 350x350 visible window of the board: each cell
// shows the id of its closest point, '.' for a tie, ' ' when unfilled.
func print(board *[400][400]Location) {
	var out strings.Builder
	for row := 0; row < 350; row++ {
		out.WriteString(strconv.Itoa(row) + " ")
		for col := 0; col < 350; col++ {
			cell := board[col][row]
			switch {
			case cell.closest != nil && !cell.tie:
				out.WriteString(cell.closest.id)
			case cell.tie:
				out.WriteString(".")
			default:
				out.WriteString(" ")
			}
		}
		out.WriteString("\n")
	}
	out.WriteString("\n")
	fmt.Println(out.String())
}
// fillFrom flood-fills the board with p's Manhattan-distance claims,
// starting from p's own coordinate and spreading through a LIFO
// worklist until no candidate cells remain.
func fillFrom(p *Point, board *[400][400]Location) {
	stack := list.New()
	stack.PushBack(*p)
	for stack.Len() > 0 {
		fillFromHelper(p, board, stack)
	}
	//fillFromHelper(*p.x-1, *p.y, p, board)
}
// fillFromHelper pops one coordinate off the stack and claims it for p
// when p is strictly closer (Manhattan distance) than the current
// owner; equal distances are marked as ties. The four neighbours are
// pushed whenever the cell's state changed.
func fillFromHelper(p *Point, board *[400][400]Location, stack *list.List) {
	e := stack.Back()
	coord := e.Value.(Point)
	stack.Remove(e)
	x := coord.x
	y := coord.y
	if x < 0 || x > 350 || y < 0 || y > 350 {
		return
	}
	p_distance := abs(p.x-x) + abs(p.y-y)
	if x == 3 && y == 7 {
		// Bug fix: %s on a Location struct and an int printed
		// "%!s(...)" noise; %v/%d render the debug values correctly.
		fmt.Printf("board: %v, p_distance: %d workiing: %v\n", board[x][y], p_distance, p)
	}
	if board[x][y].closest == nil || board[x][y].distance > p_distance {
		board[x][y].closest = p
		board[x][y].distance = p_distance
		board[x][y].tie = false
	} else if board[x][y].closest == p {
		return // already visited
	} else if board[x][y].distance == p_distance {
		// Tie: another point is equally close.
		// NOTE(review): overwriting closest with p discards the prior
		// owner; the original author flagged this as "probably won't
		// work" — confirm against the puzzle's tie semantics.
		board[x][y].closest = p
		board[x][y].tie = true
		// probably won't work and needs some more traveling
	} else {
		return
	}
	stack.PushBack(Point{x: x - 1, y: y})
	stack.PushBack(Point{x: x + 1, y: y})
	stack.PushBack(Point{x: x, y: y - 1})
	stack.PushBack(Point{x: x, y: y + 1})
}
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
|
package script
import "divsperf/script/parse"
// Register adds addon to the global parse.Addons registry, keyed by
// its name. An already-registered name is left untouched.
func Register(addon parse.Addon) {
	name := addon.Name()
	if _, exists := parse.Addons[name]; exists {
		return
	}
	parse.Addons[name] = addon
}
// TODO: execute the outermost blocks level by level — in parallel within a level, serially across levels. |
package users
import (
"io/ioutil"
"net/http"
"github.com/gin-gonic/gin"
"encoding/json"
"strconv"
)
// HTTPService exposes the user routes for registration on a gin engine.
type HTTPService interface {
	Register(*gin.Engine)
}
// httpService implements HTTPService as a flat list of routes.
type httpService struct {
	endpoints []*endpoint
}

// endpoint binds one HTTP method and path to its gin handler.
type endpoint struct {
	method   string
	path     string
	function gin.HandlerFunc
}
// NewHTTPTransport builds the HTTPService exposing every user endpoint
// backed by s.
func NewHTTPTransport(s Service) HTTPService {
	return httpService{endpoints: makeEndpoints(s)}
}
// makeEndpoints wires each user route to its handler built from s.
func makeEndpoints(s Service) []*endpoint {
	return []*endpoint{
		{method: "GET", path: "/users", function: getAll(s)},
		{method: "POST", path: "/users/register", function: registerUser(s)},
		{method: "GET", path: "/users/:id", function: getByID(s)},
		{method: "DELETE", path: "/users/:id", function: deleteByID(s)},
		{method: "POST", path: "/users/login", function: login(s)},
		{method: "POST", path: "/users/changepassword/:id", function: changePassword(s)},
	}
}
// changePassword returns a handler that updates the password of the
// user identified by the :id route parameter; the new password comes
// from the JSON request body.
func changePassword(s Service) gin.HandlerFunc {
	return func(c *gin.Context) {
		var userData User
		body := c.Request.Body
		x, _ := ioutil.ReadAll(body)
		// Bug fix: the id was parsed as base 6 with a 12-bit limit
		// (ParseInt(..., 6, 12)), so ordinary decimal ids failed or
		// overflowed; route ids are decimal int64s.
		ID, _ := strconv.ParseInt(c.Param("id"), 10, 64)
		json.Unmarshal(x, &userData)
		if userData.Password != "" {
			err := s.ChangePassword(ID, userData.Password)
			c.JSON(http.StatusOK, gin.H{
				"error": err,
			})
		} else {
			c.JSON(http.StatusConflict, gin.H{
				"error": "Porfavor escriba una contraseña",
			})
		}
	}
}
// login returns a handler that authenticates the credentials in the
// JSON body and responds with the resulting token and error.
func login(s Service) gin.HandlerFunc {
	return func(c *gin.Context) {
		var userData User
		payload, _ := ioutil.ReadAll(c.Request.Body)
		json.Unmarshal(payload, &userData)
		token, err := s.Login(userData.Email, userData.Password)
		c.JSON(http.StatusOK, gin.H{
			"token": token,
			"error": err,
		})
	}
}
// getByID returns a handler that fetches the user whose id is given in
// the :id route parameter.
func getByID(s Service) gin.HandlerFunc {
	return func(c *gin.Context) {
		// Bug fix: the id was parsed as base 6 with a 12-bit limit;
		// route ids are decimal int64s.
		ID, _ := strconv.ParseInt(c.Param("id"), 10, 64)
		user, err := s.GetByID(ID)
		c.JSON(http.StatusOK, gin.H{
			"user":  user,
			"error": err,
		})
	}
}
// deleteByID returns a handler that deletes the user whose id is given
// in the :id route parameter.
func deleteByID(s Service) gin.HandlerFunc {
	return func(c *gin.Context) {
		// Bug fix: the id was parsed as base 6 with a 12-bit limit;
		// route ids are decimal int64s.
		ID, _ := strconv.ParseInt(c.Param("id"), 10, 64)
		err := s.DeleteByID(ID)
		c.JSON(http.StatusOK, gin.H{
			"error": err,
		})
	}
}
// registerUser returns a handler that creates a new user from the JSON
// body; name, email and password are all required.
func registerUser(s Service) gin.HandlerFunc {
	return func(c *gin.Context) {
		payload, _ := ioutil.ReadAll(c.Request.Body)
		var userData User
		json.Unmarshal(payload, &userData)
		user := User{0, userData.Name, userData.Email, userData.Password}
		if user.Name == "" || user.Email == "" || user.Password == "" {
			c.JSON(http.StatusConflict, gin.H{
				"Error": "Porfavor ingrese todos los campos",
			})
			return
		}
		ID, err := s.RegisterUser(user)
		c.JSON(http.StatusCreated, gin.H{
			"ID":  ID,
			"err": err,
		})
	}
}
// getAll returns a handler that lists every user.
func getAll(s Service) gin.HandlerFunc {
	return func(c *gin.Context) {
		users := s.GetAll()
		c.JSON(http.StatusOK, gin.H{"users": users})
	}
}
func (s httpService) Register( r *gin.Engine){
for _, e:= range s.endpoints {
r.Handle(e.method, e.path, e.function)
}
} |
package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/gorilla/mux"
)
// main starts the fbm-bot HTTP server, exposing the Messenger webhook
// verification endpoint at GET /webhook.
func main() {
	fmt.Printf("Hi World! This is fbm-bot!")
	router := mux.NewRouter()
	router.HandleFunc("/webhook", Verify).Methods("GET")
	addr := fmt.Sprintf(":%v", getPort())
	if err := http.ListenAndServe(addr, router); err != nil {
		log.Fatalln(err)
	}
}
// getPort returns the PORT environment variable, defaulting to "8080"
// when it is unset or empty.
func getPort() string {
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	return port
}
|
package interpreter
import (
"bytes"
"io"
"testing"
"github.com/pgavlin/warp/bench/data"
"github.com/pgavlin/warp/bench/flate"
"github.com/pgavlin/warp/bench/flate_go"
"github.com/pgavlin/warp/go_wasm_exec"
"github.com/pgavlin/warp/wasi"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFlate(t *testing.T) {
var stdout bytes.Buffer
err := wasi.Run("flate", NewModuleDefinition(flate.Module), &wasi.RunOptions{
Options: &wasi.Options{
Stdin: bytes.NewReader(data.Enwik8[:1<<20]),
Stdout: &stdout,
},
})
require.NoError(t, err)
assert.Equal(t, data.Enwik8[:1<<20], stdout.Bytes())
}
func TestFlateGo(t *testing.T) {
var stdout bytes.Buffer
err := go_wasm_exec.Run("flate", NewModuleDefinition(flate_go.Module), &go_wasm_exec.Options{
Stdin: bytes.NewReader(data.Enwik8[:1<<20]),
Stdout: &stdout,
})
require.NoError(t, err)
assert.Equal(t, data.Enwik8[:1<<20], stdout.Bytes())
}
func BenchmarkFlate(b *testing.B) {
for i := 0; i < b.N; i++ {
err := wasi.Run("flate", NewModuleDefinition(flate.Module), &wasi.RunOptions{
Options: &wasi.Options{
Stdin: bytes.NewReader(data.Enwik8[:1<<16]),
Stdout: io.Discard,
},
})
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkFlateGo(b *testing.B) {
for i := 0; i < b.N; i++ {
err := go_wasm_exec.Run("flate", NewModuleDefinition(flate_go.Module), &go_wasm_exec.Options{
Stdin: bytes.NewReader(data.Enwik8[:1<<16]),
Stdout: io.Discard,
})
if err != nil {
b.Fatal(err)
}
}
}
|
package chapter1
// ContainDuplicateCharBy2For reports whether target contains the same
// rune at more than one position, using a nested scan. Only pairs with
// the second index after the first are compared; each unordered pair is
// still inspected once, so the result is unchanged.
func ContainDuplicateCharBy2For(target string) bool {
	for i, r := range target {
		for u, s := range target {
			if u > i && r == s {
				return true
			}
		}
	}
	return false
}
// ContainDuplicateCharByMap reports whether target contains the same
// rune more than once, in a single O(n) pass.
// Improvements: the map is now a set (map[rune]struct{}) instead of
// map[int32]int32 storing each rune as its own value, and it is
// pre-sized instead of the odd make(map, 0).
func ContainDuplicateCharByMap(target string) bool {
	seen := make(map[rune]struct{}, len(target))
	for _, r := range target {
		if _, ok := seen[r]; ok {
			return true
		}
		seen[r] = struct{}{}
	}
	return false
}
|
package main
// NetworksResponse is the JSON payload wrapping a list of networks.
type NetworksResponse struct {
	Networks []Network `json:"networks"`
}

// NetworkResponse is the JSON payload wrapping a single network.
type NetworkResponse struct {
	Network Network `json:"network"`
}

// Network is a named network with a geographic location.
type Network struct {
	ID       string   `json:"id"`
	Name     string   `json:"name"`
	Location Location `json:"location"`
}

// Location is a city-level geographic position.
type Location struct {
	City      string  `json:"city"`
	Country   string  `json:"country"`
	Latitude  float64 `json:"latitude"`
	Longitude float64 `json:"longitude"`
}
|
/*
Copyright 2014 Jiang Le
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package models
import (
//"fmt"
"code.google.com/p/go-uuid/uuid"
"errors"
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/cache"
"github.com/astaxie/beego/orm"
"github.com/disintegration/imaging"
"github.com/naokij/gotalk/setting"
"github.com/naokij/gotalk/utils"
"image"
"image/gif"
"image/jpeg"
"image/png"
"io"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"regexp"
"strings"
"time"
)
// User is the forum user model persisted by beego orm.
type User struct {
	Id       int
	Username string `orm:"size(30);unique"`
	Nickname string `orm:"size(30)"`
	// Password holds the salted md5 hash (see SetPassword), never the
	// plain text.
	Password string `orm:"size(128)"`
	Url      string `orm:"size(100)"`
	Company  string `orm:"size(30)"`
	Location string `orm:"size(30)"`
	Email    string `orm:"size(80);unique"`
	// Avatar is the 32-char id of the uploaded avatar image set; ""
	// means fall back to gravatar (see AvatarUrl).
	Avatar      string `orm:"size(32)"`
	Info        string ``
	Weibo       string `orm:"size(30)"`
	WeChat      string `orm:"size(20)"`
	Qq          string `orm:"size(20)"`
	PublicEmail bool   ``
	// Denormalized counters, refreshed by Follow/UnFollow.
	Followers       int ``
	Following       int ``
	FavTopics       int ``
	Topics          int
	Comments        int
	Reputation      int
	Credits         int
	ExcellentTopics int
	IsAdmin         bool `orm:"index"`
	IsActive        bool `orm:"index"`
	IsBanned        bool `orm:"index"`
	// Salt is the random per-user salt mixed into Password.
	Salt    string    `orm:"size(6)"`
	Created time.Time `orm:"auto_now_add;index"`
	Updated time.Time `orm:"auto_now"`
}
const (
	// activeCodeLife is the lifetime of an account activation code.
	// NOTE(review): units are not visible here; GenerateActivateCode
	// caches codes for 3600 seconds — confirm these constants are used
	// and in which unit.
	activeCodeLife = 180
	// resetPasswordCodeLife has no explicit value, so Go repeats the
	// previous expression: it is also 180. Confirm this is intended and
	// not a missing assignment.
	resetPasswordCodeLife
	// UsernameRegex accepts only Han characters, ASCII letters and digits.
	UsernameRegex = `^[\p{Han}a-zA-Z0-9]+$`
)
// Insert persists m as a new row.
func (m *User) Insert() error {
	_, err := orm.NewOrm().Insert(m)
	return err
}
// Read loads m from the database, looked up by the given fields
// (primary key when none are given).
func (m *User) Read(fields ...string) error {
	return orm.NewOrm().Read(m, fields...)
}
// Update writes the given fields of m back to the database (all fields
// when none are given).
func (m *User) Update(fields ...string) error {
	_, err := orm.NewOrm().Update(m, fields...)
	return err
}
// Delete removes m's row from the database.
func (m *User) Delete() error {
	_, err := orm.NewOrm().Delete(m)
	return err
}
// AvatarUrl returns the URL of the user's medium (48px) avatar,
// falling back to a gravatar when no avatar has been uploaded.
func (m *User) AvatarUrl() (url string) {
	if m.Avatar == "" {
		return m.gravatarUrl(48)
	}
	prefix := setting.AvatarFSM.GetConfig().UrlPrefix
	return prefix + string(m.Avatar[0]) + "/" + string(m.Avatar[1]) + "/" + m.Avatar + "-m.png"
}
// LargeAvatarUrl returns the URL of the user's large (220px) avatar,
// falling back to a gravatar when no avatar has been uploaded.
func (m *User) LargeAvatarUrl() (url string) {
	if m.Avatar == "" {
		return m.gravatarUrl(220)
	}
	prefix := setting.AvatarFSM.GetConfig().UrlPrefix
	return prefix + string(m.Avatar[0]) + "/" + string(m.Avatar[1]) + "/" + m.Avatar + "-l.png"
}
// gravatarUrl builds a duoshuo gravatar-mirror URL of the given pixel
// size for the user's email address.
func (m *User) gravatarUrl(size int) (url string) {
	hash := utils.EncodeMd5(strings.ToLower(m.Email))
	return fmt.Sprintf("http://gravatar.duoshuo.com/avatar/%s?d=identicon&size=%d", hash, size)
}
// ValidUsername checks that Username contains only Han characters,
// ASCII letters and digits, and that its display length is 3-16
// (a Han character counts as 2).
func (m *User) ValidUsername() (err error) {
	if !regexp.MustCompile(UsernameRegex).MatchString(m.Username) {
		return errors.New("只能包含英文、数字和汉字")
	}
	if l := utils.HZStringLength(m.Username); l < 3 || l > 16 {
		return errors.New("长度3-16(汉字长度按2计算)")
	}
	return nil
}
// ValidateUrl checks that Url parses and uses the http or https scheme.
func (m *User) ValidateUrl() (err error) {
	u, parseErr := url.Parse(m.Url)
	if parseErr != nil {
		return errors.New("网址无效")
	}
	switch u.Scheme {
	case "http", "https":
		return nil
	default:
		return errors.New("只接受http和https协议的网址")
	}
}
// SetPassword generates a fresh 6-character random salt and stores the
// salted double-md5 hash of password in Password.
func (m *User) SetPassword(password string) error {
	salt := utils.GetRandomString(6)
	m.Salt = salt
	m.Password = utils.EncodeMd5(utils.EncodeMd5(password) + salt)
	return nil
}
// VerifyPassword reports whether password matches the stored salted
// hash.
func (m *User) VerifyPassword(password string) bool {
	return m.Password == utils.EncodeMd5(utils.EncodeMd5(password)+m.Salt)
}
// GenerateActivateCode creates a random activation code for the user
// and caches it for one hour under "activation:<code>".
func (m *User) GenerateActivateCode() (code string, err error) {
	code = strings.Replace(uuid.New(), "-", "", -1)
	err = setting.Cache.Put("activation:"+code, m.Username, 3600)
	if err != nil {
		beego.Error("cache", err)
		return "", err
	}
	return code, nil
}
// VerifyActivateCode checks code and, when valid, consumes it so it
// cannot be reused. On success the receiver holds the matching user.
func (m *User) VerifyActivateCode(code string) bool {
	ok := m.TestActivateCode(code)
	if !ok {
		return false
	}
	if err := m.ConsumeActivateCode(code); err != nil {
		beego.Error(err)
	}
	return true
}
// TestActivateCode checks an activation code without deleting it.
// When the code maps to an existing username, the receiver is loaded
// with that user's record.
func (m *User) TestActivateCode(code string) bool {
	username := cache.GetString(setting.Cache.Get("activation:" + code))
	if username == "" {
		return false
	}
	m.Username = username
	return m.Read("Username") == nil
}
// ConsumeActivateCode deletes code from the cache so it cannot be
// used again.
func (m *User) ConsumeActivateCode(code string) error {
	return setting.Cache.Delete("activation:" + code)
}
// ValidateAndSetAvatar validates an uploaded avatar image, crops it to
// a centered square, produces 220/48/24px PNG thumbnails, stores them
// via setting.AvatarFSM, removes the previous avatar's files, and
// records the new avatar id in m.Avatar. Only jpg, png and gif input
// is accepted. The caller is responsible for persisting m afterwards.
func (m *User) ValidateAndSetAvatar(avatarFile io.Reader, filename string) error {
	var img image.Image
	var err error
	var ext string
	var tmpFile *os.File
	// Buffer the upload into a temp file: format sniffing and decoding
	// both need a seekable stream.
	if tmpFile, err = ioutil.TempFile(setting.TmpPath, "uploaded-avatar-"); err != nil {
		return err
	}
	defer os.Remove(tmpFile.Name())
	defer tmpFile.Close()
	if _, err = io.Copy(tmpFile, avatarFile); err != nil {
		return err
	}
	tmpFile.Seek(0, 0)
	if filename != "" {
		ext = strings.ToLower(filepath.Ext(filename))
	} else {
		// No filename given: sniff the image format from the bytes.
		ext = utils.GetImageFormat(tmpFile)
		tmpFile.Seek(0, 0)
	}
	if ext != ".jpg" && ext != ".jpeg" && ext != ".png" && ext != ".gif" {
		return errors.New("只允许jpg, png, gif类型的图片")
	}
	switch ext {
	case ".jpg", ".jpeg":
		img, err = jpeg.Decode(tmpFile)
		if err != nil {
			return errors.New("无法识别此jpg文件")
		}
	case ".png":
		img, err = png.Decode(tmpFile)
		if err != nil {
			return errors.New("无法识别此png文件")
		}
	case ".gif":
		img, err = gif.Decode(tmpFile)
		if err != nil {
			return errors.New("无法识别此gif文件")
		}
	}
	// Crop to a centered square using the shorter edge.
	bound := img.Bounds()
	if w, h := bound.Dx(), bound.Dy(); w > h {
		img = imaging.CropCenter(img, h, h)
	} else if w < h {
		img = imaging.CropCenter(img, w, w)
	}
	// Build the large/medium/small thumbnails.
	imgL := imaging.Resize(img, 220, 220, imaging.Lanczos)
	imgM := imaging.Resize(img, 48, 48, imaging.Lanczos)
	imgS := imaging.Resize(img, 24, 24, imaging.Lanczos)
	uuid := strings.Replace(uuid.New(), "-", "", -1)
	imgLName, imgMName, imgSName := setting.TmpPath+uuid+"-l.png", setting.TmpPath+uuid+"-m.png", setting.TmpPath+uuid+"-s.png"
	errL, errM, errS := imaging.Save(imgL, imgLName), imaging.Save(imgM, imgMName), imaging.Save(imgS, imgSName)
	if errL != nil || errM != nil || errS != nil {
		return errors.New("无法保存头像临时文件")
	}
	defer os.Remove(imgLName)
	defer os.Remove(imgMName)
	defer os.Remove(imgSName)
	// Store under "<c0>/<c1>/<uuid>-<size>.png" (c0/c1 = first two
	// characters of the uuid), matching the paths AvatarUrl builds.
	_, errL = setting.AvatarFSM.PutFile(imgLName, string(uuid[0])+"/"+string(uuid[1])+"/"+uuid+"-l.png")
	_, errM = setting.AvatarFSM.PutFile(imgMName, string(uuid[0])+"/"+string(uuid[1])+"/"+uuid+"-m.png")
	_, errS = setting.AvatarFSM.PutFile(imgSName, string(uuid[0])+"/"+string(uuid[1])+"/"+uuid+"-s.png")
	if errL != nil || errM != nil || errS != nil {
		return errors.New("无法保存头像")
	}
	// Best-effort removal of the previous avatar's files; the errors
	// are assigned but never checked.
	// NOTE(review): these delete paths lack the "<c0>/<c1>/" directory
	// prefix used when storing — confirm old files are actually removed.
	if m.Avatar != "" {
		errL = setting.AvatarFSM.Delete(m.Avatar + "-l.png")
		errM = setting.AvatarFSM.Delete(m.Avatar + "-m.png")
		errS = setting.AvatarFSM.Delete(m.Avatar + "-s.png")
	}
	m.Avatar = uuid
	//errL, errM, errS = setting.AvatarFSM.Delete
	return nil
}
// FollowingUsers queries the follow rows where m is the follower.
func (m *User) FollowingUsers() orm.QuerySeter {
	qs := Follows()
	return qs.Filter("User", m.Id)
}
// FollowerUsers queries the follow rows where m is the one followed.
func (m *User) FollowerUsers() orm.QuerySeter {
	qs := Follows()
	return qs.Filter("FollowUser", m.Id)
}
// LatestTopics returns up to count of the user's most recent topics.
func (m *User) LatestTopics(count int) []*Topic {
	var topics []*Topic
	orm.NewOrm().QueryTable("topic").
		Filter("user_id", m.Id).
		OrderBy("-created").
		Limit(count).
		All(&topics)
	return topics
}
// LatestComments returns up to count of the user's most recent
// comments with their topics preloaded; each comment's content is
// synced before returning.
func (m *User) LatestComments(count int) []*Comment {
	var comments []*Comment
	orm.NewOrm().QueryTable("comment").
		RelatedSel("topic").
		Filter("user_id", m.Id).
		OrderBy("-created").
		Limit(count).
		All(&comments)
	for k := range comments {
		comments[k].SyncContent()
	}
	return comments
}
// Follow makes u follow who, marking the relation mutual when who
// already follows u, then refreshes both users' denormalized counters.
// who must have been saved (readable by primary key) first.
func (u *User) Follow(who *User) (err error) {
	if err = who.Read(); err != nil {
		// Bug fix: the original format string contained a %s verb but
		// no argument, producing "%!s(MISSING) must be saved...".
		return fmt.Errorf("%s must be saved before he/she can be followed!", who.Username)
	}
	var mutual bool
	reverseFollow := Follow{User: who, FollowUser: u}
	if err := reverseFollow.Read("User", "FollowUser"); err == nil {
		mutual = true
	}
	follow := Follow{User: u, FollowUser: who, Mutual: mutual}
	if err := follow.Insert(); err == nil && mutual {
		reverseFollow.Mutual = mutual
		reverseFollow.Update("Mutual")
	}
	// Refresh the denormalized counters; each err below is scoped to
	// its if-statement and intentionally does not affect the result.
	if nums, err := u.FollowingUsers().Count(); err == nil {
		u.Following = int(nums)
		u.Update("Following")
	}
	if nums, err := who.FollowerUsers().Count(); err == nil {
		who.Followers = int(nums)
		who.Update("Followers")
	}
	return nil
}
// UnFollow removes u's follow of who. When a follow row was actually
// deleted, it clears the Mutual flag on the reverse relation and
// refreshes both users' denormalized counters; otherwise it returns an
// error stating that u was not following who.
func (u *User) UnFollow(who *User) (err error) {
	num, _ := u.FollowingUsers().Filter("FollowUser", who.Id).Delete()
	if num > 0 {
		// The relation is no longer mutual for who.
		// NOTE(review): this filters who's *following* rows by
		// FollowUser=u.Id — confirm this matches the Follow schema.
		who.FollowingUsers().Filter("FollowUser", u.Id).Update(orm.Params{
			"Mutual": false,
		})
		// Each err below is scoped to its if-statement and does not
		// affect the named return value.
		if nums, err := u.FollowingUsers().Count(); err == nil {
			u.Following = int(nums)
			u.Update("Following")
		}
		if nums, err := who.FollowerUsers().Count(); err == nil {
			who.Followers = int(nums)
			who.Update("Followers")
		}
	} else {
		err = fmt.Errorf("%s not following %s", u.Username, who.Username)
	}
	return
}
// TableEngine tells beego orm to create the user table with InnoDB.
func (u *User) TableEngine() string {
	return "INNODB"
}
// Users returns a query seter over the user table.
func Users() orm.QuerySeter {
	return orm.NewOrm().QueryTable("user")
}
// init registers the User model with beego orm.
func init() {
	orm.RegisterModel(new(User))
}
|
package odoo
import (
"fmt"
)
// BaseModuleUninstall represents base.module.uninstall model.
// Bug fix: every tag option was misspelled "omptempty", so the
// omit-empty behavior never applied when marshalling xmlrpc payloads;
// the option is spelled "omitempty".
type BaseModuleUninstall struct {
	LastUpdate  *Time     `xmlrpc:"__last_update,omitempty"`
	CreateDate  *Time     `xmlrpc:"create_date,omitempty"`
	CreateUid   *Many2One `xmlrpc:"create_uid,omitempty"`
	DisplayName *String   `xmlrpc:"display_name,omitempty"`
	Id          *Int      `xmlrpc:"id,omitempty"`
	ModelIds    *Relation `xmlrpc:"model_ids,omitempty"`
	ModuleId    *Many2One `xmlrpc:"module_id,omitempty"`
	ModuleIds   *Relation `xmlrpc:"module_ids,omitempty"`
	ShowAll     *Bool     `xmlrpc:"show_all,omitempty"`
	WriteDate   *Time     `xmlrpc:"write_date,omitempty"`
	WriteUid    *Many2One `xmlrpc:"write_uid,omitempty"`
}

// BaseModuleUninstalls represents array of base.module.uninstall model.
type BaseModuleUninstalls []BaseModuleUninstall

// BaseModuleUninstallModel is the odoo model name.
const BaseModuleUninstallModel = "base.module.uninstall"
// Many2One converts BaseModuleUninstall to a *Many2One reference.
func (bmu *BaseModuleUninstall) Many2One() *Many2One {
	id := bmu.Id.Get()
	return NewMany2One(id, "")
}
// CreateBaseModuleUninstall creates a new base.module.uninstall model and returns its id.
func (c *Client) CreateBaseModuleUninstall(bmu *BaseModuleUninstall) (int64, error) {
	ids, err := c.CreateBaseModuleUninstalls([]*BaseModuleUninstall{bmu})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateBaseModuleUninstalls creates new base.module.uninstall models
// and returns their ids. (Comment corrected to name this function, per
// godoc convention — it previously repeated the singular variant.)
func (c *Client) CreateBaseModuleUninstalls(bmus []*BaseModuleUninstall) ([]int64, error) {
	var vv []interface{}
	for _, v := range bmus {
		vv = append(vv, v)
	}
	return c.Create(BaseModuleUninstallModel, vv)
}
// UpdateBaseModuleUninstall updates an existing base.module.uninstall record.
func (c *Client) UpdateBaseModuleUninstall(bmu *BaseModuleUninstall) error {
	ids := []int64{bmu.Id.Get()}
	return c.UpdateBaseModuleUninstalls(ids, bmu)
}
// UpdateBaseModuleUninstalls updates existing base.module.uninstall records.
// All records (represented by ids) will be updated by bmu values.
func (c *Client) UpdateBaseModuleUninstalls(ids []int64, bmu *BaseModuleUninstall) error {
	return c.Update(BaseModuleUninstallModel, ids, bmu)
}
// DeleteBaseModuleUninstall deletes an existing base.module.uninstall
// record identified by id.
func (c *Client) DeleteBaseModuleUninstall(id int64) error {
	return c.DeleteBaseModuleUninstalls([]int64{id})
}
// DeleteBaseModuleUninstalls deletes the existing base.module.uninstall
// records identified by ids.
func (c *Client) DeleteBaseModuleUninstalls(ids []int64) error {
	return c.Delete(BaseModuleUninstallModel, ids)
}
// GetBaseModuleUninstall gets an existing base.module.uninstall record
// by id, or an error when no such record exists.
func (c *Client) GetBaseModuleUninstall(id int64) (*BaseModuleUninstall, error) {
	bmus, err := c.GetBaseModuleUninstalls([]int64{id})
	if err != nil {
		return nil, err
	}
	if bmus != nil && len(*bmus) > 0 {
		first := &(*bmus)[0]
		return first, nil
	}
	return nil, fmt.Errorf("id %v of base.module.uninstall not found", id)
}
// GetBaseModuleUninstalls gets the existing base.module.uninstall
// records identified by ids.
func (c *Client) GetBaseModuleUninstalls(ids []int64) (*BaseModuleUninstalls, error) {
	result := &BaseModuleUninstalls{}
	err := c.Read(BaseModuleUninstallModel, ids, nil, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindBaseModuleUninstall finds base.module.uninstall record by querying it with criteria.
// It returns an error when nothing matches.
func (c *Client) FindBaseModuleUninstall(criteria *Criteria) (*BaseModuleUninstall, error) {
	result := &BaseModuleUninstalls{}
	// Limit(1): only the first matching record is needed.
	if err := c.SearchRead(BaseModuleUninstallModel, criteria, NewOptions().Limit(1), result); err != nil {
		return nil, err
	}
	if result == nil || len(*result) == 0 {
		return nil, fmt.Errorf("base.module.uninstall was not found with criteria %v", criteria)
	}
	return &((*result)[0]), nil
}
// FindBaseModuleUninstalls finds base.module.uninstall records by querying it
// and filtering it with criteria and options.
func (c *Client) FindBaseModuleUninstalls(criteria *Criteria, options *Options) (*BaseModuleUninstalls, error) {
	bmus := &BaseModuleUninstalls{}
	if err := c.SearchRead(BaseModuleUninstallModel, criteria, options, bmus); err != nil {
		return nil, err
	}
	return bmus, nil
}
// FindBaseModuleUninstallIds finds records ids by querying it
// and filtering it with criteria and options.
// On error an empty (non-nil) slice is returned alongside the error.
func (c *Client) FindBaseModuleUninstallIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(BaseModuleUninstallModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}
// FindBaseModuleUninstallId finds a single record id by querying it with criteria.
// It returns an error when nothing matches.
func (c *Client) FindBaseModuleUninstallId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(BaseModuleUninstallModel, criteria, options)
	if err != nil {
		return -1, err
	}
	// Guard clause: empty result means not found.
	if len(ids) == 0 {
		return -1, fmt.Errorf("base.module.uninstall was not found with criteria %v and options %v", criteria, options)
	}
	return ids[0], nil
}
|
package main
import (
"net"
"fmt"
"ipset/src/common"
)
// main is a scratch driver exercising net's CIDR parsing and the common
// package's CIDR helpers, printing each result for manual inspection.
func main() {
	// Previously the ParseCIDR error was discarded with `_`; report it
	// instead of continuing with nil values (which would panic below).
	ip, ne, err := net.ParseCIDR("1.2.3.4/19")
	if err != nil {
		fmt.Println("ParseCIDR:", err)
		return
	}
	fmt.Println(ne.Mask.Size())
	fmt.Println(ip)
	fmt.Println([]byte(ne.IP))
	fmt.Println([]byte(ne.Mask))
	fmt.Println(ne.Contains(net.ParseIP("1.2.3.44")))
	fmt.Println([]byte(net.ParseIP("1.2.3.4").To4()))
	fmt.Println(string([]byte{0, 0, 0, 0, 0, 0, 0, 1}))
	var addr net.IP
	fmt.Println(string(addr))
	ip1 := net.ParseIP("183.207.112.0")
	ip3 := net.ParseIP("183.207.113.255")
	// Surface helper errors instead of silently dropping them.
	cidrs, err := common.GenCIDRFromIP(ip1, ip3)
	if err != nil {
		fmt.Println("GenCIDRFromIP:", err)
		return
	}
	fmt.Println(cidrs)
	fmt.Println(common.IP2Uint(net.ParseIP("113.215.248.0")))
	fmt.Println(common.IP2Uint(net.ParseIP("113.215.255.255")))
	fmt.Println(net.ParseIP("fe80::ed57:2e8a:ea71:9ed0"))
}
|
package api
import (
"github.com/gin-gonic/gin"
"net/http"
)
// Response is the JSON payload returned by CpuInfo.
type Response struct {
	// Usage is the reported CPU usage value. No json tag is set, so it
	// serializes under the key "Usage".
	Usage float64
}
// CpuInfo writes the CPU usage payload as a JSON response with status 200.
func CpuInfo(c *gin.Context) {
	resp := Response{Usage: 100.0}
	c.JSON(http.StatusOK, &resp)
}
|
package common_templates
import (
"fmt"
"strings"
"path/filepath"
"sync"
templatev1 "github.com/openshift/api/template/v1"
core "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/selection"
"kubevirt.io/ssp-operator/internal/common"
"kubevirt.io/ssp-operator/internal/operands"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Package-level state shared by all reconcile calls.
var (
	// loadTemplatesOnce guards the one-time loading of the template bundle.
	loadTemplatesOnce sync.Once
	// templatesBundle holds the templates read from the bundle file.
	templatesBundle []templatev1.Template
	// deployedTemplates records the names of templates in the current bundle.
	deployedTemplates = make(map[string]bool)
)
// Define RBAC rules needed by this operand:
// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=template.openshift.io,resources=templates,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles;roles;rolebindings,verbs=get;list;watch;create;update;patch;delete
// RBAC for created roles
// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes/source,verbs=create
// commonTemplates is the operand managing the common templates bundle.
type commonTemplates struct{}

// Compile-time check that commonTemplates implements operands.Operand.
var _ operands.Operand = &commonTemplates{}

// GetOperand returns a new common-templates operand.
func GetOperand() operands.Operand {
	return &commonTemplates{}
}

// Name returns the operand's name ("common-templates").
func (c *commonTemplates) Name() string {
	return operandName
}
const (
	// operandName identifies this operand.
	operandName = "common-templates"
	// operandComponent is the app component label value for created resources.
	operandComponent = common.AppComponentTemplating
)
// AddWatchTypesToScheme registers the OpenShift template API types into the
// given scheme so they can be watched.
func (c *commonTemplates) AddWatchTypesToScheme(s *runtime.Scheme) error {
	return templatev1.Install(s)
}

// WatchClusterTypes lists the resource types this operand creates and
// therefore needs to watch at cluster scope.
func (c *commonTemplates) WatchClusterTypes() []client.Object {
	return []client.Object{
		&rbac.ClusterRole{},
		&rbac.Role{},
		&rbac.RoleBinding{},
		&core.Namespace{},
		&templatev1.Template{},
	}
}

// WatchTypes returns nil: this operand watches no namespaced types beyond
// those in WatchClusterTypes.
func (c *commonTemplates) WatchTypes() []client.Object {
	return nil
}
// Reconcile deploys the golden-images namespace, its RBAC objects, and the
// common-templates bundle, returning the status of every reconciled resource.
func (c *commonTemplates) Reconcile(request *common.Request) ([]common.ResourceStatus, error) {
	funcs := []common.ReconcileFunc{
		reconcileGoldenImagesNS,
		reconcileViewRole,
		reconcileViewRoleBinding,
		reconcileEditRole,
	}
	// Reads the bundle file shipped with the operator. Panics when the bundle
	// is missing or empty, since the operand cannot function without it.
	loadTemplates := func() {
		var err error
		filename := filepath.Join(BundleDir, "common-templates-"+Version+".yaml")
		templatesBundle, err = ReadTemplates(filename)
		if err != nil {
			request.Logger.Error(err, fmt.Sprintf("Error reading from template bundle, %v", err))
			panic(err)
		}
		if len(templatesBundle) == 0 {
			panic("No templates could be found in the installed bundle")
		}
		// Remember which template names ship in the current bundle so older
		// leftovers can be told apart during reconciliation.
		for _, template := range templatesBundle {
			deployedTemplates[template.Name] = true
		}
	}
	// Only load templates Once
	loadTemplatesOnce.Do(loadTemplates)
	oldTemplateFuncs, err := reconcileOlderTemplates(request)
	if err != nil {
		return nil, err
	}
	funcs = append(funcs, oldTemplateFuncs...)
	funcs = append(funcs, reconcileTemplatesFuncs(request)...)
	return common.CollectResourceStatus(request, funcs...)
}
// Cleanup deletes every resource created by this operand: the golden-images
// namespace, its RBAC objects, and all templates from the current bundle.
// NotFound errors are ignored, making cleanup idempotent.
func (c *commonTemplates) Cleanup(request *common.Request) error {
	objects := []client.Object{
		newGoldenImagesNS(GoldenImagesNSname),
		newViewRole(GoldenImagesNSname),
		newViewRoleBinding(GoldenImagesNSname),
		newEditRole(),
	}
	namespace := request.Instance.Spec.CommonTemplates.Namespace
	// Templates are deleted from the namespace configured in the CR.
	for index := range templatesBundle {
		templatesBundle[index].ObjectMeta.Namespace = namespace
		objects = append(objects, &templatesBundle[index])
	}
	for _, obj := range objects {
		err := request.Client.Delete(request.Context, obj)
		if err != nil && !errors.IsNotFound(err) {
			request.Logger.Error(err, fmt.Sprintf("Error deleting \"%s\": %s", obj.GetName(), err))
			return err
		}
	}
	return nil
}
// reconcileGoldenImagesNS ensures the golden-images namespace exists and
// carries the operand's app labels.
func reconcileGoldenImagesNS(request *common.Request) (common.ResourceStatus, error) {
	op := common.CreateOrUpdate(request).
		ClusterResource(newGoldenImagesNS(GoldenImagesNSname)).
		WithAppLabels(operandName, operandComponent)
	return op.Reconcile()
}
// reconcileViewRole ensures the view Role in the golden-images namespace is
// present and up to date; on update only the rules are overwritten.
func reconcileViewRole(request *common.Request) (common.ResourceStatus, error) {
	return common.CreateOrUpdate(request).
		ClusterResource(newViewRole(GoldenImagesNSname)).
		WithAppLabels(operandName, operandComponent).
		UpdateFunc(func(newRes, foundRes client.Object) {
			foundRole := foundRes.(*rbac.Role)
			newRole := newRes.(*rbac.Role)
			foundRole.Rules = newRole.Rules
		}).
		Reconcile()
}

// reconcileViewRoleBinding ensures the matching RoleBinding exists; on update
// subjects and role reference are overwritten.
func reconcileViewRoleBinding(request *common.Request) (common.ResourceStatus, error) {
	return common.CreateOrUpdate(request).
		ClusterResource(newViewRoleBinding(GoldenImagesNSname)).
		WithAppLabels(operandName, operandComponent).
		UpdateFunc(func(newRes, foundRes client.Object) {
			newBinding := newRes.(*rbac.RoleBinding)
			foundBinding := foundRes.(*rbac.RoleBinding)
			foundBinding.Subjects = newBinding.Subjects
			foundBinding.RoleRef = newBinding.RoleRef
		}).
		Reconcile()
}

// reconcileEditRole ensures the edit ClusterRole exists; on update only the
// rules are overwritten.
func reconcileEditRole(request *common.Request) (common.ResourceStatus, error) {
	return common.CreateOrUpdate(request).
		ClusterResource(newEditRole()).
		WithAppLabels(operandName, operandComponent).
		UpdateFunc(func(newRes, foundRes client.Object) {
			newRole := newRes.(*rbac.ClusterRole)
			foundRole := foundRes.(*rbac.ClusterRole)
			foundRole.Rules = newRole.Rules
		}).
		Reconcile()
}
// getOldTemplatesLabelSelector builds a selector matching base-type templates
// whose version label differs from the currently shipped Version.
func getOldTemplatesLabelSelector() labels.Selector {
	baseRequirement, err := labels.NewRequirement(TemplateTypeLabel, selection.Equals, []string{TemplateTypeLabelBaseValue})
	if err != nil {
		// Requirements are built from constants, so a failure here is a
		// programmer error; panic rather than continue with a bad selector.
		panic(fmt.Sprintf("Failed creating label selector for '%s=%s'", TemplateTypeLabel, TemplateTypeLabelBaseValue))
	}
	// Only fetching older templates to prevent duplication of API calls
	versionRequirement, err := labels.NewRequirement(TemplateVersionLabel, selection.NotEquals, []string{Version})
	if err != nil {
		panic(fmt.Sprintf("Failed creating label selector for '%s!=%s'", TemplateVersionLabel, Version))
	}
	return labels.NewSelector().Add(*baseRequirement, *versionRequirement)
}
// reconcileOlderTemplates lists templates from previous bundle versions and
// returns one reconcile function per template no longer shipped in the
// current bundle. Each function marks the template as deprecated and strips
// its os/flavor/workload labels.
func reconcileOlderTemplates(request *common.Request) ([]common.ReconcileFunc, error) {
	existingTemplates := &templatev1.TemplateList{}
	err := request.Client.List(request.Context, existingTemplates, &client.ListOptions{
		LabelSelector: getOldTemplatesLabelSelector(),
		Namespace:     request.Instance.Spec.CommonTemplates.Namespace,
	})
	// There might not be any templates (in case of a fresh deployment), so a NotFound error is accepted
	if err != nil && !errors.IsNotFound(err) {
		return nil, err
	}
	funcs := make([]common.ReconcileFunc, 0, len(existingTemplates.Items))
	for i := range existingTemplates.Items {
		// Fresh variable per iteration so each closure captures its own template.
		template := &existingTemplates.Items[i]
		// Templates in the current bundle are handled elsewhere.
		if _, ok := deployedTemplates[template.Name]; ok {
			continue
		}
		funcs = append(funcs, func(*common.Request) (common.ResourceStatus, error) {
			return common.CreateOrUpdate(request).
				ClusterResource(template).
				WithAppLabels(operandName, operandComponent).
				UpdateFunc(func(_, foundRes client.Object) {
					foundTemplate := foundRes.(*templatev1.Template)
					// Fix: assigning into a nil map panics in Go; objects
					// fetched from the cluster may have no annotations set.
					if foundTemplate.Annotations == nil {
						foundTemplate.Annotations = make(map[string]string)
					}
					foundTemplate.Annotations[TemplateDeprecatedAnnotation] = "true"
					for key := range foundTemplate.Labels {
						if strings.HasPrefix(key, TemplateOsLabelPrefix) ||
							strings.HasPrefix(key, TemplateFlavorLabelPrefix) ||
							strings.HasPrefix(key, TemplateWorkloadLabelPrefix) {
							delete(foundTemplate.Labels, key)
						}
					}
				}).
				Reconcile()
		})
	}
	return funcs, nil
}
// reconcileTemplatesFuncs returns one reconcile function per template in the
// current bundle, each creating/updating the template in the configured
// namespace. On update only objects and parameters are overwritten.
func reconcileTemplatesFuncs(request *common.Request) []common.ReconcileFunc {
	namespace := request.Instance.Spec.CommonTemplates.Namespace
	funcs := make([]common.ReconcileFunc, 0, len(templatesBundle))
	for i := range templatesBundle {
		// Fresh variable per iteration so each closure captures its own template.
		template := &templatesBundle[i]
		template.ObjectMeta.Namespace = namespace
		funcs = append(funcs, func(request *common.Request) (common.ResourceStatus, error) {
			return common.CreateOrUpdate(request).
				ClusterResource(template).
				WithAppLabels(operandName, operandComponent).
				UpdateFunc(func(newRes, foundRes client.Object) {
					newTemplate := newRes.(*templatev1.Template)
					foundTemplate := foundRes.(*templatev1.Template)
					foundTemplate.Objects = newTemplate.Objects
					foundTemplate.Parameters = newTemplate.Parameters
				}).
				Reconcile()
		})
	}
	return funcs
}
|
package altrudos
import (
	"database/sql"
	"errors"
	"fmt"
	"math"
	"net/url"
	"strconv"
	"time"

	"github.com/Masterminds/squirrel"
	"github.com/altrudos/api/pkg/justgiving"
	"github.com/jmoiron/sqlx"
	"github.com/lib/pq"
	dbUtil "github.com/monstercat/golib/db"
	"github.com/monstercat/pgnull"
	"github.com/satori/go.uuid"
)
var DonationCheckExpiration = time.Hour * 24 // If a pending donation is not found after this amount of time, reject it

// Validation/lookup errors for donations.
var (
	ErrMissingReferenceCode = errors.New("donation is missing reference code")
	ErrInvalidAmount        = errors.New("invalid donation amount")
	ErrNegativeAmount       = errors.New("donation amount can't be negative")
)

// Database table names.
var (
	TableDonations = "donations"
)

var (
	// These statuses are capitalized on JustGiving as well
	DonationAccepted DonationStatus = "Accepted"
	DonationPending  DonationStatus = "Pending"
	DonationRejected DonationStatus = "Rejected"
)

// Errors returned while building a donation link.
var (
	ErrNoCurrencyCode = errors.New("no currency code")
	ErrNoAmount       = errors.New("no donation amount")
	ErrNoCharity      = errors.New("no charity")
)

// Shared squirrel builders targeting the donations table.
var (
	DonationInsertBuilder = QueryBuilder.Insert(TableDonations)
	DonationUpdateBuilder = QueryBuilder.Update(TableDonations)
)
// DonationColumns maps Donation struct field names to their database column
// names (used to build column lists for SELECTs; see GetColumns).
var (
	DonationColumns = map[string]string{
		"Id":          "id",
		"CharityId":   "charity_id",
		"CreatedAt":   "created_at",
		"DonorAmount": "donor_amount",
		// Fixed key: the struct field is DonorCurrency; the previous key
		// "DonorCurrentyCode" matched no field on Donation.
		"DonorCurrency":      "donor_currency",
		"DonorName":          "donor_name",
		"DriveId":            "drive_id",
		"FinalAmount":        "final_amount",
		"FinalCurrency":      "final_currency",
		"LastChecked":        "last_checked",
		"Message":            "message",
		"ReferenceCode":      "reference_code",
		"Status":             "status",
		"CharityDescription": "charity_description",
		"CharityName":        "charity_name",
		"CharityWebsiteUrl":  "charity_website_url",
	}
)

// codeCount is not referenced anywhere in this file chunk; kept as-is.
var codeCount = 1
/*
"amount": "2.00",
"currencyCode": "GBP",
"donationDate": "\/Date(1556326412351+0000)\/",
"donationRef": null,
"donorDisplayName": "Awesome Guy",
"donorLocalAmount": "2.75",
"donorLocalCurrencyCode": "EUR",
"donorRealName": "Peter Queue",
"estimatedTaxReclaim": 0.56,
"id": 1234,
"image": "",
"message": "Hope you like my donation. Rock on!",
"source": "SponsorshipDonations",
"status": "Accepted",
"thirdPartyReference": "1234-my-sdi-ref"
*/
// DonationStatus mirrors the donation status strings used by JustGiving.
type DonationStatus string

// Donation is a row of the donations table/view plus in-memory associations
// (Charity, Drive) that are populated separately.
type Donation struct {
	Charity       *Charity          `db:"-"`
	CharityId     string            `db:"charity_id"`
	CreatedAt     time.Time         `db:"created_at"`
	DonorAmount   int               `db:"donor_amount"`   // What the donor typed in
	DonorCurrency string            `db:"donor_currency"` // What the donor selected
	DonorName     pgnull.NullString `db:"donor_name"`
	DriveId       string            `db:"drive_id"`
	FinalAmount   int               `db:"final_amount"`
	FinalCurrency pgnull.NullString `db:"final_currency"`
	Id            string            `setmap:"omitinsert"`
	LastChecked   pgnull.NullTime   `db:"last_checked"`
	Message       pgnull.NullString `db:"message"`
	Status        DonationStatus
	ReferenceCode string `db:"reference_code"`
	USDAmount     int    `db:"usd_amount"`
	// Fixed malformed struct tag: was `setmap:"-""` (stray trailing quote),
	// which go vet rejects and tag parsers may misread.
	Drive *Drive `db:"-" setmap:"-"`
	// From the join in the view
	CharityName        pgnull.NullString `db:"charity_name" setmap:"-"`
	CharityDescription pgnull.NullString `db:"charity_description" setmap:"-"`
	CharityWebsiteUrl  pgnull.NullString `db:"charity_website_url" setmap:"-"`
}
// DonationOperators carries the filters used in donation queries.
type DonationOperators struct {
	*BaseOperator
	// Statuses restricts results to donations in any of these statuses.
	Statuses []DonationStatus
}
// GetDonationByField fetches a single donation from the donations view where
// field equals val, then loads and attaches its Charity.
//
// NOTE(review): field is concatenated into the WHERE clause; callers must
// pass trusted column names only.
func GetDonationByField(tx sqlx.Queryer, field string, val interface{}) (*Donation, error) {
	query, args, err := QueryBuilder.
		Select(GetColumns(DonationColumns)...).
		From(ViewDonations).Where(field+"=?", val).
		ToSql()
	if err != nil {
		return nil, err
	}
	var d Donation
	err = sqlx.Get(tx, &d, query, args...)
	if err != nil {
		return nil, err
	}
	// The message says "charity", but it is the donation's CharityId column
	// that is empty here.
	if d.CharityId == "" {
		return nil, errors.New("charity has a blank ID")
	}
	charity, err := GetCharityById(tx, d.CharityId)
	if err != nil {
		return nil, err
	}
	d.Charity = charity
	return &d, nil
}
// GetDonationById gets a donation by its primary key.
func GetDonationById(tx sqlx.Queryer, id string) (*Donation, error) {
	return GetDonationByField(tx, "id", id)
}

// GetDonationByReferenceCode gets a donation by its unique reference code.
func GetDonationByReferenceCode(tx sqlx.Queryer, code string) (*Donation, error) {
	return GetDonationByField(tx, "reference_code", code)
}
// GetDonationsToCheck returns up to limit pending donations, intended to be
// ordered by next_check ascending.
//
// NOTE(review): GetDonations only applies ops.Statuses; the BaseOperator
// limit/sort set here do not appear to be applied — confirm intent.
func GetDonationsToCheck(tx sqlx.Queryer, limit int) ([]*Donation, error) {
	ops := &DonationOperators{
		BaseOperator: &BaseOperator{
			Limit:     limit,
			SortField: "next_check",
			SortDir:   SortAsc,
		},
		Statuses: []DonationStatus{DonationPending},
	}
	return GetDonations(tx, ops)
}
// QueryDonations executes the given select builder and scans the rows into
// donations. A "no rows" result is not an error: an empty slice is returned.
func QueryDonations(q sqlx.Queryer, query *squirrel.SelectBuilder) ([]*Donation, error) {
	sqlStr, args, err := query.ToSql()
	if err != nil {
		return nil, err
	}
	result := make([]*Donation, 0)
	if err := sqlx.Select(q, &result, sqlStr, args...); err != nil && err != sql.ErrNoRows {
		return nil, err
	}
	return result, nil
}
// GetDonations lists donations, optionally filtered by the statuses in ops.
//
// NOTE(review): only ops.Statuses is applied; the embedded BaseOperator
// (limit/sort) is ignored by this query — confirm whether that is intended.
func GetDonations(q sqlx.Queryer, ops *DonationOperators) ([]*Donation, error) {
	query := QueryBuilder.
		Select(GetColumns(DonationColumns)...).
		From(ViewDonations)
	if len(ops.Statuses) > 0 {
		query = query.Where("status = ANY (?)", StatusesPQStringArray(ops.Statuses))
	}
	return QueryDonations(q, &query)
}
// GetDonationsRecent lists accepted donations, newest first, with their
// drives populated. The ops parameter is currently unused.
func GetDonationsRecent(q sqlx.Queryer, ops *DonationOperators) ([]*Donation, error) {
	columns := GetColumns(DonationColumns)
	query := QueryBuilder.
		Select(columns...).
		From(ViewDonations).
		Where("status = ?", DonationAccepted).
		OrderBy("created_at DESC")
	donations, err := QueryDonations(q, &query)
	if err != nil {
		return nil, err
	}
	if err := PopulateDonationsDrives(q, donations); err != nil {
		return nil, err
	}
	return donations, nil
}
// GenerateReferenceCode assigns a unique reference code ("ch-" plus the
// current Unix-nano timestamp) to d, retrying while a donation with the same
// code already exists. A sql.ErrNoRows from the lookup means the code is free.
func (d *Donation) GenerateReferenceCode(ext sqlx.Ext) error {
	exists := false
	for d.ReferenceCode == "" || exists == true {
		// NOTE(review): this UUID is generated and then immediately
		// overwritten by the timestamp-based code below — dead work.
		// Confirm which format was intended before removing (the uuid
		// import would become unused).
		str := uuid.NewV4().String()
		str = fmt.Sprintf("ch-%d", time.Now().UnixNano())
		d.ReferenceCode = str
		dupe, err := GetDonationByReferenceCode(ext, d.ReferenceCode)
		if err != nil {
			if err == sql.ErrNoRows {
				return nil
			}
			return err
		}
		exists = dupe != nil
	}
	return nil
}
// Create does magic before insert into db: it verifies the charity exists,
// validates amount/currency, generates a unique reference code, defaults the
// status to Pending, stamps CreatedAt, and finally inserts the row.
func (d *Donation) Create(ext sqlx.Ext) error {
	if d.CharityId == "" {
		return ErrNoCharity
	}
	charity, err := GetCharityById(ext, d.CharityId)
	if err != nil {
		if err == sql.ErrNoRows {
			return ErrCharityNotFound
		}
		return err
	}
	d.Charity = charity
	// A non-empty reference code means this donation was already created.
	if d.ReferenceCode != "" {
		return ErrAlreadyInserted
	}
	if err := d.Validate(); err != nil {
		return err
	}
	err = d.GenerateReferenceCode(ext)
	if err != nil {
		return err
	}
	if d.Status == DonationStatus("") {
		d.Status = DonationPending
	}
	d.CreatedAt = time.Now()
	return d.Insert(ext)
}
// Validate normalizes the donor currency in place and rejects negative
// donation amounts.
func (d *Donation) Validate() error {
	normalized, err := ParseCurrency(d.DonorCurrency)
	if err != nil {
		return err
	}
	// Store the canonical currency representation back on the donation.
	d.DonorCurrency = normalized
	if d.DonorAmount < 0 {
		return ErrNegativeAmount
	}
	return nil
}
// ShouldReject reports whether a still-pending donation has been waiting
// longer than DonationCheckExpiration and should therefore be rejected.
func (d *Donation) ShouldReject() bool {
	if d.Status != DonationPending {
		return false
	}
	cutoff := time.Now().Add(-DonationCheckExpiration)
	return d.CreatedAt.Before(cutoff)
}
// Insert is the raw insert into db: it writes all insertable fields (setmap
// honors the "omitinsert" tag) and scans the generated id back into d.Id.
func (d *Donation) Insert(ext sqlx.Ext) error {
	query := DonationInsertBuilder.
		SetMap(dbUtil.SetMap(d, true)).
		Suffix(RETURNING_ID)
	return query.
		RunWith(ext).
		QueryRow().
		Scan(&d.Id)
}

// Save persists all mapped fields of d to the row matching d.Id.
func (d *Donation) Save(ext sqlx.Ext) error {
	setMap := dbUtil.SetMap(d, false)
	_, err := DonationUpdateBuilder.
		SetMap(setMap).
		Where("id=?", d.Id).
		RunWith(ext).
		Exec()
	return err
}
/*https://link.justgiving.com/v1/charity/donate/charityId/2096
?amount=10.00
&currency=USD
&reference=89302483&
exitUrl=http%3A%2F%2Flocalhost%3A9000%2Fconfirm%2F8930248302840%3FjgDonationId%3DJUSTGIVING-DONATION-ID
&message=Woohoo!%20Let's%20fight%20cancer!
*/
// GetDonationLink builds the JustGiving donation URL for this donation,
// passing the amount, currency, reference code, optional message, and an exit
// URL pointing back to our donation-check endpoint.
//
// NOTE(review): panics on a nil receiver instead of returning an error.
func (d *Donation) GetDonationLink(jg *justgiving.JustGiving, baseUrl string) (string, error) {
	urls := url.Values{}
	if d == nil {
		panic("donation is nil")
	}
	if d.Message.Valid && d.Message.String != "" {
		urls.Set("message", d.Message.String)
	}
	if d.DonorCurrency == "" {
		return "", ErrNoCurrencyCode
	}
	if d.DonorAmount == 0 {
		return "", ErrNoAmount
	}
	if d.Charity == nil {
		return "", ErrNoCharity
	}
	urls.Set("currency", d.DonorCurrency)
	urls.Set("amount", AmountToString(d.DonorAmount))
	urls.Set("reference", d.ReferenceCode)
	urls.Set("exitUrl", fmt.Sprintf("%s/donations/check/%s", baseUrl, d.ReferenceCode))
	return jg.GetDonationLink(d.Charity.JustGivingCharityId, urls), nil
}
// GetJustGivingDonation looks this donation up on JustGiving by reference code.
func (d *Donation) GetJustGivingDonation(jg *justgiving.JustGiving) (*justgiving.Donation, error) {
	return jg.GetDonationByReference(d.ReferenceCode)
}

// GetLastChecked returns the LastChecked timestamp, or the zero time when the
// donation was never checked (or the stored value cannot be read).
func (d *Donation) GetLastChecked() time.Time {
	if d.LastChecked.Valid {
		val, err := d.LastChecked.Value()
		if err != nil {
			return time.Time{}
		}
		return val.(time.Time)
	}
	return time.Time{}
}

// AmountString renders the final (settled) amount for display.
func (d *Donation) AmountString() string {
	return AmountToString(d.FinalAmount)
}
// IsAnonymous reports whether the donation carries no usable donor name.
func (d *Donation) IsAnonymous() bool {
	hasName := d.DonorName.Valid && d.DonorName.String != ""
	return !hasName
}

// GetDonorName returns the donor's display name, or "Anonymous" when no name
// was provided.
func (d *Donation) GetDonorName() string {
	if d.IsAnonymous() {
		return "Anonymous"
	}
	return d.DonorName.String
}
// CheckStatus refreshes this donation from JustGiving: when found, it copies
// the remote status and amount (converted to integer cents, plus a
// best-effort USD conversion); when not found, it leaves the donation pending
// or rejects it once DonationCheckExpiration has passed. The result is saved.
func (d *Donation) CheckStatus(ext sqlx.Ext, jg *justgiving.JustGiving) error {
	jgDonation, err := d.GetJustGivingDonation(jg)
	var status DonationStatus
	if err != nil {
		if err == justgiving.ErrDonationNotFound {
			// This checks the date
			if d.ShouldReject() {
				status = DonationRejected
			} else {
				status = DonationPending
			}
		} else {
			return err
		}
	} else {
		status = DonationStatus(jgDonation.Status)
		// JustGiving reports a decimal string; store integer cents.
		amount, err := strconv.ParseFloat(jgDonation.Amount, 64)
		if err != nil {
			return err
		}
		d.FinalAmount = int(amount * 100)
		d.FinalCurrency = pgnull.NullString{jgDonation.CurrencyCode, true} // NOTE(review): unkeyed literal; go vet prefers named fields
		// Currency conversion is best-effort: on failure USDAmount keeps its
		// previous value.
		usd, err := ExchangeToUSD(d.FinalAmount, d.FinalCurrency.String)
		if err == nil {
			d.USDAmount = usd
		}
	}
	d.LastChecked = pgnull.NullTime{time.Now(), true} // NOTE(review): unkeyed literal
	d.Status = status
	err = d.Save(ext)
	return err
}
// ApplyApproved narrows the given query to accepted donations only,
// modifying the builder in place.
func ApplyApproved(q *squirrel.SelectBuilder) {
	*q = q.Where("status=?", DonationAccepted)
}
// AmountFromString takes an amount that is a string from the frontend (e.g.
// "10.05") and returns it in integer cents. It returns ErrInvalidAmount for
// non-numeric input and ErrNegativeAmount for values below zero.
func AmountFromString(amount string) (int, error) {
	f, err := strconv.ParseFloat(amount, 64)
	if err != nil {
		return 0, ErrInvalidAmount
	}
	if f < 0 {
		return 0, ErrNegativeAmount
	}
	// Convert dollars to cents. Round instead of truncating: "10.05" parses
	// as 10.049999..., which int(f*100) would turn into 1004 instead of 1005.
	return int(math.Round(f * 100)), nil
}
// PopulateDonationsDrives attaches the matching Drive to every donation,
// loading all referenced drives in a single query. It fails when any donation
// references a drive that cannot be found.
func PopulateDonationsDrives(db sqlx.Queryer, donations []*Donation) error {
	driveIds := make([]string, 0, len(donations))
	for _, dono := range donations {
		driveIds = append(driveIds, dono.DriveId)
	}
	drives, err := GetDrives(db, &Cond{
		Where: squirrel.Expr("id = ANY (?)", pq.StringArray(driveIds)),
	})
	if err != nil {
		return err
	}
	// Index the loaded drives by id for O(1) association below.
	byId := make(map[string]*Drive, len(drives))
	for _, drive := range drives {
		byId[drive.Id] = drive
	}
	for _, dono := range donations {
		drive, ok := byId[dono.DriveId]
		if !ok {
			return errors.New("could not find a drive to populate into donation")
		}
		dono.Drive = drive
	}
	return nil
}
|
// Package httpx contains HTTP extensions. Specifically we have code to
// create transports and clients more suitable for the OONI needs.
package httpx
import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"time"

	"github.com/ooni/probe-engine/httpx/httplog"
	"github.com/ooni/probe-engine/httpx/httptracex"
	"github.com/ooni/probe-engine/httpx/netx"
	"github.com/ooni/probe-engine/log"
)
// NewTransport creates a new transport suitable. The first argument is
// the function to be used to configure a network proxy. The second argument
// is the TLS client config to use. Using `nil` is fine here. Note that not
// using `nil` causes Go not to automatically upgrade to http2; see this
// issue: <https://github.com/golang/go/issues/14275>.
func NewTransport(
	proxy func(req *http.Request) (*url.URL, error), tlsConfig *tls.Config,
) *http.Transport {
	return &http.Transport{
		Proxy: proxy,
		// We use a custom dialer that retries failed
		// dialing attempts for extra robustness.
		DialContext:     (&netx.RetryingDialer{}).DialContext,
		TLSClientConfig: tlsConfig,
		// These are the same settings of Go stdlib.
		MaxIdleConns:        100,
		IdleConnTimeout:     90 * time.Second,
		TLSHandshakeTimeout: 10 * time.Second,
	}
}
// NewTLSConfigWithCABundle constructs a new TLS configuration using
// the specified CA bundle, or returns an error. An error is also returned
// when the file contains no parseable PEM certificates, since silently
// using an empty root pool would reject every TLS peer.
func NewTLSConfigWithCABundle(caBundlePath string) (*tls.Config, error) {
	cert, err := ioutil.ReadFile(caBundlePath)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	// Fix: the boolean result was previously ignored, so a garbage or empty
	// bundle produced a "valid" config with zero trusted roots.
	if ok := pool.AppendCertsFromPEM(cert); !ok {
		return nil, fmt.Errorf("no CA certificates found in %q", caBundlePath)
	}
	return &tls.Config{RootCAs: pool}, nil
}
// NewTracingProxyingClient creates a new http.Client. This new http.Client
// will have the following properties:
//
// 1. it will log debug messages to the specified logger;
//
// 2. it will use netx.RetryingDialer for increased robustness;
//
// 3. it will use proxy to setup a proxy (note that passing
// nil will disable any proxy);
//
// 4. will use the specified tls.Config, if not nil (passing nil
// is preferrable; see NewTransport's docs).
func NewTracingProxyingClient(
	logger log.Logger, proxy func(req *http.Request) (*url.URL, error),
	tlsConfig *tls.Config,
) *http.Client {
	// The measurer wraps the transport so every round trip is traced and
	// logged through the given logger.
	return &http.Client{
		Transport: &httptracex.Measurer{
			RoundTripper: NewTransport(proxy, tlsConfig),
			Handler: &httplog.RoundTripLogger{
				Logger: logger,
			},
		},
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package updateutil
import (
"context"
"encoding/json"
"testing"
"time"
)
// TestLoad checks that parsePaygen loads the expected number of delta
// entries from the test fixture.
func TestLoad(t *testing.T) {
	wantLen := 5
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if paygen, err := parsePaygen(ctx, "testdata/TestPaygen.json"); err != nil {
		t.Error("Failed to load the testfile")
	} else if len(paygen.Deltas) != wantLen {
		t.Errorf("Unexpected number of entries; got %d, want %d", len(paygen.Deltas), wantLen)
	}
}
// TestFilter checks that board/channel/type and milestone filters reduce the
// fixture's delta list to the expected sizes.
func TestFilter(t *testing.T) {
	wantLenFilter1 := 3
	wantLenFilter2 := 1
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	paygen, err := parsePaygen(ctx, "testdata/TestPaygen.json")
	if err != nil {
		// Fatal (not Error): continuing after a failed parse would use an
		// unusable paygen value below.
		t.Fatal("Failed to load the testfile: ", err)
	}
	filtered := paygen.FilterBoardChannelDeltaType("sarien", "canary", "OMAHA")
	if len(filtered.Deltas) != wantLenFilter1 {
		t.Errorf("Unexpected number of entries after 1st filter; got %d, want %d", len(filtered.Deltas), wantLenFilter1)
	}
	filtered = paygen.FilterMilestone(97)
	if len(filtered.Deltas) != wantLenFilter2 {
		t.Errorf("Unexpected number of entries after 2nd filter; got %d, want %d", len(filtered.Deltas), wantLenFilter2)
	}
}
// TestLatest checks that FindLatest picks the expected newest release from
// the filtered fixture; on a mismatch it logs the full entry for debugging.
func TestLatest(t *testing.T) {
	wantVersion := "1.2.4"
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	paygen, err := parsePaygen(ctx, "testdata/TestPaygen.json")
	if err != nil {
		// Fatal (not Error): continuing after a failed parse would use an
		// unusable paygen value below.
		t.Fatal("Failed to load the testfile: ", err)
	}
	filtered := paygen.FilterBoardChannelDeltaType("sarien", "canary", "OMAHA")
	if latest, err := filtered.FindLatest(); err != nil {
		t.Fatal("Unexpected error in finding the latest release: ", err)
	} else if latest.ChromeOSVersion != wantVersion {
		t.Errorf("Unexpected version for the latest release; got %s, want %s", latest.ChromeOSVersion, wantVersion)
		// Dump the selected entry to help diagnose the mismatch.
		latestJSON, err := json.MarshalIndent(latest, "", "  ")
		if err != nil {
			t.Fatal(err)
		}
		t.Logf("Latest:\n%s", string(latestJSON))
	}
}
// TestVersion checks that version splits "major.minor.patch" strings and
// rejects malformed input.
func TestVersion(t *testing.T) {
	major, minor, patch, err := version("1.2.4")
	if err != nil {
		t.Fatal("Unexpected error: ", err)
	}
	if major != 1 || minor != 2 || patch != 4 {
		t.Errorf("Unexpected return values; got (%d, %d, %d), want (1, 2, 4)", major, minor, patch)
	}
	// An empty string is not a valid version and must produce an error.
	_, _, _, err = version("")
	if err == nil {
		t.Error("Unexpected result, error should not be nil")
	}
}
|
package mocks
import (
"errors"
"github.com/joaodias/hugito-app/domain"
"golang.org/x/oauth2"
)
// UserRepository is a test double recording which methods were called and
// optionally forcing every call to fail.
type UserRepository struct {
	// NewCalled is set when New is invoked.
	NewCalled bool
	// ReadCalled is set when Read is invoked.
	ReadCalled bool
	// IsError makes every method return an error when true.
	IsError bool
}
// New records the call and returns an error when IsError is set.
func (ur *UserRepository) New(user domain.User) error {
	ur.NewCalled = true
	if ur.IsError {
		// Lowercased per Go convention (error strings should not be
		// capitalized) and for consistency with Read's error.
		return errors.New("some error")
	}
	return nil
}
// Read records the call and returns an empty user, or an error when IsError
// is set.
func (ur *UserRepository) Read(accessToken string, oauthConfiguration *oauth2.Config) (*domain.User, error) {
	ur.ReadCalled = true
	if ur.IsError {
		// Lowercased per Go convention (error strings should not be capitalized).
		return nil, errors.New("some error")
	}
	return &domain.User{}, nil
}
|
// Copyright 2016 Andreas Pannewitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package d
import (
"github.com/GoLangsam/do"
"github.com/GoLangsam/dk-7.2.2.1/internal/x" // all we need
)
// ===========================================================================
// chooser selects the (next) item to cover.
type chooser func(*x.Item) x.Main

// ===========================================================================

// dancer consumes a chosen main item — used as the callback for the
// recursive dance (see On.next).
type dancer func(x.Main)
// ===========================================================================
// On consolidates what controls behaviour: the item chooser, the search
// strategy, and the optional lifecycle callbacks invoked during the dance.
type On struct {
	choose chooser // mandatory - chooses (next) item to cover - use MRV heuristic per default
	search do.It   // recursive or flat - algorithm X or C
	next   dancer  // for recursive dance: Callback Dance
	down   do.It   // for recursive dance: Callback Twirl

	Init do.It  // optional: Called on Open/Entry/Start/Begin
	Done do.It  // optional: Called on Done/Exit/Finished/End
	Skip do.Nok // optional: Called per Enter-Level
	Goal do.It  // optional: Called per Solution
	Fail do.It  // optional: Called per Deadend
	Leaf do.It  // optional: Called per Update
}
// ===========================================================================

// The accessors below use pointer receivers: with a value receiver each
// method would return a pointer into a *copy* of the On struct, so callbacks
// installed through the returned pointer would never reach the original
// value.

// GetInit returns (a pointer to) the function On.Init.
func (a *On) GetInit() *do.It { return &a.Init }

// GetSkip returns (a pointer to) the function On.Skip.
func (a *On) GetSkip() *do.Nok { return &a.Skip }

// GetGoal returns (a pointer to) the function On.Goal.
func (a *On) GetGoal() *do.It { return &a.Goal }

// GetFail returns (a pointer to) the function On.Fail.
func (a *On) GetFail() *do.It { return &a.Fail }

// GetLeaf returns (a pointer to) the function On.Leaf.
func (a *On) GetLeaf() *do.It { return &a.Leaf }

// GetDone returns (a pointer to) the function On.Done.
func (a *On) GetDone() *do.It { return &a.Done }
// ===========================================================================
|
package tyVpnProtocol
import "github.com/tachyon-protocol/udw/udwBytes"
// Encode serializes packet (command byte, sender id, receiver id, payload)
// into buf and returns the buffer's resulting length. The previously named
// result `n` was never assigned, so it has been removed as misleading.
//
// NOTE(review): the return value is buf.GetLen(), the buffer's total length —
// if buf was non-empty on entry this is not the number of bytes appended by
// this call.
func (packet *VpnPacket) Encode(buf *udwBytes.BufWriter) int {
	buf.WriteByte_(packet.Cmd)
	buf.WriteBigEndUint64(packet.ClientIdSender)
	buf.WriteBigEndUint64(packet.ClientIdReceiver)
	buf.Write_(packet.Data)
	return buf.GetLen()
}
|
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
)
// CapabilityDisplayName is the user-facing name of an ActiveGate capability,
// as written in the custom resource.
type CapabilityDisplayName string

// ActiveGateCapability describes one ActiveGate capability.
type ActiveGateCapability struct {
	// The name of the capability known by the user, mainly used in the CR
	DisplayName CapabilityDisplayName
	// The name used for marking the pod for given capability
	ShortName string
	// The string passed to the active gate image to enable a given capability
	ArgumentName string
}
// Predefined ActiveGate capabilities known to this API version.
var (
	RoutingCapability = ActiveGateCapability{
		DisplayName:  "routing",
		ShortName:    "routing",
		ArgumentName: "MSGrouter",
	}

	KubeMonCapability = ActiveGateCapability{
		DisplayName:  "kubernetes-monitoring",
		ShortName:    "kubemon",
		ArgumentName: "kubernetes_monitoring",
	}

	MetricsIngestCapability = ActiveGateCapability{
		DisplayName:  "metrics-ingest",
		ShortName:    "metrics-ingest",
		ArgumentName: "metrics_ingest",
	}

	DynatraceApiCapability = ActiveGateCapability{
		DisplayName:  "dynatrace-api",
		ShortName:    "dynatrace-api",
		ArgumentName: "restInterface",
	}

	// SyntheticCapability enables several arguments at once.
	SyntheticCapability = ActiveGateCapability{
		DisplayName:  "synthetic",
		ShortName:    "synthetic",
		ArgumentName: "synthetic,beacon_forwarder,beacon_forwarder_synthetic",
	}
)
// ActiveGateDisplayNames is the set of all known capability display names.
var ActiveGateDisplayNames = map[CapabilityDisplayName]struct{}{
	RoutingCapability.DisplayName:       {},
	KubeMonCapability.DisplayName:       {},
	MetricsIngestCapability.DisplayName: {},
	DynatraceApiCapability.DisplayName:  {},
	SyntheticCapability.DisplayName:     {},
}
// ActiveGateSpec configures the ActiveGate deployment: enabled capabilities,
// TLS secret, DNS policy, priority class and pod annotations.
type ActiveGateSpec struct {
	// Activegate capabilities enabled (routing, kubernetes-monitoring, metrics-ingest, dynatrace-api)
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Capabilities",order=10,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:text"}
	Capabilities []CapabilityDisplayName `json:"capabilities,omitempty"`

	CapabilityProperties `json:",inline"`

	// Optional: the name of a secret containing ActiveGate TLS cert+key and password. If not set, self-signed certificate is used.
	// server.p12: certificate+key pair in pkcs12 format
	// password: passphrase to read server.p12
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="TlsSecretName",order=10,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:text"}
	TlsSecretName string `json:"tlsSecretName,omitempty"`

	// Optional: Sets DNS Policy for the ActiveGate pods
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="DNS Policy",order=24,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:text"}
	DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty"`

	// Optional: If specified, indicates the pod's priority. Name must be defined by creating a PriorityClass object with that
	// name. If not specified the setting will be removed from the StatefulSet.
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Priority Class name",order=23,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:io.kubernetes:PriorityClass"}
	PriorityClassName string `json:"priorityClassName,omitempty"`

	// Optional: Adds additional annotations to the ActiveGate pods
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Annotations",order=27,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:text"}
	Annotations map[string]string `json:"annotations,omitempty"`
}
// CapabilityProperties is a struct which can be embedded by ActiveGate capabilities
// Such as KubernetesMonitoring or Routing
// It encapsulates common properties
type CapabilityProperties struct {
	// Amount of replicas for your ActiveGates
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Replicas",order=30,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podCount"
	Replicas *int32 `json:"replicas,omitempty"`
	// Optional: the ActiveGate container image. Defaults to the latest ActiveGate image provided by the registry on the tenant
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Image",order=10,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:text"}
	Image string `json:"image,omitempty"`
	// Optional: Set activation group for ActiveGate
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Activation group",order=31,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:text"}
	Group string `json:"group,omitempty"`
	// Optional: Add a custom properties file by providing it as a value or reference it from a secret
	// If referenced from a secret, make sure the key is called 'customProperties'
	CustomProperties *DynaKubeValueSource `json:"customProperties,omitempty"`
	// Optional: define resources requests and limits for single ActiveGate pods
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Resource Requirements",order=34,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:resourceRequirements"}
	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
	// Optional: Node selector to control the selection of nodes
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Node Selector",order=35,xDescriptors="urn:alm:descriptor:com.tectonic.ui:selector:Node"
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// Optional: set tolerations for the ActiveGate pods
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tolerations",order=36,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:hidden"}
	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
	// Optional: Adds additional labels for the ActiveGate pods
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Labels",order=37,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:text"}
	Labels map[string]string `json:"labels,omitempty"`
	// Optional: List of environment variables to set for the ActiveGate
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Environment variables",order=39,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:hidden"}
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Environment variables"
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:advanced,urn:alm:descriptor:com.tectonic.ui:text"
	Env []corev1.EnvVar `json:"env,omitempty"`
	// Optional: Adds TopologySpreadConstraints for the ActiveGate pods
	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="topologySpreadConstraints",order=40,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:hidden"}
	TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
}
|
// Copyright 2021 - 2021 The goword Authors. All rights reserved. Use of
// this source code is governed by a MIT license that can be found in
// the LICENSE file.
//
// Package elements provides a set of functions that allow you to write to
// and read from DOCX files. Supports reading and writing
// wordprocessing documents generated by Microsoft Word™ 2007 and later.
// This library needs Go version 1.15 or later.
package elements
// Background describes the document background: either an explicit hex color
// or a theme color with optional tint/shade adjustments.
// NOTE(review): only ColorAttr carries a json tag; confirm whether the theme
// fields are meant to be serialized as well.
type Background struct {
	// Background Color
	ColorAttr *HexColor `json:"color"`
	// Background Theme Color
	ThemeColorAttr ThemeColor
	// Background Theme Color Tint
	ThemeTintAttr *string
	// Background Theme Color Shade
	ThemeShadeAttr *string
}
// NewBackground returns a new, zero-valued Background.
func NewBackground() *Background {
	return &Background{}
}
|
package env
import (
"fmt"
"strconv"
)
// -- bool Value

// boolValue adapts a *bool so it satisfies the flag-style Value interface.
type boolValue bool

// newBoolValue stores val in *p and returns *p viewed as a boolValue, so the
// caller's variable and the Value share storage.
func newBoolValue(val bool, p *bool) *boolValue {
	*p = val
	return (*boolValue)(p)
}

// Set parses s as a boolean and stores the result; a parse error is returned
// to the caller.
func (b *boolValue) Set(s string) error {
	parsed, err := strconv.ParseBool(s)
	*b = boolValue(parsed)
	return err
}

// Get returns the current value as a plain bool.
func (b *boolValue) Get() interface{} { return bool(*b) }

// String renders the current value ("true"/"false").
func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
// BoolVar defines a bool environment variable with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the variable.
func (e *Set) BoolVar(p *bool, name string, value bool, usage string) {
	e.Var(newBoolValue(value, p), name, usage)
}
// BoolVar is like Set.BoolVar but registers the variable on the package-level
// default set Env.
// The argument p points to a bool variable in which to store the value of the variable.
func BoolVar(p *bool, name string, value bool, usage string) {
	Env.Var(newBoolValue(value, p), name, usage)
}
// Bool defines a bool environment variable with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the variable.
func (e *Set) Bool(name string, value bool, usage string) *bool {
	p := new(bool)
	e.BoolVar(p, name, value, usage)
	return p
}
// Bool is like Set.Bool but registers the variable on the package-level
// default set Env.
// The return value is the address of a bool variable that stores the value of the variable.
func Bool(name string, value bool, usage string) *bool {
	return Env.Bool(name, value, usage)
}
|
package output
import (
"fmt"
)
// Log type levels
type Type int
// Severity levels; each value doubles as an index into logTypeTags.
const (
	INFO Type = 0
	WARN Type = 1
	ERR Type = 2
	VERB Type = 3
)
// logTypeTags holds the printable tag for each Type; keep it aligned with
// the constants above.
var logTypeTags = [4]string{"INFO", "WARN", "ERR", "VERB"}
// write is the generic sink behind all level helpers. It dispatches the
// variadic arguments by type:
//   - Type sets the level tag (defaults to INFO)
//   - the first string becomes the format message; later strings become
//     format arguments
//   - bool toggles a trailing newline
//   - error replaces the message with err.Error()
//   - anything else is passed through as a format argument
func write(args ...interface{}) {
	logType := INFO
	message := ""
	newline := false
	printfArgs := make([]interface{}, 0, len(args))
	for _, arg := range args {
		// Go switch cases do not fall through; the redundant breaks were removed.
		switch val := arg.(type) {
		case Type:
			logType = val
		case string:
			if message == "" {
				message = val
			} else {
				printfArgs = append(printfArgs, val)
			}
		case bool:
			newline = val
		case error:
			message = val.Error()
		default:
			printfArgs = append(printfArgs, val)
		}
	}
	if newline {
		// NOTE(review): a "\n" is appended here AND println("") below emits a
		// second newline — confirm the blank line between entries is intended.
		message = message + "\n"
	}
	Printf("%s %s", logTypeTags[logType], fmt.Sprintf(message, printfArgs...))
	if newline {
		println("")
	}
}
// Info logs a formatted message at INFO level.
func Info(args ...interface{}) {
	write(append(args, INFO)...)
}
// Infoln logs a formatted message at INFO level, followed by a newline.
func Infoln(args ...interface{}) {
	write(append(args, INFO, true)...)
}
// Warn logs a formatted message at WARN level.
func Warn(args ...interface{}) {
	write(append(args, WARN)...)
}
// Warnln logs a formatted message at WARN level, followed by a newline.
func Warnln(args ...interface{}) {
	write(append(args, WARN, true)...)
}
// Fatal logs a formatted message at ERR level.
// NOTE(review): despite the name, this does not terminate the process —
// confirm callers do not expect os.Exit semantics.
func Fatal(args ...interface{}) {
	write(append(args, ERR)...)
}
// Fatalln logs a formatted message at ERR level, followed by a newline.
func Fatalln(args ...interface{}) {
	write(append(args, ERR, true)...)
}
// Verb logs a formatted message at VERB level without a trailing newline.
func Verb(args ...interface{}) {
	write(append(args, VERB, false)...)
}
// Verbln logs a formatted message at VERB level, followed by a newline.
func Verbln(args ...interface{}) {
	write(append(args, VERB, true)...)
}
|
package encryption
import (
"github.com/Luzifer/go-openssl"
)
// AESEncryptionService encrypts and decrypts byte payloads via the
// OpenSSL-compatible scheme of github.com/Luzifer/go-openssl.
// NOTE(review): key is passed to openssl as a passphrase string, not as a raw
// AES key — confirm that matches how ciphertexts are produced elsewhere.
type AESEncryptionService struct {
	// Passphrase handed to the openssl helper on every call.
	key []byte
}
// NewAESEncryptionService returns a service using key as its passphrase.
func NewAESEncryptionService(key []byte) *AESEncryptionService {
	return &AESEncryptionService{
		key: key,
	}
}
// Encode encrypts text with the configured passphrase.
func (e *AESEncryptionService) Encode(text []byte) ([]byte, error) {
	return openssl.New().EncryptBytes(string(e.key), text)
}
// Decode decrypts text with the configured passphrase.
func (e *AESEncryptionService) Decode(text []byte) ([]byte, error) {
	return openssl.New().DecryptBytes(string(e.key), text)
}
|
package model
import (
"time"
)
// View records the processing progress of a named view in a database:
// the last handled sequence number and the time it was recorded.
type View struct {
	Database string
	ViewName string
	CurrentSequence uint64
	CurrentTimestamp time.Time
}
|
package main
import (
"Backend/api"
"Backend/server"
"fmt"
"github.com/sirupsen/logrus"
"net/http"
"regexp"
"time"
)
// httpHandler adapts a *server.Server to the net/http Handler interface.
type httpHandler struct {
	Server *server.Server
}
// ServeHTTP implements http.Handler by delegating to the wrapped Server.
func (h *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.Server.ServeHTTP(w, r)
}
// makeHTTPHandler initializes the API with srv and returns an http handler
// that delegates all requests to it.
func makeHTTPHandler(srv *server.Server) *httpHandler {
	api.Init(srv)
	return &httpHandler{Server: srv}
}
// makeAndStartHTTPServer builds the package-level HTTP server on :8080 and
// blocks serving requests. http.ErrServerClosed (graceful shutdown) is not
// treated as a failure; any other error aborts the process.
func makeAndStartHTTPServer(srv *server.Server) {
	fmt.Println("Starting HTTP server")
	// Assigns the package-level httpSrv so it can be shut down elsewhere.
	httpSrv = &http.Server{
		Addr:         ":8080",
		Handler:      makeHTTPHandler(srv),
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	if err := httpSrv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		logrus.Fatalf("HTTP server error: %s", err)
	}
}
func makeAndServeFileServer() {
fileServer := http.FileServer(http.Dir("public"))
fileMatcher := regexp.MustCompile(`\.[a-zA-Z]*$`)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if !fileMatcher.MatchString(r.URL.Path) {
http.ServeFile(w, r, "public/index.html")
} else {
fileServer.ServeHTTP(w, r)
}
})
http.ListenAndServe(":80", nil)
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collate
// unicodeCICollator is a placeholder case-insensitive Unicode collator;
// all methods are stubs until the real collation is implemented.
type unicodeCICollator struct {
}
// Compare implements Collator interface. Always return 0 temporary, will change when implement
func (uc *unicodeCICollator) Compare(a, b string) int {
	return 0
}
// Key implements Collator interface. Always return nothing temporary, will change when implement
func (uc *unicodeCICollator) Key(str string) []byte {
	return []byte{}
}
// Pattern implements Collator interface.
func (uc *unicodeCICollator) Pattern() WildcardPattern {
	return &unicodePattern{}
}
// unicodePattern is a placeholder wildcard pattern for the unicode_ci
// collation; Compile/DoMatch are stubs until implemented.
type unicodePattern struct {
	// Pattern characters parsed by Compile (unused while stubbed).
	patChars []rune
	// Per-character match types parsed by Compile (unused while stubbed).
	patTypes []byte
}
// Compile implements WildcardPattern interface. Do nothing temporary, will change when implement
func (p *unicodePattern) Compile(patternStr string, escape byte) {
}
// DoMatch implements WildcardPattern interface. Always return false temporary, will change when implement
func (p *unicodePattern) DoMatch(str string) bool {
	return false
}
|
package controls
import (
"github.com/labstack/echo/v4"
"github.com/rs/xid"
"sofuny/models"
"sofuny/utils"
"time"
)
// CreateComment binds a Comment from the request body, assigns it a fresh
// UUID and a sequential ID, stores it, and increments the comment counter
// of the article it belongs to.
func CreateComment(ctx echo.Context) error {
	var comment models.Comment
	if err := ctx.Bind(&comment); err != nil {
		return ctx.JSON(200, utils.Response{
			StatusCode: 201,
			// err.Error(): a raw error value serializes to "{}" in JSON.
			Msg:  err.Error(),
			Time: time.Now().Local(),
		})
	}
	comment.Uuid = xid.New().String()
	// NOTE(review): deriving the next ID from the last stored row is racy
	// under concurrent requests — consider an auto-increment column.
	var lastComment models.Comment
	db.Find(&lastComment)
	comment.ID = lastComment.ID + 1
	if err := db.Create(&comment).Error; err != nil {
		return ctx.JSON(200, utils.Response{
			StatusCode: 201,
			Msg:        err.Error(),
			Time:       time.Now().Local(),
		})
	}
	var article models.Article
	db.Where("id=?", comment.ArticleID).Find(&article)
	article.CommentCounts += 1
	db.Save(&article)
	return ctx.JSON(200, utils.Response{
		StatusCode: 200,
		Msg:        "success",
		Data:       comment,
		Time:       time.Now().Local(),
	})
}
// FindComment returns every comment belonging to the article identified by
// the "article_id" query parameter.
func FindComment(ctx echo.Context) error {
	articleID := ctx.QueryParam("article_id")
	var commentList []models.Comment
	if err := db.Where("article_id=?", articleID).Find(&commentList).Error; err != nil {
		return ctx.JSON(200, utils.Response{
			StatusCode: 201,
			// err.Error(): a raw error value serializes to "{}" in JSON.
			Msg:  err.Error(),
			Time: time.Now().Local(),
		})
	}
	return ctx.JSON(200, utils.Response{
		StatusCode: 200,
		Msg:        "success",
		Data:       commentList,
		Time:       time.Now().Local(),
	})
}
// CommentLikes carries the target comment ID for like/dislike requests.
type CommentLikes struct {
	ID int `json:"id"`
}
// LikeComment increments the like counter of the comment identified by the
// bound CommentLikes.ID and returns the updated count.
func LikeComment(ctx echo.Context) error {
	var commentLike CommentLikes
	var comment models.Comment
	if err := ctx.Bind(&commentLike); err != nil {
		return ctx.JSON(200, utils.Response{
			StatusCode: 201,
			// err.Error(): a raw error value serializes to "{}" in JSON.
			Msg:  err.Error(),
			Time: time.Now().Local(),
		})
	}
	if err := db.Limit(1).Where("id=?", commentLike.ID).First(&comment).Error; err != nil {
		return ctx.JSON(200, utils.Response{
			StatusCode: 201,
			Msg:        err.Error(),
			Time:       time.Now().Local(),
		})
	}
	comment.Like += 1
	db.Save(&comment)
	return ctx.JSON(200, utils.Response{
		StatusCode: 200,
		Msg:        "success",
		Data:       map[string]int{"newCommentLike": comment.Like},
		Time:       time.Now().Local(),
	})
}
// DislikeComment increments the dislike counter of the comment identified by
// the bound CommentLikes.ID and returns the updated count.
func DislikeComment(ctx echo.Context) error {
	var commentDisLike CommentLikes
	var comment models.Comment
	if err := ctx.Bind(&commentDisLike); err != nil {
		return ctx.JSON(200, utils.Response{
			StatusCode: 201,
			// err.Error(): a raw error value serializes to "{}" in JSON.
			Msg:  err.Error(),
			Time: time.Now().Local(),
		})
	}
	if err := db.Limit(1).Where("id=?", commentDisLike.ID).First(&comment).Error; err != nil {
		return ctx.JSON(200, utils.Response{
			StatusCode: 201,
			Msg:        err.Error(),
			Time:       time.Now().Local(),
		})
	}
	comment.DisLike += 1
	db.Save(&comment)
	return ctx.JSON(200, utils.Response{
		StatusCode: 200,
		Msg:        "success",
		Data:       map[string]int{"newCommentDislike": comment.DisLike},
		Time:       time.Now().Local(),
	})
}
|
/**
*
* By So http://sooo.site
* -----
* Don't panic.
* -----
*
*/
package apicache
import (
"bytes"
"github.com/gin-gonic/gin"
)
// Response wraps a gin.ResponseWriter and mirrors every write into an
// in-memory buffer so the response body can later be cached.
type Response struct {
	gin.ResponseWriter
	body *bytes.Buffer
}
// NewResponse wraps w so that all writes are also captured in an in-memory
// buffer for caching.
func NewResponse(w gin.ResponseWriter) *Response {
	return &Response{
		ResponseWriter: w,
		body:           new(bytes.Buffer),
	}
}
// Write stores b in the cache buffer and then forwards it to the wrapped
// gin.ResponseWriter. A buffer failure short-circuits the forwarding.
func (resp *Response) Write(b []byte) (n int, err error) {
	n, err = resp.body.Write(b)
	if err != nil {
		return n, err
	}
	return resp.ResponseWriter.Write(b)
}
|
package handler
import (
"context"
"github.com/asim/go-micro/v3/client"
log "github.com/asim/go-micro/v3/logger"
authen "creapptive.com/ims-security/api/authen"
gateway "creapptive.com/ims-security/api/gateway"
message "creapptive.com/ims-security/api/message"
user "creapptive.com/ims-security/api/user"
)
// Default service names.
// NOTE(review): New resolves names via serviceNameMap instead; these
// constants look unused here — confirm they are referenced elsewhere.
const authenServiceName = "creapptive.service.authen"
const userServiceName = "creapptive.service.user"
// Gateway fronts the authen and user micro-services and implements
// gateway.ApigatewayHandler.
type Gateway struct {
	authenClient authen.AuthenService
	userClient user.UserService
}
// New builds a Gateway whose clients are resolved from serviceNameMap
// (keys "authen" and "user") using the supplied go-micro client.
func New(c client.Client, serviceNameMap map[string]string) gateway.ApigatewayHandler {
	return &Gateway{
		authenClient: authen.NewAuthenService(serviceNameMap["authen"], c),
		userClient: user.NewUserService(serviceNameMap["user"], c),
	}
}
// Login is a single request handler called via client.Call or the generated client code.
// It forwards the request to the authen service and copies the reply into rsp;
// previously the reply and error were silently discarded and rsp stayed empty.
func (g *Gateway) Login(ctx context.Context, req *message.LoginRequest, rsp *message.LoginReply) error {
	log.Info("Received Gateway.Login request")
	response, err := g.authenClient.Login(ctx, req)
	if err != nil {
		return err
	}
	*rsp = *response
	return nil
}
// CreateUser is a single request handler called via client.Call or the
// generated client code. It forwards the request to the user service and
// copies the reply into rsp.
func (g *Gateway) CreateUser(ctx context.Context, req *message.CreateUserRequest, rsp *message.CreateUserReply) error {
	log.Infof("CreateUser: %+v", req.User)
	reply, err := g.userClient.CreateUser(ctx, req)
	if err != nil {
		return err
	}
	*rsp = *reply
	log.Info("Received Gateway.CreateUser request")
	return nil
}
// GetUsers is a single request handler called via client.Call or the generated client code.
// It forwards the request to the user service and copies the reply into rsp;
// previously the reply and error were silently discarded and rsp stayed empty.
func (g *Gateway) GetUsers(ctx context.Context, req *message.GetUsersRequest, rsp *message.GetUsersReply) error {
	log.Info("Received Gateway.GetUsers request")
	response, err := g.userClient.GetUsers(ctx, req)
	if err != nil {
		return err
	}
	*rsp = *response
	return nil
}
// NOTE(review): every handler below is an unimplemented stub — each one only
// logs the request and returns nil, leaving rsp in its zero state. Confirm
// whether they should delegate to the backing services like CreateUser does.
// GetUserByID is a single request handler called via client.Call or the generated client code
func (g *Gateway) GetUserByID(ctx context.Context, req *message.GetUserByIDRequest, rsp *message.GetUserByIDReply) error {
	log.Info("Received Gateway.GetUserByID request")
	return nil
}
// UpdateUser is a single request handler called via client.Call or the generated client code
func (g *Gateway) UpdateUser(ctx context.Context, req *message.UpdateUserRequest, rsp *message.UpdateUserReply) error {
	log.Info("Received Gateway.UpdateUser request")
	return nil
}
// DeleteUser is a single request handler called via client.Call or the generated client code
func (g *Gateway) DeleteUser(ctx context.Context, req *message.DeleteUserRequest, rsp *message.DeleteUserReply) error {
	log.Info("Received Gateway.DeleteUser request")
	return nil
}
// ChangePassword is a single request handler called via client.Call or the generated client code
func (g *Gateway) ChangePassword(ctx context.Context, req *message.ChangePasswordRequest, rsp *message.ChangePasswordReply) error {
	log.Info("Received Gateway.ChangePassword request")
	return nil
}
// ResetPassword is a single request handler called via client.Call or the generated client code
func (g *Gateway) ResetPassword(ctx context.Context, req *message.ResetPasswordRequest, rsp *message.ResetPasswordReply) error {
	log.Info("Received Gateway.ResetPassword request")
	return nil
}
// GetMyself is a single request handler called via client.Call or the generated client code
func (g *Gateway) GetMyself(ctx context.Context, req *message.GetMyselfRequest, rsp *message.GetMyselfReply) error {
	log.Info("Received Gateway.GetMyself request")
	return nil
}
// CreatePrivilegeProfile is a single request handler called via client.Call or the generated client code
func (g *Gateway) CreatePrivilegeProfile(ctx context.Context, req *message.CreatePrivilegeProfileRequest, rsp *message.CreatePrivilegeProfileReply) error {
	log.Info("Received Gateway.CreatePrivilegeProfile request")
	return nil
}
// GetPrivilegeProfiles is a single request handler called via client.Call or the generated client code
func (g *Gateway) GetPrivilegeProfiles(ctx context.Context, req *message.GetPrivilegeProfilesRequest, rsp *message.GetPrivilegeProfilesReply) error {
	log.Info("Received Gateway.GetPrivilegeProfiles request")
	return nil
}
// GetPrivilegeProfileByID is a single request handler called via client.Call or the generated client code
func (g *Gateway) GetPrivilegeProfileByID(ctx context.Context, req *message.GetPrivilegeProfileByIDRequest, rsp *message.GetPrivilegeProfileByIDReply) error {
	log.Info("Received Gateway.GetPrivilegeProfileByID request")
	return nil
}
// UpdatePrivilegeProfile is a single request handler called via client.Call or the generated client code
func (g *Gateway) UpdatePrivilegeProfile(ctx context.Context, req *message.UpdatePrivilegeProfileRequest, rsp *message.UpdatePrivilegeProfileReply) error {
	log.Info("Received Gateway.UpdatePrivilegeProfile request")
	return nil
}
// DeletePrivilegeProfile is a single request handler called via client.Call or the generated client code
func (g *Gateway) DeletePrivilegeProfile(ctx context.Context, req *message.DeletePrivilegeProfileRequest, rsp *message.DeletePrivilegeProfileReply) error {
	log.Info("Received Gateway.DeletePrivilegeProfile request")
	return nil
}
// GetLocationTree is a single request handler called via client.Call or the generated client code
func (g *Gateway) GetLocationTree(ctx context.Context, req *message.GetLocationTreeRequest, rsp *message.GetLocationTreeReply) error {
	log.Info("Received Gateway.GetLocationTree request")
	return nil
}
// UpdateLocationTree is a single request handler called via client.Call or the generated client code
func (g *Gateway) UpdateLocationTree(ctx context.Context, req *message.UpdateLocationTreeRequest, rsp *message.UpdateLocationTreeReply) error {
	log.Info("Received Gateway.UpdateLocationTree request")
	return nil
}
// GetScopeTree is a single request handler called via client.Call or the generated client code
func (g *Gateway) GetScopeTree(ctx context.Context, req *message.GetScopeTreeRequest, rsp *message.GetScopeTreeReply) error {
	log.Info("Received Gateway.GetScopeTree request")
	return nil
}
// UpdateScopeTree is a single request handler called via client.Call or the generated client code
func (g *Gateway) UpdateScopeTree(ctx context.Context, req *message.UpdateScopeTreeRequest, rsp *message.UpdateScopeTreeReply) error {
	log.Info("Received Gateway.UpdateScopeTree request")
	return nil
}
// ChangeVersion is a single request handler called via client.Call or the generated client code
func (g *Gateway) ChangeVersion(ctx context.Context, req *message.ChangeVersionRequest, rsp *message.ChangeVersionReply) error {
	log.Info("Received Gateway.ChangeVersion request")
	return nil
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import (
"encoding/json"
"fmt"
"strings"
)
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// HTTPVerb is documented here http://hl7.org/fhir/ValueSet/http-verb
type HTTPVerb int

const (
	HTTPVerbGET HTTPVerb = iota
	HTTPVerbHEAD
	HTTPVerbPOST
	HTTPVerbPUT
	HTTPVerbDELETE
	HTTPVerbPATCH
)

// httpVerbCodes maps each HTTPVerb value (by index) to its FHIR code string.
var httpVerbCodes = [...]string{"GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"}

// httpVerbDefinitions maps each HTTPVerb value (by index) to its FHIR definition.
var httpVerbDefinitions = [...]string{
	"HTTP GET Command.",
	"HTTP HEAD Command.",
	"HTTP POST Command.",
	"HTTP PUT Command.",
	"HTTP DELETE Command.",
	"HTTP PATCH Command.",
}

// MarshalJSON encodes the verb as its JSON code string.
func (code HTTPVerb) MarshalJSON() ([]byte, error) {
	return json.Marshal(code.Code())
}

// UnmarshalJSON decodes a JSON code string into the receiver; unknown codes
// produce an error and leave the receiver unchanged.
func (code *HTTPVerb) UnmarshalJSON(data []byte) error {
	s := strings.Trim(string(data), "\"")
	for i, c := range httpVerbCodes {
		if c == s {
			*code = HTTPVerb(i)
			return nil
		}
	}
	return fmt.Errorf("unknown HTTPVerb code `%s`", s)
}

// String returns the verb's code string.
func (code HTTPVerb) String() string {
	return code.Code()
}

// Code returns the FHIR code for the verb, or "<unknown>" when out of range.
func (code HTTPVerb) Code() string {
	if code < 0 || int(code) >= len(httpVerbCodes) {
		return "<unknown>"
	}
	return httpVerbCodes[code]
}

// Display returns the display string, which is identical to the code.
func (code HTTPVerb) Display() string {
	return code.Code()
}

// Definition returns the FHIR definition text, or "<unknown>" when out of range.
func (code HTTPVerb) Definition() string {
	if code < 0 || int(code) >= len(httpVerbDefinitions) {
		return "<unknown>"
	}
	return httpVerbDefinitions[code]
}
|
package kafka
import "time"
// ConsumerConfig holds the settings needed to join a consumer group on a
// set of brokers and subscribe to topics.
type ConsumerConfig struct {
	Topics []string
	Servers []string
	UserName string
	Password string
	ConsumerGroup string
}
// ProducerConfig holds the settings needed to publish to a set of brokers.
// NOTE(review): Ak presumably is an access key — confirm and consider a
// clearer name.
type ProducerConfig struct {
	Servers []string
	Ak string
	Password string
}
// globalConfig holds the package-wide connection settings; set via SetConfig.
// NOTE(review): package-level mutable state without synchronization — confirm
// SetConfig is only called during start-up.
var globalConfig Config
// Config carries the broker connection settings shared across the package.
type Config struct {
	Servers []string
	UserName string
	Password string
	Cert []byte
}
// SetConfig installs cfg as the package-wide configuration.
func SetConfig(cfg Config) {
	globalConfig = cfg
}
// Message is a single record read from or written to a topic.
type Message struct {
	Key string
	Msg string
	Topic string
	Time time.Time
}
// Notification describes a consumer-group rebalance: the partitions newly
// claimed, currently owned, and released, keyed by topic.
type Notification struct {
	Claimed map[string][]int32
	Current map[string][]int32
	Released map[string][]int32
}
|
package crybsy
// ByHash groups the files by file hash values
func ByHash(files []File) map[string][]File {
	grouped := make(map[string][]File)
	for _, file := range files {
		// append on a missing key starts a fresh slice for that hash.
		grouped[file.Hash] = append(grouped[file.Hash], file)
	}
	return grouped
}
// ByPath groups the files by file path
func ByPath(files []File) map[string]File {
	byPath := make(map[string]File, len(files))
	for _, file := range files {
		byPath[file.Path] = file
	}
	return byPath
}
// Duplicates finds files with same hash
func Duplicates(byHash map[string][]File) map[string][]File {
	dupes := make(map[string][]File)
	for hash, group := range byHash {
		if len(group) < 2 {
			continue
		}
		dupes[hash] = group
	}
	return dupes
}
|
// Copyright 2020 cloudeng llc. All rights reserved.
// Use of this source code is governed by the Apache-2.0
// license that can be found in the LICENSE file.
// Package subcmd provides a multi-level command facility of the following form:
//
// Usage of <tool>
// <sub-command-1> <flags for sub-command-1> <args for sub-command-1>
// <sub-command-2-1> <flags for sub-command-2-1> <args for sub-command-2-1>
// ...
// <sub-command-2-2> <flags for sub-command-2-2> <args for sub-command-2-2>
// ...
// <sub-command-n> <flags for sub-command-n> <args for sub-command-n>
//
// The primary motivation for this package was to avoid the need to use global
// variables in packages to store flag values. Such global variables quickly
// become a maintenance problem as command line tools evolve and in particular
// as functions are refactored. The cloudeng.io/cmdutil/flags package provides
// a means of defining flags as fields in a struct with a struct
// tag providing the flag name, default value and usage and is used to
// represent all flags.
//
// subcmd builds on the standard flag package and mirrors its design but
// without requiring that flag.Parse or any of its global state be used.
//
// Flags are represented by a FlagSet which encapsulates an underlying
// flag.FlagSet but with flag variables provided via cloudeng.io/cmdutil/flags.
//
// The Command type associates a FlagSet with the function that implements
// that command as well as documenting the command. This 'runner' takes as an
// argument the struct used to store its flag values as well as the command
// line arguments; thus avoiding the need for global flag variables at the cost
// of a type assertion.
// A CommandSet is used to create the command hierarchy itself and finally
// the cmdset can be used to dispatch the appropriate command functions
// via cmdset.Dispatch or DispatchWithArgs.
//
// type rangeFlags struct {
// From int `subcmd:"from,1,start value for a range"`
// To int `subcmd:"to,2,end value for a range "`
// }
// func printRange(ctx context.Context, values interface{}, args []string) error {
// r := values.(*rangeFlags)
// fmt.Printf("%v..%v\n", r.From, r.To)
// return nil
// }
//
// func main() {
// ctx := context.Background()
// fs := subcmd.NewFlagSet()
// fs.MustRegisterFlagStruct(&rangeFlags{}, nil, nil)
// // Subcommands are added using the subcmd.WithSubcommands option.
// cmd := subcmd.NewCommand("ranger", fs, printRange, subcmd.WithoutArguments())
// cmd.Document("print an integer range")
// cmdSet := subcmd.NewCommandSet(cmd)
// cmdSet.MustDispatch(ctx)
// }
//
// In addition it is possible to register 'global' flags that may be specified
// before any sub commands on invocation and also to wrap calls to any
// subcommand's runner function. The former is useful for setting common flags
// and the latter for acting on those flags and/or implementing common
// functionality such as profiling or initializing logging etc.
//
// Note that this package will never call flag.Parse and will not associate
// any flags with flag.CommandLine.
package subcmd
import (
"bufio"
"bytes"
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"cloudeng.io/cmdutil"
"cloudeng.io/cmdutil/flags"
"cloudeng.io/text/linewrap"
)
// FlagSet represents the name, description and flag values for a command.
type FlagSet struct {
	// Underlying flag.FlagSet; never connected to flag.CommandLine.
	flagSet *flag.FlagSet
	// The struct whose fields back the registered flags.
	flagValues interface{}
	// Records which flags had values set; nil until RegisterFlagStruct runs.
	sm *flags.SetMap
}
// NewFlagSet returns a new instance of FlagSet with a silent underlying
// flag.FlagSet (no usage output, errors returned rather than printed).
func NewFlagSet() *FlagSet {
	underlying := flag.NewFlagSet("", flag.ContinueOnError)
	underlying.Usage = func() {}
	underlying.SetOutput(ioutil.Discard)
	return &FlagSet{flagSet: underlying}
}
// RegisterFlagStruct registers a struct, using flags.RegisterFlagsInStructWithSetMap.
// The struct tag must be 'subcmd'. The returned SetMap can be queried by the
// IsSet method.
// NOTE(review): flagValues and sm are stored even when registration fails —
// confirm callers always treat a non-nil error as fatal.
func (cf *FlagSet) RegisterFlagStruct(flagValues interface{}, valueDefaults map[string]interface{}, usageDefaults map[string]string) error {
	sm, err := flags.RegisterFlagsInStructWithSetMap(cf.flagSet, "subcmd", flagValues, valueDefaults, usageDefaults)
	cf.flagValues = flagValues
	cf.sm = sm
	return err
}
// MustRegisterFlagStruct is like RegisterFlagStruct except that it panics
// on encountering an error. Its use is encouraged over RegisterFlagStruct from
// within init functions.
func (cf *FlagSet) MustRegisterFlagStruct(flagValues interface{}, valueDefaults map[string]interface{}, usageDefaults map[string]string) {
	if err := cf.RegisterFlagStruct(flagValues, valueDefaults, usageDefaults); err != nil {
		panic(err)
	}
}
// RegisterFlagStruct creates a new FlagSet and calls RegisterFlagStruct
// on it.
func RegisterFlagStruct(flagValues interface{}, valueDefaults map[string]interface{}, usageDefaults map[string]string) (*FlagSet, error) {
	fs := NewFlagSet()
	if err := fs.RegisterFlagStruct(flagValues, valueDefaults, usageDefaults); err != nil {
		return nil, err
	}
	return fs, nil
}
// MustRegisterFlagStruct is like RegisterFlagStruct except that it panics
// on encountering an error. Its use is encouraged over RegisterFlagStruct from
// within init functions.
func MustRegisterFlagStruct(flagValues interface{}, valueDefaults map[string]interface{}, usageDefaults map[string]string) *FlagSet {
	fs, err := RegisterFlagStruct(flagValues, valueDefaults, usageDefaults)
	if err != nil {
		panic(err)
	}
	return fs
}
// IsSet returns true if the supplied flag variable's value has been
// set, either via a string literal in the struct or via the valueDefaults
// argument to RegisterFlagStruct.
// NOTE(review): cf.sm is nil until RegisterFlagStruct has been called —
// confirm flags.SetMap.IsSet tolerates a nil receiver.
func (cf *FlagSet) IsSet(field interface{}) (string, bool) {
	return cf.sm.IsSet(field)
}
// Runner is the type of the function to be called to run a particular command.
type Runner func(ctx context.Context, flagValues interface{}, args []string) error
// Main is the type of the function that can be used to intercept a call to
// a Runner.
type Main func(ctx context.Context, cmdRunner func() error) error
// Command represents a single command.
type Command struct {
	// Name used to select the command on the command line.
	name string
	// One-line description set via Document.
	description string
	// Space-joined argument descriptions set via Document.
	arguments string
	// Function invoked to execute the command; nil for pure command levels.
	runner Runner
	// Flags parsed before runner is invoked.
	flags *FlagSet
	// Behavioural options (argument arity, subcommands).
	opts options
}
// NewCommand returns a new instance of Command with the supplied options
// applied in order.
func NewCommand(name string, flags *FlagSet, runner Runner, options ...CommandOption) *Command {
	cmd := &Command{name: name, flags: flags, runner: runner}
	for _, option := range options {
		option(&cmd.opts)
	}
	return cmd
}
// NewCommandLevel returns a new instance of Command that has no runner of
// its own and instead dispatches to the supplied subcommands.
func NewCommandLevel(name string, subcmds *CommandSet) *Command {
	cmd := &Command{name: name, flags: NewFlagSet()}
	cmd.opts.subcmds = subcmds
	return cmd
}
// Document adds a description of the command and optionally descriptions
// of its arguments; the argument descriptions are joined with spaces for
// display in Usage.
func (cmd *Command) Document(description string, arguments ...string) {
	cmd.description = description
	cmd.arguments = strings.Join(arguments, " ")
}
func namesAndDefault(name string, fs *flag.FlagSet) string {
summary := []string{}
fs.VisitAll(func(fl *flag.Flag) {
summary = append(summary, "--"+fl.Name+"="+fl.DefValue)
})
if len(summary) == 0 {
return name
}
return name + " [" + strings.Join(summary, " ") + "]"
}
// Usage returns a string containing a 'usage' message for the command. It
// includes a summary of the command (including a list of any sub commands)
// its flags and arguments and the flag defaults.
func (cmd *Command) Usage() string {
	out := &strings.Builder{}
	// Header: "Usage of command <name>[: <description>]".
	fmt.Fprintf(out, "Usage of command %v", cmd.name)
	if len(cmd.description) > 0 {
		fmt.Fprintf(out, ": %v", cmd.description)
	}
	out.WriteString("\n")
	fs := cmd.flags.flagSet
	// Invocation line: command name plus flags with their defaults.
	cl := namesAndDefault(cmd.name, fs)
	out.WriteString(cl)
	// Either list subcommands or this command's own arguments, never both.
	if sc := cmd.opts.subcmds; sc != nil {
		fmt.Fprintf(out, " %v ...", strings.Join(sc.Commands(), "|"))
	} else if args := cmd.arguments; len(args) > 0 {
		if len(cl) > 0 {
			out.WriteString(" ")
		}
		out.WriteString(args)
	}
	out.WriteString("\n")
	fmt.Fprintf(out, "\n%s\n", printDefaults(cmd.flags.flagSet))
	return out.String()
}
// summary returns the command's name and one-line description.
func (cmd *Command) summary() (name, desc string) {
	return cmd.name, cmd.description
}
// CommandSet represents a set of commands that are peers to each other,
// that is, the command line must specify one of them.
type CommandSet struct {
	document   string     // top-level documentation for the set
	global     *FlagSet   // global flags parsed before the subcommand
	globalMain Main       // optional wrapper invoked around each runner
	cmds       []*Command // the commands in this set
	out        io.Writer  // destination for usage/help output
}

// CommandOption represents an option controlling the handling of a given
// command.
type CommandOption func(*options)

// options records a command's argument-count constraints and any
// subcommands. At most one of the boolean constraints is expected to be
// set for a given command.
type options struct {
	withoutArgs       bool        // command accepts no arguments
	optionalSingleArg bool        // command accepts zero or one argument
	exactArgs         bool        // command accepts exactly numArgs arguments
	atLeastArgs       bool        // command accepts at least numArgs arguments
	numArgs           int         // used with exactArgs / atLeastArgs
	subcmds           *CommandSet // non-nil when the command has subcommands
}
// WithoutArguments specifies that the command takes no arguments.
func WithoutArguments() CommandOption {
	return func(o *options) {
		o.withoutArgs = true
	}
}

// OptionalSingleArg specifies that the command takes an optional single argument.
func OptionalSingleArgument() CommandOption {
	return func(o *options) {
		o.optionalSingleArg = true
	}
}

// ExactlyNumArguments specifies that the command takes exactly the specified
// number of arguments.
func ExactlyNumArguments(n int) CommandOption {
	return func(o *options) {
		o.exactArgs = true
		o.numArgs = n
	}
}

// AtLeastNArguments specifies that the command takes at least N arguments.
func AtLeastNArguments(n int) CommandOption {
	return func(o *options) {
		o.atLeastArgs = true
		o.numArgs = n
	}
}
// NewCommandSet creates a new command set whose help/usage output defaults
// to os.Stderr.
func NewCommandSet(cmds ...*Command) *CommandSet {
	set := &CommandSet{cmds: cmds}
	set.out = os.Stderr
	return set
}
// WithGlobalFlags adds top-level/global flags that apply to all
// commands. They must be specified before a subcommand, ie:
// command <global-flags>* sub-command <sub-command-pflags>* args
func (cmds *CommandSet) WithGlobalFlags(global *FlagSet) {
	cmds.global = global
}

// WithMain arranges for Main to be called by Dispatch to wrap the call
// to the requested RunnerFunc.
func (cmds *CommandSet) WithMain(m Main) {
	cmds.globalMain = m
}
// defaults returns the usage/defaults text for every command in the set,
// preceded by the global flag defaults.
func (cmds *CommandSet) defaults() string {
	var out strings.Builder
	out.WriteString(cmds.globalDefaults())
	last := len(cmds.cmds) - 1
	for i, cmd := range cmds.cmds {
		out.WriteString(cmd.Usage())
		// Separate commands with a blank line, but not after the last one.
		if i != last {
			out.WriteString("\n")
		}
	}
	return out.String()
}
// lineWrapDefaults reformats the output of flag.FlagSet.PrintDefaults:
// flag-introducing lines are passed through verbatim while runs of
// description text are accumulated and re-wrapped as an indented block.
func lineWrapDefaults(input string) string {
	out := &strings.Builder{}
	sc := bufio.NewScanner(bytes.NewBufferString(input))
	block := &strings.Builder{}
	// writeBlock flushes any accumulated description text as a wrapped,
	// indented paragraph.
	writeBlock := func() {
		if block.Len() > 0 {
			fmt.Fprintf(out, "%s\n", linewrap.Block(4, 75, block.String()))
			block.Reset()
		}
	}
	for sc.Scan() {
		l := sc.Text()
		if len(l) < 3 {
			// Too short to be a flag line or useful text; drop it.
			continue
		}
		// NOTE(review): l[:3] is a 3-byte slice but the literal below has
		// fewer characters, so this comparison may never be true; the
		// intent appears to be matching PrintDefaults' "  -name" lines —
		// confirm the literal against flag.PrintDefaults output.
		if l[:3] == " -" {
			writeBlock()
			fmt.Fprintf(out, "%s\n", l)
			continue
		}
		fmt.Fprintf(block, "%s\n", l)
	}
	writeBlock()
	return out.String()
}
// printDefaults captures fs.PrintDefaults() into a buffer, restores the
// flag set's original output writer on return, and line-wraps the captured
// text.
func printDefaults(fs *flag.FlagSet) string {
	var captured strings.Builder
	orig := fs.Output()
	fs.SetOutput(&captured)
	defer fs.SetOutput(orig)
	fs.PrintDefaults()
	return lineWrapDefaults(captured.String())
}
// globalDefaults returns the usage text for the set's global flags, or the
// empty string when there are none.
func (cmds *CommandSet) globalDefaults() string {
	if cmds.global == nil {
		return ""
	}
	out := &strings.Builder{}
	fs := cmds.global.flagSet
	fmt.Fprintf(out, "global flags:%s\n", namesAndDefault("", fs))
	fmt.Fprintf(out, "%s\n", printDefaults(fs))
	return out.String()
}
// Usage returns the usage message for the command set.
func (cmds *CommandSet) Usage(name string) string {
	return fmt.Sprintf("Usage of %v\n\n%s\n", name, cmds.Summary())
}

// Defaults returns the usage message and flag defaults.
func (cmds *CommandSet) Defaults(name string) string {
	out := &strings.Builder{}
	out.WriteString(cmds.Usage(name))
	// Append the global flag defaults only when there are global flags.
	if gd := cmds.globalDefaults(); len(gd) > 0 {
		fmt.Fprintf(out, "\n%s", gd)
	}
	return out.String()
}

// Commands returns the list of available commands.
func (cmds *CommandSet) Commands() []string {
	out := make([]string, len(cmds.cmds))
	for i, cmd := range cmds.cmds {
		out[i] = cmd.name
	}
	return out
}

// Document adds a description for the command set.
func (cmds *CommandSet) Document(doc string) {
	cmds.document = doc
}
// Summary returns a summary of the command set that includes its top
// level documentation and a list of its sub-commands.
func (cmds *CommandSet) Summary() string {
	// Find the longest command name so the listing can be right-aligned.
	max := 0
	for _, cmd := range cmds.cmds {
		name, _ := cmd.summary()
		if l := len(name); l > max {
			max = l
		}
	}
	out := &strings.Builder{}
	if d := cmds.document; len(d) > 0 {
		fmt.Fprintf(out, "%s\n\n", linewrap.Block(1, 80, d))
	}
	for i, cmd := range cmds.cmds {
		name, desc := cmd.summary()
		// Pad each name so every "name - description" column lines up.
		fmt.Fprintf(out, " %v%v - %v", strings.Repeat(" ", max-len(name)), name, desc)
		if i < len(cmds.cmds)-1 {
			out.WriteByte('\n')
		}
	}
	return out.String()
}
// Dispatch will dispatch the appropriate sub command or return an error.
// The program name is derived from os.Args[0] and the remaining arguments
// are taken from os.Args[1:].
func (cmds *CommandSet) Dispatch(ctx context.Context) error {
	return cmds.DispatchWithArgs(ctx, filepath.Base(os.Args[0]), os.Args[1:]...)
}

// MustDispatch will dispatch the appropriate sub command or exit.
func (cmds *CommandSet) MustDispatch(ctx context.Context) {
	err := cmds.DispatchWithArgs(ctx, filepath.Base(os.Args[0]), os.Args[1:]...)
	if err != nil {
		cmdutil.Exit("%v", err)
	}
}
// GlobalFlagSet creates a new FlagSet that is to be used for global flags.
// Note that it replaces the package-level flag.CommandLine, silences its
// usage function and discards its output so that all error/usage reporting
// is controlled by this package.
func GlobalFlagSet() *FlagSet {
	flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	flag.CommandLine.Usage = func() {}
	flag.CommandLine.SetOutput(ioutil.Discard)
	return &FlagSet{flagSet: flag.CommandLine}
}
// SetOutput is like flag.FlagSet.SetOutput; usage and help messages are
// written to out.
func (cmds *CommandSet) SetOutput(out io.Writer) {
	cmds.out = out
}

// Output is like flag.FlagSet.Output.
func (cmds *CommandSet) Output() io.Writer {
	return cmds.out
}
// Dispatch determines which top level command has been requested, if any,
// parses the command line appropriately and then runs its associated function.
func (cmds *CommandSet) DispatchWithArgs(ctx context.Context, usage string, args ...string) error {
	if cmds.global != nil {
		// Global flags must precede the subcommand; parse and strip them
		// first.
		fs := cmds.global.flagSet
		if err := fs.Parse(args); err != nil {
			if err == flag.ErrHelp {
				// Top-level help: print full usage plus global defaults,
				// then propagate flag.ErrHelp.
				fmt.Fprintln(cmds.out, cmds.Usage(usage))
				if gd := cmds.globalDefaults(); len(gd) > 0 {
					fmt.Fprintf(cmds.out, "%s", gd)
				}
			}
			return err
		}
		// Whatever remains is the subcommand and its arguments.
		args = fs.Args()
	}
	return cmds.dispatchWithArgs(ctx, usage, args)
}
// mainWrapper returns the registered Main wrapper, or a pass-through
// wrapper that simply invokes the runner when none has been set.
func (cmds *CommandSet) mainWrapper() Main {
	if cmds.globalMain != nil {
		return cmds.globalMain
	}
	return func(ctx context.Context, runner func() error) error {
		return runner()
	}
}
// dispatchWithArgs locates the requested command in the set, parses the
// command's flags and hands the remaining arguments to processChosenCmd.
// It also implements the built-in help spellings ("help", "-help",
// "--help", "-h", "--h"), all of which print usage and return flag.ErrHelp.
func (cmds *CommandSet) dispatchWithArgs(ctx context.Context, usage string, args []string) error {
	if len(args) == 0 {
		fmt.Fprintln(cmds.out, cmds.Usage(usage))
		return fmt.Errorf("no command specified")
	}
	requested := args[0]
	switch requested {
	case "-help", "--help", "-h", "--h":
		fmt.Fprintln(cmds.out, cmds.Usage(usage))
		return flag.ErrHelp
	case "help":
		// Bare "help" prints the set's usage; "help <cmd>" prints the usage
		// of that command when it exists. Otherwise fall through to the
		// unsupported-command error below.
		if len(args) == 1 {
			fmt.Fprintln(cmds.out, cmds.Usage(usage))
			return flag.ErrHelp
		}
		for _, cmd := range cmds.cmds {
			if args[1] == cmd.name {
				fmt.Fprintln(cmds.out, cmd.Usage())
				return flag.ErrHelp
			}
		}
	}
	for _, cmd := range cmds.cmds {
		fs := cmd.flags.flagSet
		if requested == cmd.name {
			if cmd.runner == nil && cmd.opts.subcmds == nil {
				return fmt.Errorf("no runner registered for %v", requested)
			}
			// Deliberately shadow args with everything after the command
			// name before parsing the command's own flags.
			args := args[1:]
			if err := fs.Parse(args); err != nil {
				if err == flag.ErrHelp {
					fmt.Fprintln(cmds.out, cmd.Usage())
					return err
				}
				return fmt.Errorf("%v: failed to parse flags: %v", cmd.name, err)
			}
			return cmds.processChosenCmd(ctx, cmd, usage, fs.Args())
		}
	}
	return fmt.Errorf("%v is not one of the supported commands: %v", requested, strings.Join(cmds.Commands(), ", "))
}
// processChosenCmd validates the positional arguments against the command's
// declared constraints and then either dispatches to its subcommands or
// invokes its runner via the Main wrapper.
func (cmds *CommandSet) processChosenCmd(ctx context.Context, cmd *Command, usage string, args []string) error {
	plural := "arguments"
	if cmd.opts.numArgs == 1 {
		plural = "argument"
	}
	// The constraints are mutually exclusive: the first case that matches
	// wins. The subcmds case returns without ever invoking a runner.
	switch {
	case cmd.opts.withoutArgs:
		if len(args) > 0 {
			return fmt.Errorf("%v: does not accept any arguments", cmd.name)
		}
	case cmd.opts.optionalSingleArg:
		if len(args) > 1 {
			return fmt.Errorf("%v: accepts at most one argument", cmd.name)
		}
	case cmd.opts.exactArgs:
		if len(args) != cmd.opts.numArgs {
			return fmt.Errorf("%v: accepts exactly %v %s", cmd.name, cmd.opts.numArgs, plural)
		}
	case cmd.opts.atLeastArgs:
		if len(args) < cmd.opts.numArgs {
			return fmt.Errorf("%v: accepts at least %v %s", cmd.name, cmd.opts.numArgs, plural)
		}
	case cmd.opts.subcmds != nil:
		// Subcommand levels inherit the parent's Main wrapper unless they
		// registered their own.
		if cmd.opts.subcmds.globalMain == nil {
			cmd.opts.subcmds.globalMain = cmds.globalMain
		}
		return cmd.opts.subcmds.dispatchWithArgs(ctx, usage, args)
	}
	return cmds.mainWrapper()(ctx, func() error {
		return cmd.runner(ctx, cmd.flags.flagValues, args)
	})
}
|
package main
import (
json_sim "./simlejson"
"bufio"
"encoding/json"
"fmt"
"net"
"os"
"strings"
)
// Promise is an empty placeholder interface.
// NOTE(review): Promise and accept appear unused in this file — confirm
// before removing.
type Promise interface{}

var accept Promise

// storemap is the in-memory key/value store: request key -> JSON object.
var storemap = make(map[string]map[string]interface{})

// new_config holds the verified server configuration (bind, port, password)
// read by handleClient.
var new_config = make(map[string]string)

// Default bind address and port, used when the config file omits them.
const (
	IP   string = "127.0.0.1"
	PORT string = "1200"
)
// main reads config.conf, fills in defaults, then accepts TCP connections
// and serves each one in its own goroutine.
func main() {
	fmt.Print("Server started, xuncache version 0.2\n")
	// Read the configuration file.
	var config = make(map[string]string)
	config_file, err := os.Open("config.conf")
	if err != nil {
		fmt.Print("Can not read configuration file. now exit\n")
		os.Exit(0)
	}
	// Only defer the close once we know the open succeeded.
	defer config_file.Close()
	buff := bufio.NewReader(config_file)
	// Parse "key value" lines; '#' starts a comment and short lines are skipped.
	for {
		line, err := buff.ReadString('\n')
		if err != nil {
			break
		}
		rs := []rune(line)
		if string(rs[0:1]) == "#" || len(line) < 3 {
			continue
		}
		str_type := string(rs[0:strings.Index(line, " ")])
		detail := string(rs[strings.Index(line, " ")+1 : len(rs)-1])
		config[str_type] = detail
	}
	// Fill in defaults for anything missing from the config file.
	// BUG FIX: assign to the package-level new_config instead of shadowing
	// it with ":=". handleClient reads the package-level map for the
	// password check, which previously always saw an empty map, so the
	// configured password was never enforced.
	new_config = verify(config)
	// Start the TCP server.
	tcpAddr, err := net.ResolveTCPAddr("tcp4", new_config["bind"]+":"+new_config["port"])
	fmt.Printf("The server is now ready to accept connections on %s:%s\n", new_config["bind"], new_config["port"])
	checkError(err)
	listener, err := net.ListenTCP("tcp", tcpAddr)
	checkError(err)
	for {
		conn, err := listener.Accept()
		if err != nil {
			continue
		}
		go handleClient(conn)
	}
}
// handleClient serves one client connection: it reads JSON requests,
// enforces the configured password, and executes set/find/delete
// operations against the in-memory store, writing a JSON reply for each.
// NOTE(review): storemap is read and written from one goroutine per
// connection with no synchronization — a data race under concurrent
// clients; confirm and guard with a mutex.
func handleClient(conn net.Conn) {
	// Mark the end of the connection.
	defer conn.Close()
	defer fmt.Print("Client closed connection\n")
	ipAddr := conn.RemoteAddr()
	fmt.Printf("Accepted %s\n", ipAddr)
	for {
		var back = make(map[string]interface{})
		// Read the request.
		// NOTE(review): assumes one Read yields one complete JSON message
		// of at most 1024 bytes — confirm the client protocol guarantees
		// this.
		var buf [1024]byte
		n, _ := conn.Read(buf[0:])
		b := []byte(buf[0:n])
		if len(b) < 1 {
			return
		}
		js, _ := json_sim.NewJson(b)
		pass, _ := js.Get("Pass").String()
		// Reject when a password is configured and the client's does not match.
		if pass != new_config["password"] && len(new_config["password"]) > 1 {
			fmt.Printf("Encountered a connection password is incorrect Accepted %s\n", ipAddr)
			back["error"] = true
			back["point"] = "password error!"
			rewrite(back, conn)
			return
		}
		// Extract the key.
		key, _ := js.Get("Key").String()
		if len(key) < 1 {
			fmt.Printf("Error agreement is key %s\n", key)
			back["error"] = true
			back["point"] = "Please input Key!"
			rewrite(back, conn)
			return
		}
		// Extract the operation name.
		protocol, _ := js.Get("Protocol").String()
		// Extract the payload; required for "set".
		data, _ := js.Get("Data").Map()
		if data == nil && protocol == "set" {
			fmt.Print("There is no data \n")
			return
		}
		switch protocol {
		case "delete":
			delete(storemap, key)
			back["status"] = true
			break
		case "set":
			storemap[key] = data
			back["status"] = true
			break
		case "find":
			back["data"] = storemap[key]
			back["status"] = true
			break
		default:
			back["status"] = false
			fmt.Print("error protocol \n")
			break
		}
		// Send the response.
		rewrite(back, conn)
	}
}
//写入数据
func rewrite(back map[string]interface{}, conn net.Conn) {
jsback, _ := json.Marshal(back)
//返回内容
conn.Write(jsback)
}
// verify fills in default bind address and port when the parsed config
// lacks usable values and returns the (mutated) map.
func verify(config map[string]string) (config_bak map[string]string) {
	config_bak = config
	if bind := config_bak["bind"]; len(bind) < 3 {
		config_bak["bind"] = IP
	}
	if port := config_bak["port"]; len(port) < 1 {
		config_bak["port"] = PORT
	}
	return
}
// checkError prints a fatal error to stderr and exits with status 1.
// A nil error is a no-op.
func checkError(err error) {
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "Fatal error: %s", err.Error())
	os.Exit(1)
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/pci"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto/filesapp"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the DownloadRestrictions test with the tast framework,
// declaring its fixtures, attributes, dependencies and data files.
func init() {
	testing.AddTest(&testing.Test{
		Func:         DownloadRestrictions,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Behavior of DownloadRestrictions policy, check if a file is downloaded or not based on the value of the policy",
		Contacts: []string{
			"alexanderhartl@google.com", // Test author
		},
		SoftwareDeps: []string{"chrome"},
		Attr:         []string{"group:mainline"},
		// Each Param selects the browser type (ash vs. lacros) the test
		// runs against.
		Params: []testing.Param{{
			Fixture: fixture.ChromePolicyLoggedIn,
			Val:     browser.TypeAsh,
		}, {
			Name:              "lacros",
			ExtraAttr:         []string{"informational"},
			ExtraSoftwareDeps: []string{"lacros"},
			Fixture:           fixture.LacrosPolicyLoggedIn,
			Val:               browser.TypeLacros,
		}},
		// Static files served to the browser during the test.
		Data: []string{"download_restrictions_index.html", "download_restrictions.zip"},
		SearchFlags: []*testing.StringPair{
			pci.SearchFlag(&policy.DownloadRestrictions{}, pci.VerifiedFunctionalityUI),
		},
	})
}
// DownloadRestrictions exercises the DownloadRestrictions policy: for each
// policy value it clicks a download link in a served page and then checks,
// via the Files app, whether the file appeared in the Downloads folder.
func DownloadRestrictions(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
	// Reserve ten seconds for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	// Clear Downloads directory so earlier artifacts cannot cause false
	// positives.
	downloadsPath, err := cryptohome.DownloadsPath(ctx, cr.NormalizedUser())
	if err != nil {
		s.Fatal("Failed to get user's Download path: ", err)
	}
	files, err := ioutil.ReadDir(downloadsPath)
	if err != nil {
		s.Fatal("Failed to get files from Downloads directory")
	}
	for _, file := range files {
		if err = os.RemoveAll(filepath.Join(downloadsPath, file.Name())); err != nil {
			s.Fatal("Failed to remove file: ", file.Name())
		}
	}
	// Serve the test page and zip file from the test's data directory.
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()
	for _, param := range []struct {
		name    string
		blocked bool                         // whether the download is expected to be blocked
		policy  *policy.DownloadRestrictions // policy is the policy we test.
	}{
		{
			name:    "unset",
			blocked: false,
			policy:  &policy.DownloadRestrictions{Stat: policy.StatusUnset},
		},
		{
			name:    "block_downloads",
			blocked: true,
			policy:  &policy.DownloadRestrictions{Val: 3}, // 3: all downloads are blocked
		},
		{
			name:    "allow_downloads",
			blocked: false,
			policy:  &policy.DownloadRestrictions{Val: 0}, // 0: all downloads are allowed
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			// Perform cleanup.
			if err := policyutil.ResetChrome(ctx, fdms, cr); err != nil {
				s.Fatal("Failed to clean up: ", err)
			}
			// Update policies.
			if err := policyutil.ServeAndVerify(ctx, fdms, cr, []policy.Policy{param.policy}); err != nil {
				s.Fatal("Failed to update policies: ", err)
			}
			// Setup browser based on the chrome type.
			br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
			if err != nil {
				s.Fatal("Failed to open the browser: ", err)
			}
			defer closeBrowser(cleanupCtx)
			dconn, err := br.NewConn(ctx, server.URL+"/download_restrictions_index.html")
			if err != nil {
				s.Fatal("Failed to connect to chrome: ", err)
			}
			defer dconn.Close()
			// Trigger the download by clicking the page's download link.
			err = dconn.Eval(ctx, `document.getElementById('dlink').click()`, nil)
			if err != nil {
				s.Fatal("Failed to execute JS expression: ", err)
			}
			tconn, err := cr.TestAPIConn(ctx)
			if err != nil {
				s.Fatal("Failed to create Test API connection: ", err)
			}
			files, err := filesapp.Launch(ctx, tconn)
			if err != nil {
				s.Fatal("Launching the Files App failed: ", err)
			}
			defer files.Close(ctx)
			if err := files.OpenDownloads()(ctx); err != nil {
				s.Fatal("Opening Downloads folder failed: ", err)
			}
			// Wait briefly for the file; a timeout means it never arrived.
			if err := files.WithTimeout(5 * time.Second).WaitForFile("download_restrictions.zip")(ctx); err != nil {
				if !param.blocked {
					if errors.Is(err, context.DeadlineExceeded) {
						s.Error("Download was blocked: ", err)
					} else {
						s.Fatal("Failed to wait for download_restrictions.zip: ", err)
					}
				}
			} else {
				if param.blocked {
					s.Error("Download was not blocked")
				}
				// Remove the downloaded file so later subtests start clean.
				if err := os.Remove(filepath.Join(downloadsPath, "download_restrictions.zip")); err != nil {
					s.Error("Failed to remove download_restrictions.zip: ", err)
				}
			}
		})
	}
}
|
package main
import (
"encoding/csv"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
)
func readCsvFile(path string) [][]string {
file, err := os.Open(path)
if err != nil {
log.Fatal("Unable to read input file "+path, err)
}
defer file.Close()
csvReader := csv.NewReader(file)
records, err := csvReader.ReadAll()
if err != nil {
log.Fatal("Unable to parse file as CSV for "+path, err)
}
return records
}
func getListing(url string) []byte {
response, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
defer response.Body.Close()
body_Text := []byte("")
if response.StatusCode == 200 {
bodyText, err := ioutil.ReadAll(response.Body)
if err != nil {
fmt.Println(err)
}
body_Text = bodyText
}
return body_Text
}
// writCsv writes the given rows to teamprofile.csv in the current
// directory, terminating the program on create or write failures.
func writCsv(text [][]string) {
	f, err := os.Create("teamprofile.csv")
	if err != nil {
		log.Fatalf("Failed creating file %s", err)
	}
	w := csv.NewWriter(f)
	if err := w.WriteAll(text); err != nil { // calls Flush internally
		log.Fatal(err)
	}
	w.Flush()
	f.Close()
}
// main reads URLs from ./profile.csv (first column) and prints the body
// of each listing page.
func main() {
	rows := readCsvFile("./profile.csv")
	for _, row := range rows {
		htmlBody := getListing(row[0])
		fmt.Println(string(htmlBody))
		//writCsv(htmlBody)
	}
}
|
package lc
import "strconv"
// calPoints computes the total score of the baseball game described by ops:
// "C" cancels the previous score, "D" records double the previous score,
// "+" records the sum of the previous two scores, and anything else is
// parsed as an integer score.
// Time: O(n)
// 0ms 2.6mb | 100%
func calPoints(ops []string) int {
	var scores []int
	total := 0
	for _, op := range ops {
		switch op {
		case "C":
			last := len(scores) - 1
			total -= scores[last]
			scores = scores[:last]
		case "D":
			doubled := scores[len(scores)-1] * 2
			scores = append(scores, doubled)
			total += doubled
		case "+":
			combined := scores[len(scores)-1] + scores[len(scores)-2]
			scores = append(scores, combined)
			total += combined
		default:
			value, _ := strconv.Atoi(op)
			scores = append(scores, value)
			total += value
		}
	}
	return total
}
|
package BlackJack
import "testing"
// TestDeckHas52Cards checks that a freshly initialized deck contains
// exactly 52 cards.
func TestDeckHas52Cards(t *testing.T) {
	deck := InitializeDeck()
	if len(deck) != 52 {
		t.Errorf("Expected the result is not 52")
	}
}
// TestDealerGetsTheDeck checks that the dealer receives a full 52-card deck.
func TestDealerGetsTheDeck(t *testing.T) {
	deck := DeckToDealer()
	if len(deck) != 52 {
		t.Errorf("Expected the result is not 52")
	}
}
/*
func TestRandomCardFromDeck(t* testing.T){
cards := new(Card)
cards=GenerateRandomCard(DeckToDealer())
if (cards == nil) {
t.Errorf("Card is not generated and assigned to the dealer")
}
}
*/
|
package testutils
import (
"database/sql"
"encoding/hex"
"errors"
"fmt"
"math/rand"
"github.com/renproject/darknode/abi"
"github.com/renproject/darknode/addr"
"github.com/renproject/kv"
"github.com/renproject/kv/db"
"github.com/renproject/lightnode/store"
)
// CheckTableExistence checks the underlying `db` object if there exists a table
// with given name. It supports the "sqlite3" and "postgres" drivers and
// panics on any other dbName. A nil return means the table exists.
func CheckTableExistence(dbName, tableName string, db *sql.DB) error {
	switch dbName {
	case "sqlite3":
		// Bind the table name as a parameter instead of interpolating it
		// into the SQL, so unusual names cannot break (or inject into) the
		// query.
		var num int
		if err := db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?;", tableName).Scan(&num); err != nil {
			return err
		}
		if num != 1 {
			return errors.New("no such table")
		}
	case "postgres":
		script := `SELECT EXISTS (
			SELECT 1
			FROM pg_tables
			WHERE schemaname = 'public'
			AND tablename = $1
		);`
		var exist bool
		if err := db.QueryRow(script, tableName).Scan(&exist); err != nil {
			return err
		}
		if !exist {
			return errors.New("no such table")
		}
	default:
		panic("unknown sql db")
	}
	return nil
}
// NumOfDataEntries returns the number of data entries in the queried table.
// NOTE(review): the table name is interpolated directly into the SQL (it
// cannot be bound as a parameter); only call with trusted, test-controlled
// names.
func NumOfDataEntries(db *sql.DB, name string) (int, error) {
	script := fmt.Sprintf("SELECT count(*) FROM %v;", name)
	var num int
	err := db.QueryRow(script).Scan(&num)
	return num, err
}

// UpdateTxCreatedTime of given tx hash. The hash is hex-encoded and bound
// as a parameter; createdTime (an int64) and the table name are
// interpolated into the statement text.
func UpdateTxCreatedTime(db *sql.DB, name string, hash abi.B32, createdTime int64) error {
	txHash := hex.EncodeToString(hash[:])
	script := fmt.Sprintf("UPDATE %v set created_time = %v where hash = $1;", name, createdTime)
	_, err := db.Exec(script, txHash)
	return err
}
// MultiAddrStore is a store of `addr.MultiAddress`es.
type MultiAddrStore struct {
	store          db.Table            // backing table keyed by address ID
	bootstrapAddrs addr.MultiAddresses // the fixed bootstrap node addresses
}
// NewStore constructs a new `MultiAddrStore` backed by an in-memory table
// and seeded with the given bootstrap addresses. It panics if any address
// cannot be inserted.
func NewStore(bootstrapAddrs addr.MultiAddresses) *MultiAddrStore {
	store := kv.NewTable(kv.NewMemDB(kv.JSONCodec), "addresses")
	// Use `a` rather than shadowing the imported `addr` package name.
	for _, a := range bootstrapAddrs {
		if err := store.Insert(a.ID().String(), a.String()); err != nil {
			// Include the underlying error so the failure is diagnosable
			// (it was previously discarded).
			panic(fmt.Sprintf("[MultiAddrStore] cannot initialize the store with bootstrap nodes addresses: %v", err))
		}
	}
	return &MultiAddrStore{
		store:          store,
		bootstrapAddrs: bootstrapAddrs,
	}
}
// Insert stores every given address, keyed by its ID, returning the first
// error encountered.
func (multiStore *MultiAddrStore) Insert(addresses addr.MultiAddresses) error {
	for _, a := range addresses {
		if err := multiStore.store.Insert(a.ID().String(), a.String()); err != nil {
			return err
		}
	}
	return nil
}
// RandomAddresses returns up to n addresses drawn uniformly at random from
// the store (all of them, shuffled, when the store holds fewer than n).
// It panics if iterating the underlying store fails.
func (multiStore *MultiAddrStore) RandomAddresses(n int) addr.MultiAddresses {
	addrs := addr.MultiAddresses{}
	iter := multiStore.store.Iterator()
	defer iter.Close()
	// Collect every stored address...
	for iter.Next() {
		id, err := iter.Key()
		if err != nil {
			panic(err)
		}
		address, err := multiStore.Address(id)
		if err != nil {
			panic(err)
		}
		addrs = append(addrs, address)
	}
	// ...then shuffle and truncate to at most n.
	rand.Shuffle(len(addrs), func(i, j int) {
		addrs[i], addrs[j] = addrs[j], addrs[i]
	})
	if len(addrs) < n {
		return addrs
	}
	return addrs[:n]
}
// RandomBootstraps returns up to n bootstrap addresses chosen uniformly at
// random (all of them, shuffled, when n exceeds the number of bootstrap
// addresses).
func (multiStore *MultiAddrStore) RandomBootstraps(n int) addr.MultiAddresses {
	indexes := rand.Perm(len(multiStore.bootstrapAddrs))
	if n > len(multiStore.bootstrapAddrs) {
		n = len(multiStore.bootstrapAddrs)
	}
	addrs := make(addr.MultiAddresses, 0, n)
	// BUG FIX: only take the first n permuted indexes. The previous code
	// ranged over all of `indexes`, so every bootstrap address was returned
	// regardless of n.
	for _, index := range indexes[:n] {
		addrs = append(addrs, multiStore.bootstrapAddrs[index])
	}
	return addrs
}
// Delete removes the addresses with the given IDs, returning the first
// error encountered.
func (multiStore *MultiAddrStore) Delete(ids []string) error {
	for _, key := range ids {
		if err := multiStore.store.Delete(key); err != nil {
			return err
		}
	}
	return nil
}
// Size returns the number of addresses currently stored; it panics when
// the underlying store cannot report its size.
func (multiStore *MultiAddrStore) Size() int {
	n, err := multiStore.store.Size()
	if err != nil {
		panic(err)
	}
	return n
}
// Address looks up the multi-address stored under the given ID, returning
// store.ErrNotFound when the ID is unknown.
func (multiStore *MultiAddrStore) Address(id string) (addr.MultiAddress, error) {
	var raw string
	err := multiStore.store.Get(id, &raw)
	switch {
	case err == db.ErrKeyNotFound:
		return addr.MultiAddress{}, store.ErrNotFound
	case err != nil:
		return addr.MultiAddress{}, err
	}
	return addr.NewMultiAddressFromString(raw)
}
// BootstrapAddresses returns the bootstrap addresses the store was
// constructed with (the slice is shared, not copied).
func (multiStore *MultiAddrStore) BootstrapAddresses() addr.MultiAddresses {
	return multiStore.bootstrapAddrs
}
|
package main
import (
"fmt"
"strings"
"github.com/electricface/go-gir3/gi"
)
// globalFuncNextIdx is the index assigned to the next generated function;
// it is embedded in the generated _I.Get call.
var globalFuncNextIdx int

// pFunction emits the Go source for a binding of the GI function fi into
// s.GoBody: the function signature, invoker lookup, argument marshalling,
// the invoker.Call, and result conversion.
func pFunction(s *SourceFile, fi *gi.FunctionInfo) {
	symbol := fi.Symbol()
	s.GoBody.Pn("// %s", symbol)
	funcIdx := globalFuncNextIdx
	globalFuncNextIdx++
	fnName := fi.Name()
	// Allocator for variable names used inside the generated function.
	var varReg VarReg
	// Parameter list of the generated Go function.
	var args []string
	// Return-parameter list of the generated function; each element is
	// "name type".
	var retParams []string
	// Statements emitted before the ones that build the invoker.Call
	// arguments.
	var beforeArgLines []string
	// Statements that build the arguments passed to invoker.Call.
	var newArgLines []string
	// Names of the arguments passed to invoker.Call.
	var argNames []string
	// Statements to run after invoker.Call returns.
	var afterCallLines []string
	// Number of parameters whose direction is inout or out.
	var numArgOut int
	numArg := fi.NumArg()
	for i := 0; i < numArg; i++ {
		fiArg := fi.Arg(i)
		argTypeInfo := fiArg.Type()
		dir := fiArg.Direction()
		switch dir {
		case gi.DIRECTION_INOUT, gi.DIRECTION_OUT:
			numArgOut++
		}
		varArg := varReg.alloc(fiArg.Name())
		if dir == gi.DIRECTION_IN || dir == gi.DIRECTION_INOUT {
			// Becomes one of the Go function's input parameters.
			type0 := "int/*TODO:TYPE*/"
			if dir == gi.DIRECTION_IN {
				parseResult := parseArgTypeDirIn(varArg, argTypeInfo, &varReg)
				type0 = parseResult.type0
				beforeArgLines = append(beforeArgLines, parseResult.beforeArgLines...)
				varArg := varReg.alloc("arg_" + varArg)
				argNames = append(argNames, varArg)
				newArgLines = append(newArgLines, fmt.Sprintf("%s := %s", varArg, parseResult.newArg))
				afterCallLines = append(afterCallLines, parseResult.afterCallLines...)
			}
			args = append(args, varArg+" "+type0)
		} else if dir == gi.DIRECTION_OUT {
			// Becomes one of the Go function's return values.
			// TODO
		}
		argTypeInfo.Unref()
		fiArg.Unref()
	}
	retTypeInfo := fi.ReturnType()
	defer retTypeInfo.Unref()
	var varRet string
	var varResult string
	var parseRetTypeResult *parseRetTypeResult
	// Whether the function has NO return value.
	var isRetVoid bool
	if gi.TYPE_TAG_VOID == retTypeInfo.Tag() {
		// No return value.
		isRetVoid = true
	} else {
		// Has a return value.
		varRet = varReg.alloc("ret")
		varResult = varReg.alloc("result")
		parseRetTypeResult = parseRetType(varRet, retTypeInfo, &varReg)
		retParams = append(retParams, varResult+" "+parseRetTypeResult.type0)
	}
	fnFlags := fi.Flags()
	varErr := varReg.alloc("err")
	var isThrows bool
	if fnFlags&gi.FUNCTION_THROWS != 0 {
		// TODO: a **GError err needs to be added to the argument list.
		isThrows = true
		retParams = append(retParams, varErr+" error")
	}
	argsJoined := strings.Join(args, ", ")
	retParamsJoined := strings.Join(retParams, ", ")
	if len(retParams) > 0 {
		retParamsJoined = "(" + retParamsJoined + ")"
	}
	// Emit the generated function's header.
	s.GoBody.Pn("func %s(%s) %s {", fnName, argsJoined, retParamsJoined)
	varInvoker := varReg.alloc("iv")
	s.GoBody.Pn("%s, %s := _I.Get(%d, %q, \"\")", varInvoker, varErr, funcIdx, fnName)
	{ // Handle failure to obtain the invoker.
		s.GoBody.Pn("if %s != nil {", varErr)
		if isThrows {
			// The err variable is returned as the error.
		} else {
			// Just log err.
			s.GoBody.Pn("log.Println(\"WARN:\", %s) /*go:log*/", varErr)
		}
		s.GoBody.Pn("return")
		s.GoBody.Pn("}") // end if err != nil
	}
	var varCMemArgs string
	if numArgOut > 0 {
		varCMemArgs = varReg.alloc("cma")
		s.GoBody.Pn("%v := gi.AllocArgs(%v)", varCMemArgs, numArgOut)
	}
	for _, line := range beforeArgLines {
		s.GoBody.Pn(line)
	}
	for _, line := range newArgLines {
		s.GoBody.Pn(line)
	}
	callArgArgs := "nil"
	if len(argNames) > 0 {
		// e.g. emits: args := []gi.Argument{arg0,arg1}
		varArgs := varReg.alloc("args")
		s.GoBody.Pn("%s := []gi.Argument{%s}", varArgs, strings.Join(argNames, ", "))
		callArgArgs = varArgs
	}
	callArgRet := "nil"
	if !isRetVoid {
		// Has a return value.
		callArgRet = "&" + varRet
		s.GoBody.Pn("var %s gi.Argument", varRet)
	}
	s.GoBody.Pn("%s.Call(%s, %s)", varInvoker, callArgArgs, callArgRet)
	if !isRetVoid && parseRetTypeResult != nil {
		s.GoBody.Pn("%s = %s", varResult, parseRetTypeResult.expr)
	}
	for _, line := range afterCallLines {
		s.GoBody.Pn(line)
	}
	if numArgOut > 0 {
		s.GoBody.Pn("%v.Free()", varCMemArgs)
	}
	if !isRetVoid || isThrows {
		s.GoBody.Pn("return")
	}
	s.GoBody.Pn("}") // end func
}
// parseRetTypeResult describes how to turn the gi.Argument produced by
// invoker.Call into the generated function's return value.
type parseRetTypeResult struct {
	expr  string // expression converting the argument into the return value
	type0 string // return value type used in the generated function
}

// parseRetType builds the conversion expression and Go type for a return
// value, based on the GI type tag of ti.
// NOTE(review): varReg is currently unused here — confirm before removing
// the parameter.
func parseRetType(varRet string, ti *gi.TypeInfo, varReg *VarReg) *parseRetTypeResult {
	expr := ""
	type0 := ""
	tag := ti.Tag()
	switch tag {
	case gi.TYPE_TAG_UTF8, gi.TYPE_TAG_FILENAME:
		// String types. Generates code like:
		//   result = ret.String().Take()
		expr = varRet + ".String().Take()"
		type0 = "string"
	case gi.TYPE_TAG_BOOLEAN,
		gi.TYPE_TAG_INT8, gi.TYPE_TAG_UINT8,
		gi.TYPE_TAG_INT16, gi.TYPE_TAG_UINT16,
		gi.TYPE_TAG_INT32, gi.TYPE_TAG_UINT32,
		gi.TYPE_TAG_INT64, gi.TYPE_TAG_UINT64,
		gi.TYPE_TAG_FLOAT, gi.TYPE_TAG_DOUBLE:
		// Simple scalar types. Generates code like:
		//   result = ret.Bool()
		expr = fmt.Sprintf("%s.%s()", varRet, getArgumentMethodPart(tag))
		type0 = getTypeTagType(tag)
	default:
		// Unknown type; fall back to int and mark with a TODO.
		expr = varRet + ".Int()/*TODO*/"
		type0 = "int/*TODO_TYPE*/"
	}
	return &parseRetTypeResult{
		expr:  expr,
		type0: type0,
	}
}
// parseArgTypeDirOut will handle direction=out arguments.
// TODO: not yet implemented.
func parseArgTypeDirOut() {
}

// parseArgTypeDirInOut will handle direction=inout arguments.
// TODO: not yet implemented.
func parseArgTypeDirInOut() {
}

// parseArgTypeDirInResult carries the generated-code fragments for a
// direction=in argument.
type parseArgTypeDirInResult struct {
	newArg         string   // expression used with gi.NewArgument
	type0          string   // type used in the generated Go signature
	beforeArgLines []string // statements run before arg_xxx = gi.NewXXXArgument
	afterCallLines []string // statements run after invoker.Call()
}
// getTypeTagType maps a GI scalar type tag to the corresponding Go type
// name, returning the empty string for tags it does not know about.
// TODO: rename this function.
func getTypeTagType(tag gi.TypeTag) (type0 string) {
	switch tag {
	case gi.TYPE_TAG_BOOLEAN:
		return "bool"
	case gi.TYPE_TAG_INT8:
		return "int8"
	case gi.TYPE_TAG_UINT8:
		return "uint8"
	case gi.TYPE_TAG_INT16:
		return "int16"
	case gi.TYPE_TAG_UINT16:
		return "uint16"
	case gi.TYPE_TAG_INT32:
		return "int32"
	case gi.TYPE_TAG_UINT32:
		return "uint32"
	case gi.TYPE_TAG_INT64:
		return "int64"
	case gi.TYPE_TAG_UINT64:
		return "uint64"
	case gi.TYPE_TAG_FLOAT:
		return "float32"
	case gi.TYPE_TAG_DOUBLE:
		return "float64"
	}
	return ""
}
// getArgumentMethodPart maps a GI scalar type tag to the matching
// gi.Argument accessor-method name (e.g. "Bool", "Int8"), returning the
// empty string for unknown tags.
func getArgumentMethodPart(tag gi.TypeTag) (str string) {
	switch tag {
	case gi.TYPE_TAG_BOOLEAN:
		return "Bool"
	case gi.TYPE_TAG_INT8:
		return "Int8"
	case gi.TYPE_TAG_UINT8:
		return "Uint8"
	case gi.TYPE_TAG_INT16:
		return "Int16"
	case gi.TYPE_TAG_UINT16:
		return "Uint16"
	case gi.TYPE_TAG_INT32:
		return "Int32"
	case gi.TYPE_TAG_UINT32:
		return "Uint32"
	case gi.TYPE_TAG_INT64:
		return "Int64"
	case gi.TYPE_TAG_UINT64:
		return "Uint64"
	case gi.TYPE_TAG_FLOAT:
		return "Float"
	case gi.TYPE_TAG_DOUBLE:
		return "Double"
	}
	return ""
}
// parseArgTypeDirIn builds the generated-code fragments for a direction=in
// argument: how to construct the gi.Argument, the Go parameter type, and
// any statements needed before/after the invoker.Call.
func parseArgTypeDirIn(varArg string, ti *gi.TypeInfo, varReg *VarReg) *parseArgTypeDirInResult {
	// Currently only direction=in arguments are handled.
	var newArg string
	var beforeArgLines []string
	var afterCallLines []string
	var type0 string
	tag := ti.Tag()
	switch tag {
	case gi.TYPE_TAG_UTF8, gi.TYPE_TAG_FILENAME:
		// String types. Generates code like:
		//   pArg = gi.CString(arg)
		//   arg = gi.NewStringArgument(pArg)
		// after call:
		//   gi.Free(pArg)
		varCArg := varReg.alloc("c_" + varArg)
		beforeArgLines = append(beforeArgLines,
			fmt.Sprintf("%s := gi.CString(%s)", varCArg, varArg))
		newArg = fmt.Sprintf("gi.NewStringArgument(%s)", varCArg)
		afterCallLines = append(afterCallLines,
			fmt.Sprintf("gi.Free(%s)", varCArg))
		type0 = "string"
	case gi.TYPE_TAG_BOOLEAN,
		gi.TYPE_TAG_INT8, gi.TYPE_TAG_UINT8,
		gi.TYPE_TAG_INT16, gi.TYPE_TAG_UINT16,
		gi.TYPE_TAG_INT32, gi.TYPE_TAG_UINT32,
		gi.TYPE_TAG_INT64, gi.TYPE_TAG_UINT64,
		gi.TYPE_TAG_FLOAT, gi.TYPE_TAG_DOUBLE:
		// Simple scalar types. Reuse the shared tag->name helpers instead
		// of duplicating their switches inline; this keeps the mapping
		// consistent with parseRetType, which already uses them.
		newArg = fmt.Sprintf("gi.New%sArgument(%s)", getArgumentMethodPart(tag), varArg)
		type0 = getTypeTagType(tag)
	default:
		// Unknown type; fall back to int and mark with a TODO.
		type0 = "int/*TODO:TYPE*/"
		newArg = fmt.Sprintf("gi.NewIntArgument(%s)/*TODO*/", varArg)
	}
	return &parseArgTypeDirInResult{
		newArg:         newArg,
		type0:          type0,
		beforeArgLines: beforeArgLines,
		afterCallLines: afterCallLines,
	}
}
/*
direction: in
作为参数
direction: out
作为返回值
direction: inout
作为参数,之后要把参数给修改了 *arg =
*/
|
package main
import "fmt"
// dailyTemperatures returns, for each day i, the number of days you have
// to wait after day i for a warmer temperature (0 if none exists).
//
// Fixes: the previous version panicked on empty input (it indexed
// T[len(T)-1] unconditionally) and rescanned its stack for every day,
// giving O(n^2) time. This version uses the standard monotonic index
// stack: each index is pushed and popped at most once, so it runs in O(n)
// and handles empty/single-element input naturally.
func dailyTemperatures(T []int) []int {
	results := make([]int, len(T))
	// stack holds indices whose temperatures are strictly decreasing.
	var stack []int
	for i, temp := range T {
		// A warmer day resolves every cooler day still waiting on the stack.
		for len(stack) > 0 && T[stack[len(stack)-1]] < temp {
			j := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			results[j] = i - j
		}
		stack = append(stack, i)
	}
	// Indices left on the stack never see a warmer day; results stays 0.
	return results
}
// main runs dailyTemperatures on the sample input and prints the result.
func main() {
	temps := []int{73, 74, 75, 71, 69, 72, 76, 73}
	answer := dailyTemperatures(temps)
	fmt.Println(answer)
}
|
package concurrency
// WebsiteChecker reports a boolean verdict for a single URL.
type WebsiteChecker func(string) bool

// result pairs a URL with its verdict so both can travel over one channel.
type result struct {
	string
	bool
}

// CheckWebsite runs wc against every URL concurrently and returns a map
// from URL to the checker's verdict.
func CheckWebsite(wc WebsiteChecker, urls []string) map[string]bool {
	verdicts := make(map[string]bool)
	outcomes := make(chan result)
	// Launch one goroutine per URL; each sends its outcome on the channel.
	for _, u := range urls {
		go func(site string) {
			outcomes <- result{site, wc(site)}
		}(u)
	}
	// Collect exactly one outcome per URL; only this goroutine writes the map.
	for range urls {
		r := <-outcomes
		verdicts[r.string] = r.bool
	}
	return verdicts
}
|
package models
import (
"database/sql"
"time"
)
// User is the database model for an account row, mapped to SQL columns via
// the `db` tags and (de)serialized in API payloads via the `json` tags.
type User struct {
	ID        int          `db:"id" json:"id,omitempty" validate:"required"`
	CreatedAt time.Time    `db:"created_at" json:"created_at,omitempty"`
	UpdatedAt sql.NullTime `db:"updated_at" json:"updated_at,omitempty"` // nullable: NULL until the row is first updated
	Email     string       `db:"email" json:"email" validate:"required"`
	// NOTE(review): Password is serialized in JSON without omitempty —
	// presumably a hash; confirm it is never returned to clients.
	Password          string         `db:"password" json:"password" validate:"required,lte=255"`
	Role              string         `db:"role" json:"role,omitempty" validate:"required,lte=255"`
	VerificationToken sql.NullString `db:"verification_token" json:"verification_token,omitempty"` // nullable
	UserStatus        int            `db:"user_status" json:"user_status,omitempty" validate:"required,len=1"` // single-digit code; semantics not visible here
}
|
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is
// installed as one of the default balancers in gRPC, users don't need to
// explicitly install this balancer.
package round_robin
import (
"math/rand"
"sync"
"time"
"shared/grpc/balancer"
googleBalancer "google.golang.org/grpc/balancer"
googleBase "google.golang.org/grpc/balancer/base"
)
// Name is the name of round_robin balancer.
const Name = "round_robin"

// init seeds the global math/rand source so each process starts its
// round-robin pickers at a different random index (see Build).
func init() {
	rand.Seed(time.Now().UTC().UnixNano())
}
// rrPickerBuilder builds round-robin pickers from the set of ready SubConns.
type rrPickerBuilder struct{}

// NewRRPickerBuilder returns a picker builder implementing round-robin
// selection.
func NewRRPickerBuilder() *rrPickerBuilder {
	b := rrPickerBuilder{}
	return &b
}
// Build creates a round-robin picker over the ready SubConns in info. When
// no SubConn is ready it returns an error picker that fails RPCs with
// ErrNoSubConnAvailable.
func (*rrPickerBuilder) Build(info googleBase.PickerBuildInfo) googleBalancer.Picker {
	ready := len(info.ReadySCs)
	if ready == 0 {
		return balancer.NewErrPicker(googleBalancer.ErrNoSubConnAvailable)
	}
	conns := make([]googleBalancer.SubConn, 0, ready)
	for conn := range info.ReadySCs {
		conns = append(conns, conn)
	}
	// Start at a random index: the balancer rebuilds a picker whenever
	// SubConn states change, and always starting at 0 would put excess
	// load on the first server in the list.
	return &rrPicker{
		subConns: conns,
		next:     rand.Intn(ready),
	}
}
// rrPicker selects SubConns in round-robin order.
type rrPicker struct {
	// subConns is the snapshot of the roundrobin balancer when this picker was
	// created. The slice is immutable. Each GetBalance() will do a round robin
	// selection from it and return the selected SubConn.
	subConns []googleBalancer.SubConn

	// mu guards next, which concurrent Pick calls advance.
	mu   sync.Mutex
	next int
}
// Pick returns the next SubConn in round-robin order. It is safe for
// concurrent use: the cursor is protected by p.mu.
func (p *rrPicker) Pick(googleBalancer.PickInfo) (googleBalancer.PickResult, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	chosen := p.subConns[p.next]
	p.next = (p.next + 1) % len(p.subConns)
	return googleBalancer.PickResult{SubConn: chosen}, nil
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* The sample smart contract for documentation topic:
* Writing Your First Blockchain Application
*/
package main
/* Imports
* 4 utility libraries for formatting, handling bytes, reading and writing JSON, and string manipulation
* 2 specific Hyperledger Fabric specific libraries for Smart Contracts
*/
import (
"encoding/json"
"fmt"
"strconv"
utils "github.com/cd1/utils-golang"
"github.com/hyperledger/fabric/core/chaincode/shim"
sc "github.com/hyperledger/fabric/protos/peer"
)
// SmartContract implements the chaincode entry points (Init/Invoke).
type SmartContract struct {
}

// Car is the demo asset seeded by initLedger.
type Car struct {
	Make   string `json:"make"`
	Model  string `json:"model"`
	Colour string `json:"colour"`
	Owner  string `json:"owner"`
}

// User is an account document stored on the ledger (Doctype "User").
type User struct {
	UserID       string
	Name         string
	Email        string
	PasswordHash string
	Division     string
	District     string
	Village      string
	Thana        string
	Contact      string
	Balance      int // new accounts start at 50000 (see register)
	UserType     string
	Doctype      string // CouchDB selector discriminator, "User"
}

// OwnedCrops is a crop lot held by a user.
type OwnedCrops struct {
	CropID    string
	OwnerID   string
	OwnerName string
	CropKind  string
	CropName  string
	Quantity  int
	Price     int
	Doctype   string
}

// ForAdCrops is a crop lot advertised for sale.
// NOTE(review): Quantity is a string here but an int in OwnedCrops —
// confirm whether that asymmetry is intentional.
type ForAdCrops struct {
	CropID     string
	CropKind   string
	Quantity   string
	Price      int
	SellerID   string
	SellerName string
	Doctype    string
}

// Transaction records a completed crop purchase (Doctype "Transaction").
type Transaction struct {
	TransID    string
	CropID     string
	BuyerID    string
	SellerID   string
	CropKind   string
	CropAmount int
	Price      int
	Doctype    string
}
/*
 * The Init method is called when the Smart Contract "fabcar" is instantiated by the blockchain network
 * Best practice is to have any Ledger initialization in separate function -- see initLedger()
 */
func (s *SmartContract) Init(APIstub shim.ChaincodeStubInterface) sc.Response {
	// Nothing to do at instantiation time; seeding happens via initLedger.
	return shim.Success(nil)
}
// Invoke is called for every application request against this chaincode.
// It routes the requested smart-contract function (with its arguments)
// to the matching handler; unknown names yield an error response.
func (s *SmartContract) Invoke(APIstub shim.ChaincodeStubInterface) sc.Response {
	// Retrieve the requested Smart Contract function and arguments.
	function, args := APIstub.GetFunctionAndParameters()

	// A switch is the idiomatic replacement for the long if/else-if chain.
	switch function {
	case "initLedger":
		return s.initLedger(APIstub)
	case "queryAllCrops":
		return s.queryAllCrops(APIstub)
	case "register":
		return s.register(APIstub, args)
	case "login":
		return s.login(APIstub, args)
	case "queryByKind":
		return s.queryByKind(APIstub, args)
	case "querySellHistory":
		return s.querySellHistory(APIstub, args)
	case "queryBuyHistory":
		return s.queryBuyHistory(APIstub, args)
	case "userProfile":
		return s.userProfile(APIstub, args)
	}
	return shim.Error("Invalid Smart Contract function name.")
}
// register creates a new User with a random ID and an initial balance of
// 50000, persists it, and returns the marshalled user.
//
// args: [0]=name [1]=email [2]=passwordHash [3]=division [4]=district
// [5]=village [6]=thana [7]=contact [8]=userType
func (s *SmartContract) register(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
	// Guard against short argument lists: the original indexed args[0..8]
	// unconditionally and would panic inside the chaincode container.
	if len(args) != 9 {
		return shim.Error("Incorrect number of arguments, required 9, given " + strconv.Itoa(len(args)))
	}
	userID := utils.RandomString()
	// Named fields instead of a positional literal: resilient to field
	// reordering in User.
	user := User{
		UserID:       userID,
		Name:         args[0],
		Email:        args[1],
		PasswordHash: args[2],
		Division:     args[3],
		District:     args[4],
		Village:      args[5],
		Thana:        args[6],
		Contact:      args[7],
		Balance:      50000, // every new account starts with this balance
		UserType:     args[8],
		Doctype:      "User", // discriminator used by the CouchDB query helpers
	}
	fmt.Println("User created In register function:", user)

	userAsBytes, err := json.Marshal(user)
	if err != nil {
		return shim.Error(err.Error())
	}
	// Surface storage failures instead of silently dropping them.
	if err := APIstub.PutState(userID, userAsBytes); err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success(userAsBytes)
}
// login authenticates a user by email and password hash and returns the
// marshalled user record on success.
//
// args: [0]=email [1]=passwordHash
func (s *SmartContract) login(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
	if len(args) != 2 {
		return shim.Error("Incorrect number of arguments, required 2, given " + strconv.Itoa(len(args)))
	}
	email := args[0]
	passwordHash := args[1]

	// getUser returns the zero User when no record matches, so an unknown
	// email also falls through to the mismatch error below.
	user := getUser(APIstub, email)
	if user.PasswordHash != passwordHash {
		return shim.Error("password doesnt match")
	}
	fmt.Println("user found:", user)

	userAsBytes, err := json.Marshal(user)
	if err != nil {
		// The original discarded this error and could return "null".
		return shim.Error(err.Error())
	}
	return shim.Success(userAsBytes)
}
// getUser looks up the User document whose Email equals email via a
// CouchDB selector query and returns it decoded.
//
// NOTE(review): query and unmarshal errors are deliberately discarded, so
// a missing or corrupt record yields the zero User — callers must treat a
// zero value as "not found".
func getUser(APIstub shim.ChaincodeStubInterface, email string) User {
	userQuery1 := newCouchQueryBuilder().addSelector("Doctype", "User").addSelector("Email", email).getQueryString()
	user, _ := lastQueryValueForQueryString(APIstub, userQuery1)
	var userData User
	_ = json.Unmarshal(user, &userData)
	return userData
}
// getUserByID looks up the User document whose UserID equals ID via a
// CouchDB selector query and returns it decoded.
//
// NOTE(review): as with getUser, errors are discarded and a failed lookup
// returns the zero User.
func getUserByID(APIstub shim.ChaincodeStubInterface, ID string) User {
	userQuery1 := newCouchQueryBuilder().addSelector("Doctype", "User").addSelector("UserID", ID).getQueryString()
	user, _ := lastQueryValueForQueryString(APIstub, userQuery1)
	var userData1 User
	_ = json.Unmarshal(user, &userData1)
	return userData1
}
//func getCrop(APIstub shim.ChaincodeStubInterface, Id string) Crop {
// cropQuery1 := newCouchQueryBuilder().addSelector("Doctype", "Crop").addSelector("Id", Id).getQueryString()
// crop, _ := lastQueryValueForQueryString(APIstub, cropQuery1)
// var cropData Crop
// _ = json.Unmarshal(crop, &cropData)
// return cropData
//}
// queryByKind returns all Crop documents whose CropKind matches args[0].
func (s *SmartContract) queryByKind(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
	kind := args[0]
	query := newCouchQueryBuilder().addSelector("Doctype", "Crop").addSelector("CropKind", kind).getQueryString()
	results, err := getQueryResultForQueryString(APIstub, query)
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success(results)
}
// initLedger seeds the ledger with a fixed demo set of cars stored under
// keys CAR0, CAR1, ... It is meant to be invoked once after instantiation.
func (s *SmartContract) initLedger(APIstub shim.ChaincodeStubInterface) sc.Response {
	cars := []Car{
		{Make: "Toyota", Model: "Prius", Colour: "blue", Owner: "Tomoko"},
	}
	// Idiomatic range loop replaces the manual counter; Marshal/PutState
	// errors are surfaced instead of silently dropped.
	for i, car := range cars {
		carAsBytes, err := json.Marshal(car)
		if err != nil {
			return shim.Error(err.Error())
		}
		if err := APIstub.PutState("CAR"+strconv.Itoa(i), carAsBytes); err != nil {
			return shim.Error(err.Error())
		}
		fmt.Println("Added", car)
	}
	return shim.Success(nil)
}
//func (s *SmartContract) createCrops(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
//
// var cropId string = utils.RandomString()
// var id = cropId
// var ownerid string = args[0]
// var ownername string = args[1]
// var cropKind string = args[2]
// quantity, err := strconv.Atoi(args[3])
// if err != nil {
// fmt.Println("quantity must be an integer :/")
// }
// price, err := strconv.Atoi(args[4])
// if err != nil {
// fmt.Println("Price must be an integer :/")
// }
//
// var docType string = "Crop"
// var crop = Crop{id, cropId, ownerid, ownername, cropKind, quantity, price, docType}
// cropAsBytes, err := json.Marshal(crop)
//
// if err != nil {
// return shim.Error(err.Error())
// }
//
// err = APIstub.PutState(id, cropAsBytes)
// if err != nil {
// return shim.Error(err.Error())
// }
//
// return shim.Success(nil)
//}
// queryAllCrops returns every document whose Doctype is "Crop".
func (s *SmartContract) queryAllCrops(APIstub shim.ChaincodeStubInterface) sc.Response {
	query := `{"selector":{"Doctype":"Crop"}}`
	results, err := getQueryResultForQueryString(APIstub, query)
	if err != nil {
		return shim.Error(err.Error())
	}
	fmt.Println("queralllcrops working")
	return shim.Success(results)
}
// querySellHistory returns all Transaction documents in which the user
// given by args[0] was the seller (OwnerId).
func (s *SmartContract) querySellHistory(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
	// Guard the args[0] access: the original panicked on an empty list.
	if len(args) != 1 {
		return shim.Error("Incorrect number of arguments, required 1, given " + strconv.Itoa(len(args)))
	}
	ownerId := args[0]
	queryString := newCouchQueryBuilder().addSelector("Doctype", "Transaction").addSelector("OwnerId", ownerId).getQueryString()
	queryResults, err := getQueryResultForQueryString(APIstub, queryString)
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success(queryResults)
}
// queryBuyHistory returns all Transaction documents in which the user
// given by args[0] was the buyer (BuyerId).
func (s *SmartContract) queryBuyHistory(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
	// Guard the args[0] access: the original panicked on an empty list.
	if len(args) != 1 {
		return shim.Error("Incorrect number of arguments, required 1, given " + strconv.Itoa(len(args)))
	}
	BuyerId := args[0]
	queryString := newCouchQueryBuilder().addSelector("Doctype", "Transaction").addSelector("BuyerId", BuyerId).getQueryString()
	queryResults, err := getQueryResultForQueryString(APIstub, queryString)
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success(queryResults)
}
//func (s *SmartContract) BuyCrops(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
//
// if len(args) != 3 {
// return shim.Error("Incorrect number of arguments. Expecting 4")
// }
// cropID := args[0]
// buyerId := args[1]
// quan, err := strconv.Atoi(args[2])
// if err != nil {
// fmt.Println("quantity must be an integer :/")
// }
// cropData := getCrop(APIstub, cropID)
// ownerId := cropData.OwnerId
// ownerData := getUserById(APIstub, ownerId)
// buyerData := getUserById(APIstub, buyerId)
// newPrice := cropData.Price * quan
//
// if buyerData.Balance >= newPrice {
// buyerData.Balance = buyerData.Balance - newPrice
// ownerData.Balance = ownerData.Balance + newPrice
// cropData.Quantity = cropData.Quantity - quan
//
// id := utils.RandomString()
// buyerCrop := Crop{id, id, buyerId, cropData.OwnerName, cropData.CropKind, quan, cropData.Price, cropData.Doctype}
//
// //crop er amount kombe
// cropAsBytes, _ := json.Marshal(cropData)
// APIstub.PutState(cropData.Id, cropAsBytes)
// fmt.Println("after update", cropData)
//
// // buyer er jnno crop create hbe
// buyerCropAsBytes, _ := json.Marshal(buyerCrop)
// APIstub.PutState(buyerCrop.Id, buyerCropAsBytes)
// fmt.Println("after update", buyerData)
//
// //buyer er taka koima jabe
// buyerDataAsBytes, _ := json.Marshal(buyerData)
// APIstub.PutState(buyerData.Id, buyerDataAsBytes)
//
// //seller er tk barbe
// ownererDataAsBytes, _ := json.Marshal(ownerData)
// APIstub.PutState(ownerData.Id, ownererDataAsBytes)
//
// trans_id := utils.RandomString()
// trasn_history := Transaction{trans_id, cropID, buyerId, ownerId, cropData.CropKind, buyerCrop.Quantity, cropData.Price, "Transaction"}
//
// transAsBytes, _ := json.Marshal(trasn_history)
// APIstub.PutState(trans_id, transAsBytes)
// return shim.Success(nil)
//
// } else {
// return shim.Error("error")
// }
//
//}
// userProfile returns the raw ledger state stored under the user ID given
// in args[0] (the marshalled User written by register).
func (s *SmartContract) userProfile(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
	if len(args) != 1 {
		return shim.Error("Incorrect number of arguments, required 1, given " + strconv.Itoa(len(args)))
	}
	user_info, err := APIstub.GetState(args[0])
	if err != nil {
		// Return the underlying error instead of the original opaque
		// placeholder message ("vul hoise").
		return shim.Error(err.Error())
	}
	return shim.Success(user_info)
}
// func (s *SmartContract) changeCropOwner(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
// if len(args) != 2 {
// return shim.Error("Incorrect number of arguments. Expecting 2")
// }
// cropAsBytes, _ := APIstub.GetState(args[0])
// crop := Crop{}
// json.Unmarshal(cropAsBytes, &crop)
// crop.Name = args[1]
// cropAsBytes, _ = json.Marshal(crop)
// APIstub.PutState(args[0], cropAsBytes)
// return shim.Success(nil)
// }
// main is only relevant in unit-test mode and is included for
// completeness: it hands the contract to the chaincode shim.
func main() {
	if err := shim.Start(new(SmartContract)); err != nil {
		fmt.Printf("Error creating new Smart Contract: %s", err)
	}
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"context"
"fmt"
"math"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/quotapool"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
)
// workPool keeps track of what tests still need to run and facilitates
// selecting the next test to run.
type workPool struct {
	// count is the total number of times each test has to run. It is constant.
	// Not to be confused with the count inside mu.tests, which tracks remaining
	// runs.
	count int
	// mu guards the mutable remaining-run bookkeeping.
	mu struct {
		syncutil.Mutex
		// tests with remaining run count.
		tests []testWithCount
	}
}
// newWorkPool creates a workPool in which every test in tests starts with
// count runs remaining.
func newWorkPool(tests []testSpec, count int) *workPool {
	pool := &workPool{count: count}
	pool.mu.tests = make([]testWithCount, 0, len(tests))
	for _, spec := range tests {
		pool.mu.tests = append(pool.mu.tests, testWithCount{spec: spec, count: count})
	}
	return pool
}
// testToRunRes represents the return value of getTestToRun. It provides
// information about what test to run (if any) and what cluster to use for it.
// When noWork is set, every other field is meaningless.
type testToRunRes struct {
	// noWork is set if the work pool was empty and thus no test was selected. No
	// other fields are set.
	noWork bool
	// spec is the selected test.
	spec testSpec
	// runNum is run number. 1 if --count was not used.
	runNum int
	// canReuseCluster is true if the selected test can reuse the cluster passed
	// to testToRun(). Will be false if noWork is set.
	canReuseCluster bool
	// alloc is set if canReuseCluster is false (and noWork is not set). It
	// represents the resources to use for creating a new cluster (matching spec).
	// The alloc needs to be transferred to the cluster that is created, or
	// otherwise Release()d.
	alloc *quotapool.IntAlloc
}
// workRemaining returns a snapshot of the tests that still have runs left.
// The copy lets callers inspect it without holding the pool's lock.
func (p *workPool) workRemaining() []testWithCount {
	p.mu.Lock()
	defer p.mu.Unlock()
	snapshot := append([]testWithCount(nil), p.mu.tests...)
	return snapshot
}
// getTestToRun selects a test. It optionally takes a cluster and will try to
// select a test that can reuse that cluster. If it succeeds, then
// testToRunRes.canReuseCluster will be set. Otherwise, the cluster is destroyed
// so its resources are released, and the result will contain a quota alloc to
// be used by the caller for creating a new cluster.
//
// If a new cluster needs to be created, the call blocks until enough resources
// are taken out of qp.
//
// If there are no more tests to run, c will be destroyed and the result will
// have noWork set.
func (p *workPool) getTestToRun(
	ctx context.Context,
	c *cluster,
	qp *quotapool.IntPool,
	cr *clusterRegistry,
	onDestroy func(),
	l *logger,
) (testToRunRes, error) {
	// If we've been given a cluster, see if we can reuse it.
	if c != nil {
		ttr := p.selectTestForCluster(ctx, c.spec, cr)
		if ttr.noWork {
			// We failed to find a test that can take advantage of this cluster. So
			// we're going to release it, which will deallocate its resources, and
			// then we'll look for a test below.
			l.PrintfCtx(ctx,
				"No tests that can reuse cluster %s found (or there are no further tests to run). "+
					"Destroying.", c)
			// Destroy before selecting, so the freed resources are available
			// to the quota pool by the time selectTest blocks on it.
			c.Destroy(ctx, closeLogger, l)
			onDestroy()
		} else {
			return ttr, nil
		}
	}
	return p.selectTest(ctx, qp)
}
// selectTestForCluster selects a test to run on a cluster with a given spec.
//
// Among tests that match the spec, we do the following:
// - If the cluster is already tagged, we only look at tests with the same tag.
// - Otherwise, we'll choose in the following order of preference:
// 1) tests that leave the cluster usable by anybody afterwards
// 2) tests that leave the cluster usable by some other tests
// 2.1) within this OnlyTagged<foo> category, we'll prefer the tag with the
// fewest existing clusters.
// 3) tests that leave the cluster unusable by anybody
//
// Within each of the categories, we'll give preference to tests with fewer
// runs.
//
// cr is used for its information about how many clusters with a given tag currently exist.
func (p *workPool) selectTestForCluster(
	ctx context.Context, spec clusterSpec, cr *clusterRegistry,
) testToRunRes {
	p.mu.Lock()
	defer p.mu.Unlock()
	testsWithCounts := p.findCompatibleTestsLocked(spec)
	if len(testsWithCounts) == 0 {
		return testToRunRes{noWork: true}
	}
	tag := ""
	if p, ok := spec.ReusePolicy.(reusePolicyTagged); ok {
		tag = p.tag
	}
	// Find the best test to run.
	// NOTE(review): scores include the remaining run count, so presumably
	// every candidate scores above the initial 0 and one is always picked —
	// confirm counts in the pool are always >= 1.
	candidateScore := 0
	var candidate testWithCount
	for _, tc := range testsWithCounts {
		score := scoreTestAgainstCluster(tc, tag, cr)
		if score > candidateScore {
			candidateScore = score
			candidate = tc
		}
	}
	// Consume one run from the winner and report which run number (1-based)
	// this selection represents.
	p.decTestLocked(ctx, candidate.spec.Name)
	runNum := p.count - candidate.count + 1
	return testToRunRes{
		spec:            candidate.spec,
		runNum:          runNum,
		canReuseCluster: true,
	}
}
// selectTest selects a test to run based on the available resources. If there are
// no resources available to run any test, it blocks until enough resources become available.
//
// If multiple tests are eligible to run, one with the most runs left is chosen.
// TODO(andrei): We could be smarter in guessing what kind of cluster is best to
// allocate.
func (p *workPool) selectTest(ctx context.Context, qp *quotapool.IntPool) (testToRunRes, error) {
	var ttr testToRunRes
	// The closure may be re-invoked by the quota pool each time quota is
	// returned, until it succeeds or fails permanently.
	alloc, err := qp.AcquireFunc(ctx, func(ctx context.Context, pi quotapool.PoolInfo) (uint64, error) {
		p.mu.Lock()
		defer p.mu.Unlock()
		if len(p.mu.tests) == 0 {
			ttr = testToRunRes{
				noWork: true,
			}
			return 0, nil
		}
		candidateIdx := -1
		candidateCount := 0
		// smallestTest tracks the cheapest remaining test (in CPUs) so we
		// can distinguish "wait for quota" from "can never run".
		smallestTest := math.MaxInt64
		for i, t := range p.mu.tests {
			cpu := t.spec.Cluster.NodeCount * t.spec.Cluster.CPUs
			if cpu < smallestTest {
				smallestTest = cpu
			}
			if uint64(cpu) > pi.Available {
				continue
			}
			// Among the tests that fit, prefer the one with the most runs left.
			if t.count > candidateCount {
				candidateIdx = i
				candidateCount = t.count
			}
		}
		if candidateIdx == -1 {
			if uint64(smallestTest) > pi.Capacity {
				// Even an empty pool could never fit the smallest remaining
				// test: fail instead of blocking forever.
				return 0, fmt.Errorf("not enough CPU quota to run any of the remaining tests")
			}
			return 0, quotapool.ErrNotEnoughQuota
		}
		tc := p.mu.tests[candidateIdx]
		runNum := p.count - tc.count + 1
		p.decTestLocked(ctx, tc.spec.Name)
		ttr = testToRunRes{
			spec:            tc.spec,
			runNum:          runNum,
			canReuseCluster: false,
		}
		// Acquire quota for the test's full CPU footprint.
		cpu := tc.spec.Cluster.NodeCount * tc.spec.Cluster.CPUs
		return uint64(cpu), nil
	})
	if err != nil {
		return testToRunRes{}, err
	}
	ttr.alloc = alloc
	return ttr, nil
}
// scoreTestAgainstCluster scores the suitability of running a test against a
// cluster currently tagged with tag (empty if cluster is not tagged).
//
// Tests with a ReuseAny policy score highest, tagged-reuse tests next, and
// NoReuse tests lowest; within a category, tests with more remaining runs
// score higher.
//
// cr is used for its information about how many clusters with a given tag
// currently exist.
func scoreTestAgainstCluster(tc testWithCount, tag string, cr *clusterRegistry) int {
	t := tc.spec
	testPolicy := t.Cluster.ReusePolicy
	if tag != "" && testPolicy != (reusePolicyTagged{tag: tag}) {
		log.Fatalf(context.TODO(),
			"incompatible test and cluster. Cluster tag: %s. Test policy: %+v",
			tag, t.Cluster.ReusePolicy)
	}
	score := 0
	if _, ok := testPolicy.(reusePolicyAny); ok {
		score = 1000000
	} else if p, ok := testPolicy.(reusePolicyTagged); ok {
		score = 500000
		if tag == "" {
			// We have an untagged cluster and a tagged test. Within this category of
			// tests, we prefer the tags with the fewest existing clusters.
			//
			// Bug fix: count clusters for the *test's* tag (p.tag). The cluster's
			// tag is known to be "" in this branch, so the previous
			// cr.countForTag(tag) always counted untagged clusters and never
			// differentiated between test tags.
			score -= 1000 * cr.countForTag(p.tag)
		}
	} else { // NoReuse policy
		score = 0
	}
	// We prefer tests that have run fewer times (so, that have more runs left).
	score += tc.count
	return score
}
// findCompatibleTestsLocked returns the tests whose cluster requirements
// are compatible with the given cluster spec. p.mu must be held.
func (p *workPool) findCompatibleTestsLocked(clusterSpec clusterSpec) []testWithCount {
	if _, ok := clusterSpec.ReusePolicy.(reusePolicyNone); ok {
		panic("can't search for tests compatible with a ReuseNone policy")
	}
	var compatible []testWithCount
	for _, candidate := range p.mu.tests {
		if !clustersCompatible(clusterSpec, candidate.spec.Cluster) {
			continue
		}
		compatible = append(compatible, candidate)
	}
	return compatible
}
// decTestLocked decrements a test's remaining run count and removes it
// from the workPool once exhausted. p.mu must be held. A name that is not
// in the pool is a programming error and crashes the process.
func (p *workPool) decTestLocked(ctx context.Context, name string) {
	// Bug fix: the old `for idx = range ...` loop left idx at the *last*
	// index when the name was absent from a non-empty slice, so a missing
	// test silently decremented whichever test happened to be last instead
	// of reaching the fatal check. Track the match explicitly.
	idx := -1
	for i := range p.mu.tests {
		if p.mu.tests[i].spec.Name == name {
			idx = i
			break
		}
	}
	if idx == -1 {
		log.Fatalf(ctx, "failed to find test: %s", name)
	}
	tc := &p.mu.tests[idx]
	tc.count--
	if tc.count == 0 {
		// We've selected the last run for a test. Take that test out of the pool.
		p.mu.tests = append(p.mu.tests[:idx], p.mu.tests[idx+1:]...)
	}
}
|
package main
import (
"fmt"
"net/http"
)
type MyHander struct {
}
func (handler *MyHander) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/" {
sayHelloGolang(w, r)
return
}
http.NotFound(w, r)
return
}
func sayHelloGolang(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello Golang!")
}
func main() {
handler := MyHander{}
http.ListenAndServe(":9091", &handler)
} |
package config
import (
"fmt"
"log"
"github.com/streadway/amqp"
)
// RabbitConfig bundles the AMQP connection, the declared queue, and the
// channel used to publish on it.
type RabbitConfig struct {
	mydb      *amqp.Connection // broker connection (injected via NewRabbitConfig)
	myqueue   amqp.Queue       // the declared "hello" queue
	mychannel *amqp.Channel    // channel used for declarations and publishes
}

// rabbit is the package-level instance set up by NewRabbitConfig and used
// by the package-level Publish/CloaseAll helpers.
var rabbit RabbitConfig
// Configure opens a channel on the injected connection, declares the
// durable "hello" queue, and limits the channel to one unacknowledged
// message at a time (fair dispatch).
//
// Fix: every error is now checked immediately after the call that
// produced it. The original never checked the Qos error at all and only
// validated the queue declaration after issuing Qos.
func (r *RabbitConfig) Configure() {
	var err error
	r.mychannel, err = r.mydb.Channel()
	failOnError(err, "Failed to open a channel")

	r.myqueue, err = r.mychannel.QueueDeclare(
		"hello", // name
		true,    // durable
		false,   // delete when unused
		false,   // exclusive
		false,   // no-wait
		nil,     // arguments
	)
	failOnError(err, "Failed to declare a queue")

	err = r.mychannel.Qos(
		1,     // prefetch count
		0,     // prefetch size
		false, // global
	)
	failOnError(err, "Failed to set QoS")
}
// Publish sends msg via the package-level RabbitConfig created by
// NewRabbitConfig.
func Publish(msg string) error {
	return rabbit.Publish(msg)
}
// Publish sends msg as a persistent text/plain message to the declared
// queue via the default exchange. A publish failure is logged and returned.
func (r *RabbitConfig) Publish(msg string) error {
	payload := amqp.Publishing{
		DeliveryMode: amqp.Persistent,
		ContentType:  "text/plain",
		Body:         []byte(msg),
	}
	err := r.mychannel.Publish(
		"",             // exchange
		r.myqueue.Name, // routing key
		false,          // mandatory
		false,          // immediate
		payload,
	)
	if err != nil {
		log.Printf("%s: %s", "Message Queue Error :", err)
	}
	return err
}
// NewRabbitConfig stores conn in the package-level instance and returns a
// pointer to it. Note: calling it again replaces (and does not close) any
// previously configured state.
func NewRabbitConfig(conn *amqp.Connection) *RabbitConfig {
	rabbit = RabbitConfig{mydb: conn}
	return &rabbit
}
// failOnError terminates the process when err is non-nil, logging msg
// together with the error. A nil err is a no-op.
//
// Fix: the original followed log.Fatalf with a panic, but Fatalf calls
// os.Exit, so the panic was unreachable dead code; it has been removed.
func failOnError(err error, msg string) {
	if err != nil {
		log.Fatalf("%s: %s", msg, err)
	}
}
// CloaseAll closes the channel and the underlying connection of the
// package-level instance. (Name kept as-is for caller compatibility.)
//
// Fixes: nil guards make it safe to call before Configure/NewRabbitConfig
// (the original dereferenced a possibly-nil channel), and the connection
// itself is now closed too, matching the function's "all" intent.
func CloaseAll() {
	if rabbit.mychannel != nil {
		rabbit.mychannel.Close()
	}
	if rabbit.mydb != nil {
		rabbit.mydb.Close()
	}
}
|
// initialize structures with composite literals
// with field name and val
// only val
// use of + sign to print the field name
// struct copy - changes the orig val
package main
import "fmt"
func main() {
	// location holds a latitude/longitude pair in degrees.
	type location struct {
		lat, long float64
	}
	// Field-value pairs: order is free and fields may be omitted.
	opportunity := location{lat: -1.9462, long: 354.4734}
	fmt.Println(opportunity)
	insight := location{lat: 4.5, long: 135.9}
	fmt.Println(insight)
	// Positional form: one value per field, in declaration order.
	spirit := location{-14.5684, 175.472636}
	fmt.Println(spirit)
	// The %+v verb includes the field names in the output.
	fmt.Printf("%+v\n", spirit)
	// Assignment copies the struct: mutating the copy leaves the original
	// unchanged (compare the final printed pair).
	spirit1 := spirit
	spirit1.long += 1.0016
	fmt.Println(spirit, spirit1)
}
// {-1.9462 354.4734}
// {4.5 135.9}
// {-14.5684 175.472636}
// {lat:-14.5684 long:175.472636}
// {-14.5684 175.472636} {-14.5684 176.474236}
|
package level_ip
import (
"testing"
)
// handleFrame dispatches a received Ethernet frame to the protocol handler
// selected by its EtherType (ARP or IPv4). Frames of any other type are
// silently dropped.
func handleFrame(dev *NetDev, eth_hdr *EthHdr, ifce *TunInterface) {
	switch eth_hdr.ethertype {
	case ETH_P_ARP:
		arpIncoming(dev, eth_hdr, ifce)
	case ETH_P_IP:
		ipv4_incoming(dev, eth_hdr, ifce)
	default:
		// Unsupported EtherType: ignore the frame.
	}
}
// TestArp brings up a TUN/TAP interface and runs a receive loop that
// parses each Ethernet frame and dispatches it via handleFrame.
//
// NOTE(review): the loop never terminates, so this "test" runs until the
// process is killed or a read fails — it is an interactive harness rather
// than an automated test, and it needs permission to create the "test"
// interface.
func TestArp(t *testing.T) {
	// Fixed IP and MAC for the simulated device.
	dev := netdevInit("10.0.0.4", "00:0c:29:6d:50:25")
	ifce, err := tunInit("test")
	if err != nil {
		t.Fatalf("Creating Tap Err: %v\n", err)
	}
	for {
		// Fresh 100-byte buffer per frame — presumably enough for the ARP/IP
		// traffic exercised here; TODO confirm larger frames aren't truncated.
		buf := make([]byte, 100)
		if _, err := ifce.Read(buf); err != nil {
			t.Fatalf("ERR: Read from tun_fd: %v\n", err)
		}
		// DPrintf("Received A Ethernet Frame.\n%s\n", hexdump(buf))
		eth_hdr := initEthHdr(buf)
		handleFrame(&dev, eth_hdr, ifce)
	}
}
|
package collectors
import (
"fmt"
"strconv"
"time"
"bosun.org/cmd/scollector/conf"
"bosun.org/metadata"
"bosun.org/opentsdb"
)
// SNMPCisco registers a SNMP CISCO collector for the given community and host.
// It adds two 30-second interval collectors: one polling the memory
// used/free metrics described by mib below, and one polling CPU
// utilization via c_cisco_cpu.
func SNMPCisco(cfg conf.SNMP) {
	// Memory subtree: each entry is tagged with its name (OID .2) and
	// reports cisco.mem.used (.5) and cisco.mem.free (.6).
	mib := conf.MIB{
		BaseOid: "1.3.6.1.4.1.9.9",
		Trees: []conf.MIBTree{
			{
				BaseOid: ".48.1.1.1",
				Tags: []conf.MIBTag{
					{Key: "name", Oid: ".2"},
				},
				Metrics: []conf.MIBMetric{
					{
						Metric: "cisco.mem.used",
						Oid:    ".5",
					},
					{
						Metric: "cisco.mem.free",
						Oid:    ".6",
					},
				},
			},
		},
	}
	// Shared integrator: c_cisco_cpu uses it to derive the cumulative
	// osCPU counter from the CPU percentage gauge across polls.
	cpuIntegrator := getTsIntegrator()
	collectors = append(collectors,
		&IntervalCollector{
			F: func() (opentsdb.MultiDataPoint, error) {
				return GenericSnmp(cfg, mib)
			},
			Interval: time.Second * 30,
			name:     fmt.Sprintf("snmp-cisco-%s", cfg.Host),
		},
		&IntervalCollector{
			F: func() (opentsdb.MultiDataPoint, error) {
				return c_cisco_cpu(cfg.Host, cfg.Community, cpuIntegrator)
			},
			Interval: time.Second * 30,
			name:     fmt.Sprintf("snmp-cisco-cpu-%s", cfg.Host),
		},
	)
}
const (
	// cpmCPUTotal5secRev is the OID subtree polled by c_cisco_cpu for the
	// device's five-second CPU busy percentage (one entry per CPU).
	cpmCPUTotal5secRev = ".1.3.6.1.4.1.9.9.109.1.1.1.1.6"
)
// c_cisco_cpu polls the five-second CPU utilization of a Cisco device over
// SNMP and emits it both as the cisco.cpu gauge and, via cpuIntegrator, as
// the shared osCPU counter.
func c_cisco_cpu(host, community string, cpuIntegrator tsIntegrator) (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	cpuRaw, err := snmp_subtree(host, community, cpmCPUTotal5secRev)
	if err != nil {
		return md, err
	}
	tags := opentsdb.TagSet{"host": host}
	// Convert every returned value into an integer percentage.
	cpu := make(map[string]int, len(cpuRaw))
	for key, raw := range cpuRaw {
		pct, convErr := strconv.Atoi(fmt.Sprintf("%v", raw))
		if convErr != nil {
			return md, convErr
		}
		cpu[key] = pct
	}
	// This collector assumes a single CPU entry under the subtree.
	if len(cpu) > 1 {
		return md, fmt.Errorf("expected only one cpu when monitoring cisco cpu via cpmCPUTotal5secRev")
	}
	for _, pct := range cpu {
		Add(&md, "cisco.cpu", pct, tags, metadata.Gauge, metadata.Pct, "")
		Add(&md, osCPU, cpuIntegrator(time.Now().Unix(), float64(pct)), tags, metadata.Counter, metadata.Pct, "")
	}
	return md, nil
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package scjob
import (
"context"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/util/log/logcrash"
)
// init registers the constructor for new-style schema change jobs so the
// jobs registry can build a resumer from a stored payload.
func init() {
	jobs.RegisterConstructor(jobspb.TypeNewSchemaChange, func(
		job *jobs.Job, settings *cluster.Settings,
	) jobs.Resumer {
		pl := job.Payload()
		return &newSchemaChangeResumer{
			job:     job,
			targets: pl.GetNewSchemaChange().Targets,
		}
	})
}
// newSchemaChangeResumer executes a declarative schema change job.
type newSchemaChangeResumer struct {
	job     *jobs.Job
	targets []*scpb.Target // schema change targets from the job payload
}

// badJobTracker is a stop-gap scexec.JobProgressTracker backed by a single
// transaction and descriptor collection; the name flags that it is a
// placeholder (SetResumeSpans is unimplemented).
type badJobTracker struct {
	txn         *kv.Txn
	descriptors *descs.Collection
	codec       keys.SQLCodec
}
// GetResumeSpans returns the spans still to be backfilled for the given
// index. This placeholder always reports the index's entire span, i.e. no
// partial progress is ever resumed.
func (b badJobTracker) GetResumeSpans(
	ctx context.Context, tableID descpb.ID, indexID descpb.IndexID,
) ([]roachpb.Span, error) {
	table, err := b.descriptors.GetImmutableTableByID(ctx, b.txn, tableID, tree.ObjectLookupFlags{
		CommonLookupFlags: tree.CommonLookupFlags{
			Required: true,
			// Skip cached descriptors so the span reflects the latest version.
			AvoidCached: true,
		},
	})
	if err != nil {
		return nil, err
	}
	return []roachpb.Span{table.IndexSpan(b.codec, indexID)}, nil
}
// SetResumeSpans records backfill progress; not yet implemented in this
// placeholder tracker.
func (b badJobTracker) SetResumeSpans(
	ctx context.Context, tableID descpb.ID, indexID descpb.IndexID, total, done []roachpb.Span,
) error {
	panic("implement me")
}

// Compile-time check that badJobTracker satisfies scexec.JobProgressTracker.
var _ scexec.JobProgressTracker = (*badJobTracker)(nil)
// Resume implements jobs.Resumer: it re-plans the schema change from the
// job's stored targets and states, executes the remaining stages, updates
// job progress transactionally with each stage, and waits for descriptor
// leases to catch up between stages.
func (n *newSchemaChangeResumer) Resume(ctx context.Context, execCtxI interface{}) (err error) {
	execCtx := execCtxI.(sql.JobExecContext)
	// No-op update: verifies the job is still claimed/updatable before
	// doing any work.
	if err := n.job.Update(ctx, nil /* txn */, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {
		return nil
	}); err != nil {
		// TODO(ajwerner): Detect transient errors and classify as retriable here or
		// in the jobs package.
		return err
	}
	// TODO(ajwerner): Wait for leases on all descriptors before starting to
	// avoid restarts.
	progress := n.job.Progress()
	states := progress.GetNewSchemaChange().States
	settings := execCtx.ExtendedEvalContext().Settings
	lm := execCtx.LeaseMgr()
	db := lm.DB()
	ie := execCtx.ExtendedEvalContext().InternalExecutor.(sqlutil.InternalExecutor)
	// Re-plan the remaining work from the stored targets and their current
	// states.
	sc, err := scplan.MakePlan(makeTargetStates(ctx, settings, n.targets, states), scplan.Params{
		ExecutionPhase: scplan.PostCommitPhase,
	})
	if err != nil {
		return err
	}
	for _, s := range sc.Stages {
		var descriptorsWithUpdatedVersions []lease.IDVersion
		// Each stage runs in its own transaction; progress is written in the
		// same transaction so a restart resumes at the correct stage.
		if err := descs.Txn(ctx, settings, lm, ie, db, func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error {
			jt := badJobTracker{
				txn:         txn,
				descriptors: descriptors,
				codec:       execCtx.ExecCfg().Codec,
			}
			if err := scexec.NewExecutor(txn, descriptors, execCtx.ExecCfg().Codec, execCtx.ExecCfg().IndexBackfiller, jt).ExecuteOps(ctx, s.Ops); err != nil {
				return err
			}
			descriptorsWithUpdatedVersions = descriptors.GetDescriptorsWithNewVersion()
			return n.job.Update(ctx, txn, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {
				pg := md.Progress.GetNewSchemaChange()
				pg.States = makeStates(s.After)
				ju.UpdateProgress(md.Progress)
				return nil
			})
		}); err != nil {
			return err
		}
		// Wait for new versions.
		if err := sql.WaitToUpdateLeasesMultiple(
			ctx,
			lm,
			descriptorsWithUpdatedVersions,
		); err != nil {
			return err
		}
	}
	return nil
}
// makeStates extracts the State from each node in next, preserving order.
func makeStates(next []*scpb.Node) []scpb.State {
	out := make([]scpb.State, 0, len(next))
	for _, node := range next {
		out = append(out, node.State)
	}
	return out
}
// makeTargetStates pairs each target proto with its corresponding state,
// producing one scpb.Node per target. A length mismatch between the two
// slices is reported as an internal error.
func makeTargetStates(
	ctx context.Context, sv *cluster.Settings, protos []*scpb.Target, states []scpb.State,
) []*scpb.Node {
	if len(protos) != len(states) {
		logcrash.ReportOrPanic(ctx, &sv.SV, "unexpected slice size mismatch %d and %d",
			len(protos), len(states))
	}
	nodes := make([]*scpb.Node, 0, len(protos))
	for i, proto := range protos {
		nodes = append(nodes, &scpb.Node{
			Target: proto,
			State:  states[i],
		})
	}
	return nodes
}
// OnFailOrCancel implements jobs.Resumer.
//
// Rollback of new-style schema changes is not implemented yet, so failure or
// cancellation of the job panics.
func (n *newSchemaChangeResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error {
	panic("unimplemented")
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gofer provides a filesystem implementation that is backed by a 9p
// server, interchangeably referred to as "gofers" throughout this package.
//
// Lock order:
//
// regularFileFD/directoryFD.mu
// filesystem.renameMu
// dentry.cachingMu
// dentryCache.mu
// dentry.opMu
// dentry.childrenMu
// filesystem.syncMu
// dentry.metadataMu
// *** "memmap.Mappable locks" below this point
// dentry.mapsMu
// *** "memmap.Mappable locks taken by Translate" below this point
// dentry.handleMu
// dentry.dataMu
// filesystem.inoMu
// specialFileFD.mu
// specialFileFD.bufMu
//
// Locking dentry.opMu and dentry.metadataMu in multiple dentries requires that
// either ancestor dentries are locked before descendant dentries, or that
// filesystem.renameMu is locked for writing.
package gofer
import (
"fmt"
"path"
"strconv"
"strings"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/cleanup"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/lisafs"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/refs"
fslock "gvisor.dev/gvisor/pkg/sentry/fsimpl/lock"
"gvisor.dev/gvisor/pkg/sentry/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/unet"
)
// Name is the default filesystem name.
const Name = "9p"

// Mount option names for goferfs, parsed in GetFilesystem and
// getFDFromMountOptionsMap.
const (
	moptTransport                = "trans"
	moptReadFD                   = "rfdno"
	moptWriteFD                  = "wfdno"
	moptAname                    = "aname"
	moptDfltUID                  = "dfltuid"
	moptDfltGID                  = "dfltgid"
	moptCache                    = "cache"
	moptForcePageCache           = "force_page_cache"
	moptLimitHostFDTranslation   = "limit_host_fd_translation"
	moptOverlayfsStaleRead       = "overlayfs_stale_read"
	moptDisableFileHandleSharing = "disable_file_handle_sharing"
	moptDisableFifoOpen          = "disable_fifo_open"

	// Directfs options.
	moptDirectfs = "directfs"
)

// Valid values for the "cache" mount option.
const (
	cacheFSCache             = "fscache"
	cacheFSCacheWritethrough = "fscache_writethrough"
	cacheRemoteRevalidating  = "remote_revalidating"
)

// Default sizing limits for the dentry cache and the per-directory negative
// child-name cache; see dentryCache and stringFixedCache.
const (
	defaultMaxCachedDentries  = 1000
	maxCachedNegativeChildren = 1000
)
// stringFixedCache is a fixed sized cache, once initialized,
// its size never changes.
//
// +stateify savable
type stringFixedCache struct {
	// namesList stores negative names in FIFO order. A name stored in
	// namesList only means it used to be negative at the moment it was
	// pushed onto the list.
	namesList stringList

	// size is the fixed number of entries; 0 means the cache has not been
	// initialized (see isInited/init).
	size uint64
}
// isInited reports whether init has been called on this cache; a zero size
// is the sentinel for an uninitialized cache.
func (cache *stringFixedCache) isInited() bool {
	return cache.size > 0
}
// init populates the cache with size empty entries and records its fixed
// capacity.
func (cache *stringFixedCache) init(size uint64) {
	entries := make([]stringListElem, size)
	for i := range entries {
		cache.namesList.PushFront(&entries[i])
	}
	cache.size = size
}
// add stores name at the front of the list by recycling the tail element,
// and returns the name that was evicted from the tail.
func (cache *stringFixedCache) add(name string) string {
	victim := cache.namesList.Back()
	evicted := victim.str
	victim.str = name
	cache.namesList.Remove(victim)
	cache.namesList.PushFront(victim)
	return evicted
}
// dentryCache holds dentries that have no remaining references and are
// therefore eligible for eviction, bounded by maxCachedDentries.
//
// +stateify savable
type dentryCache struct {
	// mu protects the below fields.
	mu sync.Mutex `state:"nosave"`
	// dentries contains all dentries with 0 references. Due to race conditions,
	// it may also contain dentries with non-zero references.
	dentries dentryList
	// dentriesLen is the number of dentries in dentries.
	dentriesLen uint64
	// maxCachedDentries is the maximum number of cachable dentries.
	maxCachedDentries uint64
}
// SetDentryCacheSize sets the size of the global gofer dentry cache.
//
// Negative sizes are ignored, and once the global cache has been created,
// subsequent calls have no effect.
func SetDentryCacheSize(size int) {
	switch {
	case size < 0:
		return
	case globalDentryCache != nil:
		log.Warningf("Global dentry cache has already been initialized. Ignoring subsequent attempt.")
		return
	}
	globalDentryCache = &dentryCache{maxCachedDentries: uint64(size)}
}
// globalDentryCache is a global cache of dentries across all gofers. It is
// nil until SetDentryCacheSize is called with a non-negative size.
var globalDentryCache *dentryCache

// Valid values for "trans" mount option.
const transportModeFD = "fd"
// FilesystemType implements vfs.FilesystemType. It is stateless; all
// per-mount state lives in filesystem.
//
// +stateify savable
type FilesystemType struct{}
// filesystem implements vfs.FilesystemImpl.
//
// +stateify savable
type filesystem struct {
	// vfsfs is the embedded vfs.Filesystem object; it is initialized in
	// GetFilesystem via vfsfs.Init.
	vfsfs vfs.Filesystem

	// mfp is used to allocate memory that caches regular file contents. mfp is
	// immutable.
	mfp pgalloc.MemoryFileProvider

	// Immutable options.
	opts  filesystemOptions
	iopts InternalFilesystemOptions

	// client is the LISAFS client used for communicating with the server. client
	// is immutable.
	client *lisafs.Client `state:"nosave"`

	// clock is a realtime clock used to set timestamps in file operations.
	clock ktime.Clock

	// devMinor is the filesystem's minor device number. devMinor is immutable.
	devMinor uint32

	// root is the root dentry. root is immutable.
	root *dentry

	// renameMu serves two purposes:
	//
	// - It synchronizes path resolution with renaming initiated by this
	// client.
	//
	// - It is held by path resolution to ensure that reachable dentries remain
	// valid. A dentry is reachable by path resolution if it has a non-zero
	// reference count (such that it is usable as vfs.ResolvingPath.Start() or
	// is reachable from its children), or if it is a child dentry (such that
	// it is reachable from its parent).
	renameMu sync.RWMutex `state:"nosave"`

	// dentryCache is either the global dentry cache or a per-filesystem one;
	// see GetFilesystem.
	dentryCache *dentryCache

	// syncableDentries contains all non-synthetic dentries. specialFileFDs
	// contains all open specialFileFDs. These fields are protected by syncMu.
	syncMu           sync.Mutex `state:"nosave"`
	syncableDentries dentryList
	specialFileFDs   specialFDList

	// inoByKey maps previously-observed device ID and host inode numbers to
	// internal inode numbers assigned to those files. inoByKey is not preserved
	// across checkpoint/restore because inode numbers may be reused between
	// different gofer processes, so inode numbers may be repeated for different
	// files across checkpoint/restore. inoByKey is protected by inoMu.
	inoMu    sync.Mutex        `state:"nosave"`
	inoByKey map[inoKey]uint64 `state:"nosave"`

	// lastIno is the last inode number assigned to a file. lastIno is accessed
	// using atomic memory operations.
	lastIno atomicbitops.Uint64

	// savedDentryRW records open read/write handles during save/restore.
	savedDentryRW map[*dentry]savedDentryRW

	// released is nonzero once filesystem.Release has been called.
	released atomicbitops.Int32
}
// filesystemOptions holds the configuration parsed from goferfs mount
// options; see GetFilesystem.
//
// +stateify savable
type filesystemOptions struct {
	// fd is the host FD for the connection to the gofer server, from the
	// "rfdno"/"wfdno" mount options.
	fd int

	// aname is the attach point within the server's tree, from the "aname"
	// mount option. Defaults to "/".
	aname string

	interop InteropMode // derived from the "cache" mount option

	// dfltuid and dfltgid are the owner IDs used when the "dfltuid"/"dfltgid"
	// mount options are set; they default to _V9FS_DEFUID/_V9FS_DEFGID.
	dfltuid auth.KUID
	dfltgid auth.KGID

	// If forcePageCache is true, host FDs may not be used for application
	// memory mappings even if available; instead, the client must perform its
	// own caching of regular file pages. This is primarily useful for testing.
	forcePageCache bool

	// If limitHostFDTranslation is true, apply maxFillRange() constraints to
	// host FD mappings returned by dentry.(memmap.Mappable).Translate(). This
	// makes memory accounting behavior more consistent between cases where
	// host FDs are / are not available, but may increase the frequency of
	// sentry-handled page faults on files for which a host FD is available.
	limitHostFDTranslation bool

	// If overlayfsStaleRead is true, O_RDONLY host FDs provided by the remote
	// filesystem may not be coherent with writable host FDs opened later, so
	// all uses of the former must be replaced by uses of the latter. This is
	// usually only the case when the remote filesystem is a Linux overlayfs
	// mount. (Prior to Linux 4.18, patch series centered on commit
	// d1d04ef8572b "ovl: stack file ops", both I/O and memory mappings were
	// incoherent between pre-copy-up and post-copy-up FDs; after that patch
	// series, only memory mappings are incoherent.)
	overlayfsStaleRead bool

	// If regularFilesUseSpecialFileFD is true, application FDs representing
	// regular files will use distinct file handles for each FD, in the same
	// way that application FDs representing "special files" such as sockets
	// do. Note that this disables client caching for regular files. This option
	// may regress performance due to excessive Open RPCs. This option is not
	// supported with overlayfsStaleRead for now.
	regularFilesUseSpecialFileFD bool

	// If disableFifoOpen is true, application attempts to open(2) a host FIFO
	// are disallowed.
	disableFifoOpen bool

	// directfs holds options for directfs mode.
	directfs directfsOpts
}
// directfsOpts holds directfs-related configuration; see the "directfs"
// mount option.
//
// +stateify savable
type directfsOpts struct {
	// If directfs is enabled, the gofer client does not make RPCs to the gofer
	// process. Instead, it makes host syscalls to perform file operations.
	enabled bool
}
// InteropMode controls the client's interaction with other remote filesystem
// users.
//
// +stateify savable
type InteropMode uint32

// Supported interop modes; selected via the "cache" mount option in
// GetFilesystem.
const (
	// InteropModeExclusive is appropriate when the filesystem client is the
	// only user of the remote filesystem.
	//
	// - The client may cache arbitrary filesystem state (file data, metadata,
	// filesystem structure, etc.).
	//
	// - Client changes to filesystem state may be sent to the remote
	// filesystem asynchronously, except when server permission checks are
	// necessary.
	//
	// - File timestamps are based on client clocks. This ensures that users of
	// the client observe timestamps that are coherent with their own clocks
	// and consistent with Linux's semantics (in particular, it is not always
	// possible for clients to set arbitrary atimes and mtimes depending on the
	// remote filesystem implementation, and never possible for clients to set
	// arbitrary ctimes.)
	InteropModeExclusive InteropMode = iota

	// InteropModeWritethrough is appropriate when there are read-only users of
	// the remote filesystem that expect to observe changes made by the
	// filesystem client.
	//
	// - The client may cache arbitrary filesystem state.
	//
	// - Client changes to filesystem state must be sent to the remote
	// filesystem synchronously.
	//
	// - File timestamps are based on client clocks. As a corollary, access
	// timestamp changes from other remote filesystem users will not be visible
	// to the client.
	InteropModeWritethrough

	// InteropModeShared is appropriate when there are users of the remote
	// filesystem that may mutate its state other than the client.
	//
	// - The client must verify ("revalidate") cached filesystem state before
	// using it.
	//
	// - Client changes to filesystem state must be sent to the remote
	// filesystem synchronously.
	//
	// - File timestamps are based on server clocks. This is necessary to
	// ensure that timestamp changes are synchronized between remote filesystem
	// users.
	//
	// Note that the correctness of InteropModeShared depends on the server
	// correctly implementing 9P fids (i.e. each fid immutably represents a
	// single filesystem object), even in the presence of remote filesystem
	// mutations from other users. If this is violated, the behavior of the
	// client is undefined.
	InteropModeShared
)
// InternalFilesystemOptions may be passed as
// vfs.GetFilesystemOptions.InternalData to FilesystemType.GetFilesystem.
// They are stored in filesystem.iopts and are immutable thereafter.
//
// +stateify savable
type InternalFilesystemOptions struct {
	// If UniqueID is non-empty, it is an opaque string used to reassociate the
	// filesystem with a new server FD during restoration from checkpoint.
	UniqueID string

	// If LeakConnection is true, do not close the connection to the server
	// when the Filesystem is released. This is necessary for deployments in
	// which servers can handle only a single client and report failure if that
	// client disconnects.
	LeakConnection bool

	// If OpenSocketsByConnecting is true, silently translate attempts to open
	// files identifying as sockets to connect RPCs.
	OpenSocketsByConnecting bool
}
// _V9FS_DEFUID and _V9FS_DEFGID (from Linux's fs/9p/v9fs.h) are the default
// UIDs and GIDs used for files that do not provide a specific owner or group
// respectively. They are applied in GetFilesystem when the "dfltuid"/"dfltgid"
// mount options are absent.
const (
	// uint32(-2) doesn't work in Go.
	_V9FS_DEFUID = auth.KUID(4294967294)
	_V9FS_DEFGID = auth.KGID(4294967294)
)
// Name implements vfs.FilesystemType.Name. It returns the default filesystem
// name, "9p".
func (FilesystemType) Name() string {
	return Name
}

// Release implements vfs.FilesystemType.Release. It is a no-op:
// FilesystemType holds no resources.
func (FilesystemType) Release(ctx context.Context) {}
// GetFilesystem implements vfs.FilesystemType.GetFilesystem.
//
// It parses and validates the goferfs mount options, establishes the
// connection to the gofer server, resolves the root dentry, and returns the
// new filesystem with a reference on the root held by the caller.
func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opts vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {
	mfp := pgalloc.MemoryFileProviderFromContext(ctx)
	if mfp == nil {
		ctx.Warningf("gofer.FilesystemType.GetFilesystem: context does not provide a pgalloc.MemoryFileProvider")
		return nil, nil, linuxerr.EINVAL
	}

	mopts := vfs.GenericParseMountOptions(opts.Data)
	var fsopts filesystemOptions

	fd, err := getFDFromMountOptionsMap(ctx, mopts)
	if err != nil {
		return nil, nil, err
	}
	fsopts.fd = fd

	// Get the attach name.
	fsopts.aname = "/"
	if aname, ok := mopts[moptAname]; ok {
		delete(mopts, moptAname)
		if !path.IsAbs(aname) {
			ctx.Warningf("gofer.FilesystemType.GetFilesystem: aname is not absolute: %s=%s", moptAname, aname)
			return nil, nil, linuxerr.EINVAL
		}
		fsopts.aname = path.Clean(aname)
	}

	// Parse the cache policy. For historical reasons, this defaults to the
	// least generally-applicable option, InteropModeExclusive.
	fsopts.interop = InteropModeExclusive
	if cache, ok := mopts[moptCache]; ok {
		delete(mopts, moptCache)
		switch cache {
		case cacheFSCache:
			fsopts.interop = InteropModeExclusive
		case cacheFSCacheWritethrough:
			fsopts.interop = InteropModeWritethrough
		case cacheRemoteRevalidating:
			fsopts.interop = InteropModeShared
		default:
			ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid cache policy: %s=%s", moptCache, cache)
			return nil, nil, linuxerr.EINVAL
		}
	}

	// Parse the default UID and GID.
	fsopts.dfltuid = _V9FS_DEFUID
	if dfltuidstr, ok := mopts[moptDfltUID]; ok {
		delete(mopts, moptDfltUID)
		dfltuid, err := strconv.ParseUint(dfltuidstr, 10, 32)
		if err != nil {
			ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid default UID: %s=%s", moptDfltUID, dfltuidstr)
			return nil, nil, linuxerr.EINVAL
		}
		// In Linux, dfltuid is interpreted as a UID and is converted to a KUID
		// in the caller's user namespace, but goferfs isn't
		// application-mountable.
		fsopts.dfltuid = auth.KUID(dfltuid)
	}
	fsopts.dfltgid = _V9FS_DEFGID
	if dfltgidstr, ok := mopts[moptDfltGID]; ok {
		delete(mopts, moptDfltGID)
		dfltgid, err := strconv.ParseUint(dfltgidstr, 10, 32)
		if err != nil {
			// Fixed: this message previously said "invalid default UID",
			// which was a copy-paste error from the dfltuid branch above.
			ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid default GID: %s=%s", moptDfltGID, dfltgidstr)
			return nil, nil, linuxerr.EINVAL
		}
		fsopts.dfltgid = auth.KGID(dfltgid)
	}

	// Handle simple flags.
	if _, ok := mopts[moptDisableFileHandleSharing]; ok {
		delete(mopts, moptDisableFileHandleSharing)
		fsopts.regularFilesUseSpecialFileFD = true
	}
	if _, ok := mopts[moptDisableFifoOpen]; ok {
		delete(mopts, moptDisableFifoOpen)
		fsopts.disableFifoOpen = true
	}
	if _, ok := mopts[moptForcePageCache]; ok {
		delete(mopts, moptForcePageCache)
		fsopts.forcePageCache = true
	}
	if _, ok := mopts[moptLimitHostFDTranslation]; ok {
		delete(mopts, moptLimitHostFDTranslation)
		fsopts.limitHostFDTranslation = true
	}
	if _, ok := mopts[moptOverlayfsStaleRead]; ok {
		delete(mopts, moptOverlayfsStaleRead)
		fsopts.overlayfsStaleRead = true
	}
	if _, ok := mopts[moptDirectfs]; ok {
		delete(mopts, moptDirectfs)
		fsopts.directfs.enabled = true
	}

	// Check for unparsed options.
	if len(mopts) != 0 {
		ctx.Warningf("gofer.FilesystemType.GetFilesystem: unknown options: %v", mopts)
		return nil, nil, linuxerr.EINVAL
	}

	// Validation.
	if fsopts.regularFilesUseSpecialFileFD && fsopts.overlayfsStaleRead {
		// These options are not supported together. To support this, when a dentry
		// is opened writably for the first time, we need to iterate over all the
		// specialFileFDs of that dentry that represent a regular file and call
		// fd.hostFileMapper.RegenerateMappings(writable_fd).
		ctx.Warningf("gofer.FilesystemType.GetFilesystem: regularFilesUseSpecialFileFD and overlayfsStaleRead options are not supported together.")
		return nil, nil, linuxerr.EINVAL
	}

	// Handle internal options.
	iopts, ok := opts.InternalData.(InternalFilesystemOptions)
	if opts.InternalData != nil && !ok {
		ctx.Warningf("gofer.FilesystemType.GetFilesystem: GetFilesystemOptions.InternalData has type %T, wanted gofer.InternalFilesystemOptions", opts.InternalData)
		return nil, nil, linuxerr.EINVAL
	}
	// If !ok, iopts being the zero value is correct.

	// Construct the filesystem object.
	devMinor, err := vfsObj.GetAnonBlockDevMinor()
	if err != nil {
		return nil, nil, err
	}
	fs := &filesystem{
		mfp:      mfp,
		opts:     fsopts,
		iopts:    iopts,
		clock:    ktime.RealtimeClockFromContext(ctx),
		devMinor: devMinor,
		inoByKey: make(map[inoKey]uint64),
	}

	// Did the user configure a global dentry cache?
	if globalDentryCache != nil {
		fs.dentryCache = globalDentryCache
	} else {
		fs.dentryCache = &dentryCache{maxCachedDentries: defaultMaxCachedDentries}
	}

	fs.vfsfs.Init(vfsObj, &fstype, fs)

	rootInode, rootHostFD, err := fs.initClientAndGetRoot(ctx)
	if err != nil {
		fs.vfsfs.DecRef(ctx)
		return nil, nil, err
	}

	if fs.opts.directfs.enabled {
		fs.root, err = fs.getDirectfsRootDentry(ctx, rootHostFD, fs.client.NewFD(rootInode.ControlFD))
	} else {
		fs.root, err = fs.newLisafsDentry(ctx, &rootInode)
	}
	if err != nil {
		fs.vfsfs.DecRef(ctx)
		return nil, nil, err
	}

	// Set the root's reference count to 2. One reference is returned to the
	// caller, and the other is held by fs to prevent the root from being "cached"
	// and subsequently evicted.
	fs.root.refs = atomicbitops.FromInt64(2)
	return &fs.vfsfs, &fs.root.vfsd, nil
}
// initClientAndGetRoot initializes fs.client and returns the root inode for
// this mount point. It handles the attach point (fs.opts.aname) resolution.
// The returned host FD is only valid (>= 0) in directfs mode; otherwise it
// is -1.
func (fs *filesystem) initClientAndGetRoot(ctx context.Context) (lisafs.Inode, int, error) {
	sock, err := unet.NewSocket(fs.opts.fd)
	if err != nil {
		return lisafs.Inode{}, -1, err
	}

	ctx.UninterruptibleSleepStart(false)
	defer ctx.UninterruptibleSleepFinish(false)

	var (
		rootInode  lisafs.Inode
		rootHostFD int
	)
	fs.client, rootInode, rootHostFD, err = lisafs.NewClient(sock)
	if err != nil {
		return lisafs.Inode{}, -1, err
	}

	// On any failure below, close the root host FD (if one was returned) and
	// the root control FD so they are not leaked.
	cu := cleanup.Make(func() {
		if rootHostFD >= 0 {
			_ = unix.Close(rootHostFD)
		}
		rootControlFD := fs.client.NewFD(rootInode.ControlFD)
		rootControlFD.Close(ctx, false /* flush */)
	})
	defer cu.Clean()

	if fs.opts.directfs.enabled {
		if fs.opts.aname != "/" {
			log.Warningf("directfs does not support aname filesystem option: aname=%q", fs.opts.aname)
			return lisafs.Inode{}, -1, unix.EINVAL
		}
		if rootHostFD < 0 {
			log.Warningf("Mount RPC did not return host FD to mount point with directfs enabled")
			return lisafs.Inode{}, -1, unix.EINVAL
		}
	} else {
		if rootHostFD >= 0 {
			log.Warningf("Mount RPC returned a host FD to mount point without directfs, we didn't ask for it")
			_ = unix.Close(rootHostFD)
			rootHostFD = -1
		}
		// Use flipcall channels with lisafs because it makes a lot of RPCs.
		if err := fs.client.StartChannels(); err != nil {
			return lisafs.Inode{}, -1, err
		}
		rootInode, err = fs.handleAnameLisafs(ctx, rootInode)
		if err != nil {
			return lisafs.Inode{}, -1, err
		}
	}
	cu.Release()
	return rootInode, rootHostFD, nil
}
// getFDFromMountOptionsMap extracts the gofer connection FD from the mount
// options. The only supported transport is "fd", and the "rfdno" and "wfdno"
// options must name the same host file descriptor, which is returned.
// Recognized options are removed from mopts.
func getFDFromMountOptionsMap(ctx context.Context, mopts map[string]string) (int, error) {
	// The transport must be explicitly configured as "fd".
	if trans, ok := mopts[moptTransport]; !ok || trans != transportModeFD {
		ctx.Warningf("gofer.getFDFromMountOptionsMap: transport must be specified as '%s=%s'", moptTransport, transportModeFD)
		return -1, linuxerr.EINVAL
	}
	delete(mopts, moptTransport)

	// Both the read FD and the write FD options are mandatory.
	readFDStr, haveReadFD := mopts[moptReadFD]
	if !haveReadFD {
		ctx.Warningf("gofer.getFDFromMountOptionsMap: read FD must be specified as '%s=<file descriptor>'", moptReadFD)
		return -1, linuxerr.EINVAL
	}
	delete(mopts, moptReadFD)
	readFD, err := strconv.Atoi(readFDStr)
	if err != nil {
		ctx.Warningf("gofer.getFDFromMountOptionsMap: invalid read FD: %s=%s", moptReadFD, readFDStr)
		return -1, linuxerr.EINVAL
	}

	writeFDStr, haveWriteFD := mopts[moptWriteFD]
	if !haveWriteFD {
		ctx.Warningf("gofer.getFDFromMountOptionsMap: write FD must be specified as '%s=<file descriptor>'", moptWriteFD)
		return -1, linuxerr.EINVAL
	}
	delete(mopts, moptWriteFD)
	writeFD, err := strconv.Atoi(writeFDStr)
	if err != nil {
		ctx.Warningf("gofer.getFDFromMountOptionsMap: invalid write FD: %s=%s", moptWriteFD, writeFDStr)
		return -1, linuxerr.EINVAL
	}

	// Both options must name the same FD.
	if readFD != writeFD {
		ctx.Warningf("gofer.getFDFromMountOptionsMap: read FD (%d) and write FD (%d) must be equal", readFD, writeFD)
		return -1, linuxerr.EINVAL
	}
	return readFD, nil
}
// Release implements vfs.FilesystemImpl.Release. It flushes dirty cached data
// for all syncable dentries back to the remote filesystem, drops cached
// pages, closes host FDs, and closes the connection to the server (unless
// LeakConnection is set).
func (fs *filesystem) Release(ctx context.Context) {
	fs.released.Store(1)

	mf := fs.mfp.MemoryFile()
	fs.syncMu.Lock()
	for elem := fs.syncableDentries.Front(); elem != nil; elem = elem.Next() {
		d := elem.d
		d.handleMu.Lock()
		d.dataMu.Lock()
		if d.isWriteHandleOk() {
			// Write dirty cached data to the remote file.
			h := d.writeHandle()
			if err := fsutil.SyncDirtyAll(ctx, &d.cache, &d.dirty, d.size.Load(), mf, h.writeFromBlocksAt); err != nil {
				log.Warningf("gofer.filesystem.Release: failed to flush dentry: %v", err)
			}
			// TODO(jamieliu): Do we need to flushf/fsync d?
		}
		// Discard cached pages.
		d.cache.DropAll(mf)
		d.dirty.RemoveAll()
		d.dataMu.Unlock()
		// Close host FDs if they exist. We can use RacyLoad() because d.handleMu
		// is locked.
		if d.readFD.RacyLoad() >= 0 {
			_ = unix.Close(int(d.readFD.RacyLoad()))
		}
		if d.writeFD.RacyLoad() >= 0 && d.readFD.RacyLoad() != d.writeFD.RacyLoad() {
			_ = unix.Close(int(d.writeFD.RacyLoad()))
		}
		d.readFD = atomicbitops.FromInt32(-1)
		d.writeFD = atomicbitops.FromInt32(-1)
		d.mmapFD = atomicbitops.FromInt32(-1)
		d.handleMu.Unlock()
	}
	// There can't be any specialFileFDs still using fs, since each such
	// FileDescription would hold a reference on a Mount holding a reference on
	// fs.
	fs.syncMu.Unlock()

	// If leak checking is enabled, release all outstanding references in the
	// filesystem. We deliberately avoid doing this outside of leak checking; we
	// have released all external resources above rather than relying on dentry
	// destructors. fs.root may be nil if creating the client or initializing the
	// root dentry failed in GetFilesystem.
	if refs.GetLeakMode() != refs.NoLeakChecking && fs.root != nil {
		fs.renameMu.Lock()
		fs.root.releaseSyntheticRecursiveLocked(ctx)
		fs.evictAllCachedDentriesLocked(ctx)
		fs.renameMu.Unlock()

		// An extra reference was held by the filesystem on the root to prevent it from
		// being cached/evicted.
		fs.root.DecRef(ctx)
	}
	if !fs.iopts.LeakConnection {
		// Close the connection to the server. This implicitly closes all FDs.
		if fs.client != nil {
			fs.client.Close()
		}
	}
	fs.vfsfs.VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)
}
// releaseSyntheticRecursiveLocked traverses the tree with root d and
// decrements the reference count on every synthetic dentry. Synthetic
// dentries have one reference for existence that should be dropped during
// filesystem.Release.
//
// Precondition: d.fs.renameMu is locked for writing.
func (d *dentry) releaseSyntheticRecursiveLocked(ctx context.Context) {
	if d.isSynthetic() {
		d.decRefNoCaching()
		d.checkCachingLocked(ctx, true /* renameMuWriteLocked */)
	}
	if !d.isDir() {
		return
	}
	// Snapshot the children under childrenMu, then recurse without holding
	// the lock.
	d.childrenMu.Lock()
	childSnapshot := make([]*dentry, 0, len(d.children))
	for _, child := range d.children {
		if child != nil {
			childSnapshot = append(childSnapshot, child)
		}
	}
	d.childrenMu.Unlock()
	for _, child := range childSnapshot {
		child.releaseSyntheticRecursiveLocked(ctx)
	}
}
// inoKey is the key used to identify the inode backed by this dentry.
// The (device, inode number) pair is used to look up internal inode numbers
// in filesystem.inoByKey.
//
// +stateify savable
type inoKey struct {
	ino      uint64 // host inode number
	devMinor uint32 // host device minor number
	devMajor uint32 // host device major number
}
// inoKeyFromStatx builds the inoKey identifying the inode described by stat.
func inoKeyFromStatx(stat *linux.Statx) inoKey {
	var key inoKey
	key.ino = stat.Ino
	key.devMinor = stat.DevMinor
	key.devMajor = stat.DevMajor
	return key
}
// inoKeyFromStat builds the inoKey identifying the inode described by stat,
// splitting the combined host device ID into its major and minor parts.
func inoKeyFromStat(stat *unix.Stat_t) inoKey {
	var key inoKey
	key.ino = stat.Ino
	key.devMinor = unix.Minor(stat.Dev)
	key.devMajor = unix.Major(stat.Dev)
	return key
}
// dentry implements vfs.DentryImpl.
//
// +stateify savable
type dentry struct {
vfsd vfs.Dentry
// refs is the reference count. Each dentry holds a reference on its
// parent, even if disowned. An additional reference is held on all
// synthetic dentries until they are unlinked or invalidated. When refs
// reaches 0, the dentry may be added to the cache or destroyed. If refs ==
// -1, the dentry has already been destroyed. refs is accessed using atomic
// memory operations.
refs atomicbitops.Int64
// fs is the owning filesystem. fs is immutable.
fs *filesystem
// parent is this dentry's parent directory. Each dentry holds a reference
// on its parent. If this dentry is a filesystem root, parent is nil.
// parent is protected by filesystem.renameMu.
parent *dentry
// name is the name of this dentry in its parent. If this dentry is a
// filesystem root, name is the empty string. name is protected by
// filesystem.renameMu.
name string
// inoKey is used to identify this dentry's inode.
inoKey inoKey
// If deleted is non-zero, the file represented by this dentry has been
// deleted. deleted is accessed using atomic memory operations.
deleted atomicbitops.Uint32
// cachingMu is used to synchronize concurrent dentry caching attempts on
// this dentry.
cachingMu sync.Mutex `state:"nosave"`
// If cached is true, this dentry is part of filesystem.dentryCache. cached
// is protected by cachingMu.
cached bool
// cacheEntry links dentry into filesystem.dentryCache.dentries. It is
// protected by filesystem.dentryCache.mu.
cacheEntry dentryListElem
// syncableListEntry links dentry into filesystem.syncableDentries. It is
// protected by filesystem.syncMu.
syncableListEntry dentryListElem
// opMu synchronizes operations on this dentry. Operations that mutate
// the dentry tree must hold this lock for writing. Operations that
// only read the tree must hold for reading.
opMu sync.RWMutex `state:"nosave"`
// childrenMu protects the cached children data for this dentry.
childrenMu sync.Mutex `state:"nosave"`
// If this dentry represents a directory, children contains:
//
// - Mappings of child filenames to dentries representing those children.
//
// - Mappings of child filenames that are known not to exist to nil
// dentries (only if InteropModeShared is not in effect and the directory
// is not synthetic).
//
// +checklocks:childrenMu
children map[string]*dentry
// If this dentry represents a directory, negativeChildrenCache cache
// names of negative children.
//
// +checklocks:childrenMu
negativeChildrenCache stringFixedCache
// If this dentry represents a directory, negativeChildren is the number
// of negative children cached in dentry.children
//
// +checklocks:childrenMu
negativeChildren int
// If this dentry represents a directory, syntheticChildren is the number
// of child dentries for which dentry.isSynthetic() == true.
//
// +checklocks:childrenMu
syntheticChildren int
// If this dentry represents a directory,
// dentry.cachedMetadataAuthoritative() == true, and dirents is not
// nil, then dirents is a cache of all entries in the directory, in the
// order they were returned by the server. childrenSet just stores the
// `Name` field of all dirents in a set for fast query. dirents and
// childrenSet share the same lifecycle.
//
// +checklocks:childrenMu
dirents []vfs.Dirent
// +checklocks:childrenMu
childrenSet map[string]struct{}
// Cached metadata; protected by metadataMu.
// To access:
// - In situations where consistency is not required (like stat), these
// can be accessed using atomic operations only (without locking).
// - Lock metadataMu and can access without atomic operations.
// To mutate:
// - Lock metadataMu and use atomic operations to update because we might
// have atomic readers that don't hold the lock.
metadataMu sync.Mutex `state:"nosave"`
ino uint64 // immutable
mode atomicbitops.Uint32 // type is immutable, perms are mutable
uid atomicbitops.Uint32 // auth.KUID, but stored as raw uint32 for sync/atomic
gid atomicbitops.Uint32 // auth.KGID, but ...
blockSize atomicbitops.Uint32 // 0 if unknown
// Timestamps, all nsecs from the Unix epoch.
atime atomicbitops.Int64
mtime atomicbitops.Int64
ctime atomicbitops.Int64
btime atomicbitops.Int64
// File size, which differs from other metadata in two ways:
//
// - We make a best-effort attempt to keep it up to date even if
// !dentry.cachedMetadataAuthoritative() for the sake of O_APPEND writes.
//
// - size is protected by both metadataMu and dataMu (i.e. both must be
// locked to mutate it; locking either is sufficient to access it).
size atomicbitops.Uint64
// If this dentry does not represent a synthetic file, deleted is 0, and
// atimeDirty/mtimeDirty are non-zero, atime/mtime may have diverged from the
// remote file's timestamps, which should be updated when this dentry is
// evicted.
atimeDirty atomicbitops.Uint32
mtimeDirty atomicbitops.Uint32
// nlink counts the number of hard links to this dentry. It's updated and
// accessed using atomic operations. It's not protected by metadataMu like the
// other metadata fields.
nlink atomicbitops.Uint32
mapsMu sync.Mutex `state:"nosave"`
// If this dentry represents a regular file, mappings tracks mappings of
// the file into memmap.MappingSpaces. mappings is protected by mapsMu.
mappings memmap.MappingSet
// - If this dentry represents a regular file or directory, readFD (if not
// -1) is a host FD used for reads by all regularFileFDs/directoryFDs
// representing this dentry.
//
// - If this dentry represents a regular file, writeFD (if not -1) is a host
// FD used for writes by all regularFileFDs representing this dentry.
//
// - If this dentry represents a regular file, mmapFD is the host FD used
// for memory mappings. If mmapFD is -1, no such FD is available, and the
// internal page cache implementation is used for memory mappings instead.
//
// These fields are protected by handleMu. readFD, writeFD, and mmapFD are
// additionally written using atomic memory operations, allowing them to be
// read (albeit racily) with atomic.LoadInt32() without locking handleMu.
//
// readFD and writeFD may or may not be the same file descriptor. Once either
// transitions from closed (-1) to open, it may be mutated with handleMu
// locked, but cannot be closed until the dentry is destroyed.
//
// readFD and writeFD may or may not be the same file descriptor. mmapFD is
// always either -1 or equal to readFD; if the file has been opened for
// writing, it is additionally either -1 or equal to writeFD.
handleMu sync.RWMutex `state:"nosave"`
readFD atomicbitops.Int32 `state:"nosave"`
writeFD atomicbitops.Int32 `state:"nosave"`
mmapFD atomicbitops.Int32 `state:"nosave"`
dataMu sync.RWMutex `state:"nosave"`
// If this dentry represents a regular file that is client-cached, cache
// maps offsets into the cached file to offsets into
// filesystem.mfp.MemoryFile() that store the file's data. cache is
// protected by dataMu.
cache fsutil.FileRangeSet
// If this dentry represents a regular file that is client-cached, dirty
// tracks dirty segments in cache. dirty is protected by dataMu.
dirty fsutil.DirtySet
// pf implements platform.File for mappings of hostFD.
pf dentryPlatformFile
// If this dentry represents a symbolic link, InteropModeShared is not in
// effect, and haveTarget is true, target is the symlink target. haveTarget
// and target are protected by dataMu.
haveTarget bool
target string
// If this dentry represents a synthetic socket file, endpoint is the
// transport endpoint bound to this file.
endpoint transport.BoundEndpoint
// If this dentry represents a synthetic named pipe, pipe is the pipe
// endpoint bound to this file.
pipe *pipe.VFSPipe
locks vfs.FileLocks
// Inotify watches for this dentry.
//
// Note that inotify may behave unexpectedly in the presence of hard links,
// because dentries corresponding to the same file have separate inotify
// watches when they should share the same set. This is the case because it is
// impossible for us to know for sure whether two dentries correspond to the
// same underlying file (see the gofer filesystem section fo vfs/inotify.md for
// a more in-depth discussion on this matter).
watches vfs.Watches
// impl is the specific dentry implementation for non-synthetic dentries.
// impl is immutable.
//
// If impl is nil, this dentry represents a synthetic file, i.e. a
// file that does not exist on the host filesystem. As of this writing, the
// only files that can be synthetic are sockets, pipes, and directories.
impl any
}
// stringListElem wraps a string so it can be stored in an intrusive linked
// list (via the embedded, code-generated stringEntry).
//
// +stateify savable
type stringListElem struct {
	// str is the string that this elem represents.
	str string
	stringEntry
}
// dentryListElem wraps a *dentry so it can be stored in an intrusive linked
// list (via the embedded, code-generated dentryEntry).
//
// +stateify savable
type dentryListElem struct {
	// d is the dentry that this elem represents.
	d *dentry
	dentryEntry
}
// inoFromKey returns the inode number assigned to key, allocating a fresh
// one on first use. The mapping is stable for the lifetime of fs, so the
// same key always yields the same inode number.
func (fs *filesystem) inoFromKey(key inoKey) uint64 {
	fs.inoMu.Lock()
	defer fs.inoMu.Unlock()
	ino, ok := fs.inoByKey[key]
	if !ok {
		ino = fs.nextIno()
		fs.inoByKey[key] = ino
	}
	return ino
}
// nextIno returns a new, unique inode number by atomically incrementing a
// filesystem-wide counter.
func (fs *filesystem) nextIno() uint64 {
	return fs.lastIno.Add(1)
}
// init must be called before first use of d.
//
// It wires up the back-pointers used by the platform file and the intrusive
// list entries, records the implementation, and registers d with the leak
// checker.
func (d *dentry) init(impl any) {
	d.pf.dentry = d
	d.cacheEntry.d = d
	d.syncableListEntry.d = d
	// Nested impl-inheritance pattern. In memory it looks like:
	// [[[ vfs.Dentry ] dentry ] dentryImpl ]
	// All 3 abstractions are allocated in one allocation. We achieve this by
	// making each outer dentry implementation hold the inner dentry by value.
	// Then the outer most dentry is allocated and we initialize fields inward.
	// Each inner dentry has a pointer to the next level of implementation.
	d.impl = impl
	d.vfsd.Init(d)
	refs.Register(d)
}
// isSynthetic returns true if d represents a synthetic file, i.e. a file
// with no counterpart on the remote filesystem. Per the impl field's
// documentation, impl is nil exactly for synthetic dentries.
func (d *dentry) isSynthetic() bool {
	return d.impl == nil
}
// cachedMetadataAuthoritative returns true if d's cached metadata may be
// trusted: either the mount is not sharing state with other remote users
// (interop mode is not InteropModeShared), or the file is synthetic and has
// no remote state at all.
func (d *dentry) cachedMetadataAuthoritative() bool {
	return d.fs.opts.interop != InteropModeShared || d.isSynthetic()
}
// updateMetadataFromStatxLocked is called to update d's metadata after an update
// from the remote filesystem. Each attribute is only applied if the
// corresponding bit is set in stat.Mask, so a partial statx result updates
// only what the server actually provided.
//
// Precondition: d.metadataMu must be locked.
// +checklocks:d.metadataMu
func (d *lisafsDentry) updateMetadataFromStatxLocked(stat *linux.Statx) {
	if stat.Mask&linux.STATX_TYPE != 0 {
		// The file type must never change for a given dentry; a change
		// indicates server misbehavior, so panic rather than corrupt state.
		if got, want := stat.Mode&linux.FileTypeMask, d.fileType(); uint32(got) != want {
			panic(fmt.Sprintf("gofer.dentry file type changed from %#o to %#o", want, got))
		}
	}
	if stat.Mask&linux.STATX_MODE != 0 {
		d.mode.Store(uint32(stat.Mode))
	}
	if stat.Mask&linux.STATX_UID != 0 {
		d.uid.Store(dentryUID(lisafs.UID(stat.UID)))
	}
	if stat.Mask&linux.STATX_GID != 0 {
		d.gid.Store(dentryGID(lisafs.GID(stat.GID)))
	}
	if stat.Blksize != 0 {
		d.blockSize.Store(stat.Blksize)
	}
	// Don't override newer client-defined timestamps with old server-defined
	// ones.
	if stat.Mask&linux.STATX_ATIME != 0 && d.atimeDirty.Load() == 0 {
		d.atime.Store(dentryTimestamp(stat.Atime))
	}
	if stat.Mask&linux.STATX_MTIME != 0 && d.mtimeDirty.Load() == 0 {
		d.mtime.Store(dentryTimestamp(stat.Mtime))
	}
	if stat.Mask&linux.STATX_CTIME != 0 {
		d.ctime.Store(dentryTimestamp(stat.Ctime))
	}
	if stat.Mask&linux.STATX_BTIME != 0 {
		d.btime.Store(dentryTimestamp(stat.Btime))
	}
	if stat.Mask&linux.STATX_NLINK != 0 {
		d.nlink.Store(stat.Nlink)
	}
	if stat.Mask&linux.STATX_SIZE != 0 {
		d.updateSizeLocked(stat.Size)
	}
}
// updateMetadataFromStatLocked is similar to updateMetadataFromStatxLocked,
// except that it takes a unix.Stat_t argument. Unlike statx, fstat(2) always
// returns every attribute, so there is no mask to consult. Currently always
// returns nil.
//
// Precondition: d.metadataMu must be locked.
// +checklocks:d.metadataMu
func (d *directfsDentry) updateMetadataFromStatLocked(stat *unix.Stat_t) error {
	// The file type must never change for a given dentry; a change indicates
	// host misbehavior, so panic rather than corrupt state.
	if got, want := stat.Mode&unix.S_IFMT, d.fileType(); got != want {
		panic(fmt.Sprintf("direct.dentry file type changed from %#o to %#o", want, got))
	}
	d.mode.Store(stat.Mode)
	d.uid.Store(stat.Uid)
	d.gid.Store(stat.Gid)
	d.blockSize.Store(uint32(stat.Blksize))
	// Don't override newer client-defined timestamps with old host-defined
	// ones.
	if d.atimeDirty.Load() == 0 {
		d.atime.Store(dentryTimestampFromUnix(stat.Atim))
	}
	if d.mtimeDirty.Load() == 0 {
		d.mtime.Store(dentryTimestampFromUnix(stat.Mtim))
	}
	d.ctime.Store(dentryTimestampFromUnix(stat.Ctim))
	d.nlink.Store(uint32(stat.Nlink))
	d.updateSizeLocked(uint64(stat.Size))
	return nil
}
// refreshSizeLocked refreshes d.size from the filesystem, preferring a cheap
// statx(STATX_SIZE) on the writable host FD when one is available and
// falling back to a full metadata update otherwise.
//
// Preconditions:
//   - !d.isSynthetic().
//   - d.metadataMu is locked.
//
// +checklocks:d.metadataMu
func (d *dentry) refreshSizeLocked(ctx context.Context) error {
	d.handleMu.RLock()
	// Can use RacyLoad() because handleMu is locked.
	if d.writeFD.RacyLoad() < 0 {
		d.handleMu.RUnlock()
		// Use a suitable FD if we don't have a writable host FD.
		return d.updateMetadataLocked(ctx, noHandle)
	}
	// Using statx(2) with a minimal mask is faster than fstat(2).
	var stat unix.Statx_t
	// Can use RacyLoad() because handleMu is locked.
	err := unix.Statx(int(d.writeFD.RacyLoad()), "", unix.AT_EMPTY_PATH, unix.STATX_SIZE, &stat)
	d.handleMu.RUnlock() // must be released before updateSizeLocked()
	if err != nil {
		return err
	}
	d.updateSizeLocked(stat.Size)
	return nil
}
// updateMetadata refreshes all of d's cached metadata from the remote
// filesystem.
//
// Preconditions: !d.isSynthetic().
func (d *dentry) updateMetadata(ctx context.Context) error {
	// d.metadataMu must be locked *before* we stat so that we do not end up
	// updating stale attributes in d.updateMetadataFromStatLocked().
	d.metadataMu.Lock()
	defer d.metadataMu.Unlock()
	return d.updateMetadataLocked(ctx, noHandle)
}
// fileType returns the S_IFMT (file type) bits of d's cached mode.
func (d *dentry) fileType() uint32 {
	return d.mode.Load() & linux.S_IFMT
}
// statTo fills stat entirely from d's cached metadata; it performs no remote
// communication.
func (d *dentry) statTo(stat *linux.Statx) {
	stat.Mask = linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_NLINK | linux.STATX_UID | linux.STATX_GID | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME | linux.STATX_INO | linux.STATX_SIZE | linux.STATX_BLOCKS | linux.STATX_BTIME
	stat.Blksize = d.blockSize.Load()
	stat.Nlink = d.nlink.Load()
	if stat.Nlink == 0 {
		// The remote filesystem doesn't support link count; just make
		// something up. This is consistent with Linux, where
		// fs/inode.c:inode_init_always() initializes link count to 1, and
		// fs/9p/vfs_inode_dotl.c:v9fs_stat2inode_dotl() doesn't touch it if
		// it's not provided by the remote filesystem.
		stat.Nlink = 1
	}
	stat.UID = d.uid.Load()
	stat.GID = d.gid.Load()
	stat.Mode = uint16(d.mode.Load())
	stat.Ino = uint64(d.ino)
	stat.Size = d.size.Load()
	// This is consistent with regularFileFD.Seek(), which treats regular files
	// as having no holes.
	stat.Blocks = (stat.Size + 511) / 512
	stat.Atime = linux.NsecToStatxTimestamp(d.atime.Load())
	stat.Btime = linux.NsecToStatxTimestamp(d.btime.Load())
	stat.Ctime = linux.NsecToStatxTimestamp(d.ctime.Load())
	stat.Mtime = linux.NsecToStatxTimestamp(d.mtime.Load())
	stat.DevMajor = linux.UNNAMED_MAJOR
	stat.DevMinor = d.fs.devMinor
}
// setStat implements the common setattr path: it validates the request,
// synthesizes client-side timestamps when our cached metadata is
// authoritative, pushes the change to the remote filesystem for
// non-synthetic dentries, and finally updates the cached metadata for every
// attribute that succeeded remotely.
//
// Precondition: fs.renameMu is locked.
func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs.SetStatOptions, mnt *vfs.Mount) error {
	stat := &opts.Stat
	if stat.Mask == 0 {
		// Nothing to change.
		return nil
	}
	if stat.Mask&^(linux.STATX_MODE|linux.STATX_UID|linux.STATX_GID|linux.STATX_ATIME|linux.STATX_MTIME|linux.STATX_SIZE) != 0 {
		return linuxerr.EPERM
	}
	mode := linux.FileMode(d.mode.Load())
	if err := vfs.CheckSetStat(ctx, creds, opts, mode, auth.KUID(d.uid.Load()), auth.KGID(d.gid.Load())); err != nil {
		return err
	}
	if err := mnt.CheckBeginWrite(); err != nil {
		return err
	}
	defer mnt.EndWrite()
	if stat.Mask&linux.STATX_SIZE != 0 {
		// Reject attempts to truncate files other than regular files, since
		// filesystem implementations may return the wrong errno.
		switch mode.FileType() {
		case linux.S_IFREG:
			// ok
		case linux.S_IFDIR:
			return linuxerr.EISDIR
		default:
			return linuxerr.EINVAL
		}
	}
	var now int64
	if d.cachedMetadataAuthoritative() {
		// Truncate updates mtime.
		if stat.Mask&(linux.STATX_SIZE|linux.STATX_MTIME) == linux.STATX_SIZE {
			stat.Mask |= linux.STATX_MTIME
			stat.Mtime = linux.StatxTimestamp{
				Nsec: linux.UTIME_NOW,
			}
		}
		// Use client clocks for timestamps.
		now = d.fs.clock.Now().Nanoseconds()
		if stat.Mask&linux.STATX_ATIME != 0 && stat.Atime.Nsec == linux.UTIME_NOW {
			stat.Atime = linux.NsecToStatxTimestamp(now)
		}
		if stat.Mask&linux.STATX_MTIME != 0 && stat.Mtime.Nsec == linux.UTIME_NOW {
			stat.Mtime = linux.NsecToStatxTimestamp(now)
		}
	}
	d.metadataMu.Lock()
	defer d.metadataMu.Unlock()
	// As with Linux, if the UID, GID, or file size is changing, we have to
	// clear permission bits. Note that when set, clearSGID may cause
	// permissions to be updated.
	clearSGID := (stat.Mask&linux.STATX_UID != 0 && stat.UID != d.uid.Load()) ||
		(stat.Mask&linux.STATX_GID != 0 && stat.GID != d.gid.Load()) ||
		stat.Mask&linux.STATX_SIZE != 0
	if clearSGID {
		if stat.Mask&linux.STATX_MODE != 0 {
			stat.Mode = uint16(vfs.ClearSUIDAndSGID(uint32(stat.Mode)))
		} else {
			oldMode := d.mode.Load()
			if updatedMode := vfs.ClearSUIDAndSGID(oldMode); updatedMode != oldMode {
				stat.Mode = uint16(updatedMode)
				stat.Mask |= linux.STATX_MODE
			}
		}
	}
	// failureMask indicates which attributes could not be set on the remote
	// filesystem. p9 returns an error if any of the attributes could not be set
	// but that leads to inconsistency as the server could have set a few
	// attributes successfully but a later failure will cause the successful ones
	// to not be updated in the dentry cache.
	var failureMask uint32
	var failureErr error
	if !d.isSynthetic() {
		if stat.Mask != 0 {
			if err := d.prepareSetStat(ctx, stat); err != nil {
				return err
			}
			d.handleMu.RLock()
			if stat.Mask&linux.STATX_SIZE != 0 {
				// d.dataMu must be held around the update to both the remote
				// file's size and d.size to serialize with writeback (which
				// might otherwise write data back up to the old d.size after
				// the remote file has been truncated).
				d.dataMu.Lock()
			}
			var err error
			failureMask, failureErr, err = d.setStatLocked(ctx, stat)
			d.handleMu.RUnlock()
			if err != nil {
				if stat.Mask&linux.STATX_SIZE != 0 {
					d.dataMu.Unlock() // +checklocksforce: locked conditionally above
				}
				return err
			}
			if stat.Mask&linux.STATX_SIZE != 0 {
				if failureMask&linux.STATX_SIZE == 0 {
					// d.size should be kept up to date, and privatized
					// copy-on-write mappings of truncated pages need to be
					// invalidated, even if InteropModeShared is in effect.
					d.updateSizeAndUnlockDataMuLocked(stat.Size) // +checklocksforce: locked conditionally above
				} else {
					d.dataMu.Unlock() // +checklocksforce: locked conditionally above
				}
			}
		}
		if d.fs.opts.interop == InteropModeShared {
			// There's no point to updating d's metadata in this case since
			// it'll be overwritten by revalidation before the next time it's
			// used anyway. (InteropModeShared inhibits client caching of
			// regular file data, so there's no cache to truncate either.)
			return nil
		}
	}
	// Only cache attributes that were either not sent remotely (synthetic
	// dentries) or were accepted by the remote filesystem.
	if stat.Mask&linux.STATX_MODE != 0 && failureMask&linux.STATX_MODE == 0 {
		d.mode.Store(d.fileType() | uint32(stat.Mode))
	}
	if stat.Mask&linux.STATX_UID != 0 && failureMask&linux.STATX_UID == 0 {
		d.uid.Store(stat.UID)
	}
	if stat.Mask&linux.STATX_GID != 0 && failureMask&linux.STATX_GID == 0 {
		d.gid.Store(stat.GID)
	}
	// Note that stat.Atime.Nsec and stat.Mtime.Nsec can't be UTIME_NOW because
	// if d.cachedMetadataAuthoritative() then we converted stat.Atime and
	// stat.Mtime to client-local timestamps above, and if
	// !d.cachedMetadataAuthoritative() then we returned after calling
	// d.file.setAttr(). For the same reason, now must have been initialized.
	if stat.Mask&linux.STATX_ATIME != 0 && failureMask&linux.STATX_ATIME == 0 {
		d.atime.Store(stat.Atime.ToNsec())
		d.atimeDirty.Store(0)
	}
	if stat.Mask&linux.STATX_MTIME != 0 && failureMask&linux.STATX_MTIME == 0 {
		d.mtime.Store(stat.Mtime.ToNsec())
		d.mtimeDirty.Store(0)
	}
	d.ctime.Store(now)
	if failureMask != 0 {
		// Setting some attribute failed on the remote filesystem.
		return failureErr
	}
	return nil
}
// doAllocate performs an allocate operation on d. Note that d.metadataMu will
// be held when allocate is called. On success, d.size grows to at least
// offset+length and, if cached metadata is authoritative, ctime/mtime are
// touched.
func (d *dentry) doAllocate(ctx context.Context, offset, length uint64, allocate func() error) error {
	d.metadataMu.Lock()
	defer d.metadataMu.Unlock()
	// Allocating a smaller size is a noop.
	size := offset + length
	if d.cachedMetadataAuthoritative() && size <= d.size.RacyLoad() {
		return nil
	}
	err := allocate()
	if err != nil {
		return err
	}
	d.updateSizeLocked(size)
	if d.cachedMetadataAuthoritative() {
		d.touchCMtimeLocked()
	}
	return nil
}
// updateSizeLocked sets d.size to newSize, invalidating any cached pages and
// mappings beyond it. Thin wrapper that acquires d.dataMu for
// updateSizeAndUnlockDataMuLocked, which releases it.
//
// Preconditions: d.metadataMu must be locked.
func (d *dentry) updateSizeLocked(newSize uint64) {
	d.dataMu.Lock()
	d.updateSizeAndUnlockDataMuLocked(newSize)
}
// updateSizeAndUnlockDataMuLocked sets d.size to newSize and, when shrinking,
// invalidates memory mappings of truncated pages and drops them from the
// client cache without writeback.
//
// Preconditions: d.metadataMu and d.dataMu must be locked.
//
// Postconditions: d.dataMu is unlocked.
// +checklocksrelease:d.dataMu
func (d *dentry) updateSizeAndUnlockDataMuLocked(newSize uint64) {
	oldSize := d.size.RacyLoad()
	d.size.Store(newSize)
	// d.dataMu must be unlocked to lock d.mapsMu and invalidate mappings
	// below. This allows concurrent calls to Read/Translate/etc. These
	// functions synchronize with truncation by refusing to use cache
	// contents beyond the new d.size. (We are still holding d.metadataMu,
	// so we can't race with Write or another truncate.)
	d.dataMu.Unlock()
	if newSize < oldSize {
		oldpgend, _ := hostarch.PageRoundUp(oldSize)
		newpgend, _ := hostarch.PageRoundUp(newSize)
		if oldpgend != newpgend {
			d.mapsMu.Lock()
			d.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{
				// Compare Linux's mm/truncate.c:truncate_setsize() =>
				// truncate_pagecache() =>
				// mm/memory.c:unmap_mapping_range(evencows=1).
				InvalidatePrivate: true,
			})
			d.mapsMu.Unlock()
		}
		// We are now guaranteed that there are no translations of
		// truncated pages, and can remove them from the cache. Since
		// truncated pages have been removed from the remote file, they
		// should be dropped without being written back.
		d.dataMu.Lock()
		d.cache.Truncate(newSize, d.fs.mfp.MemoryFile())
		d.dirty.KeepClean(memmap.MappableRange{newSize, oldpgend})
		d.dataMu.Unlock()
	}
}
// checkPermissions returns an error if creds may not perform the requested
// access types on d, based on d's cached mode/uid/gid.
func (d *dentry) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes) error {
	return vfs.GenericCheckPermissions(creds, ats, linux.FileMode(d.mode.Load()), auth.KUID(d.uid.Load()), auth.KGID(d.gid.Load()))
}
// checkXattrPermissions returns an error if creds may not perform ats on the
// extended attribute name of d.
//
// The "security" and "system" namespaces are denied since applications may
// expect these to affect kernel behavior in unimplemented ways (b/148380782).
// This is inconsistent with Linux's 9p client but consistent with other
// filesystems (e.g. FUSE). NOTE(b/202533394): the "trusted" namespace is
// also disallowed for now, consistent with the VFS1 gofer client. All other
// extended attributes are passed through to the remote filesystem.
func (d *dentry) checkXattrPermissions(creds *auth.Credentials, name string, ats vfs.AccessTypes) error {
	for _, prefix := range []string{linux.XATTR_SECURITY_PREFIX, linux.XATTR_SYSTEM_PREFIX, linux.XATTR_TRUSTED_PREFIX} {
		if strings.HasPrefix(name, prefix) {
			return linuxerr.EOPNOTSUPP
		}
	}
	mode := linux.FileMode(d.mode.Load())
	kuid := auth.KUID(d.uid.Load())
	kgid := auth.KGID(d.gid.Load())
	if err := vfs.GenericCheckPermissions(creds, ats, mode, kuid, kgid); err != nil {
		return err
	}
	return vfs.CheckXattrPermissions(creds, ats, mode, kuid, name)
}
// mayDelete returns an error if creds may not delete child from directory d,
// enforcing sticky-bit (restricted deletion) semantics via
// vfs.CheckDeleteSticky.
func (d *dentry) mayDelete(creds *auth.Credentials, child *dentry) error {
	return vfs.CheckDeleteSticky(
		creds,
		linux.FileMode(d.mode.Load()),
		auth.KUID(d.uid.Load()),
		auth.KUID(child.uid.Load()),
		auth.KGID(child.gid.Load()),
	)
}
// dentryUID converts a lisafs UID to the uint32 stored in dentry.uid,
// substituting the overflow UID when the server did not supply a valid one.
func dentryUID(uid lisafs.UID) uint32 {
	if uid.Ok() {
		return uint32(uid)
	}
	return uint32(auth.OverflowUID)
}
// dentryGID converts a lisafs GID to the uint32 stored in dentry.gid,
// substituting the overflow GID when the server did not supply a valid one.
func dentryGID(gid lisafs.GID) uint32 {
	if gid.Ok() {
		return uint32(gid)
	}
	return uint32(auth.OverflowGID)
}
// IncRef implements vfs.DentryImpl.IncRef.
//
// Unlike TryIncRef, this unconditionally increments and therefore requires
// the caller to already guarantee that d cannot be concurrently destroyed.
func (d *dentry) IncRef() {
	// d.refs may be 0 if d.fs.renameMu is locked, which serializes against
	// d.checkCachingLocked().
	r := d.refs.Add(1)
	if d.LogRefs() {
		refs.LogIncRef(d, r)
	}
}
// TryIncRef implements vfs.DentryImpl.TryIncRef.
//
// It takes a reference only if d currently has a positive reference count,
// using a CAS loop so that a concurrent transition to 0 or -1 (destroyed)
// is never resurrected.
func (d *dentry) TryIncRef() bool {
	for {
		r := d.refs.Load()
		if r <= 0 {
			// Zero or destroyed (-1): cannot safely take a reference.
			return false
		}
		if d.refs.CompareAndSwap(r, r+1) {
			if d.LogRefs() {
				refs.LogTryIncRef(d, r+1)
			}
			return true
		}
	}
}
// DecRef implements vfs.DentryImpl.DecRef.
//
// When the last reference is dropped, d becomes a candidate for caching or
// destruction via checkCachingLocked.
func (d *dentry) DecRef(ctx context.Context) {
	if d.decRefNoCaching() == 0 {
		d.checkCachingLocked(ctx, false /* renameMuWriteLocked */)
	}
}
// decRefNoCaching decrements d's reference count without calling
// d.checkCachingLocked, even if d's reference count reaches 0; callers are
// responsible for ensuring that d.checkCachingLocked will be called later.
// It returns the new reference count.
func (d *dentry) decRefNoCaching() int64 {
	r := d.refs.Add(-1)
	if d.LogRefs() {
		refs.LogDecRef(d, r)
	}
	if r < 0 {
		// More DecRefs than IncRefs indicates a caller bug.
		panic("gofer.dentry.decRefNoCaching() called without holding a reference")
	}
	return r
}
// RefType implements refs.CheckedObject.Type. The returned string labels
// this object in leak-checker reports.
func (d *dentry) RefType() string {
	return "gofer.dentry"
}
// LeakMessage implements refs.CheckedObject.LeakMessage. A destroyed dentry
// has refs == -1, so any other value at leak-check time indicates a leak.
func (d *dentry) LeakMessage() string {
	return fmt.Sprintf("[gofer.dentry %p] reference count of %d instead of -1", d, d.refs.Load())
}
// LogRefs implements refs.CheckedObject.LogRefs.
//
// This should only be set to true for debugging purposes, as it can generate an
// extremely large amount of output and drastically degrade performance.
func (d *dentry) LogRefs() bool {
	return false
}
// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.
//
// renameMu is held for reading so that d.parent and d.name are stable while
// notifying.
func (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {
	if d.isDir() {
		events |= linux.IN_ISDIR
	}
	d.fs.renameMu.RLock()
	// The ordering below is important, Linux always notifies the parent first.
	if d.parent != nil {
		d.parent.watches.Notify(ctx, d.name, events, cookie, et, d.isDeleted())
	}
	d.watches.Notify(ctx, "", events, cookie, et, d.isDeleted())
	d.fs.renameMu.RUnlock()
}
// Watches implements vfs.DentryImpl.Watches, returning d's inotify watch set.
func (d *dentry) Watches() *vfs.Watches {
	return &d.watches
}
// OnZeroWatches implements vfs.DentryImpl.OnZeroWatches.
//
// If no watches are left on this dentry and it has no references, cache it.
func (d *dentry) OnZeroWatches(ctx context.Context) {
	d.checkCachingLocked(ctx, false /* renameMuWriteLocked */)
}
// checkCachingLocked should be called after d's reference count becomes 0 or
// it becomes disowned.
//
// For performance, checkCachingLocked can also be called after d's reference
// count becomes non-zero, so that d can be removed from the LRU cache. This
// may help in reducing the size of the cache and hence reduce evictions. Note
// that this is not necessary for correctness.
//
// It may be called on a destroyed dentry. For example,
// renameMu[R]UnlockAndCheckCaching may call checkCachingLocked multiple times
// for the same dentry when the dentry is visited more than once in the same
// operation. One of the calls may destroy the dentry, so subsequent calls will
// do nothing.
//
// Decision ladder (under d.cachingMu): already destroyed -> no-op; refs > 0
// -> drop from LRU; dead (deleted/invalidated) -> destroy; evictable ->
// evict; has watches -> drop from LRU but keep alive; fs released ->
// destroy; otherwise -> (re)insert at LRU front and evict the LRU tail if
// over capacity.
//
// Preconditions: d.fs.renameMu must be locked for writing if
// renameMuWriteLocked is true; it may be temporarily unlocked.
func (d *dentry) checkCachingLocked(ctx context.Context, renameMuWriteLocked bool) {
	d.cachingMu.Lock()
	refs := d.refs.Load()
	if refs == -1 {
		// Dentry has already been destroyed.
		d.cachingMu.Unlock()
		return
	}
	if refs > 0 {
		// fs.dentryCache.dentries is permitted to contain dentries with non-zero
		// refs, which are skipped by fs.evictCachedDentryLocked() upon reaching
		// the end of the LRU. But it is still beneficial to remove d from the
		// cache as we are already holding d.cachingMu. Keeping a cleaner cache
		// also reduces the number of evictions (which is expensive as it acquires
		// fs.renameMu).
		d.removeFromCacheLocked()
		d.cachingMu.Unlock()
		return
	}
	// Deleted and invalidated dentries with zero references are no longer
	// reachable by path resolution and should be dropped immediately.
	if d.vfsd.IsDead() {
		d.removeFromCacheLocked()
		d.cachingMu.Unlock()
		if !renameMuWriteLocked {
			// Need to lock d.fs.renameMu for writing as needed by d.destroyLocked().
			d.fs.renameMu.Lock()
			defer d.fs.renameMu.Unlock()
			// Now that renameMu is locked for writing, no more refs can be taken on
			// d because path resolution requires renameMu for reading at least.
			if d.refs.Load() != 0 {
				// Destroy d only if its ref is still 0. If not, either someone took a
				// ref on it or it got destroyed before fs.renameMu could be acquired.
				return
			}
		}
		if d.isDeleted() {
			d.watches.HandleDeletion(ctx)
		}
		d.destroyLocked(ctx) // +checklocksforce: renameMu must be acquired at this point.
		return
	}
	if d.vfsd.IsEvictable() {
		d.cachingMu.Unlock()
		// Attempt to evict.
		if renameMuWriteLocked {
			d.evictLocked(ctx) // +checklocksforce: renameMu is locked in this case.
			return
		}
		d.evict(ctx)
		return
	}
	// If d still has inotify watches and it is not deleted or invalidated, it
	// can't be evicted. Otherwise, we will lose its watches, even if a new
	// dentry is created for the same file in the future. Note that the size of
	// d.watches cannot concurrently transition from zero to non-zero, because
	// adding a watch requires holding a reference on d.
	if d.watches.Size() > 0 {
		// As in the refs > 0 case, removing d is beneficial.
		d.removeFromCacheLocked()
		d.cachingMu.Unlock()
		return
	}
	if d.fs.released.Load() != 0 {
		d.cachingMu.Unlock()
		if !renameMuWriteLocked {
			// Need to lock d.fs.renameMu to access d.parent. Lock it for writing as
			// needed by d.destroyLocked() later.
			d.fs.renameMu.Lock()
			defer d.fs.renameMu.Unlock()
		}
		if d.parent != nil {
			d.parent.childrenMu.Lock()
			delete(d.parent.children, d.name)
			d.parent.childrenMu.Unlock()
		}
		d.destroyLocked(ctx) // +checklocksforce: see above.
		return
	}
	d.fs.dentryCache.mu.Lock()
	// If d is already cached, just move it to the front of the LRU.
	if d.cached {
		d.fs.dentryCache.dentries.Remove(&d.cacheEntry)
		d.fs.dentryCache.dentries.PushFront(&d.cacheEntry)
		d.fs.dentryCache.mu.Unlock()
		d.cachingMu.Unlock()
		return
	}
	// Cache the dentry, then evict the least recently used cached dentry if
	// the cache becomes over-full.
	d.fs.dentryCache.dentries.PushFront(&d.cacheEntry)
	d.fs.dentryCache.dentriesLen++
	d.cached = true
	shouldEvict := d.fs.dentryCache.dentriesLen > d.fs.dentryCache.maxCachedDentries
	d.fs.dentryCache.mu.Unlock()
	d.cachingMu.Unlock()
	if shouldEvict {
		if !renameMuWriteLocked {
			// Need to lock d.fs.renameMu for writing as needed by
			// d.evictCachedDentryLocked().
			d.fs.renameMu.Lock()
			defer d.fs.renameMu.Unlock()
		}
		d.fs.evictCachedDentryLocked(ctx) // +checklocksforce: see above.
	}
}
// removeFromCacheLocked removes d from the LRU dentry cache, if present, and
// clears d.cached. It is a no-op for uncached dentries.
//
// Preconditions: d.cachingMu must be locked.
func (d *dentry) removeFromCacheLocked() {
	if d.cached {
		d.fs.dentryCache.mu.Lock()
		d.fs.dentryCache.dentries.Remove(&d.cacheEntry)
		d.fs.dentryCache.dentriesLen--
		d.fs.dentryCache.mu.Unlock()
		d.cached = false
	}
}
// evictAllCachedDentriesLocked drains the dentry cache by repeatedly evicting
// the LRU tail until it is empty.
//
// Precondition: fs.renameMu must be locked for writing; it may be temporarily
// unlocked.
// +checklocks:fs.renameMu
func (fs *filesystem) evictAllCachedDentriesLocked(ctx context.Context) {
	for fs.dentryCache.dentriesLen != 0 {
		fs.evictCachedDentryLocked(ctx)
	}
}
// evictCachedDentryLocked evicts the least recently used cached dentry. The
// cache is shared between all gofer filesystems, so the victim may belong to
// a different filesystem than fs.
//
// Preconditions:
//   - fs.renameMu must be locked for writing; it may be temporarily unlocked.
//
// +checklocks:fs.renameMu
func (fs *filesystem) evictCachedDentryLocked(ctx context.Context) {
	fs.dentryCache.mu.Lock()
	victim := fs.dentryCache.dentries.Back()
	fs.dentryCache.mu.Unlock()
	if victim == nil {
		// fs.dentryCache.dentries may have become empty between when it was
		// checked and when we locked fs.dentryCache.mu.
		return
	}
	if victim.d.fs == fs {
		victim.d.evictLocked(ctx) // +checklocksforce: owned as precondition, victim.fs == fs
		return
	}
	// The dentry cache is shared between all gofer filesystems and the victim is
	// from another filesystem. Have that filesystem do the work. We unlock
	// fs.renameMu to prevent deadlock: two filesystems could otherwise wait on
	// each others' renameMu.
	fs.renameMu.Unlock()
	defer fs.renameMu.Lock()
	victim.d.evict(ctx)
}
// evict acquires d.fs.renameMu for writing and then evicts d; see
// evictLocked.
//
// Preconditions:
//   - d.fs.renameMu must not be locked for writing.
func (d *dentry) evict(ctx context.Context) {
	d.fs.renameMu.Lock()
	defer d.fs.renameMu.Unlock()
	d.evictLocked(ctx)
}
// evictLocked removes d from the dentry cache, detaches it from its parent
// (invalidating its VFS dentry), and destroys it. It aborts if d regained
// references or watches since it was cached.
//
// Preconditions:
//   - d.fs.renameMu must be locked for writing; it may be temporarily unlocked.
//
// +checklocks:d.fs.renameMu
func (d *dentry) evictLocked(ctx context.Context) {
	d.cachingMu.Lock()
	d.removeFromCacheLocked()
	// d.refs or d.watches.Size() may have become non-zero from an earlier path
	// resolution since it was inserted into fs.dentryCache.dentries.
	if d.refs.Load() != 0 || d.watches.Size() != 0 {
		d.cachingMu.Unlock()
		return
	}
	if d.parent != nil {
		d.parent.opMu.Lock()
		if !d.vfsd.IsDead() {
			// Note that d can't be a mount point (in any mount namespace), since VFS
			// holds references on mount points.
			rcs := d.fs.vfsfs.VirtualFilesystem().InvalidateDentry(ctx, &d.vfsd)
			for _, rc := range rcs {
				rc.DecRef(ctx)
			}
			d.parent.childrenMu.Lock()
			delete(d.parent.children, d.name)
			d.parent.childrenMu.Unlock()
			// We're only deleting the dentry, not the file it
			// represents, so we don't need to update
			// victim parent.dirents etc.
		}
		d.parent.opMu.Unlock()
	}
	// Safe to unlock cachingMu now that d.vfsd.IsDead(). Henceforth any
	// concurrent caching attempts on d will attempt to destroy it and so will
	// try to acquire fs.renameMu (which we have already acquired). Hence,
	// fs.renameMu will synchronize the destroy attempts.
	d.cachingMu.Unlock()
	d.destroyLocked(ctx) // +checklocksforce: owned as precondition.
}
// destroyDisconnected destroys an uncached, unparented dentry. There are no
// locking preconditions.
//
// It writes dirty cached pages back to the remote filesystem, discards the
// page cache, closes host FDs and impl-specific resources, removes d from
// the set of syncable dentries, and finally marks d destroyed (refs == -1).
func (d *dentry) destroyDisconnected(ctx context.Context) {
	mf := d.fs.mfp.MemoryFile()
	d.handleMu.Lock()
	d.dataMu.Lock()
	if d.isWriteHandleOk() {
		// Write dirty pages back to the remote filesystem.
		h := d.writeHandle()
		// Fixed misattributed log prefix: this warning is emitted by
		// destroyDisconnected, not destroyLocked.
		if err := fsutil.SyncDirtyAll(ctx, &d.cache, &d.dirty, d.size.Load(), mf, h.writeFromBlocksAt); err != nil {
			log.Warningf("gofer.dentry.destroyDisconnected: failed to write dirty data back: %v", err)
		}
	}
	// Discard cached data.
	if !d.cache.IsEmpty() {
		mf.MarkAllUnevictable(d)
		d.cache.DropAll(mf)
		d.dirty.RemoveAll()
	}
	d.dataMu.Unlock()
	// Close any resources held by the implementation.
	d.destroyImpl(ctx)
	// Can use RacyLoad() because handleMu is locked.
	if d.readFD.RacyLoad() >= 0 {
		_ = unix.Close(int(d.readFD.RacyLoad()))
	}
	if d.writeFD.RacyLoad() >= 0 && d.readFD.RacyLoad() != d.writeFD.RacyLoad() {
		_ = unix.Close(int(d.writeFD.RacyLoad()))
	}
	d.readFD = atomicbitops.FromInt32(-1)
	d.writeFD = atomicbitops.FromInt32(-1)
	d.mmapFD = atomicbitops.FromInt32(-1)
	d.handleMu.Unlock()
	if !d.isSynthetic() {
		// Note that it's possible that d.atimeDirty or d.mtimeDirty are true,
		// i.e. client and server timestamps may differ (because e.g. a client
		// write was serviced by the page cache, and only written back to the
		// remote file later). Ideally, we'd write client timestamps back to
		// the remote filesystem so that timestamps for a new dentry
		// instantiated for the same file would remain coherent. Unfortunately,
		// this turns out to be too expensive in many cases, so for now we
		// don't do this.

		// Remove d from the set of syncable dentries.
		d.fs.syncMu.Lock()
		d.fs.syncableDentries.Remove(&d.syncableListEntry)
		d.fs.syncMu.Unlock()
	}
	// Drop references and stop tracking this child.
	d.refs.Store(-1)
	refs.Unregister(d)
}
// destroyLocked destroys the dentry.
//
// Preconditions:
//   - d.fs.renameMu must be locked for writing; it may be temporarily unlocked.
//   - d.refs == 0.
//   - d.parent.children[d.name] != d, i.e. d is not reachable by path traversal
//     from its former parent dentry.
//
// +checklocks:d.fs.renameMu
func (d *dentry) destroyLocked(ctx context.Context) {
	switch d.refs.Load() {
	case 0:
		// Mark the dentry destroyed.
		d.refs.Store(-1)
	case -1:
		panic("dentry.destroyLocked() called on already destroyed dentry")
	default:
		panic("dentry.destroyLocked() called with references on the dentry")
	}
	// Allow the following to proceed without renameMu locked to improve
	// scalability.
	d.fs.renameMu.Unlock()
	// No locks need to be held during destroyDisconnected.
	d.destroyDisconnected(ctx)
	d.fs.renameMu.Lock()
	// Drop the reference held by d on its parent without recursively locking
	// d.fs.renameMu.
	if d.parent != nil && d.parent.decRefNoCaching() == 0 {
		d.parent.checkCachingLocked(ctx, true /* renameMuWriteLocked */)
	}
}
// isDeleted returns true if d has been unlinked (see setDeleted).
func (d *dentry) isDeleted() bool {
	return d.deleted.Load() != 0
}
// setDeleted marks d as unlinked. This is irreversible.
func (d *dentry) setDeleted() {
	d.deleted.Store(1)
}
// listXattr returns the names of d's extended attributes, delegating to the
// impl-specific implementation. Synthetic files have no remote backing and
// therefore no extended attributes.
func (d *dentry) listXattr(ctx context.Context, size uint64) ([]string, error) {
	if !d.isSynthetic() {
		return d.listXattrImpl(ctx, size)
	}
	return nil, nil
}
// getXattr returns the value of the named extended attribute after checking
// read permission. Synthetic files have no extended attributes, so lookups
// on them report ENODATA.
func (d *dentry) getXattr(ctx context.Context, creds *auth.Credentials, opts *vfs.GetXattrOptions) (string, error) {
	if d.isSynthetic() {
		return "", linuxerr.ENODATA
	}
	err := d.checkXattrPermissions(creds, opts.Name, vfs.MayRead)
	if err != nil {
		return "", err
	}
	return d.getXattrImpl(ctx, opts)
}
// setXattr sets the named extended attribute after checking write
// permission. Synthetic files have no remote backing to store attributes on,
// so writes to them report EPERM.
func (d *dentry) setXattr(ctx context.Context, creds *auth.Credentials, opts *vfs.SetXattrOptions) error {
	if d.isSynthetic() {
		return linuxerr.EPERM
	}
	err := d.checkXattrPermissions(creds, opts.Name, vfs.MayWrite)
	if err != nil {
		return err
	}
	return d.setXattrImpl(ctx, opts)
}
// removeXattr removes the named extended attribute after checking write
// permission. Synthetic files have no remote backing, so removals on them
// report EPERM.
func (d *dentry) removeXattr(ctx context.Context, creds *auth.Credentials, name string) error {
	if d.isSynthetic() {
		return linuxerr.EPERM
	}
	err := d.checkXattrPermissions(creds, name, vfs.MayWrite)
	if err != nil {
		return err
	}
	return d.removeXattrImpl(ctx, name)
}
// ensureSharedHandle ensures that d holds host FDs / remote handles adequate
// for the requested access (read and/or write), opening new ones if needed.
// If trunc is set, a new handle opened with O_TRUNC is always obtained.
// Existing FDs are swapped carefully so that concurrent users of d.pf.FD()
// and existing memory mappings remain valid wherever possible.
//
// Preconditions:
//   - !d.isSynthetic().
//   - d.isRegularFile() || d.isDir().
//   - fs.renameMu is locked.
func (d *dentry) ensureSharedHandle(ctx context.Context, read, write, trunc bool) error {
	// O_TRUNC unconditionally requires us to obtain a new handle (opened with
	// O_TRUNC).
	if !trunc {
		// Fast path: check under the read lock whether the handles we
		// already have cover the requested access.
		d.handleMu.RLock()
		canReuseCurHandle := (!read || d.isReadHandleOk()) && (!write || d.isWriteHandleOk())
		d.handleMu.RUnlock()
		if canReuseCurHandle {
			// Current handles are sufficient.
			return nil
		}
	}
	d.handleMu.Lock()
	// Re-check under the write lock; another goroutine may have raced us here
	// and already opened suitable handles.
	needNewHandle := (read && !d.isReadHandleOk()) || (write && !d.isWriteHandleOk()) || trunc
	if !needNewHandle {
		d.handleMu.Unlock()
		return nil
	}
	// FDs to close are collected and closed only after handleMu is released,
	// at the bottom of this function.
	var fdsToCloseArr [2]int32
	fdsToClose := fdsToCloseArr[:0]
	invalidateTranslations := false
	// Get a new handle. If this file has been opened for both reading and
	// writing, try to get a single handle that is usable for both:
	//
	// - Writable memory mappings of a host FD require that the host FD is
	// opened for both reading and writing.
	//
	// - NOTE(b/141991141): Some filesystems may not ensure coherence
	// between multiple handles for the same file.
	openReadable := d.isReadHandleOk() || read
	openWritable := d.isWriteHandleOk() || write
	h, err := d.openHandle(ctx, openReadable, openWritable, trunc)
	if linuxerr.Equals(linuxerr.EACCES, err) && (openReadable != read || openWritable != write) {
		// It may not be possible to use a single handle for both
		// reading and writing, since permissions on the file may have
		// changed to e.g. disallow reading after previously being
		// opened for reading. In this case, we have no choice but to
		// use separate handles for reading and writing.
		ctx.Debugf("gofer.dentry.ensureSharedHandle: bifurcating read/write handles for dentry %p", d)
		openReadable = read
		openWritable = write
		h, err = d.openHandle(ctx, openReadable, openWritable, trunc)
	}
	if err != nil {
		d.handleMu.Unlock()
		return err
	}
	// Update d.readFD and d.writeFD
	if h.fd >= 0 {
		if openReadable && openWritable && (d.readFD.RacyLoad() < 0 || d.writeFD.RacyLoad() < 0 || d.readFD.RacyLoad() != d.writeFD.RacyLoad()) {
			// Replace existing FDs with this one.
			if d.readFD.RacyLoad() >= 0 {
				// We already have a readable FD that may be in use by
				// concurrent callers of d.pf.FD().
				if d.fs.opts.overlayfsStaleRead {
					// If overlayfsStaleRead is in effect, then the new FD
					// may not be coherent with the existing one, so we
					// have no choice but to switch to mappings of the new
					// FD in both the application and sentry.
					if err := d.pf.hostFileMapper.RegenerateMappings(int(h.fd)); err != nil {
						d.handleMu.Unlock()
						ctx.Warningf("gofer.dentry.ensureSharedHandle: failed to replace sentry mappings of old FD with mappings of new FD: %v", err)
						h.close(ctx)
						return err
					}
					fdsToClose = append(fdsToClose, d.readFD.RacyLoad())
					invalidateTranslations = true
					d.readFD.Store(h.fd)
				} else {
					// Otherwise, we want to avoid invalidating existing
					// memmap.Translations (which is expensive); instead, use
					// dup3 to make the old file descriptor refer to the new
					// file description, then close the new file descriptor
					// (which is no longer needed). Racing callers of d.pf.FD()
					// may use the old or new file description, but this
					// doesn't matter since they refer to the same file, and
					// any racing mappings must be read-only.
					if err := unix.Dup3(int(h.fd), int(d.readFD.RacyLoad()), unix.O_CLOEXEC); err != nil {
						oldFD := d.readFD.RacyLoad()
						d.handleMu.Unlock()
						ctx.Warningf("gofer.dentry.ensureSharedHandle: failed to dup fd %d to fd %d: %v", h.fd, oldFD, err)
						h.close(ctx)
						return err
					}
					fdsToClose = append(fdsToClose, h.fd)
					h.fd = d.readFD.RacyLoad()
				}
			} else {
				d.readFD.Store(h.fd)
			}
			if d.writeFD.RacyLoad() != h.fd && d.writeFD.RacyLoad() >= 0 {
				fdsToClose = append(fdsToClose, d.writeFD.RacyLoad())
			}
			d.writeFD.Store(h.fd)
			d.mmapFD.Store(h.fd)
		} else if openReadable && d.readFD.RacyLoad() < 0 {
			readHandleWasOk := d.isReadHandleOk()
			d.readFD.Store(h.fd)
			// If the file has not been opened for writing, the new FD may
			// be used for read-only memory mappings. If the file was
			// previously opened for reading (without an FD), then existing
			// translations of the file may use the internal page cache;
			// invalidate those mappings.
			if !d.isWriteHandleOk() {
				invalidateTranslations = readHandleWasOk
				d.mmapFD.Store(h.fd)
			}
		} else if openWritable && d.writeFD.RacyLoad() < 0 {
			d.writeFD.Store(h.fd)
			if d.readFD.RacyLoad() >= 0 {
				// We have an existing read-only FD, but the file has just
				// been opened for writing, so we need to start supporting
				// writable memory mappings. However, the new FD is not
				// readable, so we have no FD that can be used to create
				// writable memory mappings. Switch to using the internal
				// page cache.
				invalidateTranslations = true
				d.mmapFD.Store(-1)
			}
		} else {
			// The new FD is not useful.
			fdsToClose = append(fdsToClose, h.fd)
		}
	} else if openWritable && d.writeFD.RacyLoad() < 0 && d.mmapFD.RacyLoad() >= 0 {
		// We have an existing read-only FD, but the file has just been
		// opened for writing, so we need to start supporting writable
		// memory mappings. However, we have no writable host FD. Switch to
		// using the internal page cache.
		invalidateTranslations = true
		d.mmapFD.Store(-1)
	}
	d.updateHandles(ctx, h, openReadable, openWritable)
	d.handleMu.Unlock()
	if invalidateTranslations {
		// Invalidate application mappings that may be using an old FD; they
		// will be replaced with mappings using the new FD after future calls
		// to d.Translate(). This requires holding d.mapsMu, which precedes
		// d.handleMu in the lock order.
		d.mapsMu.Lock()
		d.mappings.InvalidateAll(memmap.InvalidateOpts{})
		d.mapsMu.Unlock()
	}
	// Close retired FDs only now, outside of handleMu.
	for _, fd := range fdsToClose {
		unix.Close(int(fd))
	}
	return nil
}
// syncRemoteFile flushes d's remote handles, taking the handle lock for the
// duration of the sync.
func (d *dentry) syncRemoteFile(ctx context.Context) error {
	d.handleMu.RLock()
	defer d.handleMu.RUnlock()
	return d.syncRemoteFileLocked(ctx)
}
// syncRemoteFileLocked syncs d's write handle and then its read handle.
//
// Preconditions: d.handleMu must be locked.
func (d *dentry) syncRemoteFileLocked(ctx context.Context) error {
	// Prefer syncing write handles over read handles, since some remote
	// filesystem implementations may not sync changes made through write
	// handles otherwise.
	wh := d.writeHandle()
	wh.sync(ctx)
	rh := d.readHandle()
	rh.sync(ctx)
	// NOTE(review): sync errors from both handles are discarded here —
	// presumably intentional best-effort behavior; confirm upstream.
	return nil
}
// syncCachedFile writes back any dirty cached pages through d's write handle
// and then syncs the remote file. If forFilesystemSync is true, sync failures
// on non-writable or non-regular files are downgraded to a debug log instead
// of an error, since sync could never have been expected to succeed there.
func (d *dentry) syncCachedFile(ctx context.Context, forFilesystemSync bool) error {
	d.handleMu.RLock()
	defer d.handleMu.RUnlock()
	if d.isWriteHandleOk() {
		// Write back dirty pages to the remote file.
		d.dataMu.Lock()
		h := d.writeHandle()
		err := fsutil.SyncDirtyAll(ctx, &d.cache, &d.dirty, d.size.Load(), d.fs.mfp.MemoryFile(), h.writeFromBlocksAt)
		d.dataMu.Unlock()
		if err != nil {
			return err
		}
	}
	if err := d.syncRemoteFileLocked(ctx); err != nil {
		if !forFilesystemSync {
			return err
		}
		// Only return err if we can reasonably have expected sync to succeed
		// (d is a regular file and was opened for writing).
		if d.isRegularFile() && d.isWriteHandleOk() {
			return err
		}
		ctx.Debugf("gofer.dentry.syncCachedFile: syncing non-writable or non-regular-file dentry failed: %v", err)
	}
	return nil
}
// incLinks increments the cached link count. A count of zero means the
// remote filesystem does not report link counts, so it is left untouched.
func (d *dentry) incLinks() {
	if n := d.nlink.Load(); n != 0 {
		d.nlink.Add(1)
	}
}
// decLinks decrements the cached link count (by adding the two's-complement
// of 1). A count of zero means the remote filesystem does not report link
// counts, so it is left untouched.
func (d *dentry) decLinks() {
	if n := d.nlink.Load(); n != 0 {
		d.nlink.Add(^uint32(0))
	}
}
// fileDescription is embedded by gofer implementations of
// vfs.FileDescriptionImpl.
//
// +stateify savable
type fileDescription struct {
	vfsfd vfs.FileDescription
	vfs.FileDescriptionDefaultImpl
	vfs.LockFD

	// lockLogging ensures the "lock handled internally" message is emitted
	// at most once per file description; not saved across checkpoints.
	lockLogging sync.Once `state:"nosave"`
}
// filesystem returns the gofer filesystem that fd's mount belongs to.
func (fd *fileDescription) filesystem() *filesystem {
	return fd.vfsfd.Mount().Filesystem().Impl().(*filesystem)
}
// dentry returns the gofer dentry underlying fd.
func (fd *fileDescription) dentry() *dentry {
	return fd.vfsfd.Dentry().Impl().(*dentry)
}
// Stat implements vfs.FileDescriptionImpl.Stat. If cached metadata is not
// authoritative and the caller requested fields we may have stale values
// for (and did not pass AT_STATX_DONT_SYNC), metadata is refreshed from the
// remote first.
func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linux.Statx, error) {
	d := fd.dentry()
	// Fields that may be stale in the cache and thus force a refresh.
	const validMask = uint32(linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME | linux.STATX_SIZE | linux.STATX_BLOCKS | linux.STATX_BTIME)
	if !d.cachedMetadataAuthoritative() && opts.Mask&validMask != 0 && opts.Sync != linux.AT_STATX_DONT_SYNC {
		// Use specialFileFD.handle.fileLisa for the Stat if available, for the
		// same reason that we try to use open FD in updateMetadataLocked().
		var err error
		if sffd, ok := fd.vfsfd.Impl().(*specialFileFD); ok {
			err = sffd.updateMetadata(ctx)
		} else {
			err = d.updateMetadata(ctx)
		}
		if err != nil {
			return linux.Statx{}, err
		}
	}
	var stat linux.Statx
	d.statTo(&stat)
	return stat, nil
}
// SetStat implements vfs.FileDescriptionImpl.SetStat. renameMu is held for
// reading to stabilize the dentry's position in the tree during the update.
func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions) error {
	fs := fd.filesystem()
	fs.renameMu.RLock()
	defer fs.renameMu.RUnlock()
	return fd.dentry().setStat(ctx, auth.CredentialsFromContext(ctx), &opts, fd.vfsfd.Mount())
}
// ListXattr implements vfs.FileDescriptionImpl.ListXattr by delegating to
// the underlying dentry.
func (fd *fileDescription) ListXattr(ctx context.Context, size uint64) ([]string, error) {
	return fd.dentry().listXattr(ctx, size)
}
// GetXattr implements vfs.FileDescriptionImpl.GetXattr by delegating to the
// underlying dentry with the caller's credentials.
func (fd *fileDescription) GetXattr(ctx context.Context, opts vfs.GetXattrOptions) (string, error) {
	return fd.dentry().getXattr(ctx, auth.CredentialsFromContext(ctx), &opts)
}
// SetXattr implements vfs.FileDescriptionImpl.SetXattr by delegating to the
// underlying dentry with the caller's credentials.
func (fd *fileDescription) SetXattr(ctx context.Context, opts vfs.SetXattrOptions) error {
	return fd.dentry().setXattr(ctx, auth.CredentialsFromContext(ctx), &opts)
}
// RemoveXattr implements vfs.FileDescriptionImpl.RemoveXattr by delegating
// to the underlying dentry with the caller's credentials.
func (fd *fileDescription) RemoveXattr(ctx context.Context, name string) error {
	return fd.dentry().removeXattr(ctx, auth.CredentialsFromContext(ctx), name)
}
// LockBSD implements vfs.FileDescriptionImpl.LockBSD. BSD-style locks are
// handled entirely in the sentry (never forwarded to the gofer); the
// once-per-FD log line records that fact.
func (fd *fileDescription) LockBSD(ctx context.Context, uid fslock.UniqueID, ownerPID int32, t fslock.LockType, block bool) error {
	fd.lockLogging.Do(func() {
		log.Infof("File lock using gofer file handled internally.")
	})
	return fd.LockFD.LockBSD(ctx, uid, ownerPID, t, block)
}
// LockPOSIX implements vfs.FileDescriptionImpl.LockPOSIX. POSIX range locks
// are likewise handled entirely in the sentry.
func (fd *fileDescription) LockPOSIX(ctx context.Context, uid fslock.UniqueID, ownerPID int32, t fslock.LockType, r fslock.LockRange, block bool) error {
	fd.lockLogging.Do(func() {
		log.Infof("Range lock using gofer file handled internally.")
	})
	return fd.Locks().LockPOSIX(ctx, uid, ownerPID, t, r, block)
}
// UnlockPOSIX implements vfs.FileDescriptionImpl.UnlockPOSIX.
func (fd *fileDescription) UnlockPOSIX(ctx context.Context, uid fslock.UniqueID, r fslock.LockRange) error {
	return fd.Locks().UnlockPOSIX(ctx, uid, r)
}
// resolvingPath is just a wrapper around *vfs.ResolvingPath. It additionally
// holds some information around the intent behind resolving the path.
type resolvingPath struct {
	*vfs.ResolvingPath

	// excludeLast indicates whether the intent is to resolve until the last path
	// component. If true, the last path component should remain unresolved.
	excludeLast bool
}
// resolvingPathFull wraps rp with the intent of resolving every component,
// including the last.
func resolvingPathFull(rp *vfs.ResolvingPath) resolvingPath {
	return resolvingPath{ResolvingPath: rp, excludeLast: false}
}
// resolvingPathParent wraps rp with the intent of resolving only up to the
// parent, leaving the final component unresolved.
func resolvingPathParent(rp *vfs.ResolvingPath) resolvingPath {
	return resolvingPath{ResolvingPath: rp, excludeLast: true}
}
// done reports whether resolution has consumed every component this
// resolvingPath intends to resolve: all of them normally, or all but the
// final one when excludeLast is set.
func (rp *resolvingPath) done() bool {
	if !rp.excludeLast {
		return rp.Done()
	}
	return rp.Final()
}
// copy returns an independent copy of rp, duplicating the underlying
// vfs.ResolvingPath while preserving the resolution intent.
func (rp *resolvingPath) copy() resolvingPath {
	return resolvingPath{
		ResolvingPath: rp.ResolvingPath.Copy(),
		excludeLast:   rp.excludeLast,
	}
}
// getComponents invokes emit for each remaining path component, honoring the
// excludeLast intent.
//
// Precondition: !rp.done() && rp.Component() is not "." or "..".
func (rp *resolvingPath) getComponents(emit func(string) bool) {
	rp.GetComponents(rp.excludeLast, emit)
}
|
package utils
import (
"encoding/csv"
"fmt"
"log"
"os"
"github.com/tealeg/xlsx"
)
// GetCSVFile opens the file at the working directory joined with path
// (path is expected to begin with a path separator) and returns the open
// handle. The caller is responsible for closing it.
//
// Bug fix: the original code executed `defer f.Close()` before returning f,
// so every caller received an already-closed *os.File.
func GetCSVFile(path string) *os.File {
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	f, err := os.Open(wd + path)
	if err != nil {
		// Preserve prior best-effort behavior: report the error and return
		// the (nil) handle rather than aborting.
		fmt.Print("Error: ", err)
	}
	return f
}
// ReadCSV reads the CSV file at the working directory joined with path and
// returns every record. Any failure (cwd lookup, open, or parse) aborts the
// process via log.Fatal.
func ReadCSV(path string) [][]string {
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	file, err := os.Open(wd + path)
	if err != nil {
		log.Fatal(err)
	}
	// Deferred only after a successful Open, so we never Close a nil handle.
	defer file.Close()
	records, err := csv.NewReader(file).ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	return records
}
// ReadXLSX opens the xlsx workbook at the working directory joined with
// path. On open failure it reports the error and returns nil, mirroring the
// best-effort behavior of GetCSVFile.
//
// Fix: the failure message previously printed a bare "ERROR!" with no
// detail; it now includes the underlying error, matching GetCSVFile's style.
func ReadXLSX(path string) *xlsx.File {
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	xlFile, err := xlsx.OpenFile(wd + path)
	if err != nil {
		fmt.Print("Error: ", err)
	}
	return xlFile
}
|
package meter
import (
"errors"
"fmt"
"net/http"
"net/http/cookiejar"
"net/url"
"strings"
"time"
"github.com/evcc-io/evcc/api"
"github.com/evcc-io/evcc/provider"
"github.com/evcc-io/evcc/util"
"github.com/evcc-io/evcc/util/request"
)
// init registers the "tq-em" meter type with the evcc meter registry.
func init() {
	registry.Add("tq-em", NewTqEmFromConfig)
}
// tqemData is the JSON payload returned by the TQ Energy Manager web API.
// Field names follow OBIS codes (e.g. 1-0:1.4.0*255). Authentication and
// Serial are used to detect login state; the starred-pointer fields are
// optional per-phase currents that some devices omit.
type tqemData struct {
	Authentication *bool
	Serial         string
	Obis1_4_0      float64  `json:"1-0:1.4.0*255"`
	Obis1_8_0      float64  `json:"1-0:1.8.0*255"`
	Obis2_4_0      float64  `json:"1-0:2.4.0*255"`
	Obis2_8_0      float64  `json:"1-0:2.8.0*255"`
	Obis13_4_0     float64  `json:"1-0:13.4.0*255"`
	Obis14_4_0     float64  `json:"1-0:14.4.0*255"`
	Obis21_4_0     float64  `json:"1-0:21.4.0*255"`
	Obis21_8_0     float64  `json:"1-0:21.8.0*255"`
	Obis22_4_0     float64  `json:"1-0:22.4.0*255"`
	Obis22_8_0     float64  `json:"1-0:22.8.0*255"`
	Obis31_4_0     *float64 `json:"1-0:31.4.0*255"` // optional currents
	Obis32_4_0     float64  `json:"1-0:32.4.0*255"`
	Obis33_4_0     float64  `json:"1-0:33.4.0*255"`
	Obis41_4_0     float64  `json:"1-0:41.4.0*255"`
	Obis41_8_0     float64  `json:"1-0:41.8.0*255"`
	Obis42_4_0     float64  `json:"1-0:42.4.0*255"`
	Obis42_8_0     float64  `json:"1-0:42.8.0*255"`
	Obis51_4_0     *float64 `json:"1-0:51.4.0*255"` // optional currents
	Obis52_4_0     float64  `json:"1-0:52.4.0*255"`
	Obis53_4_0     float64  `json:"1-0:53.4.0*255"`
	Obis61_4_0     float64  `json:"1-0:61.4.0*255"`
	Obis61_8_0     float64  `json:"1-0:61.8.0*255"`
	Obis62_4_0     float64  `json:"1-0:62.4.0*255"`
	Obis62_8_0     float64  `json:"1-0:62.8.0*255"`
	Obis71_4_0     *float64 `json:"1-0:71.4.0*255"` // optional currents
	Obis72_4_0     float64  `json:"1-0:72.4.0*255"`
	Obis73_4_0     float64  `json:"1-0:73.4.0*255"`
}
// TqEm is an api.Meter implementation for the TQ Energy Manager. dataG is a
// cached accessor returning the most recent device reading.
type TqEm struct {
	dataG func() (tqemData, error)
}
//go:generate go run ../cmd/tools/decorate.go -f decorateTqEm -b api.Meter -t "api.PhaseCurrents,Currents,func() (float64, float64, float64, error)"

// NewTqEmFromConfig creates a new configurable meter. It reads the device
// serial from start.php, builds a cached data getter that (re-)authenticates
// via form login when the session expires, and decorates the meter with
// phase currents when the device reports them.
func NewTqEmFromConfig(other map[string]interface{}) (api.Meter, error) {
	cc := struct {
		URI      string
		Password string
		Cache    time.Duration
	}{
		Cache: time.Second,
	}
	if err := util.DecodeOther(other, &cc); err != nil {
		return nil, err
	}
	log := util.NewLogger("tq-em").Redact(cc.Password)
	client := request.NewHelper(log)
	// Cookie jar holds the device session cookie obtained by the login POST.
	client.Jar, _ = cookiejar.New(nil)
	base := util.DefaultScheme(strings.TrimRight(cc.URI, "/"), "http")
	// get serial number
	var meter tqemData
	uri := fmt.Sprintf("%s/start.php", base)
	err := client.GetJSON(uri, &meter)
	if err != nil {
		return nil, err
	}
	if meter.Serial == "" {
		return nil, errors.New("no serial")
	}
	dataG := provider.Cached(func() (tqemData, error) {
		var res tqemData
		uri := fmt.Sprintf("%s/mum-webservice/data.php", base)
		err := client.GetJSON(uri, &res)
		// An empty serial in the response indicates the session expired;
		// log in again (serial acts as the username) and retry once.
		if err == nil && res.Serial == "" {
			data := url.Values{
				"login":    {meter.Serial},
				"password": {cc.Password},
			}
			var req *http.Request
			req, err = request.New(http.MethodPost, fmt.Sprintf("%s/start.php", base), strings.NewReader(data.Encode()), request.URLEncoding)
			if err == nil {
				_, err = client.DoBody(req)
			}
			if err == nil {
				err = client.GetJSON(uri, &res)
			}
		}
		if err == nil && res.Serial == "" {
			err = errors.New("authentication failed")
		}
		return res, err
	}, cc.Cache)
	m := &TqEm{
		dataG: dataG,
	}
	// Probe once so configuration errors surface immediately.
	res, err := dataG()
	if err != nil {
		return nil, err
	}
	// Only decorate with PhaseCurrents when the device reports them.
	if res.Obis31_4_0 != nil {
		return decorateTqEm(m, m.currents), nil
	}
	return m, nil
}
// CurrentPower implements api.Meter: import power minus export power in W.
// On error the returned value is the zero-value difference; callers must
// check err first.
func (m *TqEm) CurrentPower() (float64, error) {
	res, err := m.dataG()
	return res.Obis1_4_0 - res.Obis2_4_0, err
}
// Compile-time check that TqEm provides total energy readings.
var _ api.MeterEnergy = (*TqEm)(nil)

// TotalEnergy implements api.MeterEnergy, converting the device's Wh reading
// to kWh.
func (m *TqEm) TotalEnergy() (float64, error) {
	res, err := m.dataG()
	return res.Obis1_8_0 / 1e3, err
}
// currents returns the per-phase currents (L1, L2, L3) in A.
//
// Fix: the optional OBIS current fields are pointers, and the decorator in
// NewTqEmFromConfig only checks Obis31_4_0 — Obis51_4_0/Obis71_4_0 could
// still be nil in a later reading, which previously caused a nil-pointer
// panic. Guard all three and return an error instead.
func (m *TqEm) currents() (float64, float64, float64, error) {
	res, err := m.dataG()
	if err != nil {
		return 0, 0, 0, err
	}
	if res.Obis31_4_0 == nil || res.Obis51_4_0 == nil || res.Obis71_4_0 == nil {
		return 0, 0, 0, errors.New("meter does not provide phase currents")
	}
	return *res.Obis31_4_0, *res.Obis51_4_0, *res.Obis71_4_0, nil
}
|
package common
import (
"context"
"net"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
)
// IPWhiteListUnaryServerInterceptor returns a unary server interceptor that
// rejects any request whose peer host address is not in whitelist.
//
// Bug fix: the original dereferenced peer.Addr BEFORE checking the ok flag
// from peer.FromContext, causing a nil-pointer panic when the context has no
// peer info (it also shadowed the imported peer package with a local name).
func IPWhiteListUnaryServerInterceptor(whitelist []string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		p, ok := peer.FromContext(ctx)
		if !ok {
			// No peer information attached to the context at all.
			return nil, status.Error(codes.Unauthenticated, "no peer info in context.")
		}
		host, _, err := net.SplitHostPort(p.Addr.String())
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}
		for _, ip := range whitelist {
			if host == ip {
				return handler(ctx, req)
			}
		}
		return nil, status.Errorf(codes.Unauthenticated, "%s is not whitelisted.", host)
	}
}
|
package main
import "fmt"
import "time"
// countFirstSundays reports how many months in the inclusive year range
// [startYear, endYear] begin on a Sunday.
func countFirstSundays(startYear, endYear int) int {
	count := 0
	for year := startYear; year <= endYear; year++ {
		for month := time.January; month <= time.December; month++ {
			// Noon keeps the date safely away from midnight edge cases.
			// time.UTC replaces the odd &time.Location{} (the zero Location
			// behaves as UTC anyway), and comparing time.Weekday values is
			// more robust than comparing their strings.
			if time.Date(year, month, 1, 12, 0, 0, 0, time.UTC).Weekday() == time.Sunday {
				count++
			}
		}
	}
	return count
}

// main solves Project Euler 19: count the first-of-month Sundays in the
// twentieth century (1 Jan 1901 to 31 Dec 2000).
func main() {
	fmt.Println(countFirstSundays(1901, 2000))
}
|
package unmodel
// 存在しないページ
import (
"../get2ch"
"time"
)
// None is the model for a page that does not exist (HTTP-404-like case).
// It carries only the shared ModelComponent state.
type None struct {
	ModelComponent
}
// NewNone builds a None model for host. The second parameter (path segments)
// is accepted for signature compatibility with other model constructors but
// is unused. All content fields are set to empty/placeholder values; the
// title translates to "no such page".
func NewNone(host string, _ []string) *None {
	model := &None{ModelComponent: CreateModelComponent(ClassNameNone, host)}
	model.url = ""
	model.title = "そんなページないよ"
	model.mod = time.Time{}
	model.err = nil
	model.g2ch = get2ch.NewGet2ch("", "")
	return model
}
func (this *None) GetData() interface{} { return nil }
|
package provider
// Message wraps a single text payload supplied by a provider.
type Message struct {
	msg string
}

// NewMessage is the Message constructor; it stores msg unchanged.
func NewMessage(msg string) Message {
	return Message{msg: msg}
}
|
package repository
import (
"fmt"
"github.com/go-log/log"
pb "github.com/i-coder-robot/go-micro-action-user/proto/frontPermit"
"github.com/jinzhu/gorm"
)
// FrontPermit is the repository contract for front-end permission records:
// CRUD plus unfiltered listing, paged/filtered listing, and row counting.
type FrontPermit interface {
	Create(frontPermit *pb.FrontPermit) (*pb.FrontPermit, error)
	Delete(frontPermit *pb.FrontPermit) (bool, error)
	Update(frontPermit *pb.FrontPermit) (bool, error)
	Get(frontPermit *pb.FrontPermit) (*pb.FrontPermit, error)
	All(req *pb.Request) ([]*pb.FrontPermit, error)
	List(req *pb.ListQuery) ([]*pb.FrontPermit, error)
	Total(req *pb.ListQuery) (int64, error)
}
// FrontPermitRepository is the gorm-backed implementation of FrontPermit.
type FrontPermitRepository struct {
	DB *gorm.DB
}
// All returns every front-permit row. The req parameter is currently unused.
func (repo *FrontPermitRepository) All(req *pb.Request) (frontPermits []*pb.FrontPermit, err error) {
	if err := repo.DB.Find(&frontPermits).Error; err != nil {
		log.Log(err)
		return nil, err
	}
	return frontPermits, nil
}
// List returns a page of front-permit rows, optionally filtered by the raw
// req.Where clause and ordered by req.Sort.
//
// Bug fix: the sort expression was passed to gorm's Or(), turning the sort
// string into an OR-condition instead of an ORDER BY; it now uses Order().
func (repo *FrontPermitRepository) List(req *pb.ListQuery) (frontPermits []*pb.FrontPermit, err error) {
	db := repo.DB
	limit := req.Limit
	// NOTE(review): page size is hard-coded to 10 here while the row limit
	// comes from req.Limit — confirm whether offset should be req.Page*limit.
	offset := req.Page * 10
	sort := req.Sort
	if req.Where != "" {
		db = db.Where(req.Where)
	}
	if err := db.Order(sort).Limit(limit).Offset(offset).Find(&frontPermits).Error; err != nil {
		log.Log(err)
		return nil, err
	}
	return frontPermits, nil
}
// Total returns the number of front-permit rows matching the optional raw
// req.Where filter.
//
// Improvement: issue a COUNT query via Model(...).Count instead of loading
// every matching row into memory with Find just to count it.
func (repo FrontPermitRepository) Total(req *pb.ListQuery) (total int64, err error) {
	db := repo.DB.Model(&pb.FrontPermit{})
	if req.Where != "" {
		db = db.Where(req.Where)
	}
	if err := db.Count(&total).Error; err != nil {
		log.Log(err)
		return total, err
	}
	return total, nil
}
// Get fetches the first row matching the non-zero fields of frontPermit and
// decodes the result back into the same struct.
func (repo *FrontPermitRepository) Get(frontPermit *pb.FrontPermit) (*pb.FrontPermit, error) {
	if err := repo.DB.Where(&frontPermit).Find(&frontPermit).Error; err != nil {
		return nil, err
	}
	return frontPermit, nil
}
// Create inserts p as a new front-permit row. On failure it logs the
// underlying error and returns a generic "failed to add front permit"
// message (original text in Chinese).
func (repo *FrontPermitRepository) Create(p *pb.FrontPermit) (*pb.FrontPermit, error) {
	err := repo.DB.Create(p).Error
	if err != nil {
		log.Log(err)
		return p, fmt.Errorf("添加前端权限失败")
	}
	return p, nil
}
// Update applies the non-zero fields of p to the row identified by p.Id.
// A zero Id is rejected ("update Id required", original text in Chinese).
func (repo *FrontPermitRepository) Update(p *pb.FrontPermit) (bool, error) {
	if p.Id == 0 {
		return false, fmt.Errorf("请传入更新 Id")
	}
	id := &pb.FrontPermit{Id: p.Id}
	err := repo.DB.Model(id).Updates(p).Error
	if err != nil {
		log.Log(err)
		return false, err
	}
	return true, nil
}
// Delete removes the row matching p. Returns true on success.
func (repo FrontPermitRepository) Delete(p *pb.FrontPermit) (bool, error) {
	err := repo.DB.Delete(p).Error
	if err != nil {
		log.Log(err)
		return false, err
	}
	return true, nil
}
|
/*
Copyright 2021 The DbunderFS Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package migration
import (
"github.com/kos-v/dbunderfs/internal/container"
"github.com/kos-v/dbunderfs/internal/db/migration"
)
// CommiterStub is an in-memory test double for a migration commiter; applied
// migration IDs are tracked in Storage instead of a database.
type CommiterStub struct {
	Storage *container.Collection
}
// Commit records migration's ID as applied. It never fails.
func (c *CommiterStub) Commit(migration *migration.Migration) error {
	c.Storage.Append(migration.Id)
	return nil
}
// IsCommited reports whether migration's ID has been recorded via Commit.
// It never returns an error.
func (c *CommiterStub) IsCommited(migration *migration.Migration) (bool, error) {
	for _, id := range c.Storage.ToList() {
		if migration.Id == id {
			return true, nil
		}
	}
	return false, nil
}
// Rollback removes the first recorded occurrence of migration's ID, if any.
// It never fails, even when the ID was never committed.
func (c *CommiterStub) Rollback(migration *migration.Migration) error {
	for i, id := range c.Storage.ToList() {
		if id == migration.Id {
			c.Storage.Remove(i)
			break
		}
	}
	return nil
}
|
package views // import "github.com/jenkins-x/octant-jx/pkg/plugin/views"
import (
"fmt"
"github.com/jenkins-x/jx-logging/v3/pkg/log"
"github.com/jenkins-x/octant-jx/pkg/admin"
"github.com/jenkins-x/octant-jx/pkg/common/links"
"github.com/jenkins-x/octant-jx/pkg/common/pluginctx"
"github.com/jenkins-x/octant-jx/pkg/common/viewhelpers"
"github.com/vmware-tanzu/octant/pkg/plugin/service"
"github.com/vmware-tanzu/octant/pkg/view/component"
"k8s.io/apimachinery/pkg/labels"
)
// BuildJobsLogViewForPath renders the log view for the job view registered
// under path, using that view's configured pod selector.
func BuildJobsLogViewForPath(request service.Request, pluginContext pluginctx.Context, path, jobName string) (component.Component, error) {
	config := JobsViewConfigs[path]
	selector := config.Selector
	return BuildJobsViewLogsForPathAndSelector(request, pluginContext, path, jobName, config, selector)
}
// BuildJobsViewLogsForPathAndSelector renders a breadcrumbed log view for the
// most recent pod matching selector (optionally narrowed by jobName). Lookup
// failures degrade to an explanatory text component rather than an error.
func BuildJobsViewLogsForPathAndSelector(request service.Request, pluginContext pluginctx.Context, path, jobName string, config *JobViewConfig, selector labels.Set) (component.Component, error) {
	if config == nil {
		return component.NewText(fmt.Sprintf("No view configuration found for path %s", path)), nil
	}
	ctx := request.Context()
	client := request.DashboardClient()
	ns := pluginContext.Namespace
	// Narrow the selector to the specific job unless one is already set.
	if jobName != "" && selector["job-name"] == "" {
		selector["job-name"] = jobName
	}
	title := config.Title
	if title == "" {
		title = "Jobs"
	}
	parentLink := viewhelpers.ToMarkdownLink(title, admin.JobsViewLink(path))
	headerText := viewhelpers.ToBreadcrumbMarkdown(admin.RootBreadcrumb, parentLink)
	if jobName != "" {
		headerText = viewhelpers.ToBreadcrumbMarkdown(headerText, viewhelpers.ToMarkdownLink("Job", links.GetJobLink(ns, jobName)))
	}
	// lets try find the pod for the pipeline
	var logsView component.Component
	pod, err := viewhelpers.FindLatestPodForSelector(ctx, client, pluginContext.Namespace, selector)
	if err != nil {
		log.Logger().Info(err)
	}
	if pod != nil {
		podName := pod.GetName()
		if ns == "" {
			ns = pod.Namespace
		}
		logsView, err = viewhelpers.ViewPipelineLogs(ns, podName)
		if err != nil {
			log.Logger().Info(err)
			logsView = component.NewText(fmt.Sprintf("could not find pod: %s", err.Error()))
		}
		headerText = viewhelpers.ToBreadcrumbMarkdown(headerText, viewhelpers.ToMarkdownLink("Pod", links.GetPodLink(ns, podName)))
	} else {
		logsView = component.NewText(fmt.Sprintf("could not find pod for selector: %s", selector.String()))
	}
	headerText = viewhelpers.ToBreadcrumbMarkdown(headerText, "Logs")
	// Stack the breadcrumb header above the logs, both full width.
	flexLayout := component.NewFlexLayout("")
	flexLayout.AddSections(component.FlexLayoutSection{
		{Width: component.WidthFull, View: viewhelpers.NewMarkdownText(headerText)},
		{Width: component.WidthFull, View: logsView},
	})
	return flexLayout, nil
}
|
package main
import (
"fmt"
"sort"
)
// main reads a count n followed by n integers from stdin, then prints the
// mean and median (one decimal place) and the mode. If no numbers were read
// it prints three zero lines instead.
func main() {
	var n int
	fmt.Scan(&n)
	var nums []int
	var sum int
	for i := 0; i < n; i++ {
		var f int
		fmt.Scan(&f)
		nums = append(nums, f)
		sum += f
	}
	// print the mean
	if len(nums) > 0 {
		fmt.Printf("%.1f\n", float64(sum)/float64(len(nums)))
		// print the median
		fmt.Printf("%.1f\n", median(nums))
		// print the mode
		fmt.Println(mode(nums))
	} else {
		// Degenerate input: emit zeros for all three statistics.
		fmt.Println(0)
		fmt.Println(0)
		fmt.Println(0)
	}
}
// median returns the middle value of nums (the mean of the two middle
// values for an even count). The slice is sorted in place as a side effect.
// Panics on an empty slice, as before.
func median(nums []int) float64 {
	sort.Ints(nums)
	mid := len(nums) / 2
	if len(nums)%2 == 1 {
		return float64(nums[mid])
	}
	return float64(nums[mid-1]+nums[mid]) / 2
}
// mode returns the most frequent value in nums; ties are broken by returning
// the smallest such value. Panics on an empty slice (unchanged).
//
// Bug fix: the original had a special case — when the first element's
// frequency equaled the maximum it returned the global minimum of nums,
// which is not necessarily a mode (e.g. [2,2,1,3] returned 1 instead of 2).
// The in-loop tie-break already yields the smallest mode, so the special
// case is simply removed.
func mode(nums []int) int {
	frequency := make(map[int]int, len(nums))
	for _, v := range nums {
		frequency[v]++
	}
	maxFreqNum := nums[0]
	maxFreq := frequency[maxFreqNum]
	for k, n := range frequency {
		// Higher frequency wins; on equal frequency the smaller key wins.
		if n > maxFreq || (n == maxFreq && k < maxFreqNum) {
			maxFreq = n
			maxFreqNum = k
		}
	}
	return maxFreqNum
}
|
// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package packet_test
import (
. "github.com/gardener/gardener-extension-provider-packet/pkg/packet"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
)
// Ginkgo spec for ReadCredentialsSecret: it must reject secrets missing the
// API token or project ID, and decode a complete secret into Credentials.
var _ = Describe("Secret", func() {
	var secret *corev1.Secret

	BeforeEach(func() {
		// Fresh empty secret for every example.
		secret = &corev1.Secret{}
	})

	Describe("#ReadCredentialsSecret", func() {
		It("should return an error because api token is missing", func() {
			credentials, err := ReadCredentialsSecret(secret)

			Expect(credentials).To(BeNil())
			Expect(err).To(HaveOccurred())
		})

		It("should return an error because project id is missing", func() {
			secret.Data = map[string][]byte{
				APIToken: []byte("foo"),
			}

			credentials, err := ReadCredentialsSecret(secret)

			Expect(credentials).To(BeNil())
			Expect(err).To(HaveOccurred())
		})

		It("should return the credentials structure", func() {
			var (
				apiToken  = []byte("foo")
				projectID = []byte("bar")
			)
			secret.Data = map[string][]byte{
				APIToken:  apiToken,
				ProjectID: projectID,
			}

			credentials, err := ReadCredentialsSecret(secret)

			Expect(credentials).To(Equal(&Credentials{
				APIToken:  apiToken,
				ProjectID: projectID,
			}))
			Expect(err).NotTo(HaveOccurred())
		})
	})
})
|
package set1
// FixedXOR returns the byte-wise XOR of data and cipher. The inputs are
// expected to be of equal length (cryptopals set 1 challenge 2); if they
// differ, the result is truncated to the shorter length instead of panicking
// on an out-of-range index as the original did.
func FixedXOR(data []byte, cipher []byte) []byte {
	n := len(data)
	if len(cipher) < n {
		n = len(cipher)
	}
	xor := make([]byte, n)
	for i := 0; i < n; i++ {
		xor[i] = data[i] ^ cipher[i]
	}
	return xor
}
|
package media
import (
"encoding/json"
"regexp"
"github.com/gempir/gempbot/internal/dto"
"github.com/gempir/gempbot/internal/helixclient"
"github.com/gempir/gempbot/internal/log"
"github.com/gempir/gempbot/internal/store"
"github.com/google/uuid"
"github.com/puzpuzpuz/xsync"
)
// PlayerState is the playback state shared between the media server and
// connected player clients.
type PlayerState string

const (
	PLAYING PlayerState = "PLAYING"
	PAUSED  PlayerState = "PAUSED"
)

var (
	// YOUTUBE_REGEX matches youtube.com / youtu.be video URLs (with or
	// without scheme, www/m subdomain, watch/embed/short forms).
	YOUTUBE_REGEX = regexp.MustCompile(`^((?:https?:)?\/\/)?((?:www|m)\.)?((?:youtube\.com|youtu.be))(\/(?:[\w\-]+\?v=|embed\/|v\/)?)([\w\-]+)(\S+)?$`)
)
// MEDIA_TYPE identifies the media source of a room; only YouTube is
// supported at present.
type MEDIA_TYPE string

const (
	MEDIA_TYPE_YOUTUBE MEDIA_TYPE = "youtube"
)
type DebugMessage struct {
Action string `json:"string"`
Message string `json:"message"`
}
// MediaManager coordinates shared media playback: per-channel Rooms,
// websocket Connections, queue storage, and the chat-bot command interface.
type MediaManager struct {
	storage     storage
	helixClient helixclient.Client
	// rooms maps channel Twitch ID -> shared playback room.
	rooms *xsync.MapOf[string, *Room]
	// connections maps connection UUID -> websocket connection.
	connections *xsync.MapOf[string, *Connection]
	bot         mediaBot
	// commandsActivatedChannels is the set of channel IDs with media
	// commands enabled; populated once at construction.
	commandsActivatedChannels map[string]bool
}
// Connection is a single client connection: its UUID and a callback that
// writes a raw message to the underlying transport.
type Connection struct {
	id     string
	writer func(message []byte)
}
// Room is the shared playback state for one channel: current media, queue
// item, playhead position, and the set of connected viewers.
type Room struct {
	MediaType MEDIA_TYPE
	Url       string
	QueueID   string
	Time      float32
	ChannelID string
	State     PlayerState
	// users maps connection ID -> connection for everyone in the room.
	users *xsync.MapOf[string, *Connection]
}
// storage is the persistence surface MediaManager needs: queue writes/reads
// and the bot-config listing used to find command-enabled channels.
type storage interface {
	AddToQueue(queueItem store.MediaQueue) error
	GetQueue(channelTwitchId string) []store.MediaQueue
	GetAllMediaCommandsBotConfig() []store.BotConfig
}
// mediaBot is the chat-bot surface MediaManager needs: command registration
// and sending plain or threaded replies.
type mediaBot interface {
	RegisterCommand(command string, handler func(dto.CommandPayload))
	Say(channel string, message string)
	Reply(channel string, parentMsgId, message string)
}
// NewMediaManager constructs the manager, registers the "sr" (song request)
// chat command, and pre-creates a room for every channel whose bot config
// has media commands enabled.
func NewMediaManager(storage storage, helixClient helixclient.Client, bot mediaBot) *MediaManager {
	commandsActivatedChannels := make(map[string]bool)
	commandActivatedCfgs := storage.GetAllMediaCommandsBotConfig()

	mm := &MediaManager{
		storage:                   storage,
		helixClient:               helixClient,
		rooms:                     xsync.NewMapOf[*Room](),
		connections:               xsync.NewMapOf[*Connection](),
		commandsActivatedChannels: commandsActivatedChannels,
		bot:                       bot,
	}

	bot.RegisterCommand("sr", mm.handleSongRequest)

	for _, cfg := range commandActivatedCfgs {
		if cfg.MediaCommands {
			commandsActivatedChannels[cfg.OwnerTwitchID] = true
			mm.rooms.Store(cfg.OwnerTwitchID, mm.initRoom(cfg.OwnerTwitchID))
		}
	}

	return mm
}
// initRoom creates a room for channelID, seeding the current URL and queue
// item from the head of the persisted queue if one exists.
func (m *MediaManager) initRoom(channelID string) *Room {
	queue := m.storage.GetQueue(channelID)
	room := newRoom()
	room.ChannelID = channelID
	if len(queue) > 0 {
		room.Url = queue[0].Url
		room.QueueID = queue[0].ID
	}
	return room
}
// handleSongRequest handles the "sr" chat command: in channels with media
// commands enabled, it validates the query as a YouTube URL and enqueues it.
func (m *MediaManager) handleSongRequest(payload dto.CommandPayload) {
	if _, ok := m.commandsActivatedChannels[payload.Msg.RoomID]; !ok {
		return
	}
	if !YOUTUBE_REGEX.MatchString(payload.Query) {
		m.bot.Reply(payload.Msg.Channel, payload.Msg.ID, "invalid youtube url")
		return
	}
	m.AddUrlToQueue(payload.Query, payload.Msg.User.ID, payload.Msg.RoomID)
}
// AddUrlToQueue persists url as a new queue item for channelID, attributed
// to authorID.
//
// Fix: the error returned by storage.AddToQueue was silently discarded;
// it is now logged so failed enqueues are visible.
func (m *MediaManager) AddUrlToQueue(url string, authorID string, channelID string) {
	err := m.storage.AddToQueue(store.MediaQueue{
		ChannelTwitchId: channelID,
		Author:          authorID,
		Url:             url,
	})
	if err != nil {
		log.Error(err)
	}
}
// HandleJoin adds the connection to a channel's room (the user's own room
// when channel is empty, otherwise the room of the resolved channel name),
// creating the room on demand, and sends the joiner the current player state.
func (m *MediaManager) HandleJoin(connectionId string, userID string, channel string) {
	var joinChannelId string
	if channel == "" {
		joinChannelId = userID
	} else {
		// Resolve the channel login name to its Twitch ID; on failure the
		// join is silently dropped.
		res, err := m.helixClient.GetUserByUsername(channel)
		if err != nil {
			return
		}
		joinChannelId = res.ID
	}

	connection, ok := m.connections.Load(connectionId)
	if !ok {
		return
	}

	room, ok := m.rooms.Load(joinChannelId)
	if !ok {
		room = newRoom()
		m.rooms.Store(joinChannelId, room)
	}

	// Sync the new viewer before adding them to the broadcast set.
	sendPlayerState([]*Connection{connection}, room)
	room.users.Store(connectionId, connection)
}
// PlayerStateMessage is the PLAYER_STATE wire payload broadcast to clients.
type PlayerStateMessage struct {
	Action  string      `json:"action"`
	Url     string      `json:"url"`
	QueueID string      `json:"queueId"`
	Time    float32     `json:"time"`
	State   PlayerState `json:"state"`
}

// QueueStateMessage is the QUEUE_STATE wire payload carrying a full queue.
type QueueStateMessage struct {
	Action string            `json:"action"`
	Queue  []store.MediaQueue `json:"queue"`
}
// HandlePlayerState records a player-state update from one connection into
// the sender's room and rebroadcasts it to every other room member.
func (m *MediaManager) HandlePlayerState(connectionId string, userID string, state PlayerState, url string, time float32) {
	if userID == "" {
		log.Errorf("missing userID time %f on connection %s", time, connectionId)
		return
	}

	roomState := m.getRoom(userID)
	roomState.Time = time
	roomState.Url = url
	roomState.State = state

	// Collect every room member except the sender.
	conns := []*Connection{}
	roomState.users.Range(func(key string, conn *Connection) bool {
		if conn.id != connectionId {
			conns = append(conns, conn)
		}
		return true
	})

	if roomState.State == PLAYING && roomState.QueueID == "" {
		// NOTE(review): GetQueue's parameter is a channel Twitch ID, but it
		// is passed the URL here, and the result is discarded — this looks
		// wrong; confirm the intended behavior against the storage layer.
		m.storage.GetQueue(roomState.Url)
	}

	if roomState.QueueID != "" {
		sendPlayerState(conns, roomState)
	}
}
// HandleGetQueue sends the requesting connection the persisted queue of the
// user's own channel (channel empty) or of the resolved channel name.
func (m *MediaManager) HandleGetQueue(connectionId string, userID string, channel string) {
	var channelId string
	if channel == "" {
		channelId = userID
	} else {
		// Resolve the channel login name to its Twitch ID; on failure the
		// request is silently dropped.
		res, err := m.helixClient.GetUserByUsername(channel)
		if err != nil {
			return
		}
		channelId = res.ID
	}

	queueItems := m.storage.GetQueue(channelId)

	connection, ok := m.connections.Load(connectionId)
	if !ok {
		return
	}

	sendQueueState([]*Connection{connection}, queueItems)
}
// getRoom returns the room for channelId, creating and registering an empty
// one if none exists. Note: unlike initRoom, the created room's ChannelID
// field is left unset.
func (m *MediaManager) getRoom(channelId string) *Room {
	room, ok := m.rooms.Load(channelId)
	if ok {
		return room
	}
	newRoom := newRoom()
	m.rooms.Store(channelId, newRoom)
	return newRoom
}
// RegisterConnection creates a connection wrapping writeFunc and returns its
// generated UUID. The userID parameter is currently unused.
func (m *MediaManager) RegisterConnection(userID string, writeFunc func(message []byte)) string {
	connectionId := uuid.NewString()
	m.connections.Store(connectionId, &Connection{writer: writeFunc, id: connectionId})
	return connectionId
}
// DeregisterConnection drops the connection from the registry.
// NOTE(review): the connection is not removed from any Room.users map it
// joined — possibly a leak of stale room members; confirm intended cleanup.
func (m *MediaManager) DeregisterConnection(connectionId string) {
	m.connections.Delete(connectionId)
}
// newRoom returns an empty, paused room with no media and no viewers.
func newRoom() *Room {
	return &Room{
		users:   xsync.NewMapOf[*Connection](),
		Url:     "",
		QueueID: "",
		Time:    0,
		State:   PAUSED,
	}
}
// sendPlayerState marshals room's player state and writes it to each of the
// given connections; a marshal failure is logged and aborts the broadcast.
func sendPlayerState(connections []*Connection, room *Room) {
	resultMessage, err := json.Marshal(newPlayerStateMessage(room))
	if err != nil {
		log.Error(err)
		return
	}

	for _, conn := range connections {
		conn.writer(resultMessage)
	}
}
// sendQueueState marshals the queue payload and writes it to each of the
// given connections; a marshal failure is logged and aborts the broadcast.
func sendQueueState(connections []*Connection, queue []store.MediaQueue) {
	resultMessage, err := json.Marshal(newQueueStateMessage(queue))
	if err != nil {
		log.Error(err)
		return
	}

	for _, conn := range connections {
		conn.writer(resultMessage)
	}
}
// newPlayerStateMessage builds the PLAYER_STATE payload for room.
//
// Bug fix: QueueID was never copied from the room, so clients always
// received an empty "queueId" even while a queue item was playing (the
// struct declares the field and HandlePlayerState keys off room.QueueID).
func newPlayerStateMessage(room *Room) PlayerStateMessage {
	return PlayerStateMessage{
		Action:  "PLAYER_STATE",
		Url:     room.Url,
		QueueID: room.QueueID,
		Time:    room.Time,
		State:   room.State,
	}
}
// newQueueStateMessage builds the QUEUE_STATE payload wrapping queue.
func newQueueStateMessage(queue []store.MediaQueue) QueueStateMessage {
	return QueueStateMessage{
		Action: "QUEUE_STATE",
		Queue:  queue,
	}
}
|
package application
import (
"fmt"
"github.com/dolittle/platform-api/pkg/azure"
dolittleK8s "github.com/dolittle/platform-api/pkg/dolittle/k8s"
"github.com/dolittle/platform-api/pkg/platform"
"github.com/dolittle/platform-api/pkg/platform/application/k8s"
platformK8s "github.com/dolittle/platform-api/pkg/platform/k8s"
"github.com/dolittle/platform-api/pkg/platform/microservice/simple"
"github.com/dolittle/platform-api/pkg/platform/storage"
"github.com/sirupsen/logrus"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/client-go/kubernetes"
)
// CreateApplicationAndEnvironmentAndWelcomeMicroservice provisions the
// complete Kubernetes footprint for a new application — namespace, service
// accounts, ACR docker-config secret, Azure file storage, developer RBAC and
// one environment (with a mongo backup cronjob) per configured environment —
// and then saves and deploys a "Welcome" microservice into every environment
// that has at least one customer tenant.
//
// It returns the first error encountered; resources created before the
// failure are not rolled back. logContext is currently unused.
func CreateApplicationAndEnvironmentAndWelcomeMicroservice(
	client kubernetes.Interface,
	storageRepo storage.RepoMicroservice,
	simpleRepo simple.Repo,
	k8sDolittleRepo platformK8s.K8sPlatformRepo,
	application storage.JSONApplication,
	terraformCustomer platform.TerraformCustomer,
	terraformApplication platform.TerraformApplication,
	isProduction bool,
	welcomeImage string,
	logContext logrus.FieldLogger,
) error {
	// Credentials sourced from terraform output: the ACR pull secret plus the
	// Azure storage account used for backups.
	dockerconfigjson := k8s.MakeCustomerAcrDockerConfig(terraformCustomer)
	azureGroupId := terraformApplication.GroupID
	azureStorageAccountName := terraformCustomer.AzureStorageAccountName
	azureStorageAccountKey := terraformCustomer.AzureStorageAccountKey
	// Customer / application identifiers handed to every resource builder.
	tenantInfo := dolittleK8s.Tenant{
		Name: application.CustomerName,
		ID:   application.CustomerID,
	}
	applicationInfo := dolittleK8s.Application{
		Name: application.Name,
		ID:   application.ID,
	}
	// Template for the welcome microservice; MicroserviceID and Environment
	// are filled in per environment in the loop further down.
	welcomeMicroservice := platform.HttpInputSimpleInfo{
		MicroserviceBase: platform.MicroserviceBase{
			Dolittle: platform.HttpInputDolittle{
				ApplicationID:  application.ID,
				CustomerID:     application.CustomerID,
				MicroserviceID: "",
			},
			Name: "Welcome",
			Kind: platform.MicroserviceKindSimple,
		},
		Extra: platform.HttpInputSimpleExtra{
			Headimage:    welcomeImage,
			Runtimeimage: "none",
			Ingress: platform.HttpInputSimpleIngress{
				Path:     "/welcome-to-dolittle",
				Pathtype: string(networkingv1.PathTypePrefix),
			},
			Ispublic: true,
		},
	}
	// Accumulate all cluster resources before applying them in one go below.
	r := k8s.Resources{}
	r.ServiceAccounts = k8s.NewServiceAccountsInfo(tenantInfo, applicationInfo)
	r.Namespace = k8s.NewNamespace(tenantInfo, applicationInfo)
	r.Acr = k8s.NewAcr(tenantInfo, applicationInfo, dockerconfigjson)
	r.Storage = k8s.NewStorage(tenantInfo, applicationInfo, azureStorageAccountName, azureStorageAccountKey)
	r.DeveloperRbac = k8s.NewDeveloperRbac(tenantInfo, applicationInfo, azureGroupId)
	// Create rbac
	// Create environments
	for _, environment := range application.Environments {
		shareName := azure.CreateBackupFileShareName(application.Name, environment.Name)
		mongoSettings := k8s.MongoSettings{
			ShareName: shareName,
			// Random minute spreads mongo backup load across applications.
			CronJobSchedule: fmt.Sprintf("%d * * * *", GetRandomMinutes()),
			VolumeSize:      "8Gi",
		}
		environmentResource := k8s.NewEnvironment(environment.Name, tenantInfo, applicationInfo, mongoSettings, environment.CustomerTenants)
		r.Environments = append(r.Environments, environmentResource)
	}
	// TODO figure out how to know if we are local dev
	// Add Local dev bindings
	if !isProduction {
		r.LocalDevRoleBindingToDeveloper = k8s.NewLocalDevRoleBindingToDeveloper(tenantInfo, applicationInfo)
	}
	// Apply every accumulated resource to the cluster.
	err := k8s.Do(client, r, k8sDolittleRepo)
	if err != nil {
		return err
	}
	// Create welcome microservice
	namespace := r.Namespace.Name
	for _, environment := range application.Environments {
		customerTenants := environment.CustomerTenants
		// Without a customer tenant there is nothing to attach the welcome
		// microservice to.
		skipMicroserviceCreation := len(customerTenants) == 0
		if skipMicroserviceCreation {
			continue
		}
		microservice := welcomeMicroservice
		// NOTE(review): assumes the first tenant always has at least one
		// MicroservicesRel entry — confirm against where tenants are seeded.
		microservice.Dolittle.MicroserviceID = customerTenants[0].MicroservicesRel[0].MicroserviceID
		microservice.Environment = environment.Name
		// TODO Would be nice to hoist this to the creation of the application, so this is semi immutable
		err = storageRepo.SaveMicroservice(
			microservice.Dolittle.CustomerID,
			microservice.Dolittle.ApplicationID,
			microservice.Environment,
			microservice.Dolittle.MicroserviceID,
			microservice,
		)
		if err != nil {
			return err
		}
		err := simpleRepo.Create(namespace, tenantInfo, applicationInfo, customerTenants, microservice)
		if err != nil {
			return err
		}
	}
	return nil
}
|
package rest
import (
"net/http"
"github.com/jrapoport/gothic/core"
"github.com/jrapoport/gothic/core/tokens"
"github.com/jrapoport/gothic/models/user"
"github.com/jrapoport/gothic/store"
"github.com/segmentio/encoding/json"
)
// Server is a REST server extending the embedded core server.
type Server struct {
	*core.Server
}
// NewServer wraps a core server in a REST Server.
func NewServer(s *core.Server) *Server {
	srv := &Server{s}
	return srv
}
// Clone returns a new Server wrapping a clone of the underlying core server.
func (s *Server) Clone() *Server {
	cloned := s.Server.Clone()
	return &Server{cloned}
}
// ValidateAdmin re-checks that the token on r belongs to an active admin
// user and returns that user's role; user.InvalidRole on any failure.
func (s *Server) ValidateAdmin(r *http.Request) (user.Role, error) {
	uid, err := GetUserID(r)
	if err != nil {
		return user.InvalidRole, err
	}
	role, err := s.API.ValidateAdmin(uid)
	if err != nil {
		return user.InvalidRole, err
	}
	return role, nil
}
// Response writes v to w as a JSON response with ContentType set to
// JSONContent. A nil v becomes a bare 200 OK; an error value becomes a 500;
// a *tokens.BearerToken is first wrapped in a bearer response.
func (s *Server) Response(w http.ResponseWriter, v interface{}) {
	if v == nil {
		s.ResponseCode(w, http.StatusOK, nil)
		return
	}
	// Go switch cases never fall through, so the explicit `break`s and the
	// empty `default` the original carried were redundant and are removed.
	switch val := v.(type) {
	case error:
		s.ResponseCode(w, http.StatusInternalServerError, val)
		return
	case *tokens.BearerToken:
		v = NewBearerResponse(val)
		s.Debugf("returned bearer token: %v", v)
	}
	b, err := json.Marshal(v)
	if err != nil {
		s.ResponseCode(w, http.StatusInternalServerError, err)
		return
	}
	s.Debugf("response: %s", string(b))
	w.Header().Set(ContentType, JSONContent)
	// ResponseWriter.Write() calls w.WriteHeader(http.StatusOK)
	if _, err = w.Write(b); err != nil {
		s.ResponseCode(w, http.StatusInternalServerError, err)
		return
	}
}
// ResponseCode logs err (when non-nil) and then writes a sanitized standard
// response for code via the package-level ResponseCode.
func (s *Server) ResponseCode(w http.ResponseWriter, code int, err error) {
	if err != nil {
		s.Error(err)
	}
	ResponseCode(w, code, err)
}
// ResponseError logs err and writes a sanitized standard response: 200 OK
// when err is nil, 500 Internal Server Error otherwise.
func (s *Server) ResponseError(w http.ResponseWriter, err error) {
	code := http.StatusInternalServerError
	if err == nil {
		code = http.StatusOK
	}
	s.ResponseCode(w, code, err)
}
// AuthResponse refreshes the auth cookie with tok (using the configured
// cookie duration) and then writes v as the response body.
func (s *Server) AuthResponse(w http.ResponseWriter, r *http.Request, tok string, v interface{}) {
	UseCookie(w, r, tok, s.Config().Cookies.Duration)
	s.Response(w, v)
}
// AuthError clears the auth cookie and responds with 200 OK. The error is
// logged but not leaked to the client (ResponseCode writes no message body
// for a 200).
func (s *Server) AuthError(w http.ResponseWriter, err error) {
	ClearCookie(w)
	s.ResponseCode(w, http.StatusOK, err)
}
// PagedResponse emits pagination information for page (via PaginateResponse)
// and then writes v as the response body.
func (s *Server) PagedResponse(w http.ResponseWriter, r *http.Request,
	v interface{}, page *store.Pagination) {
	PaginateResponse(w, r, page)
	s.Response(w, v)
}
// ResponseCode writes a standard http response
func ResponseCode(w http.ResponseWriter, code int, err error) {
if code == http.StatusOK {
w.WriteHeader(http.StatusOK)
return
}
msg := http.StatusText(code)
if err != nil && err.Error() == "" {
msg = err.Error()
}
http.Error(w, msg, code)
}
|
// +build ignore
package main
import (
"context"
"encoding/hex"
"fmt"
"io/ioutil"
"math/big"
"os"
"os/exec"
"strings"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
flag "github.com/spf13/pflag"
)
// Command-line flag values; bound in init and populated by flag.Parse in main.
var (
	chainID             int64  // chain ID handed to SignTx (default 5777, Ganache)
	privKey             []byte // sender's raw private key bytes (hex flag)
	ganacheURL          string // JSON-RPC endpoint of the Ganache node
	nonce               uint64 // nonce of the deployment transaction
	calledLibraryTxHash string // hash of the tx that deployed calledLibrary
)
// main deploys the caller contract: it resolves the deployed calledLibrary
// address from that library's deployment-tx receipt, compiles
// CallExamples.sol with the library address linked in, then signs and
// broadcasts a contract-creation transaction. All failures panic — this is
// a one-shot example script.
func main() {
	flag.Parse()
	eth, err := ethclient.Dial(ganacheURL)
	if nil != err {
		panic(err)
	}
	defer eth.Close()
	// Resolve the calledLibrary address: fetch the receipt of its deployment
	// tx and read the created contract address from it.
	receipt, err := eth.TransactionReceipt(context.TODO(), common.HexToHash(calledLibraryTxHash))
	if nil != err {
		panic(err)
	}
	calledLibraryAddress := receipt.ContractAddress
	// Compile the caller contract with the library linked in. Hex()[2:]
	// strips the leading "0x" for solc's --libraries argument.
	callerCode, err := generateBinaryABI("CallExamples.sol", calledLibraryAddress.Hex()[2:])
	if err != nil {
		panic(err)
	}
	fmt.Printf("caller code: %x\n", callerCode)
	gasPrice, err := eth.SuggestGasPrice(context.TODO())
	if nil != err {
		panic(err)
	}
	// NOTE: deliberately shadows the package-level chainID flag with its
	// *big.Int form used for signing.
	chainID := big.NewInt(chainID)
	const gasLimit = 2000000
	// Throwaway keystore directory, removed when main returns.
	workingDir, err := ioutil.TempDir("", "mastering-eth-*")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(workingDir)
	store, err := newKeyStore(privKey, workingDir)
	if err != nil {
		panic(fmt.Sprintf("fail to new key store: %v", err))
	}
	sender := store.Accounts()[0]
	// TODO: compare with NewTransaction
	// Contract creation transfers 0 ether; the payload is the compiled code.
	tx := types.NewContractCreation(nonce, toWei(0), gasLimit, gasPrice, callerCode)
	if tx, err = store.SignTx(sender, tx, chainID); nil != err {
		panic(err)
	}
	if err := eth.SendTransaction(context.TODO(), tx); nil != err {
		panic(err)
	}
	fmt.Println("gasPrice =", gasPrice)
	fmt.Println(" account =", sender.Address.Hex())
	fmt.Println(" txHash =", tx.Hash().Hex())
}
// generateBinaryABI compiles contract inside the ethereum/solc docker image,
// linking calledLibrary at the given hex address (no 0x prefix), and returns
// the decoded binary of the CallExamples.sol `caller` contract.
func generateBinaryABI(contract, calledLibraryAddress string) ([]byte, error) {
	workingDir, err := os.Getwd()
	if err != nil {
		return nil, fmt.Errorf("fail to get current working dir: %w", err)
	}
	cmdLine := fmt.Sprintf("docker run --rm -v %s/contracts:/contracts --workdir /contracts ethereum/solc:0.7.0 --libraries calledLibrary:%s --bin --optimize %s",
		workingDir, calledLibraryAddress, contract)
	cmdAndArgs := strings.Split(cmdLine, " ")
	cmd := exec.Command(cmdAndArgs[0], cmdAndArgs[1:]...)
	var stdout strings.Builder
	cmd.Stdout, cmd.Stderr = &stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("fail to run cmd: %w", err)
	}
	// solc prints a banner line per contract; the hex code appears two lines
	// after the banner of the contract we are interested in.
	raw := strings.Split(strings.TrimSpace(stdout.String()), "\n")
	var codeHex string
	for i, v := range raw {
		if strings.Contains(v, "======= CallExamples.sol:caller =======") && i+2 < len(raw) {
			codeHex = raw[i+2]
			break
		}
	}
	// BUG FIX: previously a missing banner fell through to decoding the empty
	// string, which silently returned empty bytecode instead of an error.
	if codeHex == "" {
		return nil, fmt.Errorf("caller contract code not found in solc output")
	}
	out, err := hex.DecodeString(codeHex)
	if err != nil {
		return nil, fmt.Errorf("fail to decode ABI code: %w", err)
	}
	return out, nil
}
// init registers the command-line flags; their values are populated when
// main calls flag.Parse.
func init() {
	flag.Int64VarP(&chainID, "chain", "c", 5777, "ID of chain bootstraped by Ganache")
	// NOTE(review): this usage string looks copy-pasted from a receiver-address
	// flag; the flag actually holds the Ganache JSON-RPC endpoint.
	flag.StringVarP(&ganacheURL, "ganache-url", "g", "http://127.0.0.1:7545", "receiver's address")
	flag.BytesHexVarP(&privKey, "key", "k", nil, "sender's key")
	flag.Uint64Var(&nonce, "nonce", 0, "nonce of tx")
	flag.StringVar(&calledLibraryTxHash, "tx", "", "hash of tx deploying the calledLibrary")
}
// newKeyStore builds a scrypt-backed keystore in dir, imports the given raw
// ECDSA private key under a fixed passphrase, and unlocks the resulting
// account so it can sign transactions.
func newKeyStore(key []byte, dir string) (*keystore.KeyStore, error) {
	const passphrase = "hello-world"
	ecdsaKey, err := crypto.ToECDSA(key)
	if err != nil {
		return nil, fmt.Errorf("fail to unmarshal private key: %w", err)
	}
	ks := keystore.NewKeyStore(dir, keystore.StandardScryptN, keystore.StandardScryptP)
	account, err := ks.ImportECDSA(ecdsaKey, passphrase)
	if err != nil {
		return nil, fmt.Errorf("fail to import private key: %w", err)
	}
	if err = ks.Unlock(account, passphrase); err != nil {
		return nil, fmt.Errorf("fail to unlock account: %w", err)
	}
	return ks, nil
}
// toWei converts an amount of ether to wei (1 ether = 10^18 wei), truncating
// any fractional wei.
func toWei(ethers float64) *big.Int {
	scale, _ := new(big.Float).SetString("1000000000000000000")
	amount := new(big.Float).Mul(big.NewFloat(ethers), scale)
	wei, _ := amount.Int(nil)
	return wei
}
|
package additional
import (
"context"
"fmt"
"strings"
"github.com/openshift/oc-mirror/v2/pkg/api/v1alpha2"
"github.com/openshift/oc-mirror/v2/pkg/api/v1alpha3"
clog "github.com/openshift/oc-mirror/v2/pkg/log"
"github.com/openshift/oc-mirror/v2/pkg/manifest"
"github.com/openshift/oc-mirror/v2/pkg/mirror"
)
// NewWithLocalStorage builds an additional-images collector that mirrors
// through the local storage registry reachable at localStorageFQDN.
func NewWithLocalStorage(log clog.PluggableLoggerInterface,
	config v1alpha2.ImageSetConfiguration,
	opts mirror.CopyOptions,
	mirror mirror.MirrorInterface,
	manifest manifest.ManifestInterface,
	localStorageFQDN string,
) CollectorInterface {
	collector := &LocalStorageCollector{
		Log:              log,
		Config:           config,
		Opts:             opts,
		Mirror:           mirror,
		Manifest:         manifest,
		LocalStorageFQDN: localStorageFQDN,
	}
	return collector
}
// LocalStorageCollector collects additional images, routing copies through a
// local storage registry (LocalStorageFQDN) depending on the configured
// mirror mode.
type LocalStorageCollector struct {
	Log              clog.PluggableLoggerInterface
	Mirror           mirror.MirrorInterface
	Manifest         manifest.ManifestInterface
	Config           v1alpha2.ImageSetConfiguration
	Opts             mirror.CopyOptions
	LocalStorageFQDN string // host[:port] of the local storage registry
}
// AdditionalImagesCollector - this looks into the additional images field
// taking into account the mode we are in (mirrorToDisk, diskToMirror)
// the image is downloaded in oci format
// AdditionalImagesCollector - this looks into the additional images field
// taking into account the mode we are in (mirrorToDisk, diskToMirror)
// the image is downloaded in oci format
func (o *LocalStorageCollector) AdditionalImagesCollector(ctx context.Context) ([]v1alpha3.CopyImageSchema, error) {
	var allImages []v1alpha3.CopyImageSchema
	if o.Opts.Mode == mirrorToDisk {
		for _, img := range o.Config.ImageSetConfigurationSpec.Mirror.AdditionalImages {
			imgName := img.Name
			src := ""
			// BUG FIX: the original tested strings.Contains(src, "://") while
			// src was still "", so the transport-stripping branch was dead and
			// images with an explicit transport ended up with an empty Source.
			if !strings.Contains(imgName, "://") { // no transport was provided, assume docker://
				src = dockerProtocol + imgName
			} else {
				// keep the caller-specified transport for the source
				src = imgName
				transportAndRef := strings.Split(imgName, "://")
				imgName = transportAndRef[1] // because we are reusing this to construct dest
			}
			dest := dockerProtocol + strings.Join([]string{o.LocalStorageFQDN, imgName}, "/")
			o.Log.Debug("source %s", src)
			o.Log.Debug("destination %s", dest)
			allImages = append(allImages, v1alpha3.CopyImageSchema{Source: src, Destination: dest})
		}
	}
	if o.Opts.Mode == diskToMirror {
		for _, img := range o.Config.ImageSetConfigurationSpec.Mirror.AdditionalImages {
			// TODO Make this more complete
			// This logic will be useful for operators and releases
			// strip the domain name from the img.Name
			src := ""
			dst := ""
			if !strings.HasPrefix(img.Name, ociProtocol) {
				domainAndPathComps := img.Name
				// pathComponents := img.Name
				// temporarily strip out the transport
				transportAndRef := strings.Split(domainAndPathComps, "://")
				if len(transportAndRef) > 1 {
					domainAndPathComps = transportAndRef[1]
				}
				src = dockerProtocol + strings.Join([]string{o.LocalStorageFQDN, img.Name}, "/")
				dst = strings.Join([]string{o.Opts.Destination, img.Name}, "/")
				// the following is for having the destination without the initial domain name => later
				// domainAndPathCompsArray := strings.Split(domainAndPathComps, "/")
				// if len(domainAndPathCompsArray) > 2 {
				//	pathComponents = strings.Join(domainAndPathCompsArray[1:], "/")
				// } else {
				//	return allImages, fmt.Errorf("unable to parse image %s correctly", img.Name)
				// }
				// src = dockerProtocol + strings.Join([]string{o.LocalStorageFQDN, pathComponents}, "/")
				// dst = strings.Join([]string{o.Opts.Destination, pathComponents}, "/") // already has a transport protocol
			} else {
				src = img.Name
				transportAndPath := strings.Split(img.Name, "://")
				dst = dockerProtocol + strings.Join([]string{o.Opts.Destination, transportAndPath[1]}, "/")
			}
			if src == "" || dst == "" {
				return allImages, fmt.Errorf("unable to determine src %s or dst %s for %s", src, dst, img.Name)
			}
			o.Log.Debug("source %s", src)
			o.Log.Debug("destination %s", dst)
			allImages = append(allImages, v1alpha3.CopyImageSchema{Source: src, Destination: dst})
		}
	}
	return allImages, nil
}
// customImageParser - simple image string parser
|
/**
* Copyright (2021, ) Institute of Software, Chinese Academy of Sciences
*/
package kubesys
/**
* author: wuheng@iscas.ac.cn
* date : 2021/9/30
*/
// RuleBase indexes Kubernetes resource-discovery metadata by kind.
//
// NOTE(review): the field names suggest "full kind" keys are group-qualified
// kind names, with the bare kind mapped to its candidates via
// KindToFullKindMapper — confirm against the code that populates these maps.
type RuleBase struct {
	KindToFullKindMapper      map[string][]string    // kind -> candidate full kinds
	FullKindToApiPrefixMapper map[string]string      // full kind -> API URL prefix
	FullKindToNameMapper      map[string]string      // full kind -> resource name
	FullKindToNamespaceMapper map[string]bool        // full kind -> namespaced or not
	FullKindToVersionMapper   map[string]string      // full kind -> API version
	FullKindToGroupMapper     map[string]string      // full kind -> API group
	FullKindToVerbsMapper     map[string]interface{} // full kind -> supported verbs
}
|
package lru_store
import (
"fmt"
"strconv"
"testing"
"github.com/minotar/imgd/pkg/storage"
"github.com/minotar/imgd/pkg/storage/util/test_helpers"
test_store "github.com/minotar/imgd/pkg/storage/util/test_store"
)
// TestRetrieveMiss checks that looking up an absent key yields a nil value
// and storage.ErrNotFound.
func TestRetrieveMiss(t *testing.T) {
	s, _ := NewLruStore(10)
	value, err := s.Retrieve(test_helpers.RandString(32))
	if value != nil {
		t.Errorf("Retrieve Miss should return a nil value, not: %+v", value)
	}
	if err != storage.ErrNotFound {
		t.Errorf("Retrieve Miss should return a storage.ErrNotFound, not: %s", err)
	}
}
// TestInsertAndRetrieve verifies that each inserted value can be read back
// immediately.
func TestInsertAndRetrieve(t *testing.T) {
	store, _ := NewLruStore(10)
	for i := 0; i < 10; i++ {
		key := test_helpers.RandString(32)
		store.Insert(key, []byte(strconv.Itoa(i)))
		item, err := store.Retrieve(key)
		// Previously the Retrieve error was discarded and a bare t.Fail()
		// gave no hint about which iteration or key broke.
		if err != nil {
			t.Errorf("Retrieve of %s failed: %s", key, err)
		}
		if string(item) != strconv.Itoa(i) {
			t.Errorf("Retrieved %q for key %s, expected %q", item, key, strconv.Itoa(i))
		}
	}
}
// TestInsertAndDelete confirms that removed keys can no longer be retrieved.
func TestInsertAndDelete(t *testing.T) {
	s, _ := NewLruStore(10)
	for n := 0; n < 10; n++ {
		k := test_helpers.RandString(32)
		s.Insert(k, []byte(strconv.Itoa(n)))
		s.Remove(k)
		if _, err := s.Retrieve(k); err != storage.ErrNotFound {
			t.Errorf("Key should have been removed: %s", k)
		}
	}
}
// TestHousekeeping checks Len across the store lifecycle: zero on creation,
// capped at capacity when over-filled, and zero again after Flush.
func TestHousekeeping(t *testing.T) {
	store, _ := NewLruStore(5)
	// The original shadowed the builtin `len` here; use a distinct name.
	if count := store.Len(); count != 0 {
		t.Errorf("Initialized store should be length 0, not %d", count)
	}
	for i := 0; i < 10; i++ {
		key := test_helpers.RandString(32)
		store.Insert(key, []byte("var"))
	}
	if count := store.Len(); count != 5 {
		t.Errorf("Full store should be length 5, not %d", count)
	}
	store.Flush()
	if count := store.Len(); count != 0 {
		t.Errorf("Flushed store should be length 0, not %d", count)
	}
}
// TestLruEviction exercises least-recently-used eviction at capacity 5,
// including the "retrieval bumps recency" behaviour.
func TestLruEviction(t *testing.T) {
	store, _ := NewLruStore(5)
	var keys [10]string
	for i := 0; i < 10; i++ {
		keys[i] = test_helpers.RandString(32)
	}
	// Fill 6 keys - should evict first key
	for i := 0; i < 6; i++ {
		store.Insert(keys[i], []byte("var"))
	}
	// Verify first key was evicted
	if _, err := store.Retrieve(keys[0]); err != storage.ErrNotFound {
		t.Errorf("First added key should have been evicted %s", keys[0])
	}
	// Verify second key is still present - and bump its listing (as recently used)
	// BUG FIX: the failure messages below printed keys[0] instead of the key
	// actually under test, keys[1].
	if _, err := store.Retrieve(keys[1]); err == storage.ErrNotFound {
		t.Errorf("Second key should not have been evicted %s", keys[1])
	}
	// Fill 3 more keys - should evict third/fourth/fifth keys - not second
	for i := 6; i < 9; i++ {
		fmt.Printf("Key id is %d\n", i)
		store.Insert(keys[i], []byte("var"))
		// Verify specific keys were evicted
		if _, err := store.Retrieve(keys[i-4]); err != storage.ErrNotFound {
			t.Errorf("keys[%d] should have been evicted when adding keys[%d]", i-4, i)
		}
	}
	// Verify second key is still present
	if _, err := store.Retrieve(keys[1]); err == storage.ErrNotFound {
		t.Errorf("Second key should not have been evicted %s", keys[1])
	}
}
// largeBucket supplies shared pre-generated key/value test data for the
// benchmarks below.
var largeBucket = test_store.NewTestStoreBench()
// BenchmarkInsert measures raw Insert throughput. Store capacity and test
// data are both sized to b.N so every operation inserts a distinct key with
// no eviction.
func BenchmarkInsert(b *testing.B) {
	store, _ := NewLruStore(b.N)
	largeBucket.MinSize(b.N)
	b.ResetTimer()
	largeBucket.FillStore(store.Insert, b.N)
}
// BenchmarkLookup measures Retrieve against a store pre-filled with a fixed
// 1000 entries; each of the b.N iterations reads the same small key set.
func BenchmarkLookup(b *testing.B) {
	store, _ := NewLruStore(b.N)
	// Set TestBucket and Store based on a static size (b.N should only affect loop)
	largeBucket.MinSize(1000)
	largeBucket.FillStore(store.Insert, 1000)
	// Each operation we will read the same set of keys
	reads := 10
	if b.N < reads {
		reads = b.N
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for k := 0; k < reads; k++ {
			store.Retrieve(largeBucket.Keys[k])
		}
	}
}
|
package api_test
import (
"fmt"
"log"
"api"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
// Package-level fixtures shared by the tests in this file.
var (
	server     *httptest.Server // test server hosting the API router
	reader     io.Reader        // reusable request-body reader
	animalsUrl string           // URL of the animals endpoint, assigned in init
)
// init boots the API router on an httptest server and records the animals
// endpoint URL for the tests.
//
// BUG FIX: the original called log.Fatal(http.ListenAndServe(":8080", ...)),
// which blocks forever inside init (and exits the process if the listener
// fails), so animalsUrl was never assigned and the declared httptest server
// was never used. Serve the router via httptest instead.
func init() {
	router := api.NewRouter()
	server = httptest.NewServer(router)
	animalsUrl = fmt.Sprintf("%s/animals", server.URL)
	log.Printf("test server listening at %s", server.URL)
}
// TestCreateAnimal POSTs a new animal and expects a 201 Created.
func TestCreateAnimal(t *testing.T) {
	// BUG FIX: the original payload used commas instead of colons for the
	// last two fields, which is not valid JSON.
	animalJson := `{ "name": "bear", "leg_count": 4, "life_span": 30, "is_endangered": true }`
	reader = strings.NewReader(animalJson)
	request, err := http.NewRequest("POST", animalsUrl, reader)
	// Previously this error was silently overwritten by the Do call below.
	if err != nil {
		t.Fatal(err)
	}
	res, err := http.DefaultClient.Do(request)
	if err != nil {
		// Fatal, not Error: res is nil here and the StatusCode check below
		// would panic.
		t.Fatal(err)
	}
	if res.StatusCode != 201 {
		t.Errorf("Success expected: %d", res.StatusCode)
	}
}
// TestListAnimals GETs the animals collection and expects a 200 OK.
func TestListAnimals(t *testing.T) {
	reader = strings.NewReader("")
	request, err := http.NewRequest("GET", animalsUrl, reader)
	// Previously this error was silently overwritten by the Do call below.
	if err != nil {
		t.Fatal(err)
	}
	res, err := http.DefaultClient.Do(request)
	if err != nil {
		// Fatal, not Error: res is nil here and the StatusCode check below
		// would panic.
		t.Fatal(err)
	}
	if res.StatusCode != 200 {
		t.Errorf("Success expected: %d", res.StatusCode)
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"testing"
)
const expectedWriteLocationsPackageJson = `{
"name": "write-locations",
"version": "1.0.0",
"main": "stage.js",
"scripts": {
"start": "node stage.js"
},
"dependencies": {
"express": "^4.16.2",
"morgan": "^1.9.0",
"prom-client": "^11.0.0",
"request": "^2.83.0",
"topological": "^1.0.39",
"cassandra-driver":"^3.3.0",
"topological-kafka":"^1.0.4"
}
}`
const expectedPackageJson = `{
"name": "predict-arrivals",
"version": "1.0.0",
"main": "stage.js",
"scripts": {
"start": "node stage.js"
},
"dependencies": {
"express": "^4.16.2",
"morgan": "^1.9.0",
"prom-client": "^11.0.0",
"request": "^2.83.0",
"topological": "^1.0.39",
"topological-kafka":"^1.0.4"
}
}`
const expectedImports = `const { Node, Topology } = require('topological'),
express = require('express'),
app = express(),
morgan = require('morgan'),
server = require('http').createServer(app),
promClient = require('prom-client'),
estimatedArrivalsConnectionClass = require('topological-kafka'),
locationsConnectionClass = require('topological-kafka'),
predictArrivalsProcessorClass = require('./processors/predictArrivals.js');`
const expectedConnectionsString = `let estimatedArrivalsConnection = new estimatedArrivalsConnectionClass({
"id": "estimatedArrivals",
"config": {"endpoint": process.env.KAFKA_ENDPOINT, "keyField": process.env.ESTIMATED_ARRIVALS_KEYFIELD, "topic": process.env.ESTIMATED_ARRIVALS_TOPIC}
});
let locationsConnection = new locationsConnectionClass({
"id": "locations",
"config": {"endpoint": process.env.KAFKA_ENDPOINT, "keyField": process.env.LOCATIONS_KEYFIELD, "topic": process.env.LOCATIONS_TOPIC}
});`
const expectedProcessorsString = `let predictArrivalsProcessor = new predictArrivalsProcessorClass({
"id": "predictArrivals",
"config": {}
});`
const expectedNodesString = `new Node({
id: 'predictArrivals',
inputs: [locationsConnection],
processor: predictArrivalsProcessor,
outputs: [estimatedArrivalsConnection]
})`
const expectedTopologyString = `let topology = new Topology({
id: 'topology',
nodes: [
new Node({
id: 'predictArrivals',
inputs: [locationsConnection],
processor: predictArrivalsProcessor,
outputs: [estimatedArrivalsConnection]
})
]
});
topology.start(err => {
if (err) {
topology.log.error("topology start failed with: " + err);
return process.exit(0);
}
});
`
const expectedStageJs = `const { Node, Topology } = require('topological'),
express = require('express'),
app = express(),
morgan = require('morgan'),
server = require('http').createServer(app),
promClient = require('prom-client'),
estimatedArrivalsConnectionClass = require('topological-kafka'),
locationsConnectionClass = require('topological-kafka'),
predictArrivalsProcessorClass = require('./processors/predictArrivals.js');
// CONNECTIONS =============================================================
let estimatedArrivalsConnection = new estimatedArrivalsConnectionClass({
"id": "estimatedArrivals",
"config": {"endpoint": process.env.KAFKA_ENDPOINT, "keyField": process.env.ESTIMATED_ARRIVALS_KEYFIELD, "topic": process.env.ESTIMATED_ARRIVALS_TOPIC}
});
let locationsConnection = new locationsConnectionClass({
"id": "locations",
"config": {"endpoint": process.env.KAFKA_ENDPOINT, "keyField": process.env.LOCATIONS_KEYFIELD, "topic": process.env.LOCATIONS_TOPIC}
});
// PROCESSORS ==============================================================
let predictArrivalsProcessor = new predictArrivalsProcessorClass({
"id": "predictArrivals",
"config": {}
});
// TOPOLOGY ================================================================
let topology = new Topology({
id: 'topology',
nodes: [
new Node({
id: 'predictArrivals',
inputs: [locationsConnection],
processor: predictArrivalsProcessor,
outputs: [estimatedArrivalsConnection]
})
]
});
topology.start(err => {
if (err) {
topology.log.error("topology start failed with: " + err);
return process.exit(0);
}
});
// METRICS ================================================================
app.get("/metrics", (req, res) => {
res.set("Content-Type", promClient.register.contentType);
res.end(promClient.register.metrics());
});
app.use(morgan("combined"));
server.listen(process.env.PORT);
topology.log.info("listening on port: " + process.env.PORT);
promClient.collectDefaultMetrics();
`
// TestFillPackageJson renders package.json for the write-locations
// deployment and compares it against the expected fixture.
func TestFillPackageJson(t *testing.T) {
	b := NewBuilder("fixtures/topology.json", "fixtures/environment.json")
	if err := b.Load(); err != nil {
		t.Errorf("builder failed to load: %s", err)
	}
	const deploymentID = "write-locations"
	njb := NodeJsPlatformBuilder{
		DeploymentID: deploymentID,
		Deployment:   b.Environment.Deployments[deploymentID],
		Topology:     b.Topology,
		Environment:  b.Environment,
	}
	packageJson := njb.FillPackageJson()
	if packageJson != expectedWriteLocationsPackageJson {
		t.Errorf("package.json did not match:-->%s<-- vs. -->%s<-- did not complete successfully.", packageJson, expectedWriteLocationsPackageJson)
	}
}
// TestFillImports renders the require() import block for the predict-arrivals
// deployment and compares it against the expected fixture.
func TestFillImports(t *testing.T) {
	b := NewBuilder("fixtures/topology.json", "fixtures/environment.json")
	if err := b.Load(); err != nil {
		t.Errorf("builder failed to load: %s", err)
	}
	const deploymentID = "predict-arrivals"
	njb := NodeJsPlatformBuilder{
		DeploymentID: deploymentID,
		Deployment:   b.Environment.Deployments[deploymentID],
		Topology:     b.Topology,
		Environment:  b.Environment,
	}
	importsString := njb.FillImports()
	if importsString != expectedImports {
		t.Errorf("imports did not match:-->%s<-- vs. -->%s<-- did not complete successfully.", importsString, expectedImports)
	}
}
// TestFillConnections renders the connection declarations for the
// predict-arrivals deployment and compares them against the expected fixture.
func TestFillConnections(t *testing.T) {
	b := NewBuilder("fixtures/topology.json", "fixtures/environment.json")
	if err := b.Load(); err != nil {
		t.Errorf("builder failed to load: %s", err)
	}
	const deploymentID = "predict-arrivals"
	njb := NodeJsPlatformBuilder{
		DeploymentID: deploymentID,
		Deployment:   b.Environment.Deployments[deploymentID],
		Topology:     b.Topology,
		Environment:  b.Environment,
	}
	connectionsString := njb.FillConnections()
	if connectionsString != expectedConnectionsString {
		t.Errorf("connections did not match:-->%s<-- vs. -->%s<-- did not complete successfully.", connectionsString, expectedConnectionsString)
	}
}
// TestFillProcessors renders the processor declarations for the
// predict-arrivals deployment and compares them against the expected fixture.
func TestFillProcessors(t *testing.T) {
	b := NewBuilder("fixtures/topology.json", "fixtures/environment.json")
	if err := b.Load(); err != nil {
		t.Errorf("builder failed to load: %s", err)
	}
	const deploymentID = "predict-arrivals"
	njb := NodeJsPlatformBuilder{
		DeploymentID: deploymentID,
		Deployment:   b.Environment.Deployments[deploymentID],
		Topology:     b.Topology,
		Environment:  b.Environment,
	}
	processorsString := njb.FillProcessors()
	if processorsString != expectedProcessorsString {
		t.Errorf("processors did not match:-->%s<-- vs. -->%s<-- did not complete successfully.", processorsString, expectedProcessorsString)
	}
}
// TestFillNodes renders the Node declarations for the predict-arrivals
// deployment and compares them against the expected fixture.
func TestFillNodes(t *testing.T) {
	b := NewBuilder("fixtures/topology.json", "fixtures/environment.json")
	if err := b.Load(); err != nil {
		t.Errorf("builder failed to load: %s", err)
	}
	const deploymentID = "predict-arrivals"
	njb := NodeJsPlatformBuilder{
		DeploymentID: deploymentID,
		Deployment:   b.Environment.Deployments[deploymentID],
		Topology:     b.Topology,
		Environment:  b.Environment,
	}
	nodesString := njb.FillNodes()
	if nodesString != expectedNodesString {
		t.Errorf("nodes did not match:-->%s<-- vs. -->%s<-- did not complete successfully.", nodesString, expectedNodesString)
	}
}
// TestFillTopology renders the Topology declaration for the predict-arrivals
// deployment and compares it against the expected fixture.
func TestFillTopology(t *testing.T) {
	b := NewBuilder("fixtures/topology.json", "fixtures/environment.json")
	if err := b.Load(); err != nil {
		t.Errorf("builder failed to load: %s", err)
	}
	const deploymentID = "predict-arrivals"
	njb := NodeJsPlatformBuilder{
		DeploymentID: deploymentID,
		Deployment:   b.Environment.Deployments[deploymentID],
		Topology:     b.Topology,
		Environment:  b.Environment,
	}
	topologyString := njb.FillTopology()
	if topologyString != expectedTopologyString {
		t.Errorf("topology did not match:-->%s<-- vs. -->%s<-- did not complete successfully.", topologyString, expectedTopologyString)
	}
}
// TestFillStage renders the full stage.js for the predict-arrivals
// deployment and compares it against the expected fixture.
func TestFillStage(t *testing.T) {
	b := NewBuilder("fixtures/topology.json", "fixtures/environment.json")
	if err := b.Load(); err != nil {
		t.Errorf("builder failed to load: %s", err)
	}
	const deploymentID = "predict-arrivals"
	njb := NodeJsPlatformBuilder{
		DeploymentID: deploymentID,
		Deployment:   b.Environment.Deployments[deploymentID],
		Topology:     b.Topology,
		Environment:  b.Environment,
	}
	stageJsString := njb.FillStage()
	if stageJsString != expectedStageJs {
		t.Errorf("stage.js did not match:-->%s<-- vs. -->%s<-- did not complete successfully.", stageJsString, expectedStageJs)
	}
}
// TestBuild runs a full Build against the fixtures and verifies both the
// produced directory tree and the generated package.json / stage.js contents.
func TestBuild(t *testing.T) {
	builder := NewBuilder("fixtures/topology.json", "fixtures/environment.json")
	_, err := builder.LoadTopology()
	if err != nil {
		t.Errorf("LoadTopology did not complete successfully.")
	}
	_, err = builder.LoadEnvironment()
	if err != nil {
		t.Errorf("LoadEnvironment did not complete successfully.")
	}
	err = builder.Build()
	if err != nil {
		t.Errorf("Build did not complete successfully: %s", err)
	}
	// Every file and directory Build is expected to emit.
	expectedItems := []string{
		"build",
		"build/production",
		"build/production/deploy-all",
		"build/production/notify-arrivals",
		"build/production/notify-arrivals/Dockerfile",
		"build/production/notify-arrivals/devops",
		"build/production/notify-arrivals/devops/Chart.yaml",
		"build/production/notify-arrivals/devops/start-stage",
		"build/production/notify-arrivals/devops/values.yaml",
		"build/production/notify-arrivals/devops/templates/deployment.yaml",
		"build/production/notify-arrivals/devops/templates/service.yaml",
		"build/production/notify-arrivals",
		"build/production/notify-arrivals/package.json",
		"build/production/notify-arrivals/stage.js",
		"build/production/notify-arrivals/processors/notifyArrivals.js",
		"build/production/write-locations",
		"build/production/write-locations/Dockerfile",
		"build/production/write-locations/package.json",
		"build/production/write-locations/stage.js",
		"build/production/write-locations/processors/writeLocations.js",
		"build/production/write-locations/devops",
		"build/production/write-locations/devops/Chart.yaml",
		"build/production/write-locations/devops/start-stage",
		"build/production/write-locations/devops/values.yaml",
		"build/production/write-locations/devops/templates/deployment.yaml",
		"build/production/write-locations/devops/templates/service.yaml",
		"build/production/predict-arrivals",
		"build/production/predict-arrivals/Dockerfile",
		"build/production/predict-arrivals/package.json",
		"build/production/predict-arrivals/stage.js",
		"build/production/predict-arrivals/processors/predictArrivals.js",
		"build/production/predict-arrivals/devops",
		"build/production/predict-arrivals/devops/Chart.yaml",
		"build/production/predict-arrivals/devops/start-stage",
		"build/production/predict-arrivals/devops/values.yaml",
		"build/production/predict-arrivals/devops/templates/deployment.yaml",
		"build/production/predict-arrivals/devops/templates/service.yaml",
	}
	for _, directory := range expectedItems {
		if _, err := os.Stat(directory); os.IsNotExist(err) {
			// BUG FIX: message previously read "did not created".
			t.Errorf("Build did not create expected directory: %s", directory)
		}
	}
	packageJsonBytes, err := ioutil.ReadFile("build/production/predict-arrivals/package.json")
	if err != nil {
		t.Errorf("Could not read package.json: %s", err)
	}
	// string(packageJsonBytes) is the idiomatic []byte-to-string conversion;
	// Sprintf is kept only so the file's fmt import stays in use.
	if fmt.Sprintf("%s", packageJsonBytes) != expectedPackageJson {
		t.Errorf("package.json did not match:-->%s<-- vs. -->%s<-- did not complete successfully.", packageJsonBytes, expectedPackageJson)
	}
	stageJsBytes, err := ioutil.ReadFile("build/production/predict-arrivals/stage.js")
	if err != nil {
		t.Errorf("Could not read stage.js: %s", err)
	}
	if fmt.Sprintf("%s", stageJsBytes) != expectedStageJs {
		t.Errorf("stage.js did not match:-->%s<-- vs. -->%s<-- did not complete successfully.", stageJsBytes, expectedStageJs)
	}
}
|
package test
import (
"bytes"
"crypto/rand"
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/zhaohaijun/matrixchain/common/log"
"github.com/zhaohaijun/matrixchain/common/serialization"
"github.com/zhaohaijun/matrixchain/core/types"
. "github.com/zhaohaijun/matrixchain/smartcontract"
neovm2 "github.com/zhaohaijun/matrixchain/smartcontract/service/neovm"
"github.com/zhaohaijun/matrixchain/vm/neovm"
)
// TestRandomCodeCrash feeds the VM random bytecode of increasing length and
// relies on the deferred recover to report the offending bytecode if the
// engine panics instead of returning an error.
func TestRandomCodeCrash(t *testing.T) {
	log.InitLog(4)
	defer func() {
		os.RemoveAll("Log")
	}()

	config := &Config{
		Time:   10,
		Height: 10,
		Tx:     &types.Transaction{},
	}

	var code []byte
	defer func() {
		// On panic, print the bytecode that triggered it so the failure is
		// reproducible.
		if err := recover(); err != nil {
			fmt.Printf("code %x \n", code)
		}
	}()

	for i := 1; i < 10; i++ {
		fmt.Printf("test round:%d \n", i)
		// Assign (not ':=') so the deferred recover above sees the current
		// bytecode; the original ':=' shadowed the outer variable and the
		// recover printed an empty slice.
		code = make([]byte, i)
		for j := 0; j < 10; j++ {
			rand.Read(code)
			//cache := storage.NewCloneCache(testBatch)
			sc := SmartContract{
				Config:  config,
				Gas:     10000,
				CacheDB: nil,
			}
			engine, _ := sc.NewExecuteEngine(code)
			engine.Invoke()
		}
	}
}
// TestOpCodeDUP verifies that executing a lone DUP opcode against an empty
// evaluation stack fails with an error rather than succeeding or panicking.
func TestOpCodeDUP(t *testing.T) {
	log.InitLog(4)
	defer os.RemoveAll("Log")

	cfg := &Config{
		Time:   10,
		Height: 10,
		Tx:     &types.Transaction{},
	}
	contract := SmartContract{
		Config:  cfg,
		Gas:     10000,
		CacheDB: nil,
	}

	dupOnly := []byte{byte(neovm.DUP)}
	engine, _ := contract.NewExecuteEngine(dupOnly)
	_, err := engine.Invoke()
	assert.NotNil(t, err)
}
// TestOpReadMemAttack builds a malicious script that declares a huge
// (~2 GiB) var-uint parameter length while supplying only four bytes of
// payload, and asserts the engine rejects it with an error instead of
// attempting the oversized read.
func TestOpReadMemAttack(t *testing.T) {
	log.InitLog(4)
	defer func() {
		os.RemoveAll("Log")
	}()
	config := &Config{
		Time:   10,
		Height: 10,
		Tx:     &types.Transaction{},
	}
	bf := new(bytes.Buffer)
	builder := neovm.NewParamsBuilder(bf)
	builder.Emit(neovm.SYSCALL)
	// Snapshot the script so far; bytes emitted into the builder after this
	// point are NOT part of bs. NOTE(review): the EmitPushByteArray call
	// below therefore never reaches the executed script — presumably
	// intentional for this attack payload, but worth confirming.
	bs := bytes.NewBuffer(builder.ToArray())
	builder.EmitPushByteArray([]byte(neovm2.NATIVE_INVOKE_NAME))
	// Declared length: just under 0x7fffffc7 (~2 GiB).
	l := 0X7fffffc7 - 1
	serialization.WriteVarUint(bs, uint64(l))
	// Only 4 bytes of actual payload follow the huge declared length.
	b := make([]byte, 4)
	bs.Write(b)
	sc := SmartContract{
		Config:  config,
		Gas:     100000,
		CacheDB: nil,
	}
	engine, _ := sc.NewExecuteEngine(bs.Bytes())
	_, err := engine.Invoke()
	assert.NotNil(t, err)
}
|
package gowhere
import (
"bytes"
"testing"
)
// TestParseRules checks that a single well-formed redirect line parses into
// exactly one rule with the expected directive, pattern, and target.
func TestParseRules(t *testing.T) {
	line := "redirect 301 /project/def/new_page.html /project/def/other_page.html"

	rs, err := ParseRules(bytes.NewReader([]byte(line)))
	if err != nil {
		t.Errorf("got error: %v", err)
	}
	if len(rs.rules) != 1 {
		t.Errorf("got %d rules expected 1", len(rs.rules))
	}

	rule := rs.rules[0]
	if rule.Directive != "redirect" {
		t.Errorf("got directive %s expected redirect", rule.Directive)
	}
	if rule.Pattern != "/project/def/new_page.html" {
		t.Errorf("got pattern %s expected /project/def/new_page.html", rule.Pattern)
	}
	if rule.Target != "/project/def/other_page.html" {
		t.Errorf("got target %s expected /project/def/other_page.html", rule.Target)
	}
}
// TestParseRulesIgnoreComments checks that a line starting with '#' is
// treated as a comment and yields no rules.
func TestParseRulesIgnoreComments(t *testing.T) {
	commented := "#redirect 301 /project/def/new_page.html /project/def/other_page.html"

	rs, err := ParseRules(bytes.NewReader([]byte(commented)))
	if err != nil {
		t.Errorf("got error: %v", err)
	}
	if got := len(rs.rules); got != 0 {
		t.Errorf("got %d rules expected 0", got)
	}
}
// TestParseRulesIgnoreBlankLines checks that surrounding blank lines are
// skipped and the single real rule line is still parsed.
func TestParseRulesIgnoreBlankLines(t *testing.T) {
	data := []byte("\nredirect 301 /pattern /target\n")
	input := bytes.NewReader(data)
	rs, err := ParseRules(input)
	if err != nil {
		t.Errorf("got error: %v", err)
	}
	if len(rs.rules) != 1 {
		// Fixed failure message: the condition expects 1 rule, but the
		// message previously claimed "expected 0".
		t.Errorf("got %d rules expected 1", len(rs.rules))
	}
}
|
package factory
import (
"fmt"
"github.com/RackHD/ipam/interfaces"
)
// factory is a storage map for resource creator functions registered via init.
var factory = make(map[string]interfaces.ResourceCreator)
// Register associates the resource identifier with a resource creator function.
// Registering the same identifier twice silently overwrites the earlier
// creator. The backing map is written without synchronization, so Register
// is intended to be called from package init (per the factory map's doc),
// before any concurrent use.
func Register(resource string, creator interfaces.ResourceCreator) {
	factory[resource] = creator
}
// Request finds a resource creator function by the resource identifier and calls
// the creator returning the result. The resulting resource may be a default version
// if the requested version is not present.
func Request(resource string, version string) (interfaces.Resource, error) {
	if creator, ok := factory[resource]; ok {
		return creator(version)
	}
	// Error text lowercased and trailing period dropped per Go error-string
	// convention (staticcheck ST1005).
	return nil, fmt.Errorf("request: unable to locate resource %s", resource)
}
// Require finds a resource creator function by the resource identifier and verifies
// the created resource matches the requested version. If not, an error will be
// returned.
func Require(resource string, version string) (interfaces.Resource, error) {
	provided, err := Request(resource, version)
	if err != nil {
		return nil, err
	}
	if provided.Version() != version {
		// Error text lowercased and trailing period dropped per Go
		// error-string convention (staticcheck ST1005).
		return nil, fmt.Errorf("require: unable to locate resource %s, version %s", resource, version)
	}
	return provided, nil
}
|
package chat
import (
"errors"
"sync"
"time"
"github.com/microcosm-cc/bluemonday"
blackfriday "gopkg.in/russross/blackfriday.v2"
)
// Chat manages a set of named rooms and persists messages through a Store.
type Chat struct {
	mutex sync.RWMutex     // guards rooms
	store Store            // message persistence backend
	rooms map[string]*room // active rooms keyed by room name
}
// New returns a Chat backed by the given message store, with no rooms yet.
func New(s Store) *Chat {
	c := &Chat{
		store: s,
		rooms: make(map[string]*room),
	}
	return c
}
// NewClient attaches conn to the named room, creating the room on first use.
// The client-count broadcast and the replay of stored message history both
// run in their own goroutines so the caller is not blocked on I/O while
// holding no lock.
func (c *Chat) NewClient(room string, conn Connection) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	r, exists := c.rooms[room]
	if !exists {
		r = newRoom(room)
		c.rooms[room] = r
	}
	r.newClient(conn)
	go r.broadcastClientsCount()
	go func() {
		msgs, err := c.store.GetMessages(room)
		if err != nil {
			// Best effort: history replay is silently skipped on store errors.
			return
		}
		if len(msgs) > 0 {
			conn.SendMessages(msgs)
		}
	}()
}
// ErrRoomDoesNotExists is returned when an operation targets a room that is
// not currently present in the Chat.
var ErrRoomDoesNotExists = errors.New("room does not exists")
// RemoveClient detaches conn from the named room. The room itself is dropped
// once its last client leaves; otherwise the updated client count is
// broadcast. Returns ErrRoomDoesNotExists when the room is unknown.
func (c *Chat) RemoveClient(room string, conn Connection) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	r, ok := c.rooms[room]
	if !ok {
		return ErrRoomDoesNotExists
	}

	r.removeClient(conn)
	if remaining := r.clientsCount(); remaining == 0 {
		delete(c.rooms, room)
	} else {
		go r.broadcastClientsCount()
	}
	return nil
}
// NewMessage renders text as sanitized markdown HTML, persists it in the
// store, and broadcasts it to the room's clients. Returns
// ErrRoomDoesNotExists when the room is unknown.
func (c *Chat) NewMessage(room string, from Connection, text string) error {
	c.mutex.RLock()
	defer c.mutex.RUnlock()

	r, ok := c.rooms[room]
	if !ok {
		return ErrRoomDoesNotExists
	}

	id, err := c.store.GetMessageID(room)
	if err != nil {
		return errors.New("failed to get message ID from store: " + err.Error())
	}

	// Render markdown, then strip anything outside the UGC whitelist.
	rendered := bluemonday.UGCPolicy().SanitizeBytes(blackfriday.Run([]byte(text)))

	msg := Message{
		ID: id,
		Time: time.Now().UTC().Format(
			"Mon Jan 02 2006 15:04:05 GMT-0700 (MST)"),
		HTML: string(rendered),
	}
	if err := c.store.AddMessage(room, msg); err != nil {
		return errors.New("failed to add message to store: " + err.Error())
	}

	go r.broadcast(from, msg)
	return nil
}
|
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"github.com/cello-proj/cello/internal/requests"
"github.com/cello-proj/cello/internal/responses"
"github.com/cello-proj/cello/internal/types"
"github.com/cello-proj/cello/service/internal/credentials"
"github.com/cello-proj/cello/service/internal/db"
"github.com/cello-proj/cello/service/internal/env"
"github.com/cello-proj/cello/service/internal/git"
"github.com/cello-proj/cello/service/internal/workflow"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gorilla/mux"
upper "github.com/upper/db/v4"
"gopkg.in/yaml.v2"
)
const (
	// numOfTokensLimit caps the number of tokens per project. Not referenced
	// in this chunk of the file — presumably enforced by token-management
	// handlers elsewhere (TODO confirm).
	numOfTokensLimit = 2
)
// token is the JSON payload wrapping a credentials token returned to API
// clients. NOTE(review): the original comment called this a JWT, but
// newCelloToken in this file builds a "provider:roleID:secret" string —
// confirm which it actually carries.
type token struct {
	Token string `json:"token"` // serialized token value
}
// errorResponse is the JSON body returned for failed API requests.
type errorResponse struct {
	ErrorMessage string `json:"error_message"` // human-readable failure description
}
// generateErrorResponseJSON renders message as an errorResponse JSON object.
func generateErrorResponseJSON(message string) string {
	// The marshal error is deliberately ignored: the input is an internal,
	// always-serializable message string.
	payload, _ := json.Marshal(errorResponse{ErrorMessage: message})
	return string(payload)
}
// handler carries the dependencies shared by all HTTP handlers: logging,
// credential-provider construction, the Argo workflow client and its
// context, service configuration, git access, environment settings, and
// the database client.
// NOTE(review): storing a context in a struct (argoCtx) is discouraged by
// Go convention; presumably it spans the service lifetime — confirm.
type handler struct {
	logger log.Logger
	newCredentialsProvider func(a credentials.Authorization, env env.Vars, h http.Header, vaultConfig credentials.VaultConfigFn, fn credentials.VaultSvcFn) (credentials.Provider, error)
	argo workflow.Workflow
	argoCtx context.Context
	config *Config
	gitClient git.Client
	env env.Vars
	dbClient db.Client
}
// healthCheck reports service health. It requires Vault to respond with
// 200 (initialized, unsealed, active) or 429 (unsealed standby) and the
// database to pass its own health check; on any failure it returns 503.
func (h *handler) healthCheck(w http.ResponseWriter, r *http.Request) {
	vaultEndpoint := fmt.Sprintf("%s/v1/sys/health", h.env.VaultAddress)
	l := h.requestLogger(r, "op", "health-check", "vault-endpoint", vaultEndpoint)
	// #nosec
	response, err := http.Get(vaultEndpoint)
	if err != nil {
		level.Error(l).Log("message", "received error connecting to vault", "error", err)
		w.WriteHeader(http.StatusServiceUnavailable)
		fmt.Fprintln(w, "Health check failed")
		return
	}
	// We don't care about the body but must drain and close it so the
	// transport can reuse the connection; io.Copy to io.Discard avoids
	// buffering the whole body in memory.
	// https://golang.org/pkg/net/http/#Client.Do
	defer response.Body.Close()
	if _, err = io.Copy(io.Discard, response.Body); err != nil {
		level.Warn(l).Log("message", "unable to read vault body; continuing", "error", err)
		// Continue on and handle the actual response code from Vault accordingly.
	}
	if response.StatusCode != http.StatusOK && response.StatusCode != http.StatusTooManyRequests {
		level.Error(l).Log("message", fmt.Sprintf("received code %d which is not 200 (initialized, unsealed, and active) or 429 (unsealed and standby) when connecting to vault", response.StatusCode))
		w.WriteHeader(http.StatusServiceUnavailable)
		fmt.Fprintln(w, "Health check failed")
		return
	}
	if err = h.dbClient.Health(r.Context()); err != nil {
		level.Error(l).Log("message", fmt.Sprintf("received code error %s when connecting to database", err.Error()))
		w.WriteHeader(http.StatusServiceUnavailable)
		fmt.Fprintln(w, "Health check failed")
		return
	}
	w.Header().Set("Content-Type", "text/plain")
	fmt.Fprintln(w, "Health check succeeded")
}
// Lists workflows for the project/target in the URL by filtering the full
// Argo status list down to workflows named with the "<project>-<target>"
// prefix.
func (h handler) listWorkflows(w http.ResponseWriter, r *http.Request) {
	// TODO authenticate user can list this workflow once auth figured out
	// TODO fail if project / target does not exist or are not valid format
	vars := mux.Vars(r)
	projectName, targetName := vars["projectName"], vars["targetName"]

	l := h.requestLogger(r, "op", "list-workflows", "project", projectName, "target", targetName)
	level.Debug(l).Log("message", "listing workflows")

	workflowList, err := h.argo.ListStatus(h.argoCtx)
	if err != nil {
		level.Error(l).Log("message", "error listing workflows", "error", err)
		h.errorResponse(w, "error listing workflows", http.StatusInternalServerError)
		return
	}

	// Only return workflows the target project / target
	prefix := fmt.Sprintf("%s-%s", projectName, targetName)
	matching := make([]workflow.Status, 0)
	for _, status := range workflowList {
		if strings.HasPrefix(status.Name, prefix) {
			matching = append(matching, status)
		}
	}

	jsonData, err := json.Marshal(matching)
	if err != nil {
		level.Error(l).Log("message", "error serializing workflow IDs", "error", err)
		h.errorResponse(w, "error serializing workflow IDs", http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, string(jsonData))
}
// Creates workflow init params by pulling the manifest from the given git
// repository at the given commit and path, then decoding it as YAML.
func (h handler) loadCreateWorkflowRequestFromGit(repository, commitHash, path string) (requests.CreateWorkflow, error) {
	level.Debug(h.logger).Log("message", fmt.Sprintf("retrieving manifest from repository %s at sha %s with path %s", repository, commitHash, path))

	var cwr requests.CreateWorkflow
	fileContents, err := h.gitClient.GetManifestFile(repository, commitHash, path)
	if err != nil {
		return cwr, err
	}
	return cwr, yaml.Unmarshal(fileContents, &cwr)
}
// createWorkflowFromGit creates a workflow by loading the create-workflow
// manifest from the project's git repository at the commit/path named in the
// request body, then delegating to createWorkflowFromRequest.
func (h handler) createWorkflowFromGit(w http.ResponseWriter, r *http.Request) {
	l := h.requestLogger(r, "op", "create-workflow-from-git")
	ctx := r.Context()
	level.Debug(l).Log("message", "validating authorization header for create workflow from git")
	ah := r.Header.Get("Authorization")
	a, err := credentials.NewAuthorization(ah)
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	// TODO we need to ensure this _isn't an admin...
	if err := a.Validate(); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	level.Debug(l).Log("message", "reading request body")
	reqBody, err := io.ReadAll(r.Body)
	if err != nil {
		level.Error(l).Log("message", "error reading request data", "error", err)
		h.errorResponse(w, "error reading request data", http.StatusInternalServerError)
		return
	}
	var cgwr requests.CreateGitWorkflow
	err = json.Unmarshal(reqBody, &cgwr)
	if err != nil {
		level.Error(l).Log("message", "error deserializing request body", "error", err)
		h.errorResponse(w, "error deserializing request body", http.StatusBadRequest)
		return
	}
	if err := cgwr.Validate(); err != nil {
		level.Error(l).Log("message", "error validating request", "error", err)
		h.errorResponse(w, fmt.Sprintf("invalid request, %s", err), http.StatusBadRequest)
		return
	}
	vars := mux.Vars(r)
	projectName := vars["projectName"]
	projectEntry, err := h.dbClient.ReadProjectEntry(ctx, projectName)
	if err != nil {
		level.Error(l).Log("message", "error reading project data", "error", err)
		h.errorResponse(w, "error reading project data", http.StatusInternalServerError)
		return
	}
	cwr, err := h.loadCreateWorkflowRequestFromGit(projectEntry.Repository, cgwr.CommitHash, cgwr.Path)
	if err != nil {
		level.Error(l).Log("message", "error loading workflow data from git", "error", err)
		h.errorResponse(w, "error loading workflow data from git", http.StatusInternalServerError)
		return
	}
	// Capture the result: log.With returns a new logger, so discarding it
	// (as the previous code did) dropped the project/target/framework
	// context from all subsequent log lines.
	l = log.With(l, "project", cwr.ProjectName, "target", cwr.TargetName, "framework", cwr.Framework, "type", cwr.Type, "workflow-template", cwr.WorkflowTemplateName)
	level.Debug(l).Log("message", "creating workflow")
	h.createWorkflowFromRequest(ctx, w, r, a, cwr, l)
}
// createWorkflow creates a workflow from a JSON request body after
// validating the caller's authorization header.
func (h handler) createWorkflow(w http.ResponseWriter, r *http.Request) {
	l := h.requestLogger(r, "op", "create-workflow")
	ctx := r.Context()
	level.Debug(l).Log("message", "validating authorization header for create workflow")
	ah := r.Header.Get("Authorization")
	a, err := credentials.NewAuthorization(ah)
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	if err := a.Validate(); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	level.Debug(l).Log("message", "reading request body")
	var cwr requests.CreateWorkflow
	reqBody, err := io.ReadAll(r.Body)
	if err != nil {
		level.Error(l).Log("message", "error reading workflow request data", "error", err)
		h.errorResponse(w, "error reading workflow request data", http.StatusInternalServerError)
		return
	}
	if err := json.Unmarshal(reqBody, &cwr); err != nil {
		level.Error(l).Log("message", "error deserializing workflow data", "error", err)
		h.errorResponse(w, "error deserializing workflow data", http.StatusBadRequest)
		return
	}
	// Capture the result: log.With returns a new logger, so discarding it
	// (as the previous code did) dropped the project/target/framework
	// context from all subsequent log lines.
	l = log.With(l, "project", cwr.ProjectName, "target", cwr.TargetName, "framework", cwr.Framework, "type", cwr.Type, "workflow-template", cwr.WorkflowTemplateName)
	level.Debug(l).Log("message", "creating workflow")
	h.createWorkflowFromRequest(ctx, w, r, a, cwr, l)
}
// createWorkflowFromRequest validates the request, resolves credentials,
// verifies the project/target exist, submits the Argo workflow, and writes
// the created workflow name as JSON.
// Context is not currently used as Argo has its own and Vault doesn't
// currently support it.
func (h handler) createWorkflowFromRequest(_ context.Context, w http.ResponseWriter, r *http.Request, a *credentials.Authorization, cwr requests.CreateWorkflow, l log.Logger) {
	// Renamed from 'types' to avoid shadowing the imported types package.
	frameworkTypes, err := h.config.listTypes(cwr.Framework)
	if err != nil {
		level.Error(l).Log("message", "error invalid framework", "error", err)
		h.errorResponse(
			w,
			fmt.Sprintf("invalid request, framework must be one of '%s'", strings.Join(h.config.listFrameworks(), " ")),
			http.StatusBadRequest,
		)
		return
	}
	level.Debug(l).Log("message", "validating workflow parameters")
	if err := cwr.Validate(
		cwr.ValidateType(frameworkTypes),
	); err != nil {
		level.Error(l).Log("message", "error validating request", "error", err)
		h.errorResponse(w, fmt.Sprintf("error invalid request, %s", err), http.StatusBadRequest)
		return
	}
	workflowFrom := fmt.Sprintf("workflowtemplate/%s", cwr.WorkflowTemplateName)
	executeContainerImageURI := cwr.Parameters["execute_container_image_uri"]
	environmentVariablesString := generateEnvVariablesString(cwr.EnvironmentVariables)
	level.Debug(l).Log("message", "generating command to execute")
	commandDefinition, err := h.config.getCommandDefinition(cwr.Framework, cwr.Type)
	if err != nil {
		level.Error(l).Log("message", "unable to get command definition", "error", err)
		h.errorResponse(w, "unable to retrieve command definition", http.StatusInternalServerError)
		return
	}
	executeCommand, err := generateExecuteCommand(commandDefinition, environmentVariablesString, cwr.Arguments)
	if err != nil {
		level.Error(l).Log("message", "unable to generate command", "error", err)
		h.errorResponse(w, "unable to generate command", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "creating new credentials provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "bad or unknown credentials provider", "error", err)
		h.errorResponse(w, "bad or unknown credentials provider", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "getting credentials provider token")
	credentialsToken, err := cp.GetToken()
	if err != nil {
		level.Error(l).Log("message", "error getting credentials provider token", "error", err)
		h.errorResponse(w, "error retrieving credentials provider token", http.StatusInternalServerError)
		return
	}
	projectExists, err := cp.ProjectExists(cwr.ProjectName)
	if err != nil {
		level.Error(l).Log("message", "error checking project", "error", err)
		h.errorResponse(w, "error checking project", http.StatusInternalServerError)
		return
	}
	if !projectExists {
		level.Error(l).Log("message", "project does not exist", "error", err)
		h.errorResponse(w, "project does not exist", http.StatusBadRequest)
		return
	}
	targetExists, err := cp.TargetExists(cwr.ProjectName, cwr.TargetName)
	if err != nil {
		level.Error(l).Log("message", "error retrieving target", "error", err)
		h.errorResponse(w, "error retrieving target", http.StatusInternalServerError)
		return
	}
	if !targetExists {
		level.Error(l).Log("message", "target not found")
		h.errorResponse(w, "target not found", http.StatusBadRequest)
		return
	}
	level.Debug(l).Log("message", "creating workflow parameters")
	parameters := workflow.NewParameters(environmentVariablesString, executeCommand, executeContainerImageURI, cwr.TargetName, cwr.ProjectName, cwr.Parameters, credentialsToken)
	workflowLabels := map[string]string{txIDHeader: r.Header.Get(txIDHeader)}
	level.Debug(l).Log("message", "creating workflow")
	workflowName, err := h.argo.Submit(h.argoCtx, workflowFrom, parameters, workflowLabels)
	if err != nil {
		level.Error(l).Log("message", "error creating workflow", "error", err)
		h.errorResponse(w, "error creating workflow", http.StatusInternalServerError)
		return
	}
	l = log.With(l, "workflow", workflowName)
	level.Debug(l).Log("message", "workflow created")
	// Guard the prefix slice: credentialsToken[0:8] previously panicked when
	// the token was shorter than 8 bytes. Only a short prefix is ever logged.
	tokenHead := credentialsToken
	if len(tokenHead) > 8 {
		tokenHead = tokenHead[:8]
	}
	level.Info(l).Log("message", fmt.Sprintf("Received token '%s...'", tokenHead))
	var cwresp workflow.CreateWorkflowResponse
	cwresp.WorkflowName = workflowName
	jsonData, err := json.Marshal(cwresp)
	if err != nil {
		level.Error(l).Log("message", "error serializing workflow response", "error", err)
		h.errorResponse(w, "error serializing workflow response", http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, string(jsonData))
}
// Gets a workflow's status by name, returning 404 when Argo reports
// NotFound and 500 on any other failure.
func (h handler) getWorkflow(w http.ResponseWriter, r *http.Request) {
	workflowName := mux.Vars(r)["workflowName"]
	l := h.requestLogger(r, "op", "get-workflow", "workflow", workflowName)

	level.Debug(l).Log("message", "getting workflow status")
	status, err := h.argo.Status(h.argoCtx, workflowName)
	if err != nil {
		level.Error(l).Log("message", "error getting workflow", "error", err)
		if strings.Contains(err.Error(), "code = NotFound") {
			h.errorResponse(w, "workflow not found", http.StatusNotFound)
		} else {
			h.errorResponse(w, "error getting workflow", http.StatusInternalServerError)
		}
		return
	}

	level.Debug(l).Log("message", "decoding get workflow response")
	jsonData, err := json.Marshal(status)
	if err != nil {
		level.Error(l).Log("message", "error serializing workflow", "error", err)
		h.errorResponse(w, "error serializing workflow", http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, string(jsonData))
}
// Gets a target's stored information; requires admin authorization.
func (h handler) getTarget(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	projectName, targetName := vars["projectName"], vars["targetName"]
	l := h.requestLogger(r, "op", "get-target", "project", projectName, "target", targetName)

	level.Debug(l).Log("message", "validating authorization header for get target")
	auth, err := credentials.NewAuthorization(r.Header.Get("Authorization"))
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	if err := auth.Validate(auth.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}

	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*auth, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}

	exists, err := cp.TargetExists(projectName, targetName)
	if err != nil {
		level.Error(l).Log("message", "error retrieving target", "error", err)
		h.errorResponse(w, "error retrieving target", http.StatusInternalServerError)
		return
	}
	if !exists {
		level.Error(l).Log("message", "target not found")
		h.errorResponse(w, "target not found", http.StatusNotFound)
		return
	}

	level.Debug(l).Log("message", "getting target information")
	info, err := cp.GetTarget(projectName, targetName)
	if err != nil {
		level.Error(l).Log("message", "error retrieving target information", "error", err)
		h.errorResponse(w, "error retrieving target information", http.StatusInternalServerError)
		return
	}

	jsonResult, err := json.Marshal(info)
	if err != nil {
		level.Error(l).Log("message", "error serializing json target data", "error", err)
		h.errorResponse(w, "error serializing json target data", http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, string(jsonResult))
}
// Returns the logs for a workflow, serialized as JSON.
func (h handler) getWorkflowLogs(w http.ResponseWriter, r *http.Request) {
	workflowName := mux.Vars(r)["workflowName"]
	l := h.requestLogger(r, "op", "get-workflow-logs", "workflow", workflowName)

	level.Debug(l).Log("message", "retrieving workflow logs")
	logs, err := h.argo.Logs(h.argoCtx, workflowName)
	if err != nil {
		level.Error(l).Log("message", "error getting workflow logs", "error", err)
		h.errorResponse(w, "error getting workflow logs", http.StatusInternalServerError)
		return
	}

	jsonData, err := json.Marshal(logs)
	if err != nil {
		level.Error(l).Log("message", "error serializing workflow logs", "error", err)
		h.errorResponse(w, "error serializing workflow logs", http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, string(jsonData))
}
// Streams workflow logs to the client as plain text, with proxy buffering
// disabled via X-Accel-Buffering so output flows as it is produced.
func (h handler) getWorkflowLogStream(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain")
	w.Header().Set("X-Accel-Buffering", "no")

	workflowName := mux.Vars(r)["workflowName"]
	l := h.requestLogger(r, "op", "get-workflow-log-stream", "workflow", workflowName)
	level.Debug(l).Log("message", "retrieving workflow logs", "workflow", workflowName)

	if err := h.argo.LogStream(h.argoCtx, workflowName, w); err != nil {
		level.Error(l).Log("message", "error getting workflow logstream", "error", err)
		h.errorResponse(w, "error getting workflow logs", http.StatusInternalServerError)
	}
}
// Returns a new Cello token encoded as "provider:roleID:secret".
func newCelloToken(provider string, tok types.Token) *token {
	encoded := provider + ":" + tok.RoleID + ":" + tok.Secret
	return &token{Token: encoded}
}
// projectExists checks that a project exists in both the credential provider
// and the database. On any failure or miss it writes the HTTP error response
// itself and returns false, so callers should simply return when false.
func (h handler) projectExists(ctx context.Context, l log.Logger, cp credentials.Provider, w http.ResponseWriter, projectName string) (bool, error) {
	// Checking credential provider
	level.Debug(l).Log("message", "checking if project exists")
	exists, err := cp.ProjectExists(projectName)
	if err != nil {
		level.Error(l).Log("message", "error checking credentials provider for project", "error", err)
		h.errorResponse(w, "error retrieving project", http.StatusInternalServerError)
		return false, err
	}
	if !exists {
		level.Debug(l).Log("message", "project does not exist in credentials provider")
		h.errorResponse(w, "project does not exist", http.StatusNotFound)
		// Return literal nil: the previous `return false, err` returned a
		// known-nil err, which obscured that this is a non-error miss.
		return false, nil
	}
	// Checking database
	if _, err = h.dbClient.ReadProjectEntry(ctx, projectName); err != nil {
		level.Error(l).Log("message", "error retrieving project from database", "error", err)
		if errors.Is(err, upper.ErrNoMoreRows) {
			h.errorResponse(w, "project does not exist", http.StatusNotFound)
		} else {
			h.errorResponse(w, "error retrieving project", http.StatusInternalServerError)
		}
		return false, err
	}
	return true, nil
}
// Creates a project: validates admin authorization, records the project in
// the database, creates it in the credential provider, stores the issued
// token, and returns the Cello token (plus token ID) to the caller.
func (h handler) createProject(w http.ResponseWriter, r *http.Request) {
	l := h.requestLogger(r, "op", "create-project")
	level.Debug(l).Log("message", "validating authorization header for create project")
	ah := r.Header.Get("Authorization")
	a, err := credentials.NewAuthorization(ah)
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	// Admin-only endpoint.
	if err := a.Validate(a.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	ctx := r.Context()
	var capp requests.CreateProject
	reqBody, err := io.ReadAll(r.Body)
	if err != nil {
		level.Error(l).Log("message", "error reading request body", "error", err)
		h.errorResponse(w, "error reading request body", http.StatusInternalServerError)
		return
	}
	if err := json.Unmarshal(reqBody, &capp); err != nil {
		level.Error(l).Log("message", "error decoding request", "error", err)
		h.errorResponse(w, "error decoding request", http.StatusBadRequest)
		return
	}
	if err := capp.Validate(); err != nil {
		level.Error(l).Log("message", "error invalid request", "error", err)
		h.errorResponse(w, fmt.Sprintf("invalid request, %s", err.Error()), http.StatusBadRequest)
		return
	}
	l = log.With(l, "project", capp.Name)
	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}
	// Reject duplicates as reported by the credential provider.
	projectExists, err := cp.ProjectExists(capp.Name)
	if err != nil {
		level.Error(l).Log("message", "error checking project", "error", err)
		h.errorResponse(w, "error checking project", http.StatusInternalServerError)
		return
	}
	if projectExists {
		level.Error(l).Log("error", "project already exists")
		h.errorResponse(w, "project already exists", http.StatusBadRequest)
		return
	}
	// The DB row is written before the credential-provider project is
	// created. NOTE(review): if CreateProject below fails, this DB entry is
	// not rolled back — confirm whether an orphaned row is acceptable.
	level.Debug(l).Log("message", "inserting into db")
	err = h.dbClient.CreateProjectEntry(ctx, db.ProjectEntry{
		ProjectID:  capp.Name,
		Repository: capp.Repository,
	})
	if err != nil {
		level.Error(l).Log("message", "error inserting project to db", "error", err)
		h.errorResponse(w, "error creating project", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "creating project")
	token, err := cp.CreateProject(capp.Name)
	if err != nil {
		level.Error(l).Log("message", "error creating project", "error", err)
		h.errorResponse(w, "error creating project", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "inserting token into DB")
	err = h.dbClient.CreateTokenEntry(ctx, token)
	if err != nil {
		level.Error(l).Log("message", "error inserting token into DB", "error", err)
		h.errorResponse(w, "error creating token", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "retrieving Cello token")
	celloToken := newCelloToken("vault", token)
	resp := responses.CreateProject{
		Token:   celloToken.Token,
		TokenID: token.ProjectToken.ID,
	}
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		level.Error(l).Log("message", "error serializing token", "error", err)
		h.errorResponse(w, "error serializing token", http.StatusInternalServerError)
		return
	}
}
// Get a project's name and repository from the database; requires admin
// authorization. Returns 404 when the project row is absent.
func (h handler) getProject(w http.ResponseWriter, r *http.Request) {
	projectName := mux.Vars(r)["projectName"]
	l := h.requestLogger(r, "op", "get-project", "project", projectName)

	level.Debug(l).Log("message", "validating authorization header for get project")
	auth, err := credentials.NewAuthorization(r.Header.Get("Authorization"))
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	if err := auth.Validate(auth.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}

	level.Debug(l).Log("message", "getting project from database")
	entry, err := h.dbClient.ReadProjectEntry(r.Context(), projectName)
	if err != nil {
		level.Error(l).Log("message", "error retrieving project", "error", err)
		status := http.StatusInternalServerError
		if errors.Is(err, upper.ErrNoMoreRows) {
			status = http.StatusNotFound
		}
		h.errorResponse(w, "error retrieving project", status)
		return
	}

	resp := responses.GetProject{
		Name:       projectName,
		Repository: entry.Repository,
	}
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		level.Error(l).Log("message", "error creating response", "error", err)
		h.errorResponse(w, "error creating response object", http.StatusInternalServerError)
	}
}
// Delete a project: requires admin authorization; is a no-op success when
// the project does not exist; refuses while the project still has targets;
// removes the project from the credential provider and then the database.
func (h handler) deleteProject(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	projectName := vars["projectName"]
	l := h.requestLogger(r, "op", "delete-project", "project", projectName)
	level.Debug(l).Log("message", "validating authorization header for delete project")
	ctx := r.Context()
	ah := r.Header.Get("Authorization")
	a, err := credentials.NewAuthorization(ah)
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	if err := a.Validate(a.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "checking if project exists")
	projectExists, err := cp.ProjectExists(projectName)
	if err != nil {
		level.Error(l).Log("message", "error checking project", "error", err)
		h.errorResponse(w, "error checking project", http.StatusInternalServerError)
		return
	}
	if !projectExists {
		// Deleting a non-existent project is treated as success.
		level.Debug(l).Log("message", "no action required because project does not exist")
		return
	}
	level.Debug(l).Log("message", "getting all targets in project")
	targets, err := cp.ListTargets(projectName)
	if err != nil {
		level.Error(l).Log("message", "error getting all targets", "error", err)
		h.errorResponse(w, "error getting all targets", http.StatusInternalServerError)
		return
	}
	if len(targets) > 0 {
		level.Error(l).Log("error", "project has existing targets, not deleting")
		h.errorResponse(w, "project has existing targets, not deleting", http.StatusBadRequest)
		return
	}
	level.Debug(l).Log("message", "deleting project")
	err = cp.DeleteProject(projectName)
	if err != nil {
		level.Error(l).Log("message", "error deleting project", "error", err)
		h.errorResponse(w, "error deleting project", http.StatusInternalServerError)
		return
	}
	// Use the request-scoped logger here: this line previously logged via
	// h.logger, dropping the op/project request context every other log
	// line in this handler carries.
	level.Debug(l).Log("message", "deleting from db")
	if err = h.dbClient.DeleteProjectEntry(ctx, projectName); err != nil {
		level.Error(l).Log("message", "error deleting project in database", "error", err)
		h.errorResponse(w, "error deleting project", http.StatusInternalServerError)
		return
	}
}
// createTarget creates a target for a project. The caller must be an
// authorized admin, the project must exist, and the target name must be
// unused. Responds with an empty JSON object on success.
func (h handler) createTarget(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	projectName := vars["projectName"]
	l := h.requestLogger(r, "op", "create-target", "project", projectName)
	level.Debug(l).Log("message", "validating authorization header for create target")
	ah := r.Header.Get("Authorization")
	a, err := credentials.NewAuthorization(ah)
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	if err := a.Validate(a.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "unauthorized", http.StatusUnauthorized)
		return
	}
	level.Debug(l).Log("message", "reading request body")
	var ctr requests.CreateTarget
	reqBody, err := io.ReadAll(r.Body)
	if err != nil {
		level.Error(l).Log("message", "error reading request data", "error", err)
		h.errorResponse(w, "error reading request data", http.StatusInternalServerError)
		// Bug fix: previously fell through here and tried to unmarshal a nil
		// body, writing a second, misleading error response.
		return
	}
	if err := json.Unmarshal(reqBody, &ctr); err != nil {
		level.Error(l).Log("message", "error processing request", "error", err)
		h.errorResponse(w, "error processing request", http.StatusBadRequest)
		return
	}
	if err := types.Target(ctr).Validate(); err != nil {
		level.Error(l).Log("message", "error invalid request", "error", err)
		h.errorResponse(w, fmt.Sprintf("invalid request, %s", err), http.StatusBadRequest)
		return
	}
	l = log.With(l, "target", ctr.Name)
	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}
	projectExists, err := cp.ProjectExists(projectName)
	if err != nil {
		level.Error(l).Log("message", "error determining if project exists", "error", err)
		// Bug fix: previously fell through on error and reported
		// "project does not exist", masking the real failure.
		h.errorResponse(w, "error checking project", http.StatusInternalServerError)
		return
	}
	// TODO Perhaps this should be 404
	if !projectExists {
		level.Error(l).Log("message", "project does not exist")
		h.errorResponse(w, "project does not exist", http.StatusBadRequest)
		return
	}
	targetExists, err := cp.TargetExists(projectName, ctr.Name)
	if err != nil {
		level.Error(l).Log("message", "error retrieving target", "error", err)
		h.errorResponse(w, "error retrieving target", http.StatusInternalServerError)
		return
	}
	if targetExists {
		level.Error(l).Log("message", "target name must not already exist")
		h.errorResponse(w, "target name must not already exist", http.StatusBadRequest)
		return
	}
	level.Debug(l).Log("message", "creating target")
	err = cp.CreateTarget(projectName, types.Target(ctr))
	if err != nil {
		level.Error(l).Log("message", "error creating target", "error", err)
		h.errorResponse(w, "error creating target", http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, "{}")
}
// deleteTarget removes the named target from a project via the credentials
// provider. Requires an authorized admin. An empty 200 response signals
// success.
func (h handler) deleteTarget(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	projectName, targetName := vars["projectName"], vars["targetName"]
	l := h.requestLogger(r, "op", "delete-target", "project", projectName, "target", targetName)
	level.Debug(l).Log("message", "validating authorization header for delete target")
	a, err := credentials.NewAuthorization(r.Header.Get("Authorization"))
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	if err := a.Validate(a.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "deleting target")
	if err := cp.DeleteTarget(projectName, targetName); err != nil {
		level.Error(l).Log("message", "error deleting target", "error", err)
		h.errorResponse(w, "error deleting target", http.StatusInternalServerError)
		return
	}
}
// listTargets returns the JSON-encoded list of targets for a project.
// Requires an authorized admin; 404s when the project does not exist.
func (h handler) listTargets(w http.ResponseWriter, r *http.Request) {
	projectName := mux.Vars(r)["projectName"]
	l := h.requestLogger(r, "op", "list-targets", "project", projectName)
	level.Debug(l).Log("message", "validating authorization header for target list")
	a, err := credentials.NewAuthorization(r.Header.Get("Authorization"))
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	if err := a.Validate(a.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "checking if project exists")
	exists, err := cp.ProjectExists(projectName)
	if err != nil {
		level.Error(l).Log("message", "error checking project", "error", err)
		h.errorResponse(w, "error checking project", http.StatusInternalServerError)
		return
	}
	if !exists {
		level.Debug(l).Log("message", "project does not exist")
		h.errorResponse(w, "project does not exist", http.StatusNotFound)
		return
	}
	targets, err := cp.ListTargets(projectName)
	if err != nil {
		level.Error(l).Log("message", "error listing targets", "error", err)
		h.errorResponse(w, "error listing targets", http.StatusInternalServerError)
		return
	}
	data, err := json.Marshal(targets)
	if err != nil {
		level.Error(l).Log("message", "error serializing targets", "error", err)
		h.errorResponse(w, "error listing targets", http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, string(data))
}
// updateTarget merges the request body into the stored target for
// {projectName}/{targetName}. The stored name and type always win over the
// request body, the merged target is re-validated, persisted, and echoed
// back as JSON.
func (h handler) updateTarget(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	projectName := vars["projectName"]
	targetName := vars["targetName"]
	l := h.requestLogger(r, "op", "update-target", "project", projectName, "target", targetName)
	level.Debug(l).Log("message", "validating authorization header for update target")
	ah := r.Header.Get("Authorization")
	a, err := credentials.NewAuthorization(ah)
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	if err := a.Validate(a.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "unauthorized", http.StatusUnauthorized)
		return
	}
	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}
	projectExists, err := cp.ProjectExists(projectName)
	if err != nil {
		level.Error(l).Log("message", "error determining if project exists", "error", err)
		// Bug fix: the response previously said "error creating credentials
		// provider" — a copy-paste from the block above.
		h.errorResponse(w, "error checking project", http.StatusInternalServerError)
		return
	}
	if !projectExists {
		level.Error(l).Log("message", "project does not exist")
		h.errorResponse(w, "project does not exist", http.StatusNotFound)
		return
	}
	targetExists, err := cp.TargetExists(projectName, targetName)
	if err != nil {
		level.Error(l).Log("message", "error retrieving target", "error", err)
		h.errorResponse(w, "error retrieving target", http.StatusInternalServerError)
		return
	}
	if !targetExists {
		level.Error(l).Log("message", "target not found")
		h.errorResponse(w, "target not found", http.StatusNotFound)
		return
	}
	target, err := cp.GetTarget(projectName, targetName)
	if err != nil {
		level.Error(l).Log("message", "error retrieving existing target")
		h.errorResponse(w, "error retrieving target", http.StatusInternalServerError)
		return
	}
	// Remember the stored type; the merge below could otherwise let the
	// request body change it.
	targetType := target.Type
	level.Debug(l).Log("message", "reading request body")
	reqBody, err := io.ReadAll(r.Body)
	if err != nil {
		level.Error(l).Log("message", "error reading request data", "error", err)
		h.errorResponse(w, "error reading request data", http.StatusInternalServerError)
		return
	}
	// merge request data into existing target struct for update data
	if err := json.Unmarshal(reqBody, &target); err != nil {
		level.Error(l).Log("message", "error reading target properties data", "error", err)
		h.errorResponse(w, "error reading target properties data", http.StatusInternalServerError)
		return
	}
	// overwrite updated target with existing target name and type values so request body doesn't overwrite these values
	target.Name = targetName
	target.Type = targetType
	if err := target.Validate(); err != nil {
		level.Error(l).Log("message", "error invalid request", "error", err)
		h.errorResponse(w, fmt.Sprintf("invalid request, %s", err), http.StatusBadRequest)
		return
	}
	level.Debug(l).Log("message", "updating target")
	err = cp.UpdateTarget(projectName, target)
	if err != nil {
		level.Error(l).Log("message", "error updating target", "error", err)
		h.errorResponse(w, "error updating target", http.StatusInternalServerError)
		return
	}
	data, err := json.Marshal(target)
	if err != nil {
		level.Error(l).Log("message", "error creating response", "error", err)
		h.errorResponse(w, "error creating response object", http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, string(data))
}
// deleteToken removes a project token from both the database and the
// credentials provider (CP). Lookups are best-effort: a token missing from
// one store is only logged, and the deletion still proceeds against the
// store(s) where the token does exist, so the handler converges toward
// "token gone everywhere".
func (h handler) deleteToken(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	projectName := vars["projectName"]
	tokenID := vars["tokenID"]
	l := h.requestLogger(r, "op", "delete-token", "project", projectName, "tokenID", tokenID)
	level.Debug(l).Log("message", "validating authorization header for delete token")
	ah := r.Header.Get("Authorization")
	a, err := credentials.NewAuthorization(ah)
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	// Only authorized admins may delete tokens.
	if err := a.Validate(a.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}
	ctx := r.Context()
	level.Debug(l).Log("message", "checking if project exists")
	// projectExists receives w, so it presumably writes its own error
	// response on failure or missing project — confirm against its definition.
	projectExists, err := h.projectExists(ctx, l, cp, w, projectName)
	if err != nil || !projectExists {
		return
	}
	// check if token exists in CP and DB
	projectToken, err := cp.GetProjectToken(projectName, tokenID)
	if err != nil {
		// do not return an error if project token is not found
		if !errors.Is(err, credentials.ErrProjectTokenNotFound) {
			level.Error(l).Log("message", "error retrieving token from credentials provider", "error", err)
			h.errorResponse(w, "error retrieving token", http.StatusInternalServerError)
			return
		}
		level.Warn(l).Log("message", "token does not exist in credential provider", "error", err)
	}
	dbProjectToken, err := h.dbClient.ReadTokenEntry(ctx, tokenID)
	if err != nil {
		// do not return an error if project token is not found
		if !errors.Is(err, upper.ErrNoMoreRows) {
			level.Error(l).Log("message", "error retrieving token from DB", "error", err)
			h.errorResponse(w, "error retrieving token", http.StatusInternalServerError)
			return
		}
		level.Warn(l).Log("message", "token does not exist in DB", "error", err)
	}
	// delete token from DB and CP
	// only delete token if exists in DB
	if !dbProjectToken.IsEmpty() {
		level.Debug(l).Log("message", "deleting token from database")
		if err = h.dbClient.DeleteTokenEntry(ctx, tokenID); err != nil {
			level.Error(l).Log("message", "error deleting token from database", "error", err)
			h.errorResponse(w, "error deleting token", http.StatusInternalServerError)
			return
		}
	}
	// only delete token if exists in CP
	if !projectToken.IsEmpty() {
		level.Debug(l).Log("message", "deleting token from credentials provider")
		if err = cp.DeleteProjectToken(projectName, tokenID); err != nil {
			level.Error(l).Log("message", "error deleting token from credentials provider", "error", err)
			h.errorResponse(w, "error deleting token", http.StatusInternalServerError)
			return
		}
	}
}
// createToken mints a new token for a project via the credentials provider,
// records it in the database, and returns the token metadata as JSON.
// Requires an authorized admin and enforces the per-project token limit.
func (h handler) createToken(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	projectName := vars["projectName"]
	l := h.requestLogger(r, "op", "create-token", "project", projectName)
	level.Debug(l).Log("message", "validating authorization header for token create")
	ah := r.Header.Get("Authorization")
	a, err := credentials.NewAuthorization(ah)
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	if err := a.Validate(a.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	ctx := r.Context()
	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}
	// projectExists writes its own response on failure or missing project.
	projectExists, err := h.projectExists(ctx, l, cp, w, projectName)
	if err != nil || !projectExists {
		return
	}
	// Enforce the per-project token limit before minting anything.
	tokens, err := h.dbClient.ListTokenEntries(ctx, projectName)
	if err != nil {
		level.Error(l).Log("message", "error listing tokens from DB", "error", err)
		h.errorResponse(w, "error listing tokens", http.StatusInternalServerError)
		return
	}
	if len(tokens) >= numOfTokensLimit {
		level.Error(l).Log("message", "number of tokens allowed per project has been reached")
		// NOTE(review): this is a client-correctable condition; a 4xx status
		// (e.g. 409) may fit better than 500 — confirm the API contract.
		h.errorResponse(w, "token limit reached", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "creating token")
	token, err := cp.CreateToken(projectName)
	if err != nil {
		level.Error(l).Log("message", "error creating token with credentials provider", "error", err)
		h.errorResponse(w, "error creating token with credentials provider", http.StatusInternalServerError)
		return
	}
	level.Debug(l).Log("message", "inserting into db")
	err = h.dbClient.CreateTokenEntry(ctx, token)
	if err != nil {
		level.Error(l).Log("message", "error inserting token to db", "error", err)
		h.errorResponse(w, "error creating token", http.StatusInternalServerError)
		return
	}
	celloToken := newCelloToken("vault", token)
	resp := responses.CreateToken{
		CreatedAt: token.CreatedAt,
		ExpiresAt: token.ExpiresAt,
		Token:     celloToken.Token,
		TokenID:   token.ProjectToken.ID,
	}
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		level.Error(l).Log("message", "error serializing project token", "error", err)
		// Bug fix: the response previously said "error listing project
		// tokens" — a copy-paste from listTokens.
		h.errorResponse(w, "error creating token", http.StatusInternalServerError)
		return
	}
}
// listTokens returns the JSON-encoded token metadata for a project, read
// from the database. Requires an authorized admin.
func (h handler) listTokens(w http.ResponseWriter, r *http.Request) {
	projectName := mux.Vars(r)["projectName"]
	l := h.requestLogger(r, "op", "list-tokens", "project", projectName)
	level.Debug(l).Log("message", "validating authorization header for token list")
	a, err := credentials.NewAuthorization(r.Header.Get("Authorization"))
	if err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header format", http.StatusUnauthorized)
		return
	}
	if err := a.Validate(a.ValidateAuthorizedAdmin(h.env.AdminSecret)); err != nil {
		h.errorResponse(w, "error unauthorized, invalid authorization header", http.StatusUnauthorized)
		return
	}
	ctx := r.Context()
	level.Debug(l).Log("message", "creating credential provider")
	cp, err := h.newCredentialsProvider(*a, h.env, r.Header, credentials.NewVaultConfig, credentials.NewVaultSvc)
	if err != nil {
		level.Error(l).Log("message", "error creating credentials provider", "error", err)
		h.errorResponse(w, "error creating credentials provider", http.StatusInternalServerError)
		return
	}
	// projectExists writes its own response on failure or missing project.
	if exists, err := h.projectExists(ctx, l, cp, w, projectName); err != nil || !exists {
		return
	}
	tokens, err := h.dbClient.ListTokenEntries(ctx, projectName)
	if err != nil {
		level.Error(l).Log("message", "error listing project tokens", "error", err)
		h.errorResponse(w, "error listing project tokens", http.StatusInternalServerError)
		return
	}
	// Always encode a JSON array (never null), even when there are no tokens.
	resp := make([]responses.ListTokens, 0, len(tokens))
	for _, entry := range tokens {
		resp = append(resp, responses.ListTokens{
			CreatedAt: entry.CreatedAt,
			ExpiresAt: entry.ExpiresAt,
			TokenID:   entry.TokenID,
		})
	}
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		level.Error(l).Log("message", "error serializing project tokens", "error", err)
		h.errorResponse(w, "error listing project tokens", http.StatusInternalServerError)
		return
	}
}
// errorResponse writes a standard JSON error payload with the given HTTP
// status code.
func (h handler) errorResponse(w http.ResponseWriter, message string, httpStatus int) {
	r := generateErrorResponseJSON(message)
	// The body is JSON; set Content-Type before WriteHeader, since headers
	// written after WriteHeader are ignored.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(httpStatus)
	fmt.Fprint(w, r)
}
// generateEnvVariablesString renders the map as a single "env K=V K2=V2"
// string, or "" when the map is empty. Keys are emitted in sorted order so
// the output is deterministic (Go map iteration order is randomized).
func generateEnvVariablesString(environmentVariables map[string]string) string {
	if len(environmentVariables) == 0 {
		return ""
	}
	keys := make([]string, 0, len(environmentVariables))
	for k := range environmentVariables {
		keys = append(keys, k)
	}
	// Insertion sort keeps the output stable without pulling in a new import
	// for what is always a tiny list.
	for i := 1; i < len(keys); i++ {
		for j := i; j > 0 && keys[j] < keys[j-1]; j-- {
			keys[j], keys[j-1] = keys[j-1], keys[j]
		}
	}
	r := "env"
	for _, k := range keys {
		r += fmt.Sprintf(" %s=%s", k, environmentVariables[k])
	}
	return r
}
// requestLogger returns the handler logger enriched with the request's
// transaction ID plus any extra key/value fields.
func (h handler) requestLogger(r *http.Request, fields ...interface{}) log.Logger {
	kv := make([]interface{}, 0, len(fields)+2)
	kv = append(kv, "txid", r.Header.Get(txIDHeader))
	kv = append(kv, fields...)
	return log.With(h.logger, kv...)
}
|
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package component
import (
routev1 "github.com/openshift/api/route/v1"
"github.com/snowdrop/component-operator/pkg/apis/component/v1alpha2"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
"k8s.io/client-go/discovery"
"strings"
)
// newTrue returns a pointer to a bool set to true.
func newTrue() *bool {
	v := true
	return &v
}
// newFalse returns a pointer to a bool set to false.
func newFalse() *bool {
	v := false
	return &v
}
// isTargetClusterRunningOpenShift reports whether the target cluster exposes
// any API group whose name ends in "openshift.io". The answer is computed
// once via the discovery API and memoized in r.onOpenShift; discovery
// failures panic (behavior preserved from the original).
func (r *ReconcileComponent) isTargetClusterRunningOpenShift() bool {
	if r.onOpenShift != nil {
		return *r.onOpenShift
	}
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(r.Config)
	if err != nil {
		panic(err)
	}
	apiList, err := discoveryClient.ServerGroups()
	if err != nil {
		panic(err)
	}
	result := newFalse()
	for _, group := range apiList.Groups {
		if strings.HasSuffix(group.Name, "openshift.io") {
			result = newTrue()
			break
		}
	}
	r.onOpenShift = result
	return *r.onOpenShift
}
// installDevMode creates the dev-mode resources (PVC, Deployment, Service,
// and — when exposed — a Route on OpenShift or an Ingress elsewhere) for the
// component in the given namespace. It returns true when any resource was
// created, and false plus the error on the first failure.
func (r *ReconcileComponent) installDevMode(component *v1alpha2.Component, namespace string) (changed bool, e error) {
	component.ObjectMeta.Namespace = namespace
	// Enrich Component with k8s recommended labels.
	component.ObjectMeta.Labels = r.PopulateK8sLabels(component, "Backend")
	// Check if Service port exists, otherwise define it.
	if component.Spec.Port == 0 {
		component.Spec.Port = 8080 // Add a default port if empty
	}
	// Enrich env vars with default values.
	r.populateEnvVar(component)
	// Bug fix: accumulate "changed" across all resources. Previously each
	// CreateIfNeeded call overwrote the named result, so only the LAST call's
	// value was returned and earlier creations were reported as "no change".
	c, e := r.CreateIfNeeded(component, &corev1.PersistentVolumeClaim{})
	if e != nil {
		return false, e
	}
	changed = changed || c
	if c, e = r.CreateIfNeeded(component, &appsv1.Deployment{}); e != nil {
		return false, e
	}
	changed = changed || c
	if c, e = r.CreateIfNeeded(component, &corev1.Service{}); e != nil {
		return false, e
	}
	changed = changed || c
	if component.Spec.ExposeService {
		if r.isTargetClusterRunningOpenShift() {
			// OpenShift: expose via a Route.
			if c, e = r.CreateIfNeeded(component, &routev1.Route{}); e != nil {
				return false, e
			}
		} else {
			// Vanilla Kubernetes: expose via an Ingress.
			if c, e = r.CreateIfNeeded(component, &v1beta1.Ingress{}); e != nil {
				return false, e
			}
		}
		changed = changed || c
	}
	return changed, nil
}
// deleteDevMode tears down the dev-mode resources of a component.
// Not yet implemented: currently a no-op that always reports success.
func (r *ReconcileComponent) deleteDevMode(component *v1alpha2.Component, namespace string) error {
	// todo
	return nil
}
|
package problem0073
func setZeroes(m [][]int) {
rows := make([]bool, len(m)) // rows[i] == true ,代表 i 行存在 0 元素
cols := make([]bool, len(m[0])) // cols[j] == true ,代表 j 列存在 0 元素
// 逐个检查元素
for i := range m {
for j := range m[i] {
if m[i][j] == 0 {
rows[i] = true
cols[j] = true
}
}
}
// 按行修改
for i := range rows {
if rows[i] {
for j := range m[i] {
m[i][j] = 0
}
}
}
// 按列修改
for i := range cols {
if cols[i] {
for j := range m {
m[j][i] = 0
}
}
}
}
|
package encoder
import (
"fmt"
"github.com/shanexu/logn/common"
)
// Factory constructs an Encoder from its configuration section.
type Factory func(*common.Config) (Encoder, error)

// Config selects and configures an encoder via an inline namespaced
// section (e.g. the namespace name picks the encoder type).
type Config struct {
	Namespace common.ConfigNamespace `logn-config:",inline"`
}

// encoders is the registry of encoder factories, keyed by encoder name.
var encoders = map[string]Factory{}
// RegisterType registers the encoder factory gen under name.
// It panics when an encoder with the same name is already registered.
func RegisterType(name string, gen Factory) {
	if _, dup := encoders[name]; dup {
		panic(fmt.Sprintf("encoder %q already registered", name))
	}
	encoders[name] = gen
}
// CreateEncoder instantiates the encoder selected by cfg's namespace name,
// defaulting to the "json" encoder when no name is configured. It returns an
// error when no factory is registered for the selected name.
func CreateEncoder(cfg Config) (Encoder, error) {
	name := cfg.Namespace.Name()
	if name == "" {
		name = "json" // default to json encoder
	}
	factory := encoders[name]
	if factory == nil {
		return nil, fmt.Errorf("'%v' encoder is not available", name)
	}
	return factory(cfg.Namespace.Config())
}
|
package game
import (
"github.com/go-gl/mathgl/mgl32"
)
// StaticPropV5 holds the fields of a version-5 static prop entry: placement
// (origin/angles), leaf visibility references, rendering flags, and fade and
// lighting parameters.
type StaticPropV5 struct {
	Origin          mgl32.Vec3 // world position
	Angles          mgl32.Vec3 // orientation
	PropType        uint16     // index into the prop name table — presumably; confirm against the reader
	FirstLeaf       uint16     // first visibility leaf index
	LeafCount       uint16     // number of leaves spanned
	Solid           uint8      // solidity mode
	Flags           uint8      // rendering/behavior flags
	Skin            int32      // skin index
	FadeMinDist     float32    // distance where fading starts
	FadeMaxDist     float32    // distance where the prop is fully faded
	LightingOrigin  mgl32.Vec3 // position used for lighting calculations
	ForcedFadeScale float32    // scale applied to forced fading
}
// Accessors below satisfy a shared static-prop interface (hence the Get
// prefix, kept despite Go's usual no-Get convention). Methods returning a
// constant cover fields that the v5 struct does not carry.

// GetOrigin returns the prop's world position.
func (l *StaticPropV5) GetOrigin() mgl32.Vec3 {
	return l.Origin
}

// GetAngles returns the prop's orientation.
func (l *StaticPropV5) GetAngles() mgl32.Vec3 {
	return l.Angles
}

// GetUniformScale always returns 1; v5 props carry no scale field.
func (l *StaticPropV5) GetUniformScale() float32 {
	return 1
}

// GetPropType returns the prop type index.
func (l *StaticPropV5) GetPropType() uint16 {
	return l.PropType
}

// GetFirstLeaf returns the first visibility leaf index.
func (l *StaticPropV5) GetFirstLeaf() uint16 {
	return l.FirstLeaf
}

// GetLeafCount returns the number of leaves spanned by the prop.
func (l *StaticPropV5) GetLeafCount() uint16 {
	return l.LeafCount
}

// GetSolid returns the solidity mode.
func (l *StaticPropV5) GetSolid() uint8 {
	return l.Solid
}

// GetFlags returns the prop flags.
func (l *StaticPropV5) GetFlags() uint8 {
	return l.Flags
}

// GetSkin returns the skin index.
func (l *StaticPropV5) GetSkin() int32 {
	return l.Skin
}

// GetFadeMinDist returns the distance where fading starts.
func (l *StaticPropV5) GetFadeMinDist() float32 {
	return l.FadeMinDist
}

// GetFadeMaxDist returns the distance where the prop is fully faded.
func (l *StaticPropV5) GetFadeMaxDist() float32 {
	return l.FadeMaxDist
}

// GetLightingOrigin returns the position used for lighting calculations.
func (l *StaticPropV5) GetLightingOrigin() mgl32.Vec3 {
	return l.LightingOrigin
}

// GetForcedFadeScale returns the forced fade scale.
func (l *StaticPropV5) GetForcedFadeScale() float32 {
	return l.ForcedFadeScale
}

// GetMinDXLevel always returns 0; not present in the v5 struct.
func (l *StaticPropV5) GetMinDXLevel() uint16 {
	return 0
}

// GetMaxDXLevel always returns 0; not present in the v5 struct.
func (l *StaticPropV5) GetMaxDXLevel() uint16 {
	return 0
}

// GetMinCPULevel always returns 0; not present in the v5 struct.
func (l *StaticPropV5) GetMinCPULevel() uint8 {
	return 0
}

// GetMaxCPULevel always returns 0; not present in the v5 struct.
func (l *StaticPropV5) GetMaxCPULevel() uint8 {
	return 0
}

// GetMinGPULevel always returns 0; not present in the v5 struct.
func (l *StaticPropV5) GetMinGPULevel() uint8 {
	return 0
}

// GetMaxGPULevel always returns 0; not present in the v5 struct.
func (l *StaticPropV5) GetMaxGPULevel() uint8 {
	return 0
}

// GetDiffuseModulation always returns 0; not present in the v5 struct.
func (l *StaticPropV5) GetDiffuseModulation() float32 {
	return 0
}

// GetUnknown always returns 0; not present in the v5 struct.
func (l *StaticPropV5) GetUnknown() float32 {
	return 0
}

// GetDisableXBox360 always returns false; not present in the v5 struct.
func (l *StaticPropV5) GetDisableXBox360() bool {
	return false
}
|
package ptrie
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/stretchr/testify/assert"
"hash/fnv"
"io"
"log"
"reflect"
"testing"
)
// TestValues_Decode verifies that a values store round-trips through
// Encode/Decode for several element types, and that types lacking a custom
// codec (bar) surface errors on both encode and decode.
func TestValues_Decode(t *testing.T) {
	var useCases = []struct {
		description string
		values      []interface{}
		hasError    bool
	}{
		{
			description: "string coding",
			values:      []interface{}{"abc", "xyz", "klm", "xyz", "eee"},
		},
		{
			description: "int coding",
			values:      []interface{}{int(0), int(10), int(30), int(300), int(4)},
		},
		{
			description: "int8 coding",
			values:      []interface{}{int8(3), int8(10), int8(30), int8(121), int8(4)},
		},
		{
			description: "bool coding",
			values:      []interface{}{true, false},
		},
		{
			description: "[]byte coding",
			values:      []interface{}{[]byte("abc"), []byte("xyz")},
		},
		{
			description: "custom type coding",
			values:      []interface{}{&foo{ID: 10, Name: "abc"}, &foo{ID: 20, Name: "xyz"}},
		},
		{
			// bar implements Key but not Encode/Decode, so coding must fail.
			description: "custom type error coding",
			values:      []interface{}{&bar{ID: 10, Name: "abc"}, &bar{ID: 20, Name: "xyz"}},
			hasError:    true,
		},
	}
	for _, useCase := range useCases {
		// Populate a fresh store with the case's values.
		values := newValues()
		for _, item := range useCase.values {
			_, err := values.put(item)
			assert.Nil(t, err, useCase.description)
		}
		writer := new(bytes.Buffer)
		err := values.Encode(writer)
		if useCase.hasError {
			// Both encoding and decoding are expected to fail for types
			// without a codec.
			assert.NotNil(t, err, useCase.description)
			cloned := newValues()
			cloned.useType(reflect.TypeOf(useCase.values[0]))
			err = cloned.Decode(writer)
			assert.NotNil(t, err)
			continue
		}
		if !assert.Nil(t, err, useCase.description) {
			log.Print(err)
			continue
		}
		// Decode into a fresh store and compare element-by-element.
		cloned := newValues()
		cloned.useType(reflect.TypeOf(useCase.values[0]))
		err = cloned.Decode(writer)
		assert.Nil(t, err, useCase.description)
		assert.EqualValues(t, len(values.data), len(cloned.data))
		for i := range values.data {
			assert.EqualValues(t, values.data[i], cloned.data[i], fmt.Sprintf("[%d]: %v", i, useCase.description))
		}
	}
}
type foo struct {
ID int
Name string
}
type bar foo
func (c *bar) Key() interface{} {
h := fnv.New32a()
_, _ = h.Write([]byte(c.Name))
return c.ID + 100000*int(h.Sum32())
}
func (c *foo) Key() interface{} {
h := fnv.New32a()
_, _ = h.Write([]byte(c.Name))
return c.ID + 100000*int(h.Sum32())
}
func (c *foo) Decode(reader io.Reader) error {
id := int64(0)
if err := binary.Read(reader, binary.BigEndian, &id); err != nil {
return err
}
c.ID = int(id)
length := uint16(0)
err := binary.Read(reader, binary.BigEndian, &length)
if err == nil {
name := make([]byte, length)
if err = binary.Read(reader, binary.BigEndian, name); err == nil {
c.Name = string(name)
}
}
return err
}
func (c *foo) Encode(writer io.Writer) error {
err := binary.Write(writer, binary.BigEndian, int64(c.ID))
if err != nil {
return err
}
length := uint16(len(c.Name))
if err = binary.Write(writer, binary.BigEndian, length); err == nil {
err = binary.Write(writer, binary.BigEndian, []byte(c.Name))
}
return err
}
|
/*
* @lc app=leetcode id=344 lang=golang
*
* [344] Reverse String
*
* https://leetcode.com/problems/reverse-string/description/
*
* algorithms
* Easy (67.94%)
* Likes: 1422
* Dislikes: 682
* Total Accepted: 753.6K
* Total Submissions: 1.1M
* Testcase Example: '["h","e","l","l","o"]'
*
* Write a function that reverses a string. The input string is given as an
* array of characters char[].
*
* Do not allocate extra space for another array, you must do this by modifying
* the input array in-place with O(1) extra memory.
*
* You may assume all the characters consist of printable ascii
* characters.
*
*
*
*
* Example 1:
*
*
* Input: ["h","e","l","l","o"]
* Output: ["o","l","l","e","h"]
*
*
*
* Example 2:
*
*
* Input: ["H","a","n","n","a","h"]
* Output: ["h","a","n","n","a","H"]
*
*
*
*
*/
// @lc code=start
// reverseString reverses s in place (LeetCode 344). It delegates to the
// iterative two-pointer implementation, which runs in O(n) time with O(1)
// extra space; the recursive variant is kept for reference but costs O(n)
// stack and can overflow on very long inputs.
func reverseString(s []byte) {
	reverseString1(s)
}

// reverseString2 reverses s via recursion: O(n) time, O(n) stack space.
func reverseString2(s []byte) {
	helper(s, 0, len(s)-1)
}

// helper swaps the outermost pair of s[start..end] and recurses inward.
func helper(s []byte, start, end int) {
	if start >= end {
		return
	}
	s[start], s[end] = s[end], s[start]
	helper(s, start+1, end-1)
}

// reverseString1 reverses s with two pointers: O(n) time, O(1) space.
func reverseString1(s []byte) {
	left, right := 0, len(s)-1
	for left < right {
		s[left], s[right] = s[right], s[left]
		left, right = left+1, right-1
	}
}
// @lc code=end |
package host
import (
"errors"
"io"
"net"
"os"
"path/filepath"
"strconv"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/encoding"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
var (
	// HostCapacityErr is returned when the host has no remaining space for a
	// new file. NOTE(review): Go convention would name this ErrHostCapacity,
	// but renaming would break existing callers, so it is kept as-is.
	HostCapacityErr = errors.New("host is at capacity and can not take more files")
)
// allocate reserves space for a file of the given size and creates the
// backing file on disk, named by a monotonically increasing counter. The
// caller is expected to hold the host lock (the method mutates h without
// locking — confirm against call sites).
func (h *Host) allocate(filesize uint64) (file *os.File, path string, err error) {
	h.fileCounter++
	path = strconv.Itoa(h.fileCounter)
	fullpath := filepath.Join(h.saveDir, path)
	file, err = os.Create(fullpath)
	if err != nil {
		// Bug fix: previously spaceRemaining was deducted before os.Create,
		// so a failed create permanently leaked the reserved space (callers
		// only call deallocate after a successful allocate).
		return
	}
	h.spaceRemaining -= int64(filesize)
	return
}
// deallocate removes the file at path and returns its reserved space to the
// pool. Removal errors are ignored (behavior preserved from the original).
func (h *Host) deallocate(filesize uint64, path string) {
	os.Remove(filepath.Join(h.saveDir, path))
	h.spaceRemaining += int64(filesize)
}
// considerTerms checks that the terms of a potential file contract fall
// within acceptable bounds, as defined by the host. Checks run in order and
// the first failing condition determines the returned error; nil means the
// terms are acceptable.
func (h *Host) considerTerms(terms modules.ContractTerms) error {
	switch {
	case terms.FileSize < h.MinFilesize:
		return errors.New("file is too small")
	case terms.FileSize > h.MaxFilesize:
		return errors.New("file is too large")
	case terms.FileSize > uint64(h.spaceRemaining):
		return HostCapacityErr
	case terms.Duration < h.MinDuration || terms.Duration > h.MaxDuration:
		return errors.New("duration is out of bounds")
	// Note: equality is rejected too — the contract must start strictly
	// below the host's current block height.
	case terms.DurationStart >= h.blockHeight:
		return errors.New("duration cannot start in the future")
	case terms.WindowSize < h.WindowSize:
		return errors.New("challenge window is not large enough")
	// Client must pay at least the host's price per size/duration unit.
	case terms.Price.Cmp(h.Price) < 0:
		return errors.New("price does not match host settings")
	// Host will not post more collateral than it advertises.
	case terms.Collateral.Cmp(h.Collateral) > 0:
		return errors.New("collateral does not match host settings")
	case len(terms.ValidProofOutputs) != 1:
		return errors.New("payment len does not match host settings")
	case terms.ValidProofOutputs[0].UnlockHash != h.UnlockHash:
		return errors.New("payment output does not match host settings")
	case len(terms.MissedProofOutputs) != 1:
		return errors.New("refund len does not match host settings")
	// On a missed proof, coins must burn to the zero address.
	case terms.MissedProofOutputs[0].UnlockHash != types.UnlockHash{}:
		return errors.New("coins are not paying out to correct address")
	}
	return nil
}
// verifyTransaction checks that the provided transaction matches the provided
// contract terms, and that the Merkle root provided is equal to the merkle
// root of the transaction file contract. The expected payout is
// price*size*duration (client) plus collateral*size*duration (host).
func verifyTransaction(txn types.Transaction, terms modules.ContractTerms, merkleRoot crypto.Hash) error {
	// Check that there is only one file contract.
	if len(txn.FileContracts) != 1 {
		return errors.New("transaction should have only one file contract.")
	}
	fc := txn.FileContracts[0]
	// Get the expected payout.
	sizeCurrency := types.NewCurrency64(terms.FileSize)
	durationCurrency := types.NewCurrency64(uint64(terms.Duration))
	clientCost := terms.Price.Mul(sizeCurrency).Mul(durationCurrency)
	hostCollateral := terms.Collateral.Mul(sizeCurrency).Mul(durationCurrency)
	expectedPayout := clientCost.Add(hostCollateral)
	// Validate every field of the contract against the agreed terms; the
	// first mismatch determines the returned error.
	switch {
	case fc.FileSize != terms.FileSize:
		return errors.New("bad file contract file size")
	case fc.FileMerkleRoot != merkleRoot:
		return errors.New("bad file contract Merkle root")
	case fc.WindowStart != terms.DurationStart+terms.Duration:
		return errors.New("bad file contract start height")
	case fc.WindowEnd != terms.DurationStart+terms.Duration+terms.WindowSize:
		return errors.New("bad file contract expiration")
	case fc.Payout.Cmp(expectedPayout) != 0:
		return errors.New("bad file contract payout")
	case len(fc.ValidProofOutputs) != 1:
		return errors.New("bad file contract valid proof outputs")
	case fc.ValidProofOutputs[0].UnlockHash != terms.ValidProofOutputs[0].UnlockHash:
		return errors.New("bad file contract valid proof outputs")
	case len(fc.MissedProofOutputs) != 1:
		return errors.New("bad file contract missed proof outputs")
	case fc.MissedProofOutputs[0].UnlockHash != terms.MissedProofOutputs[0].UnlockHash:
		return errors.New("bad file contract missed proof outputs")
	// The termination unlock hash must be the zero hash.
	case fc.UnlockHash != types.UnlockHash{}:
		return errors.New("bad file contract termination hash")
	}
	return nil
}
// addCollateral takes a transaction and its contract terms and adds the host
// collateral to the transaction, returning the funded transaction and the
// builder used to fund it.
func (h *Host) addCollateral(txn types.Transaction, terms modules.ContractTerms) (fundedTxn types.Transaction, txnBuilder modules.TransactionBuilder, err error) {
	// The collateral scales with both the file size and the duration.
	size := types.NewCurrency64(terms.FileSize)
	duration := types.NewCurrency64(uint64(terms.Duration))
	collateral := terms.Collateral.Mul(size).Mul(duration)

	txnBuilder = h.wallet.RegisterTransaction(txn, nil)
	// Nothing to fund when no collateral is required.
	if collateral.Cmp(types.NewCurrency64(0)) == 0 {
		return txn, txnBuilder, nil
	}
	if err = txnBuilder.FundSiacoins(collateral); err != nil {
		return
	}
	fundedTxn, _ = txnBuilder.View()
	return
}
// rpcContract is an RPC that negotiates a file contract. If the
// negotiation is successful, the file is downloaded and the host begins
// submitting proofs of storage.
//
// Wire protocol, in order: read contract terms, accept or reject them,
// stream in the file data, exchange the unsigned transaction for one
// carrying the host collateral, then countersign the renter-signed
// transaction and submit it to the transaction pool.
func (h *Host) rpcContract(conn net.Conn) (err error) {
	// Read the contract terms.
	var terms modules.ContractTerms
	err = encoding.ReadObject(conn, &terms, maxContractLen)
	if err != nil {
		return
	}

	// Consider the contract terms. If they are unacceptable, return an error
	// describing why.
	lockID := h.mu.RLock()
	err = h.considerTerms(terms)
	h.mu.RUnlock(lockID)
	if err != nil {
		// Report the rejection reason to the renter; any write error replaces
		// the rejection error in the named return.
		err = encoding.WriteObject(conn, err.Error())
		return
	}

	// terms are acceptable; allocate space for file
	lockID = h.mu.Lock()
	file, path, err := h.allocate(terms.FileSize)
	h.mu.Unlock(lockID)
	if err != nil {
		return
	}
	defer file.Close()

	// rollback everything if something goes wrong. This defer reads the
	// named return value 'err', so any failure below releases the space
	// allocated above.
	defer func() {
		lockID := h.mu.Lock()
		defer h.mu.Unlock(lockID)
		if err != nil {
			h.deallocate(terms.FileSize, path)
		}
	}()

	// signal that we are ready to download file
	err = encoding.WriteObject(conn, modules.AcceptTermsResponse)
	if err != nil {
		return
	}

	// simultaneously download file and calculate its Merkle root.
	tee := io.TeeReader(
		// use a LimitedReader to ensure we don't read indefinitely
		io.LimitReader(conn, int64(terms.FileSize)),
		// each byte we read from tee will also be written to file
		file,
	)
	merkleRoot, err := crypto.ReaderMerkleRoot(tee)
	if err != nil {
		return
	}

	// Data has been sent, read in the unsigned transaction with the file
	// contract.
	var unsignedTxn types.Transaction
	err = encoding.ReadObject(conn, &unsignedTxn, maxContractLen)
	if err != nil {
		return
	}

	// Verify that the transaction matches the agreed upon terms, and that the
	// Merkle root in the file contract matches our independently calculated
	// Merkle root.
	err = verifyTransaction(unsignedTxn, terms, merkleRoot)
	if err != nil {
		err = errors.New("transaction does not satisfy terms: " + err.Error())
		return
	}

	// Add the collateral to the transaction, but do not sign the transaction.
	collateralTxn, txnBuilder, err := h.addCollateral(unsignedTxn, terms)
	if err != nil {
		return
	}
	err = encoding.WriteObject(conn, collateralTxn)
	if err != nil {
		return
	}

	// Read in the renter-signed transaction and check that it matches the
	// previously accepted transaction.
	var signedTxn types.Transaction
	err = encoding.ReadObject(conn, &signedTxn, maxContractLen)
	if err != nil {
		return
	}
	if collateralTxn.ID() != signedTxn.ID() {
		err = errors.New("signed transaction does not match the transaction with collateral")
		return
	}

	// Add the signatures from the renter signed transaction, and then sign the
	// transaction, then submit the transaction.
	for _, sig := range signedTxn.TransactionSignatures {
		txnBuilder.AddTransactionSignature(sig)
		// NOTE(review): err is not reassigned by the call above, so this
		// check can never fire — presumably AddTransactionSignature's result
		// should be captured here; confirm against the TransactionBuilder API.
		if err != nil {
			return
		}
	}
	txnSet, err := txnBuilder.Sign(true)
	if err != nil {
		return
	}
	err = h.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		return
	}

	// Add this contract to the host's list of obligations, keyed both by
	// proof height and by contract ID.
	fcid := signedTxn.FileContractID(0)
	fc := signedTxn.FileContracts[0]
	proofHeight := fc.WindowStart + StorageProofReorgDepth
	co := contractObligation{
		ID:           fcid,
		FileContract: fc,
		Path:         path,
	}
	lockID = h.mu.Lock()
	h.obligationsByHeight[proofHeight] = append(h.obligationsByHeight[proofHeight], co)
	h.obligationsByID[fcid] = co
	// NOTE(review): the result of save is discarded — confirm this is a
	// deliberate best-effort persist.
	h.save()
	h.mu.Unlock(lockID)

	// Send an ack to the renter that all is well.
	err = encoding.WriteObject(conn, true)
	if err != nil {
		return
	}

	// TODO: we don't currently watch the blockchain to make sure that the
	// transaction actually gets into the blockchain.
	return
}
|
package auth
import (
"github.com/caos/logging"
"github.com/golang/protobuf/ptypes"
"github.com/caos/zitadel/internal/policy/model"
"github.com/caos/zitadel/pkg/grpc/auth"
)
// passwordComplexityPolicyFromModel maps an internal password complexity
// policy onto its gRPC representation. Timestamp conversion failures are
// logged at debug level and leave the corresponding field nil.
func passwordComplexityPolicyFromModel(policy *model.PasswordComplexityPolicy) *auth.PasswordComplexityPolicy {
	created, err := ptypes.TimestampProto(policy.CreationDate)
	logging.Log("GRPC-Lsi3d").OnError(err).Debug("unable to parse timestamp")

	changed, err := ptypes.TimestampProto(policy.ChangeDate)
	logging.Log("GRPC-P0wr4").OnError(err).Debug("unable to parse timestamp")

	return &auth.PasswordComplexityPolicy{
		Id:           policy.AggregateID,
		CreationDate: created,
		ChangeDate:   changed,
		Description:  policy.Description,
		Sequence:     policy.Sequence,
		MinLength:    policy.MinLength,
		HasLowercase: policy.HasLowercase,
		HasUppercase: policy.HasUppercase,
		HasNumber:    policy.HasNumber,
		HasSymbol:    policy.HasSymbol,
		// An empty aggregate ID marks the system default policy.
		IsDefault: policy.AggregateID == "",
	}
}
|
package cmd
import (
"fmt"
"net"
"net/http/httptest"
"testing"
"github.com/bpicode/fritzctl/config"
"github.com/bpicode/fritzctl/mock"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
)
// TestCommands is a unit test that runs most commands against a mock
// FRITZ!Box server listening on the port configured in the test config.
func TestCommands(t *testing.T) {
	config.Dir = "../testdata/config"
	config.Filename = "config_localhost_https_test.json"

	cases := []struct {
		cmd  *cobra.Command
		args []string
		srv  *httptest.Server
	}{
		{cmd: versionCmd, srv: mock.New().UnstartedServer()},
		{cmd: toggleCmd, args: []string{"SWITCH_3"}, srv: mock.New().UnstartedServer()},
		{cmd: temperatureCmd, args: []string{"19.5", "HKR_1"}, srv: mock.New().UnstartedServer()},
		{cmd: switchOnCmd, args: []string{"SWITCH_1"}, srv: mock.New().UnstartedServer()},
		{cmd: switchOffCmd, args: []string{"SWITCH_2"}, srv: mock.New().UnstartedServer()},
		{cmd: sessionIDCmd, srv: mock.New().UnstartedServer()},
		{cmd: pingCmd, srv: mock.New().UnstartedServer()},
		{cmd: planManifestCmd, args: []string{"../testdata/devicelist_fritzos06.83_plan.yml"}, srv: mock.New().UnstartedServer()},
		{cmd: exportManifestCmd, srv: mock.New().UnstartedServer()},
		{cmd: applyManifestCmd, args: []string{"../testdata/devicelist_fritzos06.83_plan.yml"}, srv: mock.New().UnstartedServer()},
		{cmd: listGroupsCmd, args: []string{}, srv: mock.New().UnstartedServer()},
		{cmd: listLanDevicesCmd, args: []string{}, srv: mock.New().UnstartedServer()},
		{cmd: listLogsCmd, args: []string{}, srv: mock.New().UnstartedServer()},
		{cmd: listSwitchesCmd, srv: mock.New().UnstartedServer()},
		{cmd: listThermostatsCmd, srv: mock.New().UnstartedServer()},
		{cmd: docManCmd, srv: mock.New().UnstartedServer()},
	}

	for i, tc := range cases {
		t.Run(fmt.Sprintf("Test run command %d", i), func(t *testing.T) {
			// Bind the mock server to the fixed port expected by the config;
			// each subtest closes its server before the next one starts.
			listener, err := net.Listen("tcp", ":61666")
			assert.NoError(t, err)
			tc.srv.Listener = listener
			tc.srv.Start()
			defer tc.srv.Close()
			assert.NoError(t, tc.cmd.RunE(tc.cmd, tc.args))
		})
	}
}
// TestCommandsHaveHelp ensures that every command provides
// a help text.
func TestCommandsHaveHelp(t *testing.T) {
	for i, cmd := range coreCommands() {
		cmd := cmd
		t.Run(fmt.Sprintf("test long description of command %d", i), func(t *testing.T) {
			assert.NotEmpty(t, cmd.Long)
		})
	}
}
// TestCommandsHaveUsage tests that command have a usage pattern.
func TestCommandsHaveUsage(t *testing.T) {
	for i, cmd := range allCommands() {
		cmd := cmd
		t.Run(fmt.Sprintf("test usage term of command %d", i), func(t *testing.T) {
			assert.NotEmpty(t, cmd.Use)
		})
	}
}
// TestCommandsHaveSynopsis ensures that every command provides
// short a synopsis text.
func TestCommandsHaveSynopsis(t *testing.T) {
	for i, cmd := range coreCommands() {
		cmd := cmd
		t.Run(fmt.Sprintf("test short description of command %d", i), func(t *testing.T) {
			assert.NotEmpty(t, cmd.Short)
		})
	}
}
// allCommands returns the complete command set: the grouping (parent)
// commands plus every core leaf command.
func allCommands() []*cobra.Command {
	// versionCmd is already part of coreCommands(); listing only the grouping
	// commands here avoids the duplicate entry the original slice contained.
	parents := []*cobra.Command{
		switchCmd,
		manifestCmd,
		listCmd,
		docCmd,
	}
	return append(parents, coreCommands()...)
}
// coreCommands returns every leaf command of the CLI, i.e. the commands that
// perform an action themselves (as opposed to grouping subcommands).
func coreCommands() []*cobra.Command {
	return []*cobra.Command{
		versionCmd,
		toggleCmd,
		temperatureCmd,
		switchOnCmd,
		switchOffCmd,
		sessionIDCmd,
		pingCmd,
		planManifestCmd,
		exportManifestCmd,
		applyManifestCmd,
		listGroupsCmd,
		listLanDevicesCmd,
		listLogsCmd,
		listSwitchesCmd,
		listThermostatsCmd,
		docManCmd,
	}
}
|
package player
import (
"testing"
c "github.com/sergivillar/rock-paper-scissors/config"
)
// TestCpuPlay verifies that the CPU move is always one of the configured
// game options.
func TestCpuPlay(t *testing.T) {
	choice := RockPaperScissor()
	if !contains(c.GameOptions, choice) {
		t.Fatal("Incorrect game option")
	}
}
// TestCreatePlayer checks that Create returns a player carrying the given name.
func TestCreatePlayer(t *testing.T) {
	want := Player{"Player"}
	got, err := Create("Player")
	if err != nil {
		t.Fatal(err)
	}
	if got != want {
		t.Error("Player is not created correctly")
	}
}
// TestCreatePlayerNoName ensures Create rejects an empty player name with the
// documented error message.
func TestCreatePlayerNoName(t *testing.T) {
	expected := "You must provide a player name"
	_, err := Create("")
	if err == nil {
		t.Fatalf("Expected '%v', but instead got no error", expected)
	}
	// The original declared 'expected' without ever comparing it; assert the
	// message so the expected text is actually pinned.
	if err.Error() != expected {
		t.Fatalf("Expected '%v', got '%v'", expected, err.Error())
	}
}
// contains reports whether e is present in s.
func contains(s []string, e string) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
|
package health_test
import (
"fmt"
"time"
"github.com/cerana/cerana/acomm"
healthp "github.com/cerana/cerana/providers/health"
"github.com/cerana/cerana/providers/systemd"
"github.com/pborman/uuid"
)
// TestUptime exercises the health-uptime task against a freshly created
// service, covering the missing-name, unknown-service, satisfied, and
// unsatisfied minimum-uptime cases.
func (s *health) TestUptime() {
	goodStatus := s.addService()
	tests := []struct {
		name        string
		minUptime   time.Duration
		expectedErr string
	}{
		{"", time.Second, "missing arg: name"},
		{"foobar", time.Second, "No such file or directory"},
		{goodStatus.Name, time.Second, ""},
		{goodStatus.Name, time.Minute, ""},
		{goodStatus.Name, time.Hour, "uptime less than expected"},
	}
	for _, test := range tests {
		args := healthp.UptimeArgs{
			Name:      test.name,
			MinUptime: test.minUptime,
		}
		// desc identifies the failing case in assertion messages.
		desc := fmt.Sprintf("%+v", args)
		req, err := acomm.NewRequest(acomm.RequestOptions{
			Task:         "health-uptime",
			ResponseHook: s.responseHook,
			Args:         args,
		})
		s.Require().NoError(err, desc)
		resp, stream, err := s.health.Uptime(req)
		// Uptime communicates only through its error value; it returns no
		// response payload or stream in any of these cases.
		s.Nil(resp, desc)
		s.Nil(stream, desc)
		if test.expectedErr == "" {
			s.Nil(err, desc)
		} else {
			s.Contains(err.Error(), test.expectedErr, desc)
		}
	}
}
// addService registers a systemd unit under a freshly generated UUID name
// and returns its recorded status.
func (s *health) addService() systemd.UnitStatus {
	unitName := uuid.New()
	s.systemd.ManualCreate(systemd.CreateArgs{Name: unitName}, true)
	return s.systemd.Data.Statuses[unitName]
}
|
// Copyright © 2016 Marc Sutter <marc.sutter@swissflow.ch>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
tm "github.com/buger/goterm"
"github.com/msutter/nodetree/models"
"github.com/spf13/cobra"
"os"
"sync"
)
// syncCmd represents the sync command: it synchronizes the pulp nodes of the
// named stage. Node filters can be set via fqdn/tag flags; with no filter the
// user must confirm a full-tree sync interactively (or pass --all).
var syncCmd = &cobra.Command{
	Use:   "sync [stage name]",
	Short: "Synchronization of pulp nodes for a given stage",
	Long: `Synchronization of pulp nodes in a given stage
Filters can be set on Fqdns and tags.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Exactly one positional argument (the stage name) is required.
		if len(args) != 1 {
			ErrorExitWithUsage(cmd, "sync needs a name for the stage")
		}
		if len(pRepositories) == 0 {
			ErrorExitWithUsage(cmd, "sync needs a repository name")
		}
		currentStage := stageTree.GetStageByName(args[0])
		// check for flags: with no node filter and no --all flag, warn and
		// require interactive confirmation before syncing the whole tree.
		if len(pFqdns) == 0 && len(pTags) == 0 && !pAllNode {
			fmt.Printf("\nWARNING: This will sync the complete tree for the '%v' stage!\n", args[0])
			currentStage.Show()
			fmt.Println("")
			fmt.Printf("you can get rid of this warning by setting the --all flag\n")
			fmt.Printf("Are you sure you want to continue? (yes/no)\n")
			userConfirm := askForConfirmation()
			if !userConfirm {
				ErrorExit("sync canceled !")
			} else {
				pAllNode = true
			}
		}
		var stage *models.Stage
		if pAllNode {
			stage = currentStage
		} else {
			stage = currentStage.Filter(pFqdns, pTags)
		}
		// Create a progress channel; a renderer goroutine consumes it and
		// signals completion through renderWg.
		progressChannel := make(chan models.SyncProgress)
		var renderWg sync.WaitGroup
		renderWg.Add(1)
		switch {
		case pSilent:
			go RenderSilentView(progressChannel, &renderWg)
		case pQuiet:
			go RenderQuietView(progressChannel, &renderWg)
		default:
			// NOTE(review): the default path currently falls back to the quiet
			// view; the in-place progress view below is disabled.
			go RenderQuietView(progressChannel, &renderWg)
			// go RenderProgressView(stage, progressChannel, &renderWg)
		}
		if pAllRepositories {
			stage.SyncAll(progressChannel)
		} else {
			stage.Sync(pRepositories, progressChannel)
		}
		renderWg.Wait()
		// Any node error makes the whole sync fail with exit code 1.
		if stage.HasError() {
			switch {
			case pSilent:
				// no report
			default:
				RenderErrorSummary(stage)
			}
			os.Exit(1)
		}
	},
}
// init registers the sync command as a subcommand of the pulp command.
func init() {
	pulpCmd.AddCommand(syncCmd)
	// Here you will define your flags and configuration settings.
	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	//syncCmd.PersistentFlags().String("foo", "", "A help for foo")
	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// syncCmd.Flags().StringSlice("fqdns", []string{}, "Filter on Fqdns")
}
// RenderQuietView renders sync progress as simple append-only output — one
// colored line per event, indented by node depth. No in-place updates.
func RenderQuietView(progressChannel chan models.SyncProgress, wg *sync.WaitGroup) {
	const depthChar = "--- "
	defer wg.Done()

	// printLine writes the depth indentation followed by one colored status
	// line, then flushes the terminal buffer. Print (not Printf) is used so a
	// '%' inside an fqdn, repository, or state can never be misread as a
	// format verb — the original Printf calls were go-vet printf violations.
	printLine := func(sp models.SyncProgress, color int, bold bool) {
		for i := 0; i < sp.Node.Depth; i++ {
			fmt.Print(depthChar)
		}
		line := fmt.Sprintf("%v %v %v", sp.Node.Fqdn, sp.Repository, sp.State)
		if bold {
			line = tm.Bold(line)
		}
		tm.Print(tm.Color(line, color))
		tm.Flush()
	}

	// Last reported state per fqdn/repository, used to de-duplicate the
	// frequently repeated "running" updates.
	syncStates := make(map[string]map[string]string)
	for sp := range progressChannel {
		if _, exists := syncStates[sp.Node.Fqdn]; !exists {
			syncStates[sp.Node.Fqdn] = make(map[string]string)
		}
		switch sp.State {
		case "skipped":
			printLine(sp, tm.MAGENTA, true)
		case "error":
			printLine(sp, tm.RED, true)
		case "running":
			// only output state changes
			if syncStates[sp.Node.Fqdn][sp.Repository] != sp.State {
				printLine(sp, tm.BLUE, false)
			}
			syncStates[sp.Node.Fqdn][sp.Repository] = sp.State
		case "finished":
			printLine(sp, tm.GREEN, true)
		}
	}
}
// RenderSilentView drains the progress channel without rendering anything,
// signaling completion through the wait group once the channel closes.
func RenderSilentView(progressChannel chan models.SyncProgress, wg *sync.WaitGroup) {
	defer wg.Done()
	for range progressChannel {
		// do nothing
	}
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package physical
import (
"bytes"
"fmt"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
)
// Required properties are interesting characteristics of an expression that
// impact its layout, presentation, or location, but not its logical content.
// Examples include row order, column naming, and data distribution (physical
// location of data ranges). Physical properties exist outside of the relational
// algebra, and arise from both the SQL query itself (e.g. the non-relational
// ORDER BY operator) and by the selection of specific implementations during
// optimization (e.g. a merge join requires the inputs to be sorted in a
// particular order).
//
// Required properties are derived top-to-bottom - there is a required physical
// property on the root, and each expression can require physical properties on
// one or more of its operands. When an expression is optimized, it is always
// with respect to a particular set of required physical properties. The goal
// is to find the lowest cost expression that provides those properties while
// still remaining logically equivalent.
type Required struct {
	// Presentation specifies the naming, membership (including duplicates),
	// and order of result columns. If Presentation is not defined, then no
	// particular column presentation is required or provided.
	Presentation Presentation

	// Ordering specifies the sort order of result rows. Rows can be sorted by
	// one or more columns, each of which can be sorted in either ascending or
	// descending order. If Ordering is not defined, then no particular ordering
	// is required or provided.
	Ordering OrderingChoice

	// LimitHint specifies a "soft limit" to the number of result rows that may
	// be required of the expression. If requested, an expression will still need
	// to return all result rows, but it can be optimized based on the assumption
	// that only the hinted number of rows will be needed.
	// A LimitHint of 0 indicates "no limit". The LimitHint is an intermediate
	// float64 representation, and can be converted to an integer number of rows
	// using math.Ceil.
	LimitHint float64
}
// MinRequired are the default physical properties that require nothing and
// provide nothing. It is the canonical zero value of Required.
var MinRequired = &Required{}
// Defined is true if any physical property is defined. If none is defined,
// then this is an instance of MinRequired.
func (p *Required) Defined() bool {
	switch {
	case !p.Presentation.Any():
		return true
	case !p.Ordering.Any():
		return true
	default:
		return p.LimitHint != 0
	}
}
// ColSet returns the set of columns used by any of the physical properties.
func (p *Required) ColSet() opt.ColSet {
	cols := p.Ordering.ColSet()
	for i := range p.Presentation {
		cols.Add(p.Presentation[i].ID)
	}
	return cols
}
// String formats the defined properties as space-separated "[name: value]"
// segments, or "[]" when no property is defined.
func (p *Required) String() string {
	var buf bytes.Buffer

	// output appends one "[name: value]" segment, space-separated from any
	// previously written segment.
	output := func(name string, fn func(*bytes.Buffer)) {
		if buf.Len() != 0 {
			buf.WriteByte(' ')
		}
		buf.WriteByte('[')
		buf.WriteString(name)
		buf.WriteString(": ")
		fn(&buf)
		buf.WriteByte(']')
	}

	if !p.Presentation.Any() {
		output("presentation", p.Presentation.format)
	}
	if !p.Ordering.Any() {
		output("ordering", p.Ordering.Format)
	}
	if p.LimitHint != 0 {
		output("limit hint", func(buf *bytes.Buffer) { fmt.Fprintf(buf, "%.2f", p.LimitHint) })
	}

	// Handle empty properties case.
	if buf.Len() == 0 {
		return "[]"
	}
	return buf.String()
}
// Equals returns true if the two physical properties are identical.
func (p *Required) Equals(rhs *Required) bool {
	if !p.Presentation.Equals(rhs.Presentation) {
		return false
	}
	if !p.Ordering.Equals(&rhs.Ordering) {
		return false
	}
	return p.LimitHint == rhs.LimitHint
}
// Presentation specifies the naming, membership (including duplicates), and
// order of result columns that are required of or provided by an operator.
// While it cannot add unique columns, Presentation can rename, reorder,
// duplicate and discard columns. If Presentation is not defined, then no
// particular column presentation is required or provided. For example:
//   a.y:2 a.x:1 a.y:2 column1:3
type Presentation []opt.AliasedColumn
// Any is true if any column presentation is allowed or can be provided.
// Only a nil Presentation is "any"; an empty non-nil slice is a concrete
// zero-column presentation (see Equals).
func (p Presentation) Any() bool {
	return p == nil
}
// Equals returns true iff this presentation exactly matches the given
// presentation.
func (p Presentation) Equals(rhs Presentation) bool {
	// The 0 column presentation is not the same as the nil presentation.
	if p.Any() != rhs.Any() || len(p) != len(rhs) {
		return false
	}
	for i := range p {
		if p[i] != rhs[i] {
			return false
		}
	}
	return true
}
// String formats the presentation as comma-separated "alias:id" pairs.
func (p Presentation) String() string {
	var buf bytes.Buffer
	p.format(&buf)
	return buf.String()
}
// format writes the presentation to buf as comma-separated "alias:id" pairs.
func (p Presentation) format(buf *bytes.Buffer) {
	for i, col := range p {
		if i != 0 {
			buf.WriteByte(',')
		}
		fmt.Fprintf(buf, "%s:%d", col.Alias, col.ID)
	}
}
|
// generated by stringer -type=ErrorCode; DO NOT EDIT
package meechum
import "fmt"
// _ErrorCode_name concatenates the names of all ErrorCode values;
// _ErrorCode_index holds each name's start offset within it.
const _ErrorCode_name = "OKWARNINGFATAL"

var _ErrorCode_index = [...]uint8{0, 2, 9, 14}

// String returns the name of the ErrorCode, or "ErrorCode(n)" for values
// outside the known range. This file is generated by stringer — regenerate
// it rather than editing the logic by hand.
func (i ErrorCode) String() string {
	if i+1 >= ErrorCode(len(_ErrorCode_index)) {
		return fmt.Sprintf("ErrorCode(%d)", i)
	}
	return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]]
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jdcloud
import (
"fmt"
commodels "github.com/jdcloud-api/jdcloud-sdk-go/services/common/models"
"github.com/jdcloud-api/jdcloud-sdk-go/services/vpc/apis"
"github.com/jdcloud-api/jdcloud-sdk-go/services/vpc/client"
"github.com/jdcloud-api/jdcloud-sdk-go/services/vpc/models"
"yunion.io/x/pkg/util/netutils"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
"yunion.io/x/onecloud/pkg/util/rbacutils"
)
// SNetwork wraps a JD Cloud subnet and the wire it belongs to, implementing
// the cloud-provider network interface.
type SNetwork struct {
	multicloud.SResourceBase
	multicloud.JdcloudTags
	// wire is the back-reference to the owning wire; set by the caller.
	wire *SWire

	models.Subnet
}
// GetId returns the cloud-assigned subnet ID.
func (n *SNetwork) GetId() string {
	return n.SubnetId
}

// GetName returns the subnet's display name.
func (n *SNetwork) GetName() string {
	return n.SubnetName
}

// GetGlobalId returns the globally unique ID, which is the subnet ID.
func (n *SNetwork) GetGlobalId() string {
	return n.GetId()
}

// GetStatus always reports the network as available.
func (n *SNetwork) GetStatus() string {
	return api.NETWORK_STATUS_AVAILABLE
}

// Refresh is a no-op; the subnet data is not re-fetched.
func (n *SNetwork) Refresh() error {
	return nil
}

// IsEmulated reports that this is a real (non-emulated) cloud resource.
func (n *SNetwork) IsEmulated() bool {
	return false
}

// GetProjectId returns an empty project ID.
func (n *SNetwork) GetProjectId() string {
	return ""
}

// GetIWire returns the wire this network is attached to.
func (n *SNetwork) GetIWire() cloudprovider.ICloudWire {
	return n.wire
}

// GetIpStart returns the subnet's first IP as reported by the API.
func (n *SNetwork) GetIpStart() string {
	return n.StartIp
}

// GetIpEnd returns the subnet's last IP as reported by the API.
func (n *SNetwork) GetIpEnd() string {
	return n.EndIp
}

// Cidr returns the subnet's CIDR prefix.
func (n *SNetwork) Cidr() string {
	return n.AddressPrefix
}

// GetIpMask returns the CIDR mask length. A parse failure is ignored and
// yields the zero-value prefix's mask length.
func (n *SNetwork) GetIpMask() int8 {
	pref, _ := netutils.NewIPV4Prefix(n.Cidr())
	return pref.MaskLen
}

// GetGateway returns an empty gateway address.
func (n *SNetwork) GetGateway() string {
	return ""
}

// GetServerType classifies the network as a guest network.
func (n *SNetwork) GetServerType() string {
	return api.NETWORK_TYPE_GUEST
}

// GetIsPublic reports that the network is shared.
func (n *SNetwork) GetIsPublic() bool {
	return true
}

// GetPublicScope returns the domain-level sharing scope.
func (n *SNetwork) GetPublicScope() rbacutils.TRbacScope {
	return rbacutils.ScopeDomain
}

// Delete is not implemented for JD Cloud subnets.
func (n *SNetwork) Delete() error {
	return cloudprovider.ErrNotImplemented
}

// GetAllocTimeoutSeconds returns the IP allocation timeout in seconds.
func (n *SNetwork) GetAllocTimeoutSeconds() int {
	return 120
}
// GetNetworks returns one page of subnets in the given VPC (all VPCs when
// vpcId is empty), along with the total subnet count.
func (r *SRegion) GetNetworks(vpcId string, pageNumber int, pageSize int) ([]SNetwork, int, error) {
	filters := []commodels.Filter{}
	if vpcId != "" {
		filters = append(filters, commodels.Filter{
			Name:   "vpcId",
			Values: []string{vpcId},
		})
	}
	req := apis.NewDescribeSubnetsRequestWithAllParams(r.ID, &pageNumber, &pageSize, filters)
	// Use a distinct variable name so the 'client' package is not shadowed.
	vpcClient := client.NewVpcClient(r.Credential)
	vpcClient.Logger = Logger{}
	resp, err := vpcClient.DescribeSubnets(req)
	if err != nil {
		return nil, 0, err
	}
	if resp.Error.Code >= 400 {
		// Explicit "%s" verb: the API message must never be interpreted as a
		// format string (the original fmt.Errorf(msg) was a vet violation).
		return nil, 0, fmt.Errorf("%s", resp.Error.Message)
	}
	nets := make([]SNetwork, len(resp.Result.Subnets))
	for i := range nets {
		nets[i] = SNetwork{
			Subnet: resp.Result.Subnets[i],
		}
	}
	return nets, resp.Result.TotalCount, nil
}
// GetNetworkById fetches a single subnet by its ID.
func (r *SRegion) GetNetworkById(id string) (*SNetwork, error) {
	req := apis.NewDescribeSubnetRequest(r.ID, id)
	// Use a distinct variable name so the 'client' package is not shadowed.
	vpcClient := client.NewVpcClient(r.Credential)
	vpcClient.Logger = Logger{}
	resp, err := vpcClient.DescribeSubnet(req)
	if err != nil {
		return nil, err
	}
	if resp.Error.Code >= 400 {
		// Explicit "%s" verb: the API message must never be interpreted as a
		// format string (the original fmt.Errorf(msg) was a vet violation).
		return nil, fmt.Errorf("%s", resp.Error.Message)
	}
	return &SNetwork{
		Subnet: resp.Result.Subnet,
	}, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.