text stringlengths 11 4.05M |
|---|
/*
* @lc app=leetcode.cn id=807 lang=golang
*
* [807] 保持城市天际线
*/
package main
import (
"math"
)
// @lc code=start
// maxIncreaseKeepingSkyline returns the total amount by which building
// heights in grid can be raised without altering the skyline seen from
// any of the four cardinal directions: each cell may grow up to the
// minimum of its row maximum and column maximum (LeetCode 807).
func maxIncreaseKeepingSkyline(grid [][]int) int {
	rows, cols := len(grid), len(grid[0])
	rowMax := make([]int, rows)
	colMax := make([]int, cols)
	// Skyline viewed from the side: per-row maxima.
	for r := range grid {
		rowMax[r] = grid[r][0]
		for _, h := range grid[r][1:] {
			if h > rowMax[r] {
				rowMax[r] = h
			}
		}
	}
	// Skyline viewed from the top/bottom: per-column maxima.
	for c := 0; c < cols; c++ {
		colMax[c] = grid[0][c]
		for r := range grid {
			if grid[r][c] > colMax[c] {
				colMax[c] = grid[r][c]
			}
		}
	}
	// Sum the headroom of every cell.
	total := 0
	for r := range grid {
		for c, h := range grid[r] {
			total += int(math.Min(float64(rowMax[r]), float64(colMax[c]))) - h
		}
	}
	return total
}
// func main() {
// fmt.Println(maxIncreaseKeepingSkyline([][]int{
// {3, 0, 8, 4},
// {2, 4, 5, 7},
// {9, 2, 6, 3},
// {0, 3, 1, 0},
// }))
// fmt.Println(maxIncreaseKeepingSkyline([][]int{
// {0, 0, 0, 0},
// {0, 0, 0, 0},
// {0, 0, 0, 0},
// {0, 0, 0, 0},
// }))
// }
// @lc code=end
|
/**
* Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
* The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
*/
// isValid reports whether s is a correctly matched and nested bracket
// sequence over '(', ')', '{', '}', '[' and ']': "()" and "()[]{}" are
// valid, "(]" and "([)]" are not. The empty string is valid.
//
// The original implementation matched closers by checking that the
// numeric distance to the stacked opener was in [0,2]; that trick is
// opaque and also accepts stray non-bracket pairs such as '\'' + ')'
// (distance 2). An explicit closer→opener table is clearer and strict.
func isValid(s string) bool {
	pairs := map[rune]rune{')': '(', ']': '[', '}': '{'}
	stack := make([]rune, 0, len(s))
	for _, c := range s {
		if open, isCloser := pairs[c]; isCloser {
			// A closer must match the most recent unmatched opener.
			if len(stack) == 0 || stack[len(stack)-1] != open {
				return false
			}
			stack = stack[:len(stack)-1]
		} else {
			stack = append(stack, c)
		}
	}
	// Valid only if every opener was closed.
	return len(stack) == 0
}
|
package config
import (
env "github.com/mauhftw/genialo/helpers"
)
// TODO: Add a prefix env variable
// Define environment variables here
var (
	// GithubAccessToken is read from the GITHUB_CHANGELOG_TOKEN
	// environment variable, with "github_token" as the fallback default.
	// NOTE(review): the unchecked .(string) assertion panics if the
	// helper ever returns a non-string — confirm env.GetEnvVar's contract.
	GithubAccessToken = env.GetEnvVar("GITHUB_CHANGELOG_TOKEN", "github_token").(string)
)
|
package annotations
import (
"strings"
"time"
"github.com/haproxytech/config-parser/v3/types"
"github.com/haproxytech/kubernetes-ingress/controller/haproxy/api"
"github.com/haproxytech/kubernetes-ingress/controller/store"
)
// DefaultTimeout handles a single "timeout-*" ingress annotation and
// applies its parsed value to HAProxy's defaults section via the client.
type DefaultTimeout struct {
	name   string               // full annotation name, e.g. "timeout-connect"
	data   *types.SimpleTimeout // parsed value; nil means "remove the timeout"
	client api.HAProxyClient
}
// NewDefaultTimeout creates a DefaultTimeout annotation handler for the
// annotation named n, applying changes through client c.
func NewDefaultTimeout(n string, c api.HAProxyClient) *DefaultTimeout {
	return &DefaultTimeout{name: n, client: c}
}
// GetName returns the annotation name this handler is registered for.
func (a *DefaultTimeout) GetName() string {
	return a.name
}
// Parse validates and normalizes the annotation value into a HAProxy
// simple timeout. An empty annotation is only processed when forceParse
// is set; a deleted annotation leaves a.data nil so Update removes the
// timeout from the defaults section.
func (a *DefaultTimeout) Parse(input store.StringW, forceParse bool) error {
	if input.Status == store.EMPTY && !forceParse {
		return ErrEmptyStatus
	}
	if input.Status == store.DELETED {
		// a.data stays nil; Update interprets nil as "remove".
		return nil
	}
	// Accept any Go duration syntax (e.g. "5s", "2m", "1h30m").
	timeout, err := time.ParseDuration(input.Value)
	if err != nil {
		return err
	}
	s := timeout.String()
	// Duration.String produces forms like "5m0s" or "1h0m"; strip the
	// redundant trailing zero component ("…m0s" -> "…m", "…h0m" -> "…h")
	// for a cleaner configuration value.
	if strings.HasSuffix(s, "m0s") {
		s = s[:len(s)-2]
	}
	if strings.HasSuffix(s, "h0m") {
		s = s[:len(s)-2]
	}
	a.data = &types.SimpleTimeout{Value: s}
	return nil
}
// Update pushes the parsed timeout to HAProxy's defaults section. A nil
// a.data (annotation deleted or never parsed) removes the timeout.
func (a *DefaultTimeout) Update() error {
	// The client API expects the bare timeout name without its prefix.
	timeout := strings.TrimPrefix(a.name, "timeout-")
	if a.data == nil {
		logger.Infof("Removing default timeout-%s ", timeout)
		return a.client.DefaultTimeout(timeout, nil)
	}
	logger.Infof("Setting default timeout-%s to %s", timeout, a.data.Value)
	return a.client.DefaultTimeout(timeout, a.data)
}
|
package commands
import (
"fmt"
"testing"
"reflect"
"encoding/json"
"github.com/JFrogDev/artifactory-cli-go/utils"
)
// TestConfig saves a fixed set of Artifactory connection details and
// verifies that reading the configuration back yields an identical
// struct. NOTE(review): this writes to the real config file location —
// confirm test isolation.
func TestConfig(t *testing.T){
	inputDetails := utils.ArtifactoryDetails { "http://localhost:8080/artifactory", "admin", "password", "", nil }
	Config(&inputDetails, false, false)
	outputConfig := GetConfig()
	printConfigStruct(&inputDetails)
	printConfigStruct(outputConfig)
	if !reflect.DeepEqual(inputDetails, *outputConfig) {
		t.Error("Unexpected configuration was saved to file. Expected: " + configStructToString(&inputDetails) + " Got " + configStructToString(outputConfig))
	}
}
// configStructToString renders the details struct as its JSON encoding.
// The marshal error is deliberately discarded: the struct contains only
// plain exported fields, so Marshal cannot fail here.
func configStructToString(artConfig *utils.ArtifactoryDetails) string {
	marshaledStruct, _ := json.Marshal(*artConfig)
	return string(marshaledStruct)
}
func printConfigStruct(artConfig *utils.ArtifactoryDetails){
stringSturct := configStructToString(artConfig)
fmt.Println(stringSturct)
} |
package main
import "fmt"
// sortColors sorts a slice containing only the values 0, 1 and 2 in
// place, using Dutch-national-flag partitioning: everything at or left
// of low is 0, everything at or right of high is 2, and cur scans the
// unclassified middle region.
func sortColors(nums []int) {
	low, high := -1, len(nums)
	cur := 0
	for cur < high {
		switch nums[cur] {
		case 2:
			// Swap into the tail; do not advance cur, the swapped-in
			// value is still unexamined.
			high--
			nums[cur], nums[high] = nums[high], nums[cur]
		case 0:
			// Swap into the head; the value swapped back is known, so
			// cur can advance.
			low++
			nums[low], nums[cur] = nums[cur], nums[low]
			cur++
		default:
			cur++
		}
	}
}
// Uses a quicksort-style partition: all 0s on the left, 1s in the
// middle, 2s on the right. A 0 or 2 is swapped to its region's edge;
// a 1 lets the scan move on.
func main() {
	nums := []int{2,1,0,1,2,0}
	sortColors(nums)
	fmt.Println(nums)
}
|
package services
import (
"finrgo/exhanges"
"fmt"
"sync"
"time"
)
type (
	// OpenOrdersBusy tracks whether an exchange currently has open
	// orders. The embedded RWMutex guards isBusy against the polling
	// goroutine started by RunGo; the struct must therefore never be
	// copied — pass *OpenOrdersBusy.
	OpenOrdersBusy struct {
		IsDebugRunService bool // when true, RunGo prints a trace line per poll
		isBusy            bool // guarded by the embedded RWMutex
		sync.RWMutex
		Exchange *exhanges.Exchanges
		Sleep    time.Duration // delay between polls
	}
)
// NewServiceOpenOrder creates an open-order tracker for the given
// exchange. NOTE(review): Sleep is left zero, which makes RunGo poll in
// a tight loop until it is set — confirm callers set it.
func NewServiceOpenOrder(ex *exhanges.Exchanges) *OpenOrdersBusy {
	return &OpenOrdersBusy{Exchange: ex}
}
// RunGo starts a background goroutine that periodically polls the
// exchange for open orders and updates the busy flag: busy while any
// order is open or the poll fails, idle when the count reaches zero.
// NOTE(review): the goroutine has no stop signal and runs forever —
// confirm this matches the service's lifetime.
func (oob *OpenOrdersBusy) RunGo(exchange exhanges.IOrders) {
	go func() {
		for {
			if oob.IsDebugRunService {
				fmt.Println("run.....")
			}
			openOrders, err := oob.Exchange.GetCountOpenOrders(exchange)
			if err != nil {
				// Fail safe: a failed poll counts as busy.
				oob.SetBusyOn()
			} else {
				if openOrders == 0 {
					oob.SetBusyOff()
				} else {
					oob.SetBusyOn()
				}
			}
			time.Sleep(oob.Sleep)
		}
	}()
}
// GetIsBusy reports whether the exchange currently has open orders.
// It takes the read lock so the flag is read safely while the RunGo
// goroutine may be writing it concurrently; the lock was previously
// commented out, making this a data race (SetBusyOff does lock).
func (oob *OpenOrdersBusy) GetIsBusy() bool {
	oob.RLock()
	defer oob.RUnlock()
	return oob.isBusy
}
// SetBusyOn marks the exchange as busy under the write lock. The lock
// was previously commented out, racing with SetBusyOff (which locks)
// and GetIsBusy.
func (oob *OpenOrdersBusy) SetBusyOn() {
	oob.Lock()
	defer oob.Unlock()
	oob.isBusy = true
}
// SetBusyOff marks the exchange as idle under the write lock.
func (oob *OpenOrdersBusy) SetBusyOff() {
	oob.Lock()
	defer oob.Unlock()
	oob.isBusy = false
}
|
package env
import (
"context"
"github.com/dollarshaveclub/acyl/pkg/models"
"github.com/dollarshaveclub/acyl/pkg/spawner"
)
// Compile-time check that Manager satisfies spawner.EnvironmentSpawner.
var _ spawner.EnvironmentSpawner = &Manager{}
// Destroy is the same as Delete and is needed to satisfy the interface.
func (m *Manager) Destroy(ctx context.Context, rd models.RepoRevisionData, reason models.QADestroyReason) error {
	return m.Delete(ctx, &rd, reason)
}
// DestroyExplicitly destroys an environment and is triggered by API call.
func (m *Manager) DestroyExplicitly(ctx context.Context, qa *models.QAEnvironment, reason models.QADestroyReason) error {
	return m.Delete(ctx, qa.RepoRevisionDataFromQA(), reason)
}
// Success isn't used by Nitro but is needed to satisfy the interface.
func (m *Manager) Success(context.Context, string) error {
	return nil
}
// Failure isn't used by Nitro but is needed to satisfy the interface.
func (m *Manager) Failure(context.Context, string, string) error {
	return nil
}
|
package main
import (
"encoding/json"
"log"
"net/http"
"fmt"
"net"
"github.com/gorilla/mux"
"sync"
"bufio"
)
// Contact is an address-book entry served by the /contacts endpoints.
type Contact struct {
	Name  string `json:"name"`
	Phone string `json:"phone"`
	Email string `json:"email"`
}
// Planta describes plant/seedbed dimensions exchanged with the service
// on port 8000. The "lenght" misspelling is preserved in both field and
// JSON-tag names because external clients depend on the wire format.
type Planta struct {
	S_lenght   float64 `json:"s_lenght"`
	S_width    float64 `json:"s_width"`
	P_lenght   float64 `json:"p_lenght"`
	P_width    float64 `json:"p_width"`
	Plant_type string  `json:"plant_type"`
}
// contacts is the in-memory contact list; there is no persistence.
// NOTE(review): handlers mutate it without synchronization — confirm the
// server is not expected to survive concurrent writes.
var contacts []Contact
// wg is declared but never used in the visible code.
var wg sync.WaitGroup
// main seeds the contact list, registers the REST routes and the plant
// relay endpoint, and serves HTTP on port 3000.
func main(){
	r := mux.NewRouter()
	contacts = append(contacts, Contact{Name: "Friend_1", Phone: "989999999", Email: "123@gmail.com"})
	contacts = append(contacts, Contact{Name: "Friend_2", Phone: "979999999", Email: "456@gmail.com"})
	contacts = append(contacts, Contact{Name: "Friend_3", Phone: "969999999", Email: "789@gmail.com"})
	r.HandleFunc("/contacts", getContacts).Methods("GET")
	r.HandleFunc("/contacts/{name}", getContact).Methods("GET")
	r.HandleFunc("/contacts", createContact).Methods("POST")
	r.HandleFunc("/contacts/{name}", updateContact).Methods("PUT")
	r.HandleFunc("/contacts/{name}", deleteContact).Methods("DELETE")
	r.HandleFunc("/plant", enviarAPuerto8000).Methods("POST")
	log.Fatal(http.ListenAndServe(":3000", r))
}
// getContacts lists every contact as a JSON array.
func getContacts(w http.ResponseWriter, r *http.Request){
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(contacts)
}
// getContact looks a contact up by the {name} URL parameter. A miss is
// answered with an empty Contact object rather than a 404.
func getContact(w http.ResponseWriter, r *http.Request){
	w.Header().Set("Content-Type", "application/json")
	params := mux.Vars(r)
	for _, item := range contacts {
		if item.Name == params["name"]{
			json.NewEncoder(w).Encode(item)
			return
		}
	}
	json.NewEncoder(w).Encode(&Contact{})
}
// createContact appends the JSON body as a new contact and echoes it
// back. NOTE(review): the decode error is discarded, so a malformed body
// silently stores a zero-value contact — confirm intended.
func createContact(w http.ResponseWriter, r *http.Request){
	w.Header().Set("Content-Type", "application/json")
	var contact Contact
	_ = json.NewDecoder(r.Body).Decode(&contact)
	contacts = append(contacts, contact)
	json.NewEncoder(w).Encode(contact)
}
// enviarAPuerto8000 forwards the plant JSON from the request body to the
// service on localhost:8000, then accepts one reply connection on
// localhost:3001 and echoes the received plant back to the HTTP client.
// All errors, previously discarded (so a failed net.Dial made the
// deferred Close panic on a nil connection), now abort with an HTTP
// error response.
func enviarAPuerto8000(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	var plant Planta
	if err := json.NewDecoder(r.Body).Decode(&plant); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	conn, err := net.Dial("tcp", "localhost:8000")
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	defer conn.Close()
	fmt.Println(plant)
	jsonBytes, err := json.Marshal(plant)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(conn, "%s\n", string(jsonBytes))
	// NOTE(review): binding a listener inside the handler fails on
	// concurrent requests because port 3001 is already taken; consider a
	// single listener created at startup.
	reccon, err := net.Listen("tcp", "localhost:3001")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer reccon.Close()
	accept, err := reccon.Accept()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer accept.Close()
	jsonString, err := bufio.NewReader(accept).ReadString('\n')
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	var received Planta
	if err := json.Unmarshal([]byte(jsonString), &received); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Println("Recibido: ", received)
	json.NewEncoder(w).Encode(received)
}
// deleteContact removes the first contact whose name matches the {name}
// URL parameter and returns the remaining list. Unknown names are a
// silent no-op (the unchanged list is returned).
func deleteContact(w http.ResponseWriter, r *http.Request){
	w.Header().Set("Content-Type", "application/json")
	params := mux.Vars(r)
	for idx, item := range contacts {
		if item.Name == params["name"] {
			contacts = append(contacts[:idx], contacts[idx+1:]...)
			break
		}
	}
	json.NewEncoder(w).Encode(contacts)
}
//Actualizar contacto
func updateContact(w http.ResponseWriter, r *http.Request){
w.Header().Set("Content-Type", "application/json")
params := mux.Vars(r)
for idx, item := range contacts {
if item.Name == params["name"] {
contacts = append(contacts[:idx], contacts[idx+1:]...)
var contact Contact
_ = json.NewDecoder(r.Body).Decode(&contact)
contact.Name = params["name"]
contacts = append(contacts, contact)
json.NewEncoder(w).Encode(contact)
return
}
}
} |
/*
You are given an integer array arr. You can choose a set of integers and remove all the occurrences of these integers in the array.
Return the minimum size of the set so that at least half of the integers of the array are removed.
Example 1:
Input: arr = [3,3,3,3,5,5,5,2,2,7]
Output: 2
Explanation: Choosing {3,7} will make the new array [5,5,5,2,2] which has size 5 (i.e equal to half of the size of the old array).
Possible sets of size 2 are {3,5},{3,2},{5,2}.
Choosing set {2,7} is not possible as it will make the new array [3,3,3,3,5,5,5] which has a size greater than half of the size of the old array.
Example 2:
Input: arr = [7,7,7,7,7,7]
Output: 1
Explanation: The only possible set you can choose is {7}. This will make the new array empty.
Constraints:
2 <= arr.length <= 10^5
arr.length is even.
1 <= arr[i] <= 10^5
*/
package main
import (
"sort"
)
// main exercises minsetsize against the two problem examples plus an
// all-distinct case.
func main() {
	assert(minsetsize([]int{3, 3, 3, 3, 5, 5, 5, 2, 2, 7}) == 2)
	assert(minsetsize([]int{7, 7, 7, 7, 7, 7}) == 1)
	assert(minsetsize([]int{1, 2, 3, 4, 5, 6, 7, 8}) == 4)
}
// assert panics when its argument is false; a minimal test helper for
// the checks in main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// minsetsize returns the minimum number of distinct values whose removal
// (all occurrences of each chosen value) deletes at least half of a.
//
// Greedy: removing the most frequent values first is optimal, so count
// frequencies, take them in descending order, and stop once at least
// half the elements are covered.
//
// The previous version sorted the raw slice by frequency and counted the
// distinct values in its upper half; sort.Slice gives no ordering among
// equal-frequency values, so ties could interleave across the midpoint
// and overcount (e.g. [1,2,1,2,1,2,1,2] must return 1, not 2).
func minsetsize(a []int) int {
	freq := make(map[int]int, len(a))
	for _, v := range a {
		freq[v]++
	}
	counts := make([]int, 0, len(freq))
	for _, c := range freq {
		counts = append(counts, c)
	}
	sort.Sort(sort.Reverse(sort.IntSlice(counts)))
	size, removed := 0, 0
	for _, c := range counts {
		size++
		removed += c
		if removed*2 >= len(a) {
			break
		}
	}
	return size
}
// freqsort returns a copy of a sorted by ascending frequency of each
// value. Ties are now broken by the value itself so equal values are
// always grouped contiguously; the original comparator left tie order
// unspecified, so equal-frequency values could interleave.
func freqsort(a []int) []int {
	m := make(map[int]int, len(a))
	for _, v := range a {
		m[v]++
	}
	r := append([]int{}, a...)
	sort.Slice(r, func(i, j int) bool {
		if m[r[i]] != m[r[j]] {
			return m[r[i]] < m[r[j]]
		}
		return r[i] < r[j]
	})
	return r
}
// uniq returns the number of distinct values in a.
func uniq(a []int) int {
	seen := map[int]struct{}{}
	for _, v := range a {
		seen[v] = struct{}{}
	}
	return len(seen)
}
|
package web_dao
import (
"2021/yunsongcailu/yunsong_server/dial"
"2021/yunsongcailu/yunsong_server/web/web_model"
)
// CommentDao is the data-access interface for article comments.
type CommentDao interface {
	// InsertCommentOne inserts a single comment and returns its id.
	InsertCommentOne(comment *web_model.CommentModel) (id int64,err error)
	// QueryCommentByArticleId fetches up to count comments for the given
	// article starting at offset start, newest first.
	QueryCommentByArticleId(articleId int64,count,start int) (commentData []web_model.CommentModel,err error)
}
// commentDao is the default CommentDao backed by the shared dial.DB.
type commentDao struct {}
// NewCommentDao returns the default CommentDao implementation.
func NewCommentDao() CommentDao {
	return &commentDao{}
}
// InsertCommentOne inserts a single comment and returns its id.
func (cmtD *commentDao) InsertCommentOne(comment *web_model.CommentModel) (id int64,err error) {
	return dial.DB.InsertOne(comment)
}
// QueryCommentByArticleId fetches comments for an article, newest first,
// limited to count rows starting at offset start (xorm Limit(limit, start)).
func (cmtD *commentDao) QueryCommentByArticleId(articleId int64,count,start int) (commentData []web_model.CommentModel,err error) {
	err = dial.DB.Where("article_id = ?",articleId).OrderBy("create_time desc").Limit(count,start).Find(&commentData)
	return
}
package main
import (
"fmt"
"github.com/tylertreat/BoomFilters"
)
// main demonstrates the BoomFilters scalable Bloom filter API: Add/Test
// chaining, the combined TestAndAdd, and Reset. A 1% target false
// positive rate is used.
func main() {
	sbf := boom.NewDefaultScalableBloomFilter(0.01)
	if sbf.Add([]byte("a")).Test([]byte("a")) {
		fmt.Println("contains a")
	}
	// TestAndAdd reports prior membership before inserting.
	if !sbf.TestAndAdd([]byte("b")) {
		fmt.Println("doesn't contain b")
	}
	if sbf.Test([]byte("b")) {
		fmt.Println("now it contains b!")
	}
	sbf.Reset()
}
|
package html
import (
"fmt"
"github.com/elliotchance/gedcom"
"github.com/elliotchance/gedcom/html/core"
"io"
"strings"
)
// symbolLetter is the pseudo first-letter bucket used for names that do
// not start with an alphabetic character.
const symbolLetter = '#'
func write(w io.Writer, data []byte) (int64, error) {
n, err := w.Write(data)
return int64(n), err
}
// writeString writes the UTF-8 bytes of data to w.
func writeString(w io.Writer, data string) (int64, error) {
	return write(w, []byte(data))
}
// appendString writes data to w and returns the byte count, panicking on
// any write error (this package treats output errors as fatal).
func appendString(w io.Writer, data string) int64 {
	n, err := writeString(w, data)
	if err != nil {
		panic(err)
	}
	return n
}
// appendComponent renders component into w, panicking on error.
func appendComponent(w io.Writer, component core.Component) int64 {
	n, err := component.WriteHTMLTo(w)
	if err != nil {
		panic(err)
	}
	return n
}
// writeSprintf formats per fmt.Sprintf and writes the result to w.
func writeSprintf(w io.Writer, format string, args ...interface{}) (int64, error) {
	return writeString(w, fmt.Sprintf(format, args...))
}
// appendSprintf is writeSprintf with errors promoted to panics.
func appendSprintf(w io.Writer, format string, args ...interface{}) int64 {
	n, err := writeSprintf(w, format, args...)
	if err != nil {
		panic(err)
	}
	return n
}
// writeNothing reports a successful zero-byte write; used where a writer
// callback is required but no output is produced.
func writeNothing() (int64, error) {
	return 0, nil
}
// PageIndividuals returns the filename of the individuals index page for
// the given first letter; symbolLetter maps to the shared "symbol"
// bucket page.
func PageIndividuals(firstLetter rune) string {
	switch firstLetter {
	case symbolLetter:
		return "individuals-symbol.html"
	default:
		return fmt.Sprintf("individuals-%c.html", firstLetter)
	}
}
// PageIndividual returns the page filename for an individual, or "#"
// when the individual is living and the visibility policy hides or
// placeholders living people, or when the individual is not found in
// the generated individuals map.
func PageIndividual(document *gedcom.Document, individual *gedcom.IndividualNode, visibility LivingVisibility, placesMap map[string]*place) string {
	if individual.IsLiving() {
		switch visibility {
		case LivingVisibilityHide, LivingVisibilityPlaceholder:
			return "#"
		case LivingVisibilityShow:
			// Proceed.
		}
	}
	// NOTE(review): GetIndividuals is recomputed on every call — if it is
	// expensive, consider caching at the caller.
	individuals := GetIndividuals(document, placesMap)
	for key, value := range individuals {
		if value.Is(individual) {
			return fmt.Sprintf("%s.html", key)
		}
	}
	return "#"
}
// PagePlaces returns the filename of the places index page.
func PagePlaces() string {
	return "places.html"
}
// PagePlace returns the page filename for the place with the given
// pretty name, or "#" if no such place exists in the map.
func PagePlace(place string, places map[string]*place) string {
	for key, value := range places {
		if value.PrettyName == place {
			return fmt.Sprintf("%s.html", key)
		}
	}
	return "#"
}
// PageFamilies returns the filename of the families index page.
func PageFamilies() string {
	return "families.html"
}
// PageSources returns the filename of the sources index page.
func PageSources() string {
	return "sources.html"
}
// PageSource returns the page filename for a source, keyed by its
// GEDCOM pointer.
func PageSource(source *gedcom.SourceNode) string {
	return fmt.Sprintf("%s.html", source.Pointer())
}
// PageStatistics returns the filename of the statistics page.
func PageStatistics() string {
	return "statistics.html"
}
// PageSurnames returns the filename of the surnames index page.
func PageSurnames() string {
	return "surnames.html"
}
// colorForIndividual returns the chart color for an individual based on
// sex; nil individuals and unknown sexes render black.
func colorForIndividual(individual *gedcom.IndividualNode) string {
	if individual == nil {
		return "black"
	}
	sex := individual.Sex()
	switch {
	case sex.IsMale():
		return IndividualMaleColor
	case sex.IsFemale():
		return IndividualFemaleColor
	}
	return "black"
}
// colorClassForSex maps a sex to a Bootstrap color class: "primary" for
// male, "danger" for female, "info" for unknown.
func colorClassForSex(sex *gedcom.SexNode) string {
	switch {
	case sex.IsMale():
		return "primary"
	case sex.IsFemale():
		return "danger"
	}
	return "info"
}
// colorClassForIndividual returns the Bootstrap color class for an
// individual, defaulting to "info" when the individual is nil.
func colorClassForIndividual(individual *gedcom.IndividualNode) string {
	if individual == nil {
		return "info"
	}
	return colorClassForSex(individual.Sex())
}
// getUniqueKey returns s, or s with a numeric suffix appended
// ("s-1", "s-2", ...), such that the result collides with no key in
// either individualMap or placesMap.
func getUniqueKey(individualMap map[string]*gedcom.IndividualNode, s string, placesMap map[string]*place) string {
	for i := 0; ; i++ {
		candidate := s
		if i > 0 {
			candidate = fmt.Sprintf("%s-%d", s, i)
		}
		_, inIndividuals := individualMap[candidate]
		_, inPlaces := placesMap[candidate]
		if !inIndividuals && !inPlaces {
			return candidate
		}
	}
}
// surnameStartsWith reports whether the individual's index-formatted
// name begins with the given letter; empty names bucket under '#'.
func surnameStartsWith(individual *gedcom.IndividualNode, letter rune) bool {
	name := individual.Name().Format(gedcom.NameFormatIndex)
	if name == "" {
		name = "#"
	}
	lowerName := strings.ToLower(name)
	// NOTE(review): indexing byte 0 and converting to rune only handles
	// ASCII leading characters; a multi-byte first rune would compare its
	// first UTF-8 byte — confirm names are expected to start ASCII.
	firstLetter := rune(lowerName[0])
	return firstLetter == letter
}
// individualForNode returns the individual that (directly or nested)
// contains node, or nil if no individual in the document does.
func individualForNode(doc *gedcom.Document, node gedcom.Node) *gedcom.IndividualNode {
	for _, individual := range doc.Individuals() {
		if gedcom.HasNestedNode(individual, node) {
			return individual
		}
	}
	return nil
}
|
package graph
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
import (
"context"
"errors"
"github.com/LFSCamargo/twitter-go/auth"
"github.com/LFSCamargo/twitter-go/constants"
"github.com/LFSCamargo/twitter-go/graph/generated"
"github.com/LFSCamargo/twitter-go/graph/model"
"github.com/LFSCamargo/twitter-go/graph/services/reply"
"github.com/LFSCamargo/twitter-go/graph/services/tweets"
"github.com/LFSCamargo/twitter-go/graph/services/user"
)
// Login authenticates a user and returns an auth token.
func (r *mutationResolver) Login(ctx context.Context, input model.LoginInput) (*model.TokenOutput, error) {
	return user.LoginUser(input)
}
// Register creates a new user account and returns an auth token.
func (r *mutationResolver) Register(ctx context.Context, input model.RegisterInput) (*model.TokenOutput, error) {
	return user.RegisterNewUser(input)
}
// UpdateProfile updates the authenticated user's profile; unauthenticated
// callers receive the shared not-logged-in error.
func (r *mutationResolver) UpdateProfile(ctx context.Context, input *model.UpdateProfileInput) (*model.User, error) {
	userFromCTX := auth.ForContext(ctx)
	if userFromCTX == nil {
		return nil, errors.New(constants.NotLogged)
	}
	return user.UpdateProfile(ctx, input, userFromCTX.ID.Hex())
}
// AddReply creates a reply under the given tweet.
func (r *mutationResolver) AddReply(ctx context.Context, input model.CreateTweet, tweetID string) (*model.Reply, error) {
	return reply.CreateTweet(ctx, input, tweetID)
}
// DeleteReply deletes the reply identified by input (a reply id).
func (r *mutationResolver) DeleteReply(ctx context.Context, input string) (*model.MessageOutput, error) {
	return reply.DeleteReply(ctx, input)
}
// CreateTweet creates a new tweet for the authenticated user.
func (r *mutationResolver) CreateTweet(ctx context.Context, input model.CreateTweet) (*model.Tweet, error) {
	return tweets.CreateTweet(ctx, input)
}
// DeleteTweet deletes the tweet with the given id.
func (r *mutationResolver) DeleteTweet(ctx context.Context, id string) (*model.MessageOutput, error) {
	return tweets.DeleteTweet(ctx, id)
}
// LikeTweet toggles/records a like on the tweet with the given id.
func (r *mutationResolver) LikeTweet(ctx context.Context, id string) (*model.Tweet, error) {
	return tweets.LikeTweet(ctx, id)
}
// LikeReply toggles/records a like on the reply with the given id.
func (r *mutationResolver) LikeReply(ctx context.Context, id string) (*model.Reply, error) {
	return reply.LikeReply(ctx, id)
}
// GetUser returns the user with the given id; requires authentication.
func (r *queryResolver) GetUser(ctx context.Context, id string) (*model.User, error) {
	userFromCTX := auth.ForContext(ctx)
	if userFromCTX == nil {
		return nil, errors.New(constants.NotLogged)
	}
	return user.GetUserFromID(ctx, id)
}
// Me returns the authenticated user's own profile. Unauthenticated
// callers get the shared not-logged-in error.
func (r *queryResolver) Me(ctx context.Context) (*model.User, error) {
	// Renamed from `user`, which shadowed the imported user service
	// package within this function.
	currentUser := auth.ForContext(ctx)
	if currentUser == nil {
		// Use the shared constant so the message matches every other
		// resolver (previously a hard-coded "Not Logged" literal).
		return nil, errors.New(constants.NotLogged)
	}
	return &model.User{
		Email:    currentUser.Email,
		Username: currentUser.Username,
		Picture:  currentUser.Picture,
		ID:       currentUser.ID.Hex(),
	}, nil
}
// Reply returns the reply with the given id.
func (r *queryResolver) Reply(ctx context.Context, id string) (*model.Reply, error) {
	return reply.GetReply(ctx, id)
}
// Tweets returns a paginated list of tweets.
func (r *queryResolver) Tweets(ctx context.Context, input *model.PaginationInput) (*model.TweetsPaginationOutput, error) {
	return tweets.GetTweets(ctx, input)
}
// Tweet returns the tweet with the given id.
func (r *queryResolver) Tweet(ctx context.Context, id string) (*model.Tweet, error) {
	return tweets.GetTweet(ctx, id)
}
// Replies resolves the paginated replies of a tweet.
func (r *tweetResolver) Replies(ctx context.Context, obj *model.Tweet, input *model.PaginationInput) (*model.RepliesPaginationOutput, error) {
	return reply.GetReplies(ctx, input, obj.ID)
}
// Mutation returns generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }
// Query returns generated.QueryResolver implementation.
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
// Tweet returns generated.TweetResolver implementation.
func (r *Resolver) Tweet() generated.TweetResolver { return &tweetResolver{r} }
// The concrete resolver types embed *Resolver so all groups share state.
type mutationResolver struct{ *Resolver }
type queryResolver struct{ *Resolver }
type tweetResolver struct{ *Resolver }
|
// Package sessions provides the interface and default session store for
// RiveScript.
package sessions
/*
Interface SessionManager describes a session manager for user variables
in RiveScript.
The session manager keeps track of getting and setting user variables,
for example when the `<set>` or `<get>` tags are used in RiveScript
or when API functions like `SetUservar()` are called.
By default RiveScript stores user sessions in memory and provides methods
to export and import them (e.g. to persist them when the bot shuts down
so they can be reloaded). If you'd prefer a more 'active' session storage,
for example one that puts user variables into a database or cache, you can
create your own session manager that implements this interface.
*/
type SessionManager interface {
	// Init makes sure a username has a session (creates one if not). It returns
	// the pointer to the user data in either case.
	Init(username string) *UserData
	// Set user variables from a map.
	Set(username string, vars map[string]string)
	// AddHistory adds input and reply to the user's history.
	AddHistory(username, input, reply string)
	// SetLastMatch sets the last matched trigger.
	SetLastMatch(username, trigger string)
	// Get a user variable.
	Get(username string, key string) (string, error)
	// GetAny returns all variables for a user.
	GetAny(username string) (*UserData, error)
	// GetAll returns all variables about all users.
	GetAll() map[string]*UserData
	// GetLastMatch returns the last trigger the user matched.
	GetLastMatch(username string) (string, error)
	// GetHistory returns the user's history.
	GetHistory(username string) (*History, error)
	// Clear all variables for a given user.
	Clear(username string)
	// ClearAll clears all variables for all users.
	ClearAll()
	// Freeze makes a snapshot of a user's variables.
	Freeze(string) error
	// Thaw unfreezes a snapshot of a user's variables and returns an error
	// if the user had no frozen variables.
	Thaw(username string, ThawAction ThawAction) error
}
// HistorySize is the number of entries stored in the history.
const HistorySize int = 9
// UserData is a container for user variables.
type UserData struct {
	Variables map[string]string `json:"vars"`
	LastMatch string            `json:"lastMatch"`
	// Embedded so history methods/fields are reachable directly.
	*History `json:"history"`
}
// History keeps track of recent input and reply history.
type History struct {
	Input []string `json:"input"`
	Reply []string `json:"reply"`
}
// NewHistory creates a History with both buffers pre-filled with
// HistorySize "undefined" placeholder entries. The slices are allocated
// at their final length up front instead of growing via repeated append.
func NewHistory() *History {
	h := &History{
		Input: make([]string, HistorySize),
		Reply: make([]string, HistorySize),
	}
	for i := 0; i < HistorySize; i++ {
		h.Input[i] = "undefined"
		h.Reply[i] = "undefined"
	}
	return h
}
// ThawAction describes the action for the `Thaw()` method.
type ThawAction int

// Valid options for ThawAction. The constants are now explicitly typed
// as ThawAction (previously `Thaw = iota` produced untyped ints).
const (
	// Thaw means to restore the user variables and erase the frozen copy.
	Thaw ThawAction = iota
	// Discard means to cancel the frozen copy and not restore them.
	Discard
	// Keep means to restore the user variables and still keep the frozen copy.
	Keep
)
|
package mylogger
import (
"fmt"
"path"
"runtime"
"strings"
"time"
)
// LogLevel is the severity threshold for log records.
type LogLevel uint16
// Severity levels in ascending order; DEBUG is the most verbose.
const (
	DEBUG LogLevel = iota
	TRACE
	INFO
	WARNING
	ERROR
	FATAL
)
// Logger filters records below its configured level.
type Logger struct {
	level LogLevel
}
// parseLogLevel maps a case-insensitive level name to its LogLevel.
// Unknown names fall back to DEBUG.
func parseLogLevel(s string) LogLevel {
	s = strings.ToLower(s)
	switch s {
	case "debug":
		return DEBUG
	case "trace":
		return TRACE
	case "info":
		return INFO
	case "warning":
		return WARNING
	case "error":
		// This case was missing, silently demoting "error" to DEBUG and
		// making the ERROR level unreachable via parsing.
		return ERROR
	case "fatal":
		return FATAL
	default:
		return DEBUG
	}
}
// NewLog is the constructor: it builds a Logger with the threshold
// parsed from the given level name (unknown names default to DEBUG).
func NewLog(level string) Logger {
	levelLog := parseLogLevel(level)
	return Logger{
		level: levelLog,
	}
}
// log prints a single formatted record for the given level. Callers are
// expected to be two frames above (caller -> level method -> log), hence
// the skip depth of 3 passed to getInfo. Previously the level tag was
// hard-coded to "[DEBUG]" and lv was ignored entirely.
func log(lv LogLevel, msg string) {
	levelNames := [...]string{"DEBUG", "TRACE", "INFO", "WARNING", "ERROR", "FATAL"}
	name := "UNKNOWN"
	if int(lv) < len(levelNames) {
		name = levelNames[lv]
	}
	now := time.Now()
	funcName, fileName, lineNo := getInfo(3)
	nowTime := now.Format("2006-01-02 15:04:05")
	fmt.Printf("[%s] [%s] [%s:%s:%d] %s\n", nowTime, name, fileName, funcName, lineNo, msg)
}
// Debug logs msg at DEBUG level when the logger's threshold allows it.
// The previous implementation was an empty stub that silently dropped
// every message. The call depth (caller -> Debug -> log) matches the
// skip of 3 that log passes to getInfo.
func (l Logger) Debug(msg string) {
	if l.level <= DEBUG {
		log(DEBUG, msg)
	}
}
// Info prints msg unconditionally. NOTE(review): unlike Debug this
// bypasses the shared log() formatter and ignores the configured level —
// confirm whether it should route through log(INFO, msg) behind a level
// check instead.
func (l Logger) Info(msg string) {
	fmt.Println(msg)
}
// getInfo reports the function name, base file name and line number of
// the frame n levels above this call (n as used by runtime.Caller).
// On failure the zero values are returned.
func getInfo(n int) (funcName, fileName string, lineNo int) {
	pc, file, lineNo, ok := runtime.Caller(n)
	if !ok {
		// Println already appends a newline; the old literal "...\n"
		// printed a blank line (flagged by go vet). Also return early so
		// FuncForPC is never called with an invalid pc.
		fmt.Println("runtime.Caller() failed")
		return
	}
	funcName = runtime.FuncForPC(pc).Name()
	fileName = path.Base(file)
	return
}
|
package main
import (
"net"
"fmt"
"os"
)
/*
The server listens for UDP on local port 8888 and receives datagrams
from any client, replying to each message with an acknowledgement.
NOTE(review): this header originally said an "im off" message is
answered with "bye", but the code below actually replies "fuckoff" —
confirm which is intended.
*/
func main() {
	// Resolve the UDP listen address.
	udpAddr, err := net.ResolveUDPAddr("udp", "localhost:8888")
	ServerHandleError(err, "net.ResolveUDPAddr")
	// Bind the UDP listener; one packet conn serves every client.
	udpConn, err := net.ListenUDP("udp", udpAddr)
	ServerHandleError(err, "net.ListenUDP")
	// Reusable receive buffer; messages are truncated at 1024 bytes.
	buffer := make([]byte, 1024)
	// Read datagrams from any client, forever.
	for {
		// Read one datagram and capture the sender's address so the
		// reply can be routed back. NOTE(review): the read error is
		// silently discarded.
		n, remoteAddr, _ := udpConn.ReadFromUDP(buffer)
		// Print the message content.
		clientMsg := string(buffer[:n])
		fmt.Printf("收到来自%v的消息:%s\n", remoteAddr, clientMsg)
		// Reply to the sending client.
		if clientMsg != "im off"{
			udpConn.WriteToUDP([]byte("已阅:"+clientMsg), remoteAddr)
		}else{
			udpConn.WriteToUDP([]byte("fuckoff"), remoteAddr)
		}
	}
}
// ServerHandleError logs err alongside the operation description `when`
// and terminates the process with status 1; it is a no-op for nil errors.
func ServerHandleError(err error, when string) {
	if err == nil {
		return
	}
	fmt.Println(err, when)
	os.Exit(1)
}
|
package middleware
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
_const "github.com/IcanFun/utils/const"
"github.com/IcanFun/utils/utils/log"
"github.com/gin-gonic/gin"
"github.com/IcanFun/utils/i18n"
"github.com/dgrijalva/jwt-go"
goi18n "github.com/nicksnyder/go-i18n/i18n"
"github.com/IcanFun/utils/utils"
)
// CTX is the gin context key under which the request *Context is stored.
const CTX = "api_ctx"
// Authority is the gin context key for the caller's authority data.
const Authority = "api_authority"
// CheckApiKeyFunc validates an API key and returns the claims it grants.
type CheckApiKeyFunc func(apiKey string, ctx *gin.Context) (claims CustomClaims, appErr *utils.AppError)
// Context carries per-request state: parsed claims/token, URL params,
// the first error encountered, translation function, request metadata.
type Context struct {
	CustomClaims CustomClaims
	TokenString string
	Params *ApiParams
	Err *utils.AppError
	T goi18n.TranslateFunc
	RequestId string
	IpAddress string
	Path string
	siteURLHeader string
}
// JWTSecret is the HMAC secret used to verify bearer tokens.
var JWTSecret string
// CheckApiKey is the pluggable API-key validator; nil disables API keys.
var CheckApiKey CheckApiKeyFunc
// ApiHandler wraps an API endpoint that does not require authentication.
func ApiHandler(h func(*Context, http.ResponseWriter, *http.Request)) http.Handler {
	return &handler{
		handleFunc: h,
		requireCustomClaims: false,
		trustRequester: false,
		isApi: true,
	}
}
// ApiCustomClaimsRequired wraps an API endpoint that requires a valid
// authenticated token (custom claims).
func ApiCustomClaimsRequired(h func(*Context, http.ResponseWriter, *http.Request)) http.Handler {
	return &handler{
		handleFunc: h,
		requireCustomClaims: true,
		trustRequester: false,
		isApi: true,
	}
}
// AppHandler wraps a non-API (web page) endpoint; errors render as web
// pages or redirects rather than JSON.
func AppHandler(h func(*Context, http.ResponseWriter, *http.Request)) http.Handler {
	return &handler{
		handleFunc: h,
		requireCustomClaims: false,
		trustRequester: false,
		isApi: false,
	}
}
// handler adapts a Context-aware endpoint function to http.Handler and
// records the policy flags the wrappers above configure.
type handler struct {
	handleFunc func(*Context, http.ResponseWriter, *http.Request)
	requireCustomClaims bool
	trustRequester bool
	isApi bool
}
// ServeHTTP builds the per-request Context, extracts a token (from the
// Authorization header, the session cookie, or the access_token query
// parameter, in that order), verifies it as an HMAC-signed JWT when the
// endpoint requires claims, dispatches the wrapped handler, and renders
// any accumulated error as JSON (API) or a web error/redirect (non-API).
func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	log.Debug("%v - %v", r.Method, r.URL.Path)
	c := &Context{}
	c.T, _ = utils.GetTranslationsAndLocale(r)
	c.RequestId = utils.NewId()
	c.IpAddress = utils.GetIpAddress(r)
	c.Params = ApiParamsFromRequest(r)
	tokenString := ""
	// Token source 1: Authorization header, "Bearer <tok>" or "Token <tok>".
	authHeader := r.Header.Get(_const.HEADER_AUTH)
	if len(authHeader) > 6 && strings.ToUpper(authHeader[0:6]) == _const.HEADER_BEARER {
		tokenString = authHeader[7:]
	} else if len(authHeader) > 5 && strings.ToLower(authHeader[0:5]) == _const.HEADER_TOKEN {
		tokenString = authHeader[6:]
	}
	// Token source 2: session cookie. Cookie-borne tokens on protected
	// endpoints must carry the XML-HTTP-Request header, otherwise the
	// request is treated as a CSRF attempt and the token discarded.
	if len(tokenString) == 0 {
		if cookie, err := r.Cookie(_const.SESSION_COOKIE_TOKEN); err == nil {
			tokenString = cookie.Value
			if h.requireCustomClaims && !h.trustRequester {
				if r.Header.Get(_const.HEADER_REQUESTED_WITH) != _const.HEADER_REQUESTED_WITH_XML {
					c.Err = utils.NewLocAppError("ServeHTTP",
						"api.context.session_expired.app_error", nil,
						"tokenString="+tokenString+" Appears to be a CSRF attempt",
					)
					tokenString = ""
				}
			}
		}
	}
	// Token source 3: access_token query parameter.
	if len(tokenString) == 0 {
		tokenString = r.URL.Query().Get("access_token")
	}
	c.SetSiteURLHeader(GetProtocol(r) + "://" + r.Host)
	w.Header().Set(_const.HEADER_REQUEST_ID, c.RequestId)
	if !h.isApi {
		// Clickjacking protection for rendered pages.
		w.Header().Set("X-Frame-Options", "SAMEORIGIN")
		w.Header().Set("Content-Security-Policy", "frame-ancestors 'self'")
	} else {
		w.Header().Set("Content-Type", "application/json")
		if r.Method == "GET" {
			w.Header().Set("Expires", "0")
		}
	}
	if len(tokenString) != 0 && h.requireCustomClaims {
		// Verify the JWT, accepting only HMAC signing methods to block
		// algorithm-substitution attacks (e.g. "none"/RS256 confusion).
		token, err := jwt.ParseWithClaims(tokenString, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {
			if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
				return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
			}
			return []byte(JWTSecret), nil
		})
		if err != nil {
			c.Err = utils.NewLocAppError("ServeHTTP",
				"api.context.jwt_parse.app_error", nil, "err="+err.Error()+",tokenString="+tokenString,
			)
			c.Err.StatusCode = http.StatusUnauthorized
		} else if claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {
			c.CustomClaims = *claims
			c.TokenString = tokenString
		} else {
			c.Err = utils.NewLocAppError("ServeHTTP",
				"api.context.jwt_parse.app_error", nil, "tokenString="+tokenString,
			)
			c.Err.StatusCode = http.StatusUnauthorized
		}
	}
	c.Path = r.URL.Path
	if c.Err == nil && h.requireCustomClaims {
		c.CustomClaimsRequired()
	}
	if c.Err == nil {
		h.handleFunc(c, w, r)
	}
	// Error rendering: JSON for API endpoints; a login redirect (401) or
	// web error page otherwise.
	if c.Err != nil {
		c.Err.Translate(c.T)
		c.Err.RequestId = c.RequestId
		c.LogError(c.Err)
		c.Err.Where = r.URL.Path
		if h.isApi {
			w.WriteHeader(c.Err.StatusCode)
			RenderJson(w, c.Err)
		} else {
			if c.Err.StatusCode == http.StatusUnauthorized {
				http.Redirect(w, r,
					c.GetSiteURLHeader()+"/?redirect="+url.QueryEscape(r.URL.Path), http.StatusTemporaryRedirect,
				)
			} else {
				utils.RenderWebError(c.Err.Message, c.Err.DetailedError, "", c.Err.StatusCode, w, r)
			}
		}
	}
}
// SetSiteURLHeader stores the site base URL with any trailing slash removed.
func (c *Context) SetSiteURLHeader(url string) {
	c.siteURLHeader = strings.TrimRight(url, "/")
}
// GetSiteURLHeader returns the normalized site base URL.
func (c *Context) GetSiteURLHeader() string {
	return c.siteURLHeader
}
// RemoveCustomClaimsCookie expires both the session-token and user
// cookies by re-setting them empty with MaxAge -1, logging the caller out.
func (c *Context) RemoveCustomClaimsCookie(w http.ResponseWriter, r *http.Request) {
	sessionCookie := &http.Cookie{
		Name: _const.SESSION_COOKIE_TOKEN,
		Value: "",
		Path: "/",
		MaxAge: -1,
		HttpOnly: true,
	}
	userCookie := &http.Cookie{
		Name: _const.SESSION_COOKIE_USER,
		Value: "",
		Path: "/",
		MaxAge: -1,
	}
	http.SetCookie(w, sessionCookie)
	http.SetCookie(w, userCookie)
}
// SetInvalidParam records a bad-request error for an invalid body/query
// parameter.
func (c *Context) SetInvalidParam(parameter string) {
	c.Err = NewInvalidParamError(parameter)
}
// SetInvalidUrlParam records a bad-request error for an invalid URL
// parameter.
func (c *Context) SetInvalidUrlParam(parameter string) {
	c.Err = NewInvalidUrlParamError(parameter)
}
// NewInvalidParamError builds a 400 error naming the offending parameter.
func NewInvalidParamError(parameter string) *utils.AppError {
	err := utils.NewLocAppError("Context",
		i18n.PARAM_ERROR, map[string]interface{}{"Name": parameter}, parameter,
	)
	err.StatusCode = http.StatusBadRequest
	return err
}
// NewInvalidUrlParamError builds a 400 error for a bad URL parameter.
func NewInvalidUrlParamError(parameter string) *utils.AppError {
	err := utils.NewLocAppError("Context",
		"api.context.invalid_body_param.app_error", map[string]interface{}{"Name": parameter}, "",
	)
	err.StatusCode = http.StatusBadRequest
	return err
}
// NewTokenError builds a token-parse error with the non-standard status
// code 600 this codebase uses for auth failures.
func NewTokenError() *utils.AppError {
	err := utils.NewLocAppError("ServeHTTP",
		i18n.JWT_PARSE_ERROR, nil, "token error",
	)
	err.StatusCode = 600
	return err
}
// IsSystemAdmin reports whether the caller's claims include the
// manage-system permission.
func (c *Context) IsSystemAdmin() bool {
	return CustomClaimsHasPermissionTo(c.CustomClaims, PERMISSION_MANAGE_SYSTEM)
}
// CustomClaimsRequired sets a session-expired error (status 600) when no
// authenticated user id is present in the claims.
func (c *Context) CustomClaimsRequired() {
	if len(c.CustomClaims.UserId) == 0 {
		c.Err = utils.NewAppError("",
			i18n.SESSION_EXPIRED_ERROR, nil, "UserRequired "+c.TokenString, 600,
		)
		return
	}
}
// RequireUserId validates the user_id URL parameter, expanding the
// special "me" value to the authenticated user's id. Values shorter than
// 24 characters are rejected (presumably a Mongo ObjectId hex string —
// confirm).
func (c *Context) RequireUserId() *Context {
	if c.Err != nil {
		return c
	}
	if c.Params.UserId == _const.ME {
		c.Params.UserId = c.CustomClaims.UserId
	}
	if len(c.Params.UserId) < 24 {
		c.SetInvalidUrlParam("user_id")
	}
	return c
}
// RequireService ensures the service URL parameter is non-empty.
func (c *Context) RequireService() *Context {
	if c.Err != nil {
		return c
	}
	if len(c.Params.Service) == 0 {
		c.SetInvalidUrlParam("service")
	}
	return c
}
// LogError logs an application error with full request metadata; the
// browser-compatibility error is demoted to debug to avoid log noise.
func (c *Context) LogError(err *utils.AppError) {
	if err.Id == "web.check_browser_compatibility.app_error" {
		c.LogDebug(err)
	} else {
		log.Error(utils.TDefault("api.context.log.error"), c.Path, err.Where, err.StatusCode,
			c.RequestId, c.CustomClaims.UserId, c.IpAddress, err.SystemMessage(utils.TDefault), err.DetailedError,
		)
	}
}
// LogDebug logs the same record as LogError at debug severity.
func (c *Context) LogDebug(err *utils.AppError) {
	log.Debug(utils.TDefault("api.context.log.error"), c.Path, err.Where, err.StatusCode,
		c.RequestId, c.CustomClaims.UserId, c.IpAddress, err.SystemMessage(utils.TDefault), err.DetailedError,
	)
}
func IsApiCall(r *http.Request) bool {
return strings.Index(r.URL.Path, "/api/") == 0
}
// SetPermissionError records a 403 error identifying the caller and the
// permission they lack.
func (c *Context) SetPermissionError(permission *Permission) {
	c.Err = utils.NewLocAppError("Permissions", "api.context.permissions.app_error", nil, "userId="+c.CustomClaims.UserId+", "+"permission="+permission.Id)
	c.Err.StatusCode = http.StatusForbidden
}
// ApiCustomClaimsRequiredMiddleware is a gin middleware that builds the
// per-request Context and rejects the request when authentication failed or
// no user id is present in the claims. On success the Context is stored
// under CTX for downstream handlers.
func ApiCustomClaimsRequiredMiddleware(ctx *gin.Context) {
	r := ctx.Request
	c := InitContext(ctx)
	c.Path = r.URL.Path
	if c.Err == nil {
		c.CustomClaimsRequired()
	}
	if c.Err != nil {
		c.Err.Translate(c.T)
		c.Err.RequestId = c.RequestId
		// Set the location before logging so the log line carries it;
		// previously Where was assigned after LogError and the log showed
		// an empty location.
		c.Err.Where = r.URL.Path
		c.LogError(c.Err)
		utils.RenderError(ctx, c.Err)
		ctx.Abort()
		// Previously execution fell through to ctx.Set/ctx.Next, storing a
		// failed context and resuming the chain after Abort.
		return
	}
	ctx.Set(CTX, c)
	ctx.Next()
}
// InitContext builds a per-request Context: translations, request id, client
// ip, URL params and response headers, then authenticates the caller via
// (in priority order) an API-key header, a Bearer/Token Authorization
// header, the session cookie, or an access_token query parameter.
// Authentication failures are recorded on c.Err with status 600.
func InitContext(ctx *gin.Context) *Context {
	r := ctx.Request
	w := ctx.Writer
	c := &Context{}
	c.T, _ = utils.GetTranslationsAndLocale(r)
	c.RequestId = utils.NewId()
	c.IpAddress = utils.GetIpAddress(r)
	c.Params = ApiParamsFromGinRequest(ctx)
	c.SetSiteURLHeader(GetProtocol(r) + "://" + r.Host)
	w.Header().Set(_const.HEADER_REQUEST_ID, c.RequestId)
	w.Header().Set("Content-Type", "application/json")
	if r.Method == "GET" {
		w.Header().Set("Expires", "0")
	}
	if apiKey := r.Header.Get(_const.HEADER_API_KEY); apiKey != "" {
		// Validate the API key via the pluggable CheckApiKey hook.
		if CheckApiKey == nil {
			c.Err = utils.NewLocAppError("ServeHTTP",
				i18n.APIKEY_PARSE_ERROR, nil, "apiKey="+apiKey,
			)
		} else {
			c.CustomClaims, c.Err = CheckApiKey(apiKey, ctx)
		}
		return c
	}
	tokenString := ""
	authHeader := r.Header.Get(_const.HEADER_AUTH)
	if len(authHeader) > 6 && strings.ToUpper(authHeader[0:6]) == _const.HEADER_BEARER {
		tokenString = authHeader[7:]
	} else if len(authHeader) > 5 && strings.ToLower(authHeader[0:5]) == _const.HEADER_TOKEN {
		tokenString = authHeader[6:]
	}
	if len(tokenString) == 0 {
		if cookie, err := r.Cookie(_const.SESSION_COOKIE_TOKEN); err == nil {
			tokenString = cookie.Value
			// Cookie-carried tokens are only accepted for XHR requests;
			// anything else is treated as a CSRF attempt.
			if r.Header.Get(_const.HEADER_REQUESTED_WITH) != _const.HEADER_REQUESTED_WITH_XML {
				c.Err = utils.NewLocAppError("ServeHTTP",
					i18n.SESSION_EXPIRED_ERROR, nil,
					"tokenString="+tokenString+" Appears to be a CSRF attempt",
				)
				c.Err.StatusCode = 600
				tokenString = ""
			}
		}
	}
	if len(tokenString) == 0 {
		tokenString = r.URL.Query().Get("access_token")
	}
	if len(tokenString) != 0 {
		token, err := jwt.ParseWithClaims(tokenString, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {
			// Only HMAC-signed tokens are accepted.
			if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
				return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
			}
			return []byte(JWTSecret), nil
		})
		if err != nil {
			c.Err = utils.NewLocAppError("ServeHTTP",
				i18n.JWT_PARSE_ERROR, nil, "err="+err.Error()+",tokenString="+tokenString,
			)
			c.Err.StatusCode = 600
		} else if claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {
			c.CustomClaims = *claims
			c.TokenString = tokenString
		} else {
			c.Err = utils.NewLocAppError("ServeHTTP",
				i18n.JWT_PARSE_ERROR, nil, "tokenString="+tokenString,
			)
			c.Err.StatusCode = 600
		}
	}
	return c
}
// GetApiContext retrieves the Context stored by the middleware under CTX,
// or nil when none was set.
func GetApiContext(c *gin.Context) *Context {
	if ctx, exists := c.Get(CTX); exists {
		return ctx.(*Context)
	}
	return nil
}
// ipRequest stores the per-(ip, request) rate-limit state, encoded as
// "allowance-timestamp" strings.
var ipRequest = sync.Map{}

// CheckRateLimit implements a token-bucket limiter keyed by (ip, request).
// It returns true when the caller has exceeded the configured rate (caller
// should reject the request), false when the request is allowed.
func CheckRateLimit(ip, request string) bool {
	current := int(time.Now().Unix())
	currentStr := strconv.Itoa(current)
	// Example configuration: limit = 100 requests,
	// timeset = 600 seconds
	// => at most 100 requests per 600-second window.
	limit, timeset := GetRateLimitConfig()
	if limit == 0 || timeset == 0 {
		return false
	}
	allowanceStr, timestampStr := LoadAllowance(ip, request)
	allowance, _ := strconv.Atoi(allowanceStr)
	timestamp, _ := strconv.Atoi(timestampStr)
	// Refill the bucket proportionally to the elapsed time, capped at limit.
	allowance += int(current-timestamp) * limit / timeset
	if allowance > limit {
		allowance = limit
	}
	if allowance < 1 {
		SaveAllowance(ip, request, "0", currentStr)
		// true: rate exceeded, caller should emit an error response.
		return true
	} else {
		allowanceStr = strconv.Itoa(allowance - 1)
		SaveAllowance(ip, request, allowanceStr, currentStr)
		// false: within the allowed rate.
		return false
	}
}
// LoadAllowance returns the stored (allowance, timestamp) pair for the
// (ip, request) key, or a full default allowance stamped with the current
// time when the key has not been seen yet. Both values are decimal strings.
func LoadAllowance(ip, request string) (allowance, timestamp string) {
	res, ok := ipRequest.Load(ip + "_" + request)
	if !ok {
		// BUG FIX: string(time.Now().Unix()) converted the int64 to a
		// single rune, not a decimal string, so the timestamp never parsed
		// and the refill math in CheckRateLimit was wildly wrong.
		currentStr := strconv.FormatInt(time.Now().Unix(), 10)
		defaultLimitInt, _ := GetRateLimitConfig()
		defaultLimitStr := strconv.Itoa(defaultLimitInt)
		allowance, timestamp = defaultLimitStr, currentStr
	} else {
		// Stored format is "allowance-timestamp" (see SaveAllowance).
		kv := strings.Split(res.(string), "-")
		allowance, timestamp = kv[0], kv[1]
	}
	return
}
// GetRateLimitConfig returns the (request limit, window in seconds) pair
// used by CheckRateLimit.
// NOTE(review): it currently returns 0, 0, which makes CheckRateLimit
// short-circuit to "allowed" — rate limiting is effectively disabled.
func GetRateLimitConfig() (limit, timeset int) {
	return 0, 0
}
// SaveAllowance persists the remaining allowance and last-refill timestamp
// for the (ip, request) key, encoded as "allowance-timestamp".
func SaveAllowance(ip, request, allowance, current string) {
	ipRequest.Store(ip+"_"+request, allowance+"-"+current)
}
// RateLimit is a gin middleware that rejects requests exceeding the
// configured per-ip, per-path rate with a rate-limit error response.
func RateLimit(ctx *gin.Context) {
	if CheckRateLimit(ctx.ClientIP(), ctx.Request.URL.Path) {
		err := utils.NewLocAppError("context.RateLimit", i18n.REQUEST_RATE_LIMIT, nil,
			fmt.Sprintf("ip = %s path %s", ctx.ClientIP(), ctx.Request.URL.Path))
		utils.RenderError(ctx, err)
		//log.Warn("RateLimit=>ip = %s path %s", ctx.ClientIP(), ctx.Request.URL.Path)
		ctx.Abort()
	} else {
		ctx.Next()
	}
}
func RenderJson(w http.ResponseWriter, o interface{}) {
if b, err := json.Marshal(o); err != nil {
w.Write([]byte(""))
} else {
w.Write(b)
}
}
|
package utils
import (
"archive/zip"
"bytes"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"mime/multipart"
"os"
"reflect"
"strings"
"time"
)
// 返回当前时间
func GetDate() string {
timestamp := time.Now().Unix()
tm := time.Unix(timestamp, 0)
return tm.Format("2006-01-02 03:04:05")
}
// 获取当前系统环境
func GetRunTime() string {
//获取系统环境变量
RUN_TIME := os.Getenv("RUN_TIME")
if RUN_TIME == "" {
fmt.Println("No RUN_TIME Can't start")
}
return RUN_TIME
}
// GetMD5 returns the hex-encoded MD5 digest of plainText.
func GetMD5(plainText string) string {
	sum := md5.Sum([]byte(plainText))
	return hex.EncodeToString(sum[:])
}
//计算文件的md5,适用于本地文件计算
func GetMd5(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()
md5hash := md5.New()
if _, err := io.Copy(md5hash, f); err != nil {
return "", err
}
return hex.EncodeToString(md5hash.Sum(nil)), nil
}
//从流中直接读取数据计算md5 并返回流的副本,不能用于计算大文件流否则内存占用很大
//@return io.Reader @params file的副本
func GetMd52(file io.Reader) (io.Reader, string, error) {
var b bytes.Buffer
md5hash := md5.New()
if _, err := io.Copy(&b, io.TeeReader(file, md5hash)); err != nil {
return nil, "", err
}
return &b, hex.EncodeToString(md5hash.Sum(nil)), nil
}
//解压
func DeCompress(zipFile, dest string) error {
reader, err := zip.OpenReader(zipFile)
if err != nil {
return err
}
defer reader.Close()
for _, file := range reader.File {
if file.FileInfo().IsDir() {
continue
}
rc, err := file.Open()
if err != nil {
return err
}
defer rc.Close()
filename := dest + file.Name
err = os.MkdirAll(getDir(filename), 0755)
if err != nil {
return err
}
w, err := os.Create(filename)
if err != nil {
return err
}
defer w.Close()
_, err = io.Copy(w, rc)
if err != nil {
return err
}
}
return nil
}
// getDir returns the directory portion of path — everything before the last
// '/' — or "" when path contains no slash. The previous subString-based
// implementation panicked in the slash-less case because LastIndex
// returned -1.
func getDir(path string) string {
	if i := strings.LastIndex(path, "/"); i >= 0 {
		return path[:i]
	}
	return ""
}
// subString returns the substring of str covering runes [start, end).
// It panics when the bounds fall outside the rune length, matching the
// original contract.
func subString(str string, start, end int) string {
	runes := []rune(str)
	n := len(runes)
	switch {
	case start < 0 || start > n:
		panic("start is wrong")
	case end < start || end > n:
		panic("end is wrong")
	}
	return string(runes[start:end])
}
// UploadFile stores the uploaded multipart file beneath path (callers pass
// a trailing separator) and returns the sanitized file name that was
// written.
func UploadFile(file *multipart.FileHeader, path string) (string, error) {
	// Check validity before IsNil: calling IsNil on an invalid Value
	// panics (the original had the order reversed).
	v := reflect.ValueOf(file)
	if !v.IsValid() || v.IsNil() {
		return "", errors.New("invalid memory address or nil pointer dereference")
	}
	src, err := file.Open()
	if err != nil {
		return "", err
	}
	// Defer Close only after a successful Open; the original deferred
	// before checking err and would nil-deref when Open failed.
	defer src.Close()
	if err = MkDir(path); err != nil {
		return "", err
	}
	// Sanitize the client-supplied name: strip spaces and newlines, and
	// drop any directory components (upload filenames are untrusted and
	// could otherwise escape path via "a/../../b").
	filename := strings.Replace(file.Filename, " ", "", -1)
	filename = strings.Replace(filename, "\n", "", -1)
	if i := strings.LastIndex(filename, "/"); i >= 0 {
		filename = filename[i+1:]
	}
	dst, err := os.Create(path + filename)
	if err != nil {
		return "", err
	}
	defer dst.Close()
	if _, err = io.Copy(dst, src); err != nil {
		return "", err
	}
	return filename, nil
}
func GetFileSize(filePath string) (int64, error) {
fileInfo, err := os.Stat(filePath)
if err != nil {
return 0, err
}
//文件大小
fsize := fileInfo.Size()
return fsize, nil
}
/**
* 判断文件是否存在 存在返回 true 不存在返回false
*/
func CheckFileIsExist(filename string) bool {
var exist = true
if _, err := os.Stat(filename); os.IsNotExist(err) {
exist = false
}
return exist
} |
package fibo
// Num returns the n-th Fibonacci number for non-negative n
// (Num(0)=0, Num(1)=1, Num(2)=1, ...), iteratively in O(n).
func Num(n int) int {
	if n == 0 {
		return 0
	}
	prev, cur := 0, 1
	for i := 1; i < n; i++ {
		prev, cur = cur, prev+cur
	}
	return cur
}
|
// go run gota_usage.go
package main
import (
"fmt"
"log"
"os"
"github.com/go-gota/gota/dataframe"
"github.com/go-gota/gota/series"
)
// main demonstrates basic gota usage: reading a CSV into a DataFrame,
// filtering, constructing Series/DataFrames by hand, and loading structs.
func main() {
	// Requires a test.csv file in the working directory.
	csvfile, err := os.Open("test.csv")
	if err != nil {
		log.Fatal(err)
	}
	df := dataframe.ReadCSV(csvfile)
	fmt.Println(df)
	// Keep only rows whose column "3" equals 3.
	df = df.Filter(dataframe.F{"3", "==", 3})
	fmt.Println(df)
	s := series.New([]string{"b", "a", "c", "d"}, series.String, "SeriesName")
	fmt.Println(s)
	s1 := series.New([]string{"b", "a"}, series.String, "COL.1")
	s2 := series.New([]int{1, 2}, series.Int, "COL.2")
	s3 := series.New([]float64{3.0, 4.0}, series.Float, "COL.3")
	fmt.Println(s1)
	fmt.Println(s2)
	fmt.Println(s3)
	dfx := dataframe.New(
		series.New([]string{"b", "a"}, series.String, "COL.1"),
		series.New([]int{1, 2}, series.Int, "COL.2"),
		series.New([]float64{3.0, 4.0}, series.Float, "COL.3"),
	)
	fmt.Println(dfx)
	fmt.Println("***********************")
	fmt.Println("Or load data from structs...")
	type User struct {
		Name string
		Age int
		Accuracy float64
		ignored bool // unexported fields are skipped by LoadStructs
	}
	users := []User{
		{"Aram", 17, 0.2, true},
		{"Juan", 18, 0.8, true},
		{"Ana", 22, 0.5, true},
	}
	dfs := dataframe.LoadStructs(users)
	fmt.Println(dfs)
	dfs = dfs.Filter(dataframe.F{"Age", ">=", 18})
	fmt.Println(dfs)
	fmt.Println(dfs.Select([]string{"Name", "Accuracy"}))
	// see more at https://github.com/go-gota/gota
}
|
// 写出下面程序的输出及简要解释
package main
import (
"fmt"
)
// main runs each defer demo in order; the comment after each test function
// below records its expected output.
func main() {
	test1()
	test2()
	test3()
	test4()
	fmt.Println("test5: ", test5())
	fmt.Println("test6: ", test6())
	fmt.Println("test7: ", test7())
	fmt.Println("test8: ", test8())
}
// Deferred calls run in LIFO order after the function body completes.
func test1() {
	defer a()
	defer b()
	fmt.Println("test1")
}

// Output: test1, b, a
func test2() {
	defer func() {
		defer a()
		defer b()
		fmt.Println("test2.defer")
	}()
	fmt.Println("test2")
}

// Output: test2, test2.defer, b, a — the outer deferred closure runs its
// body first, then its own defers in LIFO order.
func test3() {
	i := 0
	defer func() {
		fmt.Println(i)
	}()
	i++
	fmt.Println("test3")
}

// Output: test3, 1 — the closure captures i by reference and sees the
// increment that happened after the defer statement.
func test4() {
	i := 0
	defer func(i int) {
		fmt.Println(i)
	}(i)
	i++
	fmt.Println("test4")
}

// Output: test4, 0 — defer arguments are evaluated (copied) at the point of
// the defer statement, before the increment.
func test5() int {
	i := 0
	defer func() {
		i++
	}()
	return i
}

// Returns 0 — for an unnamed result, the return value is copied out before
// the deferred increment of the local runs.
func test6() int {
	i := 0
	defer func(i int) {
		i++
	}(i)
	return i
}

// Returns 0 — the defer increments its own copy of i.
func test7() (i int) {
	defer func() {
		i++
	}()
	return
}

// Returns 1 — with a NAMED result, the deferred closure mutates the result
// variable after the bare return assigns it.
func test8() (i int) {
	defer func(i int) {
		i++
	}(i)
	return
}

// Returns 0 — the parameter i shadows the named result; only the copy is
// incremented.
func a() {
	fmt.Println("a")
}
func b() {
	fmt.Println("b")
}
|
package virtual_security
import (
"errors"
"reflect"
"testing"
"time"
)
// testPriceStore is a hand-rolled mock of the price store: canned return
// values plus call-history slices for assertions.
type testPriceStore struct {
	getBySymbolCode1 *symbolPrice // canned result for getBySymbolCode
	getBySymbolCode2 error // canned error for getBySymbolCode
	getBySymbolCodeHistory []string // codes passed to getBySymbolCode
	set1 error // canned error for set
	setHistory []*symbolPrice // prices passed to set
}

// getBySymbolCode records the requested code and returns the canned pair.
func (t *testPriceStore) getBySymbolCode(symbolCode string) (*symbolPrice, error) {
	t.getBySymbolCodeHistory = append(t.getBySymbolCodeHistory, symbolCode)
	return t.getBySymbolCode1, t.getBySymbolCode2
}

// set records the stored price and returns the canned error.
func (t *testPriceStore) set(price *symbolPrice) error {
	t.setHistory = append(t.setHistory, price)
	return t.set1
}
// Test_getPriceStore checks that the factory seeds an empty store and
// derives the 08:00 expire time from the injected clock.
func Test_getPriceStore(t *testing.T) {
	clock := &testClock{now1: time.Date(2021, 5, 22, 7, 11, 0, 0, time.Local)}
	got := getPriceStore(clock)
	want := &priceStore{
		store: map[string]*symbolPrice{},
		clock: clock,
		expireTime: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local),
	}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), want, got)
	}
}

// Test_priceStore_isExpired verifies the expiry predicate: strictly before
// the deadline is not expired, at or after it is.
func Test_priceStore_isExpired(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		priceStore *priceStore
		arg time.Time
		want bool
	}{
		{name: "有効期限より引数の時刻が前",
			priceStore: &priceStore{expireTime: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local)},
			arg: time.Date(2021, 5, 22, 7, 0, 0, 0, time.Local),
			want: false},
		{name: "有効期限と引数の時刻が一致",
			priceStore: &priceStore{expireTime: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local)},
			arg: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local),
			want: true},
		{name: "有効期限より引数の時刻が後",
			priceStore: &priceStore{expireTime: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local)},
			arg: time.Date(2021, 5, 22, 9, 0, 0, 0, time.Local),
			want: true},
	}
	for _, test := range tests {
		test := test // capture for the parallel subtest (pre-Go 1.22 semantics)
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got := test.priceStore.isExpired(test.arg)
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}

// Test_priceStore_setCalculatedExpireTime verifies the next-deadline rule:
// before 08:00 the deadline is today's 08:00, otherwise tomorrow's.
func Test_priceStore_setCalculatedExpireTime(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		priceStore *priceStore
		arg time.Time
		want time.Time
	}{
		{name: "現在時刻が8時以前なら当日の8時をセット",
			priceStore: &priceStore{},
			arg: time.Date(2021, 5, 22, 7, 0, 0, 0, time.Local),
			want: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local)},
		{name: "現在時刻が8時なら翌日の8時をセット",
			priceStore: &priceStore{},
			arg: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local),
			want: time.Date(2021, 5, 23, 8, 0, 0, 0, time.Local)},
		{name: "現在時刻が8時以降なら翌日の8時をセット",
			priceStore: &priceStore{},
			arg: time.Date(2021, 5, 22, 9, 0, 0, 0, time.Local),
			want: time.Date(2021, 5, 23, 8, 0, 0, 0, time.Local)},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			test.priceStore.setCalculatedExpireTime(test.arg)
			got := test.priceStore.expireTime
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
// Test_priceStore_GetBySymbolCode verifies lookup hits, misses, and the
// store-expiry path of priceStore.getBySymbolCode.
func Test_priceStore_GetBySymbolCode(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		priceStore *priceStore
		arg string
		want1 *symbolPrice
		want2 error
	}{
		{name: "指定した銘柄が存在したらそれを返す",
			priceStore: &priceStore{
				clock: &testClock{now1: time.Date(2021, 5, 22, 7, 0, 0, 0, time.Local)},
				store: map[string]*symbolPrice{"1234": {SymbolCode: "1234", Price: 100}, "2345": {SymbolCode: "2345", Price: 200}, "3456": {SymbolCode: "3456", Price: 300}},
				expireTime: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local),
			},
			arg: "2345",
			want1: &symbolPrice{SymbolCode: "2345", Price: 200},
			want2: nil},
		{name: "指定した銘柄が存在しなければエラーを返す",
			priceStore: &priceStore{
				clock: &testClock{now1: time.Date(2021, 5, 22, 7, 0, 0, 0, time.Local)},
				store: map[string]*symbolPrice{"1234": {SymbolCode: "1234", Price: 100}, "2345": {SymbolCode: "2345", Price: 200}, "3456": {SymbolCode: "3456", Price: 300}},
				expireTime: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local),
			},
			arg: "0000",
			want1: nil,
			want2: NoDataError},
		{name: "指定した銘柄が存在しても、有効期限が切れていればstoreを空にして有効期限を更新し、エラーを返す",
			priceStore: &priceStore{
				clock: &testClock{now1: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local)},
				store: map[string]*symbolPrice{"1234": {SymbolCode: "1234", Price: 100}, "2345": {SymbolCode: "2345", Price: 200}, "3456": {SymbolCode: "3456", Price: 300}},
				expireTime: time.Date(2021, 5, 22, 8, 0, 0, 0, time.Local),
			},
			arg: "2345",
			want1: nil,
			want2: ExpiredDataError},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got1, got2 := test.priceStore.getBySymbolCode(test.arg)
			// BUG FIX: this condition used `&&`, which only reported a
			// failure when BOTH the value and the error mismatched; either
			// mismatch must fail the test (compare Test_priceStore_Set).
			if !reflect.DeepEqual(test.want1, got1) || !errors.Is(got2, test.want2) {
				t.Errorf("%s error\nwant: %+v, %+v\ngot: %+v, %+v\n", t.Name(), test.want1, test.want2, got1, got2)
			}
		})
	}
}
// Test_priceStore_Set verifies priceStore.set: no-op without a price time,
// plain insert before expiry, clear-and-extend after expiry, and a
// NilArgumentError for a nil argument.
func Test_priceStore_Set(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		priceStore *priceStore
		arg *symbolPrice
		want error
		wantStore map[string]*symbolPrice
		wantExpireTime time.Time
	}{
		{name: "引数の情報に更新日時がまったくなければ何もしない",
			priceStore: &priceStore{
				store: map[string]*symbolPrice{},
				expireTime: time.Date(2021, 5, 23, 8, 0, 0, 0, time.Local)},
			arg: &symbolPrice{},
			wantStore: map[string]*symbolPrice{},
			wantExpireTime: time.Date(2021, 5, 23, 8, 0, 0, 0, time.Local)},
		{name: "ストアの有効期限が切れていなければ、storeに追加する",
			priceStore: &priceStore{
				clock: &testClock{now1: time.Date(2021, 5, 25, 9, 0, 0, 0, time.Local)},
				store: map[string]*symbolPrice{"1234": {SymbolCode: "1234", Price: 100}, "2345": {SymbolCode: "2345", Price: 200}, "3456": {SymbolCode: "3456", Price: 300}},
				expireTime: time.Date(2021, 5, 26, 8, 0, 0, 0, time.Local)},
			arg: &symbolPrice{SymbolCode: "2345", Price: 400, PriceTime: time.Date(2021, 5, 25, 9, 0, 0, 0, time.Local)},
			wantStore: map[string]*symbolPrice{"1234": {SymbolCode: "1234", Price: 100}, "2345": {SymbolCode: "2345", Price: 400, PriceTime: time.Date(2021, 5, 25, 9, 0, 0, 0, time.Local)}, "3456": {SymbolCode: "3456", Price: 300}},
			wantExpireTime: time.Date(2021, 5, 26, 8, 0, 0, 0, time.Local)},
		{name: "有効期限が切れていれば、storeをクリアし、有効期限を延長してから、storeに追加する",
			priceStore: &priceStore{
				clock: &testClock{now1: time.Date(2021, 5, 25, 9, 0, 0, 0, time.Local)},
				store: map[string]*symbolPrice{"1234": {SymbolCode: "1234", Price: 100}, "2345": {SymbolCode: "2345", Price: 200}, "3456": {SymbolCode: "3456", Price: 300}},
				expireTime: time.Date(2021, 5, 25, 8, 0, 0, 0, time.Local)},
			arg: &symbolPrice{SymbolCode: "2345", Price: 400, PriceTime: time.Date(2021, 5, 25, 9, 0, 0, 0, time.Local)},
			wantStore: map[string]*symbolPrice{"2345": {SymbolCode: "2345", Price: 400, PriceTime: time.Date(2021, 5, 25, 9, 0, 0, 0, time.Local)}},
			wantExpireTime: time.Date(2021, 5, 26, 8, 0, 0, 0, time.Local)},
		{name: "引数がnilならエラー",
			priceStore: &priceStore{
				clock: &testClock{now1: time.Date(2021, 5, 25, 9, 0, 0, 0, time.Local)},
				store: map[string]*symbolPrice{},
				expireTime: time.Date(2021, 5, 25, 8, 0, 0, 0, time.Local)},
			arg: nil,
			want: NilArgumentError,
			wantStore: map[string]*symbolPrice{},
			wantExpireTime: time.Date(2021, 5, 25, 8, 0, 0, 0, time.Local)},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got := test.priceStore.set(test.arg)
			// Fail when the error, the resulting store, or the expire time
			// deviates from the expectation.
			if !errors.Is(got, test.want) ||
				!reflect.DeepEqual(test.wantStore, test.priceStore.store) ||
				!reflect.DeepEqual(test.wantExpireTime, test.priceStore.expireTime) {
				t.Errorf("%s error\nresult: %+v, %+v, %+v\nwant: %+v, %+v, %+v\ngot: %+v, %+v, %+v\n", t.Name(),
					!errors.Is(got, test.want), !reflect.DeepEqual(test.wantStore, test.priceStore.store), !reflect.DeepEqual(test.wantExpireTime, test.priceStore.expireTime),
					test.want, test.wantStore, test.wantExpireTime,
					got, test.priceStore.store, test.priceStore.expireTime)
			}
		})
	}
}
|
package dynamodb
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
// GetUsersAttrDefs returns the attribute definitions for the Users table:
// both Login and Password are string-typed ("S") attributes.
func GetUsersAttrDefs() []*dynamodb.AttributeDefinition {
	attrs := []struct{ name, typ string }{
		{"Login", "S"},
		{"Password", "S"},
	}
	defs := make([]*dynamodb.AttributeDefinition, 0, len(attrs))
	for _, a := range attrs {
		defs = append(defs, &dynamodb.AttributeDefinition{
			AttributeName: aws.String(a.name),
			AttributeType: aws.String(a.typ),
		})
	}
	return defs
}
// GetUsersAttrSchemas returns the key schema for the Users table: Login is
// the partition (HASH) key and Password the sort (RANGE) key.
func GetUsersAttrSchemas() []*dynamodb.KeySchemaElement {
	keys := []struct{ name, keyType string }{
		{"Login", "HASH"},
		{"Password", "RANGE"},
	}
	schema := make([]*dynamodb.KeySchemaElement, 0, len(keys))
	for _, k := range keys {
		schema = append(schema, &dynamodb.KeySchemaElement{
			AttributeName: aws.String(k.name),
			KeyType: aws.String(k.keyType),
		})
	}
	return schema
}
// UsersItem holds the features for a particular login.
// NOTE(review): the table schema keys on Password as the RANGE attribute —
// keying on a raw password is a security smell; confirm this is intended.
type UsersItem struct {
	Login string
	Password string
}
|
package main
import "fmt"
// S1 satisfies interface I (and fmt.Stringer) via a pointer receiver.
type S1 struct{}

func (s *S1) String() string {
	return "S1.String"
}

// S2 has no String method, so *S2 does not satisfy I.
type S2 struct{}

// S3's String takes a parameter, so its signature does not match I either.
type S3 struct{}

func (s *S3) String(in string) string {
	return "S3.String"
}

// I is satisfied only by types with a parameterless String() string method.
type I interface {
	String() string
}

func main() {
	s1 := new(S1)
	// s2 := new(S2)
	// s3 := new(S3)
	var i I
	i = s1
	fmt.Println(i) // fmt detects the Stringer and prints "S1.String"
	// i = s2 // would not compile: S2 lacks String()
	// i = s3 // would not compile: wrong String signature
}
|
package fmm
import "testing"
// TestVersion checks parsing (including short forms and leading zeros) and
// string formatting of Version values, with and without the build part.
func TestVersion(t *testing.T) {
	tests := []struct {
		input string
		output string
		outputWithBuild string
		p0, p1, p2, p3 uint16
	}{
		{"1.0", "1.0.0", "1.0.0.0", 1, 0, 0, 0},
		{"1.1.15", "1.1.15", "1.1.15.0", 1, 1, 15, 0},
		{"2.3.4.5", "2.3.4", "2.3.4.5", 2, 3, 4, 5},
		{"010.001.0100.0000001", "10.1.100", "10.1.100.1", 10, 1, 100, 1},
	}
	for _, test := range tests {
		ver, err := NewVersion(test.input)
		if err != nil {
			t.Error(err)
		}
		if ver[0] != test.p0 || ver[1] != test.p1 || ver[2] != test.p2 || ver[3] != test.p3 {
			t.Error("Version parse mismatch:", test.input, ver)
		}
		verStr := ver.ToString(false)
		if verStr != test.output {
			t.Error("Version string mismatch:", test.input, verStr)
		}
		verStrWithBuild := ver.ToString(true)
		if verStrWithBuild != test.outputWithBuild {
			// BUG FIX: this branch previously reported verStr, hiding the
			// actual with-build value that failed the comparison.
			t.Error("Version string mismatch:", test.input, verStrWithBuild)
		}
	}
}
// TestVersionCmp checks three-way comparison between versions of differing
// lengths (absent components are expected to compare as zero).
func TestVersionCmp(t *testing.T) {
	tests := []struct {
		v1 Version
		v2 Version
		res VersionCmpRes
	}{
		{Version{1, 3, 1}, Version{2, 0}, VersionLt},
		{Version{1, 5, 3}, Version{1, 5, 2}, VersionGt},
		{Version{1, 5}, Version{1, 5, 0, 0}, VersionEq},
	}
	for _, test := range tests {
		res := test.v1.Cmp(&test.v2)
		if res != test.res {
			t.Error("Version comparison failure:", test.v1, test.v2, test.res, res)
		}
	}
}
|
package ppm
import (
"errors"
"fmt"
"os"
"strings"
)
// PpImage is a ppm image object.
// NOTE(review): "wdith" is a long-standing typo for "width"; renaming it
// would touch every use site, so it is only flagged here.
type PpImage struct {
	name string // output file name, expected to end in "ppm"
	mode string // ppm mode, e.g. "p6"
	wdith, height int // image dimensions in pixels
	maxPixel uint16 // maximum channel value
	pixel [][]Vector // row-major pixel grid: [height][wdith]
	Colors
}

// Vector holds one RGB color sample.
type Vector struct {
	X, Y, Z float64
}

// Colors is implemented by types that can populate a pixel grid.
// NOTE(review): (*PpImage).CreateColors takes [][]Vector, not interface{},
// so PpImage does not actually satisfy this interface — confirm intent.
type Colors interface {
	CreateColors(interface{})
}
// CreateColors allocates the pixel grid (height rows of wdith columns),
// copies colorsSet into it, prints the grid, and returns the receiver for
// chaining. colorsSet must be at least height x wdith.
//
//	. . . | . . . | . . . | . . .
//	[[{10 10 10} {10 10 10}] [{10 10 10} {10 10 10}]] // 2 * 2
func (p *PpImage) CreateColors(colorsSet [][]Vector) *PpImage {
	p.pixel = make([][]Vector, p.height)
	for row := range p.pixel {
		p.pixel[row] = make([]Vector, p.wdith)
		for col := 0; col < p.wdith; col++ {
			// Whole-struct assignment copies X, Y and Z in one step.
			p.pixel[row][col] = colorsSet[row][col]
		}
	}
	fmt.Println("Pixel => ", p.pixel)
	return p
}
// color is a scale factor for mapping [0,1) floats onto 8-bit channel
// values. NOTE(review): unused within this file as shown.
const color = 255.9999999

var (
	nilSizeErr = errors.New("nil image can not to draw")
	nilNameErr = errors.New("nil image name can not to draw")
	nameFormatErr = errors.New("file name format shoule be .ppm")
)
// NewPPMImage constructs a PpImage with the given file name, ppm mode,
// dimensions and maximum channel value.
func NewPPMImage(name, mode string, wdith, height int, MaxPixel uint16) *PpImage {
	img := new(PpImage)
	img.name = name
	img.mode = mode
	img.wdith = wdith
	img.height = height
	img.maxPixel = MaxPixel
	return img
}
// SetName sets the image file name and returns the receiver for chaining.
func (p *PpImage) SetName(name string) *PpImage {
	p.name = name
	return p
}
// SetMode sets the ppm format; with no argument it defaults to "p6".
// Returns the receiver for chaining.
func (p *PpImage) SetMode(mode ...string) *PpImage {
	if len(mode) == 0 {
		p.mode = "p6"
		// BUG FIX: without this return, execution fell through to
		// mode[0] and panicked with index out of range whenever the
		// method was called with no arguments.
		return p
	}
	p.mode = mode[0]
	return p
}
// SetWidthAndHeight sets the image dimensions in pixels and returns the
// receiver for chaining.
func (p *PpImage) SetWidthAndHeight(wdith, height int) *PpImage {
	p.wdith = wdith
	p.height = height
	return p
}

// SetPixelNum sets the maximum channel value (conventionally at most 255
// for 8-bit ppm) and returns the receiver for chaining.
func (p *PpImage) SetPixelNum(maxPixel uint16) *PpImage {
	p.maxPixel = maxPixel
	return p
}
// FillColor replaces the pixel grid wholesale with colors.
// TODO: NOTE(review): no validation that colors matches wdith/height.
func (p *PpImage) FillColor(colors [][]Vector) *PpImage {
	p.pixel = colors
	return p
}
// Draw writes the pixel grid to the image's file, one "R G B" triple per
// line. It validates size, name and the "ppm" suffix before writing.
func (p *PpImage) Draw() (err error) {
	if p.height == 0 || p.wdith == 0 {
		return nilSizeErr
	}
	if p.name == "" {
		return nilNameErr
	}
	if !strings.HasSuffix(p.name, "ppm") {
		return nameFormatErr
	}
	f, err := os.Create(p.name)
	if err != nil {
		// BUG FIX: the original deferred f.Close() (nil deref) and the
		// success message before this check ran.
		return err
	}
	defer f.Close()
	defer fmt.Println("OK => SAVED: ", p.name)
	for _, row := range p.pixel {
		for _, c := range row {
			// BUG FIX: %d applied to float64 printed "%!d(float64=...)"
			// garbage; convert to int explicitly. Also return the write
			// error instead of panicking inside the loop.
			if _, err = fmt.Fprintf(f, "%d %d %d\n", int(c.X), int(c.Y), int(c.Z)); err != nil {
				return err
			}
		}
	}
	return nil
}
|
package db
import (
"github.com/go-xorm/xorm"
"log"
)
const (
	// NOTE(review): hardcoded database credentials in source — move these
	// to configuration or environment variables.
	DB_HOST = "127.0.0.1:3306"
	DB_USER = "root"
	DB_PWD = "chendong"
	DB_NAME = "test"
)
// GetEngine opens a MySQL-backed xorm engine with SQL logging enabled.
// It returns nil when the engine cannot be created.
func GetEngine() *xorm.Engine {
	// BUG FIX: DB_HOST was declared but never used — the old DSN fell back
	// to the driver default; wire the host in explicitly.
	engine, err := xorm.NewEngine("mysql", DB_USER+":"+DB_PWD+"@tcp("+DB_HOST+")/"+DB_NAME+"?charset=utf8")
	if err != nil {
		log.Println(err)
		// BUG FIX: engine is nil on error; the old code fell through and
		// panicked on engine.ShowSQL.
		return nil
	}
	engine.ShowSQL = true
	return engine
}
|
package utils
import (
"fmt"
"sync"
"github.com/astaxie/beego"
"github.com/astaxie/beego/logs"
"github.com/samuel/go-zookeeper/zk"
)
// FuncMap is the reverse-engineered dubbo structure mapping a provider ip
// to the functions it exposes: ip -> [func1, func2, ...].
type FuncMap map[string][]string

// Result is one provider entry in a query response.
type Result struct {
	Weight int `json:"weight"`
	Disable bool `json:"disable"`
}

// ResFind is the dubbo query result: service -> provider -> Result.
type ResFind map[string]map[string]Result

// dubbozk is a zookeeper connection handle.
type dubbozk *zk.Conn

var ZkMap map[string]FuncMap
var ZkConn map[string]dubbozk

// Mutex is the global package lock guarding ZkMap/ZkConn.
var Mutex *sync.Mutex

// applog is the package-level file logger configured in Init.
var applog *logs.BeeLogger
// Init prepares the package-level state: the global mutex, the zookeeper
// maps, and a file-backed logger whose path comes from the "logs" app
// configuration key.
func Init() {
	Mutex = &sync.Mutex{}
	ZkMap = make(map[string]FuncMap)
	ZkConn = make(map[string]dubbozk)
	applog = logs.NewLogger()
	logConf := fmt.Sprintf(`{"filename":"%s"}`, beego.AppConfig.String("logs"))
	applog.SetLogger(logs.AdapterFile, logConf)
	applog.EnableFuncCallDepth(true)
}
|
package lib
const (
	// DefaultOAuth2URL is default OAuth2 server address.
	DefaultOAuth2URL = "https://oauth.lycam.tv"
	// DefaultAPIURL is default api server address.
	DefaultAPIURL = "https://api.lycam.tv"
	// DefaultTokenPath is default api path.
	DefaultTokenPath = "/oauth2/token"
	// DefaultAPIVersion is api version.
	DefaultAPIVersion = "v1"
	// DefaultUsername master name.
	DefaultUsername = "master"
)

// username is the account used for master token requests.
var username = DefaultUsername

// OAuth2 client credentials, populated via InitKey / Set* below.
var (
	appKey string
	appSecret string
	password string
)
// InitKey stores the application key, application secret and master
// password used when requesting OAuth2 tokens.
func InitKey(_appKey, _appSecret, _masterSecret string) {
	appKey = _appKey
	appSecret = _appSecret
	password = _masterSecret
}

// SetAppKey overrides the application key.
func SetAppKey(_appKey string) {
	appKey = _appKey
}

// SetAppSecret overrides the application secret.
func SetAppSecret(_appSecret string) {
	appSecret = _appSecret
}

// SetMasterSecret overrides the master password.
func SetMasterSecret(_masterSecret string) {
	password = _masterSecret
}
|
package model
// SignedBlock pairs a block with the justification bytes attesting to it.
type SignedBlock struct {
	Block Block `json:"block"`
	Justification Bytes `json:"justification"`
}
|
package handlers_test
import (
"bytes"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"strings"
"github.com/pivotal-cf-experimental/envoy/domain"
"github.com/pivotal-cf-experimental/envoy/internal/handlers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Provisioner is a test double for the handler's provisioner dependency:
// it records the last request and returns configurable canned results.
type Provisioner struct {
	WasCalledWith domain.ProvisionRequest
	WasCalled bool
	Error error
	DashboardURL string
}

// NewProvisioner returns a zero-configured Provisioner double.
func NewProvisioner() *Provisioner {
	return &Provisioner{}
}

// Provision records the request and returns the canned dashboard URL/error.
func (p *Provisioner) Provision(req domain.ProvisionRequest) (domain.ProvisionResponse, error) {
	p.WasCalledWith = req
	p.WasCalled = true
	return domain.ProvisionResponse{
		DashboardURL: p.DashboardURL,
	}, p.Error
}
// Spec for handlers.ProvisionHandler: exercises success (with and without a
// dashboard URL), provision failure, duplicate instances, malformed JSON
// and missing required fields, using the Provisioner double above.
var _ = Describe("Provision Handler", func() {
	var handler handlers.ProvisionHandler
	var provisioner *Provisioner
	BeforeEach(func() {
		provisioner = NewProvisioner()
		handler = handlers.NewProvisionHandler(provisioner)
	})
	Context("when dashboard URL is not specified", func() {
		It("returns empty JSON and a 201 on successful provision", func() {
			writer := httptest.NewRecorder()
			reqBody, err := json.Marshal(map[string]string{
				"service_id": "my-service-id",
				"plan_id": "my-plan-id",
				"organization_guid": "my-organization-guid",
				"space_guid": "my-space-guid",
			})
			if err != nil {
				panic(err)
			}
			request, err := http.NewRequest("PUT", "/v2/service_instances/i-dont-care-terribly", bytes.NewBuffer(reqBody))
			if err != nil {
				panic(err)
			}
			handler.ServeHTTP(writer, request)
			Expect(writer.Code).To(Equal(http.StatusCreated))
			Expect(writer.Header()["Content-Type"]).To(Equal([]string{"application/json"}))
			Expect(writer.Body.String()).To(MatchJSON("{}"))
			// The instance id comes from the URL, the rest from the body.
			Expect(provisioner.WasCalledWith).To(Equal(domain.ProvisionRequest{
				InstanceID: "i-dont-care-terribly",
				PlanID: "my-plan-id",
				ServiceID: "my-service-id",
				OrganizationGUID: "my-organization-guid",
				SpaceGUID: "my-space-guid",
			}))
		})
	})
	Context("when dashboard URL is specified", func() {
		BeforeEach(func() {
			provisioner.DashboardURL = "http://www.example.com/my-silly-dashboard-url"
		})
		It("returns JSON with dashboard URL and a 201 on successful provision", func() {
			writer := httptest.NewRecorder()
			reqBody, err := json.Marshal(map[string]string{
				"service_id": "my-service-id",
				"plan_id": "my-plan-id",
				"organization_guid": "my-organization-guid",
				"space_guid": "my-space-guid",
			})
			if err != nil {
				panic(err)
			}
			request, err := http.NewRequest("PUT", "/v2/service_instances/some-other-guid", bytes.NewBuffer(reqBody))
			if err != nil {
				panic(err)
			}
			handler.ServeHTTP(writer, request)
			Expect(writer.Code).To(Equal(http.StatusCreated))
			Expect(writer.Header()["Content-Type"]).To(Equal([]string{"application/json"}))
			Expect(writer.Body.String()).To(MatchJSON(`{
				"dashboard_url":"http://www.example.com/my-silly-dashboard-url"
			}`))
			Expect(provisioner.WasCalledWith).To(Equal(domain.ProvisionRequest{
				InstanceID: "some-other-guid",
				PlanID: "my-plan-id",
				ServiceID: "my-service-id",
				OrganizationGUID: "my-organization-guid",
				SpaceGUID: "my-space-guid",
			}))
		})
	})
	Context("when there is a provision failure", func() {
		BeforeEach(func() {
			provisioner.Error = errors.New("BOOM!")
		})
		It("returns a 500 and the error as the body", func() {
			writer := httptest.NewRecorder()
			reqBody, err := json.Marshal(map[string]string{
				"service_id": "my-service-id",
				"plan_id": "my-plan-id",
				"organization_guid": "my-organization-guid",
				"space_guid": "my-space-guid",
			})
			if err != nil {
				panic(err)
			}
			request, err := http.NewRequest("PUT", "/v2/service_instances/some-other-guid", bytes.NewBuffer(reqBody))
			if err != nil {
				panic(err)
			}
			handler.ServeHTTP(writer, request)
			Expect(writer.Code).To(Equal(http.StatusInternalServerError))
			Expect(writer.Header()["Content-Type"]).To(Equal([]string{"application/json"}))
			Expect(writer.Body.String()).To(MatchJSON(`{"description":"BOOM!"}`))
		})
	})
	Context("when the service instance has already been provisioned", func() {
		BeforeEach(func() {
			provisioner.Error = domain.ServiceInstanceAlreadyExistsError("already exists")
		})
		It("returns a 409 and the error message", func() {
			writer := httptest.NewRecorder()
			reqBody, err := json.Marshal(map[string]string{
				"service_id": "my-service-id",
				"plan_id": "my-plan-id",
				"organization_guid": "my-organization-guid",
				"space_guid": "my-space-guid",
			})
			if err != nil {
				panic(err)
			}
			request, err := http.NewRequest("PUT", "/v2/service_instances/a-duplicate-guid", bytes.NewBuffer(reqBody))
			if err != nil {
				panic(err)
			}
			handler.ServeHTTP(writer, request)
			Expect(writer.Code).To(Equal(http.StatusConflict))
			Expect(writer.Header()["Content-Type"]).To(Equal([]string{"application/json"}))
			Expect(writer.Body.String()).To(MatchJSON(`{}`))
		})
	})
	Context("when the request body is not valid JSON", func() {
		It("should not call the provisioner", func() {
			writer := httptest.NewRecorder()
			request, err := http.NewRequest("PUT", "/v2/service_instances/a-guid", strings.NewReader("{"))
			if err != nil {
				panic(err)
			}
			handler.ServeHTTP(writer, request)
			Expect(provisioner.WasCalled).To(BeFalse())
		})
		It("should return a 400 and an error message", func() {
			writer := httptest.NewRecorder()
			request, err := http.NewRequest("PUT", "/v2/service_instances/a-guid", strings.NewReader("{"))
			if err != nil {
				panic(err)
			}
			handler.ServeHTTP(writer, request)
			Expect(writer.Code).To(Equal(http.StatusBadRequest))
			Expect(writer.Header()["Content-Type"]).To(Equal([]string{"application/json"}))
			var msg struct {
				Description string `json:"description"`
			}
			Expect(json.Unmarshal(writer.Body.Bytes(), &msg)).To(Succeed())
			Expect(msg.Description).To(ContainSubstring("JSON"))
		})
	})
	Context("when the request body is missing a required field", func() {
		It("should not call the provisioner", func() {
			writer := httptest.NewRecorder()
			request, err := http.NewRequest("PUT", "/v2/service_instances/a-guid", strings.NewReader("{}"))
			if err != nil {
				panic(err)
			}
			handler.ServeHTTP(writer, request)
			Expect(provisioner.WasCalled).To(BeFalse())
		})
		It("should return a 400 and an error message", func() {
			writer := httptest.NewRecorder()
			request, err := http.NewRequest("PUT", "/v2/service_instances/a-guid", strings.NewReader("{}"))
			if err != nil {
				panic(err)
			}
			handler.ServeHTTP(writer, request)
			Expect(writer.Code).To(Equal(http.StatusBadRequest))
			Expect(writer.Header()["Content-Type"]).To(Equal([]string{"application/json"}))
			var msg struct {
				Description string `json:"description"`
			}
			Expect(json.Unmarshal(writer.Body.Bytes(), &msg)).To(Succeed())
			Expect(msg.Description).To(ContainSubstring("missing required field"))
		})
	})
})
|
package main
import (
"mysql_byroad/model"
"sync"
)
// TaskIdMap is a task-id -> Task map guarded by an embedded RWMutex.
// Copy by pointer only: the embedded mutex must not be copied.
type TaskIdMap struct {
	cmap map[int64]*model.Task
	sync.RWMutex
}
// NewTaskIdMap creates a TaskIdMap whose inner map is pre-sized for size
// entries.
func NewTaskIdMap(size int) *TaskIdMap {
	m := &TaskIdMap{}
	m.cmap = make(map[int64]*model.Task, size)
	return m
}
// Get returns the task stored under id, or nil when absent.
func (m *TaskIdMap) Get(id int64) *model.Task {
	m.RLock()
	defer m.RUnlock()
	return m.cmap[id]
}

// Set stores value under id, replacing any previous entry.
func (m *TaskIdMap) Set(id int64, value *model.Task) {
	m.Lock()
	defer m.Unlock()
	m.cmap[id] = value
}

// Delete removes the entry for id if present.
func (m *TaskIdMap) Delete(id int64) {
	m.Lock()
	defer m.Unlock()
	delete(m.cmap, id)
}
// Iter streams every task over an unbuffered channel. The read lock is held
// by the producing goroutine until the consumer has drained all values, so
// writers block while iteration is in progress.
func (m *TaskIdMap) Iter() <-chan *model.Task {
	out := make(chan *model.Task)
	go func() {
		defer close(out)
		m.RLock()
		defer m.RUnlock()
		for _, task := range m.cmap {
			out <- task
		}
	}()
	return out
}
// IterBuffered returns an already-closed channel buffered with a snapshot
// of every task; the consumer can drain it without blocking writers.
func (m *TaskIdMap) IterBuffered() <-chan *model.Task {
	// BUG FIX: len(cmap) was previously read outside any lock while a
	// goroutine filled the channel — a data race against concurrent
	// writers. Sizing the buffer to the map length under the read lock
	// also lets us fill synchronously and drop the goroutine entirely.
	m.RLock()
	ch := make(chan *model.Task, len(m.cmap))
	for _, task := range m.cmap {
		ch <- task
	}
	m.RUnlock()
	close(ch)
	return ch
}
|
package models
import (
"go.mongodb.org/mongo-driver/bson/primitive"
"time"
)
// Post is a stored post document as persisted in MongoDB.
type Post struct {
	ID           primitive.ObjectID   `bson:"_id"`
	Caption      string               `bson:"caption"`
	Username     string               `bson:"username"`
	Filename     string               `bson:"filename"`
	Likes        int                  `bson:"likes"`
	DatePosted   time.Time            `bson:"dateposted"`
	UserLikesIDS []primitive.ObjectID `bson:"userlikesids"` // IDs of users who liked the post
}
// PostWithEncode is a Post plus presentation-only fields (untagged, so they
// are not intended to round-trip through the bson document). DatePosted is a
// string here, presumably pre-formatted for display — confirm with callers.
type PostWithEncode struct {
	ID           primitive.ObjectID   `bson:"_id"`
	Caption      string               `bson:"caption"`
	Username     string               `bson:"username"`
	Filename     string               `bson:"filename"`
	Likes        int                  `bson:"likes"`
	DatePosted   string               `bson:"dateposted"`
	UserLikesIDS []primitive.ObjectID `bson:"userlikesids"`
	EncodedFile  string               // encoded file payload for rendering
	IsLiked      string               // like-state marker for the viewer
	DeleteButton bool                 // whether the viewer may delete this post
}
// PostToDB is the insert shape for a new post: identical to Post but without
// the _id field, letting MongoDB generate the ObjectID.
type PostToDB struct {
	Username     string               `bson:"username"`
	Caption      string               `bson:"caption"`
	Filename     string               `bson:"filename"`
	Likes        int                  `bson:"likes"`
	DatePosted   time.Time            `bson:"dateposted"`
	UserLikesIDS []primitive.ObjectID `bson:"userlikesids"`
}
// FSFile mirrors a GridFS fs.files document (note the camelCase "uploadDate"
// field name required by the GridFS spec).
type FSFile struct {
	ID         primitive.ObjectID `bson:"_id"`
	Length     int                `bson:"length"`
	ChunkSize  int                `bson:"chunksize"`
	UploadTime time.Time          `bson:"uploadDate"`
	Filename   string             `bson:"filename"`
}
package model
import (
"github.com/lichunchengPG/go-pratice/goblog/pkg/logger"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
// DB is the shared gorm.DB handle, populated by ConnectDB.
var DB *gorm.DB

// ConnectDB initializes the model layer: it opens the MySQL connection pool,
// stores it in the package-level DB, and returns it.
//
// NOTE(review): a connection error is only logged via logger.LogError and the
// (possibly nil) DB is still returned — callers must be prepared for that.
// The DSN also hard-codes credentials and uses utf8 rather than utf8mb4;
// consider loading it from configuration.
func ConnectDB() *gorm.DB {
	var err error
	config := mysql.New(mysql.Config{
		DSN: "root:secret@tcp(127.0.0.1:3306)/goblog?charset=utf8&parseTime=True&loc=Local",
	})
	// Open the database connection pool.
	DB, err = gorm.Open(config, &gorm.Config{})
	logger.LogError(err)
	return DB
}
// Copyright © 2017 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"github.com/spf13/cobra"
"github.com/OlegKuleba/goCobra/utils"
"github.com/OlegKuleba/goCobra/models"
"strings"
"fmt"
)
// addContactCmd represents the addContact command. It appends a new contact
// (phone, owner name, city, street, building, optional apartment) to the
// contacts file. The user-facing usage/help strings below are intentionally
// in Russian and must not be altered.
var addContactCmd = &cobra.Command{
	Use:   "addContact",
	Short: "addContact phoneNumber name - добавляет новый контакт в файл",
	Long: `Команда добавляет новый контакт в файл, требует передачу минимум 5-ти, максимум 6-ти аргументов:
номерТелефона имяВладельца город улица дом [квартира](в указанном порядке)
параметры в квадратных скобках являются опциональными
Например: addContact +380501234567 Anya Dnipro Gagarina 103a 5`,
	Args: cobra.MinimumNArgs(5),
	Run:  addContact,
}
// addContact validates the command-line arguments and stores a new contact.
// Expected args layout: [0] phone number, [1] owner name, [2] city,
// [3] street, [4] building, [5] apartment (optional).
func addContact(cmd *cobra.Command, args []string) {
	// Validate all mandatory arguments (everything except the optional
	// apartment); the helper prints its own diagnostics on failure.
	if !utils.CheckParamsExceptApartment(args[0], args[1], args[2], args[3], args[4]) {
		return
	}
	// Collect the address parts from the arguments.
	address := []string{args[2], args[3], args[4]}
	if len(args) > 5 {
		// Fixed ordering: validate the apartment BEFORE appending it to the
		// address (the original appended first and only then validated,
		// relying on the early return to discard the mutation).
		if !utils.Validate(args[5], utils.BuildingOrApartmentFlag) {
			utils.PrintValidationMessages()
			return
		}
		address = append(address, args[5])
	}
	// Build the contact and hand it off for persistence. The address is
	// stored as a single string with "-" separators for file readability.
	contact := models.NewContact(args[0], args[1], strings.Join(address, "-"))
	if utils.AddContact(contact) {
		fmt.Println("Запись успешно добавлена")
	} else {
		fmt.Println("Запись не добавлена")
	}
}
// init registers addContactCmd with the root command.
func init() {
	rootCmd.AddCommand(addContactCmd)
	// Here you will define your flags and configuration settings.
	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// addContactCmd.PersistentFlags().String("foo", "", "A help for foo")
	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// addContactCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
|
package response
import (
"github.com/gin-gonic/gin"
"net/http"
)
// RestErr is the envelope handlers return to clients when a request fails.
// Message carries arbitrary key/value detail; Status is the HTTP status code.
//
// NOTE(review): Message has no json tag, so it serializes as "Message" while
// Status serializes as "status" — confirm this casing mismatch is intentional
// before adding a tag (adding one changes the wire format).
type RestErr struct {
	Message gin.H
	Status  int `json:"status"`
}
// NewBadRequest builds a 400 Bad Request error with the given detail.
func NewBadRequest(message gin.H) *RestErr {
	return &RestErr{Message: message, Status: http.StatusBadRequest}
}
// NewPartialProcessError builds an error for partially processed requests.
// NOTE(review): 206 Partial Content is a success status for range responses;
// 207 or 422 may be closer to the intent — confirm before changing, since
// clients observe this code.
func NewPartialProcessError(message gin.H) *RestErr {
	return &RestErr{Message: message, Status: http.StatusPartialContent}
}
// NewNotFound builds a 404 Not Found error with the given detail.
func NewNotFound(message gin.H) *RestErr {
	return &RestErr{Message: message, Status: http.StatusNotFound}
}
// NewInternalServerError builds a 500 Internal Server Error with the given detail.
func NewInternalServerError(message gin.H) *RestErr {
	return &RestErr{Message: message, Status: http.StatusInternalServerError}
}
// NewNotImplementedError builds a 501 Not Implemented error with the given detail.
func NewNotImplementedError(message gin.H) *RestErr {
	return &RestErr{Message: message, Status: http.StatusNotImplemented}
}
// NewEntityAlreadyExists builds an error for duplicate entities.
// NOTE(review): 208 Already Reported is a WebDAV status; 409 Conflict is the
// conventional "already exists" code — confirm clients before changing.
func NewEntityAlreadyExists(message gin.H) *RestErr {
	return &RestErr{Message: message, Status: http.StatusAlreadyReported}
}
// NewAuthError builds a 401 Unauthorized error with the given detail.
func NewAuthError(message gin.H) *RestErr {
	return &RestErr{Message: message, Status: http.StatusUnauthorized}
}
// NewCollectorTypeError builds a 400 Bad Request error for invalid collector
// types (same status as NewBadRequest, kept separate for call-site clarity).
func NewCollectorTypeError(message gin.H) *RestErr {
	return &RestErr{Message: message, Status: http.StatusBadRequest}
}
|
package line
import (
	"context"
	"errors"
	"net/http"
	"time"

	"github.com/otamoe/oauth-client"
)
type (
	// Client is the LINE OAuth2 client. It embeds the generic oauth.OAuth2
	// behavior and adds LINE-specific profile fetching (see User).
	Client struct {
		oauth.OAuth2
	}
)
// Endpoint describes LINE Login v2.1's OAuth2 endpoints and how credentials
// are transmitted: Basic auth for the client, Bearer tokens for API calls.
var Endpoint = oauth.Endpoint{
	Name:           "line",
	AuthorizeURL:   "https://access.line.me/oauth2/v2.1/authorize",
	AccessTokenURL: "https://api.line.me/oauth2/v2.1/token",
	RevokeTokenURL: "https://api.line.me/oauth2/v2.1/token",
	APIURL:         "https://api.line.me/v2",
	ClientHeader:   "Basic",
	TokenHeader:    "Bearer",
}
// User fetches the LINE profile for the token's owner and maps it onto the
// generic oauth.User structure. Raw fields used: "userId" (required),
// "displayName" and "pictureUrl" (optional).
func (c *Client) User(ctx context.Context, token *oauth.Token) (user *oauth.User, err error) {
	now := time.Now()
	var req *http.Request
	if req, err = http.NewRequest("GET", c.Endpoint.APIURL+"/profile", nil); err != nil {
		return
	}
	httpClient := oauth.HTTPClient(ctx, c, token)
	var raw map[string]interface{}
	if raw, err = c.Response(ctx, httpClient, req); err != nil {
		return
	}
	// Guard the type assertion: a missing or non-string "userId" previously
	// panicked; report it as an error instead.
	id, ok := raw["userId"].(string)
	if !ok {
		err = errors.New("line: profile response missing userId")
		return
	}
	user = &oauth.User{
		ID:      id,
		Raw:     raw,
		Updated: &now,
	}
	if v, ok := raw["displayName"].(string); ok {
		user.Nickname = v
	}
	if v, ok := raw["pictureUrl"].(string); ok {
		user.Avatar = v
	}
	return
}
|
package tts
import (
	"fmt"
	"log"
	"regexp"
)
// matchConvertResult parses a convert-API response of the form
// "<resultCode>&<resultMsg>[&<convertID>]" and returns the convert ID.
//
// Fixed: the original declared an error return but called log.Fatalf on
// failure (terminating the process); it now returns the error. The match is
// also computed once instead of three times, and the regexp is built with
// MustCompile instead of discarding Compile's error.
func matchConvertResult(r convertResult) (string, error) {
	s := r.Result()
	re := regexp.MustCompile(`(?P<resultCode>[[:digit:]]+)&(?P<resultMsg>[\s[:alnum:]]+)&?(?P<covertID>[[:digit:]]+)?`)
	m := re.FindStringSubmatch(s)
	// A nil match or a non-zero result code is a failed conversion request.
	if m == nil || m[1] != "0" {
		return "", fmt.Errorf("matchConvertResult: %s, not match or fail: %v", s, m)
	}
	log.Printf("matchConvertResult: %v\n", m)
	return m[3], nil
}
// matchStatusResult parses a status-API response of the form
// "<resultCode>&<resultMsg>&<statusCode>&<statusMsg>[&<url>]" and returns the
// download URL once the conversion completed (statusCode "2"); otherwise "".
//
// Fixed: the submatch is computed once instead of up to four times, and the
// regexp uses MustCompile instead of discarding Compile's error.
func matchStatusResult(r statusResult) string {
	s := r.Result()
	re := regexp.MustCompile(
		`(?P<resultCode>[[:digit:]]+)&(?P<resultMsg>[[:alnum:]]+)` +
			`&(?P<statusCode>[[:digit:]]+)&(?P<statusMsg>[[:alnum:]]+)&?(?P<url>.*)?`)
	m := re.FindStringSubmatch(s)
	if m == nil {
		log.Printf("matchStatusResult: not match %v\n", m)
		return ""
	}
	if m[3] != "2" {
		log.Printf("matchStatusResult: not completed yet %v\n", m)
		return ""
	}
	return m[5]
}
|
package main
import (
"flag"
"log"
"os"
"os/signal"
"syscall"
"tesla_exporter/exporter"
"time"
"github.com/prometheus/client_golang/prometheus"
)
// s is the running exporter server; package-level so main can stop it on shutdown.
var s *exporter.Server

// Tesla account credentials.
// NOTE(review): nothing in this file assigns these and the corresponding
// flags in main are commented out — they are always empty strings here.
var (
	email    string
	password string
)
// main wires up the Tesla collector, registers it with a fresh Prometheus
// registry, serves metrics on the configured address, and shuts down cleanly
// on SIGINT/SIGTERM.
func main() {
	// var email = flag.String("email", "", "tesla email address.")
	// var password = flag.String("password", "", "tesla account password.")
	var internal = flag.Duration("expire", 30*time.Second, "expire cache metrics.")
	var addr = flag.String("addr", "0.0.0.0:9610", "the server and port.")
	flag.Parse()

	// Build the collector and start its background refresh loop.
	collector := exporter.NewCollector(email, password, *internal)
	go collector.Refresh()

	r := prometheus.NewRegistry()
	if err := r.Register(collector); err != nil {
		// Fixed: log.Fatal does not format verbs (the original passed "%w",
		// which is only meaningful to fmt.Errorf); use Fatalf with %v.
		log.Fatalf("Register collector failed: %v", err)
	}

	s = exporter.NewServer(*addr, r)
	go s.ListenAndServe()

	// Block until an exit signal arrives, then stop the server.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
	<-stop
	s.Stop()
}
|
package cmd
import (
	"fmt"
	"go/build"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"github.com/spf13/cobra"

	"github.com/cosmos/cosmos-sdk/version"
	tmversion "github.com/tendermint/tendermint/version"
)
// remoteBasecoinPath is the example project used as the template for new zones.
var remoteBasecoinPath = "github.com/cosmos/cosmos-sdk/docs/examples/basecoin"

// replacer rewrites all instances of basecoin/basecli/BasecoinApp into
// project-specific names. It is initialized by initCmd's RunE (after the
// project name is known) and must be set before the copy helpers run.
var replacer *strings.Replacer

// remoteProjectPath is the remote path for the new project (--project-path flag).
var remoteProjectPath string
// init wires the --project-path flag and registers initCmd on the root command.
func init() {
	initCmd.Flags().StringVarP(&remoteProjectPath, "project-path", "p", "", "Remote project path. eg: github.com/your_user_name/project_name")
	rootCmd.AddCommand(initCmd)
}
// initCmd scaffolds a new Cosmos SDK project from the basecoin example: it
// derives project-specific names from [ProjectName], builds the global
// replacer used by the template copier, and sets up the workspace.
var initCmd = &cobra.Command{
	Use:   "init [ProjectName]",
	Short: "Initialize your new cosmos zone",
	Args:  cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Print("Thanks for choosing Cosmos-SDK to build your project.\n\n")
		projectName := args[0]
		// NOTE(review): strings.Title is deprecated in modern Go; fine for
		// ASCII project names, revisit if the toolchain is upgraded.
		capitalizedProjectName := strings.Title(projectName)
		shortProjectName := strings.ToLower(projectName)
		// Default the remote path to the lowercased project name.
		remoteProjectPath = strings.ToLower(strings.TrimSpace(remoteProjectPath))
		if remoteProjectPath == "" {
			remoteProjectPath = strings.ToLower(shortProjectName)
		}
		// Pattern order matters: more specific tokens ("basecli",
		// "basecoind") come before "basecoin" so they are matched first.
		replacer = strings.NewReplacer("basecli", shortProjectName+"cli",
			"basecoind", shortProjectName+"d",
			"BasecoinApp", capitalizedProjectName+"App",
			remoteBasecoinPath, remoteProjectPath,
			"basecoin", shortProjectName,
			"Basecoin", capitalizedProjectName)
		return setupBasecoinWorkspace(shortProjectName, remoteProjectPath)
	},
}
func resolveProjectPath(remoteProjectPath string) string {
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
// Use $HOME/go
}
return gopath + string(os.PathSeparator) + "src" + string(os.PathSeparator) + remoteProjectPath
}
// copyBasecoinTemplate walks the local basecoin example tree and re-creates
// it under projectPath, rewriting both file paths and file contents through
// the package-level replacer (which initCmd must have initialized first).
// File read/write errors inside the walk are intentionally best-effort.
// nolint: unparam, errcheck
func copyBasecoinTemplate(projectName string, projectPath string, remoteProjectPath string) {
	basecoinProjectPath := resolveProjectPath(remoteBasecoinPath)
	filepath.Walk(basecoinProjectPath, func(path string, f os.FileInfo, err error) error {
		if !f.IsDir() {
			data, err := ioutil.ReadFile(path)
			if err != nil {
				return err
			}
			contents := string(data)
			// Extract relative file path eg: app/app.go instead of /Users/..../github.com/cosmos/...examples/basecoin/app/app.go
			relativeFilePath := path[len(basecoinProjectPath)+1:]
			// Evaluating the filepath in the new project folder
			projectFilePath := projectPath + string(os.PathSeparator) + relativeFilePath
			projectFilePath = replacer.Replace(projectFilePath)
			lengthOfRootDir := strings.LastIndex(projectFilePath, string(os.PathSeparator))
			// Extracting the path of root directory from the filepath
			rootDir := projectFilePath[0:lengthOfRootDir]
			// Creating the required directory first
			os.MkdirAll(rootDir, os.ModePerm)
			fmt.Println("Creating " + projectFilePath)
			// Writing the contents to a file in the project folder
			contents = replacer.Replace(contents)
			ioutil.WriteFile(projectFilePath, []byte(contents), os.ModePerm)
		}
		return nil
	})
}
// createGopkg writes a dep Gopkg.toml into projectPath, pinning the SDK and
// key libraries as [[constraint]] sections and protobuf/tendermint as
// [[override]] sections.
//
// Fixed: sections are now emitted in sorted key order so the generated file
// is deterministic (Go map iteration order is randomized), and the loop
// variables no longer shadow the imported "version" package.
// nolint: errcheck
func createGopkg(projectPath string) {
	dependencies := map[string]string{
		"github.com/cosmos/cosmos-sdk": "=" + version.Version,
		"github.com/stretchr/testify":  "=1.2.1",
		"github.com/spf13/cobra":       "=0.0.1",
		"github.com/spf13/viper":       "=1.0.0",
	}
	overrides := map[string]string{
		"github.com/golang/protobuf":       "1.1.0",
		"github.com/tendermint/tendermint": tmversion.Version,
	}
	// sortedKeys yields a map's keys in sorted order for stable output.
	sortedKeys := func(m map[string]string) []string {
		keys := make([]string, 0, len(m))
		for k := range m {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		return keys
	}
	var b strings.Builder
	for _, dep := range sortedKeys(dependencies) {
		b.WriteString("[[constraint]]\n\tname = \"" + dep + "\"\n\tversion = \"" + dependencies[dep] + "\"\n\n")
	}
	for _, dep := range sortedKeys(overrides) {
		b.WriteString("[[override]]\n\tname = \"" + dep + "\"\n\tversion = \"=" + overrides[dep] + "\"\n\n")
	}
	b.WriteString("[prune]\n\tgo-tests = true\n\tunused-packages = true")
	ioutil.WriteFile(projectPath+"/Gopkg.toml", []byte(b.String()), os.ModePerm)
}
// createMakefile writes the project Makefile, with all basecoin-derived
// names rewritten through the package-level replacer (which initCmd must
// have initialized first).
// nolint: errcheck
func createMakefile(projectPath string) {
	// Create makefile
	// TODO: Should we use tools/ directory as in Cosmos-SDK to get tools for linting etc.
	makefileContents := `PACKAGES=$(shell go list ./... | grep -v '/vendor/')
all: get_tools get_vendor_deps build test
get_tools:
go get github.com/golang/dep/cmd/dep
build:
go build -o bin/basecli cmd/basecli/main.go && go build -o bin/basecoind cmd/basecoind/main.go
get_vendor_deps:
@rm -rf vendor/
@dep ensure
test:
@go test $(PACKAGES)
benchmark:
@go test -bench=. $(PACKAGES)
.PHONY: all build test benchmark`
	// Replacing instances of base* to project specific names
	makefileContents = replacer.Replace(makefileContents)
	ioutil.WriteFile(projectPath+"/Makefile", []byte(makefileContents), os.ModePerm)
}
// setupBasecoinWorkspace creates the new project under $GOPATH/src: it
// refuses to overwrite an existing directory, then copies the basecoin
// template and generates Gopkg.toml and the Makefile.
func setupBasecoinWorkspace(projectName string, remoteProjectPath string) error {
	projectPath := resolveProjectPath(remoteProjectPath)
	fmt.Println("Configuring your project in " + projectPath)
	// Refuse to clobber an existing project directory.
	if _, err := os.Stat(projectPath); !os.IsNotExist(err) {
		return fmt.Errorf("Unable to initialize the project. %s already exists", projectPath)
	}
	copyBasecoinTemplate(projectName, projectPath, remoteProjectPath)
	createGopkg(projectPath)
	createMakefile(projectPath)
	fmt.Printf("Initialized a new project at %s.\nHappy hacking!\n", projectPath)
	return nil
}
|
package objects
import (
"io"
"log"
"net/http"
"os"
"strings"
)
// storagePath is the directory objects are stored in, derived from the
// STORAGE_ROOT environment variable at startup.
var storagePath string

func init() {
	storagePath = os.Getenv("STORAGE_ROOT") + "/objects/"
}
// Handler routes object-store requests: PUT stores an object, GET retrieves
// one, and any other method is rejected with 405 Method Not Allowed.
func Handler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodPut:
		// put method store object
		put(w, r)
	case http.MethodGet:
		// get method get object
		get(w, r)
	default:
		w.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// put stores the request body as the object named by the third URL path
// segment (/objects/<name>).
//
// Fixed: the io.Copy error was silently ignored, so a failed body transfer
// produced a truncated object with a 200 response; it is now reported as 500.
// NOTE(review): the object name is taken from the URL unsanitized; a name
// containing ".." could escape storagePath — confirm upstream routing
// prevents this, or sanitize here.
func put(w http.ResponseWriter, r *http.Request) {
	f, err := os.Create(storagePath + strings.Split(r.URL.EscapedPath(), "/")[2])
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		log.Printf("store error: %s\n", err.Error())
		return
	}
	defer f.Close()
	log.Printf("[debug] store obj (%s)\n", f.Name())
	if _, err := io.Copy(f, r.Body); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		log.Printf("store error: %s\n", err.Error())
	}
}
// get streams the object named by the third URL path segment
// (/objects/<name>) to the client.
//
// Fixed: a missing object now yields 404 Not Found instead of a blanket 500.
func get(w http.ResponseWriter, r *http.Request) {
	f, err := os.Open(storagePath + strings.Split(r.URL.EscapedPath(), "/")[2])
	if err != nil {
		if os.IsNotExist(err) {
			w.WriteHeader(http.StatusNotFound)
		} else {
			w.WriteHeader(http.StatusInternalServerError)
		}
		log.Printf("get error: %s\n", err.Error())
		return
	}
	defer f.Close()
	log.Printf("[debug] get obj (%s)\n", f.Name())
	io.Copy(w, f)
}
|
package leetcode
import "testing"
// TestMostCommonWord checks the LeetCode 819 example: with "hit" banned, the
// most frequent remaining word (case-insensitive, punctuation-stripped) is "ball".
func TestMostCommonWord(t *testing.T) {
	got := mostCommonWord("Bob hit a ball, the hit BALL flew far after it was hit.", []string{"hit"})
	if got != "ball" {
		t.Fatalf("mostCommonWord() = %q, want %q", got, "ball")
	}
}
|
package pgtune
import (
"math"
"math/rand"
"testing"
"github.com/timescale/timescaledb-tune/internal/parse"
)
// defaultMemoryToBaseVals provides a mapping from test memory levels to
// expected "base" memory settings. These "base" values are the values if
// there is only 1 CPU and 20 max connections to the database. Most settings
// are actually unaffected by number of CPUs and max connections; the
// exception is work_mem, so the adjustment is done in the init function.
var defaultMemoryToBaseVals = map[uint64]map[string]uint64{
	10 * parse.Gigabyte: {
		SharedBuffersKey:      2560 * parse.Megabyte,
		EffectiveCacheKey:     7680 * parse.Megabyte,
		MaintenanceWorkMemKey: 1280 * parse.Megabyte,
		WorkMemKey:            64 * parse.Megabyte,
	},
	12 * parse.Gigabyte: {
		SharedBuffersKey:      3 * parse.Gigabyte,
		EffectiveCacheKey:     9 * parse.Gigabyte,
		MaintenanceWorkMemKey: 1536 * parse.Megabyte,
		WorkMemKey:            78643 * parse.Kilobyte,
	},
	32 * parse.Gigabyte: {
		SharedBuffersKey:      8 * parse.Gigabyte,
		EffectiveCacheKey:     24 * parse.Gigabyte,
		// maintenance_work_mem is capped at its limit for large memory.
		MaintenanceWorkMemKey: maintenanceWorkMemLimit,
		WorkMemKey:            209715 * parse.Kilobyte,
	},
}
// promscaleMemoryToBaseVals provides a mapping from test memory levels to
// expected "base" memory settings for the Promscale profile (note the larger
// shared_buffers fractions versus the default profile). These "base" values
// are the values if there is only 1 CPU and 20 max connections to the
// database; work_mem is adjusted for CPUs/conns in the init function.
var promscaleMemoryToBaseVals = map[uint64]map[string]uint64{
	10 * parse.Gigabyte: {
		SharedBuffersKey:      5120 * parse.Megabyte,
		EffectiveCacheKey:     7680 * parse.Megabyte,
		MaintenanceWorkMemKey: 1280 * parse.Megabyte,
		WorkMemKey:            64 * parse.Megabyte,
	},
	12 * parse.Gigabyte: {
		SharedBuffersKey:      6 * parse.Gigabyte,
		EffectiveCacheKey:     9 * parse.Gigabyte,
		MaintenanceWorkMemKey: 1536 * parse.Megabyte,
		WorkMemKey:            78643 * parse.Kilobyte,
	},
	32 * parse.Gigabyte: {
		SharedBuffersKey:      16 * parse.Gigabyte,
		EffectiveCacheKey:     24 * parse.Gigabyte,
		MaintenanceWorkMemKey: maintenanceWorkMemLimit,
		WorkMemKey:            209715 * parse.Kilobyte,
	},
}
// highCPUs is the number of CPUs that is high enough that work_mem would
// normally fall below the minimum (64KB) using the standard formula.
const highCPUs = 9000

var (
	// cpuVals is the different amounts of CPUs to test
	cpuVals = []int{1, 4, 5, highCPUs}
	// connVals is the different number of conns to test (0 means "use default")
	connVals = []uint64{0, 19, 20, 50}
	// defaultMemorySettingsMatrix stores the test cases for MemoryRecommend
	// along with the expected values, keyed memory -> cpus -> conns -> setting.
	defaultMemorySettingsMatrix = map[uint64]map[int]map[uint64]map[string]string{}
	// promscaleMemorySettingsMatrix stores the test cases for
	// PromscaleMemoryRecommend along with the expected values.
	promscaleMemorySettingsMatrix = map[uint64]map[int]map[uint64]map[string]string{}
)
func init() {
for mem, baseMatrix := range defaultMemoryToBaseVals {
defaultMemorySettingsMatrix[mem] = make(map[int]map[uint64]map[string]string)
for _, cpus := range cpuVals {
defaultMemorySettingsMatrix[mem][cpus] = make(map[uint64]map[string]string)
for _, conns := range connVals {
defaultMemorySettingsMatrix[mem][cpus][conns] = make(map[string]string)
defaultMemorySettingsMatrix[mem][cpus][conns][SharedBuffersKey] = parse.BytesToPGFormat(baseMatrix[SharedBuffersKey])
defaultMemorySettingsMatrix[mem][cpus][conns][EffectiveCacheKey] = parse.BytesToPGFormat(baseMatrix[EffectiveCacheKey])
defaultMemorySettingsMatrix[mem][cpus][conns][MaintenanceWorkMemKey] = parse.BytesToPGFormat(baseMatrix[MaintenanceWorkMemKey])
if cpus == highCPUs {
defaultMemorySettingsMatrix[mem][cpus][conns][WorkMemKey] = parse.BytesToPGFormat(workMemMin)
} else {
// CPU only affects work_mem in groups of 2 (i.e. 2 and 3 CPUs are treated as the same)
cpuFactor := math.Round(float64(cpus) / 2.0)
// Our work_mem values are derivied by a certain amount of memory lost/gained when
// moving away from baseConns
connFactor := float64(MaxConnectionsDefault) / float64(baseConns)
if conns != 0 {
connFactor = float64(conns) / float64(baseConns)
}
defaultMemorySettingsMatrix[mem][cpus][conns][WorkMemKey] =
parse.BytesToPGFormat(uint64(float64(baseMatrix[WorkMemKey]) / connFactor / cpuFactor))
}
}
}
}
for mem, baseMatrix := range promscaleMemoryToBaseVals {
promscaleMemorySettingsMatrix[mem] = make(map[int]map[uint64]map[string]string)
for _, cpus := range cpuVals {
promscaleMemorySettingsMatrix[mem][cpus] = make(map[uint64]map[string]string)
for _, conns := range connVals {
promscaleMemorySettingsMatrix[mem][cpus][conns] = make(map[string]string)
promscaleMemorySettingsMatrix[mem][cpus][conns][SharedBuffersKey] = parse.BytesToPGFormat(baseMatrix[SharedBuffersKey])
promscaleMemorySettingsMatrix[mem][cpus][conns][EffectiveCacheKey] = parse.BytesToPGFormat(baseMatrix[EffectiveCacheKey])
promscaleMemorySettingsMatrix[mem][cpus][conns][MaintenanceWorkMemKey] = parse.BytesToPGFormat(baseMatrix[MaintenanceWorkMemKey])
if cpus == highCPUs {
promscaleMemorySettingsMatrix[mem][cpus][conns][WorkMemKey] = parse.BytesToPGFormat(workMemMin)
} else {
// CPU only affects work_mem in groups of 2 (i.e. 2 and 3 CPUs are treated as the same)
cpuFactor := math.Round(float64(cpus) / 2.0)
// Our work_mem values are derivied by a certain amount of memory lost/gained when
// moving away from baseConns
connFactor := float64(MaxConnectionsDefault) / float64(baseConns)
if conns != 0 {
connFactor = float64(conns) / float64(baseConns)
}
promscaleMemorySettingsMatrix[mem][cpus][conns][WorkMemKey] =
parse.BytesToPGFormat(uint64(float64(baseMatrix[WorkMemKey]) / connFactor / cpuFactor))
}
}
}
}
}
// TestNewMemoryRecommender checks constructor field wiring over a spread of
// random memory/CPU inputs.
func TestNewMemoryRecommender(t *testing.T) {
	for i := 0; i < 1000000; i++ {
		mem := rand.Uint64()
		cpus := rand.Intn(128)
		r := NewMemoryRecommender(mem, cpus, MaxConnectionsDefault)
		if r == nil {
			t.Errorf("unexpected nil recommender")
		}
		// Fixed: this failure message previously said "incorrect cpus" and
		// printed the cpus values (copy-paste from the check below).
		if got := r.totalMemory; got != mem {
			t.Errorf("recommender has incorrect total memory: got %d want %d", got, mem)
		}
		if got := r.cpus; got != cpus {
			t.Errorf("recommender has incorrect cpus: got %d want %d", got, cpus)
		}
		if !r.IsAvailable() {
			t.Errorf("unexpectedly not available")
		}
	}
}
// TestNewPromscaleMemoryRecommender checks constructor field wiring over a
// spread of random memory/CPU inputs.
func TestNewPromscaleMemoryRecommender(t *testing.T) {
	for i := 0; i < 1000000; i++ {
		mem := rand.Uint64()
		cpus := rand.Intn(128)
		r := NewPromscaleMemoryRecommender(mem, cpus, MaxConnectionsDefault)
		if r == nil {
			t.Errorf("unexpected nil recommender")
		}
		// Fixed: this failure message previously said "incorrect cpus" and
		// printed the cpus values (copy-paste from the check below).
		if got := r.totalMemory; got != mem {
			t.Errorf("recommender has incorrect total memory: got %d want %d", got, mem)
		}
		if got := r.cpus; got != cpus {
			t.Errorf("recommender has incorrect cpus: got %d want %d", got, cpus)
		}
		if !r.IsAvailable() {
			t.Errorf("unexpectedly not available")
		}
	}
}
// TestMemoryRecommenderRecommendWindows verifies the Windows-specific
// work_mem formula against reference values taken from pgtune, across
// several memory sizes, CPU counts, and connection counts.
func TestMemoryRecommenderRecommendWindows(t *testing.T) {
	cases := []struct {
		desc        string
		totalMemory uint64
		cpus        int
		conns       uint64
		want        string
	}{
		{
			desc:        "1GB",
			totalMemory: 1 * parse.Gigabyte,
			cpus:        1,
			conns:       baseConns,
			want:        "6553" + parse.KB, // from pgtune
		},
		{
			desc:        "1GB, 10 conns",
			totalMemory: 1 * parse.Gigabyte,
			cpus:        1,
			conns:       10,
			want:        "13107" + parse.KB, // from pgtune
		},
		{
			desc:        "1GB, 4 cpus",
			totalMemory: 1 * parse.Gigabyte,
			cpus:        4,
			conns:       baseConns,
			want:        "3276" + parse.KB, // from pgtune
		},
		{
			desc:        "2GB",
			totalMemory: 2 * parse.Gigabyte,
			cpus:        1,
			conns:       baseConns,
			want:        "13107" + parse.KB, // from pgtune
		},
		{
			desc:        "2GB, 5 cpus",
			totalMemory: 2 * parse.Gigabyte,
			cpus:        5,
			conns:       baseConns,
			want:        "4369" + parse.KB, // from pgtune
		},
		{
			desc:        "3GB",
			totalMemory: 3 * parse.Gigabyte,
			cpus:        1,
			conns:       baseConns,
			want:        "21845" + parse.KB, // from pgtune
		},
		{
			desc:        "3GB, 3 cpus",
			totalMemory: 3 * parse.Gigabyte,
			cpus:        3,
			conns:       baseConns,
			want:        "10922" + parse.KB, // from pgtune
		},
		{
			desc:        "8GB",
			totalMemory: 8 * parse.Gigabyte,
			cpus:        1,
			conns:       baseConns,
			want:        "64" + parse.MB, // from pgtune
		},
		{
			desc:        "8GB, 8 cpus",
			totalMemory: 8 * parse.Gigabyte,
			cpus:        8,
			conns:       baseConns,
			want:        "16" + parse.MB, // from pgtune
		},
		{
			desc:        "16GB",
			totalMemory: 16 * parse.Gigabyte,
			cpus:        1,
			conns:       baseConns,
			want:        "135441" + parse.KB, // from pgtune
		},
		{
			desc:        "16GB, 10 cpus",
			totalMemory: 16 * parse.Gigabyte,
			cpus:        10,
			conns:       baseConns,
			want:        "27088" + parse.KB, // from pgtune
		},
		{
			// Extreme CPU count: work_mem must clamp to the 64KB minimum.
			desc:        "1GB, 9000 cpus",
			totalMemory: parse.Gigabyte,
			cpus:        highCPUs,
			conns:       baseConns,
			want:        "64" + parse.KB,
		},
	}
	for _, c := range cases {
		mr := NewMemoryRecommender(c.totalMemory, c.cpus, c.conns)
		if got := mr.recommendWindows(); got != c.want {
			t.Errorf("%s: incorrect value: got %s want %s", c.desc, got, c.want)
		}
	}
}
// TestMemoryRecommenderRecommend checks MemoryRecommender against every
// entry of the precomputed defaultMemorySettingsMatrix.
func TestMemoryRecommenderRecommend(t *testing.T) {
	for totalMemory, cpuMatrix := range defaultMemorySettingsMatrix {
		for cpus, connMatrix := range cpuMatrix {
			for conns, cases := range connMatrix {
				mr := NewMemoryRecommender(totalMemory, cpus, conns)
				testRecommender(t, mr, MemoryKeys, cases)
			}
		}
	}
}
// TestPromscaleMemoryRecommenderRecommend checks PromscaleMemoryRecommender
// against every entry of the precomputed promscaleMemorySettingsMatrix.
func TestPromscaleMemoryRecommenderRecommend(t *testing.T) {
	for totalMemory, cpuMatrix := range promscaleMemorySettingsMatrix {
		for cpus, connMatrix := range cpuMatrix {
			for conns, cases := range connMatrix {
				mr := NewPromscaleMemoryRecommender(totalMemory, cpus, conns)
				testRecommender(t, mr, MemoryKeys, cases)
			}
		}
	}
}
// TestMemoryRecommenderNoRecommendation verifies that an unknown setting key
// yields NoRecommendation.
func TestMemoryRecommenderNoRecommendation(t *testing.T) {
	r := NewMemoryRecommender(1, 1, 1)
	if r.Recommend("foo") != NoRecommendation {
		t.Error("Recommendation was provided when there should have been none")
	}
}
// TestPromscaleMemoryRecommenderNoRecommendation verifies that an unknown
// setting key yields NoRecommendation for the Promscale recommender.
func TestPromscaleMemoryRecommenderNoRecommendation(t *testing.T) {
	r := NewPromscaleMemoryRecommender(1, 1, 1)
	if r.Recommend("foo") != NoRecommendation {
		t.Error("Recommendation was provided when there should have been none")
	}
}
// TestMemorySettingsGroup checks the memory settings group (default profile)
// against every entry of defaultMemorySettingsMatrix.
func TestMemorySettingsGroup(t *testing.T) {
	for totalMemory, cpuMatrix := range defaultMemorySettingsMatrix {
		for cpus, connMatrix := range cpuMatrix {
			for conns, matrix := range connMatrix {
				config := getDefaultTestSystemConfig(t)
				config.CPUs = cpus
				config.Memory = totalMemory
				config.maxConns = conns
				sg := GetSettingsGroup(MemoryLabel, config)
				testSettingGroup(t, sg, DefaultProfile, matrix, MemoryLabel, MemoryKeys)
			}
		}
	}
}
// TestPromscaleMemorySettingsGroup checks the memory settings group
// (Promscale profile) against every entry of promscaleMemorySettingsMatrix.
func TestPromscaleMemorySettingsGroup(t *testing.T) {
	for totalMemory, cpuMatrix := range promscaleMemorySettingsMatrix {
		for cpus, connMatrix := range cpuMatrix {
			for conns, matrix := range connMatrix {
				config := getDefaultTestSystemConfig(t)
				config.CPUs = cpus
				config.Memory = totalMemory
				config.maxConns = conns
				sg := GetSettingsGroup(MemoryLabel, config)
				testSettingGroup(t, sg, PromscaleProfile, matrix, MemoryLabel, MemoryKeys)
			}
		}
	}
}
|
// Package solver implements a general-purpose solver for boolean
// constraint satisfiability problems.
package solver
|
package git
/*
#include <git2.h>
extern int _go_git_index_add_all(git_index*, const git_strarray*, unsigned int, void*);
extern int _go_git_index_update_all(git_index*, const git_strarray*, void*);
extern int _go_git_index_remove_all(git_index*, const git_strarray*, void*);
*/
import "C"
import (
"fmt"
"runtime"
"unsafe"
)
// IndexMatchedPathCallback is invoked for each pathspec-matched file in
// AddAll/UpdateAll/RemoveAll with (path, matchedPathspec); returning a
// non-nil error aborts the operation.
type IndexMatchedPathCallback func(string, string) error

// indexMatchedPathCallbackData carries the Go callback and an error slot
// across the cgo boundary (tracked via pointerHandles).
type indexMatchedPathCallbackData struct {
	callback    IndexMatchedPathCallback
	errorTarget *error
}
// IndexAddOption is a set of flags for APIs that add files matching pathspec.
// The values mirror libgit2's GIT_INDEX_ADD_* flags.
type IndexAddOption uint

const (
	IndexAddDefault              IndexAddOption = C.GIT_INDEX_ADD_DEFAULT
	IndexAddForce                IndexAddOption = C.GIT_INDEX_ADD_FORCE
	IndexAddDisablePathspecMatch IndexAddOption = C.GIT_INDEX_ADD_DISABLE_PATHSPEC_MATCH
	IndexAddCheckPathspec        IndexAddOption = C.GIT_INDEX_ADD_CHECK_PATHSPEC
)
// IndexStageState indicates the stage of an index entry: normal, or one of
// the sides of a merge conflict. Values mirror libgit2's GIT_INDEX_STAGE_*.
type IndexStageState int

const (
	// IndexStageAny matches any index stage.
	//
	// Some index APIs take a stage to match; pass this value to match
	// any entry matching the path regardless of stage.
	IndexStageAny IndexStageState = C.GIT_INDEX_STAGE_ANY
	// IndexStageNormal is a normal staged file in the index.
	IndexStageNormal IndexStageState = C.GIT_INDEX_STAGE_NORMAL
	// IndexStageAncestor is the ancestor side of a conflict.
	IndexStageAncestor IndexStageState = C.GIT_INDEX_STAGE_ANCESTOR
	// IndexStageOurs is the "ours" side of a conflict.
	IndexStageOurs IndexStageState = C.GIT_INDEX_STAGE_OURS
	// IndexStageTheirs is the "theirs" side of a conflict.
	IndexStageTheirs IndexStageState = C.GIT_INDEX_STAGE_THEIRS
)
// Index wraps a libgit2 git_index. repo is the owning repository, or nil for
// a free-standing index (see NewIndex/OpenIndex). The underlying pointer is
// released by a finalizer (see newIndexFromC).
type Index struct {
	doNotCompare
	ptr  *C.git_index
	repo *Repository
}

// IndexTime is an index timestamp, split into seconds and nanoseconds.
type IndexTime struct {
	Seconds     int32
	Nanoseconds uint32
}

// IndexEntry mirrors git_index_entry: the stat-style metadata and blob ID
// the index records for a single path.
type IndexEntry struct {
	Ctime IndexTime
	Mtime IndexTime
	Mode  Filemode
	Uid   uint32
	Gid   uint32
	Size  uint32
	Id    *Oid
	Path  string
}
// newIndexEntryFromC copies a C git_index_entry into a Go IndexEntry,
// returning nil for a nil input.
func newIndexEntryFromC(entry *C.git_index_entry) *IndexEntry {
	if entry == nil {
		return nil
	}
	return &IndexEntry{
		IndexTime{int32(entry.ctime.seconds), uint32(entry.ctime.nanoseconds)},
		IndexTime{int32(entry.mtime.seconds), uint32(entry.mtime.nanoseconds)},
		Filemode(entry.mode),
		uint32(entry.uid),
		uint32(entry.gid),
		uint32(entry.file_size),
		newOidFromC(&entry.id),
		C.GoString(entry.path),
	}
}
// populateCIndexEntry fills a C git_index_entry from a Go IndexEntry.
// dest.path is allocated with C.CString and must be released with
// freeCIndexEntry once the C call using dest has completed.
func populateCIndexEntry(source *IndexEntry, dest *C.git_index_entry) {
	dest.ctime.seconds = C.int32_t(source.Ctime.Seconds)
	dest.ctime.nanoseconds = C.uint32_t(source.Ctime.Nanoseconds)
	dest.mtime.seconds = C.int32_t(source.Mtime.Seconds)
	dest.mtime.nanoseconds = C.uint32_t(source.Mtime.Nanoseconds)
	dest.mode = C.uint32_t(source.Mode)
	dest.uid = C.uint32_t(source.Uid)
	dest.gid = C.uint32_t(source.Gid)
	dest.file_size = C.uint32_t(source.Size)
	if source.Id != nil {
		dest.id = *source.Id.toC()
	}
	dest.path = C.CString(source.Path)
}
// freeCIndexEntry releases the C string allocated by populateCIndexEntry.
func freeCIndexEntry(entry *C.git_index_entry) {
	C.free(unsafe.Pointer(entry.path))
}
// newIndexFromC wraps a C git_index pointer in a Go Index and installs a
// finalizer so the underlying object is freed when the Index is collected.
func newIndexFromC(ptr *C.git_index, repo *Repository) *Index {
	idx := &Index{ptr: ptr, repo: repo}
	runtime.SetFinalizer(idx, (*Index).Free)
	return idx
}
// NewIndex allocates a new index. It won't be associated with any
// file on the filesystem or repository.
func NewIndex() (*Index, error) {
	var ptr *C.git_index

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Negative return values from libgit2 are errors.
	if err := C.git_index_new(&ptr); err < 0 {
		return nil, MakeGitError(err)
	}
	return newIndexFromC(ptr, nil), nil
}
// OpenIndex creates a new index at the given path. If the file does
// not exist it will be created when Write() is called.
func OpenIndex(path string) (*Index, error) {
	var ptr *C.git_index
	var cpath = C.CString(path)
	defer C.free(unsafe.Pointer(cpath))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := C.git_index_open(&ptr, cpath); err < 0 {
		return nil, MakeGitError(err)
	}
	return newIndexFromC(ptr, nil), nil
}
// Path returns the index' path on disk or an empty string if it
// exists only in memory.
func (v *Index) Path() string {
	ret := C.GoString(C.git_index_path(v.ptr))
	// Keep v alive across the C call so the finalizer cannot free v.ptr.
	runtime.KeepAlive(v)
	return ret
}
// Clear clears the index object in memory; changes must be explicitly
// written to disk for them to take effect persistently.
func (v *Index) Clear() error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	err := C.git_index_clear(v.ptr)
	runtime.KeepAlive(v)
	if err < 0 {
		return MakeGitError(err)
	}
	return nil
}
// Add adds or replaces the given entry to the index, making a copy of
// the data.
func (v *Index) Add(entry *IndexEntry) error {
	var centry C.git_index_entry
	// Build a temporary C entry; its C-allocated path is freed on return.
	populateCIndexEntry(entry, &centry)
	defer freeCIndexEntry(&centry)

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	err := C.git_index_add(v.ptr, &centry)
	runtime.KeepAlive(v)
	if err < 0 {
		return MakeGitError(err)
	}
	return nil
}
// AddByPath adds or updates the index entry for the file at path, reading
// the file's data from the working directory.
func (v *Index) AddByPath(path string) error {
	cstr := C.CString(path)
	defer C.free(unsafe.Pointer(cstr))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_index_add_bypath(v.ptr, cstr)
	runtime.KeepAlive(v)
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// AddFromBuffer adds or replaces an index entry from a buffer in memory
func (v *Index) AddFromBuffer(entry *IndexEntry, buffer []byte) error {
var centry C.git_index_entry
populateCIndexEntry(entry, ¢ry)
defer freeCIndexEntry(¢ry)
var cbuffer unsafe.Pointer
if len(buffer) > 0 {
cbuffer = unsafe.Pointer(&buffer[0])
}
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := C.git_index_add_from_buffer(v.ptr, ¢ry, cbuffer, C.size_t(len(buffer))); err < 0 {
return MakeGitError(err)
}
return nil
}
// AddAll adds or updates all index entries matching pathspecs, honoring
// flags. If callback is non-nil it is invoked per matched path and may abort
// the operation by returning an error; that error is surfaced here when the
// C layer reports ErrorCodeUser.
func (v *Index) AddAll(pathspecs []string, flags IndexAddOption, callback IndexMatchedPathCallback) error {
	cpathspecs := C.git_strarray{}
	cpathspecs.count = C.size_t(len(pathspecs))
	cpathspecs.strings = makeCStringsFromStrings(pathspecs)
	defer freeStrarray(&cpathspecs)

	var err error
	data := indexMatchedPathCallbackData{
		callback:    callback,
		errorTarget: &err,
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Only track a handle when a callback is supplied; a nil handle tells
	// the C shim to skip the callback entirely.
	var handle unsafe.Pointer
	if callback != nil {
		handle = pointerHandles.Track(&data)
		defer pointerHandles.Untrack(handle)
	}

	ret := C._go_git_index_add_all(
		v.ptr,
		&cpathspecs,
		C.uint(flags),
		handle,
	)
	runtime.KeepAlive(v)
	// ErrorCodeUser means our Go callback aborted; prefer its error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// UpdateAll updates all index entries matching pathspecs to reflect the
// working directory. The optional callback behaves as in AddAll.
func (v *Index) UpdateAll(pathspecs []string, callback IndexMatchedPathCallback) error {
	cpathspecs := C.git_strarray{}
	cpathspecs.count = C.size_t(len(pathspecs))
	cpathspecs.strings = makeCStringsFromStrings(pathspecs)
	defer freeStrarray(&cpathspecs)

	var err error
	data := indexMatchedPathCallbackData{
		callback:    callback,
		errorTarget: &err,
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var handle unsafe.Pointer
	if callback != nil {
		handle = pointerHandles.Track(&data)
		defer pointerHandles.Untrack(handle)
	}

	ret := C._go_git_index_update_all(
		v.ptr,
		&cpathspecs,
		handle,
	)
	runtime.KeepAlive(v)
	// ErrorCodeUser means our Go callback aborted; prefer its error.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// RemoveAll removes index entries matching the given pathspecs. If callback
// is non-nil it is invoked for each matched path; a non-nil error from the
// callback aborts the operation and is returned to the caller.
func (v *Index) RemoveAll(pathspecs []string, callback IndexMatchedPathCallback) error {
	cpathspecs := C.git_strarray{}
	cpathspecs.count = C.size_t(len(pathspecs))
	cpathspecs.strings = makeCStringsFromStrings(pathspecs)
	defer freeStrarray(&cpathspecs)

	// Register the Go callback in the handle table so the C shim can reach
	// it; callback errors are surfaced through errorTarget.
	var err error
	data := indexMatchedPathCallbackData{
		callback: callback,
		errorTarget: &err,
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var handle unsafe.Pointer
	if callback != nil {
		handle = pointerHandles.Track(&data)
		defer pointerHandles.Untrack(handle)
	}

	ret := C._go_git_index_remove_all(
		v.ptr,
		&cpathspecs,
		handle,
	)
	runtime.KeepAlive(v)
	// ErrorCodeUser means the Go callback aborted the walk.
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// indexMatchedPathCallback is invoked from C for every path matched by
// AddAll/UpdateAll/RemoveAll. It resolves the tracked Go callback from the
// payload handle, forwards the matched path and pathspec, and translates a
// callback error into ErrorCodeUser so the C-side walk aborts.
//
//export indexMatchedPathCallback
func indexMatchedPathCallback(cPath, cMatchedPathspec *C.char, payload unsafe.Pointer) C.int {
	data, ok := pointerHandles.Get(payload).(*indexMatchedPathCallbackData)
	if !ok {
		// A wrong payload type means handle bookkeeping is broken: a
		// programmer bug, so panic rather than return an error code.
		panic("invalid matched path callback")
	}

	err := data.callback(C.GoString(cPath), C.GoString(cMatchedPathspec))
	if err != nil {
		*data.errorTarget = err
		return C.int(ErrorCodeUser)
	}

	return C.int(ErrorCodeOK)
}
// RemoveByPath removes the index entry at the given repository-relative
// path.
func (v *Index) RemoveByPath(path string) error {
	cstr := C.CString(path)
	defer C.free(unsafe.Pointer(cstr))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_index_remove_bypath(v.ptr, cstr)
	runtime.KeepAlive(v) // keep v alive across the C call
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// RemoveDirectory removes all entries from the index under a given
// directory, restricted to the given stage.
func (v *Index) RemoveDirectory(dir string, stage int) error {
	cstr := C.CString(dir)
	defer C.free(unsafe.Pointer(cstr))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_index_remove_directory(v.ptr, cstr, C.int(stage))
	runtime.KeepAlive(v) // keep v alive across the C call
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// WriteTreeTo writes the index contents as a tree into the given
// repository's object database and returns the new tree's id.
func (v *Index) WriteTreeTo(repo *Repository) (*Oid, error) {
	oid := new(Oid)

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_index_write_tree_to(oid.toC(), v.ptr, repo.ptr)
	// Both Go wrappers must outlive the C call that uses their pointers.
	runtime.KeepAlive(v)
	runtime.KeepAlive(repo)
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	return oid, nil
}
// ReadTree replaces the contents of the index with those of the given
// tree.
func (v *Index) ReadTree(tree *Tree) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_index_read_tree(v.ptr, tree.cast_ptr)
	// Both Go wrappers must outlive the C call that uses their pointers.
	runtime.KeepAlive(v)
	runtime.KeepAlive(tree)
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// WriteTree writes the index contents as a tree into the object database
// of the repository the index belongs to and returns the new tree's id.
func (v *Index) WriteTree() (*Oid, error) {
	oid := new(Oid)

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_index_write_tree(oid.toC(), v.ptr)
	runtime.KeepAlive(v) // keep v alive across the C call
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	return oid, nil
}
// Write persists the in-memory index back to its on-disk file.
func (v *Index) Write() error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_index_write(v.ptr)
	runtime.KeepAlive(v) // keep v alive across the C call
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
// Free releases the underlying libgit2 index. The finalizer is cleared
// first so the index cannot be freed a second time by the GC.
func (v *Index) Free() {
	runtime.SetFinalizer(v, nil)
	C.git_index_free(v.ptr)
}
// EntryCount returns the number of entries currently in the index.
func (v *Index) EntryCount() uint {
	ret := uint(C.git_index_entrycount(v.ptr))
	runtime.KeepAlive(v) // keep v alive across the C call
	return ret
}
// EntryByIndex returns a copy of the index entry at the given position, or
// an error when index is past the end of the entry list.
func (v *Index) EntryByIndex(index uint) (*IndexEntry, error) {
	centry := C.git_index_get_byindex(v.ptr, C.size_t(index))
	if centry == nil {
		// Error strings are lowercase and unpunctuated by Go convention
		// (staticcheck ST1005); the old text was "Index out of Bounds".
		return nil, fmt.Errorf("index out of bounds")
	}
	// Copy the C entry into a Go value before v can be collected.
	ret := newIndexEntryFromC(centry)
	runtime.KeepAlive(v)
	return ret, nil
}
// EntryByPath returns a copy of the index entry for the given
// repository-relative path at the given stage. It returns a NotFound git
// error when no such entry exists.
func (v *Index) EntryByPath(path string, stage int) (*IndexEntry, error) {
	cpath := C.CString(path)
	defer C.free(unsafe.Pointer(cpath))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	centry := C.git_index_get_bypath(v.ptr, cpath, C.int(stage))
	if centry == nil {
		return nil, MakeGitError(C.int(ErrorCodeNotFound))
	}
	// Copy the C entry into a Go value before v can be collected.
	ret := newIndexEntryFromC(centry)
	runtime.KeepAlive(v)
	return ret, nil
}
// Find returns the position of the first index entry matching the given
// path exactly.
func (v *Index) Find(path string) (uint, error) {
	cpath := C.CString(path)
	defer C.free(unsafe.Pointer(cpath))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var pos C.size_t
	ret := C.git_index_find(&pos, v.ptr, cpath)
	runtime.KeepAlive(v) // keep v alive across the C call
	if ret < 0 {
		return uint(0), MakeGitError(ret)
	}
	return uint(pos), nil
}
// FindPrefix returns the position of the first index entry whose path
// starts with the given prefix.
func (v *Index) FindPrefix(prefix string) (uint, error) {
	cprefix := C.CString(prefix)
	defer C.free(unsafe.Pointer(cprefix))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var pos C.size_t
	ret := C.git_index_find_prefix(&pos, v.ptr, cprefix)
	runtime.KeepAlive(v) // keep v alive across the C call
	if ret < 0 {
		return uint(0), MakeGitError(ret)
	}
	return uint(pos), nil
}
// HasConflicts reports whether the index contains any conflicted entries.
func (v *Index) HasConflicts() bool {
	ret := C.git_index_has_conflicts(v.ptr) != 0
	runtime.KeepAlive(v) // keep v alive across the C call
	return ret
}
// CleanupConflicts removes all conflict entries from the index.
//
// FIXME: this might return an error
func (v *Index) CleanupConflicts() {
	// The C return code is discarded; see the FIXME above.
	C.git_index_conflict_cleanup(v.ptr)
	runtime.KeepAlive(v)
}
// AddConflict records a conflict for a path by adding up to three stage
// entries (ancestor, ours, theirs). Any of the entries may be nil when
// that side of the conflict is absent.
func (v *Index) AddConflict(ancestor *IndexEntry, our *IndexEntry, their *IndexEntry) error {
	// Convert each non-nil Go entry into a C entry; nil stays a NULL
	// pointer, which libgit2 interprets as "this side is missing".
	var cancestor *C.git_index_entry
	var cour *C.git_index_entry
	var ctheir *C.git_index_entry

	if ancestor != nil {
		cancestor = &C.git_index_entry{}
		populateCIndexEntry(ancestor, cancestor)
		defer freeCIndexEntry(cancestor)
	}

	if our != nil {
		cour = &C.git_index_entry{}
		populateCIndexEntry(our, cour)
		defer freeCIndexEntry(cour)
	}

	if their != nil {
		ctheir = &C.git_index_entry{}
		populateCIndexEntry(their, ctheir)
		defer freeCIndexEntry(ctheir)
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_index_conflict_add(v.ptr, cancestor, cour, ctheir)
	// All four Go values must outlive the C call that reads them.
	runtime.KeepAlive(v)
	runtime.KeepAlive(ancestor)
	runtime.KeepAlive(our)
	runtime.KeepAlive(their)
	if ecode < 0 {
		return MakeGitError(ecode)
	}
	return nil
}
// IndexConflict describes the three stage entries of a conflicted path.
// Any of the fields may be nil when that side of the conflict is absent.
type IndexConflict struct {
	Ancestor *IndexEntry
	Our      *IndexEntry
	Their    *IndexEntry
}
// Conflict returns the conflict entries (ancestor/our/their) recorded for
// the given path.
func (v *Index) Conflict(path string) (IndexConflict, error) {
	var cancestor *C.git_index_entry
	var cour *C.git_index_entry
	var ctheir *C.git_index_entry

	cpath := C.CString(path)
	defer C.free(unsafe.Pointer(cpath))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_index_conflict_get(&cancestor, &cour, &ctheir, v.ptr, cpath)
	if ecode < 0 {
		return IndexConflict{}, MakeGitError(ecode)
	}
	// Copy the C entries into Go values before v can be collected.
	ret := IndexConflict{
		Ancestor: newIndexEntryFromC(cancestor),
		Our:      newIndexEntryFromC(cour),
		Their:    newIndexEntryFromC(ctheir),
	}
	runtime.KeepAlive(v)
	return ret, nil
}
// GetConflict returns the conflict entries recorded for the given path.
//
// Deprecated: Use Index.Conflict instead.
func (v *Index) GetConflict(path string) (IndexConflict, error) {
	return v.Conflict(path)
}
// RemoveConflict removes the conflict entries recorded for the given path.
func (v *Index) RemoveConflict(path string) error {
	cpath := C.CString(path)
	defer C.free(unsafe.Pointer(cpath))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_index_conflict_remove(v.ptr, cpath)
	runtime.KeepAlive(v) // keep v alive across the C call
	if ecode < 0 {
		return MakeGitError(ecode)
	}
	return nil
}
// IndexConflictIterator iterates over the conflicted entries of an Index.
// It must not be copied (doNotCompare) and holds the parent Index so the
// underlying C iterator cannot outlive it.
type IndexConflictIterator struct {
	doNotCompare
	ptr   *C.git_index_conflict_iterator
	index *Index
}
// newIndexConflictIteratorFromC wraps a C conflict iterator in a Go value
// and registers a finalizer so the C iterator is freed with it.
func newIndexConflictIteratorFromC(index *Index, ptr *C.git_index_conflict_iterator) *IndexConflictIterator {
	i := &IndexConflictIterator{ptr: ptr, index: index}
	runtime.SetFinalizer(i, (*IndexConflictIterator).Free)
	return i
}
// Index returns the Index this iterator was created from.
func (v *IndexConflictIterator) Index() *Index {
	return v.index
}
// Free releases the underlying C conflict iterator. The finalizer is
// cleared first so it cannot be freed a second time by the GC.
func (v *IndexConflictIterator) Free() {
	runtime.SetFinalizer(v, nil)
	C.git_index_conflict_iterator_free(v.ptr)
}
// ConflictIterator creates an iterator over the conflicted entries of the
// index.
func (v *Index) ConflictIterator() (*IndexConflictIterator, error) {
	var i *C.git_index_conflict_iterator

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_index_conflict_iterator_new(&i, v.ptr)
	if ecode < 0 {
		return nil, MakeGitError(ecode)
	}
	return newIndexConflictIteratorFromC(v, i), nil
}
// Next advances the iterator and returns the next conflict. When iteration
// fails — including, presumably, when the iterator is exhausted (libgit2
// ITEROVER; confirm against the libgit2 docs) — a git error is returned.
func (v *IndexConflictIterator) Next() (IndexConflict, error) {
	var cancestor *C.git_index_entry
	var cour *C.git_index_entry
	var ctheir *C.git_index_entry

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_index_conflict_next(&cancestor, &cour, &ctheir, v.ptr)
	if ecode < 0 {
		return IndexConflict{}, MakeGitError(ecode)
	}
	// Copy the C entries into Go values before v can be collected.
	ret := IndexConflict{
		Ancestor: newIndexEntryFromC(cancestor),
		Our:      newIndexEntryFromC(cour),
		Their:    newIndexEntryFromC(ctheir),
	}
	runtime.KeepAlive(v)
	return ret, nil
}
|
package api
import (
"mingchuan.me/api/models"
"mingchuan.me/app/errors"
)
// models.go
// This file wraps some common swagger models
// ModelServiceError converts an internal *errors.Error into its swagger
// model representation (*models.ServiceError).
func ModelServiceError(err *errors.Error) *models.ServiceError {
	code := int64(err.ErrorCode)
	return &models.ServiceError{
		Name:   &err.Name,
		Code:   &code,
		Detail: &err.Detail,
	}
}
|
package main
var x = 100
func main(){
enum_const()
} |
package simplegfs
import (
"fmt"
"github.com/wweiw/simplegfs/pkg/cache"
log "github.com/Sirupsen/logrus"
"time"
sgfsErr "github.com/wweiw/simplegfs/error"
)
// Client is a simplegfs client handle. It talks to the master server for
// metadata and to chunk servers for data, caching chunk locations and
// lease holders to cut down on master RPCs.
type Client struct {
	masterAddr       string       // address of the master server
	clientId         uint64       // id assigned by the master in NewClient
	locationCache    *cache.Cache // (path, chunk index) -> *FindLocationsReply
	leaseHolderCache *cache.Cache // chunk handle -> *FindLeaseHolderReply
}
// NewClient builds a client bound to the master server at masterAddr and
// obtains a fresh client id from it.
//
// NOTE(review): the NewClientId RPC error is silently ignored; on failure
// clientId remains 0 — confirm whether callers can tolerate that.
func NewClient(masterAddr string) *Client {
	c := &Client{
		masterAddr: masterAddr,
		locationCache: cache.New(CacheTimeout, CacheGCInterval),
		leaseHolderCache: cache.New(CacheTimeout, CacheGCInterval),
	}
	reply := &NewClientIdReply{}
	call(masterAddr, "MasterServer.NewClientId", struct{}{}, reply)
	c.clientId = reply.ClientId
	return c
}
// Client APIs
// Create asks the master server to create a new file at path and reports
// whether the creation succeeded.
func (c *Client) Create(path string) (bool, error) {
	// TODO: Error handling
	var created bool
	err := call(c.masterAddr, "MasterServer.Create", path, &created)
	return created, err
}
// Mkdir asks the master server to create a directory at path and reports
// whether the creation succeeded.
func (c *Client) Mkdir(path string) (bool, error) {
	var created bool
	err := call(c.masterAddr, "MasterServer.Mkdir", path, &created)
	return created, err
}
// List returns the paths contained in the directory at path.
func (c *Client) List(path string) ([]string, error) {
	var reply ListReply
	err := call(c.masterAddr, "MasterServer.List", path, &reply)
	return reply.Paths, err
}
// Delete removes the directory or file at path and reports whether the
// removal succeeded.
func (c *Client) Delete(path string) (bool, error) {
	var deleted bool
	err := call(c.masterAddr, "MasterServer.Delete", path, &deleted)
	return deleted, err
}
// Append writes data to an offset chosen by the primary chunk server.
// Data is only appended if its size is less than AppendSize, which is one
// fourth of ChunkSize.
// Returns (offset chosen by primary, nil) on success, an appropriate
// error otherwise.
// The caller must check the returned error before using the offset.
func (c *Client) Append(path string, data []byte) (uint64, error) {
	// First check if the size is valid.
	if len(data) > AppendSize {
		log.Println("ERROR: Data size exceeds append limit.")
		return 0, sgfsErr.ErrAppendLimitExceeded
	}
	// To calculate chunkIndex we must get the length.
	fileLength, err := c.getFileLength(path)
	if err != nil {
		log.Println("ERROR: Get file length failed.")
		return 0, err
	}
	chunkIndex := uint64(fileLength / ChunkSize)
	// Get chunkHandle and chunkLocations, creating the chunk if needed.
	chunkHandle, chunkLocations, err := c.guaranteeChunkLocations(path, chunkIndex)
	if err != nil {
		return 0, err
	}
	// Construct dataId with clientId and current timestamp.
	dataId := DataId{
		ClientId:  c.clientId,
		Timestamp: time.Now(),
	}
	// Push data to all replicas' memory.
	err = c.pushData(chunkLocations, dataId, data)
	if err != nil {
		return 0, err
	}
	// Once data is pushed to all replicas, send append request to the primary.
	primary := c.findLeaseHolder(chunkHandle)
	if primary == "" {
		return 0, sgfsErr.ErrLeaseHolderNotFound
	}
	// Construct Append RPC arguments and replies.
	appendArgs := AppendArgs{
		DataId:         dataId,
		ChunkHandle:    chunkHandle,
		ChunkIndex:     chunkIndex,
		Path:           path,
		ChunkLocations: chunkLocations,
	}
	appendReply := new(AppendReply)
	// Send Append request.
	err = call(primary, "ChunkServer.Append", appendArgs, appendReply)
	if err != nil {
		// If there is not enough space on the target chunk, retry the append
		// on a new chunk and return THAT attempt's offset. (Previously the
		// retry's result was discarded and a stale zero offset was returned
		// with a nil error.)
		if err.Error() == sgfsErr.ErrNotEnoughSpace.Error() {
			return c.Append(path, data)
		}
		return 0, err
	}
	return appendReply.Offset, nil
}
// Write writes bytes into the file at path starting at the given byte
// offset, splitting the range into one internal write per chunk it spans.
// It reports whether every chunk write succeeded.
func (c *Client) Write(path string, offset uint64, bytes []byte) bool {
	// TODO: Split one write into multiple RPC
	length := uint64(len(bytes))
	startChunkIndex := offset / ChunkSize
	endChunkIndex := (offset + length - 1) / ChunkSize // inclusive
	startIdx := uint64(0)
	for i := startChunkIndex; i <= endChunkIndex; i++ {
		// Default to writing the whole chunk [0, ChunkSize).
		startOffset := uint64(0)
		endOffset := uint64(ChunkSize) // exclusive
		if i == startChunkIndex {
			// First chunk may start mid-chunk.
			startOffset = offset % ChunkSize
		}
		if i == endChunkIndex {
			// Last chunk may end mid-chunk; a remainder of 0 means the
			// write ends exactly on a chunk boundary.
			if rem := (offset + length) % ChunkSize; rem == 0 {
				endOffset = ChunkSize
			} else {
				endOffset = rem
			}
		}
		if ok := c.write(path, i, startOffset, endOffset, bytes[startIdx:startIdx+endOffset-startOffset]); !ok {
			return false
		}
		// Advance past the slice of bytes consumed by this chunk.
		startIdx += endOffset - startOffset
	}
	return true
}
// Read reads from the file at path starting at the given byte offset into
// bytes, clamped to the file's current length, issuing one internal read
// per chunk the range spans. It returns the number of bytes requested
// within the file boundary.
func (c *Client) Read(path string, offset uint64, bytes []byte) (n int, err error) {
	fileLength, err := c.getFileLength(path)
	if err != nil {
		return 0, err
	}
	length := uint64(len(bytes))
	limit := min(offset + length, uint64(fileLength)) // Read should not exceed the boundary.
	startChunkIndex := offset / ChunkSize
	endChunkIndex := (limit - 1) / ChunkSize // inclusive
	startIdx := uint64(0) // start index at a chunk
	total := 0
	for i := startChunkIndex; i <= endChunkIndex; i++ {
		// Default to reading the whole chunk [0, ChunkSize).
		startOffset := uint64(0)
		endOffset := uint64(ChunkSize) // exclusive
		if i == startChunkIndex {
			// First chunk may start mid-chunk.
			startOffset = offset % ChunkSize
		}
		if i == endChunkIndex {
			// Last chunk may end mid-chunk; remainder 0 means the read
			// ends exactly on a chunk boundary.
			if rem := limit % ChunkSize; rem == 0 {
				endOffset = ChunkSize
			} else {
				endOffset = rem
			}
		}
		n, err = c.read(path, i, startOffset, bytes[startIdx:startIdx+endOffset-startOffset])
		if err != nil {
			return total, err
		}
		// total tracks bytes delivered so far; only returned on error.
		total = int(startIdx) + n
		startIdx += endOffset - startOffset
	}
	return int(limit - offset), nil
}
// Stop releases any resources held by the client: it stops both caches'
// background GC.
func (c *Client) Stop() {
	c.locationCache.Stop()
	c.leaseHolderCache.Stop()
}
// read fetches up to len(bytes) bytes of chunk chunkIndex of the file at
// path, starting at offset start within the chunk, from one of the chunk's
// replicas. It returns the number of bytes actually read.
func (c *Client) read(path string, chunkIndex, start uint64,
	bytes []byte) (n int, err error) {
	// Get chunkhandle and locations
	length := uint64(len(bytes))
	log.Debugln(c.clientId, "read", path, chunkIndex, start, len(bytes))
	chunkHandle, chunkLocations, err := c.findChunkLocations(path, chunkIndex)
	if err != nil {
		// Propagate the lookup failure instead of silently returning
		// (0, nil), which made callers treat an unreadable chunk as success.
		return 0, err
	}
	cs := chunkLocations[0] // TODO: Use random location for load balance
	// TODO: Fault tolerance (e.g. chunk server down)
	args := ReadArgs{
		ChunkHandle: chunkHandle,
		Offset:      int64(start),
		Length:      length,
	}
	resp := new(ReadReply)
	resp.Bytes = bytes
	// Surface RPC failures; previously the error was discarded.
	if err := call(cs, "ChunkServer.Read", args, resp); err != nil {
		return 0, err
	}
	return resp.Length, nil
}
// pushData delivers the data identified by dataId into the memory of every
// chunk server in chunkLocations, stopping at the first failure.
func (c *Client) pushData(chunkLocations []string, dataId DataId, data []byte) error {
	args := &PushDataArgs{dataId, data}
	for _, server := range chunkLocations {
		reply := new(PushDataReply)
		if err := call(server, "ChunkServer.PushData", args, reply); err != nil {
			return err
		}
	}
	return nil
}
// guaranteeChunkLocations resolves the chunk handle and replica locations
// for (path, chunkIndex), creating the chunk on the master server when it
// does not exist yet. If another client wins the race to add the same
// chunk, the lookup is retried. A non-nil error is returned only when an
// unexpected, non-retriable failure occurs.
func (c *Client) guaranteeChunkLocations(path string, chunkIndex uint64) (uint64, []string, error) {
	handle, locations, err := c.findChunkLocations(path, chunkIndex)
	if err != nil {
		// Chunk unknown: try to create it on the master.
		handle, locations, err = c.addChunk(path, chunkIndex)
		if err != nil && err.Error() == sgfsErr.ErrChunkExist.Error() {
			// Another client added the chunk first; look it up again.
			handle, locations, err = c.findChunkLocations(path, chunkIndex)
		}
	}
	return handle, locations, err
}
// write pushes bytes to every replica of chunk chunkIndex of the file at
// path and asks the primary (lease holder) to commit them at offset start
// within the chunk. end is the exclusive end offset within the chunk. It
// reports whether the whole push-and-commit sequence succeeded.
func (c *Client) write(path string, chunkIndex, start, end uint64,
	bytes []byte) bool {
	// Get chunkhandle and locations.
	// For auditing
	// log.Debugln(c.clientId, "write", path, chunkIndex, start, end, string(bytes))
	// Get chunkHandle and chunkLocations, creating the chunk if needed.
	chunkHandle, chunkLocations, err := c.guaranteeChunkLocations(path, chunkIndex)
	if err != nil {
		return false
	}
	// Construct dataId with clientId and current timestamp.
	dataId := DataId{
		ClientId:  c.clientId,
		Timestamp: time.Now(),
	}
	// Push data to all replicas' memory.
	err = c.pushData(chunkLocations, dataId, bytes)
	if err != nil {
		log.Println("Data did not push to all replicas.")
		return false
	}
	// Once data is pushed to all replicas, send write request to the primary.
	primary := c.findLeaseHolder(chunkHandle)
	if primary == "" {
		log.Println("Primary chunk server not found.")
		return false
	}
	writeArgs := WriteArgs{
		DataId:         dataId,
		Path:           path,
		ChunkIndex:     chunkIndex,
		ChunkHandle:    chunkHandle,
		Offset:         start,
		ChunkLocations: chunkLocations,
	}
	writeReply := new(WriteReply)
	if err := call(primary, "ChunkServer.Write", writeArgs,
		writeReply); err != nil {
		return false
	}
	return true
}
// addChunk asks the master server to allocate a new chunk for the file at
// path at the given chunk index, returning its handle and replica
// locations.
func (c *Client) addChunk(path string, chunkIndex uint64) (uint64, []string,
	error) {
	reply := new(AddChunkReply)
	err := call(c.masterAddr, "MasterServer.AddChunk", AddChunkArgs{
		Path:       path,
		ChunkIndex: chunkIndex,
	}, reply)
	return reply.ChunkHandle, reply.ChunkLocations, err
}
// findChunkLocations returns the chunk handle and replica locations for
// chunk chunkIndex of the file at path, consulting the location cache
// first and falling back to a MasterServer.FindLocations RPC whose answer
// is then cached.
func (c *Client) findChunkLocations(path string, chunkIndex uint64) (uint64, []string, error) {
	key := fmt.Sprintf("%s,%d", path, chunkIndex)
	value, ok := c.locationCache.Get(key)
	if ok {
		reply := value.(*FindLocationsReply)
		return reply.ChunkHandle, reply.ChunkLocations, nil
	}
	args := FindLocationsArgs{
		Path:       path,
		ChunkIndex: chunkIndex,
	}
	reply := new(FindLocationsReply)
	err := call(c.masterAddr, "MasterServer.FindLocations", args, reply)
	if err == nil {
		// Set cache entry to the answers we get.
		c.locationCache.Set(key, reply)
	}
	return reply.ChunkHandle, reply.ChunkLocations, err
}
// findLeaseHolder returns the address of the primary (lease holder) chunk
// server for chunkhandle. It first checks the leaseHolderCache; on a miss
// it RPCs the master server and caches the answer until the lease expires.
// It returns "" when no lease holder can be determined.
func (c *Client) findLeaseHolder(chunkhandle uint64) string {
	// First check with the leaseHolderCache.
	key := fmt.Sprintf("%d", chunkhandle)
	value, ok := c.leaseHolderCache.Get(key)
	if ok {
		reply := value.(*FindLeaseHolderReply)
		return reply.Primary
	}
	// If not found in cache, RPC the master server.
	args := FindLeaseHolderArgs{
		ChunkHandle: chunkhandle,
	}
	reply := new(FindLeaseHolderReply)
	err := call(c.masterAddr, "MasterServer.FindLeaseHolder", args, reply)
	if err != nil {
		return ""
	}
	// Cache the lease holder with an expiration matching the lease end, so
	// a stale primary is never served. time.Until replaces the non-idiomatic
	// reply.LeaseEnds.Sub(time.Now()) (gocritic: timeSub).
	c.leaseHolderCache.SetWithTimeout(key, reply, time.Until(reply.LeaseEnds))
	return reply.Primary
}
// getFileLength asks the master server for the current length in bytes of
// the file at path.
func (c *Client) getFileLength(path string) (int64, error) {
	length := new(int64)
	err := call(c.masterAddr, "MasterServer.GetFileLength", path, length)
	log.Debugln(path, "file length:", *length)
	return *length, err
}
|
package unshare
import (
"github.com/criyle/go-sandbox/pkg/mount"
"github.com/criyle/go-sandbox/pkg/rlimit"
"github.com/criyle/go-sandbox/pkg/seccomp"
"github.com/criyle/go-sandbox/runner"
)
// Runner runs a program inside unshared namespaces.
type Runner struct {
	// Args and Env are the argv and environment for the child process.
	Args []string
	Env  []string

	// ExecFile is the file descriptor passed to fexecve.
	ExecFile uintptr

	// WorkDir is the working directory after unsharing the mount namespace.
	WorkDir string

	// Files are the file descriptors for the new process, mapped to
	// fds 0 through len(Files)-1.
	Files []uintptr

	// RLimits are resource limits applied via setrlimit.
	RLimits []rlimit.RLimit

	// Limit is the resource limit enforced by the tracer.
	Limit runner.Limit

	// Seccomp is the seccomp filter attached to the process (should be
	// whitelist-only).
	Seccomp seccomp.Filter

	// Root is the new root directory.
	Root string

	// Mounts are the mount syscalls performed in the new mount namespace.
	Mounts []mount.SyscallParams

	// HostName and DomainName are set inside the new UTS namespace.
	HostName, DomainName string

	// ShowDetails enables verbose diagnostics.
	ShowDetails bool

	// SyncFunc is used by the cgroup integration to add the child pid.
	SyncFunc func(pid int) error
}
|
package oidc_test
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/authelia/authelia/v4/internal/configuration/schema"
"github.com/authelia/authelia/v4/internal/oidc"
)
// TestNewOpenIDConnectWellKnownConfiguration verifies that the generated
// well-known discovery document advertises exactly the expected PKCE code
// challenge methods, subject types, and signing algorithms for several
// provider configurations (plain PKCE on/off, extra discovered response
// object signing algs).
func TestNewOpenIDConnectWellKnownConfiguration(t *testing.T) {
	testCases := []struct {
		desc string
		pkcePlainChallenge bool
		enforcePAR bool
		clients map[string]oidc.Client
		discovery schema.IdentityProvidersOpenIDConnectDiscovery
		expectCodeChallengeMethodsSupported, expectSubjectTypesSupported []string
		expectedIDTokenSigAlgsSupported, expectedUserInfoSigAlgsSupported []string
		expectedRequestObjectSigAlgsSupported, expectedRevocationSigAlgsSupported, expectedTokenAuthSigAlgsSupported []string
	}{
		{
			desc: "ShouldHaveStandardCodeChallengeMethods",
			pkcePlainChallenge: false,
			clients: map[string]oidc.Client{"a": &oidc.BaseClient{}},
			expectCodeChallengeMethodsSupported: []string{oidc.PKCEChallengeMethodSHA256},
			expectSubjectTypesSupported: []string{oidc.SubjectTypePublic, oidc.SubjectTypePairwise},
			expectedIDTokenSigAlgsSupported: []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgNone},
			expectedUserInfoSigAlgsSupported: []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgNone},
			expectedRequestObjectSigAlgsSupported: []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512, oidc.SigningAlgNone},
			expectedRevocationSigAlgsSupported: []string{oidc.SigningAlgHMACUsingSHA256, oidc.SigningAlgHMACUsingSHA384, oidc.SigningAlgHMACUsingSHA512, oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512},
			expectedTokenAuthSigAlgsSupported: []string{oidc.SigningAlgHMACUsingSHA256, oidc.SigningAlgHMACUsingSHA384, oidc.SigningAlgHMACUsingSHA512, oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512},
		},
		{
			desc: "ShouldHaveAllCodeChallengeMethods",
			pkcePlainChallenge: true,
			clients: map[string]oidc.Client{"a": &oidc.BaseClient{}},
			expectCodeChallengeMethodsSupported: []string{oidc.PKCEChallengeMethodSHA256, oidc.PKCEChallengeMethodPlain},
			expectSubjectTypesSupported: []string{oidc.SubjectTypePublic, oidc.SubjectTypePairwise},
			expectedIDTokenSigAlgsSupported: []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgNone},
			expectedUserInfoSigAlgsSupported: []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgNone},
			expectedRequestObjectSigAlgsSupported: []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512, oidc.SigningAlgNone},
			expectedRevocationSigAlgsSupported: []string{oidc.SigningAlgHMACUsingSHA256, oidc.SigningAlgHMACUsingSHA384, oidc.SigningAlgHMACUsingSHA512, oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512},
			expectedTokenAuthSigAlgsSupported: []string{oidc.SigningAlgHMACUsingSHA256, oidc.SigningAlgHMACUsingSHA384, oidc.SigningAlgHMACUsingSHA512, oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512},
		},
		{
			desc: "ShouldIncludeDiscoveredResponseObjectSigningAlgs",
			pkcePlainChallenge: false,
			clients: map[string]oidc.Client{"a": &oidc.BaseClient{}},
			discovery: schema.IdentityProvidersOpenIDConnectDiscovery{
				ResponseObjectSigningAlgs: []string{oidc.SigningAlgECDSAUsingP521AndSHA512},
			},
			expectCodeChallengeMethodsSupported: []string{oidc.PKCEChallengeMethodSHA256},
			expectSubjectTypesSupported: []string{oidc.SubjectTypePublic, oidc.SubjectTypePairwise},
			expectedIDTokenSigAlgsSupported: []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgNone},
			expectedUserInfoSigAlgsSupported: []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgNone},
			expectedRequestObjectSigAlgsSupported: []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512, oidc.SigningAlgNone},
			expectedRevocationSigAlgsSupported: []string{oidc.SigningAlgHMACUsingSHA256, oidc.SigningAlgHMACUsingSHA384, oidc.SigningAlgHMACUsingSHA512, oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512},
			expectedTokenAuthSigAlgsSupported: []string{oidc.SigningAlgHMACUsingSHA256, oidc.SigningAlgHMACUsingSHA384, oidc.SigningAlgHMACUsingSHA512, oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			c := schema.IdentityProvidersOpenIDConnect{
				EnablePKCEPlainChallenge: tc.pkcePlainChallenge,
				PAR: schema.IdentityProvidersOpenIDConnectPAR{
					Enforce: tc.enforcePAR,
				},
				Discovery: tc.discovery,
			}
			actual := oidc.NewOpenIDConnectWellKnownConfiguration(&c)
			// Two-way containment checks assert set equality (ignoring
			// order) between expected and actual values.
			for _, codeChallengeMethod := range tc.expectCodeChallengeMethodsSupported {
				assert.Contains(t, actual.CodeChallengeMethodsSupported, codeChallengeMethod)
			}
			for _, subjectType := range tc.expectSubjectTypesSupported {
				assert.Contains(t, actual.SubjectTypesSupported, subjectType)
			}
			for _, codeChallengeMethod := range actual.CodeChallengeMethodsSupported {
				assert.Contains(t, tc.expectCodeChallengeMethodsSupported, codeChallengeMethod)
			}
			for _, subjectType := range actual.SubjectTypesSupported {
				assert.Contains(t, tc.expectSubjectTypesSupported, subjectType)
			}
			// Signing algorithm lists must match exactly, including order.
			assert.Equal(t, tc.expectedUserInfoSigAlgsSupported, actual.UserinfoSigningAlgValuesSupported)
			assert.Equal(t, tc.expectedIDTokenSigAlgsSupported, actual.IDTokenSigningAlgValuesSupported)
			assert.Equal(t, tc.expectedRequestObjectSigAlgsSupported, actual.RequestObjectSigningAlgValuesSupported)
			assert.Equal(t, tc.expectedRevocationSigAlgsSupported, actual.RevocationEndpointAuthSigningAlgValuesSupported)
			assert.Equal(t, tc.expectedTokenAuthSigAlgsSupported, actual.TokenEndpointAuthSigningAlgValuesSupported)
		})
	}
}
// TestNewOpenIDConnectProviderDiscovery verifies that the provider's
// OpenID Connect and OAuth 2.0 well-known configurations survive a JSON
// marshal/unmarshal round trip unchanged.
func TestNewOpenIDConnectProviderDiscovery(t *testing.T) {
	provider := oidc.NewOpenIDConnectProvider(&schema.IdentityProvidersOpenIDConnect{
		IssuerCertificateChain: schema.X509CertificateChain{},
		IssuerPrivateKey: keyRSA2048,
		HMACSecret: "asbdhaaskmdlkamdklasmdlkams",
		EnablePKCEPlainChallenge: true,
		Clients: []schema.IdentityProvidersOpenIDConnectClient{
			{
				ID: "a-client",
				Secret: tOpenIDConnectPlainTextClientSecret,
				AuthorizationPolicy: onefactor,
				RedirectURIs: []string{
					"https://google.com",
				},
			},
		},
	}, nil, nil)

	// OIDC configuration: marshal then unmarshal must reproduce the value.
	a := provider.GetOpenIDConnectWellKnownConfiguration("https://auth.example.com")
	data, err := json.Marshal(&a)
	assert.NoError(t, err)
	b := oidc.OpenIDConnectWellKnownConfiguration{}
	assert.NoError(t, json.Unmarshal(data, &b))
	assert.Equal(t, a, b)

	// Same round-trip check for the plain OAuth 2.0 configuration.
	y := provider.GetOAuth2WellKnownConfiguration("https://auth.example.com")
	data, err = json.Marshal(&y)
	assert.NoError(t, err)
	z := oidc.OAuth2WellKnownConfiguration{}
	assert.NoError(t, json.Unmarshal(data, &z))
	assert.Equal(t, y, z)
}
func TestNewOpenIDConnectProvider_GetOpenIDConnectWellKnownConfiguration(t *testing.T) {
provider := oidc.NewOpenIDConnectProvider(&schema.IdentityProvidersOpenIDConnect{
IssuerCertificateChain: schema.X509CertificateChain{},
IssuerPrivateKey: keyRSA2048,
HMACSecret: "asbdhaaskmdlkamdklasmdlkams",
Clients: []schema.IdentityProvidersOpenIDConnectClient{
{
ID: "a-client",
Secret: tOpenIDConnectPlainTextClientSecret,
AuthorizationPolicy: onefactor,
RedirectURIs: []string{
"https://google.com",
},
},
},
}, nil, nil)
require.NotNil(t, provider)
disco := provider.GetOpenIDConnectWellKnownConfiguration(examplecom)
assert.Equal(t, examplecom, disco.Issuer)
assert.Equal(t, "https://example.com/jwks.json", disco.JWKSURI)
assert.Equal(t, "https://example.com/api/oidc/authorization", disco.AuthorizationEndpoint)
assert.Equal(t, "https://example.com/api/oidc/token", disco.TokenEndpoint)
assert.Equal(t, "https://example.com/api/oidc/userinfo", disco.UserinfoEndpoint)
assert.Equal(t, "https://example.com/api/oidc/introspection", disco.IntrospectionEndpoint)
assert.Equal(t, "https://example.com/api/oidc/revocation", disco.RevocationEndpoint)
assert.Equal(t, "", disco.RegistrationEndpoint)
assert.Len(t, disco.CodeChallengeMethodsSupported, 1)
assert.Contains(t, disco.CodeChallengeMethodsSupported, oidc.PKCEChallengeMethodSHA256)
assert.Len(t, disco.ScopesSupported, 5)
assert.Contains(t, disco.ScopesSupported, oidc.ScopeOpenID)
assert.Contains(t, disco.ScopesSupported, oidc.ScopeOfflineAccess)
assert.Contains(t, disco.ScopesSupported, oidc.ScopeProfile)
assert.Contains(t, disco.ScopesSupported, oidc.ScopeGroups)
assert.Contains(t, disco.ScopesSupported, oidc.ScopeEmail)
assert.Len(t, disco.ResponseModesSupported, 7)
assert.Contains(t, disco.ResponseModesSupported, oidc.ResponseModeFormPost)
assert.Contains(t, disco.ResponseModesSupported, oidc.ResponseModeQuery)
assert.Contains(t, disco.ResponseModesSupported, oidc.ResponseModeFragment)
assert.Contains(t, disco.ResponseModesSupported, oidc.ResponseModeJWT)
assert.Contains(t, disco.ResponseModesSupported, oidc.ResponseModeFormPostJWT)
assert.Contains(t, disco.ResponseModesSupported, oidc.ResponseModeQueryJWT)
assert.Contains(t, disco.ResponseModesSupported, oidc.ResponseModeFragmentJWT)
assert.Len(t, disco.SubjectTypesSupported, 2)
assert.Contains(t, disco.SubjectTypesSupported, oidc.SubjectTypePublic)
assert.Contains(t, disco.SubjectTypesSupported, oidc.SubjectTypePairwise)
assert.Len(t, disco.ResponseTypesSupported, 7)
assert.Contains(t, disco.ResponseTypesSupported, oidc.ResponseTypeAuthorizationCodeFlow)
assert.Contains(t, disco.ResponseTypesSupported, oidc.ResponseTypeImplicitFlowIDToken)
assert.Contains(t, disco.ResponseTypesSupported, oidc.ResponseTypeImplicitFlowToken)
assert.Contains(t, disco.ResponseTypesSupported, oidc.ResponseTypeImplicitFlowBoth)
assert.Contains(t, disco.ResponseTypesSupported, oidc.ResponseTypeHybridFlowIDToken)
assert.Contains(t, disco.ResponseTypesSupported, oidc.ResponseTypeHybridFlowToken)
assert.Contains(t, disco.ResponseTypesSupported, oidc.ResponseTypeHybridFlowBoth)
assert.Len(t, disco.TokenEndpointAuthMethodsSupported, 5)
assert.Contains(t, disco.TokenEndpointAuthMethodsSupported, oidc.ClientAuthMethodClientSecretBasic)
assert.Contains(t, disco.TokenEndpointAuthMethodsSupported, oidc.ClientAuthMethodClientSecretPost)
assert.Contains(t, disco.TokenEndpointAuthMethodsSupported, oidc.ClientAuthMethodClientSecretJWT)
assert.Contains(t, disco.TokenEndpointAuthMethodsSupported, oidc.ClientAuthMethodPrivateKeyJWT)
assert.Contains(t, disco.TokenEndpointAuthMethodsSupported, oidc.ClientAuthMethodNone)
assert.Len(t, disco.RevocationEndpointAuthMethodsSupported, 5)
assert.Contains(t, disco.RevocationEndpointAuthMethodsSupported, oidc.ClientAuthMethodClientSecretBasic)
assert.Contains(t, disco.RevocationEndpointAuthMethodsSupported, oidc.ClientAuthMethodClientSecretPost)
assert.Contains(t, disco.RevocationEndpointAuthMethodsSupported, oidc.ClientAuthMethodClientSecretJWT)
assert.Contains(t, disco.RevocationEndpointAuthMethodsSupported, oidc.ClientAuthMethodPrivateKeyJWT)
assert.Contains(t, disco.RevocationEndpointAuthMethodsSupported, oidc.ClientAuthMethodNone)
assert.Equal(t, []string{oidc.ClientAuthMethodClientSecretBasic, oidc.ClientAuthMethodNone}, disco.IntrospectionEndpointAuthMethodsSupported)
assert.Equal(t, []string{oidc.GrantTypeAuthorizationCode, oidc.GrantTypeImplicit, oidc.GrantTypeClientCredentials, oidc.GrantTypeRefreshToken}, disco.GrantTypesSupported)
assert.Equal(t, []string{oidc.SigningAlgHMACUsingSHA256, oidc.SigningAlgHMACUsingSHA384, oidc.SigningAlgHMACUsingSHA512, oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512}, disco.RevocationEndpointAuthSigningAlgValuesSupported)
assert.Equal(t, []string{oidc.SigningAlgHMACUsingSHA256, oidc.SigningAlgHMACUsingSHA384, oidc.SigningAlgHMACUsingSHA512, oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512}, disco.TokenEndpointAuthSigningAlgValuesSupported)
assert.Equal(t, []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgNone}, disco.IDTokenSigningAlgValuesSupported)
assert.Equal(t, []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgNone}, disco.UserinfoSigningAlgValuesSupported)
assert.Equal(t, []string{oidc.SigningAlgRSAUsingSHA256, oidc.SigningAlgRSAUsingSHA384, oidc.SigningAlgRSAUsingSHA512, oidc.SigningAlgECDSAUsingP256AndSHA256, oidc.SigningAlgECDSAUsingP384AndSHA384, oidc.SigningAlgECDSAUsingP521AndSHA512, oidc.SigningAlgRSAPSSUsingSHA256, oidc.SigningAlgRSAPSSUsingSHA384, oidc.SigningAlgRSAPSSUsingSHA512, oidc.SigningAlgNone}, disco.RequestObjectSigningAlgValuesSupported)
assert.Len(t, disco.ClaimsSupported, 18)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimAuthenticationMethodsReference)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimAudience)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimAuthorizedParty)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimClientIdentifier)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimExpirationTime)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimIssuedAt)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimIssuer)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimJWTID)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimRequestedAt)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimSubject)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimAuthenticationTime)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimNonce)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimPreferredEmail)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimEmailVerified)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimEmailAlts)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimGroups)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimPreferredUsername)
assert.Contains(t, disco.ClaimsSupported, oidc.ClaimFullName)
assert.Len(t, disco.PromptValuesSupported, 2)
assert.Contains(t, disco.PromptValuesSupported, oidc.PromptConsent)
assert.Contains(t, disco.PromptValuesSupported, oidc.PromptNone)
}
// TestNewOpenIDConnectProvider_GetOAuth2WellKnownConfiguration checks that a
// freshly constructed provider publishes the expected OAuth 2.0 well-known
// discovery document: endpoint URLs, PKCE methods, and the supported scopes,
// response modes, subject types, response types, token auth methods, grant
// types and claims.
func TestNewOpenIDConnectProvider_GetOAuth2WellKnownConfiguration(t *testing.T) {
	provider := oidc.NewOpenIDConnectProvider(&schema.IdentityProvidersOpenIDConnect{
		IssuerCertificateChain: schema.X509CertificateChain{},
		IssuerPrivateKey:       keyRSA2048,
		HMACSecret:             "asbdhaaskmdlkamdklasmdlkams",
		Clients: []schema.IdentityProvidersOpenIDConnectClient{
			{
				ID:                  "a-client",
				Secret:              tOpenIDConnectPlainTextClientSecret,
				AuthorizationPolicy: onefactor,
				RedirectURIs: []string{
					"https://google.com",
				},
			},
		},
	}, nil, nil)
	require.NotNil(t, provider)

	disco := provider.GetOAuth2WellKnownConfiguration(examplecom)

	// Fixed endpoint URLs derived from the issuer.
	assert.Equal(t, examplecom, disco.Issuer)
	assert.Equal(t, "https://example.com/jwks.json", disco.JWKSURI)
	assert.Equal(t, "https://example.com/api/oidc/authorization", disco.AuthorizationEndpoint)
	assert.Equal(t, "https://example.com/api/oidc/token", disco.TokenEndpoint)
	assert.Equal(t, "https://example.com/api/oidc/introspection", disco.IntrospectionEndpoint)
	assert.Equal(t, "https://example.com/api/oidc/revocation", disco.RevocationEndpoint)
	assert.Equal(t, "", disco.RegistrationEndpoint)

	require.Len(t, disco.CodeChallengeMethodsSupported, 1)
	assert.Equal(t, "S256", disco.CodeChallengeMethodsSupported[0])

	// Every advertised list must contain exactly the expected members,
	// independent of order.
	for _, tc := range []struct {
		name     string
		actual   interface{}
		expected []string
	}{
		{"ScopesSupported", disco.ScopesSupported, []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess, oidc.ScopeProfile, oidc.ScopeGroups, oidc.ScopeEmail}},
		{"ResponseModesSupported", disco.ResponseModesSupported, []string{oidc.ResponseModeFormPost, oidc.ResponseModeQuery, oidc.ResponseModeFragment, oidc.ResponseModeJWT, oidc.ResponseModeFormPostJWT, oidc.ResponseModeQueryJWT, oidc.ResponseModeFragmentJWT}},
		{"SubjectTypesSupported", disco.SubjectTypesSupported, []string{oidc.SubjectTypePublic, oidc.SubjectTypePairwise}},
		{"ResponseTypesSupported", disco.ResponseTypesSupported, []string{oidc.ResponseTypeAuthorizationCodeFlow, oidc.ResponseTypeImplicitFlowIDToken, oidc.ResponseTypeImplicitFlowToken, oidc.ResponseTypeImplicitFlowBoth, oidc.ResponseTypeHybridFlowIDToken, oidc.ResponseTypeHybridFlowToken, oidc.ResponseTypeHybridFlowBoth}},
		{"TokenEndpointAuthMethodsSupported", disco.TokenEndpointAuthMethodsSupported, []string{oidc.ClientAuthMethodClientSecretBasic, oidc.ClientAuthMethodClientSecretPost, oidc.ClientAuthMethodClientSecretJWT, oidc.ClientAuthMethodPrivateKeyJWT, oidc.ClientAuthMethodNone}},
		{"GrantTypesSupported", disco.GrantTypesSupported, []string{oidc.GrantTypeAuthorizationCode, oidc.GrantTypeImplicit, oidc.GrantTypeClientCredentials, oidc.GrantTypeRefreshToken}},
		{"ClaimsSupported", disco.ClaimsSupported, []string{
			oidc.ClaimAuthenticationMethodsReference,
			oidc.ClaimAudience,
			oidc.ClaimAuthorizedParty,
			oidc.ClaimClientIdentifier,
			oidc.ClaimExpirationTime,
			oidc.ClaimIssuedAt,
			oidc.ClaimIssuer,
			oidc.ClaimJWTID,
			oidc.ClaimRequestedAt,
			oidc.ClaimSubject,
			oidc.ClaimAuthenticationTime,
			oidc.ClaimNonce,
			oidc.ClaimPreferredEmail,
			oidc.ClaimEmailVerified,
			oidc.ClaimEmailAlts,
			oidc.ClaimGroups,
			oidc.ClaimPreferredUsername,
			oidc.ClaimFullName,
		}},
	} {
		assert.Len(t, tc.actual, len(tc.expected), tc.name)

		for _, want := range tc.expected {
			assert.Contains(t, tc.actual, want, tc.name)
		}
	}
}
// TestNewOpenIDConnectProvider_GetOpenIDConnectWellKnownConfigurationWithPlainPKCE
// checks that enabling the plain PKCE challenge advertises "plain" after
// "S256" in the discovery document's code challenge methods.
func TestNewOpenIDConnectProvider_GetOpenIDConnectWellKnownConfigurationWithPlainPKCE(t *testing.T) {
	provider := oidc.NewOpenIDConnectProvider(&schema.IdentityProvidersOpenIDConnect{
		IssuerCertificateChain:   schema.X509CertificateChain{},
		IssuerPrivateKey:         keyRSA2048,
		HMACSecret:               "asbdhaaskmdlkamdklasmdlkams",
		EnablePKCEPlainChallenge: true,
		Clients: []schema.IdentityProvidersOpenIDConnectClient{
			{
				ID:                  "a-client",
				Secret:              tOpenIDConnectPlainTextClientSecret,
				AuthorizationPolicy: onefactor,
				RedirectURIs: []string{
					"https://google.com",
				},
			},
		},
	}, nil, nil)
	require.NotNil(t, provider)

	disco := provider.GetOpenIDConnectWellKnownConfiguration(examplecom)

	// SHA-256 must stay first; "plain" is appended only because the option
	// above enabled it.
	want := []string{oidc.PKCEChallengeMethodSHA256, oidc.PKCEChallengeMethodPlain}
	require.Len(t, disco.CodeChallengeMethodsSupported, len(want))

	for i, method := range want {
		assert.Equal(t, method, disco.CodeChallengeMethodsSupported[i])
	}
}
// TestNewOpenIDConnectWellKnownConfiguration_Copy exercises the Copy methods
// of the well-known configuration types: a copy must compare equal to its
// source. Every optional pointer section is set to a non-nil empty value so
// Copy's handling of each section is exercised; all remaining fields rely on
// their zero values, which is identical to spelling them out explicitly.
func TestNewOpenIDConnectWellKnownConfiguration_Copy(t *testing.T) {
	config := &oidc.OpenIDConnectWellKnownConfiguration{
		OAuth2WellKnownConfiguration: oidc.OAuth2WellKnownConfiguration{
			CommonDiscoveryOptions: oidc.CommonDiscoveryOptions{
				Issuer:  "https://example.com",
				JWKSURI: "https://example.com/jwks.json",
			},
			OAuth2DiscoveryOptions:                               oidc.OAuth2DiscoveryOptions{},
			OAuth2DeviceAuthorizationGrantDiscoveryOptions:       &oidc.OAuth2DeviceAuthorizationGrantDiscoveryOptions{},
			OAuth2MutualTLSClientAuthenticationDiscoveryOptions:  &oidc.OAuth2MutualTLSClientAuthenticationDiscoveryOptions{},
			OAuth2IssuerIdentificationDiscoveryOptions:           &oidc.OAuth2IssuerIdentificationDiscoveryOptions{},
			OAuth2JWTIntrospectionResponseDiscoveryOptions:       &oidc.OAuth2JWTIntrospectionResponseDiscoveryOptions{},
			OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions: &oidc.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions{},
			OAuth2PushedAuthorizationDiscoveryOptions:            &oidc.OAuth2PushedAuthorizationDiscoveryOptions{},
		},
		OpenIDConnectDiscoveryOptions:                                    oidc.OpenIDConnectDiscoveryOptions{},
		OpenIDConnectFrontChannelLogoutDiscoveryOptions:                  &oidc.OpenIDConnectFrontChannelLogoutDiscoveryOptions{},
		OpenIDConnectBackChannelLogoutDiscoveryOptions:                   &oidc.OpenIDConnectBackChannelLogoutDiscoveryOptions{},
		OpenIDConnectSessionManagementDiscoveryOptions:                   &oidc.OpenIDConnectSessionManagementDiscoveryOptions{},
		OpenIDConnectRPInitiatedLogoutDiscoveryOptions:                   &oidc.OpenIDConnectRPInitiatedLogoutDiscoveryOptions{},
		OpenIDConnectPromptCreateDiscoveryOptions:                        &oidc.OpenIDConnectPromptCreateDiscoveryOptions{},
		OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions:  &oidc.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions{},
		OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions: &oidc.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions{},
		OpenIDFederationDiscoveryOptions:                                 &oidc.OpenIDFederationDiscoveryOptions{},
	}

	// The copy of the full OpenID Connect configuration must equal the source.
	x := config.Copy()
	assert.Equal(t, config, &x)

	// The copy of the embedded OAuth 2.0 configuration must equal it too.
	y := config.OAuth2WellKnownConfiguration.Copy()
	assert.Equal(t, config.OAuth2WellKnownConfiguration, y)
}
|
// This file was generated for SObject PicklistValueInfo, API Version v43.0 at 2018-07-30 03:47:33.116861558 -0400 EDT m=+19.460198196
package sobjects
import (
"fmt"
"strings"
)
// PicklistValueInfo is the generated Go mapping for the Salesforce
// PicklistValueInfo SObject (API v43.0, see the file header). The `force`
// struct tags drive (de)serialization in the force API client; every field
// is omitted from payloads when empty.
type PicklistValueInfo struct {
	BaseSObject // common SObject fields shared by all generated types -- assumes it provides Name (used by String); TODO confirm

	DurableId string `force:",omitempty"`
	EntityParticleId string `force:",omitempty"`
	Id string `force:",omitempty"`
	IsActive bool `force:",omitempty"`
	IsDefaultValue bool `force:",omitempty"`
	Label string `force:",omitempty"`
	ValidFor string `force:",omitempty"`
	Value string `force:",omitempty"`
}
// ApiName returns the Salesforce API name for this SObject type.
func (t *PicklistValueInfo) ApiName() string {
	return "PicklistValueInfo"
}
// String renders the record as a human-readable multi-line summary: a header
// with Id and Name, followed by one tab-indented line per field.
func (t *PicklistValueInfo) String() string {
	var b strings.Builder

	fmt.Fprintf(&b, "PicklistValueInfo #%s - %s\n", t.Id, t.Name)
	fmt.Fprintf(&b, "\tDurableId: %v\n", t.DurableId)
	fmt.Fprintf(&b, "\tEntityParticleId: %v\n", t.EntityParticleId)
	fmt.Fprintf(&b, "\tId: %v\n", t.Id)
	fmt.Fprintf(&b, "\tIsActive: %v\n", t.IsActive)
	fmt.Fprintf(&b, "\tIsDefaultValue: %v\n", t.IsDefaultValue)
	fmt.Fprintf(&b, "\tLabel: %v\n", t.Label)
	fmt.Fprintf(&b, "\tValidFor: %v\n", t.ValidFor)
	fmt.Fprintf(&b, "\tValue: %v\n", t.Value)

	return b.String()
}
// PicklistValueInfoQueryResponse is the envelope returned by a SOQL query for
// PicklistValueInfo records; BaseQuery carries the shared paging metadata.
type PicklistValueInfoQueryResponse struct {
	BaseQuery

	Records []PicklistValueInfo `json:"Records" force:"records"`
}
|
package util
import (
"bufio"
)
func ReadWholeLine(conn *bufio.Reader) ([]byte, error) {
var (
result = []byte{}
isPrefex = true
err error
line []byte
)
for isPrefex {
line, isPrefex, err = conn.ReadLine()
result = append(result, line...)
if err != nil {
return result, err
}
}
return result, nil
}
|
package controllers
import (
"github.com/gorilla/websocket"
"net/http"
"github.com/astaxie/beego"
"fmt"
)
// WebSocketController handles WebSocket requests. It embeds beego.Controller
// so it can be routed like any other beego controller; the actual protocol
// upgrade happens in Join.
type WebSocketController struct {
	beego.Controller
}
// Join method handles WebSocket requests for WebSocketController.
// It upgrades the HTTP request to a WebSocket connection, registers the
// connection in the global WSM registry keyed by the remote address string,
// echoes every valid binary message back to the client and forwards it on
// WSM.received. When the client disconnects the connection is removed from
// the registry.
func (this *WebSocketController) Join() {
	// Upgrade from http request to WebSocket.
	ws, err := websocket.Upgrade(this.Ctx.ResponseWriter, this.Ctx.Request, nil, 1024, 1024)
	if _, ok := err.(websocket.HandshakeError); ok {
		http.Error(this.Ctx.ResponseWriter, "Not a websocket handshake", 400)
		return
	} else if err != nil {
		beego.Error("Cannot setup WebSocket connection:", err)
		return
	}

	// The registry key: always the string form of the remote address.
	remote := ws.RemoteAddr().String()

	wsConn := new(WsConn)
	wsConn.Conn = ws
	wsConn.Ip = remote
	WSM.Conns.Store(remote, wsConn)

	// Message receive loop.
	for {
		_, m, err := ws.ReadMessage()
		if err != nil {
			fmt.Println("客户端和服务器断开连接" + remote)
			// BUG FIX: the connection was stored under remote (a string) but
			// deleted with ws.RemoteAddr() (a net.Addr), so the key never
			// matched and closed connections leaked in the registry forever.
			// Delete with the same string key used by Store.
			WSM.Conns.Delete(remote)
			return
		}
		if m != nil {
			// Lazily create the shared channel on first message.
			// NOTE(review): this lazy init is racy if several connections
			// arrive concurrently — confirm WSM.received is initialized once
			// at startup instead.
			if WSM.received == nil {
				WSM.received = make(chan []byte)
			}
			if len(m) < 2 {
				fmt.Println("收到错误消息")
				continue
			}
			// Echo the message back; a write failure is logged but the read
			// loop keeps running until ReadMessage reports the disconnect.
			if werr := ws.WriteMessage(websocket.BinaryMessage, m); werr != nil {
				beego.Error("Cannot write WebSocket message:", werr)
			}
			WSM.received <- m
			fmt.Println(len(WSM.received))
		}
	}
}
|
package conf
var Num int = 10000 // Num is deliberately capitalized (exported) so other packages can read it.
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package retry
import (
"io"
"time"
)
// Default defines the default retry parameters that should be used throughout
// the program. It is fine to update this variable on start up.
//
// Named fields are used so the values cannot silently shift if the field
// order of Config ever changes (the previous positional literal depended on
// it).
//
// NOTE(review): SleepBase (500ms) exceeds SleepMax (100ms), so per Do every
// sleep is clamped to 100ms and SleepMultiplicative never has an effect —
// confirm these values are intentional.
var Default = &Config{
	MaxTries:            10,
	SleepMax:            100 * time.Millisecond,
	SleepBase:           500 * time.Millisecond,
	SleepMultiplicative: 5 * time.Second,
}
// Config defines the retry properties.
//
// The sleep before retry i is SleepBase + i*SleepMultiplicative, clamped to
// SleepMax (see Config.Do).
type Config struct {
	MaxTries int // Maximum number of retries.
	SleepMax time.Duration // Maximum duration of a single sleep.
	SleepBase time.Duration // Base sleep duration.
	SleepMultiplicative time.Duration // Incremental sleep duration for each additional try.
}
// Do runs a Retriable, potentially retrying it multiple times.
//
// An attempt is retried only when its error has the concrete type Error; any
// other result (including nil) is returned immediately. r.Close is invoked
// exactly once on the way out and its error is surfaced when the attempts
// themselves did not produce one.
func (c *Config) Do(r Retriable) (err error) {
	defer func() {
		if cerr := r.Close(); err == nil {
			err = cerr
		}
	}()

	for attempt := 0; attempt < c.MaxTries; attempt++ {
		err = r.Do()
		if _, retriable := err.(Error); !retriable {
			return err
		}

		// Sleep before the next attempt, but never after the last one.
		if attempt == c.MaxTries-1 {
			break
		}
		delay := c.SleepBase + time.Duration(attempt)*c.SleepMultiplicative
		if delay > c.SleepMax {
			delay = c.SleepMax
		}
		time.Sleep(delay)
	}

	return
}
// Error is an error that can be retried.
//
// Wrapping an error in Error signals to Config.Do that the operation may be
// attempted again; any other error type aborts the retry loop immediately.
type Error struct {
	Err error
}

// Error returns the message of the wrapped error.
func (e Error) Error() string {
	return e.Err.Error()
}
// Retriable is a task that can be retried. It is important that Do be
// idempotent.
//
// Close is called exactly once by Config.Do after the final attempt, and its
// error is returned when the attempts themselves succeeded.
type Retriable interface {
	io.Closer
	Do() error
}
|
package main
import (
"strconv"
"github.com/freignat91/mlearning/api"
"github.com/spf13/cobra"
)
// trainSoluceOptions holds the CLI flags of the trainSoluce command
// (currently none).
type trainSoluceOptions struct {
}

// trainSoluceOpts is the flag container bound to the trainSoluce command.
var (
	trainSoluceOpts = trainSoluceOptions{}
)
// TrainSoluceCmd is the cobra command that trains the network on right
// computed samples; on failure it terminates the process via mlCli.Fatal.
var TrainSoluceCmd = &cobra.Command{
	Use:   "trainSoluce",
	Short: "train network right computed samples",
	Run: func(cmd *cobra.Command, args []string) {
		if err := mlCli.trainSoluce(cmd, args); err != nil {
			mlCli.Fatal("Error: %v\n", err)
		}
	},
}
// init registers TrainSoluceCmd as a subcommand of NetworkCmd.
func init() {
	NetworkCmd.AddCommand(TrainSoluceCmd)
}
// trainSoluce asks the server to run the requested number of trainings and
// displays the resulting report lines. The first argument must be an integer
// count; otherwise the process is terminated via m.Fatal.
func (m *mlCLI) trainSoluce(cmd *cobra.Command, args []string) error {
	if len(args) < 1 {
		m.Fatal("need number of training as first argument\n")
	}

	count, convErr := strconv.Atoi(args[0])
	if convErr != nil {
		m.Fatal("need number as first argument: %s\n", args[0])
	}

	client := mlapi.New(m.server)

	lines, err := client.TrainSoluce(count)
	if err != nil {
		return err
	}

	displayList(lines)

	return nil
}
|
package node
import (
"github.com/projecteru2/cli/cmd/utils"
"github.com/urfave/cli/v2"
)
const (
nodeArgsUsage = "nodename"
)
// Command exports node subcommands: get/remove/up/down, workload listing,
// status heartbeat helpers, resource inspection and node add/set.
func Command() *cli.Command {
	return &cli.Command{
		Name:  "node",
		Usage: "node commands",
		Subcommands: []*cli.Command{
			{
				Name:  "get",
				Usage: "get a node",
				Flags: []cli.Flag{
					&cli.StringFlag{
						Name:  "plugins",
						Usage: "specify the plugins, e.g. --plugins=cpumem,volume",
					},
				},
				ArgsUsage: nodeArgsUsage,
				Action:    utils.ExitCoder(cmdNodeGet),
			},
			{
				Name:      "remove",
				Usage:     "remove a node",
				ArgsUsage: nodeArgsUsage,
				Action:    utils.ExitCoder(cmdNodeRemove),
			},
			{
				Name:  "workloads",
				Usage: "list node workloads",
				Flags: []cli.Flag{
					&cli.StringSliceFlag{
						Name:  "label",
						Usage: "labels to filter, e.g, a=1, b=2",
					},
				},
				Aliases:   []string{"containers"},
				ArgsUsage: nodeArgsUsage,
				Action:    utils.ExitCoder(cmdNodeListWorkloads),
			},
			{
				Name:      "up",
				Usage:     "set node up",
				ArgsUsage: nodeArgsUsage,
				Action:    utils.ExitCoder(cmdNodeSetUp),
			},
			{
				Name:  "down",
				Usage: "set node down",
				Flags: []cli.Flag{
					&cli.BoolFlag{
						Name:  "check",
						Usage: "check node workloads are online or not",
					},
					&cli.IntFlag{
						Name:  "check-timeout",
						Usage: "check node timeout",
						Value: 20,
					},
				},
				ArgsUsage: nodeArgsUsage,
				Action:    utils.ExitCoder(cmdNodeSetDown),
			},
			{
				Name:  "set-status",
				Usage: "set status of node, used for heartbeat",
				Flags: []cli.Flag{
					&cli.IntFlag{
						Name:  "ttl",
						Usage: "status ttl for node",
						Value: 180,
					},
					&cli.IntFlag{
						Name:  "interval",
						Usage: "if given, will set status every INTERVAL seconds",
						Value: 0,
					},
				},
				ArgsUsage: nodeArgsUsage,
				Action:    utils.ExitCoder(cmdNodeSetStatus),
			},
			{
				Name:   "watch-status",
				Usage:  "watch status of node, used for heartbeat",
				Action: utils.ExitCoder(cmdNodeWatchStatus),
			},
			{
				Name:      "resource",
				Usage:     "check node resource",
				ArgsUsage: nodeArgsUsage,
				Flags: []cli.Flag{
					&cli.BoolFlag{
						Name:  "fix",
						Usage: "fix node resource diff",
					},
				},
				Action: utils.ExitCoder(cmdNodeResource),
			},
			{
				Name:      "set",
				Aliases:   []string{"update"},
				Usage:     "set node resource",
				ArgsUsage: nodeArgsUsage,
				Action:    utils.ExitCoder(cmdNodeSet),
				Flags: []cli.Flag{
					&cli.BoolFlag{
						Name:  "mark-workloads-down",
						Usage: "mark workloads down",
					},
					&cli.StringFlag{
						Name: "memory",
						Usage: `memory, unit can be K/M/G/T,
		when using --delta flag, this can be a negative number indicating how much to add to the current value,
		e.g. --memory -10G --delta, means memory will be the current value - 10`,
					},
					&cli.StringFlag{
						Name: "storage",
						Usage: `storage, unit can be K/M/G/T,
		when using --delta flag, this can be a negative number indicating how much to add to the current value,
		e.g. --storage -10G --delta, means storage will be the current value - 10`,
					},
					&cli.StringFlag{
						Name:  "cpu",
						Usage: "cpu value in string, e.g. 0:100,1:200,3:50",
					},
					&cli.StringSliceFlag{
						Name: "numa-memory",
						Usage: `numa memory values, unit can be K/M/G/T,
		when using --delta flag, this can be a negative number indicating how much to add to the current value,
		e.g. --numa-memory -10G --delta, means the value will be current value - 10
		this value can be set multiple times, the index will be the numa node ID,
		e.g. --numa-memory 10G --numa-memory 15G, means node ID 0 will be 10GB, node ID 1 will be 15GB`,
					},
					&cli.StringFlag{
						Name: "volume",
						Usage: `volume value in string, like "/data0:10G,/data1:10G"
		when using --delta flag, this can be a negative number indicating how much to add to the current value,
		e.g. --volume /data0:-10G,/data1:20G, means /data0 will be subtract 10G and /data1 will be added 20G`,
					},
					&cli.StringSliceFlag{
						Name: "disk",
						Usage: `disk value in string, format: device:mounts:read-iops:write-iops:read-bps:write-bps
		e.g. --disk /dev/sda1:/data0,/:100:100:100M:100M
		when using --delta flag, this can be a negative number indicating how much to add to the current value`,
					},
					&cli.StringFlag{
						Name: "rm-disk",
						Usage: `remove disks, e.g. --rm-disk /dev/vda,/dev/vdb
		rm-disk is not supported in delta mode`,
					},
					&cli.StringSliceFlag{
						Name: "numa-cpu",
						Usage: `numa cpu list, can be set multiple times, the index will be the numa node ID.
		e.g. --numa-cpu 0,1,2,3 --numa-cpu 4,5,6,7 means cpu 0,1,2,3 are bound to node ID 0, cpu 4,5,6,7 are bound to node ID 1`,
					},
					&cli.StringSliceFlag{
						Name:  "label",
						Usage: "label for the node, can set multiple times, e.g. --label a=1 --label b=2",
					},
					&cli.BoolFlag{
						Name:  "delta",
						Usage: "delta flag for settings, when set, all values will be relative to the current values, refer to each option for details",
					},
					&cli.StringFlag{
						Name:  "endpoint",
						Usage: "update node endpoint",
					},
					&cli.StringFlag{
						Name:  "ca",
						Usage: "ca file, like /etc/docker/tls/ca.crt",
						Value: "",
					},
					&cli.StringFlag{
						Name:  "cert",
						Usage: "cert file, like /etc/docker/tls/client.crt",
						Value: "",
					},
					&cli.StringFlag{
						Name:  "key",
						Usage: "key file, like /etc/docker/tls/client.key",
						Value: "",
					},
					&cli.Float64Flag{
						Name:  "node-storage-usage-threshold",
						Usage: "set the node storage usage limit for that node",
					},
					&cli.Float64Flag{
						Name:  "pod-storage-usage-threshold",
						Usage: "set the pod storage usage limit for this node's pod, has less priority than node usage threshold",
					},
					&cli.StringSliceFlag{
						Name:  "workload-limit",
						Usage: "set the maximum number of workloads for a particular App that can be deployed on this node, can set multiple times",
					},
					&cli.StringSliceFlag{
						Name:  "pod-workload-limit",
						Usage: "set the maximum number of workloads for a particular App that can be deployed on each nodes of this pod, can set multiple times",
					},
				},
			},
			{
				Name:      "add",
				Usage:     "add node",
				ArgsUsage: "podname",
				Action:    utils.ExitCoder(cmdNodeAdd),
				Flags: []cli.Flag{
					&cli.StringFlag{
						Name:    "nodename",
						Usage:   "name of this node, use `hostname` as default",
						EnvVars: []string{"HOSTNAME"},
						Value:   utils.GetHostname(),
					},
					&cli.StringFlag{
						Name:  "endpoint",
						Usage: "endpoint of docker server",
						Value: "",
					},
					&cli.StringFlag{
						Name:  "ca",
						Usage: "ca file of docker server, like /etc/docker/tls/ca.crt",
						Value: "",
					},
					&cli.StringFlag{
						Name:  "cert",
						Usage: "cert file of docker server, like /etc/docker/tls/client.crt",
						Value: "",
					},
					&cli.StringFlag{
						Name:  "key",
						Usage: "key file of docker server, like /etc/docker/tls/client.key",
						Value: "",
					},
					&cli.IntFlag{
						Name:        "cpu",
						Usage:       "cpu count",
						DefaultText: "total cpu",
					},
					&cli.IntFlag{
						Name:        "share",
						Usage:       "share count",
						DefaultText: "defined in core",
					},
					&cli.StringFlag{
						Name:  "memory",
						Usage: "memory like -1M or 1G, support K, M, G, T",
					},
					&cli.StringFlag{
						Name:  "storage",
						Usage: "storage -1M or 1G, support K, M, G, T",
					},
					&cli.StringSliceFlag{
						Name:  "label",
						Usage: "add label for node, like a=1 b=2, can set multiple times",
					},
					&cli.StringSliceFlag{
						Name:  "numa-cpu",
						Usage: "numa cpu list, can set multiple times, use comma separated",
					},
					&cli.StringSliceFlag{
						Name:  "numa-memory",
						Usage: "numa memory, can set multiple times. if not set, it will count numa-cpu groups, and divided by total memory",
					},
					&cli.StringSliceFlag{
						Name:  "volumes",
						Usage: `device volumes, can set multiple times. e.g. "--volumes /data:100G" `,
					},
					&cli.StringSliceFlag{
						Name: "disk",
						Usage: `disk value in string, format: device:mounts:read-iops:write-iops:read-bps:write-bps
		e.g. --disk /dev/sda1:/data0,/:100:100:100M:100M`,
					},
					&cli.StringSliceFlag{
						Name:  "workload-limit",
						Usage: "set the maximum number of workloads for a particular App that can be deployed on this node, can set multiple times",
					},
				},
			},
		},
	}
}
|
package main
import (
	"fmt"
	"html/template"
	"log"
	"net/http"
)
func login(writer http.ResponseWriter, request *http.Request) {
request.ParseForm()
fmt.Println("method:", request.Method)
if request.Method == "GET" {
t, _ := template.ParseFiles("login.gtpl")
t.Execute(writer, nil)
} else if request.Method == "POST" {
fmt.Println("username:", request.Form["username"])
fmt.Println("password:", request.Form["password"])
}
}
// main registers the login handler and serves on :9090. The ListenAndServe
// error (e.g. port already in use) was previously ignored, making startup
// failures silent; it is now fatal.
func main() {
	http.HandleFunc("/login", login)
	if err := http.ListenAndServe(":9090", nil); err != nil {
		log.Fatal(err)
	}
}
|
package storage
import (
"github.com/biezhi/gorm-paginator/pagination"
md "github.com/ebikode/eLearning-core/model"
)
// DBApplicationStorage encapsulates DB Connection Model.
// It embeds *MDatabase so every method can reach the underlying gorm handle
// via adb.db.
type DBApplicationStorage struct {
	*MDatabase
}
// NewDBApplicationStorage Initialize Application Storage around the given
// database handle.
func NewDBApplicationStorage(db *MDatabase) *DBApplicationStorage {
	return &DBApplicationStorage{db}
}
// Get fetches a single Application from the DB by ID, with its Course, Grade
// and User associations preloaded. It returns nil when the record does not
// exist or the query fails.
func (adb *DBApplicationStorage) Get(id uint) *md.Application {
	var application md.Application

	err := adb.db.
		Preload("Course").
		Preload("Grade").
		Preload("User").
		Where("applications.id=?", id).First(&application).Error

	// Treat both a query error and a zero ID (nothing loaded) as "not found".
	if err != nil || application.ID < 1 {
		return nil
	}

	return &application
}
// GetAll Fetch all applications from DB.
// Results are paginated by gorm-paginator using page/limit, ordered by
// created_at descending, with Course, Grade and User preloaded.
func (adb *DBApplicationStorage) GetAll(page, limit int) []*md.Application {
	var applications []*md.Application
	pagination.Paging(&pagination.Param{
		DB: adb.db.
			Preload("Course").
			Preload("Grade").
			Preload("User").
			Order("created_at desc").
			Find(&applications),
		Page:    page,
		Limit:   limit,
		OrderBy: []string{"created_at desc"},
	}, &applications)
	return applications
}
// GetByUser Fetch all of a user's applications from DB, with Course, Grade
// and User preloaded. Query errors are not surfaced; an empty slice is
// returned in that case.
func (adb *DBApplicationStorage) GetByUser(userID string) []*md.Application {
	var applications []*md.Application
	adb.db.
		Preload("Course").
		Preload("Grade").
		Preload("User").
		Where("user_id=?", userID).
		Find(&applications)
	return applications
}
// GetByCourse fetches all applications for the given course, newest first,
// with Course, Grade and User preloaded.
func (adb *DBApplicationStorage) GetByCourse(courseID int) []*md.Application {
	var applications []*md.Application
	// Select resource from database
	adb.db.
		Preload("Course").
		Preload("Grade").
		Preload("User").
		Where("course_id=?", courseID).Order("created_at desc").Find(&applications)
	return applications
}
// GetByCourseOwner fetches all applications submitted to courses owned by the
// given user (joined through the courses table), newest first.
func (adb *DBApplicationStorage) GetByCourseOwner(userID string) []*md.Application {
	var applications []*md.Application
	// Select resource from database
	adb.db.
		Preload("Course").
		Preload("Grade").
		Preload("User").
		Joins("JOIN courses as co ON co.id = applications.course_id").
		Where("co.user_id=?", userID).
		Order("created_at desc").Find(&applications)
	return applications
}
// Store Add a new application row and return it re-read through Get so the
// caller receives the record with its associations preloaded.
func (adb *DBApplicationStorage) Store(p md.Application) (*md.Application, error) {
	application := p

	if err := adb.db.Create(&application).Error; err != nil {
		return nil, err
	}

	return adb.Get(application.ID), nil
}
// Update a application, returning the same pointer on success.
//
// NOTE(review): application is already a *md.Application, so &application
// hands gorm a **md.Application — confirm gorm dereferences this correctly,
// or consider Save(application).
func (adb *DBApplicationStorage) Update(application *md.Application) (*md.Application, error) {
	err := adb.db.Save(&application).Error
	if err != nil {
		return nil, err
	}
	return application, nil
}
// Delete a application. With isPermarnant set, the row is removed outright
// via Unscoped; otherwise gorm performs its default (soft) delete.
func (adb *DBApplicationStorage) Delete(c md.Application, isPermarnant bool) (bool, error) {
	var err error

	if isPermarnant {
		err = adb.db.Unscoped().Delete(c).Error
	} else {
		err = adb.db.Delete(c).Error
	}

	if err != nil {
		return false, err
	}

	return true, nil
}
|
package handlers
import (
"bytes"
"html/template"
"strconv"
"time"
rice "github.com/GeertJohan/go.rice"
"github.com/labstack/echo/v4"
)
// HomeHandler is a default handler serving the index page.
// GET /
//
// It parses the index template from the embedded rice box, injects a
// cache-busting UniqueKey (development only) plus the environment name, and
// renders the result.
func HomeHandler(e echo.Context) error {
	// The parse error was previously discarded; a broken template would have
	// surfaced only as a confusing Execute failure (or nil dereference).
	t, err := template.New("index").Parse(
		e.Get("TemplatesBox").(*rice.Box).MustString("index.html"),
	)
	if err != nil {
		e.Logger().Errorf("Failed To Parse HTML Template, %s", err)
		return e.String(500, "Failed To Parse The HTML Template")
	}

	tpl := new(bytes.Buffer)

	data := map[string]string{
		"UniqueKey":   "?v=" + strconv.FormatInt(time.Now().Unix(), 10),
		"Environment": e.Get("Environment").(string),
	}
	// Outside development the key is cleared so assets remain cacheable.
	if data["Environment"] != "development" {
		data["UniqueKey"] = ""
	}

	if err := t.Execute(tpl, data); err != nil {
		// BUG FIX: Logger().Fatalf terminated the entire process on a single
		// failed render and made the 500 response below unreachable; log at
		// error level instead. Also fixes the "Prase" typo in the message.
		e.Logger().Errorf("Failed To Parse HTML Template, %s", err)
		return e.String(500, "Failed To Parse The HTML Template")
	}

	return e.HTML(200, tpl.String())
}
|
package sshconfig
import (
"errors"
"github.com/spencercjh/sshctx/internal/env"
"github.com/spencercjh/sshctx/internal/testutil"
"io/ioutil"
"os"
"path/filepath"
"testing"
)
// Test_getSSHCtxDataDir verifies that the sshctx data directory defaults
// to $HOME/.sshctx.
func Test_getSSHCtxDataDir(t *testing.T) {
	cases := []struct {
		name    string
		want    string
		wantErr bool
	}{
		{name: "default", want: filepath.Join(os.Getenv("HOME"), ".sshctx"), wantErr: false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := getSSHCtxDataDir()
			if (err != nil) != tc.wantErr {
				t.Errorf("getSSHCtxDataDir() error = %v, wantErr %v", err, tc.wantErr)
				return
			}
			if got != tc.want {
				t.Errorf("getSSHCtxDataDir() got = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestGetSSHCtxDataPath verifies that the sshctx config file defaults
// to $HOME/.sshctx/config.yaml.
func TestGetSSHCtxDataPath(t *testing.T) {
	cases := []struct {
		name    string
		want    string
		wantErr bool
	}{
		{name: "default", want: filepath.Join(os.Getenv("HOME"), ".sshctx", "config.yaml"), wantErr: false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := GetSSHCtxDataPath()
			if (err != nil) != tc.wantErr {
				t.Errorf("GetSSHCtxDataPath() error = %v, wantErr %v", err, tc.wantErr)
				return
			}
			if got != tc.want {
				t.Errorf("GetSSHCtxDataPath() got = %v, want %v", got, tc.want)
			}
		})
	}
}
// Test_getSSHConfigPath verifies that the ssh config path defaults
// to $HOME/.ssh/config (against a fixture set up by testutil).
func Test_getSSHConfigPath(t *testing.T) {
	testutil.SetupSSHConfig(t)
	defer testutil.TearDownSSHConfig()
	cases := []struct {
		name    string
		want    string
		wantErr bool
	}{
		{name: "default", want: filepath.Join(os.Getenv("HOME"), ".ssh", "config"), wantErr: false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := getSSHConfigPath()
			if (err != nil) != tc.wantErr {
				t.Errorf("getSSHConfigPath() error = %v, wantErr %v", err, tc.wantErr)
				return
			}
			if got != tc.want {
				t.Errorf("getSSHConfigPath() got = %v, want %v", got, tc.want)
			}
		})
	}
}
// Test_openFile checks that openFile returns a usable handle for an
// existing file and an error (nil handle) for a missing one.
func Test_openFile(t *testing.T) {
	// Throwaway fixture file, removed when the test finishes.
	_ = ioutil.WriteFile("test.txt", []byte("Hello"), 0600)
	test, _ := os.OpenFile("test.txt", os.O_RDONLY, 0755)
	defer func(test *os.File) {
		_ = test.Close()
	}(test)
	defer func() {
		_ = os.Remove("test.txt")
	}()
	type args struct {
		path string
		name string
	}
	tests := []struct {
		name    string
		args    args
		want    *os.File
		wantErr bool
	}{
		{name: "default", args: args{path: "test.txt", name: "test"}, want: test, wantErr: false},
		{name: "not-exist", args: args{path: "not-exist", name: "not-exist"}, want: nil, wantErr: true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := openFile(tt.args.path, tt.args.name)
			if (err != nil) != tt.wantErr {
				t.Errorf("openFile() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// Compare by file name; the nil guards mean an unexpected
			// nil/non-nil mismatch reports a failure instead of panicking
			// on a nil (*os.File).Name() call.
			if got != tt.want {
				if got == nil || tt.want == nil || got.Name() != tt.want.Name() {
					t.Errorf("openFile() got = %v, want %v", got, tt.want)
				}
			}
		})
	}
}
// cwd is the working directory at test start, used to locate test fixtures.
var cwd, _ = os.Getwd()
// tearUpSSHCTXData removes the sshctx config file and its data directory so
// each test starts from a clean slate. Errors are deliberately ignored:
// the files may legitimately not exist yet.
func tearUpSSHCTXData() {
	path, _ := GetSSHCtxDataPath()
	_ = os.Remove(path)
	dir, _ := getSSHCtxDataDir()
	_ = os.Remove(dir)
}
// TestStandardLoader_LoadSSHCTXData exercises LoadSSHCTXData against the
// default data path, an explicit SSHCTX override, an unsupported
// (colon-separated) override, and several missing-file scenarios.
// Note: "exited" in the subtest names appears to mean "existed".
func TestStandardLoader_LoadSSHCTXData(t *testing.T) {
	t.Setenv(env.Debug, "true")
	// Default lookup with no SSHCTX override.
	t.Run("default", func(t *testing.T) {
		st := &StandardLoader{}
		sshctx, err := st.LoadSSHCTXData()
		if err != nil {
			t.Errorf("LoadSSHCTXData() error = %v", err)
		}
		if sshctx == nil {
			t.Errorf("sshctx should not be nil")
		}
	})
	// SSHCTX points at an existing config file.
	t.Run("exited-specific-file", func(t *testing.T) {
		path := filepath.Join(cwd, "..", "..", "test", "config_example.yaml")
		t.Setenv("SSHCTX", path)
		if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
			t.Errorf("test file: %s should exist but not", path)
		}
		st := &StandardLoader{}
		sshctx, err := st.LoadSSHCTXData()
		if err != nil {
			t.Errorf("LoadSSHCTXData() error = %v", err)
		}
		if sshctx == nil {
			t.Errorf("sshctx should not be nil")
		}
	})
	// A colon-separated SSHCTX value (two paths) must be rejected.
	t.Run("unsupported-specific-file", func(t *testing.T) {
		path := filepath.Join(cwd, "..", "..", "test", "config_example.yaml")
		t.Setenv("SSHCTX", path+":"+path)
		if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
			t.Errorf("test file: %s should exist but not", path)
		}
		st := &StandardLoader{}
		sshctx, err := st.LoadSSHCTXData()
		if err == nil {
			t.Errorf("LoadSSHCTXData() error should not be nil")
		}
		if sshctx != nil {
			t.Errorf("sshctx should be nil")
		}
	})
	// SSHCTX points at a missing file; the loader is expected to fall back
	// (and still return data), so data files are cleaned up afterwards.
	t.Run("non-exited-specific-file", func(t *testing.T) {
		defer tearUpSSHCTXData()
		path := filepath.Join(cwd, "..", "..", "test", "non-existed.yaml")
		t.Setenv("SSHCTX", path)
		if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
			if err == nil {
				t.Errorf("test file: %s shouldn't exist but not", path)
			}
		}
		st := &StandardLoader{}
		sshctx, err := st.LoadSSHCTXData()
		if err != nil {
			t.Errorf("LoadSSHCTXData() error = %v", err)
		}
		if sshctx == nil {
			t.Errorf("sshctx should not be nil")
		}
	})
	// Neither the SSHCTX override nor the default config file exists.
	t.Run("non-exited-specific-file-and-default-file", func(t *testing.T) {
		tearUpSSHCTXData()
		defer tearUpSSHCTXData()
		path := filepath.Join(cwd, "..", "..", "test", "non-existed.yaml")
		t.Setenv("SSHCTX", path)
		if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
			if err == nil {
				t.Errorf("test file: %s shouldn't exist but not", path)
			}
		}
		dir, _ := getSSHCtxDataDir()
		_ = os.Remove(filepath.Join(dir, "config.yaml"))
		st := &StandardLoader{}
		sshctx, err := st.LoadSSHCTXData()
		if err != nil {
			t.Errorf("LoadSSHCTXData() error = %v", err)
		}
		if sshctx == nil {
			t.Errorf("sshctx should not be nil")
		}
	})
	// Same as above, but the data directory itself already exists.
	t.Run("non-exited-specific-file-and-default-file-but-dir-exist", func(t *testing.T) {
		tearUpSSHCTXData()
		defer tearUpSSHCTXData()
		path := filepath.Join(cwd, "..", "..", "test", "non-existed.yaml")
		t.Setenv("SSHCTX", path)
		if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
			if err == nil {
				t.Errorf("test file: %s shouldn't exist but not", path)
			}
		}
		dir, _ := getSSHCtxDataDir()
		_ = os.Mkdir(dir, 0777)
		st := &StandardLoader{}
		sshctx, err := st.LoadSSHCTXData()
		if err != nil {
			t.Errorf("LoadSSHCTXData() error = %v", err)
		}
		if sshctx == nil {
			t.Errorf("sshctx should not be nil")
		}
	})
}
// TestStandardLoader_LoadSSHConfig exercises LoadSSHConfig against the
// default ~/.ssh/config (when present), an explicit SSHCONFIG override,
// an unsupported (colon-separated) override, and a missing file.
func TestStandardLoader_LoadSSHConfig(t *testing.T) {
	t.Setenv(env.Debug, "true")
	testutil.SetupSSHConfig(t)
	defer testutil.TearDownSSHConfig()
	defaultSSHConfigPath, _ := getSSHConfigPath()
	if _, err := os.Stat(defaultSSHConfigPath); err == nil {
		// ~/.ssh/config exist
		t.Run("default", func(t *testing.T) {
			st := &StandardLoader{}
			sshconfig, err := st.LoadSSHConfig()
			if err != nil {
				t.Errorf("LoadSSHConfig() error = %v", err)
			}
			if sshconfig == nil {
				t.Errorf("sshconfig should not be nil")
			}
		})
	}
	// SSHCONFIG points at an existing fixture config.
	t.Run("specific-sshconfig", func(t *testing.T) {
		path := filepath.Join(cwd, "..", "..", "test", "ssh-config-example")
		t.Setenv("SSHCONFIG", path)
		if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
			if err != nil {
				t.Errorf("test file: %s should exist but not", path)
			}
		}
		st := &StandardLoader{}
		sshconfig, err := st.LoadSSHConfig()
		if err != nil {
			t.Errorf("LoadSSHConfig() error = %v", err)
		}
		if sshconfig == nil {
			t.Errorf("sshconfig should not be nil")
		}
	})
	// A colon-separated SSHCONFIG value (two paths) must be rejected.
	t.Run("non-supported-specific-sshconfig", func(t *testing.T) {
		path := filepath.Join(cwd, "..", "..", "test", "ssh-config-example")
		t.Setenv("SSHCONFIG", path+":"+path)
		if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
			if err != nil {
				t.Errorf("test file: %s should exist but not", path)
			}
		}
		st := &StandardLoader{}
		sshconfig, err := st.LoadSSHConfig()
		if err == nil {
			t.Errorf("LoadSSHConfig() error shouldn't be nil")
		}
		if sshconfig != nil {
			t.Errorf("sshconfig should be nil")
		}
	})
	// A missing SSHCONFIG file is an error (no fallback for ssh config).
	t.Run("non-existed-specific-sshconfig", func(t *testing.T) {
		path := filepath.Join(cwd, "..", "..", "test", "not-existed")
		t.Setenv("SSHCONFIG", path)
		if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
			if err == nil {
				t.Errorf("test file: %s shouldn't exist but not", path)
			}
		}
		st := &StandardLoader{}
		sshconfig, err := st.LoadSSHConfig()
		if err == nil {
			t.Errorf("LoadSSHConfig() error shouldn't be nil")
		}
		if sshconfig != nil {
			t.Errorf("sshconfig should be nil")
		}
	})
}
|
package main
// fixme : url https://blog.csdn.net/lazyboy_/article/details/103289750
import "fmt"
// Message is a small value object used to demonstrate two construction
// styles: a positional constructor (New) and functional options (NewByOption).
type Message struct {
	id int
	name string
	address string
	phone int
}
// String prints the message fields to stdout.
// NOTE(review): despite the name, this does not implement fmt.Stringer
// (it has no string return); it writes directly with fmt.Printf.
func (msg Message) String() {
	fmt.Printf("ID:%d \n- Name:%s \n- Address:%s \n- phone:%d\n", msg.id, msg.name, msg.address, msg.phone)
}
// New builds a Message from explicit positional arguments.
func New(id, phone int, name, addr string) Message {
	var msg Message
	msg.id = id
	msg.phone = phone
	msg.name = name
	msg.address = addr
	return msg
}
// Option mutates a Message in place; used by the option-based constructors.
type Option func(msg *Message)
// DEFAULT_MESSAGE is the sentinel starting value for the option-based
// constructors (all fields set to -1 / "-1").
// NOTE(review): Go convention would name this defaultMessage (MixedCaps),
// but renaming would touch every caller.
var DEFAULT_MESSAGE = Message{id: -1, name: "-1", address: "-1", phone: -1}
// WithID returns an Option that sets the message id.
func WithID(id int) Option {
	return func(m *Message) {
		m.id = id
	}
}
// WithName returns an Option that sets the message name.
func WithName(name string) Option {
	return func(m *Message) {
		m.name = name
	}
}
// WithAddress returns an Option that sets the message address.
func WithAddress(addr string) Option {
	return func(m *Message) {
		m.address = addr
	}
}
// WithPhone returns an Option that sets the message phone number.
func WithPhone(phone int) Option {
	return func(m *Message) {
		m.phone = phone
	}
}
// NewByOption constructs a Message starting from DEFAULT_MESSAGE and
// applying each supplied Option in order.
func NewByOption(opts ...Option) Message {
	msg := DEFAULT_MESSAGE
	for _, apply := range opts {
		apply(&msg)
	}
	return msg
}
// NewByOptionWithoutID constructs a Message with the id passed positionally
// and every other field supplied via Options. (The name means the id is not
// an Option; it is still required.)
func NewByOptionWithoutID(id int, opts ...Option) Message {
	msg := DEFAULT_MESSAGE
	msg.id = id
	for _, apply := range opts {
		apply(&msg)
	}
	return msg
}
// main demonstrates the three constructors producing equivalent messages.
func main() {
	message1 := New(1, 123, "message1", "cache1")
	message1.String()
	message2 := NewByOption(WithID(2), WithName("message2"), WithAddress("cache2"), WithPhone(456))
	message2.String()
	message3 := NewByOptionWithoutID(3, WithAddress("cache3"), WithPhone(789), WithName("message3"))
	message3.String()
}
/*
Output
ID:1
- Name:message1
- Address:cache1
- phone:123
ID:2
- Name:message2
- Address:cache2
- phone:456
ID:3
- Name:message3
- Address:cache3
- phone:789
*/
|
package vault
import (
"bytes"
"fmt"
"path/filepath"
"github.com/operator-framework/operator-sdk/pkg/sdk/action"
"github.com/operator-framework/operator-sdk/pkg/sdk/query"
api "github.com/operator-framework/operator-sdk-samples/vault-operator/pkg/apis/vault/v1alpha1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
	// vaultConfigPath is the path that vault pod uses to read config from
	vaultConfigPath = "/run/vault/config/vault.hcl"
	// vaultTLSAssetDir is the dir where vault's server TLS and etcd TLS assets sit
	vaultTLSAssetDir = "/run/vault/tls/"
	// serverTLSCertName is the filename of the vault server cert
	serverTLSCertName = "server.crt"
	// serverTLSKeyName is the filename of the vault server key
	serverTLSKeyName = "server.key"
)
// prepareVaultConfig applies our section into Vault config file.
// - If given user configmap, appends into user provided vault config
// and creates another configmap "${configMapName}-copy" for it.
// - Otherwise, creates a new configmap "${vaultName}-copy" with our section.
func prepareVaultConfig(vr *api.VaultService) error {
	var cfgData string
	cm := &v1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			Kind: "ConfigMap",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: vr.Namespace,
		},
	}
	// Seed cfgData from the user-supplied configmap when one is referenced.
	if len(vr.Spec.ConfigMapName) != 0 {
		cm.Name = vr.Spec.ConfigMapName
		err := query.Get(cm)
		if err != nil {
			return fmt.Errorf("prepare vault config error: get configmap (%s) failed: %v", vr.Spec.ConfigMapName, err)
		}
		cfgData = cm.Data[filepath.Base(vaultConfigPath)]
	}
	// Write the merged config into the "-copy" configmap owned by this CR.
	cm.Name = configMapNameForVault(vr)
	cm.Labels = labelsForVault(vr.Name)
	cfgData = newConfigWithDefaultParams(cfgData)
	cfgData = newConfigWithEtcd(cfgData, etcdURLForVault(vr.Name))
	cm.Data = map[string]string{filepath.Base(vaultConfigPath): cfgData}
	addOwnerRefToObject(cm, asOwner(vr))
	err := action.Create(cm)
	// AlreadyExists is tolerated: the copy was created by a prior reconcile.
	if err != nil && !apierrors.IsAlreadyExists(err) {
		return fmt.Errorf("prepare vault config error: create new configmap (%s) failed: %v", cm.Name, err)
	}
	return nil
}
// listenerFmt is the HCL template for Vault's TCP listener stanza; the
// format verbs take the server TLS cert and key file paths.
const listenerFmt = `
listener "tcp" {
address = "0.0.0.0:8200"
cluster_address = "0.0.0.0:8201"
tls_cert_file = "%s"
tls_key_file = "%s"
}
`
// etcdStorageFmt is the HCL template for the etcd storage backend stanza;
// the format verbs take the etcd URL, CA cert, client cert and client key paths.
const etcdStorageFmt = `
storage "etcd" {
address = "%s"
etcd_api = "v3"
ha_enabled = "true"
tls_ca_file = "%s"
tls_cert_file = "%s"
tls_key_file = "%s"
sync = "false"
}
`
// newConfigWithEtcd appends an etcd storage section (pointing at etcdURL
// and the mounted etcd client TLS assets) to the given config data.
func newConfigWithEtcd(data, etcdURL string) string {
	section := fmt.Sprintf(etcdStorageFmt, etcdURL,
		filepath.Join(vaultTLSAssetDir, "etcd-client-ca.crt"),
		filepath.Join(vaultTLSAssetDir, "etcd-client.crt"),
		filepath.Join(vaultTLSAssetDir, "etcd-client.key"))
	return data + section
}
// newConfigWithDefaultParams appends the default sections to the given
// config data: a statsd telemetry block and the TCP listener (with the
// mounted server TLS cert/key).
func newConfigWithDefaultParams(data string) string {
	var buf bytes.Buffer
	buf.WriteString(data)
	buf.WriteString(`
telemetry {
statsd_address = "localhost:9125"
}
`)
	buf.WriteString(fmt.Sprintf(listenerFmt,
		filepath.Join(vaultTLSAssetDir, serverTLSCertName),
		filepath.Join(vaultTLSAssetDir, serverTLSKeyName)))
	return buf.String()
}
// configMapNameForVault is the configmap name for the given vault: the
// spec's ConfigMapName (or, when unset, the Vault's own name) with a
// "-copy" suffix.
func configMapNameForVault(v *api.VaultService) string {
	name := v.Spec.ConfigMapName
	if name == "" {
		name = v.Name
	}
	return name + "-copy"
}
|
package main
import (
"fmt"
"net/http"
_ "net/http/pprof"
"strings"
"time"
"github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/op/go-logging.v1"
"cli"
"tools/cache/cluster"
"tools/cache/server"
)
// log is the package-wide logger for the cache server.
var log = logging.MustGetLogger("rpc_cache_server")
// opts holds the command-line flags, parsed by cli.ParseFlagsOrDie in main.
var opts struct {
	Usage string `usage:"rpc_cache_server is a server for Please's remote RPC cache.\n\nSee https://please.build/cache.html for more information."`
	Port int `short:"p" long:"port" description:"Port to serve on" default:"7677"`
	HTTPPort int `long:"http_port" description:"Port to serve HTTP on (for profiling, metrics etc)"`
	MetricsPort int `long:"metrics_port" description:"Port to serve Prometheus metrics on"`
	Dir string `short:"d" long:"dir" description:"Directory to write into" default:"plz-rpc-cache"`
	Verbosity int `short:"v" long:"verbosity" description:"Verbosity of output (higher number = more output, default 2 -> notice, warnings and errors only)" default:"2"`
	LogFile string `long:"log_file" description:"File to log to (in addition to stdout)"`
	CleanFlags struct {
		LowWaterMark cli.ByteSize `short:"l" long:"low_water_mark" description:"Size of cache to clean down to" default:"18G"`
		HighWaterMark cli.ByteSize `short:"i" long:"high_water_mark" description:"Max size of cache to clean at" default:"20G"`
		CleanFrequency cli.Duration `short:"f" long:"clean_frequency" description:"Frequency to clean cache at" default:"10m"`
		MaxArtifactAge cli.Duration `short:"m" long:"max_artifact_age" description:"Clean any artifact that's not been read in this long" default:"720h"`
	} `group:"Options controlling when to clean the cache"`
	TLSFlags struct {
		KeyFile string `long:"key_file" description:"File containing PEM-encoded private key."`
		CertFile string `long:"cert_file" description:"File containing PEM-encoded certificate"`
		CACertFile string `long:"ca_cert_file" description:"File containing PEM-encoded CA certificate"`
		WritableCerts string `long:"writable_certs" description:"File or directory containing certificates that are allowed to write to the cache"`
		ReadonlyCerts string `long:"readonly_certs" description:"File or directory containing certificates that are allowed to read from the cache"`
	} `group:"Options controlling TLS communication & authentication"`
	ClusterFlags struct {
		ClusterPort int `long:"cluster_port" default:"7946" description:"Port to gossip among cluster nodes on"`
		ClusterAddresses string `short:"c" long:"cluster_addresses" description:"Comma-separated addresses of one or more nodes to join a cluster"`
		SeedCluster bool `long:"seed_cluster" description:"Seeds a new cache cluster."`
		ClusterSize int `long:"cluster_size" description:"Number of nodes to expect in the cluster.\nMust be passed if --seed_cluster is, has no effect otherwise."`
		NodeName string `long:"node_name" description:"Name of this node in the cluster. Only usually needs to be passed if running multiple nodes on the same machine, when it should be unique."`
	} `group:"Options controlling clustering behaviour"`
}
// main parses flags, validates the TLS flag combinations, scans the cache
// directory, optionally joins or seeds a cluster, starts optional HTTP
// stats and Prometheus endpoints, and finally serves gRPC forever.
func main() {
	cli.ParseFlagsOrDie("Please RPC cache server", "5.5.0", &opts)
	cli.InitLogging(opts.Verbosity)
	if opts.LogFile != "" {
		cli.InitFileLogging(opts.LogFile, opts.Verbosity)
	}
	// TLS flags must come as a matched pair, and cert-based ACLs only make
	// sense when TLS is enabled at all.
	if (opts.TLSFlags.KeyFile == "") != (opts.TLSFlags.CertFile == "") {
		log.Fatalf("Must pass both --key_file and --cert_file if you pass one")
	} else if opts.TLSFlags.KeyFile == "" && (opts.TLSFlags.WritableCerts != "" || opts.TLSFlags.ReadonlyCerts != "") {
		log.Fatalf("You can only use --writable_certs / --readonly_certs with https (--key_file and --cert_file)")
	}
	log.Notice("Scanning existing cache directory %s...", opts.Dir)
	cache := server.NewCache(opts.Dir, time.Duration(opts.CleanFlags.CleanFrequency),
		time.Duration(opts.CleanFlags.MaxArtifactAge),
		uint64(opts.CleanFlags.LowWaterMark), uint64(opts.CleanFlags.HighWaterMark))
	// Cluster membership: either seed a brand-new cluster or join an
	// existing one via the supplied addresses.
	var clusta *cluster.Cluster
	if opts.ClusterFlags.SeedCluster {
		if opts.ClusterFlags.ClusterSize < 2 {
			log.Fatalf("You must pass a cluster size of > 1 when initialising the seed node.")
		}
		clusta = cluster.NewCluster(opts.ClusterFlags.ClusterPort, opts.Port, opts.ClusterFlags.NodeName)
		clusta.Init(opts.ClusterFlags.ClusterSize)
	} else if opts.ClusterFlags.ClusterAddresses != "" {
		clusta = cluster.NewCluster(opts.ClusterFlags.ClusterPort, opts.Port, opts.ClusterFlags.NodeName)
		clusta.Join(strings.Split(opts.ClusterFlags.ClusterAddresses, ","))
	}
	// Optional plain-text stats endpoint (also exposes net/http/pprof via
	// the blank import's default-mux handlers).
	if opts.HTTPPort != 0 {
		http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
			w.Header().Set("Content-Type", "text/plain")
			w.Write([]byte(fmt.Sprintf("Total size: %d bytes\nNum files: %d\n", cache.TotalSize(), cache.NumFiles())))
		})
		go func() {
			port := fmt.Sprintf(":%d", opts.HTTPPort)
			if opts.TLSFlags.KeyFile != "" {
				log.Fatalf("%s\n", http.ListenAndServeTLS(port, opts.TLSFlags.CertFile, opts.TLSFlags.KeyFile, nil))
			} else {
				log.Fatalf("%s\n", http.ListenAndServe(port, nil))
			}
		}()
		log.Notice("Serving HTTP stats on port %d", opts.HTTPPort)
	}
	log.Notice("Starting up RPC cache server on port %d...", opts.Port)
	s, lis := server.BuildGrpcServer(opts.Port, cache, clusta, opts.TLSFlags.KeyFile, opts.TLSFlags.CertFile,
		opts.TLSFlags.CACertFile, opts.TLSFlags.ReadonlyCerts, opts.TLSFlags.WritableCerts)
	// Optional Prometheus metrics endpoint with gRPC handling-time histograms.
	if opts.MetricsPort != 0 {
		grpc_prometheus.Register(s)
		grpc_prometheus.EnableHandlingTimeHistogram()
		mux := http.NewServeMux()
		mux.Handle("/metrics", prometheus.Handler())
		log.Notice("Serving Prometheus metrics on port %d /metrics", opts.MetricsPort)
		go http.ListenAndServe(fmt.Sprintf(":%d", opts.MetricsPort), mux)
	}
	server.ServeGrpcForever(s, lis)
}
|
package gateway
import (
"context"
"log"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"google.golang.org/grpc"
gw "github.com/Scarlet-Fairy/gateway/pb" // Update
)
// Endpoint identifies a single upstream gRPC service address.
type Endpoint struct {
	Address string
}
// Options configures the gateway: the HTTP listen address, the upstream
// gRPC endpoints to proxy to, the directory serving OpenAPI assets, and
// any extra grpc-gateway ServeMux options.
type Options struct {
	Address string
	Endpoints struct {
		Manager Endpoint
		LogWatcher Endpoint
	}
	OpenApiDir string
	Mux []runtime.ServeMuxOption
}
// Run starts the HTTP gateway: it registers the Manager and LogWatcher
// gRPC-gateway handlers, mounts swagger assets under /api/swagger/, and
// serves (with CORS) until the context is cancelled or the listener fails.
func Run(ctx context.Context, options Options) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	gwMux := runtime.NewServeMux(options.Mux...)
	// Plaintext dial to the upstream gRPC services.
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := gw.RegisterManagerHandlerFromEndpoint(
		ctx,
		gwMux,
		options.Endpoints.Manager.Address,
		opts,
	); err != nil {
		return err
	}
	if err := gw.RegisterLogWatcherHandlerFromEndpoint(
		ctx,
		gwMux,
		options.Endpoints.LogWatcher.Address,
		opts,
	); err != nil {
		return err
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/api/swagger/", openAPIServer(options.OpenApiDir))
	mux.Handle("/api/", gwMux)
	s := http.Server{
		Addr: options.Address,
		Handler: allowCORS(mux),
	}
	// Gracefully shut the server down when the context is cancelled.
	go func() {
		<-ctx.Done()
		log.Println("Shutting down the http server")
		if err := s.Shutdown(context.Background()); err != nil {
			log.Fatalf("Failed to shutdown http server: %v", err)
		}
	}()
	log.Printf("Starting listening at %s\n", options.Address)
	// ErrServerClosed is the expected result of a graceful Shutdown.
	if err := s.ListenAndServe(); err != http.ErrServerClosed {
		return err
	}
	return nil
}
|
package test
import (
"crypto/sha256"
"encoding/hex"
"github.com/gruntwork-io/terratest/modules/aws"
"github.com/gruntwork-io/terratest/modules/files"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/gruntwork-io/terratest/modules/terraform"
"os"
"strings"
"testing"
)
// TestS3BucketCreated applies the s3 Terraform module and asserts the
// resulting bucket exists, covering both a short name (used verbatim) and
// a long name (expected to be hashed and truncated to 63 characters).
func TestS3BucketCreated(t *testing.T) {
	t.Parallel()
	// Random, lowercase env name keeps parallel runs from colliding.
	envName := strings.ToLower(random.UniqueId())
	awsRegion := "eu-west-1"
	tests := map[string]struct {
		terraformVariables map[string]interface{}
		expectedBucketName string
	}{
		"short name": {
			terraformVariables: map[string]interface{}{
				"aws_region": awsRegion,
				"company_name": "acme",
				"env_name": envName,
				"app_name": "orders",
				"bucket_purpose": "pictures",
			},
			expectedBucketName: "acme-" + envName + "-orders-pictures",
		},
		"long name": {
			terraformVariables: map[string]interface{}{
				"aws_region": awsRegion,
				"company_name": "acme",
				"env_name": envName,
				"app_name": "orders",
				"bucket_purpose": "pictures12345678901234567890123456789012345678901234567890",
			},
			// Over-long names are expected to be replaced by a truncated
			// SHA-256 of the full name (63 chars = S3's bucket name limit).
			expectedBucketName: sha256String("acme-" + envName + "-orders-pictures12345678901234567890123456789012345678901234567890")[:63],
		},
	}
	for name, testCase := range tests {
		// capture range variables
		name := name
		testCase := testCase
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			// Copy the module to a temp dir so parallel subtests don't share state.
			terraformModuleDir, err := files.CopyTerraformFolderToTemp("../terraform/s3", "terratest-")
			if err != nil {
				t.Fatalf("Error while creating temp dir %v", err)
			}
			defer os.RemoveAll(terraformModuleDir)
			terraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{
				TerraformDir: terraformModuleDir,
				Vars: testCase.terraformVariables,
			})
			defer terraform.Destroy(t, terraformOptions)
			terraform.InitAndApply(t, terraformOptions)
			aws.AssertS3BucketExists(t, awsRegion, testCase.expectedBucketName)
		})
	}
}
// sha256String returns the hex-encoded SHA-256 digest of str.
func sha256String(str string) string {
	digest := sha256.Sum256([]byte(str))
	return hex.EncodeToString(digest[:])
}
|
package main
import (
"fmt"
)
// main prints each name in the sample slice alongside its index.
func main() {
	names := []string{"Alice", "Bob", "Cott"}
	for idx := 0; idx < len(names); idx++ {
		fmt.Printf("%v: %v\n", idx, names[idx])
	}
}
|
package form
import (
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
)
// PhoneRX represents the phone number matching pattern (international
// +NN / 00NN prefixes or a leading 0, followed by 9-10 digits).
var PhoneRX = regexp.MustCompile("(^\\+[0-9]{2}|^\\+[0-9]{2}\\(0\\)|^\\(\\+[0-9]{2}\\)\\(0\\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\\-\\s]{10}$)")
// EmailRX represents the email address matching pattern.
var EmailRX = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
// Input represents form input values and validations
type Input struct {
	Values url.Values // raw submitted form values
	VErrors ValidationErrors // accumulated validation errors, keyed by field
	CSRF string // CSRF token for the rendered form
	Cid uint
}
// MinLength records a validation error when the field's value is shorter
// than d runes. Empty values are skipped (required-field validation
// reports those separately).
func (inVal *Input) MinLength(field string, d int) {
	v := inVal.Values.Get(field)
	if v == "" {
		return
	}
	if utf8.RuneCountInString(v) >= d {
		return
	}
	inVal.VErrors.Add(field, fmt.Sprintf("This field is too short (minimum is %d characters)", d))
}
// Date validates that the field contains a date in MM/DD/YYYY form,
// optionally followed by extra text after a space (e.g. a time component).
// Empty values are skipped.
func (inVal *Input) Date(field string) {
	value := inVal.Values.Get(field)
	if value == "" {
		return
	}
	// Only the part before the first space is the date itself. The previous
	// revision also printed p[1] for debugging, which panicked whenever the
	// value contained no space.
	datePart := strings.Split(value, " ")[0]
	if _, err := time.Parse("01/02/2006", datePart); err != nil {
		inVal.VErrors.Add(field, "Invalid date")
	}
}
// ValidateRequiredFields records a "required" error for every listed
// field whose submitted value is empty.
// (Leftover debug Printlns that echoed every value to stdout removed.)
func (inVal *Input) ValidateRequiredFields(fields ...string) {
	for _, f := range fields {
		if inVal.Values.Get(f) == "" {
			inVal.VErrors.Add(f, "This field is required field")
		}
	}
}
// ValidateFieldsInteger records an error for every listed field whose
// value does not parse as an integer (an empty value also fails to parse
// and is therefore reported, matching the original behaviour).
// (Leftover debug Println removed.)
func (inVal *Input) ValidateFieldsInteger(fields ...string) {
	for _, f := range fields {
		if _, err := strconv.Atoi(inVal.Values.Get(f)); err != nil {
			inVal.VErrors.Add(f, "This field must be a number")
		}
	}
}
// ValidateFieldsRange records an error for every listed field whose value
// parses as an integer but is negative. Non-numeric values are ignored
// here (ValidateFieldsInteger covers those).
// (Leftover debug Printlns removed.)
func (inVal *Input) ValidateFieldsRange(fields ...string) {
	for _, f := range fields {
		if val, err := strconv.Atoi(inVal.Values.Get(f)); err == nil && val < 0 {
			inVal.VErrors.Add(f, "This field must be positive number")
		}
	}
}
// func (inVal *Input) ValidateFieldFile(fields string) {
// w, fh, er := r.FormFile(fields)
// value := inVal.Values.Get(fields)
// fmt.Println("not")
// val, err := strconv.Atoi(value)
// if err == nil && val < 0 {
// fmt.Println("empty")
// inVal.VErrors.Add(fields, "This field must be positive number")
// }
//}
//////discount range0 to 100
// func (inVal *Input) ValidatediscountRange(field string) {
// value := inVal.Values.Get(field)
// fmt.Println("not")
// val, err := strconv.Atoi(value)
// if err == nil && val > 100 {
// fmt.Println("empty")
// inVal.VErrors.Add(field, "This field must be less than 100")
// }
// }
// MatchesPattern records a validation error when the field's non-empty
// value does not match the supplied pattern; empty values are skipped.
func (inVal *Input) MatchesPattern(field string, pattern *regexp.Regexp) {
	v := inVal.Values.Get(field)
	if v == "" || pattern.MatchString(v) {
		return
	}
	inVal.VErrors.Add(field, "The value entered is invalid")
}
// PasswordMatches verifies that the password and confirm-password fields
// hold the same value, recording an error on both fields when they
// differ. Empty values are skipped (required-field validation reports
// those).
//
// NOTE: the previous revision printed the raw passwords to stdout for
// debugging; that leaked credentials into logs and has been removed.
func (inVal *Input) PasswordMatches(password string, confPassword string) {
	pwd := inVal.Values.Get(password)
	confPwd := inVal.Values.Get(confPassword)
	if pwd == "" || confPwd == "" {
		return
	}
	if pwd != confPwd {
		inVal.VErrors.Add(password, "The Password and Confirm Password values did not match")
		inVal.VErrors.Add(confPassword, "The Password and Confirm Password values did not match")
	}
}
// IsValid reports whether no validation errors have been recorded.
func (inVal *Input) IsValid() bool {
	return len(inVal.VErrors) == 0
}
|
package processors
import (
"errors"
"fmt"
"regexp"
"strings"
"github.com/vmware/kube-fluentd-operator/config-reloader/fluentd"
)
const (
	// tagRegex matches one fluentd tag token, allowing embedded {...} and
	// (...) groups inside otherwise whitespace-delimited tags.
	tagRegex = `(?:[^\s{}()]*(?:(?:(?:{.*?})|(?:\(.*?\)))[^\s{}()]*)+)|(?:[^\s{}()]+(?:(?:(?:{.*?})|(?:\(.*?\)))[^\s{}()]*)*)`
)
// expandTagsState is the processor pass that (optionally) expands {a,b}
// alternatives in <match>/<filter> tags into one directive per alternative.
type expandTagsState struct {
	BaseProcessorState
	tagMatcher *regexp.Regexp // compiled lazily from tagRegex
}
// Process dispatches to the expanding or non-expanding tag pass depending
// on whether the processor context allows tag expansion.
func (p *expandTagsState) Process(input fluentd.Fragment) (fluentd.Fragment, error) {
	if !p.Context.AllowTagExpansion {
		return p.ProcessNotExpandingTags(input)
	}
	return p.ProcessExpandingTags(input)
}
// ProcessExpandingTags rewrites every <match>/<filter> directive whose tag
// contains {a,b,...} alternatives into one cloned directive per expanded
// tag; other directives pass through untouched.
func (p *expandTagsState) ProcessExpandingTags(input fluentd.Fragment) (fluentd.Fragment, error) {
	f := func(d *fluentd.Directive, ctx *ProcessorContext) ([]*fluentd.Directive, error) {
		if d.Name != "match" && d.Name != "filter" {
			return []*fluentd.Directive{d}, nil
		}
		// Compile the tag tokenizer lazily, once per processor instance.
		if p.tagMatcher == nil {
			p.tagMatcher = regexp.MustCompile(tagRegex)
		}
		expandingTags := p.tagMatcher.FindAllString(d.Tag, -1)
		remainders := p.tagMatcher.Split(d.Tag, -1)
		// Anything the tokenizer could not consume (other than whitespace)
		// means the tag is malformed.
		if len(strings.TrimSpace(strings.Join(remainders, ""))) > 0 {
			return nil, fmt.Errorf("Malformed tag %s. Cannot parse it", d.Tag)
		}
		// Repeatedly expand the first {...} group of each tag until the
		// tag count stops growing (i.e. nothing is left to expand).
		var processingTags []string
		for len(expandingTags) > len(processingTags) {
			processingTags = expandingTags
			expandingTags = []string{}
			for _, t := range processingTags {
				expandedTags, err := expandFirstCurlyBraces(t)
				if err != nil {
					return nil, err
				}
				expandingTags = append(expandingTags, expandedTags...)
			}
		}
		// A single resulting tag means nothing expanded; keep the original.
		if len(expandingTags) == 1 {
			return []*fluentd.Directive{d}, nil
		}
		expandedDirectives := make([]*fluentd.Directive, len(expandingTags))
		for i, t := range expandingTags {
			expandedDirectives[i] = d.Clone()
			expandedDirectives[i].Tag = t
		}
		return expandedDirectives, nil
	}
	output, err := applyRecursivelyWithState(input, p.Context, f)
	if err != nil {
		return nil, err
	}
	return output, nil
}
// expandFirstCurlyBraces expands the first {a,b,...} group in tag into one
// tag per alternative ("x{a,b}" -> ["xa", "xb"], whitespace around each
// alternative trimmed). Tags without braces are returned unchanged as a
// single-element slice. #{...} interpolation and malformed brace pairs
// yield an error.
func expandFirstCurlyBraces(tag string) ([]string, error) {
	resultingTags := []string{}
	// openIdx/closeIdx avoid shadowing the builtin close, which the
	// previous revision did.
	openIdx := strings.Index(tag, "{")
	if openIdx < 0 {
		return append(resultingTags, tag), nil
	}
	if openIdx > 0 && tag[openIdx-1:openIdx] == "#" {
		return nil, errors.New("Pattern #{...} is not yet supported in tag definition")
	}
	closeIdx := strings.Index(tag, "}")
	if closeIdx <= openIdx+1 {
		// No closing brace, or an empty {} group.
		return nil, errors.New("Invalid {...} pattern in tag definition")
	}
	for _, alt := range strings.Split(tag[openIdx+1:closeIdx], ",") {
		resultingTags = append(resultingTags, tag[:openIdx]+strings.TrimSpace(alt)+tag[closeIdx+1:])
	}
	return resultingTags, nil
}
// applyRecursivelyWithState walks the directive tree bottom-up: nested
// directives are rewritten first, then callback maps each directive at the
// current level to zero or more replacement directives.
func applyRecursivelyWithState(directives fluentd.Fragment, ctx *ProcessorContext, callback func(*fluentd.Directive, *ProcessorContext) ([]*fluentd.Directive, error)) (fluentd.Fragment, error) {
	if directives == nil {
		return nil, nil
	}
	// Recurse into children first so replacements see processed subtrees.
	for _, d := range directives {
		output, err := applyRecursivelyWithState(d.Nested, ctx, callback)
		if err != nil {
			return nil, err
		}
		d.Nested = output
	}
	// Then map each directive at this level through the callback.
	newDirectives := []*fluentd.Directive{}
	for _, d := range directives {
		output, err := callback(d, ctx)
		if err != nil {
			return nil, err
		}
		newDirectives = append(newDirectives, output...)
	}
	return newDirectives, nil
}
// ProcessNotExpandingTags rejects any <match>/<filter> directive whose tag
// contains a {...} expansion pattern, leaving the input otherwise untouched.
func (p *expandTagsState) ProcessNotExpandingTags(input fluentd.Fragment) (fluentd.Fragment, error) {
	check := func(d *fluentd.Directive, ctx *ProcessorContext) error {
		isTagged := d.Name == "match" || d.Name == "filter"
		if isTagged && strings.Contains(d.Tag, "{") {
			return fmt.Errorf("Processing of {...} pattern in tags is disabled")
		}
		return nil
	}
	if err := applyRecursivelyInPlace(input, p.Context, check); err != nil {
		return nil, err
	}
	return input, nil
}
|
package game
import (
"time"
"github.com/UnnecessaryRain/ironway-core/pkg/mud/chat"
"github.com/UnnecessaryRain/ironway-core/pkg/network/protocol"
log "github.com/sirupsen/logrus"
)
// clientCommand pairs a parsed Command with the client that sent it, so
// replies can be routed back to the right sender.
type clientCommand struct {
	client protocol.Sender
	command Command
}
// Game defines the master game object and everything in the game
type Game struct {
	Chat *chat.Chat
	CommandChan chan clientCommand // queue of commands awaiting the game loop
	everyone protocol.Sender // broadcaster reaching every connected client
}
// NewGame creates a new game object on the heap, wiring in the given
// broadcaster and a buffered command queue.
func NewGame(broadcaster protocol.Sender) *Game {
	g := &Game{}
	g.Chat = new(chat.Chat)
	g.CommandChan = make(chan clientCommand, 256)
	g.everyone = broadcaster
	return g
}
// QueueCommand pushes the command onto the command channel for processing
// by the game loop in RunForever. Blocks if the buffered channel is full.
func (g *Game) QueueCommand(sender protocol.Sender, cmd Command) {
	g.CommandChan <- clientCommand{sender, cmd}
}
// RunForever runs the game loop until the stop channel is closed. It
// multiplexes three event sources: a 100ms ticker that flushes buffered
// chat to everyone, the queued client commands, and the stop signal.
func (g *Game) RunForever(stopChan <-chan struct{}) {
	chatTicker := time.NewTicker(100 * time.Millisecond)
	for {
		select {
		case <-chatTicker.C:
			// Periodically flush accumulated chat to all connected clients.
			g.Chat.Flush(g.everyone)
		case cmd := <-g.CommandChan:
			cmd.command.Run(g)
		case <-stopChan:
			log.Infoln("Stopping game")
			chatTicker.Stop()
			return
		}
	}
}
|
package kuiperbelt
import (
"bytes"
)
// TestSession is an in-memory session double used in tests: writes
// accumulate in the embedded bytes.Buffer, and close/notify calls are
// recorded in flags for later assertions.
type TestSession struct {
	*bytes.Buffer
	key string
	isClosed bool
	isNotifiedClose bool
}
// Key returns the session's identifier.
func (s *TestSession) Key() string {
	return s.key
}
// Close marks the session closed; it never fails.
func (s *TestSession) Close() error {
	s.isClosed = true
	return nil
}
// NotifiedClose records whether a close notification was delivered.
func (s *TestSession) NotifiedClose(isNotified bool) {
	s.isNotifiedClose = isNotified
}
|
package service
import (
"context"
"fmt"
"log"
"strconv"
"sync"
"time"
"github.com/ChowRobin/fantim/constant"
"github.com/ChowRobin/fantim/constant/status"
"github.com/ChowRobin/fantim/manager"
"github.com/ChowRobin/fantim/model/bo"
"github.com/ChowRobin/fantim/model/po"
"github.com/ChowRobin/fantim/model/vo"
"github.com/ChowRobin/fantim/util"
)
// SendMessage persists a message and fans it out to its recipients.
//
// Flow:
//  1. Derive the conversation id / receiver from whichever the caller
//     supplied, then validate with checkMsg.
//  2. Allocate a message id and timestamp.
//  3. Index the message asynchronously and append it to the conversation
//     inbox, then to the per-user inboxes for single chats (group chats are
//     delegated to HandleGroupMessage).
//  4. Push notifications over the long connection.
//
// Returns the generated message id and status.Success, or a zero id with a
// non-Success status on failure.
//
// Fixes vs. previous revision: the two inbox-index log lines had their
// sender/receiver arguments swapped, and the push goroutines raced on a
// shared err variable (whose value was then ignored); each goroutine now
// keeps and logs its own error.
func SendMessage(ctx context.Context, msg *vo.MessageBody) (msgId int64, es *status.ErrStatus) {
	var sender, receiver int64
	sender = msg.Sender
	if msg.Receiver != 0 {
		receiver = msg.Receiver
		// 生成conversationId — derive the conversation id from the participants.
		msg.ConversationId = util.GenConversationId(msg.ConversationType, sender, receiver)
	} else if msg.ConversationId != "" {
		// Derive the receiver from the conversation id instead.
		receiver = util.GetReceiver(msg.ConversationType, msg.ConversationId, sender)
		msg.Receiver = receiver
	}
	if s := checkMsg(msg); s != status.Success {
		es = s
		return
	}
	// 生成消息id — allocate a globally unique message id.
	msg.MsgId = util.GenId()
	if msg.MsgId == 0 {
		es = status.ErrServiceInternal
		return
	}
	msg.MsgIdStr = strconv.FormatInt(msg.MsgId, 10)
	msg.CreateTime = time.Now().Unix()
	// 并发创建索引,优化可用mq解耦 — build the search index concurrently;
	// could be decoupled with an MQ later.
	msgIndex := &bo.MessageIndex{
		MsgId:          msg.MsgId,
		ConversationId: msg.ConversationId,
		Content:        msg.Content,
	}
	go func() {
		if err := msgIndex.Create(ctx); err != nil {
			log.Printf("[SendMessage] msgIndex.Create failed. err=%v", err)
		}
	}()
	// 写入会话链 — append to the canonical conversation inbox.
	convInbox := &bo.Inbox{
		Ctx:       ctx,
		InboxType: constant.InboxTypeConversation,
		Key:       msg.ConversationId,
	}
	if _, err := convInbox.Append(msg); err != nil {
		log.Printf("[service.SendMessage] convInbox append failed. err=%v", err)
		es = status.ErrServiceInternal
		return
	}
	if msg.ConversationType == constant.ConversationTypeSingle {
		// 写入用户链 — per-user inboxes; group chat uses a separate path.
		// Receiver's user inbox.
		recvInbox := &bo.Inbox{
			Ctx:       ctx,
			InboxType: constant.InboxTypeUser,
			Key:       fmt.Sprintf(constant.UserInboxKey, receiver),
		}
		recvIndex, err := recvInbox.Append(msg)
		if err != nil {
			log.Printf("[service.SendMessage] recvIndex append failed. err=%v", err)
			es = status.ErrServiceInternal
			return
		}
		// Fixed: this log line previously printed the sender id.
		log.Printf("[SendMessage] receiver %d inbox index=%d", receiver, recvIndex)
		// Sender's user inbox.
		sendInbox := &bo.Inbox{
			Ctx:       ctx,
			InboxType: constant.InboxTypeUser,
			Key:       fmt.Sprintf(constant.UserInboxKey, sender),
		}
		sendIndex, err := sendInbox.Append(msg)
		if err != nil {
			log.Printf("[service.SendMessage] sendIndex append failed. err=%v", err)
			es = status.ErrServiceInternal
			return
		}
		// Fixed: this log line previously printed the receiver id.
		log.Printf("[SendMessage] sender %d inbox index=%d", sender, sendIndex)
		// 长链推通知 — push notifications concurrently. Each goroutine owns
		// its error (the old code wrote a shared err from both goroutines —
		// a data race — and never inspected it). Push failures are
		// best-effort: logged, not returned.
		wg := &sync.WaitGroup{}
		wg.Add(1)
		go func() {
			defer wg.Done()
			if perr := manager.PushMessage(sender, &vo.PushMessage{
				Body:  msg,
				Index: int32(sendIndex),
			}); perr != nil {
				log.Printf("[SendMessage] push to sender failed. err=%v", perr)
			}
		}()
		wg.Add(1)
		go func() {
			defer wg.Done()
			if perr := manager.PushMessage(receiver, &vo.PushMessage{
				Body:  msg,
				Index: int32(recvIndex),
			}); perr != nil {
				log.Printf("[SendMessage] push to receiver failed. err=%v", perr)
			}
		}()
		wg.Wait()
	} else if msg.ConversationType == constant.ConversationTypeGroup {
		if err := HandleGroupMessage(ctx, msg); err != nil {
			log.Printf("[SendMessage] HandleGroupMessage failed. err=%v", err)
			es = status.ErrServiceInternal
			return
		}
	}
	return msg.MsgId, status.Success
}
// checkMsg validates that a single-chat message carries the conversation id
// derived from its participants; all other message types pass unchecked.
func checkMsg(msg *vo.MessageBody) *status.ErrStatus {
	expected := util.GenConversationId(msg.ConversationType, msg.Sender, msg.Receiver)
	if msg.ConversationType == constant.ConversationTypeSingle && expected != msg.ConversationId {
		return status.ErrInvalidParam
	}
	return status.Success
}
// HandleGroupMessage fans a group message out to every member: each member's
// user inbox gets the message appended and a push notification is attempted.
// Per-member failures are logged but do not abort the fan-out; only the
// member lookup itself can fail the call.
//
// Fix vs. previous revision: the goroutines took &member.GroupMember where
// member is the range variable — under pre-Go 1.22 loop semantics every
// goroutine aliased the same variable. The loop variable is now pinned per
// iteration.
func HandleGroupMessage(ctx context.Context, msg *vo.MessageBody) error {
	// 查询群用户 — look up the group's members.
	members, err := po.ListMembersByGroupId(ctx, msg.Receiver)
	if err != nil {
		return err
	}
	// 写入全部用户链 and 推送通知 — append + push for every member.
	wg := &sync.WaitGroup{}
	for _, member := range members {
		member := member // pin: &member.GroupMember must be per-iteration
		wg.Add(1)
		go func(m *po.GroupMember) {
			defer wg.Done()
			inbox := &bo.Inbox{
				Ctx:       ctx,
				InboxType: constant.InboxTypeUser,
				Key:       fmt.Sprintf(constant.UserInboxKey, m.UserId),
			}
			index, err := inbox.Append(msg)
			if err != nil {
				log.Printf("[HandleGroupMessage] inbox.Append failed. err=%v userId=%d, msg=%+v", err, m.UserId, msg)
			}
			err = manager.PushMessage(m.UserId, &vo.PushMessage{
				Body:  msg,
				Index: int32(index),
			})
			if err != nil {
				log.Printf("[HandleGroupMessage] manager.PushMessage failed. err=%v userId=%d, msg=%+v", err, m.UserId, msg)
			}
		}(&member.GroupMember)
	}
	wg.Wait()
	return nil
}
|
package main
import (
"log"
"net"
"xip/xip"
)
// main runs a tiny DNS responder on UDP port 53: each datagram is answered
// by xip.QueryResponse in its own goroutine.
//
// Fixes vs. previous revision: the byte count returned by ReadFromUDP was
// discarded, so the full 512-byte buffer (query plus trailing zeros) was
// handed to the parser; the WriteToUDP error was assigned but never checked.
func main() {
	conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: 53})
	if err != nil {
		log.Fatal(err.Error())
	}
	for {
		// Fresh buffer per datagram so concurrent handlers never share it.
		buf := make([]byte, 512)
		n, addr, err := conn.ReadFromUDP(buf)
		if err != nil {
			log.Println(err.Error())
			continue
		}
		// Pass only the bytes actually received, and the values explicitly,
		// so the goroutine cannot observe a later iteration's state.
		go func(query []byte, addr *net.UDPAddr) {
			response, logMessage, err := xip.QueryResponse(query)
			if err != nil {
				log.Println(err.Error())
				return
			}
			if _, err := conn.WriteToUDP(response, addr); err != nil {
				log.Println(err.Error())
				return
			}
			log.Printf("%v.%d %s", addr.IP, addr.Port, logMessage)
		}(buf[:n], addr)
	}
}
|
package solutions
import (
"fmt"
"testing"
)
// TestLongestCommonPrefix table-tests longestCommonPrefix, running each
// case as its own named subtest.
func TestLongestCommonPrefix(t *testing.T) {
	t.Run("Test longestCommonPrefix", func(t *testing.T) {
		cases := []struct {
			input []string
			want  string
		}{
			{[]string{"flower", "flow", "flight"}, "fl"},
			{[]string{"dog", "racecar", "car"}, ""},
			{[]string{"a"}, "a"},
			{[]string{"ab", "a"}, "a"},
			{[]string{"a", "a"}, "a"},
		}
		for _, tc := range cases {
			name := fmt.Sprintf("input=%v, want=%v", tc.input, tc.want)
			t.Run(name, func(t *testing.T) {
				if got := longestCommonPrefix(tc.input); got != tc.want {
					t.Errorf("got %v, want %v", got, tc.want)
				}
			})
		}
	})
}
|
// Configobj holds the parsed config.json, keyed by section then key.
var Configobj map[string]map[string]interface{}

// init loads <path>/config.json into Configobj at startup.
// Fixes vs. previous revision: the file handle was never closed (leak), and
// on an open failure the code fell through and called Decode on a nil file
// (nil-pointer dereference); we now log and return early instead.
// NOTE(review): `path` is a package-level string defined elsewhere in this
// file — confirm it is initialised before this init runs.
func init() {
	Configobj = make(map[string]map[string]interface{})
	f, err := os.OpenFile(path+"/config.json", os.O_RDONLY, 0666)
	if err != nil {
		utils.Log(err)
		return
	}
	defer f.Close()
	if err := json.NewDecoder(f).Decode(&Configobj); err != nil {
		fmt.Println(err)
	}
}
// GetConfig returns the value stored under section db and key in Configobj.
// It panics (with the original "不存在" message) when either the section or
// the key is absent.
// Fixes vs. previous revision: removed the else-after-panic nesting and the
// unreachable trailing `return ""`; behavior for callers is unchanged.
func GetConfig(db, key string) interface{} {
	obj, ok := Configobj[db]
	if !ok {
		panic("不存在")
	}
	v, ok := obj[key]
	if !ok {
		panic("不存在")
	}
	return v
}
|
package main
// widget is a simple named value.
// NOTE(review): unused in this file's visible code — confirm it is
// referenced elsewhere before removing.
type widget struct {
	name string
	data uint64
}
// main builds an empty dummyStoreMap; the blank assignment keeps the
// compiler happy while the store is otherwise unused.
func main() {
	store := dummyStoreMap{
		w: make(map[string]uint64),
	}
	_ = store
}
|
package notice_client
import (
"gocherry-api-gateway/admin/services"
"gopkg.in/gomail.v2"
"strconv"
)
/**
Send an email alert.
Example: notice_client.EmailSend([]string{"9932851@qq.com"}, "333", "333")
Returns true when the mail was dialed and sent successfully.
*/
func EmailSend(mailTo []string, subject string, body string) bool {
	config := services.GetAppConfig()
	mailConn := map[string]string{
		"user": config.Common.EmailUser,
		"pass": config.Common.EmailPass,
		"host": config.Common.EmailHost,
		// NOTE(review): this reads the etcd port, not an SMTP/email port —
		// looks like a copy/paste slip; confirm the intended config field.
		"port": config.Common.EtcdPort,
	}
	// Previously the conversion error was discarded; a malformed port now
	// fails fast instead of dialing port 0.
	port, err := strconv.Atoi(mailConn["port"])
	if err != nil {
		return false
	}
	m := gomail.NewMessage()
	m.SetHeader("From", m.FormatAddress(mailConn["user"], "XX官方")) // sender with display-name alias
	m.SetHeader("To", mailTo...)                                  // one or more recipients
	m.SetHeader("Subject", subject)
	m.SetBody("text/html", body)
	d := gomail.NewDialer(mailConn["host"], port, mailConn["user"], mailConn["pass"])
	return d.DialAndSend(m) == nil
}
|
package libreofficekit
/*
#cgo CFLAGS: -I ./ -D LOK_USE_UNSTABLE_API
#cgo LDFLAGS: -ldl
#include <lokbridge.h>
*/
import "C"
import (
"fmt"
"sync"
"unsafe"
)
// Office wraps a C LibreOfficeKit instance.
// Mutex is exposed so callers can serialise access to the (shared) kit.
type Office struct {
	handle *C.struct__LibreOfficeKit
	Mutex  *sync.Mutex
}
// NewOffice returns new Office or error if LibreOfficeKit fails to load
// required libs (actually, when libreofficekit-dev package isn't installed or path is invalid).
func NewOffice(path string) (*Office, error) {
	office := new(Office)
	// Copy the Go string into C memory for lok_init; freed once it returns.
	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))
	lokit := C.lok_init(cPath)
	if lokit == nil {
		return nil, fmt.Errorf("failed to initialize LibreOfficeKit with path: '%s'", path)
	}
	office.handle = lokit
	office.Mutex = &sync.Mutex{}
	return office, nil
}
// Close destroys C LibreOfficeKit instance.
// The Office must not be used after Close.
func (office *Office) Close() {
	C.destroy_office(office.handle)
}
// GetError returns last happened error message in human-readable format.
// NOTE(review): the C string from get_error is not freed here — confirm
// ownership (whether the kit retains the buffer) against the LOK API.
func (office *Office) GetError() string {
	message := C.get_error(office.handle)
	return C.GoString(message)
}
// LoadDocument return Document or error, if LibreOffice fails to open document at provided path.
// Actual error message can be retrieved by office.GetError method.
func (office *Office) LoadDocument(path string) (*Document, error) {
	document := new(Document)
	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))
	handle := C.document_load(office.handle, cPath)
	if handle == nil {
		return nil, fmt.Errorf("failed to load document")
	}
	document.handle = handle
	return document, nil
}
// Document wraps a C LibreOfficeKitDocument handle obtained from LoadDocument.
type Document struct {
	handle *C.struct__LibreOfficeKitDocument
}
// Close destroys document.
// The Document must not be used after Close.
func (document *Document) Close() {
	C.destroy_document(document.handle)
}
// SaveAs saves document at desired path in desired format with applied filter rules.
// Actual (from libreoffice) error message can be read with Office.GetError.
func (document *Document) SaveAs(path string, format string, filter string) error {
	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))
	cFormat := C.CString(format)
	defer C.free(unsafe.Pointer(cFormat))
	cFilter := C.CString(filter)
	defer C.free(unsafe.Pointer(cFilter))
	// document_save reports success as 1; anything else is a failure whose
	// detail lives in Office.GetError.
	status := C.document_save(document.handle, cPath, cFormat, cFilter)
	if status != 1 {
		return fmt.Errorf("failed to save document")
	}
	return nil
}
// PostUnoCommand dispatches a UNO command (e.g. ".uno:...") with the given
// filter/arguments to the document; the final false disables result
// notification per the C bridge signature.
// Actual (from libreoffice) error message can be read with Office.GetError.
func (document *Document) PostUnoCommand(command string, filter string) {
	cCommand := C.CString(command)
	defer C.free(unsafe.Pointer(cCommand))
	cFilter := C.CString(filter)
	defer C.free(unsafe.Pointer(cFilter))
	C.post_uno_command(document.handle, cCommand, cFilter, false)
}
// GetCommandValues returns the string result of querying command values
// from the document.
// Actual (from libreoffice) error message can be read with Office.GetError.
// NOTE(review): the C string returned by get_command_values is not freed —
// confirm ownership semantics with the LOK API.
func (document *Document) GetCommandValues(command string) string {
	cCommand := C.CString(command)
	defer C.free(unsafe.Pointer(cCommand))
	return C.GoString(C.get_command_values(document.handle, cCommand))
}
|
package main
import (
"bytes"
"compress/gzip"
"encoding/base64"
"fmt"
"github.com/liuzl/phonenumbers"
"io/ioutil"
)
// main is a scratch driver that decodes the embedded carriers protobuf:
// base64 -> gzip -> raw bytes, printing each stage.
//
// Fixes vs. previous revision: errors were printed but execution continued,
// so a failed gzip.NewReader left reader nil and ReadAll panicked; the
// reader was also never closed.
func main() {
	fmt.Println("vim-go")
	fmt.Println(phonenumbers.CarriersPb)
	data, err := base64.StdEncoding.DecodeString(phonenumbers.CarriersPb)
	if err != nil {
		fmt.Println(err)
		return
	}
	reader, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer reader.Close()
	pbBytes, err := ioutil.ReadAll(reader)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(pbBytes)
}
|
package utils
import (
"os"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
)
// AWSDebugEnv is the key used for the env variable that toggles debug logs.
const AWSDebugEnv = "AWS_DEBUG"

// Logger defines a logger that can be configured for use with the AWS SDK.
// Debug receives routine SDK output; Error receives messages containing "ERROR".
type Logger interface {
	Debug(...interface{})
	Error(...interface{})
}
// AWSConfig generates an AWS session Configuration. Only the first provided
// logger is used.
func AWSConfig(dev bool, logger ...Logger) (cfg *aws.Config) {
	if dev {
		// Development profile: local dynamodb endpoint, static credentials,
		// and an arbitrary region.
		cfg = &aws.Config{
			Endpoint:    aws.String("http://localhost:8000"),
			Credentials: credentials.NewStaticCredentials("robert", "wow", "launchpad"),
			Region:      aws.String("us-west-2"),
		}
	} else {
		// todo: more granular production aws setup
		cfg = aws.NewConfig()
	}
	// Verbosity: everything when AWS_DEBUG=true, otherwise debug output only
	// around failing requests.
	if os.Getenv(AWSDebugEnv) == "true" {
		cfg.LogLevel = aws.LogLevel(aws.LogDebug)
	} else {
		cfg.LogLevel = aws.LogLevel(aws.LogDebugWithRequestErrors)
	}
	// Without a usable logger there is nothing left to wire up.
	if len(logger) == 0 || logger[0] == nil {
		return cfg
	}
	l := logger[0]
	cfg.Logger = aws.LoggerFunc(func(args ...interface{}) {
		// Route messages that look like errors to Error, the rest to Debug.
		if str, ok := args[0].(string); ok && strings.Contains(str, "ERROR") {
			l.Error(args...)
		} else {
			l.Debug(args...)
		}
	})
	cfg.Logger.Log("aws logger initialized")
	return cfg
}
// AWSSession initializes an AWS API session with given configs.
func AWSSession(cfg ...*aws.Config) (*session.Session, error) {
	sess, err := session.NewSession(cfg...)
	return sess, err
}
|
package dependency
import (
"fmt"
"strings"
"sync"
"github.com/andygrunwald/perseus/dependency/repository"
"github.com/andygrunwald/perseus/types/set"
)
// ComposerResolver is an implementation of Resolver for Composer (PHP).
type ComposerResolver struct {
	// repository is the Client to talk to a specific endpoint (e.g. Packagist)
	repository repository.Client
	// workerCount is the number of worker that will be started
	workerCount int
	// waitGroup tracks in-flight packages; Resolve waits on it before
	// closing the queue and results channels.
	waitGroup sync.WaitGroup
	// queue is the channel where all jobs are stored that needs to be processed by the worker
	queue chan *Package
	// results is the channel where all resolved dependencies will be streamed
	results chan *Result
	// resolved is a storage to track which packages are already resolved
	resolved *set.Set
	// queued is a storage to track which packages were already queued
	queued *set.Set
	// replacee is a hashmap to replace old/renamed/obsolete packages that would throw an error otherwise
	replacee map[string]string
}
// GetResultStream will return the channel for results.
// During the process of resolving dependencies, this channel will be filled
// with the results. Those can be processed next to the resolve process.
func (d *ComposerResolver) GetResultStream() <-chan *Result {
	stream := d.results
	return stream
}
// Resolve will start of the dependency resolver process.
func (d *ComposerResolver) Resolve(packageList []*Package) {
	d.startWorker()
	// Queue packages
	for _, p := range packageList {
		d.queuePackage(p)
	}
	// Wait until all packages are resolved and close everything.
	// Workers Add to the waitgroup for every dependency they discover, so
	// Wait only returns once the whole transitive graph has been processed;
	// closing queue then results lets workers and consumers drain and exit.
	d.waitGroup.Wait()
	close(d.queue)
	close(d.results)
}
// queuePackage adds package p to the queue.
// The waitgroup is incremented and the package marked queued before the
// (possibly blocking) channel send, so Resolve cannot observe a zero count
// while a package is still in flight.
func (d *ComposerResolver) queuePackage(p *Package) {
	d.waitGroup.Add(1)
	d.markAsQueued(p.Name)
	d.queue <- p
}
// startWorker launches workerCount worker goroutines, each consuming the
// shared queue and streaming onto the shared results channel.
func (d *ComposerResolver) startWorker() {
	for id := 0; id < d.workerCount; id++ {
		go d.worker(id+1, d.queue, d.results)
	}
}
// worker is a single worker routine. This worker will be launched multiple times to work on
// the queue as efficient as possible.
// id is a unique number assigned per worker (only for logging/debugging purpose).
// queue is the jobs channel. The worker needs to be able to add more jobs to the queue as well.
// results is the channel where all results will be stored once they are resolved.
// Every job taken from d.queue is balanced by exactly one waitGroup.Done call.
func (d *ComposerResolver) worker(id int, queue chan<- *Package, results chan<- *Result) {
	// Worker has started. Lets do the hard work. Gimme the jobs.
	for j := range d.queue {
		packageName := j.Name
		// We don't need to process system packages.
		// System packages (like php or ext-curl) needs to be fulfilled by the system.
		// Not by the ApiClient
		if d.isSystemPackage(packageName) {
			d.waitGroup.Done()
			continue
		}
		// We overwrite specific packages, because they are added as dependencies to some older tags.
		// And those was renamed (for some reasons). But we are scanning all tags / branches.
		if r, ok := d.replacee[packageName]; ok {
			packageName = r
		}
		// Get information about the package from ApiClient
		p, resp, err := d.repository.GetPackageByName(packageName)
		if err != nil {
			// API Call error here. Request to Packagist failed
			r := &Result{
				Package:  j,
				Response: resp,
				Error:    fmt.Errorf("API returned status code %d: %s", resp.StatusCode, err),
			}
			results <- r
			d.waitGroup.Done()
			continue
		}
		// Check if we got information from Packagist.
		// Maybe no error was thrown, but no package comes with the payload.
		// For us, as a dependency resolver, this is equal an error.
		if p == nil {
			// API Call error here. No package received from Packagist
			r := &Result{
				Package:  j,
				Response: resp,
				Error:    fmt.Errorf("API Call to Packagist successful (Status code %d), but no package received", resp.StatusCode),
			}
			results <- r
			d.waitGroup.Done()
			continue
		}
		// Now we got the package.
		// Let us determine all requirements / dependencies from all versions,
		// because those packages needs to be resolved as well
		for _, version := range p.Versions {
			// If we don` have required packaged, we can handle the next one
			if len(version.Require) == 0 {
				continue
			}
			// Handle dependency per dependency
			for dependency := range version.Require {
				// We check if this dependency was already queued.
				// It is typical that many different versions of one package don't
				// change dependencies so often. So we would queue one package
				// multiple times. With this small check we save a lot of work here.
				if d.shouldPackageBeQueued(dependency) {
					d.markAsQueued(dependency)
					packageToResolve, _ := NewPackage(dependency, "")
					// We add two additional waitgroup entries here.
					// You might ask why? Regularly we add a new entry when we have a new package.
					// Here we add two, because of a) the new package and b) the new queue
					// entry of the package. We queue the package in a new go routine to
					// avoid a blocking state here. But we need to know when this go routine
					// is finished. So we observice this "Add package to queue" go routine
					// with the same waitgroup.
					d.waitGroup.Add(2)
					go func() {
						queue <- packageToResolve
						d.waitGroup.Done()
					}()
				}
			}
		}
		// Package was resolved. Lets do everything which is necessary to change this package to a result.
		resolvedPackage, err := NewPackage(p.Name, p.Repository)
		r := &Result{
			Package:  resolvedPackage,
			Response: resp,
			Error:    err,
		}
		results <- r
		d.waitGroup.Done()
		// NOTE(review): the package is marked resolved only after the result
		// is sent; deduplication relies on the queued set (marked earlier),
		// so this late marking looks benign — confirm.
		d.markAsResolved(p.Name)
	}
}
// markAsResolved will mark package p as resolved.
func (d *ComposerResolver) markAsResolved(p string) {
	d.resolved.Add(p)
}
// markAsQueued will mark package p as queued.
func (d *ComposerResolver) markAsQueued(p string) {
	d.queued.Add(p)
}
// shouldPackageBeQueued reports whether package p still needs queueing:
// it must not be a system package and must be neither already queued nor
// already resolved.
func (d *ComposerResolver) shouldPackageBeQueued(p string) bool {
	switch {
	case d.isSystemPackage(p):
		return false
	case d.isPackageAlreadyQueued(p):
		return false
	case d.isPackageAlreadyResolved(p):
		return false
	default:
		return true
	}
}
// isPackageAlreadyResolved returns true if package p was already resolved.
// False otherwise.
func (d *ComposerResolver) isPackageAlreadyResolved(p string) bool {
	return d.resolved.Exists(p)
}
// isPackageAlreadyQueued returns true if package p was already queued.
// False otherwise.
func (d *ComposerResolver) isPackageAlreadyQueued(p string) bool {
	return d.queued.Exists(p)
}
// isSystemPackage returns true if p is a system package. False otherwise.
//
// Composer package names follow the "vendor/project" format (e.g.
// symfony/console); see https://getcomposer.org/doc/01-basic-usage.md.
// require/require-dev may also list platform dependencies without a vendor
// part, such as `php` or `ext-zip`. Those must be satisfied by the system
// rather than the package repository — and asking Packagist for them would
// return the HTML of the packagist search instead of the JSON we expect.
// Skipping anything without a "/" therefore avoids both bogus errors and
// wasted API calls.
func (d *ComposerResolver) isSystemPackage(p string) bool {
	return strings.IndexByte(p, '/') < 0
}
|
package network
import (
"log"
msg "../messageTypes"
"../network/peers"
)
// Node bundles the identity, peer tracking, message channels and loggers a
// network node uses to exchange order messages with its peers.
type Node struct {
	id               string // this node's network identifier
	messageIDCounter int    // counter for assigning ids to outgoing messages
	networkChannels  msg.NetworkChannels
	// Peer discovery plumbing.
	peerUpdateChannelRx chan peers.PeerUpdate
	peerTxEnable        chan bool
	// Tx/Rx channel pairs, one per message type in the order protocol.
	newRequestChannelTx, newRequestChannelRx                     chan msg.NetworkOrder
	newReplyToRequestChannelTx, newReplyToRequestChannelRx       chan msg.NetworkOrder
	delegateOrderChannelTx, delegateOrderChannelRx               chan msg.NetworkOrder
	delegateOrderConfirmChannelTx, delegateOrderConfirmChannelRx chan msg.NetworkOrder
	orderCompleteChannelTx, orderCompleteChannelRx               chan msg.NetworkOrder
	orderSyncChannelTx, orderSyncChannelRx                       chan msg.NetworkHallOrder
	// receivedMessages tracks message ids seen per sender — presumably for
	// de-duplication; confirm against the dispatch code.
	receivedMessages map[string][]int
	// Separate loggers for traffic in each direction.
	loggerOutgoing, loggerIncoming *log.Logger
}
|
package lengthsafe
import (
"path/filepath"
"strings"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Ginkgo specs for splitPathOnSymlinkLimit (defined elsewhere in this
// package): each context checks how a path is split into a directory prefix
// of at most symlinkMax bytes plus the remainder.
var _ = Describe("splitPathOnSymlinkLimit()", func() {
	BeforeEach(func() {
		ensureSymlinkMaxSet()
	})
	Context("when path is shorter than symlinkMax", func() {
		It("should return dir and remainder", func() {
			// Paths at or under the limit come back whole, with no remainder.
			for _, path := range []string{
				pathStr(5),
				pathStr(symlinkMax),
			} {
				dir, remainder := splitPathOnSymlinkLimit(path)
				Ω(dir).Should(Equal(path))
				Ω(remainder).Should(Equal(""))
			}
		})
	})
	Context("when path[:symlinkMax] splits the final file name", func() {
		It("should return dir and remainder", func() {
			// The cut lands inside the basename, so the split backs up to the
			// parent directory.
			path := pathStr(symlinkMax) + "asdf"
			dir, remainder := splitPathOnSymlinkLimit(path)
			Ω(dir).Should(Equal(filepath.Dir(path)))
			Ω(remainder).Should(Equal(filepath.Base(path)))
		})
	})
	Context("when path[:symlinkMax] splits a dir name", func() {
		It("should return dir and remainder", func() {
			// The cut lands inside an intermediate directory name; the
			// remainder keeps the last two components.
			path := pathStr(symlinkMax) + "asdf/asdf"
			dir, remainder := splitPathOnSymlinkLimit(path)
			Ω(dir).Should(Equal(filepath.Dir(filepath.Dir(path))))
			Ω(remainder).Should(Equal(filepath.Base(filepath.Dir(path)) + "/" + filepath.Base(path)))
		})
	})
	Context("when path[:symlinkMax] ends on '/'", func() {
		It("should return dir and remainder", func() {
			// The cut lands exactly on a separator.
			path := pathStr(symlinkMax-1) + "/asdf"
			dir, remainder := splitPathOnSymlinkLimit(path)
			Ω(dir).Should(Equal(filepath.Dir(path)))
			Ω(remainder).Should(Equal(filepath.Base(path)))
		})
	})
})
// pathStr returns a string of the given length that looks like a filesystem
// path. The result never ends with a '/' (a trailing separator is replaced
// with 'a'); length 0 yields "".
func pathStr(length uint) string {
	const sample = "/0123456789"
	if length == 0 {
		return ""
	}
	// Repeat the sample ceil(length/len(sample)) times, then trim to size.
	repeats := (int(length)-1)/len(sample) + 1
	s := strings.Repeat(sample, repeats)[:length]
	if strings.HasSuffix(s, "/") {
		s = s[:len(s)-1] + "a"
	}
	return s
}
|
package scoring
import (
"math/rand"
"net/http"
"net/url"
"strings"
"testing"
"github.com/luuphu25/data-sidecar/storage"
"github.com/luuphu25/data-sidecar/util"
)
// withJitter returns n perturbed by a uniform random offset in [-5, 45).
// NOTE(review): the range is asymmetric — 50*rand()-5 looks like a typo for
// either 10*rand()-5 (±5) or 50*rand()-25 (±25); confirm the intent.
func withJitter(n float64) float64 {
	return n + 50*rand.Float64() - 5
}
// TestScore exercises Scorer.Add/Score and ScoreItem against stores seeded
// with random and fixed series, asserting that nothing is emitted on the
// recorder channel for these inputs.
//
// Fix vs. previous revision: the non-idiomatic `for _ = range ch` drains
// (flagged by gofmt -s) are now the equivalent `for range ch`.
func TestScore(t *testing.T) {
	x := storage.NewStore()
	// Seed two series with 10k random points.
	for i := 0; i < 10000; i++ {
		if rand.Float64() > 0.5 {
			x.Add(map[string]string{"top": "1"}, rand.ExpFloat64(), int64(i))
		} else {
			x.Add(map[string]string{"bottom": "1"}, rand.NormFloat64(), int64(i))
		}
	}
	x.Add(map[string]string{"top": "g"}, 1, 0)
	x.Add(map[string]string{"_ft": "hello"}, 1, 0)
	x.Add(map[string]string{"g": "small"}, 1, 0)
	t.Run("score", func(t *testing.T) {
		rec := util.NewRecorder()
		sc := NewScorer(x, rec)
		tmp := map[string]string{"g": "small"}
		sc.Add(tmp, 1, 1)
		sc.Add(tmp, 1, 2)
		sc.Score(tmp)
		sc.Add(tmp, 1, 3)
		sc.Add(tmp, 1, 4)
		sc.Add(tmp, 1, 5)
		sc.Score(tmp)
		close(rec.Chan)
		somethingCameBack := false
		for g := range rec.Chan {
			t.Log(g)
			somethingCameBack = true
		}
		if somethingCameBack {
			t.Error()
		}
	})
	t.Run("scoreitem", func(t *testing.T) {
		// Too few points: ScoreItem should emit nothing.
		rec := util.NewRecorder()
		store := storage.NewStore()
		store.Add(map[string]string{"a": "b"}, 1., 1)
		store.Add(map[string]string{"a": "b"}, 2., 2)
		store.Add(map[string]string{"a": "b"}, 3., 3)
		ScoreItem(map[string]string{"a": "b"}, rec, store)
		close(rec.Chan)
		somethingCameBack := false
		for range rec.Chan {
			somethingCameBack = true
		}
		if somethingCameBack {
			t.Error()
		}
		// A longer series with a gap: still nothing expected.
		rec = util.NewRecorder()
		store = storage.NewStore()
		store.Add(map[string]string{"a": "b"}, 1., 1)
		store.Add(map[string]string{"a": "b"}, 2., 2)
		store.Add(map[string]string{"a": "b"}, 3., 3)
		store.Add(map[string]string{"a": "b"}, 5., 5)
		store.Add(map[string]string{"a": "b"}, 6., 6)
		store.Add(map[string]string{"a": "b"}, 7., 7)
		store.Add(map[string]string{"a": "b"}, 8., 8)
		ScoreItem(map[string]string{"a": "b"}, rec, store)
		close(rec.Chan)
		somethingCameBack = false
		for range rec.Chan {
			somethingCameBack = true
		}
		if somethingCameBack {
			t.Error()
		}
	})
}
// TestHandlers drives Scorer.ScoreHandleFunc through its error and success
// paths using synthetic requests and a recording ResponseWriter.
func TestHandlers(t *testing.T) {
	x := storage.NewStore()
	rec := util.NewRecorder()
	sc := NewScorer(x, rec)
	t.Run("score", func(t *testing.T) {
		// No form data at all: the handler explains how to query it.
		rw := util.NewHTTPResponseWriter()
		r := &http.Request{}
		sc.ScoreHandleFunc(rw, r)
		if g := rw.String(); !strings.Contains(g, "Please query with") {
			t.Error(g)
		}
		// A flat numeric series is accepted; the reply mentions "Data".
		rw = util.NewHTTPResponseWriter()
		r = &http.Request{Form: url.Values{"data": []string{"[1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5]"}}}
		sc.ScoreHandleFunc(rw, r)
		if g := rw.String(); !strings.Contains(g, "Data") {
			t.Error(g)
		}
		// A nested array round-trips (response starts with "[[").
		rw = util.NewHTTPResponseWriter()
		r = &http.Request{Form: url.Values{"data": []string{"[[1,2,3,4,5]]"}}}
		sc.ScoreHandleFunc(rw, r)
		if g := rw.String(); !strings.Contains(g, "[[") {
			t.Error(g)
		}
		// Malformed "info" JSON (unterminated string) is rejected.
		rw = util.NewHTTPResponseWriter()
		r = &http.Request{Form: url.Values{"data": []string{"[1,2,3,4,5]"}, "info": []string{`{"__name__":"hello}`}}}
		sc.ScoreHandleFunc(rw, r)
		if g := rw.String(); !strings.Contains(g, "invalid info") {
			t.Error(g)
		}
		// Valid data plus valid info: the response mentions both.
		rw = util.NewHTTPResponseWriter()
		r = &http.Request{Form: url.Values{"data": []string{"[1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5]"}, "info": []string{`{"__name__":"hello"}`}}}
		sc.ScoreHandleFunc(rw, r)
		if g := rw.String(); !strings.Contains(g, "Data") || !strings.Contains(g, "hello") {
			t.Error(g)
		}
	})
}
|
package env
import (
"fmt"
"os"
"strconv"
"strings"
)
// parseInt32Slice parses a comma-separated list of int32 values such as
// "1,2,3", failing on the first element that does not parse. It centralises
// the loop previously duplicated by GetInt32Slice and MustGetInt32Slice.
func parseInt32Slice(s string) ([]int32, error) {
	ss := strings.Split(s, ",")
	res := make([]int32, len(ss))
	for i := range ss {
		v, err := strconv.ParseInt(ss[i], decimalBase, bitSize32)
		if err != nil {
			return nil, err
		}
		res[i] = int32(v)
	}
	return res, nil
}

// GetInt32Slice extracts slice of int32 value with the format "1,2,3" from env. if not set, returns default value.
// A set-but-empty variable yields an empty slice; a malformed value falls
// back to the default, mirroring the "not set" case.
func GetInt32Slice(key string, def []int32) []int32 {
	s, ok := os.LookupEnv(key)
	if !ok {
		return def
	}
	if s == "" {
		return []int32{}
	}
	res, err := parseInt32Slice(s)
	if err != nil {
		return def
	}
	return res
}

// MustGetInt32Slice extracts slice of int32 value with the format "1,2,3" from env. if not set, it panics.
// A set-but-empty variable yields an empty slice; a malformed value panics.
func MustGetInt32Slice(key string) []int32 {
	s, ok := os.LookupEnv(key)
	if !ok {
		panic(fmt.Sprintf("environment variable '%s' not set", key))
	}
	if s == "" {
		return []int32{}
	}
	res, err := parseInt32Slice(s)
	if err != nil {
		panic(fmt.Sprintf("invalid environment variable '%s' has been set: %s", key, s))
	}
	return res
}
|
// Rule parser using PyPy. To build this you need PyPy installed, but the stock one
// that comes with Ubuntu will not work since it doesn't include shared libraries.
// For now we suggest fetching the upstream packages from pypy.org. Other distros
// might work fine though.
// On OSX installing through Homebrew should be fine.
//
// The interface to PyPy is done through cgo and cffi. This means that we need to write very little
// actual C code; nearly all of it is in interpreter.h and is just declarations. What remains in
// interpreter.c is essentially just glue to handle limitations of cgo and the way we're using
// callbacks etc.
// When changing callbacks or adding new ones, you will need to alter interpreter.c as well.
// Bad Things will obviously happen if the types declared there don't agree with the real ones.
package parse
import (
"crypto/sha1"
"fmt"
"io/ioutil"
"os"
"path"
"runtime"
"sort"
"strings"
"sync"
"time"
"unsafe"
"github.com/kardianos/osext"
"gopkg.in/op/go-logging.v1"
"core"
"update"
)
/*
#cgo CFLAGS: --std=c99 -Werror
#cgo !freebsd LDFLAGS: -ldl
#include "interpreter.h"
*/
import "C"
// Package-level logger for the parse package.
var log = logging.MustGetLogger("parse")

// subincludePackage is the pseudo-package that remote subincludes land in.
const subincludePackage = "_remote"

// Communicated back from PyPy to indicate that a parse has been deferred because
// we need to wait for another target to build.
const pyDeferParse = "_DEFER_"

// Communicated back from C to indicate various error states.
const dlopenError = 1
const cffiUnavailable = 3

// To ensure we only initialise once.
var initializeOnce sync.Once

// pythonParser is our implementation of core.Parser. It has no actual state because our parser is a global.
type pythonParser struct{}
// RunPreBuildFunction runs a pre-build function for a target.
// It simply delegates to the package-level implementation.
func (p *pythonParser) RunPreBuildFunction(threadID int, state *core.BuildState, target *core.BuildTarget) error {
	return RunPreBuildFunction(threadID, state, target)
}
// RunPostBuildFunction runs a post-build function for a target, passing the
// build's output through. Delegates to the package-level implementation.
func (p *pythonParser) RunPostBuildFunction(threadID int, state *core.BuildState, target *core.BuildTarget, output string) error {
	return RunPostBuildFunction(threadID, state, target, output)
}
// UndeferAnyParses undefers any pending parses that are waiting for this target to build.
// Delegates to the package-level implementation.
func (p *pythonParser) UndeferAnyParses(state *core.BuildState, target *core.BuildTarget) {
	UndeferAnyParses(state, target)
}
// initializeInterpreter boots the embedded Python (PyPy) interpreter:
// it locates and loads a parser engine, mirrors the build configuration
// into the interpreter's globals, loads the builtin build rules and preload
// files, and finally installs pythonParser on the build state.
func initializeInterpreter(state *core.BuildState) {
	log.Debug("Initialising interpreter...")
	config := state.Config
	// PyPy becomes very unhappy if Go schedules it to a different OS thread during
	// its initialisation. Force it to stay on this one thread for now.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	// Set the hash seed for Python dicts / sets; there isn't a DoS security concern in our context,
	// and it's much more useful to us that they are consistent between runs since it's not that hard
	// to accidentally write rules that are nondeterministic via {}.items() etc.
	os.Setenv("PYTHONHASHSEED", "42")
	// If an engine has been explicitly set, by flag or config, we honour it here.
	if config.Parse.Engine != "" {
		if !initialiseInterpreter(config.Parse.Engine, false) {
			log.Fatalf("Failed to initialise requested parser engine [%s]", config.Parse.Engine)
		}
	} else {
		// Okay, now try the standard fallbacks.
		// The python3 interpreter isn't ready yet, so don't try that.
		// Try the python2 interpreter before attempting to download a portable PyPy.
		if !initialiseInterpreter("pypy", false) && !initialiseInterpreter("python2", false) && !initialiseInterpreter("pypy", true) {
			log.Fatalf("Can't initialise any Please parser engine. Please is putting itself out of its misery.\n")
		}
	}
	// Mirror every relevant config value into the interpreter's globals so
	// the Python build rules can see them.
	setConfigValue("PLZ_VERSION", config.Please.Version.String())
	setConfigValue("BUILD_SANDBOX", pythonBool(config.Build.Sandbox))
	setConfigValue("TEST_SANDBOX", pythonBool(config.Test.Sandbox))
	setConfigValue("GO_TOOL", config.Go.GoTool)
	setConfigValue("GO_VERSION", config.Go.GoVersion)
	setConfigValue("GO_TEST_TOOL", config.Go.TestTool)
	setConfigValue("GOPATH", config.Go.GoPath)
	setConfigValue("CGO_CC_TOOL", config.Go.CgoCCTool)
	setConfigValue("PIP_TOOL", config.Python.PipTool)
	setConfigValue("PIP_FLAGS", config.Python.PipFlags)
	setConfigValue("PEX_TOOL", config.Python.PexTool)
	setConfigValue("DEFAULT_PYTHON_INTERPRETER", config.Python.DefaultInterpreter)
	setConfigValue("PYTHON_MODULE_DIR", config.Python.ModuleDir)
	setConfigValue("PYTHON_DEFAULT_PIP_REPO", config.Python.DefaultPipRepo.String())
	setConfigValue("PYTHON_WHEEL_REPO", config.Python.WheelRepo.String())
	setConfigValue("USE_PYPI", pythonBool(config.Python.UsePyPI))
	setConfigValue("JAVAC_TOOL", config.Java.JavacTool)
	setConfigValue("JAVAC_WORKER", config.Java.JavacWorker)
	setConfigValue("JARCAT_TOOL", config.Java.JarCatTool)
	setConfigValue("JUNIT_RUNNER", config.Java.JUnitRunner)
	setConfigValue("DEFAULT_TEST_PACKAGE", config.Java.DefaultTestPackage)
	setConfigValue("PLEASE_MAVEN_TOOL", config.Java.PleaseMavenTool)
	setConfigValue("JAVA_SOURCE_LEVEL", config.Java.SourceLevel)
	setConfigValue("JAVA_TARGET_LEVEL", config.Java.TargetLevel)
	setConfigValue("JAVAC_FLAGS", config.Java.JavacFlags)
	setConfigValue("JAVAC_TEST_FLAGS", config.Java.JavacTestFlags)
	// Repeated keys accumulate on the Python side (one call per repo/language).
	for _, repo := range config.Java.DefaultMavenRepo {
		setConfigValue("DEFAULT_MAVEN_REPO", repo.String())
	}
	setConfigValue("CC_TOOL", config.Cpp.CCTool)
	setConfigValue("CPP_TOOL", config.Cpp.CppTool)
	setConfigValue("LD_TOOL", config.Cpp.LdTool)
	setConfigValue("AR_TOOL", config.Cpp.ArTool)
	setConfigValue("ASM_TOOL", config.Cpp.AsmTool)
	setConfigValue("LINK_WITH_LD_TOOL", pythonBool(config.Cpp.LinkWithLdTool))
	setConfigValue("DEFAULT_OPT_CFLAGS", config.Cpp.DefaultOptCflags)
	setConfigValue("DEFAULT_DBG_CFLAGS", config.Cpp.DefaultDbgCflags)
	setConfigValue("DEFAULT_OPT_CPPFLAGS", config.Cpp.DefaultOptCppflags)
	setConfigValue("DEFAULT_DBG_CPPFLAGS", config.Cpp.DefaultDbgCppflags)
	setConfigValue("DEFAULT_LDFLAGS", config.Cpp.DefaultLdflags)
	setConfigValue("DEFAULT_NAMESPACE", config.Cpp.DefaultNamespace)
	setConfigValue("CPP_COVERAGE", pythonBool(config.Cpp.Coverage))
	setConfigValue("OS", runtime.GOOS)
	setConfigValue("ARCH", runtime.GOARCH)
	for _, language := range config.Proto.Language {
		setConfigValue("PROTO_LANGUAGES", language)
	}
	setConfigValue("PROTOC_TOOL", config.Proto.ProtocTool)
	setConfigValue("PROTOC_GO_PLUGIN", config.Proto.ProtocGoPlugin)
	setConfigValue("GRPC_PYTHON_PLUGIN", config.Proto.GrpcPythonPlugin)
	setConfigValue("GRPC_JAVA_PLUGIN", config.Proto.GrpcJavaPlugin)
	setConfigValue("GRPC_CC_PLUGIN", config.Proto.GrpcCCPlugin)
	setConfigValue("PROTO_PYTHON_DEP", config.Proto.PythonDep)
	setConfigValue("PROTO_JAVA_DEP", config.Proto.JavaDep)
	setConfigValue("PROTO_GO_DEP", config.Proto.GoDep)
	setConfigValue("PROTO_JS_DEP", config.Proto.JsDep)
	setConfigValue("PROTO_PYTHON_PACKAGE", config.Proto.PythonPackage)
	setConfigValue("GRPC_PYTHON_DEP", config.Proto.PythonGrpcDep)
	setConfigValue("GRPC_JAVA_DEP", config.Proto.JavaGrpcDep)
	setConfigValue("GRPC_GO_DEP", config.Proto.GoGrpcDep)
	setConfigValue("BAZEL_COMPATIBILITY", pythonBool(config.Bazel.Compatibility))
	// User-defined build config: keys are upper-cased and dashes mapped to
	// underscores to form valid Python identifiers.
	for k, v := range config.BuildConfig {
		setConfigValue(strings.Replace(strings.ToUpper(k), "-", "_", -1), v)
	}
	// Load all the builtin rules
	log.Debug("Loading builtin build rules...")
	dir, _ := AssetDir("")
	sort.Strings(dir)
	for _, filename := range dir {
		loadBuiltinRules(filename, MustAsset(filename))
	}
	loadSubincludePackage()
	for _, preload := range config.Parse.PreloadBuildDefs {
		mustPreloadFile(preload)
	}
	state.Parser = &pythonParser{}
	log.Debug("Interpreter ready")
}
// pythonBool returns the representation of a bool we're going to send to Python.
// We use strings to avoid having to do a different callback, but using the empty string for
// false means normal truth checks work fine :)
func pythonBool(b bool) string {
	s := ""
	if b {
		s = "true"
	}
	return s
}
// initialiseInterpreter resolves the parser engine to load: an absolute path is
// used verbatim, otherwise a library named after the engine is looked up next to
// the current executable.
func initialiseInterpreter(engine string, attemptDownload bool) bool {
	if strings.HasPrefix(engine, "/") {
		return initialiseInterpreterFrom(engine, attemptDownload)
	}
	dir, err := osext.ExecutableFolder()
	if err != nil {
		log.Error("Can't determine current executable: %s", err)
		return false
	}
	enginePath := path.Join(dir, fmt.Sprintf("libplease_parser_%s.%s", engine, libExtension()))
	return initialiseInterpreterFrom(enginePath, attemptDownload)
}
// initialiseInterpreterFrom attempts to initialise the C-level parser interpreter
// from the shared library at enginePath. Returns true on success, false on any
// failure (missing file, dlopen error, missing cffi).
func initialiseInterpreterFrom(enginePath string, attemptDownload bool) bool {
	if !core.PathExists(enginePath) {
		return false
	}
	log.Debug("Attempting to load engine from %s", enginePath)
	cEnginePath := C.CString(enginePath)
	defer C.free(unsafe.Pointer(cEnginePath))
	result := C.InitialiseInterpreter(cEnginePath)
	if result == 0 {
		log.Info("Using parser engine from %s", enginePath)
		return true
	} else if result == dlopenError {
		dlerror := C.GoString(C.dlerror())
		// This is a pretty brittle check, but there is no other interface available, and
		// we don't want to download PyPy unless we think that'll solve the problem.
		if attemptDownload && strings.Contains(dlerror, "libpypy-c.so: cannot open shared object file") && runtime.GOOS == "linux" {
			if update.DownloadPyPy(core.State.Config) {
				// Downloading PyPy succeeded, try to initialise again.
				// attemptDownload is false on the retry so this can't recurse forever.
				return initialiseInterpreterFrom(enginePath, false)
			}
		}
		// Low level of logging because it's allowable to fail on libplease_parser_pypy, which we try first.
		log.Notice("Failed to initialise interpreter from %s: %s", enginePath, dlerror)
	} else if result == cffiUnavailable {
		log.Warning("cannot use %s, cffi unavailable", enginePath)
	} else {
		log.Notice("Failed to initialise interpreter from %s: %s", enginePath, C.GoString(C.dlerror()))
	}
	return false
}
// libExtension returns the typical extension of shared objects on the current platform.
func libExtension() string {
	switch runtime.GOOS {
	case "darwin":
		return "dylib"
	default:
		return "so"
	}
}
// setConfigValue pushes a single named config value into the C-level interpreter.
// Both strings are copied into C memory for the call and freed before returning.
func setConfigValue(name string, value string) {
	cName := C.CString(name)
	cValue := C.CString(value)
	defer C.free(unsafe.Pointer(cName))
	defer C.free(unsafe.Pointer(cValue))
	C.SetConfigValue(cName, cValue)
}
// loadBuiltinRules feeds one builtin build-rules file into the interpreter,
// dying via log.Fatalf if it fails to parse.
func loadBuiltinRules(path string, contents []byte) {
	// This is a little inefficient in terms of the number of copies of this data we make.
	data := C.CString(string(contents))
	defer C.free(unsafe.Pointer(data))
	cPackageName := C.CString(path)
	defer C.free(unsafe.Pointer(cPackageName))
	// ParseCode returns an empty string on success, an error message otherwise.
	if result := C.GoString(C.ParseCode(data, cPackageName, 0)); result != "" {
		log.Fatalf("Failed to interpret initial build rules from %s: %s", path, result)
	}
}
// loadSubincludePackage registers the builtin package used for remote subincludes
// with both the interpreter and the build graph.
func loadSubincludePackage() {
	pkg := core.NewPackage(subincludePackage)
	// Set up a builtin package for remote subincludes.
	cPackageName := C.CString(pkg.Name)
	// nil code: we only register the package handle, there's nothing to parse.
	C.ParseCode(nil, cPackageName, sizep(pkg))
	C.free(unsafe.Pointer(cPackageName))
	core.State.Graph.AddPackage(pkg)
}
// mustPreloadFile loads a user-requested build_defs file into the interpreter,
// dying if the file can't be read or parsed.
func mustPreloadFile(preload string) {
	contents, err := ioutil.ReadFile(preload)
	if err != nil {
		log.Fatalf("Failed to preload requested build_defs file: %s", err)
	}
	loadBuiltinRules(preload, contents)
}
// sizet converts a build target to a C.size_t.
// The pointer is smuggled through C as an integer; unsizet reverses this.
func sizet(t *core.BuildTarget) C.size_t { return C.size_t(uintptr(unsafe.Pointer(t))) }
// sizep converts a package to a C.size_t.
// The pointer is smuggled through C as an integer; unsizep reverses this.
func sizep(p *core.Package) C.size_t { return C.size_t(uintptr(unsafe.Pointer(p))) }
// unsizet converts a C.size_t back to a *BuildTarget.
// Only valid for values previously produced by sizet.
func unsizet(u uintptr) *core.BuildTarget { return (*core.BuildTarget)(unsafe.Pointer(u)) }
// unsizep converts a C.size_t back to a *Package.
// Only valid for values previously produced by sizep.
func unsizep(u uintptr) *core.Package { return (*core.Package)(unsafe.Pointer(u)) }
// parsePackageFile parses a single BUILD file.
// It returns true if parsing is deferred and waiting on other build actions, false otherwise on success
// and will panic on errors.
func parsePackageFile(state *core.BuildState, filename string, pkg *core.Package) bool {
	log.Debug("Parsing package file %s", filename)
	start := time.Now()
	// Lazily initialise the interpreter exactly once, on first parse.
	initializeOnce.Do(func() { initializeInterpreter(state) })
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	// Pin this goroutine to its OS thread for the duration of the cgo call.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	// TODO(pebers): It seems like we should be calling C.pypy_attach_thread here once per OS thread.
	//               That only seems to introduce problems though and not solve them; not sure if that is
	//               because we are doing thread-unsafe things in our parser, more go/c/pypy interface
	//               issues or something more mysterious. Regardless, it would be nice to understand
	//               more what's going on there and see if we can solve - I'm not sure we really have
	//               multithreaded parsing without it.
	cFilename := C.CString(filename)
	cPackageName := C.CString(pkg.Name)
	cData := C.CString(string(data))
	defer C.free(unsafe.Pointer(cFilename))
	defer C.free(unsafe.Pointer(cPackageName))
	defer C.free(unsafe.Pointer(cData))
	// ParseFile returns "" on success, the pyDeferParse sentinel to defer, or an error message.
	ret := C.GoString(C.ParseFile(cFilename, cData, cPackageName, sizep(pkg)))
	if ret == pyDeferParse {
		log.Debug("Deferred parse of package file %s in %0.3f seconds", filename, time.Since(start).Seconds())
		return true
	} else if ret != "" {
		panic(fmt.Sprintf("Failed to parse file %s: %s", filename, ret))
	}
	log.Debug("Parsed package file %s in %0.3f seconds", filename, time.Since(start).Seconds())
	return false
}
// RunCode will run some arbitrary Python code using our embedded interpreter.
// An empty string from the interpreter means success; anything else is an error message.
func RunCode(state *core.BuildState, code string) error {
	initializeOnce.Do(func() { initializeInterpreter(state) })
	cCode := C.CString(code)
	defer C.free(unsafe.Pointer(cCode))
	ret := C.GoString(C.RunCode(cCode))
	if ret != "" {
		return fmt.Errorf("%s", ret)
	}
	return nil
}
// IsValidTargetName returns true if the given name is valid in a package.
// This is provided to help error handling on the Python side.
//export IsValidTargetName
func IsValidTargetName(name *C.char) bool {
	// The package name is irrelevant for name validation; "test" is a placeholder.
	_, err := core.TryNewBuildLabel("test", C.GoString(name))
	return err == nil
}
// AddTarget is a cgo callback to add a new build target to the graph.
// It converts the C strings and delegates to addTarget; the returned size_t
// is the target pointer encoded via sizet (see sizet/unsizet).
//export AddTarget
func AddTarget(pkgPtr uintptr, cName, cCmd, cTestCmd *C.char, binary, test, needsTransitiveDeps,
	outputIsComplete, containerise, sandbox, testSandbox, noTestOutput, testOnly, stamp, filegroup, hashFilegroup bool,
	flakiness, buildTimeout, testTimeout int, cBuildingDescription *C.char) (ret C.size_t) {
	buildingDescription := ""
	if cBuildingDescription != nil {
		buildingDescription = C.GoString(cBuildingDescription)
	}
	return sizet(addTarget(pkgPtr, C.GoString(cName), C.GoString(cCmd), C.GoString(cTestCmd),
		binary, test, needsTransitiveDeps, outputIsComplete, containerise, sandbox, testSandbox, noTestOutput,
		testOnly, stamp, filegroup, hashFilegroup, flakiness, buildTimeout, testTimeout, buildingDescription))
}
// addTarget adds a new build target to the graph.
// Separated from AddTarget to make it possible to test (since you can't mix cgo and go test).
// Returns nil if a target of this name already exists in the package.
func addTarget(pkgPtr uintptr, name, cmd, testCmd string, binary, test, needsTransitiveDeps,
	outputIsComplete, containerise, sandbox, testSandbox, noTestOutput, testOnly, stamp, filegroup, hashFilegroup bool,
	flakiness, buildTimeout, testTimeout int, buildingDescription string) *core.BuildTarget {
	pkg := unsizep(pkgPtr)
	// Check for duplicates up front so we don't construct a target we'd only throw away.
	if _, present := pkg.Targets[name]; present {
		// NB. Not logged as an error because Python is now allowed to catch it.
		//     It will turn into an error later if the exception is not caught.
		log.Notice("Duplicate build target in %s: %s", pkg.Name, name)
		return nil
	}
	target := core.NewBuildTarget(core.NewBuildLabel(pkg.Name, name))
	target.IsBinary = binary
	target.IsTest = test
	target.NeedsTransitiveDependencies = needsTransitiveDeps
	target.OutputIsComplete = outputIsComplete
	target.Containerise = containerise
	target.Sandbox = sandbox
	target.TestSandbox = testSandbox
	target.NoTestOutput = noTestOutput
	target.TestOnly = testOnly
	target.Flakiness = flakiness
	target.BuildTimeout = time.Duration(buildTimeout) * time.Second
	target.TestTimeout = time.Duration(testTimeout) * time.Second
	target.Stamp = stamp
	// A hash filegroup is a special case of a filegroup.
	target.IsFilegroup = filegroup || hashFilegroup
	target.IsHashFilegroup = hashFilegroup
	// Automatically label containerised tests.
	if containerise {
		target.AddLabel("container")
	}
	// Automatically label flaky tests.
	if flakiness > 0 {
		target.AddLabel("flaky")
	}
	if binary {
		target.AddLabel("bin")
	}
	if buildingDescription != "" {
		target.BuildingDescription = buildingDescription
	}
	// Filegroups have no commands; don't attach them.
	if !filegroup {
		target.Command = cmd
		target.TestCommand = testCmd
	}
	pkg.Targets[name] = target
	if core.State.Graph.Package(pkg.Name) != nil {
		// Package already added, so we're probably in a post-build function. Add target directly to graph now.
		log.Debug("Adding new target %s directly to graph", target.Label)
		target.AddedPostBuild = true
		core.State.Graph.AddTarget(target)
		pkg.MarkTargetModified(target)
	}
	return target
}
// SetPreBuildFunction sets a pre-build function on a target.
// The bytecode is hashed so changes to the function invalidate the target's build.
//export SetPreBuildFunction
func SetPreBuildFunction(callback uintptr, cBytecode *C.char, cTarget uintptr) {
	target := unsizet(cTarget)
	target.PreBuildFunction = callback
	hash := sha1.Sum([]byte(C.GoString(cBytecode)))
	target.PreBuildHash = hash[:]
}
// SetPostBuildFunction sets a post-build function on a target.
// The bytecode is hashed so changes to the function invalidate the target's build.
//export SetPostBuildFunction
func SetPostBuildFunction(callback uintptr, cBytecode *C.char, cTarget uintptr) {
	target := unsizet(cTarget)
	target.PostBuildFunction = callback
	hash := sha1.Sum([]byte(C.GoString(cBytecode)))
	target.PostBuildHash = hash[:]
}
// AddDependency is called by the add_dep builtin to add a dependency to an existing target.
// It's only invoked by post-build functions.
// Returns nil on success, or a freshly-allocated C string error message otherwise
// (presumably freed on the Python side — confirm).
//export AddDependency
func AddDependency(cPackage uintptr, cTarget *C.char, cDep *C.char, exported bool) *C.char {
	target, err := getTargetPost(cPackage, cTarget)
	if err != nil {
		return C.CString(err.Error())
	}
	dep, err := core.TryParseBuildLabel(C.GoString(cDep), target.Label.PackageName)
	if err != nil {
		return C.CString(err.Error())
	}
	target.AddMaybeExportedDependency(dep, exported, false)
	// Note that here we're in a post-build function so we must call this explicitly
	// (in other callbacks it's handled after the package parses all at once).
	core.State.Graph.AddDependency(target.Label, dep)
	unsizep(cPackage).MarkTargetModified(target)
	return nil
}
// AddOutputPost is called by the add_out builtin to add an output to an existing target.
// Returns nil on success or a C string error message.
//export AddOutputPost
func AddOutputPost(cPackage uintptr, cTarget, cOut *C.char) *C.char {
	target, err := getTargetPost(cPackage, cTarget)
	if err != nil {
		return C.CString(err.Error())
	}
	out := C.GoString(cOut)
	pkg := unsizep(cPackage)
	// Register with the package first so clashes with other targets' outputs are detected.
	if err := pkg.RegisterOutput(out, target); err != nil {
		return C.CString(err.Error())
	}
	target.AddOutput(out)
	return nil
}
// AddNamedOutputPost is called by the add_out builtin to add a named output to an existing target.
// Returns nil on success or a C string error message.
//export AddNamedOutputPost
func AddNamedOutputPost(cPackage uintptr, cTarget, cName, cOut *C.char) *C.char {
	target, err := getTargetPost(cPackage, cTarget)
	if err != nil {
		return C.CString(err.Error())
	}
	out := C.GoString(cOut)
	pkg := unsizep(cPackage)
	// Register with the package first so clashes with other targets' outputs are detected.
	if err := pkg.RegisterOutput(out, target); err != nil {
		return C.CString(err.Error())
	}
	target.AddNamedOutput(C.GoString(cName), out)
	return nil
}
// AddLicencePost is called by the add_licence builtin to add a licence to a target
// during a post-build function.
// Returns nil on success or a C string error message.
//export AddLicencePost
func AddLicencePost(cPackage uintptr, cTarget *C.char, cLicence *C.char) *C.char {
	target, err := getTargetPost(cPackage, cTarget)
	if err != nil {
		return C.CString(err.Error())
	}
	target.AddLicence(C.GoString(cLicence))
	return nil
}
// GetCommand is a cgo callback that returns the command for a target.
func GetCommand(cPackage uintptr, cTarget *C.char, cConfig *C.char) *C.char {
	target, err := getTargetPost(cPackage, cTarget)
	if err != nil {
		log.Fatalf("%s", err) // Too hard to signal this one back to Python.
	}
	return C.CString(target.GetCommandConfig(C.GoString(cConfig)))
}
// SetCommand is a cgo callback that sets a (possibly config-specific) command on a target.
// With an empty cCommand, cConfigOrCommand is treated as the plain command;
// otherwise it's a config name mapping to cCommand.
//export SetCommand
func SetCommand(cPackage uintptr, cTarget *C.char, cConfigOrCommand *C.char, cCommand *C.char) *C.char {
	target, err := getTargetPost(cPackage, cTarget)
	if err != nil {
		return C.CString(err.Error())
	}
	command := C.GoString(cCommand)
	if command == "" {
		target.Command = C.GoString(cConfigOrCommand)
	} else {
		target.AddCommand(C.GoString(cConfigOrCommand), command)
	}
	// It'd be nice if we could ensure here that we're in the pre-build function
	// but not the post-build function which is too late to have any effect.
	// OTOH while it's ineffective it shouldn't cause any trouble trying it either...
	return nil
}
// getTargetPost is called by above to get a target from the current package.
// Returns an error if the target is not in the current package or has already been built.
func getTargetPost(cPackage uintptr, cTarget *C.char) (*core.BuildTarget, error) {
	pkg := unsizep(cPackage)
	name := C.GoString(cTarget)
	target, present := pkg.Targets[name]
	if !present {
		return nil, fmt.Errorf("Unknown build target %s in %s", name, pkg.Name)
	}
	// It'd be cheating to try to modify targets that're already built.
	// Prohibit this because it'd likely end up with nasty race conditions.
	if target.State() >= core.Built {
		return nil, fmt.Errorf("Attempted to modify target %s, but it's already built", target.Label)
	}
	return target, nil
}
// AddSource is a cgo callback that adds a source to a target.
// Returns nil on success or a C string error message.
//export AddSource
func AddSource(cTarget uintptr, cSource *C.char) *C.char {
	target := unsizet(cTarget)
	// systemAllowed=true: plain (unnamed) sources may be absolute paths.
	source, err := parseSource(C.GoString(cSource), target.Label.PackageName, true)
	if err != nil {
		return C.CString(err.Error())
	}
	target.AddSource(source)
	return nil
}
// parseSource parses an incoming source label as either a file or a build label.
// Identifies if the file is owned by this package and returns an error if not.
// NB. The order of these checks is significant: label syntax wins over all file
//     checks, and absolute paths are rejected before the subdirectory-ownership walk.
func parseSource(src, packageName string, systemAllowed bool) (core.BuildInput, error) {
	if core.LooksLikeABuildLabel(src) {
		return core.TryParseNamedOutputLabel(src, packageName)
	} else if src == "" {
		return nil, fmt.Errorf("Empty source path (in package %s)", packageName)
	} else if strings.Contains(src, "../") {
		return nil, fmt.Errorf("'%s' (in package %s) is an invalid path; build target paths can't contain ../", src, packageName)
	} else if src[0] == '/' || src[0] == '~' {
		if !systemAllowed {
			return nil, fmt.Errorf("'%s' (in package %s) is an absolute path; that's not allowed", src, packageName)
		}
		return core.SystemFileLabel{Path: src}, nil
	} else if strings.Contains(src, "/") {
		// Target is in a subdirectory, check nobody else owns that.
		// Walk up from the file's directory to (but excluding) this package's own dir.
		for dir := path.Dir(path.Join(packageName, src)); dir != packageName && dir != "."; dir = path.Dir(dir) {
			if core.IsPackage(dir) {
				return nil, fmt.Errorf("Package %s tries to use file %s, but that belongs to another package (%s)", packageName, src, dir)
			}
		}
	}
	// Make sure it's not the actual build file.
	for _, filename := range core.State.Config.Parse.BuildFileName {
		if filename == src {
			return nil, fmt.Errorf("You can't specify the BUILD file as an input to a rule")
		}
	}
	return core.FileLabel{File: src, Package: packageName}, nil
}
// AddNamedSource adds a named source to a target.
// Returns nil on success or a C string error message.
//export AddNamedSource
func AddNamedSource(cTarget uintptr, cName *C.char, cSource *C.char) *C.char {
	target := unsizet(cTarget)
	// systemAllowed=false: named sources may not be absolute paths (unlike AddSource).
	source, err := parseSource(C.GoString(cSource), target.Label.PackageName, false)
	if err != nil {
		return C.CString(err.Error())
	}
	target.AddNamedSource(C.GoString(cName), source)
	return nil
}
// AddCommand adds a (possibly config-specific) command to a target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddCommand
func AddCommand(cTarget uintptr, cConfig *C.char, cCommand *C.char) *C.char {
	unsizet(cTarget).AddCommand(C.GoString(cConfig), C.GoString(cCommand))
	return nil
}
// AddTestCommand adds a (possibly config-specific) test command to a target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddTestCommand
func AddTestCommand(cTarget uintptr, cConfig *C.char, cCommand *C.char) *C.char {
	unsizet(cTarget).AddTestCommand(C.GoString(cConfig), C.GoString(cCommand))
	return nil
}
// AddSecret adds a secret to a target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddSecret
func AddSecret(cTarget uintptr, cSecret *C.char) *C.char {
	target := unsizet(cTarget)
	target.Secrets = append(target.Secrets, C.GoString(cSecret))
	return nil
}
// AddData adds a runtime data file to a test.
// Returns nil on success or a C string error message.
//export AddData
func AddData(cTarget uintptr, cData *C.char) *C.char {
	target := unsizet(cTarget)
	// Data files follow source semantics; absolute paths aren't allowed here.
	data, err := parseSource(C.GoString(cData), target.Label.PackageName, false)
	if err != nil {
		return C.CString(err.Error())
	}
	target.AddDatum(data)
	return nil
}
// AddOutput adds an output to a build target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddOutput
func AddOutput(cTarget uintptr, cOutput *C.char) *C.char {
	target := unsizet(cTarget)
	target.AddOutput(C.GoString(cOutput))
	return nil
}
// AddNamedOutput adds a named output to a build target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddNamedOutput
func AddNamedOutput(cTarget uintptr, cName *C.char, cOutput *C.char) *C.char {
	target := unsizet(cTarget)
	target.AddNamedOutput(C.GoString(cName), C.GoString(cOutput))
	return nil
}
// AddOptionalOutput adds an optional output to a build target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddOptionalOutput
func AddOptionalOutput(cTarget uintptr, cOutput *C.char) *C.char {
	target := unsizet(cTarget)
	target.OptionalOutputs = append(target.OptionalOutputs, C.GoString(cOutput))
	return nil
}
// AddDep adds a dependency to a target.
// Returns nil on success or a C string error message if the label doesn't parse.
//export AddDep
func AddDep(cTarget uintptr, cDep *C.char) *C.char {
	target := unsizet(cTarget)
	dep, err := core.TryParseBuildLabel(C.GoString(cDep), target.Label.PackageName)
	if err != nil {
		return C.CString(err.Error())
	}
	target.AddDependency(dep)
	return nil
}
// AddExportedDep adds an exported dependency to a target.
// Returns nil on success or a C string error message if the label doesn't parse.
//export AddExportedDep
func AddExportedDep(cTarget uintptr, cDep *C.char) *C.char {
	target := unsizet(cTarget)
	dep, err := core.TryParseBuildLabel(C.GoString(cDep), target.Label.PackageName)
	if err != nil {
		return C.CString(err.Error())
	}
	// exported=true distinguishes this from AddDep.
	target.AddMaybeExportedDependency(dep, true, false)
	return nil
}
// AddTool adds a tool to a build target.
// Returns nil on success or a C string error message.
//export AddTool
func AddTool(cTarget uintptr, cTool *C.char) *C.char {
	target := unsizet(cTarget)
	tool, err := parseTool(target, C.GoString(cTool))
	if err != nil {
		return C.CString(err.Error())
	}
	target.AddTool(tool)
	return nil
}
// AddNamedTool adds a named tool to a build target.
// Returns nil on success or a C string error message.
//export AddNamedTool
func AddNamedTool(cTarget uintptr, cName *C.char, cTool *C.char) *C.char {
	target := unsizet(cTarget)
	tool, err := parseTool(target, C.GoString(cTool))
	if err != nil {
		return C.CString(err.Error())
	}
	target.AddNamedTool(C.GoString(cName), tool)
	return nil
}
// parseTool parses a string into a tool; it's similar to sources but has slightly different semantics.
func parseTool(target *core.BuildTarget, tool string) (core.BuildInput, error) {
	// Anything that looks like a label or a path is handled like a source.
	if core.LooksLikeABuildLabel(tool) || strings.Contains(tool, "/") {
		return parseSource(tool, target.Label.PackageName, true)
	}
	// A bare name is resolved on the configured build path at run time.
	return core.SystemPathLabel{Name: tool, Path: core.State.Config.Build.Path}, nil
}
// AddVis adds a visibility directive to a build target.
// "PUBLIC" (or Bazel's "//visibility:public" in compatibility mode) maps to
// whole-graph visibility; anything else must parse as a build label.
//export AddVis
func AddVis(cTarget uintptr, cVis *C.char) *C.char {
	target := unsizet(cTarget)
	vis := C.GoString(cVis)
	if vis == "PUBLIC" || (core.State.Config.Bazel.Compatibility && vis == "//visibility:public") {
		target.Visibility = append(target.Visibility, core.WholeGraph[0])
	} else {
		label, err := core.TryParseBuildLabel(vis, target.Label.PackageName)
		if err != nil {
			return C.CString(err.Error())
		}
		target.Visibility = append(target.Visibility, label)
	}
	return nil
}
// AddLabel adds a label to a build target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddLabel
func AddLabel(cTarget uintptr, cLabel *C.char) *C.char {
	target := unsizet(cTarget)
	target.AddLabel(C.GoString(cLabel))
	return nil
}
// AddHash adds a hash to a build target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddHash
func AddHash(cTarget uintptr, cHash *C.char) *C.char {
	target := unsizet(cTarget)
	target.Hashes = append(target.Hashes, C.GoString(cHash))
	return nil
}
// AddLicence adds a licence to a build target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddLicence
func AddLicence(cTarget uintptr, cLicence *C.char) *C.char {
	target := unsizet(cTarget)
	target.AddLicence(C.GoString(cLicence))
	return nil
}
// AddTestOutput adds a test output file to a build target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddTestOutput
func AddTestOutput(cTarget uintptr, cTestOutput *C.char) *C.char {
	target := unsizet(cTarget)
	target.TestOutputs = append(target.TestOutputs, C.GoString(cTestOutput))
	return nil
}
// AddRequire adds a require statement to a build target.
// Always succeeds; the nil return matches the other callbacks' error convention.
//export AddRequire
func AddRequire(cTarget uintptr, cRequire *C.char) *C.char {
	target := unsizet(cTarget)
	target.Requires = append(target.Requires, C.GoString(cRequire))
	// Requirements are also implicit labels
	target.AddLabel(C.GoString(cRequire))
	return nil
}
// AddProvide adds a provide mapping (language -> label) to a build target.
// Returns nil on success or a C string error message if the label doesn't parse.
//export AddProvide
func AddProvide(cTarget uintptr, cLanguage *C.char, cDep *C.char) *C.char {
	target := unsizet(cTarget)
	label, err := core.TryParseBuildLabel(C.GoString(cDep), target.Label.PackageName)
	if err != nil {
		return C.CString(err.Error())
	}
	target.AddProvide(C.GoString(cLanguage), label)
	return nil
}
// SetContainerSetting sets a particular container setting on a target.
// Underscores are stripped from the setting name before lookup (e.g. python
// "tmp_dir" style names map onto the Go-side setting names).
//export SetContainerSetting
func SetContainerSetting(cTarget uintptr, cName, cValue *C.char) *C.char {
	target := unsizet(cTarget)
	if err := target.SetContainerSetting(strings.Replace(C.GoString(cName), "_", "", -1), C.GoString(cValue)); err != nil {
		return C.CString(err.Error())
	}
	return nil
}
// GetIncludeFile is a callback to the interpreter that returns the path it
// should be opening in order to include_defs() a file.
// We use in-band signalling for some errors since C can't handle multiple return values :)
//export GetIncludeFile
func GetIncludeFile(cPackage uintptr, cLabel *C.char) *C.char {
	label := C.GoString(cLabel)
	if !strings.HasPrefix(label, "//") {
		// Error messages are returned in-band; the Python side distinguishes them.
		return C.CString("__include_defs argument must be an absolute path (ie. start with //)")
	}
	relPath := strings.TrimLeft(label, "/")
	return C.CString(path.Join(core.RepoRoot, relPath))
}
// GetSubincludeFile is a callback to the interpreter that returns the path it
// should be opening in order to subinclude() a build target.
// We use in-band signalling for some errors since C can't handle multiple return values :)
//export GetSubincludeFile
func GetSubincludeFile(cPackage uintptr, cLabel *C.char) *C.char {
	return C.CString(getSubincludeFile(unsizep(cPackage), C.GoString(cLabel)))
}
// getSubincludeFile resolves a subinclude label to the path of its single output.
// In-band return values: a string starting with "__" is an error message, the
// pyDeferParse sentinel means the caller must wait, anything else is the path.
func getSubincludeFile(pkg *core.Package, labelStr string) string {
	label := core.ParseBuildLabel(labelStr, pkg.Name)
	if label.PackageName == pkg.Name {
		return fmt.Sprintf("__Can't subinclude :%s in %s; can't subinclude local targets.", label.Name, pkg.Name)
	}
	// Synthetic :all label for this package, used only for the visibility check below.
	pkgLabel := core.BuildLabel{PackageName: pkg.Name, Name: "all"}
	target := core.State.Graph.Target(label)
	if target == nil {
		// Might not have been parsed yet. Check for that first.
		if subincludePackage := core.State.Graph.Package(label.PackageName); subincludePackage == nil {
			if deferParse(label, pkg) {
				return pyDeferParse // Not an error, they'll just have to wait.
			}
			target = core.State.Graph.TargetOrDie(label) // Should be there now.
		} else {
			return fmt.Sprintf("__Failed to subinclude %s; package %s has no target by that name", label, label.PackageName)
		}
	} else if tmp := core.NewBuildTarget(pkgLabel); !tmp.CanSee(target) {
		return fmt.Sprintf("__Can't subinclude %s from %s due to visibility constraints", label, pkg.Name)
	} else if len(target.Outputs()) != 1 {
		return fmt.Sprintf("__Can't subinclude %s, subinclude targets must have exactly one output", label)
	} else if target.State() < core.Built {
		if deferParse(label, pkg) {
			return pyDeferParse // Again, they'll have to wait for this guy to build.
		}
	}
	pkg.RegisterSubinclude(target.Label)
	// Well if we made it to here it's actually ready to go, so tell them where to get it.
	return path.Join(target.OutDir(), target.Outputs()[0])
}
// runPreBuildFunction runs the pre-build function for a single target.
// The goroutine is pinned to its OS thread for the duration of the interpreter call.
func runPreBuildFunction(pkg *core.Package, target *core.BuildTarget) error {
	cName := C.CString(target.Label.Name)
	defer C.free(unsafe.Pointer(cName))
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	// The stored callback handle is passed back to the interpreter as an integer.
	f := C.size_t(uintptr(unsafe.Pointer(target.PreBuildFunction)))
	if result := C.GoString(C.RunPreBuildFunction(f, sizep(pkg), cName)); result != "" {
		return fmt.Errorf("Failed to run pre-build function for target %s: %s", target.Label.String(), result)
	}
	return nil
}
// runPostBuildFunction runs the post-build function for a single target,
// passing it the target's build output. The goroutine is pinned to its OS
// thread for the duration of the interpreter call.
func runPostBuildFunction(pkg *core.Package, target *core.BuildTarget, out string) error {
	cName := C.CString(target.Label.Name)
	cOutput := C.CString(out)
	defer C.free(unsafe.Pointer(cName))
	defer C.free(unsafe.Pointer(cOutput))
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	// The stored callback handle is passed back to the interpreter as an integer.
	f := C.size_t(uintptr(unsafe.Pointer(target.PostBuildFunction)))
	if result := C.GoString(C.RunPostBuildFunction(f, sizep(pkg), cName, cOutput)); result != "" {
		return fmt.Errorf("Failed to run post-build function for target %s: %s", target.Label.String(), result)
	}
	return nil
}
// logLevelFuncs maps interpreter log levels onto our logger's printf-style methods.
// Unfortunately there doesn't seem to be any API to do this dynamically :(
var logLevelFuncs = map[logging.Level]func(format string, args ...interface{}){
	logging.CRITICAL: log.Fatalf,
	logging.ERROR:    log.Errorf,
	logging.WARNING:  log.Warning,
	logging.NOTICE:   log.Notice,
	logging.INFO:     log.Info,
	logging.DEBUG:    log.Debug,
}
// Log is a cgo callback that is called by the log() builtin to log into our normal logging framework.
// Unknown levels fall back to Errorf rather than being dropped.
//export Log
func Log(level int, cPackage uintptr, cMessage *C.char) {
	pkg := unsizep(cPackage)
	f, present := logLevelFuncs[logging.Level(level)]
	if !present {
		f = log.Errorf
	}
	f("//%s/BUILD: %s", pkg.Name, C.GoString(cMessage))
}
// Glob implements the glob() builtin build function.
// Excludes are expanded twice: once prefixed with the package path and once
// bare, and both lists are passed through to core.Glob.
//export Glob
func Glob(cPackage *C.char, cIncludes **C.char, numIncludes int, cExcludes **C.char, numExcludes int, includeHidden bool) **C.char {
	packageName := C.GoString(cPackage)
	includes := cStringArrayToStringSlice(cIncludes, numIncludes, "")
	prefixedExcludes := cStringArrayToStringSlice(cExcludes, numExcludes, packageName)
	excludes := cStringArrayToStringSlice(cExcludes, numExcludes, "")
	// To make sure we can't glob the BUILD file, it is always added to excludes.
	excludes = append(excludes, core.State.Config.Parse.BuildFileName...)
	filenames := core.Glob(packageName, includes, prefixedExcludes, excludes, includeHidden)
	return stringSliceToCStringArray(filenames)
}
// stringSliceToCStringArray converts a Go slice of strings to a C array of char*'s.
// The returned array is terminated by a null pointer - the Python interpreter code will
// understand how to turn this back into Python strings.
// The array and each element are C-allocated; freeing is the receiver's
// responsibility (presumably the Python side — confirm).
func stringSliceToCStringArray(s []string) **C.char {
	// This is slightly hacky; we assume that sizeof(char*) == size of a uintptr in Go.
	// Presumably that should hold in most cases and is more portable than just hardcoding 8...
	const sz = int(unsafe.Sizeof(uintptr(0)))
	n := len(s) + 1 // +1 for the null terminator
	ret := (**C.char)(C.malloc(C.size_t(sz * n)))
	// View the C memory as a Go slice of pointers so we can index it.
	sl := (*[1 << 30]*C.char)(unsafe.Pointer(ret))[:n:n]
	for i, x := range s {
		sl[i] = C.CString(x)
	}
	sl[n-1] = nil
	return ret
}
// cStringArrayToStringSlice converts a C array of char*'s to a Go slice of strings.
// Each element is path-joined onto prefix (a "" prefix leaves it unchanged).
func cStringArrayToStringSlice(a **C.char, n int, prefix string) []string {
	ret := make([]string, n)
	// slightly scary incantation found on an internet
	sl := (*[1 << 30]*C.char)(unsafe.Pointer(a))[:n:n]
	for i, s := range sl {
		ret[i] = path.Join(prefix, C.GoString(s))
	}
	return ret
}
// GetLabels returns the set of labels for a build target and its transitive dependencies.
// The labels are filtered by the given prefix, which is stripped from the returned labels.
//export GetLabels
func GetLabels(cPackage uintptr, cTarget *C.char, cPrefix *C.char) **C.char {
	// Two formats are supported here: either passing just the name of a target in the current
	// package, or a build label referring specifically to one.
	lbl := C.GoString(cTarget)
	prefix := C.GoString(cPrefix)
	if core.LooksLikeABuildLabel(lbl) {
		label, err := core.TryParseBuildLabel(lbl, unsizep(cPackage).Name)
		if err != nil {
			log.Fatalf("%s", err) // TODO(pebers): report proper errors here and below
		}
		// Fully-qualified labels must already be built; local ones only building.
		return stringSliceToCStringArray(getLabels(core.State.Graph.TargetOrDie(label), prefix, core.Built))
	}
	target, err := getTargetPost(cPackage, cTarget)
	if err != nil {
		log.Fatalf("%s", err)
	}
	return stringSliceToCStringArray(getLabels(target, prefix, core.Building))
}
// getLabels collects the deduplicated, sorted labels matching prefix from target
// and its transitive dependencies (stopping at dependencies whose output is
// complete), with the prefix stripped from each result.
func getLabels(target *core.BuildTarget, prefix string, minState core.BuildTargetState) []string {
	if target.State() < minState {
		log.Fatalf("get_labels called on a target that is not yet built: %s", target.Label)
	}
	seen := map[string]bool{}
	visited := map[*core.BuildTarget]bool{}
	var visit func(t *core.BuildTarget)
	visit = func(t *core.BuildTarget) {
		for _, l := range t.Labels {
			if strings.HasPrefix(l, prefix) {
				seen[strings.TrimSpace(strings.TrimPrefix(l, prefix))] = true
			}
		}
		visited[t] = true
		// A dependency with complete outputs hides its own deps' labels;
		// the root target always recurses.
		if t != target && t.OutputIsComplete {
			return
		}
		for _, dep := range t.Dependencies() {
			if !visited[dep] {
				visit(dep)
			}
		}
	}
	visit(target)
	ret := make([]string, 0, len(seen))
	for l := range seen {
		ret = append(ret, l)
	}
	sort.Strings(ret)
	return ret
}
|
package service
import (
"go.uber.org/zap"
"mix/test/codes"
dto "mix/test/dto/core/transaction"
entity "mix/test/entity/core/transaction"
"mix/test/pb/core/transaction"
"mix/test/utils/status"
)
// createHotWithdraw persists a new hot-withdraw record built from the input,
// re-reads the stored row, and maps it onto the output.
func (p *Transaction) createHotWithdraw(ctx *Context, in *transaction.CreateHotWithdrawInput, out *transaction.HotWithdrawOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "createHotWithdraw"))
	db := ctx.db
	record := &entity.HotWithdraw{
		MerchantId:    in.MerchantId,
		SerialId:      in.SerialId,
		TransactionId: in.TransactionId,
		FromAccountId: in.FromAccountId,
		FromAddressId: in.FromAddressId,
		Chain:         in.Chain,
		Token:         in.Token,
		Address:       in.Address,
		Tag:           in.Tag,
		Amount:        in.Amount,
		Status:        in.Status,
		Hash:          in.Hash,
	}
	id, err := p.dao.CreateHotWithdraw(logger, db, record)
	if err != nil {
		return
	}
	// Re-read the stored row so the output reflects what was actually persisted.
	record, err = p.dao.MustGetHotWithdraw(logger, db, id)
	if err != nil {
		return
	}
	dto.ToHotWithdrawOutput(record, out)
	return
}
// getHotWithdraw looks up a hot-withdraw record by id and maps it onto the output.
// A missing row is reported as a HotWithdrawNotFound status rather than a nil error.
func (p *Transaction) getHotWithdraw(ctx *Context, in *transaction.GetHotWithdrawInput, out *transaction.HotWithdrawOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "getHotWithdraw"))
	record, err := p.dao.GetHotWithdraw(logger, ctx.db, in.Id)
	if err != nil {
		return
	}
	if record == nil {
		err = status.Code(codes.HotWithdrawNotFound)
		return
	}
	dto.ToHotWithdrawOutput(record, out)
	return
}
// getHotWithdrawList fetches all hot-withdraw records and maps them onto the output.
func (p *Transaction) getHotWithdrawList(ctx *Context, in *transaction.Empty, out *transaction.HotWithdrawListOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "getHotWithdrawList"))
	records, err := p.dao.GetHotWithdrawList(logger, ctx.db)
	if err != nil {
		return
	}
	dto.ToHotWithdrawListOutput(records, out)
	return
}
// removeHotWithdraw deletes the hot-withdraw record with the given id.
// It first verifies the row exists so a missing id maps to a typed
// HotWithdrawNotFound status rather than a silent no-op.
func (p *Transaction) removeHotWithdraw(ctx *Context, in *transaction.RemoveHotWithdrawInput, out *transaction.Empty) (err error) {
	logger := ctx.logger.With(zap.String("func", "removeHotWithdraw"))
	record, err := p.dao.GetHotWithdraw(logger, ctx.db, in.Id)
	if err != nil {
		return err
	}
	if record == nil {
		return status.Code(codes.HotWithdrawNotFound)
	}
	return p.dao.RemoveHotWithdraw(logger, ctx.db, in.Id)
}
// updateHotWithdraw overwrites the hot-withdraw record identified by in.Id
// with the provided fields and maps the updated entity onto the output.
func (p *Transaction) updateHotWithdraw(ctx *Context, in *transaction.UpdateHotWithdrawInput, out *transaction.HotWithdrawOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "updateHotWithdraw"))
	db := ctx.db
	record := &entity.HotWithdraw{
		Id:            in.Id,
		MerchantId:    in.MerchantId,
		SerialId:      in.SerialId,
		TransactionId: in.TransactionId,
		FromAccountId: in.FromAccountId,
		FromAddressId: in.FromAddressId,
		Chain:         in.Chain,
		Token:         in.Token,
		Address:       in.Address,
		Tag:           in.Tag,
		Amount:        in.Amount,
		Status:        in.Status,
		Hash:          in.Hash,
	}
	// NOTE(review): both p.db and the request-scoped ctx.db are passed here,
	// unlike the create/get paths — confirm this is intentional in the dao API.
	err = p.dao.UpdateHotWithdraw(logger, p.db, db, record)
	if err != nil {
		return err
	}
	dto.ToHotWithdrawOutput(record, out)
	return nil
}
// getHotWithdrawByMerchantId looks up a hot-withdraw record by its
// (merchant id, serial id) pair and maps it onto the output message.
func (p *Transaction) getHotWithdrawByMerchantId(ctx *Context, in *transaction.GetHotWithdrawByMerchantIdInput, out *transaction.HotWithdrawOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "getHotWithdrawByMerchantId"))
	record, err := p.dao.GetHotWithdrawByMerchantId(logger, ctx.db, in.MerchantId, in.SerialId)
	if err != nil {
		return err
	}
	if record == nil {
		return status.Code(codes.HotWithdrawNotFound)
	}
	dto.ToHotWithdrawOutput(record, out)
	return nil
}
// updateHotWithdrawByMerchantId overwrites the hot-withdraw record keyed by
// (merchant id, serial id) with the provided fields and maps the updated
// entity onto the output message.
func (p *Transaction) updateHotWithdrawByMerchantId(ctx *Context, in *transaction.UpdateHotWithdrawByMerchantIdInput, out *transaction.HotWithdrawOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "updateHotWithdrawByMerchantId"))
	db := ctx.db
	record := &entity.HotWithdraw{
		MerchantId:    in.MerchantId,
		SerialId:      in.SerialId,
		TransactionId: in.TransactionId,
		FromAccountId: in.FromAccountId,
		FromAddressId: in.FromAddressId,
		Chain:         in.Chain,
		Token:         in.Token,
		Address:       in.Address,
		Tag:           in.Tag,
		Amount:        in.Amount,
		Status:        in.Status,
		Hash:          in.Hash,
	}
	// NOTE(review): as in updateHotWithdraw, both p.db and ctx.db are passed —
	// confirm against the dao signature.
	err = p.dao.UpdateHotWithdrawByMerchantId(logger, p.db, db, record)
	if err != nil {
		return err
	}
	dto.ToHotWithdrawOutput(record, out)
	return nil
}
// removeHotWithdrawByMerchantId deletes the hot-withdraw record keyed by
// (merchant id, serial id), returning HotWithdrawNotFound if no row exists.
func (p *Transaction) removeHotWithdrawByMerchantId(ctx *Context, in *transaction.RemoveHotWithdrawByMerchantIdInput, out *transaction.Empty) (err error) {
	logger := ctx.logger.With(zap.String("func", "removeHotWithdrawByMerchantId"))
	record, err := p.dao.GetHotWithdrawByMerchantId(logger, ctx.db, in.MerchantId, in.SerialId)
	if err != nil {
		return err
	}
	if record == nil {
		return status.Code(codes.HotWithdrawNotFound)
	}
	return p.dao.RemoveHotWithdrawByMerchantId(logger, ctx.db, in.MerchantId, in.SerialId)
}
|
package signer
import (
"strings"
"github.com/EscherAuth/escher/debug"
"github.com/EscherAuth/escher/request"
)
// CanonicalizeRequest renders the request into its canonical multi-line form
// used for signing: upper-cased method, canonical path, canonical query,
// canonical headers, the signed-header list, and the body digest, joined by
// newlines.
func (s *signer) CanonicalizeRequest(r request.Interface, headersToSign []string) string {
	u := parsePathQuery(r.RawURL())
	canonicalized := strings.Join([]string{
		strings.ToUpper(r.Method()),
		canonicalizePath(u.Path),
		canonicalizeQuery(u.Query),
		s.canonicalizeHeaders(r, headersToSign),
		s.canonicalizeHeadersToSign(r, headersToSign),
		s.computeDigest(r.Body()),
	}, "\n")
	debug.Println("CanonicalizeRequest", canonicalized)
	return canonicalized
}
|
package poc
import (
"database/sql"
"fmt"
"log"
"time"
_ "github.com/go-sql-driver/mysql"
)
// Table describes one table of the source schema: its DDL, column names,
// foreign-key constraints and triggers.
type Table struct {
	Name        string       // table name (first column of SHOW FULL TABLES)
	TypeGroup   string       // table type (second column of SHOW FULL TABLES)
	SQL         string       // CREATE TABLE statement from SHOW CREATE TABLE
	Fields      []string     // column names from SHOW COLUMNS
	Constraints []Constraint // FK constraints from INFORMATION_SCHEMA
	Triggers    []Trigger    // triggers attached to this table
}
// Constraint describes one foreign-key relationship, as read from
// INFORMATION_SCHEMA.KEY_COLUMN_USAGE.
type Constraint struct {
	Name                 string // constraint name
	ColumnName           string // referencing column
	ReferencedTableName  string // referenced table
	ReferencedColumnName string // referenced column
}
// Trigger describes one table trigger, as read from SHOW TRIGGERS plus a
// per-trigger SHOW CREATE TRIGGER.
type Trigger struct {
	Name  string // trigger name
	Event string // triggering event column from SHOW TRIGGERS
	SQL   string // CREATE TRIGGER statement from SHOW CREATE TRIGGER
}
// CopySchemaPoc holds the driver name and connection parameters for the
// source database whose schema is copied into the mirror database.
type CopySchemaPoc struct {
	driverName string // database/sql driver name
	dbUser     string
	dbPassword string
	dbHost     string
	dbPort     string
	dbName     string // source database name
}
// NewCopySchemaPoc constructs a CopySchemaPoc configured with the given
// driver name and connection parameters.
func NewCopySchemaPoc(
	dbDriverName string,
	dbUser string,
	dbPassword string,
	dbHost string,
	dbPort string,
	dbName string,
) *CopySchemaPoc {
	// Use named fields so the mapping stays correct if the struct is reordered.
	return &CopySchemaPoc{
		driverName: dbDriverName,
		dbUser:     dbUser,
		dbPassword: dbPassword,
		dbHost:     dbHost,
		dbPort:     dbPort,
		dbName:     dbName,
	}
}
// Execute copies the schema (tables and triggers) of the configured source
// database into the hard-coded mirror database, then runs a sample
// INSERT ... SELECT to check data can be copied across.
func (p *CopySchemaPoc) Execute() {
	log.Printf("Copy Database Schema..")
	executionStart := time.Now()
	db, err := sql.Open(
		p.driverName,
		fmt.Sprintf(`%s:%s@tcp(%s:%s)/%s`, p.dbUser, p.dbPassword, p.dbHost, p.dbPort, p.dbName),
	)
	if err != nil {
		// Without a source handle there is nothing to copy; bail out early so
		// we never defer Close on a nil handle.
		log.Print(err.Error())
		return
	}
	defer db.Close()
	var mirrorDbName = "db-trim-mirror"
	mirrorDb, err := sql.Open(
		p.driverName,
		fmt.Sprintf(`%s:%s@tcp(%s:%s)/%s`, p.dbUser, p.dbPassword, p.dbHost, p.dbPort, mirrorDbName),
	)
	if err != nil {
		log.Print(err.Error())
		return
	}
	// The mirror handle was previously never closed; release it on exit.
	defer mirrorDb.Close()
	// configure session: disable FK checks so tables can be created in any order
	if _, err := mirrorDb.Exec(`SET FOREIGN_KEY_CHECKS=0`); err != nil {
		log.Print(err.Error())
	}
	tableList := p.getTableList(db)
	// apply table schemas
	for _, table := range tableList {
		_, err := mirrorDb.Exec(table.SQL)
		if err != nil {
			log.Print(err.Error())
		}
		// apply trigger schemas
		for _, trigger := range table.Triggers {
			_, err := mirrorDb.Exec(trigger.SQL)
			if err != nil {
				log.Print(err.Error())
			}
		}
	}
	// check INSERT FROM SELECT query
	// NOTE(review): fieldList is empty here, producing `INSERT INTO t ()` which
	// is invalid SQL — looks like an unfinished experiment; confirm intent.
	var fieldList = ``
	var catalogProductEntityTable = `catalog_product_entity`
	if _, err := mirrorDb.Exec(`INSERT INTO ` + mirrorDbName + `.` + catalogProductEntityTable + ` (` + fieldList + `)
	SELECT ` + fieldList + ` FROM ` + p.dbName + `.` + catalogProductEntityTable + ` `); err != nil {
		log.Print(err.Error())
	}
	executionElapsed := time.Since(executionStart)
	log.Printf("DB Copy took - %s", executionElapsed)
}
// getTableList enumerates every table in the current database and loads, for
// each one, its DDL, column list, FK constraints and triggers.
func (p *CopySchemaPoc) getTableList(db *sql.DB) []Table {
	tableRows, err := db.Query(`SHOW FULL TABLES`)
	if err != nil {
		panic(err.Error())
	}
	// Release the cursor even if a later helper panics mid-iteration.
	defer tableRows.Close()
	var tableList = make([]Table, 0)
	for tableRows.Next() {
		var nextTable = Table{}
		err := tableRows.Scan(&nextTable.Name, &nextTable.TypeGroup)
		if err != nil {
			panic(err.Error())
		}
		// get SQL definition
		nextTable.SQL = p.getTableSQL(db, nextTable.Name)
		// get field list
		nextTable.Fields = p.getTableFields(db, nextTable.Name)
		// get constraints
		nextTable.Constraints = p.getConstraintList(db, nextTable.Name, p.dbName)
		// get triggers
		nextTable.Triggers = p.getTriggerList(db, nextTable.Name)
		tableList = append(tableList, nextTable)
	}
	// Surface iteration errors that terminate Next() early.
	if err := tableRows.Err(); err != nil {
		panic(err.Error())
	}
	return tableList
}
// getTableSQL returns the CREATE TABLE statement for tableName.
func (p *CopySchemaPoc) getTableSQL(db *sql.DB, tableName string) string {
	var (
		echoedName string // first column of SHOW CREATE TABLE (table name), unused
		createSQL  string
	)
	if err := db.QueryRow(`SHOW CREATE TABLE ` + tableName).Scan(&echoedName, &createSQL); err != nil {
		panic(err.Error())
	}
	return createSQL
}
// getConstraintList returns the foreign-key constraints declared on
// tableName within databaseName, as reported by INFORMATION_SCHEMA.
//
// NOTE: the identifiers are concatenated into the SQL text; they must come
// from trusted sources (here: SHOW FULL TABLES output and the configured
// database name).
func (p *CopySchemaPoc) getConstraintList(db *sql.DB, tableName string, databaseName string) []Constraint {
	constraintRows, err := db.Query(
		`SELECT CONSTRAINT_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME
		FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
		WHERE TABLE_NAME = '` + tableName + `' AND REFERENCED_TABLE_SCHEMA = '` + databaseName + `'`,
	)
	if err != nil {
		log.Fatal(err)
	}
	// Previously the rows cursor was never explicitly closed.
	defer constraintRows.Close()
	constraints := make([]Constraint, 0)
	for constraintRows.Next() {
		var constraint = Constraint{}
		err := constraintRows.Scan(
			&constraint.Name,
			&constraint.ColumnName,
			&constraint.ReferencedTableName,
			&constraint.ReferencedColumnName,
		)
		if err != nil {
			log.Fatal(err)
		}
		constraints = append(constraints, constraint)
	}
	// Surface iteration errors that terminate Next() early.
	if err := constraintRows.Err(); err != nil {
		log.Fatal(err)
	}
	return constraints
}
// getTriggerList returns the triggers attached to tableName. Only the first
// two columns of SHOW TRIGGERS (name and event) are kept; the full CREATE
// TRIGGER statement is fetched separately per trigger.
func (p *CopySchemaPoc) getTriggerList(db *sql.DB, tableName string) []Trigger {
	triggerRows, err := db.Query(`SHOW TRIGGERS LIKE '` + tableName + `'`)
	if err != nil {
		panic(err.Error())
	}
	// Previously the rows cursor was never explicitly closed.
	defer triggerRows.Close()
	var dummy string
	var triggerList = make([]Trigger, 0)
	for triggerRows.Next() {
		var nextTrigger = Trigger{}
		// SHOW TRIGGERS yields 11 columns; the trailing 9 are discarded.
		err := triggerRows.Scan(
			&nextTrigger.Name,
			&nextTrigger.Event,
			&dummy,
			&dummy,
			&dummy,
			&dummy,
			&dummy,
			&dummy,
			&dummy,
			&dummy,
			&dummy,
		)
		if err != nil {
			panic(err.Error())
		}
		nextTrigger.SQL = p.getTriggerSQL(db, nextTrigger.Name)
		triggerList = append(triggerList, nextTrigger)
	}
	// Surface iteration errors that terminate Next() early.
	if err := triggerRows.Err(); err != nil {
		panic(err.Error())
	}
	return triggerList
}
// getTriggerSQL returns the CREATE TRIGGER statement for triggerName.
// SHOW CREATE TRIGGER yields seven columns; only the third (the statement)
// is kept, the rest are scanned into a throwaway variable.
func (p *CopySchemaPoc) getTriggerSQL(db *sql.DB, triggerName string) string {
	var (
		discard   string
		createSQL string
	)
	err := db.QueryRow(`SHOW CREATE TRIGGER `+triggerName).Scan(
		&discard,
		&discard,
		&createSQL,
		&discard,
		&discard,
		&discard,
		&discard,
	)
	if err != nil {
		panic(err.Error())
	}
	return createSQL
}
// getTableFields returns the column names of tableName, taken from the first
// column of SHOW COLUMNS; the remaining five columns are discarded.
func (p *CopySchemaPoc) getTableFields(db *sql.DB, tableName string) []string {
	fieldRows, err := db.Query(`SHOW COLUMNS FROM ` + tableName)
	if err != nil {
		panic(err.Error())
	}
	// Previously the rows cursor was never explicitly closed.
	defer fieldRows.Close()
	var fieldList = make([]string, 0)
	for fieldRows.Next() {
		var dummyStr sql.NullString
		var tableField string
		err := fieldRows.Scan(&tableField, &dummyStr, &dummyStr, &dummyStr, &dummyStr, &dummyStr)
		if err != nil {
			panic(err.Error())
		}
		fieldList = append(fieldList, tableField)
	}
	// Surface iteration errors that terminate Next() early.
	if err := fieldRows.Err(); err != nil {
		panic(err.Error())
	}
	return fieldList
}
|
package rizla
import (
"os"
"github.com/iris-contrib/color"
"github.com/mattn/go-colorable"
)
// Printer is a colorable printer: it embeds a color.Color for formatted,
// colored output and remembers the stream it writes to.
type Printer struct {
	*color.Color
	// stream is the output stream which the program will use
	stream *os.File
}
// NewPrinter returns a new colorable printer writing to out.
func NewPrinter(out *os.File) *Printer {
	return &Printer{
		Color:  color.New(colorable.NewColorable(out)),
		stream: out,
	}
}
// Dangerf prints a message with red colored letters.
// NOTE(review): Add mutates the shared embedded Color, so attributes set by
// earlier Dangerf/Infof/Successf calls may accumulate — confirm against the
// color package's Add semantics.
func (printer *Printer) Dangerf(format string, a ...interface{}) {
	printer.Add(color.FgRed)
	printer.Printf(format, a...)
}
// Infof prints a message with cyan colored letters.
func (printer *Printer) Infof(format string, a ...interface{}) {
	printer.Add(color.FgCyan)
	printer.Printf(format, a...)
}
// Successf prints a message with green colored letters.
func (printer *Printer) Successf(format string, a ...interface{}) {
	printer.Add(color.FgGreen)
	printer.Printf(format, a...)
}
// Name returns the name of the underlying output stream.
func (printer *Printer) Name() string {
	return printer.stream.Name()
}
// Close closes the underlying output stream.
func (printer *Printer) Close() error {
	return printer.stream.Close()
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/hyperledger/fabric-contract-api-go/contractapi"
)
// Update changes the name of the resource type stored under id in the world
// state. It fails if no value exists for that id.
//
// Underlying errors are now wrapped with %w so callers can inspect the cause;
// previously they were silently discarded.
func (rc *ResourceTypesContract) Update(ctx contractapi.TransactionContextInterface, id string, name string) error {
	existing, err := ctx.GetStub().GetState(id)
	if err != nil {
		return fmt.Errorf("Unable to interact with world state: %w", err)
	}
	if existing == nil {
		return fmt.Errorf("Cannot update world state pair with id %s. Does not exist", id)
	}
	var existingResourceType *ResourceType
	if err = json.Unmarshal(existing, &existingResourceType); err != nil {
		return fmt.Errorf("Unable to unmarshal existing into object: %w", err)
	}
	// Guard against a stored literal "null", which unmarshals to a nil pointer
	// and would panic on the field assignment below.
	if existingResourceType == nil {
		return fmt.Errorf("Cannot update world state pair with id %s. Stored value is null", id)
	}
	existingResourceType.Name = name
	newValue, err := json.Marshal(existingResourceType)
	if err != nil {
		return fmt.Errorf("Unable to marshal new object: %w", err)
	}
	if err = ctx.GetStub().PutState(id, newValue); err != nil {
		return fmt.Errorf("Unable to interact with world state: %w", err)
	}
	return nil
}
|
package chunkserver
import (
"os"
"testing"
)
// testReadWrite round-trips s through WriteDataAt/ReadDataAt on a scratch
// file and verifies the bytes read back match the input.
func testReadWrite(t *testing.T, s string) {
	const path = "/tmp/test"
	os.Remove(path)
	WriteDataAt(path, 0, []byte(s))
	buf := make([]byte, 100)
	n, err := ReadDataAt(path, 0, buf)
	if err != nil {
		t.Error(err)
	}
	if string(buf[:n]) != s {
		t.Errorf("read write doesn't match: got %v want %s", string(buf), s)
	}
}
func testReadWriteHeader(t *testing.T, v, cksum uint64) {
path := "/tmp/test"
os.Remove(path)
h0 := ChunkHeader{
version: v,
checksum: cksum,
}
WriteHeader(path, h0)
h1, err := ReadHeader(path)
if err != nil {
t.Error(err)
}
if h1.version != v || h1.checksum != cksum {
t.Error("read write header failed")
}
}
// TestReadWrite exercises the data round-trip with several payloads.
func TestReadWrite(t *testing.T) {
	for _, s := range []string{
		"hello, world.",
		"fine thank you and you?",
		"nice to meet you.",
	} {
		testReadWrite(t, s)
	}
}
// TestReadWriteHeader exercises the header round-trip with several
// version/checksum pairs.
func TestReadWriteHeader(t *testing.T) {
	for _, c := range []struct{ v, cksum uint64 }{
		{10, 100},
		{100, 1000},
		{1000, 10000},
	} {
		testReadWriteHeader(t, c.v, c.cksum)
	}
}
|
/*
Create a function that takes in an array of grass heights and a variable sequence of lawn mower cuts and outputs the array of successive grass heights.
If after a cut, any single element in the array reaches zero or negative, return "Done" instead of the array of new heights.
A demo:
cuttingGrass([3, 4, 4, 4], 1, 1, 1) ➞ [[2, 3, 3, 3], [1, 2, 2, 2], "Done"]
// 1st cut shaves off 1: [3, 4, 4, 4] ➞ [2, 3, 3, 3]
// 2nd cut shaves off 1: [2, 3, 3, 3] ➞ [1, 2, 2, 2]
// 3rd cut shaves off 1: [1, 2, 2, 2] ➞ [0, 1, 1, 1], but one element reached zero so we return "Done".
Examples
cuttingGrass([5, 6, 7, 5], 1, 2, 1)
➞ [[4, 5, 6, 4], [2, 3, 4, 2], [1, 2, 3, 1]]
cuttingGrass([4, 4, 4, 4], 1, 1, 1, 1)
➞ [[3, 3, 3, 3], [2, 2, 2, 2], [1, 1, 1, 1], "Done"]
cuttingGrass([8, 9, 9, 8, 8], 2, 3, 2, 1)
➞ [[6, 7, 7, 6, 6], [3, 4, 4, 3, 3], [1, 2, 2, 1, 1], "Done"]
cuttingGrass([1, 0, 1, 1], 1, 1, 1) ➞ ["Done", "Done", "Done"]
Notes
The number of lawn cuts is variable.
There will be at least one cut.
Return "Done" onwards for each additional cut if the grass has already been completely mowed (see fourth example).
*/
package main
// main runs the cutgrass examples as self-checking assertions.
func main() {
	done := "Done"
	cases := []struct {
		heights []int
		cuts    []int
		want    []interface{}
	}{
		{[]int{3, 4, 4, 4}, []int{1, 1, 1}, []interface{}{[]int{2, 3, 3, 3}, []int{1, 2, 2, 2}, done}},
		{[]int{4, 4, 4, 4}, []int{1, 1, 1, 1}, []interface{}{[]int{3, 3, 3, 3}, []int{2, 2, 2, 2}, []int{1, 1, 1, 1}, done}},
		{[]int{5, 6, 7, 5}, []int{1, 2, 1}, []interface{}{[]int{4, 5, 6, 4}, []int{2, 3, 4, 2}, []int{1, 2, 3, 1}}},
		{[]int{8, 9, 9, 8, 8}, []int{2, 3, 2, 1}, []interface{}{[]int{6, 7, 7, 6, 6}, []int{3, 4, 4, 3, 3}, []int{1, 2, 2, 1, 1}, done}},
		{[]int{1, 0, 1, 1}, []int{1, 1, 1}, []interface{}{done, done, done}},
		{[]int{4, 5, 4, 5}, []int{2, 1, 1}, []interface{}{[]int{2, 3, 2, 3}, []int{1, 2, 1, 2}, done}},
		{[]int{4, 2, 2}, []int{2, 1, 1}, []interface{}{done, done, done}},
	}
	for _, c := range cases {
		asserti(cutgrass(c.heights, c.cuts...), c.want)
	}
}
// cutgrass applies each cut in c to the grass heights h and returns the
// successive height arrays. From the first cut that would bring any blade to
// zero or below, that entry and every following one is the string "Done".
func cutgrass(h []int, c ...int) []interface{} {
	out := make([]interface{}, len(c))
	cur := h
	mowed := false
	for i, cut := range c {
		if !mowed {
			next := make([]int, len(h))
			ok := true
			for j, height := range cur {
				next[j] = height - cut
				if next[j] <= 0 {
					ok = false
					break
				}
			}
			if ok {
				out[i] = next
				cur = next
				continue
			}
			// This and all remaining cuts report "Done".
			mowed = true
		}
		out[i] = "Done"
	}
	return out
}
// assert panics when its condition is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// asserti asserts that two []interface{} slices are element-wise equal, where
// each element is either an []int (compared with eqi) or a directly
// comparable value such as the string "Done".
func asserti(a, b []interface{}) {
	assert(len(a) == len(b))
	for i := range a {
		av, aIsInts := a[i].([]int)
		bv, bIsInts := b[i].([]int)
		assert(aIsInts == bIsInts)
		assert((aIsInts && eqi(av, bv)) || a[i] == b[i])
	}
}
// eqi reports whether two int slices have identical length and contents.
func eqi(a, b []int) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
|
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"context"
"fmt"
"github.com/google/gapid/gapis/memory"
)
// StateWatcher provides callbacks to track state effects.
type StateWatcher interface {
	// OnBeginCmd is called at the beginning of each API call.
	OnBeginCmd(ctx context.Context, cmdID CmdID, cmd Cmd)

	// OnEndCmd is called at the end of each API call.
	OnEndCmd(ctx context.Context, cmdID CmdID, cmd Cmd)

	// OnGet is called when a fragment of state (field, map key, array index)
	// is read.
	OnGet(ctx context.Context, owner Reference, f Fragment, v Reference)

	// OnSet is called when a fragment of state (field, map key, array index)
	// is written.
	OnSet(ctx context.Context, owner Reference, f Fragment, old Reference, new Reference)

	// OnWriteSlice is called when writing to a slice.
	OnWriteSlice(ctx context.Context, s memory.Slice)

	// OnReadSlice is called when reading from a slice.
	OnReadSlice(ctx context.Context, s memory.Slice)

	// OnWriteObs is called when memory write observations become visible.
	OnWriteObs(ctx context.Context, obs []CmdObservation)

	// OnReadObs is called when memory read observations become visible.
	OnReadObs(ctx context.Context, obs []CmdObservation)

	// OpenForwardDependency is called to begin a forward dependency.
	// When `CloseForwardDependency` is called later with the same `dependencyID`,
	// a dependency is added from the current command node during the
	// `OpenForwardDependency` to the current command node during the
	// `CloseForwardDependency` call.
	// Each `OpenForwardDependency` call should have at most one matching
	// `CloseForwardDependency` call; additional `CloseForwardDependency`
	// calls with the same `dependencyID` will **not** result in additional
	// forward dependencies.
	OpenForwardDependency(ctx context.Context, dependencyID interface{})

	// CloseForwardDependency is called to end a forward dependency.
	// See `OpenForwardDependency` for an explanation of forward dependencies.
	CloseForwardDependency(ctx context.Context, dependencyID interface{})

	// DropForwardDependency is called to abandon a previously opened
	// forward dependency, without actually adding the forward dependency.
	// See `OpenForwardDependency` for an explanation of forward dependencies.
	DropForwardDependency(ctx context.Context, dependencyID interface{})
}
// Fragment is an interface which marks types which identify pieces of API
// objects (fields, array indices, map keys, or whole objects). All of the
// implementations appear below in this file.
type Fragment interface {
	fragment()
}
// FieldFragment is a Fragment identifying a field member of an API object.
// This corresponds to API syntax such as `myObj.fieldName`.
type FieldFragment struct {
	Field
}

// Format implements fmt.Formatter, printing the fragment as `.fieldName`.
func (f FieldFragment) Format(s fmt.State, r rune) { fmt.Fprintf(s, ".%s", f.Field.FieldName()) }

// fragment marks FieldFragment as a Fragment.
func (FieldFragment) fragment() {}
// ArrayIndexFragment is a Fragment identifying an array index.
// This corresponds to syntax such as `myArray[3]`.
type ArrayIndexFragment struct {
	Index int
}

// Format implements fmt.Formatter, printing the fragment as `[index]`.
func (f ArrayIndexFragment) Format(s fmt.State, r rune) { fmt.Fprintf(s, "[%d]", f.Index) }

// fragment marks ArrayIndexFragment as a Fragment.
func (ArrayIndexFragment) fragment() {}
// MapIndexFragment is a Fragment identifying a map index.
// This corresponds to syntax such as `myMap["foo"]`
type MapIndexFragment struct {
	Index interface{}
}

// Format implements fmt.Formatter, printing the fragment as `[key]`.
func (f MapIndexFragment) Format(s fmt.State, r rune) { fmt.Fprintf(s, "[%v]", f.Index) }

// fragment marks MapIndexFragment as a Fragment.
func (MapIndexFragment) fragment() {}
// CompleteFragment is a Fragment identifying the entire object (all fields),
// map (all key/value pairs) or array (all values).
type CompleteFragment struct{}

// Format implements fmt.Formatter, printing the fragment as `[*]`.
func (f CompleteFragment) Format(s fmt.State, r rune) { fmt.Fprintf(s, "[*]") }

// fragment marks CompleteFragment as a Fragment.
func (CompleteFragment) fragment() {}
// Field identifies a field in an API object.
type Field interface {
	// FieldName returns the name of the field.
	FieldName() string
	// ClassName returns the name of the class the field belongs to.
	ClassName() string
}
|
package ratelimiters
import (
"sync"
"time"
"github.com/corverroos/ratelimit"
)
// NewNoopLock builds a NoopLock carrying the given period and limit.
func NewNoopLock(period time.Duration, limit int) *NoopLock {
	lock := new(NoopLock)
	lock.period = period
	lock.limit = limit
	return lock
}
// NoopLock is a rate limiter that always grants requests. The configured
// period and limit are stored but not consulted by Request.
type NoopLock struct {
	period time.Duration // configured period (not consulted by Request)
	limit  int           // configured limit (not consulted by Request)
	mu     sync.Mutex
}
// Request always grants the request for resource. Acquiring and releasing
// the mutex provides a serialization point but no actual rate limiting.
func (n *NoopLock) Request(resource string) bool {
	n.mu.Lock()
	// Defer the unlock so the lock is still released correctly if logic is
	// ever added between Lock and return.
	defer n.mu.Unlock()
	return true
}
// Compile-time check that NoopLock satisfies ratelimit.RateLimiter.
var _ ratelimit.RateLimiter = (*NoopLock)(nil)
|
package renter
import (
"bytes"
"context"
"time"
"gitlab.com/NebulousLabs/Sia/build"
"gitlab.com/NebulousLabs/Sia/crypto"
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/Sia/types"
"gitlab.com/NebulousLabs/errors"
)
// errNotEnoughPieces is returned when there are not enough pieces found to
// successfully complete the download.
var errNotEnoughPieces = errors.New("not enough pieces to complete download")
// pieceDownload tracks a worker downloading a piece, whether that piece has
// returned, and what time the piece is/was expected to return.
//
// NOTE: The actual piece data is stored in the projectDownloadChunk after the
// download completes.
type pieceDownload struct {
	// 'completed', 'launched', and 'downloadErr' are status variables for the
	// piece. If 'launched' is false, it means the piece download has not
	// started yet, 'completed' will also be false.
	//
	// If 'launched' is true and 'completed' is false, it means the download is
	// in progress and the result is not known.
	//
	// If 'completed' is true, the download has been attempted, if it was
	// unsuccessful 'downloadErr' will contain the error with which it failed.
	// If 'downloadErr' is nil however, it means the piece was successfully
	// downloaded.
	completed   bool
	launched    bool
	downloadErr error

	// expectedCompleteTime indicates the time when the download is expected
	// to complete. This is used to determine whether or not a download is late.
	expectedCompleteTime time.Time

	// worker is the worker performing this piece's download.
	worker *worker
}
// projectDownloadChunk is a bunch of state that helps to orchestrate a download
// from a projectChunkWorkerSet.
//
// The projectDownloadChunk is only ever accessed by a single thread which
// orchestrates the download, which means that it does not need to be thread
// safe.
type projectDownloadChunk struct {
	// Parameters for downloading a subset of the data within the chunk.
	lengthInChunk uint64
	offsetInChunk uint64

	// pricePerMS is the amount of money we are willing to spend on faster
	// workers. If a certain set of workers is 100ms faster, but that exceeds
	// the pricePerMS we are willing to pay for it, we won't use that faster
	// worker set. If it is within the budget however, we will favor the faster
	// and more expensive worker set.
	pricePerMS types.Currency

	// Values derived from the chunk download parameters. The offset and length
	// specify the offset and length that will be sent to the host, which must
	// be segment aligned.
	pieceLength uint64
	pieceOffset uint64

	// availablePieces are pieces that resolved workers think they can fetch.
	//
	// workersConsideredIndex keeps track of what workers were already
	// considered after looking at the 'resolvedWorkers' array defined on the
	// pcws. This enables the worker selection code to realize which pieces in
	// the worker set have been resolved since the last check.
	//
	// unresolvedWorkersRemaining is the number of unresolved workers at the
	// time the available pieces were last updated. This enables counting the
	// hopeful pieces without introducing a race condition in the finished
	// check.
	availablePieces            [][]*pieceDownload
	workersConsideredIndex     int
	unresolvedWorkersRemaining int

	// dataPieces is the buffer that is used to place data as it comes back.
	// There is one piece per chunk, and pieces can be nil. To know if the
	// download is complete, the number of non-nil pieces will be counted.
	dataPieces [][]byte

	// ctx carries the cancellation signal for the download; worker read
	// responses arrive on workerResponseChan.
	// The completed data gets sent down the response chan once the full
	// download is done.
	ctx                  context.Context
	downloadResponseChan chan *downloadResponse
	workerResponseChan   chan *jobReadResponse

	workerSet   *projectChunkWorkerSet
	workerState *pcwsWorkerState
}
// downloadResponse is sent via a channel to the caller of
// 'projectChunkWorkerSet.managedDownload'.
type downloadResponse struct {
	data []byte // recovered data on success, nil on failure
	err  error  // non-nil when the download failed
}
// successful reports whether the piece download has completed without error.
func (pd *pieceDownload) successful() bool {
	if !pd.completed {
		return false
	}
	return pd.downloadErr == nil
}
// unresolvedWorkers will return the set of unresolved workers from the worker
// state of the pdc. This operation will also update the set of available pieces
// within the pdc to reflect any previously unresolved workers that are now
// available workers.
//
// A channel will also be returned which will be closed when there are new
// unresolved workers available.
func (pdc *projectDownloadChunk) unresolvedWorkers() ([]*pcwsUnresolvedWorker, <-chan struct{}) {
	ws := pdc.workerState
	ws.mu.Lock()
	defer ws.mu.Unlock()

	// Snapshot the unresolved workers while the worker-state lock is held.
	var unresolvedWorkers []*pcwsUnresolvedWorker
	for _, uw := range ws.unresolvedWorkers {
		unresolvedWorkers = append(unresolvedWorkers, uw)
	}
	// Add any new resolved workers to the pdc's list of available pieces.
	for i := pdc.workersConsideredIndex; i < len(ws.resolvedWorkers); i++ {
		// Add the returned worker to available pieces for each piece that the
		// resolved worker has.
		resp := ws.resolvedWorkers[i]
		for _, pieceIndex := range resp.pieceIndices {
			pdc.availablePieces[pieceIndex] = append(pdc.availablePieces[pieceIndex], &pieceDownload{
				worker: resp.worker,
			})
		}
	}
	// Record how far through the resolved list we have scanned, and how many
	// workers were still unresolved at snapshot time (consulted by 'finished').
	pdc.workersConsideredIndex = len(ws.resolvedWorkers)
	pdc.unresolvedWorkersRemaining = len(ws.unresolvedWorkers)

	// If there are more unresolved workers, fetch a channel that will be closed
	// when more results from unresolved workers are available.
	return unresolvedWorkers, ws.registerForWorkerUpdate()
}
// handleJobReadResponse will take a jobReadResponse from a worker job
// and integrate it into the set of pieces.
func (pdc *projectDownloadChunk) handleJobReadResponse(jrr *jobReadResponse) {
	// Prevent a production panic.
	if jrr == nil {
		pdc.workerSet.staticRenter.log.Critical("received nil job read response in handleJobReadResponse")
		return
	}

	// Figure out which index this read corresponds to.
	// NOTE(review): if no root matches, pieceIndex stays 0 and the response is
	// attributed to piece 0 — confirm the caller guarantees a match.
	pieceIndex := 0
	for i, root := range pdc.workerSet.staticPieceRoots {
		if jrr.staticSectorRoot == root {
			pieceIndex = i
			break
		}
	}

	// Check whether the job failed.
	if jrr.staticErr != nil {
		// The download failed, update the pdc available pieces to reflect the
		// failure.
		pieceFound := false
		for i := 0; i < len(pdc.availablePieces[pieceIndex]); i++ {
			if pdc.availablePieces[pieceIndex][i].worker.staticHostPubKeyStr == jrr.staticWorker.staticHostPubKeyStr {
				if pieceFound {
					build.Critical("The list of available pieces contains duplicates.") // sanity check
				}
				pieceFound = true
				pdc.availablePieces[pieceIndex][i].completed = true
				pdc.availablePieces[pieceIndex][i].downloadErr = jrr.staticErr
			}
		}
		return
	}

	// Decrypt the piece that has come back.
	//
	// TODO: The input to DecryptBytesInPlace needs to accept a block index, if
	// we aren't decrypting from the beginning of the chunk this will probably
	// fail.
	key := pdc.workerSet.staticMasterKey.Derive(pdc.workerSet.staticChunkIndex, uint64(pieceIndex))
	_, err := key.DecryptBytesInPlace(jrr.staticData, 0)
	if err != nil {
		pdc.workerSet.staticRenter.log.Println("decryption of a piece failed")
		return
	}

	// The download succeeded, add the piece to the appropriate index.
	pdc.dataPieces[pieceIndex] = jrr.staticData
	jrr.staticData = nil // Just in case there's a reference to the job response elsewhere.

	// Mark the matching pieceDownload entry as completed (with no error).
	pieceFound := false
	for i := 0; i < len(pdc.availablePieces[pieceIndex]); i++ {
		if pdc.availablePieces[pieceIndex][i].worker.staticHostPubKeyStr == jrr.staticWorker.staticHostPubKeyStr {
			if pieceFound {
				build.Critical("The list of available pieces contains duplicates.") // sanity check
			}
			pieceFound = true
			pdc.availablePieces[pieceIndex][i].completed = true
		}
	}
}
// fail will send an error down the download response channel, signalling the
// caller that the download cannot complete.
func (pdc *projectDownloadChunk) fail(err error) {
	pdc.downloadResponseChan <- &downloadResponse{err: err}
}
// finalize will take the completed pieces of the download, decode them,
// and then send the result down the response channel. If there is an error
// during decode, 'pdc.fail()' will be called.
//
// finalize is invoked once 'finished' reports completion; see
// threadedCollectAndOverdrivePieces.
func (pdc *projectDownloadChunk) finalize() {
	// Helper variable.
	ec := pdc.workerSet.staticErasureCoder

	// The chunk download offset and chunk download length are different from
	// the requested offset and length because the chunk download offset and
	// length are required to be a factor of the segment size of the erasure
	// codes.
	//
	// NOTE: This is one of the places where we assume we are using maximum
	// distance separable erasure codes.
	chunkDLOffset := pdc.pieceOffset * uint64(ec.MinPieces())
	chunkDLLength := pdc.pieceLength * uint64(ec.MinPieces())

	// Recover the pieces in to a single byte slice.
	buf := bytes.NewBuffer(nil)
	err := pdc.workerSet.staticErasureCoder.Recover(pdc.dataPieces, chunkDLOffset+chunkDLLength, buf)
	if err != nil {
		pdc.fail(errors.AddContext(err, "unable to complete erasure decode of download"))
		return
	}
	data := buf.Bytes()

	// The full set of data is recovered, truncate it down to just the pieces of
	// data requested by the user and return.
	data = data[pdc.offsetInChunk : pdc.offsetInChunk+pdc.lengthInChunk]

	// Return the data to the caller.
	dr := &downloadResponse{
		data: data,
		err:  nil,
	}
	pdc.downloadResponseChan <- dr
}
// finished returns true if the download is finished, and returns an error if
// the download is unable to complete.
func (pdc *projectDownloadChunk) finished() (bool, error) {
	// Convenience variables.
	ec := pdc.workerSet.staticErasureCoder

	// Count the number of completed pieces and hopeful pieces in our list of
	// potential downloads.
	completedPieces := 0
	hopefulPieces := 0
	for _, piece := range pdc.availablePieces {
		// Only count one piece as hopeful per set.
		hopeful := false
		for _, pieceDownload := range piece {
			// If this piece is completed, count it both as hopeful and
			// completed, no need to look at other pieces.
			if pieceDownload.successful() {
				hopeful = true
				completedPieces++
				break
			}
			// If this piece has not yet failed, it is hopeful. Keep looking
			// through the pieces in case there is a piece that was downloaded
			// successfully.
			if pieceDownload.downloadErr == nil {
				hopeful = true
			}
		}
		if hopeful {
			hopefulPieces++
		}
	}
	// Enough unique pieces have completed for erasure recovery to succeed.
	if completedPieces >= ec.MinPieces() {
		return true, nil
	}

	// Count the number of workers that haven't resolved yet, and thus
	// (optimistically) might contribute towards downloading a unique piece.
	hopefulPieces += pdc.unresolvedWorkersRemaining

	// Ensure that there are enough pieces that could potentially become
	// completed to finish the download.
	if hopefulPieces < ec.MinPieces() {
		return false, errNotEnoughPieces
	}
	return false, nil
}
// launchWorker will launch a worker and update the corresponding available
// piece.
//
// A time is returned which indicates the expected return time of the worker's
// download. A bool is returned which indicates whether or not the launch was
// successful.
func (pdc *projectDownloadChunk) launchWorker(w *worker, pieceIndex uint64) (time.Time, bool) {
	// Sanity check that the pieceOffset and pieceLength are segment aligned.
	if pdc.pieceOffset%crypto.SegmentSize != 0 ||
		pdc.pieceLength%crypto.SegmentSize != 0 {
		build.Critical("pieceOffset or pieceLength is not segment aligned")
	}

	// Create the read sector job for the worker.
	//
	// TODO: Ideally we pass the context here so the job is cancellable
	// in-flight.
	jrs := &jobReadSector{
		jobRead: jobRead{
			staticResponseChan: pdc.workerResponseChan,
			staticLength:       pdc.pieceLength,
			jobGeneric:         newJobGeneric(pdc.ctx, w.staticJobReadQueue, jobReadSectorMetadata{staticSector: pdc.workerSet.staticPieceRoots[pieceIndex]}),
		},
		staticOffset: pdc.pieceOffset,
		staticSector: pdc.workerSet.staticPieceRoots[pieceIndex],
	}

	// Submit the job.
	expectedCompleteTime, added := w.staticJobReadQueue.callAddWithEstimate(jrs)

	// Update the status of the piece that was launched. 'launched' should be
	// set to 'true'. If the launch failed, 'failed' should be set to 'true'. If
	// the launch succeeded, the expected completion time of the job should be
	// set.
	//
	// NOTE: We don't break out of the loop when we find a piece/worker
	// match. If all is going well, each worker should appear at most once
	// in this piece, but for the sake of defensive programming we check all
	// elements anyway.
	for _, pieceDownload := range pdc.availablePieces[pieceIndex] {
		if w.staticHostPubKeyStr == pieceDownload.worker.staticHostPubKeyStr {
			pieceDownload.launched = true
			if added {
				pieceDownload.expectedCompleteTime = expectedCompleteTime
			} else {
				// Launch failed: record the failure on the piece entry.
				pieceDownload.completed = true
				pieceDownload.downloadErr = errors.New("unable to add piece to queue")
			}
		}
	}
	return expectedCompleteTime, added
}
// threadedCollectAndOverdrivePieces will wait for responses from the workers.
// If workers fail or are late, additional workers will be launched to ensure
// that the download still completes.
func (pdc *projectDownloadChunk) threadedCollectAndOverdrivePieces() {
// Loop until the download has either failed or completed.
for {
// Check whether the download is comlete. An error means that the
// download has failed and can no longer make progress.
completed, err := pdc.finished()
if completed {
pdc.finalize()
return
}
if err != nil {
pdc.fail(err)
return
}
// Run the overdrive code. This code needs to be asynchronous so that it
// does not block receiving on the workerResponseChan. The overdrive
// code will determine whether launching an overdrive worker is
// necessary, and will return a channel that will be closed when enough
// time has elapsed that another overdrive worker should be considered.
workersUpdatedChan, workersLateChan := pdc.tryOverdrive()
// Determine when the next overdrive check needs to run.
select {
case <-pdc.ctx.Done():
pdc.fail(errors.New("download interrupted while waiting for responses"))
return
case jrr := <-pdc.workerResponseChan:
pdc.handleJobReadResponse(jrr)
case <-workersLateChan:
case <-workersUpdatedChan:
}
}
}
// getPieceOffsetAndLen translates a download request expressed as an offset
// and length within a chunk into the corresponding offset and length within
// a single piece, given the chunk's erasure coder.
func getPieceOffsetAndLen(ec modules.ErasureCoder, offset, length uint64) (pieceOffset, pieceLength uint64) {
	// Determine the segment size of the erasure coder. If the coder does not
	// support partial encoding, the full piece has to be downloaded.
	pieceSegmentSize, partialsSupported := ec.SupportsPartialEncoding()
	if !partialsSupported {
		pieceSegmentSize = modules.SectorSize
	}

	// Consistency check the erasure coder values; on failure fall back to
	// fetching the whole piece.
	if pieceSegmentSize == 0 {
		build.Critical("pcws has a bad erasure coder")
		return 0, modules.SectorSize
	}

	// The piece offset is the chunk offset divided across the data pieces,
	// rounded down to the nearest piece segment boundary.
	minPieces := uint64(ec.MinPieces())
	pieceOffset = (offset / minPieces / pieceSegmentSize) * pieceSegmentSize

	// The piece length is derived from the chunk offset at which the download
	// terminates: round that offset up to a full chunk segment, divide it
	// across the data pieces, and subtract the piece offset.
	chunkSegmentSize := pieceSegmentSize * minPieces
	chunkEnd := offset + length
	if rem := chunkEnd % chunkSegmentSize; rem != 0 {
		chunkEnd += chunkSegmentSize - rem
	}
	pieceLength = chunkEnd/minPieces - pieceOffset
	return pieceOffset, pieceLength
}
|
package factories
import "github.com/giventocode/azure-blob-md5/internal"
// BlobReader reads a blob sequentially in fixed-size ranges and exposes the
// results as a channel of ReadResponse values (see Read).
type BlobReader struct {
	readDepth int    // buffer size of the channel returned by Read
	az        azUtil // client used to download byte ranges of the blob
	blobName  string // name of the blob being read
	size      int64  // total size of the blob in bytes
}
// newBlobReader constructs a BlobReader for the named blob of the given
// size, using the package default read depth for the response channel.
func newBlobReader(blobName string, size int64, az azUtil) *BlobReader {
	reader := &BlobReader{
		readDepth: defaultReadDepth,
		az:        az,
		blobName:  blobName,
		size:      size,
	}
	return reader
}
// Source returns the name of the blob this reader reads from.
func (b *BlobReader) Source() string {
	return b.blobName
}
// Size returns the total size of the blob in bytes.
func (b *BlobReader) Size() int64 {
	return b.size
}
// Read streams the blob's contents as a sequence of ReadResponse values on
// the returned channel. Ranges are downloaded sequentially in chunks of up
// to 8 MB; if a download fails, a response carrying the error is sent and
// the channel is closed.
func (b *BlobReader) Read() <-chan ReadResponse {
	responses := make(chan ReadResponse, b.readDepth)

	go func() {
		defer close(responses)

		remaining := b.size
		count := 8 * internal.MB
		var offset int64
		for remaining != 0 {
			// The final chunk may be shorter than the full chunk size.
			if remaining < count {
				count = remaining
			}

			data, err := b.az.downloadRange(b.blobName, offset, count)
			if err != nil {
				responses <- ReadResponse{err: err}
				return
			}

			responses <- ReadResponse{data: data}
			offset += count
			remaining -= count
		}
	}()
	return responses
}
|
/**
*@Author: haoxiongxiao
*@Date: 2019/2/3
*@Description: CREATE GO FILE controller
*/
package admin
import (
"github.com/kataras/iris"
"math/rand"
"time"
)
// Common is the base controller for admin handlers. It carries the iris
// request context and provides shared JSON response helpers.
type Common struct {
	Ctx iris.Context
}
// ReturnJson writes a JSON response containing the given status code and
// message, then stops further handler execution.
//
// The variadic args are interpreted as alternating key/value data: a string
// argument sets the key under which the next non-string argument is stored.
// A non-string argument that precedes any string is stored under the empty
// key (matching the historical behavior of this helper).
func (c *Common) ReturnJson(status int, message string, args ...interface{}) {
	result := map[string]interface{}{
		"code":    status,
		"message": message,
	}
	key := ""
	for _, arg := range args {
		// Use the typed switch binding instead of re-asserting the value.
		switch v := arg.(type) {
		case string:
			key = v
		default:
			result[key] = v
		}
	}
	c.Ctx.JSON(result)
	c.Ctx.StopExecution()
}
// ReturnSuccess writes the standard success JSON response (code 10000,
// message "success") plus any key/value data supplied in args, then stops
// further handler execution. String args set the key under which the next
// non-string arg is stored.
func (c *Common) ReturnSuccess(args ...interface{}) {
	// Delegate to ReturnJson so the success path stays consistent with the
	// generic JSON response path instead of duplicating its logic.
	c.ReturnJson(10000, "success", args...)
}
// Krand returns a random string of the given size. kind selects the
// character set:
//
//	0 — digits only
//	1 — lowercase letters only
//	2 — uppercase letters only
//	any other value — digits and mixed-case letters
func (c *Common) Krand(size int, kind int) string {
	// Each entry is {number of characters in the set, ASCII code of the
	// first character}: digits, lowercase letters, uppercase letters.
	kinds := [][]int{{10, 48}, {26, 97}, {26, 65}}
	isAll := kind > 2 || kind < 0
	result := make([]byte, size)
	for i := 0; i < size; i++ {
		ikind := kind
		if isAll { // pick a random character set per position
			ikind = rand.Intn(3)
		}
		scope, base := kinds[ikind][0], kinds[ikind][1]
		result[i] = uint8(base + rand.Intn(scope))
	}
	return string(result)
}

// init seeds the global PRNG once at startup. The previous implementation
// re-seeded inside Krand on every call, which is wasteful and can repeat
// sequences when the method is invoked in quick succession (same
// nanosecond timestamp).
func init() {
	rand.Seed(time.Now().UnixNano())
}
|
package alicloud
import (
"github.com/hashicorp/terraform/helper/resource"
"testing"
)
// testAccAlicloudDnsDomainsDataSource runs a single-step acceptance test for
// the alicloud_dns_domains data source with the given config. It always
// asserts the data source ID and then applies any additional checks. This
// helper removes the scaffolding duplicated across the tests below.
func testAccAlicloudDnsDomainsDataSource(t *testing.T, config string, checks ...resource.TestCheckFunc) {
	allChecks := append(
		[]resource.TestCheckFunc{
			testAccCheckAlicloudDataSourceID("data.alicloud_dns_domains.domain"),
		},
		checks...,
	)
	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: config,
				Check:  resource.ComposeTestCheckFunc(allChecks...),
			},
		},
	})
}

// TestAccAlicloudDnsDomainsDataSource_ali_domain filters on ali_domain and
// verifies all attributes of the single expected domain.
func TestAccAlicloudDnsDomainsDataSource_ali_domain(t *testing.T) {
	testAccAlicloudDnsDomainsDataSource(t, testAccCheckAlicloudDomainsDataSourceAliDomainConfig,
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.#", "1"),
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.0.domain_id", "6f1a920c-c4a0-4231-98ea-7c4e9a89218a"),
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.0.domain_name", "heguimin.top"),
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.0.version_code", "mianfei"),
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.0.group_name", "newfish"),
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.0.group_id", "85ab8713-4a30-4de4-9d20-155ff830f651"),
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.0.puny_code", "heguimin.top"),
	)
}

// TestAccAlicloudDnsDomainsDataSource_version_code filters by version code
// and expects two matching domains.
func TestAccAlicloudDnsDomainsDataSource_version_code(t *testing.T) {
	testAccAlicloudDnsDomainsDataSource(t, testAccCheckAlicloudDomainsDataSourceVersionCodeConfig,
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.#", "2"),
	)
}

// TestAccAlicloudDnsDomainsDataSource_name_regex filters by a domain name
// regex and expects one matching domain.
func TestAccAlicloudDnsDomainsDataSource_name_regex(t *testing.T) {
	testAccAlicloudDnsDomainsDataSource(t, testAccCheckAlicloudDomainsDataSourceNameRegexConfig,
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.#", "1"),
	)
}

// TestAccAlicloudDnsDomainsDataSource_group_name_regex filters by a group
// name regex and expects one matching domain.
func TestAccAlicloudDnsDomainsDataSource_group_name_regex(t *testing.T) {
	testAccAlicloudDnsDomainsDataSource(t, testAccCheckAlicloudDomainsDataSourceGroupNameRegexConfig,
		resource.TestCheckResourceAttr("data.alicloud_dns_domains.domain", "domains.#", "1"),
	)
}
// Test configuration: select only domains registered through Alibaba Cloud.
const testAccCheckAlicloudDomainsDataSourceAliDomainConfig = `
data "alicloud_dns_domains" "domain" {
  ali_domain = true
}`

// Test configuration: filter domains by version code ("mianfei" = free tier).
const testAccCheckAlicloudDomainsDataSourceVersionCodeConfig = `
data "alicloud_dns_domains" "domain" {
  version_code = "mianfei"
}`

// Test configuration: filter domains whose name matches a regex prefix.
const testAccCheckAlicloudDomainsDataSourceNameRegexConfig = `
data "alicloud_dns_domains" "domain" {
  domain_name_regex = "^hegui"
}`

// Test configuration: filter domains by group name regex (matches all groups).
const testAccCheckAlicloudDomainsDataSourceGroupNameRegexConfig = `
data "alicloud_dns_domains" "domain" {
  group_name_regex = ".*"
}`
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.