text stringlengths 11 4.05M |
|---|
package remove
import "store"
// RemoveEmployeesFromList marks every employee in *employees whose ID appears
// in ids as no longer present (There = false). The slice is mutated in place.
func RemoveEmployeesFromList(ids []int, employees *([]store.Employee)){
	// Build a membership set once so the scan is O(len(ids)+len(*employees))
	// instead of the original quadratic nested loop.
	idSet := make(map[int]struct{}, len(ids))
	for _, id := range ids {
		idSet[id] = struct{}{}
	}
	for i := range *employees {
		if _, ok := idSet[(*employees)[i].GetID()]; ok {
			(*employees)[i].There = false
		}
	}
}
// RemoveEmployeesFromIdEmpMap marks the employee stored under each id in ids
// as not present. The value is copied out, mutated and written back because a
// map value cannot be addressed directly.
func RemoveEmployeesFromIdEmpMap(ids []int, idEmpMap *map[int](store.Employee)){
	for _, id := range ids {
		// Direct lookup replaces the original full scan over every map key.
		if emp, ok := (*idEmpMap)[id]; ok {
			emp.There = false
			(*idEmpMap)[id] = emp
		}
	}
}
// RemoveEmployeesFromDeptEmpMap marks employees with a matching ID as not
// present in every department's employee slice.
func RemoveEmployeesFromDeptEmpMap(ids []int, deptEmpMap *map[string]*([](store.Employee))){
	// Membership set: avoids re-scanning every slice once per id.
	idSet := make(map[int]struct{}, len(ids))
	for _, id := range ids {
		idSet[id] = struct{}{}
	}
	for _, emps := range *deptEmpMap {
		for i := range *emps {
			if _, ok := idSet[(*emps)[i].GetID()]; ok {
				(*emps)[i].There = false
			}
		}
	}
}
// RemoveEmployeesFromLocEmpMap marks employees with a matching ID as not
// present in every location's (pincode's) employee slice.
func RemoveEmployeesFromLocEmpMap(ids []int, locEmpMap *map[int]*([]store.Employee)){
	idSet := make(map[int]struct{}, len(ids))
	for _, id := range ids {
		idSet[id] = struct{}{}
	}
	for _, emps := range *locEmpMap {
		for i := range *emps {
			if _, ok := idSet[(*emps)[i].GetID()]; ok {
				(*emps)[i].There = false
			}
		}
	}
}
|
package main
// department aggregates the identifiers owned by one department, parsed from
// its JSON description. Instructors, courses and rooms are kept as sets.
type department struct {
	jsonobj     map[string]interface{}
	instructors map[string]bool
	courses     map[string]bool
	rooms       map[string]bool
}

// init populates the department from a decoded JSON object. IDs are read from
// obj["db"]["instructors"|"courses"|"rooms"]; a missing or wrongly-typed
// section is silently skipped, leaving the corresponding set empty.
func (dep *department) init(obj map[string]interface{}) {
	dep.jsonobj = obj
	dep.instructors = map[string]bool{}
	dep.courses = map[string]bool{}
	dep.rooms = map[string]bool{}
	dbs, ok := obj["db"].(map[string]interface{})
	if !ok {
		return
	}
	// fill copies one string array out of dbs into the given set.
	fill := func(key string, set map[string]bool) {
		if items, ok := dbs[key].([]interface{}); ok {
			for _, item := range items {
				set[item.(string)] = true
			}
		}
	}
	fill("instructors", dep.instructors)
	fill("courses", dep.courses)
	fill("rooms", dep.rooms)
}

// containsInstructor reports whether id is one of this department's instructors.
func (dep *department) containsInstructor(id string) bool {
	return dep.instructors[id]
}

// containsCourse reports whether id is one of this department's courses.
func (dep *department) containsCourse(id string) bool {
	return dep.courses[id]
}

// containsRoom reports whether id is one of this department's rooms.
func (dep *department) containsRoom(id string) bool {
	return dep.rooms[id]
}
|
package destinationrule
import (
"encoding/json"
"errors"
"fmt"
"github.com/gogo/protobuf/jsonpb"
istio "istio.io/api/networking/v1alpha3"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
)
// destinationRuleValidator validates an unstructured DestinationRule object
// and holds the typed spec decoded from it by setup().
type destinationRuleValidator struct {
	destinationRule *unstructured.Unstructured // raw object under validation
	spec *istio.DestinationRule // filled by setup()
}
// mapToSpec converts a generic JSON-decoded map into a typed Istio
// DestinationRule spec by round-tripping it through JSON/jsonpb.
func mapToSpec(m map[string]interface{}) (*istio.DestinationRule, error) {
	raw, err := json.Marshal(m)
	if err != nil {
		return nil, err
	}
	spec := &istio.DestinationRule{}
	if err := jsonpb.UnmarshalString(string(raw), spec); err != nil {
		return nil, err
	}
	return spec, nil
}
// setup decodes the "spec" field of the raw object into v.spec.
// A missing or non-map spec is now reported as an error; the original
// panicked on the unchecked type assertion for malformed input.
func (v *destinationRuleValidator) setup() error {
	specMap, ok := v.destinationRule.Object["spec"].(map[string]interface{})
	if !ok {
		return errors.New("destinationrule spec is missing or not an object")
	}
	spec, err := mapToSpec(specMap)
	if err != nil {
		return err
	}
	v.spec = spec
	return nil
}
// validateKind ensures the object's kind matches the expected Kind constant.
func (v *destinationRuleValidator) validateKind() error {
	if v.destinationRule.GetKind() == Kind {
		return nil
	}
	return fmt.Errorf("kind must be %s", Kind)
}
// orderedFuncs returns the validation steps in execution order; setup must
// run first so later steps can rely on the decoded spec.
// Receiver renamed to v for consistency with the type's other methods.
func (v *destinationRuleValidator) orderedFuncs() []func() error {
	return []func() error{
		v.setup,
		v.validateKind,
	}
}
// validate runs every validation step in order, stopping at the first failure.
func (v *destinationRuleValidator) validate() error {
	for _, step := range v.orderedFuncs() {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}
// validateUpdate checks that the resource targets the namespace from the
// request path and then runs the full validator chain.
// NOTE(review): the name argument is currently unused — confirm whether a
// name/path consistency check was intended.
func validateUpdate(namespace string, name string, destinationrule *unstructured.Unstructured) error {
	if destinationrule.GetNamespace() != namespace {
		return errors.New("resource namespace not equal to path namespace")
	}
	v := destinationRuleValidator{destinationRule: destinationrule}
	return v.validate()
}
// setUpdateDefaultValues fills the path namespace into the resource when the
// payload did not specify one.
func setUpdateDefaultValues(namespace string, destinationrule *unstructured.Unstructured) {
	if len(destinationrule.GetNamespace()) == 0 {
		destinationrule.SetNamespace(namespace)
	}
}
// UpdateDestinationRule defaults the namespace, validates the resource, and
// submits the update through the dynamic client. Validation failures are
// returned before any API call is made.
func UpdateDestinationRule(dyclient dynamic.NamespaceableResourceInterface, namespace string, name string, destinationrule *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	setUpdateDefaultValues(namespace, destinationrule)
	if err := validateUpdate(namespace, name, destinationrule); err != nil {
		return nil, err
	}
	return dyclient.Namespace(namespace).Update(destinationrule, v1.UpdateOptions{})
}
// DeleteDestinationRule deletes the named DestinationRule in the given
// namespace with default deletion options.
func DeleteDestinationRule(dyclient dynamic.NamespaceableResourceInterface, namespace string, name string) error {
	return dyclient.Namespace(namespace).Delete(name, &v1.DeleteOptions{})
}
|
package controllers
import (
// "fmt"
"openvpn/models"
"github.com/astaxie/beego"
)
// UpdateallController handles the admin action that refreshes the
// configuration for all users.
type UpdateallController struct {
	beego.Controller
}
// Get updates all users and redirects back to the index page.
// Unauthenticated visitors are bounced to the login page first.
func (c *UpdateallController) Get() {
	// Require a logged-in session before doing anything.
	if !checkAccount(c.Ctx) {
		c.Redirect("/login", 302)
		return
	}
	if err := models.UpdateAllUser(); err != nil {
		// Best effort: log the failure but still return to the index page,
		// matching the original behavior.
		beego.Error(err)
	}
	c.Redirect("/", 302)
}
|
package gbt36104
import (
"testing"
"github.com/stretchr/testify/assert"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
// TestGormGen verifies that the generated Organization model can be
// auto-migrated against a SQLite database.
func TestGormGen(t *testing.T) {
	// NOTE(review): this creates test.db in the working directory; consider
	// sqlite.Open(":memory:") to avoid the on-disk artifact — confirm the
	// file is not relied upon elsewhere.
	db, err := gorm.Open(sqlite.Open("test.db"), &gorm.Config{
		DisableForeignKeyConstraintWhenMigrating: true,
	})
	assert.NoError(t, err)
	assert.NoError(t, db.AutoMigrate(Organization{}))
}
|
package runner
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
)
// Run dumps a PostgreSQL database to a gzipped file in dir, then prunes old
// dumps so that at most `backup` files remain.
//
//	database – database name to dump
//	bin      – path to the dump binary (e.g. pg_dump)
//	dir      – directory receiving <database>__<timestamp>.gz files
//	backup   – number of most recent dumps to keep
//	verbose  – >0 enables progress logging
//
// NOTE(review): the shell command is assembled by string interpolation and
// executed via `su - postgres`; database/bin/dir must be trusted input.
func Run(database string, bin string, dir string, backup int, verbose int) error {
	// logf is named to avoid shadowing the standard log package — the
	// original declared a local variable called `log`, which only compiled
	// because the variable is not yet in scope inside its own initializer.
	logf := func(text string) {
		if verbose > 0 {
			log.Println(text)
		}
	}
	logList := func(text string, items []string) {
		logf(text)
		for _, item := range items {
			logf(fmt.Sprintf(" %s", item))
		}
	}
	logf("任务开始")
	// Builds: su - postgres -c "<bin> -c <db> -p 5432" | gzip > dir/db__ts.gz
	cmd := fmt.Sprintf("su - postgres -c \"%s -c %s -p %d \"| gzip > %s/%s__%s.gz",
		bin,      // dump binary path
		database, // database name
		5432,     // port
		dir,      // backup directory
		database, // database name again, used as the file prefix
		time.Now().Format("20060102150405"),
	)
	logf(fmt.Sprintf("cmd:%s", cmd))
	output, err := exec.Command("bash", "-c", cmd).CombinedOutput()
	if err != nil {
		return err
	}
	logf(fmt.Sprintf("结果:%s", output))
	globPattern := fmt.Sprintf("%s/%s__*.gz", dir, database)
	logf(fmt.Sprintf("glob_pattern:%s", globPattern))
	matches, err := filepath.Glob(globPattern)
	if err != nil {
		return err
	}
	logList("文件目录", matches)
	// fileKey extracts the timestamp part of a dump path for sorting; names
	// without a "__" separator sort with key 0 (i.e. oldest).
	fileKey := func(path string) (int64, error) {
		pathParts := strings.Split(path, "/")
		name := pathParts[len(pathParts)-1]
		base := strings.Split(name, ".")[0]
		prefixParts := strings.Split(base, "__")
		if len(prefixParts) < 2 {
			return int64(0), nil
		}
		return strconv.ParseInt(prefixParts[1], 10, 64)
	}
	// Sort newest first so matches[backup:] is the stale tail.
	sort.Slice(matches, func(i, j int) bool {
		ki, _ := fileKey(matches[i])
		kj, _ := fileKey(matches[j])
		return ki > kj
	})
	logList("排序文件目录", matches)
	lenMatches := len(matches)
	logf(fmt.Sprintf("文件数目:%d, 最大备份数目:%d", lenMatches, backup))
	if lenMatches > backup {
		removeFileList := matches[backup:]
		logList("要移除的文件列表", removeFileList)
		for _, path := range removeFileList {
			err = os.Remove(path)
			logf(fmt.Sprintf("移除路劲:%s", path))
			if err != nil {
				return err
			}
		}
	}
	return nil
}
|
package db
import (
"fmt"
"time"
)
// PostgreSQLConfig carries connection settings for a PostgreSQL database,
// populated from environment variables via envconfig tags.
// NOTE(review): the Driver default is "mysql" despite the type name —
// confirm whether it should default to "postgres".
type PostgreSQLConfig struct {
	Server          string        `envconfig:"server"`
	Port            string        `envconfig:"port"`
	User            string        `envconfig:"user"`
	Password        string        `envconfig:"password"`
	DatabaseName    string        `envconfig:"db_name"`
	DialTimeout     string        `envconfig:"dial_timeout" default:"2s"`
	IOTimeout       string        `envconfig:"io_timeout" default:"5s"`
	MaxOpenConns    int           `envconfig:"maxopenconns"`
	MaxIdleConns    int           `envconfig:"maxidleconns" default:"2"`
	Driver          string        `envconfig:"driver" default:"mysql"`
	ConnMaxLifetime time.Duration `envconfig:"connmaxlifetime" default:"10m"`
	MigrationsTable string        `envconfig:"migrations_table" default:"migrations"`
}

// GetDsn renders the config as a space-separated key=value DSN string.
func (cfg *PostgreSQLConfig) GetDsn() string {
	return fmt.Sprintf(
		"port=%s host=%s user=%s password=%s database=%s",
		cfg.Port, cfg.Server, cfg.User, cfg.Password, cfg.DatabaseName,
	)
}
|
package rate
import (
"math/rand"
)
// RateValue pairs a selection weight with an arbitrary payload.
type RateValue struct {
	Rate  float64
	Value interface{}
}

// Rate is a weighted random picker: values are returned with probability
// proportional to the weight they were added with.
type Rate struct {
	MaxRate    float64
	RateValues []RateValue
	RandFunc   func() float64 // return number in [0.0,1.0)
}

// NewRate returns an empty picker.
func NewRate() *Rate {
	return &Rate{}
}

// Add registers value with the given selection weight.
func (r *Rate) Add(rate float64, value interface{}) {
	r.MaxRate += rate
	r.RateValues = append(r.RateValues, RateValue{Rate: rate, Value: value})
}

// Generate draws one value at random, weighted by rate. It lazily installs
// rand.Float64 as the random source when none was configured.
func (r *Rate) Generate() interface{} {
	if r.RandFunc == nil {
		r.RandFunc = rand.Float64
	}
	target := r.RandFunc() * r.MaxRate
	var upper float64
	for _, rv := range r.RateValues {
		upper += rv.Rate
		if target < upper {
			return rv.Value
		}
	}
	return nil // unreachable code
}
|
package main
import (
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"path/filepath"
"strconv"
"strings"
"github.com/gorilla/securecookie"
"github.com/gorilla/sessions"
)
var configFile = flag.String("config", "./config.json", "config file")
var store = sessions.NewCookieStore(securecookie.GenerateRandomKey(32))
var templates map[string]*template.Template
var loginBytes, _ = ioutil.ReadFile("./templates/login.html")
var loginTemp, _ = template.New("login").Parse(string(loginBytes))
// Load templates on program initialisation: every base page under
// templates/bases/ is parsed together with all shared partials under
// templates/includes/.
func init() {
	//https: //elithrar.github.io/article/approximating-html-template-inheritance/
	if templates == nil {
		templates = make(map[string]*template.Template)
	}
	templatesDir := "./templates/"
	//pages to show indeed
	bases, err := filepath.Glob(templatesDir + "bases/*.html")
	if err != nil {
		log.Fatal(err)
	}
	//widgts, header, footer, sidebar, etc.
	includes, err := filepath.Glob(templatesDir + "includes/*.html")
	if err != nil {
		log.Fatal(err)
	}
	// Generate our templates map from our bases/ and includes/ directories.
	for _, base := range bases {
		// Copy before appending: the original `append(includes, base)` may
		// reuse includes' backing array across iterations, so a later base
		// could overwrite the file list captured for an earlier one.
		files := make([]string, 0, len(includes)+1)
		files = append(files, includes...)
		files = append(files, base)
		templates[filepath.Base(base)] = template.Must(template.ParseFiles(files...))
	}
}
// renderTemplate writes the named template from the global map to w, or
// returns an error when the template was never registered.
func renderTemplate(w http.ResponseWriter, name string, data interface{}) error {
	if tmpl, ok := templates[name]; ok {
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		return tmpl.ExecuteTemplate(w, name, data)
	}
	return fmt.Errorf("The template %s does not exist.", name)
}
// main wires up all HTTP routes (login/logout, the admin pages, static
// assets) and starts the web server.
func main() {
	flag.Parse()
	loadConfig()
	// Logout: expire the session cookie and return to the index.
	http.HandleFunc("/logout", func(rw http.ResponseWriter, req *http.Request) {
		session, _ := store.Get(req, "gosessionid")
		session.Options = &sessions.Options{MaxAge: -1, Path: "/"}
		session.Save(req, rw)
		http.Redirect(rw, req, "/", http.StatusFound)
	})
	http.HandleFunc("/", authWrapper(recoverWrapper(indexHandler)))
	// Login: POST checks credentials; GET just renders the form.
	http.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) {
		var errMsg = ""
		if r.Method == http.MethodPost {
			username := r.FormValue("username")
			password := r.FormValue("password")
			if username == serverConfig.User && password == serverConfig.Password {
				session, _ := store.Get(r, "gosessionid")
				session.Values["userLogin"] = username
				session.Save(r, w)
				http.Redirect(w, r, "/services", http.StatusFound)
				return
			}
			errMsg = "username or password is not correct"
			loginTemp.ExecuteTemplate(w, "login", errMsg)
		}
		if r.Method == http.MethodGet {
			loginTemp.ExecuteTemplate(w, "login", nil)
		}
	})
	http.HandleFunc("/services", authWrapper(recoverWrapper(servicesHandler)))
	http.HandleFunc("/s/deactivate/", authWrapper(recoverWrapper(deactivateHandler)))
	http.HandleFunc("/s/activate/", authWrapper(recoverWrapper(activateHandler)))
	http.HandleFunc("/s/m/", authWrapper(recoverWrapper(modifyHandler)))
	http.HandleFunc("/registry", authWrapper(recoverWrapper(registryHandler)))
	fs := http.FileServer(http.Dir("web"))
	http.Handle("/static/", http.StripPrefix("/static/", fs))
	// Surface listen errors — the original discarded them, so e.g. a busy
	// port made the program exit silently.
	if err := http.ListenAndServe(serverConfig.Host+":"+strconv.Itoa(serverConfig.Port), nil); err != nil {
		log.Fatal(err)
	}
}
// authWrapper gates a handler behind session authentication: requests
// without a logged-in user are redirected to /login.
func authWrapper(h func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		session, _ := store.Get(r, "gosessionid")
		if session.Values["userLogin"] == nil {
			http.Redirect(w, r, "/login", http.StatusFound)
			return
		}
		h(w, r)
	}
}
// recoverWrapper converts a panic inside h into a rendered error page so one
// bad request cannot crash the whole server.
func recoverWrapper(h func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			re := recover()
			if re == nil {
				return
			}
			fmt.Println("Recovered in registryHandler", re)
			// Normalize the recovered value into an error.
			var err error
			switch t := re.(type) {
			case string:
				err = errors.New(t)
			case error:
				err = t
			default:
				err = errors.New("Unknown error")
			}
			w.WriteHeader(http.StatusOK)
			renderTemplate(w, "error.html", err.Error())
		}()
		h(w, r)
	}
}
func indexHandler(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/services", http.StatusFound)
}
// servicesHandler renders the services page with the current registry list.
func servicesHandler(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{
		"services": reg.fetchServices(),
	}
	renderTemplate(w, r.URL.Path[1:]+".html", data)
}
// deactivateHandler decodes the base64 "name@address" id from the URL tail
// and deactivates that service instance, then returns to the listing.
func deactivateHandler(w http.ResponseWriter, r *http.Request) {
	i := strings.LastIndex(r.URL.Path, "/")
	base64ID := r.URL.Path[i+1:]
	if b, err := base64.StdEncoding.DecodeString(base64ID); err == nil {
		s := string(b)
		// Guard against ids without '@': the original s[0:j] with j == -1
		// panicked on malformed input.
		if j := strings.Index(s, "@"); j >= 0 {
			reg.deactivateService(s[:j], s[j+1:])
		}
	}
	http.Redirect(w, r, "/services", http.StatusFound)
}
// activateHandler decodes the base64 "name@address" id from the URL tail and
// re-activates that service instance, then returns to the listing.
func activateHandler(w http.ResponseWriter, r *http.Request) {
	i := strings.LastIndex(r.URL.Path, "/")
	base64ID := r.URL.Path[i+1:]
	if b, err := base64.StdEncoding.DecodeString(base64ID); err == nil {
		s := string(b)
		// Guard against ids without '@': the original s[0:j] with j == -1
		// panicked on malformed input.
		if j := strings.Index(s, "@"); j >= 0 {
			reg.activateService(s[:j], s[j+1:])
		}
	}
	http.Redirect(w, r, "/services", http.StatusFound)
}
// modifyHandler decodes the base64 "name@address" id from the URL tail and
// replaces that instance's metadata with the request's query string.
func modifyHandler(w http.ResponseWriter, r *http.Request) {
	metadata := r.URL.Query()
	i := strings.LastIndex(r.URL.Path, "/")
	base64ID := r.URL.Path[i+1:]
	if b, err := base64.StdEncoding.DecodeString(base64ID); err == nil {
		s := string(b)
		// Guard against ids without '@': the original s[0:j] with j == -1
		// panicked on malformed input.
		if j := strings.Index(s, "@"); j >= 0 {
			reg.updateMetadata(s[:j], s[j+1:], metadata.Encode())
		}
	}
	http.Redirect(w, r, "/services", http.StatusFound)
}
// registryHandler shows and updates the registry configuration. If rendering
// panics after a config change, the previous configuration is written back to
// disk and reloaded before the panic is re-raised.
func registryHandler(w http.ResponseWriter, r *http.Request) {
	oldConfig := serverConfig
	defer func() {
		if re := recover(); re != nil {
			if bytes, err := json.MarshalIndent(&oldConfig, "", "\t"); err == nil {
				// Log a failed rollback write instead of silently dropping
				// it (the original ignored this error).
				if err = ioutil.WriteFile("./config.json", bytes, 0644); err != nil {
					log.Println(err)
				}
				loadConfig()
			}
			panic(re)
		}
	}()
	if r.Method == "POST" {
		serverConfig.RegistryType = r.FormValue("registry_type")
		serverConfig.RegistryURL = r.FormValue("registry_url")
		serverConfig.ServiceBaseURL = r.FormValue("base_path")
		if bytes, err := json.MarshalIndent(&serverConfig, "", "\t"); err == nil {
			// Same fix: surface persistence failures.
			if err = ioutil.WriteFile("./config.json", bytes, 0644); err != nil {
				log.Println(err)
			}
			loadConfig()
		}
	}
	renderTemplate(w, r.URL.Path[1:]+".html", serverConfig)
}
// Registry abstracts a service-registry backend that the dashboard reads
// from and administers.
type Registry interface {
	initRegistry()
	fetchServices() []*Service
	deactivateService(name, address string) error
	activateService(name, address string) error
	updateMetadata(name, address string, metadata string) error
}
// Service is a service endpoint
type Service struct {
	ID       string // presumably name@address, matching the base64 ids the /s/* handlers decode — confirm
	Name     string
	Address  string
	Metadata string
	State    string
	Group    string
}
|
package test
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func NewRequest(
method, url string,
ops ...func(*http.Request)) *http.Request {
req, _ := http.NewRequest(method, url, nil)
for _, op := range ops {
op(req)
}
return req
}
func AddRequestHeader(name, value string) func(*http.Request) {
return func(r *http.Request) {
r.Header.Add(name, value)
}
}
func SetRemoteAddr(addr string) func(*http.Request) {
return func(r *http.Request) {
r.RemoteAddr = addr
}
}
func SetHost(host string) func(*http.Request) {
return func(r *http.Request) {
r.Host = host
}
}
func NewResponse(ops ...func(*http.Response)) *http.Response {
res := &http.Response{
Status: http.StatusText(http.StatusOK),
StatusCode: http.StatusOK,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Close: false,
Header: make(map[string][]string),
}
for _, op := range ops {
op(res)
}
return res
}
func AddResponseHeader(name, value string) func(*http.Response) {
return func(r *http.Response) {
r.Header.Add(name, value)
}
}
func SetResponseStatusCode(code int) func(*http.Response) {
return func(r *http.Response) {
r.StatusCode = code
}
}
func SetResponseBody(body string) func(*http.Response) {
return func(r *http.Response) {
r.Body = ioutil.NopCloser(bytes.NewBufferString(body))
}
}
// ParseURL parses u, aborting the test binary when it is invalid.
func ParseURL(u string) *url.URL {
	parsed, err := url.Parse(u)
	if err == nil {
		return parsed
	}
	log.Fatalf("cannot parse url: %s: %v", u, err)
	return nil
}
// assertHeaderSubset checks that every key of want also appears in got with a
// matching (order-insensitive) value set.
func assertHeaderSubset(t *testing.T, want http.Header, got http.Header) {
	for k, wv := range want {
		found := false
		for gk, gv := range got {
			if k == gk {
				found = true
				assert.ElementsMatch(t, wv, gv,
					fmt.Sprintf("Header mismatch '%s': '%s' instead of '%s'", k,
						strings.Join(gv, ", "),
						strings.Join(wv, ", ")))
			}
		}
		assert.Equal(t, true, found, fmt.Sprintf("Header not found '%s'", k))
	}
}

// AssertHeaderEqual asserts that expected and h contain exactly the same
// header keys with matching value sets, reporting every difference. The two
// subset checks replace the original's duplicated symmetric loops.
func AssertHeaderEqual(t *testing.T, expected http.Header, h http.Header) {
	assertHeaderSubset(t, expected, h)
	assertHeaderSubset(t, h, expected)
}
|
package clusterpipelinetemplate
import (
"log"
devopsv1alpha1 "alauda.io/devops-apiserver/pkg/apis/devops/v1alpha1"
devopsclient "alauda.io/devops-apiserver/pkg/client/clientset/versioned"
"alauda.io/diablo/src/backend/api"
"alauda.io/diablo/src/backend/errors"
"alauda.io/diablo/src/backend/resource/dataselect"
)
// ClusterPipelineTemplateList contains a list of ClusterPipelineTemplate
type ClusterPipelineTemplateList struct {
	ListMeta api.ListMeta `json:"listMeta"` // total item count after filtering
	Items []ClusterPipelineTemplate `json:"clusterpipelinetemplates"`
	Errors []error `json:"errors"` // non-critical errors encountered while listing
}
// ClusterPipelineTemplate is the presentation-layer view of the Kubernetes
// ClusterPipelineTemplate resource.
type ClusterPipelineTemplate struct {
	ObjectMeta api.ObjectMeta `json:"metadata"`
	api.TypeMeta `json:",inline"`
	Spec devopsv1alpha1.PipelineTemplateSpec `json:"spec"`
}
// GetClusterPipelineTemplateList lists all ClusterPipelineTemplates (a
// cluster-scoped resource; namespace is unused) filtered by dsQuery.
func GetClusterPipelineTemplateList(client devopsclient.Interface, namespace string, dsQuery *dataselect.DataSelectQuery) (*ClusterPipelineTemplateList, error) {
	originList, err := client.DevopsV1alpha1().ClusterPipelineTemplates().List(api.ListEverything)
	if err != nil {
		log.Println("error when listing ClusterPipelineTemplate", err)
	}
	nonCriticalErrors, criticalError := errors.HandleError(err)
	if criticalError != nil {
		return nil, criticalError
	}
	// Guard: a non-critical error can still leave originList nil; the
	// original dereferenced originList.Items unconditionally.
	if originList == nil {
		return toList(nil, nonCriticalErrors, dsQuery), nil
	}
	return toList(originList.Items, nonCriticalErrors, dsQuery), nil
}
// GetClusterPipelineTemplate gets a specific ClusterPipelineTemplate by name
// (namespace is unused; the resource is cluster-scoped).
func GetClusterPipelineTemplate(client devopsclient.Interface, namespace string, name string) (*ClusterPipelineTemplate, error) {
	origin, err := client.DevopsV1alpha1().ClusterPipelineTemplates().Get(name, api.GetOptionsInCache)
	if err != nil {
		log.Println("error when get clusterpipelinetemplate", err)
	}
	_, criticalError := errors.HandleError(err)
	if criticalError != nil {
		// Return the handled critical error; the original returned the raw
		// err here, inconsistent with GetClusterPipelineTemplateList.
		return nil, criticalError
	}
	result := toClusterPipelineTemplate(origin)
	return &result, nil
}
// toList converts raw API items into the presentation list, applying the
// dataselect filter/pagination query.
func toList(originList []devopsv1alpha1.ClusterPipelineTemplate, nonCriticalErrors []error, dsQuery *dataselect.DataSelectQuery) *ClusterPipelineTemplateList {
	filteredCells, filteredTotal := dataselect.GenericDataSelectWithFilter(toCells(originList), dsQuery)
	items := fromCells(filteredCells)
	list := ClusterPipelineTemplateList{
		Items: make([]ClusterPipelineTemplate, 0, len(items)),
		// Only the post-filter total is kept; the original assigned
		// ListMeta twice (len(originList) then filteredTotal).
		ListMeta: api.ListMeta{TotalItems: filteredTotal},
		Errors:   nonCriticalErrors,
	}
	for i := range items {
		list.Items = append(list.Items, toClusterPipelineTemplate(&items[i]))
	}
	return &list
}
// toClusterPipelineTemplate maps one API object to its presentation form,
// copying the metadata and spec and stamping the resource kind.
func toClusterPipelineTemplate(origin *devopsv1alpha1.ClusterPipelineTemplate) ClusterPipelineTemplate {
	return ClusterPipelineTemplate{
		ObjectMeta: api.NewObjectMeta(origin.ObjectMeta),
		TypeMeta: api.NewTypeMeta(api.ResourceKindClusterPipelineTemplate),
		Spec: origin.Spec,
	}
}
|
package main
import (
"fmt"
"math/rand"
"sync"
"time"
"../RateLimiter/client"
"../RateLimiter/models"
)
// main demonstrates the fixed-window rate limiter: 10 workers compete for a
// budget of 5 acquisitions per 15-second window, each sleeping a random 0-4s
// before releasing its token.
func main() {
	// Alternative limiter configurations, kept for reference:
	/*r, err := client.NewThrottleRateLimiter(
		&models.Config{
			Throttle: 1 * time.Second,
		})*/
	/*r, err := client.NewMaxConcurrencyLimiter(&models.Config{
		Limit: 2,
		TokenResetAfter: 10 * time.Second,
	})*/
	r, err := client.NewFixedWindowRateLimiter(&models.Config{
		Limit: 5,
		FixedInterval: 15 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	var wg sync.WaitGroup
	// NOTE(review): rand.Seed is deprecated since Go 1.20; harmless here.
	rand.Seed(time.Now().UnixNano())
	// doWork acquires a token, simulates work, and releases the token.
	doWork := func(id int) {
		defer wg.Done()
		token, err := r.Acquire()
		if err != nil {
			panic(err)
		}
		// NOTE(review): this message lacks a trailing \n, and "Worke"/"Woker"
		// below look like typos for "Worker" — confirm before changing output.
		fmt.Printf("rate limit token is %s and acquired at %s", token.ID, token.CreatedAt)
		n := rand.Intn(5)
		fmt.Printf("Worke %d sleeping for %d\n", id, n)
		time.Sleep(time.Duration(n) * time.Second)
		fmt.Printf("Woker %d done\n", id)
		r.Release(token)
	}
	// Launch 10 workers and wait for all of them to finish.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go doWork(i)
	}
	wg.Wait()
}
|
package impl
import "github.com/t-yuki/panick/internal"
// init registers this go1.8-specific implementation in the package's
// version-dispatch table.
func init() {
	iface.GetPanic["go1.8"] = GetPanic
}
// GetPanic returns the goroutine's currently active panic, or nil when no
// panic is in flight. getPanic is a bodyless declaration implemented outside
// Go (see the stubs at the bottom of this file).
func GetPanic() iface.Panic {
	if p := getPanic(); p != uintptr(0) {
		return &Panic{p: p}
	}
	return nil
}
// Panic wraps a raw runtime panic record identified by its address.
type Panic struct {
	p uintptr // address of the underlying runtime panic record
}

// Recovered reports whether this panic has been recovered.
func (p Panic) Recovered() bool {
	return panicRecovered(p.p)
}

// Aborted reports whether this panic was aborted.
func (p Panic) Aborted() bool {
	return panicAborted(p.p)
}

// Arg returns the value the panic was raised with.
func (p Panic) Arg() interface{} {
	return panicArg(p.p)
}

// Link returns the previous panic in the chain, or nil when there is none.
func (p Panic) Link() iface.Panic {
	p2 := panicLink(p.p)
	if p2 != uintptr(0) {
		return &Panic{p: p2}
	}
	return nil
}
// The following bodyless declarations are implemented outside Go (assembly /
// runtime-linked stubs); they read fields of the panic record at address p.
func getPanic() uintptr
func panicRecovered(p uintptr) bool
func panicAborted(p uintptr) bool
func panicLink(p uintptr) uintptr
func panicArg(p uintptr) interface{}
|
/*
This file is a modified version of 1 file in Deepak Jois' golang usbdrivedetector
Big thank you to him, you can view his original project here https://github.com/deepakjois/gousbdrivedetector
*/
package usbdrivedetector
import (
"bufio"
"bytes"
"log"
"os/exec"
"regexp"
"strings"
)
// Detect returns a list of file paths pointing to the root folder of
// kindle USB storage devices connected to the system.
func Detect() map[string]string {
	driveMap := make(map[string]string)
	// Matches df lines: "/dev/…  …%  /mount/path" — group 1 is the device,
	// group 2 the mount point.
	dfPattern := regexp.MustCompile(`^(/[^ ]+)[^%]+%[ ]+(.+)$`)
	out, err := exec.Command("df").Output()
	if err != nil {
		log.Printf("Error calling df: %s", err)
	}
	s := bufio.NewScanner(bytes.NewReader(out))
	for s.Scan() {
		// Run the regexp once per line; the original executed it three
		// times (MatchString + two FindStringSubmatch calls).
		groups := dfPattern.FindStringSubmatch(s.Text())
		if groups == nil {
			continue
		}
		device, rootPath := groups[1], groups[2]
		if isUSBStorage(device) {
			driveMap[rootPath] = getShortID(device)
		}
	}
	return driveMap
}
// isUSBStorage reports whether device is driven by usb-storage, according to
// its udevadm properties.
func isUSBStorage(device string) bool {
	const deviceVerifier = "ID_USB_DRIVER=usb-storage"
	args := []string{"info", "-q", "property", "-n", device}
	out, err := exec.Command("udevadm", args...).Output()
	if err != nil {
		// Don't log when checking /dev/root. Always an error unless running as root.
		if device != "/dev/root" {
			log.Printf("Error checking device %s: %s", device, err)
		}
		return false
	}
	return strings.Contains(string(out), deviceVerifier)
}
// getShortID returns the ID_SERIAL_SHORT udev property of device, or "" when
// it cannot be determined.
func getShortID(device string) string {
	idString := "ID_SERIAL_SHORT="
	cmd := "udevadm"
	args := []string{"info", "-q", "property", "-n", device}
	out, err := exec.Command(cmd, args...).Output()
	if err != nil {
		log.Printf("Error checking device %s: %s", device, err)
		return ""
	}
	for _, parameter := range strings.Split(string(out), "\n") {
		// HasPrefix, not Contains: with Contains, a line merely containing
		// the key mid-string passed the check, but TrimPrefix then returned
		// the whole line unchanged.
		if strings.HasPrefix(parameter, idString) {
			return strings.TrimPrefix(parameter, idString)
		}
	}
	return ""
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/gen2brain/beeep"
"github.com/tardisgo/tardisgo/goroot/haxe/go1.4/src/strconv"
"io/ioutil"
"net/http"
"os"
"time"
)
// CovidData mirrors the CoWIN calendarByPin/calendarByDistrict response: a
// list of vaccination centers, each with per-date sessions and optional
// per-vaccine fees.
type CovidData struct {
	Centers []struct {
		CenterID int `json:"center_id"`
		Name string `json:"name"`
		StateName string `json:"state_name"`
		DistrictName string `json:"district_name"`
		BlockName string `json:"block_name"`
		Pincode int `json:"pincode"`
		Lat int `json:"lat"`
		Long int `json:"long"`
		From string `json:"from"`
		To string `json:"to"`
		FeeType string `json:"fee_type"`
		// One entry per bookable date at this center.
		Sessions []struct {
			SessionID string `json:"session_id"`
			Date string `json:"date"`
			AvailableCapacity float32 `json:"available_capacity"`
			MinAgeLimit int `json:"min_age_limit"`
			Vaccine string `json:"vaccine"`
			Slots []string `json:"slots"`
		} `json:"sessions"`
		VaccineFees []struct {
			Vaccine string `json:"vaccine"`
			Fee string `json:"fee"`
		} `json:"vaccine_fees,omitempty"`
	} `json:"centers"`
}
// main polls the CoWIN public API for open vaccination slots.
//
// Usage: prog (pin|did) <identifier> <frequency-minutes> [days-to-search]
//   - "pin": query by pincode; "did": query by district id
//   - frequency: minutes between polls (defaults to 1)
//   - days-to-search: number of consecutive days to query (default: today only)
func main() {
	queryType := os.Args[1]
	identifierID := os.Args[2]
	frequency := os.Args[3]
	daysToSearch := ""
	if len(os.Args) > 4 {
		daysToSearch = os.Args[4]
	}
	usingPin := false
	usingDistrictID := false
	pin := ""
	did := ""
	var daySpan int
	var frequencyInMinutes time.Duration
	daySpan = 0
	frequencyInMinutes = 1
	// Exactly one of usingPin / usingDistrictID becomes true.
	if queryType == "pin" {
		usingPin = true
	} else if queryType == "did" {
		usingDistrictID = true
	} else {
		panic("provide either `pin` or `did`")
	}
	if len(identifierID) == 0 {
		panic("provide appropriate `pin` or `did`")
	} else if queryType == "pin" {
		pin = identifierID
	} else {
		did = identifierID
	}
	if len(frequency) > 0 {
		// NOTE(review): strconv is imported from a vendored tardisgo path in
		// this file — presumably the standard library strconv was intended.
		parseInt, err := strconv.ParseInt(frequency, 10, 64)
		if err != nil {
			panic("wrong format of frequency")
		}
		frequencyInMinutes = time.Duration(parseInt)
	}
	if len(daysToSearch) > 0 {
		days, err := strconv.Atoi(daysToSearch)
		if err != nil {
			panic("wrong format of day span to search")
		}
		daySpan = days
	}
	loc, _ := time.LoadLocation("Asia/Calcutta")
	// ANSI escape: clear the screen and move the cursor home.
	fmt.Print("\033[H\033[2J")
	// Poll loop: query today plus daySpan-1 following days, then sleep.
	for usingPin {
		callCowinUsingPin(pin, GetDate(loc,0 ))
		for i := 1; i < daySpan; i++ {
			callCowinUsingPin(pin, GetDate(loc, i))
		}
		fmt.Print("\033[H\033[2J")
		time.Sleep(frequencyInMinutes * time.Minute)
	}
	for usingDistrictID {
		callCowinUsingDid(did, GetDate(loc,0 ))
		for i := 1; i < daySpan; i++ {
			callCowinUsingDid(did, GetDate(loc, i))
		}
		time.Sleep(frequencyInMinutes * time.Minute)
	}
}
func GetDate(loc *time.Location, offset int) string {
istNow := time.Now().AddDate(0,0,1*offset).In(loc)
year, month, day := istNow.Date()
todayString := fmt.Sprintf("%02d-%02d-%d", day, month, year)
return todayString
}
// callCowinUsingPin queries center availability by pincode for one date.
func callCowinUsingPin(pin string, date string) {
	fmt.Printf("Results for %s\n", date)
	callCowin("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode=" + pin + "&date=" + date)
}
// callCowinUsingDid queries center availability by district id for one date.
func callCowinUsingDid(did string, date string) {
	fmt.Printf("Results for %s\n", date)
	callCowin("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id=" + did + "&date=" + date)
}
// callCowin fetches one CoWIN calendar URL and, for every 18+ session with
// open capacity at a non-blacklisted center, prints the details and raises a
// desktop notification via beeep.
func callCowin(url string) {
	// A timeout keeps a stalled connection from hanging the polling loop
	// forever (the original client had none).
	client := &http.Client{Timeout: 30 * time.Second}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	req.Header.Add("accept", "application/json, text/plain, */*")
	res, err := client.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println(err)
		return
	}
	var covidData CovidData
	err = json.Unmarshal(body, &covidData)
	if err != nil {
		fmt.Println("-----------")
		fmt.Println(err)
		fmt.Println("-----------")
	}
	for i := range covidData.Centers {
		center := &covidData.Centers[i]
		for j := range center.Sessions {
			session := &center.Sessions[j]
			if session.MinAgeLimit != 18 {
				continue
			}
			if center.CenterID == 582783 { // blacklist a center
				continue
			}
			if session.AvailableCapacity <= 0 { // remove unusable centers
				continue
			}
			fmt.Println("+++++++++++++++Center Information+++++++++++++++++")
			fmt.Printf("Center ID:\t\t %d \n", center.CenterID)
			fmt.Printf("Center Name:\t\t %s \n", center.Name)
			fmt.Printf("Center Pincode:\t\t %d \n", center.Pincode)
			fmt.Printf("Center Lat:\t\t %d \n", center.Lat)
			fmt.Printf("Center Long:\t\t %d \n", center.Long)
			fmt.Printf("Date:\t\t\t %s \n", session.Date)
			fmt.Printf("Available Capacity:\t %f \n", session.AvailableCapacity)
			fmt.Printf("Vaccine type:\t %s \n", session.Vaccine)
			msgBody := fmt.Sprintf("Date: %s \nCenter Name: %s \nAvailable Capacity: %f", session.Date, center.Name, session.AvailableCapacity)
			if err := beeep.Alert("Found a center", msgBody, "assets/information.png"); err != nil {
				panic(err)
			}
		}
	}
}
|
package services
import (
"fmt"
"strings"
"github.com/apulis/AIArtsBackend/configs"
"github.com/apulis/AIArtsBackend/models"
)
// CreateVisualJob launches a background job for logdir and then records the
// corresponding VisualJob row in "scheduling" state.
func CreateVisualJob(userName string, jobName string, logdir string, description string) error {
	// Step 1: create the backing background job.
	relateJobId, err := createBackgroundJob(userName, jobName, logdir, description)
	if err != nil {
		fmt.Printf("create background job failed : [%+v]\n", err)
		return err
	}
	// Step 2: persist the visual job record pointing at it.
	record := models.VisualJob{
		UserName:    userName,
		Name:        jobName,
		Status:      "scheduling",
		LogPath:     logdir,
		Description: description,
		RelateJobId: relateJobId,
	}
	if err := models.CreateVisualJob(record); err != nil {
		fmt.Printf("create visual job record failed : [%+v]\n", err)
		return err
	}
	return nil
}
// GetAllVisualJobInfo refreshes all job statuses and returns one page of
// visual jobs plus the total job count and total page count.
func GetAllVisualJobInfo(userName string, pageNum int, pageSize int, orderBy string, status string, jobName string, order string) ([]models.VisualJob, int, int, error) {
	// Refresh every job's status before reading the page.
	if err := renewStatusInfo(userName); err != nil {
		fmt.Printf("job status renew fail : err[%+v]\n", err)
		return nil, 0, 0, err
	}
	jobList, err := models.GetAllVisualJobByArguments(userName, pageNum, pageSize, status, jobName, order, orderBy)
	if err != nil {
		fmt.Printf("get job list err[%+v]\n", err)
		return nil, 0, 0, err
	}
	totalJobsNum, err := models.GetVisualJobCountByArguments(userName, status, jobName)
	if err != nil {
		fmt.Printf("get job list count err[%+v]\n", err)
		return nil, 0, 0, err
	}
	// Ceiling division for the page count.
	totalPages := totalJobsNum / pageSize
	if totalJobsNum%pageSize != 0 {
		totalPages++
	}
	return jobList, totalJobsNum, totalPages, nil
}
// GetEndpointsPath resolves the externally reachable tensorboard URL of a
// visual job via its backing background job.
func GetEndpointsPath(userName string, visualJobId int) (string, error) {
	detail, err := models.GetVisualJobById(visualJobId)
	if err != nil {
		fmt.Printf("get visual job detail err[%+v]\n", err)
		return "", err
	}
	err, endpoint := GetTensorboardPath(userName, detail.RelateJobId)
	if err != nil {
		fmt.Printf("get endpoint path err[%+v]\n", err)
		return "", err
	}
	return endpoint.AccessPoint, nil
}
// GetTensorboardPath queries the DLTS endpoint list of a job and extracts its
// tensorboard endpoint; AccessPoint is only filled while it is running.
// NOTE(review): the (error, value) return order is reversed from Go
// convention but is kept for existing callers.
func GetTensorboardPath(userName, jobId string) (error, *models.EndpointWrapper) {
	url := fmt.Sprintf("%s/endpoints?userName=%s&jobId=%s", configs.Config.DltsUrl, userName, jobId)
	fmt.Println(url)
	endpoints := make([]models.Endpoint, 0)
	if err := DoRequest(url, "GET", nil, nil, &endpoints); err != nil {
		fmt.Printf("get visual job path err[%+v]\n", err)
		return err, nil
	}
	wrapper := &models.EndpointWrapper{}
	for _, ep := range endpoints {
		if strings.ToLower(ep.Name) != "tensorboard" {
			continue
		}
		wrapper.Name = ep.Name
		wrapper.Status = ep.Status
		if ep.Status == "running" {
			wrapper.AccessPoint = fmt.Sprintf("http://%s.%s/endpoints/%s/", ep.NodeName, ep.Domain, ep.Port)
		}
		break
	}
	return nil, wrapper
}
// StopVisualJob kills the background DLTS job backing a visual job, marks
// the visual job record as paused, and deletes the background job.
//
// Bug fix: the three diagnostic messages were attached to the wrong
// operations (kill/update/delete were rotated) and misspelled "backgournd".
func StopVisualJob(userName string, jobId int) error {
	targetJob, err := models.GetVisualJobById(jobId)
	if err != nil {
		fmt.Printf("get job detail err[%+v]\n", err)
		return err
	}
	backgroundJobId := targetJob.RelateJobId
	// Ask DLTS to kill the backing job.
	url := fmt.Sprintf("%s/KillJob?userName=%s&jobId=%s", configs.Config.DltsUrl, userName, backgroundJobId)
	params := make(map[string]interface{})
	job := &models.Job{}
	err = DoRequest(url, "GET", nil, params, job)
	if err != nil {
		fmt.Printf("kill background job err[%+v]\n", err)
		return err
	}
	// Detach the background job and mark the record paused.
	targetJob.Status = "paused"
	targetJob.RelateJobId = ""
	err = models.UpdateVisualJob(&targetJob)
	if err != nil {
		fmt.Printf("update visual job info fail: [%+v]\n", err)
		return err
	}
	_, err = DeleteJob(backgroundJobId)
	if err != nil {
		fmt.Printf("delete background job err[%+v]\n", err)
		return err
	}
	return nil
}
// ContinueVisualJob restarts the background job for a paused visual job and
// moves the record back to the scheduling state.
func ContinueVisualJob(userName string, jobId int) error {
	targetJob, err := models.GetVisualJobById(jobId)
	if err != nil {
		fmt.Printf("get job detail err[%+v]\n", err)
		return err
	}
	backgroundJobId, err := createBackgroundJob(userName, targetJob.Name, targetJob.LogPath, targetJob.Description)
	if err != nil {
		fmt.Printf("create background job failed : [%+v]\n", err)
		return err
	}
	// Re-attach the fresh background job before persisting the new state.
	targetJob.RelateJobId = backgroundJobId
	targetJob.Status = "scheduling"
	if err = models.UpdateVisualJob(&targetJob); err != nil {
		fmt.Printf("update visual job info failed: [%+v]\n", err)
		return err
	}
	return nil
}
// DeleteVisualJob stops a visual job if it is still running and then removes
// its database record.
//
// Bug fix: the original deleted the record first and only then called
// StopVisualJob, which re-reads the record by id (models.GetVisualJobById)
// and therefore failed for running jobs. The job is now stopped before the
// record is deleted.
func DeleteVisualJob(userName string, jobId int) error {
	err := renewStatusInfo(userName)
	if err != nil {
		fmt.Printf("job status renew fail : err[%+v]\n", err)
		return err
	}
	job, err := models.GetVisualJobById(jobId)
	if err != nil {
		fmt.Printf("get job detail err[%+v]\n", err)
		return err
	}
	// Stop the backing job first, while its record still exists.
	if job.Status == "running" {
		err := StopVisualJob(userName, jobId)
		if err != nil {
			fmt.Printf("stop job error :[%+v]\n", err)
			return err
		}
	}
	err = models.DeleteVisualJob(&job)
	if err != nil {
		fmt.Printf("delete visual job record error :[%+v]\n", err)
		return err
	}
	return nil
}
// createBackgroundJob submits a DLTS job that hosts a tensorboard for the
// given log directory and then creates a "tensorboard" endpoint on it.
// It returns the id of the newly created DLTS job.
func createBackgroundJob(userName string, jobName string, logdir string, description string) (string, error) {
	//step1. create a job
	url := fmt.Sprintf("%s/PostJob", configs.Config.DltsUrl)
	params := make(map[string]interface{})
	params["userName"] = userName
	params["jobName"] = jobName
	params["jobType"] = models.JobTypeVisualJob
	params["image"] = ConvertImage("apulistech/visualjob:1.0")
	fmt.Println(ConvertImage("apulistech/visualjob:1.0"))
	// The job requests zero GPUs; it only exists to serve tensorboard.
	params["gpuType"] = "nvidia_gpu_amd64"
	params["resourcegpu"] = 0
	params["codePath"] = logdir
	params["desc"] = description
	// Keep the container alive indefinitely; tensorboard itself runs as an
	// endpoint created in step 2 below.
	params["cmd"] = "sleep infinity"
	params["containerUserId"] = 0
	params["jobtrainingtype"] = "RegularJob"
	params["preemptionAllowed"] = false
	params["workPath"] = ""
	params["enableworkpath"] = true
	params["enabledatapath"] = true
	params["enablejobpath"] = true
	params["jobPath"] = "job"
	params["hostNetwork"] = false
	params["isPrivileged"] = false
	params["interactivePorts"] = false
	params["vcName"] = models.DefaultVcName
	params["team"] = models.DefaultVcName
	id := &models.JobId{}
	err := DoRequest(url, "POST", nil, params, id)
	if err != nil {
		fmt.Printf("post dlts err[%+v]\n", err)
		return "", err
	}
	//step2. create endpoints
	url = fmt.Sprintf("%s/endpoints?userName=%s&jobId=%s", configs.Config.DltsUrl, userName, id.Id)
	req := &models.CreateEndpointsReq{}
	ret := &models.CreateEndpointsRsp{}
	req.Endpoints = append(req.Endpoints, "tensorboard")
	req.JobId = id.Id
	// The endpoint arguments tell tensorboard which log directory to serve.
	req.Arguments = `{ "tensorboard_log_dir" : "` + logdir + `" }`
	err = DoRequest(url, "POST", nil, req, ret)
	if err != nil {
		fmt.Printf("create endpoints err[%+v]\n", err)
		return "", err
	}
	return id.Id, nil
}
// renewStatusInfo syncs the status of every non-paused visual job owned by
// userName with the status of its backing DLTS job.
//
// Fixes: fmt.Printf was called with the job id as a non-constant format
// string (flagged by go vet); it is now printed with fmt.Println. The
// UpdateVisualJob error was silently discarded; it is now logged.
func renewStatusInfo(userName string) error {
	visualJobList, err := models.GetAllVisualJobByArguments(userName, 1, -1, "", "", "", "")
	if err != nil {
		fmt.Printf("get visual job err[%+v]\n", err)
		return err
	}
	for _, job := range visualJobList {
		backgroundJobId := job.RelateJobId
		// Paused jobs have no backing job to query.
		if job.Status == "paused" {
			continue
		}
		url := fmt.Sprintf("%s/GetJobDetailV2?userName=%s&jobId=%s", configs.Config.DltsUrl, userName, backgroundJobId)
		params := make(map[string]interface{})
		backgroundJob := &models.Job{}
		err := DoRequest(url, "GET", nil, params, backgroundJob)
		if err != nil {
			fmt.Printf("get training err[%+v]\n", err)
			return err
		}
		job.Status = backgroundJob.JobStatus
		if err := models.UpdateVisualJob(&job); err != nil {
			fmt.Printf("update visual job err[%+v]\n", err)
		}
		fmt.Println(backgroundJobId)
	}
	return nil
}
|
/**
* Testing file for linked list
**/
package linkedListTesting
import (
"testing"
// "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
ll "linkedList/main.go/linkedlist"
)
// appendSuite bundles the linked-list append tests into a testify suite.
type appendSuite struct {
	suite.Suite
}
// alr is the shared list under test, rebuilt by BeforeTest for each test.
var alr ll.LinkedList
// BeforeTest resets alr to a fresh list seeded with 2, 3, 4, 10 so every
// test starts from the same state.
func (s *appendSuite) BeforeTest(suiteName, testName string) {
	alr = ll.LinkedList{}
	for _, item := range []int{2, 3, 4, 10} {
		alr.Append(item)
	}
}
// AfterTest is the suite hook that runs after each test; currently a no-op.
func (s *appendSuite) AfterTest(suiteName, testName string) {
	// fmt.Println("This runs after test")
}
// TestAppendToHead verifies that AppendToHead places the new item at the
// head of the list. (The previous comment wrongly referenced FindItem.)
func (s *appendSuite) TestAppendToHead() {
	alr.AppendToHead(3)
	head := alr.Head()
	s.Equal(head.Item, 3, "head does not equal 3")
}
func TestLinkedListAppendSuite(t *testing.T) {
st := new(appendSuite)
suite.Run(t, st)
}
|
package node
import (
"context"
"testing"
"github.com/josetom/go-chain/core"
"github.com/josetom/go-chain/db"
"github.com/josetom/go-chain/test_helper"
"github.com/josetom/go-chain/test_helper/test_helper_core"
)
// TestMine drives one full mining round against a temporary LevelDB-backed
// state: it queues a test transaction, mines a block, and checks that the
// resulting block hash is valid.
func TestMine(t *testing.T) {
	// Point the db and core config at throwaway test files.
	db.Config.Type = db.LEVEL_DB
	test_helper.SetTestDataDirs()
	tempDbPath := test_helper.CreateAndGetTestDbFile()
	core.Config.State.DbFile = tempDbPath
	state, err := core.LoadState()
	if err != nil {
		t.Error(err)
	}
	txn := test_helper_core.GetTestTxn()
	ctx := context.Background()
	miner := InitMiner(state)
	miner.addPendingTxn(txn)
	block, err := miner.mine(ctx)
	if err != nil {
		t.Fail()
	}
	isBlockValid, err := block.IsBlockHashValid()
	if err != nil {
		t.Error(err)
	}
	if !isBlockValid {
		t.Fail()
	}
	// Restore the config and remove the temp db when the test finishes.
	cleanup := func() {
		state.Close()
		test_helper.DeleteTestDbFile(tempDbPath)
		core.Config.State.DbFile = core.Defaults().State.DbFile
	}
	t.Cleanup(cleanup)
}
|
package helper
import (
"encoding/json"
"net/http"
)
// Response holds the data that is responded to the client.
// (The previous comment misnamed this type "JSONError".)
type Response struct {
	Code int
	Response interface{}
	Error error
}
// CreateResponse writes a JSON-encoded Response with the given status code
// to rw and returns any error encountered while encoding.
//
// Bug fix: the original built a fallback Response on encode failure but
// never used it and always returned nil, silently dropping encode errors;
// the encode error is now returned to the caller.
func CreateResponse(rw http.ResponseWriter, req *http.Request, status int, response interface{}, incomingError error) error {
	responseError := Response{status, response, incomingError}
	JSONHandler(rw, req)
	rw.WriteHeader(status)
	if err := json.NewEncoder(rw).Encode(responseError); err != nil {
		return err
	}
	return nil
}
|
package models
import "time"
// User is the persistence model for an application user.
type User struct {
	Id int64
	FirstName string
	LastName string
	Active bool
	// CreatedAt/UpdatedAt are nullable timestamps (nil when unset) —
	// presumably populated by the persistence layer; confirm with callers.
	CreatedAt *time.Time
	UpdatedAt *time.Time
}
|
package main
import (
"crypto/sha512"
"encoding/base64"
"log"
"net/smtp"
"strings"
)
// sendEmail delivers a plain-text email with the given subject and body to
// a single recipient, using the SMTP server and credentials from C.
// XXX use several SMTP according to the destination email
// provider to speed things up.
func sendEmail(to, subject, msg string) error {
	payload := []byte("To: " + to + "\r\nSubject: " + subject + "\r\n\r\n" + msg)
	auth := smtp.PlainAuth("", C.AuthEmail, C.AuthPasswd, C.SMTPServer)
	addr := C.SMTPServer + ":" + C.SMTPPort
	if err := smtp.SendMail(addr, auth, C.AuthEmail, []string{to}, payload); err != nil {
		return Err(err)
	}
	return nil
}
// sendToken emails the given token to an user, naming the service the token
// belongs to in the subject and body. (The previous comment misnamed this
// function "sendEmail".)
// NOTE(review): db.GetService2 can return nil (see CheckService); a nil
// result here would panic on s.Name — confirm token.Key always resolves.
func sendToken(email string, token *Token) error {
	s := db.GetService2(token.Key)
	err := sendEmail(email, "Token for "+s.Name,
		"Hi there,\r\n"+
			"Here is your token for "+s.Name+" ("+s.Url+")"+": "+token.Token)
	if err != nil {
		log.Println(err)
		return SMTPErr
	}
	return nil
}
// checkName validates a user name: it must be non-empty, shorter than the
// token length, and contain none of '@', space, tab, newline, or CR.
//
// Bug fix: the original used strings.Contains(name, "@ \t\n\r"), which
// tests for that exact five-character substring; strings.ContainsAny tests
// for any single forbidden character, which is what was intended.
func checkName(name string) error {
	switch {
	case name == "":
		return NoNameErr
	case len(name) >= C.LenToken:
		return LongNameErr
	case strings.ContainsAny(name, "@ \t\n\r"):
		return NameFmtErr
	}
	return nil
}
// checkEmail validates an email address: it must be non-empty, shorter than
// the token length, and contain an '@'.
func checkEmail(email string) error {
	if email == "" {
		return NoEmailErr
	}
	if len(email) >= C.LenToken {
		return LongEmailErr
	}
	if !strings.Contains(email, "@") {
		return EmailFmtErr
	}
	return nil
}
// isToken reports whether the login is a token rather than a name/email:
// tokens are exactly C.LenToken characters long.
func isToken(login string) bool { return len(login) == C.LenToken }
// isEmail reports whether the login is an email rather than a name
// (recognized by the presence of '@').
func isEmail(login string) bool { return strings.Contains(login, "@") }
// Register adds a new user to both database and cache. If the registration
// succeeds, a(n activation) token is sent to the user.
func Register(name, email, passwd string) error {
	if err := checkName(name); err != nil {
		return err
	}
	if err := checkEmail(email); err != nil {
		return err
	}
	// Store a base64-encoded SHA-512 hash, never the clear password.
	if passwd != "" {
		sum := sha512.Sum512([]byte(passwd))
		passwd = base64.StdEncoding.EncodeToString(sum[:])
	}
	u := User{-1, name, email, passwd, false}
	if err := db.AddUser(&u); err != nil {
		return WrongUser
	}
	return sendToken(email, NewToken(u.Id, Auth.Key))
}
// Login authenticates either by token (renewing it), by password, or — when
// no password is given — by emailing a fresh token to the user (2-step
// login). It returns the token string for the first two paths.
func Login(login, passwd string) (string, error) {
	// Token login: renew the existing token.
	if isToken(login) {
		ntoken := UpdateToken(login)
		if ntoken == "" {
			return "", NoSuchTErr
		}
		return ntoken, nil
	}
	// get user associated with login
	u, err := db.GetUser2(login)
	if err != nil {
		return "", NoSuchErr
	}
	// 2-steps login (sending token through email).
	if passwd == "" {
		return "", sendToken(u.Email, NewToken(u.Id, Auth.Key))
	}
	// Password login: compare base64-encoded SHA-512 hashes.
	sum := sha512.Sum512([]byte(passwd))
	if base64.StdEncoding.EncodeToString(sum[:]) != u.Passwd {
		return "", BadPasswd
	}
	return NewToken(u.Id, Auth.Key).Token, nil
}
// Logout invalidates every token belonging to the owner of the given token.
func Logout(token string) {
	tokens := AllTokens(token)
	for i := range tokens {
		RemoveToken(tokens[i].Token)
	}
}
// update user. XXX quirky, can't create a db.Update(u *User)
// because of UNIQUE constraints, etc.
// Update validates and applies name/email/password changes for the token's
// owner. The bool result is set to true when the name was changed.
// NOTE(review): an email-only change does not set the bool, unlike a name
// change — confirm whether that asymmetry is intentional.
func Update(token, name, email, passwd, npasswd string) (error, bool) {
	c := false
	if err := checkName(name); err != nil {
		return err, c
	}
	if err := checkEmail(email); err != nil {
		return err, c
	}
	// NOTE(review): the db.GetUser error is discarded; a missing user yields
	// a zero-value u — verify OwnerToken always maps to an existing user.
	u, _ := db.GetUser(OwnerToken(token))
	if u.Name != name {
		if err := db.UpdateName(u.Id, name); err != nil {
			return WrongUser, c
		}
		c = true
	}
	if u.Email != email {
		if err := db.UpdateEmail(u.Id, email); err != nil {
			return WrongUser, c
		}
	}
	// update password?
	if passwd != "" && npasswd != "" {
		h := sha512.New()
		h.Write([]byte(passwd))
		passwd = base64.StdEncoding.EncodeToString(h.Sum(nil))
		// The old password must match before the new one is stored.
		if passwd != u.Passwd {
			return BadPasswd, c
		}
		// hash new password
		h.Reset()
		h.Write([]byte(npasswd))
		db.UpdatePassword(u.Id, base64.StdEncoding.EncodeToString(h.Sum(nil)))
	}
	return nil, c
}
// Unregister deletes the user owning the given token, invalidates all of
// their tokens, and sends a confirmation email (best effort).
//
// Fixes: the sendEmail error was silently discarded (now logged), and the
// confirmation message's grammar is corrected ("have been" -> "has been").
func Unregister(token string) {
	email := db.DelUser(OwnerToken(token))
	Logout(token)
	// Confirmation delivery is best effort: log failures, don't abort.
	if err := sendEmail(email, "[AAS] Unregistration confirmation",
		"Your account has been deleted."); err != nil {
		log.Println(err)
	}
}
// IsAdmin reports whether the owner of the given token is an administrator.
func IsAdmin(token string) bool {
	return db.IsAdmin(OwnerToken(token))
}
// AddService registers a new third-party service. Depending on ServiceMode
// the request is rejected ("ko"), activated immediately (the key is
// returned), or queued for manual admin approval ("ok").
func AddService(name, url, address, email string) (string, error) {
	if name == "" || url == "" {
		return "", EmptyFieldsErr
	}
	if ServiceMode == Disabled {
		return "ko", nil
	}
	svc := Service{-1, name, url, randomString(C.LenKey), false, address, email}
	if err := db.AddService(&svc); err != nil {
		return "", err
	}
	// Automatic mode: approve and hand the key back right away.
	if ServiceMode == Automatic {
		db.SetMode(svc.Id, true)
		return svc.Key, nil
	}
	// Manual mode: notify the admins and wait for approval.
	SendAdmin("New Service "+svc.Name,
		"Hi there,\r\n"+
			svc.Name+" ("+svc.Address+", "+svc.Url+") asks for landing.")
	return "ok", nil
}
// CheckService reports whether key identifies an approved service that was
// registered from the given address.
func CheckService(key, address string) bool {
	svc := db.GetService2(key)
	return svc != nil && svc.Address == address && svc.Mode
}
// SendAdmin emails every administrator with the given subject and message.
// Per-recipient failures are logged and do not stop delivery to the rest.
//
// Idiom fix: the original nested the happy path in an else after a
// terminating return (indent-error-flow); the error is now handled first.
func SendAdmin(subject, msg string) {
	admins, err := db.GetAdmins()
	if err != nil {
		log.Println(err)
		return
	}
	for _, to := range admins {
		if err := sendEmail(to, subject, msg); err != nil {
			log.Println(err)
		}
	}
}
|
package main
import (
"context"
"os"
"os/signal"
"path/filepath"
"strconv"
"syscall"
"time"
log "github.com/sirupsen/logrus"
apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
kh "github.com/kuberhealthy/kuberhealthy/v2/pkg/checks/external/checkclient"
"github.com/kuberhealthy/kuberhealthy/v2/pkg/checks/external/nodeCheck"
"github.com/kuberhealthy/kuberhealthy/v2/pkg/kubeClient"
)
var (
	// K8s config file for the client
	kubeConfigFile = filepath.Join(os.Getenv("HOME"), ".kube", "config")
	// Namespace the check daemonset will be created in [default = kuberhealthy]
	checkNamespaceEnv = os.Getenv("POD_NAMESPACE")
	checkNamespace string
	// DSPauseContainerImageOverride specifies the sleep image we will use on the daemonset checker
	dsPauseContainerImageEnv = os.Getenv("PAUSE_CONTAINER_IMAGE")
	dsPauseContainerImage string // specify an alternate location for the DSC pause container - see #114
	// Node selectors for the daemonset check
	dsNodeSelectorsEnv = os.Getenv("NODE_SELECTOR")
	dsNodeSelectors = make(map[string]string)
	// Minutes allowed for the shutdown process to complete
	shutdownGracePeriodEnv = os.Getenv("SHUTDOWN_GRACE_PERIOD")
	shutdownGracePeriod time.Duration
	// Check daemonset name
	checkDSNameEnv = os.Getenv("CHECK_DAEMONSET_NAME")
	checkDSName string
	// The priority class to use for the daemonset
	podPriorityClassNameEnv = os.Getenv("DAEMONSET_PRIORITY_CLASS_NAME")
	podPriorityClassName string
	// Check deadline from injected env variable KH_CHECK_RUN_DEADLINE
	khDeadline time.Time
	// checkDeadline is the deadline used inside the check — presumably set
	// from khDeadline by parseInputValues; confirm there.
	checkDeadline time.Time
	// Daemonset check configurations
	hostName string
	tolerationsEnv = os.Getenv("TOLERATIONS")
	tolerations []apiv1.Toleration
	daemonSetName string
	allowedTaintsEnv = os.Getenv("ALLOWED_TAINTS")
	allowedTaints map[string]apiv1.TaintEffect
	// Time object used for the check.
	now time.Time
	// K8s client used for the check.
	client *kubernetes.Clientset
)
// Defaults applied when the corresponding environment variable is unset.
const (
	// Default k8s manifest resource names.
	defaultCheckDSName = "daemonset"
	// Default namespace daemonset check will be performed in
	defaultCheckNamespace = "kuberhealthy"
	// Default pause container image used for the daemonset check
	defaultDSPauseContainerImage = "gcr.io/google-containers/pause:3.1"
	// Default shutdown termination grace period
	defaultShutdownGracePeriod = time.Duration(time.Minute * 1) // grace period for the check to shutdown after receiving a shutdown signal
	// Default daemonset check deadline
	defaultCheckDeadline = time.Duration(time.Minute * 15)
	// Default user
	defaultUser = int64(1000)
	// Default priority class name
	defaultPodPriorityClassName = ""
)
// init enables nodeCheck debug output, stamps the run's start time, and
// parses all configuration from environment variables.
func init() {
	// set debug mode for nodeCheck pkg
	nodeCheck.EnableDebugOutput()
	// Create a timestamp reference for the daemonset;
	// also to reference against daemonsets that should be cleaned up.
	now = time.Now()
	// Parse all incoming input environment variables and crash if an error occurs
	// during parsing process.
	parseInputValues()
	setCheckConfigurations(now)
}
// main creates the kubernetes client, verifies node readiness, runs the
// daemonset check under a deadline-bound context, reports the result to
// Kuberhealthy, and finally performs a time-boxed cleanup.
func main() {
	// Create a kubernetes client.
	var err error
	client, err = kubeClient.Create(kubeConfigFile)
	if err != nil {
		log.Fatalln("Unable to create kubernetes client:" + err.Error())
	}
	log.Infoln("Kubernetes client created.")
	// this check runs all the nodechecks to ensure node is ready before running the daemonset chek
	err = checksNodeReady()
	if err != nil {
		log.Errorln("Error running when doing the nodechecks :", err)
	}
	// Catch panics.
	defer func() {
		r := recover()
		if r != nil {
			log.Infoln("Recovered panic:", r)
			reportErrorsToKuberhealthy([]string{"kuberhealthy/daemonset: " + r.(string)})
		}
	}()
	// create a context for our check to operate on that represents the timelimit the check has
	log.Debugln("Allowing this check until", checkDeadline, "to finish.")
	// Set ctx and ctxChancel using khDeadline. If timeout is set to checkDeadline, ctxCancel will happen first before
	// any of the timeouts are given the chance to report their timeout errors.
	log.Debugln("Setting check ctx cancel with timeout", khDeadline.Sub(now))
	ctx, ctxCancel := context.WithTimeout(context.Background(), khDeadline.Sub(now))
	// Start listening to interrupts.
	signalChan := make(chan os.Signal, 5)
	go listenForInterrupts(signalChan)
	// run check in background and wait for completion
	runCheckDoneChan := make(chan error, 1)
	go func() {
		// Run daemonset check and report errors
		runCheckDoneChan <- runCheck(ctx)
	}()
	// watch for either the check to complete or the OS to get a shutdown signal
	select {
	case err = <-runCheckDoneChan:
		if err != nil {
			reportErrorsToKuberhealthy([]string{"kuberhealthy/daemonset: " + err.Error()})
		} else {
			reportOKToKuberhealthy()
		}
		log.Infoln("Done running daemonset check")
	case <-signalChan:
		// TO DO: figure out better way to report shutdown signals. Do we report "error" or "ok" to kuberhealthy when
		// a shutdown signal is received? For now, report OK and wait for the next run.
		reportOKToKuberhealthy()
		log.Errorln("Received shutdown signal. Canceling context and proceeding directly to cleanup.")
		ctxCancel() // Causes all functions within the check to return without error and abort. NOT an error
	}
	// at the end of the check run, we run a clean up for everything that may be left behind
	log.Infoln("Running post-check cleanup")
	shutdownCtx, shutdownCtxCancel := context.WithTimeout(context.Background(), shutdownGracePeriod)
	defer shutdownCtxCancel()
	// start a background cleanup
	cleanupDoneChan := make(chan error)
	go func() {
		cleanupDoneChan <- cleanUp(shutdownCtx)
	}()
	// wait for either the cleanup to complete or the shutdown grace period to expire
	select {
	case err := <-cleanupDoneChan:
		if err != nil {
			log.Errorln("Cleanup completed with error:", err)
			return
		}
		log.Infoln("Cleanup completed without error")
	case <-time.After(time.Duration(shutdownGracePeriod)):
		log.Errorln("Shutdown took too long. Shutting down forcefully!")
	}
}
// checksNodeReady runs the node checks to ensure the node (specifically the
// kuberhealthy endpoint) is reachable before the daemonset check starts.
// The caller treats a returned error as non-fatal.
//
// Fixes: the context cancel function was discarded (go vet "lostcancel"),
// and the WaitForKuberhealthy error was swallowed — nil was always
// returned despite the error signature.
func checksNodeReady() error {
	// create context
	checkTimeLimit := time.Minute * 1
	nctx, cancel := context.WithTimeout(context.Background(), checkTimeLimit)
	defer cancel()
	// hits kuberhealthy endpoint to see if node is ready
	err := nodeCheck.WaitForKuberhealthy(nctx)
	if err != nil {
		log.Errorln("Error waiting for kuberhealthy endpoint to be contactable by checker pod with error:" + err.Error())
	}
	return err
}
// setCheckConfigurations derives the per-run daemonset name from the host
// name and the check's start time.
func setCheckConfigurations(now time.Time) {
	hostName = getHostname()
	stamp := strconv.Itoa(int(now.Unix()))
	daemonSetName = checkDSName + "-" + hostName + "-" + stamp
}
// waitForShutdown watches the signal and done channels for termination.
// NOTE(review): os.Kill (SIGKILL) cannot be caught by a process, so listing
// it in signal.Notify has no effect. Also, this goroutine and main's select
// both receive from signalChan; whichever receives the signal first wins,
// so this os.Exit(0) can preempt main's graceful cleanup path — confirm
// which behavior is intended.
func listenForInterrupts(signalChan chan os.Signal) {
	// Relay incoming OS interrupt signals to the signalChan
	signal.Notify(signalChan, os.Interrupt, os.Kill, syscall.SIGTERM, syscall.SIGINT)
	// watch for interrupts on signalChan
	<-signalChan
	os.Exit(0)
}
// reportErrorsToKuberhealthy reports the specified errors for this check
// run, logging them locally as well.
func reportErrorsToKuberhealthy(errs []string) {
	log.Errorln("Reporting errors to Kuberhealthy:", errs)
	reportToKuberhealthy(false, errs)
}
// reportOKToKuberhealthy reports that there were no errors on this check
// run to Kuberhealthy.
func reportOKToKuberhealthy() {
	log.Infoln("Reporting success to Kuberhealthy.")
	reportToKuberhealthy(true, []string{})
}
// reportToKuberhealthy sends either a success or a failure report to
// Kuberhealthy and exits fatally if the reporting call itself fails.
func reportToKuberhealthy(ok bool, errs []string) {
	var err error
	if ok {
		err = kh.ReportSuccess()
	} else {
		err = kh.ReportFailure(errs)
	}
	if err != nil {
		log.Fatalln("error reporting to kuberhealthy:", err.Error())
	}
}
|
package main
import (
"bytes"
"encoding/hex"
"io/ioutil"
"os"
)
var (
	// _VjsonConfig_need_save marks that the in-memory config differs from
	// the on-disk json file and must be written back.
	_VjsonConfig_need_save bool
	//_VC _Tconfig
	//_VjsonConfig_bytes []byte
)
// _Fbase_104c__try_to_get_env_id128 reconciles _VC.MyId128 with the "id128"
// environment variable. If the env var is missing or too short, the id
// already in the json config must be 16 bytes or the process exits. If the
// env var decodes to a different 16-byte id, it replaces the configured one
// and the config is flagged for saving.
// NOTE(review): the code unconditionally strips two leading characters
// (__Vstr[2:]) before hex-decoding, i.e. it assumes a "0x"/"0X" prefix —
// confirm the env var always carries one.
func _Fbase_104c__try_to_get_env_id128() {
	__Vstr := os.Getenv("id128")
	_FpfN(" 823813 01 read env id128 is (%d)[%s]", len(__Vstr), __Vstr)
	if "" == __Vstr || len(__Vstr) < 32 { // id env error , try use old id
		if 16 != len(_VC.MyId128) { // old id error -> I don't know what happens. exit.
			_FpfN(" 823813 03 read env id128 is NULL or too short, and the json.config file id128 error: \n (%d)[%0x] \n",
				len(_VC.MyId128), _VC.MyId128)
			_Fex(" 823813 05 : Exit now ")
		}
		_FpfN(" 823813 07 env id128 is NULL , and so old id128 saved in config file ok: \n (%d)[%0x] \n",
			len(_VC.MyId128), _VC.MyId128)
		return
	}
	// func hex.DecodeString(s string) ([]byte, error)
	//if ( __Vstr[0:2] == "0x" ) || ( __Vstr[0:2] == "0X" ) {
	//__Vstr = _FmakeByte( []byte(__Vstr[2:]) )
	//}
	__Vstr = __Vstr[2:]
	__Vbyte, __Verr := hex.DecodeString(__Vstr)
	if nil != __Verr {
		_FpfN(" 823815 01 read env id128 is error , check what happens : \n (%d)[%s] \n %v\n",
			len(__Vbyte), string(__Vbyte), __Verr)
		_Fex(" Exit now ")
	}
	if 16 != len(__Vbyte) {
		_FpfN(" 823815 02 read env id128 is len error , check what happens \n (%d)[%0x] \n",
			len(__Vbyte), string(__Vbyte))
		_Fex(" Exit now ")
	}
	if bytes.Equal(__Vbyte, _VC.MyId128) {
		_FpfN(" 823815 03 read env id128 equals to json's id128\n (%d)[%0x] \n",
			len(__Vbyte), string(__Vbyte))
		return
	}
	_FpfN(" 823815 04 read env id128 NOT equals to json's id128\n env : (%d)[%0x] \n json: (%d)[%0x] \n",
		len(__Vbyte), string(__Vbyte),
		len(_VC.MyId128), string(_VC.MyId128))
	_VC.MyId128 = __Vbyte
	_VjsonConfig_need_save = true
} // _Fbase_104c__try_to_get_env_id128
// _Fbase_104b__try_to_check_json_config syncs the configured name with the
// process role name and flags the config for saving when they differ.
func _Fbase_104b__try_to_check_json_config() {
	if _VC.Name != _Vself.RoleName {
		_VC.Name = _Vself.RoleName
		_VjsonConfig_need_save = true
	}
} // _Fbase_104b__try_to_check_json_config
// _Fbase_1203__gen_self_md5_sha
// _Fbase_104a__try_to_read_json_config_file loads <progPath>.json into _VC;
// when the file cannot be read it flags the config for (re)generation.
func _Fbase_104a__try_to_read_json_config_file() {
	__Vfname := _Vself.progPath + ".json"
	__VjsonConfig_bytes, __Verr := ioutil.ReadFile(__Vfname)
	if nil != __Verr {
		_FpfN(" 823816 01 read config file <"+__Vfname+"> error, try to gen it... %v", __Verr)
		_VjsonConfig_need_save = true
		return
	}
	_FdecJson___(" 823816 02 ", &__VjsonConfig_bytes, &_VC)
} // _Fbase_104a__try_to_read_json_config_file
// _Fbase_104d__try_to_save_json_config_to_file serializes _VC to
// <progPath>.json, then re-reads the file and exits unless the bytes on
// disk match what was written.
func _Fbase_104d__try_to_save_json_config_to_file() {
	__Vbyte := _FencJsonExit("823818 01 : jsonConf encoding ", _VC)
	__Vfname := _Vself.progPath + ".json"
	_FwriteFileExit("823818 03 jsonconf writing ", __Vfname, &__Vbyte)
	// Verify the write by reading the file back and comparing bytes.
	__Vbyte2 := _FreadFileExit(" 823815 config file re-reading ", __Vfname)
	if bytes.Equal(__Vbyte, __Vbyte2) {
		_FpfN("823818 05 jsonconf writing succeed :<%s>", __Vbyte2)
		return
	}
	_Fex(" 823818 08 : I don't know what happens.")
} // _Fbase_104d__try_to_save_json_config_to_file
// _Fbase_104e__try_to_reread_json_config_and_recheck_the_result is a
// placeholder; the re-read/recheck step is not implemented yet.
func _Fbase_104e__try_to_reread_json_config_and_recheck_the_result() {
} // _Fbase_104e__try_to_reread_json_config_and_recheck_the_result
// _Fbase_104z__try_to_read_json_config_top loads the json config file,
// reconciles it with the current role name and the id128 environment
// variable, and writes it back when anything changed.
//
// Idiom fix: `true == _VjsonConfig_need_save` replaced by the plain bool.
func _Fbase_104z__try_to_read_json_config_top() {
	_Fbase_104a__try_to_read_json_config_file()
	_Fbase_104b__try_to_check_json_config()
	_Fbase_104c__try_to_get_env_id128()
	_FpfN(" 823819 01 :%t ", _VjsonConfig_need_save)
	if _VjsonConfig_need_save {
		_Fbase_104d__try_to_save_json_config_to_file()
	}
	//_Vself.progMd5.b128 = _VC.MyId128
	//_Vself.RoleName = _VC.Name
	//_Fex( " 381919 :Debug Stop here. " )
} // _Fbase_104z__try_to_read_json_config_top
|
package cache
import (
"github.com/despreston/vimlytics/redis"
"log"
"time"
)
// ttl is how long cached values live before Redis expires them.
const ttl = 72 * time.Hour
// Get returns the cached value for key and whether it was present. Cache
// misses and Redis errors both report false; errors are logged.
//
// Idiom fixes: removed the redundant `var val string` preceding the short
// variable declaration, and flattened the else-if after a return.
func Get(key string) (string, bool) {
	val, err := redis.Client().Get(redis.Ctx, key).Result()
	if err == redis.Empty {
		// Key not in the cache: a miss, not an error.
		return "", false
	}
	if err != nil {
		log.Printf("Error @ redis GET: %v", err)
		return "", false
	}
	return val, true
}
// Set stores val under key with the package TTL, only when the key does not
// already exist (SETNX). Errors are logged, not returned.
func Set(key string, val string) {
	if _, err := redis.Client().SetNX(redis.Ctx, key, val, ttl).Result(); err != nil {
		log.Printf("Error @ redis SET: %v", err)
	}
}
|
package main
import (
"log"
"shared/protobuf/pb"
)
// GetGachaList requests the gacha list (message id 1201) and decodes the
// server response.
func (c *Client) GetGachaList(req *pb.C2SGetGachaList) (*pb.S2CGetGachaList, error) {
	raw, err := c.Request(1201, req)
	if err != nil {
		return nil, err
	}
	resp := &pb.S2CGetGachaList{}
	if err := c.Handle(raw, resp); err != nil {
		return nil, err
	}
	log.Printf("RESP SUCCESS: GetGachaList resp: %+v", resp)
	return resp, nil
}
// UserGachaDrop performs a gacha draw (message id 1203) and decodes the
// server response.
func (c *Client) UserGachaDrop(req *pb.C2SUserGachaDrop) (*pb.S2CUserGachaDrop, error) {
	raw, err := c.Request(1203, req)
	if err != nil {
		return nil, err
	}
	resp := &pb.S2CUserGachaDrop{}
	if err := c.Handle(raw, resp); err != nil {
		return nil, err
	}
	log.Printf("RESP SUCCESS: UserGachaDrop resp: %+v", resp)
	return resp, nil
}
|
package zhttp
import (
"bufio"
"fmt"
"net"
"net/http"
"time"
)
// Logger wraps a ResponseWriter and records the resulting status code
// and how many bytes are written
type Logger struct {
	http.ResponseWriter
	// length accumulates the number of body bytes written.
	length int64
	// status is the first status code written; 0 means none written yet.
	status int
	// started is set when the header is first written.
	started time.Time
	// Now returns the current time; used by WriteHeader and Started.
	Now func() time.Time
}
// Write implements ResponseWriter, counting body bytes and recording an
// implicit 200 when the body is written before any WriteHeader call.
func (l *Logger) Write(p []byte) (int, error) {
	if l.status == 0 {
		l.WriteHeader(http.StatusOK)
	}
	n, err := l.ResponseWriter.Write(p)
	l.length += int64(n)
	return n, err
}
// WriteHeader implements ResponseWriter. Only the first call reaches the
// underlying writer; later calls merely overwrite the recorded status.
func (l *Logger) WriteHeader(status int) {
	first := l.status == 0
	if first {
		l.ResponseWriter.WriteHeader(status)
		l.started = l.Now()
	}
	l.status = status
}
// Flush wraps a nested Flusher; a no-op when the underlying writer does not
// support flushing.
func (l *Logger) Flush() {
	f, ok := l.ResponseWriter.(http.Flusher)
	if !ok {
		return
	}
	f.Flush()
}
// Hijack wraps a nested Hijacker. On a successful hijack with no status yet
// written, it records 101 Switching Protocols, and it wraps the returned
// conn so a later SetStatus call can update this Logger.
func (l *Logger) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	if h, ok := l.ResponseWriter.(http.Hijacker); ok {
		conn, rw, err := h.Hijack()
		if err == nil && l.status == 0 {
			l.status = http.StatusSwitchingProtocols
		}
		if hc, ok := conn.(halfCloser); ok {
			// more featureful for tcp.Conn common case
			conn = halfCloseLogger{halfCloser: hc, rl: l}
		} else {
			conn = hijackLogger{Conn: conn, rl: l}
		}
		return conn, rw, err
	}
	return nil, nil, fmt.Errorf("Hijacker interface not supported by type %T", l.ResponseWriter)
}
// CloseNotify wraps a nested CloseNotifier
func (l *Logger) CloseNotify() <-chan bool {
	//nolint // provided for backwards compatibility
	cn, ok := l.ResponseWriter.(http.CloseNotifier)
	if ok {
		return cn.CloseNotify()
	}
	// No notifier available: hand back a channel that never fires.
	return make(chan bool)
}
// Push wraps a nested Pusher
func (l *Logger) Push(target string, opts *http.PushOptions) error {
	p, ok := l.ResponseWriter.(http.Pusher)
	if !ok {
		return fmt.Errorf("Pusher interface not supported by type %T", l.ResponseWriter)
	}
	return p.Push(target, opts)
}
// Length returns the total number of body bytes written so far.
func (l *Logger) Length() int64 {
	return l.length
}
// Status returns the response status; when nothing has been written yet it
// reports the implicit 200.
func (l *Logger) Status() int {
	if l.status != 0 {
		return l.status
	}
	return http.StatusOK
}
// Started returns the time at which headers were written, or the current
// time when no header has been written yet.
func (l *Logger) Started() time.Time {
	if !l.started.IsZero() {
		return l.started
	}
	return l.Now()
}
// hijackLogger wraps a hijacked connection in order to enable WriteErrorReason
// to set the status of the request for logging purposes
type hijackLogger struct {
	net.Conn
	// rl is the Logger whose status/length SetStatus updates.
	rl *Logger
}
// SetStatus records the final status of a hijacked connection
func (h hijackLogger) SetStatus(status int, length int64) {
	h.rl.status = status
	h.rl.length = length
}
// halfCloser is a net.Conn that can close its read and write sides
// independently (satisfied by *net.TCPConn).
type halfCloser interface {
	net.Conn
	CloseRead() error
	CloseWrite() error
}
// halfCloseLogger wraps a hijacked connection in order to enable
// WriteErrorReason to set the status of the request for logging purposes
type halfCloseLogger struct {
	halfCloser
	// rl is the Logger whose status/length SetStatus updates.
	rl *Logger
}
// SetStatus records the final status of a hijacked connection
func (h halfCloseLogger) SetStatus(status int, length int64) {
	h.rl.status = status
	h.rl.length = length
}
|
package benchs
import (
"database/sql"
"fmt"
models "github.com/efectn/go-orm-benchmarks/benchs/sqlboiler"
_ "github.com/jackc/pgx/v4/stdlib"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
)
// sqlboiler is the shared database handle used by all sqlboiler benchmarks.
var sqlboiler *sql.DB
// init registers the sqlboiler benchmark suite and, on suite init, opens
// the pgx-backed database connection the benchmarks run against.
func init() {
	st := NewSuite("sqlboiler")
	st.InitF = func() {
		st.AddBenchmark("Insert", 200*OrmMulti, SqlboilerInsert)
		st.AddBenchmark("MultiInsert 100 row", 200*OrmMulti, SqlboilerInsertMulti)
		st.AddBenchmark("Update", 200*OrmMulti, SqlboilerUpdate)
		st.AddBenchmark("Read", 200*OrmMulti, SqlboilerRead)
		st.AddBenchmark("MultiRead limit 100", 200*OrmMulti, SqlboilerReadSlice)
		var err error
		sqlboiler, err = sql.Open("pgx", OrmSource)
		CheckErr(err)
		boil.SetDB(sqlboiler)
	}
}
// SqlboilerInsert benchmarks single-row inserts via sqlboiler.
func SqlboilerInsert(b *B) {
	var m *models.Model
	WrapExecute(b, func() {
		InitDB()
		m = NewModel6()
	})
	for n := 0; n < b.N; n++ {
		// Zero the id so the database assigns a fresh one each round.
		m.ID = 0
		CheckErr(m.Insert(ctx, sqlboiler, boil.Infer()), b)
	}
}
// SqlboilerInsertMulti is a placeholder: sqlboiler has no bulk-insert API,
// so the benchmark aborts by panicking.
func SqlboilerInsertMulti(b *B) {
	panic(fmt.Errorf("doesn't support bulk-insert"))
}
// SqlboilerUpdate benchmarks updating one previously inserted row.
func SqlboilerUpdate(b *B) {
	var m *models.Model
	WrapExecute(b, func() {
		// Seed a single row to update repeatedly.
		InitDB()
		m = NewModel6()
		m.ID = 0
		CheckErr(m.Insert(ctx, sqlboiler, boil.Infer()), b)
	})
	for n := 0; n < b.N; n++ {
		_, err := m.Update(ctx, sqlboiler, boil.Infer())
		CheckErr(err, b)
	}
}
// SqlboilerRead benchmarks reading back the single row inserted in setup.
// NOTE(review): the loop uses .Exec rather than a finisher such as
// .One/.All (compare SqlboilerReadSlice) and queries "id = 0" — confirm
// this actually performs a read of the inserted row.
func SqlboilerRead(b *B) {
	var m *models.Model
	WrapExecute(b, func() {
		InitDB()
		m = NewModel6()
		m.ID = 0
		err := m.Insert(ctx, sqlboiler, boil.Infer())
		CheckErr(err, b)
	})
	for i := 0; i < b.N; i++ {
		_, err := models.Models(qm.Where("id = 0")).Exec(sqlboiler)
		CheckErr(err, b)
	}
}
// SqlboilerReadSlice benchmarks reading up to 100 rows per query.
func SqlboilerReadSlice(b *B) {
	var m *models.Model
	WrapExecute(b, func() {
		InitDB()
		m = NewModel6()
		// Seed 100 rows for the multi-read queries below.
		for n := 0; n < 100; n++ {
			m.ID = 0
			CheckErr(m.Insert(ctx, sqlboiler, boil.Infer()), b)
		}
	})
	for n := 0; n < b.N; n++ {
		_, err := models.Models(qm.Where("id > 0"), qm.Limit(100)).All(ctx, sqlboiler)
		CheckErr(err, b)
	}
}
|
package recurly
import (
"encoding/json"
"net/http"
"strings"
)
// Error contains basic information about the error
type Error struct {
	// recurlyResponse is the response metadata that produced this error.
	recurlyResponse *ResponseMetadata
	Message string
	Class ErrorClass
	Type ErrorType
	Params []ErrorParam
	TransactionError *TransactionError
}
// GetResponse returns the ResponseMetadata that generated this error
func (resource *Error) GetResponse() *ResponseMetadata {
	return resource.recurlyResponse
}
// setResponse sets the response metadata
func (resource *Error) setResponse(res *ResponseMetadata) {
	resource.recurlyResponse = res
}
// Error implements the error interface by returning the message.
func (e *Error) Error() string {
	return e.Message
}
// ErrorType is the specific kind of error (e.g. "validation", "timeout").
type ErrorType string
// ErrorClass distinguishes client errors from server errors.
type ErrorClass string
// TransactionErrorCategory classifies payment transaction failures.
type TransactionErrorCategory string
const (
	ErrorClassServer = ErrorClass("server")
	ErrorClassClient = ErrorClass("client")
	ErrorTypeUnknown = ErrorType("unknown")
	ErrorTypeRateLimited = ErrorType("rate_limited")
	ErrorTypeTimeout = ErrorType("timeout")
	ErrorTypeValidation = ErrorType("validation")
	ErrorTypeTransaction = ErrorType("transaction")
	ErrorTypeNotFound = ErrorType("not_found")
	ErrorTypeBadRequest = ErrorType("bad_request")
	ErrorTypeInternalServer = ErrorType("internal_server_error")
	ErrorTypeImmutableSubscription = ErrorType("immutable_subscription")
	ErrorTypeInvalidApiKey = ErrorType("invalid_api_key")
	ErrorTypeInvalidContentType = ErrorType("invalid_content_type")
	ErrorTypeInvalidApiVersion = ErrorType("invalid_api_version")
	ErrorTypeInvalidPermissions = ErrorType("invalid_permissions")
	ErrorTypeInvalidToken = ErrorType("invalid_token")
	// NOTE(review): identifier misspells "Simultaneous"; kept as-is because
	// it is exported and callers may reference it.
	ErrorTypeSimulaneousRequest = ErrorType("simultaneous_request")
	ErrorTypeUnavailableInApiVersion = ErrorType("unavailable_in_api_version")
	ErrorTypeUnknownApiVersion = ErrorType("unknown_api_version")
	ErrorTypeMissingFeature = ErrorType("missing_feature")
	ErrorTypeUnauthorized = ErrorType("unauthorized")
	ErrorTypeForbidden = ErrorType("forbidden")
	ErrorTypeBadGateway = ErrorType("bad_gateway")
	ErrorTypeServiceUnavailable = ErrorType("service_unavailable")
	TransactionErrorCategorySoft = TransactionErrorCategory("soft")
	TransactionErrorCategoryFraud = TransactionErrorCategory("fraud")
	TransactionErrorCategoryHard = TransactionErrorCategory("hard")
	TransactionErrorCategoryCommunication = TransactionErrorCategory("communication")
	TransactionErrorCategoryUnknown = TransactionErrorCategory("unknown")
)
// errorResponse mirrors the top-level JSON error envelope.
type errorResponse struct {
	Error errorDetails `json:"error"`
}
// errorDetails carries the decoded error fields from the response body.
type errorDetails struct {
	Type string `json:"type"`
	Message string `json:"message"`
	Params []ErrorParam `json:"params"`
	TransactionError *TransactionError `json:"transaction_error"`
}
// TransactionError describes a failed payment transaction.
type TransactionError struct {
	TransactionID string `json:"transaction_id"`
	Category TransactionErrorCategory `json:"category"`
	Code string `json:"code"`
	Message string `json:"message"`
	MerchantAdvice string `json:"merchant_advice"`
	ThreeDSecureActionTokenId string `json:"three_d_secure_action_token_id"`
}
// ErrorParam identifies an invalid request parameter and why it failed.
type ErrorParam struct {
	Property string `json:"param"`
	Message string `json:"message"`
}
// parseResponseToError converts an http.Response (and its body) into an
// *Error with class, type, params, and transaction details filled in.
// JSON bodies are decoded; otherwise the details are derived from the
// status code.
//
// Bug fix: `case http.StatusRequestTimeout:` was an empty case — Go switch
// cases do not fall through — so a 408 response produced the generic
// "unknown error" instead of the timeout message. 408 now shares the 504
// timeout handling.
func parseResponseToError(res *http.Response, body []byte) error {
	// Is this a client error or a server error?
	var errorClass ErrorClass
	if res.StatusCode >= 400 && res.StatusCode < 500 {
		errorClass = ErrorClassClient
	} else {
		errorClass = ErrorClassServer
	}
	// If we have a body, use the details from the body
	if strings.HasPrefix(res.Header.Get("Content-type"), "application/json") {
		// Return an error formatted from the JSON response
		var errResp errorResponse
		if err := json.Unmarshal(body, &errResp); err == nil {
			return &Error{
				Message:          errResp.Error.Message,
				Class:            errorClass,
				Type:             ErrorType(errResp.Error.Type),
				Params:           errResp.Error.Params,
				TransactionError: errResp.Error.TransactionError,
				recurlyResponse:  parseResponseMetadata(res),
			}
		}
	}
	// If we don't have a body, construct the details from the status code
	errMessage := "An unknown error has occurred. Please try again later."
	errType := ErrorTypeUnknown
	switch res.StatusCode {
	case http.StatusUnauthorized: // 401
		errMessage = "Unauthorized"
		errType = ErrorTypeUnauthorized
	case http.StatusForbidden: // 403
		errMessage = "The API key is not authorized for this resource"
		errType = ErrorTypeForbidden
	case http.StatusNotFound: // 404
		errMessage = "Requested object or endpoint not found"
		errType = ErrorTypeNotFound
	case http.StatusUnprocessableEntity: // 422
		errMessage = "Invalid request"
		errType = ErrorTypeValidation
	case http.StatusTooManyRequests: // 429
		errMessage = "You made too many API requests"
		errType = ErrorTypeRateLimited
	case http.StatusInternalServerError: // 500
		errMessage = "Server experienced an error"
		errType = ErrorTypeInternalServer
	case http.StatusBadGateway: // 502
		errMessage = "Error contacting server"
		errType = ErrorTypeBadGateway
	case http.StatusServiceUnavailable: // 503
		errMessage = "Service unavailable"
		errType = ErrorTypeServiceUnavailable
	case http.StatusRequestTimeout, http.StatusGatewayTimeout: // 408, 504
		errMessage = "Request timed out"
		errType = ErrorTypeTimeout
	}
	return &Error{
		Message:         errMessage,
		Class:           errorClass,
		Type:            errType,
		recurlyResponse: parseResponseMetadata(res),
	}
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tracing
import (
"fmt"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/cockroach/pkg/util/ring"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/logtags"
"github.com/gogo/protobuf/types"
"github.com/opentracing/opentracing-go"
)
// crdbSpan is a span for internal crdb usage. This is used to power SQL session
// tracing.
type crdbSpan struct {
	traceID      uint64 // probabilistically unique
	spanID       uint64 // probabilistically unique
	parentSpanID uint64
	goroutineID  uint64
	operation string
	startTime time.Time
	// logTags are set to the log tags that were available when this Span was
	// created, so that there's no need to eagerly copy all of those log tags
	// into this Span's tags. If the Span's tags are actually requested, these
	// logTags will be copied out at that point.
	//
	// Note that these tags have not gone through the log tag -> Span tag
	// remapping procedure; tagName() needs to be called before exposing each
	// tag's key to a user.
	logTags *logtags.Buffer
	// mu guards all mutable recording state; see crdbSpanMu.
	mu crdbSpanMu
	// testing, when non-nil, supplies a fake clock for deterministic tests.
	testing *testingKnob
}

// testingKnob bundles test-only dependencies injected into a crdbSpan.
type testingKnob struct {
	clock timeutil.TimeSource
}

// crdbSpanMu is the mutex-protected portion of a crdbSpan.
type crdbSpanMu struct {
	syncutil.Mutex
	// duration is initialized to -1 and set on Finish().
	duration time.Duration
	recording struct {
		// recordingType is the recording type of the ongoing recording, if any.
		// Its 'load' method may be called without holding the surrounding mutex,
		// but its 'swap' method requires the mutex.
		recordingType atomicRecordingType
		logs sizeLimitedBuffer // of *tracingpb.LogRecords
		structured sizeLimitedBuffer // of Structured events
		// dropped is true if the span has capped out it's memory limits for
		// logs and structured events, and has had to drop some. It's used to
		// annotate recordings with the _dropped tag, when applicable.
		dropped bool
		// children contains the list of child spans started after this Span
		// started recording.
		children []*crdbSpan
		// remoteSpan contains the list of remote child span recordings that
		// were manually imported.
		remoteSpans []tracingpb.RecordedSpan
	}
	// tags are only set when recording. These are tags that have been added to
	// this Span, and will be appended to the tags in logTags when someone
	// needs to actually observe the total set of tags that is a part of this
	// Span.
	// TODO(radu): perhaps we want a recording to capture all the tags (even
	// those that were set before recording started)?
	tags opentracing.Tags
	// The Span's associated baggage.
	baggage map[string]string
}
// newSizeLimitedBuffer constructs an empty sizeLimitedBuffer that will
// hold at most limit bytes worth of payloads.
func newSizeLimitedBuffer(limit int64) sizeLimitedBuffer {
	var buf sizeLimitedBuffer
	buf.limit = limit
	return buf
}
// sizeLimitedBuffer is a ring buffer of payloads that additionally
// tracks the total byte size of its contents so callers can enforce a
// memory limit (see crdbSpan.recordInternal).
type sizeLimitedBuffer struct {
	ring.Buffer
	size int64 // in bytes
	limit int64 // in bytes
}

// Reset empties the underlying ring buffer and zeroes the tracked size.
// The configured limit is preserved.
func (b *sizeLimitedBuffer) Reset() {
	b.Buffer.Reset()
	b.size = 0
}
// recordingType reports the span's current recording mode. A nil
// receiver denotes a no-op span and always reports RecordingOff.
func (s *crdbSpan) recordingType() RecordingType {
	if s != nil {
		return s.mu.recording.recordingType.load()
	}
	return RecordingOff
}
// enableRecording start recording on the Span. From now on, log events and
// child spans will be stored.
//
// If parent != nil, the Span will be registered as a child of the respective
// parent. If nil, the parent's recording will not include this child.
func (s *crdbSpan) enableRecording(parent *crdbSpan, recType RecordingType) {
	if parent != nil {
		parent.addChild(s)
	}
	// Fast path: nothing to do if the requested type is already in effect.
	// (recordingType() reads atomically without taking the lock.)
	if recType == RecordingOff || s.recordingType() == recType {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.mu.recording.recordingType.swap(recType)
	if recType == RecordingVerbose {
		// Propagate verbosity to future remote children via baggage.
		s.setBaggageItemLocked(verboseTracingBaggageKey, "1")
	}
}
// resetRecording clears any previously recorded info: logs, structured
// events, the dropped flag, and both local and remote children.
//
// NB: This is needed by SQL SessionTracing, who likes to start and stop
// recording repeatedly on the same Span, and collect the (separate) recordings
// every time.
func (s *crdbSpan) resetRecording() {
	s.mu.Lock()
	defer s.mu.Unlock()
	rec := &s.mu.recording
	rec.logs.Reset()
	rec.structured.Reset()
	rec.dropped = false
	rec.children = nil
	rec.remoteSpans = nil
}
// disableRecording stops recording on this span. If the span had been
// recording verbosely, the verbosity baggage item is cleared so that
// spans derived later do not inherit it.
func (s *crdbSpan) disableRecording() {
	if s.recordingType() == RecordingOff {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	oldRecType := s.mu.recording.recordingType.swap(RecordingOff)
	// We test the duration as a way to check if the Span has been finished. If it
	// has, we don't want to do the call below as it might crash (at least if
	// there's a netTr).
	if (s.mu.duration == -1) && (oldRecType == RecordingVerbose) {
		// Clear the verboseTracingBaggageKey baggage item, assuming that it was set by
		// enableRecording().
		s.setBaggageItemLocked(verboseTracingBaggageKey, "")
	}
}
// getRecording returns this span's recording together with the
// recordings of all registered children and imported remote spans. The
// result is sorted by start time, except the root span which stays
// first. wantTags controls whether tags are materialized (stringifying
// tag values can be expensive).
func (s *crdbSpan) getRecording(everyoneIsV211 bool, wantTags bool) Recording {
	if s == nil {
		return nil // noop span
	}
	s.mu.Lock()
	if !everyoneIsV211 {
		// The cluster may contain nodes that are running v20.2. Unfortunately that
		// version can easily crash when a peer returns a recording that that node
		// did not expect would get created. To circumvent this, retain the v20.2
		// behavior of eliding recordings when verbosity is off until we're sure
		// that v20.2 is not around any longer.
		//
		// TODO(tbg): remove this in the v21.2 cycle.
		if s.recordingType() == RecordingOff {
			s.mu.Unlock()
			return nil
		}
	}
	// The capacity here is approximate since we don't know how many grandchildren
	// there are.
	result := make(Recording, 0, 1+len(s.mu.recording.children)+len(s.mu.recording.remoteSpans))
	// Shallow-copy the children so we can process them without the lock.
	children := s.mu.recording.children
	result = append(result, s.getRecordingLocked(wantTags))
	result = append(result, s.mu.recording.remoteSpans...)
	s.mu.Unlock()
	// Recurse into the children without holding this span's lock.
	for _, child := range children {
		result = append(result, child.getRecording(everyoneIsV211, wantTags)...)
	}
	// Sort the spans by StartTime, except the first Span (the root of this
	// recording) which stays in place.
	toSort := sortPool.Get().(*Recording) // avoids allocations in sort.Sort
	*toSort = result[1:]
	sort.Sort(toSort)
	*toSort = nil
	sortPool.Put(toSort)
	return result
}
// importRemoteSpans attaches remote child span recordings to this span.
// The first span in remoteSpans is re-parented under this span; this is
// usually already the case, except with DistSQL traces where remote
// processors run in spans that FollowFrom an RPC Span that we don't collect.
func (s *crdbSpan) importRemoteSpans(remoteSpans []tracingpb.RecordedSpan) {
	// Robustness fix: the original indexed remoteSpans[0] unconditionally
	// and panicked on an empty slice.
	if len(remoteSpans) == 0 {
		return
	}
	// Change the root of the remote recording to be a child of this Span.
	remoteSpans[0].ParentSpanID = s.spanID
	s.mu.Lock()
	defer s.mu.Unlock()
	s.mu.recording.remoteSpans = append(s.mu.recording.remoteSpans, remoteSpans...)
}
// setTagLocked stores a tag on the span, lazily allocating the tag map.
// s.mu must be held.
func (s *crdbSpan) setTagLocked(key string, value interface{}) {
	tags := s.mu.tags
	if tags == nil {
		tags = make(opentracing.Tags)
		s.mu.tags = tags
	}
	tags[key] = value
}
// record appends a free-form log message to the span's recording. It is
// a no-op unless the span is recording verbosely.
func (s *crdbSpan) record(msg string) {
	if s.recordingType() != RecordingVerbose {
		return
	}
	// Use the injected test clock when one is configured.
	now := time.Now()
	if s.testing != nil {
		now = s.testing.clock.Now()
	}
	entry := &tracingpb.LogRecord{
		Time: now,
		Fields: []tracingpb.LogRecord_Field{
			{Key: tracingpb.LogMessageField, Value: msg},
		},
	}
	s.recordInternal(entry, &s.mu.recording.logs)
}
// recordStructured adds a structured event payload to the span's
// structured-event buffer, subject to that buffer's byte limit.
func (s *crdbSpan) recordStructured(item Structured) {
	s.recordInternal(item, &s.mu.recording.structured)
}
// sizable is a subset for protoutil.Message, for payloads (log records and
// structured events) that can be recorded.
type sizable interface {
	Size() int
}
// recordInternal appends payload to buffer, evicting the oldest entries
// (and marking the recording as dropped) whenever the buffer's byte
// limit would be exceeded.
func (s *crdbSpan) recordInternal(payload sizable, buffer *sizeLimitedBuffer) {
	s.mu.Lock()
	defer s.mu.Unlock()
	size := int64(payload.Size())
	if size > buffer.limit {
		// The incoming payload alone blows past the memory limit. Let's just
		// drop it.
		s.mu.recording.dropped = true
		return
	}
	// Account for the new payload first, then evict from the front until
	// the total fits again.
	buffer.size += size
	if buffer.size > buffer.limit {
		s.mu.recording.dropped = true
	}
	for buffer.size > buffer.limit {
		first := buffer.GetFirst().(sizable)
		buffer.RemoveFirst()
		buffer.size -= int64(first.Size())
	}
	buffer.AddLast(payload)
}
// setBaggageItemAndTag records a baggage item and mirrors it as a tag,
// except for the internal verbosity marker, which is named nondescriptly
// and is rendered by the recording on its own.
func (s *crdbSpan) setBaggageItemAndTag(restrictedKey, value string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.setBaggageItemLocked(restrictedKey, value)
	if restrictedKey == verboseTracingBaggageKey {
		return
	}
	s.setTagLocked(restrictedKey, value)
}
// setBaggageItemLocked stores a baggage item, lazily allocating the map
// and skipping the write when the value is already present. s.mu must be
// held.
func (s *crdbSpan) setBaggageItemLocked(restrictedKey, value string) {
	if s.mu.baggage == nil {
		s.mu.baggage = map[string]string{restrictedKey: value}
		return
	}
	if existing, ok := s.mu.baggage[restrictedKey]; !ok || existing != value {
		s.mu.baggage[restrictedKey] = value
	}
}
// getRecordingLocked returns the Span's recording. This does not include
// children.
//
// When wantTags is false, no tags will be added. This is a performance
// optimization as stringifying the tag values can be expensive.
func (s *crdbSpan) getRecordingLocked(wantTags bool) tracingpb.RecordedSpan {
	rs := tracingpb.RecordedSpan{
		TraceID: s.traceID,
		SpanID: s.spanID,
		ParentSpanID: s.parentSpanID,
		GoroutineID: s.goroutineID,
		Operation: s.operation,
		StartTime: s.startTime,
		Duration: s.mu.duration,
	}
	if rs.Duration == -1 {
		// -1 indicates an unfinished Span. For a recording it's better to put some
		// duration in it, otherwise tools get confused. For example, we export
		// recordings to Jaeger, and spans with a zero duration don't look nice.
		rs.Duration = timeutil.Now().Sub(rs.StartTime)
		rs.Finished = false
	} else {
		rs.Finished = true
	}
	// addTag lazily allocates the tag map on first use.
	addTag := func(k, v string) {
		if rs.Tags == nil {
			rs.Tags = make(map[string]string)
		}
		rs.Tags[k] = v
	}
	if wantTags {
		// Special "meta" tags describing the span's state.
		if s.mu.duration == -1 {
			addTag("_unfinished", "1")
		}
		if s.mu.recording.recordingType.load() == RecordingVerbose {
			addTag("_verbose", "1")
		}
		if s.mu.recording.dropped {
			addTag("_dropped", "1")
		}
	}
	// Marshal each structured event into a types.Any for the wire.
	if numEvents := s.mu.recording.structured.Len(); numEvents != 0 {
		rs.InternalStructured = make([]*types.Any, 0, numEvents)
		for i := 0; i < numEvents; i++ {
			event := s.mu.recording.structured.Get(i).(Structured)
			item, err := types.MarshalAny(event)
			if err != nil {
				// An error here is an error from Marshal; these
				// are unlikely to happen.
				continue
			}
			rs.InternalStructured = append(rs.InternalStructured, item)
		}
	}
	// Copy the baggage so the recording does not alias the span's map.
	if len(s.mu.baggage) > 0 {
		rs.Baggage = make(map[string]string)
		for k, v := range s.mu.baggage {
			rs.Baggage[k] = v
		}
	}
	if wantTags {
		// Log tags captured at creation are remapped and added first, then
		// explicitly-set span tags.
		if s.logTags != nil {
			setLogTags(s.logTags.Get(), func(remappedKey string, tag *logtags.Tag) {
				addTag(remappedKey, tag.ValueStr())
			})
		}
		if len(s.mu.tags) > 0 {
			for k, v := range s.mu.tags {
				// We encode the tag values as strings.
				addTag(k, fmt.Sprint(v))
			}
		}
	}
	// Copy out the log records (dereferenced, so the recording owns them).
	if numLogs := s.mu.recording.logs.Len(); numLogs != 0 {
		rs.Logs = make([]tracingpb.LogRecord, numLogs)
		for i := 0; i < numLogs; i++ {
			lr := s.mu.recording.logs.Get(i).(*tracingpb.LogRecord)
			rs.Logs[i] = *lr
		}
	}
	return rs
}
// addChild registers child with this span's recording, silently dropping
// it once the per-span child cap has been reached.
func (s *crdbSpan) addChild(child *crdbSpan) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.mu.recording.children) >= maxChildrenPerSpan {
		// The parent is full; the child simply won't appear in the recording.
		return
	}
	s.mu.recording.children = append(s.mu.recording.children, child)
}
// setVerboseRecursively sets the verbosity of the crdbSpan appropriately
// and then recurses on the children registered so far, without holding
// this span's lock during the recursion.
func (s *crdbSpan) setVerboseRecursively(to bool) {
	if !to {
		s.disableRecording()
	} else {
		s.enableRecording(nil /* parent */, RecordingVerbose)
	}
	s.mu.Lock()
	kids := s.mu.recording.children
	s.mu.Unlock()
	for _, kid := range kids {
		kid.setVerboseRecursively(to)
	}
}
// sortPool recycles the Recording slice headers that getRecording hands
// to sort.Sort, avoiding one allocation per recording collection.
var sortPool = sync.Pool{
	New: func() interface{} {
		return &Recording{}
	},
}
// Less implements sort.Interface; recordings order by span start time.
func (r Recording) Less(i, j int) bool {
	return r[i].StartTime.Before(r[j].StartTime)
}
// Swap implements sort.Interface.
func (r Recording) Swap(i, j int) {
	r[i], r[j] = r[j], r[i]
}
// Len implements sort.Interface.
func (r Recording) Len() int {
	return len(r)
}
// atomicRecordingType is a RecordingType whose load can be performed
// without holding the span mutex; see crdbSpanMu.recording.recordingType.
type atomicRecordingType RecordingType
// load returns the recording type.
func (art *atomicRecordingType) load() RecordingType {
	return RecordingType(atomic.LoadInt32((*int32)(art)))
}
// swap stores the new recording type and returns the old one.
func (art *atomicRecordingType) swap(recType RecordingType) RecordingType {
	return RecordingType(atomic.SwapInt32((*int32)(art), int32(recType)))
}
|
package main
import (
	"fmt"
	"math/rand"
	. "sort"
	"time"
)
// sort modifies the slice s so that the integers are sorted in place
// using quicksort with a randomly chosen pivot.
func sort(s []int) {
	if len(s) < 2 {
		return
	}
	last := len(s) - 1
	// Pick a random pivot and stash it at the end.
	pi := rand.Intn(len(s))
	pivot := s[pi]
	s[pi] = s[last]
	// Partition everything but the stashed slot, then drop the pivot in.
	mid := partition(s[:last], pivot)
	s[last] = s[mid]
	s[mid] = pivot
	sort(s[:mid])
	sort(s[mid+1:])
}
// partition rearranges s in place around pivot and returns an index k
// such that every element of s[:k] is <= pivot and every element of
// s[k:] is > pivot.
//
// Fix: the original ended with panic("unreachable") after the infinite
// loop — dead code (the loop only exits via return), flagged by vet.
func partition(s []int, pivot int) int {
	start, end := 0, len(s)
	for {
		// Advance start past elements that belong on the left.
		for {
			if start == end {
				return start
			} else if s[start] <= pivot {
				start++
			} else {
				break
			}
		}
		end--
		// Retreat end past elements that belong on the right.
		for {
			if start == end {
				return start
			} else if s[end] > pivot {
				end--
			} else {
				break
			}
		}
		// s[start] > pivot and s[end] <= pivot: swap them into place.
		s[start], s[end] = s[end], s[start]
		start++
	}
}
func main() {
n := 1 << 20
arr := make([]int, n)
arr2 := make([]int, n)
for i, _ := range arr {
arr[i] = rand.Int()
arr2[i] = arr[i]
}
t0 := time.Nanoseconds()
sort(arr)
fmt.Println("quicksort time:", float(time.Nanoseconds()-t0)/1e9)
t0 = time.Nanoseconds()
SortInts(arr2)
fmt.Println("sort.SortInts time:", float(time.Nanoseconds()-t0)/1e9)
// Check for correctness
for i, _ := range arr {
if arr[i] != arr2[i] {
fmt.Println("Sorting failed!")
break
}
}
}
|
package main
import (
"fmt"
"time"
)
// main prints the current time in a compact yymmdd-hhmmss stamp.
//
// Fix: the original nested one fmt.Println inside another, which printed
// the timestamp and then the inner call's return values ("16 <nil>").
func main() {
	fmt.Println(time.Now().Format("060102-150405"))
}
|
package main
import (
"log"
console "github.com/AsynkronIT/goconsole"
"github.com/AsynkronIT/protoactor-go/actor"
"github.com/GuiltyMorishita/money-transfer-saga/saga"
)
// main configures and spawns the money-transfer saga runner actor, then
// blocks on console input so the process stays alive while it runs.
func main() {
	// Simulation parameters for the saga runner.
	numberOfTransfers := 1000
	uptime := 99.99
	refusalProbability := 0.01
	busyProbability := 0.01
	retryAttempts := 0
	verbose := false
	log.Println("Starting")
	makeRunner := func() actor.Actor {
		return saga.NewRunner(numberOfTransfers, uptime, refusalProbability, busyProbability, retryAttempts, verbose)
	}
	props := actor.FromProducer(makeRunner)
	log.Println("Spawning runner")
	actor.SpawnNamed(props, "runner")
	console.ReadLine()
}
|
package tengo2lua_test
import (
"fmt"
"github.com/d5/tengo2lua"
)
// ExampleTranspiler demonstrates end-to-end use of the transpiler:
// construct it over Tengo source, convert, and print the resulting Lua.
func ExampleTranspiler() {
	src := []byte(`
each := func(x, f) { for k, v in x { f(k, v) } }
sum := 0
each([1, 2, 3], func(i, v) { sum += v })
`)
	t := tengo2lua.NewTranspiler(src, nil)
	dst, err := t.Convert()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dst))
}
|
package main
import (
"fmt"
"os"
"path"
"io"
"strings"
"path/filepath"
"github.com/codegangsta/cli"
)
// Extract unpacks every entry of the loaded PBO archive into the output
// directory taken from the command line. Per-entry failures are reported
// and skipped so one bad entry does not abort the whole extraction.
//
// Fix: the original kept going after a failed os.Create / os.MkdirAll,
// so io.Copy would dereference a nil *os.File and panic.
func Extract(c *cli.Context) {
	if len(c.Args()) == 0 {
		fmt.Fprintln(os.Stderr, "Extract error: No outdir argument provided")
		return
	}
	// TODO: Verbose option
	verbose := true
	outdir := c.Args().Get(1)
	fmt.Println(outdir)
	// A "prefix" header extension relocates output under a subdirectory.
	if pboFile.HeaderExtension != nil {
		prefix, ok := pboFile.HeaderExtension.ExtendedFields["prefix"]
		if ok {
			prefix = strings.Replace(prefix, "\\", "/", -1)
			fmt.Println(prefix)
			outdir = path.Join(outdir, prefix)
			fmt.Println(outdir)
		}
	}
	exists, err := exists(outdir)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Extract error: unknown error occurred.", err)
		return
	}
	if !exists {
		if err := mkdirIntermediate(outdir); err != nil {
			fmt.Fprintln(os.Stderr, "Extract error: could not create output directory.", err)
			return
		}
	}
	// Start creating/copying files
	for _, entry := range pboFile.Entries {
		if verbose {
			fmt.Println("Extracting", entry.Name)
		}
		outfile := path.Join(outdir, filepath.ToSlash(entry.Name))
		if verbose {
			fmt.Printf("Extract file path: %s", outfile)
		}
		if err := os.MkdirAll(path.Dir(outfile), 0777); err != nil {
			fmt.Fprintf(os.Stderr, "Extract error: could not create folder %s.\nError: %s\n", path.Dir(outfile), err)
			continue // writing the file below would fail anyway
		}
		file, err := os.Create(outfile)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Extract error: could not create file %s.\nError: %s\n", entry.Name, err)
			continue // original fell through here with file == nil and panicked
		}
		// io.SeekStart replaces the deprecated os.SEEK_SET.
		entry.Seek(0, io.SeekStart)
		io.Copy(file, entry)
		file.Close()
	}
	if verbose {
		fmt.Println("Done")
	}
}
|
package models
import (
"github.com/jinzhu/gorm"
"time"
"github.com/EthereumCommonwealth/go-callisto/common"
)
// Block is the GORM model persisting one blockchain block header.
// Hash carries a unique index so the same block is never stored twice.
type Block struct {
	gorm.Model
	Hash common.Hash `gorm:"unique_index:hash_block"`
	ParentHash common.Hash
	Miner common.Address
	TransactionRoot common.Hash
	Difficulty uint
	Number uint
	GasLimit uint
	GasUsed uint
	Timestamp time.Time
	ExtraData string
	Nonce uint
}
|
/*
* Wire API
*
* Moov Wire implements an HTTP API for creating, parsing, and validating Fedwire messages.
*
* API version: v1
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// InputMessageAccountabilityData struct for InputMessageAccountabilityData
// (IMAD), the fields that together identify a Fedwire input message.
type InputMessageAccountabilityData struct {
	// InputCycleDate (Format CCYYMMDD - C=Century, Y=Year, M=Month, D=Day)
	InputCycleDate string `json:"inputCycleDate"`
	// InputSource identifies the sending source of the message.
	InputSource string `json:"inputSource"`
	// InputSequenceNumber is the sequence number within the cycle/source.
	InputSequenceNumber string `json:"inputSequenceNumber"`
}
|
package e7_4
import (
"io"
)
type stringReader struct {
pos int
str string
}
func NewReader(str string) io.Reader {
return &stringReader{str: str, pos: 0}
}
func (sr *stringReader) Read(p []byte) (n int, err error) {
n = copy(p, sr.str[sr.pos:])
sr.pos += n
if n == 0 && sr.pos == len(sr.str) {
err = io.EOF
}
return
}
|
package models
import (
"fmt"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
var DB *gorm.DB
func ConnectDataBase() {
dsn := "host=localhost user=babu password=babu DB.name=babu port=5432 sslmode=disable"
db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
fmt.Println("db",db)
fmt.Println("err",err)
if err != nil {
panic("Failed to connect to database!")
}
db.AutoMigrate(&Account{})
db.AutoMigrate(&Customer{})
db.AutoMigrate(&Employee{})
db.AutoMigrate(&Transaction{})
db.AutoMigrate(&User{})
DB = db
} |
package online
import (
"bufio"
"errors"
"os"
"github.com/Kurorororo/vector"
)
// Model express online classifier: a learner that predicts a binary
// (0/1) label for a feature vector and can be updated one example at a
// time, fitted from an in-memory Dataset or streamed from disk.
type Model interface {
	Predict(*vector.Vector) (float64, error)
	Score(*vector.Vector) (float64, error)
	Update(*Data) error
	Fit(Dataset, int) error
	FitFromDisk(string, int) error
	Copy() (Model, error)
	Save(string) error
}
// Factory is factory of OnlineModel: Create builds a model from
// hyper-parameters, Load restores one from a file.
type Factory interface {
	Create(int, float64, float64, string, float64) (Model, error)
	Load(string) (Model, error)
}
// Test evaluates model accuracy over ds. Labels must be exactly 0 or 1;
// the returned value is the fraction of examples predicted correctly.
func Test(model Model, ds Dataset) (float64, error) {
	correct := 0
	for i := range ds {
		label := ds[i].Label
		if label != 0.0 && label != 1.0 {
			return 0.0, errors.New("label must be 0 or 1")
		}
		predict, err := model.Predict(&ds[i].Feature)
		if err != nil {
			return 0.0, err
		}
		if predict == label {
			correct++
		}
	}
	return float64(correct) / float64(len(ds)), nil
}
// TestFromDisk evaluates model accuracy against a LIBSVM-format file,
// reading one example per line. length is forwarded to ParseLIBSVM.
//
// Fixes: the opened file was never closed (descriptor leak); d.Label was
// inspected before the ParseLIBSVM error was checked; failures panicked
// instead of being returned; an empty file produced NaN (0/0).
func TestFromDisk(model Model, filename string, length int) (float64, error) {
	fp, err := os.Open(filename)
	if err != nil {
		return 0.0, err
	}
	defer fp.Close()
	scanner := bufio.NewScanner(fp)
	correct := 0.0
	total := 0.0
	var d Data
	for scanner.Scan() {
		line := scanner.Text()
		// Check the parse error before touching the parsed fields.
		if err := d.ParseLIBSVM(&line, length); err != nil {
			return 0.0, err
		}
		if d.Label != 0.0 && d.Label != 1.0 {
			return 0.0, errors.New("label must be 0 or 1")
		}
		predict, err := model.Predict(&d.Feature)
		if err != nil {
			return 0.0, err
		}
		if predict == d.Label {
			correct += 1.0
		}
		total += 1.0
	}
	if err := scanner.Err(); err != nil {
		return 0.0, err
	}
	if total == 0 {
		return 0.0, errors.New("no examples in file")
	}
	return correct / total, nil
}
// CrossValidation shuffles ds and runs k-fold cross validation, training
// a copy of model on each training split concurrently and returning the
// k fold accuracies.
//
// Fixes: the test fold was sliced as ds[j : n*(j+1)] instead of
// ds[n*j : n*(j+1)], and the training set omitted all folds before the
// test fold (train was ds[n*(j+1):] only).
func CrossValidation(model Model, ds Dataset, k int) ([]float64, error) {
	if k < 2 {
		return []float64{}, errors.New("could not split dataset by given k")
	}
	ds.Shuffle()
	accuracyChan := make(chan float64, k)
	n := len(ds) / k
	if n == 0 {
		return []float64{}, errors.New("could not split dataset by given k")
	}
	for i := 0; i < k; i++ {
		go func(j int) {
			// Fold j is the test set; everything else is training data.
			test := ds[n*j : n*(j+1)]
			train := make(Dataset, 0, len(ds)-n)
			train = append(train, ds[:n*j]...)
			train = append(train, ds[n*(j+1):]...)
			copyModel, err := model.Copy()
			if err != nil {
				panic(err)
			}
			err = copyModel.Fit(train, len(train))
			if err != nil {
				panic(err)
			}
			accuracy, err := Test(copyModel, test)
			if err != nil {
				panic(err)
			}
			accuracyChan <- accuracy
		}(i)
	}
	accuracies := make([]float64, k)
	for i := 0; i < k; i++ {
		accuracies[i] = <-accuracyChan
	}
	return accuracies, nil
}
// GridSearch exhaustively scores every (eta, gamma, regularizer, lambda)
// combination with `split`-fold cross validation and returns a fresh
// model built with the best-scoring combination.
//
// Fix: corrected the "paramerter" typo in the user-facing error message.
func GridSearch(f Factory, dim int, etas []float64, gammas []float64, reglrs []string, lambdas []float64, ds Dataset, split int) (Model, error) {
	n := len(etas) * len(gammas) * len(reglrs) * len(lambdas)
	if n == 0 {
		return nil, errors.New("the number of parameter candidates must be positive")
	}
	max := 0.0
	argmax := []int{0, 0, 0, 0}
	for i := range etas {
		for j := range gammas {
			for k := range reglrs {
				for l := range lambdas {
					model, err := f.Create(dim, etas[i], gammas[j], reglrs[k], lambdas[l])
					if err != nil {
						panic(err)
					}
					scores, err := CrossValidation(model, ds, split)
					if err != nil {
						panic(err)
					}
					// Sum of fold accuracies; dividing by the fold count
					// would not change which combination wins.
					score := 0.0
					for _, s := range scores {
						score += s
					}
					if score > max {
						max = score
						argmax = []int{i, j, k, l}
					}
				}
			}
		}
	}
	return f.Create(dim, etas[argmax[0]], gammas[argmax[1]], reglrs[argmax[2]], lambdas[argmax[3]])
}
|
package main
import (
"fmt"
"log"
"net/http"
"strings"
)
func sayHelloWorld(w http.ResponseWriter, r *http.Request) {
// ParseForm解析URL中的查询字符串,并将解析结果更新到r.Form字段
r.ParseForm() // 解析参数
fmt.Println(r.Form) // 在服务端打印请求参数
//fmt.Println("URL:", r.URL.Path) // 请求 URL
//fmt.Println("Scheme", r.URL.Scheme)
for k, v := range r.Form {
fmt.Println(k, ":", strings.Join(v, ""))
}
/**
func (r *Request) ParseForm() error
ParseForm解析URL中的查询字符串,并将解析结果更新到r.Form字段。
Form url.Values
// PostForm是解析好的POST或PUT的表单数据。
// 本字段只有在调用ParseForm后才有效。在客户端,会忽略请求中的本字段而使用Body替代。
*/
}
// main registers the root handler and serves HTTP on :9091.
func main() {
	// HandleFunc registers the handler function for the given pattern.
	http.HandleFunc("/", sayHelloWorld)
	// A nil handler means requests are routed via http.DefaultServeMux.
	err := http.ListenAndServe(":9091", nil)
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
// Package main ...
package main
import (
"fmt"
"os"
)
// main mimics a chrome startup failure: print the loader error and exit
// with a non-zero status.
func main() {
	const msg = "./chrome: error while loading shared libraries: libcairo.so.2: cannot open shared object file: No such file or directory"
	fmt.Println(msg)
	os.Exit(1)
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jdbg
import "fmt"
// parseSignature returns the type for the signature string starting at offset.
// offset will be modified so that it is one byte beyond the end of the parsed
// string.
func (j *JDbg) parseSignature(sig string, offset *int) (Type, error) {
	r := sig[*offset]
	*offset++
	switch r {
	// Single-character tags map directly to cached primitive types.
	case 'V':
		return j.cache.voidTy, nil
	case 'Z':
		return j.cache.boolTy, nil
	case 'B':
		return j.cache.byteTy, nil
	case 'C':
		return j.cache.charTy, nil
	case 'S':
		return j.cache.shortTy, nil
	case 'I':
		return j.cache.intTy, nil
	case 'J':
		return j.cache.longTy, nil
	case 'F':
		return j.cache.floatTy, nil
	case 'D':
		return j.cache.doubleTy, nil
	case 'L':
		// fully-qualified-class: scan forward to the terminating ';',
		// then resolve the whole "L...;" token.
		start := *offset - 1 // include 'L'
		for *offset < len(sig) {
			r := sig[*offset]
			*offset++
			if r == ';' {
				return j.classFromSig(sig[start:*offset])
			}
		}
		return nil, fmt.Errorf("Fully qualified class missing terminating ';'")
	case '[':
		// Array: recursively parse the element type, then look up or
		// build (and cache) the array type for the full "[..." token.
		start := *offset - 1 // include '['
		el, err := j.parseSignature(sig, offset)
		if err != nil {
			return nil, err
		}
		sig := sig[start:*offset]
		if array, ok := j.cache.arrays[sig]; ok {
			return array, nil
		}
		class, err := j.classFromSig(sig)
		if err != nil {
			return nil, err
		}
		array := &Array{class, el}
		j.cache.arrays[sig] = array
		return array, nil
	default:
		return nil, fmt.Errorf("Unknown signature type tag '%v'", r)
	}
}
|
// Package chroma takes source code and other structured text and converts it into syntax highlighted HTML, ANSI-
// coloured text, etc.
//
// Chroma is based heavily on Pygments, and includes translators for Pygments lexers and styles.
//
// For more information, go here: https://github.com/alecthomas/chroma
package chroma
|
package ditto
import (
"encoding/json"
"errors"
"fmt"
)
// Section is a node in a form definition tree. Exactly one of
// ChildSection or ChildField is populated: sections either nest further
// sections or hold leaf fields.
type Section struct {
	ID string `json:"id"`
	Type Type `json:"type"`
	Title string `json:"title"`
	Description *string `json:"description"`
	ChildSection []Section `json:"child_section"`
	ChildField []Field `json:"child_field"`
	Info map[string]interface{} `json:"info,omitempty"`
}
// SectionWithStatus mirrors Section but carries per-node status and
// field values, for rendering a filled-in form.
type SectionWithStatus struct {
	ID string `json:"id"`
	Type Type `json:"type"`
	Title string `json:"title"`
	Description *string `json:"description"`
	ChildSection []*SectionWithStatus `json:"child_section"`
	ChildField []*FieldWithValue `json:"child_field"`
	Info map[string]interface{} `json:"info,omitempty"`
	Status map[string]interface{} `json:"status"`
}
// MarshalJSON serializes a Section, collapsing whichever child list is
// in use into a single "child" key: child sections take precedence;
// otherwise child fields are emitted (even when empty).
func (s Section) MarshalJSON() ([]byte, error) {
	if len(s.ChildSection) > 0 {
		type withSections struct {
			ID           string                 `json:"id"`
			Type         Type                   `json:"type"`
			Title        string                 `json:"title"`
			Description  *string                `json:"description"`
			ChildSection []Section              `json:"child"`
			Info         map[string]interface{} `json:"info,omitempty"`
		}
		return json.Marshal(withSections{
			ID:           s.ID,
			Type:         s.Type,
			Title:        s.Title,
			Description:  s.Description,
			ChildSection: s.ChildSection,
			Info:         s.Info,
		})
	}
	type withFields struct {
		ID          string                 `json:"id"`
		Type        Type                   `json:"type"`
		Title       string                 `json:"title"`
		Description *string                `json:"description"`
		ChildField  []Field                `json:"child"`
		Info        map[string]interface{} `json:"info,omitempty"`
	}
	return json.Marshal(withFields{
		ID:          s.ID,
		Type:        s.Type,
		Title:       s.Title,
		Description: s.Description,
		ChildField:  s.ChildField,
		Info:        s.Info,
	})
}
// MarshalJSON serializes a SectionWithStatus the same way Section does —
// one "child" key holding either nested sections or leaf fields — while
// additionally carrying the status map.
func (s SectionWithStatus) MarshalJSON() ([]byte, error) {
	if len(s.ChildSection) > 0 {
		type withSections struct {
			ID           string                 `json:"id"`
			Type         Type                   `json:"type"`
			Title        string                 `json:"title"`
			Description  *string                `json:"description"`
			ChildSection []*SectionWithStatus   `json:"child"`
			Info         map[string]interface{} `json:"info,omitempty"`
			Status       map[string]interface{} `json:"status"`
		}
		return json.Marshal(withSections{
			ID:           s.ID,
			Type:         s.Type,
			Title:        s.Title,
			Description:  s.Description,
			ChildSection: s.ChildSection,
			Info:         s.Info,
			Status:       s.Status,
		})
	}
	type withFields struct {
		ID          string                 `json:"id"`
		Type        Type                   `json:"type"`
		Title       string                 `json:"title"`
		Description *string                `json:"description"`
		ChildField  []*FieldWithValue      `json:"child"`
		Info        map[string]interface{} `json:"info,omitempty"`
		Status      map[string]interface{} `json:"status"`
	}
	return json.Marshal(withFields{
		ID:          s.ID,
		Type:        s.Type,
		Title:       s.Title,
		Description: s.Description,
		ChildField:  s.ChildField,
		Info:        s.Info,
		Status:      s.Status,
	})
}
// NewSectionFromMap builds a Section tree from a decoded JSON map,
// enforcing globally unique ids across the whole tree.
func NewSectionFromMap(data map[string]interface{}) (*Section, error) {
	seen := map[string]bool{}
	return newSectionFromMap(data, seen)
}
// newSectionFromMap recursively validates and converts one section map.
// ids accumulates every id seen so far to enforce uniqueness.
//
// Fix: the original used unchecked type assertions for "id", "title"
// and "description", so a non-string value panicked instead of
// returning a validation error.
func newSectionFromMap(data map[string]interface{}, ids map[string]bool) (*Section, error) {
	if nil == data["id"] {
		return nil, errors.New(`section_should_have_property_id`)
	}
	id, ok := data["id"].(string)
	if !ok {
		return nil, errors.New(`section_id_should_be_string`)
	}
	if _, ok := ids[id]; ok {
		return nil, errors.New("id_must_unique")
	}
	ids[id] = true
	if data["title"] == nil {
		return nil, errors.New(`section_should_have_property_title`)
	}
	title, ok := data["title"].(string)
	if !ok {
		return nil, errors.New(`section_title_should_be_string`)
	}
	if data["type"] == nil {
		return nil, errors.New(`section_should_have_property_type`)
	}
	if data["child"] == nil {
		return nil, errors.New(`section_should_have_property_child`)
	}
	fieldType, ok := data["type"].(string)
	if !ok {
		fmt.Println(data)
		return nil, errors.New(`section_type_not_supported`)
	}
	typ := GetType(fieldType)
	if nil == typ {
		fmt.Println(data)
		return nil, errors.New(`section_type_not_supported`)
	}
	var info map[string]interface{}
	if data["info"] != nil {
		info, ok = data["info"].(map[string]interface{})
		if !ok {
			return nil, errors.New("field_info_should_be_an_object")
		}
		if err := validateInfo(info, typ.ValidInfoKeys); err != nil {
			return nil, err
		}
	}
	if typ.Type != "section" {
		fmt.Println(data)
		return nil, errors.New(`section_type_not_supported`)
	}
	childInterface, ok := data["child"].([]interface{})
	if !ok {
		return nil, errors.New("section_child_should_be_array")
	}
	var desc *string
	if data["description"] != nil {
		descVal, ok := data["description"].(string)
		if !ok {
			return nil, errors.New(`section_description_should_be_string`)
		}
		desc = &descVal
	}
	result := &Section{
		ID:          id,
		Type:        *typ,
		Title:       title,
		Description: desc,
		Info:        info,
	}
	childSection := arrayToArrayOfMapString(childInterface)
	if len(childSection) == 0 {
		return nil, errors.New(`section_should_have_child`)
	}
	// A child carrying its own "child" key is a nested section; otherwise
	// all children are leaf fields.
	if childSection[0]["child"] == nil {
		fields := make([]Field, 0, len(childSection))
		for _, child := range childSection {
			field, err := newFieldFromMap(child, ids)
			if err != nil {
				return nil, err
			}
			fields = append(fields, *field)
		}
		result.ChildField = fields
	} else {
		sections := make([]Section, 0, len(childSection))
		for _, child := range childSection {
			sub, err := newSectionFromMap(child, ids)
			if err != nil {
				return nil, err
			}
			sections = append(sections, *sub)
		}
		result.ChildSection = sections
	}
	return result, nil
}
|
package proto
// go:generate make generate
import (
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"runtime/debug"
"time"
"github.com/reconquest/karma-go"
"github.com/MagalixCorp/magalix-agent/v2/watcher"
"github.com/MagalixTechnologies/uuid-go"
"github.com/golang/snappy"
"github.com/kovetskiy/lorg"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
satori "github.com/satori/go.uuid"
corev1 "k8s.io/api/core/v1"
)
var (
	// gobTypesRegistered guards one-time registration of gobTypes.
	gobTypesRegistered bool
	// gobTypes lists the concrete types that may travel inside packets
	// as interface values and therefore must be registered with
	// encoding/gob before encoding/decoding.
	gobTypes = []interface{}{
		uuid.UUID{},
		satori.UUID{},
		[uuid.Size]byte{},
		new(watcher.Status),
		new(watcher.ContainerStatusSource),
		make(map[string]interface{}),
		make([]interface{}, 0),
	}
)
// PacketHello is the handshake packet exchanged when the agent connects,
// carrying protocol version, build info and account/cluster identity.
type PacketHello struct {
	Major uint `json:"major"`
	Minor uint `json:"minor"`
	Build string `json:"build"`
	StartID string `json:"start_id"`
	AccountID uuid.UUID `json:"account_id"`
	ClusterID uuid.UUID `json:"cluster_id"`
	PacketV2Enabled bool `json:"packet_v2_enabled,omitempty"`
	ServerVersion string `json:"server_version"`
}
type PacketAuthorizationRequest struct {
AccountID uuid.UUID `json:"account_id"`
ClusterID uuid.UUID `json:"cluster_id"`
}
type PacketAuthorizationQuestion struct {
Token []byte `json:"token"`
}
type PacketAuthorizationAnswer struct {
Token []byte `json:"token"`
}
type PacketAuthorizationFailure struct{}
type PacketAuthorizationSuccess struct{}
type PacketBye struct {
Reason string `json:"reason,omitempty"`
}
type PacketPing struct {
Number int `json:"number,omitempty"`
Started time.Time `json:"started"`
}
type PacketPong struct {
Number int `json:"number,omitempty"`
Started time.Time `json:"started"`
}
type PacketLogItem struct {
Level lorg.Level `json:"level"`
Date time.Time `json:"date"`
Data interface{} `json:"data"`
}
// PacketRegisterEntityItem is the common identity part shared by all
// register-* items (applications, services, containers).
type PacketRegisterEntityItem struct {
	ID          uuid.UUID         `json:"id"`
	Name        string            `json:"name"`
	Kind        string            `json:"kind,omitempty"`
	Annotations map[string]string `json:"annotations,omitempty"`
}

// PacketRegisterApplicationItem registers an application (namespace) and its services.
type PacketRegisterApplicationItem struct {
	PacketRegisterEntityItem
	LimitRanges []corev1.LimitRange         `json:"limit_ranges"`
	Services    []PacketRegisterServiceItem `json:"services"`
}

// PacketRegisterServiceItem registers a service (controller) and its containers.
type PacketRegisterServiceItem struct {
	PacketRegisterEntityItem
	ReplicasStatus ReplicasStatus                `json:"replicas_status,omitempty"`
	Containers     []PacketRegisterContainerItem `json:"containers"`
}

// ReplicasStatus mirrors controller replica counters; nil means "unknown".
type ReplicasStatus struct {
	Desired   *int32 `json:"desired,omitempty"`
	Current   *int32 `json:"current,omitempty"`
	Ready     *int32 `json:"ready,omitempty"`
	Available *int32 `json:"available,omitempty"`
}

// PacketRegisterContainerItem registers one container; the probe/resource
// specs are kept as raw JSON to defer decoding.
type PacketRegisterContainerItem struct {
	PacketRegisterEntityItem
	Image          string          `json:"image"`
	Resources      json.RawMessage `json:"resources"`
	LivenessProbe  json.RawMessage `json:"liveness_probe"`
	ReadinessProbe json.RawMessage `json:"readiness_probe"`
}

// ContainerResourceRequirements combines effective requirements with the
// original spec values and the origin ("kind") of each limit/request.
type ContainerResourceRequirements struct {
	corev1.ResourceRequirements
	SpecResourceRequirements corev1.ResourceRequirements `json:"spec_resources_requirements,omitempty"`
	LimitsKinds              ResourcesRequirementsKind   `json:"limits_kinds,omitempty"`
	RequestsKinds            ResourcesRequirementsKind   `json:"requests_kinds,omitempty"`
}

// ResourcesRequirementsKind maps a resource name to the origin of its value.
type ResourcesRequirementsKind = map[corev1.ResourceName]string

// Origins for a resource requirement value.
const (
	ResourceRequirementKindSet                = "set"
	ResourceRequirementKindDefaultsLimitRange = "defaults-limit-range"
	ResourceRequirementKindDefaultFromLimits  = "default-from-limits"
)
// PacketApplicationsStoreRequest uploads the full set of registered applications.
type PacketApplicationsStoreRequest []PacketRegisterApplicationItem

// PacketApplicationsStoreResponse acknowledges PacketApplicationsStoreRequest.
type PacketApplicationsStoreResponse struct{}

// PacketMetricsStoreV2Request uploads a batch of v2 metrics.
type PacketMetricsStoreV2Request []MetricStoreV2Request

// MetricStoreV2Request is one metric sample with full entity coordinates.
type MetricStoreV2Request struct {
	Name           string                 `json:"name"`
	Type           string                 `json:"type"`
	NodeName       string                 `json:"node_name"`
	NodeIP         string                 `json:"node_ip"`
	NamespaceName  string                 `json:"namespace_name"`
	ControllerName string                 `json:"controller_name"`
	ControllerKind string                 `json:"controller_kind"`
	ContainerName  string                 `json:"container_name"`
	Timestamp      time.Time              `json:"timestamp"`
	Value          int64                  `json:"value"`
	PodName        string                 `json:"pod_name"`
	AdditionalTags map[string]interface{} `json:"additional_tags"`
}

// PacketMetricValueItem is one sample of a Prometheus-style metric; nil
// entity IDs mean the sample is not bound to that entity kind.
type PacketMetricValueItem struct {
	Node        *uuid.UUID
	Application *uuid.UUID
	Service     *uuid.UUID
	Container   *uuid.UUID
	Tags        map[string]string
	Value       float64
}

// PacketMetricFamilyItem groups samples of one metric family.
type PacketMetricFamilyItem struct {
	Name   string
	Help   string
	Type   string
	Tags   []string
	Values []*PacketMetricValueItem
}

// PacketMetricsPromStoreRequest uploads Prometheus-scraped metric families.
type PacketMetricsPromStoreRequest struct {
	Timestamp time.Time
	Metrics   []*PacketMetricFamilyItem
}

// PacketMetricsPromStoreResponse acknowledges PacketMetricsPromStoreRequest.
type PacketMetricsPromStoreResponse struct {
}

// PacketRegisterNodeCapacityItem holds node capacity/allocatable figures.
type PacketRegisterNodeCapacityItem struct {
	CPU              int `json:"cpu"`
	Memory           int `json:"memory"`
	StorageEphemeral int `json:"storage_ephemeral"`
	Pods             int `json:"pods"`
}
// PacketRegisterNodeItem registers a cluster node with its capacity and
// the containers currently scheduled on it.
type PacketRegisterNodeItem struct {
	ID            uuid.UUID                              `json:"id,omitempty"`
	Name          string                                 `json:"name"`
	IP            string                                 `json:"ip"`
	Roles         string                                 `json:"roles"`
	Region        string                                 `json:"region,omitempty"`
	Provider      string                                 `json:"provider,omitempty"`
	InstanceType  string                                 `json:"instance_type,omitempty"`
	InstanceSize  string                                 `json:"instance_size,omitempty"`
	Capacity      PacketRegisterNodeCapacityItem         `json:"capacity"`
	Allocatable   PacketRegisterNodeCapacityItem         `json:"allocatable"`
	Containers    int                                    `json:"containers,omitempty"`
	ContainerList []*PacketRegisterNodeContainerListItem `json:"container_list,omitempty"`
}

// PacketRegisterNodeContainerListItem locates one container on a node.
type PacketRegisterNodeContainerListItem struct {
	// cluster where host of container located in
	Cluster string `json:"cluster"`
	// image of container
	Image string `json:"image"`
	// limits of container
	Limits *PacketRegisterNodeContainerListResourcesItem `json:"limits"`
	// requests of container
	Requests *PacketRegisterNodeContainerListResourcesItem `json:"requests"`
	// name of container (not guaranteed to be unique in cluster scope)
	Name string `json:"name"`
	// namespace where pod located in
	Namespace string `json:"namespace"`
	// node where container located in
	Node string `json:"node"`
	// pod where container located in
	Pod string `json:"pod"`
}

// PacketRegisterNodeContainerListResourcesItem holds a container's CPU/memory figures.
type PacketRegisterNodeContainerListResourcesItem struct {
	CPU    int `json:"cpu"`
	Memory int `json:"memory"`
}
// PacketNodesStoreRequest uploads the full set of registered nodes.
type PacketNodesStoreRequest []PacketRegisterNodeItem

// PacketNodesStoreResponse acknowledges PacketNodesStoreRequest.
type PacketNodesStoreResponse struct{}

// PacketLogs is a batch of forwarded log records.
type PacketLogs []PacketLogItem

// PacketEventsStoreRequest uploads a batch of cluster events.
type PacketEventsStoreRequest []watcher.Event

// PacketEventsStoreResponse acknowledges PacketEventsStoreRequest.
type PacketEventsStoreResponse struct{}

// PacketEventLastValueRequest asks for the last stored value of an event kind.
type PacketEventLastValueRequest struct {
	Entity    string `json:"entity"`
	EntityID  string `json:"entity_id"`
	EventKind string `json:"kind"`
}

// PacketEventLastValueResponse carries the requested last value.
type PacketEventLastValueResponse struct {
	Value interface{} `json:"value"`
}

// PacketStatusStoreRequest uploads a status change for an entity.
type PacketStatusStoreRequest struct {
	Entity    string                         `json:"entity"`
	EntityID  uuid.UUID                      `json:"entity_id"`
	Status    watcher.Status                 `json:"status"`
	Source    *watcher.ContainerStatusSource `json:"source"`
	Timestamp time.Time                      `json:"timestamp"`
}

// PacketStatusStoreResponse acknowledges PacketStatusStoreRequest.
type PacketStatusStoreResponse struct{}

// RequestLimit holds optional CPU/memory values; nil means "leave unchanged".
type RequestLimit struct {
	CPU    *int64 `json:"cpu,omitempty"`
	Memory *int64 `json:"memory,omitempty"`
}

// ContainerResources pairs resource requests and limits for a decision.
type ContainerResources struct {
	Requests *RequestLimit `json:"requests,omitempty"`
	Limits   *RequestLimit `json:"limits,omitempty"`
}

// PacketDecision is a resource-change decision targeting one container.
type PacketDecision struct {
	ID                 uuid.UUID          `json:"id"`
	ServiceId          uuid.UUID          `json:"service_id"`
	ContainerId        uuid.UUID          `json:"container_id"`
	ContainerResources ContainerResources `json:"container_resources"`
}

// DecisionExecutionStatus is the outcome of applying a decision.
type DecisionExecutionStatus string

const (
	DecisionExecutionStatusSucceed DecisionExecutionStatus = "executed"
	DecisionExecutionStatusFailed  DecisionExecutionStatus = "failed"
	DecisionExecutionStatusSkipped DecisionExecutionStatus = "skipped"
)

// PacketDecisionFeedbackRequest reports the outcome of a decision execution.
type PacketDecisionFeedbackRequest struct {
	ID          uuid.UUID               `json:"id"`
	ServiceId   uuid.UUID               `json:"service_id"`
	ContainerId uuid.UUID               `json:"container_id"`
	Status      DecisionExecutionStatus `json:"status"`
	Message     string                  `json:"message"`
}

// PacketDecisionFeedbackResponse acknowledges PacketDecisionFeedbackRequest.
type PacketDecisionFeedbackResponse struct{}

// PacketDecisionResponse carries an optional error for a pushed decision.
type PacketDecisionResponse struct {
	Error *string `json:"error"`
}

// PacketDecisionPullRequest asks the server for pending decisions.
type PacketDecisionPullRequest struct{}

// PacketDecisionPullResponse returns the pending decisions.
type PacketDecisionPullResponse struct {
	Decisions []*PacketDecision `json:"decisions"`
}

// PacketRestart instructs the agent to restart with the given exit status.
type PacketRestart struct {
	Status int `json:"status"`
}

// PacketRaw is a free-form packet payload.
type PacketRaw map[string]interface{}

// PacketRawRequest wraps a raw payload with its capture time.
type PacketRawRequest struct {
	PacketRaw
	Timestamp time.Time
}
// PacketRawResponse acknowledges PacketRawRequest.
type PacketRawResponse struct{}

// EntityDeltaKind describes the kind of change in an entity delta.
type EntityDeltaKind string

const (
	EntityEventTypeUpsert EntityDeltaKind = "UPSERT"
	EntityEventTypeDelete EntityDeltaKind = "DELETE"
)

// ParentController describes the (possibly chained) owner of a watched resource.
type ParentController struct {
	Kind       string            `json:"kind"`
	Name       string            `json:"name"`
	APIVersion string            `json:"api_version"`
	IsWatched  bool              `json:"is_watched"`
	Parent     *ParentController `json:"parent"`
}

// GroupVersionResourceKind pairs a GVR with its kind name.
type GroupVersionResourceKind struct {
	schema.GroupVersionResource
	Kind string `json:"kind"`
}

// PacketEntityDelta is one incremental change to a watched entity.
type PacketEntityDelta struct {
	Gvrk      GroupVersionResourceKind  `json:"gvrk"`
	DeltaKind EntityDeltaKind           `json:"delta_kind"`
	Data      unstructured.Unstructured `json:"data"`
	Parent    *ParentController         `json:"parents"`
	Timestamp time.Time                 `json:"timestamp"`
}

// PacketEntitiesDeltasRequest uploads a batch of entity deltas.
type PacketEntitiesDeltasRequest struct {
	Items     []PacketEntityDelta `json:"items"`
	Timestamp time.Time           `json:"timestamp"`
}

// PacketEntitiesDeltasResponse acknowledges PacketEntitiesDeltasRequest.
type PacketEntitiesDeltasResponse struct{}

// PacketEntitiesResyncItem holds all entities of one GVR kind for a resync.
type PacketEntitiesResyncItem struct {
	Gvrk GroupVersionResourceKind     `json:"gvrk"`
	Data []*unstructured.Unstructured `json:"data"`
}

// PacketEntitiesResyncRequest uploads a full snapshot of watched entities.
type PacketEntitiesResyncRequest struct {
	Timestamp time.Time `json:"timestamp"`
	// map of entities kind and entities definitions
	// it holds other entities not already specified in attributes above
	Snapshot map[string]PacketEntitiesResyncItem `json:"snapshot"`
}

// PacketEntitiesResyncResponse acknowledges PacketEntitiesResyncRequest.
type PacketEntitiesResyncResponse struct{}
// Deprecated: Fall back to EncodeGOB. Kept only for backward compatibility. Should be removed.
// Encode serializes in with gob; see EncodeGOB.
func Encode(in interface{}) (out []byte, err error) {
	return EncodeGOB(in)
}

// Deprecated: Falls back to DecodeGOB. Kept only for backward compatibility. Should be removed.
// Decode deserializes gob data into out; see DecodeGOB.
func Decode(in []byte, out interface{}) error {
	return DecodeGOB(in, out)
}
// EncodeSnappy marshals in to JSON and compresses the result with snappy.
// A panic raised during marshaling (e.g. by a custom MarshalJSON) is
// recovered and converted into an error carrying the stack trace.
func EncodeSnappy(in interface{}) (out []byte, err error) {
	defer func() {
		if r := recover(); r != nil {
			stack := string(debug.Stack())
			err = karma.Format(stack, fmt.Sprintf("panic: %v", r))
		}
	}()
	jsonIn, err := json.Marshal(in)
	if err != nil {
		// Report the stage that actually failed (JSON marshaling);
		// the old message blamed snappy, which never ran here.
		return nil, karma.Format(err, "unable to marshal input to json")
	}
	out = snappy.Encode(nil, jsonIn)
	return out, err
}
// DecodeSnappy decompresses snappy data and unmarshals the JSON payload into out.
func DecodeSnappy(in []byte, out interface{}) error {
	jsonIn, err := snappy.Decode(nil, in)
	if err != nil {
		// Fixed message: we failed to decode *from* snappy, not "to" it.
		return karma.Format(err, "unable to decode snappy data")
	}
	return json.Unmarshal(jsonIn, out)
}
// DecodeGOB deserializes gob-encoded data into out, registering the
// packet types first.
func DecodeGOB(in []byte, out interface{}) error {
	RegisterGOBTypes()
	return gob.NewDecoder(bytes.NewReader(in)).Decode(out)
}
// EncodeGOB serializes in with gob, registering the packet types first.
func EncodeGOB(in interface{}) ([]byte, error) {
	RegisterGOBTypes()
	buf := &bytes.Buffer{}
	if err := gob.NewEncoder(buf).Encode(in); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// DecodeJSON unmarshals JSON data into out.
func DecodeJSON(in []byte, out interface{}) error {
	return json.Unmarshal(in, out)
}

// EncodeJSON marshals in to JSON.
func EncodeJSON(in interface{}) ([]byte, error) {
	return json.Marshal(in)
}
// RegisterGOBTypes registers every type in gobTypes with encoding/gob,
// doing the work only on the first call.
// NOTE(review): the plain bool guard is not goroutine-safe; concurrent
// first calls could race. Confirm callers serialize this, or switch the
// guard to sync.Once.
func RegisterGOBTypes() {
	if !gobTypesRegistered {
		for _, t := range gobTypes {
			gob.Register(t)
		}
		gobTypesRegistered = true
	}
}
|
package main
import "fmt"
func main() {
	// A set of month names: empty-struct values cost no memory, and the
	// composite-literal shorthand {} drops the redundant struct{}{} spelling.
	months := map[string]struct{}{
		"January":  {},
		"February": {},
		"March":    {},
		"April":    {},
		"May":      {},
		"June":     {},
	}
	if _, ok := months["March"]; ok {
		fmt.Println("Found!")
	}
}
|
package sdk
import (
"net/http"
"log"
"io/ioutil"
)
// AuthorizeParam holds the query parameters for the OAuth2 authorize endpoint.
type AuthorizeParam struct {
	ClientId     string `json:"client_id"`
	RedirectUri  string `json:"redirect_uri"`
	State        string `json:"state,omitempty"`
	EnforceLogin string `json:"enforce_login,omitempty"`
	Lang         string `json:"lang,omitempty"`
}

// AuthorizeResponse is the authorization code returned by the endpoint.
type AuthorizeResponse struct {
	Code  string `json:"code"`
	State string `json:"state"`
}
// GetAuthorizeURL builds the OAuth2 authorize URL from the package-level
// credentials, performs a GET against it and logs the raw response body.
func GetAuthorizeURL() {
	authorizeURL := Account_Host + "/oauth2/authorize" + "?client_id=" + Client_Id + "&redirect_uri=" + Redirect_Url
	log.Print(authorizeURL)
	client := &http.Client{}
	request, err := http.NewRequest("GET", authorizeURL, nil)
	if err != nil {
		log.Fatal("Http request create error.")
		return
	}
	response, err := client.Do(request)
	if err != nil {
		log.Fatal("Http client do error.")
		return
	}
	// Close only after the error check: response is nil when err != nil,
	// so the previous defer-before-check order could panic with a nil
	// pointer dereference on request failure.
	defer response.Body.Close()
	data, err := ioutil.ReadAll(response.Body)
	if err != nil {
		log.Fatal("Http read all error.")
		return
	}
	log.Print("xxd", string(data))
}
|
// Copyright 2014 Matthias Zenger. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sets
import . "github.com/objecthub/containerkit"
// MutableSetObserver receives callbacks after elements are added to or
// removed from an observed MutableSet.
type MutableSetObserver interface {
	Include(subject MutableSet, elem interface{})
	Exclude(subject MutableSet, elem interface{})
}

// ObservableSet wraps a MutableSetClass so that every set it creates
// notifies the given observers on Include/Exclude.
func ObservableSet(class MutableSetClass, observers Container) MutableSetClass {
	return &observableSetClass{class, observers}
}

// observableSetClass is the factory producing observable sets.
type observableSetClass struct {
	class     MutableSetClass // underlying set implementation
	observers Container       // observers shared by all sets created here
}
// Embed creates an observable set that forwards mutations to the wrapped
// implementation and notifies the class's observers.
func (this *observableSetClass) Embed(obj MutableSet) MutableSet {
	res := new(observableSet)
	if obj == nil {
		obj = res
	}
	res.obj = obj
	// Bug fix: observers was never copied to the new set, so Include and
	// Exclude dereferenced a nil Container and panicked.
	res.observers = this.observers
	res.MutableSet = this.class.Embed(obj)
	return res
}
// New creates an observable set containing the given elements.
func (this *observableSetClass) New(elements... interface{}) MutableSet {
	res := this.Embed(nil)
	res.Include(elements...)
	return res
}

// From creates an observable set containing all elements of coll.
func (this *observableSetClass) From(coll Container) MutableSet {
	res := this.Embed(nil)
	res.IncludeFrom(coll)
	return res
}

// observableSet decorates a MutableSet with observer notifications.
type observableSet struct {
	obj       MutableSet // outermost wrapper, reported to observers
	observers Container  // observers notified on every mutation
	MutableSet
}
// Include adds the elements to the underlying set, then notifies every
// registered observer about each added element.
func (this *observableSet) Include(elements ...interface{}) {
	this.MutableSet.Include(elements...)
	this.observers.ForEach(func(o interface{}) {
		observer := o.(MutableSetObserver)
		for _, elem := range elements {
			observer.Include(this.obj, elem)
		}
	})
}
// Exclude removes the elements from the underlying set, then notifies
// every registered observer about each removed element.
func (this *observableSet) Exclude(elements ...interface{}) {
	this.MutableSet.Exclude(elements...)
	this.observers.ForEach(func(o interface{}) {
		observer := o.(MutableSetObserver)
		for _, elem := range elements {
			observer.Exclude(this.obj, elem)
		}
	})
}
|
package global
import (
"context"
"github.com/go-redis/redis/v8"
)
// Set wraps a Redis client with convenience helpers for SET operations.
type Set struct {
	client *redis.Client
}

// NewSet builds a Set helper around the given Redis client.
func NewSet(client *redis.Client) *Set {
	return &Set{
		client: client,
	}
}
// SAdd inserts val into the set stored under key and reports whether the
// value was newly added (true) or already present (false).
func (s *Set) SAdd(ctx context.Context, key, val interface{}) (bool, error) {
	added, err := s.client.SAdd(ctx, makeSetKey(key), val).Result()
	if err != nil {
		return false, err
	}
	// Redis returns the number of members actually inserted.
	return added == 1, nil
}
// SRem removes val from the set stored under key.
func (s *Set) SRem(ctx context.Context, key, val interface{}) error {
	return s.client.SRem(ctx, makeSetKey(key), val).Err()
}

// SMembers returns every member of the set stored under key.
// A redis.Nil reply is translated into the package-level ErrNil.
func (s *Set) SMembers(ctx context.Context, key interface{}) ([]string, error) {
	ret, err := s.client.SMembers(ctx, makeSetKey(key)).Result()
	if err == redis.Nil {
		return ret, ErrNil
	}
	return ret, err
}
// SDel deletes the whole set stored under key.
func (s *Set) SDel(ctx context.Context, key interface{}) error {
	// Consistent with SRem: only the error matters, not the deleted count.
	return s.client.Del(ctx, makeSetKey(key)).Err()
}
|
package TapeEquilibrium
import "testing"
// TestTapeEquilibrium drives TapeEquilibrium through a fixed table of cases.
func TestTapeEquilibrium(t *testing.T) {
	cases := []struct {
		input  []int
		result int
	}{
		{[]int{3, 1, 2, 4, 3}, 1},
		{[]int{1, 1}, 0},
		{[]int{-3, 5, -2, 1, 0, -10}, 3},
	}
	for _, tc := range cases {
		got := TapeEquilibrium(tc.input)
		// check if result and expected result are same
		if got != tc.result {
			t.Errorf("TapeEquilibrium for %d failed, expected result %d, got, %d", tc.input, tc.result, got)
		}
	}
}
|
package configfile
import (
"log"
"time"
"github.com/fsnotify/fsnotify"
)
var logFatal = log.Fatal
// AttachWatcher adds a listener of change events to a filepath and calls
// runner whenever the file is modified or replaced.
func AttachWatcher(filename string, runner func()) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		logFatal(err)
	}
	go func() {
		defer watcher.Close()
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					// Events channel closed: the watcher is gone; stop this
					// goroutine instead of spinning on zero-value events.
					return
				}
				if event.Op&fsnotify.Write == fsnotify.Write {
					log.Printf("WARN: Watched file '%s' modified!", filename)
					go runner()
				} else if event.Op&fsnotify.Remove == fsnotify.Remove ||
					event.Op&fsnotify.Rename == fsnotify.Rename {
					log.Printf("WARN: Watched file '%s' has been removed!",
						filename)
					// Some editors remove the old file and replace it with a new one
					// so we need to give it a bit of time and try to reattach the notifier
					time.Sleep(1 * time.Second)
					log.Printf("WARN: Trying to reattach to '%s'...", filename)
					AttachWatcher(filename, runner)
					// If the file disappeared, we know it changed so run the trigger
					go runner()
					// A fresh watcher has taken over; terminate this goroutine
					// so the deferred Close releases the old descriptor.
					return
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				// The old 'break' here only broke out of the select, which
				// was a no-op; errors are just logged.
				log.Println("error:", err)
			}
		}
	}()
	log.Printf("Attaching filesystem notifier onto %s", filename)
	err = watcher.Add(filename)
	if err != nil {
		logFatal(err)
		watcher.Close()
	}
}
|
package main
import (
"fmt"
"math"
)
// main searches for the largest n-pandigital prime below 9999999.
func main() {
	best, threshold, digitCount := 0, 10, 1
	for candidate := 1; candidate < 9999999; candidate++ {
		// Once candidate exceeds the current power of ten, it has one
		// more digit; track that width for the pandigital test.
		if threshold/candidate == 0 {
			threshold *= 10
			digitCount++
		}
		if isNPandigital(candidate, digitCount) && isPrime(candidate) {
			best = candidate
		}
	}
	fmt.Println("max:", best)
}
// isNPandigital reports whether i uses each digit 1..n exactly once.
func isNPandigital(i, n int) bool {
	seen := make(map[int]bool)
	count := 0
	for v := i; v > 0; v /= 10 {
		d := v % 10
		// Zero digits, digits above n, and repeats all disqualify i.
		if d == 0 || d > n || seen[d] {
			return false
		}
		seen[d] = true
		count++
	}
	return count == n
}
// scrapeDigits returns the decimal digits of i, least significant first.
func scrapeDigits(i int) []int {
	digits := []int{}
	for v := i; v > 0; v /= 10 {
		digits = append(digits, v%10)
	}
	return digits
}
// isPrime reports whether n is a prime number by trial division.
func isPrime(n int) bool {
	if n <= 1 {
		return false
	}
	// Checking divisors up to sqrt(n) is sufficient.
	for d := 2; d*d <= n; d++ {
		if n%d == 0 {
			return false
		}
	}
	return true
}
// chop returns the smallest factor of num in [2, sqrt(num)], or num
// itself when no such factor exists (i.e. num is prime or < 4).
func chop(num int) int {
	limit := int(math.Sqrt(float64(num)))
	for divisor := 2; divisor <= limit; divisor++ {
		if num%divisor == 0 {
			return divisor
		}
	}
	return num
}
|
package people
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"net/http"
)
// Person is one person record as exposed by the JSON API.
type Person struct {
	Id        int    `json:"Id"`
	FirstName string `json:"firstName"`
	LastName  string `json:"lastName"`
}

// People is a list of Person records.
type People []Person
// ReturnAllPeople writes a fixed, in-memory list of people as JSON.
func ReturnAllPeople(w http.ResponseWriter, r *http.Request) {
	people := People{
		Person{Id: 1, FirstName: "Ryan", LastName: "McCombe"},
		Person{Id: 2, FirstName: "Hannah", LastName: "Morrison"},
	}
	fmt.Println("Endpoint Hit: returnAllPeople")
	// Declare the payload type and surface encoding failures instead of
	// silently dropping them.
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(people); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// ReturnSinglePerson echoes the "id" route variable back to the client.
func ReturnSinglePerson(w http.ResponseWriter, r *http.Request) {
	key := mux.Vars(r)["id"]
	fmt.Fprintf(w, "Key: "+key)
}
func ReturnAllPeopleFromDB(w http.ResponseWriter, r *http.Request) {
// TODO: keep the DB conn open
database, _ := sql.Open("sqlite3", "./test.db")
rows, _ := database.Query("SELECT id, firstName, lastName FROM people")
people := People{}
var id int
var firstName string
var lastName string
for rows.Next() {
rows.Scan(&id, &firstName, &lastName)
people = append(people, Person{
Id: id,
FirstName: firstName,
LastName: lastName,
})
// fmt.Println(strconv.Itoa(id) + ": " + firstName + " " + lastName)
}
json.NewEncoder(w).Encode(people)
} |
package main
import (
"bufio"
"bytes"
"errors"
"fmt"
"net"
"strings"
"time"
"github.com/overflow3d/ts3_/database"
)
// master is the reserved ID of the primary bot instance.
const master = "master"

//Bot , is a bot struct
type Bot struct {
	ID       string
	conn     net.Conn
	output   chan string   // raw lines scanned from the telnet connection
	err      chan string   // telnet error lines
	notify   chan string   // telnet notify lines
	stop     chan int      // signals run() to exit
	stopPing chan struct{} // signals botSchedules() to exit
	isMaster bool
	resp     string // last non-error, non-notify response line
	db       database.Datastore
	Uptime   int64 // unix timestamp of bot start
}

//Response , represents telnet response
type Response struct {
	action string
	params []map[string]string
}

//TSerror , parses a telnet error string into an Error()
type TSerror struct {
	id  string
	msg string
}

// Error implements the error interface for TSerror.
func (e TSerror) Error() string {
	return fmt.Sprintf("Error from telnet: %s %s", e.id, e.msg)
}

// bots is the registry of all live bots, keyed by bot ID.
var bots = make(map[string]*Bot)
//Creating new bot
//Creating new bot
func (b *Bot) newBot(addr string, isMaster bool) error {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		errLog.Println(err)
		// Bug fix: previously execution continued with a nil conn, which
		// panicked on the first use. Without a connection the bot is useless.
		return err
	}
	b.conn = conn
	scanCon := bufio.NewScanner(b.conn)
	scanCon.Split(scan)
	//Makes all bot channels
	b.makeChannels()
	//Launch goroutine for bot's connection scanner
	wg.Add(1)
	go b.scanCon(scanCon)
	//Adds separate notify goroutine
	go b.notifyRun()
	//Launch goroutine to fetch telnet response
	go b.run()
	//Launches ping
	go b.botSchedules()
	b.Uptime = time.Now().Unix()
	if isMaster {
		b.ID = master
		bots[b.ID] = b
		b.isMaster = true
		return nil
	}
	// Non-master bots share the master's database handle.
	master, ok := bots[master]
	if !ok {
		return errors.New("Couldn't copy database")
	}
	b.ID = randString(5)
	bots[b.ID] = b
	b.db = master.db
	return nil
}
// scanCon pumps scanned telnet lines into b.output until the connection
// ends, then runs the bot cleanup.
func (b *Bot) scanCon(s *bufio.Scanner) {
	defer b.cleanUp()
	// Bug fix: the old loop ignored Scan's return value and only checked
	// s.Err(). On a clean EOF Err() is nil, so the loop span forever
	// sending empty strings. Scan's bool covers both EOF and errors.
	for s.Scan() {
		b.output <- s.Text()
	}
}
// makeChannels allocates every channel the bot's goroutines communicate over.
func (b *Bot) makeChannels() {
	b.output = make(chan string)
	b.err = make(chan string)
	b.notify = make(chan string)
	b.stop = make(chan int)
	b.stopPing = make(chan struct{})
}

//Cleans up after closing bot
// cleanUp stops the fetch loop, closes the bot's channels and — for the
// master bot — closes every registered bot connection.
func (b *Bot) cleanUp() {
	// Order matters: run() must receive the stop signal before b.output
	// is closed.
	b.stop <- 1
	close(b.output)
	close(b.stopPing)
	if b.ID == master {
		for bot := range bots {
			i := bots[bot]
			i.conn.Close()
		}
	}
	warnLog.Println("Bot", b.ID, "stopped his work.")
	wg.Done()
}
// run dispatches raw telnet lines from b.output: errors go to b.err,
// notifications to b.notify, greetings are dropped, and anything else is
// remembered as the latest command response.
func (b *Bot) run() {
	defer func() {
		warnLog.Println("Bot's", b.ID, "fetching stopped due to bot turning off")
	}()
	for {
		select {
		case m, ok := <-b.output:
			if !ok {
				continue
			}
			// strings.HasPrefix is the idiomatic form of Index(...) == 0.
			switch {
			case strings.HasPrefix(m, "TS3"), strings.HasPrefix(m, "Welcome"), strings.HasPrefix(m, "version"):
				// Greeting/banner lines carry no information: drop them.
			case strings.HasPrefix(m, "error"):
				b.passError(m)
			case strings.HasPrefix(m, "notify"):
				b.passNotify(m)
			default:
				b.resp = m
			}
		case <-b.stop:
			return
		}
	}
}
// notifyRun parses notification lines from b.notify and hands each one
// to the bot's notify dispatcher.
func (b *Bot) notifyRun() {
	for {
		raw := <-b.notify
		parsed := formatResponse(raw, "notify")
		b.notifyAction(parsed)
	}
}
//Schedules for bot
// botSchedules runs the bot's periodic jobs (keepalive ping, room
// cleanup, registration checks, activity points) until stopPing closes.
func (b *Bot) botSchedules() {
	ping := time.NewTicker(305 * time.Second)
	cleanRooms := time.NewTicker(48 * time.Hour)
	registered := time.NewTicker(5 * time.Hour)
	pointsT := time.NewTicker(10 * time.Minute)
	defer func() {
		// Stop every ticker on exit; previously registered and pointsT
		// were leaked, keeping their timers alive forever.
		ping.Stop()
		cleanRooms.Stop()
		registered.Stop()
		pointsT.Stop()
	}()
	for {
		select {
		case <-ping.C:
			go b.exec(version())
			infoLog.Println("Ping from bot: ", b.ID, " was send to telnet")
		case <-cleanRooms.C:
			if b.ID == master {
				b.checkIfRoomOutDate(true, "0")
				infoLog.Println("Check for empty rooms")
			}
		case <-registered.C:
			registerUserAsPerm(b)
			eventLog.Println("Sprawdzanie czy użytkownik spełnia wymagania rangi zarejestrowany")
		case <-pointsT.C:
			b.givePoints()
			eventLog.Println("Users gets thier points for being active on teamspeak")
		case <-b.stopPing:
			return
		}
	}
}
// roomFromNotify handles a channel-created notification: when the new
// channel sits directly under a configured spacer it becomes a tracked
// "main" room (with an admin token); otherwise, if the parent is already
// tracked, the channel is appended as a child room.
func (b *Bot) roomFromNotify(r *Response) {
	debugLog.Println(r.params)
	channel := &Channel{}
	// Look up whether the parent channel is already a tracked room.
	encodedRoom, err := b.db.GetRecord("rooms", r.params[0]["cid"])
	if err != nil {
		errLog.Println("Database error: ", err)
	}
	for _, s := range cfg.Spacer {
		if r.params[0]["cpid"] == s {
			if len(encodedRoom) == 0 {
				// New main room: resolve the owner from the channel name.
				owner, er := b.exec(clientFind(r.params[0]["channel_name"]))
				if er != nil {
					errLog.Println("Incorrect owner id:", err)
					b.exec(sendMessage("1", r.params[0]["invokerid"], "Wprowadziłeś niepoprawną nazwę właściciela kanału wyśli użytkownikowi w prywatnej wiadomości token, który otrzymałeś pod spodem. Błąd telnet: "+er.Error()))
				}
				clientDB := getDBFromClid(owner.params[0]["clid"])
				if clientDB != "" {
					b.exec(setChannelAdmin(clientDB, r.params[0]["cid"]))
					channel.Admins = []string{clientDB}
				} else {
					channel.Admins = []string{}
				}
				// Generate a recovery token so the owner can reclaim
				// channel-admin rights later.
				token := randString(7)
				tok := &Token{Token: token, Cid: r.params[0]["cid"]}
				infoLog.Println("Creating main room")
				channel.Cid = r.params[0]["cid"]
				channel.Spacer = r.params[0]["cpid"]
				channel.OwnerDB = clientDB
				channel.CreatedBy = r.params[0]["invokername"]
				channel.CreateDate = time.Now()
				channel.Name = r.params[0]["channel_name"]
				channel.Token = token
				channel.Childs = []string{}
				errr := b.db.AddRecord("rooms", channel.Cid, channel)
				if errr != nil {
					errLog.Println(err)
				}
				b.db.AddRecordSubBucket("rooms", "tokens", token, tok)
				// Notify both the invoker and the owner about the token.
				go b.exec(sendMessage("1", r.params[0]["invokerid"], "Token dla utworzonego pokoju to: [b][color=red]"+tok.Token+"[/color][/b]"))
				go b.exec(sendMessage("1", owner.params[0]["clid"], "Token dla Twojego kanału by odzyskać channel Admina to [b][color=red] "+tok.Token+" [/color][/b]"))
				return
			}
			// Parent is a tracked room: register this channel as a child.
			err = channel.unmarshalJSON(encodedRoom)
			if err != nil {
				errLog.Println("Channel decoding error:", err)
			}
			channel.Childs = append(channel.Childs, r.params[0]["cid"])
			b.db.AddRecord("rooms", channel.Cid, channel)
			return
		}
	}
	eventLog.Println("Manually creating an extra room for", r.params[0]["cpid"])
}
// passNotify forwards a notify line to the notification goroutine.
func (b *Bot) passNotify(notify string) {
	b.notify <- notify
}

// passError forwards an error line to the error channel.
func (b *Bot) passError(err string) {
	b.err <- err
}
//Formats output from telnet into Reponse struct
// For action "cmd" the whole string is a pipe-separated list of records;
// otherwise the first word is the notify action name and the remainder is
// the record list. Each record is a space-separated list of key=value
// pairs (value unescaped); bare keys map to the empty string.
func formatResponse(s string, action string) *Response {
	r := &Response{}
	var splitResponse []string
	if action == "cmd" {
		r.action = "Cmd_Response"
		splitResponse = strings.Split(s, "|")
	} else {
		notifystr := strings.SplitN(s, " ", 2)
		r.action = notifystr[0]
		splitResponse = strings.Split(notifystr[1], "|")
	}
	for i := range splitResponse {
		r.params = append(r.params, make(map[string]string))
		splitWhiteSpaces := strings.Split(splitResponse[i], " ")
		for j := range splitWhiteSpaces {
			// SplitN with limit 2 keeps '=' characters inside the value.
			splitParams := strings.SplitN(splitWhiteSpaces[j], "=", 2)
			if len(splitParams) > 1 {
				r.params[i][splitParams[0]] = unescape(splitParams[1])
			} else {
				r.params[i][splitParams[0]] = ""
			}
		}
	}
	return r
}
//Converts telnet error string to error struct
// formatError parses a telnet "error id=... msg=..." line; it returns nil
// when the line reports success (id "0" or missing).
func formatError(s string) error {
	e := &TSerror{}
	for _, part := range strings.Split(s, " ") {
		kv := strings.SplitN(part, "=", 2)
		// Tokens without '=' carry no data; the old else{continue} was a no-op.
		if len(kv) < 2 {
			continue
		}
		switch kv[0] {
		case "id":
			e.id = kv[1]
		case "msg":
			e.msg = unescape(kv[1])
		}
	}
	if e.id != "0" && e.id != "" {
		return e
	}
	return nil
}
//Scans telnet output from connection
// scan is a bufio.SplitFunc producing tokens terminated by "\n\r".
func scan(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if idx := bytes.Index(data, []byte("\n\r")); idx >= 0 {
		// Consume the terminator but exclude it from the token.
		return idx + 2, data[:idx], nil
	}
	if atEOF {
		// Flush whatever remains as the final token.
		return len(data), data, nil
	}
	// Need more data.
	return 0, nil, nil
}
|
package app
import (
"errors"
"sync"
"github.com/dmitryt/otus-golang-hw/hw12_13_14_15_calendar/internal/config"
"github.com/dmitryt/otus-golang-hw/hw12_13_14_15_calendar/internal/repository"
"github.com/dmitryt/otus-golang-hw/hw12_13_14_15_calendar/service"
)
// ErrUnrecognizedServiceType is returned when a service type other than
// http or grpc is requested.
var ErrUnrecognizedServiceType = errors.New("cannot create service, because type was not recognized. Supported types: http, grpc")

// App wires the calendar configuration to a CRUD repository.
type App struct {
	r repository.CRUD
	c *config.Calendar
}

// New builds an App from the given configuration and repository.
func New(c *config.Calendar, r repository.CRUD) (*App, error) {
	return &App{c: c, r: r}, nil
}
// Run starts the gRPC and HTTP services (each only when its address is
// configured) in background goroutines. Service errors are sent to errCh;
// doneCh is closed once both goroutines have finished.
func (app *App) Run(errCh chan error, doneCh chan bool) {
	s := service.New(app.r)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		if app.c.GRPCAddress != "" {
			errCh <- s.RunGRPC(app.c.GRPCAddress)
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		// HTTP requires the gRPC address too — presumably the HTTP service
		// is a gateway proxying to gRPC; confirm against service.RunHTTP.
		if app.c.HTTPAddress != "" && app.c.GRPCAddress != "" {
			errCh <- s.RunHTTP(app.c.GRPCAddress, app.c.HTTPAddress)
		}
	}()
	go func() {
		wg.Wait()
		close(doneCh)
	}()
}
|
package client
// ErrorResponse is the API's error envelope.
type ErrorResponse struct {
	Error   int
	Message string
}

// Country identifies a country in the football API.
type Country struct {
	Id   string `json:"country_id"`
	Name string `json:"country_name"`
}

// League identifies a league and the country it belongs to.
type League struct {
	CountryId   string `json:"country_id"`
	CountryName string `json:"country_name"`
	Id          string `json:"league_id"`
	Name        string `json:"league_name"`
}
// Standing is one team's league-table row, with overall, home and away
// splits. All numeric figures arrive as strings from the API.
type Standing struct {
	CountryName           string `json:"country_name"`
	LeagueId              string `json:"league_id"`
	LeagueName            string `json:"league_name"`
	TeamName              string `json:"team_name"`
	OverallLeaguePosition string `json:"overall_league_position"`
	OverallLeaguePayed    string `json:"overall_league_payed"`
	OverallLeagueW        string `json:"overall_league_w"`
	OverallLeagueD        string `json:"overall_league_d"`
	OverallLeagueL        string `json:"overall_league_l"`
	OverallLeagueGF       string `json:"overall_league_gf"`
	OverallLeagueGA       string `json:"overall_league_ga"`
	OverallLeaguePTS      string `json:"overall_league_pts"`
	HomeLeaguePosition    string `json:"home_league_position"`
	HomeLeaguePayed       string `json:"home_league_payed"`
	HomeLeagueW           string `json:"home_league_w"`
	HomeLeagueD           string `json:"home_league_d"`
	HomeLeagueL           string `json:"home_league_l"`
	HomeLeagueGF          string `json:"home_league_gf"`
	HomeLeagueGA          string `json:"home_league_ga"`
	HomeLeaguePTS         string `json:"home_league_pts"`
	AwayLeaguePosition    string `json:"away_league_position"`
	AwayLeaguePayed       string `json:"away_league_payed"`
	AwayLeagueW           string `json:"away_league_w"`
	AwayLeagueD           string `json:"away_league_d"`
	AwayLeagueL           string `json:"away_league_l"`
	AwayLeagueGF          string `json:"away_league_gf"`
	AwayLeagueGA          string `json:"away_league_ga"`
	AwayLeaguePTS         string `json:"away_league_pts"`
}
// Event is one match with its scores, scorers, cards, lineups and
// statistics. All scalar values arrive as strings from the API.
type Event struct {
	MatchId                    string       `json:"match_id"`
	CountryId                  string       `json:"country_id"`
	CountryName                string       `json:"country_name"`
	LeagueId                   string       `json:"league_id"`
	LeagueName                 string       `json:"league_name"`
	MatchDate                  string       `json:"match_date"`
	MatchStatus                string       `json:"match_status"`
	MatchTime                  string       `json:"match_time"`
	MatchHometeamName          string       `json:"match_hometeam_name"`
	MatchHometeamScore         string       `json:"match_hometeam_score"`
	MatchAwayteamName          string       `json:"match_awayteam_name"`
	MatchAwayteamScore         string       `json:"match_awayteam_score"`
	MatchHometeamHalftimeScore string       `json:"match_hometeam_halftime_score"`
	MatchAwayteamHalftimeScore string       `json:"match_awayteam_halftime_score"`
	MatchHometeamExtraScore    string       `json:"match_hometeam_extra_score"`
	MatchAwayteamExtraScore    string       `json:"match_awayteam_extra_score"`
	MatchHometeamPenaltyScore  string       `json:"match_hometeam_penalty_score"`
	MatchAwayteamPenaltyScore  string       `json:"match_awayteam_penalty_score"`
	MatchHometeamSystem        string       `json:"match_hometeam_system"`
	MatchAwayteamSystem        string       `json:"match_awayteam_system"`
	MatchLive                  string       `json:"match_live"`
	Goalscorer                 []Goalscorer `json:"goalscorer"`
	Cards                      []Card       `json:"cards"`
	// Lineup groups starting players, substitutes, coach and
	// substitutions per side.
	Lineup struct {
		Home struct {
			StartingLineups []Lineup         `json:"starting_lineups"`
			Substitutes     []Lineup         `json:"substitutes"`
			Coach           []Lineup         `json:"coach"`
			Substitutions   []LineupWithTime `json:"substitutions"`
		}
		Away struct {
			StartingLineups []Lineup         `json:"starting_lineups"`
			Substitutes     []Lineup         `json:"substitutes"`
			Coach           []Lineup         `json:"coach"`
			Substitutions   []LineupWithTime `json:"substitutions"`
		}
	}
	Statistics []Statistic `json:"statistics"`
}
// Goalscorer is one goal entry; only one of Home/AwayScorer is populated.
type Goalscorer struct {
	Time       string `json:"time"`
	HomeScorer string `json:"home_scorer"`
	Score      string `json:"score"`
	AwayScorer string `json:"away_scorer"`
}

// Card is one booking entry; only one of Home/AwayFault is populated.
type Card struct {
	Time      string `json:"time"`
	HomeFault string `json:"home_fault"`
	Card      string `json:"card"`
	AwayFault string `json:"away_fault"`
}

// Lineup is one player entry of a match lineup.
type Lineup struct {
	LineupPlayer   string `json:"lineup_player"`
	LineupNumber   string `json:"lineup_number"`
	LineupPosition string `json:"lineup_position"`
}

// LineupWithTime is a lineup entry stamped with a match time (substitutions).
type LineupWithTime struct {
	Lineup
	LineupTime string `json:"lineup_time"`
}

// Statistic is one home/away statistic row (e.g. possession, shots).
type Statistic struct {
	Type string
	Home string
	Away string
}
// Odds holds one bookmaker's odds for a match: 1X2, Asian handicap (AH_*)
// and over/under totals (O_*/U_*); "p5" in a name stands for ".5".
type Odds struct {
	MatchId   string `json:"match_id"`
	Bookmaker string `json:"odd_bookmakers"`
	Date      string `json:"odd_date"`
	Home      string `json:"odd_1"`
	Draw      string `json:"odd_x"`
	Away      string `json:"odd_2"`
	AH_2p5_H  string `json:"ah-2.5_1"`
	AH_2p5_A  string `json:"ah-2.5_2"`
	AH_2_H    string `json:"ah-2_1"`
	AH_2_A    string `json:"ah-2_2"`
	AH_1p5_H  string `json:"ah-1.5_1"`
	AH_1p5_A  string `json:"ah-1.5_2"`
	AH_1_H    string `json:"ah-1_1"`
	AH_1_A    string `json:"ah-1_2"`
	AH0_H     string `json:"ah0_1"`
	AH0_A     string `json:"ah0_2"`
	O_0p5     string `json:"o+0.5"`
	U_0p5     string `json:"u+0.5"`
	O_1p5     string `json:"o+1.5"`
	U_1p5     string `json:"u+1.5"`
	O_2       string `json:"o+2"`
	U_2       string `json:"u+2"`
	O_2p5     string `json:"o+2.5"`
	U_2p5     string `json:"u+2.5"`
	O_3       string `json:"o+3"`
	U_3       string `json:"u+3"`
	O_3p5     string `json:"o+3.5"`
	U_3p5     string `json:"u+3.5"`
	O_4p5     string `json:"o+4.5"`
	U_4p5     string `json:"u+4.5"`
	O_5p5     string `json:"o+5.5"`
	U_5p5     string `json:"u+5.5"`
	BtsYes    string `json:"bts_yes"`
	BtsNo     string `json:"bts_no"`
}
|
package zengarden
import (
"strings"
"text/template"
"time"
)
// funcMap exposes the template helper functions available to themes.
var funcMap = template.FuncMap{
	"downcase":     strings.ToLower,
	"upcase":       strings.ToUpper,
	"date":         date,
	"dateToString": dateToString,
	"filter":       filter,
	"slice":        slice,
	"trim":         trim,
}
func date(format string, date time.Time) string {
return date.Format(format)
}
func dateToString(date time.Time) string {
return date.Format("2 Jan 2006")
}
// filter returns the contexts whose value under key equals val.
func filter(key string, val interface{}, data []Context) []Context {
	var matched []Context
	for _, item := range data {
		if item[key] == val {
			matched = append(matched, item)
		}
	}
	return matched
}
// slice returns data[offset:count].
// NOTE(review): despite its name, count is used as an absolute end index,
// not a length (i.e. not offset:offset+count) — confirm against templates
// that call it before changing. Out-of-range arguments will panic.
func slice(offset, count int, data []Context) []Context {
	return data[offset:count]
}
// trim removes needle from the end of str if present.
func trim(needle, str string) string {
	if strings.HasSuffix(str, needle) {
		return str[:len(str)-len(needle)]
	}
	return str
}
|
package pgo
import (
"reflect"
)
// InArray checks if a value exists in an array
// It delegates to search, which compares needle against each element.
func InArray(needle interface{}, haystack interface{}) bool {
	return search(needle, haystack)
}
// search reports whether needle equals any element of haystack, which may
// be a slice or (generalized) an array. Any other haystack — including
// nil, which previously made reflect.TypeOf panic — yields false.
func search(needle interface{}, haystack interface{}) bool {
	if haystack == nil {
		// reflect.TypeOf(nil) returns nil; calling Kind() on it panics.
		return false
	}
	switch reflect.TypeOf(haystack).Kind() {
	case reflect.Slice, reflect.Array:
		v := reflect.ValueOf(haystack)
		// Avoid shadowing the builtin len as the old code did.
		for i := 0; i < v.Len(); i++ {
			if needle == v.Index(i).Interface() {
				return true
			}
		}
	}
	return false
}
|
package server
import (
"bufio"
"errors"
"fmt"
"log"
"net"
"os"
"strings"
"github.com/eshyong/lettuce/db"
"github.com/eshyong/lettuce/utils"
)
// Server is one node of a replicated key-value store. Exactly one node
// acts as primary (executes client requests and streams diffs to its
// peer); the other acts as backup (applies diffs). Promotion to primary
// is driven by SYN messages from the master.
type Server struct {
	// Server can either have a backup or a primary, but not both.
	master net.Conn // control connection to the master coordinator
	store  *db.Store
	// TODO: allow any arbitrary number of peers.
	peer      net.Conn      // the counterpart node (backup or primary)
	masterIn  <-chan string // messages arriving from the master
	masterOut chan<- string // replies going to the master
	peerIn    <-chan string // messages arriving from the peer
	peerOut   chan<- string // messages going to the peer
	queue     []string      // diffs not yet acknowledged by the backup
	isPrimary bool
}
// NewServer constructs a Server with a fresh datastore, no master or
// peer connection yet, and backup (non-primary) status.
func NewServer() *Server {
	s := &Server{store: db.NewStore()}
	return s
}
// ConnectToMaster dials the master coordinator, performs the initial
// handshake (which may promote this node to primary), and then
// establishes the peer link: a primary listens for its backup's
// connection, a backup dials the primary address the master supplies.
// Any failure is fatal.
func (server *Server) ConnectToMaster() {
	/* Uncomment these lines to connect to master using a config file.
	masterAddr, err := readConfig()
	if err != nil {
		log.Fatal(err)
	}
	masterAddr = masterAddr + utils.DELIMITER + utils.SERVER_PORT */
	// Connect to the master server. (LOCALHOST for testing)
	masterAddr := utils.LOCALHOST + utils.DELIMITER + utils.SERVER_PORT
	conn, err := net.DialTimeout("tcp", masterAddr, utils.TIMEOUT)
	if err != nil {
		log.Fatal("Could not connect to master ", err)
	}
	in := utils.InChanFromConn(conn, "master")
	out := utils.OutChanFromConn(conn, "master")
	// TODO: check errors
	// NOTE(review): the ok flag is discarded, so a channel closed before
	// the first message yields "" here and is not detected.
	request, _ := <-in
	fmt.Println(request)
	// The first master message may be a PROMOTE, which flips isPrimary.
	err = server.handleMasterPing(out, request)
	if err != nil {
		log.Fatal(err)
	}
	server.master = conn
	server.masterIn = in
	server.masterOut = out
	if server.isPrimary {
		// Primary: accept the backup's incoming peer connection.
		listener, err := net.Listen("tcp", utils.DELIMITER+utils.PEER_PORT)
		if err != nil {
			log.Fatal("Couldn't get a socket: ", err)
		}
		conn, err = listener.Accept()
		if err != nil {
			log.Fatal("Couldn't get a connection: ", err)
		}
	} else {
		// Backup: wait for the master to announce the primary's address.
		conn = connectToPrimary(in)
	}
	server.peer = conn
	server.peerIn = utils.InChanFromConn(conn, "peer")
	server.peerOut = utils.OutChanFromConn(conn, "peer")
}
// connectToPrimary blocks on the master channel for a SYN message whose
// body is "<PRIMARY><EQUALS><address>", then dials that address on the
// peer port. Any protocol violation or dial failure aborts the process.
func connectToPrimary(in <-chan string) net.Conn {
	request, _ := <-in
	arr := strings.Split(request, utils.DELIMITER)
	if len(arr) < 2 {
		log.Fatal("Invalid message.")
	}
	header, body := arr[0], arr[1]
	if header != utils.SYN {
		log.Fatal("Unknown protocol.")
	}
	// body carries a name/address pair separated by EQUALS.
	arr = strings.Split(body, utils.EQUALS)
	if len(arr) < 2 {
		log.Fatal("Invalid message: " + request)
	}
	name, address := arr[0], arr[1]
	if name != utils.PRIMARY {
		log.Fatal("Expected address of primary.")
	}
	conn, err := net.DialTimeout("tcp", address+utils.DELIMITER+utils.PEER_PORT, utils.TIMEOUT)
	if err != nil {
		log.Fatal("Couldn't connect to primary.")
	}
	return conn
}
func readConfig() (string, error) {
file, err := os.Open("master.config")
if err != nil {
return "", err
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
// Lines are formatted as 'name address\n'.
line := strings.Trim(scanner.Text(), " ")
if line[0] == '#' {
// Comments start with a '#'.
continue
}
// Check the name and return address.
entry := strings.Split(line, " ")
name, address := entry[0], entry[1]
if name == "master" {
return address, nil
}
}
return "", errors.New("couldn't find address in master.config")
}
// Serve is the node's main loop. It multiplexes messages from the
// master and the peer; after handling each message, a primary pushes
// the oldest unacknowledged diff to its backup. The loop exits when the
// master channel is closed.
func (server *Server) Serve() {
loop:
	for {
		// Receive a message from the master server.
		select {
		case message, ok := <-server.masterIn:
			if !ok {
				// Master connection closed: shut the node down.
				break loop
			}
			err := server.handleMasterRequests(server.masterOut, message)
			if err != nil {
				fmt.Println(err)
			}
		case message, ok := <-server.peerIn:
			if !ok {
				// NOTE(review): this bare break only leaves the select;
				// once peerIn is closed this case fires on every
				// iteration, spinning the loop hot.
				break
			}
			err := server.handlePeerMessage(server.peerOut, message)
			if err != nil {
				fmt.Println(err)
			}
		}
		if server.isPrimary && len(server.queue) > 0 {
			// Send diffs to the backup server.
			// The head of the queue stays put until the backup ACKs it
			// (see handleBackupResponse), so failed diffs are resent.
			request := server.queue[0]
			server.peerOut <- utils.SYNDEL + utils.DIFF + utils.EQUALS + request
		}
	}
	fmt.Println("Shutting down...")
}
// handleMasterRequests processes one message from the master. SYN
// headers are control pings handled by handleMasterPing; client-tagged
// headers carry store commands, which only a primary executes — the
// result is echoed back under the same header and the command is queued
// for replication to the backup.
func (server *Server) handleMasterRequests(out chan<- string, message string) error {
	// Messages have the format 'HEADER:REQUEST'
	arr := strings.Split(message, utils.DELIMITER)
	if len(arr) < 2 {
		out <- utils.ERRDEL + utils.INVALID
		return errors.New("Invalid request: " + message)
	}
	fmt.Println("master message:", message)
	// Handle a message from the server or a master request.
	header, request := arr[0], arr[1]
	if header == utils.SYN {
		// SYN message
		return server.handleMasterPing(out, message)
	} else if strings.Contains(header, utils.CLIENT) {
		// Client request
		if !server.isPrimary {
			// Refuse request as backup.
			out <- utils.ERRDEL + utils.NEG
			return errors.New("Not primary: " + request)
		}
		// Execute request and send reply to server.
		reply := header + utils.DELIMITER + server.store.Execute(request)
		out <- reply
		// Append to queue of requests to send to backup.
		server.queue = append(server.queue, request)
	} else {
		// Invalid request
		out <- utils.ERRDEL + utils.UNKNOWN
		return errors.New("Unrecognized request: " + request)
	}
	return nil
}
// handleMasterPing answers a SYN control message from the master:
// PROMOTE switches this node to primary (rejected with a negative ACK
// if it already is one); STATUS is answered with an OK ACK; anything
// else is rejected as unknown.
func (server *Server) handleMasterPing(out chan<- string, message string) error {
	arr := strings.Split(message, utils.DELIMITER)
	if len(arr) < 2 {
		out <- utils.ERRDEL + utils.INVALID
		return errors.New("Invalid message: " + message)
	}
	header, request := arr[0], arr[1]
	if header != utils.SYN {
		// Not a ping; nothing is sent back in this case.
		return errors.New("Invalid message: " + message)
	}
	if request == utils.PROMOTE {
		if server.isPrimary {
			// This server is already a primary, so we reject the request.
			out <- utils.ACKDEL + utils.NEG
		} else {
			// Promote self to primary.
			server.isPrimary = true
			out <- utils.ACKDEL + utils.OK
		}
	} else if request == utils.STATUS {
		// Ping to check status?
		out <- utils.ACKDEL + utils.OK
	} else {
		// Some invalid message not covered by our protocol.
		out <- utils.ERRDEL + utils.UNKNOWN
		return errors.New("Unrecognized request: " + request)
	}
	return nil
}
// handlePeerMessage dispatches one message from the peer connection: a
// primary interprets it as its backup's response, a backup interprets
// it as a request from the primary.
func (server *Server) handlePeerMessage(out chan<- string, message string) error {
	fmt.Println("peer message:", message)
	if server.isPrimary {
		return server.handleBackupResponse(out, message)
	}
	return server.handlePrimaryRequest(out, message)
}
// handleBackupResponse (primary side) inspects the backup's reply to
// the last replicated diff. On ACK/OK the diff is popped from the head
// of the queue; any other reply leaves the queue intact so the Serve
// loop retransmits the same diff.
func (server *Server) handleBackupResponse(out chan<- string, message string) error {
	// We check if our last transaction went through; if there was a mistake we can retransmit it.
	fmt.Println("backup message:", message)
	arr := strings.Split(message, utils.DELIMITER)
	if len(arr) < 2 {
		out <- utils.ERRDEL + utils.INVALID
		return errors.New("Invalid message: " + message)
	}
	header, body := arr[0], arr[1]
	if header == utils.ERR {
		return errors.New("Need to retransmit diff request: " + message)
	}
	if header != utils.ACK {
		out <- utils.ERRDEL + utils.INVALID
		return errors.New("Unrecognized header: " + header)
	}
	if body != utils.OK {
		return errors.New("Request was rejected: " + body)
	}
	// Our transaction went through, pop from the front of the queue.
	server.queue = server.queue[1:]
	return nil
}
// handlePrimaryRequest (backup side) applies a replicated DIFF request
// from the primary to the local store and sends back an OK ACK. Non-SYN
// headers and non-DIFF requests are rejected with an error reply.
func (server *Server) handlePrimaryRequest(out chan<- string, message string) error {
	// Messages have the format 'HEADER:REQUEST'
	arr := strings.Split(message, utils.DELIMITER)
	if len(arr) < 2 {
		out <- utils.ERRDEL + utils.INVALID
		return errors.New("Invalid message: " + message)
	}
	header, request := arr[0], arr[1]
	if header != utils.SYN {
		out <- utils.ERRDEL + utils.INVALID
		return errors.New("Unrecognized header:" + header)
	}
	if !strings.Contains(request, utils.DIFF) {
		// Only handle DIFFs for now.
		out <- utils.ERRDEL + utils.UNKNOWN
		return errors.New("Unrecognized request:" + request)
	}
	// The diff payload follows the EQUALS separator.
	arr = strings.Split(request, utils.EQUALS)
	if len(arr) < 2 {
		out <- utils.ERRDEL + utils.INVALID
		return errors.New("Invalid message:" + request)
	}
	// Apply the replicated command to the local store.
	fmt.Println(server.store.Execute(arr[1]))
	out <- utils.ACKDEL + utils.OK
	return nil
}
|
package main
import (
"log"
"net"
"net/http"
"net/rpc"
"pub/service"
)
// main registers the RPC service, exposes it over HTTP on port 4040,
// and blocks serving requests. Any setup failure is fatal.
func main() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	rpcServer := new(service.RPC)
	if err := rpc.Register(rpcServer); err != nil {
		log.Fatal("Format of service rpc isn't correct. ", err)
	}
	// Mount the RPC handlers on the default HTTP mux.
	rpc.HandleHTTP()
	listener, err := net.Listen("tcp", ":4040")
	if err != nil {
		log.Fatal("Listen error: ", err)
	}
	log.Printf("Serving RPC server on port %d", 4040)
	if err := http.Serve(listener, nil); err != nil {
		log.Fatal("Error serving: ", err)
	}
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
"github.com/atomicjolt/string_utils"
)
// ListMembersOfCollaboration A paginated list of the collaborators of a given collaboration
// https://canvas.instructure.com/doc/api/collaborations.html
//
// Path Parameters:
// # Path.ID (Required) ID
//
// Query Parameters:
// # Query.Include (Optional) . Must be one of collaborator_lti_id, avatar_image_url- "collaborator_lti_id": Optional information to include with each member.
// Represents an identifier to be used for the member in an LTI context.
// - "avatar_image_url": Optional information to include with each member.
// The url for the avatar of a collaborator with type 'user'.
//
// ListMembersOfCollaboration carries the path and query parameters for
// the Canvas "list collaboration members" endpoint documented above.
type ListMembersOfCollaboration struct {
	Path struct {
		ID string `json:"id" url:"id,omitempty"` // (Required)
	} `json:"path"`
	Query struct {
		Include []string `json:"include" url:"include,omitempty"` // (Optional) . Must be one of collaborator_lti_id, avatar_image_url
	} `json:"query"`
}
// GetMethod returns the HTTP verb used by this request.
func (t *ListMembersOfCollaboration) GetMethod() string {
	return http.MethodGet
}
// GetURLPath builds the endpoint path, substituting the collaboration ID.
func (t *ListMembersOfCollaboration) GetURLPath() string {
	return strings.ReplaceAll("collaborations/{id}/members", "{id}", fmt.Sprintf("%v", t.Path.ID))
}
// GetQuery encodes the request's query parameters as a URL query string.
func (t *ListMembersOfCollaboration) GetQuery() (string, error) {
	values, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	encoded := values.Encode()
	return encoded, nil
}
// GetBody returns nil: this GET request carries no form body.
func (t *ListMembersOfCollaboration) GetBody() (url.Values, error) {
	return nil, nil
}

// GetJSON returns nil: this GET request carries no JSON body.
func (t *ListMembersOfCollaboration) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the request parameters, returning a single error
// listing every violation, or nil when the request is valid.
func (t *ListMembersOfCollaboration) HasErrors() error {
	errs := []string{}
	if t.Path.ID == "" {
		errs = append(errs, "'Path.ID' is required")
	}
	for _, v := range t.Query.Include {
		if v != "" && !string_utils.Include([]string{"collaborator_lti_id", "avatar_image_url"}, v) {
			errs = append(errs, "Include must be one of collaborator_lti_id, avatar_image_url")
		}
	}
	if len(errs) > 0 {
		// Use an explicit %s verb: the original passed the joined text as
		// the format string, which would misinterpret any '%' it contained
		// (and trips `go vet`'s printf check).
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the given Canvas instance. When next
// is non-nil it follows that pagination URL instead of building a fresh
// request. Returns the decoded collaborators and pagination metadata.
func (t *ListMembersOfCollaboration) Do(c *canvasapi.Canvas, next *url.URL) ([]*models.Collaborator, *canvasapi.PagedResource, error) {
	var err error
	var response *http.Response
	if next != nil {
		// Follow a pagination link from a previous response.
		response, err = c.Send(next, t.GetMethod(), nil)
	} else {
		response, err = c.SendRequest(t)
	}
	// (The original checked this same error twice in a row.)
	if err != nil {
		return nil, nil, err
	}
	body, err := ioutil.ReadAll(response.Body)
	response.Body.Close()
	if err != nil {
		return nil, nil, err
	}
	ret := []*models.Collaborator{}
	err = json.Unmarshal(body, &ret)
	if err != nil {
		return nil, nil, err
	}
	// Pagination links are carried in the response headers.
	pagedResource, err := canvasapi.ExtractPagedResource(response.Header)
	if err != nil {
		return nil, nil, err
	}
	return ret, pagedResource, nil
}
|
package main
import (
"fmt"
"bytes"
cmpio "github.com/unkcpz/gocmp/io"
"github.com/unkcpz/gocmp/crystal"
)
// GetCell parses a POSCAR-format string into a crystal.Cell.
// Element symbols are converted to atomic numbers, and Cartesian
// coordinates are flagged so NewCell can handle them.
func GetCell(poscar string) (*crystal.Cell, error) {
	poscarCell, err := cmpio.ParsePoscar(poscar)
	if err != nil {
		return nil, err
	}
	// Map each element symbol to its atomic number.
	// (Cleaned up: `for i, _ :=`, a redundant cap argument to make, and a
	// verbose `var c bool = false` in the original.)
	types := make([]int, len(poscarCell.Types))
	for i := range types {
		types[i] = crystal.SymToNum(poscarCell.Types[i])
	}
	cartesian := poscarCell.Coordinate == cmpio.Cartesian
	cell, err := crystal.NewCell(poscarCell.Lattice, poscarCell.Positions, types, cartesian)
	if err != nil {
		return nil, err
	}
	return cell, nil
}
// FindSpacegroup returns the space-group symbol of the structure in
// poscar, determined with symmetry tolerance eps.
func FindSpacegroup(poscar string, eps float64) (string, error) {
	cell, err := GetCell(poscar)
	if err != nil {
		return "", err
	}
	symbol := cell.Spacegroup(eps)
	return symbol, nil
}
// FindSymmetry returns a textual listing of the symmetry operations of
// the structure in poscar (rotation matrices followed by translation
// vectors), one per line, using tolerance eps.
func FindSymmetry(poscar string, eps float64) (string, error) {
	cell, err := GetCell(poscar)
	if err != nil {
		return "", err
	}
	_, rots, trans := cell.Symmetry(eps)
	var out bytes.Buffer
	for _, rot := range rots {
		fmt.Fprintf(&out, "%v\n", rot)
	}
	for _, tr := range trans {
		fmt.Fprintf(&out, "%v\n", tr)
	}
	return out.String(), nil
}
|
package solutions
// NumMatrix answers 2-D range-sum queries in O(1) using a prefix-sum table.
type NumMatrix struct {
	// sum[i][j] holds the sum of matrix[0..i-1][0..j-1]; row 0 and
	// column 0 are zero padding so no boundary checks are needed.
	sum [][]int
}

// Constructor builds the prefix-sum table in O(rows*columns).
// An empty matrix yields a zero NumMatrix on which SumRegion must not
// be called.
func Constructor(matrix [][]int) NumMatrix {
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return NumMatrix{}
	}
	rows, columns := len(matrix), len(matrix[0])
	sum := make([][]int, rows+1)
	for i := range sum {
		sum[i] = make([]int, columns+1)
	}
	// Standard inclusion-exclusion recurrence. The zero padding makes
	// the original's three special-case loops for the first row and
	// column unnecessary — one pass covers everything.
	for i := 1; i <= rows; i++ {
		for j := 1; j <= columns; j++ {
			sum[i][j] = sum[i-1][j] + sum[i][j-1] - sum[i-1][j-1] + matrix[i-1][j-1]
		}
	}
	return NumMatrix{sum: sum}
}

// SumRegion returns the sum of the elements in the inclusive rectangle
// with corners (row1,col1) and (row2,col2).
// (Receiver renamed from the non-idiomatic `this`.)
func (nm *NumMatrix) SumRegion(row1 int, col1 int, row2 int, col2 int) int {
	return nm.sum[row2+1][col2+1] - nm.sum[row2+1][col1] - nm.sum[row1][col2+1] + nm.sum[row1][col1]
}
|
package logic
import (
"encoding/json"
"jkt/gateway/global"
"jkt/gateway/hotel"
"jkt/gateway/websocket"
"jkt/jktgo/log"
"jkt/jktgo/message"
"jkt/jktgo/redis"
)
// FuncPong handles the pong reply to a ping.
func FuncPong(session *websocket.Session, args map[string]interface{}) {
	// Nothing to do here; the pong merely confirms the connection is alive.
	log.Debug("pong 回调")
}
// FuncLogin handles a login message: it validates the supplied token and
// uid against the session record stored in Redis for this gateway. On
// success the session is registered with the hotel session manager; in
// every case a JSON result is sent back on the websocket session.
func FuncLogin(session *websocket.Session, args map[string]interface{}) {
	// We need the uid and hotelId from the incoming args.
	log.Debug("有登录消息到来")
	Response := map[string]interface{}{}
	Response["code"] = args["code"]
	Response["err_code"] = 0
	Response["err_desc"] = "验证成功"
	// All declarations are grouped up front because goto cannot jump over
	// variable declarations.
	var tokenStr, redisSessionSetKey, hotelSessionDataStr string
	var tokenOk, uidOk, isExists bool
	var uidInterface interface{}
	var uid int32
	hotelSessionDataObject := message.HotelSessionData{}
	tokenStr, tokenOk = args["token"].(string)
	uidInterface, uidOk = args["uid"].(float64)
	if !tokenOk {
		// Missing or non-string token parameter.
		Response["err_code"] = -1
		Response["err_desc"] = "参数错误,无token"
		goto end
	}
	if !uidOk {
		// Missing or non-numeric uid parameter.
		Response["err_code"] = -1
		Response["err_desc"] = "参数错误,无uid"
		goto end
	}
	uid = int32(uidInterface.(float64))
	redisSessionSetKey = message.GetRedisSessionSetKey(global.GetInstance().GatewayName)
	if isExists, _ = redis.HExists(
		redisSessionSetKey,
		uid); !isExists {
		// No session record: the user has not logged in yet.
		Response["err_code"] = -1
		Response["err_desc"] = "你还未登陆,请登陆"
		goto end
	}
	hotelSessionDataStr, _ = redis.HGet(redisSessionSetKey, uid)
	json.Unmarshal([]byte(hotelSessionDataStr), &hotelSessionDataObject)
	if hotelSessionDataObject.UID != uid || hotelSessionDataObject.Token != tokenStr {
		// Stored session does not match the presented credentials.
		Response["err_code"] = -1
		Response["err_desc"] = "非法连接"
		goto end
	}
	// Valid login: attach the session data and register with the hotel.
	session.AttachData = &hotelSessionDataObject
	hotel.GetInstance().AddSession(session.AttachData.HotelID,
		session.AttachData.UID, session)
end:
	data, _ := json.Marshal(Response)
	session.SendMessage(data)
}
|
package main
import "fmt"
import structPack "github.com/dcmrlee/first-git-proj/go-lang/funny/structPack"
// main demonstrates string/byte-slice conversion, substring slicing,
// and populating an exported struct from another package.
func main() {
	s := "abcdef"
	b := []byte(s)
	fmt.Printf("%v\n", b)
	s1 := s[2:]
	fmt.Printf("%v\n", s1)
	fmt.Printf("%d\n", len(s1))
	fmt.Printf("%v\n", s[1:4])
	fmt.Printf("%v\n", s[1:2])
	t([]int{})
	struct1 := new(structPack.ExpStruct)
	struct1.Mi1 = 10
	struct1.Mf1 = 16.
	fmt.Printf("%v\n", struct1)
}
// t prints and returns the length of n.
func t(n []int) int {
	length := len(n)
	fmt.Printf("%v\n", length)
	return length
}
|
package BLC
// PHBInv is an inventory message advertising hashes this node holds.
type PHBInv struct {
	PHBAddrFrom string   // the sender's own address
	PHBType     string   // payload type: "block" or "tx"
	PHBItems    [][]byte // the advertised hashes
}
|
/**
* All Rights Reserved
* This software is proprietary information of Akurey
* Use is subject to license terms.
* Filename: empty.model.go
*
* Author: rnavarro@akurey.com
* Description: Declare the available properties
* of an Empty struct
*/
package models
import "github.com/nvellon/hal"
// EmptyStruct is a placeholder model with no fields, used when a HAL
// resource with no properties is required.
type EmptyStruct struct {
}

// GetMap returns an empty HAL entry for this struct.
func (es EmptyStruct) GetMap() hal.Entry {
	return hal.Entry{
	}
}
package main
import (
"context"
"log"
"net"
"net/http"
"sync/atomic"
"services/counter"
"google.golang.org/grpc"
)
// counterServer implements the counter gRPC service around one shared
// counter.
type counterServer struct {
	count uint32 // incremented atomically by UpdateCount
}
// UpdateCount atomically increments the shared counter and returns the
// new value.
func (s *counterServer) UpdateCount(context.Context, *counter.Empty) (*counter.Response, error) {
	return &counter.Response{
		Count: atomic.AddUint32(&s.count, 1),
	}, nil
}
var (
	// mainServer is the single shared service instance; declaring it with
	// the interface type also asserts counterServer satisfies it.
	mainServer counter.CounterServiceServer = new(counterServer)
)
// main serves the gRPC counter service on :50551 in a background
// goroutine and a static file server (./public) on :8080 in the
// foreground. Either listener failing is fatal.
func main() {
	go func() {
		lis, err := net.Listen("tcp", ":50551")
		if err != nil {
			log.Fatal(err)
		}
		defer lis.Close()
		grpcServer := grpc.NewServer()
		counter.RegisterCounterServiceServer(grpcServer, mainServer)
		// Serve blocks for the lifetime of the process.
		if err := grpcServer.Serve(lis); err != nil {
			log.Fatal(err)
		}
	}()
	// Static assets (the web client) come from ./public.
	http.Handle("/", http.FileServer(http.Dir("./public")))
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal(err)
	}
}
|
// Copyright (c) Mainflux
// SPDX-License-Identifier: Apache-2.0
package coap
import (
"github.com/gogo/protobuf/proto"
"github.com/mainflux/mainflux/pkg/messaging"
broker "github.com/nats-io/nats.go"
)
// Observer represents an internal observer used to handle CoAP observe messages.
type Observer interface {
	// Cancel tears down the observation and releases its resources.
	Cancel() error
}
// NewObserver returns a new Observer instance. It subscribes to subject
// on the NATS connection and forwards each decoded message to the
// client; payloads that fail to unmarshal are silently dropped.
func NewObserver(subject string, c Client, conn *broker.Conn) (Observer, error) {
	sub, err := conn.Subscribe(subject, func(m *broker.Msg) {
		var msg messaging.Message
		if err := proto.Unmarshal(m.Data, &msg); err != nil {
			// Malformed payload: skip this message.
			return
		}
		// There is no error handling, but the client takes care to log the error.
		c.SendMessage(msg)
	})
	if err != nil {
		return nil, err
	}
	ret := &observer{
		client: c,
		sub:    sub,
	}
	return ret, nil
}
// observer pairs a CoAP client with its NATS subscription.
type observer struct {
	client Client
	sub    *broker.Subscription
}

// Cancel unsubscribes from NATS (tolerating an already-closed
// connection) and then cancels the underlying client.
func (o *observer) Cancel() error {
	if err := o.sub.Unsubscribe(); err != nil && err != broker.ErrConnectionClosed {
		return err
	}
	return o.client.Cancel()
}
|
package main
import (
"os"
"../ch0/encapsulated"
"../ch0/public"
)
// main exercises two tracer implementations: the public struct-based
// Trace and the encapsulated constructor-based variants, toggling each
// on and off around Print calls.
func main() {
	pub := public.Trace{F: os.Stdout}
	pub.On()
	pub.Print("X1")
	pub.Off()
	pub.Print("Y1")
	custom := encapsulated.TraceCustom(os.Stderr)
	custom.On()
	custom.Print("X2")
	custom.Off()
	custom.Print("Y2")
	var dflt encapsulated.Trace
	dflt = encapsulated.TraceDefault()
	dflt.On()
	dflt.Print("X3")
	dflt.Off()
	dflt.Print("Y3")
	// Reassigning shows the variable can hold either variant.
	dflt = custom
	dflt.On()
	dflt.Print("Z3")
}
|
package clnkserver
import (
"encoding/json"
"net/http"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/imflop/clnk/internal/app/serviceprovider"
)
// server bundles the HTTP router with the service provider that backs
// the link-shortening handlers.
type server struct {
	router *mux.Router
	sp     serviceprovider.IServiceProvider
}
// NewServer wires up a server with a fresh router and the given service
// provider, then registers all routes.
// (The parameter was renamed from "serviceprovider", which shadowed the
// imported package of the same name inside the function body.)
func NewServer(provider serviceprovider.IServiceProvider) *server {
	s := &server{
		router: mux.NewRouter(),
		sp:     provider,
	}
	s.configureRouter()
	return s
}
// ServeHTTP makes server an http.Handler by delegating to the mux router.
func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.router.ServeHTTP(w, r)
}
// configureRouter enables permissive CORS and registers the health,
// link-lookup, shorten, and unshorten routes.
func (s *server) configureRouter() {
	// Allow requests from any origin.
	s.router.Use(handlers.CORS(handlers.AllowedOrigins([]string{"*"})))
	s.router.HandleFunc("/health", s.health()).Methods(http.MethodGet)
	s.router.HandleFunc("/links/{id:[0-9]+}", s.getLink()).Methods(http.MethodGet)
	s.router.HandleFunc("/", s.shortenLink()).Methods(http.MethodPost)
	s.router.HandleFunc("/{encodedURL}", s.unshortenLink()).Methods(http.MethodGet, http.MethodHead)
}
// error writes err as a JSON {"error": ...} body with the given status code.
func (s *server) error(w http.ResponseWriter, r *http.Request, code int, err error) {
	s.responder(w, r, code, map[string]string{"error": err.Error()})
}
// responder writes data as a JSON response with the given status code;
// a nil data writes headers only. The request parameter is currently
// unused but kept for handler-signature symmetry.
func (s *server) responder(w http.ResponseWriter, r *http.Request, code int, data interface{}) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(code)
	if data != nil {
		// NOTE(review): the Encode error is discarded; by this point the
		// status line is already written, so little can be done but log it.
		json.NewEncoder(w).Encode(data)
	}
}
|
package dto
import (
"fmt"
"encoding/json"
"strings"
)
type VariableResponse struct {
value string
valueType string
valueFormat string
}
func (response VariableResponse) GetValue() string {
return response.value
}
func (response *VariableResponse) UnmarshalJSON(data []byte) error {
var responseRaw struct{
Type string `json:"type"`
Value string `json:"value"`
ValueInfo struct{
SerializationDataFormat string `json:"serializationDataFormat"`
} `json:"valueInfo"`
}
if err := json.Unmarshal(data, &responseRaw); err != nil {
return fmt.Errorf("Failed to parse variable: %s", err)
}
response.value = strings.Replace(responseRaw.Value, `\\\"`, `"`, -1)
response.valueType = responseRaw.Type
response.valueFormat = responseRaw.ValueInfo.SerializationDataFormat
return nil
}
|
package database
// This file contains wrappers for SQL queries
import (
"crypto/sha256"
"database/sql"
"fmt"
"math"
"time"
"capnproto.org/go/capnp/v3"
"capnproto.org/go/capnp/v3/exc"
"capnproto.org/go/capnp/v3/packed"
"zenhack.net/go/tempest/capnp/grain"
"zenhack.net/go/tempest/capnp/identity"
spk "zenhack.net/go/tempest/capnp/package"
"zenhack.net/go/tempest/internal/capnp/system"
"zenhack.net/go/tempest/internal/common/types"
"zenhack.net/go/tempest/internal/server/tokenutil"
"zenhack.net/go/util/exn"
)
// AddPackage adds a package to the database, initially marked as not ready.
// The caller must then move the extracted package to the right location,
// and then call ReadyPackage to complete installation.
func (tx Tx) AddPackage(pkg Package) error {
	// The manifest is stored as a packed capnp blob.
	manifestBlob, err := encodeCapnp(pkg.Manifest)
	if err != nil {
		return err
	}
	_, err = tx.sqlTx.Exec(
		`INSERT INTO
		packages(id, manifest, ready)
		VALUES (?, ?, ?)
		`,
		pkg.ID,
		manifestBlob,
		false,
	)
	return exc.WrapError("AddPackage", err)
}
// ReadyPackage marks a previously added package as ready, completing
// the installation started by AddPackage.
func (tx Tx) ReadyPackage(id types.ID[Package]) error {
	_, err := tx.sqlTx.Exec(`UPDATE packages SET ready = true WHERE id = ?`, id)
	return exc.WrapError("ReadyPackage", err)
}
// CredentialPackages returns a list of all packages installed for the user
// associated with the credential.
func (tx Tx) CredentialPackages(cred types.Credential) ([]Package, error) {
	// Note: we don't yet handle app installation, so we behave as if all
	// packages are installed for all users. When that changes, we will
	// have to actually filter by account.
	rows, err := tx.sqlTx.Query("SELECT id, manifest FROM packages")
	if err != nil {
		return nil, exc.WrapError("CredentialPackages", err)
	}
	defer rows.Close()
	var ret []Package
	for rows.Next() {
		var (
			pkg           Package
			manifestBytes []byte
		)
		err = rows.Scan(&pkg.ID, &manifestBytes)
		if err != nil {
			return nil, err
		}
		pkg.Manifest, err = decodeCapnp[spk.Manifest](manifestBytes)
		if err != nil {
			return nil, err
		}
		ret = append(ret, pkg)
	}
	// The original dropped iteration errors; surface them like AllUiViews does.
	if err := rows.Err(); err != nil {
		return nil, exc.WrapError("CredentialPackages", err)
	}
	return ret, nil
}
// NewGrain carries the fields needed to insert a grain row (see AddGrain).
type NewGrain struct {
	GrainID types.GrainID
	PkgID   types.ID[Package] // package the grain runs
	OwnerID types.AccountID
	Title   string
}

// NewAccount carries the fields needed to insert an account row (see AddAccount).
type NewAccount struct {
	ID      types.AccountID
	Role    types.Role
	Profile identity.Profile // stored as a packed capnp blob
}

// NewCredential carries the fields needed to link a credential to an
// account (see AddCredential).
type NewCredential struct {
	AccountID types.AccountID
	Login     bool // stored in the "login" column; presumably marks login-capable credentials — confirm
	Credential types.Credential
}
// AddAccount inserts an account row, serializing the profile as a
// packed capnp blob.
func (tx Tx) AddAccount(a NewAccount) error {
	profile, err := encodeCapnp(a.Profile)
	if err != nil {
		return err
	}
	_, err = tx.sqlTx.Exec(
		`INSERT INTO accounts
		( id
		, role
		, profile
		)
		VALUES (?, ?, ?)`,
		a.ID,
		a.Role,
		profile,
	)
	return err
}
// AddCredential inserts a credential row linking a (type, scopedId)
// credential to an account.
func (tx Tx) AddCredential(c NewCredential) error {
	_, err := tx.sqlTx.Exec(
		`INSERT INTO credentials
		( accountId
		, login
		, type
		, scopedId
		)
		VALUES (?, ?, ?, ?)`,
		c.AccountID,
		c.Login,
		c.Credential.Type,
		c.Credential.ScopedID,
	)
	return err
}
// AddGrain inserts a grain row and attaches the new grain to its
// owner's keyring (with nil permissions).
func (tx Tx) AddGrain(g NewGrain) error {
	_, err := tx.sqlTx.Exec(
		`INSERT INTO grains(id, packageId, title, ownerId) VALUES (?, ?, ?, ?)`,
		g.GrainID, g.PkgID, g.Title, g.OwnerID,
	)
	if err != nil {
		return err
	}
	return tx.AccountKeyring(g.OwnerID).AttachGrain(g.GrainID, nil)
}
// GrainPackageID returns the package id for the specified grain.
// A missing grain surfaces as a wrapped sql.ErrNoRows from Scan.
func (tx Tx) GrainPackageID(grainID types.GrainID) (string, error) {
	row := tx.sqlTx.QueryRow("SELECT packageId FROM grains WHERE id = ?", grainID)
	var result string
	err := row.Scan(&result)
	return result, exc.WrapError("GrainPackageID", err)
}
// GrainInfo returns the title and owner of the given grain.
func (tx Tx) GrainInfo(grainID types.GrainID) (GrainInfo, error) {
	var result GrainInfo
	result.ID = grainID
	row := tx.sqlTx.QueryRow("SELECT title, ownerId FROM grains WHERE id = ?", grainID)
	err := row.Scan(&result.Title, &result.Owner)
	return result, exc.WrapError("GrainInfo", err)
}
// AccountProfile loads and decodes the stored capnp profile for accountID.
func (tx Tx) AccountProfile(accountID types.AccountID) (identity.Profile, error) {
	var buf []byte
	row := tx.sqlTx.QueryRow("SELECT profile FROM accounts WHERE id = ?", accountID)
	if err := row.Scan(&buf); err != nil {
		return identity.Profile{}, err
	}
	return decodeCapnp[identity.Profile](buf)
}
// GrainInfo is a grain's basic metadata as stored in the grains table.
type GrainInfo struct {
	ID    types.GrainID
	Title string
	Owner string // owner account id
}

// UiViewInfo pairs a grain with the permissions a keyring grants on it.
type UiViewInfo struct {
	Grain       GrainInfo
	Permissions []bool
}

// Keyring represents an account's keyring.
type Keyring struct {
	tx Tx
	id types.AccountID
}
// AccountKeyring returns a handle on the keyring owned by account id,
// bound to this transaction.
func (tx Tx) AccountKeyring(id types.AccountID) Keyring {
	return Keyring{tx: tx, id: id}
}
// AttachGrain stores a never-expiring userkeyring sturdyRef for grainID
// owned by this keyring's account, then records the granted app
// permissions in keyringEntries keyed by the sturdyRef's token hash.
func (kr Keyring) AttachGrain(grainID types.GrainID, permissions []bool) error {
	hash, err := kr.tx.SaveSturdyRef(
		SturdyRefKey{
			Token:     tokenutil.GenToken(),
			OwnerType: "userkeyring",
			Owner:     kr.id,
		},
		SturdyRefValue{
			Expires: time.Unix(math.MaxInt64, 0), // never
			GrainID: grainID,
		},
	)
	if err != nil {
		return err
	}
	// FIXME: what happens if we share the same grain twice? id will conflict.
	//
	// One idea: drop the unique constraint on ID, and when querying OR together
	// the permissions on all of the entries in the keyring. This mirrors what
	// sandstorm did, where your access to a grain was the union of all grants.
	_, err = kr.tx.sqlTx.Exec(
		`INSERT INTO keyringEntries
		(id, accountId, sha256, appPermissions)
		VALUES (?, ?, ?, ?)
		`, grainID, kr.id, hash[:], fmtPermissions(permissions))
	return err
}
// AccountGrainPermissions returns the app permissions the account's
// keyring grants on grainID, by joining keyring entries against the
// matching unexpired 'userkeyring' sturdyRefs.
//
// Bug fix: the original join condition was the tautology
// `keyringEntries.sha256 = keyringEntries.sha256`, turning the query
// into a cross join that could match an arbitrary keyring entry; the
// condition now matches entries to sturdyRefs, as AllUiViews does.
func (tx Tx) AccountGrainPermissions(accountID types.AccountID, grainID types.GrainID) (permissions []bool, err error) {
	row := tx.sqlTx.QueryRow(
		`SELECT
			keyringEntries.appPermissions
		FROM
			sturdyRefs, keyringEntries
		WHERE
			keyringEntries.sha256 = sturdyRefs.sha256
			AND sturdyRefs.grainId = ?
			AND sturdyRefs.objectId is null
			AND sturdyRefs.ownerType = 'userkeyring'
			AND sturdyRefs.owner = ?
			AND sturdyRefs.expires > ?
		`,
		grainID,
		accountID,
		time.Now().Unix(),
	)
	var perm string
	err = row.Scan(&perm)
	if err != nil {
		return nil, err
	}
	return parsePermissions(perm)
}
// NewSharingToken creates and persists a sharing token for grainID
// granting perms, with note as a human-readable label. The token's
// sturdyRef stores a SystemObjectId payload (owner type "external-api")
// rather than a direct grain reference. Returns the token string.
func (tx Tx) NewSharingToken(
	grainID types.GrainID,
	perms []bool,
	note string,
) (string, error) {
	return exn.Try(func(throw exn.Thrower) string {
		token := tokenutil.Gen128Base64()
		// Build the capnp SystemObjectId describing this sharing token.
		_, seg := capnp.NewMultiSegmentMessage(nil)
		oid, err := system.NewRootSystemObjectId(seg)
		throw(err)
		oid.SetSharingToken()
		st := oid.SharingToken()
		throw(st.SetGrainId(string(grainID)))
		throw(st.SetNote(note))
		dstPerms, err := st.NewPermissions(int32(len(perms)))
		throw(err)
		for i, p := range perms {
			dstPerms.Set(i, p)
		}
		_, err = tx.SaveSturdyRef(
			SturdyRefKey{
				Token:     []byte(token),
				OwnerType: "external-api",
			},
			SturdyRefValue{
				Expires:  time.Unix(math.MaxInt64, 0), // never
				ObjectID: capnp.Struct(oid),
			},
		)
		throw(err, "saving sturdyRef")
		return token
	})
}
// CredentialAccount returns the account ID associated with the credential.
// If there is no existing account, one is created with the visitor role.
func (tx Tx) CredentialAccount(cred types.Credential) (types.AccountID, error) {
	accountID, err := exn.Try(func(throw exn.Thrower) types.AccountID {
		row := tx.sqlTx.QueryRow(
			`SELECT accountId FROM credentials WHERE type = ? AND scopedId = ?`,
			cred.Type, cred.ScopedID,
		)
		var accountID types.AccountID
		err := row.Scan(&accountID)
		if err == sql.ErrNoRows {
			// ErrNoRows is expected, not an error: clear it before the
			// final throw below.
			err = nil
			// No account; create one and link it to the credential:
			accountID = types.AccountID(tokenutil.Gen128Base64())
			throw(tx.AddAccount(NewAccount{
				ID:   accountID,
				Role: types.RoleVisitor,
			}))
			throw(tx.AddCredential(NewCredential{
				AccountID:  accountID,
				Login:      true,
				Credential: cred,
			}))
		}
		throw(err)
		return accountID
	})
	err = exc.WrapError("CredentialAccount", err)
	return accountID, err
}
// AllUiViews returns all the UiViews in the keyring: every grain the
// account holds an unexpired 'userkeyring' sturdyRef for, along with
// the app permissions recorded in the matching keyring entry.
// (Error label corrected from "AccountUIViews" to match the function name.)
func (kr Keyring) AllUiViews() ([]UiViewInfo, error) {
	rows, err := kr.tx.sqlTx.Query(
		`SELECT
			grains.id,
			grains.title,
			grains.ownerId,
			keyringEntries.appPermissions
		FROM
			grains, sturdyRefs, keyringEntries
		WHERE
			keyringEntries.sha256 = sturdyRefs.sha256
			AND sturdyRefs.grainId = grains.id
			AND sturdyRefs.ownerType = 'userkeyring'
			AND sturdyRefs.owner = ?
			AND sturdyRefs.expires > ?
		`,
		kr.id,
		time.Now().Unix(),
	)
	if err != nil {
		return nil, exc.WrapError("AllUiViews", err)
	}
	defer rows.Close()
	var ret []UiViewInfo
	for rows.Next() {
		var item UiViewInfo
		var perm string
		err := rows.Scan(
			&item.Grain.ID,
			&item.Grain.Title,
			&item.Grain.Owner,
			&perm,
		)
		if err != nil {
			return nil, err
		}
		// Permissions are stored as a 't'/'f' string; decode them.
		item.Permissions, err = parsePermissions(perm)
		if err != nil {
			return nil, err
		}
		ret = append(ret, item)
	}
	return ret, rows.Err()
}
// getGrainOwner returns the account id that owns grainID.
func (tx Tx) getGrainOwner(grainID types.GrainID) (accountID types.AccountID, err error) {
	err = tx.sqlTx.QueryRow(
		`SELECT ownerId FROM grains WHERE id = ?`,
		grainID,
	).Scan(&accountID)
	err = exc.WrapError("getGrainOwner", err)
	return
}
// parsePermissions parses a string like "ttfftf" from the database into
// a slice of booleans. Returns an error if the string contains any
// character other than 't' or 'f'.
func parsePermissions(s string) ([]bool, error) {
	out := make([]bool, len(s))
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case 't':
			out[i] = true
		case 'f':
			out[i] = false
		default:
			return nil, fmt.Errorf(
				"error: invalid permissions string in database: %q",
				s,
			)
		}
	}
	return out, nil
}
// fmtPermissions is the inverse of parsePermissions: it renders a bool
// slice as a string of 't'/'f' characters for database storage.
func fmtPermissions(perm []bool) string {
	out := make([]byte, len(perm))
	for i, p := range perm {
		c := byte('f')
		if p {
			c = 't'
		}
		out[i] = c
	}
	return string(out)
}
// encodeCapnp encodes a capnp struct in the format we store in the database,
// i.e. a single packed segment.
func encodeCapnp[T ~capnp.StructKind](v T) ([]byte, error) {
	// We don't strictly need to canonicalize, but it's expedient:
	// canonical form is a single segment, which is what packing expects.
	buf, err := capnp.Canonicalize(capnp.Struct(v))
	if err != nil {
		return nil, err
	}
	return packed.Pack(nil, buf), nil
}
// decodeCapnp is the inverse of encodeCapnp: it unpacks buf and returns
// the message's root struct as type T.
func decodeCapnp[T ~capnp.StructKind](buf []byte) (T, error) {
	buf, err := packed.Unpack(nil, buf)
	if err != nil {
		return T{}, err
	}
	// encodeCapnp wrote a single canonical segment, so SingleSegment suffices.
	msg := &capnp.Message{Arena: capnp.SingleSegment(buf)}
	ptr, err := msg.Root()
	return T(ptr.Struct()), err
}
// SetGrainViewInfo caches the grain's encoded UiView.ViewInfo in the
// grains table.
func (tx Tx) SetGrainViewInfo(grainID string, viewInfo grain.UiView_ViewInfo) error {
	buf, err := encodeCapnp(viewInfo)
	if err != nil {
		return err
	}
	_, err = tx.sqlTx.Exec(
		`UPDATE grains
		SET cachedViewInfo = ?
		WHERE id = ?`,
		buf,
		grainID,
	)
	return err
}
// A SturdyRefKey is the data by which a sturdyRef may be fetched from the
// database (using RestoreSturdyRef). Only the sha256 hash of Token is
// ever stored.
type SturdyRefKey struct {
	Token     []byte
	OwnerType string // e.g. "userkeyring" or "external-api"
	Owner     types.AccountID
}

// A SturdyRefValue is a persistent value stored in the database, which may
// be fetched via RestoreSturdyRef. Either GrainID or ObjectID (a capnp
// payload) identifies the referenced object.
type SturdyRefValue struct {
	Expires time.Time
	GrainID types.GrainID
	ObjectID capnp.Struct
}
// SaveSturdyRef saves a SturdyRef in the database. k's token must not be
// nil. Returns the sha256 hash of the token, which serves as a key in
// the database.
func (tx Tx) SaveSturdyRef(k SturdyRefKey, v SturdyRefValue) ([sha256.Size]byte, error) {
	if k.Token == nil {
		panic("Called SaveSturdyRef with nil token")
	}
	hash := sha256.Sum256(k.Token)
	// An empty GrainID is stored as SQL NULL via the nil pointer.
	var grainID *types.GrainID
	if v.GrainID != "" {
		grainID = &v.GrainID
	}
	var (
		objectID []byte
		err      error
	)
	// Only a valid ObjectID is encoded; otherwise NULL is stored.
	if v.ObjectID.IsValid() {
		objectID, err = encodeCapnp(v.ObjectID)
	}
	if err != nil {
		return hash, err
	}
	_, err = tx.sqlTx.Exec(
		`INSERT INTO sturdyRefs
		( sha256
		, ownerType
		, owner
		, expires
		, grainId
		, objectId
		)
		VALUES (?, ?, ?, ?, ?, ?)
		`,
		hash[:],
		k.OwnerType,
		k.Owner,
		v.Expires.Unix(),
		grainID,
		objectID,
	)
	return hash, err
}
// RestoreSturdyRef restores a SturdyRef from the database, matching on
// owner type, owner, and the token's sha256 hash; expired refs are not
// returned.
func (tx Tx) RestoreSturdyRef(k SturdyRefKey) (SturdyRefValue, error) {
	hash := sha256.Sum256(k.Token)
	row := tx.sqlTx.QueryRow(
		`SELECT expires, grainId, objectId
		FROM sturdyRefs
		WHERE
			ownerType = ?
			AND owner = ?
			AND sha256 = ?
			AND expires > ?
		`,
		k.OwnerType,
		k.Owner,
		hash[:],
		time.Now().Unix(),
	)
	var (
		expires  int64
		objectID []byte
		// grainId may be NULL, so scan into a pointer.
		grainID *types.GrainID
		ret     SturdyRefValue
	)
	err := row.Scan(&expires, &grainID, &objectID)
	err = exc.WrapError("RestoreSturdyRef", err)
	if err != nil {
		return ret, err
	}
	ret.Expires = time.Unix(expires, 0)
	if len(objectID) > 0 {
		ret.ObjectID, err = decodeCapnp[capnp.Struct](objectID)
	}
	if grainID != nil {
		ret.GrainID = *grainID
	}
	return ret, err
}
// DeleteSturdyRef deletes a sturdyref from the database. Note that only
// the token hash is used in the WHERE clause; k's OwnerType and Owner
// fields are ignored here.
func (tx Tx) DeleteSturdyRef(k SturdyRefKey) error {
	hash := sha256.Sum256(k.Token)
	_, err := tx.sqlTx.Exec(`DELETE FROM sturdyRefs WHERE sha256 = ?`, hash[:])
	return err
}
// CredentialRole gets the role corresponding to the credential. Returns
// RoleVisitor (with nil error) for unknown credentials.
func (tx Tx) CredentialRole(cred types.Credential) (role types.Role, err error) {
	row := tx.sqlTx.QueryRow(`
		SELECT role
		FROM accounts, credentials
		WHERE
			accounts.id = credentials.accountId
			AND credentials.type = ?
			AND credentials.scopedId = ?`,
		cred.Type,
		cred.ScopedID)
	err = row.Scan(&role)
	if err == sql.ErrNoRows {
		// Unknown credential: default to visitor rather than erroring.
		return types.RoleVisitor, nil
	}
	return role, exc.WrapError("CredentialRole", err)
}
|
//go:build !windows
package watcher
import (
"bufio"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// mockNotifier records the most recent change notification so the test can
// assert on it.
type mockNotifier struct {
	eventPath string // path reported by the last WatcherItemDidChange call
}
// WatcherItemDidChange remembers the path of the item that changed.
func (n *mockNotifier) WatcherItemDidChange(path string) {
	n.eventPath = path
}
// WatcherDidError ignores watcher errors; this test only cares about change events.
func (n *mockNotifier) WatcherDidError(err error) {
}
// TestFileChanged verifies that writing to a watched file produces a
// WatcherItemDidChange notification carrying the file's path.
func TestFileChanged(t *testing.T) {
	filePath := "test_file"
	f, err := os.Create(filePath)
	assert.NoError(t, err)
	defer func() {
		f.Close()
		os.Remove(filePath)
	}()
	service, err := NewFile()
	assert.NoError(t, err)
	err = service.Add(filePath)
	assert.NoError(t, err)
	n := &mockNotifier{}
	go service.Start(n)
	f.Sync()
	w := bufio.NewWriter(f)
	_, err = w.WriteString("hello Austin, do you like my file watcher?\n")
	assert.NoError(t, err)
	err = w.Flush()
	assert.NoError(t, err)
	// Give the watcher goroutine time to observe the write and deliver the
	// event. NOTE(review): sleep-based synchronization is inherently racy;
	// a channel on mockNotifier would make this deterministic — confirm.
	time.Sleep(20 * time.Millisecond)
	service.Shutdown()
	// Fixed message grammar: "an new" -> "a new".
	assert.Equal(t, filePath, n.eventPath, "notifier didn't get a new file write event")
}
|
package gw
import (
"context"
"fmt"
"github.com/oceanho/gw/conf"
"github.com/oceanho/gw/libs/gwjsoner"
"time"
)
// DefaultSessionStateManagerImpl persists login session state in the cache
// store configured under Security.Auth.Session.DefaultStore.
type DefaultSessionStateManagerImpl struct {
	store IStore // backing store provider
	storeName string // cache store name, cached from config in the constructor
	storePrefix string // key prefix; full keys are "<prefix>.<sid>"
	expirationDuration time.Duration // session TTL, from cookie MaxAge (seconds)
	redisTimeout time.Duration // per-call timeout (see the FIXME in Remove; currently unused)
	cnf *conf.ApplicationConfig // full application configuration
}
// DefaultSessionStateManager builds a session state manager from the server's
// store and configuration: store name/prefix come from the session default
// store config, the TTL from the auth cookie MaxAge (seconds), and the redis
// timeout from the timeout-control settings (milliseconds).
func DefaultSessionStateManager(state *ServerState) *DefaultSessionStateManagerImpl {
	cnf := state.ApplicationConfig()
	m := &DefaultSessionStateManagerImpl{
		store:              state.Store(),
		cnf:                cnf,
		storeName:          cnf.Security.Auth.Session.DefaultStore.Name,
		storePrefix:        cnf.Security.Auth.Session.DefaultStore.Prefix,
		expirationDuration: time.Duration(cnf.Security.Auth.Cookie.MaxAge) * time.Second,
		redisTimeout:       time.Duration(cnf.Settings.TimeoutControl.Redis) * time.Millisecond,
	}
	return m
}
// context returns a background-derived context bounded by the configured
// redis timeout, plus its cancel function.
func (d *DefaultSessionStateManagerImpl) context() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), d.redisTimeout)
}
// storeKey builds the cache key for a session id: "<prefix>.<sid>".
func (d *DefaultSessionStateManagerImpl) storeKey(sid string) string {
	return fmt.Sprintf("%s.%s", d.storePrefix, sid)
}
// Remove deletes the session identified by sid from the cache store.
func (d *DefaultSessionStateManagerImpl) Remove(sid string) error {
	//FIXME(ocean): deadline executed error ?
	//ctx, cancel := d.context()
	//defer cancel()
	ctx := context.Background()
	// Consistency fix: use the cached d.storeName like Save/Query do; it is
	// the same value as cnf.Security.Auth.Session.DefaultStore.Name, which
	// the constructor copied into storeName.
	redis := d.store.GetCacheStoreByName(d.storeName)
	return redis.Del(ctx, d.storeKey(sid)).Err()
}
// Save stores the user under the session id with the configured TTL.
func (d *DefaultSessionStateManagerImpl) Save(sid string, user User) error {
	//ctx, cancel := d.context()
	//defer cancel()
	cache := d.store.GetCacheStoreByName(d.storeName)
	return cache.Set(context.Background(), d.storeKey(sid), user, d.expirationDuration).Err()
}
// Query loads and decodes the user stored for sid. It returns EmptyUser with
// the underlying error when the key is missing or the payload fails to decode.
func (d *DefaultSessionStateManagerImpl) Query(sid string) (User, error) {
	//ctx, cancel := d.context()
	//defer cancel()
	cache := d.store.GetCacheStoreByName(d.storeName)
	payload, err := cache.Get(context.Background(), d.storeKey(sid)).Bytes()
	if err != nil {
		return EmptyUser, err
	}
	var user User
	if err := gwjsoner.Unmarshal(payload, &user); err != nil {
		return EmptyUser, err
	}
	return user, nil
}
|
// Package pager 分页工具
// 目前可使用具体实现
// var driver pager.Driver
// driver = NewMongoDriver()
// driver = NewGormDriver()
// driver = NewMgoDriver()
//
// pager.New(ctx, driver).SetIndex(c.entity.TableName()).Find(c.entity).Result()
package pager
import (
"github.com/gin-gonic/gin"
"reflect"
"strconv"
"strings"
)
// Where holds equality query conditions as field-name -> value pairs.
type Where map[string]interface{}
// Sort is a sort direction; see the Desc and Asc constants.
type Sort int
const (
	// Desc sorts in descending order.
	Desc Sort = -1
	// Asc sorts in ascending order. Explicitly typed as Sort for consistency
	// with Desc (the original left Asc as an untyped constant).
	Asc Sort = 1
)
// Driver is the storage-specific query backend; Pagination passes the parsed
// request parameters into it just before the real query runs.
type Driver interface {
	// Where sets equality filter conditions.
	Where(kv Where)
	// Section sets range (interval) filter conditions.
	Section(section Section)
	// Limit sets the page size.
	Limit(limit int)
	// Skip sets the number of rows to skip.
	Skip(skip int)
	// Index names the concrete data set: a table name, an elasticsearch index, etc.
	Index(index string)
	// Sort sets the per-field sort direction.
	Sort(kv map[string]Sort)
	// Find executes the query and fills data.
	Find(data interface{})
	// SetTyp receives the reflected element type of the data passed to Find,
	// for implementations that need to inspect the structure.
	SetTyp(typ reflect.Type)
	// Count returns the total number of rows matching the current filters.
	Count() int
}
// Result is the response envelope produced by a paginated query.
type Result struct {
	// Data is the list of rows on the current page.
	Data interface{} `json:"data" xml:"data"`
	// NextID is the next-page cursor: the id of this page's last row.
	NextID interface{} `json:"next_id" xml:"next_id"`
	// PrevID is the previous-page cursor: the id of this page's first row.
	PrevID interface{} `json:"prev_id" xml:"prev_id"`
	// Count is the total number of rows under the current filters.
	Count int `json:"count" xml:"count"`
	// Rows is the page size.
	Rows int `json:"rows" xml:"rows"`
}
// Pagination drives a paginated query: it parses paging parameters from the
// gin request, feeds them to the Driver, and assembles a Result.
type Pagination struct {
	ctx *gin.Context // request whose query parameters configure the page
	defaultWhere Where // filters applied to every Find on this instance
	defaultLimit int // page size used when the request has no "rows" parameter
	index string // table/collection/index name passed to the driver
	driver Driver // storage backend executing the query
	dataTyp reflect.Type // reflected element type of the slice handed to Find
	nextStartField string // struct field name read for the next-page cursor id
	prevStartField string // struct field name read for the previous-page cursor id
	result *Result // outcome of the most recent Find
}
// New creates a Pagination bound to the request context and query driver;
// the default page size is 12 rows.
func New(ctx *gin.Context, driver Driver) *Pagination {
	return &Pagination{ctx: ctx, driver: driver, defaultLimit: 12}
}
// Result returns the result assembled by the most recent Find call.
func (pagination *Pagination) Result() *Result {
	return pagination.result
}
// SetNextStartField sets the struct field name used to read the next-page cursor id.
func (pagination *Pagination) SetNextStartField(field string) *Pagination {
	pagination.nextStartField = field
	return pagination
}
// SetPrevStartField sets the struct field name used to read the previous-page cursor id.
func (pagination *Pagination) SetPrevStartField(field string) *Pagination {
	pagination.prevStartField = field
	return pagination
}
// SetIndex sets the collection or table name; some drivers (e.g. elasticsearch) need it.
func (pagination *Pagination) SetIndex(index string) *Pagination {
	pagination.index = index
	return pagination
}
// Where sets default query conditions that are applied to every Find on this instance.
func (pagination *Pagination) Where(kv Where) *Pagination {
	pagination.defaultWhere = kv
	return pagination
}
// Limit sets the default page size used when the request does not carry "rows".
func (pagination *Pagination) Limit(limit int) *Pagination {
	pagination.defaultLimit = limit
	return pagination
}
// Find runs the query through the configured driver and stores the outcome
// in pagination.result.
// structure is the element type: pass the struct value itself — not a
// pointer, not a slice — a fresh []T is built via reflection and handed to
// the driver.
func (pagination *Pagination) Find(structure interface{}) *Pagination {
	limit := ParsingLimit(pagination.ctx, pagination.defaultLimit)
	pagination.dataTyp = reflect.TypeOf(structure)
	pagination.driver.SetTyp(pagination.dataTyp)
	pagination.driver.Limit(limit)
	pagination.driver.Sort(ParseSorts(pagination.ctx))
	pagination.driver.Index(pagination.index)
	pagination.driver.Skip(limit * ParseSkip(pagination.ctx))
	pagination.driver.Section(ParseSection(pagination.ctx))
	// Instance defaults overwrite request-supplied filters with the same key.
	pagination.driver.Where(mergeWhere(pagination.defaultWhere, ParsingQuery(pagination.ctx)))
	data := newSlice(pagination.dataTyp)
	pagination.driver.Find(data.Interface())
	pagination.result = &Result{
		Data: data.Interface(),
		Count: pagination.driver.Count(),
		Rows: limit,
	}
	// Cursor ids are only filled when both cursor field names are configured
	// and at least one row came back.
	if data.Elem().Len() > 0 && (pagination.nextStartField != "" && pagination.prevStartField != "") {
		pagination.result.NextID = data.Elem().Index(data.Elem().Len() - 1).FieldByName(pagination.nextStartField).Interface()
		pagination.result.PrevID = data.Elem().Index(0).FieldByName(pagination.prevStartField).Interface()
	}
	return pagination
}
// ParseSkip returns the number of pages to skip, derived from the 1-based
// "page" query parameter. Missing, non-numeric, or out-of-range values are
// clamped to 0 so callers never compute a negative offset (the original
// returned -1 for page=0 or unparsable input).
func ParseSkip(ctx *gin.Context) int {
	page := ctx.DefaultQuery("page", "1")
	p, err := strconv.Atoi(page)
	if err != nil || p < 1 {
		return 0
	}
	return p - 1
}
// ParseSorts parses the "sorts" query parameter, e.g. sorts=-field1,+field2,field3.
// A "-" prefix marks descending order; "+" or no prefix marks ascending.
func ParseSorts(ctx *gin.Context) map[string]Sort {
	var sortMap = make(map[string]Sort)
	query, exists := ctx.GetQuery("sorts")
	if !exists {
		return sortMap
	}
	sorts := strings.Split(query, ",")
	for _, sort := range sorts {
		if sort == "" {
			// Skip empty entries ("a,,b" or a trailing comma); the original
			// recorded them under the empty field name.
			continue
		}
		if strings.HasPrefix(sort, "-") {
			sortMap[strings.TrimPrefix(sort, "-")] = Desc
		} else {
			sortMap[strings.TrimPrefix(sort, "+")] = Asc
		}
	}
	return sortMap
}
// ParsingLimit parses the page size from the "rows" query parameter. Missing,
// non-numeric, or non-positive values fall back to defaultLimit (the original
// passed 0 or negative limits straight through to the driver).
func ParsingLimit(ctx *gin.Context, defaultLimit int) int {
	val := ctx.DefaultQuery("rows", strconv.Itoa(defaultLimit))
	limit, err := strconv.Atoi(val)
	if err != nil || limit <= 0 {
		return defaultLimit
	}
	return limit
}
// ParsingQuery converts the request's URL query parameters into a Where map.
// Single non-empty values are stored as strings; repeated parameters are
// stored as the full []string; empty single values are dropped.
func ParsingQuery(ctx *gin.Context) Where {
	where := make(Where)
	for key, values := range ctx.Request.URL.Query() {
		switch {
		case len(values) == 1:
			if values[0] != "" {
				where[key] = values[0]
			}
		case len(values) > 1:
			where[key] = values
		}
	}
	return where
}
// mergeWhere copies defaultWhere into where (defaults overwrite any
// request-supplied value with the same key) and strips the reserved paging
// parameters rows/sorts/page/section from the result.
func mergeWhere(defaultWhere, where Where) Where {
	if where == nil {
		// Robustness: writing to a nil map panics; allocate one so callers
		// may pass nil when there are no request filters.
		where = make(Where, len(defaultWhere))
	}
	for k, v := range defaultWhere {
		if k == "rows" || k == "sorts" || k == "page" || k == "section" {
			continue
		}
		where[k] = v
	}
	delete(where, "rows")
	delete(where, "sorts")
	delete(where, "page")
	delete(where, "section")
	return where
}
func newSlice(typ reflect.Type) reflect.Value {
newInstance := reflect.MakeSlice(reflect.SliceOf(typ), 0, 0)
items := reflect.New(newInstance.Type())
items.Elem().Set(newInstance)
return items
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package main
import (
"fmt"
"htrace/common"
"htrace/conf"
"htrace/test"
"math/rand"
"testing"
"time"
)
// TestReapingOldSpans verifies that the span reaper removes spans whose Begin
// time falls before the reaper date while keeping the newest span.
func TestReapingOldSpans(t *testing.T) {
	const NUM_TEST_SPANS = 20
	testSpans := make([]*common.Span, NUM_TEST_SPANS)
	rnd := rand.New(rand.NewSource(2))
	now := common.TimeToUnixMs(time.Now().UTC())
	// Begin times are now-19, now-18, ..., now; only the last span survives
	// a reaper date of 'now'.
	for i := range testSpans {
		testSpans[i] = test.NewRandomSpan(rnd, testSpans[0:i])
		testSpans[i].Begin = now - int64(NUM_TEST_SPANS-1-i)
		testSpans[i].Description = fmt.Sprintf("Span%02d", i)
	}
	htraceBld := &MiniHTracedBuilder{Name: "TestReapingOldSpans",
		Cnf: map[string]string{
			conf.HTRACE_SPAN_EXPIRY_MS:                fmt.Sprintf("%d", 60*60*1000),
			conf.HTRACE_REAPER_HEARTBEAT_PERIOD_MS:    "1",
			conf.HTRACE_DATASTORE_HEARTBEAT_PERIOD_MS: "1",
		},
		WrittenSpans: common.NewSemaphore(0),
		DataDirs:     make([]string, 2),
	}
	ht, err := htraceBld.Build()
	if err != nil {
		t.Fatalf("failed to create mini htraced cluster: %s\n", err.Error())
	}
	// Register cleanup as soon as the cluster exists. The original deferred
	// Close only at the very end of the function, so any failure before that
	// line leaked the cluster.
	defer ht.Close()
	ing := ht.Store.NewSpanIngestor(ht.Store.lg, "127.0.0.1", "")
	for spanIdx := range testSpans {
		ing.IngestSpan(testSpans[spanIdx])
	}
	ing.Close(time.Now())
	// Wait for the spans to be created
	ht.Store.WrittenSpans.Waits(NUM_TEST_SPANS)
	// Set a reaper date that will remove all the spans except final one.
	ht.Store.rpr.SetReaperDate(now)
	common.WaitFor(5*time.Minute, time.Millisecond, func() bool {
		for i := 0; i < NUM_TEST_SPANS-1; i++ {
			span := ht.Store.FindSpan(testSpans[i].Id)
			if span != nil {
				ht.Store.lg.Debugf("Waiting for %s to be removed...\n",
					testSpans[i].Description)
				return false
			}
		}
		span := ht.Store.FindSpan(testSpans[NUM_TEST_SPANS-1].Id)
		if span == nil {
			ht.Store.lg.Debugf("Did not expect %s to be removed\n",
				testSpans[NUM_TEST_SPANS-1].Description)
			return false
		}
		return true
	})
}
|
//go:build integration
// +build integration
package integration
import (
"io/ioutil"
"os"
"strings"
"syscall"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const cleanupTxt = "cleanup.txt"
// TestLocalResource exercises the local_resource lifecycle end to end:
// startup ordering behind a readiness probe, restart on a watched-file
// change, probe failure, and SIGTERM cleanup propagation.
func TestLocalResource(t *testing.T) {
	f := newFixture(t, "local_resource")
	removeTestFiles := func() {
		require.NoError(t, os.RemoveAll(f.testDirPath(cleanupTxt)))
		require.NoError(t, os.RemoveAll(f.testDirPath("greeting")))
		require.NoError(t, os.RemoveAll(f.testDirPath("probe-success")))
	}
	// Clean both before (stale state from a prior run) and after the test.
	removeTestFiles()
	t.Cleanup(removeTestFiles)
	f.TiltUp()
	const barServeLogMessage = "Running cmd: ./hello.sh bar"
	const readinessProbeSuccessMessage = `[readiness probe: success] fake probe success message`
	f.logs.AssertEventuallyContains(t, "hello! foo #1", 5*time.Second)
	// write a sentinel file for the probe to find and change its result
	if assert.NoError(t, ioutil.WriteFile(f.testDirPath("probe-success"), nil, 0777)) {
		f.logs.AssertEventuallyContains(t, readinessProbeSuccessMessage, 5*time.Second)
	}
	// wait for second resource to start and then ensure that the order in the logs is as expected
	f.logs.AssertEventuallyContains(t, barServeLogMessage, 5*time.Second)
	curLogs := f.logs.String()
	assert.Greater(t, strings.Index(curLogs, barServeLogMessage), strings.Index(curLogs, readinessProbeSuccessMessage),
		"dependent resource started BEFORE other resource ready")
	f.logs.AssertEventuallyContains(t, "hello! bar #1", 5*time.Second)
	// trigger a service restart by changing a watched file
	if assert.NoError(t, ioutil.WriteFile(f.testDirPath("greeting"), []byte("hola"), 0777)) {
		f.logs.AssertEventuallyContains(t, "hola! foo #1", 5*time.Second)
	}
	// force the probe into a failure state
	if assert.NoError(t, os.Remove(f.testDirPath("probe-success"))) {
		f.logs.AssertEventuallyContains(t, `[readiness probe: failure] fake probe failure message`, 5*time.Second)
	}
	// send a SIGTERM and make sure Tilt propagates it to its local_resource processes
	require.NoError(t, f.activeTiltUp.process.Signal(syscall.SIGTERM))
	select {
	case <-f.activeTiltUp.done:
	case <-time.After(5 * time.Second):
		t.Fatal("Tilt failed to exit within 5 seconds of SIGTERM")
	}
	// hello.sh writes to cleanup.txt on SIGTERM
	b, err := ioutil.ReadFile(f.testDirPath(cleanupTxt))
	if assert.NoError(t, err) {
		s := string(b)
		require.Contains(t, s, "cleaning up: foo")
		require.Contains(t, s, "cleaning up: bar")
	}
}
|
// Copyright 2020 The Tekton Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pipelinerun
import (
"os"
"regexp"
"testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
// TestGetAuthSecretWithExistingToken covers the happy path: the default
// secret exists with a "token" key, whose value is returned.
func TestGetAuthSecretWithExistingToken(t *testing.T) {
	secret := makeSecret(defaultSecretName,
		map[string][]byte{"token": []byte(testToken)})
	objs := []runtime.Object{
		secret,
	}
	cl := fake.NewFakeClient(objs...)
	sec, err := getAuthSecret(cl, testNamespace)
	if err != nil {
		t.Fatal(err)
	}
	if sec != testToken {
		t.Fatalf("got %s, want %s", sec, testToken)
	}
}
// TestGetAuthSecretWithNoSecret covers the error path when no secret exists.
func TestGetAuthSecretWithNoSecret(t *testing.T) {
	objs := []runtime.Object{}
	cl := fake.NewFakeClient(objs...)
	_, err := getAuthSecret(cl, testNamespace)
	wantErr := "error getting secret 'commit-status-tracker-git-secret' in namespace 'test-namespace':.* not found"
	if !matchError(t, wantErr, err) {
		t.Fatalf("failed to match error when no secret: got %s, want %s", err, wantErr)
	}
}
// TestGetAuthSecretWithNoToken covers the error path when the secret exists
// but lacks a "token" key.
func TestGetAuthSecretWithNoToken(t *testing.T) {
	secret := makeSecret(
		defaultSecretName,
		map[string][]byte{})
	objs := []runtime.Object{
		secret,
	}
	cl := fake.NewFakeClient(objs...)
	_, err := getAuthSecret(cl, testNamespace)
	wantErr := "secret .* does not have a 'token' key"
	if !matchError(t, wantErr, err) {
		// Fixed copy-pasted failure message: this case is "no token",
		// not "no secret".
		t.Fatalf("failed to match error when no token: got %s, want %s", err, wantErr)
	}
}
// TestGetAuthSecretWithNameInEnvironment verifies that the secret name can be
// overridden via the environment variable; the previous value is restored
// when the test finishes.
func TestGetAuthSecretWithNameInEnvironment(t *testing.T) {
	customSecretName := "testing-secret-name"
	old := os.Getenv(secretNameEnvVar)
	defer func() {
		os.Setenv(secretNameEnvVar, old)
	}()
	os.Setenv(secretNameEnvVar, customSecretName)
	secret := makeSecret(customSecretName,
		map[string][]byte{"token": []byte(testToken)})
	objs := []runtime.Object{
		secret,
	}
	cl := fake.NewFakeClient(objs...)
	sec, err := getAuthSecret(cl, testNamespace)
	if err != nil {
		t.Fatal(err)
	}
	if sec != testToken {
		t.Fatalf("got %s, want %s", sec, testToken)
	}
}
// makeSecret builds an Opaque corev1 Secret named secretName in the shared
// test namespace, carrying data as its payload.
func makeSecret(secretName string, data map[string][]byte) *corev1.Secret {
	return &corev1.Secret{
		Type: corev1.SecretTypeOpaque,
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretName,
			Namespace: testNamespace,
		},
		Data: data,
	}
}
// matchError reports whether e's message matches the regular expression s.
// An empty pattern matches only a nil error; a nil error never matches a
// non-empty pattern. An invalid pattern fails the test immediately.
func matchError(t *testing.T, s string, e error) bool {
	t.Helper()
	if e == nil {
		return s == ""
	}
	matched, err := regexp.MatchString(s, e.Error())
	if err != nil {
		t.Fatal(err)
	}
	return matched
}
|
package confluence
import (
"testing"
)
// TestContentRequestPayload checks that the payload builder increments the
// version and copies the space key, title and HTML body into the right fields.
func TestContentRequestPayload(t *testing.T) {
	spaceId := "test"
	version := 1
	title := "Hello!"
	html := "<p>Hello, World</p>"
	req := ContentRequestPayload(spaceId, version, title, html)
	if 2 != req.Version.Number {
		t.Errorf("Expected to increment the version number")
	}
	if spaceId != req.Space.Key {
		t.Errorf("expected to set Space.Key (%s), got %s", spaceId, req.Space.Key)
	}
	if title != req.Title {
		t.Errorf("expected to set Title (%s), got %s", title, req.Title)
	}
	if html != req.Body.StorageView.Value {
		// Bug fix: the failure message previously printed req.Title instead
		// of the value actually under test.
		t.Errorf("expected to set req.Body.StorageView.Value (%s), got %s", html, req.Body.StorageView.Value)
	}
}
|
package xuperos
import (
"fmt"
"log"
"os"
"testing"
// import要使用的内核核心组件驱动
_ "github.com/xuperchain/xupercore/bcs/consensus/pow"
_ "github.com/xuperchain/xupercore/bcs/consensus/single"
_ "github.com/xuperchain/xupercore/bcs/consensus/tdpos"
_ "github.com/xuperchain/xupercore/bcs/consensus/xpoa"
_ "github.com/xuperchain/xupercore/bcs/contract/evm"
_ "github.com/xuperchain/xupercore/bcs/contract/native"
_ "github.com/xuperchain/xupercore/bcs/contract/xvm"
_ "github.com/xuperchain/xupercore/bcs/network/p2pv1"
_ "github.com/xuperchain/xupercore/bcs/network/p2pv2"
_ "github.com/xuperchain/xupercore/kernel/contract/kernel"
_ "github.com/xuperchain/xupercore/kernel/contract/manager"
_ "github.com/xuperchain/xupercore/lib/crypto/client"
_ "github.com/xuperchain/xupercore/lib/storage/kvdb/leveldb"
xledger "github.com/xuperchain/xupercore/bcs/ledger/xledger/utils"
xconf "github.com/xuperchain/xupercore/kernel/common/xconfig"
"github.com/xuperchain/xupercore/kernel/engines/xuperos/common"
"github.com/xuperchain/xupercore/kernel/mock"
)
// CreateLedger creates the "xuper" ledger under conf, using the genesis file
// shipped with the mock test data.
// NOTE(review): the genesis path is resolved from a freshly created mock env
// conf while the ledger itself is created under the conf argument — confirm
// this mix is intended.
func CreateLedger(conf *xconf.EnvConf) error {
	mockConf, err := mock.NewEnvConfForTest()
	if err != nil {
		return fmt.Errorf("new mock env conf error: %v", err)
	}
	genesisPath := mockConf.GenDataAbsPath("genesis/xuper.json")
	err = xledger.CreateLedger("xuper", genesisPath, conf)
	if err != nil {
		log.Printf("create ledger failed.err:%v\n", err)
		// Preserve the underlying cause; the original returned a bare
		// "create ledger failed" and discarded err.
		return fmt.Errorf("create ledger failed: %v", err)
	}
	return nil
}
// RemoveLedger deletes the blockchain data directory under conf, logging and
// returning any removal error.
func RemoveLedger(conf *xconf.EnvConf) error {
	dataPath := conf.GenDataAbsPath("blockchain")
	err := os.RemoveAll(dataPath)
	if err != nil {
		log.Printf("remove ledger failed.err:%v\n", err)
	}
	return err
}
// MockEngine builds a fully initialized engine from the test config at path:
// it recreates a fresh ledger, initializes a new engine on it, and converts
// it to the common.Engine interface.
func MockEngine(path string) (common.Engine, error) {
	conf, err := mock.NewEnvConfForTest(path)
	if err != nil {
		return nil, fmt.Errorf("new env conf error: %v", err)
	}
	// Best-effort removal of any ledger left over from a previous run.
	RemoveLedger(conf)
	if err := CreateLedger(conf); err != nil {
		return nil, err
	}
	engine := NewEngine()
	if err := engine.Init(conf); err != nil {
		return nil, fmt.Errorf("init engine error: %v", err)
	}
	eng, err := EngineConvert(engine)
	if err != nil {
		return nil, fmt.Errorf("engine convert error: %v", err)
	}
	return eng, nil
}
// TestEngine boots a mock engine from the p2pv2 node1 config. Failures are
// only logged, not fatal, so the test tolerates missing test data.
func TestEngine(t *testing.T) {
	_, err := MockEngine("p2pv2/node1/conf/env.yaml")
	if err != nil {
		t.Logf("%v", err)
		return
	}
	// go engine.Run()
	// engine.Exit()
}
|
// challenge :: https://www.hackerrank.com/challenges/staircase
package staircase
import (
"fmt"
"strings"
)
// main reads the staircase height from stdin and prints a right-aligned
// staircase of '#' characters, one row per line.
func main() {
	var n int
	fmt.Scanf("%v\n", &n)
	for row := 1; row <= n; row++ {
		// Each row has `row` hashes padded on the left to width n.
		fmt.Println(strings.Repeat(" ", n-row) + strings.Repeat("#", row))
	}
}
|
package retry
import (
"errors"
"fmt"
"os"
"testing"
"time"
"github.com/go-toolkit/slog"
"github.com/go-toolkit/utils"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
// logger is shared by every test in this file; initialized in TestMain.
var logger *zap.Logger
// TestMain sets up the package-wide logger before running the tests.
func TestMain(m *testing.M) {
	cfg := slog.Conf{}.DefaultConf()
	logger = slog.NewLogger(&cfg, `test`)
	os.Exit(m.Run())
}
// TestQuickRetry drives QuickRetry with a function that always fails and
// expects the final error to surface; i counts attempts, j counts how often
// the retry-extra hook ran.
func TestQuickRetry(t *testing.T) {
	i := 0
	j := 0
	fun := func() error {
		i++
		return errors.New("测试")
	}
	retryExtra := func() {
		j++
	}
	require.Error(t, QuickRetry(fun, func(err error) bool {
		return true
	}, retryExtra, time.Second*10, time.Second))
	t.Log(i, j)
}
// TestRetry expects Retry to give up after 10 attempts
// (1s total budget at a 100ms interval).
func TestRetry(t *testing.T) {
	times := 0
	err := Retry(time.Second, time.Millisecond*100, logger, func() error {
		times++
		return fmt.Errorf("%d", times)
	})
	require.Error(t, err)
	require.EqualValues(t, 10, times)
}
// TestRetryAllTheTime checks that RetryAllTheTime keeps retrying a failing
// function: after roughly 2s at a 100ms interval it should have run 20 times.
// NOTE(review): require is called inside a goroutine here; testify's require
// should only run on the test goroutine — confirm EnsureGo accounts for this.
func TestRetryAllTheTime(t *testing.T) {
	times := 0
	utils.EnsureGo(logger, func() {
		require.NoError(t, RetryAllTheTime(time.Millisecond*100, logger, func() error {
			times++
			t.Log(times)
			return fmt.Errorf("%d", times)
		}))
	})
	time.Sleep(time.Second * 2)
	require.EqualValues(t, 20, times)
}
// TestRetryGeneric exercises RetryGeneric; it effectively only verifies the
// attempt-count limit (translated from the original Chinese comment).
func TestRetryGeneric(t *testing.T) {
	times := uint(10)
	err := RetryGeneric(NewTimesJudge(times), time.Millisecond*100, logger, func() error {
		times--
		return fmt.Errorf("%d", times)
	})
	require.Error(t, err)
	require.EqualValues(t, 0, times)
}
// TestLogicCompositeJudge_FinishedAnd checks the AND combination: both the
// timeout and the attempt budget must be exhausted before Finished is true.
func TestLogicCompositeJudge_FinishedAnd(t *testing.T) {
	times := uint(10)
	timesJudge := NewTimesJudge(times)
	timeout := time.Millisecond * 20
	timeoutJudge := NewTimeoutJudge(timeout)
	judge := NewLogicCompositeJudge(LogicAnd, []RetryJudge{timeoutJudge, timesJudge})
	require.False(t, judge.Finished())
	time.Sleep(timeout)
	// Timeout alone is not sufficient under AND.
	require.False(t, judge.Finished())
	for i := 0; i < int(times)-2; i++ {
		require.False(t, judge.Finished())
	}
	require.True(t, judge.Finished())
}
// TestLogicCompositeJudge_FinishedOr checks the OR combination: either the
// timeout elapsing or the attempt budget running out is enough.
// (Translated from the original Chinese comments.)
func TestLogicCompositeJudge_FinishedOr(t *testing.T) {
	times := uint(10)
	timesJudge := NewTimesJudge(times)
	timeout := time.Millisecond * 20
	timeoutJudge := NewTimeoutJudge(timeout)
	judge := NewLogicCompositeJudge(LogicOr, []RetryJudge{timeoutJudge, timesJudge})
	require.False(t, judge.Finished())
	time.Sleep(timeout)
	require.True(t, judge.Finished())
	// The timeout case passed; now verify the attempt-count side.
	start := time.Now()
	timesJudge = NewTimesJudge(times)
	timeout = time.Millisecond * 20
	timeoutJudge = NewTimeoutJudge(timeout)
	judge = NewLogicCompositeJudge(LogicOr, []RetryJudge{timeoutJudge, timesJudge})
	require.False(t, judge.Finished())
	for i := 0; i < int(times)-1; i++ {
		require.False(t, judge.Finished())
	}
	require.True(t, judge.Finished())
	// Confirm the timeout had not elapsed meanwhile.
	require.True(t, time.Now().Before(start.Add(timeout)))
}
|
package Sliding_Window_Maximum
import "container/list"
// Node is one candidate kept in the deque: the value nums[Index] together
// with its position in the input slice.
type Node struct {
	Index int
	Value int
}
// maxSlidingWindow2 returns the maximum of every contiguous window of size k
// in nums, using a monotonically decreasing deque (container/list) so each
// element is pushed and popped at most once — O(n) overall.
// Returns nil when nums is empty or k is out of range.
func maxSlidingWindow2(nums []int, k int) []int {
	if len(nums) == 0 || k < 1 || k > len(nums) {
		return nil
	}
	l := list.New()
	// Only len(nums)-k+1 windows exist; the original allocated len(nums)
	// entries and sliced off the dead prefix at the end.
	result := make([]int, 0, len(nums)-k+1)
	for i := range nums {
		// Drop the front element once it slides out of the current window.
		if l.Front() != nil && i-k >= l.Front().Value.(Node).Index {
			l.Remove(l.Front())
		}
		// Pop smaller elements from the back; they can never become a
		// maximum while nums[i] remains in the window.
		for l.Len() != 0 && l.Back().Value.(Node).Value < nums[i] {
			l.Remove(l.Back())
		}
		l.PushBack(Node{
			Index: i,
			Value: nums[i],
		})
		// The first full window ends at i == k-1.
		if i >= k-1 {
			result = append(result, l.Front().Value.(Node).Value)
		}
	}
	return result
}
|
package runtime
import (
"fmt"
"github.com/bdlm/log"
xmpp "github.com/mattn/go-xmpp"
)
// Start blocks forever, receiving stanzas from the XMPP client. Receive
// errors are silently ignored and the loop simply retries.
func Start(client *xmpp.Client) {
	for {
		m, err := client.Recv()
		if err != nil {
			continue
		}
		switch v := m.(type) {
		case xmpp.Chat:
			if v.Type == "chat" {
				log.Debugf("from %s: %s", v.Remote, v.Text)
			}
			// Group chat messages are currently dropped.
			if v.Type == "groupchat" {
			}
		case xmpp.Presence:
			// do nothing
		}
	}
}
// NotifyImage sends an out-of-band (jabber:x:oob) image message to every MUC
// and user on the hook. The first Sprintf fills in body/url/desc and leaves
// the escaped %%s verbs as placeholders for recipient and message type,
// which the per-recipient Sprintf calls fill in.
// NOTE(review): the <body> is set to url (not desc) — presumably as fallback
// text for clients without OOB support; confirm. SendOrg errors are ignored.
func NotifyImage(client *xmpp.Client, hook Hook, url string, desc string) {
	msg := fmt.Sprintf(`<message to='%%s' type='%%s'>
	<body>%s</body>
	<x xmlns='jabber:x:oob'>
		<url>%s</url>
		<desc>%s</desc>
	</x>
</message>`, url, url, desc)
	for _, muc := range hook.NotifyMuc {
		client.SendOrg(fmt.Sprintf(msg, muc, "groupchat"))
	}
	for _, user := range hook.NotifyUser {
		client.SendOrg(fmt.Sprintf(msg, user, "chat"))
	}
}
// Notify sends msg as an HTML chat message to every MUC and user on the hook.
// Send errors are ignored.
func Notify(client *xmpp.Client, hook Hook, msg string) {
	for _, muc := range hook.NotifyMuc {
		client.SendHtml(xmpp.Chat{Remote: muc, Type: "groupchat", Text: msg})
	}
	for _, user := range hook.NotifyUser {
		client.SendHtml(xmpp.Chat{Remote: user, Type: "chat", Text: msg})
	}
}
|
package main
import (
"fmt"
"testing"
"time"
)
// main benchmarks a 1ms sleep using the testing harness and prints the
// formatted benchmark result.
func main() {
	result := testing.Benchmark(func(b *testing.B) {
		b.ResetTimer()
		// Run exactly b.N iterations. The original used `i <= b.N`, which
		// executed one extra iteration per benchmark round.
		for i := 0; i < b.N; i++ {
			time.Sleep(1 * time.Millisecond)
		}
	})
	fmt.Printf("%s", result)
}
|
package starkit
import (
"go.starlark.net/starlark"
)
// LoadInterceptor allows a Plugin to intercept a load to set the contents based on the requested path.
type LoadInterceptor interface {
	// LocalPath returns the path that the Tiltfile code should be read from.
	// Must be stable, because it's used as a cache key.
	// Ensure the content is present in the path returned.
	// Returns "" if this interceptor doesn't act on this path.
	LocalPath(t *starlark.Thread, path string) (string, error)
}
|
package to
import "time"
func CurrentTimezone(tz string, t time.Time) time.Time {
loc, err := time.LoadLocation(tz)
if err != nil {
return t
}
return t.In(loc)
}
|
package main
import (
"io/ioutil"
"fmt"
"github.com/json-iterator/go"
)
// tt1 demonstrates jsoniter.Get path lookups against a kubernetes-style JSON
// dump. The ReadFile error is ignored; on failure data is nil and each lookup
// prints an empty string.
// NOTE(review): the absolute path makes this machine-specific, and
// "status.startTime" is printed twice — presumably a copy-paste leftover.
func tt1() {
	data, _ := ioutil.ReadFile("/root/github/go/src/newJson/test.json")
	fmt.Println(jsoniter.Get(data, "apiVersion").ToString())
	fmt.Println(jsoniter.Get(data, "items", 0, "apiVersion").ToString())
	fmt.Println(jsoniter.Get(data, "items", 0, "status","conditions", 0, "type").ToString())
	fmt.Println(jsoniter.Get(data, "items", 0, "spec","template", "spec", "containers", 0, "image").ToString())
	fmt.Println(jsoniter.Get(data, "items", 0, "status","startTime").ToString())
	fmt.Println(jsoniter.Get(data, "items", 0, "status","startTime").ToString())
	fmt.Println(jsoniter.Get(data, "items", 0, "status").ToString())
}
// tt2 walks items[i] for i = 0, 1, ... until a lookup yields the empty
// string, printing each condition type along the way.
func tt2() {
	data, _ := ioutil.ReadFile("/root/github/go/src/newJson/test.json")
	state := "start"
	for i:=0; state!=""; i++ {
		state = jsoniter.Get(data, "items", i, "status","conditions", 0, "type").ToString()
		fmt.Println(state)
	}
	fmt.Println("done")
}
func main() {
tt1()
} |
package prov
import (
"fmt"
"io"
"strconv"
)
// Run identifies a single provenance run by numeric id and display name.
type Run struct {
	RunID int64
	RunName string
}
// NewRun builds a Run, defaulting the name to "run<id>" when none is given.
func NewRun(runId int64, runName string) Run {
	name := runName
	if name == "" {
		name = "run" + strconv.FormatInt(runId, 10)
	}
	return Run{RunID: runId, RunName: name}
}
// WriteRunFacts emits the wt_run relation: a header row followed by the fact
// line for run (rendered by Run.String via Fprintln).
func WriteRunFacts(writer io.Writer, run Run) {
	printRowHeader(writer, "wt_run(RunID, RunName).")
	fmt.Fprintln(writer, run)
}
// String renders the run as a Prolog-style fact. R and Q are the package's
// id and quoting formatters (defined elsewhere in this package).
func (r Run) String() string {
	return fmt.Sprintf("wt_run(%s, %s).",
		R(r.RunID), Q(r.RunName))
}
|
package controller
import (
"encoding/json"
"fmt"
"net/http"
mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"github.com/sambragge/webserver/models"
)
//CreateUser : decodes a user from the request body and persists it.
// Save errors are printed to stdout and no response body is written
// (preserving the original behavior).
func CreateUser(w http.ResponseWriter, r *http.Request, mango *mgo.Session) {
	var user models.User
	decoder := json.NewDecoder(r.Body)
	if err := decoder.Decode(&user); err != nil {
		// A malformed request body is a client error; the original panicked
		// here, which crashes the handler instead of answering the request.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if errors := user.Save(mango); errors != nil {
		for _, err := range errors {
			fmt.Println(err.Error())
		}
		return
	}
	fmt.Println("success putting user into database")
}
//GetUser : fetches a single (currently hard-coded) user by id and writes it
// as a JSON message envelope.
func GetUser(w http.ResponseWriter, r *http.Request, mango *mgo.Session) {
	users := mango.DB("DSDev").C("user")
	var user models.User
	decoder := json.NewDecoder(r.Body)
	err := decoder.Decode(&user)
	if err != nil {
		panic(err)
	}
	// FindId must be given a bson.ObjectId: passing the raw hex string (as
	// the original did) never matches documents whose _id is an ObjectId.
	err2 := users.FindId(bson.ObjectIdHex("59602ed7518a861126819d0a")).One(&user)
	if err2 != nil {
		panic(err2)
	}
	response := NewMessage("user", &user)
	res, err3 := json.Marshal(response)
	if err3 != nil {
		panic(err3)
	}
	w.Write(res)
}
//GetUsers : returns every user as a JSON message envelope.
func GetUsers(w http.ResponseWriter, r *http.Request, mango *mgo.Session) {
	users := mango.DB("DSDev").C("user")
	var response []models.User
	err := users.Find(bson.M{}).All(&response)
	if err != nil {
		panic(err)
	}
	res := NewMessage("users", response)
	x, err2 := json.Marshal(res)
	if err2 != nil {
		// Bug fix: the original panicked with err — which is nil at this
		// point (panic(nil)) — instead of the marshalling error err2.
		panic(err2)
	}
	w.Write(x)
}
|
package main
import (
"encoding/csv"
"os"
"fmt"
//"strconv"
//"github.com/kr/pretty"
)
// readFile parses the CSV file at filePath using delim as the field
// separator. Lines beginning with '#' are treated as comments and skipped.
// Returns the parsed records, or the open/parse error.
func readFile(filePath string, delim rune) (records [][]string, err error) {
	file, err := os.Open(filePath)
	if err != nil {
		return
	}
	defer file.Close()
	r := csv.NewReader(file)
	r.Comma = delim
	r.Comment = '#'
	// The original followed this with a redundant `if err != nil { return }`;
	// a single return covers both the success and failure paths.
	records, err = r.ReadAll()
	return
}
// load reads the delimited file at filePath into a table. kfields names the
// columns that together form the row key; each row is stored in t.rows under
// a key of the form "[col:val][col:val]...".
func load(filePath string, delim rune, kfields keys) (t table, err error) {
	t.rows = rows{}
	t.sourceFileName = filePath
	records, err := readFile(filePath, delim)
	if err != nil {
		return
	}
	kindex := []int{}
	for i, record := range records {
		if i == 0 { // grab index field positions and skip the header row
			kindex = kfields.getIndex(record)
			fmt.Println(record)
			fmt.Println(kindex)
			continue
		}
		r := row{}
		key := ""
		for j, _ := range record { // get the keys
			for _, pos := range kindex {
				if pos == j { // this is index
					key += "[" + records[0][j] + ":" + records[i][j] + "]"
				}
			}
		}
		// Map every cell under its header name; key columns are included too.
		for j, _ := range record {
			//if records[0][j] in keys{// handle keys}
			r[records[0][j]] = records[i][j]
		}
		t.rows[key] = r //TODO: key needs to be change from index to actual key that user passes to the program
	}
	/*for k, r := range t.rows {
		fmt.Println()
		fmt.Println(k)
		for c, v := range r {
			fmt.Println(c, v)
		}
	}*/
	return
}
// print writes the table to stdout: a header naming the source file, then
// one tab-separated line per row showing the key and each column:value pair.
func (t table) print() {
	fmt.Println("Showing data from ", t.sourceFileName)
	for k, r := range t.rows {
		fmt.Print("\t[", k, "]")
		for c, v := range r {
			fmt.Print("\t", c, ":", v)
		}
		// Bug fix: the builtin println writes to stderr, so the original
		// split each row across stdout and stderr. Use fmt.Println for the
		// terminating newline so the whole row goes to stdout.
		fmt.Println()
	}
}
// getIndex returns the positions in header of each key in ks, in key order.
// Keys absent from the header contribute nothing; duplicated header names
// contribute one position each.
func (ks keys) getIndex(header []string) []int {
	var positions []int
	for _, key := range ks {
		for idx, name := range header {
			if name == key {
				positions = append(positions, idx)
			}
		}
	}
	return positions
}
|
package entity
//Product Товар
type Product struct {
Meta *Meta `json:"meta,omitempty"` // Метаданные Товара
Id string `json:"id,omitempty"` // ID Товара (Только для чтения)
AccountId string `json:"accountId,omitempty"` // ID учетной записи (Только для чтения)
Owner *Employee `json:"owner,omitempty"` // Метаданные владельца (Сотрудника)
Shared bool `json:"shared,omitempty"` // Общий доступ
Group *Group `json:"group,omitempty"` // Метаданные отдела сотрудника
SyncId string `json:"syncId,omitempty"` // ID синхронизации
Updated string `json:"updated,omitempty"` // Момент последнего обновления сущности (Только для чтения)
Name string `json:"name,omitempty"` // Наименование Товара
Description string `json:"description,omitempty"` // Описание Товара
Code string `json:"code,omitempty"` // Код Товара
ExternalCode string `json:"externalCode,omitempty"` // Внешний код Товара
Archived bool `json:"archived,omitempty"` // Добавлен ли Товар в архив
PathName string `json:"pathName,omitempty"` // Наименование группы, в которую входит Товар (Только для чтения)
Vat int `json:"vat,omitempty"` // НДС %
VatEnabled bool `json:"vatEnabled,omitempty"` // Включен ли НДС для группы
UseParentVat bool `json:"useParentVat,omitempty"` // Используется ли ставка НДС родительской группы
EffectiveVat int `json:"effectiveVat,omitempty"` // Реальный НДС % (Только для чтения)
EffectiveVatEnabled int `json:"effectiveVatEnabled,omitempty"` // Дополнительный признак для определения разграничения реального НДС
ProductFolder *ProductFolder `json:"productFolder,omitempty"` // Метаданные группы Товара
Uom *Uom `json:"uom,omitempty"` // Единицы измерения
Images *Images `json:"images,omitempty"` // Изображения
MinPrice *MinPrice `json:"minPrice,omitempty"` // Минимальная цена
SalePrices []SalePrice `json:"salePrices,omitempty"` // Цены продажи
BuyPrice *BuyPrice `json:"buyPrice,omitempty"` // Закупочная цена
Supplier *Counterparty `json:"supplier,omitempty"` // Метаданные контрагента-поставщика
Attributes []Attribute `json:"attributes,omitempty"` // Коллекция доп. полей
Country *Country `json:"country,omitempty"` // Метаданные Страны
Article string `json:"article,omitempty"` // Артикул
Weight float64 `json:"weight,omitempty"` // Вес
Volume float64 `json:"volume,omitempty"` // Объем
Packs []Pack `json:"packs,omitempty"` // Упаковки Товара
Alcoholic *Alcoholic `json:"alcoholic,omitempty"` // Объект, содержащий поля алкогольной продукции
VariantsCount int `json:"variantsCount,omitempty"` // Количество модификаций у данного товара (Только для чтения)
MinimumBalance int `json:"minimumBalance,omitempty"` // Неснижаемый остаток
IsSerialTrackable bool `json:"isSerialTrackable,omitempty"` // Учет по серийным номерам. Не может быть указан вместе с alcoholic и weighed
Things []string `json:"things,omitempty"` // Серийные номера
Barcodes []Barcode `json:"barcodes,omitempty"` // Штрихкоды
DiscountProhibited bool `json:"discountProhibited,omitempty"` // Признак запрета скидок
Tnved string `json:"tnved,omitempty"` // Код ТН ВЭД
PartialDisposal bool `json:"partialDisposal,omitempty"` // Управление состоянием частичного выбытия маркированного товара. "true" возможность включена
TrackingType string `json:"trackingType,omitempty"` // Тип маркируемой продукции
// --
// Значения поля trackingType.
// NOT_TRACKED Без маркировки
// TOBACCO Тип маркировки "Табак"
// SHOES Тип маркировки "Обувь"
// LP_CLOTHES Тип маркировки "Одежда"
// LP_LINENS Тип маркировки "Постельное белье"
// PERFUMERY Духи и туалетная вода
// ELECTRONICS Фотокамеры и лампы-вспышки
// --
PaymentItemType string `json:"paymentItemType,omitempty"` // Признак предмета расчета
// --
// Значения поля paymentItemType.
// GOOD Товар
// EXCISABLE_GOOD Подакцизный товар
// COMPOUND_PAYMENT_ITEM Составной предмет расчета
// ANOTHER_PAYMENT_ITEM Иной предмет расчета
// --
TaxSystem string `json:"taxSystem,omitempty"` // Код системы налогообложения
// --
// Значения поля taxSystem.
// TAX_SYSTEM_SAME_AS_GROUP Совпадает с группой
// GENERAL_TAX_SYSTEM ОСН
// SIMPLIFIED_TAX_SYSTEM_INCOME УСН. Доход
// SIMPLIFIED_TAX_SYSTEM_INCOME_OUTCOME УСН. Доход-Расход
// UNIFIED_AGRICULTURAL_TAX ЕСХН
// PRESUMPTIVE_TAX_SYSTEM ЕНВД
// PATENT_BASED Патент
// --
PPEType string `json:"ppeType,omitempty"` // Код вида номенклатурной классификации медицинских средств индивидуальной защиты (EAN-13)
// --
// Значения поля ppeType.
// 2400001323807 маска лицевая для защиты дыхательных путей, многоразового использования
// 2400003675805 маска лицевая для защиты дыхательных путей, одноразового использования
// 2400001807703 респиратор общего применения
// 2400001818303 респиратор хирургический
// 2400002186203 респиратор хирургический антибактериальный
// 2400001368105 средство назальное для защиты от загрязненного воздуха, местного действия
// 2400001225408 перчатки смотровые (процедурные) из латекса гевеи, неопудренные, нестерильные
// 2400001225606 перчатки смотровые (процедурные) из латекса гевеи, опудренные
// 2400001226108 перчатки смотровые (процедурные) из латекса гевеи, неопудренные, стерильные
// 2400001393503 перчатки смотровые (процедурные) из полихлоропрена, неопудренные
// 2400001858309 перчатки смотровые (процедурные) нитриловые, неопудренные, нестерильные
// 2400001858507 перчатки смотровые (процедурные) нитриловые, опудренные
// 2400002052805 перчатки смотровые (процедурные) виниловые, неопудренные
// 2400002052904 перчатки смотровые (процедурные) виниловые, опудренные
// 2400002984502 перчатки смотровые (процедурные) из гваюлового латекса, неопудренные
// 2400003117107 перчатки смотровые (процедурные) из этиленвинилацетата, неопудренные, стерильные
// 2400003117206 перчатки смотровые (процедурные) из этиленвинилацетата, неопудренные, нестерильные
// 2400003207907 перчатки смотровые (процедурные) нитриловые, неопудренные, антибактериальные
// 2400003215308 перчатки смотровые (процедурные) полиизопреновые, неопудренные
// 2400003297700 перчатки смотровые (процедурные) нитриловые, неопудренные, стерильные
// 2400003356704 перчатки смотровые (процедурные) виниловые, неопудренные, стерильные
// 2400003356803 перчатки смотровые (процедурные) виниловые, опудренные, стерильные
// 2400003433108 перчатки смотровые (процедурные) из латекса гевеи, опудренные, стерильные
// 2400003492303 перчатки смотровые (процедурные) полиизопреновые, опудренные
// 2400003495700 перчатки смотровые (процедурные) из полихлоропрена, неопудренные, стерильные
// 2400003495809 перчатки смотровые (процедурные) из полихлоропрена, неопудренные, стерильные
// 2400003495908 перчатки смотровые (процедурные) нитриловые, опудренные, стерильные
// 2400003496004 перчатки смотровые (процедурные) полиизопреновые, неопудренные, стерильные
// 2400003496103 перчатки смотровые (процедурные) полиизопреновые, опудренные, стерильные
// 2400001226306 перчатки хирургические из латекса гевеи, неопудренные
// 2400001226405 перчатки хирургические из латекса гевеи, опудренные
// 2400001393107 перчатки хирургические из полихлоропрена, неопудренные
// 2400001393602 перчатки смотровые (процедурные) из полихлоропрена, опудренные
// 2400001565306 перчатки хирургические из блоксополимера стирола, неопудренные, антибактериальные
// 2400001857203 перчатки хирургические нитриловые, опудренные
// 2400001857005 перчатки хирургические нитриловые, неопудренные
// 2400002015909 перчатки хирургические полиизопреновые, неопудренные
// 2400002016005 перчатки хирургические полиизопреновые, неопудренные, антибактериальные
// 2400002016104 перчатки хирургические полиизопреновые, опудренные
// 2400003161209 перчатки хирургические из блоксополимера стирола, неопудренные
// 2400003227806 перчатки хирургические полимерно-композитные, неопудренные
// 2400003237409 перчатки хирургические полимерно-композитные, неопудренные
// 2400003263408 перчатки хирургические из латекса гевеи, неопудренные, антибактериальные
// 2400003356902 перчатки хирургические из гваюлового латекса, неопудренные
// 2400003356902 перчатки хирургические из полихлоропрена, опудренные
// 2400002886806 набор гигиенической одежды для посетителей
// 2400002886707 комбинезон гигиенический для посетителей
// --
Files File `json:"files,omitempty"` // Массив метаданных Файлов (Максимальное количество файлов - 100)
}
// ProductFolder describes a product group ("Группа Товаров") in the
// MoySklad API. Fields marked read-only are populated by the server and
// ignored on write.
type ProductFolder struct {
	Meta                *Meta          `json:"meta,omitempty"`                // Entity metadata (read-only)
	ID                  string         `json:"id,omitempty"`                  // Product-group ID (read-only)
	AccountID           string         `json:"accountId,omitempty"`           // Account ID (read-only)
	Owner               *Employee      `json:"owner,omitempty"`               // Owner (employee) metadata
	Shared              bool           `json:"shared,omitempty"`              // Shared-access flag
	Group               *Group         `json:"group,omitempty"`               // Employee department metadata
	Updated             string         `json:"updated,omitempty"`             // Last-update timestamp (read-only)
	Name                string         `json:"name,omitempty"`                // Product-group name
	Description         string         `json:"description,omitempty"`        // Product-group description
	Code                string         `json:"code,omitempty"`                // Product-group code (tag fixed: was mistakenly "Description")
	ExternalCode        string         `json:"externalCode,omitempty"`        // External code of the product group
	Archived            bool           `json:"archived,omitempty"`            // Whether the group is archived (read-only)
	PathName            string         `json:"pathName,omitempty"`            // Name of the parent product group (read-only)
	Vat                 int            `json:"vat,omitempty"`                 // VAT, %
	VatEnabled          bool           `json:"vatEnabled,omitempty"`          // Whether VAT is enabled for the group
	UseParentVat        bool           `json:"useParentVat,omitempty"`        // Whether the parent group's VAT rate is used
	EffectiveVat        int            `json:"effectiveVat,omitempty"`        // Effective VAT, % (read-only)
	EffectiveVatEnabled int            `json:"effectiveVatEnabled,omitempty"` // Extra flag disambiguating the effective VAT
	ProductFolder       *ProductFolder `json:"productFolder,omitempty"`       // Parent product group, as a metadata reference
	TaxSystem           string         `json:"taxSystem,omitempty"`           // Tax-system code
	// Possible taxSystem values:
	//   TAX_SYSTEM_SAME_AS_GROUP             same as the group
	//   GENERAL_TAX_SYSTEM                   general system (ОСН)
	//   SIMPLIFIED_TAX_SYSTEM_INCOME         simplified, income (УСН. Доход)
	//   SIMPLIFIED_TAX_SYSTEM_INCOME_OUTCOME simplified, income minus expense (УСН. Доход-Расход)
	//   UNIFIED_AGRICULTURAL_TAX             unified agricultural tax (ЕСХН)
	//   PRESUMPTIVE_TAX_SYSTEM               presumptive tax (ЕНВД)
	//   PATENT_BASED                         patent (Патент)
}
// Pack describes one packaging variant of a product.
type Pack struct {
	ID string `json:"id"` // Packaging ID (read-only)
	Uom *Uom `json:"uom"` // Unit-of-measure metadata
	Quantity int `json:"quantity"` // Number of products in one pack of this kind
	Barcodes []Barcode `json:"barcodes,omitempty"` // Barcodes of the product packs
}
// Alcoholic holds the alcohol-specific product attributes.
type Alcoholic struct {
	Excise bool `json:"excise,omitempty"` // Whether the product carries an excise stamp
	Type int `json:"type,omitempty"` // Product kind code
	Strength float64 `json:"strength,omitempty"` // Alcohol strength
	Volume float64 `json:"volume,omitempty"` // Container volume
}
|
package cloudformation
// AWSDynamoDBTable_SSESpecification AWS CloudFormation Resource (AWS::DynamoDB::Table.SSESpecification)
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-dynamodb-table-ssespecification.html
type AWSDynamoDBTable_SSESpecification struct {
	// SSEEnabled AWS CloudFormation Property
	// Required: true
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-dynamodb-table-ssespecification.html#cfn-dynamodb-table-ssespecification-sseenabled
	// NOTE(review): omitempty on a required bool drops an explicit `false`
	// from the emitted JSON — confirm the code generator intends that.
	SSEEnabled bool `json:"SSEEnabled,omitempty"`
}

// AWSCloudFormationType returns the AWS CloudFormation resource type
func (r *AWSDynamoDBTable_SSESpecification) AWSCloudFormationType() string {
	return "AWS::DynamoDB::Table.SSESpecification"
}
|
package telepathy
import (
"context"
"sync"
"testing"
"time"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/mongo"
"github.com/stretchr/testify/assert"
)
// Test fixtures: the mongod instance the tests talk to and the database
// used inside it. The "mongo" hostname presumably resolves in a
// docker-compose style test environment — TODO confirm.
const (
	testDBURL = "mongodb://mongo:27017/test"
	testDBName = "testDBName"
)
// dbTester bundles a database handler with three requester channels and a
// done channel used by the helper tests below to start/stop the handler.
type dbTester struct {
	handler *databaseHandler // handler under test
	reqChA chan DatabaseRequest // requester channel "reqA"
	reqChB chan DatabaseRequest // requester channel "reqB"
	reqChC chan DatabaseRequest // requester channel "reqC"
	done chan interface{} // closed once handler.start returns
}
// TestDBConnection verifies that a database handler can be constructed and
// started against the test mongod instance.
func TestDBConnection(t *testing.T) {
	assert := assert.New(t)
	handler, err := newDatabaseHandler(testDBURL, testDBName)
	// Bail out on a failed construction: the original kept going and
	// dereferenced a nil handler below.
	if !assert.NoError(err) {
		return
	}
	assert.NotNil(handler)
	assert.NoError(handler.start(context.Background()))
}
// TestDBAttach attaches two requesters, starts the handler, and checks
// that closing every attached request channel shuts the handler down.
func TestDBAttach(t *testing.T) {
	assert := assert.New(t)
	handler, err := newDatabaseHandler(testDBURL, testDBName)
	// Bail out on a failed construction instead of dereferencing nil.
	if !assert.NoError(err) {
		return
	}
	assert.NotNil(handler)
	reqCh := make(chan DatabaseRequest)
	handler.attachRequester("testReq", reqCh)
	reqChOther := make(chan DatabaseRequest)
	handler.attachRequester("testReqOther", reqChOther)
	dbDone := make(chan interface{})
	go func() {
		assert.NoError(handler.start(context.Background()))
		close(dbDone)
	}()
	// Closing all attached request channels signals the handler to stop.
	close(reqCh)
	close(reqChOther)
	<-dbDone
}
// TestDBAttachDuplicate checks that attaching a second requester under an
// already-used ID panics.
func TestDBAttachDuplicate(t *testing.T) {
	assert := assert.New(t)
	handler, err := newDatabaseHandler(testDBURL, testDBName)
	// Bail out on a failed construction instead of dereferencing nil.
	if !assert.NoError(err) {
		return
	}
	assert.NotNil(handler)
	reqCh := make(chan DatabaseRequest)
	handler.attachRequester("testReq", reqCh)
	reqChOther := make(chan DatabaseRequest)
	assert.Panics(func() { handler.attachRequester("testReq", reqChOther) })
}
// start builds a fresh handler, attaches the three buffered requester
// channels, and launches the handler goroutine; done is closed when the
// handler exits. It aborts the test if the handler cannot be constructed.
func (tester *dbTester) start(t *testing.T) {
	assert := assert.New(t)
	var err error
	tester.handler, err = newDatabaseHandler(testDBURL, testDBName)
	// Abort instead of continuing with a nil handler (the original fell
	// through and dereferenced it below).
	if !assert.NoError(err) {
		t.FailNow()
	}
	assert.NotNil(tester.handler)
	tester.reqChA = make(chan DatabaseRequest, 5)
	tester.reqChB = make(chan DatabaseRequest, 5)
	tester.reqChC = make(chan DatabaseRequest, 5)
	tester.handler.attachRequester("reqA", tester.reqChA)
	tester.handler.attachRequester("reqB", tester.reqChB)
	tester.handler.attachRequester("reqC", tester.reqChC)
	tester.done = make(chan interface{})
	go func() {
		assert.NoError(tester.handler.start(context.Background()))
		close(tester.done)
	}()
}
// stop closes every requester channel — signalling the handler to shut
// down — and blocks until the handler goroutine has finished.
func (tester *dbTester) stop() {
	for _, ch := range []chan DatabaseRequest{tester.reqChA, tester.reqChB, tester.reqChC} {
		close(ch)
	}
	<-tester.done
}
// TestDBSimpleReadWrite inserts a document through one requester channel,
// then reads it back (and deletes it) through another, asserting the
// round-tripped value matches.
func TestDBSimpleReadWrite(t *testing.T) {
	assert := assert.New(t)
	tester := dbTester{}
	testVal := "value"
	tester.start(t)
	retCh := make(chan interface{})
	// Write phase: insert {"Key": testVal} via requester A.
	tester.reqChA <- DatabaseRequest{
		Action: func(ctx context.Context, db *mongo.Database) interface{} {
			collection := db.Collection("testCollection")
			_, err := collection.InsertOne(ctx, bson.M{"Key": testVal})
			assert.NoError(err)
			return testVal
		},
		Return: retCh,
	}
	ret := <-retCh
	close(retCh)
	value, ok := ret.(string)
	assert.True(ok)
	retCh = make(chan interface{})
	// Read phase: find the document via requester B, extract "Key", and
	// clean up by deleting it (exactly one document should be removed).
	tester.reqChB <- DatabaseRequest{
		Action: func(ctx context.Context, db *mongo.Database) interface{} {
			collection := db.Collection("testCollection")
			result := collection.FindOne(ctx, map[string]string{"Key": value})
			if !assert.NoError(result.Err()) {
				return ""
			}
			raw, err := result.DecodeBytes()
			if !assert.NoError(err) {
				return ""
			}
			readValue, err := raw.LookupErr("Key")
			if !assert.NoError(err) {
				return ""
			}
			ret, ok := readValue.StringValueOK()
			if !assert.True(ok) {
				return ""
			}
			delResult, err := collection.DeleteMany(ctx, map[string]string{"Key": value})
			assert.NoError(err)
			assert.Equal(int64(1), delResult.DeletedCount)
			return ret
		},
		Return: retCh,
	}
	ret = <-retCh
	close(retCh)
	value, ok = ret.(string)
	assert.True(ok)
	assert.Equal(testVal, value)
	tester.stop()
}
// TestDBSimpleReadWriteReconn is the same write-then-read round trip as
// TestDBSimpleReadWrite, but the handler is stopped and restarted between
// the write and the read to exercise reconnection.
func TestDBSimpleReadWriteReconn(t *testing.T) {
	assert := assert.New(t)
	tester := dbTester{}
	testVal := "value"
	tester.start(t)
	retCh := make(chan interface{})
	// Write phase: insert {"Key": testVal} via requester A.
	tester.reqChA <- DatabaseRequest{
		Action: func(ctx context.Context, db *mongo.Database) interface{} {
			collection := db.Collection("testCollection")
			_, err := collection.InsertOne(ctx, bson.M{"Key": testVal})
			assert.NoError(err)
			return testVal
		},
		Return: retCh,
	}
	ret := <-retCh
	close(retCh)
	value, ok := ret.(string)
	assert.True(ok)
	// Bounce the handler: data written before the restart must still be
	// readable afterwards.
	tester.stop()
	tester.start(t)
	retCh = make(chan interface{})
	tester.reqChB <- DatabaseRequest{
		Action: func(ctx context.Context, db *mongo.Database) interface{} {
			collection := db.Collection("testCollection")
			result := collection.FindOne(ctx, map[string]string{"Key": value})
			if !assert.NoError(result.Err()) {
				return ""
			}
			raw, err := result.DecodeBytes()
			if !assert.NoError(err) {
				return ""
			}
			readValue, err := raw.LookupErr("Key")
			if !assert.NoError(err) {
				return ""
			}
			ret, ok := readValue.StringValueOK()
			if !assert.True(ok) {
				return ""
			}
			// Clean up; exactly one matching document is expected.
			delResult, err := collection.DeleteMany(ctx, map[string]string{"Key": value})
			assert.NoError(err)
			assert.Equal(int64(1), delResult.DeletedCount)
			return ret
		},
		Return: retCh,
	}
	ret = <-retCh
	close(retCh)
	value, ok = ret.(string)
	assert.True(ok)
	assert.Equal(testVal, value)
	tester.stop()
}
// TestDBMultiAccess drives the three requester channels concurrently, each
// performing ten insert/read/delete round trips against its own
// collection, to check the handler copes with parallel requests.
func TestDBMultiAccess(t *testing.T) {
	assert := assert.New(t)
	tester := dbTester{}
	tester.start(t)
	// testFunc performs one write-then-read-and-delete round trip on the
	// given requester channel and collection.
	testFunc := func(reqChan chan<- DatabaseRequest, collection string, testVal string) {
		retCh := make(chan interface{})
		reqChan <- DatabaseRequest{
			Action: func(ctx context.Context, db *mongo.Database) interface{} {
				// Shadows the string parameter with the collection handle.
				collection := db.Collection(collection)
				_, err := collection.InsertOne(ctx, bson.M{"Key": testVal})
				assert.NoError(err)
				return testVal
			},
			Return: retCh,
		}
		ret := <-retCh
		close(retCh)
		value, ok := ret.(string)
		assert.True(ok)
		retCh = make(chan interface{})
		reqChan <- DatabaseRequest{
			Action: func(ctx context.Context, db *mongo.Database) interface{} {
				collection := db.Collection(collection)
				result := collection.FindOne(ctx, map[string]string{"Key": value})
				if !assert.NoError(result.Err()) {
					return ""
				}
				raw, err := result.DecodeBytes()
				if !assert.NoError(err) {
					return ""
				}
				readValue, err := raw.LookupErr("Key")
				if !assert.NoError(err) {
					return ""
				}
				ret, ok := readValue.StringValueOK()
				if !assert.True(ok) {
					return ""
				}
				// Clean up; exactly one matching document is expected.
				delResult, err := collection.DeleteMany(ctx, map[string]string{"Key": value})
				assert.NoError(err)
				assert.Equal(int64(1), delResult.DeletedCount)
				return ret
			},
			Return: retCh,
		}
		ret = <-retCh
		close(retCh)
		value, ok = ret.(string)
		assert.True(ok)
		assert.Equal(testVal, value)
	}
	// Three goroutines, one per requester channel/collection.
	wg := sync.WaitGroup{}
	wg.Add(3)
	go func() {
		for i := 0; i < 10; i++ {
			testFunc(tester.reqChA, "collectionA", "testA")
		}
		wg.Done()
	}()
	go func() {
		for i := 0; i < 10; i++ {
			testFunc(tester.reqChB, "collectionB", "testB")
		}
		wg.Done()
	}()
	go func() {
		for i := 0; i < 10; i++ {
			testFunc(tester.reqChC, "collectionC", "testC")
		}
		wg.Done()
	}()
	wg.Wait()
	tester.stop()
}
// TestDBTimeout checks that a request's context is eventually cancelled:
// the action blocks on ctx.Done() and only returns after cancellation.
// NOTE(review): handler.timeout is assigned here after start() has already
// launched the handler goroutine — confirm this write does not race with
// the handler's reads of the field.
func TestDBTimeout(t *testing.T) {
	assert := assert.New(t)
	tester := dbTester{}
	tester.start(t)
	tester.handler.timeout = 10 * time.Second
	retCh := make(chan interface{})
	tester.reqChA <- DatabaseRequest{
		Action: func(ctx context.Context, db *mongo.Database) interface{} {
			// Block until the handler cancels the request context.
			<-ctx.Done()
			return true
		},
		Return: retCh,
	}
	get := <-retCh
	value, ok := get.(bool)
	assert.True(ok)
	assert.True(value)
	tester.stop()
}
|
package numbers
import (
"math/rand"
"fmt"
"time"
)
// FindQuestion picks a question uniformly at random across all chapters
// (so larger chapters are proportionally more likely), prints it as
// "chapter.question", and returns the 1-based chapter and question numbers.
func FindQuestion() (int, int) {
	chapterSizes := [17]int{9, 8, 6, 12, 8, 10, 12, 14, 8, 11, 6, 11, 8, 7, 7, 26, 26}
	// Total number of questions across every chapter.
	total := 0
	for _, size := range chapterSizes {
		total += size
	}
	rand.Seed(time.Now().UnixNano())
	// Draw a 1-based position in the flattened question list.
	pick := rand.Intn(total) + 1
	// Walk the chapters, subtracting each chapter's size until the pick
	// lands inside the current chapter.
	chapter := 1
	for _, size := range chapterSizes {
		if pick <= size {
			break
		}
		pick -= size
		chapter++
	}
	fmt.Printf("%d.%d\n", chapter, pick)
	return chapter, pick
}
// FindQuestionInChapter picks a uniformly random question number within
// the given 1-based chapter (1-17), prints it as "chapter.question", and
// returns the question number. It returns -1 if chapter is out of range.
func FindQuestionInChapter(chapter int) int {
	numberOfQuestionsByChapters := [17]int{9, 8, 6, 12, 8, 10, 12, 14, 8, 11, 6, 11, 8, 7, 7, 26, 26}
	if chapter < 1 || chapter > 17 {
		return -1
	}
	rand.Seed(time.Now().UnixNano())
	// rand.Intn(n) yields [0, n); +1 shifts to 1-based question numbers.
	question := rand.Intn(numberOfQuestionsByChapters[chapter-1]) + 1
	fmt.Printf("%d.%d\n", chapter, question)
	return question
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/signal"
"strings"
"time"
"net/http"
)
// address is the host:port the proxy server listens on.
const address = "0.0.0.0:7080"

// ErrorMessage is the JSON error payload written to clients when proxying
// fails.
type ErrorMessage struct {
	StatusCode int `json:"status_code,omitempty"`
	Message string `json:"message,omitempty"`
}

// NOTE(review): errorMessage is shared mutable package state that
// ErrorHandler writes on every failure; under concurrent requests this is
// a data race — prefer building the payload locally per request.
var errorMessage = ErrorMessage{StatusCode: http.StatusInternalServerError}

// HttpHandler is the http.Handler that forwards requests downstream.
type HttpHandler struct{}
// main starts the proxy server and shuts it down gracefully on SIGINT.
func main() {
	// Grace period given to in-flight requests during shutdown. The
	// original declared `var wait time.Duration` and never set it, so
	// Shutdown received an already-expired context and never drained.
	wait := 15 * time.Second
	fmt.Println("Starting server - ", address)
	handler := HttpHandler{}
	//the following code is from gorilla mux samples
	srv := &http.Server{
		Addr:         address,
		WriteTimeout: time.Second * 15,
		ReadTimeout:  time.Second * 15,
		IdleTimeout:  time.Second * 60,
		Handler:      handler,
	}
	// Serve in the background so main can block on the signal channel.
	go func() {
		if err := srv.ListenAndServe(); err != nil {
			fmt.Println(err)
		}
	}()
	c := make(chan os.Signal, 1)
	// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
	// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
	signal.Notify(c, os.Interrupt)
	// Block until we receive our signal.
	<-c
	// Create a deadline to wait for.
	ctx, cancel := context.WithTimeout(context.Background(), wait)
	defer cancel()
	// Doesn't block if no connections, but will otherwise wait
	// until the timeout deadline.
	_ = srv.Shutdown(ctx)
	fmt.Println("Shutting down")
	os.Exit(0)
}
// ServeHTTP proxies the incoming request to the downstream service and
// relays the downstream status, headers and body back to the caller.
// Only the method and path are forwarded — the request body is currently
// dropped (see TODO), so POST/PUT payloads do not reach downstream.
func (h HttpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var req *http.Request
	downstreamURL := "https://httpbin.org/" + r.URL.Path //"http://127.0.0.1:9080" + r.URL.Path
	// Bound the outbound call: the zero-value client has no timeout and
	// would wait forever on a hung downstream.
	client := &http.Client{Timeout: 30 * time.Second}
	fmt.Println(">> Request received")
	//TODO: add request logic here (forward r.Body and headers as needed).
	req, err := http.NewRequest(r.Method, downstreamURL, nil)
	if err != nil {
		ErrorHandler(w, err)
		return
	}
	resp, err := client.Do(req)
	// Close the body whenever a response exists, even alongside an error.
	if resp != nil {
		defer resp.Body.Close()
	}
	if err != nil {
		ErrorHandler(w, err)
		return
	}
	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		ErrorHandler(w, err)
		return
	}
	//TODO: add response logic here.
	fmt.Println(">> Response sent")
	ResponseHandler(w, resp.Header, resp.StatusCode, respBody)
}
func ErrorHandler(w http.ResponseWriter, err error) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusInternalServerError)
errorMessage.Message = err.Error()
if err := json.NewEncoder(w).Encode(errorMessage); err != nil {
fmt.Println(err)
}
}
// ResponseHandler relays the downstream status code, headers and body back
// to the client. Headers are written before WriteHeader, as net/http
// requires.
// NOTE(review): collapsing multi-valued headers with strings.Join(",") is
// lossy for headers like Set-Cookie that must stay as separate values
// (Header().Add per value) — confirm before relying on cookie passthrough.
func ResponseHandler(w http.ResponseWriter, headers http.Header, statusCode int, response []byte) {
	for headerName, headerValue := range headers {
		w.Header().Set(headerName, strings.Join(headerValue, ","))
	}
	w.WriteHeader(statusCode)
	_, err := w.Write(response)
	if err != nil {
		fmt.Println(err)
	}
}
|
//Test cases for word list attack with different type of hashes
package main
import (
	"os"
	"testing"

	attacks "github.com/karlek/gohash/attack"
)
// TestMD5 runs the word-list attack against known MD5 digests: the empty
// string, a truncated (invalid) digest, a capitalised word, a digest that
// is absent from the word list, and numeric-suffix candidates.
func TestMD5(t *testing.T) {
	m := map[string]string{
		"d41d8cd98f00b204e9800998ecf8427e": "", //Empty string ""
		"d41d8cd98f00b204e9800998ecf8427": "", //Invalid MD5 (1 character too short)
		"8b1a9953c4611296a827abf8c47804d7": "Hello", //Titled test: Hello
		"9e076f5885f5cc16a4b5aeb8de4adff5": "Not found", //Unfound test: Not found
		"e8bb0b2e10d6706a0ae1a8633a9feace": "asdf0", //Number suffix test: asdf0
		"6a47b3f8f52318528ab6438078b28ad4": "asdf9999", //Number suffix test: asdf9999
	}
	// Expected error strings per digest; any other error fails the test.
	mError := map[string]string{
		"d41d8cd98f00b204e9800998ecf8427": "encoding/hex: odd length hex string",
		"9e076f5885f5cc16a4b5aeb8de4adff5": "Hash not found",
	}
	for hash, out := range m {
		// Word list is resolved relative to GOPATH — TODO confirm the
		// a.txt fixture ships with the repository.
		found, err := attacks.WordList(hash, os.Getenv("GOPATH")+"/src/github.com/karlek/gohash/a.txt")
		if err != nil && mError[hash] != err.Error() {
			t.Errorf("%s - failed with error: %s\n", hash, err.Error())
		}
		if out != found {
			t.Errorf("wordListAttack(%v) = %v, want %v\n\n", hash, found, out)
		}
	}
}
// TestSHA1 mirrors TestMD5 for SHA-1 digests: empty string, truncated
// digest, capitalised word, unfindable digest, and numeric suffixes.
func TestSHA1(t *testing.T) {
	m := map[string]string{
		"da39a3ee5e6b4b0d3255bfef95601890afd80709": "", //Empty string ""
		"da39a3ee5e6b4b0d3255bfef95601890afd8070": "", //Invalid SHA-1 (1 character too short)
		"f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0": "Hello", //Titled test: Hello
		"475c848673a3f79fa778f01c2bd5a721d4c41707": "Not found", //Unfound test: Not found
		"e7587ca621f0819e68f5b740ebec0b7c5f292fac": "asdf0", //Number suffix test: asdf0
		"ce743a784552c605fbd94774348f600cc10c8d2c": "asdf9999", //Number suffix test: asdf9999
	}
	// Expected error strings per digest; any other error fails the test.
	mError := map[string]string{
		"da39a3ee5e6b4b0d3255bfef95601890afd8070": "encoding/hex: odd length hex string",
		"475c848673a3f79fa778f01c2bd5a721d4c41707": "Hash not found",
	}
	for hash, out := range m {
		found, err := attacks.WordList(hash, os.Getenv("GOPATH")+"/src/github.com/karlek/gohash/a.txt")
		if err != nil && mError[hash] != err.Error() {
			t.Errorf("%s - failed with error: %s\n", hash, err.Error())
		}
		if out != found {
			t.Errorf("wordListAttack(%v) = %v, want %v\n\n", hash, found, out)
		}
	}
}
|
package main
import (
"context"
"fmt"
"io"
"log"
"time"
"github.com/dfreilich/grpc-samples/greet/greetpb"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/status"
)
// address and port locate the greet server this client dials.
const address = "localhost"
const port = "50051"

// dFGreeting is the greeting payload reused by every RPC demo below.
var dFGreeting = &greetpb.Greeting{
	FirstName: "David",
	LastName: "Freilich",
}
// main runs the client demos and exits non-zero on any RPC failure.
func main() {
	log.Println("Starting client")
	if err := run(); err != nil {
		log.Fatalf("failed to run: %v", err)
	}
	// Fixed typo in the log message ("Succesfully").
	log.Println("Successfully ran")
}
// run dials the greet server over TLS and exercises the RPC demos.
// Earlier demos (unary, server/client/BiDi streaming) are kept as
// commented-out calls; only the two deadline demos currently run.
func run() error {
	certFile := "ssl/ca.crt"
	creds, err := credentials.NewClientTLSFromFile(certFile, "")
	if err != nil {
		return errors.Wrap(err, "error while loading CA trust certificates")
	}
	opts := grpc.WithTransportCredentials(creds)
	cc, err := grpc.Dial(fmt.Sprintf("%s:%s", address, port), opts)
	if err != nil {
		return errors.Wrap(err, "could not connect")
	}
	defer cc.Close()
	c := greetpb.NewGreetServiceClient(cc)
	fmt.Println("Created client")
	// if err := doUnary(c); err != nil {
	// return errors.Wrap(err, "failed to do unary RPC call")
	// }
	// return doServerStreaming(c)
	// return doClientStreaming(c)
	// return doBiDiStreaming(c)
	// NOTE(review): the 5s call's error is discarded, presumably so the 1s
	// call can demonstrate a deadline being exceeded — confirm intent.
	doUnaryWithDeadline(c, 5*time.Second)
	return doUnaryWithDeadline(c, 1*time.Second)
}
// doUnary performs the plain unary Greet RPC and prints the response.
func doUnary(c greetpb.GreetServiceClient) error {
	fmt.Println("Starting to do Unary RPC...")
	res, err := c.Greet(context.Background(), &greetpb.GreetRequest{
		Greeting: dFGreeting,
	})
	if err != nil {
		return errors.Wrap(err, "error while calling Greet rpc")
	}
	fmt.Printf("Response from Greet: %v\n", res)
	return nil
}
// doServerStreaming invokes GreetManyTimes and prints every streamed
// response until the server closes the stream with io.EOF.
func doServerStreaming(c greetpb.GreetServiceClient) error {
	fmt.Println("Starting a Server Streaming RPC")
	req := &greetpb.GreetManyTimesRequest{Greeting: dFGreeting}
	resStream, err := c.GreetManyTimes(context.Background(), req)
	if err != nil {
		return errors.Wrap(err, "error while calling GreetManyTimes RPC")
	}
	for {
		msg, err := resStream.Recv()
		if err == io.EOF {
			// Stream has ended
			break
		}
		if err != nil {
			return errors.Wrap(err, "error while reading stream")
		}
		fmt.Printf("Response from GreetManyTimes: %v\n", msg.GetResult())
	}
	return nil
}
// doClientStreaming sends a fixed batch of LongGreet requests on a client
// stream, closes the stream, and prints the single aggregated response.
func doClientStreaming(c greetpb.GreetServiceClient) error {
	fmt.Println("Starting to do a Client Streaming RPC...")
	requests := []*greetpb.LongGreetRequest{
		&greetpb.LongGreetRequest{
			Greeting: &greetpb.Greeting{
				FirstName: "Elie",
			},
		},
		&greetpb.LongGreetRequest{
			Greeting: &greetpb.Greeting{
				FirstName: "Ahuva",
			},
		},
		&greetpb.LongGreetRequest{
			Greeting: &greetpb.Greeting{
				FirstName: "Chanan",
			},
		},
		&greetpb.LongGreetRequest{
			Greeting: &greetpb.Greeting{
				FirstName: "Duv",
			},
		},
	}
	stream, err := c.LongGreet(context.Background())
	if err != nil {
		return errors.Wrap(err, "error while calling LongGreet")
	}
	for _, req := range requests {
		fmt.Printf("Sending req: %v\n", req)
		// NOTE(review): Send's error is discarded; per gRPC convention a
		// failed Send surfaces the real status via CloseAndRecv — confirm.
		stream.Send(req)
		// Pacing delay so the sends are visibly sequential in the demo.
		time.Sleep(10 * time.Millisecond)
	}
	response, err := stream.CloseAndRecv()
	if err != nil {
		return errors.Wrap(err, "error while receiving LongGreet response")
	}
	fmt.Printf("LongGreetResponse: %v\n", response)
	return nil
}
// doBiDiStreaming runs GreetEveryone with one goroutine sending a fixed
// batch of requests and another printing responses; it returns once the
// receive side sees io.EOF (or an error) and closes waitc.
func doBiDiStreaming(c greetpb.GreetServiceClient) error {
	fmt.Println("Starting to do BiDi Streaming RPC")
	// Create stream
	stream, err := c.GreetEveryone(context.Background())
	if err != nil {
		return errors.Wrap(err, "failed to create stream")
	}
	requests := []*greetpb.GreetEveryoneRequest{
		&greetpb.GreetEveryoneRequest{
			Greeting: &greetpb.Greeting{
				FirstName: "Elie",
			},
		},
		&greetpb.GreetEveryoneRequest{
			Greeting: &greetpb.Greeting{
				FirstName: "Ahuva",
			},
		},
		&greetpb.GreetEveryoneRequest{
			Greeting: &greetpb.Greeting{
				FirstName: "Chanan",
			},
		},
		&greetpb.GreetEveryoneRequest{
			Greeting: &greetpb.Greeting{
				FirstName: "Duv",
			},
		},
	}
	// Send messages to client (go routine)
	// NOTE(review): Send and CloseSend errors are ignored, and nothing
	// waits for this goroutine — the function can return while it is still
	// running. Confirm that is acceptable for a demo client.
	waitc := make(chan struct{})
	go func() {
		for _, req := range requests {
			fmt.Printf("Sending message: %v\n", req)
			stream.Send(req)
			time.Sleep(10 * time.Millisecond)
		}
		stream.CloseSend()
	}()
	// Receive a bunch of messages from the client (go routine)
	go func() {
		for {
			res, err := stream.Recv()
			if err == io.EOF {
				break
			} else if err != nil {
				fmt.Printf("Error!! %v", err)
				break
			}
			fmt.Printf("Received: %v\n", res.GetResult())
		}
		close(waitc)
	}()
	// Block until things are done
	<-waitc
	return nil
}
// doUnaryWithDeadline calls GreetWithDeadline with the given timeout and
// maps a DeadlineExceeded gRPC status onto a descriptive wrapped error.
func doUnaryWithDeadline(c greetpb.GreetServiceClient, timeout time.Duration) error {
	fmt.Println("Starting to do Unary GreetWithDeadline RPC...")
	req := &greetpb.GreetWithDeadlineRequest{
		Greeting: dFGreeting,
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	res, err := c.GreetWithDeadline(ctx, req)
	if err != nil {
		// A gRPC status error lets us distinguish a blown deadline from
		// other failures; flattened the else-after-return chain.
		if statusErr, ok := status.FromError(err); ok {
			if statusErr.Code() == codes.DeadlineExceeded {
				return errors.Wrap(err, "Timeout was hit! Deadline was exceeded")
			}
			return errors.Wrap(err, "unknown error encountered")
		}
		return errors.Wrap(err, "error while calling GreetWithDeadline rpc")
	}
	fmt.Printf("Response from GreetWithDeadline: %v\n", res)
	return nil
}
|
package anton
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strconv"
)
// SendTelegram posts msg to the Telegram Bot API sendMessage endpoint for
// the given chat (antonUserTelegram) using the bot identified by
// antonBotTelegramTokenID. Errors are logged to stdout, not returned.
func SendTelegram(msg, antonUserTelegram, antonBotTelegramTokenID string) {
	// Declare the helper struct to access the helper functions
	var helper Helper
	// This is the URI:
	postURL := "https://api.telegram.org/bot{tokenID}/sendMessage"
	// Create New Post Body for the message to send
	postBody := new(TelegramMsg)
	// Format the body with the GroupID and the Message. A malformed chat
	// ID parses to 0; the API call below will then fail with a non-200.
	postBody.ChatID, _ = strconv.ParseInt(antonUserTelegram, 10, 64)
	postBody.Text = msg
	// Format the URL with the static parameters
	finalURL := helper.ReplaceParameters(postURL, "{tokenID}", antonBotTelegramTokenID)
	// Encode the JSON body; the original discarded this error and could
	// post a nil payload.
	postBodyEncoded, err := json.Marshal(postBody)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Send a post request with your token
	res, err := http.Post(finalURL, "application/json", bytes.NewBuffer(postBodyEncoded))
	if err != nil {
		// Return before touching res: on error res is nil and the original
		// code dereferenced it below.
		fmt.Println(err)
		return
	}
	// Close the body so the underlying connection can be reused.
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		fmt.Println("unexpected status" + res.Status)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.