text stringlengths 11 4.05M |
|---|
/*
* Swagger Petstore
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package main
import (
"context"
"fmt"
"log"
"net/http"
"reflect"
RESTfulPolyglotEampleGoServer "github.com/marcocipri/RESTfulPolyglotExample/go/servermock"
)
// main dumps reflection information about MyPetsAPIService — its fields,
// its value-receiver methods, and its pointer-receiver methods — then
// wires the service into the generated controller/router and serves HTTP
// on port 8080.
func main() {
	valueType := reflect.TypeOf(MyPetsAPIService{}) // the value type
	fmt.Println(valueType, "has", valueType.NumField(), "fields:")
	for i := 0; i < valueType.NumField(); i++ {
		fmt.Print(" field#", i, ": ", valueType.Field(i).Name, "\n")
	}
	fmt.Println(valueType, "has", valueType.NumMethod(), "methods:")
	for i := 0; i < valueType.NumMethod(); i++ {
		fmt.Print(" method#", i, ": ", valueType.Method(i).Name, "\n")
	}
	pointerType := reflect.TypeOf(&MyPetsAPIService{}) // the pointer type
	fmt.Println(pointerType, "has", pointerType.NumMethod(), "methods:")
	for i := 0; i < pointerType.NumMethod(); i++ {
		fmt.Print(" method#", i, ": ", pointerType.Method(i).Name, "\n")
	}
	log.Printf("Server started")
	service := &MyPetsAPIService{}
	controller := RESTfulPolyglotEampleGoServer.NewDefaultApiController(service)
	router := RESTfulPolyglotEampleGoServer.NewRouter(controller)
	log.Fatal(http.ListenAndServe(":8080", router))
}
// MyPetsAPIService implements the generated API by embedding the default
// service and overriding individual handlers (see FindPets below).
type MyPetsAPIService struct {
	RESTfulPolyglotEampleGoServer.DefaultApiService
}
// FindPets returns a fixed, in-memory list of two pets. The tags and
// limit parameters are accepted for interface compatibility but ignored.
func (s *MyPetsAPIService) FindPets(ctx context.Context, tags []string, limit int32) (RESTfulPolyglotEampleGoServer.ImplResponse, error) {
	pets := []RESTfulPolyglotEampleGoServer.Pet{
		{Name: "cat", Tag: "felis", Id: 1},
		{Name: "dog", Tag: "canis", Id: 2},
	}
	return RESTfulPolyglotEampleGoServer.Response(http.StatusOK, pets), nil
}
|
package main
import (
"bytes"
"encoding/csv"
"fmt"
"io"
"log"
)
// Demonstrates reading and writing comma-separated values (CSV) with encoding/csv.
// main demonstrates round-tripping comma-separated values through an
// in-memory buffer with encoding/csv: one record is written, then read
// back both one record at a time (Read) and in bulk (ReadAll).
func main() {
	var data = []string{"test", "Hello", "Go"}
	var buf bytes.Buffer
	// Initialize a writer targeting the in-memory buffer.
	w := csv.NewWriter(&buf)
	// Write one record (a slice of fields).
	if err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	// Flush buffered data to the underlying io.Writer. Flush itself does
	// not report errors; Error must be called to check for them.
	w.Flush()
	if err := w.Error(); err != nil {
		log.Fatal(err)
	}
	// buf.String() replaces the redundant string(buf.Bytes()) round trip.
	fmt.Println(buf.String())
	// Initialize a reader over the data just written.
	r := csv.NewReader(&buf)
	for {
		// Read returns one record: a slice of strings, one per field.
		record, err := r.Read()
		if err == io.EOF { // end of input reached
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(record)
	}
	// ReadAll reads every remaining record. A successful call returns
	// err == nil rather than io.EOF, because reading to end-of-file is
	// ReadAll's defined behavior and is not reported as an error.
	// Records already consumed by Read above are not returned again.
	records, err := r.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(records)
}
|
package models
import (
"context"
"corona/helpers"
"database/sql"
"fmt"
"github.com/lib/pq"
uuid "github.com/satori/go.uuid"
"time"
)
type (
	// CoronaModel mirrors one row of the corona_data table. The numeric
	// statistics are carried as strings — NOTE(review): presumably the DB
	// columns are text; confirm against the schema. Nullable audit columns
	// use Null* wrapper types so scans of NULL succeed.
	CoronaModel struct {
		Id             uuid.UUID
		CountryId      uuid.UUID
		TotalCases     string
		NewCases       string
		TotalDeaths    string
		NewDeaths      string
		TotalRecovered string
		ActiveCases    string
		SeriousCases   string
		TotalTests     string
		Population     string
		CreatedBy      uuid.UUID
		CreatedAt      time.Time
		UpdatedBy      uuid.NullUUID
		UpdatedAt      pq.NullTime
	}
	// CoronaResponse is the JSON shape returned to API clients: the raw
	// country_id is expanded into a full CountryResponse, and the nullable
	// audit columns are flattened (zero values when unset — see Response).
	CoronaResponse struct {
		Id             uuid.UUID       `json:"id"`
		Country        CountryResponse `json:"country"`
		TotalCases     string          `json:"total_cases"`
		NewCases       string          `json:"new_cases"`
		TotalDeaths    string          `json:"total_deaths"`
		NewDeaths      string          `json:"new_deaths"`
		TotalRecovered string          `json:"total_recovered"`
		ActiveCases    string          `json:"active_cases"`
		SeriousCases   string          `json:"serious_cases"`
		TotalTests     string          `json:"total_tests"`
		Population     string          `json:"population"`
		CreatedBy      uuid.UUID       `json:"created_by"`
		CreatedAt      time.Time       `json:"created_at"`
		UpdatedBy      uuid.UUID       `json:"updated_by"`
		UpdatedAt      time.Time       `json:"updated_at"`
	}
)
// Response assembles the API representation of s: it resolves the country
// relation (GetOneCountry, then country.Response) and flattens the
// nullable audit columns to their contained values (zero values when
// NULL). Errors are logged and returned with an empty response.
func (s CoronaModel) Response(ctx context.Context, db *sql.DB, logger *helpers.Logger) (CoronaResponse, error) {
	country, err := GetOneCountry(ctx, db, s.CountryId)
	if err != nil {
		logger.Err.Printf(`model.corona.go/GetOneCountry/%v`, err)
		return CoronaResponse{}, err
	}
	countryResponse, err := country.Response(ctx, db, logger)
	if err != nil {
		logger.Err.Printf(`model.corona.go/country.Response/%v`, err)
		return CoronaResponse{}, err
	}
	resp := CoronaResponse{
		Id:             s.Id,
		Country:        countryResponse,
		TotalCases:     s.TotalCases,
		NewCases:       s.NewCases,
		TotalDeaths:    s.TotalDeaths,
		NewDeaths:      s.NewDeaths,
		TotalRecovered: s.TotalRecovered,
		ActiveCases:    s.ActiveCases,
		SeriousCases:   s.SeriousCases,
		TotalTests:     s.TotalTests,
		Population:     s.Population,
		CreatedBy:      s.CreatedBy,
		CreatedAt:      s.CreatedAt,
		UpdatedBy:      s.UpdatedBy.UUID,
		UpdatedAt:      s.UpdatedAt.Time,
	}
	return resp, nil
}
// GetOneCorona fetches a single corona_data row by its primary key.
// When no row matches, the error from database/sql (sql.ErrNoRows) is
// returned unchanged.
func GetOneCorona(ctx context.Context, db *sql.DB, id uuid.UUID) (CoronaModel, error) {
	// Constant statement: no formatting verbs, so fmt.Sprintf was unnecessary.
	query := `
	SELECT
		id,
		country_id,
		total_cases,
		new_cases,
		total_deaths,
		new_deaths,
		total_recovered,
		active_cases,
		serious_cases,
		total_tests,
		population,
		created_by,
		created_at,
		updated_by,
		updated_at
	FROM
		corona_data
	WHERE
		id = $1
	`
	var data CoronaModel
	err := db.QueryRowContext(ctx, query, id).Scan(
		&data.Id,
		&data.CountryId,
		&data.TotalCases,
		&data.NewCases,
		&data.TotalDeaths,
		&data.NewDeaths,
		&data.TotalRecovered,
		&data.ActiveCases,
		&data.SeriousCases,
		&data.TotalTests,
		&data.Population,
		&data.CreatedBy,
		&data.CreatedAt,
		&data.UpdatedBy,
		&data.UpdatedAt,
	)
	if err != nil {
		return CoronaModel{}, err
	}
	return data, nil
}
// GetCoronaByCountry fetches the corona_data row whose country name
// matches the given string case-insensitively. The argument is used as a
// LIKE pattern verbatim: without % wildcards this behaves as an exact
// (case-insensitive) match — callers may pass wildcards themselves.
// Returns sql.ErrNoRows when nothing matches.
func GetCoronaByCountry(ctx context.Context, db *sql.DB, country string) (CoronaModel, error) {
	// Constant statement: no formatting verbs, so fmt.Sprintf was unnecessary.
	query := `
	SELECT
		cd.id,
		country_id,
		total_cases,
		new_cases,
		total_deaths,
		new_deaths,
		total_recovered,
		active_cases,
		serious_cases,
		total_tests,
		population,
		cd.created_by,
		cd.created_at,
		cd.updated_by,
		cd.updated_at
	FROM
		corona_data cd
	INNER JOIN
		country c
	ON
		cd.country_id=c.id
	WHERE
		LOWER(c.name) LIKE LOWER($1)
	`
	var data CoronaModel
	err := db.QueryRowContext(ctx, query, country).Scan(
		&data.Id,
		&data.CountryId,
		&data.TotalCases,
		&data.NewCases,
		&data.TotalDeaths,
		&data.NewDeaths,
		&data.TotalRecovered,
		&data.ActiveCases,
		&data.SeriousCases,
		&data.TotalTests,
		&data.Population,
		&data.CreatedBy,
		&data.CreatedAt,
		&data.UpdatedBy,
		&data.UpdatedAt,
	)
	if err != nil {
		return CoronaModel{}, err
	}
	return data, nil
}
// GetAllCoronaByContinent returns the corona rows for every country in the
// given continent, ordered by country name in filter.Dir direction and
// paginated by filter.Limit/filter.Offset.
//
// filter.Dir lands in the ORDER BY clause and cannot be a bind parameter —
// NOTE(review): confirm helpers.Filter restricts it to ASC/DESC.
func GetAllCoronaByContinent(ctx context.Context, db *sql.DB, filter helpers.Filter, id uuid.UUID) (
	[]CoronaModel, error) {
	query := fmt.Sprintf(`
	SELECT
		cd.id,
		country_id,
		total_cases,
		new_cases,
		total_deaths,
		new_deaths,
		total_recovered,
		active_cases,
		serious_cases,
		total_tests,
		population,
		cd.created_by,
		cd.created_at,
		cd.updated_by,
		cd.updated_at
	FROM
		corona_data cd
	INNER JOIN
		country c
	ON
		cd.country_id = c.id
	WHERE
		c.continent_id = $1
	ORDER BY
		c.name %s
	LIMIT $2 OFFSET $3`, filter.Dir)
	rows, err := db.QueryContext(ctx, query, id, filter.Limit, filter.Offset)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var datas []CoronaModel
	for rows.Next() {
		var data CoronaModel
		// Scan errors were previously discarded, silently yielding
		// zero-valued rows; surface them instead.
		if err := rows.Scan(
			&data.Id,
			&data.CountryId,
			&data.TotalCases,
			&data.NewCases,
			&data.TotalDeaths,
			&data.NewDeaths,
			&data.TotalRecovered,
			&data.ActiveCases,
			&data.SeriousCases,
			&data.TotalTests,
			&data.Population,
			&data.CreatedBy,
			&data.CreatedAt,
			&data.UpdatedBy,
			&data.UpdatedAt,
		); err != nil {
			return nil, err
		}
		datas = append(datas, data)
	}
	// Report any error that terminated iteration early.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return datas, nil
}
// GetAllCorona returns corona rows joined with their country, optionally
// filtered by a case-insensitive substring match on the country name, and
// ordered/paginated per filter.
//
// Fix: filter.Search was previously interpolated straight into the SQL
// text, which allowed SQL injection; it is now passed as a bind parameter
// ($3) with the %...% wildcards added in Go. filter.Dir still lands in
// ORDER BY and cannot be bound — NOTE(review): confirm helpers.Filter
// restricts it to ASC/DESC. Scan and iteration errors are now surfaced.
func GetAllCorona(ctx context.Context, db *sql.DB, filter helpers.Filter) ([]CoronaModel, error) {
	args := []interface{}{filter.Limit, filter.Offset}
	var searchQuery string
	if filter.Search != "" {
		searchQuery = `WHERE LOWER(c.name) LIKE LOWER($3)`
		args = append(args, "%"+filter.Search+"%")
	}
	query := fmt.Sprintf(`
	SELECT
		cd.id,
		country_id,
		total_cases,
		new_cases,
		total_deaths,
		new_deaths,
		total_recovered,
		active_cases,
		serious_cases,
		total_tests,
		population,
		cd.created_by,
		cd.created_at,
		cd.updated_by,
		cd.updated_at
	FROM
		corona_data cd
	INNER JOIN
		country c
	ON
		cd.country_id = c.id
	%s
	ORDER BY
		c.name %s
	LIMIT $1 OFFSET $2`, searchQuery, filter.Dir)
	rows, err := db.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var datas []CoronaModel
	for rows.Next() {
		var data CoronaModel
		if err := rows.Scan(
			&data.Id,
			&data.CountryId,
			&data.TotalCases,
			&data.NewCases,
			&data.TotalDeaths,
			&data.NewDeaths,
			&data.TotalRecovered,
			&data.ActiveCases,
			&data.SeriousCases,
			&data.TotalTests,
			&data.Population,
			&data.CreatedBy,
			&data.CreatedAt,
			&data.UpdatedBy,
			&data.UpdatedAt,
		); err != nil {
			return nil, err
		}
		datas = append(datas, data)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return datas, nil
}
// Insert writes s as a new corona_data row; created_at is stamped by the
// database (now()). On success the generated id and created_at are
// scanned back into s.
func (s *CoronaModel) Insert(ctx context.Context, db *sql.DB) error {
	// Constant statement: no formatting verbs, so fmt.Sprintf was unnecessary.
	query := `
	INSERT INTO corona_data(
		country_id,
		total_cases,
		new_cases,
		total_deaths,
		new_deaths,
		total_recovered,
		active_cases,
		serious_cases,
		total_tests,
		population,
		created_by,
		created_at
	)VALUES(
		$1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,now())
	RETURNING
		id, created_at
	`
	return db.QueryRowContext(ctx, query,
		s.CountryId, s.TotalCases, s.NewCases, s.TotalDeaths, s.NewDeaths, s.TotalRecovered, s.ActiveCases, s.SeriousCases,
		s.TotalTests, s.Population, s.CreatedBy).Scan(
		&s.Id, &s.CreatedAt,
	)
}
// Update rewrites the statistics row matching s.CountryId, stamping
// updated_at in the database, and scans the row's id and audit columns
// back into s. Returns sql.ErrNoRows when no row matches the country.
// NOTE(review): the WHERE clause matches on country_id, not id — if
// corona_data can hold more than one row per country, only the first
// returned row is scanned; confirm the table is one-row-per-country.
func (s *CoronaModel) Update(ctx context.Context, db *sql.DB) error {
	// Constant statement: no formatting verbs, so fmt.Sprintf was unnecessary.
	query := `
	UPDATE corona_data
	SET
		total_cases=$1,
		new_cases=$2,
		total_deaths=$3,
		new_deaths=$4,
		total_recovered=$5,
		active_cases=$6,
		serious_cases=$7,
		total_tests=$8,
		population=$9,
		updated_at=NOW(),
		updated_by=$10
	WHERE
		country_id=$11
	RETURNING
		id,created_at,updated_at,created_by
	`
	return db.QueryRowContext(ctx, query,
		s.TotalCases, s.NewCases, s.TotalDeaths, s.NewDeaths, s.TotalRecovered, s.ActiveCases, s.SeriousCases, s.TotalTests,
		s.Population, s.UpdatedBy, s.CountryId).Scan(
		&s.Id, &s.CreatedAt, &s.UpdatedAt, &s.CreatedBy,
	)
}
|
package k8s
import (
"context"
"time"
"github.com/ansel1/merry"
"github.com/tommy351/kubenvoy/pkg/config"
corev1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
// Load auth plugins
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
// Client creates shared informers that watch Kubernetes resources in the
// configured namespace.
type Client interface {
	WatchEndpoints(ctx context.Context, opts *WatchEndpointsOptions) cache.SharedIndexInformer
	WatchService(ctx context.Context, opts *WatchServiceOptions) cache.SharedIndexInformer
}

// WatchOptions holds settings common to all watches.
type WatchOptions struct {
	// ResyncPeriod is forwarded as the informer's resync interval.
	ResyncPeriod time.Duration
}

// ListEndpointsOptions configures endpoint listing (currently empty).
type ListEndpointsOptions struct{}

// WatchEndpointsOptions configures WatchEndpoints.
type WatchEndpointsOptions struct {
	ListEndpointsOptions
	WatchOptions
}

// ListServiceOptions configures service listing (currently empty).
type ListServiceOptions struct{}

// WatchServiceOptions configures WatchService.
// NOTE(review): this embeds ListEndpointsOptions rather than
// ListServiceOptions — looks like a copy-paste slip. Both types are empty
// structs today so it is harmless, but confirm before adding fields to
// either (ListServiceOptions is otherwise unused).
type WatchServiceOptions struct {
	ListEndpointsOptions
	WatchOptions
}

// client is the default Client implementation backed by a real
// kubernetes.Interface clientset.
type client struct {
	config *config.KubernetesConfig
	client kubernetes.Interface
}
// NewClient builds a Client backed by a real Kubernetes clientset. The
// REST configuration comes from LoadConfig; both setup errors are wrapped
// with merry for stack-trace context.
func NewClient(conf *config.KubernetesConfig) (Client, error) {
	restConf, err := LoadConfig()
	if err != nil {
		return nil, merry.Wrap(err)
	}
	kubeClient, err := kubernetes.NewForConfig(restConf)
	if err != nil {
		return nil, merry.Wrap(err)
	}
	c := &client{
		config: conf,
		client: kubeClient,
	}
	return c, nil
}
// WatchEndpoints returns a shared informer over Endpoints in the client's
// configured namespace, resyncing every opts.ResyncPeriod. The ctx
// parameter is currently unused.
func (c *client) WatchEndpoints(ctx context.Context, opts *WatchEndpointsOptions) cache.SharedIndexInformer {
	return corev1.NewEndpointsInformer(c.client, c.config.Namespace, opts.ResyncPeriod, cache.Indexers{})
}

// WatchService returns a shared informer over Services in the client's
// configured namespace, resyncing every opts.ResyncPeriod. The ctx
// parameter is currently unused.
func (c *client) WatchService(ctx context.Context, opts *WatchServiceOptions) cache.SharedIndexInformer {
	return corev1.NewServiceInformer(c.client, c.config.Namespace, opts.ResyncPeriod, cache.Indexers{})
}
|
package objects
import "time"
// User is the base user record.
type User struct {
	ID       uint   `json:"id"`
	Username string `json:"username"`
	Password string `json:"password"`
	Phone    string `json:"phone"`
}

// UserLoginObjectRequest is the login request payload.
type UserLoginObjectRequest struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// UserStoreObjectRequest is the payload for creating a user.
type UserStoreObjectRequest struct {
	ID       uint   `json:"id"`
	Username string `json:"username"`
	Password string `json:"password"`
	Phone    string `json:"phone"`
	Email    string `json:"email"`
}

// UserStoreObjectResponse echoes a stored user with audit timestamps.
// NOTE(review): Password is serialized into the response JSON — confirm
// this is intentional; echoing password material in API responses is a
// common leak.
type UserStoreObjectResponse struct {
	ID        uint      `json:"id"`
	Username  string    `json:"username"`
	Password  string    `json:"password"`
	Phone     string    `json:"phone"`
	Email     string    `json:"email"`
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
}

// UserFindObjectResponse is the lookup response; timestamp fields are
// pointers so unset values serialize as JSON null.
type UserFindObjectResponse struct {
	ID        uint       `json:"id"`
	Username  string     `json:"username"`
	Phone     string     `json:"phone"`
	Email     string     `json:"email"`
	CreatedAt *time.Time `json:"created_at"`
	UpdatedAt *time.Time `json:"updated_at"`
	DeletedAt *time.Time `json:"deleted_at"`
}

// UserLoginObjectResponse carries the issued access token and its expiry.
type UserLoginObjectResponse struct {
	ID          uint   `json:"id"`
	AccessToken string `json:"access_token"`
	ExpiredIn   int    `json:"expired_in"`
	Username    string `json:"username"`
}

// UserUpdateObjectRequest is the payload for updating contact details.
type UserUpdateObjectRequest struct {
	Email string `json:"email"`
	Phone string `json:"phone"`
}
|
package hook
import (
"ekgo/api/lib/response"
"log"
)
// Test is a hook with before/after callbacks (log messages translate to
// "execute before" / "execute after").
type Test struct{}

// Before runs ahead of the hooked action.
// NOTE(review): the meaning of the returned *response.Write is defined in
// ekgo/api/lib/response — returning nil presumably means "continue without
// an early response"; confirm against the hook dispatcher.
func (p *Test) Before() *response.Write {
	log.Println("在之前执行")
	return nil
}

// After runs once the hooked action has completed.
func (p *Test) After() {
	log.Println("在之后执行")
}
|
package main
import "fmt"
// TermainlStatusReceiver prints window/door status changes to the
// terminal. (The misspelled name is preserved for compatibility with
// existing callers.)
type TermainlStatusReceiver struct{}

// NewTermainlStatusReceiver returns a fresh terminal receiver that writes
// status changes to standard output.
func NewTermainlStatusReceiver() *TermainlStatusReceiver {
	return &TermainlStatusReceiver{}
}
// Notify prints the pin's name together with its open/closed state
// (pin.Value true means closed) and armed state.
func (t *TermainlStatusReceiver) Notify(pin SubscribedPin) {
	state := "open"
	if pin.Value {
		state = "closed"
	}
	arm := "not armed"
	if pin.Armed {
		arm = "armed"
	}
	fmt.Printf("The %v was %v and %v.\n", pin.Name, state, arm)
}
|
package main
// canJump reports whether the last index of nums is reachable starting
// from index 0, where nums[i] is the maximum jump length from i.
// It scans right-to-left, tracking the leftmost index from which the end
// is still reachable; the answer is whether that index retreats to 0.
func canJump(nums []int) bool {
	lastGood := len(nums) - 1
	for i := len(nums) - 2; i >= 0; i-- {
		if i+nums[i] >= lastGood {
			lastGood = i
		}
	}
	return lastGood <= 0
}
|
// Package controllers sets up the controllers for the fleet-controller.
package controllers
import (
"context"
"github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/rancher/fleet/internal/cmd/controller/controllers/bootstrap"
"github.com/rancher/fleet/internal/cmd/controller/controllers/bundle"
"github.com/rancher/fleet/internal/cmd/controller/controllers/cleanup"
"github.com/rancher/fleet/internal/cmd/controller/controllers/cluster"
"github.com/rancher/fleet/internal/cmd/controller/controllers/clustergroup"
"github.com/rancher/fleet/internal/cmd/controller/controllers/clusterregistration"
"github.com/rancher/fleet/internal/cmd/controller/controllers/clusterregistrationtoken"
"github.com/rancher/fleet/internal/cmd/controller/controllers/config"
"github.com/rancher/fleet/internal/cmd/controller/controllers/content"
"github.com/rancher/fleet/internal/cmd/controller/controllers/display"
"github.com/rancher/fleet/internal/cmd/controller/controllers/git"
"github.com/rancher/fleet/internal/cmd/controller/controllers/image"
"github.com/rancher/fleet/internal/cmd/controller/controllers/manageagent"
fleetns "github.com/rancher/fleet/internal/cmd/controller/namespace"
"github.com/rancher/fleet/internal/cmd/controller/target"
"github.com/rancher/fleet/internal/manifest"
"github.com/rancher/fleet/pkg/durations"
"github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io"
fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1"
"github.com/rancher/gitjob/pkg/generated/controllers/gitjob.cattle.io"
gitcontrollers "github.com/rancher/gitjob/pkg/generated/controllers/gitjob.cattle.io/v1"
"github.com/rancher/lasso/pkg/cache"
"github.com/rancher/lasso/pkg/client"
"github.com/rancher/lasso/pkg/controller"
"github.com/rancher/wrangler/pkg/apply"
"github.com/rancher/wrangler/pkg/generated/controllers/apps"
appscontrollers "github.com/rancher/wrangler/pkg/generated/controllers/apps/v1"
"github.com/rancher/wrangler/pkg/generated/controllers/core"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/generated/controllers/rbac"
rbaccontrollers "github.com/rancher/wrangler/pkg/generated/controllers/rbac/v1"
"github.com/rancher/wrangler/pkg/leader"
"github.com/rancher/wrangler/pkg/ratelimit"
"github.com/rancher/wrangler/pkg/start"
"k8s.io/apimachinery/pkg/api/meta"
memory "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/workqueue"
)
// appContext bundles every client and controller interface the fleet
// controllers need, plus the shared target manager and apply client.
// The embedded fleetcontrollers.Interface exposes the fleet.cattle.io
// controllers (Bundle(), Cluster(), GitRepo(), ...) directly on the
// context. starters accumulates the factories to launch in start().
type appContext struct {
	fleetcontrollers.Interface

	K8s           kubernetes.Interface
	Core          corecontrollers.Interface
	Apps          appscontrollers.Interface
	RBAC          rbaccontrollers.Interface
	GitJob        gitcontrollers.Interface
	TargetManager *target.Manager
	RESTMapper    meta.RESTMapper
	Apply         apply.Apply
	ClientConfig  clientcmd.ClientConfig
	starters      []start.Starter
}
// start launches all accumulated starters via wrangler's start.All.
// The literal 50 is the threadiness value forwarded to start.All —
// NOTE(review): confirm its exact semantics in wrangler/pkg/start.
func (a *appContext) start(ctx context.Context) error {
	return start.All(ctx, 50, a.starters...)
}
// Register wires every fleet controller against the cluster reachable via
// cfg, then runs them under leader election until ctx is done.
// systemNamespace is fleet's own namespace; gitops and bootstrap
// registration can each be disabled. An error is returned only for wiring
// failures — controller startup failure inside the leader callback is
// fatal via logrus.
func Register(ctx context.Context, systemNamespace string, cfg clientcmd.ClientConfig, disableGitops bool, disableBootstrap bool) error {
	appCtx, err := newContext(cfg)
	if err != nil {
		return err
	}
	systemRegistrationNamespace := fleetns.SystemRegistrationNamespace(systemNamespace)
	if err := applyBootstrapResources(systemNamespace, systemRegistrationNamespace, appCtx); err != nil {
		return err
	}

	// config should be registered first to ensure the global
	// config is available to all components
	if err := config.Register(ctx,
		systemNamespace,
		appCtx.Core.ConfigMap()); err != nil {
		return err
	}

	// Cluster registration handling (service accounts, secrets, RBAC).
	clusterregistration.Register(ctx,
		appCtx.Apply.WithCacheTypes(
			appCtx.RBAC.ClusterRole(),
			appCtx.RBAC.ClusterRoleBinding(),
		),
		systemNamespace,
		systemRegistrationNamespace,
		appCtx.Core.ServiceAccount(),
		appCtx.Core.Secret(),
		appCtx.RBAC.Role(),
		appCtx.RBAC.RoleBinding(),
		appCtx.ClusterRegistration(),
		appCtx.Cluster())

	cluster.Register(ctx,
		appCtx.BundleDeployment(),
		appCtx.ClusterGroup().Cache(),
		appCtx.Cluster(),
		appCtx.GitRepo().Cache(),
		appCtx.Core.Namespace(),
		appCtx.ClusterRegistration())

	cluster.RegisterImport(ctx,
		systemNamespace,
		appCtx.Core.Secret().Cache(),
		appCtx.Cluster(),
		appCtx.ClusterRegistrationToken(),
		appCtx.Bundle(),
		appCtx.Core.Namespace())

	// Bundle lifecycle: targeting and deployment.
	bundle.Register(ctx,
		appCtx.Apply,
		appCtx.RESTMapper,
		appCtx.TargetManager,
		appCtx.Bundle(),
		appCtx.Cluster(),
		appCtx.ImageScan(),
		appCtx.GitRepo().Cache(),
		appCtx.BundleDeployment())

	clustergroup.Register(ctx,
		appCtx.Cluster(),
		appCtx.ClusterGroup())

	content.Register(ctx,
		appCtx.Content(),
		appCtx.BundleDeployment(),
		appCtx.Core.Namespace())

	clusterregistrationtoken.Register(ctx,
		systemNamespace,
		systemRegistrationNamespace,
		appCtx.Apply.WithCacheTypes(
			appCtx.Core.Secret(),
			appCtx.Core.ServiceAccount(),
			appCtx.RBAC.Role(),
			appCtx.RBAC.RoleBinding()),
		appCtx.ClusterRegistrationToken(),
		appCtx.Core.ServiceAccount(),
		appCtx.Core.Secret().Cache(),
		appCtx.Core.Secret())

	// Garbage collection of orphaned fleet resources.
	cleanup.Register(ctx,
		appCtx.Apply.WithCacheTypes(
			appCtx.Core.Secret(),
			appCtx.Core.ServiceAccount(),
			appCtx.RBAC.Role(),
			appCtx.RBAC.RoleBinding(),
			appCtx.RBAC.ClusterRole(),
			appCtx.RBAC.ClusterRoleBinding(),
			appCtx.Bundle(),
			appCtx.ClusterRegistrationToken(),
			appCtx.ClusterRegistration(),
			appCtx.ClusterGroup(),
			appCtx.Cluster(),
			appCtx.Core.Namespace()),
		appCtx.Core.Secret(),
		appCtx.Core.ServiceAccount(),
		appCtx.BundleDeployment(),
		appCtx.RBAC.Role(),
		appCtx.RBAC.RoleBinding(),
		appCtx.RBAC.ClusterRole(),
		appCtx.RBAC.ClusterRoleBinding(),
		appCtx.Core.Namespace(),
		appCtx.Cluster().Cache())

	manageagent.Register(ctx,
		systemNamespace,
		appCtx.Apply,
		appCtx.Core.Namespace(),
		appCtx.Cluster(),
		appCtx.Bundle())

	// GitOps controllers are optional (fleet can run bundle-only).
	if !disableGitops {
		git.Register(ctx,
			appCtx.Apply.WithCacheTypes(
				appCtx.RBAC.Role(),
				appCtx.RBAC.RoleBinding(),
				appCtx.GitJob.GitJob(),
				appCtx.Core.ConfigMap(),
				appCtx.Core.ServiceAccount()),
			appCtx.GitJob.GitJob(),
			appCtx.BundleDeployment(),
			appCtx.GitRepoRestriction().Cache(),
			appCtx.Bundle(),
			appCtx.ImageScan(),
			appCtx.GitRepo(),
			appCtx.Core.Secret().Cache())
	}

	if !disableBootstrap {
		bootstrap.Register(ctx,
			systemNamespace,
			appCtx.Apply.WithCacheTypes(
				appCtx.GitRepo(),
				appCtx.Cluster(),
				appCtx.ClusterGroup(),
				appCtx.Core.Namespace(),
				appCtx.Core.Secret()),
			appCtx.ClientConfig,
			appCtx.Core.ServiceAccount().Cache(),
			appCtx.Core.Secret(),
			appCtx.Core.Secret().Cache())
	}

	display.Register(ctx,
		appCtx.Cluster(),
		appCtx.ClusterGroup(),
		appCtx.GitRepo(),
		appCtx.BundleDeployment())

	image.Register(ctx,
		appCtx.Core,
		appCtx.GitRepo(),
		appCtx.ImageScan())

	// Only the elected leader actually starts the controllers; RunOrDie
	// blocks until ctx is cancelled.
	leader.RunOrDie(ctx, systemNamespace, "fleet-controller-lock", appCtx.K8s, func(ctx context.Context) {
		if err := appCtx.start(ctx); err != nil {
			logrus.Fatal(err)
		}
		logrus.Info("All controllers have been started")
	})
	return nil
}
// controllerFactory builds the shared controller factory used by every
// wrangler controller: 50 default workers, exponential-failure rate
// limiting, sync-only-changed-objects, and a slower per-kind limiter for
// fleet Cluster objects.
func controllerFactory(rest *rest.Config) (controller.SharedControllerFactory, error) {
	clientFactory, err := client.NewSharedClientFactory(rest, nil)
	if err != nil {
		return nil, err
	}
	cacheFactory := cache.NewSharedCachedFactory(clientFactory, nil)
	defaultLimiter := workqueue.NewItemExponentialFailureRateLimiter(durations.FailureRateLimiterBase, durations.FailureRateLimiterMax)
	clusterLimiter := workqueue.NewItemExponentialFailureRateLimiter(durations.SlowFailureRateLimiterBase, durations.SlowFailureRateLimiterMax)
	opts := &controller.SharedControllerFactoryOptions{
		DefaultRateLimiter:     defaultLimiter,
		DefaultWorkers:         50,
		SyncOnlyChangedObjects: true,
		KindRateLimiter: map[schema.GroupVersionKind]workqueue.RateLimiter{
			v1alpha1.SchemeGroupVersion.WithKind("Cluster"): clusterLimiter,
		},
	}
	return controller.NewSharedControllerFactory(cacheFactory, opts), nil
}
// newContext builds the appContext: one shared controller factory, then
// each generated controller factory (core, fleet, rbac, apps, gitjob) on
// top of it, plus the apply client, clientset, deferred REST mapper, and
// target manager. The generated factories are collected as starters so
// appContext.start can launch them together.
//
// NOTE(review): several locals (client, core, fleet, rbac, apps, git,
// apply) shadow their package names; after each assignment the package is
// no longer addressable in this scope — keep that in mind when editing.
func newContext(cfg clientcmd.ClientConfig) (*appContext, error) {
	client, err := cfg.ClientConfig()
	if err != nil {
		return nil, err
	}
	// Disable client-side rate limiting; backoff is handled by the
	// workqueue limiters configured in controllerFactory.
	client.RateLimiter = ratelimit.None

	scf, err := controllerFactory(client)
	if err != nil {
		return nil, err
	}

	core, err := core.NewFactoryFromConfigWithOptions(client, &core.FactoryOptions{
		SharedControllerFactory: scf,
	})
	if err != nil {
		return nil, err
	}
	corev := core.Core().V1()

	fleet, err := fleet.NewFactoryFromConfigWithOptions(client, &fleet.FactoryOptions{
		SharedControllerFactory: scf,
	})
	if err != nil {
		return nil, err
	}
	fleetv := fleet.Fleet().V1alpha1()

	rbac, err := rbac.NewFactoryFromConfigWithOptions(client, &rbac.FactoryOptions{
		SharedControllerFactory: scf,
	})
	if err != nil {
		return nil, err
	}
	rbacv := rbac.Rbac().V1()

	apps, err := apps.NewFactoryFromConfigWithOptions(client, &apps.FactoryOptions{
		SharedControllerFactory: scf,
	})
	if err != nil {
		return nil, err
	}
	appsv := apps.Apps().V1()

	git, err := gitjob.NewFactoryFromConfigWithOptions(client, &gitjob.FactoryOptions{
		SharedControllerFactory: scf,
	})
	if err != nil {
		return nil, err
	}
	gitv := git.Gitjob().V1()

	apply, err := apply.NewForConfig(client)
	if err != nil {
		return nil, err
	}
	// Owner references are managed explicitly by the controllers.
	apply = apply.WithSetOwnerReference(false, false)

	k8s, err := kubernetes.NewForConfig(client)
	if err != nil {
		return nil, err
	}

	// Discovery results are memo-cached; the REST mapper resolves lazily.
	mem := memory.NewMemCacheClient(k8s.Discovery())
	restMapper := restmapper.NewDeferredDiscoveryRESTMapper(mem)

	targetManager := target.New(
		fleetv.Cluster().Cache(),
		fleetv.ClusterGroup().Cache(),
		fleetv.Bundle().Cache(),
		fleetv.BundleNamespaceMapping().Cache(),
		corev.Namespace().Cache(),
		manifest.NewStore(fleetv.Content()),
		fleetv.BundleDeployment().Cache())

	return &appContext{
		RESTMapper:    restMapper,
		K8s:           k8s,
		Apps:          appsv,
		Interface:     fleetv,
		Core:          corev,
		RBAC:          rbacv,
		Apply:         apply,
		GitJob:        gitv,
		TargetManager: targetManager,
		ClientConfig:  cfg,
		starters: []start.Starter{
			core,
			apps,
			fleet,
			rbac,
			git,
		},
	}, nil
}
|
package main
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"github.com/juju/loggo"
"github.com/urfave/cli"
yaml "gopkg.in/yaml.v2"
)
var (
	// configFilename is the config file name or path (flag: --config/-c).
	configFilename string
	// source and destination are the two positional CLI arguments.
	source      string
	destination string
	// debug raises the log level to DEBUG (flag: --debug/-d).
	debug  bool
	logger loggo.Logger
)

// conf models the YAML config file: a list of shared locations.
type conf struct {
	Shared []string `yaml:"shared"`
}
// main configures and runs the deploy CLI: it validates the source and
// destination arguments, locates an optional YAML config, then performs
// either a link-only deploy (no config) or a copy-plus-link deploy
// (config found).
func main() {
	logger = loggo.GetLogger("")

	app := cli.NewApp()
	app.Name = "deploy"
	app.Version = "0.9.2"
	app.Usage = "Sets up modern PHP apps to work better when using docker"
	cli.AppHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} source destination
COMMANDS:
{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
GLOBAL OPTIONS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
`
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "config, c",
			Usage:       "Load configuration from `FILE`",
			Value:       ".ddply",
			Destination: &configFilename,
			EnvVar:      "DEPLOY_CONFIG_FILE",
		},
		cli.BoolFlag{
			Name:        "debug, d",
			Usage:       "Increase verbosity of running messages",
			Destination: &debug,
		},
	}
	app.Action = func(c *cli.Context) (err error) {
		if c.NArg() < 2 {
			return cli.NewExitError("Source and/or destination not specified", 1)
		}
		source = c.Args().Get(0)
		if !IsDir(source) {
			return cli.NewExitError("Source argument does not point to valid directory", 2)
		}
		destination = c.Args().Get(1)
		if c.IsSet("debug") {
			logger.SetLogLevel(loggo.DEBUG)
		}
		// Default: look for the config inside the source tree; an explicit
		// --config replaces that with a direct path.
		var configPath = filepath.Join(source, configFilename)
		if c.IsSet("config") {
			configPath = configFilename
		}
		foundConfig := findConfig(configPath)
		// Only an explicitly requested-but-missing config is fatal.
		if c.IsSet("config") && !foundConfig {
			return cli.NewExitError("Specified config file not found", 4)
		}
		yamlFile, err := ioutil.ReadFile(configPath)
		if err != nil {
			// no config file. assume link only
			logger.Infof("No configuration file found or specified. Continuing with linked deploy")
			var location = []string{""}
			LinkShared(location, source, destination)
		} else {
			// config file. assume copy and link shared
			var config conf
			err := config.getConfig(yamlFile)
			if err != nil {
				return cli.NewExitError(err, 6)
			}
			// was able to find and read config file
			logger.Infof("Shared locations from config: %s\n", config.Shared)
			logger.Infof("Copying directories...")
			CopyDir(source, destination)
			LinkShared(config.Shared, source, destination)
		}
		return nil
	}
	// NOTE(review): app.Run's error is discarded; urfave/cli prints it,
	// but the process exit code may not reflect the failure — confirm.
	app.Run(os.Args)
}
func findConfig(path string) (foundConfig bool) {
file, err := os.Stat(path)
if err != nil {
return false
}
return file.Mode().IsRegular()
}
// getConfig unmarshals the raw YAML bytes into c.
// NOTE(review): errors.New(err.Error()) re-wraps the yaml error as a plain
// string, discarding the original error type; returning err directly (or
// wrapping with fmt.Errorf("...: %w", err)) would preserve it for callers.
func (c *conf) getConfig(yamlFile []byte) (err error) {
	err = yaml.Unmarshal(yamlFile, c)
	if err != nil {
		return errors.New(err.Error())
	}
	return nil
}
|
package xin
import (
"bytes"
"strings"
)
// StringValue is the interpreter's string type: a mutable byte slice.
type StringValue []byte

// String returns the raw bytes as a Go string.
func (v StringValue) String() string {
	return string(v)
}

// Repr returns a single-quoted literal form with embedded single quotes
// escaped as \'.
func (v StringValue) Repr() string {
	return "'" + strings.ReplaceAll(string(v), "'", "\\'") + "'"
}

// Equal reports whether o is also a StringValue with identical bytes.
func (v StringValue) Equal(o Value) bool {
	if ov, ok := o.(StringValue); ok {
		// bytes.Equal is the idiomatic (and cheaper) replacement for
		// bytes.Compare(...) == 0 (staticcheck S1004).
		return bytes.Equal(v, ov)
	}
	return false
}
// strGetForm returns the one-byte substring of a string value at a given
// index: args are [StringValue, IntValue]. Out-of-range indices yield the
// zero value; wrong argument types yield MismatchedArgumentsError.
func strGetForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) {
	if len(args) < 2 {
		return nil, IncorrectNumberOfArgsError{
			node:     node,
			required: 2,
			given:    len(args),
		}
	}
	str, strOk := args[0].(StringValue)
	idx, idxOk := args[1].(IntValue)
	if !strOk || !idxOk {
		return nil, MismatchedArgumentsError{
			node: node,
			args: args,
		}
	}
	i := int(idx)
	if i < 0 || i >= len(str) {
		return zeroValue, nil
	}
	return str[i : i+1], nil
}
// strSetForm overwrites bytes of a string value starting at a given index,
// growing the string when the replacement runs past its current end.
// args: [target StringValue, start IntValue, replacement StringValue].
// The target's backing bytes are mutated in place. A start index outside
// [0, len) yields the zero value.
func strSetForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) {
	if len(args) < 3 {
		return nil, IncorrectNumberOfArgsError{
			node:     node,
			required: 3,
			given:    len(args),
		}
	}
	first, second, third := args[0], args[1], args[2]
	firstStr, fok := first.(StringValue)
	secondInt, sok := second.(IntValue)
	thirdStr, tok := third.(StringValue)
	if fok && sok && tok {
		si := int(secondInt)
		if si >= 0 && si < len(firstStr) {
			// Copy replacement bytes over the target; once the write
			// position passes the current end, append instead.
			for i, r := range thirdStr {
				if si+i < len(firstStr) {
					firstStr[si+i] = r
				} else {
					firstStr = append(firstStr, r)
				}
			}
			// node.leaves[1] is presumably the target argument's token —
			// TODO confirm against the AST layout.
			randTok := node.leaves[1].token
			if randTok.kind == tkName {
				// Rebind the name so callers observe growth (append may
				// have moved the backing array).
				err := fr.Up(randTok.value, firstStr, node.position)
				if err != nil {
					return nil, err
				}
			}
			return firstStr, nil
		}
		return zeroValue, nil
	}
	return nil, MismatchedArgumentsError{
		node: node,
		args: args,
	}
}
// strAddForm concatenates two string values by appending the second's
// bytes to the first, returning the (possibly reallocated) result.
// When the first operand in the AST is a name token, the name is rebound
// via fr.Up so the caller's binding observes the growth.
// NOTE(review): append may write into the first value's spare capacity, so
// other values aliasing that backing array can observe the mutation —
// confirm this in-place behavior is intended interpreter semantics.
func strAddForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) {
	if len(args) < 2 {
		return nil, IncorrectNumberOfArgsError{
			node:     node,
			required: 2,
			given:    len(args),
		}
	}
	first, second := args[0], args[1]
	firstStr, fok := first.(StringValue)
	secondStr, sok := second.(StringValue)
	if fok && sok {
		firstStr = append(firstStr, secondStr...)
		// node.leaves[1] is presumably the first argument's token — TODO
		// confirm against the AST layout.
		randTok := node.leaves[1].token
		if randTok.kind == tkName {
			err := fr.Up(randTok.value, firstStr, node.position)
			if err != nil {
				return nil, err
			}
		}
		return firstStr, nil
	}
	return nil, MismatchedArgumentsError{
		node: node,
		args: args,
	}
}
// strSizeForm returns a string value's length in bytes as an IntValue.
func strSizeForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) {
	if len(args) < 1 {
		return nil, IncorrectNumberOfArgsError{
			node:     node,
			required: 1,
			given:    len(args),
		}
	}
	if str, ok := args[0].(StringValue); ok {
		return IntValue(len(str)), nil
	}
	return nil, MismatchedArgumentsError{
		node: node,
		args: args,
	}
}
// strSliceForm returns a copy of the substring [start, end) of a string
// value: args are [StringValue, start IntValue, end IntValue].
// Both indices are clamped into [0, len] and end is raised to at least
// start, so out-of-range requests truncate rather than error. The result
// bytes are copied so the slice does not alias the source's backing array.
func strSliceForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) {
	if len(args) < 3 {
		return nil, IncorrectNumberOfArgsError{
			node:     node,
			required: 3,
			given:    len(args),
		}
	}
	first, second, third := args[0], args[1], args[2]
	firstStr, fok := first.(StringValue)
	secondInt, sok := second.(IntValue)
	thirdInt, tok := third.(IntValue)
	if fok && sok && tok {
		max := len(firstStr)
		inRange := func(iv IntValue) bool {
			return int(iv) >= 0 && int(iv) <= max
		}
		// Clamp both endpoints into [0, max], then force end >= start.
		if int(secondInt) < 0 {
			secondInt = 0
		}
		if int(thirdInt) < 0 {
			thirdInt = 0
		}
		if int(secondInt) > max {
			secondInt = IntValue(max)
		}
		if int(thirdInt) > max {
			thirdInt = IntValue(max)
		}
		if thirdInt < secondInt {
			thirdInt = secondInt
		}
		// After clamping this condition always holds; kept as a guard.
		if inRange(secondInt) && inRange(thirdInt) && secondInt <= thirdInt {
			byteSlice := firstStr[secondInt:thirdInt]
			destSlice := make([]byte, len(byteSlice))
			copy(destSlice, byteSlice)
			return StringValue(destSlice), nil
		}
		return zeroValue, nil
	}
	return nil, MismatchedArgumentsError{
		node: node,
		args: args,
	}
}
// strEncForm returns the first byte of a string value as an IntValue;
// an empty string yields the zero value.
func strEncForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) {
	if len(args) < 1 {
		return nil, IncorrectNumberOfArgsError{
			node:     node,
			required: 1,
			given:    len(args),
		}
	}
	str, ok := args[0].(StringValue)
	if !ok {
		return nil, MismatchedArgumentsError{
			node: node,
			args: args,
		}
	}
	if len(str) == 0 {
		return zeroValue, nil
	}
	return IntValue(str[0]), nil
}
// strDecForm converts an integer byte code (0-255) into a one-byte string
// value; codes outside that range yield the zero value.
func strDecForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) {
	if len(args) < 1 {
		return nil, IncorrectNumberOfArgsError{
			node:     node,
			required: 1,
			given:    len(args),
		}
	}
	code, ok := args[0].(IntValue)
	if !ok {
		return nil, MismatchedArgumentsError{
			node: node,
			args: args,
		}
	}
	if code < 0 || code > 255 {
		return zeroValue, nil
	}
	return StringValue([]byte{byte(code)}), nil
}
|
package main
import (
"github.com/Sirupsen/logrus"
"github.com/cerana/cerana/pkg/logrusx"
"github.com/cerana/cerana/provider"
"github.com/cerana/cerana/providers/service"
flag "github.com/spf13/pflag"
)
// main wires up the service provider: JSON logging, CLI flags, config
// loading, then runs the provider server until signalled — but only when
// at least one task was registered.
func main() {
	logrus.SetFormatter(&logrusx.JSONFormatter{})

	config := service.NewConfig(nil, nil)
	// Flags are registered on the global pflag set; config.LoadConfig is
	// presumably what consumes them after Parse — confirm in
	// providers/service.
	flag.StringP("rollback_clone_cmd", "r", "/run/current-system/sw/bin/rollback_clone", "full path to dataset clone/rollback tool")
	flag.StringP("dataset_clone_dir", "d", "data/running-clones", "destination for dataset clones used by running services")
	flag.Parse()

	// Each DieOnError logs fatally (and exits) on a non-nil error.
	logrusx.DieOnError(config.LoadConfig(), "load config")
	logrusx.DieOnError(config.SetupLogging(), "setup logging")

	server, err := provider.NewServer(config.Config)
	logrusx.DieOnError(err, "new server")
	s := service.New(config, server.Tracker())
	s.RegisterTasks(server)

	// Start only when tasks exist; otherwise warn and exit cleanly.
	if len(server.RegisteredTasks()) != 0 {
		logrusx.DieOnError(server.Start(), "start server")
		server.StopOnSignal()
	} else {
		logrus.Warn("no registered tasks, exiting")
	}
}
|
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
// Package ranger implements lazy io.Reader and io.Writer interfaces.
package ranger
|
package main
import "sort"
// 462. Minimum Moves to Equal Array Elements II
// Given an integer array nums of length n, return the minimum number of
// moves required to make all array elements equal.
//
// In one move, you can increment or decrement an element of the array by 1.
//
//
//
// Example 1:
//
// Input: nums = [1,2,3]
// Output: 2
// Explanation:
// Only two moves are needed (each move increments or decrements one element by 1):
// [1,2,3] => [2,2,3] => [2,2,2]
// Example 2:
//
// Input: nums = [1,10,2,9]
// Output: 16
//
// Constraints:
//
// n == nums.length
// 1 <= nums.length <= 10^5
// -10^9 <= nums[i] <= 10^9
// minMoves2 returns the minimum number of increment/decrement-by-1 moves
// needed to make every element of nums equal. The optimal meeting point
// is the median, so the answer is the sum of absolute distances to it.
// Note: nums is sorted in place as a side effect.
func minMoves2(nums []int) int {
	sort.Ints(nums)
	median := nums[len(nums)/2]
	total := 0
	for _, value := range nums {
		total += abs(value - median)
	}
	return total
}

// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
|
package html5_test
import (
"strings"
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// These Ginkgo specs cover HTML5 rendering of AsciiDoc verse blocks
// ("____"-delimited blocks with the [verse] style): how the optional
// author and title attributes become an attribution footer, and — in the
// nested context — how each value of the "subs" attribute changes which
// substitutions (macros, quotes, attributes, replacements, ...) are
// applied to the block content.
var _ = Describe("verse blocks", func() {
	Context("as delimited blocks", func() {
		It("single-line verse with author and title and empty end line", func() {
			source := `[verse, john doe, verse title]
____
some *verse* content
____`
			expected := `<div class="verseblock">
<pre class="content">some <strong>verse</strong> content</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("single line verse with author and title and empty lines around", func() {
			source := `[verse, john doe, verse title]
____
some *verse* content
____`
			expected := `<div class="verseblock">
<pre class="content">some <strong>verse</strong> content</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("single-line verse with author, id and title ", func() {
			source := `[verse, john doe, verse title]
[#id-for-verse-block]
.title for verse block
____
some *verse* content
____`
			expected := `<div id="id-for-verse-block" class="verseblock">
<div class="title">title for verse block</div>
<pre class="content">some <strong>verse</strong> content</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("multi-line verse with author and title and empty end line", func() {
			source := `[verse, john doe, verse title]
____
- some
- verse
- content
and more!
____`
			expected := `<div class="verseblock">
<pre class="content">- some
- verse
- content
and more!</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("single-line verse with author only", func() {
			source := `[verse, john doe]
____
some verse content
____`
			expected := `<div class="verseblock">
<pre class="content">some verse content</pre>
<div class="attribution">
— john doe
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("single-line verse with title only", func() {
			source := `[verse, , verse title]
____
some verse content
____`
			expected := `<div class="verseblock">
<pre class="content">some verse content</pre>
<div class="attribution">
— verse title
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("multi-line verse without author and title and empty end line", func() {
			source := `[verse]
____
lines
and tabs
are preserved
____`
			expected := `<div class="verseblock">
<pre class="content">lines
and tabs
are preserved</pre>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("empty verse without author and title", func() {
			source := `[verse]
____
____`
			expected := `<div class="verseblock">
<pre class="content"></pre>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		// Each spec below swaps the "$SUBS" placeholder in this shared
		// source for one concrete substitution list and asserts exactly
		// which transformations (links, quotes, attribute refs, callouts,
		// line breaks, special chars) are applied to the verse content.
		Context("with custom substitutions", func() {
			source := `:github-url: https://github.com

[subs="$SUBS"]
[verse, john doe, verse title]
____
a link to https://example.com[] <1>
and <more text> on the +
*next* lines with a link to {github-url}[]

* not a list item
____

<1> a callout
`
			It("should apply the default substitution", func() {
				s := strings.ReplaceAll(source, "[subs=\"$SUBS\"]", "")
				expected := `<div class="verseblock">
<pre class="content">a link to <a href="https://example.com" class="bare">https://example.com</a> &lt;1&gt;
and &lt;more text&gt; on the<br>
<strong>next</strong> lines with a link to <a href="https://github.com" class="bare">https://github.com</a>

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'normal' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "normal")
				expected := `<div class="verseblock">
<pre class="content">a link to <a href="https://example.com" class="bare">https://example.com</a> &lt;1&gt;
and &lt;more text&gt; on the<br>
<strong>next</strong> lines with a link to <a href="https://github.com" class="bare">https://github.com</a>

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'quotes' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "quotes")
				expected := `<div class="verseblock">
<pre class="content">a link to https://example.com[] <1>
and <more text> on the +
<strong>next</strong> lines with a link to {github-url}[]

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'macros' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "macros")
				expected := `<div class="verseblock">
<pre class="content">a link to <a href="https://example.com" class="bare">https://example.com</a> <1>
and <more text> on the +
*next* lines with a link to {github-url}[]

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'attributes' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "attributes")
				expected := `<div class="verseblock">
<pre class="content">a link to https://example.com[] <1>
and <more text> on the +
*next* lines with a link to https://github.com[]

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'attributes,macros' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "attributes,macros")
				expected := `<div class="verseblock">
<pre class="content">a link to <a href="https://example.com" class="bare">https://example.com</a> <1>
and <more text> on the +
*next* lines with a link to <a href="https://github.com" class="bare">https://github.com</a>

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'specialchars' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "specialchars")
				expected := `<div class="verseblock">
<pre class="content">a link to https://example.com[] &lt;1&gt;
and &lt;more text&gt; on the +
*next* lines with a link to {github-url}[]

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'replacements' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "replacements")
				expected := `<div class="verseblock">
<pre class="content">a link to https://example.com[] <1>
and <more text> on the +
*next* lines with a link to {github-url}[]

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'post_replacements' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "post_replacements")
				expected := `<div class="verseblock">
<pre class="content">a link to https://example.com[] <1>
and <more text> on the<br>
*next* lines with a link to {github-url}[]

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'quotes,macros' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "quotes,macros")
				expected := `<div class="verseblock">
<pre class="content">a link to <a href="https://example.com" class="bare">https://example.com</a> <1>
and <more text> on the +
<strong>next</strong> lines with a link to {github-url}[]

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'macros,quotes' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "macros,quotes")
				expected := `<div class="verseblock">
<pre class="content">a link to <a href="https://example.com" class="bare">https://example.com</a> <1>
and <more text> on the +
<strong>next</strong> lines with a link to {github-url}[]

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
			It("should apply the 'none' substitution", func() {
				s := strings.ReplaceAll(source, "$SUBS", "none")
				expected := `<div class="verseblock">
<pre class="content">a link to https://example.com[] <1>
and <more text> on the +
*next* lines with a link to {github-url}[]

* not a list item</pre>
<div class="attribution">
— john doe<br>
<cite>verse title</cite>
</div>
</div>
<div class="colist arabic">
<ol>
<li>
<p>a callout</p>
</li>
</ol>
</div>
`
				Expect(RenderHTML(s)).To(MatchHTML(expected))
			})
		})
	})
})
|
package refree
import (
"errors"
"log"
"math"
"github.com/samtholiya/virtual-ping-pong/core/game"
"github.com/samtholiya/virtual-ping-pong/core/player"
"github.com/samtholiya/virtual-ping-pong/core/result"
)
// refree coordinates a single-elimination table-tennis tournament: it
// registers players, pairs them into games round by round, and accumulates
// the results.
// NOTE(review): "refree" looks like a misspelling of "referee"; renaming
// would touch the package and its callers, so it is only flagged here.
type refree struct {
	// playersLen is the total number of registered players.
	playersLen int
	// players holds the players still in the tournament.
	players []player.Player
	// game appears unused by the methods visible here — TODO confirm before removing.
	game game.Game
	// roundsCount is log2(playersLen): the number of knockout rounds.
	roundsCount int
	// results accumulates every game result across all rounds.
	results []result.Result
	// currentPlayerLen is the number of players left in the current round.
	currentPlayerLen int
	// gameCreator builds a fresh game for each pairing.
	gameCreator game.Creator
}
// SetPlayerCount validates count and derives the number of elimination
// rounds from it. count must be a positive power of two (each round halves
// the field); otherwise an error is returned and no state changes.
func (r *refree) SetPlayerCount(count int) error {
	// Fix: guard non-positive counts. math.Log2(0) is -Inf and Log2 of a
	// negative number is NaN; converting either to int64 is undefined, so
	// the old power-of-two check could misbehave on bad input.
	if count < 1 {
		return errors.New("Player count is invalid")
	}
	logValue := math.Log2(float64(count))
	if logValue != float64(int64(logValue)) {
		return errors.New("Player count is invalid")
	}
	r.roundsCount = int(logValue)
	r.playersLen = count
	r.currentPlayerLen = count
	return nil
}
// StartRounds runs the whole tournament: it blocks until all players have
// registered on playerChannel, plays every knockout round, and finally
// sends the accumulated results on resultChannel.
func (r *refree) StartRounds(playerChannel chan player.Player, resultChannel chan []result.Result) {
	r.results = []result.Result{}
	r.waitForPlayers(playerChannel)
	for round := 1; round <= r.roundsCount; round++ {
		r.executeRounds(round)
	}
	resultChannel <- r.results
}
// executeRounds plays one tournament round at the given level: adjacent
// players are paired into games that run concurrently, results are
// collected, and the winners become the field for the next round.
func (r *refree) executeRounds(level int) {
	// Idiom fix: unbuffered channels (make(chan T, 0) is just a noisy
	// spelling of an unbuffered channel).
	resultChannel := make(chan []result.Result)
	winnerPlayerChannel := make(chan player.Player)
	for i := 0; i < r.currentPlayerLen; i += 2 {
		// Renamed from "game" to avoid shadowing the imported game package.
		g := r.gameCreator()
		g.AddAttacker(r.players[i])
		g.AddDefender(r.players[i+1])
		g.SetLevel(level)
		go g.Play(resultChannel, winnerPlayerChannel)
	}
	// One result batch and one winner per game.
	winners := make([]player.Player, 0, r.currentPlayerLen/2)
	for i := 0; i < r.currentPlayerLen/2; i++ {
		r.results = append(r.results, <-resultChannel...)
		winners = append(winners, <-winnerPlayerChannel)
	}
	r.players = winners
	r.currentPlayerLen = len(winners)
}
// waitForPlayers blocks until playersLen players have arrived on
// playerChannel, logging each registration.
func (r *refree) waitForPlayers(playerChannel chan player.Player) {
	// Pre-size: the final length is known.
	r.players = make([]player.Player, 0, r.playersLen)
	for i := 0; i < r.playersLen; i++ {
		// Renamed from "player" to avoid shadowing the imported player package.
		p := <-playerChannel
		log.Printf("[INFO] Player :%v registered\n", p.GetName())
		r.players = append(r.players, p)
	}
}
// SetGameCreator installs the factory used to build a game per pairing.
// A nil creator is rejected.
func (r *refree) SetGameCreator(gameCreator game.Creator) error {
	if gameCreator == nil {
		// Fixed message: the old text ("Error can not be nil") described
		// the wrong thing.
		return errors.New("game creator can not be nil")
	}
	r.gameCreator = gameCreator
	return nil
}
// GetResults returns the results accumulated so far.
func (r *refree) GetResults() []result.Result {
	// Bug fix: the previous implementation always returned nil, despite
	// its documentation and the results field being populated elsewhere.
	return r.results
}
// GetGameCreator returns the installed game factory (nil if unset).
// Consistency fix: switched to a pointer receiver like every other method
// on refree, so the type has a uniform method set.
func (r *refree) GetGameCreator() game.Creator {
	return r.gameCreator
}
|
package main
import (
"fmt"
)
// Given a string, find the length of the longest substring without repeating characters.
// Example 1:
// Input: "abcabcbb"
// Output: 3
// Explanation: The answer is "abc", with the length of 3.
// Example 2:
// Input: "bbbbb"
// Output: 1
// Explanation: The answer is "b", with the length of 1.
// Example 3:
// Input: "pwwkew"
// Output: 3
// Explanation: The answer is "wke", with the length of 3.
// Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
// Optimized Solution : (konfucius):
// func lengthOfLongestSubstring(s string) int {
// smap := make(map[byte]int)
// cur := 0
// maxlen := 0
// for i:=0; i<len(s); i++ {
// if last, ok := smap[s[i]]; ok {
// if last >= cur {
// cur = last + 1 //Move to the next non repeat position
// }
// }
// if i - cur + 1 > maxlen {
// maxlen = i - cur + 1
// }
// smap[s[i]] = i
// }
// return maxlen
// }
// My shitty go solution
// lengthOfLongestSubstring returns the length of the longest substring of s
// without repeating bytes (LeetCode 3). Returns 0 for the empty string.
//
// Replaces the previous slice-of-slices implementation, which carried its
// debug fmt.Println output into production and rebuilt string slices on
// every repeat. This is the standard sliding-window approach: lastSeen maps
// each byte to its most recent index; start marks the left edge of the
// current repeat-free window.
func lengthOfLongestSubstring(s string) int {
	lastSeen := make(map[byte]int, len(s))
	start := 0
	maxLen := 0
	for i := 0; i < len(s); i++ {
		// A repeat inside the current window moves the window's left edge
		// just past the previous occurrence.
		if last, ok := lastSeen[s[i]]; ok && last >= start {
			start = last + 1
		}
		if window := i - start + 1; window > maxLen {
			maxLen = window
		}
		lastSeen[s[i]] = i
	}
	return maxLen
}
// main exercises lengthOfLongestSubstring on two sample inputs and prints
// the resulting lengths. Both calls run before either result is printed,
// preserving the original interleaving of output.
func main() {
	first := lengthOfLongestSubstring("pwwkew")
	second := lengthOfLongestSubstring("aabaab!bb")
	fmt.Println(first)
	fmt.Println(second)
}
|
package user
import (
"fmt"
"log"
"net/http"
)
// Articles represents one row of the home-page article listing, as scanned
// from the SelectHome query and rendered by the "home.html" template.
// NOTE(review): Go convention would spell the ID fields ArticleID/UserID,
// and a single row would be named Article; renaming would break the
// template and any callers, so it is only flagged here.
type Articles struct {
	ArticleId int32  `json:"article_id"`
	UserId    int32  `json:"user_id"`
	Contents  string `json:"contents"`
	Published bool   `json:"published"`
}
// HomeUser handles the user home page: it loads all articles via the
// SelectHome prepared query and renders them with the "home.html" template.
// Errors are logged and the handler returns without writing a body.
func (m *Module) HomeUser(w http.ResponseWriter, r *http.Request) {
	fmt.Println("MASUK 0") // debug leftover; kept so the file-level fmt import stays used — remove both together
	rows, err := m.Queries.SelectHome.Query()
	if err != nil {
		// Fixed message: this is a SELECT, not an insert.
		log.Println("Failed to query home articles:", err)
		return
	}
	// Fix: rows was never closed, leaking the underlying DB connection.
	defer rows.Close()
	fmt.Println("MASUK 1")

	var rowsScanArr []Articles
	for rows.Next() {
		var rowsScan Articles
		if err := rows.Scan(&rowsScan.ArticleId, &rowsScan.UserId, &rowsScan.Contents, &rowsScan.Published); err != nil {
			log.Println("Failed to scan article row:", err)
			return
		}
		rowsScanArr = append(rowsScanArr, rowsScan)
	}
	// Fix: iteration errors were silently dropped before.
	if err := rows.Err(); err != nil {
		log.Println("Row iteration failed:", err)
		return
	}

	if err := m.Template.ExecuteTemplate(w, "home.html", rowsScanArr); err != nil {
		log.Println(`error execute template login, err : `, err)
		return
	}
}
|
package main
import (
"fmt"
)
// printMatrix writes each row of m on its own line, Go-quoted (%q), so
// spaces and non-printable bytes are visible.
func printMatrix(m [][]byte) {
	for _, row := range m {
		fmt.Printf("%q\n", row)
	}
}
// fall mutates the square board m in place, simulating gravity: each '.'
// (a falling object) directly above a ' ' (empty) cell drops straight down
// until it rests on a non-empty cell or the bottom row. Any other byte acts
// as solid ground. Assumes m is n x n — TODO confirm ragged boards are
// never passed.
func fall(m [][]byte) {
	n:=len(m)
	// Scan rows bottom-up; the inner k-loop slides a single dot from row
	// i-1 downward while the cell below it is still empty. Processing from
	// the bottom first lets dots higher up fall through space vacated in
	// the same pass.
	for i:=n-1;i>0;i-- {
		for j:=0;j<n;j++ {
			if m[i-1][j] == '.' && m[i][j] == ' ' {
				for k:=i;k<n && m[k][j] == ' ';k++ {
					// Move the dot one cell down: vacate k-1, occupy k.
					m[k-1][j] = ' '
					m[k][j] = '.'
				}
			}
		}
	}
}
func main() {
var n int
fmt.Scan(&n)
m := make([][]byte, n)
for i:=0; i<n; i++ {
m[i] = make([]byte, n)
for j:=0; j<n; j++ {
fmt.Scanf("%c", &m[i][j])
}
fmt.Scanln()
}
fmt.Println("\nBefore:")
printMatrix(m)
fall(m)
fmt.Println("\nAfter:")
printMatrix(m)
} |
package controller
import (
"database/sql"
"encoding/csv"
"log"
"net/http"
"os"
"regexp"
"strconv"
"time"
"../model"
"github.com/gin-gonic/gin"
)
// TaskController serves the task CRUD HTTP endpoints and owns the shared
// database handle used by all of them.
type TaskController struct {
	db *sql.DB
}
// New constructs a TaskController bound to the given database handle.
func New(db *sql.DB) *TaskController {
	controller := TaskController{db: db}
	return &controller
}
// RegisterRouter ensures the task and receive tables exist (fatal on
// failure, since the handlers cannot work without them) and mounts all
// task endpoints on r.
// NOTE(review): several route paths contain typos ("descripty",
// "reveiver", "cancle"). They are part of the public URL surface, so
// fixing them would break existing clients — flagged only.
func (t *TaskController) RegisterRouter(r gin.IRouter) {
	err := model.CreateTaskTable(t.db)
	if err != nil {
		log.Fatal(err)
	}
	err = model.CreateReceiveTable(t.db)
	if err != nil {
		log.Fatal(err)
	}

	r.POST("/post", t.publish)
	r.POST("/delete", t.deleteByID)
	r.POST("/info/id", t.infoByID)
	r.POST("/info/all", t.infoAll)
	r.GET("/info/download", t.infoAllCsv)
	r.POST("/info/descripty", t.updateDescription)
	r.POST("/info/reveiver", t.infoReveiverBytask)
	r.POST("/user/task", t.infoTaskByuser)
	r.POST("/task/receive", t.receive)
	r.POST("/task/cancle", t.deleteReceive)
}
// publish creates a new task from the JSON request body. Name and
// description must be non-empty; CreateTime is stamped server-side.
func (t *TaskController) publish(c *gin.Context) {
	task := model.Task{}
	if err := c.BindJSON(&task); err != nil {
		c.Error(err)
		c.JSON(http.StatusBadRequest, gin.H{"status": http.StatusBadRequest})
		return
	}
	if len(task.Name) == 0 {
		c.JSON(http.StatusBadRequest, gin.H{"status": http.StatusBadRequest, "msg": "task name can't be empty"})
		return
	}
	if len(task.Description) == 0 {
		// Fixed typo in the client-visible message ("descripty").
		c.JSON(http.StatusBadRequest, gin.H{"status": http.StatusBadRequest, "msg": "task description can't be empty"})
		return
	}
	task.CreateTime = time.Now()
	if _, err := model.InsertTask(t.db, task.Name, task.Description, task.CreateTime, task.Poster); err != nil {
		c.Error(err)
		c.JSON(http.StatusBadGateway, gin.H{"status": http.StatusBadGateway})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": http.StatusOK})
}
// deleteByID removes the task whose id is given in the JSON request body.
func (t *TaskController) deleteByID(c *gin.Context) {
	req := struct {
		ID int `json:"id"`
	}{}
	if err := c.ShouldBind(&req); err != nil {
		c.Error(err)
		c.JSON(http.StatusBadRequest, gin.H{"status": http.StatusBadRequest})
		return
	}
	if err := model.DeleteByID(t.db, req.ID); err != nil {
		c.Error(err)
		c.JSON(http.StatusBadGateway, gin.H{"status": http.StatusBadGateway})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": http.StatusOK})
}
// infoByID responds with the single task whose id is given in the JSON
// request body.
func (t *TaskController) infoByID(c *gin.Context) {
	req := struct {
		ID int `json:"id"`
	}{}
	if err := c.ShouldBind(&req); err != nil {
		c.Error(err)
		c.JSON(http.StatusBadRequest, gin.H{"status": http.StatusBadRequest})
		return
	}
	task, err := model.InfoByID(t.db, req.ID)
	if err != nil {
		c.Error(err)
		c.JSON(http.StatusBadGateway, gin.H{"status": http.StatusBadGateway})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": http.StatusOK, "task": task})
}
// infoAll responds with every task in the database under the "ban" key.
func (t *TaskController) infoAll(c *gin.Context) {
	tasks, err := model.InfoAllTask(t.db)
	if err != nil {
		c.Error(err)
		c.JSON(http.StatusBadGateway, gin.H{"status": http.StatusBadGateway})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": http.StatusOK, "ban": tasks})
}
// infoAllCsv exports every task as a CSV file ("task.csv") and sends it to
// the client as a download.
//
// Fixes over the previous version: the stray csv.NewReader call (a reader
// cannot write) is replaced by csv.NewWriter, so commas/quotes inside task
// fields are escaped correctly; write errors answer the request instead of
// calling log.Fatal (which killed the whole server); and the generated
// file is actually written to the HTTP response, which the handler never
// did before.
func (t *TaskController) infoAllCsv(c *gin.Context) {
	tasks, err := model.InfoAllTask(t.db)
	if err != nil {
		c.Error(err)
		c.JSON(http.StatusBadGateway, gin.H{"status": http.StatusBadGateway})
		return
	}
	file, err := os.Create("task.csv")
	if err != nil {
		c.Error(err)
		c.JSON(http.StatusBadGateway, gin.H{"status": http.StatusBadGateway})
		return
	}
	defer file.Close()

	w := csv.NewWriter(file)
	for _, task := range tasks {
		record := []string{strconv.Itoa(task.ID), task.Name, task.Description, task.Poster}
		if err := w.Write(record); err != nil {
			c.Error(err)
			c.JSON(http.StatusInternalServerError, gin.H{"status": http.StatusInternalServerError})
			return
		}
	}
	w.Flush()
	if err := w.Error(); err != nil {
		c.Error(err)
		c.JSON(http.StatusInternalServerError, gin.H{"status": http.StatusInternalServerError})
		return
	}
	c.File("task.csv")
}
// updateDescription changes a task's description. Only the original poster
// may update it: the requesting user name must match the stored poster
// name exactly.
func (t *TaskController) updateDescription(c *gin.Context) {
	var req struct {
		Description string `json:"description"`
		ID          int    `json:"id"`
		User        string `json:"user"`
	}
	if err := c.ShouldBind(&req); err != nil {
		c.Error(err)
		c.JSON(http.StatusBadRequest, gin.H{"status": http.StatusBadRequest})
		return
	}
	poster, err := model.InfoPosterNameByID(t.db, req.ID)
	if err != nil {
		c.Error(err)
		c.JSON(http.StatusBadRequest, gin.H{"status": http.StatusBadRequest})
		return
	}
	// Security fix: the user name was previously used as a raw regular
	// expression matched anywhere inside the poster name, so crafted input
	// (e.g. ".*" or a substring) could pass the permission check. QuoteMeta
	// plus anchors turns this into an exact string comparison.
	ok, err := regexp.MatchString("^"+regexp.QuoteMeta(req.User)+"$", poster)
	if err != nil || !ok {
		// Previous code called c.Error(err) here with a nil error.
		c.JSON(http.StatusOK, gin.H{"status": http.StatusOK, "msg": "Without Permission"})
		return
	}
	if err := model.UpdateDescriptionByID(t.db, req.ID, req.Description); err != nil {
		c.Error(err)
		c.JSON(http.StatusBadRequest, gin.H{"status": http.StatusBadRequest})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": http.StatusOK})
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package benchmark provides utilities for running Google Benchmark binaries on
// device.
package benchmark
import (
"context"
"encoding/json"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
)
// Context is a struct representing the device context as probed by the
// benchmark binary. Fields mirror the "context" object of Google
// Benchmark's JSON output.
// NOTE(review): the Cached field deserializes the "caches" key; renaming
// it to Caches would be clearer but would break users of this struct, so
// it is only flagged here.
type Context struct {
	Date              string  `json:"date"`
	HostName          string  `json:"host_name"`
	Executable        string  `json:"executable"`
	NumCPUs           int     `json:"num_cpus"`
	MhzPerCPU         int     `json:"mhz_per_cpu"`
	CPUScalingEnabled bool    `json:"cpu_scaling_enabled"`
	Cached            []struct {
		Type       string `json:"type"`
		Level      int    `json:"level"`
		Size       int    `json:"size"`
		NumSharing int    `json:"num_sharing"`
	} `json:"caches"`
	LoadAvg          []float32 `json:"load_avg"`
	LibraryBuildType string    `json:"library_build_type"`
}
// Output is a struct representing one benchmark execution output. Fields
// mirror one entry of the "benchmarks" array in Google Benchmark's JSON
// output; RealTime and CPUTime are expressed in TimeUnit units.
type Output struct {
	Name                   string  `json:"name"`
	FamilyIndex            int     `json:"family_index"`
	PerFamilyInstanceIndex int     `json:"per_family_instance_index"`
	RunName                string  `json:"run_name"`
	RunType                string  `json:"run_type"`
	Repetitions            int     `json:"repetitions"`
	RepetitionIndex        int     `json:"repetition_index"`
	Threads                int     `json:"threads"`
	Iterations             int     `json:"iterations"`
	RealTime               float64 `json:"real_time"`
	CPUTime                float64 `json:"cpu_time"`
	TimeUnit               string  `json:"time_unit"`
}
// Result is a struct representing the complete result of one benchmark
// run: the probed device context plus one Output per executed benchmark.
type Result struct {
	Context    Context  `json:"context"`
	Benchmarks []Output `json:"benchmarks"`
}
// Format defines the output formatting of the benchmark run. The zero
// value is Console.
type Format int

const (
	// Console sets the output formatting to human-readable texts. This is
	// the default used by Google Benchmark.
	Console Format = iota
	// JSON sets the output formatting to JSON.
	JSON
	// CSV sets the output format to CSV.
	CSV
)
// Benchmark encapsulates all the context for running a benchmark binary.
// Configure it through New and the functional options below.
type Benchmark struct {
	// executable is the path to the benchmark executable binary.
	executable string
	// filter is a string pattern as defined by the Google Benchmark for
	// specifying the sub-benchmark(s) to run.
	filter string
	// outFile is an optional file path for storing another copy of the
	// benchmark result in addition to the one written to stdout.
	outFile string
	// outResultFormat specifies the output formatting of the benchmark
	// result written to outFile.
	outResultFormat Format
	// extraArgs is a list of strings specifying the extra arguments that
	// will be fed to the benchmark binary.
	extraArgs []string
}
// option is a self-referencing closure for configuring Benchmark
// (functional-option pattern; applied in order by New).
type option func(b *Benchmark)

// Filter sets the benchmark name filter pattern
// (forwarded as --benchmark_filter).
func Filter(pattern string) option {
	return func(b *Benchmark) { b.filter = pattern }
}

// OutputFile sets the additional output file path
// (forwarded as --benchmark_out).
func OutputFile(file string) option {
	return func(b *Benchmark) { b.outFile = file }
}

// OutputResultFormat sets the output formatting for the additional output
// file (forwarded as --benchmark_out_format).
func OutputResultFormat(format Format) option {
	return func(b *Benchmark) { b.outResultFormat = format }
}

// ExtraArgs sets a list of arguments that will be passed to the benchmark
// binary verbatim, after the generated flags.
func ExtraArgs(args ...string) option {
	return func(b *Benchmark) { b.extraArgs = args }
}
// New creates a Benchmark instance for the given executable path, applying
// the supplied configuration options in order.
func New(exec string, opts ...option) *Benchmark {
	b := &Benchmark{executable: exec}
	for _, apply := range opts {
		apply(b)
	}
	return b
}
// Args returns the full command line for the benchmark execution: the
// executable followed by the flags derived from this Benchmark's
// configuration and any extra arguments.
func (b *Benchmark) Args() []string {
	// stdout is always JSON so Run can parse it into a Result.
	args := []string{b.executable, "--benchmark_format=json"}
	if len(b.filter) > 0 {
		args = append(args, "--benchmark_filter="+b.filter)
	}
	if len(b.outFile) > 0 {
		args = append(args, "--benchmark_out="+b.outFile)
		// Translate the Format enum into Google Benchmark's flag value;
		// unknown values emit no format flag (same as before).
		formatNames := map[Format]string{
			Console: "console",
			JSON:    "json",
			CSV:     "csv",
		}
		if name, ok := formatNames[b.outResultFormat]; ok {
			args = append(args, "--benchmark_out_format="+name)
		}
	}
	// Appending an empty extraArgs slice is a no-op, so no guard needed.
	args = append(args, b.extraArgs...)
	return args
}
// Run executes the benchmark binary on the device and parses its JSON
// stdout into a Result. A non-nil error means the binary failed to run or
// produced unparseable output.
func (b *Benchmark) Run(ctx context.Context) (*Result, error) {
	cmdline := b.Args()
	cmd := testexec.CommandContext(ctx, cmdline[0], cmdline[1:]...)
	out, err := cmd.Output(testexec.DumpLogOnError)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to run the benchmark binary: %s", b.executable)
	}
	result := new(Result)
	if err := json.Unmarshal(out, result); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal output bytes to JSON-based Result")
	}
	return result, nil
}
|
package registry
import (
"context"
"fmt"
regv1 "github.com/tmax-cloud/registry-operator/api/v1"
cmhttp "github.com/tmax-cloud/registry-operator/internal/common/http"
"github.com/tmax-cloud/registry-operator/internal/schemes"
"github.com/tmax-cloud/registry-operator/internal/utils"
"github.com/tmax-cloud/registry-operator/pkg/image"
"github.com/tmax-cloud/registry-operator/pkg/registry/base"
extfactory "github.com/tmax-cloud/registry-operator/pkg/registry/ext/factory"
intfactory "github.com/tmax-cloud/registry-operator/pkg/registry/inter/factory"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// RegistryFactory builds a base.Registry client for a given registry type
// (internal hpcd registries vs. external DockerHub/Docker/Harbor ones).
type RegistryFactory interface {
	Create(registryType regv1.RegistryType) base.Registry
}
// GetFactory returns the factory matching registryType: the internal
// factory for hpcd registries, the external one for DockerHub/Docker/
// HarborV2, and nil for any unknown type (callers must check for nil).
func GetFactory(registryType regv1.RegistryType, f *base.Factory) RegistryFactory {
	switch registryType {
	case regv1.RegistryTypeHpcdRegistry:
		return intfactory.NewRegistryFactory(f.K8sClient, f.NamespacedName, f.Scheme, f.HttpClient)
	case regv1.RegistryTypeDockerHub, regv1.RegistryTypeDocker, regv1.RegistryTypeHarborV2:
		return extfactory.NewRegistryFactory(f.K8sClient, f.NamespacedName, f.Scheme, f.HttpClient)
	}
	return nil
}
// GetHTTPClient returns an HTTP client configured for the registry that
// hosts image: it resolves the registry URL, basic-auth credentials (from
// the registry's image pull secret, when one exists) and CA data (from the
// registry's certificate secret, when one exists). When no CA is found the
// client is built with the insecure flag set (len(ca) == 0).
func GetHTTPClient(client client.Client, image *regv1.ImageInfo) (*cmhttp.HttpClient, error) {
	registry := types.NamespacedName{Namespace: image.RegistryNamespace, Name: image.RegistryName}
	url, err := GetURL(client, registry, image.RegistryType)
	if err != nil {
		return nil, err
	}

	// Credentials default to anonymous; only filled in when the registry
	// has a login secret.
	username, password := "", ""
	imagePullSecret, err := GetLoginSecret(client, registry, image.RegistryType)
	if err != nil {
		return nil, err
	}
	base.Logger.Info("get", "imagePullSecret", imagePullSecret, "namespace", registry.Namespace)
	if imagePullSecret != "" {
		basic, err := utils.GetBasicAuth(imagePullSecret, registry.Namespace, url)
		if err != nil {
			return nil, err
		}
		username, password = utils.DecodeBasicAuth(basic)
	}

	var ca []byte
	certificateSecret, err := GetCertSecret(client, registry, image.RegistryType)
	if err != nil {
		return nil, err
	}
	base.Logger.Info("get", "certificateSecret", certificateSecret, "namespace", registry.Namespace)
	if certificateSecret != "" {
		data, err := utils.GetCAData(certificateSecret, registry.Namespace)
		if err != nil {
			return nil, err
		}
		ca = data
	}

	// Disabled keycloak-CA handling kept for reference — TODO confirm
	// whether it can be deleted or must be restored.
	// if image.RegistryType == regv1.RegistryTypeHpcdRegistry {
	// 	secret, err := certs.GetSystemKeycloakCert(client)
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// 	base.Logger.Info("get", "certificateSecret", secret.Name, "namespace", secret.Namespace)
	// 	if secret != nil {
	// 		data, err := utils.GetCAData(secret.Name, secret.Namespace)
	// 		if err != nil {
	// 			return nil, err
	// 		}
	// 		ca = append(ca, data...)
	// 	}
	// }

	return cmhttp.NewHTTPClient(
		url,
		username, password,
		ca,
		len(ca) == 0, // no CA data -> treat the endpoint as insecure
	), nil
}
// GetLoginSecret returns the name of the secret holding login credentials
// for the given registry: the derived DCJ secret name for internal hpcd
// registries, the status-reported login secret for external ones, and ""
// (no error) for unknown registry types.
func GetLoginSecret(client client.Client, registry types.NamespacedName, registryType regv1.RegistryType) (string, error) {
	switch registryType {
	case regv1.RegistryTypeHpcdRegistry:
		reg := &regv1.Registry{}
		if err := client.Get(context.TODO(), registry, reg); err != nil {
			return "", err
		}
		return schemes.SubresourceName(reg, schemes.SubTypeRegistryDCJSecret), nil
	case regv1.RegistryTypeDockerHub, regv1.RegistryTypeDocker, regv1.RegistryTypeHarborV2:
		exreg := &regv1.ExternalRegistry{}
		if err := client.Get(context.TODO(), registry, exreg); err != nil {
			return "", err
		}
		return exreg.Status.LoginSecret, nil
	}
	return "", nil
}
// GetCertSecret returns the name of the secret holding the registry's TLS
// certificate: the derived TLS secret name for internal hpcd registries,
// the spec-declared certificate secret for external ones, and "" (no
// error) for unknown registry types.
func GetCertSecret(client client.Client, registry types.NamespacedName, registryType regv1.RegistryType) (string, error) {
	switch registryType {
	case regv1.RegistryTypeHpcdRegistry:
		reg := &regv1.Registry{}
		if err := client.Get(context.TODO(), registry, reg); err != nil {
			return "", err
		}
		return schemes.SubresourceName(reg, schemes.SubTypeRegistryTLSSecret), nil
	case regv1.RegistryTypeDockerHub, regv1.RegistryTypeDocker, regv1.RegistryTypeHarborV2:
		exreg := &regv1.ExternalRegistry{}
		if err := client.Get(context.TODO(), registry, exreg); err != nil {
			return "", err
		}
		return exreg.Spec.CertificateSecret, nil
	}
	return "", nil
}
// GetURL returns the registry's server URL: the status URL for internal
// hpcd registries, the well-known default server for DockerHub, and the
// spec-declared URL for Docker/HarborV2 registries. Unlike the sibling
// getters, an unknown registry type is an error here.
func GetURL(client client.Client, registry types.NamespacedName, registryType regv1.RegistryType) (string, error) {
	switch registryType {
	case regv1.RegistryTypeHpcdRegistry:
		reg := &regv1.Registry{}
		if err := client.Get(context.TODO(), registry, reg); err != nil {
			return "", err
		}
		return reg.Status.ServerURL, nil
	case regv1.RegistryTypeDockerHub:
		return image.DefaultServer, nil
	case regv1.RegistryTypeDocker, regv1.RegistryTypeHarborV2:
		exreg := &regv1.ExternalRegistry{}
		if err := client.Get(context.TODO(), registry, exreg); err != nil {
			return "", err
		}
		return exreg.Spec.RegistryURL, nil
	}
	return "", fmt.Errorf("%s/%s(type:%s) registry url is not found", registry.Namespace, registry.Name, registryType)
}
|
// Copyright (c) 2019, Arm Ltd
package main
import (
	"strconv"

	"github.com/golang/glog"
	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
)
// check logs err via glog when it is non-nil; a nil err is a no-op.
func check(err error) {
	if err != nil {
		// Fix: the error text was previously passed as the printf format
		// string, so a '%' in the message would be misinterpreted.
		glog.Errorf("%s", err)
	}
}
// getDevices returns n device descriptors with decimal IDs "0".."n-1",
// all reported healthy.
func getDevices(n uint) []*pluginapi.Device {
	devs := make([]*pluginapi.Device, 0, n)
	for i := uint(0); i < n; i++ {
		devs = append(devs, &pluginapi.Device{
			// Bug fix: string(i) produced the rune with code point i
			// (e.g. 0 -> "\x00"), not its decimal representation.
			ID:     strconv.FormatUint(uint64(i), 10),
			Health: pluginapi.Healthy,
		})
	}
	return devs
}
// deviceExists reports whether devs contains a device with the given id.
func deviceExists(devs []*pluginapi.Device, id string) bool {
	for _, dev := range devs {
		if dev.ID != id {
			continue
		}
		return true
	}
	return false
}
|
package carmaclient
import (
"encoding/json"
"errors"
"fmt"
"github.com/moonwalker/carmaclient/dto"
"net/http"
"net/url"
)
// ListsService exposes the Carma "lists" API endpoints (contact lookup and
// update). It shares the HTTP client carried by the underlying service type.
type ListsService service
// GetContact fetches the contact identified by originalID from the given
// list. A 404 from the API is surfaced as an error with the status text;
// other responses are decoded into a ContactDTO.
func (s *ListsService) GetContact(listID int64, originalID string) (*dto.ContactDTO, error) {
	endpoint := fmt.Sprintf("lists/%d/contacts/%s", listID, url.QueryEscape(originalID))
	response := s.client.carmaRequest(endpoint, http.MethodGet, nil)
	if response.err != nil {
		return nil, response.err
	}
	if response.statusCode == http.StatusNotFound {
		return nil, errors.New(http.StatusText(response.statusCode))
	}
	contact := new(dto.ContactDTO)
	if err := json.Unmarshal(response.data, contact); err != nil {
		return nil, err
	}
	return contact, nil
}
// PutContactUpdate force-updates a contact in the given list and returns
// the updated contact as decoded from the response body.
func (s *ListsService) PutContactUpdate(listID int64, originalID string, contact dto.ContactDTO) (*dto.ContactDTO, error) {
	response := s.client.carmaRequest(fmt.Sprintf("lists/%d/contacts/%s/update?force=true", listID, url.QueryEscape(originalID)), http.MethodPut, contact)
	if response.err != nil {
		return nil, response.err
	}
	// Consistency with GetContact: surface 404 explicitly instead of
	// failing later with a confusing JSON unmarshal error on an error body.
	if response.statusCode == http.StatusNotFound {
		return nil, errors.New(http.StatusText(response.statusCode))
	}
	responseDTO := &dto.ContactDTO{}
	if err := json.Unmarshal(response.data, responseDTO); err != nil {
		return nil, err
	}
	return responseDTO, nil
}
|
package business
import (
"context"
"errors"
"main/core/models"
"time"
"github.com/pascaldekloe/jwt"
"go.mongodb.org/mongo-driver/mongo"
"golang.org/x/crypto/bcrypt"
)
type AuthBusiness struct {
DB *mongo.Database
signer *jwt.HMAC
}
// NewAuthBusiness builds an AuthBusiness backed by the given database,
// signing JWTs with an HS512 HMAC derived from key.
func NewAuthBusiness(DB *mongo.Database, key string) (*AuthBusiness, error) {
	signer, err := jwt.NewHMAC(jwt.HS512, []byte(key))
	if err != nil {
		return &AuthBusiness{}, err
	}
	business := &AuthBusiness{DB: DB, signer: signer}
	return business, nil
}
// Register creates an auth record for user, storing a bcrypt hash of the
// password. It fails when an account with the same email already exists.
func (b *AuthBusiness) Register(user *models.UserAuth) error {
	result := b.DB.Collection("auth").FindOne(context.TODO(), map[string]interface{}{
		"email": user.Email,
	})
	err := result.Decode(&map[string]interface{}{})
	if err == nil {
		return errors.New(user.Email + " has already existed")
	}
	// Fix: only "no documents" means the email is free. Previously ANY
	// Decode error (e.g. a connection failure) was treated as "not found",
	// allowing a duplicate insert.
	if !errors.Is(err, mongo.ErrNoDocuments) {
		return err
	}
	// NOTE(review): cost 7 is below bcrypt.DefaultCost (10); consider
	// raising it — confirm acceptable login latency first.
	hash, hashError := bcrypt.GenerateFromPassword([]byte(user.Password), 7)
	if hashError != nil {
		return errors.New("Invalid password")
	}
	hashString := string(hash)
	_, insertError := b.DB.Collection("auth").InsertOne(context.TODO(), map[string]interface{}{
		"email": user.Email,
		"hpass": hashString,
	})
	if insertError != nil {
		return errors.New("Cannot create account [" + user.Email + "]")
	}
	return nil
}
// Login verifies the user's password against the stored bcrypt hash and,
// on success, returns a signed JWT (valid 30 minutes) whose claims carry
// the user's profile document (or just the email if no profile exists).
func (b *AuthBusiness) Login(user *models.UserAuth) (string, error) {
	result := b.DB.Collection("auth").FindOne(context.TODO(), map[string]interface{}{
		"email": user.Email,
	})
	userInDB := map[string]interface{}{}
	// NOTE(review): Decode is given the map by value here (Register passes
	// a pointer) — confirm the driver accepts both forms.
	if err := result.Decode(userInDB); err != nil {
		return "", errors.New(user.Email + " did not exist")
	}
	// Fix: guard the type assertion — a record without a string "hpass"
	// previously panicked instead of failing the login.
	hpass, ok := userInDB["hpass"].(string)
	if !ok {
		return "", errors.New("Incorrect password")
	}
	if compareError := bcrypt.CompareHashAndPassword([]byte(hpass), []byte(user.Password)); compareError != nil {
		return "", errors.New("Incorrect password")
	}
	profile := b.DB.Collection("users").FindOne(context.TODO(), map[string]interface{}{
		"email": user.Email,
	})
	profileInDB := map[string]interface{}{}
	profileInDB["email"] = user.Email
	// Best effort: a missing profile still yields a token carrying the email.
	profile.Decode(profileInDB)
	claims := jwt.Claims{}
	claims.Issued = jwt.NewNumericTime(time.Now().Round(time.Second))
	claims.Expires = jwt.NewNumericTime(time.Now().Add(30 * time.Minute).Round(time.Second))
	claims.Set = profileInDB
	claims.Issuer = "vn.edu.itss.healthy-food-core"
	token, err := b.signer.Sign(&claims)
	// Fix: check the signing error BEFORE using the token; the original
	// converted token to a string first.
	if err != nil {
		return "", err
	}
	return string(token), nil
}
|
package disassembler
import (
"fmt"
"os"
"strings"
)
// instructions maps each Intel 8080 opcode byte to its mnemonic template.
// Each '%' stands for one operand byte that follows the opcode; "%%" marks
// a 16-bit (two-byte, little-endian) operand.
//
// Fixes: 0xd6 (SUI) takes one immediate byte and was missing its '%'
// marker, which would desynchronize the '%'-counting disassembler; 0xfa is
// the 8080 "JM" (jump if minus) mnemonic, not "JN".
var instructions = map[byte]string{
	// nothing
	0x00: "NOP", 0x10: "NOP", 0x08: "NOP", 0x18: "NOP",
	0x20: "NOP", 0x30: "NOP", 0x28: "NOP", 0x38: "NOP",
	// decimal adjust   halt
	0x27: "DAA", 0x76: "HLT",
	// databus out  in
	0xd3: "OUT %", 0xdb: "IN %",
	// interrupts
	// disable enable
	0xf3: "DI", 0xfb: "EI",
	// shift register A
	// left right
	// use bit wrapping
	0x07: "RLC", 0x0f: "RRC",
	// use carry
	0x17: "RAL", 0x1f: "RAR",
	// A = !A
	0x2f: "CMA",
	// carry = !carry  carry = 1
	0x3f: "CMC", 0x37: "STC",
	// load value into register pair
	0x01: "LXI B, %%", 0x11: "LXI D, %%",
	0x21: "LXI H, %%", 0x31: "LXI SP, %%",
	// load register pair
	0x0a: "LDAX B", 0x1a: "LDAX D",
	// load address  store address
	0x3a: "LDA $%%", 0x32: "STA $%%",
	0x2a: "LHLD $%%", 0x22: "SHLD $%%",
	// Increment Decrement Store value
	// 8 bit
	0x04: "INR B", 0x05: "DCR B", 0x06: "MVI B, %",
	0x0c: "INR C", 0x0d: "DCR C", 0x0e: "MVI C, %",
	0x14: "INR D", 0x15: "DCR D", 0x16: "MVI D, %",
	0x1c: "INR E", 0x1d: "DCR E", 0x1e: "MVI E, %",
	0x24: "INR H", 0x25: "DCR H", 0x26: "MVI H, %",
	0x2c: "INR L", 0x2d: "DCR L", 0x2e: "MVI L, %",
	0x34: "INR M", 0x35: "DCR M", 0x36: "MVI M, %",
	0x3c: "INR A", 0x3d: "DCR A", 0x3e: "MVI A, %",
	// 16 bit register pairs
	0x03: "INX B", 0x0b: "DCX B", 0x02: "STAX B",
	0x13: "INX D", 0x1b: "DCX D", 0x12: "STAX D",
	0x23: "INX H", 0x2b: "DCX H",
	0x33: "INX SP", 0x3b: "DCX SP",
	// add 16 bit register pair to HL
	0x09: "DAD B", 0x19: "DAD D",
	0x29: "DAD H", 0x39: "DAD SP",
	// Register B
	0x40: "MOV B, B", 0x41: "MOV B, C", 0x42: "MOV B, D",
	0x43: "MOV B, E", 0x44: "MOV B, H", 0x45: "MOV B, L",
	0x46: "MOV B, M", 0x47: "MOV B, A",
	// Register C
	0x48: "MOV C, B", 0x49: "MOV C, C", 0x4a: "MOV C, D",
	0x4b: "MOV C, E", 0x4c: "MOV C, H", 0x4d: "MOV C, L",
	0x4e: "MOV C, M", 0x4f: "MOV C, A",
	// Register D
	0x50: "MOV D, B", 0x51: "MOV D, C", 0x52: "MOV D, D",
	0x53: "MOV D, E", 0x54: "MOV D, H", 0x55: "MOV D, L",
	0x56: "MOV D, M", 0x57: "MOV D, A",
	// Register E
	0x58: "MOV E, B", 0x59: "MOV E, C", 0x5a: "MOV E, D",
	0x5b: "MOV E, E", 0x5c: "MOV E, H", 0x5d: "MOV E, L",
	0x5e: "MOV E, M", 0x5f: "MOV E, A",
	// Register H
	0x60: "MOV H, B", 0x61: "MOV H, C", 0x62: "MOV H, D",
	0x63: "MOV H, E", 0x64: "MOV H, H", 0x65: "MOV H, L",
	0x66: "MOV H, M", 0x67: "MOV H, A",
	// Register L
	0x68: "MOV L, B", 0x69: "MOV L, C", 0x6a: "MOV L, D",
	0x6b: "MOV L, E", 0x6c: "MOV L, H", 0x6d: "MOV L, L",
	0x6e: "MOV L, M", 0x6f: "MOV L, A",
	// Register M
	0x70: "MOV M, B", 0x71: "MOV M, C", 0x72: "MOV M, D",
	0x73: "MOV M, E", 0x74: "MOV M, H", 0x75: "MOV M, L",
	0x77: "MOV M, A",
	// Register A
	0x78: "MOV A, B", 0x79: "MOV A, C", 0x7a: "MOV A, D",
	0x7b: "MOV A, E", 0x7c: "MOV A, H", 0x7d: "MOV A, L",
	0x7e: "MOV A, M", 0x7f: "MOV A, A",
	// register A add
	0x80: "ADD B", 0x81: "ADD C", 0x82: "ADD D", 0x83: "ADD E",
	0x84: "ADD H", 0x85: "ADD L", 0x86: "ADD M", 0x87: "ADD A",
	0xc6: "ADI %", // immediate
	// register A add with carry
	0x88: "ADC B", 0x89: "ADC C", 0x8a: "ADC D", 0x8b: "ADC E",
	0x8c: "ADC H", 0x8d: "ADC L", 0x8e: "ADC M", 0x8f: "ADC A",
	0xce: "ACI %", // immediate
	// register A subtract
	0x90: "SUB B", 0x91: "SUB C", 0x92: "SUB D", 0x93: "SUB E",
	0x94: "SUB H", 0x95: "SUB L", 0x96: "SUB M", 0x97: "SUB A",
	0xd6: "SUI %", // immediate (fix: operand marker was missing)
	// register A subtract with carry
	0x98: "SBB B", 0x99: "SBB C", 0x9a: "SBB D", 0x9b: "SBB E",
	0x9c: "SBB H", 0x9d: "SBB L", 0x9e: "SBB M", 0x9f: "SBB A",
	0xde: "SBI %", // immediate
	// register A AND
	0xa0: "ANA B", 0xa1: "ANA C", 0xa2: "ANA D", 0xa3: "ANA E",
	0xa4: "ANA H", 0xa5: "ANA L", 0xa6: "ANA M", 0xa7: "ANA A",
	0xe6: "ANI %", // immediate
	// register A XOR
	0xa8: "XRA B", 0xa9: "XRA C", 0xaa: "XRA D", 0xab: "XRA E",
	0xac: "XRA H", 0xad: "XRA L", 0xae: "XRA M", 0xaf: "XRA A",
	0xee: "XRI %", // immediate
	// register A OR
	0xb0: "ORA B", 0xb1: "ORA C", 0xb2: "ORA D", 0xb3: "ORA E",
	0xb4: "ORA H", 0xb5: "ORA L", 0xb6: "ORA M", 0xb7: "ORA A",
	0xf6: "ORI %", // immediate
	// register A comparison
	0xb8: "CMP B", 0xb9: "CMP C", 0xba: "CMP D", 0xbb: "CMP E",
	0xbc: "CMP H", 0xbd: "CMP L", 0xbe: "CMP M", 0xbf: "CMP A",
	0xfe: "CPI %", // immediate
	// POP register pair
	0xc1: "POP B", 0xd1: "POP D", 0xe1: "POP H", 0xf1: "POP PSW",
	// PUSH register pair
	0xc5: "PUSH B", 0xd5: "PUSH D", 0xe5: "PUSH H", 0xf5: "PUSH PSW",
	// HL <-> STACK[SP]  SP <- HL  H <-> D, L <-> E
	0xe3: "XTHL", 0xf9: "SPHL", 0xeb: "XCHG",
	// Restart after interrupt
	0xc7: "RST 0", 0xcf: "RST 1", 0xd7: "RST 2", 0xdf: "RST 3",
	0xe7: "RST 4", 0xef: "RST 5", 0xf7: "RST 6", 0xff: "RST 7",
	// JMP HL
	0xe9: "PCHL",
	// return jump call
	// always
	0xc9: "RET", 0xc3: "JMP $%%", 0xcd: "CALL $%%",
	0xd9: "RET", 0xcb: "JMP $%%", 0xdd: "CALL $%%",
	0xed: "CALL $%%",
	0xfd: "CALL $%%",
	//if !0
	0xc0: "RNZ", 0xc2: "JNZ $%%", 0xc4: "CNZ $%%",
	// if 0
	0xc8: "RZ", 0xca: "JZ $%%", 0xcc: "CZ $%%",
	// if !carry
	0xd0: "RNC", 0xd2: "JNC $%%", 0xd4: "CNC $%%",
	// if carry
	0xd8: "RC", 0xda: "JC $%%", 0xdc: "CC $%%",
	// if parity odd
	0xe0: "RPO", 0xe2: "JPO $%%", 0xe4: "CPO $%%",
	// if parity even
	0xe8: "RPE", 0xea: "JPE $%%", 0xec: "CPE $%%",
	// if plus (sign clear)
	0xf0: "RP", 0xf2: "JP $%%", 0xf4: "CP $%%",
	// if minus (sign set) — fix: 0xfa is JM, and the old comment said "parity even"
	0xf8: "RM", 0xfa: "JM $%%", 0xfc: "CM $%%",
}
// bytes_of reads the file at path and returns its contents and size.
//
// Fix: the original used Stat + Open + a single Read, ignoring both short
// reads and the Read error; os.ReadFile handles all of that correctly.
func bytes_of(path string) ([]byte, int64, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return []byte{}, 0, err
	}
	return data, int64(len(data)), nil
}
// disassemble_bytes decodes the byte slice into 8080 assembly text, one
// instruction per line, printing each line and returning the collected
// list.
//
// Fixes: the original always returned an empty slice despite its []string
// result, and a truncated trailing operand would panic on the slice
// expression; both are handled now.
func disassemble_bytes(bytes []byte, size int64) ([]string, error) {
	lines := make([]string, 0, size)
	var index int64
	for index < size {
		op := instructions[bytes[index]]
		// Each '%' in the template stands for one operand byte.
		argc := int64(strings.Count(op, "%"))
		if index+1+argc > size {
			return lines, fmt.Errorf("truncated operand for opcode %02X at offset %d", bytes[index], index)
		}
		args := bytes[index+1 : index+1+argc]
		text := strings.ReplaceAll(op, "%", "")
		index += argc + 1
		// Operands are little-endian; append the high byte first so
		// 16-bit addresses read naturally (e.g. "JMP $ABCD").
		for i := len(args) - 1; i >= 0; i-- {
			text += fmt.Sprintf("%02X", args[i])
		}
		fmt.Println(text)
		lines = append(lines, text)
	}
	return lines, nil
}
// push appends item to the slice pointed to by array.
//
// Fix: the original appended into a new local slice and reassigned a local
// pointer copy, so the caller's slice was never modified.
func push(array *[]string, item string) {
	*array = append(*array, item)
}
// T disassembles the bundled invaders ROM image, printing each decoded
// instruction; read errors are deliberately ignored.
func T() {
	data, size, _ := bytes_of("./source/invaders.h")
	disassemble_bytes(data, size)
}
|
package batch
import (
"github.com/utahta/momoclo-channel/api"
"github.com/utahta/momoclo-channel/config"
)
// init wires the batch server at package load time: it loads the deploy
// configuration (config.MustLoad panics on failure) and registers the
// batch API handlers via s.Handle().
//
// NOTE(review): side effects in init() make load ordering and testing
// harder; consider an explicit setup function — confirm no callers rely
// on the implicit registration before changing.
func init() {
	config.MustLoad("config/deploy.toml")
	s := api.NewBatchServer()
	s.Handle()
}
|
package main
import (
"net/http"
"os"
"time"
"github.com/iatistas/dolista-safado/service"
"github.com/sirupsen/logrus"
)
const noToken = "no-token"
// main starts the HTTP server exposing the Telegram /message webhook on
// port 80. It refuses to start without a usable TELEGRAM_TOKEN.
func main() {
	telegramToken := os.Getenv("TELEGRAM_TOKEN")
	if telegramToken == "" || telegramToken == noToken {
		logrus.Error("telegram token not provided")
		return
	}
	router := http.NewServeMux()
	router.HandleFunc("/message", service.GetMessageHandler(telegramToken))
	server := &http.Server{
		Addr: ":80",
		// Fix: the mux was built but never attached, so the server fell
		// back to http.DefaultServeMux and /message was never routed.
		Handler:      router,
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 60 * time.Second,
	}
	logrus.Info("starting server on port 80")
	logrus.Error(server.ListenAndServe())
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import "encoding/json"
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// EffectEvidenceSynthesis is documented here http://hl7.org/fhir/StructureDefinition/EffectEvidenceSynthesis
// NOTE: this file is generated (see header); hand edits will be lost on
// regeneration. Pointer fields tagged ",omitempty" are optional; value
// fields (Status, Population, Exposure, ExposureAlternative, Outcome) are
// always serialized.
type EffectEvidenceSynthesis struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Meta *Meta `bson:"meta,omitempty" json:"meta,omitempty"`
	ImplicitRules *string `bson:"implicitRules,omitempty" json:"implicitRules,omitempty"`
	Language *string `bson:"language,omitempty" json:"language,omitempty"`
	Text *Narrative `bson:"text,omitempty" json:"text,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Url *string `bson:"url,omitempty" json:"url,omitempty"`
	Identifier []Identifier `bson:"identifier,omitempty" json:"identifier,omitempty"`
	Version *string `bson:"version,omitempty" json:"version,omitempty"`
	Name *string `bson:"name,omitempty" json:"name,omitempty"`
	Title *string `bson:"title,omitempty" json:"title,omitempty"`
	Status PublicationStatus `bson:"status" json:"status"`
	Date *string `bson:"date,omitempty" json:"date,omitempty"`
	Publisher *string `bson:"publisher,omitempty" json:"publisher,omitempty"`
	Contact []ContactDetail `bson:"contact,omitempty" json:"contact,omitempty"`
	Description *string `bson:"description,omitempty" json:"description,omitempty"`
	Note []Annotation `bson:"note,omitempty" json:"note,omitempty"`
	UseContext []UsageContext `bson:"useContext,omitempty" json:"useContext,omitempty"`
	Jurisdiction []CodeableConcept `bson:"jurisdiction,omitempty" json:"jurisdiction,omitempty"`
	Copyright *string `bson:"copyright,omitempty" json:"copyright,omitempty"`
	ApprovalDate *string `bson:"approvalDate,omitempty" json:"approvalDate,omitempty"`
	LastReviewDate *string `bson:"lastReviewDate,omitempty" json:"lastReviewDate,omitempty"`
	EffectivePeriod *Period `bson:"effectivePeriod,omitempty" json:"effectivePeriod,omitempty"`
	Topic []CodeableConcept `bson:"topic,omitempty" json:"topic,omitempty"`
	Author []ContactDetail `bson:"author,omitempty" json:"author,omitempty"`
	Editor []ContactDetail `bson:"editor,omitempty" json:"editor,omitempty"`
	Reviewer []ContactDetail `bson:"reviewer,omitempty" json:"reviewer,omitempty"`
	Endorser []ContactDetail `bson:"endorser,omitempty" json:"endorser,omitempty"`
	RelatedArtifact []RelatedArtifact `bson:"relatedArtifact,omitempty" json:"relatedArtifact,omitempty"`
	SynthesisType *CodeableConcept `bson:"synthesisType,omitempty" json:"synthesisType,omitempty"`
	StudyType *CodeableConcept `bson:"studyType,omitempty" json:"studyType,omitempty"`
	Population Reference `bson:"population" json:"population"`
	Exposure Reference `bson:"exposure" json:"exposure"`
	ExposureAlternative Reference `bson:"exposureAlternative" json:"exposureAlternative"`
	Outcome Reference `bson:"outcome" json:"outcome"`
	SampleSize *EffectEvidenceSynthesisSampleSize `bson:"sampleSize,omitempty" json:"sampleSize,omitempty"`
	ResultsByExposure []EffectEvidenceSynthesisResultsByExposure `bson:"resultsByExposure,omitempty" json:"resultsByExposure,omitempty"`
	EffectEstimate []EffectEvidenceSynthesisEffectEstimate `bson:"effectEstimate,omitempty" json:"effectEstimate,omitempty"`
	Certainty []EffectEvidenceSynthesisCertainty `bson:"certainty,omitempty" json:"certainty,omitempty"`
}
// EffectEvidenceSynthesisSampleSize describes the size of the sample
// (studies and participants) behind the synthesis. Generated code.
type EffectEvidenceSynthesisSampleSize struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Description *string `bson:"description,omitempty" json:"description,omitempty"`
	NumberOfStudies *int `bson:"numberOfStudies,omitempty" json:"numberOfStudies,omitempty"`
	NumberOfParticipants *int `bson:"numberOfParticipants,omitempty" json:"numberOfParticipants,omitempty"`
}

// EffectEvidenceSynthesisResultsByExposure holds results for a single
// exposure state, referencing the underlying RiskEvidenceSynthesis.
type EffectEvidenceSynthesisResultsByExposure struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Description *string `bson:"description,omitempty" json:"description,omitempty"`
	ExposureState *ExposureState `bson:"exposureState,omitempty" json:"exposureState,omitempty"`
	VariantState *CodeableConcept `bson:"variantState,omitempty" json:"variantState,omitempty"`
	RiskEvidenceSynthesis Reference `bson:"riskEvidenceSynthesis" json:"riskEvidenceSynthesis"`
}

// EffectEvidenceSynthesisEffectEstimate carries one estimated effect
// value with its unit and precision estimates.
type EffectEvidenceSynthesisEffectEstimate struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Description *string `bson:"description,omitempty" json:"description,omitempty"`
	Type *CodeableConcept `bson:"type,omitempty" json:"type,omitempty"`
	VariantState *CodeableConcept `bson:"variantState,omitempty" json:"variantState,omitempty"`
	Value *json.Number `bson:"value,omitempty" json:"value,omitempty"`
	UnitOfMeasure *CodeableConcept `bson:"unitOfMeasure,omitempty" json:"unitOfMeasure,omitempty"`
	PrecisionEstimate []EffectEvidenceSynthesisEffectEstimatePrecisionEstimate `bson:"precisionEstimate,omitempty" json:"precisionEstimate,omitempty"`
}

// EffectEvidenceSynthesisEffectEstimatePrecisionEstimate is a precision
// interval (level, from, to) attached to an effect estimate.
type EffectEvidenceSynthesisEffectEstimatePrecisionEstimate struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Type *CodeableConcept `bson:"type,omitempty" json:"type,omitempty"`
	Level *json.Number `bson:"level,omitempty" json:"level,omitempty"`
	From *json.Number `bson:"from,omitempty" json:"from,omitempty"`
	To *json.Number `bson:"to,omitempty" json:"to,omitempty"`
}

// EffectEvidenceSynthesisCertainty expresses certainty ratings for the
// synthesis, optionally broken into subcomponents.
type EffectEvidenceSynthesisCertainty struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Rating []CodeableConcept `bson:"rating,omitempty" json:"rating,omitempty"`
	Note []Annotation `bson:"note,omitempty" json:"note,omitempty"`
	CertaintySubcomponent []EffectEvidenceSynthesisCertaintyCertaintySubcomponent `bson:"certaintySubcomponent,omitempty" json:"certaintySubcomponent,omitempty"`
}

// EffectEvidenceSynthesisCertaintyCertaintySubcomponent is one rated
// subcomponent of a certainty assessment.
type EffectEvidenceSynthesisCertaintyCertaintySubcomponent struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Type *CodeableConcept `bson:"type,omitempty" json:"type,omitempty"`
	Rating []CodeableConcept `bson:"rating,omitempty" json:"rating,omitempty"`
	Note []Annotation `bson:"note,omitempty" json:"note,omitempty"`
}
// OtherEffectEvidenceSynthesis is a method-free alias of the resource
// type; marshalling it inside MarshalJSON reuses the struct tags without
// recursing back into this MarshalJSON.
type OtherEffectEvidenceSynthesis EffectEvidenceSynthesis

// MarshalJSON marshals the given EffectEvidenceSynthesis as JSON into a byte slice,
// injecting the fixed "resourceType" discriminator alongside the struct's fields.
func (r EffectEvidenceSynthesis) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		OtherEffectEvidenceSynthesis
		ResourceType string `json:"resourceType"`
	}{
		OtherEffectEvidenceSynthesis: OtherEffectEvidenceSynthesis(r),
		ResourceType: "EffectEvidenceSynthesis",
	})
}
// UnmarshalEffectEvidenceSynthesis unmarshals a EffectEvidenceSynthesis.
// On failure it returns the partially-decoded struct together with the
// json error; callers should rely only on the error.
func UnmarshalEffectEvidenceSynthesis(b []byte) (EffectEvidenceSynthesis, error) {
	var effectEvidenceSynthesis EffectEvidenceSynthesis
	if err := json.Unmarshal(b, &effectEvidenceSynthesis); err != nil {
		return effectEvidenceSynthesis, err
	}
	return effectEvidenceSynthesis, nil
}
|
package typgo_test
import (
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/typical-go/typical-go/pkg/typgo"
)
func TestDotEnv(t *testing.T) {
os.WriteFile(".env", []byte("key1=value1\nkey2=value2\n"), 0777)
defer os.Remove(".env")
m, err := typgo.DotEnv(".env").EnvLoad()
require.NoError(t, err)
require.Equal(t, map[string]string{
"key1": "value1",
"key2": "value2",
}, m)
}
// TestEnvironment verifies that an in-memory Environment loads verbatim.
func TestEnvironment(t *testing.T) {
	env := typgo.Environment{
		"key1": "value1",
		"key2": "value2",
	}
	got, err := env.EnvLoad()
	require.NoError(t, err)
	expected := map[string]string{
		"key1": "value1",
		"key2": "value2",
	}
	require.Equal(t, expected, got)
}
|
/*
Adapted from
https://github.com/kubernetes/kubectl/tree/master/pkg/cmd/apply
*/
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cli
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/kubectl/pkg/cmd/apply"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/tilt-dev/tilt/internal/analytics"
engineanalytics "github.com/tilt-dev/tilt/internal/engine/analytics"
"github.com/tilt-dev/tilt/pkg/model"
)
// applyCmd adapts kubectl's `apply` subcommand to run through Tilt.
type applyCmd struct {
	streams genericclioptions.IOStreams // stdin/stdout/stderr for the command
	flags   *apply.ApplyFlags           // kubectl apply flag set, populated in register()
	cmd     *cobra.Command              // retained so run() can pass it to ToOptions
}

// Compile-time check that applyCmd implements tiltCmd.
var _ tiltCmd = &applyCmd{}

// newApplyCmd builds an applyCmd bound to the given IO streams; flags and
// cmd are filled in later by register().
func newApplyCmd(streams genericclioptions.IOStreams) *applyCmd {
	return &applyCmd{
		streams: streams,
	}
}

// name identifies this subcommand for analytics/registration purposes.
func (c *applyCmd) name() model.TiltSubcommand { return "apply" }
// register builds the cobra command for `tilt apply`, attaching kubectl's
// apply flag set plus Tilt's server-connection flags. The command and
// flags are stored on c for later use by run().
func (c *applyCmd) register() *cobra.Command {
	flags := apply.NewApplyFlags(c.streams)
	cmd := &cobra.Command{
		Use: "apply (-f FILENAME | -k DIRECTORY)",
		DisableFlagsInUseLine: true,
		Short: "Apply a configuration to a resource by filename or stdin",
	}
	flags.AddFlags(cmd)
	addConnectServerFlags(cmd)
	c.cmd = cmd
	c.flags = flags
	return cmd
}
// run executes apply: records analytics, builds a kubectl factory from the
// Tilt client getter, and delegates validation and execution to kubectl's
// apply options (CheckErr exits the process on failure).
func (c *applyCmd) run(ctx context.Context, args []string) error {
	a := analytics.Get(ctx)
	cmdTags := engineanalytics.CmdTags(map[string]string{})
	a.Incr("cmd.apply", cmdTags.AsMap())
	defer a.Flush(time.Second)
	getter, err := wireClientGetter(ctx)
	if err != nil {
		return err
	}
	f := cmdutil.NewFactory(getter)
	o, err := c.flags.ToOptions(f, c.cmd, "tilt", args)
	if err != nil {
		return err
	}
	// Fix: the original additionally called cmdutil.CheckErr(err) here —
	// dead code, since err is always nil after the check above.
	cmdutil.CheckErr(o.Validate())
	cmdutil.CheckErr(o.Run())
	return nil
}
|
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mysqlindexer
import ()
// requiredSchemaVersion is the MySQL index schema revision this package
// was written against.
const requiredSchemaVersion = 12

// SchemaVersion reports the schema revision required by this indexer.
func SchemaVersion() int {
	return requiredSchemaVersion
}
// SQLCreateTables returns the DDL statements that create the indexer's
// MySQL schema, in the order they must be executed. The statement text is
// data consumed at runtime and must not be reformatted.
func SQLCreateTables() []string {
	return []string{
		`CREATE TABLE blobs (
blobref VARCHAR(128) NOT NULL PRIMARY KEY,
size INTEGER NOT NULL,
type VARCHAR(100))`,
		`CREATE TABLE claims (
blobref VARCHAR(128) NOT NULL PRIMARY KEY,
signer VARCHAR(128) NOT NULL,
verifiedkeyid VARCHAR(128) NULL,
date VARCHAR(40) NOT NULL,
INDEX (signer, date),
INDEX (verifiedkeyid, date),
unverified CHAR(1) NULL,
claim VARCHAR(50) NOT NULL,
permanode VARCHAR(128) NOT NULL,
INDEX (permanode, signer, date),
attr VARCHAR(128) NULL,
value VARCHAR(128) NULL)`,
		`CREATE TABLE permanodes (
blobref VARCHAR(128) NOT NULL PRIMARY KEY,
unverified CHAR(1) NULL,
signer VARCHAR(128) NOT NULL DEFAULT '',
lastmod VARCHAR(40) NOT NULL DEFAULT '',
INDEX (signer, lastmod))`,
		`CREATE TABLE files (
fileschemaref VARCHAR(128) NOT NULL,
bytesref VARCHAR(128) NOT NULL,
size BIGINT,
filename VARCHAR(255),
mime VARCHAR(255),
setattrs VARCHAR(255),
PRIMARY KEY(fileschemaref, bytesref),
INDEX (bytesref))`,
		// For index.PermanodeOfSignerAttrValue:
		// Rows are one per camliType "claim", for claimType "set-attribute" or "add-attribute",
		// for attribute values that are known (needed to be indexed, e.g. "camliNamedRoot")
		//
		// keyid is verified GPG KeyId (e.g. "2931A67C26F5ABDA")
		// attr is e.g. "camliNamedRoot"
		// value is the claim's "value" field
		// claimdate is the "claimDate" field.
		// blobref is the blobref of the claim.
		// permanode is the claim's "permaNode" field.
		`CREATE TABLE signerattrvalue (
keyid VARCHAR(128) NOT NULL,
attr VARCHAR(128) NOT NULL,
value VARCHAR(255) NOT NULL,
claimdate VARCHAR(40) NOT NULL,
INDEX (keyid, attr, value, claimdate),
blobref VARCHAR(128) NOT NULL,
PRIMARY KEY (blobref),
permanode VARCHAR(128) NOT NULL,
INDEX (permanode))`,
		// Generic key/value metadata (e.g. schema version bookkeeping).
		`CREATE TABLE meta (
metakey VARCHAR(255) NOT NULL PRIMARY KEY,
value VARCHAR(255) NOT NULL)`,
		// Map from blobref (of ASCII armored public key) to keyid
		`CREATE TABLE signerkeyid (
blobref VARCHAR(128) NOT NULL,
PRIMARY KEY (blobref),
keyid VARCHAR(128) NOT NULL,
INDEX (keyid)
)`,
	}
}
|
// Controller processes "Foo" work items from a rate-limited workqueue.
type Controller struct {
	workqueue workqueue.RateLimitingInterface // pending object keys; rate limits retries
}
// Start builds the controller and runs it with two workers until the
// signal handler closes stopCh.
func Start() {
	stopCh := signals.SetupSignalHandler()
	cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
	if err != nil {
		klog.Fatalf("Error building kubeconfig: %s", err.Error())
	}
	// NOTE(review): cfg is unused in this snippet — presumably consumed by
	// clientset construction elsewhere; confirm before removing.
	_ = cfg
	controller := &Controller{
		workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Foos"),
	}
	// Fix: Run's error was silently discarded; surface it.
	if err := controller.Run(2, stopCh); err != nil {
		klog.Fatalf("Error running controller: %s", err.Error())
	}
}
// Run starts the controller: it waits for the informer caches to sync,
// launches `threadiness` worker goroutines, and blocks until stopCh is
// closed. It returns nil after a clean shutdown.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
	defer runtime.HandleCrash()
	// Shut the queue down so workers drain and processNextWorkItem exits.
	defer c.workqueue.ShutDown()
	klog.Info("Starting Foo controller")
	klog.Info("Waiting for informer caches to sync")
	// NOTE(review): c.deploymentsSynced / c.foosSynced are not declared on
	// the Controller struct visible here — presumably set elsewhere;
	// confirm against the full type definition.
	if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.foosSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}
	klog.Info("Starting workers")
	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}
	klog.Info("Started workers")
	<-stopCh
	klog.Info("Shutting down workers")
	return nil
}
// runWorker drains the workqueue until it is shut down.
func (c *Controller) runWorker() {
	for {
		if !c.processNextWorkItem() {
			return
		}
	}
}
// processNextWorkItem pops one key off the workqueue and hands it to
// syncHandler. It returns false only when the queue has been shut down,
// signalling runWorker to exit.
func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()
	if shutdown {
		return false
	}
	err := func(obj interface{}) error {
		// Done must always be called so the queue knows processing finished.
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		if key, ok = obj.(string); !ok {
			// Forget the malformed item so it is not retried forever.
			c.workqueue.Forget(obj)
			runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		if err := c.syncHandler(key); err != nil {
			// Requeue with rate limiting for a later retry.
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		// Success: clear the item's rate-limit history.
		c.workqueue.Forget(obj)
		klog.Infof("Successfully synced '%s'", key)
		return nil
	}(obj)
	if err != nil {
		runtime.HandleError(err)
		return true
	}
	return true
}
|
package sshutil
import (
"testing"
)
// TestCleanName checks that cleanName backslash-escapes a space.
func TestCleanName(t *testing.T) {
	if cleanName(" ") != "\\ " {
		// Fix: the original message rendered as `Expected " to be escaped`,
		// which did not describe the space-escaping case under test.
		t.Errorf("Expected space to be escaped as %q", "\\ ")
	}
}
|
package metadata
// MetaData carries per-client request metadata.
type MetaData struct {
	ClientName string // caller-supplied client identifier
	ClientIP   string // client's IP address in string form
}
package main
import (
"fmt"
"log"
"math"
"regexp"
"strings"
"github.com/henkman/liberprimus"
"github.com/henkman/liberprimus/gematriaprimus"
)
// xor XORs src cyclically into dst in place: dst[i] ^= src[i mod len(src)].
//
// Fix: an empty src previously caused a division-by-zero panic whenever
// dst was non-empty; it is now a no-op.
func xor(dst, src []uint) {
	if len(src) == 0 {
		return
	}
	for i := range dst {
		dst[i] ^= src[i%len(src)]
	}
}
// frombase parses s as an unsigned integer in the given base, using the
// first `base` characters of digits as the digit alphabet. Characters
// outside the alphabet yield an error.
//
// Fix: the original accumulated place values with math.Pow, whose float64
// rounding can corrupt large results; the integer Horner form is exact.
func frombase(s string, base uint, digits string) (uint, error) {
	alphabet := digits[:base]
	var n uint
	for i := 0; i < len(s); i++ {
		v := strings.IndexByte(alphabet, s[i])
		if v == -1 {
			return 0, fmt.Errorf("unknown digit %c at position %d", s[i], i)
		}
		n = n*base + uint(v)
	}
	return n, nil
}
// main drives a set of Liber Primus analysis experiments. Each `if
// false`/`if true` guard toggles one experiment; the dead branches are
// deliberately kept as reference material for earlier attempts.
func main() {
	// Experiment 1 (disabled): frequency count of characters appearing in
	// two-character pairs on pages 49-50.
	if false {
		pairs := make([]string, 0, 100)
		rePair := regexp.MustCompile("[a-zA-Z0-9]{2}")
		for _, page := range liberprimus.Pages[49:51] {
			pairs = append(pairs, rePair.FindAllString(page, -1)...)
		}
		d := map[byte]uint{}
		for _, pair := range pairs {
			if _, ok := d[pair[0]]; ok {
				d[pair[0]]++
			} else {
				d[pair[0]] = 1
			}
			if _, ok := d[pair[1]]; ok {
				d[pair[1]]++
			} else {
				d[pair[1]] = 1
			}
		}
		for k, v := range d {
			fmt.Printf("%c:%d\n", k, v)
		}
		return
	}
	// Experiment 2 (active): interpret each character pair on pages 49-50
	// as a number in a custom base and print the decoded values.
	if true {
		pairs := make([]string, 0, 100)
		rePair := regexp.MustCompile("[a-zA-Z0-9]{2}")
		for _, page := range liberprimus.Pages[49:51] {
			pairs = append(pairs, rePair.FindAllString(page, -1)...)
		}
		// const digits = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
		// Alphabet deliberately omits 'f' and trims after 'x' — an
		// experiment-specific digit set.
		const digits = "0123456789abcdeghijklmnopqrstuvwxABCDEFGHIJKLMNOPQRSTUVWXYZ"
		for _, pair := range pairs {
			x, err := frombase(pair, uint(len(digits)), digits)
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(x)
		}
		return
	}
	// Experiment 3 (disabled): mirror a 4x4 number grid around the magic
	// constant 3301, with a sign flip for two cells of the last row.
	if false {
		rect := [][]uint{
			{3258, 3222, 3152, 3038},
			{3278, 3299, 3298, 2838},
			{3288, 3294, 3296, 2472},
			{4516, 1206, 708, 1820},
		}
		for nl, line := range rect {
			for nc, col := range line {
				if nl == 3 && nc <= 1 {
					fmt.Printf("%d ", 3301+col)
				} else {
					fmt.Printf("%d ", 3301-col)
				}
			}
			fmt.Println()
		}
		fmt.Println(4516 - 1206)
		return
	}
	// Experiment 4 (disabled): dump the gematria-primus value of every
	// rune on every page.
	if false {
		values := make([][]uint, len(liberprimus.Pages))
		for i, page := range liberprimus.Pages {
			values[i] = make([]uint, 0, 100)
			for _, r := range page {
				if gematriaprimus.IsRune(r) {
					values[i] = append(values[i], gematriaprimus.RuneToValue(r))
				}
			}
		}
		for i, pvs := range values {
			fmt.Println("---", i, "---")
			for _, pv := range pvs {
				fmt.Printf("%d ", pv)
			}
			fmt.Println()
		}
	}
}
|
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
package cmd
import (
"fmt"
"log"
"os"
"syscall"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"github.com/highfidelity/bens/cnf"
"github.com/highfidelity/bens/env"
"github.com/highfidelity/bens/key"
)
// readPassFromTerm prompts on stdout and reads a password from the
// terminal without echoing it. The trailing newline is printed only on
// success (ReadPassword suppresses the user's Enter keystroke).
func readPassFromTerm() ([]byte, error) {
	fmt.Printf("password: ")
	pass, err := terminal.ReadPassword(int(syscall.Stdin))
	if err != nil {
		return nil, err
	}
	fmt.Println("")
	return pass, nil
}
// serializerType selects the output formatter: shell, powershell or cmd.
var serializerType string

// shouldAskPass forces an interactive password prompt instead of reading
// BENS_PASS or the pass file.
var shouldAskPass bool

// init registers the environment subcommand and its flags on the root command.
func init() {
	rootCmd.AddCommand(environmentCmd)
	environmentCmd.PersistentFlags().StringVarP(
		&serializerType,
		"formatter", "", "shell", "choices are: shell, powershell and cmd")
	environmentCmd.PersistentFlags().BoolVarP(
		&shouldAskPass, "ask-pass", "", false, "ask for pass")
}
// environmentCmd decrypts the configured environment and prints each
// variable using the selected serializer (shell, powershell or cmd).
var environmentCmd = &cobra.Command{
	Use:   "environment",
	Short: "Decrypt and display the environment",
	Run: func(cmd *cobra.Command, args []string) {
		serializer, err := env.GetSerializer(serializerType)
		if err != nil {
			log.Fatalf("couldn't load serializer: %v", err)
		}
		var cipher key.Key
		if shouldAskPass {
			pass, passErr := readPassFromTerm()
			if passErr != nil {
				log.Fatalf("couldn't read pass from terminal: %v", passErr)
			}
			// Fix: the original declared a shadowed err inside this block
			// (`pass, err := ...`), so a key.NewWithPass failure here was
			// assigned to the inner err and silently ignored by the check
			// below.
			cipher, err = key.NewWithPass(pass, priKeyPath, pubKeyPath)
		} else if pass := os.Getenv("BENS_PASS"); pass != "" {
			cipher, err = key.NewWithPass([]byte(pass), priKeyPath, pubKeyPath)
		} else {
			cipher, err = key.New(passPath, priKeyPath, pubKeyPath)
		}
		if err != nil {
			log.Fatalf("couldn't read key: %v", err)
		}
		c, err := cnf.New(yamlPath, &cipher)
		if err != nil {
			// Fix: this error was previously never checked before using c.
			log.Fatalf("couldn't load config: %v", err)
		}
		environment, err := c.DecryptEnvironment()
		if err != nil {
			log.Fatalf("couldn't decrypt environment: %v", err)
		}
		for _, envVar := range environment {
			fmt.Println(serializer.ToString(envVar.Name, envVar.Value))
		}
	},
}
|
package utils
import "fmt"
// ConvertEnvVars convert ENV vars from a map to a list of strings in the format ["key1=val1", "key2=val2", "key3=val3" ...]
// ConvertEnvVars convert ENV vars from a map to a list of strings in the format ["key1=val1", "key2=val2", "key3=val3" ...]
//
// Note: Go map iteration order is random, so the order of the returned
// entries is nondeterministic from call to call.
func ConvertEnvVars(envVarsMap map[any]any) []string {
	// Pre-size: the result has exactly one entry per map key.
	res := make([]string, 0, len(envVarsMap))
	for k, v := range envVarsMap {
		res = append(res, fmt.Sprintf("%s=%s", k, v))
	}
	return res
}
|
// date: 2019-03-15
package itf
import "github.com/Jarvens/Exchange-Agent/cache"
// Cache abstracts a byte-oriented key/value cache with runtime statistics.
type Cache interface {
	// Get returns the cached value for key k.
	Get(k string) ([]byte, error)
	// Set stores v under key k.
	Set(k string, v []byte) error
	// Del removes key k from the cache.
	Del(k string) error
	// GetStat returns the cache's runtime statistics.
	GetStat() cache.Stat
}
|
package gorm2
import (
"context"
"errors"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/traPtitech/trap-collection-server/src/domain"
"github.com/traPtitech/trap-collection-server/src/domain/values"
"github.com/traPtitech/trap-collection-server/src/repository"
"github.com/traPtitech/trap-collection-server/src/repository/gorm2/migrate"
"gorm.io/gorm"
)
// TestGetGameFilesWithoutTypesV2 exercises GameFileV2.GetGameFilesWithoutTypes
// against the shared test database. It covers each file type (jar/windows/mac),
// an empty file-ID list, multiple files, a missing file, and record-level locking.
func TestGetGameFilesWithoutTypesV2(t *testing.T) {
	t.Parallel()

	ctx := context.Background()

	db, err := testDB.getDB(ctx)
	if err != nil {
		t.Fatalf("failed to get db: %+v\n", err)
	}

	gameFileRepository := NewGameFileV2(testDB)

	type test struct {
		description string
		fileIDs     []values.GameFileID
		lockType    repository.LockType
		beforeGames []migrate.GameTable2
		gameFiles   []*repository.GameFileInfo
		isErr       bool
		err         error
	}

	gameID1 := values.NewGameID()
	gameID2 := values.NewGameID()
	gameID3 := values.NewGameID()
	gameID4 := values.NewGameID()
	gameID5 := values.NewGameID()

	gameFileID1 := values.NewGameFileID()
	gameFileID2 := values.NewGameFileID()
	gameFileID3 := values.NewGameFileID()
	gameFileID4 := values.NewGameFileID()
	gameFileID5 := values.NewGameFileID()
	gameFileID6 := values.NewGameFileID()
	gameFileID7 := values.NewGameFileID()

	now := time.Now()

	// Load the seeded file-type rows and build a name -> ID lookup so the
	// fixtures below can reference types by their migrate constants.
	var fileTypes []*migrate.GameFileTypeTable
	err = db.
		Session(&gorm.Session{}).
		Find(&fileTypes).Error
	if err != nil {
		t.Fatalf("failed to get file types: %v\n", err)
	}

	fileTypeMap := make(map[string]int, len(fileTypes))
	for _, fileType := range fileTypes {
		fileTypeMap[fileType.Name] = fileType.ID
	}

	// Note: "68617368" is the hex encoding of the ASCII string "hash",
	// matching the []byte("hash") expectations below.
	testCases := []test{
		{
			description: "特に問題ないのでエラーなし",
			fileIDs:     []values.GameFileID{gameFileID1},
			lockType:    repository.LockTypeNone,
			beforeGames: []migrate.GameTable2{
				{
					ID:          uuid.UUID(gameID1),
					Name:        "test",
					Description: "test",
					CreatedAt:   now,
					GameFiles: []migrate.GameFileTable2{
						{
							ID:         uuid.UUID(gameFileID1),
							GameID:     uuid.UUID(gameID1),
							FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
							Hash:       "68617368",
							EntryPoint: "/path/to/game.jar",
							CreatedAt:  now,
						},
					},
				},
			},
			gameFiles: []*repository.GameFileInfo{
				{
					GameFile: domain.NewGameFile(
						gameFileID1,
						values.GameFileTypeJar,
						"/path/to/game.jar",
						[]byte("hash"),
						now,
					),
					GameID: gameID1,
				},
			},
		},
		{
			description: "windowsでもエラーなし",
			fileIDs:     []values.GameFileID{gameFileID2},
			lockType:    repository.LockTypeNone,
			beforeGames: []migrate.GameTable2{
				{
					ID:          uuid.UUID(gameID2),
					Name:        "test",
					Description: "test",
					CreatedAt:   now,
					GameFiles: []migrate.GameFileTable2{
						{
							ID:         uuid.UUID(gameFileID2),
							GameID:     uuid.UUID(gameID2),
							FileTypeID: fileTypeMap[migrate.GameFileTypeWindows],
							Hash:       "68617368",
							EntryPoint: "/path/to/game.exe",
							CreatedAt:  now,
						},
					},
				},
			},
			gameFiles: []*repository.GameFileInfo{
				{
					GameFile: domain.NewGameFile(
						gameFileID2,
						values.GameFileTypeWindows,
						"/path/to/game.exe",
						[]byte("hash"),
						now,
					),
					GameID: gameID2,
				},
			},
		},
		{
			description: "macでもエラーなし",
			fileIDs:     []values.GameFileID{gameFileID3},
			lockType:    repository.LockTypeNone,
			beforeGames: []migrate.GameTable2{
				{
					ID:          uuid.UUID(gameID3),
					Name:        "test",
					Description: "test",
					CreatedAt:   now,
					GameFiles: []migrate.GameFileTable2{
						{
							ID:         uuid.UUID(gameFileID3),
							GameID:     uuid.UUID(gameID3),
							FileTypeID: fileTypeMap[migrate.GameFileTypeMac],
							Hash:       "68617368",
							EntryPoint: "/path/to/game.app",
							CreatedAt:  now,
						},
					},
				},
			},
			gameFiles: []*repository.GameFileInfo{
				{
					GameFile: domain.NewGameFile(
						gameFileID3,
						values.GameFileTypeMac,
						"/path/to/game.app",
						[]byte("hash"),
						now,
					),
					GameID: gameID3,
				},
			},
		},
		{
			description: "fileIDが空でもエラーなし",
			fileIDs:     []values.GameFileID{},
			lockType:    repository.LockTypeNone,
			beforeGames: []migrate.GameTable2{},
			gameFiles:   []*repository.GameFileInfo{},
		},
		{
			description: "ファイルが複数でもエラーなし",
			fileIDs:     []values.GameFileID{gameFileID4, gameFileID5},
			lockType:    repository.LockTypeNone,
			beforeGames: []migrate.GameTable2{
				{
					ID:          uuid.UUID(gameID4),
					Name:        "test",
					Description: "test",
					CreatedAt:   now,
					GameFiles: []migrate.GameFileTable2{
						{
							ID:         uuid.UUID(gameFileID4),
							GameID:     uuid.UUID(gameID4),
							FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
							Hash:       "68617368",
							EntryPoint: "/path/to/game.jar",
							CreatedAt:  now,
						},
						{
							ID:         uuid.UUID(gameFileID5),
							GameID:     uuid.UUID(gameID4),
							FileTypeID: fileTypeMap[migrate.GameFileTypeWindows],
							Hash:       "68617368",
							EntryPoint: "/path/to/game.exe",
							// Older creation time so the two files differ.
							CreatedAt: now.Add(-time.Hour),
						},
					},
				},
			},
			gameFiles: []*repository.GameFileInfo{
				{
					GameFile: domain.NewGameFile(
						gameFileID4,
						values.GameFileTypeJar,
						"/path/to/game.jar",
						[]byte("hash"),
						now,
					),
					GameID: gameID4,
				},
				{
					GameFile: domain.NewGameFile(
						gameFileID5,
						values.GameFileTypeWindows,
						"/path/to/game.exe",
						[]byte("hash"),
						now.Add(-time.Hour),
					),
					GameID: gameID4,
				},
			},
		},
		{
			description: "対応するファイルが存在しない場合もエラーなし",
			fileIDs:     []values.GameFileID{gameFileID6},
			lockType:    repository.LockTypeNone,
			beforeGames: []migrate.GameTable2{},
			gameFiles:   []*repository.GameFileInfo{},
		},
		{
			description: "行ロックを取ってもエラーなし",
			fileIDs:     []values.GameFileID{gameFileID7},
			lockType:    repository.LockTypeRecord,
			beforeGames: []migrate.GameTable2{
				{
					ID:          uuid.UUID(gameID5),
					Name:        "test",
					Description: "test",
					CreatedAt:   now,
					GameFiles: []migrate.GameFileTable2{
						{
							ID:         uuid.UUID(gameFileID7),
							GameID:     uuid.UUID(gameID5),
							FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
							Hash:       "68617368",
							EntryPoint: "/path/to/game.jar",
							CreatedAt:  now,
						},
					},
				},
			},
			gameFiles: []*repository.GameFileInfo{
				{
					GameFile: domain.NewGameFile(
						gameFileID7,
						values.GameFileTypeJar,
						"/path/to/game.jar",
						[]byte("hash"),
						now,
					),
					GameID: gameID5,
				},
			},
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			// Seed the fixture games (and nested files) for this case.
			if len(testCase.beforeGames) != 0 {
				err := db.Create(&testCase.beforeGames).Error
				if err != nil {
					t.Fatalf("failed to create game: %v\n", err)
				}
			}

			gameFiles, err := gameFileRepository.GetGameFilesWithoutTypes(ctx, testCase.fileIDs, testCase.lockType)

			if testCase.isErr {
				if testCase.err == nil {
					assert.Error(t, err)
				} else if !errors.Is(err, testCase.err) {
					t.Errorf("error must be %v, but actual is %v", testCase.err, err)
				}
			} else {
				assert.NoError(t, err)
			}
			if err != nil || testCase.isErr {
				return
			}

			// Expected and actual files are compared pairwise, so the
			// repository is expected to return them in fixture order.
			assert.Len(t, gameFiles, len(testCase.gameFiles))

			for i, expectGameFile := range testCase.gameFiles {
				actualGameFile := gameFiles[i]

				assert.Equal(t, expectGameFile.GetID(), actualGameFile.GetID())
				assert.Equal(t, expectGameFile.GetFileType(), actualGameFile.GetFileType())
				assert.Equal(t, expectGameFile.GetEntryPoint(), actualGameFile.GetEntryPoint())
				assert.Equal(t, expectGameFile.GetHash(), actualGameFile.GetHash())
				// Allow a second of slack for DB timestamp precision loss.
				assert.WithinDuration(t, expectGameFile.GetCreatedAt(), actualGameFile.GetCreatedAt(), time.Second)
				assert.Equal(t, expectGameFile.GameID, actualGameFile.GameID)
			}
		})
	}
}
// TestSaveGameFileV2 exercises GameFileV2.SaveGameFile against the shared test
// database: saving each supported file type, rejecting an unknown type, adding
// a file alongside existing ones, and verifying no rows change on error.
func TestSaveGameFileV2(t *testing.T) {
	t.Parallel()

	ctx := context.Background()

	db, err := testDB.getDB(ctx)
	if err != nil {
		t.Fatalf("failed to get db: %+v\n", err)
	}

	gameFileRepository := NewGameFileV2(testDB)

	type test struct {
		description string
		gameID      values.GameID
		file        *domain.GameFile
		beforeFiles []migrate.GameFileTable2
		expectFiles []migrate.GameFileTable2
		isErr       bool
		err         error
	}

	gameID1 := values.NewGameID()
	gameID2 := values.NewGameID()
	gameID3 := values.NewGameID()
	gameID4 := values.NewGameID()
	gameID5 := values.NewGameID()
	gameID6 := values.NewGameID()

	fileID1 := values.NewGameFileID()
	fileID2 := values.NewGameFileID()
	fileID3 := values.NewGameFileID()
	fileID4 := values.NewGameFileID()
	fileID5 := values.NewGameFileID()
	fileID6 := values.NewGameFileID()
	fileID7 := values.NewGameFileID()
	fileID8 := values.NewGameFileID()

	// Load the seeded file-type rows and build a name -> ID lookup so the
	// fixtures below can reference types by their migrate constants.
	var fileTypes []*migrate.GameFileTypeTable
	err = db.
		Session(&gorm.Session{}).
		Find(&fileTypes).Error
	if err != nil {
		t.Fatalf("failed to get role type table: %+v\n", err)
	}

	fileTypeMap := make(map[string]int, len(fileTypes))
	for _, fileType := range fileTypes {
		fileTypeMap[fileType.Name] = fileType.ID
	}

	md5Hash := values.NewGameFileHashFromBytes([]byte{0x09, 0x8f, 0x6b, 0xcd, 0x46, 0x21, 0xd3, 0x73, 0xca, 0xde, 0x4e, 0x83, 0x26, 0x27, 0xb4, 0xf6})

	now := time.Now()
	testCases := []test{
		{
			description: "特に問題ないので問題なし",
			gameID:      gameID1,
			file: domain.NewGameFile(
				fileID1,
				values.GameFileTypeJar,
				values.NewGameFileEntryPoint("path/to/file"),
				md5Hash,
				now,
			),
			beforeFiles: []migrate.GameFileTable2{},
			expectFiles: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID1),
					GameID:     uuid.UUID(gameID1),
					FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
		},
		{
			description: "windowsでも問題なし",
			gameID:      gameID2,
			file: domain.NewGameFile(
				fileID2,
				values.GameFileTypeWindows,
				values.NewGameFileEntryPoint("path/to/file"),
				md5Hash,
				now,
			),
			beforeFiles: []migrate.GameFileTable2{},
			expectFiles: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID2),
					GameID:     uuid.UUID(gameID2),
					FileTypeID: fileTypeMap[migrate.GameFileTypeWindows],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
		},
		{
			description: "macでも問題なし",
			gameID:      gameID3,
			file: domain.NewGameFile(
				fileID3,
				values.GameFileTypeMac,
				values.NewGameFileEntryPoint("path/to/file"),
				md5Hash,
				now,
			),
			beforeFiles: []migrate.GameFileTable2{},
			expectFiles: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID3),
					GameID:     uuid.UUID(gameID3),
					FileTypeID: fileTypeMap[migrate.GameFileTypeMac],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
		},
		{
			description: "想定外の画像の種類なのでエラー",
			gameID:      gameID4,
			file: domain.NewGameFile(
				fileID4,
				// 100 is not a defined GameFileType, so the save must fail.
				100,
				values.NewGameFileEntryPoint("path/to/file"),
				md5Hash,
				now,
			),
			beforeFiles: []migrate.GameFileTable2{},
			expectFiles: []migrate.GameFileTable2{},
			isErr:       true,
		},
		{
			description: "既にファイルが存在しても問題なし",
			gameID:      gameID5,
			file: domain.NewGameFile(
				fileID5,
				values.GameFileTypeMac,
				values.NewGameFileEntryPoint("path/to/file"),
				md5Hash,
				now,
			),
			beforeFiles: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID6),
					GameID:     uuid.UUID(gameID5),
					FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			// Pre-existing file remains and the new one is appended.
			expectFiles: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID6),
					GameID:     uuid.UUID(gameID5),
					FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
				{
					ID:         uuid.UUID(fileID5),
					GameID:     uuid.UUID(gameID5),
					FileTypeID: fileTypeMap[migrate.GameFileTypeMac],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
		},
		{
			description: "エラーの場合変更なし",
			gameID:      gameID6,
			file: domain.NewGameFile(
				fileID7,
				// Invalid type again: the existing row must stay untouched.
				100,
				values.NewGameFileEntryPoint("path/to/file"),
				md5Hash,
				now,
			),
			beforeFiles: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID8),
					GameID:     uuid.UUID(gameID6),
					FileTypeID: fileTypeMap[migrate.GameFileTypeMac],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFiles: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID8),
					GameID:     uuid.UUID(gameID6),
					FileTypeID: fileTypeMap[migrate.GameFileTypeMac],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			isErr: true,
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			// Seed the parent game with any pre-existing files.
			err := db.Create(&migrate.GameTable2{
				ID:          uuid.UUID(testCase.gameID),
				Name:        "test",
				Description: "test",
				CreatedAt:   time.Now(),
				GameFiles:   testCase.beforeFiles,
			}).Error
			if err != nil {
				t.Fatalf("failed to create game table: %+v\n", err)
			}

			err = gameFileRepository.SaveGameFile(ctx, testCase.gameID, testCase.file)

			if testCase.isErr {
				if testCase.err == nil {
					assert.Error(t, err)
				} else if !errors.Is(err, testCase.err) {
					t.Errorf("error must be %v, but actual is %v", testCase.err, err)
				}
			} else {
				assert.NoError(t, err)
			}

			// Read back all files for this game and compare against the
			// expected rows, keyed by file ID (order-independent).
			var files []migrate.GameFileTable2
			err = db.
				Session(&gorm.Session{}).
				Where("game_id = ?", uuid.UUID(testCase.gameID)).
				Find(&files).Error
			if err != nil {
				t.Fatalf("failed to get role table: %+v\n", err)
			}

			assert.Len(t, files, len(testCase.expectFiles))

			fileMap := make(map[uuid.UUID]migrate.GameFileTable2)
			for _, file := range files {
				fileMap[file.ID] = file
			}

			for _, expectFile := range testCase.expectFiles {
				actualFile, ok := fileMap[expectFile.ID]
				if !ok {
					t.Errorf("not found file: %+v", expectFile)
				}

				assert.Equal(t, expectFile.GameID, actualFile.GameID)
				assert.Equal(t, expectFile.FileTypeID, actualFile.FileTypeID)
				assert.Equal(t, expectFile.EntryPoint, actualFile.EntryPoint)
				assert.Equal(t, expectFile.Hash, actualFile.Hash)
				// Allow slack for DB timestamp precision loss.
				assert.WithinDuration(t, expectFile.CreatedAt, actualFile.CreatedAt, 2*time.Second)
			}
		})
	}
}
// TestGetGameFile exercises GameFileV2.GetGameFile against the shared test
// database: fetching a single file of each type, with multiple files present,
// and the not-found case returning repository.ErrRecordNotFound.
func TestGetGameFile(t *testing.T) {
	t.Parallel()

	ctx := context.Background()

	db, err := testDB.getDB(ctx)
	if err != nil {
		t.Fatalf("failed to get db: %+v\n", err)
	}

	gameFileRepository := NewGameFileV2(testDB)

	type test struct {
		description string
		fileID      values.GameFileID
		lockType    repository.LockType
		files       []migrate.GameFileTable2
		expectFile  repository.GameFileInfo
		isErr       bool
		err         error
	}

	gameID1 := values.NewGameID()
	gameID2 := values.NewGameID()
	gameID3 := values.NewGameID()
	gameID4 := values.NewGameID()
	gameID5 := values.NewGameID()

	fileID1 := values.NewGameFileID()
	fileID2 := values.NewGameFileID()
	fileID3 := values.NewGameFileID()
	fileID4 := values.NewGameFileID()
	fileID5 := values.NewGameFileID()
	fileID6 := values.NewGameFileID()
	fileID7 := values.NewGameFileID()

	// Load the seeded file-type rows and build a name -> ID lookup so the
	// fixtures below can reference types by their migrate constants.
	var fileTypes []*migrate.GameFileTypeTable
	err = db.
		Session(&gorm.Session{}).
		Find(&fileTypes).Error
	if err != nil {
		t.Fatalf("failed to get file type table: %+v\n", err)
	}

	fileTypeMap := make(map[string]int, len(fileTypes))
	for _, fileType := range fileTypes {
		fileTypeMap[fileType.Name] = fileType.ID
	}

	md5Hash := values.NewGameFileHashFromBytes([]byte{0x09, 0x8f, 0x6b, 0xcd, 0x46, 0x21, 0xd3, 0x73, 0xca, 0xde, 0x4e, 0x83, 0x26, 0x27, 0xb4, 0xf6})

	now := time.Now()
	testCases := []test{
		{
			description: "特に問題ないので問題なし",
			fileID:      fileID1,
			lockType:    repository.LockTypeNone,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID1),
					GameID:     uuid.UUID(gameID1),
					FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFile: repository.GameFileInfo{
				GameFile: domain.NewGameFile(
					fileID1,
					values.GameFileTypeJar,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
				GameID: gameID1,
			},
		},
		{
			description: "windowsでも問題なし",
			fileID:      fileID2,
			lockType:    repository.LockTypeNone,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID2),
					GameID:     uuid.UUID(gameID2),
					FileTypeID: fileTypeMap[migrate.GameFileTypeWindows],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFile: repository.GameFileInfo{
				GameFile: domain.NewGameFile(
					fileID2,
					values.GameFileTypeWindows,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
				GameID: gameID2,
			},
		},
		{
			description: "macでも問題なし",
			fileID:      fileID3,
			lockType:    repository.LockTypeNone,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID3),
					GameID:     uuid.UUID(gameID3),
					FileTypeID: fileTypeMap[migrate.GameFileTypeMac],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFile: repository.GameFileInfo{
				GameFile: domain.NewGameFile(
					fileID3,
					values.GameFileTypeMac,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
				GameID: gameID3,
			},
		},
		{
			// NOTE(review): the description claims LockTypeRecord, but
			// lockType is left at its zero value here — confirm whether
			// repository.LockTypeRecord was intended.
			description: "lockTypeがRecordでも問題なし",
			fileID:      fileID4,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID4),
					GameID:     uuid.UUID(gameID4),
					FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFile: repository.GameFileInfo{
				GameFile: domain.NewGameFile(
					fileID4,
					values.GameFileTypeJar,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
				GameID: gameID4,
			},
		},
		{
			description: "複数の画像があっても問題なし",
			fileID:      fileID5,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID5),
					GameID:     uuid.UUID(gameID5),
					FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
				{
					ID:         uuid.UUID(fileID6),
					GameID:     uuid.UUID(gameID5),
					FileTypeID: fileTypeMap[migrate.GameFileTypeWindows],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFile: repository.GameFileInfo{
				GameFile: domain.NewGameFile(
					fileID5,
					values.GameFileTypeJar,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
				GameID: gameID5,
			},
		},
		{
			description: "ファイルが存在しないのでRecordNotFound",
			fileID:      fileID7,
			lockType:    repository.LockTypeNone,
			files:       []migrate.GameFileTable2{},
			isErr:       true,
			err:         repository.ErrRecordNotFound,
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			// Group the fixture files by game ID and create one parent
			// game row per distinct game, with its files nested.
			gameIDMap := map[uuid.UUID]*migrate.GameTable2{}
			for _, file := range testCase.files {
				if game, ok := gameIDMap[file.GameID]; ok {
					game.GameFiles = append(game.GameFiles, file)
				} else {
					gameIDMap[file.GameID] = &migrate.GameTable2{
						ID:          file.GameID,
						Name:        "test",
						Description: "test",
						CreatedAt:   now,
						GameFiles:   []migrate.GameFileTable2{file},
					}
				}
			}

			games := make([]migrate.GameTable2, 0, len(gameIDMap))
			for _, game := range gameIDMap {
				games = append(games, *game)
			}

			if len(games) > 0 {
				err := db.Create(games).Error
				if err != nil {
					t.Fatalf("failed to create game table: %+v\n", err)
				}
			}

			file, err := gameFileRepository.GetGameFile(ctx, testCase.fileID, testCase.lockType)

			if testCase.isErr {
				if testCase.err == nil {
					assert.Error(t, err)
				} else if !errors.Is(err, testCase.err) {
					t.Errorf("error must be %v, but actual is %v", testCase.err, err)
				}
			} else {
				assert.NoError(t, err)
			}
			if err != nil || testCase.isErr {
				return
			}

			assert.Equal(t, testCase.expectFile.GameFile.GetID(), file.GameFile.GetID())
			assert.Equal(t, testCase.expectFile.GameFile.GetFileType(), file.GameFile.GetFileType())
			assert.Equal(t, testCase.expectFile.GameFile.GetEntryPoint(), file.GameFile.GetEntryPoint())
			assert.Equal(t, testCase.expectFile.GameFile.GetHash(), file.GameFile.GetHash())
			// Allow a second of slack for DB timestamp precision loss.
			assert.WithinDuration(t, testCase.expectFile.GameFile.GetCreatedAt(), file.GameFile.GetCreatedAt(), time.Second)
			assert.Equal(t, testCase.expectFile.GameID, file.GameID)
		})
	}
}
// TestGetGameFilesV2 exercises GameFileV2.GetGameFiles against the shared test
// database: listing files for a game across file types, with record locking,
// with multiple files, and for a game that has none.
func TestGetGameFilesV2(t *testing.T) {
	t.Parallel()

	ctx := context.Background()

	db, err := testDB.getDB(ctx)
	if err != nil {
		t.Fatalf("failed to get db: %+v\n", err)
	}

	gameFileRepository := NewGameFileV2(testDB)

	type test struct {
		description string
		gameID      values.GameID
		lockType    repository.LockType
		files       []migrate.GameFileTable2
		expectFiles []*domain.GameFile
		isErr       bool
		err         error
	}

	gameID1 := values.NewGameID()
	gameID2 := values.NewGameID()
	gameID3 := values.NewGameID()
	gameID4 := values.NewGameID()
	gameID5 := values.NewGameID()
	gameID6 := values.NewGameID()

	fileID1 := values.NewGameFileID()
	fileID2 := values.NewGameFileID()
	fileID3 := values.NewGameFileID()
	fileID4 := values.NewGameFileID()
	fileID5 := values.NewGameFileID()
	fileID6 := values.NewGameFileID()

	// Load the seeded file-type rows and build a name -> ID lookup so the
	// fixtures below can reference types by their migrate constants.
	var fileTypes []*migrate.GameFileTypeTable
	err = db.
		Session(&gorm.Session{}).
		Find(&fileTypes).Error
	if err != nil {
		t.Fatalf("failed to get role type table: %+v\n", err)
	}

	fileTypeMap := make(map[string]int, len(fileTypes))
	for _, fileType := range fileTypes {
		fileTypeMap[fileType.Name] = fileType.ID
	}

	md5Hash := values.NewGameFileHashFromBytes([]byte{0x09, 0x8f, 0x6b, 0xcd, 0x46, 0x21, 0xd3, 0x73, 0xca, 0xde, 0x4e, 0x83, 0x26, 0x27, 0xb4, 0xf6})

	now := time.Now()
	testCases := []test{
		{
			description: "特に問題ないので問題なし",
			gameID:      gameID1,
			lockType:    repository.LockTypeNone,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID1),
					GameID:     uuid.UUID(gameID1),
					FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFiles: []*domain.GameFile{
				domain.NewGameFile(
					fileID1,
					values.GameFileTypeJar,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
			},
		},
		{
			description: "windowsでも問題なし",
			gameID:      gameID2,
			lockType:    repository.LockTypeNone,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID2),
					GameID:     uuid.UUID(gameID2),
					FileTypeID: fileTypeMap[migrate.GameFileTypeWindows],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFiles: []*domain.GameFile{
				domain.NewGameFile(
					fileID2,
					values.GameFileTypeWindows,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
			},
		},
		{
			description: "macでも問題なし",
			gameID:      gameID3,
			lockType:    repository.LockTypeNone,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID3),
					GameID:     uuid.UUID(gameID3),
					FileTypeID: fileTypeMap[migrate.GameFileTypeMac],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFiles: []*domain.GameFile{
				domain.NewGameFile(
					fileID3,
					values.GameFileTypeMac,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
			},
		},
		{
			description: "lockTypeがRecordでも問題なし",
			gameID:      gameID4,
			lockType:    repository.LockTypeRecord,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID4),
					GameID:     uuid.UUID(gameID4),
					FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
			},
			expectFiles: []*domain.GameFile{
				domain.NewGameFile(
					fileID4,
					values.GameFileTypeJar,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
			},
		},
		{
			description: "複数のファイルがあっても問題なし",
			gameID:      gameID5,
			files: []migrate.GameFileTable2{
				{
					ID:         uuid.UUID(fileID5),
					GameID:     uuid.UUID(gameID5),
					FileTypeID: fileTypeMap[migrate.GameFileTypeJar],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					CreatedAt:  now,
				},
				{
					ID:         uuid.UUID(fileID6),
					GameID:     uuid.UUID(gameID5),
					FileTypeID: fileTypeMap[migrate.GameFileTypeWindows],
					EntryPoint: "path/to/file",
					Hash:       md5Hash.String(),
					// Older creation time so the two files differ.
					CreatedAt: now.Add(-time.Hour),
				},
			},
			expectFiles: []*domain.GameFile{
				domain.NewGameFile(
					fileID5,
					values.GameFileTypeJar,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now,
				),
				domain.NewGameFile(
					fileID6,
					values.GameFileTypeWindows,
					values.NewGameFileEntryPoint("path/to/file"),
					md5Hash,
					now.Add(-time.Hour),
				),
			},
		},
		{
			description: "ファイルが存在しなくても問題なし",
			gameID:      gameID6,
			lockType:    repository.LockTypeNone,
			files:       []migrate.GameFileTable2{},
			expectFiles: []*domain.GameFile{},
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			// Group the fixture files by game ID and create one parent
			// game row per distinct game, with its files nested.
			gameIDMap := map[uuid.UUID]*migrate.GameTable2{}
			for _, file := range testCase.files {
				if game, ok := gameIDMap[file.GameID]; ok {
					game.GameFiles = append(game.GameFiles, file)
				} else {
					gameIDMap[file.GameID] = &migrate.GameTable2{
						ID:          file.GameID,
						Name:        "test",
						Description: "test",
						CreatedAt:   now,
						GameFiles:   []migrate.GameFileTable2{file},
					}
				}
			}

			games := make([]migrate.GameTable2, 0, len(gameIDMap))
			for _, game := range gameIDMap {
				games = append(games, *game)
			}

			if len(games) > 0 {
				err := db.Create(games).Error
				if err != nil {
					t.Fatalf("failed to create game table: %+v\n", err)
				}
			}

			files, err := gameFileRepository.GetGameFiles(ctx, testCase.gameID, testCase.lockType)

			if testCase.isErr {
				if testCase.err == nil {
					assert.Error(t, err)
				} else if !errors.Is(err, testCase.err) {
					t.Errorf("error must be %v, but actual is %v", testCase.err, err)
				}
			} else {
				assert.NoError(t, err)
			}
			if err != nil || testCase.isErr {
				return
			}

			// Pairwise comparison assumes the repository returns files in
			// fixture order. (No length assertion here, so extra trailing
			// results would go unnoticed — NOTE(review): consider assert.Len.)
			for i, expectFile := range testCase.expectFiles {
				assert.Equal(t, expectFile.GetID(), files[i].GetID())
				assert.Equal(t, expectFile.GetFileType(), files[i].GetFileType())
				assert.Equal(t, expectFile.GetEntryPoint(), files[i].GetEntryPoint())
				assert.Equal(t, expectFile.GetHash(), files[i].GetHash())
				// Allow a second of slack for DB timestamp precision loss.
				assert.WithinDuration(t, expectFile.GetCreatedAt(), files[i].GetCreatedAt(), time.Second)
			}
		})
	}
}
|
package storage
import "gitlab.cheppers.com/devops-academy-2018/shop2/pkg/shoe/model"
// Handler defines the storage operations available for Shoe records.
type Handler interface {
	// Insert persists a new shoe and returns the stored record.
	Insert(p model.Shoe) (model.Shoe, error)
	// Read returns the shoe with the given id.
	// NOTE(review): no error is returned — presumably a zero-value Shoe
	// signals a missing id; confirm against the implementation.
	Read(id uint) model.Shoe
	// Update applies the given field changes and returns the updated record.
	Update(p model.Shoe, fields map[string]interface{}) (model.Shoe, error)
	// Delete removes the shoe with the given id.
	Delete(id uint) error
	// List returns all stored shoes.
	List() []model.Shoe
	// Count returns the number of stored shoes.
	Count() int
}
|
package parser_test
import (
"strings"
"github.com/bytesparadise/libasciidoc/pkg/parser"
"github.com/bytesparadise/libasciidoc/pkg/types"
. "github.com/bytesparadise/libasciidoc/testsupport"
log "github.com/sirupsen/logrus"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("tables", func() {
Context("in final documents", func() {
It("1-line table with 2 cells and custom border styling", func() {
source := `[frame=ends,grid=rows]
|===
| *cookie* cookie | _pasta_
|===
`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrFrame: "ends",
types.AttrGrid: "rows",
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.QuotedText{
Kind: types.SingleQuoteBold,
Elements: []interface{}{
&types.StringElement{
Content: "cookie",
},
},
},
&types.StringElement{
Content: " cookie",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.QuotedText{
Kind: types.SingleQuoteItalic,
Elements: []interface{}{
&types.StringElement{
Content: "pasta",
},
},
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("1-line table with 3 cells", func() {
source := `|===
| *cookie* cookie | _pasta_ | chocolate
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.QuotedText{
Kind: types.SingleQuoteBold,
Elements: []interface{}{
&types.StringElement{
Content: "cookie",
},
},
},
&types.StringElement{
Content: " cookie",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.QuotedText{
Kind: types.SingleQuoteItalic,
Elements: []interface{}{
&types.StringElement{
Content: "pasta",
},
},
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "chocolate",
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("2-line table with 3 cells", func() {
source := `|===
| some cookies | some chocolate | some pasta
| more cookies | more chocolate | more pasta
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "some cookies",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "some chocolate",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "some pasta",
},
},
},
},
},
},
},
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "more cookies",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "more chocolate",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "more pasta",
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with compact rows", func() {
source := `|===
|h1|h2|h3
|one|two|three
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Header: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "h1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "h2",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "h3",
},
},
},
},
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "one",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "two",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "three",
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with title, headers and 1 line per cell", func() {
source := `.table title
|===
|header 1 |header 2
|row 1, column 1
|row 1, column 2
|row 2, column 1
|row 2, column 2
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrTitle: "table title",
},
Header: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "header 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "header 2",
},
},
},
},
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 1, column 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 1, column 2",
},
},
},
},
},
},
},
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 2, column 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 2, column 2",
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with title, headers, id and multiple roles, stretch", func() {
source := `.table title
[#anchor.role1%autowidth.stretch]
|===
|header 1 |header 2
|row 1, column 1
|row 1, column 2
|row 2, column 1
|row 2, column 2
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrTitle: "table title",
types.AttrOptions: types.Options{"autowidth"},
types.AttrRoles: types.Roles{"role1", "stretch"},
types.AttrID: "anchor",
},
Header: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "header 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "header 2",
},
},
},
},
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 1, column 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 1, column 2",
},
},
},
},
},
},
},
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 2, column 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 2, column 2",
},
},
},
},
},
},
},
},
},
},
ElementReferences: types.ElementReferences{
"anchor": "table title",
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with unseparated rows", func() {
source := `|===
|header 1 |header 2
|row 1, column 1
|row 1, column 2
|row 2, column 1
|row 2, column 2
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Header: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "header 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "header 2",
},
},
},
},
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 1, column 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 1, column 2",
},
},
},
},
},
},
},
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 2, column 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 2, column 2",
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with unbalanced rows", func() {
source := `|===
|header 1 |header 2
|row 1, column 1
|row 1, column 2
|row 2, column 1 |row 2, column 2
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Header: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "header 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "header 2",
},
},
},
},
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 1, column 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 1, column 2",
},
},
},
},
},
},
},
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 2, column 1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "row 2, column 2",
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("empty table ", func() {
source := `|===
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with cols attribute", func() {
source := `[cols="2*^.^,<,.>"]
|===
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrCols: []interface{}{
&types.TableColumn{
Multiplier: 2,
HAlign: types.HAlignCenter,
VAlign: types.VAlignMiddle,
Weight: 1,
},
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignLeft,
VAlign: types.VAlignTop,
Weight: 1,
},
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignLeft,
VAlign: types.VAlignBottom,
Weight: 1,
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("columns as document attribute", func() {
source := `:cols: pass:[2*^.^d,<e,.>s]
[cols={cols}]
|===
|===`
expected := &types.Document{
Elements: []interface{}{
&types.AttributeDeclaration{
Name: "cols",
Value: []interface{}{
&types.InlinePassthrough{
Kind: types.PassthroughMacro,
Elements: []interface{}{
&types.StringElement{
Content: "2*^.^d,<e,.>s",
},
},
},
},
},
&types.Table{
Attributes: types.Attributes{
types.AttrCols: []interface{}{
&types.TableColumn{
Multiplier: 2,
HAlign: types.HAlignCenter,
VAlign: types.VAlignMiddle,
Style: types.DefaultStyle,
Weight: 1,
},
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignLeft,
VAlign: types.VAlignTop,
Style: types.EmphasisStyle,
Weight: 1,
},
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignLeft,
VAlign: types.VAlignBottom,
Style: types.StrongStyle,
Weight: 1,
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with header option", func() {
source := `[cols="3*^",options="header"]
|===
|Dir (X,Y,Z) |Num Cells |Size
|X |10 |0.1
|Y |5 |0.2
|Z |10 |0.1
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrCols: []interface{}{
&types.TableColumn{
Multiplier: 3,
HAlign: types.HAlignCenter,
VAlign: types.VAlignTop,
Weight: 1,
},
},
types.AttrOptions: types.Options{"header"},
},
Header: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Dir (X,Y,Z)",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Num Cells",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Size",
},
},
},
},
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "X",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "10",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "0.1",
},
},
},
},
},
},
},
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Y",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "5",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "0.2",
},
},
},
},
},
},
},
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Z",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "10",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "0.1",
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with header col option", func() {
source := `[cols="h,>,>",options="header"]
|===
|Dir (X,Y,Z) |Num Cells |Size
|X |10 |0.1
|Y |5 |0.2
|Z |10 |0.1
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrCols: []interface{}{
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignDefault,
VAlign: types.VAlignDefault,
Weight: 1,
Style: types.HeaderStyle,
},
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignRight,
VAlign: types.VAlignDefault,
Weight: 1,
},
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignRight,
VAlign: types.VAlignDefault,
Weight: 1,
},
},
types.AttrOptions: types.Options{"header"},
},
Header: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Dir (X,Y,Z)",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Num Cells",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Size",
},
},
},
},
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
// Format: string(types.HeaderStyle),
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "X",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "10",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "0.1",
},
},
},
},
},
},
},
{
Cells: []*types.TableCell{
{
// Format: string(types.HeaderStyle),
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Y",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "5",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "0.2",
},
},
},
},
},
},
},
{
Cells: []*types.TableCell{
{
// Format: string(types.HeaderStyle),
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Z",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "10",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "0.1",
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with header and footer options", func() {
source := `[%header%footer,cols="2,2,1"]
|===
|Column 1, header row
|Column 2, header row
|Column 3, header row
|Cell in column 1, row 2
|Cell in column 2, row 2
|Cell in column 3, row 2
|Column 1, footer row
|Column 2, footer row
|Column 3, footer row
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrCols: []interface{}{
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignLeft,
VAlign: types.VAlignTop,
Weight: 2,
},
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignLeft,
VAlign: types.VAlignTop,
Weight: 2,
},
&types.TableColumn{
Multiplier: 1,
HAlign: types.HAlignLeft,
VAlign: types.VAlignTop,
Weight: 1,
},
},
types.AttrOptions: types.Options{"header", "footer"},
},
Header: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Column 1, header row",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Column 2, header row",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Column 3, header row",
},
},
},
},
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Cell in column 1, row 2",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Cell in column 2, row 2",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Cell in column 3, row 2",
},
},
},
},
},
},
},
},
Footer: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Column 1, footer row",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Column 2, footer row",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "Column 3, footer row",
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("columns with header and alignment changes", func() {
source := `[cols="2*^.^,<,.>,>"]
|===
|h1|h2|h3|h4|h5
|one|two|three|four|five
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrCols: []interface{}{
&types.TableColumn{
Multiplier: 2,
HAlign: "^",
VAlign: "^",
Weight: 1,
},
&types.TableColumn{
Multiplier: 1,
HAlign: "<",
VAlign: "<",
Weight: 1,
},
&types.TableColumn{
Multiplier: 1,
HAlign: "<",
VAlign: ">",
Weight: 1,
},
&types.TableColumn{
Multiplier: 1,
HAlign: ">",
VAlign: "<",
Weight: 1,
},
},
},
Header: &types.TableRow{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "h1",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "h2",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "h3",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "h4",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "h5",
},
},
},
},
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "one",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "two",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "three",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "four",
},
},
},
},
},
{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{
Content: "five",
},
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with basic image blocks in cells", func() {
source := `[cols="2*^"]
|===
a|
image::image.png[]
a|
image::another-image.png[]
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrCols: []interface{}{
&types.TableColumn{
Multiplier: 2,
HAlign: types.HAlignCenter,
VAlign: types.VAlignTop,
Weight: 1,
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Format: "a",
Elements: []interface{}{
&types.ImageBlock{
Location: &types.Location{
Path: "image.png",
},
},
},
},
{
Format: "a",
Elements: []interface{}{
&types.ImageBlock{
Location: &types.Location{
Path: "another-image.png",
},
},
},
},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
It("with image blocks with attributes in cells", func() {
source := `[cols="2*^"]
|===
a|
[#image-id]
.An image
image::image.png[]
a|
[#another-image-id]
.Another image
image::another-image.png[]
|===`
expected := &types.Document{
Elements: []interface{}{
&types.Table{
Attributes: types.Attributes{
types.AttrCols: []interface{}{
&types.TableColumn{
Multiplier: 2,
HAlign: types.HAlignCenter,
VAlign: types.VAlignTop,
Weight: 1,
},
},
},
Rows: []*types.TableRow{
{
Cells: []*types.TableCell{
{
Format: "a",
Elements: []interface{}{
&types.ImageBlock{
Attributes: types.Attributes{
types.AttrID: "image-id",
types.AttrTitle: "An image",
},
Location: &types.Location{
Path: "image.png",
},
},
},
},
{
Format: "a",
Elements: []interface{}{
&types.ImageBlock{
Attributes: types.Attributes{
types.AttrID: "another-image-id",
types.AttrTitle: "Another image",
},
Location: &types.Location{
Path: "another-image.png",
},
},
},
},
},
},
},
},
},
ElementReferences: types.ElementReferences{
"image-id": "An image",
"another-image-id": "Another image",
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
})
})
// Tests for the `cols` attribute grammar (the "TableColumnsAttribute" rule)
// and for the column expansion performed by `types.Table.Columns()`
// (multiplier expansion, relative width computation, autowidth handling).
var _ = Describe("table cols", func() {
	DescribeTable("valid",
		func(source string, expected []*types.TableColumn) {
			// given
			log.Debugf("processing '%s'", source)
			content := strings.NewReader(source)
			// when parsing only (ie, no substitution applied)
			result, err := parser.ParseReader("", content, parser.Entrypoint("TableColumnsAttribute"))
			// then
			Expect(err).NotTo(HaveOccurred())
			Expect(result).To(BeAssignableToTypeOf([]interface{}{}))
			cols := result.([]interface{})
			// now, set the attribute in the table and call the `Columns()` method
			t := &types.Table{
				Attributes: types.Attributes{
					types.AttrCols: result,
				},
				Rows: []*types.TableRow{{}},
			}
			// give the single row one empty cell per parsed column spec so that
			// `Columns()` has cells to line up against
			t.Rows[0].Cells = make([]*types.TableCell, len(cols))
			for i := range cols {
				t.Rows[0].Cells[i] = &types.TableCell{}
			}
			Expect(t.Columns()).To(Equal(expected))
		},
		// a single unit-weight column takes the full width
		Entry(`1`, `1`,
			[]*types.TableColumn{
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "100",
				},
			}),
		// the `3*` multiplier expands to 3 columns; the last width is rounded
		// up so that the widths total exactly 100
		Entry(`3*^`, `3*^`,
			[]*types.TableColumn{
				{
					Multiplier: 3,
					HAlign:     types.HAlignCenter,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "33.3333",
				},
				{
					Multiplier: 3,
					HAlign:     types.HAlignCenter,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "33.3333",
				},
				{
					Multiplier: 3,
					HAlign:     types.HAlignCenter,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "33.3334",
				},
			}),
		// `~` marks autowidth columns (no computed width)
		Entry(`20,~,~`, `20,~,~`,
			[]*types.TableColumn{
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Weight:     20,
					Width:      "20",
				},
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Autowidth:  true,
					Width:      "",
				},
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Autowidth:  true,
					Width:      "",
				},
			}),
		// horizontal alignments
		Entry(`<,>`, `<,>`,
			[]*types.TableColumn{
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "50",
				},
				{
					Multiplier: 1,
					HAlign:     types.HAlignRight,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "50",
				},
			}),
		// vertical alignments (`.` prefix)
		Entry(`.<,.>`, `.<,.>`,
			[]*types.TableColumn{
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "50",
				},
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignBottom,
					Weight:     1,
					Width:      "50",
				},
			}),
		// combined horizontal and vertical alignments
		Entry(`<.<,>.>`, `<.<,>.>`,
			[]*types.TableColumn{
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "50",
				},
				{
					Multiplier: 1,
					HAlign:     types.HAlignRight,
					VAlign:     types.VAlignBottom,
					Weight:     1,
					Width:      "50",
				},
			}),
		// alignments with weights: widths are proportional to the weights
		Entry(`<.<1,>.>2`, `<.<1,>.>2`,
			[]*types.TableColumn{
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "33.3333",
				},
				{
					Multiplier: 1,
					HAlign:     types.HAlignRight,
					VAlign:     types.VAlignBottom,
					Weight:     2,
					Width:      "66.6667",
				},
			}),
		// multipliers combined with weights
		Entry(`2*<.<1,1*>.>2`, `2*<.<1,1*>.>2`,
			[]*types.TableColumn{
				{
					Multiplier: 2,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "25",
				},
				{
					Multiplier: 2,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Weight:     1,
					Width:      "25",
				},
				{
					Multiplier: 1,
					HAlign:     types.HAlignRight,
					VAlign:     types.VAlignBottom,
					Weight:     2,
					Width:      "50",
				},
			}),
		// with style
		Entry(`2*^.^d,<e,.>s`, `2*^.^d,<e,.>s`,
			[]*types.TableColumn{
				{
					Multiplier: 2,
					HAlign:     types.HAlignCenter,
					VAlign:     types.VAlignMiddle,
					Style:      types.DefaultStyle,
					Weight:     1,
					Width:      "25",
				},
				{
					Multiplier: 2,
					HAlign:     types.HAlignCenter,
					VAlign:     types.VAlignMiddle,
					Style:      types.DefaultStyle,
					Weight:     1,
					Width:      "25",
				},
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignTop,
					Style:      types.EmphasisStyle,
					Weight:     1,
					Width:      "25",
				},
				{
					Multiplier: 1,
					HAlign:     types.HAlignLeft,
					VAlign:     types.VAlignBottom,
					Style:      types.StrongStyle,
					Weight:     1,
					Width:      "25",
				},
			}),
	)
	DescribeTable("invalid",
		func(source string) {
			// given
			log.Debugf("processing '%s'", source)
			content := strings.NewReader(source)
			// when parsing only (ie, no substitution applied)
			_, err := parser.ParseReader("", content, parser.Entrypoint("TableColumnsAttribute"))
			// then
			Expect(err).To(HaveOccurred())
		},
		// unknown case: should return an error
		Entry(`invalid`, `invalid`),
	)
})
|
package usersNMethods
import (
"database/sql"
"fmt"
"time"
)
// AddUser inserts a single row into the users table.
//
// A new connection pool is opened (and closed) for each call; the "postgres"
// driver must have been registered elsewhere (e.g. a blank import of lib/pq) —
// sql.Open only validates the driver name. The returned error is nil on
// success; existing callers that drop the return value keep the previous
// fire-and-forget behavior (previously the Open error panicked and the Exec
// error was only printed to stdout and then discarded).
func AddUser(ID int, FirstName string, LastName string, Email string, Sex int, Date time.Time, Loan float64) error {
	Db, err := sql.Open("postgres", "user=postgres password=root dbname=Technorely sslmode=disable")
	if err != nil {
		return fmt.Errorf("opening database: %w", err)
	}
	defer Db.Close()
	_, err = Db.Exec("insert into users (id, firstname, lastname, email, sex, date , loan)"+
		"values ($1, $2, $3, $4, $5, $6, $7)",
		ID,
		FirstName,
		LastName,
		Email,
		Sex,
		Date,
		Loan,
	)
	if err != nil {
		return fmt.Errorf("inserting user %d: %w", ID, err)
	}
	return nil
}
// UpdateUser sets the loan amount for the user with the given ID.
//
// Opens a fresh connection pool per call (see AddUser). The returned error is
// nil on success; previously the Exec error was only printed and discarded,
// so callers had no way to detect a failed update.
func UpdateUser(ID int, Loan float64) error {
	Db, err := sql.Open("postgres", "user=postgres password=root dbname=Technorely sslmode=disable")
	if err != nil {
		return fmt.Errorf("opening database: %w", err)
	}
	defer Db.Close()
	if _, err = Db.Exec("update users set loan = $1 where id = $2", Loan, ID); err != nil {
		return fmt.Errorf("updating user %d: %w", ID, err)
	}
	return nil
}
// DeleteUser removes the user with the given ID and prints the number of
// affected rows to stdout (preserving the original reporting behavior).
//
// Previously this function panicked on failure (unlike its siblings, which
// printed) and passed the (count, error) pair of RowsAffected straight to
// Println without checking the error; both are fixed by returning errors.
func DeleteUser(ID int) error {
	Db, err := sql.Open("postgres", "user=postgres password=root dbname=Technorely sslmode=disable")
	if err != nil {
		return fmt.Errorf("opening database: %w", err)
	}
	defer Db.Close()
	result, err := Db.Exec("delete from users where id = $1", ID)
	if err != nil {
		return fmt.Errorf("deleting user %d: %w", ID, err)
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("reading rows affected: %w", err)
	}
	fmt.Println(rows)
	return nil
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package memo
import (
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/errors"
)
// This file contains various helper functions that extract useful information
// from expressions.
// CanExtractConstTuple reports whether e is a TupleOp whose contents are all
// constant values (a nested tuple of constant values is considered constant).
func CanExtractConstTuple(e opt.Expr) bool {
	if e.Op() != opt.TupleOp {
		return false
	}
	return CanExtractConstDatum(e)
}
// CanExtractConstDatum returns true if a constant datum can be created from
// the given expression (tuples and arrays whose elements are all constant are
// themselves considered constant). If CanExtractConstDatum returns true, then
// ExtractConstDatum is guaranteed to work as well.
func CanExtractConstDatum(e opt.Expr) bool {
	if opt.IsConstValueOp(e) {
		return true
	}
	switch t := e.(type) {
	case *TupleExpr:
		// Constant iff every element is recursively constant.
		for _, elem := range t.Elems {
			if !CanExtractConstDatum(elem) {
				return false
			}
		}
		return true
	case *ArrayExpr:
		for _, elem := range t.Elems {
			if !CanExtractConstDatum(elem) {
				return false
			}
		}
		return true
	}
	return false
}
// ExtractConstDatum returns the Datum that represents the value of an
// expression with a constant value. An expression with a constant value is:
//  - one that has a ConstValue tag, or
//  - a tuple or array where all children are constant values.
//
// Panics (assertion failure) on any other expression; callers should check
// CanExtractConstDatum first.
func ExtractConstDatum(e opt.Expr) tree.Datum {
	switch t := e.(type) {
	case *NullExpr:
		return tree.DNull
	case *TrueExpr:
		return tree.DBoolTrue
	case *FalseExpr:
		return tree.DBoolFalse
	case *ConstExpr:
		return t.Value
	case *TupleExpr:
		// Recursively extract each tuple element.
		datums := make(tree.Datums, len(t.Elems))
		for i := range datums {
			datums[i] = ExtractConstDatum(t.Elems[i])
		}
		return tree.NewDTuple(t.Typ, datums...)
	case *ArrayExpr:
		elementType := t.Typ.ArrayContents()
		a := tree.NewDArray(elementType)
		a.Array = make(tree.Datums, len(t.Elems))
		for i := range a.Array {
			a.Array[i] = ExtractConstDatum(t.Elems[i])
			// Record whether the array holds NULL and/or non-NULL elements.
			if a.Array[i] == tree.DNull {
				a.HasNulls = true
			} else {
				a.HasNonNulls = true
			}
		}
		return a
	}
	panic(errors.AssertionFailedf("non-const expression: %+v", e))
}
// ExtractAggFunc digs down into the given aggregate expression and returns the
// aggregate function, skipping past any AggFilter or AggDistinct operators.
// Panics (assertion failure) if what remains is not an aggregate.
func ExtractAggFunc(e opt.ScalarExpr) opt.ScalarExpr {
	// Strip at most one AggFilter wrapper, then at most one AggDistinct
	// (matching how these modifiers nest).
	if f, isFilter := e.(*AggFilterExpr); isFilter {
		e = f.Input
	}
	if d, isDistinct := e.(*AggDistinctExpr); isDistinct {
		e = d.Input
	}
	if !opt.IsAggregateOp(e) {
		panic(errors.AssertionFailedf("not an Aggregate"))
	}
	return e
}
// ExtractAggInputColumns returns the set of columns the aggregate depends on:
// the filter column (if any) plus every Variable argument of the aggregate
// function itself. Panics (assertion failure) if e is not an aggregate once
// the AggFilter/AggDistinct modifiers are peeled off.
func ExtractAggInputColumns(e opt.ScalarExpr) opt.ColSet {
	var cols opt.ColSet
	if f, isFilter := e.(*AggFilterExpr); isFilter {
		// The filter condition contributes its column as a dependency.
		cols.Add(f.Filter.(*VariableExpr).Col)
		e = f.Input
	}
	if d, isDistinct := e.(*AggDistinctExpr); isDistinct {
		e = d.Input
	}
	if !opt.IsAggregateOp(e) {
		panic(errors.AssertionFailedf("not an Aggregate"))
	}
	n := e.ChildCount()
	for i := 0; i < n; i++ {
		if v, isVar := e.Child(i).(*VariableExpr); isVar {
			cols.Add(v.Col)
		}
	}
	return cols
}
// ExtractAggFirstVar is given an aggregate expression and returns the Variable
// expression for the first argument, skipping past modifiers like AggDistinct.
// Panics (assertion failure) if the aggregate has no arguments or its first
// argument is not a Variable.
func ExtractAggFirstVar(e opt.ScalarExpr) *VariableExpr {
	agg := ExtractAggFunc(e)
	if agg.ChildCount() == 0 {
		panic(errors.AssertionFailedf("aggregate does not have any arguments"))
	}
	v, isVar := agg.Child(0).(*VariableExpr)
	if !isVar {
		panic(errors.AssertionFailedf("first aggregate input is not a Variable"))
	}
	return v
}
// ExtractJoinEqualityColumns returns pairs of columns (one from the left side,
// one from the right side) which are constrained to be equal in a join (and
// have equivalent types). The two returned lists are parallel: leftEq[k] is
// equated with rightEq[k].
func ExtractJoinEqualityColumns(
	leftCols, rightCols opt.ColSet, on FiltersExpr,
) (leftEq opt.ColList, rightEq opt.ColList) {
	for i := range on {
		ok, left, right := ExtractJoinEquality(leftCols, rightCols, on[i].Condition)
		if !ok {
			continue
		}
		// Don't allow any column to show up twice.
		// TODO(radu): need to figure out the right thing to do in cases
		// like: left.a = right.a AND left.a = right.b
		duplicate := false
		for j := range leftEq {
			if leftEq[j] == left || rightEq[j] == right {
				duplicate = true
				break
			}
		}
		if duplicate {
			continue
		}
		leftEq = append(leftEq, left)
		rightEq = append(rightEq, right)
	}
	return leftEq, rightEq
}
// ExtractJoinEqualityFilters returns the filters containing pairs of columns
// (one from the left side, one from the right side) which are constrained to
// be equal in a join (and have equivalent types). If every filter is such an
// equality, the input slice is returned unchanged (no allocation).
func ExtractJoinEqualityFilters(leftCols, rightCols opt.ColSet, on FiltersExpr) FiltersExpr {
	// We want to avoid allocating a new slice unless strictly necessary.
	var newFilters FiltersExpr
	for i := range on {
		condition := on[i].Condition
		ok, _, _ := ExtractJoinEquality(leftCols, rightCols, condition)
		if ok {
			if newFilters != nil {
				newFilters = append(newFilters, on[i])
			}
		} else {
			if newFilters == nil {
				// First filter that must be dropped: materialize the result
				// slice and copy over the equalities seen so far (all of
				// on[:i] passed the check above).
				newFilters = make(FiltersExpr, i, len(on)-1)
				copy(newFilters, on[:i])
			}
		}
	}
	if newFilters != nil {
		return newFilters
	}
	// Nothing was dropped: reuse the caller's slice.
	return on
}
// ExtractJoinEqualityFilter returns the filter containing the given pair of
// columns (one from the left side, one from the right side) which are
// constrained to be equal in a join (and have equivalent types).
// Panics (assertion failure) if no such filter exists.
func ExtractJoinEqualityFilter(
	leftCol, rightCol opt.ColumnID, leftCols, rightCols opt.ColSet, on FiltersExpr,
) FiltersItem {
	for i := range on {
		ok, left, right := ExtractJoinEquality(leftCols, rightCols, on[i].Condition)
		if ok && left == leftCol && right == rightCol {
			return on[i]
		}
	}
	panic(errors.AssertionFailedf("could not find equality between columns %d and %d in filters %s",
		leftCol, rightCol, on.String(),
	))
}
// isVarEquality reports whether condition is an equality between two bare
// variable references (e.g. a = b), returning both sides when it is.
func isVarEquality(condition opt.ScalarExpr) (leftVar, rightVar *VariableExpr, ok bool) {
	eq, isEq := condition.(*EqExpr)
	if !isEq {
		return nil, nil, false
	}
	l, lok := eq.Left.(*VariableExpr)
	r, rok := eq.Right.(*VariableExpr)
	if !lok || !rok {
		return nil, nil, false
	}
	return l, r, true
}
// ExtractJoinEquality returns true if the given condition is a simple equality
// condition with two variables (e.g. a=b), where one of the variables
// (returned as "left") is in the set of leftCols and the other (returned as
// "right") is in the set of rightCols.
func ExtractJoinEquality(
	leftCols, rightCols opt.ColSet, condition opt.ScalarExpr,
) (ok bool, left, right opt.ColumnID) {
	lv, rv, isEq := isVarEquality(condition)
	if !isEq {
		return false, 0, 0
	}
	// Don't allow mixed types (see #22519).
	if !lv.DataType().Equivalent(rv.DataType()) {
		return false, 0, 0
	}
	// Orient the pair so that "left" always comes from leftCols.
	switch {
	case leftCols.Contains(lv.Col) && rightCols.Contains(rv.Col):
		return true, lv.Col, rv.Col
	case leftCols.Contains(rv.Col) && rightCols.Contains(lv.Col):
		return true, rv.Col, lv.Col
	}
	return false, 0, 0
}
// ExtractRemainingJoinFilters calculates the remaining ON condition after
// removing equalities that are handled separately: any variable equality whose
// column pair appears (in either orientation) in the parallel leftEq/rightEq
// lists is dropped. The result is empty if there are no remaining conditions.
// Panics if leftEq and rightEq are not the same length.
func ExtractRemainingJoinFilters(on FiltersExpr, leftEq, rightEq opt.ColList) FiltersExpr {
	if len(leftEq) != len(rightEq) {
		panic(errors.AssertionFailedf("leftEq and rightEq have different lengths"))
	}
	if len(leftEq) == 0 {
		// Nothing to remove; reuse the caller's slice.
		return on
	}
	var newFilters FiltersExpr
	for i := range on {
		leftVar, rightVar, ok := isVarEquality(on[i].Condition)
		if ok {
			a, b := leftVar.Col, rightVar.Col
			// Check both orientations: column equality is symmetric.
			found := false
			for j := range leftEq {
				if (a == leftEq[j] && b == rightEq[j]) ||
					(a == rightEq[j] && b == leftEq[j]) {
					found = true
					break
				}
			}
			if found {
				// Skip this condition.
				continue
			}
		}
		if newFilters == nil {
			// Lazily allocate only once we know a filter is kept.
			newFilters = make(FiltersExpr, 0, len(on)-i)
		}
		newFilters = append(newFilters, on[i])
	}
	return newFilters
}
// ExtractConstColumns returns columns in the filters expression that have been
// constrained to fixed values.
func ExtractConstColumns(on FiltersExpr, evalCtx *tree.EvalContext) (fixedCols opt.ColSet) {
	for i := range on {
		props := on[i].ScalarProps()
		// Unconstrained filters pin no columns; skip them.
		if c := props.Constraints; c != nil && !c.IsUnconstrained() {
			fixedCols.UnionWith(c.ExtractConstCols(evalCtx))
		}
	}
	return fixedCols
}
// ExtractValueForConstColumn returns the constant value of a column returned
// by ExtractConstColumns, or nil if no filter constrains it to a value.
func ExtractValueForConstColumn(
	on FiltersExpr, evalCtx *tree.EvalContext, col opt.ColumnID,
) tree.Datum {
	for i := range on {
		c := on[i].ScalarProps().Constraints
		if c == nil {
			continue
		}
		if val := c.ExtractValueForConstCol(evalCtx, col); val != nil {
			return val
		}
	}
	return nil
}
|
package local
// Config holds the settings for the local backend.
type Config struct {
	// Path is the filesystem location, populated from the "path" YAML key.
	Path string `yaml:"path"`
}
|
package main
import (
"github.com/nsf/termbox-go"
"turbot/gdb"
"turbot/viz"
)
// CodeView is a composite view showing program code: a title bar above a
// main area that switches between a file list and a source listing.
type CodeView struct {
	win     *viz.Window // the whole view, including the title bar
	mainWin *viz.Window // area below the title; hosts files or source
	title   *viz.TitleView
	files   *FilesView
	source  *SourceView
}
// MakeCodeView builds a CodeView inside win: the window is split vertically
// (SplitV(-1) — presumably reserving a fixed strip for the title; confirm in
// viz) into a title bar and a main area, the sub-views are created, the code
// view registers itself as win's key receiver, and the source view is shown
// initially.
func MakeCodeView(win *viz.Window) *CodeView {
	titleWin, mainWin := win.SplitV(-1)
	me := &CodeView{
		win:     win,
		mainWin: mainWin,
		title:   viz.MakeTitleView(titleWin),
		files:   MakeFilesView(mainWin),
		source:  MakeSourceView(mainWin),
	}
	win.SetKeyReceiver(me)
	me.ShowSourceView()
	return me
}
// Focus gives keyboard focus to the code view's window.
func (me *CodeView) Focus() {
	me.win.Focus()
}
// ShowFrame displays the given debugger frame in the source sub-view and
// switches the main area to it.
func (me *CodeView) ShowFrame(frame gdb.GdbFrame) {
	me.source.ShowFrame(frame)
	me.ShowSourceView()
}
// TODO: This doesn't work yet, because there's nothing in the "real" view
// (either source or files) to delegate keys to this one. Need to rethink
// how composite views like this work, so that it doesn't get too foul.

// Key handles a keypress for the code view: Tab toggles between the source
// and files sub-views; any other key is forwarded to whichever sub-view is
// currently painted in the main window.
func (me *CodeView) Key(mod termbox.Modifier, key termbox.Key, ch rune) {
	if key == termbox.KeyTab {
		// Toggle between source and files views.
		if me.mainWin.GetPainter() == me.files {
			me.ShowSourceView()
		} else {
			me.ShowFilesView()
		}
		// TODO: This should happen automatically in viz when the view changes.
		me.mainWin.Repaint()
		return
	}
	// Delegate all other keys to the active sub-view.
	if me.mainWin.GetPainter() == me.files {
		me.files.Key(mod, key, ch)
	} else {
		me.source.Key(mod, key, ch)
	}
}
// ShowSourceView makes the source listing the active main-area view.
func (me *CodeView) ShowSourceView() {
	me.title.SetTitle("source")
	me.mainWin.SetPainter(me.source)
}
// ShowFilesView makes the file list the active main-area view.
func (me *CodeView) ShowFilesView() {
	me.title.SetTitle("files")
	me.mainWin.SetPainter(me.files)
}
|
package migration
import (
"testing"
)
// TestConvert checks that a known otpauth-migration payload decodes into a
// single OTP parameter whose otpauth:// URL matches the expected value.
func TestConvert(t *testing.T) {
	const (
		testData = "otpauth-migration://offline?data=CjEKCkhlbGxvId6tvu8SGEV4YW1wbGU6YWxpY2VAZ29vZ2xlLmNvbRoHRXhhbXBsZTAC"
		want     = "otpauth://totp/Example:alice@google.com?issuer=Example&period=30&secret=JBSWY3DPEHPK3PXP"
	)
	p, err := UnmarshalURL(testData)
	if err != nil {
		t.Fatal(err)
	}
	if len(p.OtpParameters) < 1 {
		// Fixed typo in the failure message ("lengh" -> "length").
		t.Fatalf("got length %v, want 1", len(p.OtpParameters))
	}
	if got := p.OtpParameters[0].URL().String(); got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}
// TestStdBase64 verifies that a payload whose data parameter uses standard
// (non-URL-safe) base64 — note the '/', '+' and padding — still unmarshals.
func TestStdBase64(t *testing.T) {
	const payload = "otpauth-migration://offline?data=CjsKFBHMQnKu/odWlB/zUy+dfiRIaHj0EhhFeGFtcGxlOmFsaWNlQGdvb2dsZS5jb20aB0V4YW1wbGUwAg=="
	if _, err := UnmarshalURL(payload); err != nil {
		t.Fatal(err)
	}
}
|
package canvasapi
import (
"fmt"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
)
var resourceRegex = regexp.MustCompile(`<(.*?)>; rel="(.*?)"`)
type PagedResource struct {
Current, First, Last, Next *PagedLink
}
type PagedLink struct {
URL *url.URL
Page int
}
func ExtractPagedResource(header http.Header) (*PagedResource, error) {
errs := []string{}
pagedResource := &PagedResource{}
links := header.Get("Link")
parts := resourceRegex.FindAllStringSubmatch(links, -1)
m := map[string]*PagedLink{}
var err error
for _, part := range parts {
m[part[2]], err = newPagedLink(part[1])
if err != nil {
return pagedResource, err
}
}
var ok bool
if pagedResource.Current, ok = m["current"]; !ok {
errs = append(errs, "could not find current link")
}
if pagedResource.First, ok = m["first"]; !ok {
errs = append(errs, "could not find first link")
}
if pagedResource.Last, ok = m["last"]; !ok {
errs = append(errs, "could not find last link")
}
pagedResource.Next, _ = m["next"]
if len(errs) > 0 {
return nil, fmt.Errorf(strings.Join(errs, ", "))
}
return pagedResource, nil
}
func newPagedLink(urlstr string) (*PagedLink, error) {
u, err := url.Parse(urlstr)
if err != nil {
return nil, err
}
page, err := strconv.ParseInt(u.Query().Get("page"), 10, 32)
if err != nil {
return nil, fmt.Errorf("could not parse page num: %w", err)
}
return &PagedLink{
URL: u,
Page: int(page),
}, nil
}
|
package parlaytypes
import (
"encoding/json"
)
// TreasureMap - X Marks the spot
// A treasure map defines the automation that will take place on the hosts
// listed in its deployments.
type TreasureMap struct {
	// An array/list of deployments that will take place as part of this "map"
	Deployments []Deployment `json:"deployments"`
}
// Deployment defines the hosts and the action(s) that should be performed on them
type Deployment struct {
	// Name of the deployment that is taking place i.e. (Install MySQL)
	Name string `json:"name"`
	// An array/list of hosts that these actions should be performed upon
	Hosts []string `json:"hosts"`
	// Parallel allows multiple actions across multiple hosts in parallel
	Parallel bool `json:"parallel"`
	// ParallelSessions presumably caps the number of concurrent sessions when
	// Parallel is set — confirm against the deployment executor.
	ParallelSessions int `json:"parallelSessions"`
	// The actions that should be performed
	Actions []Action `json:"actions"`
}
// Action defines the instructions that will be executed. Exactly which fields
// apply depends on ActionType; unused fields are omitted from the JSON.
type Action struct {
	Name       string `json:"name"`
	ActionType string `json:"type"`
	Timeout    int    `json:"timeout"`

	// File based operations
	Source      string `json:"source,omitempty"`
	Destination string `json:"destination,omitempty"`
	FileMove    bool   `json:"fileMove,omitempty"`

	// Package manager operations
	PkgManager   string `json:"packageManager,omitempty"`
	PkgOperation string `json:"packageOperation,omitempty"`
	Packages     string `json:"packages,omitempty"`

	// Command operations
	Command          string   `json:"command,omitempty"`
	Commands         []string `json:"commands,omitempty"`
	CommandLocal     bool     `json:"commandLocal,omitempty"`
	CommandSaveFile  string   `json:"commandSaveFile,omitempty"`
	CommandSaveAsKey string   `json:"commandSaveAsKey,omitempty"`
	CommandSudo      string   `json:"commandSudo,omitempty"`

	// Piping commands, read in a file and send over stdin, or capture stdout
	// from a local command
	CommandPipeFile string `json:"commandPipeFile,omitempty"`
	CommandPipeCmd  string `json:"commandPipeCmd,omitempty"`

	// Ignore any failures
	IgnoreFailure bool `json:"ignoreFail,omitempty"`

	// Key operations
	KeyFile string `json:"keyFile,omitempty"`
	KeyName string `json:"keyName,omitempty"`

	// Plugin configuration, kept as raw JSON for the plugin itself to decode
	Plugin json.RawMessage `json:"plugin,omitempty"`
}
|
package api
import (
"database/sql"
"net/http"
log "github.com/Sirupsen/logrus"
restful "github.com/emicklei/go-restful"
"github.com/gatorloopwebapp/database"
)
// CalculatedAcceleration : struct to hold calculated acceleration values
type CalculatedAcceleration struct {
	// Val is serialized as "acceleration" in API responses
	Val float64 `json:"acceleration"`
}
// GetRecent : gets the most recent calculated acceleration.
// It reads the newest row from gatorloop.calc_acc and writes it as JSON;
// an empty table yields a zero-valued acceleration rather than an error.
func (c CalculatedAcceleration) GetRecent(request *restful.Request, response *restful.Response) {
	row := database.DB.QueryRow("SELECT acc FROM gatorloop.calc_acc ORDER BY idCalcAcc DESC LIMIT 1")
	var acc sql.NullFloat64
	// sql.ErrNoRows just means the table is empty, which is handled by
	// the zero-value fallback below; only other scan errors are fatal.
	if err := row.Scan(&acc); err != nil && err != sql.ErrNoRows {
		log.Errorf("Row scan failed. %v", err)
		response.WriteError(http.StatusInternalServerError, err)
		return
	}
	ret := CalculatedAcceleration{0}
	if acc.Valid {
		ret = CalculatedAcceleration{acc.Float64}
	}
	// The original ignored WriteEntity's error; log it so failed
	// serialization is at least visible.
	if err := response.WriteEntity(ret); err != nil {
		log.Errorf("WriteEntity failed. %v", err)
	}
}
|
package odoo
import (
"fmt"
)
// AccountInvoiceConfirm represents account.invoice.confirm model.
//
// NOTE(review): the tag option "omptempty" looks like a typo of
// "omitempty". Left untouched because the xmlrpc marshaler that reads
// these tags is not visible here (the generator may match the literal
// string) — confirm before fixing.
type AccountInvoiceConfirm struct {
	LastUpdate *Time `xmlrpc:"__last_update,omptempty"`
	CreateDate *Time `xmlrpc:"create_date,omptempty"`
	CreateUid *Many2One `xmlrpc:"create_uid,omptempty"`
	DisplayName *String `xmlrpc:"display_name,omptempty"`
	Id *Int `xmlrpc:"id,omptempty"`
	WriteDate *Time `xmlrpc:"write_date,omptempty"`
	WriteUid *Many2One `xmlrpc:"write_uid,omptempty"`
}
// AccountInvoiceConfirms represents array of account.invoice.confirm model.
type AccountInvoiceConfirms []AccountInvoiceConfirm
// AccountInvoiceConfirmModel is the odoo model name.
const AccountInvoiceConfirmModel = "account.invoice.confirm"
// Many2One convert AccountInvoiceConfirm to *Many2One.
func (aic *AccountInvoiceConfirm) Many2One() *Many2One {
	recordID := aic.Id.Get()
	return NewMany2One(recordID, "")
}
// CreateAccountInvoiceConfirm creates a new account.invoice.confirm model and returns its id.
func (c *Client) CreateAccountInvoiceConfirm(aic *AccountInvoiceConfirm) (int64, error) {
	ids, err := c.CreateAccountInvoiceConfirms([]*AccountInvoiceConfirm{aic})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		// Creation reported success but produced no id.
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateAccountInvoiceConfirms creates new account.invoice.confirm models and returns their ids.
// (The original doc comment named the singular function by mistake.)
func (c *Client) CreateAccountInvoiceConfirms(aics []*AccountInvoiceConfirm) ([]int64, error) {
	// Pre-size the boxed slice to avoid repeated growth while adapting
	// each record to the []interface{} the generic Create call expects.
	vv := make([]interface{}, 0, len(aics))
	for _, v := range aics {
		vv = append(vv, v)
	}
	return c.Create(AccountInvoiceConfirmModel, vv)
}
// UpdateAccountInvoiceConfirm updates an existing account.invoice.confirm record.
func (c *Client) UpdateAccountInvoiceConfirm(aic *AccountInvoiceConfirm) error {
	ids := []int64{aic.Id.Get()}
	return c.UpdateAccountInvoiceConfirms(ids, aic)
}
// UpdateAccountInvoiceConfirms updates existing account.invoice.confirm records.
// All records (represented by ids) will be updated by aic values.
// The same aic payload is written to every id in a single call.
func (c *Client) UpdateAccountInvoiceConfirms(ids []int64, aic *AccountInvoiceConfirm) error {
	return c.Update(AccountInvoiceConfirmModel, ids, aic)
}
// DeleteAccountInvoiceConfirm deletes an existing account.invoice.confirm record.
func (c *Client) DeleteAccountInvoiceConfirm(id int64) error {
	ids := []int64{id}
	return c.DeleteAccountInvoiceConfirms(ids)
}
// DeleteAccountInvoiceConfirms deletes existing account.invoice.confirm records.
// All ids are removed in a single call to the generic Delete helper.
func (c *Client) DeleteAccountInvoiceConfirms(ids []int64) error {
	return c.Delete(AccountInvoiceConfirmModel, ids)
}
// GetAccountInvoiceConfirm gets account.invoice.confirm existing record.
func (c *Client) GetAccountInvoiceConfirm(id int64) (*AccountInvoiceConfirm, error) {
	records, err := c.GetAccountInvoiceConfirms([]int64{id})
	if err != nil {
		return nil, err
	}
	// Guard-style inversion of the original: fail when nothing came back.
	if records == nil || len(*records) == 0 {
		return nil, fmt.Errorf("id %v of account.invoice.confirm not found", id)
	}
	return &((*records)[0]), nil
}
// GetAccountInvoiceConfirms gets account.invoice.confirm existing records.
func (c *Client) GetAccountInvoiceConfirms(ids []int64) (*AccountInvoiceConfirms, error) {
	records := &AccountInvoiceConfirms{}
	err := c.Read(AccountInvoiceConfirmModel, ids, nil, records)
	if err != nil {
		return nil, err
	}
	return records, nil
}
// FindAccountInvoiceConfirm finds account.invoice.confirm record by querying it with criteria.
func (c *Client) FindAccountInvoiceConfirm(criteria *Criteria) (*AccountInvoiceConfirm, error) {
	records := &AccountInvoiceConfirms{}
	// Limit(1): only the first match is wanted.
	err := c.SearchRead(AccountInvoiceConfirmModel, criteria, NewOptions().Limit(1), records)
	if err != nil {
		return nil, err
	}
	if records == nil || len(*records) == 0 {
		return nil, fmt.Errorf("account.invoice.confirm was not found with criteria %v", criteria)
	}
	return &((*records)[0]), nil
}
// FindAccountInvoiceConfirms finds account.invoice.confirm records by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountInvoiceConfirms(criteria *Criteria, options *Options) (*AccountInvoiceConfirms, error) {
	records := &AccountInvoiceConfirms{}
	err := c.SearchRead(AccountInvoiceConfirmModel, criteria, options, records)
	if err != nil {
		return nil, err
	}
	return records, nil
}
// FindAccountInvoiceConfirmIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountInvoiceConfirmIds(criteria *Criteria, options *Options) ([]int64, error) {
	matches, searchErr := c.Search(AccountInvoiceConfirmModel, criteria, options)
	if searchErr != nil {
		// Keep the original's non-nil empty slice on error.
		return []int64{}, searchErr
	}
	return matches, nil
}
// FindAccountInvoiceConfirmId finds record id by querying it with criteria.
func (c *Client) FindAccountInvoiceConfirmId(criteria *Criteria, options *Options) (int64, error) {
	ids, searchErr := c.Search(AccountInvoiceConfirmModel, criteria, options)
	switch {
	case searchErr != nil:
		return -1, searchErr
	case len(ids) > 0:
		return ids[0], nil
	default:
		return -1, fmt.Errorf("account.invoice.confirm was not found with criteria %v and options %v", criteria, options)
	}
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package ordering
import (
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/opt/norm"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical"
"github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testcat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testexpr"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)
// TestLookupJoinProvided exercises lookupJoinBuildProvided: for each case
// it builds a lookup join over table t (PK c1,c2) with a fake input that
// reports a given provided ordering, then checks the provided ordering the
// join derives for a required output ordering — including remapping input
// columns to equivalent table columns when the join drops the inputs.
func TestLookupJoinProvided(t *testing.T) {
	tc := testcat.New()
	if _, err := tc.ExecuteDDL(
		"CREATE TABLE t (c1 INT, c2 INT, c3 INT, c4 INT, PRIMARY KEY(c1, c2))",
	); err != nil {
		t.Fatal(err)
	}
	evalCtx := tree.NewTestingEvalContext(nil /* st */)
	var f norm.Factory
	f.Init(evalCtx, tc)
	md := f.Metadata()
	tn := tree.NewUnqualifiedTableName("t")
	tab := md.AddTable(tc.Table(tn), tn)
	// Sanity-check the column ID assumption the test cases below rely on.
	if c1 := tab.ColumnID(0); c1 != 1 {
		t.Fatalf("unexpected ID for column c1: %d\n", c1)
	}
	c := func(cols ...opt.ColumnID) opt.ColSet {
		return opt.MakeColSet(cols...)
	}
	testCases := []struct {
		keyCols  opt.ColList
		outCols  opt.ColSet
		required string
		input    string
		provided string
	}{
		// In these tests, the input (left side of the join) has columns 5,6 and the
		// table (right side) has columns 1,2,3,4 and the join has condition
		// (c5, c6) = (c1, c2).
		//
		{ // case 1: the lookup join adds columns 3,4 from the table and retains the
			// input columns.
			keyCols:  opt.ColList{5, 6},
			outCols:  c(3, 4, 5, 6),
			required: "+5,+6",
			input:    "+5,+6",
			provided: "+5,+6",
		},
		{ // case 2: the lookup join produces all columns. The provided ordering
			// on 5,6 is equivalent to an ordering on 1,2.
			keyCols:  opt.ColList{5, 6},
			outCols:  c(1, 2, 3, 4, 5, 6),
			required: "-1,+2",
			input:    "-5,+6",
			provided: "-5,+6",
		},
		{ // case 3: the lookup join does not produce input columns 5,6; we must
			// remap the input ordering to refer to output columns 1,2 instead.
			keyCols:  opt.ColList{5, 6},
			outCols:  c(1, 2, 3, 4),
			required: "+1,-2",
			input:    "+5,-6",
			provided: "+1,-2",
		},
		{ // case 4: a hybrid of the two cases above (we need to remap column 6).
			keyCols:  opt.ColList{5, 6},
			outCols:  c(1, 2, 3, 4, 5),
			required: "-1,-2",
			input:    "-5,-6",
			provided: "-5,-2",
		},
	}
	for tcIdx, tc := range testCases {
		t.Run(fmt.Sprintf("case%d", tcIdx+1), func(t *testing.T) {
			// testexpr.Instance stands in for the join's input, reporting
			// only the relational props and provided ordering we set here.
			input := &testexpr.Instance{
				Rel: &props.Relational{},
				Provided: &physical.Provided{
					Ordering: physical.ParseOrdering(tc.input),
				},
			}
			lookupJoin := f.Memo().MemoizeLookupJoin(
				input,
				nil, /* FiltersExpr */
				&memo.LookupJoinPrivate{
					JoinType: opt.InnerJoinOp,
					Table:    tab,
					Index:    cat.PrimaryIndex,
					KeyCols:  tc.keyCols,
					Cols:     tc.outCols,
				},
			)
			req := physical.ParseOrderingChoice(tc.required)
			res := lookupJoinBuildProvided(lookupJoin, &req).String()
			if res != tc.provided {
				t.Errorf("expected '%s', got '%s'", tc.provided, res)
			}
		})
	}
}
|
package tests
import (
"fmt"
"testing"
"github.com/ravendb/ravendb-go-client"
"github.com/stretchr/testify/assert"
)
// Family is a test document holding a flat list of names.
type Family struct {
	Names []string
}
// FamilyMembers is a test document holding structured members.
type FamilyMembers struct {
	Members []*Member
}
// Member is a nested test entity used inside FamilyMembers.
type Member struct {
	Name string
	Age  int
}
// Arr1 is a test document with a string array.
type Arr1 struct {
	Str []string
}
// Arr2 is a test document with an array of Arr1 (array-of-arrays case).
type Arr2 struct {
	Arr1 []*Arr1
}
// Poc is a test document whose Obj field starts out nil.
type Poc struct {
	Name string
	Obj  *User
}
// crudTestEntitiesAreSavedUsingLowerCase verifies that entity fields are
// stored in the server document under lower-cased JSON names (Go field
// LastName -> "lastName") and that RQL can filter on that name.
func crudTestEntitiesAreSavedUsingLowerCase(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		user1 := &User{}
		user1.setLastName("user1")
		err = newSession.StoreWithID(user1, "users/1")
		assert.NoError(t, err)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		newSession.Close()
	}
	// Fetch the raw document (bypassing entity mapping) to inspect the
	// exact JSON field names the server stored.
	documentsCommand, err := ravendb.NewGetDocumentsCommand([]string{"users/1"}, nil, false)
	assert.NoError(t, err)
	err = store.GetRequestExecutor("").ExecuteCommand(documentsCommand, nil)
	assert.NoError(t, err)
	result := documentsCommand.Result
	userJSON := result.Results[0]
	_, exists := userJSON["lastName"]
	assert.True(t, exists)
	{
		newSession := openSessionMust(t, store)
		var users []*User
		q := newSession.Advanced().RawQuery("from Users where lastName = 'user1'")
		err = q.GetResults(&users)
		assert.NoError(t, err)
		assert.Equal(t, len(users), 1)
		newSession.Close()
	}
}
// crudTestCanCustomizePropertyNamingStrategy is intentionally a no-op in
// the Go port; kept so the test ordering matches the Java suite.
func crudTestCanCustomizePropertyNamingStrategy(t *testing.T, driver *RavenTestDriver) {
	// Note: not possible to tweak behavior of JSON serialization
	// (entity mapper) in Go
}
// crudTestCrudOperations drives store/delete/modify/load through a single
// session and checks that deletes and field updates are visible after
// SaveChanges. Statement order matters: the session tracks all entities.
func crudTestCrudOperations(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		user1 := &User{}
		user1.setLastName("user1")
		err = newSession.StoreWithID(user1, "users/1")
		assert.NoError(t, err)
		user2 := &User{}
		user2.setName("user2")
		user1.Age = 1
		err = newSession.StoreWithID(user2, "users/2")
		assert.NoError(t, err)
		user3 := &User{}
		user3.setName("user3")
		user3.Age = 1
		err = newSession.StoreWithID(user3, "users/3")
		assert.NoError(t, err)
		user4 := &User{}
		user4.setName("user4")
		err = newSession.StoreWithID(user4, "users/4")
		assert.NoError(t, err)
		err = newSession.Delete(user2)
		assert.NoError(t, err)
		user3.Age = 3
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		// users/2 was deleted before SaveChanges, so loading it yields nil.
		var tempUser *User
		err = newSession.Load(&tempUser, "users/2")
		assert.NoError(t, err)
		assert.Nil(t, tempUser)
		tempUser = nil
		err = newSession.Load(&tempUser, "users/3")
		assert.NoError(t, err)
		assert.Equal(t, tempUser.Age, 3)
		user1 = nil
		err = newSession.Load(&user1, "users/1")
		assert.NoError(t, err)
		user4 = nil
		err = newSession.Load(&user4, "users/4")
		assert.NoError(t, err)
		err = newSession.Delete(user4)
		assert.NoError(t, err)
		user1.Age = 10
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		tempUser = nil
		err = newSession.Load(&tempUser, "users/4")
		assert.NoError(t, err)
		assert.Nil(t, tempUser)
		tempUser = nil
		err = newSession.Load(&tempUser, "users/1")
		assert.NoError(t, err)
		assert.Equal(t, tempUser.Age, 10)
		newSession.Close()
	}
}
// crudTestCrudOperationsWithWhatChanged mirrors crudTestCrudOperations but
// additionally checks Advanced().WhatChanged() counts before each
// SaveChanges (4 pending changes in the first batch, 2 in the second).
func crudTestCrudOperationsWithWhatChanged(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		{
			user1 := &User{}
			user1.setLastName("user1")
			err = newSession.StoreWithID(user1, "users/1")
			assert.NoError(t, err)
			user2 := &User{}
			user2.setName("user2")
			user1.Age = 1 // TODO: that's probably a bug in Java code
			err = newSession.StoreWithID(user2, "users/2")
			assert.NoError(t, err)
			user3 := &User{}
			user3.setName("user3")
			user3.Age = 1
			err = newSession.StoreWithID(user3, "users/3")
			assert.NoError(t, err)
			user4 := &User{}
			user4.setName("user4")
			err = newSession.StoreWithID(user4, "users/4")
			assert.NoError(t, err)
			err = newSession.Delete(user2)
			assert.NoError(t, err)
			user3.Age = 3
			// 3 stores + 1 delete pending = 4 tracked changes.
			changes, _ := newSession.Advanced().WhatChanged()
			assert.Equal(t, len(changes), 4)
			err = newSession.SaveChanges()
			assert.NoError(t, err)
		}
		{
			var user1, user2, user3, user4 *User
			err = newSession.Load(&user2, "users/2")
			assert.NoError(t, err)
			assert.Nil(t, user2)
			err = newSession.Load(&user3, "users/3")
			assert.NoError(t, err)
			assert.Equal(t, user3.Age, 3)
			err = newSession.Load(&user1, "users/1")
			assert.NoError(t, err)
			assert.NotNil(t, user1)
			err = newSession.Load(&user4, "users/4")
			assert.NoError(t, err)
			assert.NotNil(t, user4)
			err = newSession.Delete(user4)
			assert.NoError(t, err)
			user1.Age = 10
			// 1 delete + 1 modification pending = 2 tracked changes.
			var changes map[string][]*ravendb.DocumentsChanges
			changes, err = newSession.Advanced().WhatChanged()
			assert.NoError(t, err)
			assert.Equal(t, len(changes), 2)
			err = newSession.SaveChanges()
			assert.NoError(t, err)
		}
		var tempUser *User
		err = newSession.Load(&tempUser, "users/4")
		assert.NoError(t, err)
		assert.Nil(t, tempUser)
		tempUser = nil
		err = newSession.Load(&tempUser, "users/1")
		assert.NoError(t, err)
		assert.Equal(t, tempUser.Age, 10)
		newSession.Close()
	}
}
// crudTestCrudOperationsWithArrayInObject checks that replacing an array
// field with different content is detected as exactly one document change.
func crudTestCrudOperationsWithArrayInObject(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		family := &Family{}
		family.Names = []string{"Hibernating Rhinos", "RavenDB"}
		err = newSession.StoreWithID(family, "family/1")
		assert.NoError(t, err)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		var newFamily *Family
		err = newSession.Load(&newFamily, "family/1")
		assert.NoError(t, err)
		newFamily.Names = []string{"Toli", "Mitzi", "Boki"}
		changes, _ := newSession.Advanced().WhatChanged()
		assert.Equal(t, len(changes), 1)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		newSession.Close()
	}
}
// crudTestCrudOperationsWithArrayInObject2 checks change tracking of array
// content: an identical array is no change, the same elements in a
// different order count as one change.
func crudTestCrudOperationsWithArrayInObject2(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		family := &Family{}
		family.Names = []string{"Hibernating Rhinos", "RavenDB"}
		err = newSession.StoreWithID(family, "family/1")
		assert.NoError(t, err)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		var newFamily *Family
		err = newSession.Load(&newFamily, "family/1")
		assert.NoError(t, err)
		newFamily.Names = []string{"Hibernating Rhinos", "RavenDB"}
		changes, _ := newSession.Advanced().WhatChanged()
		assert.Equal(t, len(changes), 0)
		newFamily.Names = []string{"RavenDB", "Hibernating Rhinos"}
		changes, _ = newSession.Advanced().WhatChanged()
		assert.Equal(t, len(changes), 1)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		newSession.Close()
	}
}
// crudTestCrudOperationsWithArrayInObject3 checks that shrinking an array
// field is detected as one document change.
func crudTestCrudOperationsWithArrayInObject3(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		family := &Family{}
		family.Names = []string{"Hibernating Rhinos", "RavenDB"}
		err = newSession.StoreWithID(family, "family/1")
		assert.NoError(t, err)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		var newFamily *Family
		err = newSession.Load(&newFamily, "family/1")
		assert.NoError(t, err)
		newFamily.Names = []string{"RavenDB"}
		changes, _ := newSession.Advanced().WhatChanged()
		assert.Equal(t, len(changes), 1)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		newSession.Close()
	}
}
// crudTestCrudOperationsWithArrayInObject4 checks that growing an array
// field is detected as one document change.
func crudTestCrudOperationsWithArrayInObject4(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		family := &Family{}
		family.Names = []string{"Hibernating Rhinos", "RavenDB"}
		err = newSession.StoreWithID(family, "family/1")
		assert.NoError(t, err)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		var newFamily *Family
		err = newSession.Load(&newFamily, "family/1")
		assert.NoError(t, err)
		newFamily.Names = []string{"RavenDB", "Hibernating Rhinos", "Toli", "Mitzi", "Boki"}
		changes, _ := newSession.Advanced().WhatChanged()
		assert.Equal(t, len(changes), 1)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		newSession.Close()
	}
}
// crudTestCrudOperationsWithNull stores an empty User and checks that a
// freshly loaded entity reports no changes until a field is mutated.
func crudTestCrudOperationsWithNull(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		user := &User{}
		err = newSession.StoreWithID(user, "users/1")
		assert.NoError(t, err)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		var user2 *User
		err = newSession.Load(&user2, "users/1")
		assert.NoError(t, err)
		WhatChanged, _ := newSession.Advanced().WhatChanged()
		assert.Equal(t, len(WhatChanged), 0)
		user2.Age = 3
		WhatChanged, _ = newSession.Advanced().WhatChanged()
		assert.Equal(t, len(WhatChanged), 1)
		newSession.Close()
	}
}
// crudTestCrudOperationsWithArrayOfObjects replaces the elements of an
// array-of-objects field and inspects the per-field change records
// (FieldName, old value, new value) reported by WhatChanged. Numeric
// old/new values arrive as float64 because JSON numbers decode to float64.
func crudTestCrudOperationsWithArrayOfObjects(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		member1 := &Member{}
		member1.Name = "Hibernating Rhinos"
		member1.Age = 8
		member2 := &Member{}
		member2.Name = "RavenDB"
		member2.Age = 4
		family := &FamilyMembers{}
		family.Members = []*Member{member1, member2}
		err = newSession.StoreWithID(family, "family/1")
		assert.NoError(t, err)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		// Swap the two members: same values, opposite positions.
		member1 = &Member{}
		member1.Name = "RavenDB"
		member1.Age = 4
		member2 = &Member{}
		member2.Name = "Hibernating Rhinos"
		member2.Age = 8
		var newFamily *FamilyMembers
		err = newSession.Load(&newFamily, "family/1")
		assert.NoError(t, err)
		newFamily.Members = []*Member{member1, member2}
		changes, _ := newSession.Advanced().WhatChanged()
		assert.Equal(t, len(changes), 1)
		family1Changes := changes["family/1"]
		assert.Equal(t, len(family1Changes), 4)
		// Note: order or fields differs from Java. In Java the order seems to be the order
		// of declaration in a class. In Go it's alphabetical
		{
			change := family1Changes[0]
			assert.Equal(t, change.FieldName, "Age")
			assert.Equal(t, change.Change, ravendb.DocumentChangeFieldChanged)
			oldVal := change.FieldOldValue
			assert.Equal(t, oldVal, 8.0)
			newVal := change.FieldNewValue
			assert.Equal(t, newVal, 4.0)
		}
		{
			change := family1Changes[1]
			assert.Equal(t, change.FieldName, "Name")
			assert.Equal(t, change.Change, ravendb.DocumentChangeFieldChanged)
			oldValStr := fmt.Sprintf("%#v", change.FieldOldValue)
			assert.Equal(t, oldValStr, "\"Hibernating Rhinos\"")
			newValStr := fmt.Sprintf("%#v", change.FieldNewValue)
			assert.Equal(t, newValStr, "\"RavenDB\"")
		}
		{
			change := family1Changes[2]
			assert.Equal(t, change.FieldName, "Age")
			assert.Equal(t, change.Change, ravendb.DocumentChangeFieldChanged)
			oldVal := change.FieldOldValue
			assert.Equal(t, oldVal, 4.0)
			newVal := change.FieldNewValue
			assert.Equal(t, newVal, 8.0)
		}
		{
			change := family1Changes[3]
			assert.Equal(t, change.FieldName, "Name")
			assert.Equal(t, change.Change, ravendb.DocumentChangeFieldChanged)
			oldValStr := fmt.Sprintf("%#v", change.FieldOldValue)
			assert.Equal(t, oldValStr, "\"RavenDB\"")
			newValStr := fmt.Sprintf("%#v", change.FieldNewValue)
			assert.Equal(t, newValStr, "\"Hibernating Rhinos\"")
		}
		// Replace both members with entirely new values (still unsaved,
		// so old values are the last-saved state).
		member1 = &Member{}
		member1.Name = "Toli"
		member1.Age = 5
		member2 = &Member{}
		member2.Name = "Boki"
		member2.Age = 15
		newFamily.Members = []*Member{member1, member2}
		changes, _ = newSession.Advanced().WhatChanged()
		assert.Equal(t, len(changes), 1)
		family1Changes = changes["family/1"]
		assert.Equal(t, len(family1Changes), 4)
		// Note: the order of fields in Go is different than in Java. In Go it's alphabetic.
		{
			change := family1Changes[0]
			assert.Equal(t, change.FieldName, "Age")
			assert.Equal(t, change.Change, ravendb.DocumentChangeFieldChanged)
			oldVal := change.FieldOldValue
			assert.Equal(t, oldVal, 8.0)
			newVal := change.FieldNewValue
			assert.Equal(t, newVal, 5.0)
		}
		{
			change := family1Changes[1]
			assert.Equal(t, change.FieldName, "Name")
			assert.Equal(t, change.Change, ravendb.DocumentChangeFieldChanged)
			oldValStr := fmt.Sprintf("%#v", change.FieldOldValue)
			assert.Equal(t, oldValStr, "\"Hibernating Rhinos\"")
			newValStr := fmt.Sprintf("%#v", change.FieldNewValue)
			assert.Equal(t, newValStr, "\"Toli\"")
		}
		{
			change := family1Changes[2]
			assert.Equal(t, change.FieldName, "Age")
			assert.Equal(t, change.Change, ravendb.DocumentChangeFieldChanged)
			oldVal := change.FieldOldValue
			assert.Equal(t, oldVal, 4.0)
			newVal := change.FieldNewValue
			assert.Equal(t, newVal, 15.0)
		}
		{
			change := family1Changes[3]
			assert.Equal(t, change.FieldName, "Name")
			assert.Equal(t, change.Change, ravendb.DocumentChangeFieldChanged)
			oldValStr := fmt.Sprintf("%#v", change.FieldOldValue)
			assert.Equal(t, oldValStr, "\"RavenDB\"")
			newValStr := fmt.Sprintf("%#v", change.FieldNewValue)
			assert.Equal(t, newValStr, "\"Boki\"")
		}
		newSession.Close()
	}
}
// crudTestCrudOperationsWithArrayOfArrays checks change tracking on nested
// arrays: element-level old/new value pairs are reported, and after saving,
// a second session only reports the elements that actually differ.
func crudTestCrudOperationsWithArrayOfArrays(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		newSession := openSessionMust(t, store)
		a1 := &Arr1{}
		a1.Str = []string{"a", "b"}
		a2 := &Arr1{}
		a2.Str = []string{"c", "d"}
		arr := &Arr2{}
		arr.Arr1 = []*Arr1{a1, a2}
		err = newSession.StoreWithID(arr, "arr/1")
		assert.NoError(t, err)
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		var newArr *Arr2
		err = newSession.Load(&newArr, "arr/1")
		assert.NoError(t, err)
		// Swap the inner arrays: {"a","b"},{"c","d"} -> {"d","c"},{"a","b"}.
		a1 = &Arr1{}
		a1.Str = []string{"d", "c"}
		a2 = &Arr1{}
		a2.Str = []string{"a", "b"}
		newArr.Arr1 = []*Arr1{a1, a2}
		WhatChanged, _ := newSession.Advanced().WhatChanged()
		assert.Equal(t, 1, len(WhatChanged))
		change := WhatChanged["arr/1"]
		assert.Equal(t, len(change), 4)
		{
			oldValueStr := fmt.Sprintf("%#v", change[0].FieldOldValue)
			assert.Equal(t, oldValueStr, "\"a\"")
			newValueStr := fmt.Sprintf("%#v", change[0].FieldNewValue)
			assert.Equal(t, newValueStr, "\"d\"")
		}
		{
			oldValueStr := fmt.Sprintf("%#v", change[1].FieldOldValue)
			assert.Equal(t, oldValueStr, "\"b\"")
			newValueStr := fmt.Sprintf("%#v", change[1].FieldNewValue)
			assert.Equal(t, newValueStr, "\"c\"")
		}
		{
			oldValueStr := fmt.Sprintf("%#v", change[2].FieldOldValue)
			assert.Equal(t, oldValueStr, "\"c\"")
			newValueStr := fmt.Sprintf("%#v", change[2].FieldNewValue)
			assert.Equal(t, newValueStr, "\"a\"")
		}
		{
			oldValueStr := fmt.Sprintf("%#v", change[3].FieldOldValue)
			assert.Equal(t, oldValueStr, "\"d\"")
			newValueStr := fmt.Sprintf("%#v", change[3].FieldNewValue)
			assert.Equal(t, newValueStr, "\"b\"")
		}
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		newSession.Close()
	}
	{
		newSession := openSessionMust(t, store)
		var newArr *Arr2
		err = newSession.Load(&newArr, "arr/1")
		assert.NoError(t, err)
		// Only the first inner array changes ("d","c" -> "q","w"), so
		// exactly 2 element-level changes are expected.
		a1 := &Arr1{}
		a1.Str = []string{"q", "w"}
		a2 := &Arr1{}
		a2.Str = []string{"a", "b"}
		newArr.Arr1 = []*Arr1{a1, a2}
		WhatChanged, _ := newSession.Advanced().WhatChanged()
		assert.Equal(t, len(WhatChanged), 1)
		change := WhatChanged["arr/1"]
		assert.Equal(t, len(change), 2)
		{
			oldValueStr := fmt.Sprintf("%#v", change[0].FieldOldValue)
			assert.Equal(t, oldValueStr, "\"d\"")
			newValueStr := fmt.Sprintf("%#v", change[0].FieldNewValue)
			assert.Equal(t, newValueStr, "\"q\"")
		}
		{
			oldValueStr := fmt.Sprintf("%#v", change[1].FieldOldValue)
			assert.Equal(t, oldValueStr, "\"c\"")
			newValueStr := fmt.Sprintf("%#v", change[1].FieldNewValue)
			assert.Equal(t, newValueStr, "\"w\"")
		}
		err = newSession.SaveChanges()
		assert.NoError(t, err)
		newSession.Close()
	}
}
// crudTestCrudCanUpdatePropertyToNull stores a user with a last name,
// nulls the Name field in a second session, and verifies a third session
// observes the nil value.
func crudTestCrudCanUpdatePropertyToNull(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		{
			newSession := openSessionMust(t, store)
			user1 := &User{}
			user1.setLastName("user1")
			err = newSession.StoreWithID(user1, "users/1")
			assert.NoError(t, err)
			err = newSession.SaveChanges()
			assert.NoError(t, err)
			newSession.Close()
		}
		{
			newSession := openSessionMust(t, store)
			var user *User
			err = newSession.Load(&user, "users/1")
			assert.NoError(t, err)
			user.Name = nil
			err = newSession.SaveChanges()
			assert.NoError(t, err)
			newSession.Close()
		}
		{
			newSession := openSessionMust(t, store)
			var user *User
			err = newSession.Load(&user, "users/1")
			assert.NoError(t, err)
			assert.Nil(t, user.Name)
			newSession.Close()
		}
	}
}
// crudTestCrudCanUpdatePropertyFromNullToObject stores a Poc whose Obj is
// nil, assigns an object in a second session, and verifies a third session
// observes the non-nil value.
func crudTestCrudCanUpdatePropertyFromNullToObject(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	{
		session := openSessionMust(t, store)
		poc := &Poc{}
		poc.Name = "aviv"
		err = session.StoreWithID(poc, "pocs/1")
		assert.NoError(t, err)
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	{
		session := openSessionMust(t, store)
		var poc *Poc
		err = session.Load(&poc, "pocs/1")
		assert.NoError(t, err)
		assert.Nil(t, poc.Obj)
		user := &User{}
		poc.Obj = user
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	{
		session := openSessionMust(t, store)
		var poc *Poc
		err = session.Load(&poc, "pocs/1")
		assert.NoError(t, err)
		assert.NotNil(t, poc.Obj)
		session.Close()
	}
}
// TestCrud is the suite entry point: it creates the shared test driver
// and runs every crud sub-test sequentially against it.
func TestCrud(t *testing.T) {
	driver := createTestDriver(t)
	destroy := func() { destroyDriver(t, driver) }
	defer recoverTest(t, destroy)
	// matches order of Java tests
	crudTestCrudOperationsWithNull(t, driver)
	crudTestCrudOperationsWithArrayOfObjects(t, driver)
	crudTestCrudOperationsWithWhatChanged(t, driver)
	crudTestCrudOperations(t, driver)
	crudTestCrudOperationsWithArrayInObject(t, driver)
	crudTestCrudCanUpdatePropertyToNull(t, driver)
	crudTestEntitiesAreSavedUsingLowerCase(t, driver)
	crudTestCanCustomizePropertyNamingStrategy(t, driver)
	crudTestCrudCanUpdatePropertyFromNullToObject(t, driver)
	crudTestCrudOperationsWithArrayInObject2(t, driver)
	crudTestCrudOperationsWithArrayInObject3(t, driver)
	crudTestCrudOperationsWithArrayInObject4(t, driver)
	crudTestCrudOperationsWithArrayOfArrays(t, driver)
}
|
package main
import (
"fmt"
)
// IntMap maps an integer (a candidate prime factor) to a count.
type IntMap map[int]int

// makerange returns the inclusive sequence start..end as a slice.
// An empty (non-nil) slice is returned when end < start.
func makerange(start, end int) []int {
	if end < start {
		return []int{}
	}
	r := make([]int, 0, end-start+1)
	for i := start; i <= end; i++ {
		r = append(r, i)
	}
	return r
}

// get_factors returns the factorization of n (using trial division by
// 2..20) as a map from factor to multiplicity, pre-seeded with 0 for
// every candidate in 2..20.
func get_factors(n int) IntMap {
	rslt := make(IntMap)
	// Bug fix: the original ranged over slice INDICES (0..18) here, so
	// it seeded keys 0..18 instead of the candidates 2..20. Iterate the
	// values instead.
	for _, i := range makerange(2, 20) {
		rslt[i] = 0
	}
	for i := 2; n > 1 && i <= 20; i++ {
		for n%i == 0 {
			n /= i
			// A missing key reads as 0, so the original's comma-ok
			// lookup was redundant; increment directly.
			rslt[i]++
		}
	}
	return rslt
}
// main computes the least common multiple of the integers 2..20: for
// each candidate factor it takes the maximum multiplicity observed in
// any single factorization, then multiplies the factors back together.
func main() {
	weights := make(IntMap)
	var factors []IntMap
	for _, v := range makerange(2, 20) {
		factors = append(factors, get_factors(v))
		weights[v] = 0
	}
	for _, f := range factors {
		for _, v := range makerange(2, 20) {
			if f[v] > weights[v] {
				weights[v] = f[v]
			}
		}
	}
	answer := 1
	for factor, count := range weights {
		if factor > 0 {
			for ; count > 0; count-- {
				answer *= factor
			}
		}
	}
	fmt.Println(answer)
}
|
package main
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
)
const cdbPath string = `C:\Program Files (x86)\Windows Kits\10\Debuggers\x64\cdb.exe`
const cdbCommand string = "!analyze -v;q"
const version = "v0.0.3c"
const ok = 0
const dmpNotFound = 1
const cdbNotFound = 2
// main parses flags and dispatches: print version, analyze a single dump
// file (-f), or analyze every .dmp in a folder (-d). Exit codes: ok (0),
// dmpNotFound (1) for missing input/usage errors, cdbNotFound (2) when
// the cdb debugger binary is absent.
func main() {
	var logFolder string
	var cdb string
	var dump string
	var cdbCmd string
	var regPattern string
	flag.StringVar(&logFolder, "d", "", "folder contains DMP files")
	flag.StringVar(&cdb, "p", cdbPath, "cdb file path")
	flag.StringVar(&dump, "f", "", "analyze specific dump file, ignore -d if flag set")
	flag.StringVar(&cdbCmd, "c", cdbCommand, "command issued to cdb debugger")
	flag.StringVar(&regPattern, "regex", "", "regular express to exact from cdb output")
	ver := flag.Bool("version", false, "print version")
	rawOutput := flag.Bool("raw", false, "raw cdb output")
	flag.Parse()
	if *ver {
		printVersion()
		os.Exit(ok)
	}
	// Usage error: need either a folder or a file, plus a debugger and command.
	if (logFolder == "" && dump == "") || cdb == "" || cdbCmd == "" {
		fmt.Fprintf(os.Stderr, "%s %s \n", filepath.Base(os.Args[0]), version)
		flag.PrintDefaults()
		os.Exit(dmpNotFound)
	}
	if !FileExist(cdb) {
		fmt.Fprintf(os.Stderr, "cdb not found.\n\tat location: %s", cdb)
		os.Exit(cdbNotFound)
	}
	//specific dump analyze
	if FileExist(dump) {
		prettyPrintMatched(dump, analyze(cdb, dump, cdbCmd, regPattern, *rawOutput))
		os.Exit(ok)
	}
	//get bugcheck str for all dump files in folder
	if !FileExist(logFolder) {
		fmt.Fprintf(os.Stderr, "log folder not found.\n\tat location: %s", logFolder)
		os.Exit(dmpNotFound)
	}
	processDumpFolder(logFolder, cdb, cdbCmd, regPattern, *rawOutput)
}
// printVersion writes the tool's version string to stdout.
func printVersion() {
	fmt.Println(version)
}
// executeCdb runs the cdb debugger against dmpFile (-z) with the given
// debugger command (-c) and returns its captured stdout.
func executeCdb(cdb string, dmpFile string, cdbCmd string) ([]byte, error) {
	cmd := exec.Command(cdb, "-z", dmpFile, "-c", cdbCmd)
	return cmd.Output()
}
// analyze runs cdb on dmpFile and reduces the output to one line of
// interest: the full raw output when rawOutput is set, the first line
// matching regPattern when a pattern is given, otherwise the first
// BUGCHECK_STR line. Returns "NOT FOUND" when nothing matches and
// "ERROR" when regPattern does not compile.
func analyze(cdb string, dmpFile string, cdbCmd string, regPattern string, rawOutput bool) string {
	output, err := executeCdb(cdb, dmpFile, cdbCmd)
	if err != nil {
		// Best-effort: cdb often exits non-zero while still producing
		// usable output, so report the error and keep scanning.
		fmt.Println(err.Error())
	}
	outputStr := string(output)
	if rawOutput {
		return outputStr
	}
	// Compile the user-supplied pattern once up front. The original
	// called regexp.MatchString per scanned line, recompiling the
	// pattern for every line of cdb output.
	var re *regexp.Regexp
	if regPattern != "" {
		re, err = regexp.Compile(regPattern)
		if err != nil {
			fmt.Fprintf(os.Stderr, "regex syntax error: %s", err.Error())
			return "ERROR"
		}
	}
	scanner := bufio.NewScanner(strings.NewReader(outputStr))
	for scanner.Scan() {
		line := scanner.Text()
		if re == nil {
			if strings.Contains(line, "BUGCHECK_STR") {
				return line
			}
		} else if re.MatchString(line) {
			return line
		}
	}
	return "NOT FOUND"
}
// prettyPrintMatched prints the dump file's absolute path followed by the
// matched line on an indented line of its own. If the absolute path
// cannot be resolved, the path is printed as given.
func prettyPrintMatched(dumpFile string, matchedStr string) {
	fullPath, absErr := filepath.Abs(dumpFile)
	if absErr != nil {
		fullPath = dumpFile
	}
	fmt.Printf("%s\n\t%s\n\n", fullPath, matchedStr)
}
// processDumpFolder analyzes every *.dmp file directly inside logFolder
// (case-insensitive extension match) and prints the matched line for each.
func processDumpFolder(logFolder string, cdb string, cdbCmd string, regPattern string, rawOutput bool) {
	entries, err := ioutil.ReadDir(logFolder)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		if strings.ToLower(filepath.Ext(entry.Name())) != ".dmp" {
			continue
		}
		// NOTE(review): path.Join is slash-only; filepath.Join would be
		// the OS-correct choice on Windows. Left as-is (Windows APIs
		// accept forward slashes) — confirm before changing, since the
		// "path" import would also have to go.
		dumpFile := path.Join(logFolder, entry.Name())
		prettyPrintMatched(dumpFile, analyze(cdb, dumpFile, cdbCmd, regPattern, rawOutput))
	}
}
|
// server_01
package main
import (
"fmt"
"net/http"
"strconv"
"strings"
)
//another service
func GetServiceThree(w http.ResponseWriter, r *http.Request) {
//en := json.NewEncoder(fp)
url := r.URL.Path
urlArray := strings.Split(url, "/")
parameterArray := strings.Split(urlArray[2], " ")
ans := 0
for i := 0; i < len(parameterArray); i++ {
num, _ := strconv.Atoi(parameterArray[i])
fmt.Println(num)
ans = ans + num
//w.Write([]byte(parameterArray[i]))
//w.Write([]byte(strconv.Itoa(num)))
}
w.Write([]byte(strconv.Itoa(ans)))
}
|
package imageregistrybinding
import (
	"fmt"
	"log"

	"alauda.io/devops-apiserver/pkg/apis/devops/v1alpha1"
	devopsclient "alauda.io/devops-apiserver/pkg/client/clientset/versioned"
	"alauda.io/diablo/src/backend/api"
	"alauda.io/diablo/src/backend/errors"
	"alauda.io/diablo/src/backend/resource/common"
	"alauda.io/diablo/src/backend/resource/dataselect"
)
// ImageRegistryBindingList contains a list of ImageRegistryBinding in the cluster.
type ImageRegistryBindingList struct {
	// ListMeta carries pagination/total-count metadata for the list.
	ListMeta api.ListMeta `json:"listMeta"`
	// Unordered list of ImageRegistryBinding.
	Items []ImageRegistryBinding `json:"imageregistrybindings"`
	// List of non-critical errors, that occurred during resource retrieval.
	Errors []error `json:"errors"`
}
// ImageRegistryBinding is a presentation layer view of Kubernetes namespaces. This means it is namespace plus
// additional augmented data we can get from other sources.
type ImageRegistryBinding struct {
ObjectMeta api.ObjectMeta `json:"objectMeta"`
TypeMeta api.TypeMeta `json:"typeMeta"`
Spec v1alpha1.ImageRegistryBindingSpec `json:"spec"`
Status v1alpha1.ServiceStatus `json:"status"`
}
type ImageRegistryBindingRepositoriesDetails struct {
*v1alpha1.ImageRegistryBindingRepositories
}
// GetImageOriginRepositoryList returns a list of remote imageRepository by
// calling the "repositories" subresource of the named imageregistrybinding.
// dsQuery is currently unused but kept for interface parity with the other
// list functions in this package.
func GetImageOriginRepositoryList(client devopsclient.Interface, namespace, name string, dsQuery *dataselect.DataSelectQuery) (*ImageRegistryBindingRepositoriesDetails, error) {
	log.Println("Getting remote imageRepository")
	result := client.DevopsV1alpha1().RESTClient().Get().Namespace(namespace).
		Name(name).Resource("imageregistrybindings").SubResource("repositories").Do()
	if result.Error() != nil {
		return nil, result.Error()
	}
	obj, err := result.Get()
	log.Println("obj", obj, "err", err)
	if err != nil {
		return nil, err
	}
	// Checked type assertion: the unchecked form panics if the server
	// returns an unexpected object kind (e.g. a Status on partial failure).
	bindingRepositories, ok := obj.(*v1alpha1.ImageRegistryBindingRepositories)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for repositories subresource", obj)
	}
	return &ImageRegistryBindingRepositoriesDetails{
		bindingRepositories,
	}, nil
}
// GetImageRegistryBindingList lists imageregistrybindings in the given
// namespace, applies the dataselect query, and returns the presentation list.
// Non-critical list errors are attached to the result; critical errors abort.
func GetImageRegistryBindingList(client devopsclient.Interface, namespace *common.NamespaceQuery, dsQuery *dataselect.DataSelectQuery) (*ImageRegistryBindingList, error) {
	log.Println("Getting list of imagerepository")
	irbList, err := client.DevopsV1alpha1().ImageRegistryBindings(namespace.ToRequestParam()).List(api.ListEverything)
	if err != nil {
		log.Println("error while listing imageRepository", err)
	}
	nonCriticalErrors, criticalError := errors.HandleError(err)
	if criticalError != nil {
		return nil, criticalError
	}
	// Guard: on a non-critical error the client may still have returned a
	// nil list; dereferencing irbList.Items directly would panic.
	var items []v1alpha1.ImageRegistryBinding
	if irbList != nil {
		items = irbList.Items
	}
	return toList(items, nonCriticalErrors, dsQuery), nil
}
// toList converts raw ImageRegistryBinding API objects into the presentation
// list type, applying the dataselect filter/pagination query and attaching
// any non-critical errors gathered during retrieval.
func toList(imageRegistryBindings []v1alpha1.ImageRegistryBinding, nonCriticalErrors []error, dsQuery *dataselect.DataSelectQuery) *ImageRegistryBindingList {
	result := &ImageRegistryBindingList{
		Items:    make([]ImageRegistryBinding, 0),
		ListMeta: api.ListMeta{TotalItems: len(imageRegistryBindings)},
	}
	// Filter/paginate, then report the filtered total instead of the raw one.
	cells, filteredTotal := dataselect.GenericDataSelectWithFilter(toCells(imageRegistryBindings), dsQuery)
	result.ListMeta = api.ListMeta{TotalItems: filteredTotal}
	result.Errors = nonCriticalErrors
	for _, binding := range fromCells(cells) {
		result.Items = append(result.Items, toDetailsInList(binding))
	}
	return result
}
// toDetailsInList converts a single ImageRegistryBinding API object into its
// presentation-layer form, stamping the toolchain annotations consumed by
// the UI (tool type, item kind, and registry type from the labels).
func toDetailsInList(imageRegistryBinding v1alpha1.ImageRegistryBinding) ImageRegistryBinding {
	irb := ImageRegistryBinding{
		ObjectMeta: api.NewObjectMeta(imageRegistryBinding.ObjectMeta),
		TypeMeta:   api.NewTypeMeta(api.ResourceKindImageRegistryBinding),
		Spec:       imageRegistryBinding.Spec,
		Status:     imageRegistryBinding.Status,
	}
	if irb.ObjectMeta.Annotations == nil {
		// A zero size hint is redundant; allocate a plain empty map.
		irb.ObjectMeta.Annotations = make(map[string]string)
	}
	irb.ObjectMeta.Annotations[common.AnnotationsKeyToolType] = v1alpha1.ToolChainArtifactRepositoryName
	irb.ObjectMeta.Annotations[common.AnnotationsKeyToolItemKind] = v1alpha1.ResourceKindImageRegistry
	irb.ObjectMeta.Annotations[common.AnnotationsKeyToolItemType] = getValueFromLabels(imageRegistryBinding.GetLabels(), v1alpha1.LabelImageRegistryType)
	return irb
}
// getValueFromLabels returns the value stored under key, or "" when the map
// is nil or the key is absent. Indexing a nil map is safe in Go and yields
// the zero value, so no explicit nil or presence check is required.
func getValueFromLabels(labels map[string]string, key string) string {
	return labels[key]
}
|
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"fmt"
"os"
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/runsc/flag"
)
// Platforms implements subcommands.Command for the "platforms" command.
type Platforms struct{}

// Name implements subcommands.Command.Name.
func (*Platforms) Name() string {
	return "platforms"
}

// Synopsis implements subcommands.Command.Synopsis.
func (*Platforms) Synopsis() string {
	return "Print a list of available platforms."
}

// Usage implements subcommands.Command.Usage.
func (*Platforms) Usage() string {
	return `platforms [options] - Print available platforms.
`
}

// SetFlags implements subcommands.Command.SetFlags. This command takes no flags.
func (*Platforms) SetFlags(f *flag.FlagSet) {}
// Execute implements subcommands.Command.Execute. It prints each registered
// platform on its own line to stdout and always succeeds.
func (*Platforms) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcommands.ExitStatus {
	names := platform.List()
	for i := 0; i < len(names); i++ {
		fmt.Fprintf(os.Stdout, "%s\n", names[i])
	}
	return subcommands.ExitSuccess
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
)
// Person is simple structure holding basic contact details. All fields are
// exported so encoding/json can marshal and unmarshal them under their
// field names.
type Person struct {
	Firstname, Lastname, Tel string
	Age                      int
}
// loadFromJSON opens filename and JSON-decodes its contents into v, which
// must be a pointer. It returns the open error or the decode error, if any.
func loadFromJSON(filename string, v interface{}) error {
	file, err := os.Open(filename)
	if err != nil {
		return err
	}
	// Defer the Close only after a successful Open. The previous code
	// deferred before the error check, silently relying on
	// (*os.File).Close tolerating a nil receiver.
	defer file.Close()
	return json.NewDecoder(file).Decode(v)
}
// saveToJSON JSON-encodes v and writes it to file, returning any encode or
// write error.
func saveToJSON(file *os.File, v interface{}) error {
	return json.NewEncoder(file).Encode(v)
}
// main demonstrates JSON round-tripping: it loads a typed slice from
// data.json, echoes it, writes a sample slice to stdout, then reads
// data_map.json into a generic map and prints two of its keys.
func main() {
	var persons []Person
	err := loadFromJSON("data.json", &persons)
	if err != nil {
		log.Fatalf("Error while loading json file %v", err)
	}
	fmt.Println(persons)
	data := []Person{
		{
			Firstname: "test",
			Lastname:  "test",
			Age:       100,
			Tel:       "+1234678765",
		},
		{
			Firstname: "test2",
			Lastname:  "test2",
			Age:       101,
			Tel:       "+1234678765",
		},
	}
	// (Removed a dead `if err != nil` check here: err could not be non-nil
	// at this point — loadFromJSON failures already called log.Fatalf.)
	if err = saveToJSON(os.Stdout, &data); err != nil {
		log.Fatalf("error while write new file: %v", err)
	}
	// NOTE: ioutil.ReadFile is deprecated since Go 1.16 (use os.ReadFile);
	// kept here so the ioutil import stays in use.
	fileData, err := ioutil.ReadFile("data_map.json")
	if err != nil {
		log.Fatalf("Error while read map file: %v", err)
	}
	var personMap map[string]interface{}
	// Check the unmarshal error: a malformed file previously yielded a nil
	// map silently.
	if err := json.Unmarshal(fileData, &personMap); err != nil {
		log.Fatalf("Error while parsing map file: %v", err)
	}
	fmt.Println(personMap["persons"])
	fmt.Println(personMap["other_persons"])
}
|
package __ComplexType
import . "_ImportLocation"
// First returns the first element of slice, or the zero value of
// _ComplexType when slice is empty.
func First(slice []_ComplexType) (res _ComplexType) {
	if len(slice) > 0 {
		res = slice[0]
	}
	return
}
// First returns a new chain whose value holds only the first element of the
// current chain's value (the zero value when the chain is empty, per First).
func (c *chain) First() *chain {
	return &chain{value: []_ComplexType{First(c.value)}}
}
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gardenerscheduler
import (
"context"
"fmt"
"os"
gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
flag "github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/gardener/test-infra/pkg/hostscheduler"
"github.com/gardener/test-infra/pkg/util/cmdutil"
)
// List returns a SchedulerFunc that prints a table of all host shoots in the
// scheduler namespace (label ShootLabel="true"), filtered to the configured
// cloud provider, including each shoot's readiness status and lock info.
func (s *gardenerscheduler) List(_ *flag.FlagSet) (hostscheduler.SchedulerFunc, error) {
	return func(ctx context.Context) error {
		shootList := &gardencorev1beta1.ShootList{}
		hostSelector := labels.SelectorFromSet(map[string]string{
			ShootLabel: "true",
		})
		if err := s.client.List(ctx, shootList, &client.ListOptions{
			LabelSelector: hostSelector,
			Namespace:     s.namespace,
		}); err != nil {
			return fmt.Errorf("shoots cannot be listed: %s", err.Error())
		}
		headers := []string{"NAME", "STATUS", "ID", "LOCKED", "MESSAGE"}
		rows := make([][]string, 0)
		for i := range shootList.Items {
			shoot := &shootList.Items[i]
			// Skip shoots that belong to a different cloud provider.
			if s.cloudprovider != CloudProviderAll && shoot.Spec.Provider.Type != string(s.cloudprovider) {
				continue
			}
			status := shoot.Labels[ShootLabelStatus]
			message := ""
			if err := shootReady(shoot); err != nil {
				status = "notReady"
				message = err.Error()
			}
			rows = append(rows, []string{
				shoot.Name,
				status,
				shoot.Annotations[ShootAnnotationID],
				shoot.Annotations[ShootAnnotationLockedAt],
				message,
			})
		}
		cmdutil.PrintTable(os.Stdout, headers, rows)
		return nil
	}, nil
}
|
/*
* @lc app=leetcode id=59 lang=golang
*
* [59] Spiral Matrix II
*
* https://leetcode.com/problems/spiral-matrix-ii/description/
*
* algorithms
* Medium (54.07%)
* Likes: 1106
* Dislikes: 119
* Total Accepted: 202K
* Total Submissions: 372K
* Testcase Example: '3'
*
* Given a positive integer n, generate a square matrix filled with elements
* from 1 to n^2 in spiral order.
*
* Example:
*
*
* Input: 3
* Output:
* [
* [ 1, 2, 3 ],
* [ 8, 9, 4 ],
* [ 7, 6, 5 ]
* ]
*
*
*/
// @lc code=start

// generateMatrix fills an n x n matrix with the values 1..n*n laid out in
// clockwise spiral order starting from the top-left corner. For n == 0 it
// returns an empty matrix.
func generateMatrix(n int) [][]int {
	if n == 0 {
		return [][]int{}
	}
	grid := make([][]int, n)
	for r := 0; r < n; r++ {
		grid[r] = make([]int, n)
	}
	next, last := 1, n*n
	minRow, maxRow := 0, n-1
	minCol, maxCol := 0, n-1
	// Peel one rectangular ring per pass, shrinking a bound after each edge
	// and stopping as soon as the bounds cross.
	for next <= last {
		// top edge: left to right
		for c := minCol; c <= maxCol; c++ {
			grid[minRow][c] = next
			next++
		}
		minRow++
		if minRow > maxRow {
			break
		}
		// right edge: top to bottom
		for r := minRow; r <= maxRow; r++ {
			grid[r][maxCol] = next
			next++
		}
		maxCol--
		if minCol > maxCol {
			break
		}
		// bottom edge: right to left
		for c := maxCol; c >= minCol; c-- {
			grid[maxRow][c] = next
			next++
		}
		maxRow--
		if minRow > maxRow {
			break
		}
		// left edge: bottom to top
		for r := maxRow; r >= minRow; r-- {
			grid[r][minCol] = next
			next++
		}
		minCol++
		if minCol > maxCol {
			break
		}
	}
	return grid
}
// @lc code=end |
package main
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"github.com/mitchellh/go-homedir"
)
// Build-time defaults and configuration for the amptools build wrapper.
var (
	ug      = "0:0" // uid:gid passed to `docker run -u`; root unless on Linux
	version = "0.0.0"
	build   = "-"
	owner   = "appcelerator"
	repo    = "github.com/appcelerator/amp"
	// dockerCmd is exported into the container environment as DOCKER_CMD.
	dockerCmd       = "sudo docker"
	toolsImage      = "appcelerator/amptools"
	localToolsImage = "amptools" // locally rebuilt image used on Linux
	dockerArgs      []string     // assembled once in init()
)
// init assembles the `docker run` argument list used to execute builds
// inside the amptools container: the working tree is mounted at its GOPATH
// location, and the user's SSH keys and amp config are mounted read-only.
func init() {
	homedir, err := homedir.Dir()
	if err != nil {
		panic(err)
	}
	wd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	// On Linux the container runs as the invoking user so that files it
	// creates in the mounted tree are not owned by root.
	if runtime.GOOS == "linux" {
		ug = fmt.Sprintf("%s:%s", strconv.Itoa(os.Getuid()), strconv.Itoa(os.Getgid()))
	}
	dockerArgs = []string{
		"run", "-t", "--rm", "--name", "amptools",
		"-u", ug,
		"-v", "/var/run/docker.sock:/var/run/docker.sock",
		"-v", fmt.Sprintf("%s/.ssh:/root/.ssh:ro", homedir),
		"-v", fmt.Sprintf("%s/.config/amp:/root/.config/amp:ro", homedir),
		"-v", fmt.Sprintf("%s:/go/src/%s", wd, repo),
		"-w", fmt.Sprintf("/go/src/%s", repo),
		"-e", fmt.Sprintf("VERSION=%s", version),
		"-e", fmt.Sprintf("BUILD=%s", build),
		"-e", fmt.Sprintf("OWNER=%s", owner),
		"-e", fmt.Sprintf("REPO=%s", repo),
		"-e", fmt.Sprintf("DOCKER_CMD=%s", dockerCmd),
		"-e", "GOPATH=/go",
	}
	// Linux uses the locally rebuilt image (see buildLocalToolsImage);
	// other platforms use the published one. A single-element append
	// replaces the needless `[]string{...}...` spread.
	if runtime.GOOS == "linux" {
		dockerArgs = append(dockerArgs, localToolsImage)
	} else {
		dockerArgs = append(dockerArgs, toolsImage)
	}
}
// buildLocalToolsImage builds a local "amptools" image whose sudoer uid/gid
// entry is rewritten to match the current user, so files created inside the
// container do not end up with broken permissions on the host.
func buildLocalToolsImage() {
	dockerfileBody := []byte(fmt.Sprintf("FROM appcelerator/amptools\nRUN sed -i \"s/sudoer:x:[0-9]*:[0-9]*/sudoer:x:%s/\" /etc/passwd", ug))
	buildDir, err := ioutil.TempDir("", "dockerbuild")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(buildDir) // clean up the scratch build context
	if err := ioutil.WriteFile(filepath.Join(buildDir, "Dockerfile"), dockerfileBody, 0666); err != nil {
		panic(err)
	}
	// Equivalent to: docker build -t amptools <buildDir>
	runcmd("docker", []string{"build", "-t", "amptools", buildDir})
}
// main runs `make` (plus any extra command-line arguments) inside the
// amptools container assembled by init().
func main() {
	makeArgs := []string{"make"}
	if len(os.Args) > 1 {
		makeArgs = append(makeArgs, os.Args[1:]...)
	}
	// On Linux, rebuild the tools image so the container's uid/gid match
	// the host user (see buildLocalToolsImage).
	if runtime.GOOS == "linux" {
		buildLocalToolsImage()
	}
	runcmd("docker", append(dockerArgs, makeArgs...))
}
// runcmd executes cmd with args, streaming the child's stdout and stderr
// line-by-line to this process's stdout/stderr. On a non-zero exit it exits
// this process with status 1.
func runcmd(cmd string, args []string) {
	proc := exec.Command(cmd, args...)
	stdout, err := proc.StdoutPipe()
	if err != nil {
		panic(err)
	}
	stderr, err := proc.StderrPipe()
	if err != nil {
		panic(err)
	}
	// Wait closes the pipes, so both copier goroutines must finish reading
	// before Wait is called (os/exec documents this); the previous code
	// raced Wait against the scanners and could truncate output. The
	// buffered channel lets us join both goroutines without new imports.
	done := make(chan struct{}, 2)
	go func() {
		defer func() { done <- struct{}{} }()
		outscanner := bufio.NewScanner(stdout)
		for outscanner.Scan() {
			fmt.Printf("%s\n", outscanner.Text())
		}
	}()
	go func() {
		defer func() { done <- struct{}{} }()
		errscanner := bufio.NewScanner(stderr)
		for errscanner.Scan() {
			fmt.Fprintf(os.Stderr, "%s\n", errscanner.Text())
		}
	}()
	if err := proc.Start(); err != nil {
		panic(err)
	}
	<-done
	<-done
	if err := proc.Wait(); err != nil {
		// Just pass along the information that the process exited with a failure;
		// whatever error information it displayed is what the user will see.
		// TODO: return the process exit code
		os.Exit(1)
	}
}
|
package instances
import (
"context"
"fmt"
"github.com/exoscale/egoscale"
"github.com/janoszen/exoscale-account-wiper/plugin"
"log"
"sync"
"time"
)
// Plugin implements the account-wiper plugin that deletes all compute
// instances in an Exoscale account.
type Plugin struct {
}

// GetKey returns the identifier under which this plugin is registered.
func (p *Plugin) GetKey() string {
	return "instances"
}

// GetParameters reports no configurable parameters for this plugin.
func (p *Plugin) GetParameters() map[string]string {
	return make(map[string]string)
}

// SetParameter always fails because this plugin accepts no options.
func (p *Plugin) SetParameter(_ string, _ string) error {
	return fmt.Errorf("instance deletion has no options")
}
// Run deletes every virtual machine in the account, at most 10 concurrently,
// and blocks until each instance is confirmed destroyed (polled every 10s).
// Deletion errors are logged per-instance and do not abort the sweep.
func (p *Plugin) Run(clientFactory *plugin.ClientFactory, ctx context.Context) error {
	log.Printf("deleting instances...")
	client := clientFactory.GetExoscaleClient()
	vms, err := client.ListWithContext(ctx, &egoscale.VirtualMachine{})
	if err != nil {
		return err
	}
	var wg sync.WaitGroup
	// Bounded-concurrency semaphore: at most 10 deletions in flight.
	poolBlocker := make(chan bool, 10)
loop:
	for _, key := range vms {
		// Stop scheduling new deletions once the context is cancelled.
		// A bare `break` inside select only exits the select statement,
		// so a labeled break is required to leave the loop.
		select {
		case <-ctx.Done():
			break loop
		default:
		}
		vm := key.(*egoscale.VirtualMachine)
		wg.Add(1)
		go func() {
			// Deferred so the WaitGroup is decremented on every exit path;
			// the early error return previously skipped both wg.Done() and
			// the semaphore release, deadlocking wg.Wait().
			defer wg.Done()
			poolBlocker <- true
			defer func() { <-poolBlocker }()
			if vm.State == "Destroying" {
				log.Printf("instance %s is already being destroyed.\n", vm.ID)
			} else {
				log.Printf("deleting instance %s...\n", vm.ID)
				if err := vm.Delete(ctx, client); err != nil {
					log.Printf("could not delete instance %s (%v)\n", vm.ID, err)
					return
				}
			}
			// Poll until the instance can no longer be fetched.
			for {
				log.Printf("waiting for instance %s to be destroyed...\n", vm.ID)
				if _, err := client.Get(vm); err != nil {
					break
				}
				time.Sleep(time.Second * 10)
			}
			log.Printf("deleted instance %s\n", vm.ID)
		}()
	}
	wg.Wait()
	log.Printf("deleted instances.")
	return nil
}
|
package tracing
import (
"context"
"errors"
"fmt"
"math"
"net/http"
"os"
"runtime"
"strings"
"github.com/rs/zerolog"
otelContrib "go.opentelemetry.io/contrib/propagators/jaeger"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
"go.opentelemetry.io/otel/trace"
)
const (
	service              = "cloudflared"
	tracerInstrumentName = "origin"

	// Header/context keys used to propagate trace identifiers.
	TracerContextName         = "cf-trace-id"
	TracerContextNameOverride = "uber-trace-id"

	IntCloudflaredTracingHeader = "cf-int-cloudflared-tracing"

	// MaxErrorDescriptionLen caps span error descriptions, in bytes.
	MaxErrorDescriptionLen = 100
	traceHttpStatusCodeKey = "upstreamStatusCode"

	// A 128-bit trace ID renders as 32 hex characters.
	traceID128bitsWidth = 128 / 4
	// Jaeger-format trace strings separate their four fields with ':'.
	separator = ":"
)

var (
	CanonicalCloudflaredTracingHeader = http.CanonicalHeaderKey(IntCloudflaredTracingHeader)

	// Per-transport span attribute values.
	Http2TransportAttribute = trace.WithAttributes(transportAttributeKey.String("http2"))
	QuicTransportAttribute  = trace.WithAttributes(transportAttributeKey.String("quic"))

	HostOSAttribute   = semconv.HostTypeKey.String(runtime.GOOS)
	HostArchAttribute = semconv.HostArchKey.String(runtime.GOARCH)

	// Populated lazily: otelVersionAttribute and hostnameAttribute in
	// init(), cloudflaredVersionAttribute in Init().
	otelVersionAttribute        attribute.KeyValue
	hostnameAttribute           attribute.KeyValue
	cloudflaredVersionAttribute attribute.KeyValue

	serviceAttribute = semconv.ServiceNameKey.String(service)

	transportAttributeKey   = attribute.Key("transport")
	otelVersionAttributeKey = attribute.Key("jaeger.version")

	errNoopTracerProvider = errors.New("noop tracer provider records no spans")
)
func init() {
	// Register the jaeger propagator globally.
	otel.SetTextMapPropagator(otelContrib.Jaeger{})
	otelVersionAttribute = otelVersionAttributeKey.String(fmt.Sprintf("go-otel-%s", otel.Version()))
	// Best-effort: if Hostname fails, hostnameAttribute stays its zero value.
	if hostname, err := os.Hostname(); err == nil {
		hostnameAttribute = attribute.String("hostname", hostname)
	}
}

// Init records the cloudflared version as a span resource attribute. Call it
// once at startup, before any tracers are created.
func Init(version string) {
	cloudflaredVersionAttribute = semconv.ProcessRuntimeVersionKey.String(version)
}
// TracedHTTPRequest pairs an HTTP request with the tracer that records spans
// for it.
type TracedHTTPRequest struct {
	*http.Request
	*cfdTracer

	ConnIndex uint8 // The connection index used to proxy the request
}
// NewTracedHTTPRequest creates a new tracer for the current HTTP request
// context. Requests carrying no usable trace header get a noop tracer.
func NewTracedHTTPRequest(req *http.Request, connIndex uint8, log *zerolog.Logger) *TracedHTTPRequest {
	if ctx, exists := extractTrace(req); exists {
		return &TracedHTTPRequest{req.WithContext(ctx), newCfdTracer(ctx, log), connIndex}
	}
	return &TracedHTTPRequest{req, &cfdTracer{trace.NewNoopTracerProvider(), &NoopOtlpClient{}, log}, connIndex}
}
// ToTracedContext exposes this request's context together with its tracer.
func (tr *TracedHTTPRequest) ToTracedContext() *TracedContext {
	return &TracedContext{tr.Context(), tr.cfdTracer}
}

// TracedContext pairs a context.Context with the tracer recording spans
// for it.
type TracedContext struct {
	context.Context
	*cfdTracer
}

// NewTracedContext creates a new tracer for the current context.
// When traceContext cannot be parsed, a noop tracer is used.
func NewTracedContext(ctx context.Context, traceContext string, log *zerolog.Logger) *TracedContext {
	ctx, exists := extractTraceFromString(ctx, traceContext)
	if !exists {
		return &TracedContext{ctx, &cfdTracer{trace.NewNoopTracerProvider(), &NoopOtlpClient{}, log}}
	}
	return &TracedContext{ctx, newCfdTracer(ctx, log)}
}

// cfdTracer bundles a tracer provider with the in-memory exporter that
// collects its spans.
type cfdTracer struct {
	trace.TracerProvider
	exporter InMemoryClient
	log      *zerolog.Logger
}
// newCfdTracer creates a new tracer for the current request context, backed
// by an in-memory OTLP exporter so spans can later be serialized into
// response headers. On exporter construction failure it degrades to a noop
// tracer rather than failing the request.
func newCfdTracer(ctx context.Context, log *zerolog.Logger) *cfdTracer {
	mc := new(InMemoryOtlpClient)
	exp, err := otlptrace.New(ctx, mc)
	if err != nil {
		return &cfdTracer{trace.NewNoopTracerProvider(), &NoopOtlpClient{}, log}
	}
	tp := tracesdk.NewTracerProvider(
		// We want to dump to in-memory exporter immediately
		tracesdk.WithSyncer(exp),
		// Record information about this application in a Resource.
		tracesdk.WithResource(resource.NewWithAttributes(
			semconv.SchemaURL,
			serviceAttribute,
			otelVersionAttribute,
			hostnameAttribute,
			cloudflaredVersionAttribute,
			HostOSAttribute,
			HostArchAttribute,
		)),
	)
	return &cfdTracer{tp, mc, log}
}
// Tracer returns a tracer scoped to the origin instrumentation name.
func (cft *cfdTracer) Tracer() trace.Tracer {
	return cft.TracerProvider.Tracer(tracerInstrumentName)
}
// GetSpans returns the spans as base64 encoded string of protobuf otlp
// traces, or the empty string when no spans are available or export fails.
func (cft *cfdTracer) GetSpans() (enc string) {
	enc, err := cft.exporter.Spans()
	switch err {
	case nil:
		break
	case errNoTraces:
		cft.log.Trace().Err(err).Msgf("expected traces to be available")
		return
	case errNoopTracer:
		return // noop tracer has no traces
	default:
		// zerolog events are only written once Msg/Msgf/Send is called;
		// the previous Debug().Err(err) dropped this event entirely.
		cft.log.Debug().Err(err).Msg("failed to export spans")
		return
	}
	return
}
// GetProtoSpans returns the spans as the otlp traces in protobuf byte array,
// or nil when no spans are available or export fails.
func (cft *cfdTracer) GetProtoSpans() (proto []byte) {
	proto, err := cft.exporter.ExportProtoSpans()
	switch err {
	case nil:
		break
	case errNoTraces:
		cft.log.Trace().Err(err).Msgf("expected traces to be available")
		return
	case errNoopTracer:
		return // noop tracer has no traces
	default:
		// zerolog events are only written once Msg/Msgf/Send is called;
		// the previous Debug().Err(err) dropped this event entirely.
		cft.log.Debug().Err(err).Msg("failed to export proto spans")
		return
	}
	return
}
// AddSpans assigns spans as base64 encoded protobuf otlp traces to provided
// HTTP headers. Nil header maps and empty span sets are silently skipped.
func (cft *cfdTracer) AddSpans(headers http.Header) {
	if headers == nil {
		return
	}
	encoded := cft.GetSpans()
	if encoded == "" {
		// No need to add header if no traces
		return
	}
	headers[CanonicalCloudflaredTracingHeader] = []string{encoded}
}
// End will set the OK status for the span and then end it.
func End(span trace.Span) {
	endSpan(span, -1, codes.Ok, nil)
}

// EndWithErrorStatus will set an Error status (with the error's message as
// description) for the span and then end it.
func EndWithErrorStatus(span trace.Span, err error) {
	endSpan(span, -1, codes.Error, err)
}

// EndWithStatusCode will set an OK status and record the upstream HTTP
// status code for the span, then end it.
func EndWithStatusCode(span trace.Span, statusCode int) {
	endSpan(span, statusCode, codes.Ok, nil)
}
// endSpan records the upstream status code (when positive) and a truncated
// error description on the span, sets its final status, and ends it.
// A nil span is a no-op. (Header previously misnamed as EndWithErrorStatus.)
func endSpan(span trace.Span, upstreamStatusCode int, spanStatusCode codes.Code, err error) {
	if span == nil {
		return
	}
	if upstreamStatusCode > 0 {
		span.SetAttributes(attribute.Int(traceHttpStatusCodeKey, upstreamStatusCode))
	}
	// add error to status buf cap description
	errDescription := ""
	if err != nil {
		errDescription = err.Error()
		// Cap the description at MaxErrorDescriptionLen bytes.
		l := int(math.Min(float64(len(errDescription)), MaxErrorDescriptionLen))
		errDescription = errDescription[:l]
	}
	span.SetStatus(spanStatusCode, errDescription)
	span.End()
}
// extractTraceFromString will extract the trace information from the provided
// propagated trace string context. The expected input is the 4-field
// Jaeger format "trace-id:span-id:parent-span-id:flags"; on any parse
// failure the original context is returned with false.
func extractTraceFromString(ctx context.Context, trace string) (context.Context, bool) {
	if trace == "" {
		return ctx, false
	}
	// Jaeger specific separator
	parts := strings.Split(trace, separator)
	if len(parts) != 4 {
		return ctx, false
	}
	if parts[0] == "" {
		return ctx, false
	}
	// Correctly left pad the trace to a length of 32
	if len(parts[0]) < traceID128bitsWidth {
		left := traceID128bitsWidth - len(parts[0])
		parts[0] = strings.Repeat("0", left) + parts[0]
		trace = strings.Join(parts, separator)
	}
	// Override the 'cf-trace-id' as 'uber-trace-id' so the jaeger propagator can extract it.
	traceHeader := map[string]string{TracerContextNameOverride: trace}
	remoteCtx := otel.GetTextMapPropagator().Extract(ctx, propagation.MapCarrier(traceHeader))
	return remoteCtx, true
}
// extractTrace attempts to check for a cf-trace-id from a request and return the
// trace context with the provided http.Request. The cf-trace-id header is
// always stripped from the request. Note: on the no-header path the returned
// context is nil; callers must ignore it when the bool is false.
func extractTrace(req *http.Request) (context.Context, bool) {
	// Only add tracing for requests with appropriately tagged headers
	remoteTraces := req.Header.Values(TracerContextName)
	if len(remoteTraces) <= 0 {
		// Strip the cf-trace-id header
		req.Header.Del(TracerContextName)
		return nil, false
	}
	traceHeader := map[string]string{}
	for _, t := range remoteTraces {
		// Override the 'cf-trace-id' as 'uber-trace-id' so the jaeger propagator can extract it.
		// Last entry wins if multiple provided
		traceHeader[TracerContextNameOverride] = t
	}
	// Strip the cf-trace-id header
	req.Header.Del(TracerContextName)
	if traceHeader[TracerContextNameOverride] == "" {
		return nil, false
	}
	remoteCtx := otel.GetTextMapPropagator().Extract(req.Context(), propagation.MapCarrier(traceHeader))
	return remoteCtx, true
}
// NewNoopSpan returns a span that records nothing.
func NewNoopSpan() trace.Span {
	// Pass a real (empty) context rather than nil: with no span present,
	// SpanFromContext returns a noop span, and nil contexts should never
	// be passed around.
	return trace.SpanFromContext(context.Background())
}
|
/*
MIT License
Copyright (c) 2018 IBM
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Package jsonlogic is a lightweight rule engine that uses the syntax defined on jsonlogic.com
package jsonlogic
import (
"encoding/json"
"fmt"
)
// stringToInterface converts a JSON string into the generic interface{}
// representation produced by encoding/json. On failure, the original input
// is returned (preserving the historical contract) together with the
// wrapped unmarshal error.
func stringToInterface(input string) (interface{}, error) {
	var parsed interface{}
	if err := json.Unmarshal([]byte(input), &parsed); err != nil {
		// Lowercase, wrapped error per Go conventions (was "Unmarshal warning:").
		return input, fmt.Errorf("unmarshal warning: %w", err)
	}
	return parsed, nil
}
// Apply takes in a rule and an optional data object and applies its logic.
// Parameters are passed as strings and will be unmarshalled.
func Apply(inputs ...string) (interface{}, error) {
	if len(inputs) == 0 {
		//TODO: Expected behavior with no params?
		return nil, nil
	}
	rule, err := stringToInterface(inputs[0])
	if err != nil {
		return nil, err
	}
	var data interface{}
	if len(inputs) > 1 {
		// A data document was supplied as the second argument.
		if data, err = stringToInterface(inputs[1]); err != nil {
			return nil, err
		}
	}
	return ApplyJSONInterfaces(rule, data)
}
// ApplyJSONInterfaces takes a rule and an optional data object and applies its logic.
// The parameters are unmarshalled JSON interfaces.
// Note this is not meant to be used with any other types except an interface{} generated by Go's Unmarshal method.
// A rule is a single-key map {operator: arguments}; anything that is not a
// map is treated as a literal and returned unchanged. Arguments are
// recursively evaluated before the operator is dispatched, and unknown
// operators fall through to opCustom.
func ApplyJSONInterfaces(inputs ...interface{}) (interface{}, error) {
	var rule, data interface{}
	if len(inputs) < 1 {
		//TODO: Expected behavior with no params?
		return nil, nil
	}
	rule = inputs[0]
	if len(inputs) > 1 {
		//We have data inputs
		data = inputs[1]
	}
	switch rule.(type) {
	case map[string]interface{}:
		//It's a rule
		inputmap := rule.(map[string]interface{})
		// A rule map holds a single operator key, so every switch case
		// returns on the first iteration.
		for operator, value := range inputmap {
			// Recursively evaluate nested rules in the argument first.
			value, err := ApplyJSONInterfaces(value, data)
			if err != nil {
				return nil, err
			}
			switch operator {
			case "===":
				return opEqualStrict(value, data)
			case "==":
				return opEqual(value, data)
			case "!==":
				return opNotEqualStrict(value, data)
			case "!=":
				return opNotEqual(value, data)
			case "!!":
				return opDoubleNot(value, data)
			case "!":
				return opNot(value, data)
			case "<":
				return opSmallerThan(value, data)
			case ">":
				return opGreaterThan(value, data)
			case ">=":
				return opGreaterEqThan(value, data)
			case "<=":
				return opSmallerEqThan(value, data)
			case "+":
				return opSum(value, data)
			case "-":
				return opSub(value, data)
			case "*":
				return opMult(value, data)
			case "/":
				return opDiv(value, data)
			case "%":
				return opMod(value, data)
			case "and":
				return opAnd(value, data)
			case "or":
				return opOr(value, data)
			case "merge":
				return opMerge(value, data)
			case "in":
				return opIn(value, data)
			case "substr":
				return opSubstr(value, data)
			case "cat":
				return opCat(value, data)
			case "map":
				return opMap(value, data)
			case "log":
				return opLog(value)
			case "var":
				return opVar(value, data)
			case "if", "?:": // "?:" is an undocumented alias of 'if'
				return opIf(value, data)
			case "max":
				return opMax(value, data)
			case "min":
				return opMin(value, data)
			case "all":
				return opAll(value, data)
			case "none":
				return opNone(value, data)
			case "some":
				return opSome(value, data)
			case "missing":
				return opMissing(value, data)
			case "missing_some":
				return opMissingSome(value, data)
			case "filter":
				return opFilter(value, data)
			case "reduce":
				return opReduce(value, data)
			default:
				// Unknown operators are delegated to user-registered
				// custom operations.
				if res, err := opCustom(operator, value, data); err != nil {
					return nil, fmt.Errorf("Error: %s", err)
				} else {
					return res, nil
				}
			}
		}
		// Unreachable in practice (every case above returns); kept for safety.
		break
	default:
		//Non-rule
		return rule, nil
	}
	return nil, nil
}
|
// Copyright 2019 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package importccl
import (
"context"
"fmt"
"io/ioutil"
"math"
"net/url"
"os"
"sort"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/blobs"
"github.com/cockroachdb/cockroach/pkg/ccl/backupccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/distsql"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/rowexec"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/storage/cloud"
"github.com/cockroachdb/cockroach/pkg/storage/cloudimpl"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testSpec describes one import scenario: an input file format, numbered
// input files, and the table descriptors they populate.
type testSpec struct {
	format roachpb.IOFileFormat
	inputs map[int32]string
	tables map[string]*execinfrapb.ReadImportDataSpec_ImportTable
}

// Given test spec returns ReadImportDataSpec suitable creating input converter.
func (spec *testSpec) getConverterSpec() *execinfrapb.ReadImportDataSpec {
	return &execinfrapb.ReadImportDataSpec{
		Format:            spec.format,
		Tables:            spec.tables,
		Uri:               spec.inputs,
		ReaderParallelism: 1, // Make tests deterministic
	}
}
// TestConverterFlushesBatches verifies, for each supported import format,
// that the input converter emits KV batches of exactly the configured batch
// size (except the final batch). It first runs with the default batch size
// to learn the record count, then re-runs with increasing batch sizes.
func TestConverterFlushesBatches(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// Reset batch size setting upon test completion.
	defer row.TestingSetDatumRowConverterBatchSize(0)()
	// Helper to generate test name.
	testName := func(format roachpb.IOFileFormat, batchSize int) string {
		switch batchSize {
		case 0:
			return fmt.Sprintf("%s-default-batch-size", format.Format)
		case 1:
			return fmt.Sprintf("%s-always-flush", format.Format)
		default:
			return fmt.Sprintf("%s-flush-%d-records", format.Format, batchSize)
		}
	}
	ctx := context.Background()
	evalCtx := tree.MakeTestingEvalContext(nil)
	tests := []testSpec{
		newTestSpec(ctx, t, csvFormat(), "testdata/csv/data-0"),
		newTestSpec(ctx, t, mysqlDumpFormat(), "testdata/mysqldump/simple.sql"),
		newTestSpec(ctx, t, pgDumpFormat(), "testdata/pgdump/simple.sql"),
		newTestSpec(ctx, t, avroFormat(t, roachpb.AvroOptions_OCF), "testdata/avro/simple.ocf"),
	}
	// Sentinel batch size used to terminate the inner loop.
	const endBatchSize = -1
	for _, testCase := range tests {
		expectedNumRecords := 0
		expectedNumBatches := 0
		converterSpec := testCase.getConverterSpec()
		// Run multiple tests, increasing batch size until it exceeds the
		// total number of records. When batch size is 0, we run converters
		// with the default batch size, and use that run to figure out the
		// expected number of records and batches for the subsequent run.
		for batchSize := 0; batchSize != endBatchSize; {
			t.Run(testName(testCase.format, batchSize), func(t *testing.T) {
				if batchSize > 0 {
					row.TestingSetDatumRowConverterBatchSize(batchSize)
				}
				kvCh := make(chan row.KVBatch, batchSize)
				conv, err := makeInputConverter(ctx, converterSpec, &evalCtx, kvCh, nil /* seqChunkProvider */)
				if err != nil {
					t.Fatalf("makeInputConverter() error = %v", err)
				}
				group := ctxgroup.WithContext(ctx)
				group.Go(func() error {
					defer close(kvCh)
					return conv.readFiles(ctx, testCase.inputs, nil, converterSpec.Format,
						externalStorageFactory, security.RootUserName())
				})
				lastBatch := 0
				testNumRecords := 0
				testNumBatches := 0
				// Read from the channel; we expect batches of testCase.batchSize
				// size, with the exception of the last batch.
				for batch := range kvCh {
					if batchSize > 0 {
						assert.True(t, lastBatch == 0 || lastBatch == batchSize)
					}
					lastBatch = len(batch.KVs)
					testNumRecords += lastBatch
					testNumBatches++
				}
				if err := group.Wait(); err != nil {
					t.Fatalf("Conversion failed: %v", err)
				}
				if batchSize == 0 {
					expectedNumRecords = testNumRecords
					// Next batch: flush every record.
					batchSize = 1
					expectedNumBatches = expectedNumRecords
				} else if batchSize > expectedNumRecords {
					// Done with this test case.
					batchSize = endBatchSize
					return
				} else {
					// Number of records and batches ought to be correct.
					assert.Equal(t, expectedNumRecords, testNumRecords)
					assert.Equal(t, expectedNumBatches, testNumBatches)
					// Progressively increase the batch size.
					batchSize += (batchSize << 2)
					expectedNumBatches = int(math.Ceil(float64(expectedNumRecords) / float64(batchSize)))
				}
			})
		}
	}
}
// A RowReceiver implementation which fails the test if it receives an error.
type errorReportingRowReceiver struct {
	t *testing.T // test to fail/log against when an error arrives
}
// Compile-time check that errorReportingRowReceiver implements execinfra.RowReceiver.
var _ execinfra.RowReceiver = &errorReportingRowReceiver{}
// Push discards the row, but fails the test and closes the consumer if
// the producer reports an error (or if the test has already failed).
func (r *errorReportingRowReceiver) Push(
	row rowenc.EncDatumRow, meta *execinfrapb.ProducerMetadata,
) execinfra.ConsumerStatus {
	if r.t.Failed() || (meta != nil && meta.Err != nil) {
		if !r.t.Failed() {
			r.t.Fail()
		}
		// Guard against meta == nil: this branch is reachable purely because
		// the test already failed, in which case meta.Err would panic.
		if meta != nil {
			r.t.Logf("receiver got an error: %v", meta.Err)
		}
		return execinfra.ConsumerClosed
	}
	return execinfra.NeedMoreRows
}
// ProducerDone is a no-op; this receiver requires no cleanup.
func (r *errorReportingRowReceiver) ProducerDone() {}
// Types returns nil; this receiver does not declare an input schema.
func (r *errorReportingRowReceiver) Types() []*types.T {
	return nil
}
// A do nothing bulk adder implementation. Keys are never persisted;
// the optional hooks let tests observe Add/Flush calls.
type doNothingKeyAdder struct {
	onKeyAdd func(key roachpb.Key) // invoked (if set) for every Add
	onFlush  func()                // invoked (if set) for every Flush
}
// Compile-time check that doNothingKeyAdder implements kvserverbase.BulkAdder.
var _ kvserverbase.BulkAdder = &doNothingKeyAdder{}
// Add invokes the configured onKeyAdd hook (if any) with the key and
// always reports success; nothing is buffered or written.
func (a *doNothingKeyAdder) Add(_ context.Context, k roachpb.Key, _ []byte) error {
	if cb := a.onKeyAdd; cb != nil {
		cb(k)
	}
	return nil
}
// Flush invokes the configured onFlush hook (if any) and always succeeds.
func (a *doNothingKeyAdder) Flush(_ context.Context) error {
	if cb := a.onFlush; cb != nil {
		cb()
	}
	return nil
}
// IsEmpty always reports true: nothing is ever buffered.
func (*doNothingKeyAdder) IsEmpty() bool { return true }
// CurrentBufferFill always reports an empty buffer.
func (*doNothingKeyAdder) CurrentBufferFill() float32 { return 0 }
// GetSummary returns an empty summary; no data is actually written.
func (*doNothingKeyAdder) GetSummary() roachpb.BulkOpSummary { return roachpb.BulkOpSummary{} }
// Close is a no-op.
func (*doNothingKeyAdder) Close(_ context.Context) {}
// SetOnFlush records a callback to be invoked by Flush.
func (a *doNothingKeyAdder) SetOnFlush(f func()) { a.onFlush = f }
var eofOffset int64 = math.MaxInt64
// TestImportIgnoresProcessedFiles verifies that input files whose resume
// position is already at EOF are never (re)opened by the import processor.
// The fully-processed files have deliberately invalid URIs, so any attempt
// to read them surfaces as an error pushed to errorReportingRowReceiver.
func TestImportIgnoresProcessedFiles(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	evalCtx := tree.MakeTestingEvalContext(nil)
	// Minimal flow context; the bulk adder is a no-op since this test only
	// cares about which files get opened.
	flowCtx := &execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg: &execinfra.ServerConfig{
			Settings:        &cluster.Settings{},
			ExternalStorage: externalStorageFactory,
			BulkAdder: func(
				_ context.Context, _ *kv.DB, _ hlc.Timestamp,
				_ kvserverbase.BulkAdderOptions) (kvserverbase.BulkAdder, error) {
				return &doNothingKeyAdder{}, nil
			},
		},
	}
	// In this test, we'll specify import files that do not exist, but mark
	// those files fully processed. The converters should not attempt to even
	// open these files (and if they do, we should report a test failure)
	tests := []struct {
		name         string
		spec         testSpec
		inputOffsets []int64 // List of file ids that were fully processed
	}{
		{
			"csv-two-invalid",
			newTestSpec(ctx, t, csvFormat(), "__invalid__", "testdata/csv/data-0", "/_/missing/_"),
			[]int64{eofOffset, 0, eofOffset},
		},
		{
			"csv-all-invalid",
			newTestSpec(ctx, t, csvFormat(), "__invalid__", "../../&"),
			[]int64{eofOffset, eofOffset},
		},
		{
			"csv-all-valid",
			newTestSpec(ctx, t, csvFormat(), "testdata/csv/data-0"),
			[]int64{0},
		},
		{
			"mysql-one-invalid",
			newTestSpec(ctx, t, mysqlDumpFormat(), "testdata/mysqldump/simple.sql", "/_/missing/_"),
			[]int64{0, eofOffset},
		},
		{
			"pgdump-one-input",
			newTestSpec(ctx, t, pgDumpFormat(), "testdata/pgdump/simple.sql"),
			[]int64{0},
		},
		{
			"avro-one-invalid",
			newTestSpec(ctx, t, avroFormat(t, roachpb.AvroOptions_OCF), "__invalid__", "testdata/avro/simple.ocf"),
			[]int64{eofOffset, 0},
		},
	}
	// Configures import spec to have appropriate input offsets set.
	// Offsets of 0 are omitted from ResumePos (the default).
	setInputOffsets := func(
		t *testing.T, spec *execinfrapb.ReadImportDataSpec, offsets []int64,
	) *execinfrapb.ReadImportDataSpec {
		if len(spec.Uri) != len(offsets) {
			t.Fatal("Expected matching number of input offsets")
		}
		spec.ResumePos = make(map[int32]int64)
		for id, offset := range offsets {
			if offset > 0 {
				spec.ResumePos[int32(id)] = offset
			}
		}
		return spec
	}
	for _, testCase := range tests {
		t.Run(fmt.Sprintf("processes-files-once-%s", testCase.name), func(t *testing.T) {
			spec := setInputOffsets(t, testCase.spec.getConverterSpec(), testCase.inputOffsets)
			post := execinfrapb.PostProcessSpec{}
			// The receiver fails the test if the processor reports any error,
			// e.g. from opening one of the invalid inputs.
			processor, err := newReadImportDataProcessor(flowCtx, 0, *spec, &post, &errorReportingRowReceiver{t})
			if err != nil {
				t.Fatalf("Could not create data processor: %v", err)
			}
			processor.Run(ctx)
		})
	}
}
// observedKeys is a mutex-protected collection of keys observed by the
// test bulk adder; the adder hooks may run concurrently.
type observedKeys struct {
	syncutil.Mutex
	keys []roachpb.Key
}
// TestImportHonorsResumePosition verifies that, for every supported input
// format, rows before the configured resume position are skipped. A
// baseline run (resume position 0) records the sorted set of produced
// keys; subsequent runs with a non-zero resume position must not produce
// any key that falls among the first resumePos recorded keys.
func TestImportHonorsResumePosition(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	batchSize := 13
	defer row.TestingSetDatumRowConverterBatchSize(batchSize)()
	pkBulkAdder := &doNothingKeyAdder{}
	ctx := context.Background()
	evalCtx := tree.MakeTestingEvalContext(nil)
	flowCtx := &execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg: &execinfra.ServerConfig{
			Settings:        &cluster.Settings{},
			ExternalStorage: externalStorageFactory,
			BulkAdder: func(
				_ context.Context, _ *kv.DB, _ hlc.Timestamp,
				opts kvserverbase.BulkAdderOptions) (kvserverbase.BulkAdder, error) {
				// Only the primary-key adder is instrumented; all others are plain no-ops.
				if opts.Name == "pkAdder" {
					return pkBulkAdder, nil
				}
				return &doNothingKeyAdder{}, nil
			},
			TestingKnobs: execinfra.TestingKnobs{
				BulkAdderFlushesEveryBatch: true,
			},
		},
	}
	// In this test, we'll specify various resume positions for
	// different input formats. We expect that the rows before resume
	// position will be skipped.
	// NB: We assume that the (external) test files are sorted and
	// contain sufficient number of rows.
	testSpecs := []testSpec{
		newTestSpec(ctx, t, csvFormat(), "testdata/csv/data-0"),
		newTestSpec(ctx, t, mysqlDumpFormat(), "testdata/mysqldump/simple.sql"),
		newTestSpec(ctx, t, mysqlOutFormat(), "testdata/mysqlout/csv-ish/simple.txt"),
		newTestSpec(ctx, t, pgCopyFormat(), "testdata/pgcopy/default/test.txt"),
		newTestSpec(ctx, t, pgDumpFormat(), "testdata/pgdump/simple.sql"),
		newTestSpec(ctx, t, avroFormat(t, roachpb.AvroOptions_JSON_RECORDS), "testdata/avro/simple-sorted.json"),
	}
	resumes := []int64{0, 10, 64, eofOffset}
	for _, testCase := range testSpecs {
		spec := testCase.getConverterSpec()
		keys := &observedKeys{keys: make([]roachpb.Key, 0, 1000)}
		numKeys := 0
		for _, resumePos := range resumes {
			spec.ResumePos = map[int32]int64{0: resumePos}
			if resumePos == 0 {
				// We use 0 resume position to record the set of keys in the input file.
				pkBulkAdder.onKeyAdd = func(k roachpb.Key) {
					keys.Lock()
					keys.keys = append(keys.keys, k)
					keys.Unlock()
				}
			} else {
				if resumePos != eofOffset && resumePos > int64(numKeys) {
					t.Logf("test skipped: resume position %d > number of keys %d", resumePos, numKeys)
					continue
				}
				// For other resume positions, we want to ensure that
				// the key we add is not among [0 - resumePos) keys.
				pkBulkAdder.onKeyAdd = func(k roachpb.Key) {
					maxKeyIdx := int(resumePos)
					if resumePos == eofOffset {
						maxKeyIdx = numKeys
					}
					keys.Lock()
					// Binary-search for k among the first maxKeyIdx recorded (sorted)
					// keys. Note: sort.Search requires a monotonic predicate; the
					// previous equality predicate (Compare(k) == 0) made this
					// membership check unreliable.
					idx := sort.Search(maxKeyIdx, func(i int) bool { return keys.keys[i].Compare(k) >= 0 })
					if idx < maxKeyIdx && keys.keys[idx].Compare(k) == 0 {
						t.Errorf("failed to skip key[%d]=%s", idx, k)
					}
					keys.Unlock()
				}
			}
			t.Run(fmt.Sprintf("resume-%v-%v", spec.Format.Format, resumePos), func(t *testing.T) {
				rp := resumePos
				progCh := make(chan execinfrapb.RemoteProducerMetadata_BulkProcessorProgress)
				defer close(progCh)
				// Setup progress consumer.
				go func() {
					// Consume progress reports. Since we expect every batch to be flushed
					// (BulkAdderFlushesEveryBatch), then the progress report must be emitted every
					// batchSize rows (possibly out of order), starting from our initial resumePos
					for prog := range progCh {
						if !t.Failed() && prog.ResumePos[0] < (rp+int64(batchSize)) {
							t.Logf("unexpected progress resume pos: %d", prog.ResumePos[0])
							t.Fail()
						}
					}
				}()
				_, err := runImport(ctx, flowCtx, spec, progCh, nil /* seqChunkProvider */)
				if err != nil {
					t.Fatal(err)
				}
			})
			if resumePos == 0 {
				// Even though the input is assumed to be sorted, we may still observe
				// bulk adder keys arriving out of order. We need to sort the keys.
				keys.Lock()
				sort.Slice(keys.keys, func(i int, j int) bool {
					return keys.keys[i].Compare(keys.keys[j]) < 0
				})
				numKeys = len(keys.keys)
				keys.Unlock()
			}
		}
	}
}
// duplicateKeyErrorAdder is a bulk adder whose Add always fails with a
// DuplicateKeyError; all other behavior is inherited from doNothingKeyAdder.
type duplicateKeyErrorAdder struct {
	doNothingKeyAdder
}
// Compile-time check that duplicateKeyErrorAdder implements kvserverbase.BulkAdder.
var _ kvserverbase.BulkAdder = &duplicateKeyErrorAdder{}
// Add unconditionally returns a DuplicateKeyError for the given kv pair.
func (a *duplicateKeyErrorAdder) Add(_ context.Context, k roachpb.Key, v []byte) error {
	return &kvserverbase.DuplicateKeyError{Key: k, Value: v}
}
// TestImportHandlesDuplicateKVs verifies that a DuplicateKeyError raised by
// the bulk adder is propagated out of runImport for every supported input
// format.
func TestImportHandlesDuplicateKVs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	batchSize := 13
	defer row.TestingSetDatumRowConverterBatchSize(batchSize)()
	evalCtx := tree.MakeTestingEvalContext(nil)
	// Every bulk adder created by this flow fails each Add with a
	// DuplicateKeyError.
	flowCtx := &execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg: &execinfra.ServerConfig{
			Settings:        &cluster.Settings{},
			ExternalStorage: externalStorageFactory,
			BulkAdder: func(
				_ context.Context, _ *kv.DB, _ hlc.Timestamp,
				opts kvserverbase.BulkAdderOptions) (kvserverbase.BulkAdder, error) {
				return &duplicateKeyErrorAdder{}, nil
			},
			TestingKnobs: execinfra.TestingKnobs{
				BulkAdderFlushesEveryBatch: true,
			},
		},
	}
	// In this test, we'll attempt to import different input formats.
	// All imports produce a DuplicateKeyError, which we expect to be propagated.
	testSpecs := []testSpec{
		newTestSpec(ctx, t, csvFormat(), "testdata/csv/data-0"),
		newTestSpec(ctx, t, mysqlDumpFormat(), "testdata/mysqldump/simple.sql"),
		newTestSpec(ctx, t, mysqlOutFormat(), "testdata/mysqlout/csv-ish/simple.txt"),
		newTestSpec(ctx, t, pgCopyFormat(), "testdata/pgcopy/default/test.txt"),
		newTestSpec(ctx, t, pgDumpFormat(), "testdata/pgdump/simple.sql"),
		newTestSpec(ctx, t, avroFormat(t, roachpb.AvroOptions_JSON_RECORDS), "testdata/avro/simple-sorted.json"),
	}
	for _, testCase := range testSpecs {
		spec := testCase.getConverterSpec()
		t.Run(fmt.Sprintf("duplicate-key-%v", spec.Format.Format), func(t *testing.T) {
			progCh := make(chan execinfrapb.RemoteProducerMetadata_BulkProcessorProgress)
			defer close(progCh)
			// Drain progress reports; they are irrelevant to this test.
			go func() {
				for range progCh {
				}
			}()
			_, err := runImport(ctx, flowCtx, spec, progCh, nil /* seqChunkProvider */)
			require.True(t, errors.HasType(err, &kvserverbase.DuplicateKeyError{}))
		})
	}
}
// syncBarrier allows 2 threads (a controller and a worker) to
// synchronize between themselves. A controller portion of the
// barrier waits until worker starts running, and then notifies
// worker to proceed. The worker is the opposite: notifies controller
// that it started running, and waits for the proceed signal.
type syncBarrier interface {
	// Enter blocks the barrier, and returns a function
	// that, when executed, unblocks the other thread.
	Enter() func()
}

// barrier is a channel-based syncBarrier. The controller and worker
// halves share the same two channels, crossed over, so a send on one
// side's write channel is received on the other side's read channel.
type barrier struct {
	read       <-chan struct{}
	write      chan<- struct{}
	controller bool
}

// Returns controller/worker barriers.
func newSyncBarrier() (syncBarrier, syncBarrier) {
	first := make(chan struct{})
	second := make(chan struct{})
	ctrl := &barrier{read: first, write: second, controller: true}
	worker := &barrier{read: second, write: first, controller: false}
	return ctrl, worker
}

// Enter performs the first rendezvous (controller sends, worker receives)
// and returns a closure performing the second one in the opposite direction.
func (b *barrier) Enter() func() {
	if !b.controller {
		<-b.read
		return func() { b.write <- struct{}{} }
	}
	b.write <- struct{}{}
	return func() { <-b.read }
}
// A special jobs.Resumer that, instead of finishing
// the job successfully, forces the job to be paused.
var _ jobs.Resumer = &cancellableImportResumer{}
type cancellableImportResumer struct {
	ctx              context.Context   // context the wrapped resumer runs under (cancellable by the test)
	jobIDCh          chan jobspb.JobID // the job id is published here once Resume starts
	jobID            jobspb.JobID
	onSuccessBarrier syncBarrier // if set, entered after a successful wrapped Resume
	wrapped          *importResumer
}
// Resume publishes the job id, runs the wrapped import resumer under
// r.ctx (deliberately not the passed-in ctx, so the test can cancel it),
// and then — even on success — returns an error so the job ends up
// paused rather than succeeded.
func (r *cancellableImportResumer) Resume(ctx context.Context, execCtx interface{}) error {
	r.jobID = r.wrapped.job.ID()
	r.jobIDCh <- r.jobID
	if err := r.wrapped.Resume(r.ctx, execCtx); err != nil {
		return err
	}
	if r.onSuccessBarrier != nil {
		// Rendezvous with the test before deliberately failing the job.
		defer r.onSuccessBarrier.Enter()()
	}
	return errors.New("job succeed, but we're forcing it to be paused")
}
// OnFailOrCancel returns an error on purpose (see below).
func (r *cancellableImportResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error {
	// This callback is invoked when an error or cancellation occurs
	// during the import. Since our Resume handler returned an
	// error (after pausing the job), we need to short-circuit the
	// jobs machinery so that this job is not marked as failed.
	return errors.New("bail out")
}
// setImportReaderParallelism overrides the import reader processor factory
// so that every created processor runs with the given parallelism.
// The returned closure restores the original factory.
func setImportReaderParallelism(parallelism int32) func() {
	factory := rowexec.NewReadImportDataProcessor
	rowexec.NewReadImportDataProcessor = func(
		flowCtx *execinfra.FlowCtx, processorID int32,
		spec execinfrapb.ReadImportDataSpec, post *execinfrapb.PostProcessSpec,
		output execinfra.RowReceiver) (execinfra.Processor, error) {
		spec.ReaderParallelism = parallelism
		return factory(flowCtx, processorID, spec, post, output)
	}
	return func() {
		rowexec.NewReadImportDataProcessor = factory
	}
}
// Queries the status and the import progress of the job.
type jobState struct {
err error
status jobs.Status
prog jobspb.ImportProgress
}
// queryJob reads the current status and import progress of jobID from
// system.jobs. If the job has failed, the error recorded in the job's
// payload is surfaced through js.err instead of the progress details.
func queryJob(db sqlutils.DBHandle, jobID jobspb.JobID) (js jobState) {
	var payloadBytes, progressBytes []byte
	js.err = db.QueryRowContext(
		context.Background(), "SELECT status, payload, progress FROM system.jobs WHERE id = $1", jobID).Scan(
		&js.status, &payloadBytes, &progressBytes)
	if js.err != nil {
		return
	}
	if js.status == jobs.StatusFailed {
		// Surface the failure message recorded in the job payload.
		payload := &jobspb.Payload{}
		if js.err = protoutil.Unmarshal(payloadBytes, payload); js.err == nil {
			js.err = errors.Newf("%s", payload.Error)
		}
		return
	}
	progress := &jobspb.Progress{}
	if js.err = protoutil.Unmarshal(progressBytes, progress); js.err != nil {
		return
	}
	js.prog = *(progress.Details.(*jobspb.Progress_Import).Import)
	return
}
// Repeatedly queries job status/progress until specified function returns true.
// Any query error aborts the polling and fails the test.
func queryJobUntil(
	t *testing.T, db sqlutils.DBHandle, jobID jobspb.JobID, isDone func(js jobState) bool,
) (js jobState) {
	t.Helper()
	r := retry.Start(base.DefaultRetryOptions())
	for r.Next() {
		if js = queryJob(db, jobID); js.err != nil || isDone(js) {
			break
		}
	}
	if js.err != nil {
		t.Fatal(js.err)
	}
	return
}
// TestCSVImportCanBeResumed pauses a running CSV import mid-way (via a
// generator breakpoint plus job pause + context cancellation), records the
// persisted resume position, then unpauses the job and verifies that the
// completed import only processed the rows after that position.
func TestCSVImportCanBeResumed(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	defer setImportReaderParallelism(1)()
	const batchSize = 5
	defer TestingSetParallelImporterReaderBatchSize(batchSize)()
	defer row.TestingSetDatumRowConverterBatchSize(2 * batchSize)()
	defer jobs.TestingSetAdoptAndCancelIntervals(100*time.Millisecond, 100*time.Millisecond)()
	s, db, _ := serverutils.StartServer(t,
		base.TestServerArgs{
			Knobs: base.TestingKnobs{
				RegistryLiveness: jobs.NewFakeNodeLiveness(1),
				DistSQL: &execinfra.TestingKnobs{
					BulkAdderFlushesEveryBatch: true,
				},
			},
		})
	registry := s.JobRegistry().(*jobs.Registry)
	ctx := context.Background()
	defer s.Stopper().Stop(ctx)
	sqlDB := sqlutils.MakeSQLRunner(db)
	sqlDB.Exec(t, `CREATE DATABASE d`)
	sqlDB.Exec(t, "CREATE TABLE t (id INT, data STRING)")
	defer sqlDB.Exec(t, `DROP TABLE t`)
	// jobCtx is handed to the resumer so the test can cancel the first run.
	jobCtx, cancelImport := context.WithCancel(ctx)
	jobIDCh := make(chan jobspb.JobID)
	var jobID jobspb.JobID = -1
	var importSummary backupccl.RowCount
	registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
		// Arrange for our special job resumer to be
		// returned the very first time we start the import.
		jobspb.TypeImport: func(raw jobs.Resumer) jobs.Resumer {
			resumer := raw.(*importResumer)
			resumer.testingKnobs.ignoreProtectedTimestamps = true
			resumer.testingKnobs.alwaysFlushJobProgress = true
			resumer.testingKnobs.afterImport = func(summary backupccl.RowCount) error {
				importSummary = summary
				return nil
			}
			if jobID == -1 {
				return &cancellableImportResumer{
					ctx:     jobCtx,
					jobIDCh: jobIDCh,
					wrapped: resumer,
				}
			}
			return resumer
		},
	}
	testBarrier, csvBarrier := newSyncBarrier()
	csv1 := newCsvGenerator(0, 10*batchSize+1, &intGenerator{}, &strGenerator{})
	// Block data generation at row 7*batchSize so the test can pause the job
	// while the import is demonstrably mid-flight.
	csv1.addBreakpoint(7*batchSize, func() (bool, error) {
		defer csvBarrier.Enter()()
		return false, nil
	})
	// Convince distsql to use our "external" storage implementation.
	storage := newGeneratedStorage(csv1)
	s.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ExternalStorage = storage.externalStorageFactory()
	// Execute import; ignore any errors returned
	// (since we're aborting the first import run.).
	go func() {
		_, _ = sqlDB.DB.ExecContext(ctx,
			`IMPORT INTO t (id, data) CSV DATA ($1)`, storage.getGeneratorURIs()[0])
	}()
	// Wait for the job to start running
	jobID = <-jobIDCh
	// Wait until we are blocked handling breakpoint.
	unblockImport := testBarrier.Enter()
	// Wait until we have recorded some job progress.
	js := queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool { return js.prog.ResumePos[0] > 0 })
	// Pause the job;
	if err := registry.PauseRequested(ctx, nil, jobID); err != nil {
		t.Fatal(err)
	}
	// Send cancellation and unblock breakpoint.
	cancelImport()
	unblockImport()
	// Get updated resume position counter.
	js = queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool { return jobs.StatusPaused == js.status })
	resumePos := js.prog.ResumePos[0]
	t.Logf("Resume pos: %v\n", js.prog.ResumePos[0])
	// Unpause the job and wait for it to complete.
	if err := registry.Unpause(ctx, nil, jobID); err != nil {
		t.Fatal(err)
	}
	js = queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool { return jobs.StatusSucceeded == js.status })
	// Verify that the import proceeded from the resumeRow position.
	assert.Equal(t, importSummary.Rows, int64(csv1.numRows)-resumePos)
	sqlDB.CheckQueryResults(t, `SELECT id FROM t ORDER BY id`,
		sqlDB.QueryStr(t, `SELECT generate_series(0, $1)`, csv1.numRows-1),
	)
}
// TestCSVImportMarksFilesFullyProcessed verifies that once an import has
// converted all of its input files, each file's resume position is recorded
// as MaxInt64 (fully processed), and that unpausing the job afterwards does
// not re-import any rows.
func TestCSVImportMarksFilesFullyProcessed(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const batchSize = 5
	defer TestingSetParallelImporterReaderBatchSize(batchSize)()
	defer row.TestingSetDatumRowConverterBatchSize(2 * batchSize)()
	defer jobs.TestingSetAdoptAndCancelIntervals(100*time.Millisecond, 100*time.Millisecond)()
	s, db, _ := serverutils.StartServer(t,
		base.TestServerArgs{
			Knobs: base.TestingKnobs{
				RegistryLiveness: jobs.NewFakeNodeLiveness(1),
				DistSQL: &execinfra.TestingKnobs{
					BulkAdderFlushesEveryBatch: true,
				},
			},
		})
	registry := s.JobRegistry().(*jobs.Registry)
	ctx := context.Background()
	defer s.Stopper().Stop(ctx)
	sqlDB := sqlutils.MakeSQLRunner(db)
	sqlDB.Exec(t, `CREATE DATABASE d`)
	sqlDB.Exec(t, "CREATE TABLE t (id INT, data STRING)")
	defer sqlDB.Exec(t, `DROP TABLE t`)
	jobIDCh := make(chan jobspb.JobID)
	// The import blocks on its success barrier, giving the test a window to
	// pause the job after all files were processed.
	controllerBarrier, importBarrier := newSyncBarrier()
	var jobID jobspb.JobID = -1
	var importSummary backupccl.RowCount
	registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
		// Arrange for our special job resumer to be
		// returned the very first time we start the import.
		jobspb.TypeImport: func(raw jobs.Resumer) jobs.Resumer {
			resumer := raw.(*importResumer)
			resumer.testingKnobs.alwaysFlushJobProgress = true
			resumer.testingKnobs.ignoreProtectedTimestamps = true
			resumer.testingKnobs.afterImport = func(summary backupccl.RowCount) error {
				importSummary = summary
				return nil
			}
			if jobID == -1 {
				return &cancellableImportResumer{
					ctx:              ctx,
					jobIDCh:          jobIDCh,
					onSuccessBarrier: importBarrier,
					wrapped:          resumer,
				}
			}
			return resumer
		},
	}
	csv1 := newCsvGenerator(0, 10*batchSize+1, &intGenerator{}, &strGenerator{})
	csv2 := newCsvGenerator(0, 20*batchSize-1, &intGenerator{}, &strGenerator{})
	csv3 := newCsvGenerator(0, 1, &intGenerator{}, &strGenerator{})
	// Convince distsql to use our "external" storage implementation.
	storage := newGeneratedStorage(csv1, csv2, csv3)
	s.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ExternalStorage = storage.externalStorageFactory()
	// Execute import; ignore any errors returned
	// (since we're aborting the first import run).
	go func() {
		_, _ = sqlDB.DB.ExecContext(ctx,
			`IMPORT INTO t (id, data) CSV DATA ($1, $2, $3)`, storage.getGeneratorURIs()...)
	}()
	// Wait for the job to start running
	jobID = <-jobIDCh
	// Tell importer that it can continue with its onSuccess
	proceedImport := controllerBarrier.Enter()
	// Pause the job;
	if err := registry.PauseRequested(ctx, nil, jobID); err != nil {
		t.Fatal(err)
	}
	// All files should have been processed,
	// and the resume position set to maxInt64.
	js := queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool { return jobs.StatusPaused == js.status })
	for _, pos := range js.prog.ResumePos {
		assert.True(t, pos == math.MaxInt64)
	}
	// Send cancellation and unblock import.
	proceedImport()
	// Unpause the job and wait for it to complete.
	if err := registry.Unpause(ctx, nil, jobID); err != nil {
		t.Fatal(err)
	}
	js = queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool { return jobs.StatusSucceeded == js.status })
	// Verify that after resume we have not processed any additional rows.
	assert.Zero(t, importSummary.Rows)
}
// TestImportWithPartialIndexesErrs verifies that IMPORT INTO a table with a
// partial index is rejected up front with a descriptive error.
func TestImportWithPartialIndexesErrs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	s, db, _ := serverutils.StartServer(t,
		base.TestServerArgs{
			Knobs: base.TestingKnobs{
				RegistryLiveness: jobs.NewFakeNodeLiveness(1),
				DistSQL: &execinfra.TestingKnobs{
					BulkAdderFlushesEveryBatch: true,
				},
			},
		})
	ctx := context.Background()
	defer s.Stopper().Stop(ctx)
	sqlDB := sqlutils.MakeSQLRunner(db)
	sqlDB.Exec(t, `CREATE DATABASE d`)
	// The WHERE clause makes the index partial, which IMPORT does not support.
	sqlDB.Exec(t, "CREATE TABLE t (id INT, data STRING, INDEX (data) WHERE id > 0)")
	defer sqlDB.Exec(t, `DROP TABLE t`)
	sqlDB.ExpectErr(t, "cannot import into table with partial indexes", `IMPORT INTO t (id, data) CSV DATA ('https://foo.bar')`)
}
// externalStorageFactory returns a cloud.ExternalStorageFactory that serves
// the content of ses's generators, keyed by URI path. Paths are lazily
// assigned generator ids in the order they are first seen.
func (ses *generatedStorage) externalStorageFactory() cloud.ExternalStorageFactory {
	return func(_ context.Context, es roachpb.ExternalStorage) (cloud.ExternalStorage, error) {
		uri, err := url.Parse(es.HttpPath.BaseUri)
		if err != nil {
			return nil, err
		}
		// Lazily assign an id to each distinct path.
		id, ok := ses.nameIDMap[uri.Path]
		if !ok {
			id = ses.nextID
			ses.nextID++
			ses.nameIDMap[uri.Path] = id
		}
		return &generatorExternalStorage{conf: es, gen: ses.generators[id]}, nil
	}
}
// External storage factory needed to run converters.
// It serves files relative to the current working directory through a test
// blob service client.
func externalStorageFactory(
	ctx context.Context, dest roachpb.ExternalStorage,
) (cloud.ExternalStorage, error) {
	workdir, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	return cloudimpl.MakeExternalStorage(ctx, dest, base.ExternalIODirConfig{},
		nil, blobs.TestBlobServiceClient(workdir), nil, nil)
}
// Helper to create and initialize testSpec.
// A (dummy) table descriptor matching the input format is synthesized so
// that converters can run, even though nothing is actually imported.
func newTestSpec(
	ctx context.Context, t *testing.T, format roachpb.IOFileFormat, inputs ...string,
) testSpec {
	spec := testSpec{
		format: format,
		inputs: make(map[int32]string),
	}
	// Initialize table descriptor for import. We need valid descriptor to run
	// converters, even though we don't actually import anything in this test.
	var descr *tabledesc.Mutable
	switch format.Format {
	case roachpb.IOFileFormat_CSV:
		descr = descForTable(ctx, t,
			"CREATE TABLE simple (i INT PRIMARY KEY, s text )", 100, 200, NoFKs)
	case
		roachpb.IOFileFormat_Mysqldump,
		roachpb.IOFileFormat_MysqlOutfile,
		roachpb.IOFileFormat_PgDump,
		roachpb.IOFileFormat_PgCopy,
		roachpb.IOFileFormat_Avro:
		descr = descForTable(ctx, t,
			"CREATE TABLE simple (i INT PRIMARY KEY, s text, b bytea default null)", 100, 200, NoFKs)
	default:
		t.Fatalf("Unsupported input format: %v", format)
	}
	// Collect the visible (non-hidden) column names. Appending keeps the
	// target list dense; the previous code wrote names at their full-column
	// index and truncated to the visible count, which produced empty/shifted
	// entries whenever a hidden column preceded a visible one.
	targetCols := make([]string, 0, len(descr.Columns))
	for _, col := range descr.Columns {
		if !col.Hidden {
			targetCols = append(targetCols, col.Name)
		}
	}
	assert.True(t, len(targetCols) > 0)
	fullTableName := "simple"
	if format.Format == roachpb.IOFileFormat_PgDump {
		// PgDump input refers to tables by their qualified name.
		fullTableName = "public.simple"
	}
	spec.tables = map[string]*execinfrapb.ReadImportDataSpec_ImportTable{
		fullTableName: {Desc: descr.TableDesc(), TargetCols: targetCols},
	}
	for id, path := range inputs {
		spec.inputs[int32(id)] = cloudimpl.MakeLocalStorageURI(path)
	}
	return spec
}
// pgDumpFormat returns the IOFileFormat used for pg_dump test inputs.
func pgDumpFormat() roachpb.IOFileFormat {
	opts := roachpb.PgDumpOptions{
		MaxRowSize:        64 * 1024,
		IgnoreUnsupported: true,
	}
	return roachpb.IOFileFormat{Format: roachpb.IOFileFormat_PgDump, PgDump: opts}
}
// pgCopyFormat returns the IOFileFormat used for pg COPY test inputs
// (tab-delimited, \N for NULL).
func pgCopyFormat() roachpb.IOFileFormat {
	opts := roachpb.PgCopyOptions{
		Delimiter:  '\t',
		Null:       `\N`,
		MaxRowSize: 4096,
	}
	return roachpb.IOFileFormat{Format: roachpb.IOFileFormat_PgCopy, PgCopy: opts}
}
// mysqlDumpFormat returns the IOFileFormat used for mysqldump test inputs.
func mysqlDumpFormat() roachpb.IOFileFormat {
	return roachpb.IOFileFormat{
		Format: roachpb.IOFileFormat_Mysqldump,
	}
}
// mysqlOutFormat returns the IOFileFormat used for MySQL OUTFILE test
// inputs: comma-separated, newline-terminated, backslash-escaped rows with
// every field enclosed in double quotes.
func mysqlOutFormat() roachpb.IOFileFormat {
	opts := roachpb.MySQLOutfileOptions{
		FieldSeparator: ',',
		RowSeparator:   '\n',
		HasEscape:      true,
		Escape:         '\\',
		Enclose:        roachpb.MySQLOutfileOptions_Always,
		Encloser:       '"',
	}
	return roachpb.IOFileFormat{Format: roachpb.IOFileFormat_MysqlOutfile, MysqlOut: opts}
}
// csvFormat returns the IOFileFormat used for CSV test inputs.
func csvFormat() roachpb.IOFileFormat {
	return roachpb.IOFileFormat{
		Format: roachpb.IOFileFormat_CSV,
	}
}
// avroFormat builds an Avro IOFileFormat for the requested record format.
// Non-OCF inputs do not embed their schema, so the test schema is loaded
// from testdata and a newline record separator is configured.
func avroFormat(t *testing.T, format roachpb.AvroOptions_Format) roachpb.IOFileFormat {
	avro := roachpb.AvroOptions{Format: format, StrictMode: false}
	if format != roachpb.AvroOptions_OCF {
		// Need to load schema for record specific inputs.
		schema, err := ioutil.ReadFile("testdata/avro/simple-schema.json")
		require.NoError(t, err)
		avro.SchemaJSON = string(schema)
		avro.RecordSeparator = '\n'
	}
	return roachpb.IOFileFormat{Format: roachpb.IOFileFormat_Avro, Avro: avro}
}
|
package tq
import (
"fmt"
"hash"
"io"
"os"
"path/filepath"
"regexp"
"strconv"
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/localstorage"
"github.com/git-lfs/git-lfs/tools"
"github.com/rubyist/tracerx"
)
// Adapter for basic HTTP downloads, includes resuming via HTTP Range
// requests. Embeds adapterBase for the shared transfer machinery.
type basicDownloadAdapter struct {
	*adapterBase
}
// ClearTempStorage removes the adapter's temp directory wholesale,
// discarding any partially-downloaded files.
func (a *basicDownloadAdapter) ClearTempStorage() error {
	return os.RemoveAll(a.tempDir())
}
// tempDir returns the directory holding in-progress downloads.
// It must be dedicated to this adapter because ClearTempStorage deletes
// it wholesale, and it lives under this repo's object storage rather
// than the global localstorage temp dir, which is cleared at the end of
// every invocation. Falls back to the system temp dir if the directory
// cannot be created.
func (a *basicDownloadAdapter) tempDir() string {
	dir := filepath.Join(localstorage.Objects().RootDir, "incomplete")
	if err := os.MkdirAll(dir, 0755); err != nil {
		return os.TempDir()
	}
	return dir
}
// WorkerStarting is a no-op; this adapter keeps no per-worker state.
func (a *basicDownloadAdapter) WorkerStarting(workerNum int) (interface{}, error) {
	return nil, nil
}
// WorkerEnding is a no-op.
func (a *basicDownloadAdapter) WorkerEnding(workerNum int, ctx interface{}) {
}
// DoTransfer performs (or resumes) the download described by t,
// delegating to download() with whatever partial file/hash state
// checkResumeDownload found on disk.
func (a *basicDownloadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error {
	f, fromByte, hashSoFar, err := a.checkResumeDownload(t)
	if err != nil {
		return err
	}
	return a.download(t, cb, authOkFunc, f, fromByte, hashSoFar)
}
// Checks to see if a download can be resumed, and if so returns a non-nil locked file, byte start and hash
// pre-seeded with the existing partial content. If no partial file exists, a
// fresh one is created exclusively and returned with offset 0 and a nil hash.
func (a *basicDownloadAdapter) checkResumeDownload(t *Transfer) (outFile *os.File, fromByte int64, hashSoFar hash.Hash, e error) {
	// lock the file by opening it for read/write, rather than checking Stat() etc
	// which could be subject to race conditions by other processes
	f, err := os.OpenFile(a.downloadFilename(t), os.O_RDWR, 0644)
	if err != nil {
		// Create a new file instead, must not already exist or error (permissions / race condition)
		newfile, err := os.OpenFile(a.downloadFilename(t), os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
		return newfile, 0, nil, err
	}
	// Successfully opened an existing file at this point
	// Read any existing data into hash then return file handle at end
	// (the copy leaves the file offset at EOF, so the download appends there).
	hash := tools.NewLfsContentHash()
	n, err := io.Copy(hash, f)
	if err != nil {
		f.Close()
		return nil, 0, nil, err
	}
	tracerx.Printf("xfer: Attempting to resume download of %q from byte %d", t.Oid, n)
	return f, n, hash, nil
}
// downloadFilename returns the path of the partial-download file for t.
// Not a temp file since we will be resuming it across invocations.
func (a *basicDownloadAdapter) downloadFilename(t *Transfer) string {
	return filepath.Join(a.tempDir(), t.Oid+".tmp")
}
// download starts or resumes a download. Always closes dlFile if non-nil.
// When fromByte > 0 an HTTP Range request is issued; if the server rejects
// or ignores the range, the partial file is discarded and the download
// restarts from byte 0. On success the verified file is renamed to t.Path.
func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOkFunc func(), dlFile *os.File, fromByte int64, hash hash.Hash) error {
	if dlFile != nil {
		// ensure we always close dlFile. Note that this does not conflict with the
		// early close below, as close is idempotent.
		defer dlFile.Close()
	}
	rel, err := t.Rel("download")
	if err != nil {
		return err
	}
	if rel == nil {
		return errors.Errorf("Object %s not found on the server.", t.Oid)
	}
	req, err := a.newHTTPRequest("GET", rel)
	if err != nil {
		return err
	}
	if fromByte > 0 {
		if dlFile == nil || hash == nil {
			return fmt.Errorf("Cannot restart %v from %d without a file & hash", t.Oid, fromByte)
		}
		// We could just use a start byte, but since we know the length be specific
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Size-1))
	}
	req = a.apiClient.LogRequest(req, "lfs.data.download")
	res, err := a.doHTTP(t, req)
	if err != nil {
		// Special-case status code 416 (Requested Range Not Satisfiable):
		// discard the partial file and fall back to a full re-download.
		if fromByte > 0 && dlFile != nil && (res != nil && res.StatusCode == 416) {
			tracerx.Printf("xfer: server rejected resume download request for %q from byte %d; re-downloading from start", t.Oid, fromByte)
			dlFile.Close()
			os.Remove(dlFile.Name())
			return a.download(t, cb, authOkFunc, nil, 0, nil)
		}
		return errors.NewRetriableError(err)
	}
	defer res.Body.Close()
	// Range request must return 206 & content range to confirm
	if fromByte > 0 {
		rangeRequestOk := false
		var failReason string
		// check 206 and Content-Range, fall back if either not as expected
		if res.StatusCode == 206 {
			// Probably a successful range request, check Content-Range
			if rangeHdr := res.Header.Get("Content-Range"); rangeHdr != "" {
				regex := regexp.MustCompile(`bytes (\d+)\-.*`)
				match := regex.FindStringSubmatch(rangeHdr)
				if match != nil && len(match) > 1 {
					contentStart, _ := strconv.ParseInt(match[1], 10, 64)
					if contentStart == fromByte {
						rangeRequestOk = true
					} else {
						failReason = fmt.Sprintf("Content-Range start byte incorrect: %s expected %d", match[1], fromByte)
					}
				} else {
					failReason = fmt.Sprintf("badly formatted Content-Range header: %q", rangeHdr)
				}
			} else {
				failReason = "missing Content-Range header in response"
			}
		} else {
			failReason = fmt.Sprintf("expected status code 206, received %d", res.StatusCode)
		}
		if rangeRequestOk {
			tracerx.Printf("xfer: server accepted resume download request: %q from byte %d", t.Oid, fromByte)
			advanceCallbackProgress(cb, t, fromByte)
		} else {
			// Abort resume, perform regular download
			tracerx.Printf("xfer: failed to resume download for %q from byte %d: %s. Re-downloading from start", t.Oid, fromByte, failReason)
			dlFile.Close()
			os.Remove(dlFile.Name())
			if res.StatusCode == 200 {
				// If status code was 200 then server just ignored Range header and
				// sent everything. Don't re-request, use this one from byte 0
				dlFile = nil
				fromByte = 0
				hash = nil
			} else {
				// re-request needed
				return a.download(t, cb, authOkFunc, nil, 0, nil)
			}
		}
	}
	// Signal auth OK on success response, before starting download to free up
	// other workers immediately
	if authOkFunc != nil {
		authOkFunc()
	}
	var hasher *tools.HashingReader
	httpReader := tools.NewRetriableReader(res.Body)
	if fromByte > 0 && hash != nil {
		// pre-load hashing reader with previous content
		hasher = tools.NewHashingReaderPreloadHash(httpReader, hash)
	} else {
		hasher = tools.NewHashingReader(httpReader)
	}
	if dlFile == nil {
		// New file start
		dlFile, err = os.OpenFile(a.downloadFilename(t), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			return err
		}
		defer dlFile.Close()
	}
	dlfilename := dlFile.Name()
	// Wrap callback to give name context
	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		if cb != nil {
			// Offset progress by fromByte so resumed downloads report cumulatively.
			return cb(t.Name, totalSize, readSoFar+fromByte, readSinceLast)
		}
		return nil
	}
	written, err := tools.CopyWithCallback(dlFile, hasher, res.ContentLength, ccb)
	if err != nil {
		return errors.Wrapf(err, "cannot write data to tempfile %q", dlfilename)
	}
	if err := dlFile.Close(); err != nil {
		return fmt.Errorf("can't close tempfile %q: %v", dlfilename, err)
	}
	// Verify the content hash before moving the file into place.
	if actual := hasher.Hash(); actual != t.Oid {
		return fmt.Errorf("Expected OID %s, got %s after %d bytes written", t.Oid, actual, written)
	}
	return tools.RenameFileCopyPermissions(dlfilename, t.Path)
}
// configureBasicDownloadAdapter registers the factory for the basic HTTP
// download adapter with the manifest. The factory panics if asked to build
// an uploader, since this adapter is download-only.
func configureBasicDownloadAdapter(m *Manifest) {
	m.RegisterNewAdapterFunc(BasicAdapterName, Download, func(name string, dir Direction) Adapter {
		switch dir {
		case Download:
			bd := &basicDownloadAdapter{newAdapterBase(name, dir, nil)}
			// self implements impl
			bd.transferImpl = bd
			return bd
		case Upload:
			panic("Should never ask this func to upload")
		}
		return nil
	})
}
|
package main
import "fmt"
// ListNode is a singly-linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}

// Print writes the list to stdout in "v1->v2->...->NULL" form.
// Implemented iteratively so arbitrarily long lists cannot overflow the
// stack (the original recursed once per node).
func (l *ListNode) Print() {
	for n := l; n != nil; n = n.Next {
		fmt.Print(n.Val, "->")
	}
	fmt.Print("NULL\n")
}

// partition rearranges the list so that all nodes with Val < x appear
// before all nodes with Val >= x, preserving the relative order within
// each group (LeetCode 86). Existing nodes are rewired in place; no new
// nodes are allocated. Returns the new head.
func partition(head *ListNode, x int) *ListNode {
	// Index 0 collects nodes with Val < x; index 1 collects Val >= x.
	nhead := []*ListNode{nil, nil} // heads of the two partial lists
	ntail := []*ListNode{nil, nil} // tails of the two partial lists
	for now := head; now != nil; now = now.Next {
		id := 0
		if now.Val >= x {
			id = 1
		}
		if nhead[id] == nil {
			nhead[id] = now
			ntail[id] = now
		} else {
			ntail[id].Next = now
			ntail[id] = now
		}
	}
	if nhead[0] == nil {
		// No nodes below the pivot: the ">= x" list (possibly nil) is the
		// whole result; its tail already ends the original chain.
		return nhead[1]
	}
	// Splice the "< x" list in front of the ">= x" list and terminate the
	// combined tail (it may still point into the middle of the old chain).
	ntail[0].Next = nhead[1]
	if ntail[1] != nil {
		ntail[1].Next = nil
	}
	return nhead[0]
}
// main builds the sample list 1->4->3->2->5->2, partitions it around 3,
// and prints the result.
func main() {
	values := []int{1, 4, 3, 2, 5, 2}
	var head *ListNode
	// Build the list back-to-front so each node links to the previous head.
	for i := len(values) - 1; i >= 0; i-- {
		head = &ListNode{Val: values[i], Next: head}
	}
	partition(head, 3).Print()
}
|
package network
import (
"fmt"
"github.com/woobest/network/config"
)
// PacketHeadSize is the fixed size in bytes of a packet header:
// a 2-byte body size followed by a 4-byte opcode.
const PacketHeadSize = 6

// ErrorPacketError is returned when a packet header cannot be parsed or
// declares an oversized body.
var ErrorPacketError = fmt.Errorf("Packet error")

// NetPacket is a decoded packet header.
type NetPacket struct {
	BodySize uint16
	Opcode   uint32
}

// PrasePacket decodes the packet header from buf. It returns
// ErrorPacketError when buf is shorter than PacketHeadSize or the
// declared body size exceeds config.MaxPacketSize.
func (self *NetPacket) PrasePacket(buf []byte) error {
	// Reject short buffers up front: ByteOrder.Uint16/Uint32 would
	// otherwise panic on an out-of-range access.
	if len(buf) < PacketHeadSize {
		return ErrorPacketError
	}
	self.BodySize = config.ByteOrder.Uint16(buf)
	self.Opcode = config.ByteOrder.Uint32(buf[2:])
	if self.BodySize > config.MaxPacketSize {
		return ErrorPacketError
	}
	return nil
}
// BuildBuff serializes the header into buf: the 2-byte body size
// followed by the 4-byte opcode, using the configured byte order.
func (p *NetPacket) BuildBuff(buf []byte) {
	config.ByteOrder.PutUint16(buf[:2], p.BodySize)
	config.ByteOrder.PutUint32(buf[2:], p.Opcode)
}
|
package clientkube
import (
"github.com/go-logr/logr"
"github.com/go-logr/logr/testing"
"github.com/bryanl/clientkube/pkg/cluster"
)
// options collects the configurable dependencies for this package.
type options struct {
	logger logr.Logger // log sink; defaults to a no-op logger
	store cluster.Store // backing cluster store; nil unless WithStore is applied
}
// currentOptions builds an options value by applying each Option in
// order on top of the defaults (a null logger, no store).
func currentOptions(list ...Option) options {
	result := options{
		logger: &testing.NullLogger{},
	}
	for _, apply := range list {
		apply(&result)
	}
	return result
}
// Option mutates the package options; see currentOptions.
type Option func(o *options)

// WithLogger sets the logger used by the package.
func WithLogger(logger logr.Logger) Option {
	return func(o *options) {
		o.logger = logger
	}
}

// WithStore sets the cluster store used by the package.
func WithStore(store cluster.Store) Option {
	return func(o *options) {
		o.store = store
	}
}
|
package source
import (
"errors"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
"github.com/sp0x/surf/browser"
"github.com/sp0x/surf/jar"
)
const (
searchMethodPost = "post"
searchMethodGet = "get"
)
// WebClient fetches web content through a scriptable browser instance,
// optionally caching fetched pages through Cacher.
type WebClient struct {
	Browser browser.Browsable
	Cacher ContentCacher // may be nil; pages are cached only when set
	options FetchOptions
	errorHandler func(options *RequestOptions) // invoked on failed fetches; may be nil
}

// SetErrorHandler installs a callback invoked with the request options
// whenever a fetch fails.
func (w *WebClient) SetErrorHandler(callback func(options *RequestOptions)) {
	w.errorHandler = callback
}
// NewWebContentFetcher creates a WebClient over the given browser,
// installing a fresh in-memory cookie jar and using contentCache to
// persist fetched pages.
func NewWebContentFetcher(browser browser.Browsable,
	contentCache ContentCacher,
	options FetchOptions) *WebClient {
	browser.SetCookieJar(jar.NewMemoryCookies())
	client := &WebClient{
		Browser: browser,
		// We'll use the indexer to cache content.
		Cacher:  contentCache,
		options: options,
	}
	return client
}
// ContentCacher persists fetched pages; IsCacheable gates whether a
// given fetch should be cached at all.
type ContentCacher interface {
	CachePage(browsable browser.Browsable) error
	IsCacheable() bool
}

// Cleanup clears the browser's navigation history.
func (w *WebClient) Cleanup() {
	w.Browser.HistoryJar().Clear()
}
// Gets the content from which we'll extract the search results.
// Fetch dispatches on req.Method ("" or "get" performs a GET, "post" a
// form POST), invokes the registered error handler on failure, and
// clears the browser history when done.
func (w *WebClient) Fetch(req *RequestOptions) (FetchResult, error) {
	if req == nil {
		return nil, errors.New("req is required for searching")
	}
	defer func() {
		// After we're done we'll cleanup the history of the browser.
		w.Cleanup()
	}()
	var err error
	var result FetchResult
	switch req.Method {
	case "", searchMethodGet:
		if err = w.get(req); err != nil {
			if w.errorHandler != nil {
				w.errorHandler(req)
			}
			return nil, err
		}
		result = extractResponseResult(w.Browser)
	case searchMethodPost:
		// Note: err here is a fresh variable scoped to this case; it is
		// consumed before the case ends, so the shadowing is harmless.
		postResult, err := w.Post(req)
		if err != nil {
			if w.errorHandler != nil {
				w.errorHandler(req)
			}
			return nil, err
		}
		result = postResult
	default:
		return nil, fmt.Errorf("unknown search method %q", req.Method)
	}
	w.dumpFetchData()
	return result, nil
}
// extractResponseResult wraps the browser's current response state into
// a typed FetchResult: JSON for application/json content, HTML
// otherwise, and an empty HTTPResult when there is no response yet.
func extractResponseResult(browser browser.Browsable) FetchResult {
	state := browser.State()
	if state.Response == nil {
		return &HTTPResult{}
	}
	fqContentType := state.Response.Header.Get("content-type")
	contentSplit := strings.Split(fqContentType, ";")
	contentEncoding := "utf8"
	if len(contentSplit) > 1 {
		// NOTE(review): this keeps the raw content-type parameter
		// (e.g. " charset=utf-8", including the "charset=" prefix) as
		// the encoding — confirm that downstream expects this form.
		contentEncoding = contentSplit[1]
	}
	rootFetchResult := HTTPResult{
		contentType: contentSplit[0],
		encoding: contentEncoding,
		Response: state.Response,
		StatusCode: state.Response.StatusCode,
	}
	if contentSplit[0] == "application/json" {
		return &JSONFetchResult{
			HTTPResult: rootFetchResult,
			Body: browser.RawBody(),
		}
	}
	return &HTMLFetchResult{
		HTTPResult: rootFetchResult,
		DOM: state.Dom,
	}
}
// get performs a GET request for req, appending req.Values as the query
// string, caching the page when the cacher allows it, and following any
// meta-refresh header in the response.
func (w *WebClient) get(req *RequestOptions) error {
	destURL := req.URL
	if len(req.Values) > 0 {
		pURL, err := url.Parse(fmt.Sprintf("%s?%s", req.URL, req.Values.Encode()))
		if err != nil {
			return err
		}
		destURL = pURL
	}
	// Apply referer/cookie settings before navigating.
	w.applyOptions(req)
	err := w.Browser.Open(destURL.String())
	if err != nil {
		return err
	}
	if w.Cacher != nil && w.Cacher.IsCacheable() {
		// Cache from a new tab so the current page state is untouched.
		_ = w.Cacher.CachePage(w.Browser.NewTab())
	}
	if err = w.handleMetaRefreshHeader(req); err != nil {
		if w.errorHandler != nil {
			w.errorHandler(req)
		}
		return err
	}
	return nil
}
// applyOptions copies per-request settings (referer header, cookie jar)
// from reqOptions onto the underlying browser.
func (w *WebClient) applyOptions(reqOptions *RequestOptions) {
	var referer string
	switch {
	case w.options.FakeReferer && reqOptions.Referer == nil:
		// Fake a same-page referer when none was supplied.
		referer = reqOptions.URL.String()
	case reqOptions.Referer != nil:
		referer = reqOptions.Referer.String()
	}
	if referer != "" {
		w.Browser.SetHeadersJar(http.Header{
			"referer": []string{referer},
		})
	}
	if reqOptions.CookieJar != nil {
		w.Browser.SetCookieJar(reqOptions.CookieJar)
	}
}
// URL returns the browser's current location.
func (w *WebClient) URL() *url.URL {
	return w.Browser.Url()
}

// Clone returns a shallow copy of the client bound to a fresh browser
// tab; the cacher, options and error handler are shared with the
// original.
func (w *WebClient) Clone() ContentFetcher {
	f := &WebClient{}
	*f = *w
	f.Browser = f.Browser.NewTab()
	return f
}

// Open navigates to opts.URL and returns the parsed response.
func (w *WebClient) Open(opts *RequestOptions) (FetchResult, error) {
	// NOTE(review): when NoEncoding is set this still applies
	// opts.Encoding (possibly "") as the encoding — confirm intended.
	if opts.Encoding != "" || opts.NoEncoding {
		w.Browser.SetEncoding(opts.Encoding)
	}
	err := w.Browser.Open(opts.URL.String())
	if err != nil {
		return nil, err
	}
	return extractResponseResult(w.Browser), nil
}

// Download streams the current page's content into buffer, returning
// the number of bytes written.
func (w *WebClient) Download(buffer io.Writer) (int64, error) {
	return w.Browser.Download(buffer)
}
// Post submits reqOps as a form POST and returns the parsed response.
// Request headers/cookies are applied beforehand, the page is cached
// when the cacher reports it as cacheable, and any meta-refresh
// redirect in the response is followed.
func (w *WebClient) Post(reqOps *RequestOptions) (FetchResult, error) {
	urlStr := reqOps.URL
	values := reqOps.Values
	w.applyOptions(reqOps)
	if err := w.Browser.PostForm(urlStr.String(), values); err != nil {
		return nil, err
	}
	// Consistency fix: gate caching on IsCacheable(), matching get().
	if w.Cacher != nil && w.Cacher.IsCacheable() {
		_ = w.Cacher.CachePage(w.Browser.NewTab())
	}
	if err := w.handleMetaRefreshHeader(reqOps); err != nil {
		return nil, err
	}
	w.dumpFetchData()
	return extractResponseResult(w.Browser), nil
}
// metaRefreshSplitRe splits a Refresh header value such as
// "0;url=my_view_page.php" into its delay and url parts. Compiled once
// at package scope instead of on every response.
var metaRefreshSplitRe = regexp.MustCompile(`\s*;\s*`)

// this should eventually upstream into surf browser
// handleMetaRefreshHeader follows a header like: Refresh: 0;url=page.php
// by rewriting reqOptions.URL and issuing a follow-up GET. It returns
// nil when there is no parseable refresh header.
func (w *WebClient) handleMetaRefreshHeader(reqOptions *RequestOptions) error {
	h := w.Browser.ResponseHeaders()
	refresh := h.Get("Refresh")
	if refresh == "" {
		return nil
	}
	s := metaRefreshSplitRe.Split(refresh, 2)
	if len(s) != 2 {
		return nil
	}
	log.
		WithField("fields", s).
		Info("Found refresh header")
	// NOTE(review): State().Request.URL is mutated in place here —
	// confirm the original request object is not reused elsewhere.
	requestURL := w.Browser.State().Request.URL
	requestURL.Path = strings.TrimPrefix(s[1], "url=")
	reqOptions.URL = requestURL
	err := w.get(reqOptions)
	if err != nil && w.errorHandler != nil {
		w.errorHandler(reqOptions)
	}
	return err
}
|
package testxuzan
import "fmt"
// Xuzan prints a fixed marker value to stdout.
func Xuzan() {
	const marker = 222222222
	fmt.Println(marker)
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"bytes"
"io/ioutil"
"testing"
"github.com/uber/kraken/core"
"github.com/stretchr/testify/require"
)
// TestSimpleStoreCreateCacheFile verifies that content written via
// CreateCacheFile can be read back unchanged through GetCacheFileReader.
func TestSimpleStoreCreateCacheFile(t *testing.T) {
	require := require.New(t) // local assertion helper shadows the package name
	s, cleanup := SimpleStoreFixture()
	defer cleanup()
	tag := core.TagFixture()
	// A digest string doubles as convenient, unique file content.
	d := core.DigestFixture().String()
	require.NoError(s.CreateCacheFile(tag, bytes.NewBufferString(d)))
	f, err := s.GetCacheFileReader(tag)
	require.NoError(err)
	result, err := ioutil.ReadAll(f)
	require.NoError(err)
	require.Equal(d, string(result))
}
|
package models
import(
"encoding/json"
)
/**
* Type definition for SeverityEnum enum
*/
type SeverityEnum int
/**
* Value collection for SeverityEnum enum
*/
const (
Severity_KCRITICAL SeverityEnum = 1 + iota
Severity_KWARNING
Severity_KINFO
)
func (r SeverityEnum) MarshalJSON() ([]byte, error) {
s := SeverityEnumToValue(r)
return json.Marshal(s)
}
func (r *SeverityEnum) UnmarshalJSON(data []byte) error {
var s string
json.Unmarshal(data, &s)
v := SeverityEnumFromValue(s)
*r = v
return nil
}
/**
* Converts SeverityEnum to its string representation
*/
func SeverityEnumToValue(severityEnum SeverityEnum) string {
switch severityEnum {
case Severity_KCRITICAL:
return "kCritical"
case Severity_KWARNING:
return "kWarning"
case Severity_KINFO:
return "kInfo"
default:
return "kCritical"
}
}
/**
* Converts SeverityEnum Array to its string Array representation
*/
func SeverityEnumArrayToValue(severityEnum []SeverityEnum) []string {
convArray := make([]string,len( severityEnum))
for i:=0; i<len(severityEnum);i++ {
convArray[i] = SeverityEnumToValue(severityEnum[i])
}
return convArray
}
/**
* Converts given value to its enum representation
*/
func SeverityEnumFromValue(value string) SeverityEnum {
switch value {
case "kCritical":
return Severity_KCRITICAL
case "kWarning":
return Severity_KWARNING
case "kInfo":
return Severity_KINFO
default:
return Severity_KCRITICAL
}
}
|
package main
import "github.com/spf13/cobra"
// clusterAnnotationAddFlags holds CLI flags for adding annotations to a
// cluster.
type clusterAnnotationAddFlags struct {
	clusterFlags
	cluster string // target cluster id (required)
	annotations []string // annotations to add (at least one required)
}

// addFlags registers the add-annotation flags on command and marks the
// required ones.
func (flags *clusterAnnotationAddFlags) addFlags(command *cobra.Command) {
	command.Flags().StringVar(&flags.cluster, "cluster", "", "The id of the cluster to be annotated.")
	command.Flags().StringArrayVar(&flags.annotations, "annotation", []string{}, "Additional annotations for the cluster. Accepts multiple values, for example: '... --annotation abc --annotation def'")
	// MarkFlagRequired only errors for unknown flag names; both are
	// registered above, so the errors are safely ignored.
	_ = command.MarkFlagRequired("cluster")
	_ = command.MarkFlagRequired("annotation")
}
// clusterAnnotationDeleteFlags holds CLI flags for removing a single
// annotation from a cluster.
type clusterAnnotationDeleteFlags struct {
	clusterFlags
	cluster string // target cluster id (required)
	annotation string // annotation name to remove (required)
}

// addFlags registers the delete-annotation flags on command and marks
// the required ones.
func (flags *clusterAnnotationDeleteFlags) addFlags(command *cobra.Command) {
	command.Flags().StringVar(&flags.cluster, "cluster", "", "The id of the cluster from which annotation should be removed.")
	command.Flags().StringVar(&flags.annotation, "annotation", "", "Name of the annotation to be removed from the cluster.")
	// MarkFlagRequired only errors for unknown flag names; both are
	// registered above, so the errors are safely ignored.
	_ = command.MarkFlagRequired("cluster")
	_ = command.MarkFlagRequired("annotation")
}
|
package main
import (
"fmt"
"kcctoken"
"log"
"math/big"
"os"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
)
// _transfer sends _num tokens of the hard-coded ERC20 contract to
// _addr, signing with the local keystore account. Any failure aborts
// the process via log.Fatal.
func _transfer(_addr string, _num int64) {
	cli, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatalf("failed to ethclient.Dial: %v", err)
	}
	instance, err := kcctoken.NewToken(common.HexToAddress("0xf3a68ae8ca860a1803aad6bf6aa6b1e6ee80f6b1"), cli)
	if err != nil {
		log.Fatalf("failed to kcctoken.NewToken: %v", err)
	}
	file, err := os.Open("../../data/keystore/UTC--2019-06-19T22-59-49.902830200Z--6cfef69b6f13ade124d5f670ce7e00cdb54e8d3e")
	if err != nil {
		log.Fatalf("failed to os.Open: %v", err)
	}
	// Fix: the keystore file was never closed.
	defer file.Close()
	auth, err := bind.NewTransactor(file, "eilinge")
	if err != nil {
		log.Fatalf("failed to bind.NewTransactor: %v", err)
	}
	_, err = instance.Transfer(auth, common.HexToAddress(_addr), big.NewInt(_num))
	if err != nil {
		log.Fatalf("failed to instance.Transfer: %v", err)
	}
	fmt.Println("Transfer token success")
}
// main transfers a fixed amount of tokens to a hard-coded recipient.
func main() {
	const recipient = "0xc3c480c212bc5485062655f3848691974d784b69"
	const amount = int64(10000)
	_transfer(recipient, amount)
}
|
package postgres
import (
"context"
"github.com/stone-co/the-amazing-ledger/app/domain/vos"
"github.com/stone-co/the-amazing-ledger/app/shared/instrumentation/newrelic"
)
// GetAnalyticalData streams ledger entries matching the account path to
// fn, one vos.Statement per row. Filtering narrows progressively with
// path.TotalLevels (class, then group, then subgroup); iteration stops
// on the first error returned by fn.
func (r *LedgerRepository) GetAnalyticalData(ctx context.Context, path vos.AccountPath, fn func(vos.Statement) error) error {
	operation := "Repository.GetAnalyticalData"
	query := `
		SELECT
			account_class,
			account_group,
			account_subgroup,
			account_id,
			account_suffix,
			operation,
			amount
		FROM
			entries
	`
	args := []interface{}{}
	// Build WHERE clauses so the positional placeholders $1..$3 always
	// line up with the order values are appended to args.
	if path.TotalLevels >= 1 {
		query += " WHERE account_class = $1"
		args = append(args, path.Class.String())
		if path.TotalLevels >= 2 {
			query += " AND account_group = $2"
			args = append(args, path.Group)
			if path.TotalLevels >= 3 {
				query += " AND account_subgroup = $3"
				args = append(args, path.Subgroup)
			}
		}
	}
	query += " ORDER BY version"
	// Instrumentation segment covers the query plus row iteration.
	defer newrelic.NewDatastoreSegment(ctx, collection, operation, query).End()
	rows, err := r.db.Query(ctx, query, args...)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var class string
		var group string
		var subgroup string
		var id string
		var suffix string
		var op string
		var amount int
		if err = rows.Scan(
			&class,
			&group,
			&subgroup,
			&id,
			&suffix,
			&op,
			&amount,
		); err != nil {
			return err
		}
		account := vos.FormatAccount(class, group, subgroup, id, suffix)
		err = fn(vos.Statement{
			Account: account,
			Operation: vos.OperationTypeFromString(op),
			Amount: amount,
		})
		if err != nil {
			return err
		}
	}
	// Surface any error encountered during iteration itself.
	if err := rows.Err(); err != nil {
		return err
	}
	return nil
}
|
package tasks
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-swagger/go-swagger/httpkit"
"github.com/go-swagger/go-swagger/examples/task-tracker/models"
)
/*UpdateTaskOK Task details
swagger:response updateTaskOK
*/
type UpdateTaskOK struct {
	// Payload is the updated task, serialized as the response body.
	// In: body
	Payload *models.Task `json:"body,omitempty"`
}

// NewUpdateTaskOK creates UpdateTaskOK with default headers values
func NewUpdateTaskOK() *UpdateTaskOK {
	return &UpdateTaskOK{}
}

// WithPayload adds the payload to the update task o k response
func (o *UpdateTaskOK) WithPayload(payload *models.Task) *UpdateTaskOK {
	o.Payload = payload
	return o
}

// WriteResponse to the client: 200 with the task body when present.
func (o *UpdateTaskOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {
	rw.WriteHeader(200)
	if o.Payload != nil {
		// Serialization failures escalate to the recovery middleware.
		if err := producer.Produce(rw, o.Payload); err != nil {
			panic(err) // let the recovery middleware deal with this
		}
	}
}
/*UpdateTaskUnprocessableEntity Validation error
swagger:response updateTaskUnprocessableEntity
*/
type UpdateTaskUnprocessableEntity struct {
	// Payload describes the validation failure, serialized as the body.
	// In: body
	Payload *models.ValidationError `json:"body,omitempty"`
}

// NewUpdateTaskUnprocessableEntity creates UpdateTaskUnprocessableEntity with default headers values
func NewUpdateTaskUnprocessableEntity() *UpdateTaskUnprocessableEntity {
	return &UpdateTaskUnprocessableEntity{}
}

// WithPayload adds the payload to the update task unprocessable entity response
func (o *UpdateTaskUnprocessableEntity) WithPayload(payload *models.ValidationError) *UpdateTaskUnprocessableEntity {
	o.Payload = payload
	return o
}

// WriteResponse to the client: 422 with the validation error body.
func (o *UpdateTaskUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {
	rw.WriteHeader(422)
	if o.Payload != nil {
		// Serialization failures escalate to the recovery middleware.
		if err := producer.Produce(rw, o.Payload); err != nil {
			panic(err) // let the recovery middleware deal with this
		}
	}
}
/*UpdateTaskDefault update task default
swagger:response updateTaskDefault
*/
type UpdateTaskDefault struct {
	// _statusCode is the HTTP status to emit; defaults to 500.
	_statusCode int
}

// NewUpdateTaskDefault creates UpdateTaskDefault with default headers values.
// Non-positive codes are coerced to 500.
func NewUpdateTaskDefault(code int) *UpdateTaskDefault {
	if code <= 0 {
		code = 500
	}
	return &UpdateTaskDefault{
		_statusCode: code,
	}
}

// WithStatusCode adds the status to the update task default response
func (o *UpdateTaskDefault) WithStatusCode(code int) *UpdateTaskDefault {
	o._statusCode = code
	return o
}

// WriteResponse to the client: status code only, no body.
func (o *UpdateTaskDefault) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {
	rw.WriteHeader(o._statusCode)
}
|
// This file was generated by counterfeiter
package fakes
import (
"database/sql"
"sync"
)
// Db is a counterfeiter-generated test double for a sqlx-style database
// handle. Each faked method records its arguments under its own mutex
// and is driven either by a settable stub or by canned return values.
type Db struct {
	// Exec double.
	ExecStub func(query string, args ...interface{}) (sql.Result, error)
	execMutex sync.RWMutex
	execArgsForCall []struct {
		query string
		args []interface{}
	}
	execReturns struct {
		result1 sql.Result
		result2 error
	}
	// NamedExec double.
	NamedExecStub func(query string, arg interface{}) (sql.Result, error)
	namedExecMutex sync.RWMutex
	namedExecArgsForCall []struct {
		query string
		arg interface{}
	}
	namedExecReturns struct {
		result1 sql.Result
		result2 error
	}
	// Get double.
	GetStub func(dest interface{}, query string, args ...interface{}) error
	getMutex sync.RWMutex
	getArgsForCall []struct {
		dest interface{}
		query string
		args []interface{}
	}
	getReturns struct {
		result1 error
	}
	// Select double.
	SelectStub func(dest interface{}, query string, args ...interface{}) error
	selectMutex sync.RWMutex
	selectArgsForCall []struct {
		dest interface{}
		query string
		args []interface{}
	}
	selectReturns struct {
		result1 error
	}
}
// Exec records the invocation, then delegates to ExecStub when set or
// returns the values configured via ExecReturns.
func (fake *Db) Exec(query string, args ...interface{}) (sql.Result, error) {
	fake.execMutex.Lock()
	call := struct {
		query string
		args  []interface{}
	}{query, args}
	fake.execArgsForCall = append(fake.execArgsForCall, call)
	fake.execMutex.Unlock()
	if stub := fake.ExecStub; stub != nil {
		return stub(query, args...)
	}
	return fake.execReturns.result1, fake.execReturns.result2
}

// ExecCallCount reports how many times Exec has been invoked.
func (fake *Db) ExecCallCount() int {
	fake.execMutex.RLock()
	defer fake.execMutex.RUnlock()
	return len(fake.execArgsForCall)
}

// ExecArgsForCall returns the arguments of the i-th Exec invocation.
func (fake *Db) ExecArgsForCall(i int) (string, []interface{}) {
	fake.execMutex.RLock()
	defer fake.execMutex.RUnlock()
	call := fake.execArgsForCall[i]
	return call.query, call.args
}

// ExecReturns fixes the values Exec returns and clears any stub.
func (fake *Db) ExecReturns(result1 sql.Result, result2 error) {
	fake.ExecStub = nil
	fake.execReturns = struct {
		result1 sql.Result
		result2 error
	}{result1, result2}
}
// NamedExec records the invocation, then delegates to NamedExecStub
// when set or returns the values configured via NamedExecReturns.
func (fake *Db) NamedExec(query string, arg interface{}) (sql.Result, error) {
	fake.namedExecMutex.Lock()
	call := struct {
		query string
		arg   interface{}
	}{query, arg}
	fake.namedExecArgsForCall = append(fake.namedExecArgsForCall, call)
	fake.namedExecMutex.Unlock()
	if stub := fake.NamedExecStub; stub != nil {
		return stub(query, arg)
	}
	return fake.namedExecReturns.result1, fake.namedExecReturns.result2
}

// NamedExecCallCount reports how many times NamedExec has been invoked.
func (fake *Db) NamedExecCallCount() int {
	fake.namedExecMutex.RLock()
	defer fake.namedExecMutex.RUnlock()
	return len(fake.namedExecArgsForCall)
}

// NamedExecArgsForCall returns the arguments of the i-th NamedExec invocation.
func (fake *Db) NamedExecArgsForCall(i int) (string, interface{}) {
	fake.namedExecMutex.RLock()
	defer fake.namedExecMutex.RUnlock()
	call := fake.namedExecArgsForCall[i]
	return call.query, call.arg
}

// NamedExecReturns fixes the values NamedExec returns and clears any stub.
func (fake *Db) NamedExecReturns(result1 sql.Result, result2 error) {
	fake.NamedExecStub = nil
	fake.namedExecReturns = struct {
		result1 sql.Result
		result2 error
	}{result1, result2}
}
// Get records the invocation, then delegates to GetStub when set or
// returns the value configured via GetReturns.
func (fake *Db) Get(dest interface{}, query string, args ...interface{}) error {
	fake.getMutex.Lock()
	call := struct {
		dest  interface{}
		query string
		args  []interface{}
	}{dest, query, args}
	fake.getArgsForCall = append(fake.getArgsForCall, call)
	fake.getMutex.Unlock()
	if stub := fake.GetStub; stub != nil {
		return stub(dest, query, args...)
	}
	return fake.getReturns.result1
}

// GetCallCount reports how many times Get has been invoked.
func (fake *Db) GetCallCount() int {
	fake.getMutex.RLock()
	defer fake.getMutex.RUnlock()
	return len(fake.getArgsForCall)
}

// GetArgsForCall returns the arguments of the i-th Get invocation.
func (fake *Db) GetArgsForCall(i int) (interface{}, string, []interface{}) {
	fake.getMutex.RLock()
	defer fake.getMutex.RUnlock()
	call := fake.getArgsForCall[i]
	return call.dest, call.query, call.args
}

// GetReturns fixes the value Get returns and clears any stub.
func (fake *Db) GetReturns(result1 error) {
	fake.GetStub = nil
	fake.getReturns = struct {
		result1 error
	}{result1}
}
// Select records the invocation, then delegates to SelectStub when set
// or returns the value configured via SelectReturns.
func (fake *Db) Select(dest interface{}, query string, args ...interface{}) error {
	fake.selectMutex.Lock()
	call := struct {
		dest  interface{}
		query string
		args  []interface{}
	}{dest, query, args}
	fake.selectArgsForCall = append(fake.selectArgsForCall, call)
	fake.selectMutex.Unlock()
	if stub := fake.SelectStub; stub != nil {
		return stub(dest, query, args...)
	}
	return fake.selectReturns.result1
}

// SelectCallCount reports how many times Select has been invoked.
func (fake *Db) SelectCallCount() int {
	fake.selectMutex.RLock()
	defer fake.selectMutex.RUnlock()
	return len(fake.selectArgsForCall)
}

// SelectArgsForCall returns the arguments of the i-th Select invocation.
func (fake *Db) SelectArgsForCall(i int) (interface{}, string, []interface{}) {
	fake.selectMutex.RLock()
	defer fake.selectMutex.RUnlock()
	call := fake.selectArgsForCall[i]
	return call.dest, call.query, call.args
}

// SelectReturns fixes the value Select returns and clears any stub.
func (fake *Db) SelectReturns(result1 error) {
	fake.SelectStub = nil
	fake.selectReturns = struct {
		result1 error
	}{result1}
}
|
package main
import (
"fmt"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/sp0x/torrentd/indexer"
"github.com/sp0x/torrentd/indexer/search"
"github.com/sp0x/torrentd/indexer/status"
)
// init registers the `get` sub-command along with its flags and their
// viper/environment bindings.
func init() {
	cmdGet := &cobra.Command{
		Use:   "get",
		Short: "Run a query or get results from index(es)",
		Run:   getCommand,
	}
	storage := ""
	query := ""
	workers := 0
	users := 1
	cmdFlags := cmdGet.PersistentFlags()
	cmdFlags.StringVarP(&storage, "storage", "o", "boltdb", `The storage backing to use.
Currently supported storage backings: boltdb, firebase, sqlite`)
	cmdFlags.StringVar(&query, "query", "", `Query to use when searching`)
	cmdFlags.IntVar(&workers, "workers", 0, "The number of parallel searches that can be used.")
	cmdFlags.IntVar(&users, "users", 1, "The number of user sessions to use in rotation.")
	_ = viper.BindEnv("workers")
	_ = viper.BindEnv("users")
	firebaseProject := ""
	firebaseCredentials := ""
	// Fix: these two registrations were swapped — the credentials
	// variable was bound to the "firebase_project" flag and vice versa.
	cmdFlags.StringVarP(&firebaseProject, "firebase_project", "", "", "The project id for firebase")
	cmdFlags.StringVarP(&firebaseCredentials, "firebase_credentials_file", "", "", "The service credentials for firebase")
	// Storage config
	_ = viper.BindPFlag("storage", cmdFlags.Lookup("storage"))
	_ = viper.BindEnv("storage")
	// Firebase related
	_ = viper.BindPFlag("firebase_project", cmdFlags.Lookup("firebase_project"))
	_ = viper.BindEnv("firebase_project")
	_ = viper.BindPFlag("firebase_credentials_file", cmdFlags.Lookup("firebase_credentials_file"))
	_ = viper.BindEnv("firebase_credentials_file")
	_ = viper.BindPFlags(cmdFlags)
	_ = viper.BindEnv("query")
	rootCmd.AddCommand(cmdGet)
}
// getCommand executes the `get` sub-command: builds the indexer facade,
// runs the query from the --query flag, and exits non-zero on failure.
func getCommand(c *cobra.Command, _ []string) {
	facade := indexer.NewFacadeFromConfiguration(&appConfig)
	if facade == nil {
		log.Error("Couldn't initialize torrent facade.")
		return
	}
	// Start watching the torrent tracker.
	status.SetupPubsub(appConfig.GetString("firebase_project"))
	queryStr := c.Flag("query").Value.String()
	query, _ := search.NewQueryFromQueryString(queryStr)
	err := indexer.Get(facade, query)
	if err != nil {
		// Fix: the error value was previously dropped from the message.
		fmt.Printf("Couldn't get results: %v\n", err)
		os.Exit(1)
	}
}
|
package main
// f is declared without a body; its implementation is presumably
// supplied elsewhere (e.g. assembly or go:linkname) — confirm.
func f()
|
package controllers
import (
"fmt"
"net/http"
"github.com/astaxie/beego"
"github.com/go-react/community/models"
)
// ErrorController provides unified error handling for the application;
// beego routes ErrorNNN handlers on it by status code.
type ErrorController struct {
	BaseController
}
// result writes a JSON error payload with the given HTTP status code.
func (ec *ErrorController) result(statusCode int, message string) {
	result := models.C_ErrorResult{
		Code: http.StatusText(statusCode),
		Message: message,
	}
	// In development mode, prefer the custom error code/message stashed
	// on the request context by the failing handler.
	if beego.BConfig.RunMode == beego.DEV {
		if v := ec.Ctx.Input.GetData("ErrorCode"); v != nil {
			result.Code = fmt.Sprint(v)
		}
		if v := ec.Ctx.Input.GetData("Error"); v != nil {
			result.Message = v.(error).Error()
		}
	}
	ec.Ctx.Output.SetStatus(statusCode)
	ec.Data["json"] = result
	ec.ServeJSON()
}
// Error400 handles 400 Bad Request.
func (ec *ErrorController) Error400() {
	ec.result(http.StatusBadRequest, "请求格式错误!")
}

// Error401 handles 401 Unauthorized.
func (ec *ErrorController) Error401() {
	ec.result(http.StatusUnauthorized, "请求未授权!")
}

// Error403 handles 403 Forbidden.
func (ec *ErrorController) Error403() {
	ec.result(http.StatusForbidden, "鉴权成功,但是该用户没有权限!")
}

// Error404 handles 404 Not Found.
func (ec *ErrorController) Error404() {
	ec.result(http.StatusNotFound, "请求的资源不存在!")
}

// Error500 handles 500 Internal Server Error.
func (ec *ErrorController) Error500() {
	ec.result(http.StatusInternalServerError, "内部服务器错误!")
}

// Error503 handles 503 Service Unavailable.
func (ec *ErrorController) Error503() {
	ec.result(http.StatusServiceUnavailable, "服务器当前无法处理请求!")
}
|
package models
import "time"
// TODO(nick): Shared protobuf would be nice
// Sms is the JSON wire representation of a text message.
type Sms struct {
	Address string `json:"address"` // sender/recipient phone number or address
	Body string `json:"body"` // message text
	Date time.Time `json:"date"`
	ID int64 `json:"id,omitempty"`
	ThreadID string `json:"threadId"` // conversation/thread identifier
	SmsType int `json:"type"`
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package build
import (
"bytes"
"context"
"fmt"
"io"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/graph"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/platform"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/tag"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
testEvent "github.com/GoogleContainerTools/skaffold/v2/testutil/event"
)
// TestGetBuild exercises performBuild: tag lookup, builder invocation,
// output capture, and sanitization of ko:// scheme tags.
func TestGetBuild(t *testing.T) {
	tests := []struct {
		description   string
		buildArtifact ArtifactBuilder
		tags          tag.ImageTags
		expectedTag   string
		expectedOut   string
		shouldErr     bool
	}{
		{
			description: "build succeeds",
			buildArtifact: func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				out.Write([]byte("build succeeds"))
				return fmt.Sprintf("%s@sha256:abac", tag), nil
			},
			tags: tag.ImageTags{
				"skaffold/image1": "skaffold/image1:v0.0.1",
				"skaffold/image2": "skaffold/image2:v0.0.2",
			},
			expectedTag: "skaffold/image1:v0.0.1@sha256:abac",
			expectedOut: "build succeeds",
		},
		{
			// The ko:// prefix must be stripped and uppercase Go import
			// path characters lowered before tagging.
			description: "tag with ko scheme prefix and Go import path with uppercase characters is sanitized",
			buildArtifact: func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				out.Write([]byte("build succeeds"))
				return fmt.Sprintf("%s@sha256:abac", tag), nil
			},
			tags: tag.ImageTags{
				"skaffold/image1": "ko://github.com/GoogleContainerTools/skaffold/cmd/skaffold:v0.0.1",
			},
			expectedTag: "github.com/googlecontainertools/skaffold/cmd/skaffold:v0.0.1@sha256:abac",
			expectedOut: "build succeeds",
		},
		{
			description: "build fails",
			buildArtifact: func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				return "", fmt.Errorf("build fails")
			},
			tags: tag.ImageTags{
				"skaffold/image1": "",
			},
			expectedOut: "",
			shouldErr:   true,
		},
		{
			// No entry for skaffold/image1 in tags at all.
			description: "tag not found",
			tags:        tag.ImageTags{},
			expectedOut: "",
			shouldErr:   true,
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			out := new(bytes.Buffer)
			artifact := &latest.Artifact{ImageName: "skaffold/image1"}
			got, err := performBuild(context.Background(), out, test.tags, platform.Resolver{}, artifact, test.buildArtifact)
			t.CheckErrorAndDeepEqual(test.shouldErr, err, test.expectedTag, got)
			t.CheckDeepEqual(test.expectedOut, out.String())
		})
	}
}
// TestFormatResults verifies artifactStoreImpl.GetArtifacts: every
// artifact must have a stored build result, otherwise it errors.
func TestFormatResults(t *testing.T) {
	tests := []struct {
		description string
		artifacts   []*latest.Artifact
		expected    []graph.Artifact
		results     map[string]interface{}
		shouldErr   bool
	}{
		{
			description: "all builds completely successfully",
			artifacts: []*latest.Artifact{
				{ImageName: "skaffold/image1"},
				{ImageName: "skaffold/image2"},
			},
			expected: []graph.Artifact{
				{ImageName: "skaffold/image1", Tag: "skaffold/image1:v0.0.1@sha256:abac"},
				{ImageName: "skaffold/image2", Tag: "skaffold/image2:v0.0.2@sha256:abac"},
			},
			results: map[string]interface{}{
				"skaffold/image1": "skaffold/image1:v0.0.1@sha256:abac",
				"skaffold/image2": "skaffold/image2:v0.0.2@sha256:abac",
			},
		},
		{
			// image2 has no stored result, so the whole lookup fails.
			description: "no build result produced for a build",
			artifacts: []*latest.Artifact{
				{ImageName: "skaffold/image1"},
				{ImageName: "skaffold/image2"},
			},
			expected: nil,
			results: map[string]interface{}{
				"skaffold/image1": "skaffold/image1:v0.0.1@sha256:abac",
			},
			shouldErr: true,
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// Seed the artifact store with the canned results.
			m := new(sync.Map)
			for k, v := range test.results {
				m.Store(k, v)
			}
			results := &artifactStoreImpl{m: m}
			got, err := results.GetArtifacts(test.artifacts)
			t.CheckErrorAndDeepEqual(test.shouldErr, err, test.expected, got)
		})
	}
}
// TestInOrder checks that InOrder serializes and labels the build logs
// of dependent artifacts correctly.
func TestInOrder(t *testing.T) {
	tests := []struct {
		description string
		buildFunc   ArtifactBuilder
		expected    string
	}{
		{
			description: "short and nice build log",
			expected:    "Building 2 artifacts in parallel\nBuilding [skaffold/image1]...\nshort\nBuild [skaffold/image1] succeeded\nBuilding [skaffold/image2]...\nshort\nBuild [skaffold/image2] succeeded\n",
			buildFunc: func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				out.Write([]byte("short\n"))
				return fmt.Sprintf("%s:tag", artifact.ImageName), nil
			},
		},
		{
			description: "long build log gets printed correctly",
			expected: `Building 2 artifacts in parallel
Building [skaffold/image1]...
This is a long string more than 10 bytes.
And new lines
Build [skaffold/image1] succeeded
Building [skaffold/image2]...
This is a long string more than 10 bytes.
And new lines
Build [skaffold/image2] succeeded
`,
			buildFunc: func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				out.Write([]byte("This is a long string more than 10 bytes.\nAnd new lines\n"))
				return fmt.Sprintf("%s:tag", artifact.ImageName), nil
			},
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			out := new(bytes.Buffer)
			// image2 depends on image1 to force ordered output.
			artifacts := []*latest.Artifact{
				{ImageName: "skaffold/image1"},
				{ImageName: "skaffold/image2", Dependencies: []*latest.ArtifactDependency{{ImageName: "skaffold/image1"}}},
			}
			tags := tag.ImageTags{
				"skaffold/image1": "skaffold/image1:v0.0.1",
				"skaffold/image2": "skaffold/image2:v0.0.2",
			}
			initializeEvents()
			// NOTE(review): InOrder's results and error are discarded;
			// the captured output text is the only assertion here.
			InOrder(context.Background(), out, tags, platform.Resolver{}, artifacts, test.buildFunc, 0, NewArtifactStore())
			t.CheckDeepEqual(test.expected, out.String())
		})
	}
}
// TestInOrderConcurrency verifies that InOrder honors the concurrency
// limit: the stub builder fails if it ever observes more simultaneous
// invocations than allowed.
func TestInOrderConcurrency(t *testing.T) {
	tests := []struct {
		artifacts      int
		limit          int
		maxConcurrency int
	}{
		{
			artifacts:      10,
			limit:          0, // default - no limit
			maxConcurrency: 10,
		},
		{
			artifacts:      50,
			limit:          1,
			maxConcurrency: 1,
		},
		{
			artifacts:      50,
			limit:          10,
			maxConcurrency: 10,
		},
	}
	for _, test := range tests {
		testutil.Run(t, fmt.Sprintf("%d artifacts, max concurrency=%d", test.artifacts, test.limit), func(t *testutil.T) {
			var artifacts []*latest.Artifact
			tags := tag.ImageTags{}
			for i := 0; i < test.artifacts; i++ {
				imageName := fmt.Sprintf("skaffold/image%d", i)
				tag := fmt.Sprintf("skaffold/image%d:tag", i)
				artifacts = append(artifacts, &latest.Artifact{ImageName: imageName})
				tags[imageName] = tag
			}
			// Track in-flight builds with an atomic counter; exceeding
			// the expected maximum is reported as a build failure.
			var actualConcurrency int32
			builder := func(_ context.Context, _ io.Writer, _ *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				if atomic.AddInt32(&actualConcurrency, 1) > int32(test.maxConcurrency) {
					return "", fmt.Errorf("only %d build can run at a time", test.maxConcurrency)
				}
				time.Sleep(5 * time.Millisecond)
				atomic.AddInt32(&actualConcurrency, -1)
				return tag, nil
			}
			initializeEvents()
			results, err := InOrder(context.Background(), io.Discard, tags, platform.Resolver{}, artifacts, builder, test.limit, NewArtifactStore())
			t.CheckNoError(err)
			t.CheckDeepEqual(test.artifacts, len(results))
		})
	}
}
// TestInOrderForArgs exercises InOrder over artifact graphs of varying
// size, dependency shape, and concurrency limit, including build-failure
// propagation (both with and without dependencies).
func TestInOrderForArgs(t *testing.T) {
	tests := []struct {
		description   string
		buildArtifact ArtifactBuilder
		artifactLen   int
		concurrency   int
		dependency    map[int][]int // adjacency list of artifact indices, see setDependencies
		expected      []graph.Artifact
		err           error
	}{
		{
			description: "runs in parallel for 2 artifacts with no dependency",
			buildArtifact: func(_ context.Context, _ io.Writer, _ *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				return tag, nil
			},
			artifactLen: 2,
			expected: []graph.Artifact{
				{ImageName: "artifact1", Tag: "artifact1@tag1"},
				{ImageName: "artifact2", Tag: "artifact2@tag2"},
			},
		},
		{
			description: "runs in parallel for 5 artifacts with dependencies",
			buildArtifact: func(_ context.Context, _ io.Writer, _ *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				return tag, nil
			},
			dependency: map[int][]int{
				0: {2, 3},
				1: {3},
				2: {1},
				3: {4},
			},
			artifactLen: 5,
			expected: []graph.Artifact{
				{ImageName: "artifact1", Tag: "artifact1@tag1"},
				{ImageName: "artifact2", Tag: "artifact2@tag2"},
				{ImageName: "artifact3", Tag: "artifact3@tag3"},
				{ImageName: "artifact4", Tag: "artifact4@tag4"},
				{ImageName: "artifact5", Tag: "artifact5@tag5"},
			},
		},
		{
			description: "runs with max concurrency of 2 for 5 artifacts with dependencies",
			buildArtifact: func(_ context.Context, _ io.Writer, _ *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				return tag, nil
			},
			dependency: map[int][]int{
				0: {2, 3},
				1: {3},
				2: {1},
				3: {4},
			},
			artifactLen: 5,
			concurrency: 2,
			expected: []graph.Artifact{
				{ImageName: "artifact1", Tag: "artifact1@tag1"},
				{ImageName: "artifact2", Tag: "artifact2@tag2"},
				{ImageName: "artifact3", Tag: "artifact3@tag3"},
				{ImageName: "artifact4", Tag: "artifact4@tag4"},
				{ImageName: "artifact5", Tag: "artifact5@tag5"},
			},
		},
		{
			description: "runs in parallel should return for 0 artifacts",
			artifactLen: 0,
			expected:    nil,
		},
		{
			description: "build fails for artifacts without dependencies",
			buildArtifact: func(c context.Context, _ io.Writer, a *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				if a.ImageName == "artifact2" {
					return "", fmt.Errorf(`some error occurred while building "artifact2"`)
				}
				// non-failing artifacts block until cancelled (or 5s), so the
				// failure must cancel the in-flight siblings
				select {
				case <-c.Done():
					return "", c.Err()
				case <-time.After(5 * time.Second):
					return tag, nil
				}
			},
			artifactLen: 5,
			expected:    nil,
			err:         fmt.Errorf(`build [artifact2] failed: %w`, fmt.Errorf(`some error occurred while building "artifact2"`)),
		},
		{
			description: "build fails for artifacts with dependencies",
			buildArtifact: func(_ context.Context, _ io.Writer, a *latest.Artifact, tag string, _ platform.Matcher) (string, error) {
				if a.ImageName == "artifact2" {
					return "", fmt.Errorf(`some error occurred while building "artifact2"`)
				}
				return tag, nil
			},
			dependency: map[int][]int{
				0: {1},
				1: {2},
				2: {3},
				3: {4},
			},
			artifactLen: 5,
			expected:    nil,
			err:         fmt.Errorf(`build [artifact2] failed: %w`, fmt.Errorf(`some error occurred while building "artifact2"`)),
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			artifacts := make([]*latest.Artifact, test.artifactLen)
			tags := tag.ImageTags{}
			for i := 0; i < test.artifactLen; i++ {
				a := fmt.Sprintf("artifact%d", i+1)
				artifacts[i] = &latest.Artifact{ImageName: a}
				tags[a] = fmt.Sprintf("%s@tag%d", a, i+1)
			}
			setDependencies(artifacts, test.dependency)
			initializeEvents()
			actual, err := InOrder(context.Background(), io.Discard, tags, platform.Resolver{}, artifacts, test.buildArtifact, test.concurrency, NewArtifactStore())
			t.CheckDeepEqual(test.expected, actual)
			t.CheckDeepEqual(test.err, err, cmp.Comparer(errorsComparer))
		})
	}
}
// setDependencies constructs a graph of artifact dependencies using the map as an adjacency list representation of indices in the artifacts array.
// For example:
// m = {
//
// 0 : {1, 2},
// 2 : {3},
//
// }
// implies that a[0] artifact depends on a[1] and a[2]; and a[2] depends on a[3].
// setDependencies wires artifact dependencies from the adjacency list d,
// where d[i] lists the indices of the artifacts that a[i] depends on.
func setDependencies(a []*latest.Artifact, d map[int][]int) {
	for from, deps := range d {
		for _, to := range deps {
			dep := &latest.ArtifactDependency{ImageName: a[to].ImageName}
			a[from].Dependencies = append(a[from].Dependencies, dep)
		}
	}
}
// initializeEvents seeds the test event state with a minimal single
// pipeline (local build type) — presumably required before InOrder emits
// build events; confirm against the event package.
func initializeEvents() {
	pipes := []latest.Pipeline{{
		Deploy: latest.DeployConfig{},
		Build: latest.BuildConfig{
			BuildType: latest.BuildType{
				LocalBuild: &latest.LocalBuild{},
			},
		},
	}}
	testEvent.InitializeState(pipes)
}
func errorsComparer(a, b error) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
return a.Error() == b.Error()
}
|
package repository
import (
"fmt"
"github.com/jmoiron/sqlx"
"github.com/solntsevatv/url_translater/internal/url_translater"
)
// UrlPostgres is a PostgreSQL-backed repository for URL records.
type UrlPostgres struct {
	db *sqlx.DB
}

// NewUrlPostgres returns a UrlPostgres using the given sqlx connection.
func NewUrlPostgres(db *sqlx.DB) *UrlPostgres {
	return &UrlPostgres{db: db}
}
// IsDBEmpty reports whether the links table contains no rows.
// On query failure it returns true together with the error; callers must
// check the error before trusting the flag.
func (r *UrlPostgres) IsDBEmpty() (bool, error) {
	var count int
	query := fmt.Sprintf("SELECT count(*) FROM %s;", linksTable)
	if err := r.db.Get(&count, query); err != nil {
		return true, err
	}
	// directly return the comparison instead of the redundant if/else
	return count == 0, nil
}
// GetNextUrlId returns the id that the next inserted row will receive:
// 1 for an empty table, otherwise the id sequence's last value plus one.
func (r *UrlPostgres) GetNextUrlId() (int, error) {
	var id int
	is_empty, err := r.IsDBEmpty()
	if err != nil {
		return 0, err
	}
	if is_empty {
		id = 1
		return id, nil
	}
	// read the current value of the table's id sequence
	query := fmt.Sprintf("SELECT last_value FROM %s_id_seq;", linksTable)
	err = r.db.Get(&id, query)
	if err != nil {
		return 0, err
	}
	return id + 1, nil // add 1, since we return the NEXT id
}
// CreateShortURL inserts a (long_url, short_url) pair into the links
// table and returns the short URL on success.
func (r *UrlPostgres) CreateShortURL(url url_translater.URL) (string, error) {
	query := fmt.Sprintf("INSERT INTO %s (long_url, short_url) values ($1, $2) RETURNING id", linksTable)
	var insertedID int
	if err := r.db.QueryRow(query, url.LongUrl, url.ShortURL).Scan(&insertedID); err != nil {
		return "", err
	}
	return url.ShortURL, nil
}
// GetLongURL looks up the original URL stored for the given short URL.
// NOTE(review): the query selects both id and long_url into a LongURL
// struct — confirm the struct's db tags map both columns.
func (r *UrlPostgres) GetLongURL(short_url url_translater.ShortURL) (string, error) {
	var long_url url_translater.LongURL
	query := fmt.Sprintf("SELECT id, long_url FROM %s WHERE short_url=$1;", linksTable)
	err := r.db.Get(&long_url, query, short_url.LinkUrl)
	if err != nil {
		return "", err
	}
	return long_url.LinkUrl, nil
}
|
// date: 2019-03-12
package log
import (
	"io"
	"net/http"
	"os"

	"github.com/Jarvens/Exchange-Agent/util/config"
	"github.com/sirupsen/logrus"
	"gopkg.in/natefinch/lumberjack.v2"
)
// LumberJackLogger builds a rotating file logger for the given path with
// the supplied size (MB), backup-count, and age (days) limits.
func LumberJackLogger(filePath string, maxSize int, maxBackups int, maxAge int) *lumberjack.Logger {
	return &lumberjack.Logger{
		Filename:   filePath,
		MaxSize:    maxSize,
		MaxBackups: maxBackups,
		MaxAge:     maxAge,
	}
}
// InitLogToStdoutDebug sends colored text-formatted logs to stdout at
// debug level.
func InitLogToStdoutDebug() {
	logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
	logrus.SetOutput(os.Stdout)
	logrus.SetLevel(logrus.DebugLevel)
}
// InitLogToStdout sends text-formatted logs to stdout at warn level.
func InitLogToStdout() {
	logrus.SetFormatter(&logrus.TextFormatter{})
	logrus.SetOutput(os.Stdout)
	logrus.SetLevel(logrus.WarnLevel)
}
// InitLogToFile routes logrus output to rotating log files at info level.
//
// NOTE(review): logrus has a single global output, and SetOutput is called
// twice here — the error-log writer configured first is immediately
// replaced by the access-log writer, so the error-log file never receives
// output. SetLevel(InfoLevel) is also applied twice. Separate error and
// access streams would need logrus hooks or an io.MultiWriter; confirm the
// intended behavior before changing it.
func InitLogToFile() {
	logrus.SetFormatter(&logrus.TextFormatter{})
	out := LumberJackLogger(config.WsErrorLogFilePath+config.WsErrorLogFileExtension,
		config.WsErrorLogMaxSize,
		config.WsErrorLogMaxBackups,
		config.WsErrorLogMaxAge)
	logrus.SetOutput(out)
	access := LumberJackLogger(config.WsAccessLogFilePath+config.WsAccessLogFileExtension,
		config.WsAccessLogMaxSize,
		config.WsAccessLogMaxBackups,
		config.WsAccessLogMaxAge)
	logrus.SetLevel(logrus.InfoLevel)
	// success (access) log output
	logrus.SetOutput(access)
	// error log output
	logrus.SetLevel(logrus.InfoLevel)
}
// Init configures logging for the named environment and then logs which
// environment was selected.
func Init(environment string) {
	switch environment {
	case "DEVELOPMENT":
		InitLogToStdoutDebug()
	case "TEST", "PRODUCTION":
		InitLogToFile()
	}
	logrus.Infof("Environment : %s", environment)
}
// Debug logs a message with debug log level.
func Debug(msg string) {
	logrus.Debug(msg)
}

// Debugf logs a formatted message with debug log level.
func Debugf(msg string, args ...interface{}) {
	logrus.Debugf(msg, args...)
}

// Info logs a message with info log level.
func Info(msg string) {
	logrus.Info(msg)
}

// Infof logs a formatted message with info log level.
func Infof(msg string, args ...interface{}) {
	logrus.Infof(msg, args...)
}

// Warn logs a message with warn log level.
func Warn(msg string) {
	logrus.Warn(msg)
}

// Warnf logs a formatted message with warn log level.
func Warnf(msg string, args ...interface{}) {
	logrus.Warnf(msg, args...)
}

// Error logs a message with error log level.
func Error(msg string) {
	logrus.Error(msg)
}

// Errorf logs a formatted message with error log level.
func Errorf(msg string, args ...interface{}) {
	logrus.Errorf(msg, args...)
}

// Fatal logs a message with fatal log level.
func Fatal(msg string) {
	logrus.Fatal(msg)
}

// Fatalf logs a formatted message with fatal log level.
func Fatalf(msg string, args ...interface{}) {
	logrus.Fatalf(msg, args...)
}

// Panic logs a message with panic log level.
func Panic(msg string) {
	logrus.Panic(msg)
}

// Panicf logs a formatted message with panic log level.
func Panicf(msg string, args ...interface{}) {
	logrus.Panicf(msg, args...)
}
// log response body data for debugging
func DebugResponse(response *http.Response) string {
bodyBuffer := make([]byte, 5000)
var str string
count, err := response.Body.Read(bodyBuffer)
for ; count > 0; count, err = response.Body.Read(bodyBuffer) {
if err != nil {
}
str += string(bodyBuffer[:count])
}
Debugf("response data : %v", str)
return str
}
// init forces PRODUCTION (file-based) logging as soon as this package is
// imported; call Init explicitly to select a different environment.
func init() {
	Init("PRODUCTION")
}
|
// +build storage_pgx
package config
const DefaultStorage = StoragePostgres
|
package main
import (
	"fmt"
	"log"
	"net/http"
	"os"
	"time"

	"github.com/gorilla/mux"
	"gopkg.in/natefinch/lumberjack.v2"
)
func handler(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
name := query.Get("name")
if name == "" {
name = "Guest"
}
log.Printf("Received request for %s\n", name)
w.Write([]byte(fmt.Sprintf("Welcome, %s\n", name)))
}
// main wires up the HTTP server with a mux router, optional rotating file
// logging (via LOG_FILE_LOCATION), and a graceful-shutdown wait.
func main() {
	// Create Server and Route Handlers
	r := mux.NewRouter()
	r.HandleFunc("/", handler)

	srv := &http.Server{
		Handler:      r,
		Addr:         ":8080",
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}

	// Configure Logging
	logFileLocation := os.Getenv("LOG_FILE_LOCATION")
	if logFileLocation != "" {
		log.SetOutput(&lumberjack.Logger{
			Filename:   logFileLocation,
			MaxSize:    500, // megabytes
			MaxBackups: 3,
			MaxAge:     28,   // days
			Compress:   true, // disabled by default
		})
	}

	// Start Server
	go func() {
		log.Println("Starting Server...")
		// ListenAndServe returns http.ErrServerClosed on graceful shutdown;
		// previously that was passed to log.Fatal and killed the process
		// before waitForShutdown could finish cleanly.
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	// Shutdown
	waitForShutdown(srv)
}
|
package collect
import (
"archive/tar"
"bytes"
"context"
"io"
"os"
"path/filepath"
"time"
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/segmentio/ksuid"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
)
// CopyFromHost is a function that copies a file or directory from a host or hosts to include in the bundle.
func CopyFromHost(ctx context.Context, c *Collector, collector *troubleshootv1beta2.CopyFromHost, namespace string, clientConfig *restclient.Config, client kubernetes.Interface) (CollectorResult, error) {
	labels := map[string]string{
		"app.kubernetes.io/managed-by":    "troubleshoot.sh",
		"troubleshoot.sh/collector":       "copyfromhost",
		"troubleshoot.sh/copyfromhost-id": ksuid.New().String(),
	}

	hostPath := filepath.Clean(collector.HostPath) // strip trailing slash
	hostDir := filepath.Dir(hostPath)
	fileName := filepath.Base(hostPath)
	if hostDir == filepath.Dir(hostDir) { // is the parent directory the root?
		hostDir = hostPath
		fileName = "."
	}

	// cleanup is always non-nil, so deferring before the error check is safe.
	_, cleanup, err := copyFromHostCreateDaemonSet(ctx, client, collector, hostDir, namespace, "troubleshoot-copyfromhost-", labels)
	defer cleanup()
	if err != nil {
		return nil, errors.Wrap(err, "create daemonset")
	}

	childCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	if collector.Timeout != "" {
		timeout, err := time.ParseDuration(collector.Timeout)
		if err != nil {
			return nil, errors.Wrap(err, "parse timeout")
		}
		if timeout > 0 {
			childCtx, cancel = context.WithTimeout(childCtx, timeout)
			defer cancel()
		}
	}

	errCh := make(chan error, 1)
	resultCh := make(chan CollectorResult, 1)
	go func() {
		var outputFilename string
		if collector.Name != "" {
			outputFilename = collector.Name
		} else {
			outputFilename = hostPath
		}
		b, err := copyFromHostGetFilesFromPods(childCtx, c, collector, clientConfig, client, fileName, outputFilename, labels, namespace)
		if err != nil {
			errCh <- err
		} else {
			resultCh <- b
		}
	}()

	// The worker observes childCtx itself, so a deadline surfaces here as
	// context.DeadlineExceeded on errCh. The previous version also selected
	// on context.Background().Done(), a case that can never fire — removed.
	select {
	case result := <-resultCh:
		return result, nil
	case err := <-errCh:
		if errors.Is(err, context.DeadlineExceeded) {
			return nil, errors.New("timeout")
		}
		return nil, err
	}
}
// copyFromHostCreateDaemonSet creates a DaemonSet that mounts hostPath
// into each pod at /host (container just sleeps), optionally wiring an
// image pull secret, and waits up to 30s for all desired pods to become
// available. It returns the DaemonSet name and a cleanup func that
// deletes everything it created; cleanup is always non-nil and safe to
// call even when err is non-nil.
func copyFromHostCreateDaemonSet(ctx context.Context, client kubernetes.Interface, collector *troubleshootv1beta2.CopyFromHost, hostPath string, namespace string, generateName string, labels map[string]string) (name string, cleanup func(), err error) {
	pullPolicy := corev1.PullIfNotPresent
	volumeType := corev1.HostPathDirectory
	if collector.ImagePullPolicy != "" {
		pullPolicy = corev1.PullPolicy(collector.ImagePullPolicy)
	}
	ds := appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: generateName,
			Namespace:    namespace,
			Labels:       labels,
		},
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyAlways,
					Containers: []corev1.Container{
						{
							Image:           collector.Image,
							ImagePullPolicy: pullPolicy,
							Name:            "collector",
							// keep the pod alive so files can be exec-copied out
							Command: []string{"sleep"},
							Args:    []string{"1000000"},
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "host",
									MountPath: "/host",
								},
							},
						},
					},
					// allow scheduling onto control-plane nodes too
					Tolerations: []corev1.Toleration{
						{
							Key:      "node-role.kubernetes.io/master",
							Operator: "Exists",
							Effect:   "NoSchedule",
						},
					},
					Volumes: []corev1.Volume{
						{
							Name: "host",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: hostPath,
									Type: &volumeType,
								},
							},
						},
					},
				},
			},
		},
	}
	cleanupFuncs := []func(){}
	cleanup = func() {
		for _, fn := range cleanupFuncs {
			fn()
		}
	}
	if collector.ImagePullSecret != nil && collector.ImagePullSecret.Data != nil {
		secretName, err := createSecret(ctx, client, namespace, collector.ImagePullSecret)
		if err != nil {
			return "", cleanup, errors.Wrap(err, "create secret")
		}
		ds.Spec.Template.Spec.ImagePullSecrets = append(ds.Spec.Template.Spec.ImagePullSecrets, corev1.LocalObjectReference{Name: secretName})
		cleanupFuncs = append(cleanupFuncs, func() {
			err := client.CoreV1().Secrets(namespace).Delete(ctx, collector.ImagePullSecret.Name, metav1.DeleteOptions{})
			if err != nil && !kuberneteserrors.IsNotFound(err) {
				logger.Printf("Failed to delete secret %s: %v", collector.ImagePullSecret.Name, err)
			}
		})
	}
	createdDS, err := client.AppsV1().DaemonSets(namespace).Create(ctx, &ds, metav1.CreateOptions{})
	if err != nil {
		return "", cleanup, errors.Wrap(err, "create daemonset")
	}
	cleanupFuncs = append(cleanupFuncs, func() {
		if err := client.AppsV1().DaemonSets(namespace).Delete(ctx, createdDS.Name, metav1.DeleteOptions{}); err != nil {
			logger.Printf("Failed to delete daemonset %s: %v", createdDS.Name, err)
		}
	})
	// This timeout is different from collector timeout.
	// Time it takes to pull images should not count towards collector timeout.
	childCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	for {
		select {
		case <-time.After(1 * time.Second):
		case <-childCtx.Done():
			return createdDS.Name, cleanup, errors.Wrap(ctx.Err(), "wait for daemonset")
		}
		ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, createdDS.Name, metav1.GetOptions{})
		if err != nil {
			// NOTE(review): this retries on non-NotFound errors and aborts on
			// NotFound (the DaemonSet was just created, so NotFound would be
			// unexpected) — confirm the inverted condition is intentional.
			if !kuberneteserrors.IsNotFound(err) {
				continue
			}
			return createdDS.Name, cleanup, errors.Wrap(err, "get daemonset")
		}
		if ds.Status.DesiredNumberScheduled == 0 || ds.Status.DesiredNumberScheduled != ds.Status.NumberAvailable {
			continue
		}
		break
	}
	return createdDS.Name, cleanup, nil
}
// copyFromHostGetFilesFromPods copies the target file from every pod
// matching labelSelector and merges the per-node results into a single
// CollectorResult keyed by bundle-relative paths. A failed copy from one
// pod is recorded as error.txt/stderr.txt under that node's directory
// rather than aborting the whole collection.
func copyFromHostGetFilesFromPods(ctx context.Context, c *Collector, collector *troubleshootv1beta2.CopyFromHost, clientConfig *restclient.Config, client kubernetes.Interface, fileName string, outputFilename string, labelSelector map[string]string, namespace string) (CollectorResult, error) {
	opts := metav1.ListOptions{
		LabelSelector: labels.SelectorFromSet(labelSelector).String(),
	}
	pods, err := client.CoreV1().Pods(namespace).List(ctx, opts)
	if err != nil {
		return nil, errors.Wrap(err, "list pods")
	}
	output := NewResult()
	for _, pod := range pods.Items {
		// namespace results per node so files from different hosts don't collide
		outputNodeFilename := filepath.Join(outputFilename, pod.Spec.NodeName)
		files, stderr, err := copyFilesFromHost(ctx, filepath.Join(c.BundlePath, outputNodeFilename), clientConfig, client, pod.Name, "collector", namespace, filepath.Join("/host", fileName), collector.ExtractArchive)
		if err != nil {
			output.SaveResult(c.BundlePath, filepath.Join(outputNodeFilename, "error.txt"), bytes.NewBuffer([]byte(err.Error())))
			if len(stderr) > 0 {
				output.SaveResult(c.BundlePath, filepath.Join(outputNodeFilename, "stderr.txt"), bytes.NewBuffer(stderr))
			}
		}
		for k, v := range files {
			relPath, err := filepath.Rel(c.BundlePath, filepath.Join(c.BundlePath, filepath.Join(outputNodeFilename, k)))
			if err != nil {
				return nil, errors.Wrap(err, "relative path")
			}
			output[relPath] = v
		}
	}
	return output, nil
}
// copyFilesFromHost execs `tar` inside the collector container to archive
// containerPath and streams the archive back over SPDY. When extract is
// true the stream is unpacked into individual result files under dstPath;
// otherwise the raw archive is saved as "archive.tar". It returns the
// result map plus any stderr produced by the remote command.
func copyFilesFromHost(ctx context.Context, dstPath string, clientConfig *restclient.Config, client kubernetes.Interface, podName string, containerName string, namespace string, containerPath string, extract bool) (CollectorResult, []byte, error) {
	command := []string{"tar", "-C", filepath.Dir(containerPath), "-cf", "-", filepath.Base(containerPath)}

	req := client.CoreV1().RESTClient().Post().Resource("pods").Name(podName).Namespace(namespace).SubResource("exec")
	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		return nil, nil, errors.Wrap(err, "failed to add runtime scheme")
	}
	parameterCodec := runtime.NewParameterCodec(scheme)
	// NOTE(review): Stdin is declared true here, but exec.Stream below
	// passes Stdin: nil — confirm whether Stdin should be false.
	req.VersionedParams(&corev1.PodExecOptions{
		Command:   command,
		Container: containerName,
		Stdin:     true,
		Stdout:    false,
		Stderr:    true,
		TTY:       false,
	}, parameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(clientConfig, "POST", req.URL())
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create SPDY executor")
	}

	result := NewResult()
	var stdoutWriter io.Writer
	var copyError error
	if extract {
		pipeReader, pipeWriter := io.Pipe()
		tarReader := tar.NewReader(pipeReader)
		stdoutWriter = pipeWriter
		go func() {
			// this can cause "read/write on closed pipe" error, but without this exec.Stream blocks
			defer pipeWriter.Close()
			for {
				header, err := tarReader.Next()
				if err == io.EOF {
					return
				}
				if err != nil {
					pipeWriter.CloseWithError(errors.Wrap(err, "failed to read header from tar"))
					return
				}
				switch header.Typeflag {
				case tar.TypeDir:
					name := filepath.Join(dstPath, header.Name)
					if err := os.MkdirAll(name, os.FileMode(header.Mode)); err != nil {
						pipeWriter.CloseWithError(errors.Wrap(err, "failed to mkdir"))
						return
					}
				case tar.TypeReg:
					err := result.SaveResult(dstPath, header.Name, tarReader)
					if err != nil {
						pipeWriter.CloseWithError(errors.Wrapf(err, "failed to save result for file %s", header.Name))
						return
					}
				}
			}
		}()
	} else {
		w, err := result.GetWriter(dstPath, "archive.tar")
		if err != nil {
			// fixed error-message typo: "craete" -> "create"
			return nil, nil, errors.Wrap(err, "failed to create dest file")
		}
		defer result.CloseWriter(dstPath, "archive.tar", w)
		stdoutWriter = w
	}

	var stderr bytes.Buffer
	copyError = exec.Stream(remotecommand.StreamOptions{
		Stdin:  nil,
		Stdout: stdoutWriter,
		Stderr: &stderr,
		Tty:    false,
	})
	if copyError != nil {
		return result, stderr.Bytes(), errors.Wrap(copyError, "failed to stream command output")
	}

	return result, stderr.Bytes(), nil
}
|
package worker
import (
"github.com/winjeg/db-filler/config"
"github.com/winjeg/db-filler/log"
"github.com/winjeg/db-filler/schema"
"github.com/winjeg/db-filler/store"
"fmt"
"sync"
)
var (
conf = config.GetConf()
logger = log.GetLogger()
db = store.GetDb()
wg = sync.WaitGroup{}
)
// logError records err through the package logger when it is non-nil.
func logError(err error) {
	if err == nil {
		return
	}
	logger.Error(err)
}
// do the work
// Perform runs the configured workload: executes any extra bootstrap SQL,
// then either only generates insert statements (OnlyGenerate) or
// generates them and executes the inserts in parallel.
func Perform() {
	// run the sql. before everything start to work
	if len(conf.ExtraSettings.Sql) > 0 {
		_, _, err := store.GetFromResult(db.Exec(conf.ExtraSettings.Sql))
		logError(err)
	}
	// only generation?
	if conf.ExtraSettings.OnlyGenerate {
		GenerateSql(conf.WorkerSettings.SqlNum)
		return
	}
	// generate sql
	sqls := GenerateSql(conf.WorkerSettings.SqlNum)
	// parallel insertion
	ParallelExecute(sqls, conf.WorkerSettings.InsertWorkerNum)
}
// generate sql with thread num specified
// GenerateSql generates insert statements for the configured table using
// GenerateWorkerNum goroutines and returns them. If a SQL file store is
// configured, the statements are also appended to it. Panics (via the
// logger) when the target table does not exist.
func GenerateSql(num int) []string {
	// check that the table exists; if not, panic and exit the program
	_, _, err := store.GetFromResult(db.Exec(fmt.Sprintf("show create table %s", conf.WorkerSettings.TableName)))
	if err != nil {
		// fixed message typo: "exits" -> "exist"
		logger.Panic("the table you specified may not exist", err.Error())
	}
	tableDef := schema.SchemaService.GetTableDefinition(db, conf.WorkerSettings.TableName, conf.DbSettings.DbName)
	waitGroup := sync.WaitGroup{}
	threadNum := conf.WorkerSettings.GenerateWorkerNum
	waitGroup.Add(threadNum)
	genNum := num
	// NOTE(review): perNum counts statements per worker (each statement
	// inserts RowNum rows), while restNum is used as a count of extra
	// statements for worker 0 — confirm the rows-vs-statements accounting.
	perNum := genNum / threadNum / conf.WorkerSettings.RowNum
	restNum := genNum % (threadNum * conf.WorkerSettings.RowNum)
	result := make([]string, 0, genNum)
	var lock sync.Mutex
	for i := 0; i < threadNum; i++ {
		go func(n int) {
			defer waitGroup.Done()
			ts := make([]string, 0, perNum)
			if n == 0 {
				// first thread takes the rest and original
				for j := 0; j < restNum+perNum; j++ {
					sql := tableDef.GenerateInsert(conf.WorkerSettings.RowNum)
					ts = append(ts, sql)
				}
			} else {
				for j := 0; j < perNum; j++ {
					sql := tableDef.GenerateInsert(conf.WorkerSettings.RowNum)
					ts = append(ts, sql)
				}
			}
			// put it to the result slice safely
			if len(ts) > 0 {
				lock.Lock()
				result = append(result, ts...)
				lock.Unlock()
			}
		}(i)
	}
	waitGroup.Wait()
	// if the sql file is set, append the sql to this file
	if store.SqlFileStore != nil {
		for i := range result {
			err := store.SqlFileStore.Append(result[i])
			logError(err)
		}
		err = store.SqlFileStore.Close()
		logError(err)
	}
	return result
}
// parallel insert sql generation and insertion
//func ParallelizeGenAndInsert() {
// // TODO generate and insert, using multi go routines
//}
// parallel execute sql in n routines
// ParallelExecute executes the given SQL statements across workerNum
// goroutines, splitting the slice into contiguous chunks (the last worker
// picks up any remainder). Failed statements are appended to the error
// file store when one is configured.
func ParallelExecute(sqls []string, workerNum int) {
	if workerNum <= 0 {
		workerNum = 1 // guard against division by zero below
	}
	var waitGroup sync.WaitGroup
	sqlLen := len(sqls)
	perNum := sqlLen / workerNum
	waitGroup.Add(workerNum)
	errorSqls := make([]string, 0, sqlLen)
	var lock sync.Mutex
	for i := 0; i < workerNum; i++ {
		go func(n int) {
			defer waitGroup.Done()
			terrorSqls := make([]string, 0, perNum)
			tsqls := sqls[n*perNum : n*perNum+perNum]
			if n == workerNum-1 {
				// last worker takes the remainder
				tsqls = sqls[n*perNum:]
			}
			for _, v := range tsqls {
				_, _, err := store.GetFromResult(db.Exec(v))
				if err != nil {
					logError(err)
					terrorSqls = append(terrorSqls, v)
				}
			}
			// merge this worker's failures into the shared slice under the lock
			if len(terrorSqls) > 0 {
				lock.Lock()
				errorSqls = append(errorSqls, terrorSqls...)
				lock.Unlock()
			}
		}(i)
	}
	waitGroup.Wait()
	if store.ErrorFileStore != nil {
		for i := range errorSqls {
			err := store.ErrorFileStore.Append(errorSqls[i])
			logError(err)
		}
		err := store.ErrorFileStore.Close()
		logError(err)
	}
}
|
package main
import (
"github.com/urfave/cli"
)
// configCommand groups the client-configuration subcommands (view, set).
var configCommand = cli.Command{
	Name:     "config",
	HelpName: "config",
	Usage:    `View and edit client configuration`,
	// fixed user-visible help-text typo: "ndoe" -> "node"
	Description: `With this command you view and edit the client configurations
 like node address, username, namespace, etc.`,
	ArgsUsage: "eli config view",
	Subcommands: []cli.Command{
		configViewCommand,
		configSetCommand,
	},
}
|
package tmpl
import "bytes"
import "testing"
// testWrite writes value to the file name under root, failing the test
// on any error.
func testWrite(t *testing.T, root *fsRoot, name, value string) {
	target, err := root.Writer(name)
	if err != nil {
		t.Fatal("could not create writer", err)
	}
	if _, err := target.Write([]byte(value)); err != nil {
		// include the underlying error, which was previously dropped
		t.Fatal("could not write", value, err)
	}
}
// testRead reads and returns the full contents of the file name under
// root, logging what was read and failing the test on any error.
func testRead(t *testing.T, root *fsRoot, name string) string {
	source, err := root.Reader(name)
	if err != nil {
		t.Fatal("could not create reader", err)
	}
	var content bytes.Buffer
	if _, err = content.ReadFrom(source); err != nil {
		t.Fatal("Could not read", err)
	}
	text := content.String()
	t.Log("read", name, "contents", text)
	return text
}
|
package base91
import(
"fmt"
"unicode/utf8"
)
var encodeTable []byte
var decodeTable []byte
var ebq, en, dbq, dn, dv int
// Init Initializes the arrays used for conversion
// Init initializes the base91 encode/decode tables and resets the
// encoder and decoder state counters.
func Init() {
	encodeTable = StringToASCIIBytes("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!#$%&()*+,./:;<=>?@[]^_`{|}~\"")
	decodeTable = make([]byte, 256)
	for i := range decodeTable {
		decodeTable[i] = 255 // 255 marks bytes outside the alphabet
	}
	for i := 0; i < 91; i++ {
		decodeTable[encodeTable[i]] = byte(i)
	}
	EncReset()
	DecReset()
}
// EncReset sends back the counters to the initial position
// EncReset returns the encoder state counters to their initial values.
func EncReset() {
	ebq, en = 0, 0
}
// DecReset sends back the counters to the initial position
// DecReset returns the decoder state counters to their initial values.
func DecReset() {
	dbq, dn, dv = 0, 0, 1
}
//StringToASCIIBytes converts a string to bytes
func StringToASCIIBytes(s string) []byte {
t := make([]byte, utf8.RuneCountInString(s))
i := 0
for _, r := range s {
t[i] = byte(r)
i++
}
return t
}
// PrintTables Prints the values if the translation tables
// PrintTables prints the encode and decode translation tables to stdout
// (debugging aid).
func PrintTables(){
	fmt.Println(encodeTable)
	fmt.Println(decodeTable)
}
|
package main
import "fmt"
// main prints the numbers 1 through 10000 to stdout, each preceded by a
// tab and a space.
func main() {
	for n := 1; n <= 10000; n++ {
		fmt.Printf("\t %v", n)
	}
}
//Hands-on exercise #1
//Print every number from 1 to 10,000
//solution: https://play.golang.org/p/voDiuiDGGw
//video: 050
//SOLUTION: (The solution starts at 0)
//package main
//import (
//"fmt"
//)
//func main() {
// for i := 0; i <= 10000; i++ {
// fmt.Println(i)
// }
//}
|
package adapter
import (
"bytes"
"fmt"
"git.trac.cn/nv/subscribe/pkg/db"
"git.trac.cn/nv/subscribe/pkg/logging"
"gopkg.in/ini.v1"
"os"
)
// Surge renders the non-deleted Trojan nodes from the database into a
// Surge client configuration (INI template configSurgeIni) and returns
// the serialized bytes.
func Surge() ([]byte, error) {
	var (
		err error
		cfg *ini.File
	)
	nodes, err := db.GetTrojanNodes(map[string]interface{}{"is_deleted": 0})
	if err != nil {
		logging.Error(err)
		return nil, err
	}
	// Rule and URL Rewrite sections are not key=value INI, so keep them raw.
	loadOptions := ini.LoadOptions{}
	loadOptions.UnparseableSections = []string{"Rule", "URL Rewrite"}
	cfg, err = ini.LoadSources(loadOptions, configSurgeIni)
	if err != nil {
		fmt.Printf("Fail to read file: %v", err)
		os.Exit(1)
	}
	// add one proxy entry per node, collecting the names for the group line
	proxyNode, _ := cfg.GetSection("Proxy")
	groupString := "select"
	for _,node := range nodes {
		_, _ = proxyNode.NewKey(node.NodeName, fmt.Sprintf("%s, %s, %d, password=%s",
			db.Trojan, node.ServerHost, node.ServerPort, node.ServerPassword))
		groupString += "," + node.NodeName
	}
	proxyGroupNode, _ := cfg.GetSection("Proxy Group")
	_, _ = proxyGroupNode.NewKey("Group", groupString)
	buf := new(bytes.Buffer)
	_, err = cfg.WriteTo(buf)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
var configSurgeIni = []byte(`
[General]
loglevel = notify
skip-proxy = 127.0.0.1, 192.168.0.0/16, 10.0.0.0/8, 172.16.0.0/12, 100.64.0.0/10, localhost, *.local
bypass-tun = 192.168.0.0/16, 10.0.0.0/8, 172.16.0.0/12
test-timeout = 30
tls-provider = openssl
bypass-system = true
[URL Rewrite]
^http://www.google.cn http://www.google.com
[Proxy]
[Proxy Group]
[Rule]
DOMAIN-SUFFIX,google-analytics.com,REJECT
DOMAIN-SUFFIX,hm.baidu.com,REJECT
DOMAIN-SUFFIX,cpro.baidu.com,REJECT
DOMAIN-SUFFIX,p.tanx.com,REJECT
DOMAIN-SUFFIX,s5.cnzz.com,REJECT
DOMAIN-SUFFIX,hzs19.cnzz.com,REJECT
DOMAIN-SUFFIX,c.cnzz.com,REJECT
DOMAIN-SUFFIX,w.cnzz.com,REJECT
DOMAIN-SUFFIX,tcss.qq.com,REJECT
#
DOMAIN-KEYWORD,google,Group,force-remote-dns
DOMAIN-KEYWORD,facebook,Group,force-remote-dns
DOMAIN-KEYWORD,youtube,Group,force-remote-dns
DOMAIN-KEYWORD,twitter,Group,force-remote-dns
DOMAIN-KEYWORD,instagram,Group,force-remote-dns
DOMAIN-KEYWORD,gmail,Group,force-remote-dns
DOMAIN-KEYWORD,blogspot,Group
DOMAIN-SUFFIX,twimg.com,Group,force-remote-dns
# Remove these lines below if you dont have trouble accessing Apple resources
DOMAIN-SUFFIX,ls.apple.com,DIRECT
DOMAIN-SUFFIX,lcdn-registration.apple.com,DIRECT
DOMAIN-SUFFIX,apple.com,Group
DOMAIN-SUFFIX,mzstatic.com,Group
DOMAIN-SUFFIX,itunes.com,Group
DOMAIN-SUFFIX,icloud.com,Group
DOMAIN-SUFFIX,amazonaws.com,Group
DOMAIN-SUFFIX,android.com,Group
DOMAIN-SUFFIX,angularjs.org,Group
DOMAIN-SUFFIX,appspot.com,Group
DOMAIN-SUFFIX,akamaihd.net,Group
DOMAIN-SUFFIX,amazon.com,Group
DOMAIN-SUFFIX,bit.ly,Group
DOMAIN-SUFFIX,bitbucket.org,Group
DOMAIN-SUFFIX,blog.com,Group
DOMAIN-SUFFIX,blogcdn.com,Group
DOMAIN-SUFFIX,blogger.com,Group
DOMAIN-SUFFIX,blogsmithmedia.com,Group
DOMAIN-SUFFIX,box.net,Group
DOMAIN-SUFFIX,bloomberg.com,Group
DOMAIN-SUFFIX,chromium.org,Group
DOMAIN-SUFFIX,cl.ly,Group
DOMAIN-SUFFIX,cloudfront.net,Group
DOMAIN-SUFFIX,cloudflare.com,Group
DOMAIN-SUFFIX,cocoapods.org,Group
DOMAIN-SUFFIX,crashlytics.com,Group
DOMAIN-SUFFIX,dribbble.com,Group
DOMAIN-SUFFIX,dropbox.com,Group
DOMAIN-SUFFIX,dropboxstatic.com,Group
DOMAIN-SUFFIX,dropboxusercontent.com,Group
DOMAIN-SUFFIX,docker.com,Group
DOMAIN-SUFFIX,duckduckgo.com,Group
DOMAIN-SUFFIX,digicert.com,Group
DOMAIN-SUFFIX,dnsimple.com,Group
DOMAIN-SUFFIX,edgecastcdn.net,Group
DOMAIN-SUFFIX,engadget.com,Group
DOMAIN-SUFFIX,eurekavpt.com,Group
DOMAIN-SUFFIX,fb.me,Group
DOMAIN-SUFFIX,fbcdn.net,Group
DOMAIN-SUFFIX,fc2.com,Group
DOMAIN-SUFFIX,feedburner.com,Group
DOMAIN-SUFFIX,fabric.io,Group
DOMAIN-SUFFIX,flickr.com,Group
DOMAIN-SUFFIX,fastly.net,Group
DOMAIN-SUFFIX,ggpht.com,Group
DOMAIN-SUFFIX,github.com,Group
DOMAIN-SUFFIX,github.io,Group
DOMAIN-SUFFIX,githubusercontent.com,Group
DOMAIN-SUFFIX,golang.org,Group
DOMAIN-SUFFIX,goo.gl,Group
DOMAIN-SUFFIX,gstatic.com,Group
DOMAIN-SUFFIX,godaddy.com,Group
DOMAIN-SUFFIX,gravatar.com,Group
DOMAIN-SUFFIX,imageshack.us,Group
DOMAIN-SUFFIX,imgur.com,Group
DOMAIN-SUFFIX,jshint.com,Group
DOMAIN-SUFFIX,ift.tt,Group
DOMAIN-SUFFIX,j.mp,Group
DOMAIN-SUFFIX,kat.cr,Group
DOMAIN-SUFFIX,linode.com,Group
DOMAIN-SUFFIX,linkedin.com,Group
DOMAIN-SUFFIX,licdn.com,Group
DOMAIN-SUFFIX,lithium.com,Group
DOMAIN-SUFFIX,megaupload.com,Group
DOMAIN-SUFFIX,mobile01.com,Group
DOMAIN-SUFFIX,modmyi.com,Group
DOMAIN-SUFFIX,nytimes.com,Group
DOMAIN-SUFFIX,name.com,Group
DOMAIN-SUFFIX,openvpn.net,Group
DOMAIN-SUFFIX,openwrt.org,Group
DOMAIN-SUFFIX,ow.ly,Group
DOMAIN-SUFFIX,pinboard.in,Group
DOMAIN-SUFFIX,ssl-images-amazon.com,Group
DOMAIN-SUFFIX,sstatic.net,Group
DOMAIN-SUFFIX,stackoverflow.com,Group
DOMAIN-SUFFIX,staticflickr.com,Group
DOMAIN-SUFFIX,squarespace.com,Group
DOMAIN-SUFFIX,symcd.com,Group
DOMAIN-SUFFIX,symcb.com,Group
DOMAIN-SUFFIX,symauth.com,Group
DOMAIN-SUFFIX,ubnt.com,Group
DOMAIN-SUFFIX,t.co,Group
DOMAIN-SUFFIX,thepiratebay.org,Group
DOMAIN-SUFFIX,tumblr.com,Group
DOMAIN-SUFFIX,twimg.com,Group
DOMAIN-SUFFIX,twitch.tv,Group
DOMAIN-SUFFIX,twitter.com,Group
DOMAIN-SUFFIX,wikipedia.com,Group
DOMAIN-SUFFIX,wikipedia.org,Group
DOMAIN-SUFFIX,wikimedia.org,Group
DOMAIN-SUFFIX,wordpress.com,Group
DOMAIN-SUFFIX,wsj.com,Group
DOMAIN-SUFFIX,wsj.net,Group
DOMAIN-SUFFIX,wp.com,Group
DOMAIN-SUFFIX,vimeo.com,Group
DOMAIN-SUFFIX,youtu.be,Group
DOMAIN-SUFFIX,ytimg.com,Group
// Telegram
IP-CIDR,91.108.56.0/22,Group
IP-CIDR,91.108.4.0/22,Group
IP-CIDR,109.239.140.0/24,Group
IP-CIDR,149.154.160.0/20,Group
// LAN
IP-CIDR,192.168.0.0/16,DIRECT
IP-CIDR,10.0.0.0/8,DIRECT
IP-CIDR,172.16.0.0/12,DIRECT
IP-CIDR,127.0.0.0/8,DIRECT
GEOIP,CN,DIRECT
FINAL,Group
`) |
package model
import "fmt"
// Config is the top-level YAML configuration holding optional MySQL and
// PostgreSQL connection settings.
type Config struct {
	MySQL      *MySQLOptions      `yaml:"MYSQL"`
	PostgreSQL *PostgreSQLOptions `yaml:"POSTGRESQL"`
}
// MySQLOptions holds MySQL connection settings loaded from YAML.
type MySQLOptions struct {
	Host    string `yaml:"HOST"`
	Port    string `yaml:"PORT"`
	User    string `yaml:"USER"`
	Pass    string `yaml:"PASS"`
	Db      string `yaml:"DB"`
	Charset string `yaml:"CHARSET"`
}

// String renders the options as a "user:pass@(host:port)/db?charset=…"
// connection string.
func (options *MySQLOptions) String() string {
	credentials := options.User + ":" + options.Pass
	address := options.Host + ":" + options.Port
	return fmt.Sprintf("%s@(%s)/%s?charset=%s", credentials, address, options.Db, options.Charset)
}
// PostgreSQLOptions holds PostgreSQL connection settings loaded from YAML.
type PostgreSQLOptions struct {
	Host string `yaml:"HOST"`
	Port string `yaml:"PORT"`
	User string `yaml:"USER"`
	Pass string `yaml:"PASS"`
	Db   string `yaml:"DB"`
}

// String renders the options as a space-separated key=value connection
// string.
func (options *PostgreSQLOptions) String() string {
	return fmt.Sprintf(
		"host=%s port=%s user=%s dbname=%s password=%s",
		options.Host, options.Port, options.User, options.Db, options.Pass,
	)
}
|
package tests
import "fmt"
/*
Example functions are named starting with "Example". By writing the
expected output in a comment beginning with "// Output:", the contents of
standard output can be tested. If there is no Output comment, the example
is only compiled, not run.
*/
// ExampleHello demonstrates a basic testable example: the Output comment
// below is compared against actual stdout by `go test`.
func ExampleHello() {
	fmt.Println("Hello")
	// Output: Hello
}
/*
With "Unordered output:" the expected lines may match in any order. For
example, iterating over a map yields entries in random order, so this
form is used in such cases.
*/
func ExampleUnordered() {
for _, v := range []int{1, 2, 3} {
fmt.Println(v)
}
// Unordered output:
// 2
// 3
// 1
}
|
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
)
// HandleRequest handles an API Gateway proxy request.
//
// It reads the optional "name" query parameter (defaulting to "my pretties")
// and responds with HTTP 200 and a JSON body carrying a greeting plus the
// value of the SampleEnvVar environment variable.
func HandleRequest(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	// Capture the 'name' query parameter from the request.
	name, ok := request.QueryStringParameters["name"]
	if !ok {
		name = "my pretties"
	}

	// Build the response payload.
	message := struct {
		Message      string `json:"message"`
		SampleEnvVar string `json:"sampleEnvVar"`
	}{
		Message:      fmt.Sprintf("Hello, %s!", name),
		SampleEnvVar: os.Getenv("SampleEnvVar"),
	}

	// Encode the response data into a JSON string. The error was previously
	// discarded, which would silently return an empty/garbage body; surface
	// it so API Gateway reports a server error instead.
	messageJSON, err := json.Marshal(message)
	if err != nil {
		return events.APIGatewayProxyResponse{}, fmt.Errorf("encoding response: %w", err)
	}

	// Return the response to the API gateway.
	return events.APIGatewayProxyResponse{
		StatusCode: 200,
		Headers:    map[string]string{"Content-Type": "application/json"},
		Body:       string(messageJSON),
	}, nil
}
// main hands the handler to the AWS Lambda runtime and blocks for the
// lifetime of the function instance.
func main() {
	lambda.Start(HandleRequest)
}
|
/*
Remove Nth Node From End of List
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
*/
package main
import "fmt"
// print writes the list to stdout as "v1->v2->...->vn" with no trailing
// newline. The value receiver copies only the head node; the remaining
// nodes are shared with the caller's list.
func (l ListNode) print() {
	for p := &l; p != nil; p = p.Next {
		fmt.Print(p.Val)
		if p.Next != nil {
			fmt.Print("->")
		}
	}
}
// newList builds a singly linked list containing val in order and returns
// its head. An empty slice now yields a nil head (the canonical empty list);
// the previous version returned a spurious zero-valued node in that case,
// and carried a dead `ptr = nil` assignment.
func newList(val []int) *ListNode {
	var head *ListNode
	// tail always points at the link to fill next (initially the head slot),
	// so no sentinel node needs to be allocated and trimmed afterwards.
	tail := &head
	for _, v := range val {
		node := &ListNode{Val: v}
		*tail = node
		tail = &node.Next
	}
	return head
}
// removeNthFromEnd deletes the n-th node counted from the end of the list
// and returns the (possibly new) head, in a single pass using two pointers
// kept n nodes apart. Assumes n is valid (1 <= n <= length), per the
// problem statement; when n equals the length, the head itself is removed.
func removeNthFromEnd(head *ListNode, n int) *ListNode {
	ptr, last := head, head
	// Advance ptr n steps (stopping early at the tail if the list has fewer
	// than n+1 nodes), leaving ptr n nodes ahead of last.
	for ptr.Next != nil && n != 0 {
		ptr = ptr.Next
		n--
	}
	// Walk both pointers until ptr reaches the tail; last then rests on the
	// node immediately before the one to remove.
	for ptr.Next != nil {
		last = last.Next
		ptr = ptr.Next
	}
	// n > 0 here means ptr hit the tail before n reached zero, i.e. (for a
	// valid n) n equals the list length and the head itself is removed;
	// last.Next == nil means the list has a single node. In both cases the
	// node after last (possibly nil) is the new head.
	if last.Next == nil || n > 0 {
		return last.Next
	}
	// Unlink last.Next. The removed node is zeroed and detached. Note the
	// tuple assignment relies on Go's evaluation order: both `last.Next`
	// indirections on the left are resolved before any assignment happens,
	// so the second assignment nils out the *removed* node's Next pointer.
	last.Next.Val = 0
	last.Next, last.Next.Next = last.Next.Next, nil
	return head
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.