file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.go | package main
/*
Minimal tool to automate release creation.
Create:
- git tag
- homebrew bottle
- linux tarball
- GitHub release with asset link(s)
Update:
- Homebrew formula tap with new release & SHAs
*/
import (
"bytes"
"compress/gzip"
"context"
"crypto/sha256"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"text/template"
"github.com/google/go-github/github"
"github.com/mholt/archiver"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"golang.org/x/oauth2"
)
const (
repoOwner = "dollarshaveclub"
repoName = "psst"
)
var rname, npath, commitsha, ghtoken, taprepo, tapref, fpath, ftpath, targetoslist string
var draft, prerelease, dobuild bool
var trowner, trname string
var hbrev, brbd uint
var osvs []string
var logger = log.New(os.Stderr, "", log.LstdFlags)
func ferr(msg string, args ...interface{}) {
fmt.Printf(msg+"\n", args...)
os.Exit(1)
}
var ghc *github.Client
func init() {
pflag.StringVar(&rname, "release", "", "release name (ex: v1.0.0)")
pflag.StringVar(&npath, "notes-path", "relnotes.md", "path to release notes")
pflag.StringVar(&commitsha, "commit", "", "commit SHA to release")
pflag.StringVar(&taprepo, "tap-repo", "dollarshaveclub/homebrew-public", "name of tap GitHub repository ([owner]/[repo])")
pflag.StringVar(&tapref, "tap-repo-ref", "master", "tap repository ref (branch/tag/SHA)")
pflag.StringVar(&fpath, "formula", "Formula/psst.rb", "path to formula within tap repo")
pflag.StringVar(&ftpath, "formula-template", "Formula/psst.rb.tmpl", "path to formula template within tap repo")
pflag.StringVar(&targetoslist, "macos-versions", "el_capitan,high_sierra,sierra", "Supported MacOS versions (comma-delimited)")
pflag.UintVar(&hbrev, "homebrew-rev", 0, "Homebrew revision (bump to force reinstall/rebuild)")
pflag.UintVar(&brbd, "bottle-rebuild", 1, "Bottle rebuild (bump to force bottle reinstall)")
pflag.BoolVar(&draft, "draft", false, "Draft release (unpublished)")
pflag.BoolVar(&prerelease, "prerelease", false, "Prerelease")
pflag.BoolVar(&dobuild, "build", true, "Build binaries first")
pflag.Parse()
trs := strings.Split(taprepo, "/")
if len(trs) != 2 {
ferr("malformed tap repo (expected [owner]/[repo]): %v", taprepo)
}
if rname == "" {
ferr("release name is required")
}
trowner = trs[0]
trname = trs[1]
osvs = strings.Split(targetoslist, ",")
if len(osvs) == 0 {
ferr("At least one MacOS version is required")
}
ghtoken = os.Getenv("GITHUB_TOKEN")
if ghtoken == "" {
ferr("GITHUB_TOKEN missing from environment")
}
if err := checkFiles(npath); err != nil {
ferr("file path error: %v", err)
}
checkLocalRepoVersion()
ghc = newGHClient()
}
func newGHClient() *github.Client {
tc := oauth2.NewClient(context.Background(), oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: ghtoken},
))
return github.NewClient(tc)
}
func checkLocalRepoVersion() {
cmd := exec.Command("git", "rev-parse", "HEAD")
out, err := cmd.Output()
if err != nil {
ferr("error getting git command output: %v", err)
}
if strings.TrimRight(string(out), "\n") != commitsha {
ferr("current git revision does not match requested release version: %v (expected %v)", string(out), commitsha)
}
}
func checkFiles(paths ...string) error {
for _, p := range paths {
if _, err := os.Stat(p); err != nil {
return errors.Wrap(err, "file error")
}
}
return nil
}
func createGitTag() error {
msg := fmt.Sprintf("release %v", rname)
ot := "commit"
tag := github.Tag{
Tag: &rname,
Message: &msg,
Object: &github.GitObject{
Type: &ot,
SHA: &commitsha,
},
}
log.Printf("creating tag...\n")
_, _, err := ghc.Git.CreateTag(context.Background(), repoOwner, repoName, &tag)
if err != nil {
return errors.Wrap(err, "error creating tag")
}
refstr := fmt.Sprintf("refs/tags/%v", rname)
objt := "commit"
ref := github.Reference{
Ref: &refstr,
Object: &github.GitObject{
Type: &objt,
SHA: &commitsha,
},
}
log.Printf("creating tag ref...\n")
_, _, err = ghc.Git.CreateRef(context.Background(), repoOwner, repoName, &ref)
if err != nil {
return errors.Wrap(err, "error creating tag ref")
}
return nil
}
type bottleDefinition struct {
Hash string
TargetOS string
}
type formulaTemplateData struct {
Tag string
CommitSHA string
HomebrewRevision uint
BaseDownloadURL string
Bottled bool
BottleRebuild uint
BottleDefs []bottleDefinition
}
func (ftd *formulaTemplateData) populate(bdefs []bottleDefinition) {
ftd.Tag = rname
ftd.CommitSHA = commitsha
if hbrev > 0 {
ftd.HomebrewRevision = hbrev
}
ftd.BaseDownloadURL = fmt.Sprintf("https://github.com/%v/%v/releases/download/%v", repoOwner, repoName, rname)
ftd.BottleRebuild = brbd
ftd.Bottled = true
ftd.BottleDefs = bdefs
}
const header = "# GENERATED FROM TEMPLATE. DO NOT EDIT!\n"
// generateFormula fetches the template from github, executes the template with ftd and returns the raw data or error, if any
func generateFormula(ftd formulaTemplateData) ([]byte, error) {
logger.Printf("Generating Homebrew formula")
// get template
fc, _, _, err := ghc.Repositories.GetContents(context.Background(), trowner, trname, ftpath, &github.RepositoryContentGetOptions{Ref: tapref})
if err != nil {
return nil, errors.Wrap(err, "error getting formula template")
}
rt, err := fc.GetContent()
if err != nil {
return nil, errors.Wrap(err, "error getting formula template content")
}
// generate new formula
tmpl, err := template.New("formula").Parse(rt)
if err != nil {
return nil, errors.Wrap(err, "error parsing formula template")
}
buf := bytes.NewBuffer([]byte{})
if err = tmpl.Execute(buf, &ftd); err != nil {
return nil, errors.Wrap(err, "error executing template")
}
return append([]byte(header), buf.Bytes()...), nil
}
func pushFormula(fd []byte) error {
logger.Printf("Pushing Homebrew formula")
// Get the current file for the SHA
fc, _, _, err := ghc.Repositories.GetContents(context.Background(), trowner, trname, fpath, &github.RepositoryContentGetOptions{Ref: tapref})
if err != nil {
return errors.Wrap(err, "error getting formula contents")
}
sp := func(s string) *string {
return &s
}
_, _, err = ghc.Repositories.UpdateFile(context.Background(), trowner, trname, fpath, &github.RepositoryContentFileOptions{
Message: sp(fmt.Sprintf("updated for release %v", rname)),
Content: fd,
SHA: fc.SHA,
Branch: &tapref,
})
if err != nil {
return errors.Wrap(err, "error updating formula")
}
return nil
}
const (
linuxBinName = "psst-linux-amd64"
)
var buildopts = []string{"-ldflags", "-X github.com/dollarshaveclub/psst/cmd.CommitSHA=%v -X github.com/dollarshaveclub/psst/cmd.Version=%v -X github.com/dollarshaveclub/psst/cmd.CompiledDirectory=github -X github.com/dollarshaveclub/psst/cmd.CompiledStorage=vault -X github.com/dollarshaveclub/psst/cmd.Org=dollarshaveclub"}
func buildBins() error {
if err := os.MkdirAll("bins", os.ModeDir|0755); err != nil {
return errors.Wrap(err, "error creating bins directory")
}
cwd, err := os.Getwd()
if err != nil {
return errors.Wrap(err, "error getting working directory")
}
wd := filepath.Join(cwd, "..")
buildopts[1] = fmt.Sprintf(buildopts[1], commitsha, rname)
build := func(osn string) ([]byte, error) {
cmd := exec.Command("go", append([]string{"build"}, buildopts...)...)
cmd.Env = append(os.Environ(), []string{fmt.Sprintf("GOOS=%v", osn), "GOARCH=amd64"}...)
cmd.Dir = wd
return cmd.CombinedOutput()
}
logger.Printf("Building binaries...\n")
logger.Printf("...macOS amd64")
if out, err := build("darwin"); err != nil {
return errors.Wrapf(err, "error running build command: %s", out)
}
if err := os.Rename(filepath.Join(wd, "psst"), filepath.Join(cwd, "bins", "psst-darwin")); err != nil {
return errors.Wrap(err, "error renaming binary")
}
logger.Printf("...Linux amd64")
if out, err := build("linux"); err != nil {
return errors.Wrapf(err, "error running build command: %s", out)
}
lfn := filepath.Join(cwd, "bins", linuxBinName)
if err := os.Rename(filepath.Join(wd, "psst"), lfn); err != nil {
return errors.Wrap(err, "error renaming binary")
}
// compress linux binary
logger.Printf("...compressing Linux binary\n")
d, err := ioutil.ReadFile(lfn)
if err != nil {
return errors.Wrap(err, "error reading linux binary")
}
f, err := os.Create(lfn + ".gz")
if err != nil {
return errors.Wrap(err, "error creating compressed linux binary")
}
defer f.Close()
gw := gzip.NewWriter(f)
defer gw.Close()
if _, err := gw.Write(d); err != nil {
return errors.Wrap(err, "error writing compressed linux binary")
}
return nil
}
// "copy" (link) a file if it doesn't exist
func cpifneeded(src, dest string) error {
if _, err := os.Stat(dest); err != nil {
if os.IsNotExist(err) {
return os.Link(src, dest)
}
return errors.Wrap(err, "error getting destination")
}
return nil
}
var bottleNameTmpl = template.Must(template.New("bn").Parse("psst-{{ .Release }}{{ if .HomebrewRevision }}_{{ .HomebrewRevision }}{{ end }}.{{ .OS }}.bottle.{{ .BottleRebuild }}.tar.gz"))
// createBottle synthetically creates a bottle tarball returning the bottle definitions, local bottle filenames and error if any
func createBottle() ([]bottleDefinition, []string, error) |
func createGHRelease(assetpaths []string) error {
rel := github.RepositoryRelease{
TagName: &rname,
//TargetCommitish: &commitsha,
Name: &rname,
Draft: &draft,
Prerelease: &prerelease,
}
nd, err := ioutil.ReadFile(npath)
if err != nil {
return errors.Wrap(err, "error reading release notes")
}
notes := string(nd)
rel.Body = ¬es
logger.Printf("Creating GitHub release")
ro, _, err := ghc.Repositories.CreateRelease(context.Background(), repoOwner, repoName, &rel)
if err != nil {
return errors.Wrap(err, "error creating release")
}
for _, ap := range assetpaths {
f, err := os.Open(ap)
if err != nil {
return errors.Wrap(err, "error opening asset")
}
defer f.Close()
logger.Printf("Uploading asset %v...", ap)
resp, _, err := ghc.Repositories.UploadReleaseAsset(context.Background(), repoOwner, repoName, *ro.ID, &github.UploadOptions{Name: filepath.Base(ap)}, f)
if err != nil {
return errors.Wrap(err, "error uploading asset")
}
logger.Printf("...%v\n", resp.GetBrowserDownloadURL())
}
return nil
}
func cleanup() error {
logger.Printf("Cleaning up")
for _, p := range []string{"./bins", "./bottle", "./psst"} {
if err := os.RemoveAll(p); err != nil {
return errors.Wrap(err, "error removing path")
}
}
return nil
}
func main() {
if dobuild {
if err := buildBins(); err != nil {
ferr("error building binaries: %v", err)
}
}
bds, lps, err := createBottle()
if err != nil {
ferr("error creating bottle: %v", err)
}
ftd := formulaTemplateData{}
ftd.populate(bds)
fd, err := generateFormula(ftd)
if err != nil {
ferr("error generating formula: %v", err)
}
if err = pushFormula(fd); err != nil {
ferr("error pushing formula: %v", err)
}
if err := createGitTag(); err != nil {
ferr("error creating tag: %v", err)
}
cwd, err := os.Getwd()
if err != nil {
ferr("error getting working directory: %v", err)
}
assetpaths := append([]string{filepath.Join(cwd, "bins", linuxBinName+".gz")}, lps...)
if err = createGHRelease(assetpaths); err != nil {
ferr("error creating GitHub release: %v", err)
}
if err := cleanup(); err != nil {
ferr("error cleaning up: %v", err)
}
logger.Printf("Done")
}
| {
logger.Printf("Creating Homebrew bottle...\n")
cwd, err := os.Getwd()
if err != nil {
return nil, nil, errors.Wrap(err, "error getting working directory")
}
rver := regexp.MustCompile("([0-9.]+)").FindString(rname)
basepath := filepath.Join(".", "psst", rver)
binpath := filepath.Join(basepath, "bin")
if err := os.MkdirAll(binpath, os.ModeDir|0755); err != nil {
return nil, nil, errors.Wrap(err, "error creating bottle directory path")
}
// .brew
if err := os.MkdirAll(filepath.Join(basepath, ".brew"), os.ModeDir|0755); err != nil {
return nil, nil, errors.Wrap(err, "error creating .brew directory")
}
// copy README
if err := cpifneeded(filepath.Join(cwd, "..", "README.md"), filepath.Join(basepath, "README.md")); err != nil {
return nil, nil, errors.Wrap(err, "error copying README")
}
// copy binary
if err := cpifneeded(filepath.Join("bins", "psst-darwin"), filepath.Join(binpath, "psst")); err != nil {
return nil, nil, errors.Wrap(err, "error copying binary")
}
// INSTALL_RECEIPT.json
ir, err := ioutil.ReadFile("INSTALL_RECEIPT.json.tmpl")
if err != nil {
return nil, nil, errors.Wrap(err, "error reading install receipt template")
}
tmpl, err := template.New("instrcpt").Parse(string(ir))
d := struct {
Release string
OS string
HomebrewRevision uint
BottleRebuild uint
}{
Release: rver,
BottleRebuild: brbd,
}
if hbrev > 0 {
d.HomebrewRevision = hbrev
}
buf := bytes.NewBuffer([]byte{})
if err := tmpl.Execute(buf, &d); err != nil {
return nil, nil, errors.Wrap(err, "error executing install receipt template")
}
if err := ioutil.WriteFile(filepath.Join(basepath, "INSTALL_RECEIPT.json"), buf.Bytes(), os.ModePerm); err != nil {
return nil, nil, errors.Wrap(err, "error writing install receipt")
}
// tar it up
if err := os.MkdirAll("bottle", os.ModeDir|0755); err != nil {
return nil, nil, errors.Wrap(err, "error creating bottle directory")
}
buf = bytes.NewBuffer([]byte{})
d.OS = osvs[0]
if err := bottleNameTmpl.Execute(buf, &d); err != nil {
return nil, nil, errors.Wrap(err, "error executing bottle filename template: "+d.OS)
}
bp := filepath.Join("bottle", buf.String())
if err := archiver.TarGz.Make(bp, []string{"psst"}); err != nil {
return nil, nil, errors.Wrap(err, "error creating bottle tarball")
}
// Get hash of bottle, populate bottle definitions
bd, err := ioutil.ReadFile(bp)
if err != nil {
return nil, nil, errors.Wrap(err, "error reading bottle")
}
sha := fmt.Sprintf("%x", sha256.Sum256(bd))
bdefs := []bottleDefinition{
bottleDefinition{
Hash: sha,
TargetOS: osvs[0],
},
}
lps := []string{bp}
// link other bottles
for _, osn := range osvs[1:] {
d.OS = osn
buf = bytes.NewBuffer([]byte{})
if err := bottleNameTmpl.Execute(buf, &d); err != nil {
return nil, nil, errors.Wrap(err, "error executing bottle filename template: "+d.OS)
}
p := filepath.Join("bottle", buf.String())
if err := cpifneeded(bp, p); err != nil {
return nil, nil, errors.Wrap(err, "error linking bottle")
}
lps = append(lps, p)
bdefs = append(bdefs, bottleDefinition{
Hash: sha,
TargetOS: osn,
})
}
return bdefs, lps, nil
} | identifier_body |
main.go | package main
/*
Minimal tool to automate release creation.
Create:
- git tag
- homebrew bottle
- linux tarball
- GitHub release with asset link(s)
Update:
- Homebrew formula tap with new release & SHAs
*/
import (
"bytes"
"compress/gzip"
"context"
"crypto/sha256"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"text/template"
"github.com/google/go-github/github"
"github.com/mholt/archiver"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"golang.org/x/oauth2"
)
const (
repoOwner = "dollarshaveclub"
repoName = "psst"
)
var rname, npath, commitsha, ghtoken, taprepo, tapref, fpath, ftpath, targetoslist string
var draft, prerelease, dobuild bool
var trowner, trname string
var hbrev, brbd uint
var osvs []string
var logger = log.New(os.Stderr, "", log.LstdFlags)
func ferr(msg string, args ...interface{}) {
fmt.Printf(msg+"\n", args...)
os.Exit(1)
}
var ghc *github.Client
func init() {
pflag.StringVar(&rname, "release", "", "release name (ex: v1.0.0)")
pflag.StringVar(&npath, "notes-path", "relnotes.md", "path to release notes")
pflag.StringVar(&commitsha, "commit", "", "commit SHA to release")
pflag.StringVar(&taprepo, "tap-repo", "dollarshaveclub/homebrew-public", "name of tap GitHub repository ([owner]/[repo])")
pflag.StringVar(&tapref, "tap-repo-ref", "master", "tap repository ref (branch/tag/SHA)")
pflag.StringVar(&fpath, "formula", "Formula/psst.rb", "path to formula within tap repo")
pflag.StringVar(&ftpath, "formula-template", "Formula/psst.rb.tmpl", "path to formula template within tap repo")
pflag.StringVar(&targetoslist, "macos-versions", "el_capitan,high_sierra,sierra", "Supported MacOS versions (comma-delimited)")
pflag.UintVar(&hbrev, "homebrew-rev", 0, "Homebrew revision (bump to force reinstall/rebuild)")
pflag.UintVar(&brbd, "bottle-rebuild", 1, "Bottle rebuild (bump to force bottle reinstall)")
pflag.BoolVar(&draft, "draft", false, "Draft release (unpublished)")
pflag.BoolVar(&prerelease, "prerelease", false, "Prerelease")
pflag.BoolVar(&dobuild, "build", true, "Build binaries first")
pflag.Parse()
trs := strings.Split(taprepo, "/")
if len(trs) != 2 {
ferr("malformed tap repo (expected [owner]/[repo]): %v", taprepo)
}
if rname == "" {
ferr("release name is required")
}
trowner = trs[0]
trname = trs[1]
osvs = strings.Split(targetoslist, ",")
if len(osvs) == 0 {
ferr("At least one MacOS version is required")
}
ghtoken = os.Getenv("GITHUB_TOKEN")
if ghtoken == "" {
ferr("GITHUB_TOKEN missing from environment")
}
if err := checkFiles(npath); err != nil {
ferr("file path error: %v", err)
}
checkLocalRepoVersion()
ghc = newGHClient()
}
func newGHClient() *github.Client {
tc := oauth2.NewClient(context.Background(), oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: ghtoken},
))
return github.NewClient(tc)
}
func checkLocalRepoVersion() {
cmd := exec.Command("git", "rev-parse", "HEAD")
out, err := cmd.Output()
if err != nil {
ferr("error getting git command output: %v", err)
}
if strings.TrimRight(string(out), "\n") != commitsha {
ferr("current git revision does not match requested release version: %v (expected %v)", string(out), commitsha)
}
}
func checkFiles(paths ...string) error {
for _, p := range paths {
if _, err := os.Stat(p); err != nil {
return errors.Wrap(err, "file error")
}
}
return nil
}
func createGitTag() error {
msg := fmt.Sprintf("release %v", rname)
ot := "commit"
tag := github.Tag{
Tag: &rname,
Message: &msg,
Object: &github.GitObject{
Type: &ot,
SHA: &commitsha,
},
}
log.Printf("creating tag...\n")
_, _, err := ghc.Git.CreateTag(context.Background(), repoOwner, repoName, &tag)
if err != nil {
return errors.Wrap(err, "error creating tag")
}
refstr := fmt.Sprintf("refs/tags/%v", rname)
objt := "commit"
ref := github.Reference{
Ref: &refstr,
Object: &github.GitObject{
Type: &objt,
SHA: &commitsha,
},
}
log.Printf("creating tag ref...\n")
_, _, err = ghc.Git.CreateRef(context.Background(), repoOwner, repoName, &ref)
if err != nil {
return errors.Wrap(err, "error creating tag ref")
}
return nil
}
type bottleDefinition struct {
Hash string
TargetOS string
}
type formulaTemplateData struct {
Tag string
CommitSHA string
HomebrewRevision uint
BaseDownloadURL string
Bottled bool
BottleRebuild uint
BottleDefs []bottleDefinition
}
func (ftd *formulaTemplateData) populate(bdefs []bottleDefinition) {
ftd.Tag = rname
ftd.CommitSHA = commitsha
if hbrev > 0 {
ftd.HomebrewRevision = hbrev
}
ftd.BaseDownloadURL = fmt.Sprintf("https://github.com/%v/%v/releases/download/%v", repoOwner, repoName, rname)
ftd.BottleRebuild = brbd
ftd.Bottled = true
ftd.BottleDefs = bdefs
}
const header = "# GENERATED FROM TEMPLATE. DO NOT EDIT!\n"
// generateFormula fetches the template from github, executes the template with ftd and returns the raw data or error, if any
func generateFormula(ftd formulaTemplateData) ([]byte, error) {
logger.Printf("Generating Homebrew formula")
// get template
fc, _, _, err := ghc.Repositories.GetContents(context.Background(), trowner, trname, ftpath, &github.RepositoryContentGetOptions{Ref: tapref})
if err != nil {
return nil, errors.Wrap(err, "error getting formula template")
}
rt, err := fc.GetContent()
if err != nil {
return nil, errors.Wrap(err, "error getting formula template content")
}
// generate new formula
tmpl, err := template.New("formula").Parse(rt)
if err != nil {
return nil, errors.Wrap(err, "error parsing formula template")
}
buf := bytes.NewBuffer([]byte{})
if err = tmpl.Execute(buf, &ftd); err != nil {
return nil, errors.Wrap(err, "error executing template")
}
return append([]byte(header), buf.Bytes()...), nil
}
func pushFormula(fd []byte) error {
logger.Printf("Pushing Homebrew formula")
// Get the current file for the SHA
fc, _, _, err := ghc.Repositories.GetContents(context.Background(), trowner, trname, fpath, &github.RepositoryContentGetOptions{Ref: tapref})
if err != nil {
return errors.Wrap(err, "error getting formula contents")
}
sp := func(s string) *string {
return &s
}
_, _, err = ghc.Repositories.UpdateFile(context.Background(), trowner, trname, fpath, &github.RepositoryContentFileOptions{
Message: sp(fmt.Sprintf("updated for release %v", rname)),
Content: fd,
SHA: fc.SHA,
Branch: &tapref,
})
if err != nil {
return errors.Wrap(err, "error updating formula")
}
return nil
}
const (
linuxBinName = "psst-linux-amd64"
)
var buildopts = []string{"-ldflags", "-X github.com/dollarshaveclub/psst/cmd.CommitSHA=%v -X github.com/dollarshaveclub/psst/cmd.Version=%v -X github.com/dollarshaveclub/psst/cmd.CompiledDirectory=github -X github.com/dollarshaveclub/psst/cmd.CompiledStorage=vault -X github.com/dollarshaveclub/psst/cmd.Org=dollarshaveclub"}
func buildBins() error {
if err := os.MkdirAll("bins", os.ModeDir|0755); err != nil {
return errors.Wrap(err, "error creating bins directory")
}
cwd, err := os.Getwd()
if err != nil {
return errors.Wrap(err, "error getting working directory")
}
wd := filepath.Join(cwd, "..")
buildopts[1] = fmt.Sprintf(buildopts[1], commitsha, rname)
build := func(osn string) ([]byte, error) {
cmd := exec.Command("go", append([]string{"build"}, buildopts...)...)
cmd.Env = append(os.Environ(), []string{fmt.Sprintf("GOOS=%v", osn), "GOARCH=amd64"}...)
cmd.Dir = wd
return cmd.CombinedOutput()
}
logger.Printf("Building binaries...\n")
logger.Printf("...macOS amd64")
if out, err := build("darwin"); err != nil {
return errors.Wrapf(err, "error running build command: %s", out)
}
if err := os.Rename(filepath.Join(wd, "psst"), filepath.Join(cwd, "bins", "psst-darwin")); err != nil {
return errors.Wrap(err, "error renaming binary")
}
logger.Printf("...Linux amd64")
if out, err := build("linux"); err != nil {
return errors.Wrapf(err, "error running build command: %s", out)
}
lfn := filepath.Join(cwd, "bins", linuxBinName)
if err := os.Rename(filepath.Join(wd, "psst"), lfn); err != nil {
return errors.Wrap(err, "error renaming binary")
}
// compress linux binary
logger.Printf("...compressing Linux binary\n")
d, err := ioutil.ReadFile(lfn)
if err != nil {
return errors.Wrap(err, "error reading linux binary")
}
f, err := os.Create(lfn + ".gz")
if err != nil {
return errors.Wrap(err, "error creating compressed linux binary")
}
defer f.Close()
gw := gzip.NewWriter(f)
defer gw.Close()
if _, err := gw.Write(d); err != nil {
return errors.Wrap(err, "error writing compressed linux binary")
}
return nil
}
// "copy" (link) a file if it doesn't exist
func cpifneeded(src, dest string) error {
if _, err := os.Stat(dest); err != nil {
if os.IsNotExist(err) {
return os.Link(src, dest)
}
return errors.Wrap(err, "error getting destination")
}
return nil
}
var bottleNameTmpl = template.Must(template.New("bn").Parse("psst-{{ .Release }}{{ if .HomebrewRevision }}_{{ .HomebrewRevision }}{{ end }}.{{ .OS }}.bottle.{{ .BottleRebuild }}.tar.gz"))
// createBottle synthetically creates a bottle tarball returning the bottle definitions, local bottle filenames and error if any
func createBottle() ([]bottleDefinition, []string, error) {
logger.Printf("Creating Homebrew bottle...\n")
cwd, err := os.Getwd()
if err != nil {
return nil, nil, errors.Wrap(err, "error getting working directory")
}
rver := regexp.MustCompile("([0-9.]+)").FindString(rname)
basepath := filepath.Join(".", "psst", rver)
binpath := filepath.Join(basepath, "bin")
if err := os.MkdirAll(binpath, os.ModeDir|0755); err != nil {
return nil, nil, errors.Wrap(err, "error creating bottle directory path")
}
// .brew
if err := os.MkdirAll(filepath.Join(basepath, ".brew"), os.ModeDir|0755); err != nil {
return nil, nil, errors.Wrap(err, "error creating .brew directory")
}
// copy README
if err := cpifneeded(filepath.Join(cwd, "..", "README.md"), filepath.Join(basepath, "README.md")); err != nil {
return nil, nil, errors.Wrap(err, "error copying README")
}
// copy binary
if err := cpifneeded(filepath.Join("bins", "psst-darwin"), filepath.Join(binpath, "psst")); err != nil {
return nil, nil, errors.Wrap(err, "error copying binary")
}
// INSTALL_RECEIPT.json
ir, err := ioutil.ReadFile("INSTALL_RECEIPT.json.tmpl")
if err != nil {
return nil, nil, errors.Wrap(err, "error reading install receipt template")
}
tmpl, err := template.New("instrcpt").Parse(string(ir))
d := struct {
Release string
OS string
HomebrewRevision uint
BottleRebuild uint
}{
Release: rver,
BottleRebuild: brbd,
}
if hbrev > 0 {
d.HomebrewRevision = hbrev
}
buf := bytes.NewBuffer([]byte{})
if err := tmpl.Execute(buf, &d); err != nil {
return nil, nil, errors.Wrap(err, "error executing install receipt template")
}
if err := ioutil.WriteFile(filepath.Join(basepath, "INSTALL_RECEIPT.json"), buf.Bytes(), os.ModePerm); err != nil {
return nil, nil, errors.Wrap(err, "error writing install receipt")
}
// tar it up
if err := os.MkdirAll("bottle", os.ModeDir|0755); err != nil {
return nil, nil, errors.Wrap(err, "error creating bottle directory")
}
buf = bytes.NewBuffer([]byte{})
d.OS = osvs[0]
if err := bottleNameTmpl.Execute(buf, &d); err != nil |
bp := filepath.Join("bottle", buf.String())
if err := archiver.TarGz.Make(bp, []string{"psst"}); err != nil {
return nil, nil, errors.Wrap(err, "error creating bottle tarball")
}
// Get hash of bottle, populate bottle definitions
bd, err := ioutil.ReadFile(bp)
if err != nil {
return nil, nil, errors.Wrap(err, "error reading bottle")
}
sha := fmt.Sprintf("%x", sha256.Sum256(bd))
bdefs := []bottleDefinition{
bottleDefinition{
Hash: sha,
TargetOS: osvs[0],
},
}
lps := []string{bp}
// link other bottles
for _, osn := range osvs[1:] {
d.OS = osn
buf = bytes.NewBuffer([]byte{})
if err := bottleNameTmpl.Execute(buf, &d); err != nil {
return nil, nil, errors.Wrap(err, "error executing bottle filename template: "+d.OS)
}
p := filepath.Join("bottle", buf.String())
if err := cpifneeded(bp, p); err != nil {
return nil, nil, errors.Wrap(err, "error linking bottle")
}
lps = append(lps, p)
bdefs = append(bdefs, bottleDefinition{
Hash: sha,
TargetOS: osn,
})
}
return bdefs, lps, nil
}
func createGHRelease(assetpaths []string) error {
rel := github.RepositoryRelease{
TagName: &rname,
//TargetCommitish: &commitsha,
Name: &rname,
Draft: &draft,
Prerelease: &prerelease,
}
nd, err := ioutil.ReadFile(npath)
if err != nil {
return errors.Wrap(err, "error reading release notes")
}
notes := string(nd)
rel.Body = ¬es
logger.Printf("Creating GitHub release")
ro, _, err := ghc.Repositories.CreateRelease(context.Background(), repoOwner, repoName, &rel)
if err != nil {
return errors.Wrap(err, "error creating release")
}
for _, ap := range assetpaths {
f, err := os.Open(ap)
if err != nil {
return errors.Wrap(err, "error opening asset")
}
defer f.Close()
logger.Printf("Uploading asset %v...", ap)
resp, _, err := ghc.Repositories.UploadReleaseAsset(context.Background(), repoOwner, repoName, *ro.ID, &github.UploadOptions{Name: filepath.Base(ap)}, f)
if err != nil {
return errors.Wrap(err, "error uploading asset")
}
logger.Printf("...%v\n", resp.GetBrowserDownloadURL())
}
return nil
}
func cleanup() error {
logger.Printf("Cleaning up")
for _, p := range []string{"./bins", "./bottle", "./psst"} {
if err := os.RemoveAll(p); err != nil {
return errors.Wrap(err, "error removing path")
}
}
return nil
}
func main() {
if dobuild {
if err := buildBins(); err != nil {
ferr("error building binaries: %v", err)
}
}
bds, lps, err := createBottle()
if err != nil {
ferr("error creating bottle: %v", err)
}
ftd := formulaTemplateData{}
ftd.populate(bds)
fd, err := generateFormula(ftd)
if err != nil {
ferr("error generating formula: %v", err)
}
if err = pushFormula(fd); err != nil {
ferr("error pushing formula: %v", err)
}
if err := createGitTag(); err != nil {
ferr("error creating tag: %v", err)
}
cwd, err := os.Getwd()
if err != nil {
ferr("error getting working directory: %v", err)
}
assetpaths := append([]string{filepath.Join(cwd, "bins", linuxBinName+".gz")}, lps...)
if err = createGHRelease(assetpaths); err != nil {
ferr("error creating GitHub release: %v", err)
}
if err := cleanup(); err != nil {
ferr("error cleaning up: %v", err)
}
logger.Printf("Done")
}
| {
return nil, nil, errors.Wrap(err, "error executing bottle filename template: "+d.OS)
} | conditional_block |
main.go | package main
/*
Minimal tool to automate release creation.
Create:
- git tag
- homebrew bottle
- linux tarball
- GitHub release with asset link(s)
Update:
- Homebrew formula tap with new release & SHAs
*/
import (
"bytes"
"compress/gzip"
"context"
"crypto/sha256"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"text/template"
"github.com/google/go-github/github"
"github.com/mholt/archiver"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"golang.org/x/oauth2"
)
const (
repoOwner = "dollarshaveclub"
repoName = "psst"
)
var rname, npath, commitsha, ghtoken, taprepo, tapref, fpath, ftpath, targetoslist string
var draft, prerelease, dobuild bool
var trowner, trname string
var hbrev, brbd uint
var osvs []string
var logger = log.New(os.Stderr, "", log.LstdFlags)
func ferr(msg string, args ...interface{}) {
fmt.Printf(msg+"\n", args...)
os.Exit(1)
}
var ghc *github.Client
func init() {
pflag.StringVar(&rname, "release", "", "release name (ex: v1.0.0)")
pflag.StringVar(&npath, "notes-path", "relnotes.md", "path to release notes")
pflag.StringVar(&commitsha, "commit", "", "commit SHA to release")
pflag.StringVar(&taprepo, "tap-repo", "dollarshaveclub/homebrew-public", "name of tap GitHub repository ([owner]/[repo])")
pflag.StringVar(&tapref, "tap-repo-ref", "master", "tap repository ref (branch/tag/SHA)")
pflag.StringVar(&fpath, "formula", "Formula/psst.rb", "path to formula within tap repo")
pflag.StringVar(&ftpath, "formula-template", "Formula/psst.rb.tmpl", "path to formula template within tap repo")
pflag.StringVar(&targetoslist, "macos-versions", "el_capitan,high_sierra,sierra", "Supported MacOS versions (comma-delimited)")
pflag.UintVar(&hbrev, "homebrew-rev", 0, "Homebrew revision (bump to force reinstall/rebuild)")
pflag.UintVar(&brbd, "bottle-rebuild", 1, "Bottle rebuild (bump to force bottle reinstall)")
pflag.BoolVar(&draft, "draft", false, "Draft release (unpublished)")
pflag.BoolVar(&prerelease, "prerelease", false, "Prerelease")
pflag.BoolVar(&dobuild, "build", true, "Build binaries first")
pflag.Parse()
trs := strings.Split(taprepo, "/")
if len(trs) != 2 {
ferr("malformed tap repo (expected [owner]/[repo]): %v", taprepo)
}
if rname == "" {
ferr("release name is required")
}
trowner = trs[0]
trname = trs[1]
osvs = strings.Split(targetoslist, ",")
if len(osvs) == 0 {
ferr("At least one MacOS version is required")
}
ghtoken = os.Getenv("GITHUB_TOKEN")
if ghtoken == "" {
ferr("GITHUB_TOKEN missing from environment")
}
if err := checkFiles(npath); err != nil {
ferr("file path error: %v", err)
}
checkLocalRepoVersion()
ghc = newGHClient()
}
func newGHClient() *github.Client {
tc := oauth2.NewClient(context.Background(), oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: ghtoken},
))
return github.NewClient(tc)
}
func checkLocalRepoVersion() {
cmd := exec.Command("git", "rev-parse", "HEAD")
out, err := cmd.Output()
if err != nil {
ferr("error getting git command output: %v", err)
}
if strings.TrimRight(string(out), "\n") != commitsha {
ferr("current git revision does not match requested release version: %v (expected %v)", string(out), commitsha)
}
}
func | (paths ...string) error {
for _, p := range paths {
if _, err := os.Stat(p); err != nil {
return errors.Wrap(err, "file error")
}
}
return nil
}
func createGitTag() error {
msg := fmt.Sprintf("release %v", rname)
ot := "commit"
tag := github.Tag{
Tag: &rname,
Message: &msg,
Object: &github.GitObject{
Type: &ot,
SHA: &commitsha,
},
}
log.Printf("creating tag...\n")
_, _, err := ghc.Git.CreateTag(context.Background(), repoOwner, repoName, &tag)
if err != nil {
return errors.Wrap(err, "error creating tag")
}
refstr := fmt.Sprintf("refs/tags/%v", rname)
objt := "commit"
ref := github.Reference{
Ref: &refstr,
Object: &github.GitObject{
Type: &objt,
SHA: &commitsha,
},
}
log.Printf("creating tag ref...\n")
_, _, err = ghc.Git.CreateRef(context.Background(), repoOwner, repoName, &ref)
if err != nil {
return errors.Wrap(err, "error creating tag ref")
}
return nil
}
type bottleDefinition struct {
Hash string
TargetOS string
}
type formulaTemplateData struct {
Tag string
CommitSHA string
HomebrewRevision uint
BaseDownloadURL string
Bottled bool
BottleRebuild uint
BottleDefs []bottleDefinition
}
func (ftd *formulaTemplateData) populate(bdefs []bottleDefinition) {
ftd.Tag = rname
ftd.CommitSHA = commitsha
if hbrev > 0 {
ftd.HomebrewRevision = hbrev
}
ftd.BaseDownloadURL = fmt.Sprintf("https://github.com/%v/%v/releases/download/%v", repoOwner, repoName, rname)
ftd.BottleRebuild = brbd
ftd.Bottled = true
ftd.BottleDefs = bdefs
}
const header = "# GENERATED FROM TEMPLATE. DO NOT EDIT!\n"
// generateFormula fetches the template from github, executes the template with ftd and returns the raw data or error, if any
func generateFormula(ftd formulaTemplateData) ([]byte, error) {
logger.Printf("Generating Homebrew formula")
// get template
fc, _, _, err := ghc.Repositories.GetContents(context.Background(), trowner, trname, ftpath, &github.RepositoryContentGetOptions{Ref: tapref})
if err != nil {
return nil, errors.Wrap(err, "error getting formula template")
}
rt, err := fc.GetContent()
if err != nil {
return nil, errors.Wrap(err, "error getting formula template content")
}
// generate new formula
tmpl, err := template.New("formula").Parse(rt)
if err != nil {
return nil, errors.Wrap(err, "error parsing formula template")
}
buf := bytes.NewBuffer([]byte{})
if err = tmpl.Execute(buf, &ftd); err != nil {
return nil, errors.Wrap(err, "error executing template")
}
return append([]byte(header), buf.Bytes()...), nil
}
func pushFormula(fd []byte) error {
logger.Printf("Pushing Homebrew formula")
// Get the current file for the SHA
fc, _, _, err := ghc.Repositories.GetContents(context.Background(), trowner, trname, fpath, &github.RepositoryContentGetOptions{Ref: tapref})
if err != nil {
return errors.Wrap(err, "error getting formula contents")
}
sp := func(s string) *string {
return &s
}
_, _, err = ghc.Repositories.UpdateFile(context.Background(), trowner, trname, fpath, &github.RepositoryContentFileOptions{
Message: sp(fmt.Sprintf("updated for release %v", rname)),
Content: fd,
SHA: fc.SHA,
Branch: &tapref,
})
if err != nil {
return errors.Wrap(err, "error updating formula")
}
return nil
}
const (
linuxBinName = "psst-linux-amd64"
)
var buildopts = []string{"-ldflags", "-X github.com/dollarshaveclub/psst/cmd.CommitSHA=%v -X github.com/dollarshaveclub/psst/cmd.Version=%v -X github.com/dollarshaveclub/psst/cmd.CompiledDirectory=github -X github.com/dollarshaveclub/psst/cmd.CompiledStorage=vault -X github.com/dollarshaveclub/psst/cmd.Org=dollarshaveclub"}
func buildBins() error {
if err := os.MkdirAll("bins", os.ModeDir|0755); err != nil {
return errors.Wrap(err, "error creating bins directory")
}
cwd, err := os.Getwd()
if err != nil {
return errors.Wrap(err, "error getting working directory")
}
wd := filepath.Join(cwd, "..")
buildopts[1] = fmt.Sprintf(buildopts[1], commitsha, rname)
build := func(osn string) ([]byte, error) {
cmd := exec.Command("go", append([]string{"build"}, buildopts...)...)
cmd.Env = append(os.Environ(), []string{fmt.Sprintf("GOOS=%v", osn), "GOARCH=amd64"}...)
cmd.Dir = wd
return cmd.CombinedOutput()
}
logger.Printf("Building binaries...\n")
logger.Printf("...macOS amd64")
if out, err := build("darwin"); err != nil {
return errors.Wrapf(err, "error running build command: %s", out)
}
if err := os.Rename(filepath.Join(wd, "psst"), filepath.Join(cwd, "bins", "psst-darwin")); err != nil {
return errors.Wrap(err, "error renaming binary")
}
logger.Printf("...Linux amd64")
if out, err := build("linux"); err != nil {
return errors.Wrapf(err, "error running build command: %s", out)
}
lfn := filepath.Join(cwd, "bins", linuxBinName)
if err := os.Rename(filepath.Join(wd, "psst"), lfn); err != nil {
return errors.Wrap(err, "error renaming binary")
}
// compress linux binary
logger.Printf("...compressing Linux binary\n")
d, err := ioutil.ReadFile(lfn)
if err != nil {
return errors.Wrap(err, "error reading linux binary")
}
f, err := os.Create(lfn + ".gz")
if err != nil {
return errors.Wrap(err, "error creating compressed linux binary")
}
defer f.Close()
gw := gzip.NewWriter(f)
defer gw.Close()
if _, err := gw.Write(d); err != nil {
return errors.Wrap(err, "error writing compressed linux binary")
}
return nil
}
// "copy" (link) a file if it doesn't exist
func cpifneeded(src, dest string) error {
if _, err := os.Stat(dest); err != nil {
if os.IsNotExist(err) {
return os.Link(src, dest)
}
return errors.Wrap(err, "error getting destination")
}
return nil
}
var bottleNameTmpl = template.Must(template.New("bn").Parse("psst-{{ .Release }}{{ if .HomebrewRevision }}_{{ .HomebrewRevision }}{{ end }}.{{ .OS }}.bottle.{{ .BottleRebuild }}.tar.gz"))
// createBottle synthetically creates a bottle tarball returning the bottle definitions, local bottle filenames and error if any
func createBottle() ([]bottleDefinition, []string, error) {
logger.Printf("Creating Homebrew bottle...\n")
cwd, err := os.Getwd()
if err != nil {
return nil, nil, errors.Wrap(err, "error getting working directory")
}
rver := regexp.MustCompile("([0-9.]+)").FindString(rname)
basepath := filepath.Join(".", "psst", rver)
binpath := filepath.Join(basepath, "bin")
if err := os.MkdirAll(binpath, os.ModeDir|0755); err != nil {
return nil, nil, errors.Wrap(err, "error creating bottle directory path")
}
// .brew
if err := os.MkdirAll(filepath.Join(basepath, ".brew"), os.ModeDir|0755); err != nil {
return nil, nil, errors.Wrap(err, "error creating .brew directory")
}
// copy README
if err := cpifneeded(filepath.Join(cwd, "..", "README.md"), filepath.Join(basepath, "README.md")); err != nil {
return nil, nil, errors.Wrap(err, "error copying README")
}
// copy binary
if err := cpifneeded(filepath.Join("bins", "psst-darwin"), filepath.Join(binpath, "psst")); err != nil {
return nil, nil, errors.Wrap(err, "error copying binary")
}
// INSTALL_RECEIPT.json
ir, err := ioutil.ReadFile("INSTALL_RECEIPT.json.tmpl")
if err != nil {
return nil, nil, errors.Wrap(err, "error reading install receipt template")
}
tmpl, err := template.New("instrcpt").Parse(string(ir))
d := struct {
Release string
OS string
HomebrewRevision uint
BottleRebuild uint
}{
Release: rver,
BottleRebuild: brbd,
}
if hbrev > 0 {
d.HomebrewRevision = hbrev
}
buf := bytes.NewBuffer([]byte{})
if err := tmpl.Execute(buf, &d); err != nil {
return nil, nil, errors.Wrap(err, "error executing install receipt template")
}
if err := ioutil.WriteFile(filepath.Join(basepath, "INSTALL_RECEIPT.json"), buf.Bytes(), os.ModePerm); err != nil {
return nil, nil, errors.Wrap(err, "error writing install receipt")
}
// tar it up
if err := os.MkdirAll("bottle", os.ModeDir|0755); err != nil {
return nil, nil, errors.Wrap(err, "error creating bottle directory")
}
buf = bytes.NewBuffer([]byte{})
d.OS = osvs[0]
if err := bottleNameTmpl.Execute(buf, &d); err != nil {
return nil, nil, errors.Wrap(err, "error executing bottle filename template: "+d.OS)
}
bp := filepath.Join("bottle", buf.String())
if err := archiver.TarGz.Make(bp, []string{"psst"}); err != nil {
return nil, nil, errors.Wrap(err, "error creating bottle tarball")
}
// Get hash of bottle, populate bottle definitions
bd, err := ioutil.ReadFile(bp)
if err != nil {
return nil, nil, errors.Wrap(err, "error reading bottle")
}
sha := fmt.Sprintf("%x", sha256.Sum256(bd))
bdefs := []bottleDefinition{
bottleDefinition{
Hash: sha,
TargetOS: osvs[0],
},
}
lps := []string{bp}
// link other bottles
for _, osn := range osvs[1:] {
d.OS = osn
buf = bytes.NewBuffer([]byte{})
if err := bottleNameTmpl.Execute(buf, &d); err != nil {
return nil, nil, errors.Wrap(err, "error executing bottle filename template: "+d.OS)
}
p := filepath.Join("bottle", buf.String())
if err := cpifneeded(bp, p); err != nil {
return nil, nil, errors.Wrap(err, "error linking bottle")
}
lps = append(lps, p)
bdefs = append(bdefs, bottleDefinition{
Hash: sha,
TargetOS: osn,
})
}
return bdefs, lps, nil
}
func createGHRelease(assetpaths []string) error {
rel := github.RepositoryRelease{
TagName: &rname,
//TargetCommitish: &commitsha,
Name: &rname,
Draft: &draft,
Prerelease: &prerelease,
}
nd, err := ioutil.ReadFile(npath)
if err != nil {
return errors.Wrap(err, "error reading release notes")
}
notes := string(nd)
rel.Body = ¬es
logger.Printf("Creating GitHub release")
ro, _, err := ghc.Repositories.CreateRelease(context.Background(), repoOwner, repoName, &rel)
if err != nil {
return errors.Wrap(err, "error creating release")
}
for _, ap := range assetpaths {
f, err := os.Open(ap)
if err != nil {
return errors.Wrap(err, "error opening asset")
}
defer f.Close()
logger.Printf("Uploading asset %v...", ap)
resp, _, err := ghc.Repositories.UploadReleaseAsset(context.Background(), repoOwner, repoName, *ro.ID, &github.UploadOptions{Name: filepath.Base(ap)}, f)
if err != nil {
return errors.Wrap(err, "error uploading asset")
}
logger.Printf("...%v\n", resp.GetBrowserDownloadURL())
}
return nil
}
func cleanup() error {
logger.Printf("Cleaning up")
for _, p := range []string{"./bins", "./bottle", "./psst"} {
if err := os.RemoveAll(p); err != nil {
return errors.Wrap(err, "error removing path")
}
}
return nil
}
func main() {
if dobuild {
if err := buildBins(); err != nil {
ferr("error building binaries: %v", err)
}
}
bds, lps, err := createBottle()
if err != nil {
ferr("error creating bottle: %v", err)
}
ftd := formulaTemplateData{}
ftd.populate(bds)
fd, err := generateFormula(ftd)
if err != nil {
ferr("error generating formula: %v", err)
}
if err = pushFormula(fd); err != nil {
ferr("error pushing formula: %v", err)
}
if err := createGitTag(); err != nil {
ferr("error creating tag: %v", err)
}
cwd, err := os.Getwd()
if err != nil {
ferr("error getting working directory: %v", err)
}
assetpaths := append([]string{filepath.Join(cwd, "bins", linuxBinName+".gz")}, lps...)
if err = createGHRelease(assetpaths); err != nil {
ferr("error creating GitHub release: %v", err)
}
if err := cleanup(); err != nil {
ferr("error cleaning up: %v", err)
}
logger.Printf("Done")
}
| checkFiles | identifier_name |
shard_consumer.go | /*
* Copyright (c) 2018 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
* associated documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
* NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// The implementation is derived from https://github.com/patrobinson/gokini
//
// Copyright 2018 Patrick robinson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package kcl
import (
"context"
"math"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/kinesis"
ks "github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go/service/kinesis/kinesisiface"
)
const (
// This is the initial state of a shard consumer. This causes the consumer to remain blocked until the all
// parent shards have been completed.
//WAITING_ON_PARENT_SHARDS ShardConsumerState = iota + 1
// This state is responsible for initializing the record processor with the shard information.
INITIALIZING = iota + 2
//
PROCESSING
SHUTDOWN_REQUESTED
SHUTTING_DOWN
SHUTDOWN_COMPLETE
// ErrCodeKMSThrottlingException is defined in the API Reference https://docs.aws.amazon.com/sdk-for-go/api/service/kinesis/#Kinesis.GetRecords
// But it's not a constant?
ErrCodeKMSThrottlingException = "KMSThrottlingException"
/**
* Indicates that the entire application is being shutdown, and if desired the record processor will be given a
* final chance to checkpoint. This state will not trigger a direct call to
* {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput)}, but
* instead depend on a different interface for backward compatibility.
*/
REQUESTED ShutdownReason = iota + 1
/**
* Terminate processing for this RecordProcessor (resharding use case).
* Indicates that the shard is closed and all records from the shard have been delivered to the application.
* Applications SHOULD checkpoint their progress to indicate that they have successfully processed all records
* from this shard and processing of child shards can be started.
*/
TERMINATE
/**
* Processing will be moved to a different record processor (fail over, load balancing use cases).
* Applications SHOULD NOT checkpoint their progress (as another record processor may have already started
* processing data).
*/
ZOMBIE
)
// Containers for the parameters to the IRecordProcessor
/**
* Reason the RecordProcessor is being shutdown.
* Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered).
* In case of a fail over, applications should NOT checkpoint as part of shutdown,
* since another record processor may have already started processing records for that shard.
* In case of termination (resharding use case), applications SHOULD checkpoint their progress to indicate
* that they have successfully processed all the records (processing of child shards can then begin).
*/
type ShutdownReason int
type InitializationInput struct {
ShardID string
ExtendedSequenceNumber *ExtendedSequenceNumber
PendingCheckpointSequenceNumber *ExtendedSequenceNumber
}
type ProcessRecordsInput struct {
CacheEntryTime *time.Time
CacheExitTime *time.Time
Records []*ks.Record
Checkpointer *RecordProcessorCheckpointer
MillisBehindLatest int64
}
type ShutdownInput struct {
ShutdownReason ShutdownReason
Checkpointer *RecordProcessorCheckpointer
}
var shutdownReasonMap = map[ShutdownReason]*string{
REQUESTED: aws.String("REQUESTED"),
TERMINATE: aws.String("TERMINATE"),
ZOMBIE: aws.String("ZOMBIE"),
}
func ShutdownReasonMessage(reason ShutdownReason) *string {
return shutdownReasonMap[reason]
}
// RecordProcessor is the interface for some callback functions invoked by KCL will
// The main task of using KCL is to provide implementation on RecordProcessor interface.
// Note: This is exactly the same interface as Amazon KCL RecordProcessor v2
type RecordProcessor interface {
/**
* Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance
* (via processRecords).
*
* @param initializationInput Provides information related to initialization
*/
Initialize(ctx context.Context, initializationInput *InitializationInput)
/**
* Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the
* application.
* Upon fail over, the new instance will get records with sequence number > checkpoint position
* for each partition key.
*
* @param processRecordsInput Provides the records to be processed as well as information and capabilities related
* to them (eg checkpointing).
*/
ProcessRecords(ctx context.Context, processRecordsInput *ProcessRecordsInput)
/**
* Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this
* RecordProcessor instance.
*
* <h2><b>Warning</b></h2>
*
* When the value of {@link ShutdownInput#getShutdownReason()} is
* {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you
* checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress.
*
* @param shutdownInput
* Provides information and capabilities (eg checkpointing) related to shutdown of this record processor.
*/
Shutdown(ctx context.Context, shutdownInput *ShutdownInput)
}
// ExtendedSequenceNumber represents a two-part sequence number for records aggregated by the Kinesis Producer Library.
//
// The KPL combines multiple user records into a single Kinesis record. Each user record therefore has an integer
// sub-sequence number, in addition to the regular sequence number of the Kinesis record. The sub-sequence number
// is used to checkpoint within an aggregated record.
type ExtendedSequenceNumber struct {
SequenceNumber *string
SubSequenceNumber int64
}
// ShardStatus represents a shard consumer's progress in a shard.
// NB: this type is passed around and mutated.
type ShardStatus struct {
ID string
ParentShardId string
// Checkpoint is the last checkpoint.
Checkpoint string
AssignedTo string
Mux *sync.Mutex
LeaseTimeout time.Time
// Shard Range
StartingSequenceNumber string
// child shard doesn't have end sequence number
EndingSequenceNumber string
}
func (ss *ShardStatus) GetLeaseOwner() string {
ss.Mux.Lock()
defer ss.Mux.Unlock()
return ss.AssignedTo
}
func (ss *ShardStatus) SetLeaseOwner(owner string) {
ss.Mux.Lock()
defer ss.Mux.Unlock()
ss.AssignedTo = owner
}
//type ShardConsumerState int
// ShardConsumer is responsible for consuming data records of a (specified) shard.
type ShardConsumer struct {
streamName string
shard *ShardStatus
kc kinesisiface.KinesisAPI
checkpointer Checkpointer
recordProcessor RecordProcessor
cfg *ConsumerConfig
stop *chan struct{}
consumerID string
metrics MonitoringService
//state ShardConsumerState
}
// run continously poll the shard for records, until the lease ends.
// entry point for consumer.
// Precondition: it currently has the lease on the shard.
func (sc *ShardConsumer) run(ctx context.Context) error {
defer sc.releaseLease(ctx)
log := sc.cfg.Log
shard := sc.shard
// If the shard is child shard, need to wait until the parent finished.
if err := sc.waitOnParentShard(ctx); err != nil {
// If parent shard has been deleted by Kinesis system already, just ignore the error.
if err != ErrSequenceIDNotFound {
log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", shard.ParentShardId, err)
return err
}
}
shardIterator, err := sc.getShardIterator(ctx)
if err != nil {
log.Errorf("Unable to get shard iterator for %s: %v", shard.ID, err)
return err
}
// Start processing events and notify record processor on shard and starting checkpoint
sc.recordProcessor.Initialize(ctx, &InitializationInput{
ShardID: shard.ID,
ExtendedSequenceNumber: &ExtendedSequenceNumber{SequenceNumber: aws.String(shard.Checkpoint)},
})
recordCheckpointer := NewRecordProcessorCheckpointer(shard, sc.checkpointer)
retriedErrors := 0
// TODO: add timeout
// each iter: { call GetRecords, call RecordProcessor, check lease status and control accordingly }
for {
if time.Now().UTC().After(shard.LeaseTimeout.Add(-time.Duration(sc.cfg.LeaseRefreshPeriodMillis) * time.Millisecond)) {
log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID)
err = sc.checkpointer.GetLease(ctx, shard, sc.consumerID)
if err != nil {
if err.Error() == ErrLeaseNotAquired {
log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID)
return nil
}
// log and return error
log.Errorf("Error in refreshing lease on shard: %s for worker: %s. Error: %+v",
shard.ID, sc.consumerID, err)
return err
}
}
getRecordsStartTime := time.Now()
log.Debugf("Trying to read %d record from iterator: %v", sc.cfg.MaxRecords, aws.StringValue(shardIterator))
getRecordsArgs := &kinesis.GetRecordsInput{
Limit: aws.Int64(int64(sc.cfg.MaxRecords)),
ShardIterator: shardIterator,
}
// Get records from stream and retry as needed
getResp, err := sc.kc.GetRecordsWithContext(ctx, getRecordsArgs)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == kinesis.ErrCodeProvisionedThroughputExceededException || awsErr.Code() == ErrCodeKMSThrottlingException {
log.Errorf("Error getting records from shard %v: %+v", shard.ID, err)
retriedErrors++
// exponential backoff
// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff
time.Sleep(time.Duration(math.Exp2(float64(retriedErrors))*100) * time.Millisecond)
continue
}
}
log.Errorf("Error getting records from Kinesis that cannot be retried: %+v Request: %s", err, getRecordsArgs)
return err
}
// Convert from nanoseconds to milliseconds
getRecordsTimeMS := time.Since(getRecordsStartTime) / 1000000
sc.metrics.RecordGetRecordsTime(shard.ID, float64(getRecordsTimeMS))
// reset the retry count after success
retriedErrors = 0
// IRecordProcessorCheckpointer | MillisBehindLatest: aws.Int64Value(getResp.MillisBehindLatest),
Checkpointer: recordCheckpointer,
}
recordLength := len(input.Records)
recordBytes := int64(0)
log.Debugf("Received %d records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest)
for _, r := range getResp.Records {
recordBytes += int64(len(r.Data))
}
if recordLength > 0 || sc.cfg.CallProcessRecordsEvenForEmptyRecordList {
processRecordsStartTime := time.Now()
// Delivery the events to the record processor
sc.recordProcessor.ProcessRecords(ctx, input)
// Convert from nanoseconds to milliseconds
processedRecordsTiming := time.Since(processRecordsStartTime) / 1000000
sc.metrics.RecordProcessRecordsTime(shard.ID, float64(processedRecordsTiming))
}
sc.metrics.IncrRecordsProcessed(shard.ID, recordLength)
sc.metrics.IncrBytesProcessed(shard.ID, recordBytes)
sc.metrics.MillisBehindLatest(shard.ID, float64(*getResp.MillisBehindLatest))
// Idle between each read, the user is responsible for checkpoint the progress
// This value is only used when no records are returned; if records are returned, it should immediately
// retrieve the next set of records.
if recordLength == 0 && aws.Int64Value(getResp.MillisBehindLatest) < int64(sc.cfg.IdleTimeBetweenReadsInMillis) {
time.Sleep(time.Duration(sc.cfg.IdleTimeBetweenReadsInMillis) * time.Millisecond)
}
// The shard has been closed, so no new records can be read from it
if getResp.NextShardIterator == nil {
log.Infof("Shard %s closed", shard.ID)
shutdownInput := &ShutdownInput{ShutdownReason: TERMINATE, Checkpointer: recordCheckpointer}
sc.recordProcessor.Shutdown(ctx, shutdownInput)
return nil
}
shardIterator = getResp.NextShardIterator
select {
case <-*sc.stop:
shutdownInput := &ShutdownInput{ShutdownReason: REQUESTED, Checkpointer: recordCheckpointer}
sc.recordProcessor.Shutdown(ctx, shutdownInput)
return nil
default:
}
}
}
// Need to wait until the parent shard finished
func (sc *ShardConsumer) waitOnParentShard(ctx context.Context) error {
shard := sc.shard
if len(shard.ParentShardId) == 0 {
return nil
}
pshard := &ShardStatus{
ID: shard.ParentShardId,
Mux: &sync.Mutex{},
}
for {
if err := sc.checkpointer.FetchCheckpoint(ctx, pshard); err != nil {
return err
}
// Parent shard is finished.
if pshard.Checkpoint == SHARD_END {
return nil
}
time.Sleep(time.Duration(sc.cfg.ParentShardPollIntervalMillis) * time.Millisecond)
}
}
func (sc *ShardConsumer) getShardIterator(ctx context.Context) (*string, error) {
shard := sc.shard
log := sc.cfg.Log
// Get checkpoint of the shard from dynamoDB
err := sc.checkpointer.FetchCheckpoint(ctx, shard)
if err != nil && err != ErrSequenceIDNotFound {
return nil, err
}
// If there isn't any checkpoint for the shard, use the configuration value.
// TODO: configurable
if shard.Checkpoint == "" {
initPos := sc.cfg.InitialPositionInStream
shardIteratorType := InitalPositionInStreamToShardIteratorType(initPos)
log.Debugf("No checkpoint recorded for shard: %v, starting with: %v", shard.ID,
aws.StringValue(shardIteratorType))
var shardIterArgs *kinesis.GetShardIteratorInput
if initPos == AT_TIMESTAMP {
shardIterArgs = &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: shardIteratorType,
Timestamp: sc.cfg.InitialPositionInStreamExtended.Timestamp,
StreamName: &sc.streamName,
}
} else {
shardIterArgs = &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: shardIteratorType,
StreamName: &sc.streamName,
}
}
iterResp, err := sc.kc.GetShardIterator(shardIterArgs)
if err != nil {
return nil, err
}
return iterResp.ShardIterator, nil
}
log.Debugf("Start shard: %v at checkpoint: %v", shard.ID, shard.Checkpoint)
shardIterArgs := &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: aws.String("AFTER_SEQUENCE_NUMBER"),
StartingSequenceNumber: &shard.Checkpoint,
StreamName: &sc.streamName,
}
iterResp, err := sc.kc.GetShardIterator(shardIterArgs)
if err != nil {
return nil, err
}
return iterResp.ShardIterator, nil
}
// Cleanup the internal lease cache
// Mutates shard { SetLeaseOwner }
func (sc *ShardConsumer) releaseLease(ctx context.Context) {
log := sc.cfg.Log
shard := sc.shard
log.Infof("Release lease for shard %s", shard.ID)
shard.SetLeaseOwner("")
// Release the lease by wiping out the lease owner for the shard
// Note: we don't need to do anything in case of error here and shard lease will eventuall be expired.
if err := sc.checkpointer.RemoveLeaseOwner(ctx, shard.ID); err != nil {
log.Errorf("Failed to release shard lease or shard: %s Error: %+v", shard.ID, err)
}
// reporting lease lose metrics
sc.metrics.LeaseLost(shard.ID)
} | input := &ProcessRecordsInput{
Records: getResp.Records, | random_line_split |
shard_consumer.go | /*
* Copyright (c) 2018 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
* associated documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
* NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// The implementation is derived from https://github.com/patrobinson/gokini
//
// Copyright 2018 Patrick robinson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package kcl
import (
"context"
"math"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/kinesis"
ks "github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go/service/kinesis/kinesisiface"
)
const (
// This is the initial state of a shard consumer. This causes the consumer to remain blocked until the all
// parent shards have been completed.
//WAITING_ON_PARENT_SHARDS ShardConsumerState = iota + 1
// This state is responsible for initializing the record processor with the shard information.
INITIALIZING = iota + 2
//
PROCESSING
SHUTDOWN_REQUESTED
SHUTTING_DOWN
SHUTDOWN_COMPLETE
// ErrCodeKMSThrottlingException is defined in the API Reference https://docs.aws.amazon.com/sdk-for-go/api/service/kinesis/#Kinesis.GetRecords
// But it's not a constant?
ErrCodeKMSThrottlingException = "KMSThrottlingException"
/**
* Indicates that the entire application is being shutdown, and if desired the record processor will be given a
* final chance to checkpoint. This state will not trigger a direct call to
* {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput)}, but
* instead depend on a different interface for backward compatibility.
*/
REQUESTED ShutdownReason = iota + 1
/**
* Terminate processing for this RecordProcessor (resharding use case).
* Indicates that the shard is closed and all records from the shard have been delivered to the application.
* Applications SHOULD checkpoint their progress to indicate that they have successfully processed all records
* from this shard and processing of child shards can be started.
*/
TERMINATE
/**
* Processing will be moved to a different record processor (fail over, load balancing use cases).
* Applications SHOULD NOT checkpoint their progress (as another record processor may have already started
* processing data).
*/
ZOMBIE
)
// Containers for the parameters to the IRecordProcessor
/**
* Reason the RecordProcessor is being shutdown.
* Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered).
* In case of a fail over, applications should NOT checkpoint as part of shutdown,
* since another record processor may have already started processing records for that shard.
* In case of termination (resharding use case), applications SHOULD checkpoint their progress to indicate
* that they have successfully processed all the records (processing of child shards can then begin).
*/
type ShutdownReason int
type InitializationInput struct {
ShardID string
ExtendedSequenceNumber *ExtendedSequenceNumber
PendingCheckpointSequenceNumber *ExtendedSequenceNumber
}
type ProcessRecordsInput struct {
CacheEntryTime *time.Time
CacheExitTime *time.Time
Records []*ks.Record
Checkpointer *RecordProcessorCheckpointer
MillisBehindLatest int64
}
type ShutdownInput struct {
ShutdownReason ShutdownReason
Checkpointer *RecordProcessorCheckpointer
}
var shutdownReasonMap = map[ShutdownReason]*string{
REQUESTED: aws.String("REQUESTED"),
TERMINATE: aws.String("TERMINATE"),
ZOMBIE: aws.String("ZOMBIE"),
}
func ShutdownReasonMessage(reason ShutdownReason) *string {
return shutdownReasonMap[reason]
}
// RecordProcessor is the interface for some callback functions invoked by KCL will
// The main task of using KCL is to provide implementation on RecordProcessor interface.
// Note: This is exactly the same interface as Amazon KCL RecordProcessor v2
type RecordProcessor interface {
/**
* Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance
* (via processRecords).
*
* @param initializationInput Provides information related to initialization
*/
Initialize(ctx context.Context, initializationInput *InitializationInput)
/**
* Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the
* application.
* Upon fail over, the new instance will get records with sequence number > checkpoint position
* for each partition key.
*
* @param processRecordsInput Provides the records to be processed as well as information and capabilities related
* to them (eg checkpointing).
*/
ProcessRecords(ctx context.Context, processRecordsInput *ProcessRecordsInput)
/**
* Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this
* RecordProcessor instance.
*
* <h2><b>Warning</b></h2>
*
* When the value of {@link ShutdownInput#getShutdownReason()} is
* {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you
* checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress.
*
* @param shutdownInput
* Provides information and capabilities (eg checkpointing) related to shutdown of this record processor.
*/
Shutdown(ctx context.Context, shutdownInput *ShutdownInput)
}
// ExtendedSequenceNumber represents a two-part sequence number for records aggregated by the Kinesis Producer Library.
//
// The KPL combines multiple user records into a single Kinesis record. Each user record therefore has an integer
// sub-sequence number, in addition to the regular sequence number of the Kinesis record. The sub-sequence number
// is used to checkpoint within an aggregated record.
type ExtendedSequenceNumber struct {
SequenceNumber *string
SubSequenceNumber int64
}
// ShardStatus represents a shard consumer's progress in a shard.
// NB: this type is passed around and mutated.
type ShardStatus struct {
ID string
ParentShardId string
// Checkpoint is the last checkpoint.
Checkpoint string
AssignedTo string
Mux *sync.Mutex
LeaseTimeout time.Time
// Shard Range
StartingSequenceNumber string
// child shard doesn't have end sequence number
EndingSequenceNumber string
}
func (ss *ShardStatus) GetLeaseOwner() string {
ss.Mux.Lock()
defer ss.Mux.Unlock()
return ss.AssignedTo
}
func (ss *ShardStatus) SetLeaseOwner(owner string) {
ss.Mux.Lock()
defer ss.Mux.Unlock()
ss.AssignedTo = owner
}
//type ShardConsumerState int
// ShardConsumer is responsible for consuming data records of a (specified) shard.
type ShardConsumer struct {
streamName string
shard *ShardStatus
kc kinesisiface.KinesisAPI
checkpointer Checkpointer
recordProcessor RecordProcessor
cfg *ConsumerConfig
stop *chan struct{}
consumerID string
metrics MonitoringService
//state ShardConsumerState
}
// run continously poll the shard for records, until the lease ends.
// entry point for consumer.
// Precondition: it currently has the lease on the shard.
func (sc *ShardConsumer) | (ctx context.Context) error {
defer sc.releaseLease(ctx)
log := sc.cfg.Log
shard := sc.shard
// If the shard is child shard, need to wait until the parent finished.
if err := sc.waitOnParentShard(ctx); err != nil {
// If parent shard has been deleted by Kinesis system already, just ignore the error.
if err != ErrSequenceIDNotFound {
log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", shard.ParentShardId, err)
return err
}
}
shardIterator, err := sc.getShardIterator(ctx)
if err != nil {
log.Errorf("Unable to get shard iterator for %s: %v", shard.ID, err)
return err
}
// Start processing events and notify record processor on shard and starting checkpoint
sc.recordProcessor.Initialize(ctx, &InitializationInput{
ShardID: shard.ID,
ExtendedSequenceNumber: &ExtendedSequenceNumber{SequenceNumber: aws.String(shard.Checkpoint)},
})
recordCheckpointer := NewRecordProcessorCheckpointer(shard, sc.checkpointer)
retriedErrors := 0
// TODO: add timeout
// each iter: { call GetRecords, call RecordProcessor, check lease status and control accordingly }
for {
if time.Now().UTC().After(shard.LeaseTimeout.Add(-time.Duration(sc.cfg.LeaseRefreshPeriodMillis) * time.Millisecond)) {
log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID)
err = sc.checkpointer.GetLease(ctx, shard, sc.consumerID)
if err != nil {
if err.Error() == ErrLeaseNotAquired {
log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID)
return nil
}
// log and return error
log.Errorf("Error in refreshing lease on shard: %s for worker: %s. Error: %+v",
shard.ID, sc.consumerID, err)
return err
}
}
getRecordsStartTime := time.Now()
log.Debugf("Trying to read %d record from iterator: %v", sc.cfg.MaxRecords, aws.StringValue(shardIterator))
getRecordsArgs := &kinesis.GetRecordsInput{
Limit: aws.Int64(int64(sc.cfg.MaxRecords)),
ShardIterator: shardIterator,
}
// Get records from stream and retry as needed
getResp, err := sc.kc.GetRecordsWithContext(ctx, getRecordsArgs)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == kinesis.ErrCodeProvisionedThroughputExceededException || awsErr.Code() == ErrCodeKMSThrottlingException {
log.Errorf("Error getting records from shard %v: %+v", shard.ID, err)
retriedErrors++
// exponential backoff
// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff
time.Sleep(time.Duration(math.Exp2(float64(retriedErrors))*100) * time.Millisecond)
continue
}
}
log.Errorf("Error getting records from Kinesis that cannot be retried: %+v Request: %s", err, getRecordsArgs)
return err
}
// Convert from nanoseconds to milliseconds
getRecordsTimeMS := time.Since(getRecordsStartTime) / 1000000
sc.metrics.RecordGetRecordsTime(shard.ID, float64(getRecordsTimeMS))
// reset the retry count after success
retriedErrors = 0
// IRecordProcessorCheckpointer
input := &ProcessRecordsInput{
Records: getResp.Records,
MillisBehindLatest: aws.Int64Value(getResp.MillisBehindLatest),
Checkpointer: recordCheckpointer,
}
recordLength := len(input.Records)
recordBytes := int64(0)
log.Debugf("Received %d records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest)
for _, r := range getResp.Records {
recordBytes += int64(len(r.Data))
}
if recordLength > 0 || sc.cfg.CallProcessRecordsEvenForEmptyRecordList {
processRecordsStartTime := time.Now()
// Delivery the events to the record processor
sc.recordProcessor.ProcessRecords(ctx, input)
// Convert from nanoseconds to milliseconds
processedRecordsTiming := time.Since(processRecordsStartTime) / 1000000
sc.metrics.RecordProcessRecordsTime(shard.ID, float64(processedRecordsTiming))
}
sc.metrics.IncrRecordsProcessed(shard.ID, recordLength)
sc.metrics.IncrBytesProcessed(shard.ID, recordBytes)
sc.metrics.MillisBehindLatest(shard.ID, float64(*getResp.MillisBehindLatest))
// Idle between each read, the user is responsible for checkpoint the progress
// This value is only used when no records are returned; if records are returned, it should immediately
// retrieve the next set of records.
if recordLength == 0 && aws.Int64Value(getResp.MillisBehindLatest) < int64(sc.cfg.IdleTimeBetweenReadsInMillis) {
time.Sleep(time.Duration(sc.cfg.IdleTimeBetweenReadsInMillis) * time.Millisecond)
}
// The shard has been closed, so no new records can be read from it
if getResp.NextShardIterator == nil {
log.Infof("Shard %s closed", shard.ID)
shutdownInput := &ShutdownInput{ShutdownReason: TERMINATE, Checkpointer: recordCheckpointer}
sc.recordProcessor.Shutdown(ctx, shutdownInput)
return nil
}
shardIterator = getResp.NextShardIterator
select {
case <-*sc.stop:
shutdownInput := &ShutdownInput{ShutdownReason: REQUESTED, Checkpointer: recordCheckpointer}
sc.recordProcessor.Shutdown(ctx, shutdownInput)
return nil
default:
}
}
}
// Need to wait until the parent shard finished
func (sc *ShardConsumer) waitOnParentShard(ctx context.Context) error {
shard := sc.shard
if len(shard.ParentShardId) == 0 {
return nil
}
pshard := &ShardStatus{
ID: shard.ParentShardId,
Mux: &sync.Mutex{},
}
for {
if err := sc.checkpointer.FetchCheckpoint(ctx, pshard); err != nil {
return err
}
// Parent shard is finished.
if pshard.Checkpoint == SHARD_END {
return nil
}
time.Sleep(time.Duration(sc.cfg.ParentShardPollIntervalMillis) * time.Millisecond)
}
}
func (sc *ShardConsumer) getShardIterator(ctx context.Context) (*string, error) {
shard := sc.shard
log := sc.cfg.Log
// Get checkpoint of the shard from dynamoDB
err := sc.checkpointer.FetchCheckpoint(ctx, shard)
if err != nil && err != ErrSequenceIDNotFound {
return nil, err
}
// If there isn't any checkpoint for the shard, use the configuration value.
// TODO: configurable
if shard.Checkpoint == "" {
initPos := sc.cfg.InitialPositionInStream
shardIteratorType := InitalPositionInStreamToShardIteratorType(initPos)
log.Debugf("No checkpoint recorded for shard: %v, starting with: %v", shard.ID,
aws.StringValue(shardIteratorType))
var shardIterArgs *kinesis.GetShardIteratorInput
if initPos == AT_TIMESTAMP {
shardIterArgs = &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: shardIteratorType,
Timestamp: sc.cfg.InitialPositionInStreamExtended.Timestamp,
StreamName: &sc.streamName,
}
} else {
shardIterArgs = &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: shardIteratorType,
StreamName: &sc.streamName,
}
}
iterResp, err := sc.kc.GetShardIterator(shardIterArgs)
if err != nil {
return nil, err
}
return iterResp.ShardIterator, nil
}
log.Debugf("Start shard: %v at checkpoint: %v", shard.ID, shard.Checkpoint)
shardIterArgs := &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: aws.String("AFTER_SEQUENCE_NUMBER"),
StartingSequenceNumber: &shard.Checkpoint,
StreamName: &sc.streamName,
}
iterResp, err := sc.kc.GetShardIterator(shardIterArgs)
if err != nil {
return nil, err
}
return iterResp.ShardIterator, nil
}
// Cleanup the internal lease cache
// Mutates shard { SetLeaseOwner }
func (sc *ShardConsumer) releaseLease(ctx context.Context) {
log := sc.cfg.Log
shard := sc.shard
log.Infof("Release lease for shard %s", shard.ID)
shard.SetLeaseOwner("")
// Release the lease by wiping out the lease owner for the shard
// Note: we don't need to do anything in case of error here and shard lease will eventuall be expired.
if err := sc.checkpointer.RemoveLeaseOwner(ctx, shard.ID); err != nil {
log.Errorf("Failed to release shard lease or shard: %s Error: %+v", shard.ID, err)
}
// reporting lease lose metrics
sc.metrics.LeaseLost(shard.ID)
}
| run | identifier_name |
shard_consumer.go | /*
* Copyright (c) 2018 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
* associated documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
* NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// The implementation is derived from https://github.com/patrobinson/gokini
//
// Copyright 2018 Patrick robinson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package kcl
import (
"context"
"math"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/kinesis"
ks "github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go/service/kinesis/kinesisiface"
)
const (
// This is the initial state of a shard consumer. This causes the consumer to remain blocked until the all
// parent shards have been completed.
//WAITING_ON_PARENT_SHARDS ShardConsumerState = iota + 1
// This state is responsible for initializing the record processor with the shard information.
INITIALIZING = iota + 2
//
PROCESSING
SHUTDOWN_REQUESTED
SHUTTING_DOWN
SHUTDOWN_COMPLETE
// ErrCodeKMSThrottlingException is defined in the API Reference https://docs.aws.amazon.com/sdk-for-go/api/service/kinesis/#Kinesis.GetRecords
// But it's not a constant?
ErrCodeKMSThrottlingException = "KMSThrottlingException"
/**
* Indicates that the entire application is being shutdown, and if desired the record processor will be given a
* final chance to checkpoint. This state will not trigger a direct call to
* {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput)}, but
* instead depend on a different interface for backward compatibility.
*/
REQUESTED ShutdownReason = iota + 1
/**
* Terminate processing for this RecordProcessor (resharding use case).
* Indicates that the shard is closed and all records from the shard have been delivered to the application.
* Applications SHOULD checkpoint their progress to indicate that they have successfully processed all records
* from this shard and processing of child shards can be started.
*/
TERMINATE
/**
* Processing will be moved to a different record processor (fail over, load balancing use cases).
* Applications SHOULD NOT checkpoint their progress (as another record processor may have already started
* processing data).
*/
ZOMBIE
)
// Containers for the parameters to the IRecordProcessor
/**
* Reason the RecordProcessor is being shutdown.
* Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered).
* In case of a fail over, applications should NOT checkpoint as part of shutdown,
* since another record processor may have already started processing records for that shard.
* In case of termination (resharding use case), applications SHOULD checkpoint their progress to indicate
* that they have successfully processed all the records (processing of child shards can then begin).
*/
type ShutdownReason int
type InitializationInput struct {
ShardID string
ExtendedSequenceNumber *ExtendedSequenceNumber
PendingCheckpointSequenceNumber *ExtendedSequenceNumber
}
type ProcessRecordsInput struct {
CacheEntryTime *time.Time
CacheExitTime *time.Time
Records []*ks.Record
Checkpointer *RecordProcessorCheckpointer
MillisBehindLatest int64
}
type ShutdownInput struct {
ShutdownReason ShutdownReason
Checkpointer *RecordProcessorCheckpointer
}
var shutdownReasonMap = map[ShutdownReason]*string{
REQUESTED: aws.String("REQUESTED"),
TERMINATE: aws.String("TERMINATE"),
ZOMBIE: aws.String("ZOMBIE"),
}
func ShutdownReasonMessage(reason ShutdownReason) *string {
return shutdownReasonMap[reason]
}
// RecordProcessor is the interface for some callback functions invoked by KCL will
// The main task of using KCL is to provide implementation on RecordProcessor interface.
// Note: This is exactly the same interface as Amazon KCL RecordProcessor v2
type RecordProcessor interface {
/**
* Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance
* (via processRecords).
*
* @param initializationInput Provides information related to initialization
*/
Initialize(ctx context.Context, initializationInput *InitializationInput)
/**
* Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the
* application.
* Upon fail over, the new instance will get records with sequence number > checkpoint position
* for each partition key.
*
* @param processRecordsInput Provides the records to be processed as well as information and capabilities related
* to them (eg checkpointing).
*/
ProcessRecords(ctx context.Context, processRecordsInput *ProcessRecordsInput)
/**
* Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this
* RecordProcessor instance.
*
* <h2><b>Warning</b></h2>
*
* When the value of {@link ShutdownInput#getShutdownReason()} is
* {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you
* checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress.
*
* @param shutdownInput
* Provides information and capabilities (eg checkpointing) related to shutdown of this record processor.
*/
Shutdown(ctx context.Context, shutdownInput *ShutdownInput)
}
// ExtendedSequenceNumber represents a two-part sequence number for records aggregated by the Kinesis Producer Library.
//
// The KPL combines multiple user records into a single Kinesis record. Each user record therefore has an integer
// sub-sequence number, in addition to the regular sequence number of the Kinesis record. The sub-sequence number
// is used to checkpoint within an aggregated record.
type ExtendedSequenceNumber struct {
SequenceNumber *string
SubSequenceNumber int64
}
// ShardStatus represents a shard consumer's progress in a shard.
// NB: this type is passed around and mutated.
type ShardStatus struct {
ID string
ParentShardId string
// Checkpoint is the last checkpoint.
Checkpoint string
AssignedTo string
Mux *sync.Mutex
LeaseTimeout time.Time
// Shard Range
StartingSequenceNumber string
// child shard doesn't have end sequence number
EndingSequenceNumber string
}
func (ss *ShardStatus) GetLeaseOwner() string {
ss.Mux.Lock()
defer ss.Mux.Unlock()
return ss.AssignedTo
}
func (ss *ShardStatus) SetLeaseOwner(owner string) {
ss.Mux.Lock()
defer ss.Mux.Unlock()
ss.AssignedTo = owner
}
//type ShardConsumerState int
// ShardConsumer is responsible for consuming data records of a (specified) shard.
type ShardConsumer struct {
streamName string
shard *ShardStatus
kc kinesisiface.KinesisAPI
checkpointer Checkpointer
recordProcessor RecordProcessor
cfg *ConsumerConfig
stop *chan struct{}
consumerID string
metrics MonitoringService
//state ShardConsumerState
}
// run continously poll the shard for records, until the lease ends.
// entry point for consumer.
// Precondition: it currently has the lease on the shard.
func (sc *ShardConsumer) run(ctx context.Context) error {
defer sc.releaseLease(ctx)
log := sc.cfg.Log
shard := sc.shard
// If the shard is child shard, need to wait until the parent finished.
if err := sc.waitOnParentShard(ctx); err != nil {
// If parent shard has been deleted by Kinesis system already, just ignore the error.
if err != ErrSequenceIDNotFound {
log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", shard.ParentShardId, err)
return err
}
}
shardIterator, err := sc.getShardIterator(ctx)
if err != nil {
log.Errorf("Unable to get shard iterator for %s: %v", shard.ID, err)
return err
}
// Start processing events and notify record processor on shard and starting checkpoint
sc.recordProcessor.Initialize(ctx, &InitializationInput{
ShardID: shard.ID,
ExtendedSequenceNumber: &ExtendedSequenceNumber{SequenceNumber: aws.String(shard.Checkpoint)},
})
recordCheckpointer := NewRecordProcessorCheckpointer(shard, sc.checkpointer)
retriedErrors := 0
// TODO: add timeout
// each iter: { call GetRecords, call RecordProcessor, check lease status and control accordingly }
for {
if time.Now().UTC().After(shard.LeaseTimeout.Add(-time.Duration(sc.cfg.LeaseRefreshPeriodMillis) * time.Millisecond)) {
log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID)
err = sc.checkpointer.GetLease(ctx, shard, sc.consumerID)
if err != nil {
if err.Error() == ErrLeaseNotAquired {
log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID)
return nil
}
// log and return error
log.Errorf("Error in refreshing lease on shard: %s for worker: %s. Error: %+v",
shard.ID, sc.consumerID, err)
return err
}
}
getRecordsStartTime := time.Now()
log.Debugf("Trying to read %d record from iterator: %v", sc.cfg.MaxRecords, aws.StringValue(shardIterator))
getRecordsArgs := &kinesis.GetRecordsInput{
Limit: aws.Int64(int64(sc.cfg.MaxRecords)),
ShardIterator: shardIterator,
}
// Get records from stream and retry as needed
getResp, err := sc.kc.GetRecordsWithContext(ctx, getRecordsArgs)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == kinesis.ErrCodeProvisionedThroughputExceededException || awsErr.Code() == ErrCodeKMSThrottlingException {
log.Errorf("Error getting records from shard %v: %+v", shard.ID, err)
retriedErrors++
// exponential backoff
// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff
time.Sleep(time.Duration(math.Exp2(float64(retriedErrors))*100) * time.Millisecond)
continue
}
}
log.Errorf("Error getting records from Kinesis that cannot be retried: %+v Request: %s", err, getRecordsArgs)
return err
}
// Convert from nanoseconds to milliseconds
getRecordsTimeMS := time.Since(getRecordsStartTime) / 1000000
sc.metrics.RecordGetRecordsTime(shard.ID, float64(getRecordsTimeMS))
// reset the retry count after success
retriedErrors = 0
// IRecordProcessorCheckpointer
input := &ProcessRecordsInput{
Records: getResp.Records,
MillisBehindLatest: aws.Int64Value(getResp.MillisBehindLatest),
Checkpointer: recordCheckpointer,
}
recordLength := len(input.Records)
recordBytes := int64(0)
log.Debugf("Received %d records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest)
for _, r := range getResp.Records {
recordBytes += int64(len(r.Data))
}
if recordLength > 0 || sc.cfg.CallProcessRecordsEvenForEmptyRecordList {
processRecordsStartTime := time.Now()
// Delivery the events to the record processor
sc.recordProcessor.ProcessRecords(ctx, input)
// Convert from nanoseconds to milliseconds
processedRecordsTiming := time.Since(processRecordsStartTime) / 1000000
sc.metrics.RecordProcessRecordsTime(shard.ID, float64(processedRecordsTiming))
}
sc.metrics.IncrRecordsProcessed(shard.ID, recordLength)
sc.metrics.IncrBytesProcessed(shard.ID, recordBytes)
sc.metrics.MillisBehindLatest(shard.ID, float64(*getResp.MillisBehindLatest))
// Idle between each read, the user is responsible for checkpoint the progress
// This value is only used when no records are returned; if records are returned, it should immediately
// retrieve the next set of records.
if recordLength == 0 && aws.Int64Value(getResp.MillisBehindLatest) < int64(sc.cfg.IdleTimeBetweenReadsInMillis) {
time.Sleep(time.Duration(sc.cfg.IdleTimeBetweenReadsInMillis) * time.Millisecond)
}
// The shard has been closed, so no new records can be read from it
if getResp.NextShardIterator == nil {
log.Infof("Shard %s closed", shard.ID)
shutdownInput := &ShutdownInput{ShutdownReason: TERMINATE, Checkpointer: recordCheckpointer}
sc.recordProcessor.Shutdown(ctx, shutdownInput)
return nil
}
shardIterator = getResp.NextShardIterator
select {
case <-*sc.stop:
shutdownInput := &ShutdownInput{ShutdownReason: REQUESTED, Checkpointer: recordCheckpointer}
sc.recordProcessor.Shutdown(ctx, shutdownInput)
return nil
default:
}
}
}
// Need to wait until the parent shard finished
func (sc *ShardConsumer) waitOnParentShard(ctx context.Context) error {
shard := sc.shard
if len(shard.ParentShardId) == 0 {
return nil
}
pshard := &ShardStatus{
ID: shard.ParentShardId,
Mux: &sync.Mutex{},
}
for {
if err := sc.checkpointer.FetchCheckpoint(ctx, pshard); err != nil {
return err
}
// Parent shard is finished.
if pshard.Checkpoint == SHARD_END {
return nil
}
time.Sleep(time.Duration(sc.cfg.ParentShardPollIntervalMillis) * time.Millisecond)
}
}
func (sc *ShardConsumer) getShardIterator(ctx context.Context) (*string, error) {
shard := sc.shard
log := sc.cfg.Log
// Get checkpoint of the shard from dynamoDB
err := sc.checkpointer.FetchCheckpoint(ctx, shard)
if err != nil && err != ErrSequenceIDNotFound {
return nil, err
}
// If there isn't any checkpoint for the shard, use the configuration value.
// TODO: configurable
if shard.Checkpoint == "" {
initPos := sc.cfg.InitialPositionInStream
shardIteratorType := InitalPositionInStreamToShardIteratorType(initPos)
log.Debugf("No checkpoint recorded for shard: %v, starting with: %v", shard.ID,
aws.StringValue(shardIteratorType))
var shardIterArgs *kinesis.GetShardIteratorInput
if initPos == AT_TIMESTAMP {
shardIterArgs = &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: shardIteratorType,
Timestamp: sc.cfg.InitialPositionInStreamExtended.Timestamp,
StreamName: &sc.streamName,
}
} else {
shardIterArgs = &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: shardIteratorType,
StreamName: &sc.streamName,
}
}
iterResp, err := sc.kc.GetShardIterator(shardIterArgs)
if err != nil |
return iterResp.ShardIterator, nil
}
log.Debugf("Start shard: %v at checkpoint: %v", shard.ID, shard.Checkpoint)
shardIterArgs := &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: aws.String("AFTER_SEQUENCE_NUMBER"),
StartingSequenceNumber: &shard.Checkpoint,
StreamName: &sc.streamName,
}
iterResp, err := sc.kc.GetShardIterator(shardIterArgs)
if err != nil {
return nil, err
}
return iterResp.ShardIterator, nil
}
// Cleanup the internal lease cache
// Mutates shard { SetLeaseOwner }
func (sc *ShardConsumer) releaseLease(ctx context.Context) {
log := sc.cfg.Log
shard := sc.shard
log.Infof("Release lease for shard %s", shard.ID)
shard.SetLeaseOwner("")
// Release the lease by wiping out the lease owner for the shard
// Note: we don't need to do anything in case of error here and shard lease will eventuall be expired.
if err := sc.checkpointer.RemoveLeaseOwner(ctx, shard.ID); err != nil {
log.Errorf("Failed to release shard lease or shard: %s Error: %+v", shard.ID, err)
}
// reporting lease lose metrics
sc.metrics.LeaseLost(shard.ID)
}
| {
return nil, err
} | conditional_block |
shard_consumer.go | /*
* Copyright (c) 2018 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
* associated documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
* NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// The implementation is derived from https://github.com/patrobinson/gokini
//
// Copyright 2018 Patrick robinson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package kcl
import (
"context"
"math"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/kinesis"
ks "github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go/service/kinesis/kinesisiface"
)
const (
// This is the initial state of a shard consumer. This causes the consumer to remain blocked until the all
// parent shards have been completed.
//WAITING_ON_PARENT_SHARDS ShardConsumerState = iota + 1
// This state is responsible for initializing the record processor with the shard information.
INITIALIZING = iota + 2
//
PROCESSING
SHUTDOWN_REQUESTED
SHUTTING_DOWN
SHUTDOWN_COMPLETE
// ErrCodeKMSThrottlingException is defined in the API Reference https://docs.aws.amazon.com/sdk-for-go/api/service/kinesis/#Kinesis.GetRecords
// But it's not a constant?
ErrCodeKMSThrottlingException = "KMSThrottlingException"
/**
* Indicates that the entire application is being shutdown, and if desired the record processor will be given a
* final chance to checkpoint. This state will not trigger a direct call to
* {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput)}, but
* instead depend on a different interface for backward compatibility.
*/
REQUESTED ShutdownReason = iota + 1
/**
* Terminate processing for this RecordProcessor (resharding use case).
* Indicates that the shard is closed and all records from the shard have been delivered to the application.
* Applications SHOULD checkpoint their progress to indicate that they have successfully processed all records
* from this shard and processing of child shards can be started.
*/
TERMINATE
/**
* Processing will be moved to a different record processor (fail over, load balancing use cases).
* Applications SHOULD NOT checkpoint their progress (as another record processor may have already started
* processing data).
*/
ZOMBIE
)
// Containers for the parameters to the IRecordProcessor
/**
* Reason the RecordProcessor is being shutdown.
* Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered).
* In case of a fail over, applications should NOT checkpoint as part of shutdown,
* since another record processor may have already started processing records for that shard.
* In case of termination (resharding use case), applications SHOULD checkpoint their progress to indicate
* that they have successfully processed all the records (processing of child shards can then begin).
*/
type ShutdownReason int
type InitializationInput struct {
ShardID string
ExtendedSequenceNumber *ExtendedSequenceNumber
PendingCheckpointSequenceNumber *ExtendedSequenceNumber
}
type ProcessRecordsInput struct {
CacheEntryTime *time.Time
CacheExitTime *time.Time
Records []*ks.Record
Checkpointer *RecordProcessorCheckpointer
MillisBehindLatest int64
}
type ShutdownInput struct {
ShutdownReason ShutdownReason
Checkpointer *RecordProcessorCheckpointer
}
var shutdownReasonMap = map[ShutdownReason]*string{
REQUESTED: aws.String("REQUESTED"),
TERMINATE: aws.String("TERMINATE"),
ZOMBIE: aws.String("ZOMBIE"),
}
func ShutdownReasonMessage(reason ShutdownReason) *string {
return shutdownReasonMap[reason]
}
// RecordProcessor is the interface for some callback functions invoked by KCL will
// The main task of using KCL is to provide implementation on RecordProcessor interface.
// Note: This is exactly the same interface as Amazon KCL RecordProcessor v2
type RecordProcessor interface {
/**
* Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance
* (via processRecords).
*
* @param initializationInput Provides information related to initialization
*/
Initialize(ctx context.Context, initializationInput *InitializationInput)
/**
* Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the
* application.
* Upon fail over, the new instance will get records with sequence number > checkpoint position
* for each partition key.
*
* @param processRecordsInput Provides the records to be processed as well as information and capabilities related
* to them (eg checkpointing).
*/
ProcessRecords(ctx context.Context, processRecordsInput *ProcessRecordsInput)
/**
* Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this
* RecordProcessor instance.
*
* <h2><b>Warning</b></h2>
*
* When the value of {@link ShutdownInput#getShutdownReason()} is
* {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you
* checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress.
*
* @param shutdownInput
* Provides information and capabilities (eg checkpointing) related to shutdown of this record processor.
*/
Shutdown(ctx context.Context, shutdownInput *ShutdownInput)
}
// ExtendedSequenceNumber represents a two-part sequence number for records aggregated by the Kinesis Producer Library.
//
// The KPL combines multiple user records into a single Kinesis record. Each user record therefore has an integer
// sub-sequence number, in addition to the regular sequence number of the Kinesis record. The sub-sequence number
// is used to checkpoint within an aggregated record.
type ExtendedSequenceNumber struct {
SequenceNumber *string
SubSequenceNumber int64
}
// ShardStatus represents a shard consumer's progress in a shard.
// NB: this type is passed around and mutated.
type ShardStatus struct {
ID string
ParentShardId string
// Checkpoint is the last checkpoint.
Checkpoint string
AssignedTo string
Mux *sync.Mutex
LeaseTimeout time.Time
// Shard Range
StartingSequenceNumber string
// child shard doesn't have end sequence number
EndingSequenceNumber string
}
func (ss *ShardStatus) GetLeaseOwner() string {
ss.Mux.Lock()
defer ss.Mux.Unlock()
return ss.AssignedTo
}
func (ss *ShardStatus) SetLeaseOwner(owner string) {
ss.Mux.Lock()
defer ss.Mux.Unlock()
ss.AssignedTo = owner
}
//type ShardConsumerState int
// ShardConsumer is responsible for consuming data records of a (specified) shard.
type ShardConsumer struct {
streamName string
shard *ShardStatus
kc kinesisiface.KinesisAPI
checkpointer Checkpointer
recordProcessor RecordProcessor
cfg *ConsumerConfig
stop *chan struct{}
consumerID string
metrics MonitoringService
//state ShardConsumerState
}
// run continously poll the shard for records, until the lease ends.
// entry point for consumer.
// Precondition: it currently has the lease on the shard.
func (sc *ShardConsumer) run(ctx context.Context) error |
// Need to wait until the parent shard finished
func (sc *ShardConsumer) waitOnParentShard(ctx context.Context) error {
shard := sc.shard
if len(shard.ParentShardId) == 0 {
return nil
}
pshard := &ShardStatus{
ID: shard.ParentShardId,
Mux: &sync.Mutex{},
}
for {
if err := sc.checkpointer.FetchCheckpoint(ctx, pshard); err != nil {
return err
}
// Parent shard is finished.
if pshard.Checkpoint == SHARD_END {
return nil
}
time.Sleep(time.Duration(sc.cfg.ParentShardPollIntervalMillis) * time.Millisecond)
}
}
func (sc *ShardConsumer) getShardIterator(ctx context.Context) (*string, error) {
shard := sc.shard
log := sc.cfg.Log
// Get checkpoint of the shard from dynamoDB
err := sc.checkpointer.FetchCheckpoint(ctx, shard)
if err != nil && err != ErrSequenceIDNotFound {
return nil, err
}
// If there isn't any checkpoint for the shard, use the configuration value.
// TODO: configurable
if shard.Checkpoint == "" {
initPos := sc.cfg.InitialPositionInStream
shardIteratorType := InitalPositionInStreamToShardIteratorType(initPos)
log.Debugf("No checkpoint recorded for shard: %v, starting with: %v", shard.ID,
aws.StringValue(shardIteratorType))
var shardIterArgs *kinesis.GetShardIteratorInput
if initPos == AT_TIMESTAMP {
shardIterArgs = &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: shardIteratorType,
Timestamp: sc.cfg.InitialPositionInStreamExtended.Timestamp,
StreamName: &sc.streamName,
}
} else {
shardIterArgs = &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: shardIteratorType,
StreamName: &sc.streamName,
}
}
iterResp, err := sc.kc.GetShardIterator(shardIterArgs)
if err != nil {
return nil, err
}
return iterResp.ShardIterator, nil
}
log.Debugf("Start shard: %v at checkpoint: %v", shard.ID, shard.Checkpoint)
shardIterArgs := &kinesis.GetShardIteratorInput{
ShardId: &shard.ID,
ShardIteratorType: aws.String("AFTER_SEQUENCE_NUMBER"),
StartingSequenceNumber: &shard.Checkpoint,
StreamName: &sc.streamName,
}
iterResp, err := sc.kc.GetShardIterator(shardIterArgs)
if err != nil {
return nil, err
}
return iterResp.ShardIterator, nil
}
// Cleanup the internal lease cache
// Mutates shard { SetLeaseOwner }
func (sc *ShardConsumer) releaseLease(ctx context.Context) {
log := sc.cfg.Log
shard := sc.shard
log.Infof("Release lease for shard %s", shard.ID)
shard.SetLeaseOwner("")
// Release the lease by wiping out the lease owner for the shard
// Note: we don't need to do anything in case of error here and shard lease will eventuall be expired.
if err := sc.checkpointer.RemoveLeaseOwner(ctx, shard.ID); err != nil {
log.Errorf("Failed to release shard lease or shard: %s Error: %+v", shard.ID, err)
}
// reporting lease lose metrics
sc.metrics.LeaseLost(shard.ID)
}
| {
defer sc.releaseLease(ctx)
log := sc.cfg.Log
shard := sc.shard
// If the shard is child shard, need to wait until the parent finished.
if err := sc.waitOnParentShard(ctx); err != nil {
// If parent shard has been deleted by Kinesis system already, just ignore the error.
if err != ErrSequenceIDNotFound {
log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", shard.ParentShardId, err)
return err
}
}
shardIterator, err := sc.getShardIterator(ctx)
if err != nil {
log.Errorf("Unable to get shard iterator for %s: %v", shard.ID, err)
return err
}
// Start processing events and notify record processor on shard and starting checkpoint
sc.recordProcessor.Initialize(ctx, &InitializationInput{
ShardID: shard.ID,
ExtendedSequenceNumber: &ExtendedSequenceNumber{SequenceNumber: aws.String(shard.Checkpoint)},
})
recordCheckpointer := NewRecordProcessorCheckpointer(shard, sc.checkpointer)
retriedErrors := 0
// TODO: add timeout
// each iter: { call GetRecords, call RecordProcessor, check lease status and control accordingly }
for {
if time.Now().UTC().After(shard.LeaseTimeout.Add(-time.Duration(sc.cfg.LeaseRefreshPeriodMillis) * time.Millisecond)) {
log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID)
err = sc.checkpointer.GetLease(ctx, shard, sc.consumerID)
if err != nil {
if err.Error() == ErrLeaseNotAquired {
log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID)
return nil
}
// log and return error
log.Errorf("Error in refreshing lease on shard: %s for worker: %s. Error: %+v",
shard.ID, sc.consumerID, err)
return err
}
}
getRecordsStartTime := time.Now()
log.Debugf("Trying to read %d record from iterator: %v", sc.cfg.MaxRecords, aws.StringValue(shardIterator))
getRecordsArgs := &kinesis.GetRecordsInput{
Limit: aws.Int64(int64(sc.cfg.MaxRecords)),
ShardIterator: shardIterator,
}
// Get records from stream and retry as needed
getResp, err := sc.kc.GetRecordsWithContext(ctx, getRecordsArgs)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == kinesis.ErrCodeProvisionedThroughputExceededException || awsErr.Code() == ErrCodeKMSThrottlingException {
log.Errorf("Error getting records from shard %v: %+v", shard.ID, err)
retriedErrors++
// exponential backoff
// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff
time.Sleep(time.Duration(math.Exp2(float64(retriedErrors))*100) * time.Millisecond)
continue
}
}
log.Errorf("Error getting records from Kinesis that cannot be retried: %+v Request: %s", err, getRecordsArgs)
return err
}
// Convert from nanoseconds to milliseconds
getRecordsTimeMS := time.Since(getRecordsStartTime) / 1000000
sc.metrics.RecordGetRecordsTime(shard.ID, float64(getRecordsTimeMS))
// reset the retry count after success
retriedErrors = 0
// IRecordProcessorCheckpointer
input := &ProcessRecordsInput{
Records: getResp.Records,
MillisBehindLatest: aws.Int64Value(getResp.MillisBehindLatest),
Checkpointer: recordCheckpointer,
}
recordLength := len(input.Records)
recordBytes := int64(0)
log.Debugf("Received %d records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest)
for _, r := range getResp.Records {
recordBytes += int64(len(r.Data))
}
if recordLength > 0 || sc.cfg.CallProcessRecordsEvenForEmptyRecordList {
processRecordsStartTime := time.Now()
// Delivery the events to the record processor
sc.recordProcessor.ProcessRecords(ctx, input)
// Convert from nanoseconds to milliseconds
processedRecordsTiming := time.Since(processRecordsStartTime) / 1000000
sc.metrics.RecordProcessRecordsTime(shard.ID, float64(processedRecordsTiming))
}
sc.metrics.IncrRecordsProcessed(shard.ID, recordLength)
sc.metrics.IncrBytesProcessed(shard.ID, recordBytes)
sc.metrics.MillisBehindLatest(shard.ID, float64(*getResp.MillisBehindLatest))
// Idle between each read, the user is responsible for checkpoint the progress
// This value is only used when no records are returned; if records are returned, it should immediately
// retrieve the next set of records.
if recordLength == 0 && aws.Int64Value(getResp.MillisBehindLatest) < int64(sc.cfg.IdleTimeBetweenReadsInMillis) {
time.Sleep(time.Duration(sc.cfg.IdleTimeBetweenReadsInMillis) * time.Millisecond)
}
// The shard has been closed, so no new records can be read from it
if getResp.NextShardIterator == nil {
log.Infof("Shard %s closed", shard.ID)
shutdownInput := &ShutdownInput{ShutdownReason: TERMINATE, Checkpointer: recordCheckpointer}
sc.recordProcessor.Shutdown(ctx, shutdownInput)
return nil
}
shardIterator = getResp.NextShardIterator
select {
case <-*sc.stop:
shutdownInput := &ShutdownInput{ShutdownReason: REQUESTED, Checkpointer: recordCheckpointer}
sc.recordProcessor.Shutdown(ctx, shutdownInput)
return nil
default:
}
}
} | identifier_body |
cursor.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use crate::io::prelude::*;
use crate::alloc::Allocator;
use crate::cmp;
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
///
/// `Cursor`s are used with in-memory buffers, anything implementing
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
/// allowing these buffers to be used anywhere you might use a reader or writer
/// that does actual I/O.
///
/// The standard library implements some I/O traits on various types which
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
/// <code>Cursor<[&\[u8\]][bytes]></code>.
///
/// # Examples
///
/// We may want to write bytes to a [`File`] in our production
/// code, but use an in-memory buffer in our tests. We can do this with
/// `Cursor`:
///
/// [bytes]: crate::slice "slice"
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::{self, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
/// writer.seek(SeekFrom::End(-10))?;
///
/// for i in 0..10 {
/// writer.write(&[i])?;
/// }
///
/// // all went well
/// Ok(())
/// }
///
/// # fn foo() -> io::Result<()> {
/// // Here's some code that uses this library function.
/// //
/// // We might want to use a BufReader here for efficiency, but let's
/// // keep this example focused.
/// let mut file = File::create("foo.txt")?;
///
/// write_ten_bytes_at_end(&mut file)?;
/// # Ok(())
/// # }
///
/// // now let's write a test
/// #[test]
/// fn test_writes_bytes() {
/// // setting up a real File is much slower than an in-memory buffer,
/// // let's use a cursor instead
/// use std::io::Cursor;
/// let mut buff = Cursor::new(vec![0; 15]);
///
/// write_ten_bytes_at_end(&mut buff).unwrap();
///
/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// }
/// ```
#[derive(Debug, Default, Eq, PartialEq)]
pub struct Cursor<T> {
inner: T,
pos: u64,
}
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
///
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
/// content, not with appending to it.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
/// ```
pub const fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner }
}
/// Consumes this cursor, returning the underlying value.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let vec = buff.into_inner();
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying value in this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_ref();
/// ```
pub const fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying value in this cursor.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_mut();
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the current position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use std::io::prelude::*;
/// use std::io::SeekFrom;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.seek(SeekFrom::Current(2)).unwrap();
/// assert_eq!(buff.position(), 2);
///
/// buff.seek(SeekFrom::Current(-1)).unwrap();
/// assert_eq!(buff.position(), 1);
/// ```
pub const fn position(&self) -> u64 {
self.pos
}
/// Sets the position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.set_position(2);
/// assert_eq!(buff.position(), 2);
///
/// buff.set_position(4);
/// assert_eq!(buff.position(), 4);
/// ```
pub fn set_position(&mut self, pos: u64) {
self.pos = pos;
}
}
impl<T> Cursor<T>
where
T: AsRef<[u8]>,
{
/// Returns the remaining slice.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.remaining_slice(), &[1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert_eq!(buff.remaining_slice(), &[3, 4, 5]);
///
/// buff.set_position(4);
/// assert_eq!(buff.remaining_slice(), &[5]);
///
/// buff.set_position(6);
/// assert_eq!(buff.remaining_slice(), &[]);
/// ```
pub fn remaining_slice(&self) -> &[u8] {
let len = self.pos.min(self.inner.as_ref().len() as u64);
&self.inner.as_ref()[(len as usize)..]
}
/// Returns `true` if the remaining slice is empty.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert!(!buff.is_empty());
///
/// buff.set_position(5);
/// assert!(buff.is_empty());
///
/// buff.set_position(10);
/// assert!(buff.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.pos >= self.inner.as_ref().len() as u64
}
}
impl<T> Clone for Cursor<T> | T: Clone,
{
#[inline]
fn clone(&self) -> Self {
Cursor { inner: self.inner.clone(), pos: self.pos }
}
#[inline]
fn clone_from(&mut self, other: &Self) {
self.inner.clone_from(&other.inner);
self.pos = other.pos;
}
}
impl<T> io::Seek for Cursor<T>
where
T: AsRef<[u8]>,
{
fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
let (base_pos, offset) = match style {
SeekFrom::Start(n) => {
self.pos = n;
return Ok(n);
}
SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
SeekFrom::Current(n) => (self.pos, n),
};
match base_pos.checked_add_signed(offset) {
Some(n) => {
self.pos = n;
Ok(self.pos)
}
None => Err(io::const_io_error!(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowing position",
)),
}
}
fn stream_len(&mut self) -> io::Result<u64> {
Ok(self.inner.as_ref().len() as u64)
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.pos)
}
}
impl<T> Read for Cursor<T>
where
T: AsRef<[u8]>,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let n = Read::read(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(n)
}
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let prev_written = cursor.written();
Read::read_buf(&mut self.fill_buf()?, cursor.reborrow())?;
self.pos += (cursor.written() - prev_written) as u64;
Ok(())
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut nread = 0;
for buf in bufs {
let n = self.read(buf)?;
nread += n;
if n < buf.len() {
break;
}
}
Ok(nread)
}
fn is_read_vectored(&self) -> bool {
true
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let n = buf.len();
Read::read_exact(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(())
}
}
impl<T> BufRead for Cursor<T>
where
T: AsRef<[u8]>,
{
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.remaining_slice())
}
fn consume(&mut self, amt: usize) {
self.pos += amt as u64;
}
}
// Non-resizing write implementation
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
let pos = cmp::min(*pos_mut, slice.len() as u64);
let amt = (&mut slice[(pos as usize)..]).write(buf)?;
*pos_mut += amt as u64;
Ok(amt)
}
#[inline]
fn slice_write_vectored(
pos_mut: &mut u64,
slice: &mut [u8],
bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
let n = slice_write(pos_mut, slice, buf)?;
nwritten += n;
if n < buf.len() {
break;
}
}
Ok(nwritten)
}
/// Reserves the required space, and pads the vec with 0s if necessary.
fn reserve_and_pad<A: Allocator>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
buf_len: usize,
) -> io::Result<usize> {
let pos: usize = (*pos_mut).try_into().map_err(|_| {
io::const_io_error!(
ErrorKind::InvalidInput,
"cursor position exceeds maximum possible vector length",
)
})?;
// For safety reasons, we don't want these numbers to overflow
// otherwise our allocation won't be enough
let desired_cap = pos.saturating_add(buf_len);
if desired_cap > vec.capacity() {
// We want our vec's total capacity
// to have room for (pos+buf_len) bytes. Reserve allocates
// based on additional elements from the length, so we need to
// reserve the difference
vec.reserve(desired_cap - vec.len());
}
// Pad if pos is above the current len.
if pos > vec.len() {
let diff = pos - vec.len();
// Unfortunately, `resize()` would suffice but the optimiser does not
// realise the `reserve` it does can be eliminated. So we do it manually
// to eliminate that extra branch
let spare = vec.spare_capacity_mut();
debug_assert!(spare.len() >= diff);
// Safety: we have allocated enough capacity for this.
// And we are only writing, not reading
unsafe {
spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
vec.set_len(pos);
}
}
Ok(pos)
}
/// Writes the slice to the vec without allocating
/// # Safety: vec must have buf.len() spare capacity
unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
A: Allocator,
{
debug_assert!(vec.capacity() >= pos + buf.len());
vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
pos + buf.len()
}
/// Resizing write implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
A: Allocator,
{
let buf_len = buf.len();
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to pos
unsafe {
pos = vec_write_unchecked(pos, vec, buf);
if pos > vec.len() {
vec.set_len(pos);
}
};
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
/// Resizing write_vectored implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_vectored<A>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
A: Allocator,
{
// For safety reasons, we don't want this sum to overflow ever.
// If this saturates, the reserve should panic to avoid any unsound writing.
let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to the last pos
unsafe {
for buf in bufs {
pos = vec_write_unchecked(pos, vec, buf);
}
if pos > vec.len() {
vec.set_len(pos);
}
}
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
impl Write for Cursor<&mut [u8]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<&mut Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, &mut self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Box<[u8], A>>
where
A: Allocator,
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<const N: usize> Write for Cursor<[u8; N]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
} | where | random_line_split |
cursor.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use crate::io::prelude::*;
use crate::alloc::Allocator;
use crate::cmp;
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
///
/// `Cursor`s are used with in-memory buffers, anything implementing
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
/// allowing these buffers to be used anywhere you might use a reader or writer
/// that does actual I/O.
///
/// The standard library implements some I/O traits on various types which
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
/// <code>Cursor<[&\[u8\]][bytes]></code>.
///
/// # Examples
///
/// We may want to write bytes to a [`File`] in our production
/// code, but use an in-memory buffer in our tests. We can do this with
/// `Cursor`:
///
/// [bytes]: crate::slice "slice"
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::{self, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
/// writer.seek(SeekFrom::End(-10))?;
///
/// for i in 0..10 {
/// writer.write(&[i])?;
/// }
///
/// // all went well
/// Ok(())
/// }
///
/// # fn foo() -> io::Result<()> {
/// // Here's some code that uses this library function.
/// //
/// // We might want to use a BufReader here for efficiency, but let's
/// // keep this example focused.
/// let mut file = File::create("foo.txt")?;
///
/// write_ten_bytes_at_end(&mut file)?;
/// # Ok(())
/// # }
///
/// // now let's write a test
/// #[test]
/// fn test_writes_bytes() {
/// // setting up a real File is much slower than an in-memory buffer,
/// // let's use a cursor instead
/// use std::io::Cursor;
/// let mut buff = Cursor::new(vec![0; 15]);
///
/// write_ten_bytes_at_end(&mut buff).unwrap();
///
/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// }
/// ```
#[derive(Debug, Default, Eq, PartialEq)]
pub struct Cursor<T> {
inner: T,
pos: u64,
}
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
///
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
/// content, not with appending to it.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
/// ```
pub const fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner }
}
/// Consumes this cursor, returning the underlying value.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let vec = buff.into_inner();
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying value in this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_ref();
/// ```
pub const fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying value in this cursor.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_mut();
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the current position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use std::io::prelude::*;
/// use std::io::SeekFrom;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.seek(SeekFrom::Current(2)).unwrap();
/// assert_eq!(buff.position(), 2);
///
/// buff.seek(SeekFrom::Current(-1)).unwrap();
/// assert_eq!(buff.position(), 1);
/// ```
pub const fn position(&self) -> u64 {
self.pos
}
/// Sets the position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.set_position(2);
/// assert_eq!(buff.position(), 2);
///
/// buff.set_position(4);
/// assert_eq!(buff.position(), 4);
/// ```
pub fn set_position(&mut self, pos: u64) {
self.pos = pos;
}
}
impl<T> Cursor<T>
where
T: AsRef<[u8]>,
{
/// Returns the remaining slice.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.remaining_slice(), &[1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert_eq!(buff.remaining_slice(), &[3, 4, 5]);
///
/// buff.set_position(4);
/// assert_eq!(buff.remaining_slice(), &[5]);
///
/// buff.set_position(6);
/// assert_eq!(buff.remaining_slice(), &[]);
/// ```
pub fn remaining_slice(&self) -> &[u8] {
let len = self.pos.min(self.inner.as_ref().len() as u64);
&self.inner.as_ref()[(len as usize)..]
}
/// Returns `true` if the remaining slice is empty.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert!(!buff.is_empty());
///
/// buff.set_position(5);
/// assert!(buff.is_empty());
///
/// buff.set_position(10);
/// assert!(buff.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.pos >= self.inner.as_ref().len() as u64
}
}
impl<T> Clone for Cursor<T>
where
T: Clone,
{
#[inline]
fn clone(&self) -> Self {
Cursor { inner: self.inner.clone(), pos: self.pos }
}
#[inline]
fn clone_from(&mut self, other: &Self) {
self.inner.clone_from(&other.inner);
self.pos = other.pos;
}
}
impl<T> io::Seek for Cursor<T>
where
T: AsRef<[u8]>,
{
fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
let (base_pos, offset) = match style {
SeekFrom::Start(n) => {
self.pos = n;
return Ok(n);
}
SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
SeekFrom::Current(n) => (self.pos, n),
};
match base_pos.checked_add_signed(offset) {
Some(n) => {
self.pos = n;
Ok(self.pos)
}
None => Err(io::const_io_error!(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowing position",
)),
}
}
fn stream_len(&mut self) -> io::Result<u64> {
Ok(self.inner.as_ref().len() as u64)
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.pos)
}
}
impl<T> Read for Cursor<T>
where
T: AsRef<[u8]>,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let n = Read::read(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(n)
}
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let prev_written = cursor.written();
Read::read_buf(&mut self.fill_buf()?, cursor.reborrow())?;
self.pos += (cursor.written() - prev_written) as u64;
Ok(())
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut nread = 0;
for buf in bufs {
let n = self.read(buf)?;
nread += n;
if n < buf.len() {
break;
}
}
Ok(nread)
}
fn is_read_vectored(&self) -> bool {
true
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let n = buf.len();
Read::read_exact(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(())
}
}
impl<T> BufRead for Cursor<T>
where
T: AsRef<[u8]>,
{
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.remaining_slice())
}
fn consume(&mut self, amt: usize) {
self.pos += amt as u64;
}
}
// Non-resizing write implementation
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
let pos = cmp::min(*pos_mut, slice.len() as u64);
let amt = (&mut slice[(pos as usize)..]).write(buf)?;
*pos_mut += amt as u64;
Ok(amt)
}
#[inline]
fn slice_write_vectored(
pos_mut: &mut u64,
slice: &mut [u8],
bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
let n = slice_write(pos_mut, slice, buf)?;
nwritten += n;
if n < buf.len() {
break;
}
}
Ok(nwritten)
}
/// Reserves the required space, and pads the vec with 0s if necessary.
fn reserve_and_pad<A: Allocator>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
buf_len: usize,
) -> io::Result<usize> {
let pos: usize = (*pos_mut).try_into().map_err(|_| {
io::const_io_error!(
ErrorKind::InvalidInput,
"cursor position exceeds maximum possible vector length",
)
})?;
// For safety reasons, we don't want these numbers to overflow
// otherwise our allocation won't be enough
let desired_cap = pos.saturating_add(buf_len);
if desired_cap > vec.capacity() {
// We want our vec's total capacity
// to have room for (pos+buf_len) bytes. Reserve allocates
// based on additional elements from the length, so we need to
// reserve the difference
vec.reserve(desired_cap - vec.len());
}
// Pad if pos is above the current len.
if pos > vec.len() {
let diff = pos - vec.len();
// Unfortunately, `resize()` would suffice but the optimiser does not
// realise the `reserve` it does can be eliminated. So we do it manually
// to eliminate that extra branch
let spare = vec.spare_capacity_mut();
debug_assert!(spare.len() >= diff);
// Safety: we have allocated enough capacity for this.
// And we are only writing, not reading
unsafe {
spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
vec.set_len(pos);
}
}
Ok(pos)
}
/// Writes the slice to the vec without allocating
/// # Safety: vec must have buf.len() spare capacity
unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
A: Allocator,
{
debug_assert!(vec.capacity() >= pos + buf.len());
vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
pos + buf.len()
}
/// Resizing write implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
A: Allocator,
{
let buf_len = buf.len();
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to pos
unsafe {
pos = vec_write_unchecked(pos, vec, buf);
if pos > vec.len() {
vec.set_len(pos);
}
};
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
/// Resizing write_vectored implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_vectored<A>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
A: Allocator,
{
// For safety reasons, we don't want this sum to overflow ever.
// If this saturates, the reserve should panic to avoid any unsound writing.
let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to the last pos
unsafe {
for buf in bufs {
pos = vec_write_unchecked(pos, vec, buf);
}
if pos > vec.len() {
vec.set_len(pos);
}
}
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
impl Write for Cursor<&mut [u8]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<&mut Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn | (&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, &mut self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Box<[u8], A>>
where
A: Allocator,
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<const N: usize> Write for Cursor<[u8; N]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
| flush | identifier_name |
cursor.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use crate::io::prelude::*;
use crate::alloc::Allocator;
use crate::cmp;
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
///
/// `Cursor`s are used with in-memory buffers, anything implementing
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
/// allowing these buffers to be used anywhere you might use a reader or writer
/// that does actual I/O.
///
/// The standard library implements some I/O traits on various types which
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
/// <code>Cursor<[&\[u8\]][bytes]></code>.
///
/// # Examples
///
/// We may want to write bytes to a [`File`] in our production
/// code, but use an in-memory buffer in our tests. We can do this with
/// `Cursor`:
///
/// [bytes]: crate::slice "slice"
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::{self, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
/// writer.seek(SeekFrom::End(-10))?;
///
/// for i in 0..10 {
/// writer.write(&[i])?;
/// }
///
/// // all went well
/// Ok(())
/// }
///
/// # fn foo() -> io::Result<()> {
/// // Here's some code that uses this library function.
/// //
/// // We might want to use a BufReader here for efficiency, but let's
/// // keep this example focused.
/// let mut file = File::create("foo.txt")?;
///
/// write_ten_bytes_at_end(&mut file)?;
/// # Ok(())
/// # }
///
/// // now let's write a test
/// #[test]
/// fn test_writes_bytes() {
/// // setting up a real File is much slower than an in-memory buffer,
/// // let's use a cursor instead
/// use std::io::Cursor;
/// let mut buff = Cursor::new(vec![0; 15]);
///
/// write_ten_bytes_at_end(&mut buff).unwrap();
///
/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// }
/// ```
#[derive(Debug, Default, Eq, PartialEq)]
pub struct Cursor<T> {
inner: T,
pos: u64,
}
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
///
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
/// content, not with appending to it.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
/// ```
pub const fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner }
}
/// Consumes this cursor, returning the underlying value.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let vec = buff.into_inner();
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying value in this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_ref();
/// ```
pub const fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying value in this cursor.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_mut();
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the current position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use std::io::prelude::*;
/// use std::io::SeekFrom;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.seek(SeekFrom::Current(2)).unwrap();
/// assert_eq!(buff.position(), 2);
///
/// buff.seek(SeekFrom::Current(-1)).unwrap();
/// assert_eq!(buff.position(), 1);
/// ```
pub const fn position(&self) -> u64 {
self.pos
}
/// Sets the position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.set_position(2);
/// assert_eq!(buff.position(), 2);
///
/// buff.set_position(4);
/// assert_eq!(buff.position(), 4);
/// ```
pub fn set_position(&mut self, pos: u64) {
self.pos = pos;
}
}
impl<T> Cursor<T>
where
T: AsRef<[u8]>,
{
/// Returns the remaining slice.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.remaining_slice(), &[1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert_eq!(buff.remaining_slice(), &[3, 4, 5]);
///
/// buff.set_position(4);
/// assert_eq!(buff.remaining_slice(), &[5]);
///
/// buff.set_position(6);
/// assert_eq!(buff.remaining_slice(), &[]);
/// ```
pub fn remaining_slice(&self) -> &[u8] {
let len = self.pos.min(self.inner.as_ref().len() as u64);
&self.inner.as_ref()[(len as usize)..]
}
/// Returns `true` if the remaining slice is empty.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert!(!buff.is_empty());
///
/// buff.set_position(5);
/// assert!(buff.is_empty());
///
/// buff.set_position(10);
/// assert!(buff.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.pos >= self.inner.as_ref().len() as u64
}
}
impl<T> Clone for Cursor<T>
where
T: Clone,
{
#[inline]
fn clone(&self) -> Self {
Cursor { inner: self.inner.clone(), pos: self.pos }
}
#[inline]
fn clone_from(&mut self, other: &Self) {
self.inner.clone_from(&other.inner);
self.pos = other.pos;
}
}
impl<T> io::Seek for Cursor<T>
where
T: AsRef<[u8]>,
{
fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
let (base_pos, offset) = match style {
SeekFrom::Start(n) => {
self.pos = n;
return Ok(n);
}
SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
SeekFrom::Current(n) => (self.pos, n),
};
match base_pos.checked_add_signed(offset) {
Some(n) => {
self.pos = n;
Ok(self.pos)
}
None => Err(io::const_io_error!(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowing position",
)),
}
}
fn stream_len(&mut self) -> io::Result<u64> {
Ok(self.inner.as_ref().len() as u64)
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.pos)
}
}
impl<T> Read for Cursor<T>
where
T: AsRef<[u8]>,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let n = Read::read(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(n)
}
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let prev_written = cursor.written();
Read::read_buf(&mut self.fill_buf()?, cursor.reborrow())?;
self.pos += (cursor.written() - prev_written) as u64;
Ok(())
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut nread = 0;
for buf in bufs {
let n = self.read(buf)?;
nread += n;
if n < buf.len() {
break;
}
}
Ok(nread)
}
fn is_read_vectored(&self) -> bool {
true
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let n = buf.len();
Read::read_exact(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(())
}
}
impl<T> BufRead for Cursor<T>
where
T: AsRef<[u8]>,
{
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.remaining_slice())
}
fn consume(&mut self, amt: usize) {
self.pos += amt as u64;
}
}
// Non-resizing write implementation
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
let pos = cmp::min(*pos_mut, slice.len() as u64);
let amt = (&mut slice[(pos as usize)..]).write(buf)?;
*pos_mut += amt as u64;
Ok(amt)
}
#[inline]
fn slice_write_vectored(
pos_mut: &mut u64,
slice: &mut [u8],
bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
let n = slice_write(pos_mut, slice, buf)?;
nwritten += n;
if n < buf.len() {
break;
}
}
Ok(nwritten)
}
/// Reserves the required space, and pads the vec with 0s if necessary.
fn reserve_and_pad<A: Allocator>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
buf_len: usize,
) -> io::Result<usize> {
let pos: usize = (*pos_mut).try_into().map_err(|_| {
io::const_io_error!(
ErrorKind::InvalidInput,
"cursor position exceeds maximum possible vector length",
)
})?;
// For safety reasons, we don't want these numbers to overflow
// otherwise our allocation won't be enough
let desired_cap = pos.saturating_add(buf_len);
if desired_cap > vec.capacity() |
// Pad if pos is above the current len.
if pos > vec.len() {
let diff = pos - vec.len();
// Unfortunately, `resize()` would suffice but the optimiser does not
// realise the `reserve` it does can be eliminated. So we do it manually
// to eliminate that extra branch
let spare = vec.spare_capacity_mut();
debug_assert!(spare.len() >= diff);
// Safety: we have allocated enough capacity for this.
// And we are only writing, not reading
unsafe {
spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
vec.set_len(pos);
}
}
Ok(pos)
}
/// Writes the slice to the vec without allocating
/// # Safety: vec must have buf.len() spare capacity
unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
A: Allocator,
{
debug_assert!(vec.capacity() >= pos + buf.len());
vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
pos + buf.len()
}
/// Resizing write implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
A: Allocator,
{
let buf_len = buf.len();
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to pos
unsafe {
pos = vec_write_unchecked(pos, vec, buf);
if pos > vec.len() {
vec.set_len(pos);
}
};
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
/// Resizing write_vectored implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_vectored<A>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
A: Allocator,
{
// For safety reasons, we don't want this sum to overflow ever.
// If this saturates, the reserve should panic to avoid any unsound writing.
let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to the last pos
unsafe {
for buf in bufs {
pos = vec_write_unchecked(pos, vec, buf);
}
if pos > vec.len() {
vec.set_len(pos);
}
}
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
impl Write for Cursor<&mut [u8]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<&mut Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, &mut self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Box<[u8], A>>
where
A: Allocator,
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<const N: usize> Write for Cursor<[u8; N]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
| {
// We want our vec's total capacity
// to have room for (pos+buf_len) bytes. Reserve allocates
// based on additional elements from the length, so we need to
// reserve the difference
vec.reserve(desired_cap - vec.len());
} | conditional_block |
cursor.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use crate::io::prelude::*;
use crate::alloc::Allocator;
use crate::cmp;
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
///
/// `Cursor`s are used with in-memory buffers, anything implementing
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
/// allowing these buffers to be used anywhere you might use a reader or writer
/// that does actual I/O.
///
/// The standard library implements some I/O traits on various types which
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
/// <code>Cursor<[&\[u8\]][bytes]></code>.
///
/// # Examples
///
/// We may want to write bytes to a [`File`] in our production
/// code, but use an in-memory buffer in our tests. We can do this with
/// `Cursor`:
///
/// [bytes]: crate::slice "slice"
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::{self, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
/// writer.seek(SeekFrom::End(-10))?;
///
/// for i in 0..10 {
/// writer.write(&[i])?;
/// }
///
/// // all went well
/// Ok(())
/// }
///
/// # fn foo() -> io::Result<()> {
/// // Here's some code that uses this library function.
/// //
/// // We might want to use a BufReader here for efficiency, but let's
/// // keep this example focused.
/// let mut file = File::create("foo.txt")?;
///
/// write_ten_bytes_at_end(&mut file)?;
/// # Ok(())
/// # }
///
/// // now let's write a test
/// #[test]
/// fn test_writes_bytes() {
/// // setting up a real File is much slower than an in-memory buffer,
/// // let's use a cursor instead
/// use std::io::Cursor;
/// let mut buff = Cursor::new(vec![0; 15]);
///
/// write_ten_bytes_at_end(&mut buff).unwrap();
///
/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// }
/// ```
#[derive(Debug, Default, Eq, PartialEq)]
pub struct Cursor<T> {
inner: T,
pos: u64,
}
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
///
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
/// content, not with appending to it.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
/// ```
pub const fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner }
}
/// Consumes this cursor, returning the underlying value.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let vec = buff.into_inner();
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying value in this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_ref();
/// ```
pub const fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying value in this cursor.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_mut();
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the current position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use std::io::prelude::*;
/// use std::io::SeekFrom;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.seek(SeekFrom::Current(2)).unwrap();
/// assert_eq!(buff.position(), 2);
///
/// buff.seek(SeekFrom::Current(-1)).unwrap();
/// assert_eq!(buff.position(), 1);
/// ```
pub const fn position(&self) -> u64 {
self.pos
}
/// Sets the position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.set_position(2);
/// assert_eq!(buff.position(), 2);
///
/// buff.set_position(4);
/// assert_eq!(buff.position(), 4);
/// ```
pub fn set_position(&mut self, pos: u64) {
self.pos = pos;
}
}
impl<T> Cursor<T>
where
T: AsRef<[u8]>,
{
/// Returns the remaining slice.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.remaining_slice(), &[1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert_eq!(buff.remaining_slice(), &[3, 4, 5]);
///
/// buff.set_position(4);
/// assert_eq!(buff.remaining_slice(), &[5]);
///
/// buff.set_position(6);
/// assert_eq!(buff.remaining_slice(), &[]);
/// ```
pub fn remaining_slice(&self) -> &[u8] {
let len = self.pos.min(self.inner.as_ref().len() as u64);
&self.inner.as_ref()[(len as usize)..]
}
/// Returns `true` if the remaining slice is empty.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert!(!buff.is_empty());
///
/// buff.set_position(5);
/// assert!(buff.is_empty());
///
/// buff.set_position(10);
/// assert!(buff.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.pos >= self.inner.as_ref().len() as u64
}
}
impl<T> Clone for Cursor<T>
where
T: Clone,
{
#[inline]
fn clone(&self) -> Self {
Cursor { inner: self.inner.clone(), pos: self.pos }
}
#[inline]
fn clone_from(&mut self, other: &Self) {
self.inner.clone_from(&other.inner);
self.pos = other.pos;
}
}
impl<T> io::Seek for Cursor<T>
where
T: AsRef<[u8]>,
{
fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
let (base_pos, offset) = match style {
SeekFrom::Start(n) => {
self.pos = n;
return Ok(n);
}
SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
SeekFrom::Current(n) => (self.pos, n),
};
match base_pos.checked_add_signed(offset) {
Some(n) => {
self.pos = n;
Ok(self.pos)
}
None => Err(io::const_io_error!(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowing position",
)),
}
}
fn stream_len(&mut self) -> io::Result<u64> {
Ok(self.inner.as_ref().len() as u64)
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.pos)
}
}
impl<T> Read for Cursor<T>
where
T: AsRef<[u8]>,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let n = Read::read(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(n)
}
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let prev_written = cursor.written();
Read::read_buf(&mut self.fill_buf()?, cursor.reborrow())?;
self.pos += (cursor.written() - prev_written) as u64;
Ok(())
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut nread = 0;
for buf in bufs {
let n = self.read(buf)?;
nread += n;
if n < buf.len() {
break;
}
}
Ok(nread)
}
fn is_read_vectored(&self) -> bool {
true
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let n = buf.len();
Read::read_exact(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(())
}
}
impl<T> BufRead for Cursor<T>
where
T: AsRef<[u8]>,
{
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.remaining_slice())
}
fn consume(&mut self, amt: usize) {
self.pos += amt as u64;
}
}
// Non-resizing write implementation
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
let pos = cmp::min(*pos_mut, slice.len() as u64);
let amt = (&mut slice[(pos as usize)..]).write(buf)?;
*pos_mut += amt as u64;
Ok(amt)
}
#[inline]
fn slice_write_vectored(
pos_mut: &mut u64,
slice: &mut [u8],
bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
let n = slice_write(pos_mut, slice, buf)?;
nwritten += n;
if n < buf.len() {
break;
}
}
Ok(nwritten)
}
/// Reserves the required space, and pads the vec with 0s if necessary.
fn reserve_and_pad<A: Allocator>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
buf_len: usize,
) -> io::Result<usize> {
let pos: usize = (*pos_mut).try_into().map_err(|_| {
io::const_io_error!(
ErrorKind::InvalidInput,
"cursor position exceeds maximum possible vector length",
)
})?;
// For safety reasons, we don't want these numbers to overflow
// otherwise our allocation won't be enough
let desired_cap = pos.saturating_add(buf_len);
if desired_cap > vec.capacity() {
// We want our vec's total capacity
// to have room for (pos+buf_len) bytes. Reserve allocates
// based on additional elements from the length, so we need to
// reserve the difference
vec.reserve(desired_cap - vec.len());
}
// Pad if pos is above the current len.
if pos > vec.len() {
let diff = pos - vec.len();
// Unfortunately, `resize()` would suffice but the optimiser does not
// realise the `reserve` it does can be eliminated. So we do it manually
// to eliminate that extra branch
let spare = vec.spare_capacity_mut();
debug_assert!(spare.len() >= diff);
// Safety: we have allocated enough capacity for this.
// And we are only writing, not reading
unsafe {
spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
vec.set_len(pos);
}
}
Ok(pos)
}
/// Writes the slice to the vec without allocating
/// # Safety: vec must have buf.len() spare capacity
unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
A: Allocator,
{
debug_assert!(vec.capacity() >= pos + buf.len());
vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
pos + buf.len()
}
/// Resizing write implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
A: Allocator,
{
let buf_len = buf.len();
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to pos
unsafe {
pos = vec_write_unchecked(pos, vec, buf);
if pos > vec.len() {
vec.set_len(pos);
}
};
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
/// Resizing write_vectored implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_vectored<A>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
A: Allocator,
{
// For safety reasons, we don't want this sum to overflow ever.
// If this saturates, the reserve should panic to avoid any unsound writing.
let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to the last pos
unsafe {
for buf in bufs {
pos = vec_write_unchecked(pos, vec, buf);
}
if pos > vec.len() {
vec.set_len(pos);
}
}
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
impl Write for Cursor<&mut [u8]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<&mut Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> |
}
impl<A> Write for Cursor<Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, &mut self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Box<[u8], A>>
where
A: Allocator,
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<const N: usize> Write for Cursor<[u8; N]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
| {
Ok(())
} | identifier_body |
lib.rs | #![deny(
absolute_paths_not_starting_with_crate,
ambiguous_associated_items,
ambiguous_glob_reexports,
anonymous_parameters,
arithmetic_overflow,
array_into_iter,
asm_sub_register,
bad_asm_style,
bindings_with_variant_name,
break_with_label_and_loop,
byte_slice_in_packed_struct_with_derive,
cenum_impl_drop_cast,
clashing_extern_declarations,
coherence_leak_check,
conflicting_repr_hints,
confusable_idents,
const_evaluatable_unchecked,
const_item_mutation,
dead_code,
deprecated,
deprecated_cfg_attr_crate_type_name,
deprecated_in_future,
deprecated_where_clause_location,
deref_into_dyn_supertrait,
deref_nullptr,
drop_bounds,
dropping_copy_types,
dropping_references,
duplicate_macro_attributes,
dyn_drop,
ellipsis_inclusive_range_patterns,
enum_intrinsics_non_enums,
explicit_outlives_requirements,
exported_private_dependencies,
ffi_unwind_calls,
for_loops_over_fallibles,
forbidden_lint_groups,
forgetting_copy_types,
forgetting_references,
function_item_references,
ill_formed_attribute_input,
illegal_floating_point_literal_pattern,
implied_bounds_entailment,
improper_ctypes,
improper_ctypes_definitions,
incomplete_features,
incomplete_include,
indirect_structural_match,
ineffective_unstable_trait_impl,
inline_no_sanitize,
invalid_alignment,
invalid_atomic_ordering,
invalid_doc_attributes,
invalid_macro_export_arguments,
invalid_type_param_default,
invalid_value,
irrefutable_let_patterns,
keyword_idents,
large_assignments,
late_bound_lifetime_arguments,
legacy_derive_helpers,
let_underscore_drop,
let_underscore_lock,
macro_expanded_macro_exports_accessed_by_absolute_paths,
map_unit_fn,
meta_variable_misuse,
missing_abi,
missing_copy_implementations,
missing_docs,
missing_fragment_specifier,
mixed_script_confusables,
mutable_transmutes,
named_arguments_used_positionally,
named_asm_labels,
no_mangle_const_items,
no_mangle_generic_items,
non_ascii_idents,
non_camel_case_types,
non_fmt_panics,
non_shorthand_field_patterns,
non_snake_case,
non_upper_case_globals,
nontrivial_structural_match,
noop_method_call,
opaque_hidden_inferred_bound,
order_dependent_trait_objects,
overflowing_literals,
overlapping_range_endpoints,
path_statements,
patterns_in_fns_without_body,
pointer_structural_match,
private_in_public,
proc_macro_back_compat,
proc_macro_derive_resolution_fallback,
pub_use_of_private_extern_crate,
redundant_semicolons,
repr_transparent_external_private_fields,
rust_2021_incompatible_closure_captures,
rust_2021_incompatible_or_patterns,
rust_2021_prefixes_incompatible_syntax,
rust_2021_prelude_collisions,
semicolon_in_expressions_from_macros,
soft_unstable,
special_module_name,
stable_features,
suspicious_auto_trait_impls,
suspicious_double_ref_op,
temporary_cstring_as_ptr,
text_direction_codepoint_in_comment,
text_direction_codepoint_in_literal,
trivial_bounds,
trivial_casts,
trivial_numeric_casts,
type_alias_bounds,
tyvar_behind_raw_pointer,
uncommon_codepoints,
unconditional_panic,
unconditional_recursion,
undefined_naked_function_abi,
unexpected_cfgs,
ungated_async_fn_track_caller,
uninhabited_static,
unknown_crate_types,
unnameable_test_items,
unreachable_code,
unreachable_patterns,
unreachable_pub,
unsafe_code,
unsafe_op_in_unsafe_fn,
unstable_features,
unstable_name_collisions,
unstable_syntax_pre_expansion,
unsupported_calling_conventions,
unused_allocation,
unused_assignments,
unused_assignments,
unused_attributes,
unused_braces,
unused_comparisons,
unused_crate_dependencies,
unused_doc_comments,
unused_extern_crates,
unused_features,
unused_import_braces,
unused_imports,
unused_labels,
unused_lifetimes,
unused_macro_rules,
unused_macros,
unused_must_use,
unused_mut,
unused_parens,
unused_qualifications,
unused_tuple_struct_fields,
unused_unsafe,
unused_variables,
useless_deprecated,
where_clauses_object_safety,
while_true
)]
#![warn(macro_use_extern_crate, unknown_lints)]
#![allow(
bare_trait_objects,
box_pointers,
elided_lifetimes_in_paths,
missing_debug_implementations,
single_use_lifetimes,
unused_results,
variant_size_differences,
warnings,
renamed_and_removed_lints
)]
//! # simple_redis
//!
//! Simple and resilient [redis](https://redis.io/) client based on [redis-rs](https://crates.io/crates/redis) with
//! internal connection and subscription handling.
//!
//! This library provides a very basic, simple API for the most common redis operations.<br>
//! While not as comprehensive or flexiable as [redis-rs](https://crates.io/crates/redis),
//! it does provide a simpler api for most common use cases and operations as well as automatic and resilient internal
//! connection and subscription (pubsub) handling.<br>
//! In addition, the entire API is accessible via redis client and there is no need to manage connection or pubsub
//! instances in parallel.<br>
//!
//! ## Connection Resiliency
//!
//! Connection resiliency is managed by verifying the internally managed connection before every operation against the
//! redis server.<br>
//! In case of any connection issue, a new connection will be allocated to ensure the operation is invoked on a valid
//! connection only.<br>
//! However, this comes at a small performance cost of PING operation to the redis server.<br>
//!
//! ## Subscription Resiliency
//!
//! Subscription resiliency is ensured by recreating the internal pubsub and issuing new subscription requests
//! automatically in case of any error while fetching a message from the subscribed channels.
//!
//! # Examples
//!
//! ## Initialization and Simple Operations
//!
//! ```
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get_string("my_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.set("my_numeric_key", 255.5) {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get::<f32>("my_numeric_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.hgetall("my_map") {
//! Ok(map) => {
//! match map.get("my_field") {
//! Some(value) => println!("Got field value from map: {}", value),
//! None => println!("Map field is emtpy"),
//! }
//! },
//! Err(error) => println!("Unable to read map from Redis: {}", error),
//! };
//!
//! /// run some command that is not built in the library
//! match client.run_command::<String>("ECHO", vec!["testing"]) {
//! Ok(value) => assert_eq!(value, "testing"),
//! _ => panic!("test error"),
//! };
//!
//! /// publish messages
//! let result = client.publish("news_channel", "test message");
//! assert!(result.is_ok());
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Subscription Flow
//!
//! ```rust,no_run
//! use simple_redis::{Interrupts, Message};
//!
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! let mut result = client.subscribe("important_notifications");
//! assert!(result.is_ok());
//! result = client.psubscribe("*_notifications");
//! assert!(result.is_ok());
//!
//! // fetch messages from all subscriptions
//! client.fetch_messages(
//! &mut |message: Message| -> bool {
//! let payload : String = message.get_payload().unwrap();
//! println!("Got message: {}", payload);
//!
//! // continue fetching
//! false
//! },
//! &mut || -> Interrupts { Interrupts::new() },
//! ).unwrap();
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Closing Connection
//!
//! ```rust
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.quit() {
//! Err(error) => println!("Error: {}", error),
//! _ => println!("Connection Closed.")
//! }
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! # Installation
//! In order to use this library, just add it as a dependency:
//!
//! ```ini
//! [dependencies]
//! simple_redis = "*"
//! ```
//!
//! # Contributing
//! See [contributing guide](https://github.com/sagiegurari/simple_redis/blob/master/.github/CONTRIBUTING.md)
//!
//! # License
//! Developed by Sagie Gur-Ari and licensed under the
//! [Apache 2](https://github.com/sagiegurari/simple_redis/blob/master/LICENSE) open source license.
//!
#[cfg(test)]
#[path = "./lib_test.rs"]
mod lib_test;
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
pub mod client;
mod commands;
mod connection;
mod subscriber;
pub mod types;
/// Error Type
pub type RedisError = types::RedisError;
/// PubSub message
pub type Message = types::Message;
/// Blocking operations interrupts
pub type Interrupts = types::Interrupts;
/// Redis result which either holds a value or a Redis error
pub type RedisResult<T> = types::RedisResult<T>;
/// Constructs a new redis client.<br>
/// The redis connection string must be in the following format: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Arguments
///
/// * `connection_string` - The connection string in the format of: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Example
///
/// ```
/// extern crate simple_redis;
///
/// fn main() {
/// match simple_redis::create("redis://127.0.0.1:6379/") {
/// Ok(client) => println!("Created Redis Client"),
/// Err(error) => println!("Unable to create Redis client: {}", error)
/// }
/// }
/// ```
pub fn create(connection_string: &str) -> Result<client::Client, RedisError> | {
client::create(connection_string)
} | identifier_body | |
lib.rs | #![deny(
absolute_paths_not_starting_with_crate,
ambiguous_associated_items,
ambiguous_glob_reexports,
anonymous_parameters,
arithmetic_overflow,
array_into_iter,
asm_sub_register,
bad_asm_style,
bindings_with_variant_name,
break_with_label_and_loop,
byte_slice_in_packed_struct_with_derive,
cenum_impl_drop_cast,
clashing_extern_declarations,
coherence_leak_check,
conflicting_repr_hints,
confusable_idents,
const_evaluatable_unchecked,
const_item_mutation,
dead_code,
deprecated,
deprecated_cfg_attr_crate_type_name,
deprecated_in_future,
deprecated_where_clause_location,
deref_into_dyn_supertrait,
deref_nullptr,
drop_bounds,
dropping_copy_types,
dropping_references,
duplicate_macro_attributes,
dyn_drop,
ellipsis_inclusive_range_patterns,
enum_intrinsics_non_enums,
explicit_outlives_requirements,
exported_private_dependencies,
ffi_unwind_calls,
for_loops_over_fallibles,
forbidden_lint_groups,
forgetting_copy_types,
forgetting_references,
function_item_references,
ill_formed_attribute_input,
illegal_floating_point_literal_pattern,
implied_bounds_entailment,
improper_ctypes,
improper_ctypes_definitions,
incomplete_features,
incomplete_include,
indirect_structural_match,
ineffective_unstable_trait_impl,
inline_no_sanitize,
invalid_alignment,
invalid_atomic_ordering,
invalid_doc_attributes,
invalid_macro_export_arguments,
invalid_type_param_default,
invalid_value,
irrefutable_let_patterns,
keyword_idents,
large_assignments,
late_bound_lifetime_arguments,
legacy_derive_helpers,
let_underscore_drop,
let_underscore_lock,
macro_expanded_macro_exports_accessed_by_absolute_paths,
map_unit_fn,
meta_variable_misuse,
missing_abi,
missing_copy_implementations,
missing_docs,
missing_fragment_specifier,
mixed_script_confusables,
mutable_transmutes,
named_arguments_used_positionally,
named_asm_labels,
no_mangle_const_items,
no_mangle_generic_items,
non_ascii_idents,
non_camel_case_types,
non_fmt_panics,
non_shorthand_field_patterns,
non_snake_case,
non_upper_case_globals,
nontrivial_structural_match,
noop_method_call,
opaque_hidden_inferred_bound,
order_dependent_trait_objects,
overflowing_literals,
overlapping_range_endpoints,
path_statements,
patterns_in_fns_without_body,
pointer_structural_match,
private_in_public,
proc_macro_back_compat,
proc_macro_derive_resolution_fallback,
pub_use_of_private_extern_crate,
redundant_semicolons,
repr_transparent_external_private_fields,
rust_2021_incompatible_closure_captures,
rust_2021_incompatible_or_patterns,
rust_2021_prefixes_incompatible_syntax,
rust_2021_prelude_collisions,
semicolon_in_expressions_from_macros,
soft_unstable,
special_module_name,
stable_features,
suspicious_auto_trait_impls,
suspicious_double_ref_op,
temporary_cstring_as_ptr,
text_direction_codepoint_in_comment,
text_direction_codepoint_in_literal,
trivial_bounds,
trivial_casts,
trivial_numeric_casts,
type_alias_bounds,
tyvar_behind_raw_pointer,
uncommon_codepoints,
unconditional_panic,
unconditional_recursion,
undefined_naked_function_abi,
unexpected_cfgs,
ungated_async_fn_track_caller,
uninhabited_static,
unknown_crate_types,
unnameable_test_items,
unreachable_code,
unreachable_patterns,
unreachable_pub,
unsafe_code,
unsafe_op_in_unsafe_fn,
unstable_features,
unstable_name_collisions,
unstable_syntax_pre_expansion,
unsupported_calling_conventions,
unused_allocation,
unused_assignments,
unused_assignments,
unused_attributes,
unused_braces,
unused_comparisons,
unused_crate_dependencies,
unused_doc_comments,
unused_extern_crates,
unused_features,
unused_import_braces,
unused_imports,
unused_labels,
unused_lifetimes,
unused_macro_rules,
unused_macros,
unused_must_use,
unused_mut,
unused_parens,
unused_qualifications,
unused_tuple_struct_fields,
unused_unsafe,
unused_variables,
useless_deprecated,
where_clauses_object_safety,
while_true
)]
#![warn(macro_use_extern_crate, unknown_lints)]
#![allow(
bare_trait_objects,
box_pointers,
elided_lifetimes_in_paths,
missing_debug_implementations,
single_use_lifetimes,
unused_results,
variant_size_differences,
warnings,
renamed_and_removed_lints
)]
//! # simple_redis
//!
//! Simple and resilient [redis](https://redis.io/) client based on [redis-rs](https://crates.io/crates/redis) with
//! internal connection and subscription handling.
//!
//! This library provides a very basic, simple API for the most common redis operations.<br>
//! While not as comprehensive or flexiable as [redis-rs](https://crates.io/crates/redis),
//! it does provide a simpler api for most common use cases and operations as well as automatic and resilient internal
//! connection and subscription (pubsub) handling.<br>
//! In addition, the entire API is accessible via redis client and there is no need to manage connection or pubsub
//! instances in parallel.<br>
//!
//! ## Connection Resiliency
//!
//! Connection resiliency is managed by verifying the internally managed connection before every operation against the
//! redis server.<br>
//! In case of any connection issue, a new connection will be allocated to ensure the operation is invoked on a valid
//! connection only.<br>
//! However, this comes at a small performance cost of PING operation to the redis server.<br>
//!
//! ## Subscription Resiliency
//!
//! Subscription resiliency is ensured by recreating the internal pubsub and issuing new subscription requests
//! automatically in case of any error while fetching a message from the subscribed channels.
//!
//! # Examples
//!
//! ## Initialization and Simple Operations
//!
//! ```
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get_string("my_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.set("my_numeric_key", 255.5) {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get::<f32>("my_numeric_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.hgetall("my_map") {
//! Ok(map) => {
//! match map.get("my_field") {
//! Some(value) => println!("Got field value from map: {}", value),
//! None => println!("Map field is emtpy"),
//! }
//! },
//! Err(error) => println!("Unable to read map from Redis: {}", error),
//! };
//!
//! /// run some command that is not built in the library
//! match client.run_command::<String>("ECHO", vec!["testing"]) {
//! Ok(value) => assert_eq!(value, "testing"),
//! _ => panic!("test error"),
//! };
//!
//! /// publish messages
//! let result = client.publish("news_channel", "test message");
//! assert!(result.is_ok());
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Subscription Flow
//!
//! ```rust,no_run
//! use simple_redis::{Interrupts, Message};
//!
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! let mut result = client.subscribe("important_notifications");
//! assert!(result.is_ok());
//! result = client.psubscribe("*_notifications");
//! assert!(result.is_ok());
//!
//! // fetch messages from all subscriptions
//! client.fetch_messages(
//! &mut |message: Message| -> bool {
//! let payload : String = message.get_payload().unwrap();
//! println!("Got message: {}", payload);
//!
//! // continue fetching
//! false
//! },
//! &mut || -> Interrupts { Interrupts::new() },
//! ).unwrap();
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Closing Connection
//!
//! ```rust
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.quit() {
//! Err(error) => println!("Error: {}", error),
//! _ => println!("Connection Closed.")
//! }
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! # Installation
//! In order to use this library, just add it as a dependency:
//!
//! ```ini
//! [dependencies]
//! simple_redis = "*"
//! ```
//!
//! # Contributing
//! See [contributing guide](https://github.com/sagiegurari/simple_redis/blob/master/.github/CONTRIBUTING.md)
//!
//! # License
//! Developed by Sagie Gur-Ari and licensed under the
//! [Apache 2](https://github.com/sagiegurari/simple_redis/blob/master/LICENSE) open source license.
//!
#[cfg(test)]
#[path = "./lib_test.rs"]
mod lib_test;
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
pub mod client;
mod commands;
mod connection;
mod subscriber;
pub mod types;
/// Error Type
pub type RedisError = types::RedisError;
/// PubSub message
pub type Message = types::Message;
/// Blocking operations interrupts
pub type Interrupts = types::Interrupts;
/// Redis result which either holds a value or a Redis error
pub type RedisResult<T> = types::RedisResult<T>;
/// Constructs a new redis client.<br>
/// The redis connection string must be in the following format: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Arguments
///
/// * `connection_string` - The connection string in the format of: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Example
///
/// ```
/// extern crate simple_redis;
///
/// fn main() {
/// match simple_redis::create("redis://127.0.0.1:6379/") {
/// Ok(client) => println!("Created Redis Client"),
/// Err(error) => println!("Unable to create Redis client: {}", error)
/// }
/// }
/// ```
pub fn | (connection_string: &str) -> Result<client::Client, RedisError> {
client::create(connection_string)
}
| create | identifier_name |
lib.rs | #![deny(
absolute_paths_not_starting_with_crate,
ambiguous_associated_items,
ambiguous_glob_reexports,
anonymous_parameters,
arithmetic_overflow,
array_into_iter,
asm_sub_register,
bad_asm_style,
bindings_with_variant_name,
break_with_label_and_loop,
byte_slice_in_packed_struct_with_derive,
cenum_impl_drop_cast,
clashing_extern_declarations,
coherence_leak_check,
conflicting_repr_hints,
confusable_idents,
const_evaluatable_unchecked,
const_item_mutation,
dead_code,
deprecated,
deprecated_cfg_attr_crate_type_name,
deprecated_in_future,
deprecated_where_clause_location,
deref_into_dyn_supertrait,
deref_nullptr,
drop_bounds,
dropping_copy_types,
dropping_references,
duplicate_macro_attributes,
dyn_drop,
ellipsis_inclusive_range_patterns,
enum_intrinsics_non_enums,
explicit_outlives_requirements,
exported_private_dependencies,
ffi_unwind_calls,
for_loops_over_fallibles,
forbidden_lint_groups,
forgetting_copy_types,
forgetting_references,
function_item_references,
ill_formed_attribute_input,
illegal_floating_point_literal_pattern,
implied_bounds_entailment,
improper_ctypes,
improper_ctypes_definitions,
incomplete_features,
incomplete_include,
indirect_structural_match,
ineffective_unstable_trait_impl,
inline_no_sanitize,
invalid_alignment,
invalid_atomic_ordering,
invalid_doc_attributes,
invalid_macro_export_arguments,
invalid_type_param_default,
invalid_value,
irrefutable_let_patterns,
keyword_idents,
large_assignments,
late_bound_lifetime_arguments,
legacy_derive_helpers,
let_underscore_drop,
let_underscore_lock,
macro_expanded_macro_exports_accessed_by_absolute_paths,
map_unit_fn,
meta_variable_misuse,
missing_abi,
missing_copy_implementations,
missing_docs,
missing_fragment_specifier,
mixed_script_confusables,
mutable_transmutes,
named_arguments_used_positionally,
named_asm_labels,
no_mangle_const_items,
no_mangle_generic_items,
non_ascii_idents,
non_camel_case_types,
non_fmt_panics,
non_shorthand_field_patterns,
non_snake_case,
non_upper_case_globals,
nontrivial_structural_match,
noop_method_call,
opaque_hidden_inferred_bound,
order_dependent_trait_objects,
overflowing_literals,
overlapping_range_endpoints,
path_statements,
patterns_in_fns_without_body,
pointer_structural_match,
private_in_public,
proc_macro_back_compat,
proc_macro_derive_resolution_fallback,
pub_use_of_private_extern_crate,
redundant_semicolons,
repr_transparent_external_private_fields,
rust_2021_incompatible_closure_captures,
rust_2021_incompatible_or_patterns,
rust_2021_prefixes_incompatible_syntax,
rust_2021_prelude_collisions,
semicolon_in_expressions_from_macros,
soft_unstable,
special_module_name,
stable_features,
suspicious_auto_trait_impls,
suspicious_double_ref_op,
temporary_cstring_as_ptr,
text_direction_codepoint_in_comment,
text_direction_codepoint_in_literal,
trivial_bounds,
trivial_casts,
trivial_numeric_casts,
type_alias_bounds,
tyvar_behind_raw_pointer,
uncommon_codepoints,
unconditional_panic,
unconditional_recursion,
undefined_naked_function_abi,
unexpected_cfgs,
ungated_async_fn_track_caller,
uninhabited_static,
unknown_crate_types,
unnameable_test_items,
unreachable_code,
unreachable_patterns,
unreachable_pub,
unsafe_code,
unsafe_op_in_unsafe_fn,
unstable_features,
unstable_name_collisions,
unstable_syntax_pre_expansion,
unsupported_calling_conventions,
unused_allocation,
unused_assignments,
unused_assignments,
unused_attributes,
unused_braces,
unused_comparisons,
unused_crate_dependencies,
unused_doc_comments,
unused_extern_crates,
unused_features,
unused_import_braces,
unused_imports,
unused_labels,
unused_lifetimes,
unused_macro_rules,
unused_macros,
unused_must_use,
unused_mut,
unused_parens,
unused_qualifications,
unused_tuple_struct_fields,
unused_unsafe,
unused_variables,
useless_deprecated,
where_clauses_object_safety,
while_true
)]
#![warn(macro_use_extern_crate, unknown_lints)]
#![allow(
bare_trait_objects,
box_pointers,
elided_lifetimes_in_paths,
missing_debug_implementations,
single_use_lifetimes,
unused_results,
variant_size_differences,
warnings,
renamed_and_removed_lints
)]
//! # simple_redis
//!
//! Simple and resilient [redis](https://redis.io/) client based on [redis-rs](https://crates.io/crates/redis) with
//! internal connection and subscription handling.
//!
//! This library provides a very basic, simple API for the most common redis operations.<br>
//! While not as comprehensive or flexiable as [redis-rs](https://crates.io/crates/redis),
//! it does provide a simpler api for most common use cases and operations as well as automatic and resilient internal
//! connection and subscription (pubsub) handling.<br>
//! In addition, the entire API is accessible via redis client and there is no need to manage connection or pubsub
//! instances in parallel.<br>
//!
//! ## Connection Resiliency
//!
//! Connection resiliency is managed by verifying the internally managed connection before every operation against the
//! redis server.<br>
//! In case of any connection issue, a new connection will be allocated to ensure the operation is invoked on a valid
//! connection only.<br>
//! However, this comes at a small performance cost of PING operation to the redis server.<br>
//!
//! ## Subscription Resiliency
//!
//! Subscription resiliency is ensured by recreating the internal pubsub and issuing new subscription requests
//! automatically in case of any error while fetching a message from the subscribed channels.
//!
//! # Examples
//!
//! ## Initialization and Simple Operations
//!
//! ```
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get_string("my_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.set("my_numeric_key", 255.5) {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get::<f32>("my_numeric_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.hgetall("my_map") {
//! Ok(map) => {
//! match map.get("my_field") {
//! Some(value) => println!("Got field value from map: {}", value),
//! None => println!("Map field is emtpy"),
//! }
//! },
//! Err(error) => println!("Unable to read map from Redis: {}", error),
//! };
//!
//! /// run some command that is not built in the library
//! match client.run_command::<String>("ECHO", vec!["testing"]) {
//! Ok(value) => assert_eq!(value, "testing"),
//! _ => panic!("test error"),
//! };
//!
//! /// publish messages
//! let result = client.publish("news_channel", "test message");
//! assert!(result.is_ok());
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Subscription Flow
//!
//! ```rust,no_run
//! use simple_redis::{Interrupts, Message};
//!
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! let mut result = client.subscribe("important_notifications");
//! assert!(result.is_ok());
//! result = client.psubscribe("*_notifications");
//! assert!(result.is_ok());
//!
//! // fetch messages from all subscriptions
//! client.fetch_messages(
//! &mut |message: Message| -> bool {
//! let payload : String = message.get_payload().unwrap();
//! println!("Got message: {}", payload);
//!
//! // continue fetching
//! false
//! },
//! &mut || -> Interrupts { Interrupts::new() },
//! ).unwrap();
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Closing Connection
//!
//! ```rust
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.quit() {
//! Err(error) => println!("Error: {}", error),
//! _ => println!("Connection Closed.")
//! }
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! # Installation
//! In order to use this library, just add it as a dependency:
//!
//! ```ini
//! [dependencies]
//! simple_redis = "*"
//! ```
//!
//! # Contributing
//! See [contributing guide](https://github.com/sagiegurari/simple_redis/blob/master/.github/CONTRIBUTING.md)
//!
//! # License
//! Developed by Sagie Gur-Ari and licensed under the
//! [Apache 2](https://github.com/sagiegurari/simple_redis/blob/master/LICENSE) open source license.
//!
#[cfg(test)]
#[path = "./lib_test.rs"]
mod lib_test;
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
pub mod client;
mod commands;
mod connection;
mod subscriber;
pub mod types;
/// Error Type
pub type RedisError = types::RedisError;
/// PubSub message
pub type Message = types::Message;
/// Blocking operations interrupts
pub type Interrupts = types::Interrupts;
/// Redis result which either holds a value or a Redis error
pub type RedisResult<T> = types::RedisResult<T>;
/// Constructs a new redis client.<br>
/// The redis connection string must be in the following format: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Arguments
///
/// * `connection_string` - The connection string in the format of: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Example | /// extern crate simple_redis;
///
/// fn main() {
/// match simple_redis::create("redis://127.0.0.1:6379/") {
/// Ok(client) => println!("Created Redis Client"),
/// Err(error) => println!("Unable to create Redis client: {}", error)
/// }
/// }
/// ```
pub fn create(connection_string: &str) -> Result<client::Client, RedisError> {
client::create(connection_string)
} | ///
/// ``` | random_line_split |
pc_v0.py | #!/usr/bin/env python3
import csv
import fdb
import psycopg2
import time
import unicodedata
from pprint import pprint
from db_password import (
DBPASS_F1,
DBPASS_PERSONA,
)
_DATABASES = {
'f1': { # F1 e SCC
'ENGINE': 'firebird',
'NAME': '/dados/db/f1/f1.cdb',
'USER': 'sysdba',
'PASSWORD': DBPASS_F1,
'HOST': 'localhost',
'PORT': 23050,
# 'HOST': '192.168.1.98',
# 'PORT': 3050,
'OPTIONS': {'charset': 'WIN1252'},
'TIME_ZONE': None,
'CONN_MAX_AGE': None,
'AUTOCOMMIT': None,
'DIALECT': 3,
},
'persona': { # Nasajon
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "nasajon_db",
'USER': "postgres",
'PASSWORD': DBPASS_PERSONA,
'HOST': 'localhost',
'PORT': '25433',
},
}
def dictlist_zip_columns(cursor, columns):
return [
dict(zip(columns, row))
for row in cursor
]
def custom_dictlist(cursor, name_case=None):
if name_case is None:
columns = [i[0] for i in cursor.description]
else:
columns = [name_case(i[0]) for i in cursor.description]
return dictlist_zip_columns(cursor, columns)
def dictlist(cursor):
return custom_dictlist(cursor)
def dictlist_lower(cursor):
return custom_dictlist(cursor, name_case=str.lower)
def tira_acento(texto):
process = unicodedata.normalize("NFD", texto)
process = process.encode("ascii", "ignore")
return process.decode("utf-8")
def tira_acento_upper(texto):
return tira_acento(texto).upper()
class Main():
def __init__(self, *args, **kwargs):
super(Main, self).__init__(*args, **kwargs)
self.test_context = {
'msgs_ok': [],
'msgs_erro': [],
}
def connect_fdb(self, id, return_error=False):
try:
db = _DATABASES[id]
self.con = fdb.connect(
host=db['HOST'],
port=db['PORT'],
database=db['NAME'],
user=db['USER'],
password=db['PASSWORD'],
sql_dialect=db['DIALECT'],
charset=db['OPTIONS']['charset'],
)
# help(self.con)
except Exception as e:
if return_error:
return e
else:
|
def set_cursor(self):
self.cursor = self.con.cursor()
# help(self.cursor)
# raise SystemExit
def close(self):
self.con.close()
def execute(self, sql):
self.cursor.execute(sql)
def executemany(self, sql, list_tuples):
# "insert into languages (name, year_released) values (?, ?)"
self.cursor.executemany(sql, list_tuples)
def fetchall(self):
return self.cursor.fetchall()
def fetchone(self):
return self.cursor.fetchone()
def itermap(self):
return self.cursor.itermap()
def commit(self):
return self.con.commit()
def rollback(self):
return self.con.rollback()
def conecta_fdb_db(self, db_id):
error = self.connect_fdb(db_id, return_error=True)
if isinstance(error, Exception):
return False, error
else:
try:
self.set_cursor()
self.close()
return True, None
except Exception as e:
return False, e
def acessa_fdb_db(self, db_id):
count = 0
while count < 20:
result, e = self.conecta_fdb_db(db_id)
if result:
self.test_context['msgs_ok'].append(f'Banco "{db_id}" acessível')
break
else:
error = e
count += 1
time.sleep(0.5)
if count != 0:
self.test_context['msgs_erro'].append(
f'({count}) Erro ao acessar banco "{db_id}" [{error}]')
def test_connection(self):
self.acessa_fdb_db('f1')
pprint(self.test_context)
def test_output(self):
self.connect_fdb('f1')
self.set_cursor()
sql = """
select
pc.*
from SCC_PLANOCONTASNOVO pc
"""
self.execute(sql)
print(
''.join([
field[fdb.DESCRIPTION_NAME].ljust(field[fdb.DESCRIPTION_DISPLAY_SIZE])
for field in self.cursor.description
])
)
data = self.fetchall()
pprint(data[:2])
self.execute(sql)
data = self.itermap()
for row in data:
pprint(dict(row))
break
self.execute(sql)
data = self.cursor.fetchallmap()
pprint(data[:2])
# self.executemany(
# "insert into languages (name, year_released) values (?, ?)",
# [
# ('Lisp', 1958),
# ('Dylan', 1995),
# ],
# )
self.close()
def pc_csv(self):
self.connect_fdb('f1')
self.set_cursor()
sql = """
select
pc.*
from SCC_PLANOCONTASNOVO pc
"""
self.execute(sql)
data = self.cursor.itermap()
# pprint(data)
# pprint(csv.list_dialects())
with open('pc.csv', 'w', newline='') as csvfile:
cwriter = csv.writer(
csvfile,
dialect='unix',
delimiter=';',
quotechar='"',
quoting=csv.QUOTE_MINIMAL
)
for row in data:
# pprint(row)
cwriter.writerow([
row['CONTA'],
row['DESCRICAO'],
])
break
self.close()
def exec(self, sql):
self.connect_fdb('f1')
self.set_cursor()
self.execute(sql)
data = self.cursor.fetchallmap()
self.close()
return data
def fb_print_nivel1(self):
data = self.fb_get_pc_nivel1()
for row in data:
row['conta'] = row['conta'].rstrip('.0')
row['descricao'] = tira_acento_upper(row['descricao'])
print("{conta};{descricao}".format(**row))
return data
def connect_pg(self, id):
db = _DATABASES[id]
self.pgcon = psycopg2.connect(
host=db['HOST'],
port=db['PORT'],
database=db['NAME'],
user=db['USER'],
password=db['PASSWORD'],
)
def set_cursor_pg(self):
self.pgcursor = self.pgcon.cursor()
def fetch_pg(self, sql):
self.connect_pg('persona')
self.set_cursor_pg()
self.pgcursor.execute(sql)
# data = self.pgcursor.fetchall()
data = dictlist_lower(self.pgcursor)
self.pgcon.close()
return data
def testa_pg(self):
data = self.fetch_pg("""
select
p.codigo
, p.descricao
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
""")
pprint(data)
def exec_pg(self, sql, dados):
# pprint(sql)
# pprint(dados)
self.connect_pg('persona')
self.set_cursor_pg()
self.pgcursor.execute(sql, dados)
self.pgcon.commit()
self.pgcursor.close()
self.pgcon.close()
def testa_insert_pg(self):
self.exec_pg(
"""
insert into contabil.contasauxiliares (planoauxiliar, codigo, tenant)
select
p.planoauxiliar
, %s
, p.tenant
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
""",
(
"9",
),
)
def pg_get_ca(self, codigo=None):
filtra_codigo = (
f"AND ca.codigo = '{codigo}'"
if codigo else ''
)
return self.fetch_pg(f"""
select
ca.*
from contabil.contasauxiliares ca
where ca.contamae is null
{filtra_codigo} -- filtra_codigo
""")
def pg_print_ca(self):
data = self.pg_get_ca()
# pprint(data)
for row in data:
print(row['codigo'])
def pg_insert_ca_nivel1(self, codigo):
if not self.pg_get_ca(codigo):
sql = """
insert into contabil.contasauxiliares
(planoauxiliar, codigo, tenant)
select
p.planoauxiliar
, %s
, p.tenant
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
"""
self.exec_pg(sql, (codigo, ))
def exec_dictlist_lower(self, sql):
self.connect_fdb('f1')
self.set_cursor()
self.execute(sql)
data = dictlist_lower(self.cursor)
self.close()
return data
def fb_get_pc_nivel1(self, maior_que=' '):
data = self.exec_dictlist_lower(
f"""
select
pc.*
from SCC_PLANOCONTASNOVO pc
where pc.conta not like '0%'
and pc.conta like '%.0.00'
and pc.conta > '{maior_que}'
order by
pc.conta
"""
)
for row in data:
row['conta'] = row['conta'].rstrip('.0')
row['descricao'] = tira_acento_upper(row['descricao'])
return data
if __name__ == '__main__':
main = Main()
# main.test_connection()
# main.test_output()
# main.pc_csv()
dados = main.fb_print_nivel1()
# main.testa_pg()
# main.testa_insert_pg()
### inserindo nível 1
main.pg_print_ca()
dados = main.fb_get_pc_nivel1(maior_que='1.0.00')
for row in dados:
main.pg_insert_ca_nivel1(codigo=row['conta'])
main.pg_print_ca()
| raise e | conditional_block |
pc_v0.py | #!/usr/bin/env python3
import csv
import fdb
import psycopg2
import time
import unicodedata
from pprint import pprint
from db_password import (
DBPASS_F1,
DBPASS_PERSONA,
)
_DATABASES = {
'f1': { # F1 e SCC
'ENGINE': 'firebird',
'NAME': '/dados/db/f1/f1.cdb',
'USER': 'sysdba',
'PASSWORD': DBPASS_F1,
'HOST': 'localhost',
'PORT': 23050,
# 'HOST': '192.168.1.98',
# 'PORT': 3050,
'OPTIONS': {'charset': 'WIN1252'},
'TIME_ZONE': None,
'CONN_MAX_AGE': None,
'AUTOCOMMIT': None,
'DIALECT': 3,
},
'persona': { # Nasajon
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "nasajon_db",
'USER': "postgres",
'PASSWORD': DBPASS_PERSONA,
'HOST': 'localhost',
'PORT': '25433',
},
}
def dictlist_zip_columns(cursor, columns):
return [
dict(zip(columns, row))
for row in cursor
]
def custom_dictlist(cursor, name_case=None):
if name_case is None:
columns = [i[0] for i in cursor.description]
else:
columns = [name_case(i[0]) for i in cursor.description] | def dictlist(cursor):
return custom_dictlist(cursor)
def dictlist_lower(cursor):
return custom_dictlist(cursor, name_case=str.lower)
def tira_acento(texto):
process = unicodedata.normalize("NFD", texto)
process = process.encode("ascii", "ignore")
return process.decode("utf-8")
def tira_acento_upper(texto):
return tira_acento(texto).upper()
class Main():
def __init__(self, *args, **kwargs):
super(Main, self).__init__(*args, **kwargs)
self.test_context = {
'msgs_ok': [],
'msgs_erro': [],
}
def connect_fdb(self, id, return_error=False):
try:
db = _DATABASES[id]
self.con = fdb.connect(
host=db['HOST'],
port=db['PORT'],
database=db['NAME'],
user=db['USER'],
password=db['PASSWORD'],
sql_dialect=db['DIALECT'],
charset=db['OPTIONS']['charset'],
)
# help(self.con)
except Exception as e:
if return_error:
return e
else:
raise e
def set_cursor(self):
self.cursor = self.con.cursor()
# help(self.cursor)
# raise SystemExit
def close(self):
self.con.close()
def execute(self, sql):
self.cursor.execute(sql)
def executemany(self, sql, list_tuples):
# "insert into languages (name, year_released) values (?, ?)"
self.cursor.executemany(sql, list_tuples)
def fetchall(self):
return self.cursor.fetchall()
def fetchone(self):
return self.cursor.fetchone()
def itermap(self):
return self.cursor.itermap()
def commit(self):
return self.con.commit()
def rollback(self):
return self.con.rollback()
def conecta_fdb_db(self, db_id):
error = self.connect_fdb(db_id, return_error=True)
if isinstance(error, Exception):
return False, error
else:
try:
self.set_cursor()
self.close()
return True, None
except Exception as e:
return False, e
def acessa_fdb_db(self, db_id):
count = 0
while count < 20:
result, e = self.conecta_fdb_db(db_id)
if result:
self.test_context['msgs_ok'].append(f'Banco "{db_id}" acessível')
break
else:
error = e
count += 1
time.sleep(0.5)
if count != 0:
self.test_context['msgs_erro'].append(
f'({count}) Erro ao acessar banco "{db_id}" [{error}]')
def test_connection(self):
self.acessa_fdb_db('f1')
pprint(self.test_context)
def test_output(self):
self.connect_fdb('f1')
self.set_cursor()
sql = """
select
pc.*
from SCC_PLANOCONTASNOVO pc
"""
self.execute(sql)
print(
''.join([
field[fdb.DESCRIPTION_NAME].ljust(field[fdb.DESCRIPTION_DISPLAY_SIZE])
for field in self.cursor.description
])
)
data = self.fetchall()
pprint(data[:2])
self.execute(sql)
data = self.itermap()
for row in data:
pprint(dict(row))
break
self.execute(sql)
data = self.cursor.fetchallmap()
pprint(data[:2])
# self.executemany(
# "insert into languages (name, year_released) values (?, ?)",
# [
# ('Lisp', 1958),
# ('Dylan', 1995),
# ],
# )
self.close()
def pc_csv(self):
self.connect_fdb('f1')
self.set_cursor()
sql = """
select
pc.*
from SCC_PLANOCONTASNOVO pc
"""
self.execute(sql)
data = self.cursor.itermap()
# pprint(data)
# pprint(csv.list_dialects())
with open('pc.csv', 'w', newline='') as csvfile:
cwriter = csv.writer(
csvfile,
dialect='unix',
delimiter=';',
quotechar='"',
quoting=csv.QUOTE_MINIMAL
)
for row in data:
# pprint(row)
cwriter.writerow([
row['CONTA'],
row['DESCRICAO'],
])
break
self.close()
def exec(self, sql):
self.connect_fdb('f1')
self.set_cursor()
self.execute(sql)
data = self.cursor.fetchallmap()
self.close()
return data
def fb_print_nivel1(self):
data = self.fb_get_pc_nivel1()
for row in data:
row['conta'] = row['conta'].rstrip('.0')
row['descricao'] = tira_acento_upper(row['descricao'])
print("{conta};{descricao}".format(**row))
return data
def connect_pg(self, id):
db = _DATABASES[id]
self.pgcon = psycopg2.connect(
host=db['HOST'],
port=db['PORT'],
database=db['NAME'],
user=db['USER'],
password=db['PASSWORD'],
)
def set_cursor_pg(self):
self.pgcursor = self.pgcon.cursor()
def fetch_pg(self, sql):
self.connect_pg('persona')
self.set_cursor_pg()
self.pgcursor.execute(sql)
# data = self.pgcursor.fetchall()
data = dictlist_lower(self.pgcursor)
self.pgcon.close()
return data
def testa_pg(self):
data = self.fetch_pg("""
select
p.codigo
, p.descricao
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
""")
pprint(data)
def exec_pg(self, sql, dados):
# pprint(sql)
# pprint(dados)
self.connect_pg('persona')
self.set_cursor_pg()
self.pgcursor.execute(sql, dados)
self.pgcon.commit()
self.pgcursor.close()
self.pgcon.close()
def testa_insert_pg(self):
self.exec_pg(
"""
insert into contabil.contasauxiliares (planoauxiliar, codigo, tenant)
select
p.planoauxiliar
, %s
, p.tenant
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
""",
(
"9",
),
)
def pg_get_ca(self, codigo=None):
filtra_codigo = (
f"AND ca.codigo = '{codigo}'"
if codigo else ''
)
return self.fetch_pg(f"""
select
ca.*
from contabil.contasauxiliares ca
where ca.contamae is null
{filtra_codigo} -- filtra_codigo
""")
def pg_print_ca(self):
data = self.pg_get_ca()
# pprint(data)
for row in data:
print(row['codigo'])
def pg_insert_ca_nivel1(self, codigo):
if not self.pg_get_ca(codigo):
sql = """
insert into contabil.contasauxiliares
(planoauxiliar, codigo, tenant)
select
p.planoauxiliar
, %s
, p.tenant
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
"""
self.exec_pg(sql, (codigo, ))
def exec_dictlist_lower(self, sql):
self.connect_fdb('f1')
self.set_cursor()
self.execute(sql)
data = dictlist_lower(self.cursor)
self.close()
return data
def fb_get_pc_nivel1(self, maior_que=' '):
data = self.exec_dictlist_lower(
f"""
select
pc.*
from SCC_PLANOCONTASNOVO pc
where pc.conta not like '0%'
and pc.conta like '%.0.00'
and pc.conta > '{maior_que}'
order by
pc.conta
"""
)
for row in data:
row['conta'] = row['conta'].rstrip('.0')
row['descricao'] = tira_acento_upper(row['descricao'])
return data
if __name__ == '__main__':
main = Main()
# main.test_connection()
# main.test_output()
# main.pc_csv()
dados = main.fb_print_nivel1()
# main.testa_pg()
# main.testa_insert_pg()
### inserindo nível 1
main.pg_print_ca()
dados = main.fb_get_pc_nivel1(maior_que='1.0.00')
for row in dados:
main.pg_insert_ca_nivel1(codigo=row['conta'])
main.pg_print_ca() | return dictlist_zip_columns(cursor, columns)
| random_line_split |
pc_v0.py | #!/usr/bin/env python3
import csv
import fdb
import psycopg2
import time
import unicodedata
from pprint import pprint
from db_password import (
DBPASS_F1,
DBPASS_PERSONA,
)
_DATABASES = {
'f1': { # F1 e SCC
'ENGINE': 'firebird',
'NAME': '/dados/db/f1/f1.cdb',
'USER': 'sysdba',
'PASSWORD': DBPASS_F1,
'HOST': 'localhost',
'PORT': 23050,
# 'HOST': '192.168.1.98',
# 'PORT': 3050,
'OPTIONS': {'charset': 'WIN1252'},
'TIME_ZONE': None,
'CONN_MAX_AGE': None,
'AUTOCOMMIT': None,
'DIALECT': 3,
},
'persona': { # Nasajon
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "nasajon_db",
'USER': "postgres",
'PASSWORD': DBPASS_PERSONA,
'HOST': 'localhost',
'PORT': '25433',
},
}
def dictlist_zip_columns(cursor, columns):
return [
dict(zip(columns, row))
for row in cursor
]
def custom_dictlist(cursor, name_case=None):
if name_case is None:
columns = [i[0] for i in cursor.description]
else:
columns = [name_case(i[0]) for i in cursor.description]
return dictlist_zip_columns(cursor, columns)
def dictlist(cursor):
return custom_dictlist(cursor)
def dictlist_lower(cursor):
return custom_dictlist(cursor, name_case=str.lower)
def tira_acento(texto):
process = unicodedata.normalize("NFD", texto)
process = process.encode("ascii", "ignore")
return process.decode("utf-8")
def tira_acento_upper(texto):
return tira_acento(texto).upper()
class Main():
def __init__(self, *args, **kwargs):
super(Main, self).__init__(*args, **kwargs)
self.test_context = {
'msgs_ok': [],
'msgs_erro': [],
}
def connect_fdb(self, id, return_error=False):
try:
db = _DATABASES[id]
self.con = fdb.connect(
host=db['HOST'],
port=db['PORT'],
database=db['NAME'],
user=db['USER'],
password=db['PASSWORD'],
sql_dialect=db['DIALECT'],
charset=db['OPTIONS']['charset'],
)
# help(self.con)
except Exception as e:
if return_error:
return e
else:
raise e
def set_cursor(self):
self.cursor = self.con.cursor()
# help(self.cursor)
# raise SystemExit
def close(self):
self.con.close()
def execute(self, sql):
self.cursor.execute(sql)
def executemany(self, sql, list_tuples):
# "insert into languages (name, year_released) values (?, ?)"
self.cursor.executemany(sql, list_tuples)
def fetchall(self):
return self.cursor.fetchall()
def fetchone(self):
return self.cursor.fetchone()
def itermap(self):
return self.cursor.itermap()
def commit(self):
return self.con.commit()
def rollback(self):
return self.con.rollback()
def conecta_fdb_db(self, db_id):
error = self.connect_fdb(db_id, return_error=True)
if isinstance(error, Exception):
return False, error
else:
try:
self.set_cursor()
self.close()
return True, None
except Exception as e:
return False, e
def acessa_fdb_db(self, db_id):
count = 0
while count < 20:
result, e = self.conecta_fdb_db(db_id)
if result:
self.test_context['msgs_ok'].append(f'Banco "{db_id}" acessível')
break
else:
error = e
count += 1
time.sleep(0.5)
if count != 0:
self.test_context['msgs_erro'].append(
f'({count}) Erro ao acessar banco "{db_id}" [{error}]')
def test_connection(self):
self.acessa_fdb_db('f1')
pprint(self.test_context)
def test_output(self):
self.connect_fdb('f1')
self.set_cursor()
sql = """
select
pc.*
from SCC_PLANOCONTASNOVO pc
"""
self.execute(sql)
print(
''.join([
field[fdb.DESCRIPTION_NAME].ljust(field[fdb.DESCRIPTION_DISPLAY_SIZE])
for field in self.cursor.description
])
)
data = self.fetchall()
pprint(data[:2])
self.execute(sql)
data = self.itermap()
for row in data:
pprint(dict(row))
break
self.execute(sql)
data = self.cursor.fetchallmap()
pprint(data[:2])
# self.executemany(
# "insert into languages (name, year_released) values (?, ?)",
# [
# ('Lisp', 1958),
# ('Dylan', 1995),
# ],
# )
self.close()
def pc_csv(self):
self.connect_fdb('f1')
self.set_cursor()
sql = """
select
pc.*
from SCC_PLANOCONTASNOVO pc
"""
self.execute(sql)
data = self.cursor.itermap()
# pprint(data)
# pprint(csv.list_dialects())
with open('pc.csv', 'w', newline='') as csvfile:
cwriter = csv.writer(
csvfile,
dialect='unix',
delimiter=';',
quotechar='"',
quoting=csv.QUOTE_MINIMAL
)
for row in data:
# pprint(row)
cwriter.writerow([
row['CONTA'],
row['DESCRICAO'],
])
break
self.close()
def exec(self, sql):
self.connect_fdb('f1')
self.set_cursor()
self.execute(sql)
data = self.cursor.fetchallmap()
self.close()
return data
def fb_print_nivel1(self):
data = self.fb_get_pc_nivel1()
for row in data:
row['conta'] = row['conta'].rstrip('.0')
row['descricao'] = tira_acento_upper(row['descricao'])
print("{conta};{descricao}".format(**row))
return data
def connect_pg(self, id):
db = _DATABASES[id]
self.pgcon = psycopg2.connect(
host=db['HOST'],
port=db['PORT'],
database=db['NAME'],
user=db['USER'],
password=db['PASSWORD'],
)
def set_cursor_pg(self):
self.pgcursor = self.pgcon.cursor()
def fetch_pg(self, sql):
self.connect_pg('persona')
self.set_cursor_pg()
self.pgcursor.execute(sql)
# data = self.pgcursor.fetchall()
data = dictlist_lower(self.pgcursor)
self.pgcon.close()
return data
def testa_pg(self):
d |
def exec_pg(self, sql, dados):
# pprint(sql)
# pprint(dados)
self.connect_pg('persona')
self.set_cursor_pg()
self.pgcursor.execute(sql, dados)
self.pgcon.commit()
self.pgcursor.close()
self.pgcon.close()
def testa_insert_pg(self):
self.exec_pg(
"""
insert into contabil.contasauxiliares (planoauxiliar, codigo, tenant)
select
p.planoauxiliar
, %s
, p.tenant
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
""",
(
"9",
),
)
def pg_get_ca(self, codigo=None):
filtra_codigo = (
f"AND ca.codigo = '{codigo}'"
if codigo else ''
)
return self.fetch_pg(f"""
select
ca.*
from contabil.contasauxiliares ca
where ca.contamae is null
{filtra_codigo} -- filtra_codigo
""")
def pg_print_ca(self):
data = self.pg_get_ca()
# pprint(data)
for row in data:
print(row['codigo'])
def pg_insert_ca_nivel1(self, codigo):
if not self.pg_get_ca(codigo):
sql = """
insert into contabil.contasauxiliares
(planoauxiliar, codigo, tenant)
select
p.planoauxiliar
, %s
, p.tenant
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
"""
self.exec_pg(sql, (codigo, ))
def exec_dictlist_lower(self, sql):
self.connect_fdb('f1')
self.set_cursor()
self.execute(sql)
data = dictlist_lower(self.cursor)
self.close()
return data
def fb_get_pc_nivel1(self, maior_que=' '):
data = self.exec_dictlist_lower(
f"""
select
pc.*
from SCC_PLANOCONTASNOVO pc
where pc.conta not like '0%'
and pc.conta like '%.0.00'
and pc.conta > '{maior_que}'
order by
pc.conta
"""
)
for row in data:
row['conta'] = row['conta'].rstrip('.0')
row['descricao'] = tira_acento_upper(row['descricao'])
return data
if __name__ == '__main__':
main = Main()
# main.test_connection()
# main.test_output()
# main.pc_csv()
dados = main.fb_print_nivel1()
# main.testa_pg()
# main.testa_insert_pg()
### inserindo nível 1
main.pg_print_ca()
dados = main.fb_get_pc_nivel1(maior_que='1.0.00')
for row in dados:
main.pg_insert_ca_nivel1(codigo=row['conta'])
main.pg_print_ca()
| ata = self.fetch_pg("""
select
p.codigo
, p.descricao
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
""")
pprint(data)
| identifier_body |
pc_v0.py | #!/usr/bin/env python3
import csv
import fdb
import psycopg2
import time
import unicodedata
from pprint import pprint
from db_password import (
DBPASS_F1,
DBPASS_PERSONA,
)
_DATABASES = {
'f1': { # F1 e SCC
'ENGINE': 'firebird',
'NAME': '/dados/db/f1/f1.cdb',
'USER': 'sysdba',
'PASSWORD': DBPASS_F1,
'HOST': 'localhost',
'PORT': 23050,
# 'HOST': '192.168.1.98',
# 'PORT': 3050,
'OPTIONS': {'charset': 'WIN1252'},
'TIME_ZONE': None,
'CONN_MAX_AGE': None,
'AUTOCOMMIT': None,
'DIALECT': 3,
},
'persona': { # Nasajon
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "nasajon_db",
'USER': "postgres",
'PASSWORD': DBPASS_PERSONA,
'HOST': 'localhost',
'PORT': '25433',
},
}
def dictlist_zip_columns(cursor, columns):
return [
dict(zip(columns, row))
for row in cursor
]
def custom_dictlist(cursor, name_case=None):
if name_case is None:
columns = [i[0] for i in cursor.description]
else:
columns = [name_case(i[0]) for i in cursor.description]
return dictlist_zip_columns(cursor, columns)
def dictlist(cursor):
return custom_dictlist(cursor)
def dictlist_lower(cursor):
return custom_dictlist(cursor, name_case=str.lower)
def tira_acento(texto):
process = unicodedata.normalize("NFD", texto)
process = process.encode("ascii", "ignore")
return process.decode("utf-8")
def tira_acento_upper(texto):
return tira_acento(texto).upper()
class Main():
def __init__(self, *args, **kwargs):
super(Main, self).__init__(*args, **kwargs)
self.test_context = {
'msgs_ok': [],
'msgs_erro': [],
}
def connect_fdb(self, id, return_error=False):
try:
db = _DATABASES[id]
self.con = fdb.connect(
host=db['HOST'],
port=db['PORT'],
database=db['NAME'],
user=db['USER'],
password=db['PASSWORD'],
sql_dialect=db['DIALECT'],
charset=db['OPTIONS']['charset'],
)
# help(self.con)
except Exception as e:
if return_error:
return e
else:
raise e
def set_cursor(self):
self.cursor = self.con.cursor()
# help(self.cursor)
# raise SystemExit
def close(self):
self.con.close()
def execute(self, sql):
self.cursor.execute(sql)
def executemany(self, sql, list_tuples):
# "insert into languages (name, year_released) values (?, ?)"
self.cursor.executemany(sql, list_tuples)
def fetchall(self):
return self.cursor.fetchall()
def fetchone(self):
return self.cursor.fetchone()
def itermap(self):
return self.cursor.itermap()
def commit(self):
return self.con.commit()
def rollback(self):
return self.con.rollback()
def conecta_fdb_db(self, db_id):
error = self.connect_fdb(db_id, return_error=True)
if isinstance(error, Exception):
return False, error
else:
try:
self.set_cursor()
self.close()
return True, None
except Exception as e:
return False, e
def acessa_fdb_db(self, db_id):
count = 0
while count < 20:
result, e = self.conecta_fdb_db(db_id)
if result:
self.test_context['msgs_ok'].append(f'Banco "{db_id}" acessível')
break
else:
error = e
count += 1
time.sleep(0.5)
if count != 0:
self.test_context['msgs_erro'].append(
f'({count}) Erro ao acessar banco "{db_id}" [{error}]')
def test_connection(self):
self.acessa_fdb_db('f1')
pprint(self.test_context)
def test_output(self):
self.connect_fdb('f1')
self.set_cursor()
sql = """
select
pc.*
from SCC_PLANOCONTASNOVO pc
"""
self.execute(sql)
print(
''.join([
field[fdb.DESCRIPTION_NAME].ljust(field[fdb.DESCRIPTION_DISPLAY_SIZE])
for field in self.cursor.description
])
)
data = self.fetchall()
pprint(data[:2])
self.execute(sql)
data = self.itermap()
for row in data:
pprint(dict(row))
break
self.execute(sql)
data = self.cursor.fetchallmap()
pprint(data[:2])
# self.executemany(
# "insert into languages (name, year_released) values (?, ?)",
# [
# ('Lisp', 1958),
# ('Dylan', 1995),
# ],
# )
self.close()
def pc_csv(self):
self.connect_fdb('f1')
self.set_cursor()
sql = """
select
pc.*
from SCC_PLANOCONTASNOVO pc
"""
self.execute(sql)
data = self.cursor.itermap()
# pprint(data)
# pprint(csv.list_dialects())
with open('pc.csv', 'w', newline='') as csvfile:
cwriter = csv.writer(
csvfile,
dialect='unix',
delimiter=';',
quotechar='"',
quoting=csv.QUOTE_MINIMAL
)
for row in data:
# pprint(row)
cwriter.writerow([
row['CONTA'],
row['DESCRICAO'],
])
break
self.close()
def exec(self, sql):
self.connect_fdb('f1')
self.set_cursor()
self.execute(sql)
data = self.cursor.fetchallmap()
self.close()
return data
def fb_print_nivel1(self):
data = self.fb_get_pc_nivel1()
for row in data:
row['conta'] = row['conta'].rstrip('.0')
row['descricao'] = tira_acento_upper(row['descricao'])
print("{conta};{descricao}".format(**row))
return data
def connect_pg(self, id):
db = _DATABASES[id]
self.pgcon = psycopg2.connect(
host=db['HOST'],
port=db['PORT'],
database=db['NAME'],
user=db['USER'],
password=db['PASSWORD'],
)
def set_cursor_pg(self):
self.pgcursor = self.pgcon.cursor()
def f | self, sql):
self.connect_pg('persona')
self.set_cursor_pg()
self.pgcursor.execute(sql)
# data = self.pgcursor.fetchall()
data = dictlist_lower(self.pgcursor)
self.pgcon.close()
return data
def testa_pg(self):
data = self.fetch_pg("""
select
p.codigo
, p.descricao
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
""")
pprint(data)
def exec_pg(self, sql, dados):
# pprint(sql)
# pprint(dados)
self.connect_pg('persona')
self.set_cursor_pg()
self.pgcursor.execute(sql, dados)
self.pgcon.commit()
self.pgcursor.close()
self.pgcon.close()
def testa_insert_pg(self):
self.exec_pg(
"""
insert into contabil.contasauxiliares (planoauxiliar, codigo, tenant)
select
p.planoauxiliar
, %s
, p.tenant
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
""",
(
"9",
),
)
def pg_get_ca(self, codigo=None):
filtra_codigo = (
f"AND ca.codigo = '{codigo}'"
if codigo else ''
)
return self.fetch_pg(f"""
select
ca.*
from contabil.contasauxiliares ca
where ca.contamae is null
{filtra_codigo} -- filtra_codigo
""")
def pg_print_ca(self):
data = self.pg_get_ca()
# pprint(data)
for row in data:
print(row['codigo'])
def pg_insert_ca_nivel1(self, codigo):
if not self.pg_get_ca(codigo):
sql = """
insert into contabil.contasauxiliares
(planoauxiliar, codigo, tenant)
select
p.planoauxiliar
, %s
, p.tenant
from contabil.planosauxiliares p
where p.codigo = 'SCC ANSELMO'
"""
self.exec_pg(sql, (codigo, ))
def exec_dictlist_lower(self, sql):
self.connect_fdb('f1')
self.set_cursor()
self.execute(sql)
data = dictlist_lower(self.cursor)
self.close()
return data
def fb_get_pc_nivel1(self, maior_que=' '):
data = self.exec_dictlist_lower(
f"""
select
pc.*
from SCC_PLANOCONTASNOVO pc
where pc.conta not like '0%'
and pc.conta like '%.0.00'
and pc.conta > '{maior_que}'
order by
pc.conta
"""
)
for row in data:
row['conta'] = row['conta'].rstrip('.0')
row['descricao'] = tira_acento_upper(row['descricao'])
return data
if __name__ == '__main__':
main = Main()
# main.test_connection()
# main.test_output()
# main.pc_csv()
dados = main.fb_print_nivel1()
# main.testa_pg()
# main.testa_insert_pg()
### inserindo nível 1
main.pg_print_ca()
dados = main.fb_get_pc_nivel1(maior_que='1.0.00')
for row in dados:
main.pg_insert_ca_nivel1(codigo=row['conta'])
main.pg_print_ca()
| etch_pg( | identifier_name |
structs.rs | use super::*;
use mio::{
*,
event::Evented,
};
use ::std::{
io,
io::{
Read,
Write,
ErrorKind,
},
time,
};
#[derive(Debug)]
pub struct Middleman {
stream: mio::net::TcpStream,
buf: Vec<u8>,
buf_occupancy: usize,
payload_bytes: Option<u32>,
}
impl Middleman { fn check_payload(&mut self) {
if self.payload_bytes.is_none() && self.buf_occupancy >= 4 {
self.payload_bytes = Some(
bincode::deserialize(&self.buf[..4])
.unwrap()
)
}
}
/// Create a new Middleman structure to wrap the given Mio TcpStream.
/// The Middleman implements `mio::Evented`, but delegates its functions to this given stream
/// As such, registering the Middleman and registering the TcpStream are anaologous.
pub fn new(stream: mio::net::TcpStream) -> Middleman {
Self {
stream: stream,
buf: Vec::with_capacity(128),
buf_occupancy: 0,
payload_bytes: None,
}
}
fn read_in(&mut self) -> Result<usize, io::Error> {
let mut total = 0;
loop {
let limit = (self.buf_occupancy + 64) + (self.buf_occupancy);
if self.buf.len() < limit {
self.buf.resize(limit, 0u8);
}
match self.stream.read(&mut self.buf[self.buf_occupancy..]) {
Ok(0) => return Ok(total),
Ok(bytes) => {
self.buf_occupancy += bytes;
total += bytes;
},
Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
return Ok(total);
},
Err(e) => return Err(e),
};
}
}
/// Write the given message directly into the TcpStream. Returns `Err` variant if there
/// is a problem serializing or writing the message. The call returns Ok(()) once the bytes
/// are entirely written to the stream.
pub fn send<M: Message>(&mut self, m: &M) -> Result<(), SendError> {
self.send_packed(
& PackedMessage::new(m)?
)?;
Ok(())
}
/// See `send`. This variant can be useful to
/// avoid the overhead of repeatedly packing a message for whatever reason, eg: sending
/// the same message using multiple Middleman structs.
///
/// Note that this function does NOT check for internal consistency of the packed message.
/// So, if this message was constructed by a means other than `Packed::new`, then the
/// results may be unpredictable.
pub fn send_packed(&mut self, msg: & PackedMessage) -> Result<(), io::Error> {
self.stream.write_all(&msg.0)
}
/// Conume an iterator over some Message structs, sending them all in the order traversed (see `send`).
/// Returns (a,b) where a gives the total number of messages sent successfully and where b is Ok if
/// nothing goes wrong and an error otherwise. In the event of the first error, no more messages will be sent.
pub fn send_all<'m, I, M>(&'m mut self, msg_iter: I) -> (usize, Result<(), SendError>)
where
M: Message + 'm,
I: Iterator<Item = &'m M>,
{
let mut total = 0;
for msg in msg_iter {
match self.send(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// See `send_all` and `send_packed`. This uses the message iterator from the former and
/// the packed messages from the latter.
pub fn send_all_packed<'m, I>(&'m mut self, packed_msg_iter: I) -> (usize, Result<(), io::Error>)
where
I: Iterator<Item = &'m PackedMessage>,
{
let mut total = 0;
for msg in packed_msg_iter {
match self.send_packed(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// Attempt to dedserialize some data in the receiving buffer into a single
/// complete structure with the given type M. If there is insufficient data
/// at the moment, Ok(None) is returned.
///
/// As the type is provided by the reader, it is possible for the sent
/// message to be misinterpreted as a different type. At best, this is detected
/// by a failure in deserialization. If an error occurs, the data is not consumed
/// from the Middleman. Subsequent reads will operate on the same data.
///
/// NOTE: The correctness of this call depends on the sender sending an _internally consistent_
/// `PackedMessage`. If you (or the sender) are manually manipulating the internal state of
/// sent messages this may cause errors for the receiver. If you are sticking to the Middleman API
/// and treating each `PackedMessage` as a black box, everything should be fine.
pub fn recv<M: Message>(&mut self) -> Result<Option<M>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let decoded: M = bincode::deserialize(
&self.buf[4..buf_end]
)?;
self.payload_bytes = None;
self.buf.drain(0..buf_end);
self.buf_occupancy -= buf_end;
return Ok(Some(decoded))
}
}
Ok(None)
}
/// See `recv`. Will repeatedly call recv() until the next message is not yet ready.
/// Recevied messages are placed into the buffer `dest_vector`. The return result is (a,b)
/// where a is the total number of message successfully received and where b is OK(()) if all goes well
/// and some Err otherwise. In the event of the first error, the call will return and not receive any further.
pub fn recv_all_into<M: Message>(&mut self, dest_vector: &mut Vec<M>) -> (usize, Result<(), RecvError>) {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { dest_vector.push(msg); total += 1; },
Err(e) => return (total, Err(e)),
};
}
}
/// Hijack the mio event loop, reading and writing to the socket as polling allows.
/// Events not related to the recv() of this middleman (determined from the provided mio::Token)
/// are pushed into the provided extra_events vector. Returns Ok(Some(_)) if a
/// message was successfully received. May return Ok(None) if the user provides as timeout some
// non-none Duration. Returns Err(_) if something goes wrong with reading from the socket or
/// deserializing the message. See try_recv for more information.
/// WARNING: The user should take care to iterate over these events also, as without them all the
/// Evented objects registered with the provided poll object might experience lost wakeups.
/// It is suggested that in the event of any recv_blocking calls in your loop, you extend the event
/// loop with a drain() on the same vector passed here as extra_events (using the iterator chain function, for example.)
pub fn recv_blocking<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
my_tok: Token,
extra_events: &mut Vec<Event>,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
let mut res = None;
loop {
for event in events.iter() {
let tok = event.token();
if res.is_none() && tok == my_tok {
if ! event.readiness().is_readable() {
continue;
}
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => {
// got a message!
res = Some(msg);
},
Ok(None) => (),
Err(e) => return Err(e),
}
} else {
extra_events.push(event);
}
}
if let Some(msg) = res { | return Ok(Some(msg));
} else {
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
}
/// See `recv_blocking`. This function is intended as an alternative for use
/// for cases where it is _certain_ that this Middleman is the only registered `mio::Evented`
/// for the provided `Poll` and `Events` objects. Thus, the call _WILL NOT CHECK_ the token at all,
/// presuming that all events are associated with this middleman.
pub fn recv_blocking_solo<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
loop {
for event in events.iter() {
if event.readiness().is_readable(){
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => return Ok(Some(msg)),
Ok(None) => (),
Err(e) => return Err(e),
}
}
}
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking_solo()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
/// Similar to `recv_all_into`, but rather than storing each received message 'm',
/// the provided function is called with arguments (self, m) where self is `&mut self`.
/// This allows for ergonomic utility of the received messages using a closure.
pub fn recv_all_map<F,M>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where M: Message, F: FnMut(&mut Self, M) + Sized {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { total += 1; func(self, msg) },
Err(e) => return (total, Err(e)),
};
}
}
/// Combination of `recv_all_map` and `recv_packed`.
pub fn recv_all_packed_map<F>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where F: FnMut(&mut Self, PackedMessage) + Sized {
let mut total = 0;
loop {
match self.recv_packed() {
Ok(None) => return (total, Ok(())),
Ok(Some(packed)) => { total += 1; func(self, packed) },
Err(e) => return (total, Err(e)),
};
}
}
/// Similar to `recv`, except builds (instead of some M: Message), a `PackedMessage` object.
/// These packed messages can be deserialized later, sent on the line without knowledge of the
/// message type etc.
pub fn recv_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let mut vec = self.buf.drain(0..buf_end)
.collect::<Vec<_>>();
self.payload_bytes = None;
self.buf_occupancy -= buf_end;
return Ok(Some(PackedMessage(vec)))
}
}
Ok(None)
}
/// Similar to `recv_packed`, but the potentially-read bytes are not actually removed
/// from the stream. The message will _still be there_.
pub fn peek_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
return Ok(
Some(
PackedMessage::from_raw(
self.buf[4..buf_end].to_vec()
)
)
)
}
}
Ok(None)
}
}
/// This structure represents the serialized form of some `Message`-implementing structure.
/// Dealing with a PackedMessage may be suitable when:
/// (1) You need to send/store/receive a message but don't need to actually _use_ it yourself.
/// (2) You want to serialize a message once, and send it multiple times.
/// (3) You want to read and discard a message whose type is unknown.
///
/// NOTE: The packed message maps 1:1 with the bytes that travel over the TcpStream. As such,
/// packed messages also contain the 4-byte length preable. The user is discocuraged from
/// manipulating the contents of a packed message. The `recv` statement relies on consistency
/// of packed messages
pub struct PackedMessage(Vec<u8>);
impl PackedMessage {
/// Create a new PakcedMessage from the given `Message`-implementing struct
pub fn new<M: Message>(m: &M) -> Result<Self, PackingError> {
let m_len: usize = bincode::serialized_size(&m)? as usize;
if m_len > ::std::u32::MAX as usize {
return Err(PackingError::TooBigToRepresent);
}
let tot_len = m_len+4;
let mut vec = Vec::with_capacity(tot_len);
vec.resize(tot_len, 0u8);
bincode::serialize_into(&mut vec[0..4], &(m_len as u32))?;
bincode::serialize_into(&mut vec[4..tot_len], m)?;
Ok(PackedMessage(vec))
}
/// Attempt to unpack this Packedmessage given a type hint. This may fail if the
/// PackedMessage isn't internally consistent or the type doesn't match that
/// of the type used for serialization.
pub fn unpack<M: Message>(&self) -> Result<M, Box<bincode::ErrorKind>> {
bincode::deserialize(&self.0[4..])
}
/// Unwrap the byte buffer comprising this PackedMessage
#[inline] pub fn into_raw(self) -> Vec<u8> { self.0 }
/// Accept the given byte buffer as the basis for a PackedMessage
///
/// WARNING: Use this at your own risk! The `recv` functions and their variants rely on
/// the correct contents of messages to work correcty.
///
/// NOTE: The first 4 bytes of a the buffer are used to store the length of the payload.
#[inline] pub fn from_raw(v: Vec<u8>) -> Self { PackedMessage(v) }
/// Return the number of bytes this packed message contains. Maps 1:1 with
/// the bit complexity of the message sent over the network.
#[inline] pub fn byte_len(&self) -> usize { self.0.len() }
/// Acquire an immutable reference to the internal buffer of the packed message.
#[inline] pub fn get_raw(&self) -> &Vec<u8> { &self.0 }
/// Acquire a mutable reference to the internal buffer of the packed message.
///
/// WARNING: Contents of a PackedMessage represent a delicata internal state. Sending an
/// internally inconsistent PackedMessage will compromise the connection. Use at your own risk!
#[inline] pub fn get_mut_raw(&mut self) -> &mut Vec<u8> { &mut self.0 }
}
impl Evented for Middleman {
fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.register(poll, token, interest, opts)
}
fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
self.stream.deregister(poll)
}
} | // message ready to go. Exiting loop | random_line_split |
structs.rs | use super::*;
use mio::{
*,
event::Evented,
};
use ::std::{
io,
io::{
Read,
Write,
ErrorKind,
},
time,
};
#[derive(Debug)]
pub struct Middleman {
stream: mio::net::TcpStream,
buf: Vec<u8>,
buf_occupancy: usize,
payload_bytes: Option<u32>,
}
impl Middleman { fn check_payload(&mut self) {
if self.payload_bytes.is_none() && self.buf_occupancy >= 4 {
self.payload_bytes = Some(
bincode::deserialize(&self.buf[..4])
.unwrap()
)
}
}
/// Create a new Middleman structure to wrap the given Mio TcpStream.
/// The Middleman implements `mio::Evented`, but delegates its functions to this given stream
/// As such, registering the Middleman and registering the TcpStream are anaologous.
pub fn new(stream: mio::net::TcpStream) -> Middleman {
Self {
stream: stream,
buf: Vec::with_capacity(128),
buf_occupancy: 0,
payload_bytes: None,
}
}
fn read_in(&mut self) -> Result<usize, io::Error> {
let mut total = 0;
loop {
let limit = (self.buf_occupancy + 64) + (self.buf_occupancy);
if self.buf.len() < limit {
self.buf.resize(limit, 0u8);
}
match self.stream.read(&mut self.buf[self.buf_occupancy..]) {
Ok(0) => return Ok(total),
Ok(bytes) => {
self.buf_occupancy += bytes;
total += bytes;
},
Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
return Ok(total);
},
Err(e) => return Err(e),
};
}
}
/// Write the given message directly into the TcpStream. Returns `Err` variant if there
/// is a problem serializing or writing the message. The call returns Ok(()) once the bytes
/// are entirely written to the stream.
pub fn send<M: Message>(&mut self, m: &M) -> Result<(), SendError> {
self.send_packed(
& PackedMessage::new(m)?
)?;
Ok(())
}
/// See `send`. This variant can be useful to
/// avoid the overhead of repeatedly packing a message for whatever reason, eg: sending
/// the same message using multiple Middleman structs.
///
/// Note that this function does NOT check for internal consistency of the packed message.
/// So, if this message was constructed by a means other than `Packed::new`, then the
/// results may be unpredictable.
pub fn send_packed(&mut self, msg: & PackedMessage) -> Result<(), io::Error> {
self.stream.write_all(&msg.0)
}
/// Conume an iterator over some Message structs, sending them all in the order traversed (see `send`).
/// Returns (a,b) where a gives the total number of messages sent successfully and where b is Ok if
/// nothing goes wrong and an error otherwise. In the event of the first error, no more messages will be sent.
pub fn send_all<'m, I, M>(&'m mut self, msg_iter: I) -> (usize, Result<(), SendError>)
where
M: Message + 'm,
I: Iterator<Item = &'m M>,
{
let mut total = 0;
for msg in msg_iter {
match self.send(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// See `send_all` and `send_packed`. This uses the message iterator from the former and
/// the packed messages from the latter.
pub fn send_all_packed<'m, I>(&'m mut self, packed_msg_iter: I) -> (usize, Result<(), io::Error>)
where
I: Iterator<Item = &'m PackedMessage>,
{
let mut total = 0;
for msg in packed_msg_iter {
match self.send_packed(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// Attempt to dedserialize some data in the receiving buffer into a single
/// complete structure with the given type M. If there is insufficient data
/// at the moment, Ok(None) is returned.
///
/// As the type is provided by the reader, it is possible for the sent
/// message to be misinterpreted as a different type. At best, this is detected
/// by a failure in deserialization. If an error occurs, the data is not consumed
/// from the Middleman. Subsequent reads will operate on the same data.
///
/// NOTE: The correctness of this call depends on the sender sending an _internally consistent_
/// `PackedMessage`. If you (or the sender) are manually manipulating the internal state of
/// sent messages this may cause errors for the receiver. If you are sticking to the Middleman API
/// and treating each `PackedMessage` as a black box, everything should be fine.
pub fn recv<M: Message>(&mut self) -> Result<Option<M>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let decoded: M = bincode::deserialize(
&self.buf[4..buf_end]
)?;
self.payload_bytes = None;
self.buf.drain(0..buf_end);
self.buf_occupancy -= buf_end;
return Ok(Some(decoded))
}
}
Ok(None)
}
/// See `recv`. Will repeatedly call recv() until the next message is not yet ready.
/// Recevied messages are placed into the buffer `dest_vector`. The return result is (a,b)
/// where a is the total number of message successfully received and where b is OK(()) if all goes well
/// and some Err otherwise. In the event of the first error, the call will return and not receive any further.
pub fn recv_all_into<M: Message>(&mut self, dest_vector: &mut Vec<M>) -> (usize, Result<(), RecvError>) {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { dest_vector.push(msg); total += 1; },
Err(e) => return (total, Err(e)),
};
}
}
/// Hijack the mio event loop, reading and writing to the socket as polling allows.
/// Events not related to the recv() of this middleman (determined from the provided mio::Token)
/// are pushed into the provided extra_events vector. Returns Ok(Some(_)) if a
/// message was successfully received. May return Ok(None) if the user provides as timeout some
// non-none Duration. Returns Err(_) if something goes wrong with reading from the socket or
/// deserializing the message. See try_recv for more information.
/// WARNING: The user should take care to iterate over these events also, as without them all the
/// Evented objects registered with the provided poll object might experience lost wakeups.
/// It is suggested that in the event of any recv_blocking calls in your loop, you extend the event
/// loop with a drain() on the same vector passed here as extra_events (using the iterator chain function, for example.)
pub fn recv_blocking<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
my_tok: Token,
extra_events: &mut Vec<Event>,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? |
let started_at = time::Instant::now();
let mut res = None;
loop {
for event in events.iter() {
let tok = event.token();
if res.is_none() && tok == my_tok {
if ! event.readiness().is_readable() {
continue;
}
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => {
// got a message!
res = Some(msg);
},
Ok(None) => (),
Err(e) => return Err(e),
}
} else {
extra_events.push(event);
}
}
if let Some(msg) = res {
// message ready to go. Exiting loop
return Ok(Some(msg));
} else {
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
}
/// See `recv_blocking`. This function is intended as an alternative for use
/// for cases where it is _certain_ that this Middleman is the only registered `mio::Evented`
/// for the provided `Poll` and `Events` objects. Thus, the call _WILL NOT CHECK_ the token at all,
/// presuming that all events are associated with this middleman.
pub fn recv_blocking_solo<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
loop {
for event in events.iter() {
if event.readiness().is_readable(){
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => return Ok(Some(msg)),
Ok(None) => (),
Err(e) => return Err(e),
}
}
}
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking_solo()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
/// Similar to `recv_all_into`, but rather than storing each received message 'm',
/// the provided function is called with arguments (self, m) where self is `&mut self`.
/// This allows for ergonomic utility of the received messages using a closure.
pub fn recv_all_map<F,M>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where M: Message, F: FnMut(&mut Self, M) + Sized {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { total += 1; func(self, msg) },
Err(e) => return (total, Err(e)),
};
}
}
/// Combination of `recv_all_map` and `recv_packed`.
pub fn recv_all_packed_map<F>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where F: FnMut(&mut Self, PackedMessage) + Sized {
let mut total = 0;
loop {
match self.recv_packed() {
Ok(None) => return (total, Ok(())),
Ok(Some(packed)) => { total += 1; func(self, packed) },
Err(e) => return (total, Err(e)),
};
}
}
/// Similar to `recv`, except builds (instead of some M: Message), a `PackedMessage` object.
/// These packed messages can be deserialized later, sent on the line without knowledge of the
/// message type etc.
pub fn recv_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let mut vec = self.buf.drain(0..buf_end)
.collect::<Vec<_>>();
self.payload_bytes = None;
self.buf_occupancy -= buf_end;
return Ok(Some(PackedMessage(vec)))
}
}
Ok(None)
}
/// Similar to `recv_packed`, but the potentially-read bytes are not actually removed
/// from the stream. The message will _still be there_.
pub fn peek_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
return Ok(
Some(
PackedMessage::from_raw(
self.buf[4..buf_end].to_vec()
)
)
)
}
}
Ok(None)
}
}
/// This structure represents the serialized form of some `Message`-implementing structure.
/// Dealing with a PackedMessage may be suitable when:
/// (1) You need to send/store/receive a message but don't need to actually _use_ it yourself.
/// (2) You want to serialize a message once, and send it multiple times.
/// (3) You want to read and discard a message whose type is unknown.
///
/// NOTE: The packed message maps 1:1 with the bytes that travel over the TcpStream. As such,
/// packed messages also contain the 4-byte length preable. The user is discocuraged from
/// manipulating the contents of a packed message. The `recv` statement relies on consistency
/// of packed messages
pub struct PackedMessage(Vec<u8>);
impl PackedMessage {
/// Create a new PakcedMessage from the given `Message`-implementing struct
pub fn new<M: Message>(m: &M) -> Result<Self, PackingError> {
let m_len: usize = bincode::serialized_size(&m)? as usize;
if m_len > ::std::u32::MAX as usize {
return Err(PackingError::TooBigToRepresent);
}
let tot_len = m_len+4;
let mut vec = Vec::with_capacity(tot_len);
vec.resize(tot_len, 0u8);
bincode::serialize_into(&mut vec[0..4], &(m_len as u32))?;
bincode::serialize_into(&mut vec[4..tot_len], m)?;
Ok(PackedMessage(vec))
}
/// Attempt to unpack this Packedmessage given a type hint. This may fail if the
/// PackedMessage isn't internally consistent or the type doesn't match that
/// of the type used for serialization.
pub fn unpack<M: Message>(&self) -> Result<M, Box<bincode::ErrorKind>> {
bincode::deserialize(&self.0[4..])
}
/// Unwrap the byte buffer comprising this PackedMessage
#[inline] pub fn into_raw(self) -> Vec<u8> { self.0 }
/// Accept the given byte buffer as the basis for a PackedMessage
///
/// WARNING: Use this at your own risk! The `recv` functions and their variants rely on
/// the correct contents of messages to work correcty.
///
/// NOTE: The first 4 bytes of a the buffer are used to store the length of the payload.
#[inline] pub fn from_raw(v: Vec<u8>) -> Self { PackedMessage(v) }
/// Return the number of bytes this packed message contains. Maps 1:1 with
/// the bit complexity of the message sent over the network.
#[inline] pub fn byte_len(&self) -> usize { self.0.len() }
/// Acquire an immutable reference to the internal buffer of the packed message.
#[inline] pub fn get_raw(&self) -> &Vec<u8> { &self.0 }
/// Acquire a mutable reference to the internal buffer of the packed message.
///
/// WARNING: Contents of a PackedMessage represent a delicata internal state. Sending an
/// internally inconsistent PackedMessage will compromise the connection. Use at your own risk!
#[inline] pub fn get_mut_raw(&mut self) -> &mut Vec<u8> { &mut self.0 }
}
impl Evented for Middleman {
fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.register(poll, token, interest, opts)
}
fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
self.stream.deregister(poll)
}
} | {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
} | conditional_block |
structs.rs | use super::*;
use mio::{
*,
event::Evented,
};
use ::std::{
io,
io::{
Read,
Write,
ErrorKind,
},
time,
};
#[derive(Debug)]
pub struct Middleman {
stream: mio::net::TcpStream,
buf: Vec<u8>,
buf_occupancy: usize,
payload_bytes: Option<u32>,
}
impl Middleman { fn check_payload(&mut self) {
if self.payload_bytes.is_none() && self.buf_occupancy >= 4 {
self.payload_bytes = Some(
bincode::deserialize(&self.buf[..4])
.unwrap()
)
}
}
/// Create a new Middleman structure to wrap the given Mio TcpStream.
/// The Middleman implements `mio::Evented`, but delegates its functions to this given stream
/// As such, registering the Middleman and registering the TcpStream are anaologous.
pub fn new(stream: mio::net::TcpStream) -> Middleman {
Self {
stream: stream,
buf: Vec::with_capacity(128),
buf_occupancy: 0,
payload_bytes: None,
}
}
fn read_in(&mut self) -> Result<usize, io::Error> {
let mut total = 0;
loop {
let limit = (self.buf_occupancy + 64) + (self.buf_occupancy);
if self.buf.len() < limit {
self.buf.resize(limit, 0u8);
}
match self.stream.read(&mut self.buf[self.buf_occupancy..]) {
Ok(0) => return Ok(total),
Ok(bytes) => {
self.buf_occupancy += bytes;
total += bytes;
},
Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
return Ok(total);
},
Err(e) => return Err(e),
};
}
}
/// Write the given message directly into the TcpStream. Returns `Err` variant if there
/// is a problem serializing or writing the message. The call returns Ok(()) once the bytes
/// are entirely written to the stream.
pub fn send<M: Message>(&mut self, m: &M) -> Result<(), SendError> {
self.send_packed(
& PackedMessage::new(m)?
)?;
Ok(())
}
/// See `send`. This variant can be useful to
/// avoid the overhead of repeatedly packing a message for whatever reason, eg: sending
/// the same message using multiple Middleman structs.
///
/// Note that this function does NOT check for internal consistency of the packed message.
/// So, if this message was constructed by a means other than `Packed::new`, then the
/// results may be unpredictable.
pub fn send_packed(&mut self, msg: & PackedMessage) -> Result<(), io::Error> {
self.stream.write_all(&msg.0)
}
/// Conume an iterator over some Message structs, sending them all in the order traversed (see `send`).
/// Returns (a,b) where a gives the total number of messages sent successfully and where b is Ok if
/// nothing goes wrong and an error otherwise. In the event of the first error, no more messages will be sent.
pub fn send_all<'m, I, M>(&'m mut self, msg_iter: I) -> (usize, Result<(), SendError>)
where
M: Message + 'm,
I: Iterator<Item = &'m M>,
{
let mut total = 0;
for msg in msg_iter {
match self.send(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// See `send_all` and `send_packed`. This uses the message iterator from the former and
/// the packed messages from the latter.
pub fn send_all_packed<'m, I>(&'m mut self, packed_msg_iter: I) -> (usize, Result<(), io::Error>)
where
I: Iterator<Item = &'m PackedMessage>,
{
let mut total = 0;
for msg in packed_msg_iter {
match self.send_packed(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// Attempt to dedserialize some data in the receiving buffer into a single
/// complete structure with the given type M. If there is insufficient data
/// at the moment, Ok(None) is returned.
///
/// As the type is provided by the reader, it is possible for the sent
/// message to be misinterpreted as a different type. At best, this is detected
/// by a failure in deserialization. If an error occurs, the data is not consumed
/// from the Middleman. Subsequent reads will operate on the same data.
///
/// NOTE: The correctness of this call depends on the sender sending an _internally consistent_
/// `PackedMessage`. If you (or the sender) are manually manipulating the internal state of
/// sent messages this may cause errors for the receiver. If you are sticking to the Middleman API
/// and treating each `PackedMessage` as a black box, everything should be fine.
pub fn recv<M: Message>(&mut self) -> Result<Option<M>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let decoded: M = bincode::deserialize(
&self.buf[4..buf_end]
)?;
self.payload_bytes = None;
self.buf.drain(0..buf_end);
self.buf_occupancy -= buf_end;
return Ok(Some(decoded))
}
}
Ok(None)
}
/// See `recv`. Will repeatedly call recv() until the next message is not yet ready.
/// Recevied messages are placed into the buffer `dest_vector`. The return result is (a,b)
/// where a is the total number of message successfully received and where b is OK(()) if all goes well
/// and some Err otherwise. In the event of the first error, the call will return and not receive any further.
pub fn recv_all_into<M: Message>(&mut self, dest_vector: &mut Vec<M>) -> (usize, Result<(), RecvError>) {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { dest_vector.push(msg); total += 1; },
Err(e) => return (total, Err(e)),
};
}
}
/// Hijack the mio event loop, reading and writing to the socket as polling allows.
/// Events not related to the recv() of this middleman (determined from the provided mio::Token)
/// are pushed into the provided extra_events vector. Returns Ok(Some(_)) if a
/// message was successfully received. May return Ok(None) if the user provides as timeout some
// non-none Duration. Returns Err(_) if something goes wrong with reading from the socket or
/// deserializing the message. See try_recv for more information.
/// WARNING: The user should take care to iterate over these events also, as without them all the
/// Evented objects registered with the provided poll object might experience lost wakeups.
/// It is suggested that in the event of any recv_blocking calls in your loop, you extend the event
/// loop with a drain() on the same vector passed here as extra_events (using the iterator chain function, for example.)
pub fn recv_blocking<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
my_tok: Token,
extra_events: &mut Vec<Event>,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
let mut res = None;
loop {
for event in events.iter() {
let tok = event.token();
if res.is_none() && tok == my_tok {
if ! event.readiness().is_readable() {
continue;
}
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => {
// got a message!
res = Some(msg);
},
Ok(None) => (),
Err(e) => return Err(e),
}
} else {
extra_events.push(event);
}
}
if let Some(msg) = res {
// message ready to go. Exiting loop
return Ok(Some(msg));
} else {
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
}
/// See `recv_blocking`. This function is intended as an alternative for use
/// for cases where it is _certain_ that this Middleman is the only registered `mio::Evented`
/// for the provided `Poll` and `Events` objects. Thus, the call _WILL NOT CHECK_ the token at all,
/// presuming that all events are associated with this middleman.
pub fn recv_blocking_solo<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
loop {
for event in events.iter() {
if event.readiness().is_readable(){
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => return Ok(Some(msg)),
Ok(None) => (),
Err(e) => return Err(e),
}
}
}
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking_solo()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
/// Similar to `recv_all_into`, but rather than storing each received message 'm',
/// the provided function is called with arguments (self, m) where self is `&mut self`.
/// This allows for ergonomic utility of the received messages using a closure.
pub fn recv_all_map<F,M>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where M: Message, F: FnMut(&mut Self, M) + Sized {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { total += 1; func(self, msg) },
Err(e) => return (total, Err(e)),
};
}
}
/// Combination of `recv_all_map` and `recv_packed`.
pub fn recv_all_packed_map<F>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where F: FnMut(&mut Self, PackedMessage) + Sized {
let mut total = 0;
loop {
match self.recv_packed() {
Ok(None) => return (total, Ok(())),
Ok(Some(packed)) => { total += 1; func(self, packed) },
Err(e) => return (total, Err(e)),
};
}
}
/// Similar to `recv`, except builds (instead of some M: Message), a `PackedMessage` object.
/// These packed messages can be deserialized later, sent on the line without knowledge of the
/// message type etc.
pub fn recv_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let mut vec = self.buf.drain(0..buf_end)
.collect::<Vec<_>>();
self.payload_bytes = None;
self.buf_occupancy -= buf_end;
return Ok(Some(PackedMessage(vec)))
}
}
Ok(None)
}
/// Similar to `recv_packed`, but the potentially-read bytes are not actually removed
/// from the stream. The message will _still be there_.
pub fn peek_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
return Ok(
Some(
PackedMessage::from_raw(
self.buf[4..buf_end].to_vec()
)
)
)
}
}
Ok(None)
}
}
/// This structure represents the serialized form of some `Message`-implementing structure.
/// Dealing with a PackedMessage may be suitable when:
/// (1) You need to send/store/receive a message but don't need to actually _use_ it yourself.
/// (2) You want to serialize a message once, and send it multiple times.
/// (3) You want to read and discard a message whose type is unknown.
///
/// NOTE: The packed message maps 1:1 with the bytes that travel over the TcpStream. As such,
/// packed messages also contain the 4-byte length preable. The user is discocuraged from
/// manipulating the contents of a packed message. The `recv` statement relies on consistency
/// of packed messages
pub struct PackedMessage(Vec<u8>);
impl PackedMessage {
/// Create a new PakcedMessage from the given `Message`-implementing struct
pub fn new<M: Message>(m: &M) -> Result<Self, PackingError> {
let m_len: usize = bincode::serialized_size(&m)? as usize;
if m_len > ::std::u32::MAX as usize {
return Err(PackingError::TooBigToRepresent);
}
let tot_len = m_len+4;
let mut vec = Vec::with_capacity(tot_len);
vec.resize(tot_len, 0u8);
bincode::serialize_into(&mut vec[0..4], &(m_len as u32))?;
bincode::serialize_into(&mut vec[4..tot_len], m)?;
Ok(PackedMessage(vec))
}
/// Attempt to unpack this Packedmessage given a type hint. This may fail if the
/// PackedMessage isn't internally consistent or the type doesn't match that
/// of the type used for serialization.
pub fn unpack<M: Message>(&self) -> Result<M, Box<bincode::ErrorKind>> {
bincode::deserialize(&self.0[4..])
}
/// Unwrap the byte buffer comprising this PackedMessage
#[inline] pub fn into_raw(self) -> Vec<u8> { self.0 }
/// Accept the given byte buffer as the basis for a PackedMessage
///
/// WARNING: Use this at your own risk! The `recv` functions and their variants rely on
/// the correct contents of messages to work correcty.
///
/// NOTE: The first 4 bytes of a the buffer are used to store the length of the payload.
#[inline] pub fn from_raw(v: Vec<u8>) -> Self { PackedMessage(v) }
/// Return the number of bytes this packed message contains. Maps 1:1 with
/// the bit complexity of the message sent over the network.
#[inline] pub fn byte_len(&self) -> usize { self.0.len() }
/// Acquire an immutable reference to the internal buffer of the packed message.
#[inline] pub fn get_raw(&self) -> &Vec<u8> { &self.0 }
/// Acquire a mutable reference to the internal buffer of the packed message.
///
/// WARNING: Contents of a PackedMessage represent a delicata internal state. Sending an
/// internally inconsistent PackedMessage will compromise the connection. Use at your own risk!
#[inline] pub fn get_mut_raw(&mut self) -> &mut Vec<u8> { &mut self.0 }
}
impl Evented for Middleman {
fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.register(poll, token, interest, opts)
}
fn | (&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
self.stream.deregister(poll)
}
} | reregister | identifier_name |
structs.rs | use super::*;
use mio::{
*,
event::Evented,
};
use ::std::{
io,
io::{
Read,
Write,
ErrorKind,
},
time,
};
#[derive(Debug)]
pub struct Middleman {
stream: mio::net::TcpStream,
buf: Vec<u8>,
buf_occupancy: usize,
payload_bytes: Option<u32>,
}
impl Middleman { fn check_payload(&mut self) {
if self.payload_bytes.is_none() && self.buf_occupancy >= 4 {
self.payload_bytes = Some(
bincode::deserialize(&self.buf[..4])
.unwrap()
)
}
}
/// Create a new Middleman structure to wrap the given Mio TcpStream.
/// The Middleman implements `mio::Evented`, but delegates its functions to this given stream
/// As such, registering the Middleman and registering the TcpStream are anaologous.
pub fn new(stream: mio::net::TcpStream) -> Middleman {
Self {
stream: stream,
buf: Vec::with_capacity(128),
buf_occupancy: 0,
payload_bytes: None,
}
}
fn read_in(&mut self) -> Result<usize, io::Error> {
let mut total = 0;
loop {
let limit = (self.buf_occupancy + 64) + (self.buf_occupancy);
if self.buf.len() < limit {
self.buf.resize(limit, 0u8);
}
match self.stream.read(&mut self.buf[self.buf_occupancy..]) {
Ok(0) => return Ok(total),
Ok(bytes) => {
self.buf_occupancy += bytes;
total += bytes;
},
Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
return Ok(total);
},
Err(e) => return Err(e),
};
}
}
/// Write the given message directly into the TcpStream. Returns `Err` variant if there
/// is a problem serializing or writing the message. The call returns Ok(()) once the bytes
/// are entirely written to the stream.
pub fn send<M: Message>(&mut self, m: &M) -> Result<(), SendError> {
self.send_packed(
& PackedMessage::new(m)?
)?;
Ok(())
}
/// See `send`. This variant can be useful to
/// avoid the overhead of repeatedly packing a message for whatever reason, eg: sending
/// the same message using multiple Middleman structs.
///
/// Note that this function does NOT check for internal consistency of the packed message.
/// So, if this message was constructed by a means other than `Packed::new`, then the
/// results may be unpredictable.
pub fn send_packed(&mut self, msg: & PackedMessage) -> Result<(), io::Error> {
self.stream.write_all(&msg.0)
}
/// Conume an iterator over some Message structs, sending them all in the order traversed (see `send`).
/// Returns (a,b) where a gives the total number of messages sent successfully and where b is Ok if
/// nothing goes wrong and an error otherwise. In the event of the first error, no more messages will be sent.
pub fn send_all<'m, I, M>(&'m mut self, msg_iter: I) -> (usize, Result<(), SendError>)
where
M: Message + 'm,
I: Iterator<Item = &'m M>,
{
let mut total = 0;
for msg in msg_iter {
match self.send(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// See `send_all` and `send_packed`. This uses the message iterator from the former and
/// the packed messages from the latter.
pub fn send_all_packed<'m, I>(&'m mut self, packed_msg_iter: I) -> (usize, Result<(), io::Error>)
where
I: Iterator<Item = &'m PackedMessage>,
{
let mut total = 0;
for msg in packed_msg_iter {
match self.send_packed(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// Attempt to dedserialize some data in the receiving buffer into a single
/// complete structure with the given type M. If there is insufficient data
/// at the moment, Ok(None) is returned.
///
/// As the type is provided by the reader, it is possible for the sent
/// message to be misinterpreted as a different type. At best, this is detected
/// by a failure in deserialization. If an error occurs, the data is not consumed
/// from the Middleman. Subsequent reads will operate on the same data.
///
/// NOTE: The correctness of this call depends on the sender sending an _internally consistent_
/// `PackedMessage`. If you (or the sender) are manually manipulating the internal state of
/// sent messages this may cause errors for the receiver. If you are sticking to the Middleman API
/// and treating each `PackedMessage` as a black box, everything should be fine.
pub fn recv<M: Message>(&mut self) -> Result<Option<M>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let decoded: M = bincode::deserialize(
&self.buf[4..buf_end]
)?;
self.payload_bytes = None;
self.buf.drain(0..buf_end);
self.buf_occupancy -= buf_end;
return Ok(Some(decoded))
}
}
Ok(None)
}
/// See `recv`. Will repeatedly call recv() until the next message is not yet ready.
/// Recevied messages are placed into the buffer `dest_vector`. The return result is (a,b)
/// where a is the total number of message successfully received and where b is OK(()) if all goes well
/// and some Err otherwise. In the event of the first error, the call will return and not receive any further.
pub fn recv_all_into<M: Message>(&mut self, dest_vector: &mut Vec<M>) -> (usize, Result<(), RecvError>) {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { dest_vector.push(msg); total += 1; },
Err(e) => return (total, Err(e)),
};
}
}
/// Hijack the mio event loop, reading and writing to the socket as polling allows.
/// Events not related to the recv() of this middleman (determined from the provided mio::Token)
/// are pushed into the provided extra_events vector. Returns Ok(Some(_)) if a
/// message was successfully received. May return Ok(None) if the user provides as timeout some
// non-none Duration. Returns Err(_) if something goes wrong with reading from the socket or
/// deserializing the message. See try_recv for more information.
/// WARNING: The user should take care to iterate over these events also, as without them all the
/// Evented objects registered with the provided poll object might experience lost wakeups.
/// It is suggested that in the event of any recv_blocking calls in your loop, you extend the event
/// loop with a drain() on the same vector passed here as extra_events (using the iterator chain function, for example.)
pub fn recv_blocking<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
my_tok: Token,
extra_events: &mut Vec<Event>,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
let mut res = None;
loop {
for event in events.iter() {
let tok = event.token();
if res.is_none() && tok == my_tok {
if ! event.readiness().is_readable() {
continue;
}
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => {
// got a message!
res = Some(msg);
},
Ok(None) => (),
Err(e) => return Err(e),
}
} else {
extra_events.push(event);
}
}
if let Some(msg) = res {
// message ready to go. Exiting loop
return Ok(Some(msg));
} else {
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
}
/// See `recv_blocking`. This function is intended as an alternative for use
/// for cases where it is _certain_ that this Middleman is the only registered `mio::Evented`
/// for the provided `Poll` and `Events` objects. Thus, the call _WILL NOT CHECK_ the token at all,
/// presuming that all events are associated with this middleman.
pub fn recv_blocking_solo<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
loop {
for event in events.iter() {
if event.readiness().is_readable(){
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => return Ok(Some(msg)),
Ok(None) => (),
Err(e) => return Err(e),
}
}
}
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking_solo()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
/// Similar to `recv_all_into`, but rather than storing each received message 'm',
/// the provided function is called with arguments (self, m) where self is `&mut self`.
/// This allows for ergonomic utility of the received messages using a closure.
pub fn recv_all_map<F,M>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where M: Message, F: FnMut(&mut Self, M) + Sized {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { total += 1; func(self, msg) },
Err(e) => return (total, Err(e)),
};
}
}
/// Combination of `recv_all_map` and `recv_packed`.
pub fn recv_all_packed_map<F>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where F: FnMut(&mut Self, PackedMessage) + Sized {
let mut total = 0;
loop {
match self.recv_packed() {
Ok(None) => return (total, Ok(())),
Ok(Some(packed)) => { total += 1; func(self, packed) },
Err(e) => return (total, Err(e)),
};
}
}
/// Similar to `recv`, except builds (instead of some M: Message), a `PackedMessage` object.
/// These packed messages can be deserialized later, sent on the line without knowledge of the
/// message type etc.
pub fn recv_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let mut vec = self.buf.drain(0..buf_end)
.collect::<Vec<_>>();
self.payload_bytes = None;
self.buf_occupancy -= buf_end;
return Ok(Some(PackedMessage(vec)))
}
}
Ok(None)
}
/// Similar to `recv_packed`, but the potentially-read bytes are not actually removed
/// from the stream. The message will _still be there_.
pub fn peek_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
return Ok(
Some(
PackedMessage::from_raw(
self.buf[4..buf_end].to_vec()
)
)
)
}
}
Ok(None)
}
}
/// This structure represents the serialized form of some `Message`-implementing structure.
/// Dealing with a PackedMessage may be suitable when:
/// (1) You need to send/store/receive a message but don't need to actually _use_ it yourself.
/// (2) You want to serialize a message once, and send it multiple times.
/// (3) You want to read and discard a message whose type is unknown.
///
/// NOTE: The packed message maps 1:1 with the bytes that travel over the TcpStream. As such,
/// packed messages also contain the 4-byte length preable. The user is discocuraged from
/// manipulating the contents of a packed message. The `recv` statement relies on consistency
/// of packed messages
pub struct PackedMessage(Vec<u8>);
impl PackedMessage {
/// Create a new PakcedMessage from the given `Message`-implementing struct
pub fn new<M: Message>(m: &M) -> Result<Self, PackingError> {
let m_len: usize = bincode::serialized_size(&m)? as usize;
if m_len > ::std::u32::MAX as usize {
return Err(PackingError::TooBigToRepresent);
}
let tot_len = m_len+4;
let mut vec = Vec::with_capacity(tot_len);
vec.resize(tot_len, 0u8);
bincode::serialize_into(&mut vec[0..4], &(m_len as u32))?;
bincode::serialize_into(&mut vec[4..tot_len], m)?;
Ok(PackedMessage(vec))
}
/// Attempt to unpack this Packedmessage given a type hint. This may fail if the
/// PackedMessage isn't internally consistent or the type doesn't match that
/// of the type used for serialization.
pub fn unpack<M: Message>(&self) -> Result<M, Box<bincode::ErrorKind>> {
bincode::deserialize(&self.0[4..])
}
/// Unwrap the byte buffer comprising this PackedMessage
#[inline] pub fn into_raw(self) -> Vec<u8> { self.0 }
/// Accept the given byte buffer as the basis for a PackedMessage
///
/// WARNING: Use this at your own risk! The `recv` functions and their variants rely on
/// the correct contents of messages to work correcty.
///
/// NOTE: The first 4 bytes of a the buffer are used to store the length of the payload.
#[inline] pub fn from_raw(v: Vec<u8>) -> Self { PackedMessage(v) }
/// Return the number of bytes this packed message contains. Maps 1:1 with
/// the bit complexity of the message sent over the network.
#[inline] pub fn byte_len(&self) -> usize { self.0.len() }
/// Acquire an immutable reference to the internal buffer of the packed message.
#[inline] pub fn get_raw(&self) -> &Vec<u8> { &self.0 }
/// Acquire a mutable reference to the internal buffer of the packed message.
///
/// WARNING: Contents of a PackedMessage represent a delicata internal state. Sending an
/// internally inconsistent PackedMessage will compromise the connection. Use at your own risk!
#[inline] pub fn get_mut_raw(&mut self) -> &mut Vec<u8> |
}
impl Evented for Middleman {
fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.register(poll, token, interest, opts)
}
fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
self.stream.deregister(poll)
}
} | { &mut self.0 } | identifier_body |
quicksearch.js | (function($){
// Function to be called when the quick search template is ready
window.initQuickSearch = function initQuickSearch(portletId,seeAllMsg, noResultMsg, searching) {
//*** Global variables ***
var CONNECTORS; //all registered SearchService connectors
var SEARCH_TYPES; //enabled search types
var QUICKSEARCH_SETTING; //quick search setting
var DELAY_SEARCH_TIME = 1000; // Search time delay
var txtQuickSearchQuery_id = "#adminkeyword-" + portletId;
var linkQuickSearchQuery_id = "#adminSearchLink-" + portletId;
var quickSearchResult_id = "#quickSearchResult-" + portletId;
var seeAll_id = "#seeAll-" + portletId;
var value = $(txtQuickSearchQuery_id).val();
var isDefault = false;
var isEnterKey = false;
window['isSearching'] = false;
var durationKeyup = 0;
var keypressed = false;
var skipKeyup = 0;
var textVal = "";
var firstBackSpace = true;
var index = 0;
var currentFocus = 0;
var searchTimeout;
//var skipKeyUp = [9,16,17,18,19,20,33,34,35,36,37,38,39,40,45,49];
var mapKeyUp = {"0":"48","1":"49","2":"50","3":"51","4":"52","5":"53","6":"54","7":"55","8":"56","9":"57",
"a":"65","b":"66","c":"67","d":"68","e":"69","f":"70","g":"71","h":"72","i":"73","j":"74",
"k":"75","l":"76","m":"77","n":"78","o":"79","p":"80","q":"81","r":"82","s":"83","t":"84",
"u":"85","v":"86","w":"87","x":"88","y":"89","z":"90","numpad 0":"96","numpad 1":"97","numpad 2":"98",
"numpad 3":"99","numpad 4":"100","numpad 5":"101","numpad 6":"102","numpad 7":"103", "backspace":"8", "delete":"46"};
/*var QUICKSEARCH_RESULT_TEMPLATE= " \
<div class='quickSearchResult %{type}' tabindex='%{index}' id='quickSearchResult%{index}'> \
<span class='avatar'> \
%{avatar} \
</span> \
<a href='%{url}' class='name'>%{title}</a> \
</div> \
";*///<div class='Excerpt Ellipsis'>%{excerpt}</div> \
var QUICKSEARCH_RESULT_TEMPLATE= "\
<div class='quickSearchResult %{type}' tabindex='%{index}' id='quickSearchResult%{index}' onkeydown='fireAEvent(event,this.id)'> \
%{lineResult}\
</div>";//<div class='Excerpt Ellipsis'>%{excerpt}</div> \
var LINE_RESULT_TEMPLATE = "\
<a href='%{url}'> \
<i class='%{cssClass}'></i> %{title}\
</a>";
var OTHER_RESULT_TEMPLATE = "\
<a href='%{url}' class='avatarTiny'><img src='%{imageSrc}'/>%{title}</a>\
";
var QUICKSEARCH_TABLE_TEMPLATE=" \
<table class='uiGrid table table-striped rounded-corners'> \
<col width='30%'> \
<col width='70%'> \
%{resultRows} \
%{messageRow} \
</table> \
";
var QUICKSEARCH_TABLE_ROW_TEMPLATE=" \
<tr> \
<th> \
%{type} \
</th> \
<td> \
%{results} \
</td> \
</tr> \
";
var QUICKSEARCH_SEE_ALL=" \
<tr> \
<td colspan='2' class='message'> \
<a id='seeAll-" + portletId + "' class='' href='#'>"+seeAllMsg+"</a> \
</td> \
</tr> \
";
var QUICKSEARCH_NO_RESULT=" \
<tr> \
<td colspan='2' class='noResult'> \
<span id='seeAll-" + portletId + "' class='' href='#'>"+noResultMsg+" <strong>%{query}<strong></span> \
</td> \
</tr> \
";
var IMAGE_AVATAR_TEMPLATE = " \
<span class='avatar pull-left'> \
<img src='%{imageSrc}'> \
</span> \
";
var CSS_AVATAR_TEMPLATE = " \
<span class='avatar pull-left'> \
<i class='%{cssClass}'></i> \
</span> \
";
var EVENT_AVATAR_TEMPLATE = " \
<div class='calendarBox calendarBox-mini'> \
<div class='heading'> %{month} </div> \
<div class='content' style='margin-left: 0px;'> %{date} </div> \
</div> \
";
var TASK_AVATAR_TEMPLATE = " \
<i class='uiIconStatus-20-%{taskStatus}'></i> \
";
var QUICKSEARCH_WAITING_TEMPLATE=" \
<table class='uiGrid table table-hover table-striped rounded-corners'> \
<col width='30%'> \
<col width='70%'> \
<tr> \
<td colspan='2' class='noResult'> \
<span id='seeAll-" + portletId + "' class='' href='#'>"+searching+" </span> \
</td> \
</tr> \
</table> \
";
searchTimeout = setTimeout(searchWhenNoKeypress, DELAY_SEARCH_TIME);
function searchWhenNoKeypress() {
if (keypressed) {
quickSearch();
keypressed = false;
}
}
//*** Utility functions ***
String.prototype.toProperCase = function() {
return this.replace(/\w\S*/g, function(txt){return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();});
};
// Highlight the specified text in a string
String.prototype.highlight = function(words) {
var str = this;
for(var i=0; i<words.length; i++) {
if("" == words[i]) continue;
var regex;
if(isSpecialExpressionCharacter(words[i].charAt(0))) {
regex = new RegExp("(\\" + words[i] + ")", "gi");
} else {
regex = new RegExp("(" + words[i] + ")", "gi");
}
str = str.replace(regex, "<strong>$1</strong>");
}
return str;
};
function isSpecialExpressionCharacter(c) |
function getRegistry(callback) {
$.getJSON("/rest/search/registry", function(registry){
if(callback) callback(registry);
});
}
function getQuicksearchSetting(callback) {
$.getJSON("/rest/search/setting/quicksearch", function(setting){
if(callback) callback(setting);
});
}
function setWaitingStatus(status) {
if (status){
window['isSearching'] = true;
$(txtQuickSearchQuery_id).addClass("loadding");
if ($.browser.msie && parseInt($.browser.version, 10) == 8) {
$(quickSearchResult_id).show();
}else{
var width = Math.min($(quickSearchResult_id).width(), $(window).width() - $(txtQuickSearchQuery_id).offset().left - 20);
$(quickSearchResult_id).width(width);
$(quickSearchResult_id).show();
}
}else {
window['isSearching'] = false;
}
}
function quickSearch() {
var query = $(txtQuickSearchQuery_id).val();
setWaitingStatus(true);
var types = QUICKSEARCH_SETTING.searchTypes.join(","); //search for the types specified in quick search setting only
var searchParams = {
searchContext: {
siteName:eXo.env.portal.portalName
},
q: query,
sites: QUICKSEARCH_SETTING.searchCurrentSiteOnly ? eXo.env.portal.portalName : "all",
types: types,
offset: 0,
limit: QUICKSEARCH_SETTING.resultsPerPage,
sort: "relevancy",
order: "desc"
};
// get results of all search types in a map
$.getJSON("/rest/search", searchParams, function(resultMap){
var rows = []; //one row per type
index = 0;
$.each(SEARCH_TYPES, function(i, searchType){
var results = resultMap[searchType]; //get all results of this type
if(results && 0!=$(results).size()) { //show the type with result only
//results.map(function(result){result.type = searchType;}); //assign type for each result
results = results.sort(function(a,b){
return byRelevancyDESC(a,b);
});
$.map(results, function(result){result.type = searchType;}); //assign type for each result
var cell = []; //the cell contains results of this type (in the quick search result table)
$.each(results, function(i, result){
index = index + 1;
cell.push(renderQuickSearchResult(result, index)); //add this result to the cell
});
var row = QUICKSEARCH_TABLE_ROW_TEMPLATE.replace(/%{type}/g,eXo.ecm.WCMUtils.getBundle("quicksearch.type." + CONNECTORS[searchType].displayName , eXo.env.portal.language)).replace(/%{results}/g, cell.join(""));
rows.push(row);
}
});
var messageRow = rows.length==0 ? QUICKSEARCH_NO_RESULT.replace(/%{query}/, XSSUtils.sanitizeString(query)) : QUICKSEARCH_SEE_ALL;
$(quickSearchResult_id).html(QUICKSEARCH_TABLE_TEMPLATE.replace(/%{resultRows}/, rows.join("")).replace(/%{messageRow}/g, messageRow));
if ($.browser.msie && parseInt($.browser.version, 10) == 8) {
$(quickSearchResult_id).show();
}else{
var width = Math.min($(quickSearchResult_id).width(), $(window).width() - $(txtQuickSearchQuery_id).offset().left - 20);
$(quickSearchResult_id).width(width);
$(quickSearchResult_id).show();
}
$(txtQuickSearchQuery_id).removeClass("loadding");
setWaitingStatus(false);
var searchPage = "/portal/"+eXo.env.portal.portalName+"/search";
$(seeAll_id).attr("href", searchPage +"?q="+query+"&types="+types); //the query to be passed to main search page
currentFocus = 0;
});
}
function renderQuickSearchResult(result, index) {
var query = $(txtQuickSearchQuery_id).val();
var terms = query.split(/\s+/g); //for highlighting
var avatar = "";
var line = "";
switch(result.type) {
case "event":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFEvent uiIconPLFLightGray");
break;
case "task":
var cssClass = "uiIconPLFTask" + result.taskStatus.toProperCase() + " uiIconPLFLightGray";
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClass);
break;
case "tasksInTasks":
var cssClass = "uiIconTick" + (result.completed ? ' uiIconBlue' : '');
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClass);
break;
case "file":
var cssClasses = $.map(result.fileType.split(/\s+/g), function(type){return "uiIcon16x16" + type}).join(" ");
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClasses);
break;
case "document":
var cssClasses = $.map(result.fileType.split(/\s+/g), function(type){return "uiIcon16x16Template" + type}).join(" ");
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClasses);
break;
case "post":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFDiscussion uiIconPLFLightGray");
break;
case "answer":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFAnswers uiIconPLFLightGray");
break;
case "wiki":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconWikiWiki uiIconWikiLightGray");
break;
case "page":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconEcmsTemplateDocument uiIconEcmsLightGrey");
break;
default:
line = OTHER_RESULT_TEMPLATE.replace(/%{imageSrc}/g, result.imageUrl);
}
var html = QUICKSEARCH_RESULT_TEMPLATE.
replace(/%{index}/g, index).
replace(/%{type}/g, result.type).
replace(/%{lineResult}/g, line).
replace(/%{url}/g, result.url).
replace(/%{title}/g, (result.title||"").highlight(terms)).
replace(/%{excerpt}/g, (result.excerpt||"").highlight(terms)).
replace(/%{detail}/g, (result.detail||"").highlight(terms)).
replace(/%{avatar}/g, avatar);
return html;
}
function byRelevancyDESC(b,a) {
if (a.relevancy < b.relevancy)
return -1;
if (a.relevancy > b.relevancy)
return 1;
return 0;
}
//*** Event handlers - Quick search ***
$(document).on("click",seeAll_id, function(){
window.location.href = generateAllResultsURL(); //open the main search page
$(quickSearchResult_id).hide();
});
$(txtQuickSearchQuery_id).keyup(function(e){
if(""==$(this).val()) {
$(quickSearchResult_id).hide();
return;
}
if(13==e.keyCode) {
$(seeAll_id).trigger("click"); //go to main search page if Enter is pressed
} else {
keypressed = true;
if (searchTimeout) {
clearTimeout(searchTimeout);
}
searchTimeout = setTimeout(searchWhenNoKeypress, DELAY_SEARCH_TIME);
//quickSearch(); //search for the text just being typed in
var currentVal = $(txtQuickSearchQuery_id).val();
if (!charDeletedIsEmpty(e,textVal, currentVal)){
$.each(mapKeyUp, function(key, value){
textVal = $(txtQuickSearchQuery_id).val();
});
}
}
});
//skip backspace and delete key
function charDeletedIsEmpty(key,textVal, currentVal){
//process backspace key
if (key.keyCode == 8 && textVal.trim() == currentVal.trim()){
return true;
}
//process delete key
if (key.keyCode == 46 && textVal.trim() == currentVal.trim()){
return true;
}
}
// catch ennter key when search is running
$(document).keyup(function (e) {
if (e.keyCode == 13 && window['isSearching'] && !$(txtQuickSearchQuery_id).is(':hidden') ) {
//$(quickSearchResult_id).focus();
isDefault = false;
$(linkQuickSearchQuery_id).trigger('click');
//$(linkQuickSearchQuery_id).click(); //go to main search page if Enter is pressed
}
});
$(document).keyup(function (e) {
if (e.keyCode == 13 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
var focusedId = $("*:focus").attr("id");
if (currentFocus > 0 && currentFocus <= index){
var link = $("#"+focusedId+" .name").attr('href');
window.open(link,"_self");
}
}
});
// catch arrow key
$(document).keyup(function (e) {
if (index >= 1){
if (e.keyCode == 40 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
if (currentFocus >= 1 && currentFocus < index){
var divClass = $('#quickSearchResult'+ currentFocus).attr('class').replace(" arrowResult", "");
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}
if (currentFocus < index){
currentFocus = currentFocus + 1;
$("#quickSearchResult"+currentFocus).focus();
var divClass = $('#quickSearchResult'+currentFocus).attr('class') + " arrowResult";
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}else if (currentFocus == index){
$("#quickSearchResult"+index).focus();
}
}
if (e.keyCode == 38 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
if (currentFocus > 1){
var divClass = $('#quickSearchResult'+ currentFocus).attr('class').replace(" arrowResult", "");
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}
if (currentFocus > 1){
currentFocus = currentFocus - 1;
$("#quickSearchResult"+currentFocus).focus();
var divClass = $('#quickSearchResult'+currentFocus).attr('class') + " arrowResult";
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}else if (currentFocus == 1){
$("#quickSearchResult"+currentFocus).focus();
}
}
}
});
//show the input search or go to the main search page when search link is clicked
$(linkQuickSearchQuery_id).click(function () {
if ($(txtQuickSearchQuery_id).is(':hidden')) {
$(txtQuickSearchQuery_id).val(value);
// $(txtQuickSearchQuery_id).css('color', '#555');
isDefault = true;
$(txtQuickSearchQuery_id).show();
$(txtQuickSearchQuery_id).focus();
}
else
if (isDefault == true) {
$(txtQuickSearchQuery_id).hide();
$(quickSearchResult_id).hide();
}
else {
//alert(window['isSearching']);
if(!window['isSearching']) {
$(seeAll_id).click(); //go to main search page if Enter is pressed
}else if (window['isSearching']){
$(linkQuickSearchQuery_id).attr("onclick","window.location.href='"+ generateAllResultsURL() + "'");
window['isSearching'] = false;
}
}
});
$(txtQuickSearchQuery_id).focus(function(){
$(this).val('');
// $(this).css('color', '#000');
isDefault = false;
});
//collapse the input search field when clicking outside the search box
$('body').click(function (evt) {
if ($(evt.target).parents('#ToolBarSearch').length == 0) {
// $(txtQuickSearchQuery_id).hide();
$(txtQuickSearchQuery_id).removeClass("showInputSearch");
$("#ToolBarSearch .uiIconPLF24x24Search").removeClass('uiIconCloseSearchBox');
$('#PlatformAdminToolbarContainer').removeClass('activeInputSearch');
$('#ToolBarSearch').find('input[type="text"]').removeClass('loadding');
$('body').removeClass('quickSearchDisplay');
$(quickSearchResult_id).hide();
}
});
//*** The entry point ***
// Load all needed configurations and settings from the service to prepare for the search
getRegistry(function(registry){
CONNECTORS = registry[0];
SEARCH_TYPES = registry[1];
getQuicksearchSetting(function(setting){
QUICKSEARCH_SETTING = setting;
});
});
function generateAllResultsURL() {
var query = $(txtQuickSearchQuery_id).val();
var types = QUICKSEARCH_SETTING.searchTypes.join(","); //search for the types specified in quick search setting only
var searchPage = "/portal/"+eXo.env.portal.portalName+"/search";
return searchPage + "?q="+query+"&types="+types;
}
//$ = jQuery; //undo .conflict();
}
//Function to be called when the quick search setting template is ready
window.initQuickSearchSetting = function(allMsg,alertOk,alertNotOk){
var CONNECTORS; //all registered SearchService connectors
var CHECKBOX_TEMPLATE = "\
<div class='control-group'> \
<div class='controls-full'> \
<span class='uiCheckbox'> \
<input type='checkbox' class='checkbox' name='%{name}' value='%{value}'> \
<span>%{text}</span> \
</span> \
</div> \
</div> \
";
function getSelectedTypes() {
var searchIn = [];
if($(":checkbox[name='searchInOption'][value='all']").is(":checked")) {
return "all";
} else {
$.each($(":checkbox[name='searchInOption'][value!='all']:checked"), function(){
searchIn.push(this.value);
});
if (searchIn.length==0){
return "false";
}
return searchIn.join(",");
}
}
// Call REST service to save the setting
$("#btnSave").click(function(){
var jqxhr = $.post("/rest/search/setting/quicksearch", {
resultsPerPage: $("#resultsPerPage").val(),
searchTypes: getSelectedTypes(),
searchCurrentSiteOnly: $("#searchCurrentSiteOnly").is(":checked")
});
jqxhr.complete(function(data) {
alert("ok"==data.responseText?alertOk:alertNotOk+data.responseText);
});
});
// Handler for the checkboxes
$(":checkbox[name='searchInOption']").live("click", function(){
if("all"==this.value){ //All checked
if($(this).is(":checked")) { // check/uncheck all
$(":checkbox[name='searchInOption']").attr('checked', true);
} else {
$(":checkbox[name='searchInOption']").attr('checked', false);
}
} else {
$(":checkbox[name='searchInOption'][value='all']").attr('checked', false); //uncheck All Sites
}
});
// Load all needed configurations and settings from the service to build the UI
$.getJSON("/rest/search/registry", function(registry){
CONNECTORS = registry[0];
var searchInOpts=[];
searchInOpts.push(CHECKBOX_TEMPLATE.
replace(/%{name}/g, "searchInOption").
replace(/%{value}/g, "all").
replace(/%{text}/g, allMsg));
$.each(registry[1], function(i, type){
if(CONNECTORS[type]) searchInOpts.push(CHECKBOX_TEMPLATE.
replace(/%{name}/g, "searchInOption").
replace(/%{value}/g, type).
replace(/%{text}/g, eXo.ecm.WCMUtils.getBundle("quicksearch.type." + CONNECTORS[type].displayName , eXo.env.portal.language)));
});
$("#lstSearchInOptions").html(searchInOpts.join(""));
// Display the previously saved (or default) quick search setting
$.getJSON("/rest/search/setting/quicksearch", function(setting){
if(-1 != $.inArray("all", setting.searchTypes)) {
$(":checkbox[name='searchInOption']").attr('checked', true);
} else {
$(":checkbox[name='searchInOption']").attr('checked', false);
$.each($(":checkbox[name='searchInOption']"), function(){
if(-1 != $.inArray(this.value, setting.searchTypes)) {
$(this).attr('checked', true);
}
});
}
$("#resultsPerPage").val(setting.resultsPerPage);
$("#searchCurrentSiteOnly").attr('checked', setting.searchCurrentSiteOnly);
});
});
}
})($);
| {
var specials = '`~!@#$%^&*()-=+{}[]\|;:\'"<>,./?';
for(var i = 0; i < specials.length; i++) {
if(c == specials.charAt(i)) {
return true;
}
}
return false;
} | identifier_body |
quicksearch.js | (function($){
// Function to be called when the quick search template is ready
window.initQuickSearch = function initQuickSearch(portletId,seeAllMsg, noResultMsg, searching) {
//*** Global variables ***
var CONNECTORS; //all registered SearchService connectors
var SEARCH_TYPES; //enabled search types
var QUICKSEARCH_SETTING; //quick search setting
var DELAY_SEARCH_TIME = 1000; // Search time delay
var txtQuickSearchQuery_id = "#adminkeyword-" + portletId;
var linkQuickSearchQuery_id = "#adminSearchLink-" + portletId;
var quickSearchResult_id = "#quickSearchResult-" + portletId;
var seeAll_id = "#seeAll-" + portletId;
var value = $(txtQuickSearchQuery_id).val();
var isDefault = false;
var isEnterKey = false;
window['isSearching'] = false;
var durationKeyup = 0;
var keypressed = false;
var skipKeyup = 0;
var textVal = "";
var firstBackSpace = true;
var index = 0;
var currentFocus = 0;
var searchTimeout;
//var skipKeyUp = [9,16,17,18,19,20,33,34,35,36,37,38,39,40,45,49];
var mapKeyUp = {"0":"48","1":"49","2":"50","3":"51","4":"52","5":"53","6":"54","7":"55","8":"56","9":"57",
"a":"65","b":"66","c":"67","d":"68","e":"69","f":"70","g":"71","h":"72","i":"73","j":"74",
"k":"75","l":"76","m":"77","n":"78","o":"79","p":"80","q":"81","r":"82","s":"83","t":"84",
"u":"85","v":"86","w":"87","x":"88","y":"89","z":"90","numpad 0":"96","numpad 1":"97","numpad 2":"98",
"numpad 3":"99","numpad 4":"100","numpad 5":"101","numpad 6":"102","numpad 7":"103", "backspace":"8", "delete":"46"};
/*var QUICKSEARCH_RESULT_TEMPLATE= " \
<div class='quickSearchResult %{type}' tabindex='%{index}' id='quickSearchResult%{index}'> \
<span class='avatar'> \
%{avatar} \
</span> \
<a href='%{url}' class='name'>%{title}</a> \
</div> \
";*///<div class='Excerpt Ellipsis'>%{excerpt}</div> \
var QUICKSEARCH_RESULT_TEMPLATE= "\
<div class='quickSearchResult %{type}' tabindex='%{index}' id='quickSearchResult%{index}' onkeydown='fireAEvent(event,this.id)'> \
%{lineResult}\
</div>";//<div class='Excerpt Ellipsis'>%{excerpt}</div> \
var LINE_RESULT_TEMPLATE = "\
<a href='%{url}'> \
<i class='%{cssClass}'></i> %{title}\
</a>";
var OTHER_RESULT_TEMPLATE = "\
<a href='%{url}' class='avatarTiny'><img src='%{imageSrc}'/>%{title}</a>\
";
var QUICKSEARCH_TABLE_TEMPLATE=" \
<table class='uiGrid table table-striped rounded-corners'> \
<col width='30%'> \
<col width='70%'> \
%{resultRows} \
%{messageRow} \
</table> \
";
var QUICKSEARCH_TABLE_ROW_TEMPLATE=" \
<tr> \
<th> \
%{type} \
</th> \
<td> \
%{results} \
</td> \
</tr> \
";
var QUICKSEARCH_SEE_ALL=" \
<tr> \
<td colspan='2' class='message'> \
<a id='seeAll-" + portletId + "' class='' href='#'>"+seeAllMsg+"</a> \
</td> \
</tr> \
";
var QUICKSEARCH_NO_RESULT=" \
<tr> \
<td colspan='2' class='noResult'> \
<span id='seeAll-" + portletId + "' class='' href='#'>"+noResultMsg+" <strong>%{query}<strong></span> \
</td> \
</tr> \
";
var IMAGE_AVATAR_TEMPLATE = " \
<span class='avatar pull-left'> \
<img src='%{imageSrc}'> \
</span> \
";
var CSS_AVATAR_TEMPLATE = " \
<span class='avatar pull-left'> \
<i class='%{cssClass}'></i> \
</span> \
";
var EVENT_AVATAR_TEMPLATE = " \
<div class='calendarBox calendarBox-mini'> \
<div class='heading'> %{month} </div> \
<div class='content' style='margin-left: 0px;'> %{date} </div> \
</div> \
";
var TASK_AVATAR_TEMPLATE = " \
<i class='uiIconStatus-20-%{taskStatus}'></i> \
";
var QUICKSEARCH_WAITING_TEMPLATE=" \
<table class='uiGrid table table-hover table-striped rounded-corners'> \
<col width='30%'> \
<col width='70%'> \
<tr> \
<td colspan='2' class='noResult'> \
<span id='seeAll-" + portletId + "' class='' href='#'>"+searching+" </span> \
</td> \
</tr> \
</table> \
";
searchTimeout = setTimeout(searchWhenNoKeypress, DELAY_SEARCH_TIME);
function searchWhenNoKeypress() {
if (keypressed) {
quickSearch();
keypressed = false;
}
}
//*** Utility functions ***
String.prototype.toProperCase = function() {
return this.replace(/\w\S*/g, function(txt){return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();});
};
// Highlight the specified text in a string
String.prototype.highlight = function(words) {
var str = this;
for(var i=0; i<words.length; i++) {
if("" == words[i]) continue;
var regex;
if(isSpecialExpressionCharacter(words[i].charAt(0))) {
regex = new RegExp("(\\" + words[i] + ")", "gi");
} else {
regex = new RegExp("(" + words[i] + ")", "gi");
}
str = str.replace(regex, "<strong>$1</strong>");
}
return str;
};
function isSpecialExpressionCharacter(c) {
var specials = '`~!@#$%^&*()-=+{}[]\|;:\'"<>,./?';
for(var i = 0; i < specials.length; i++) {
if(c == specials.charAt(i)) {
return true;
}
}
return false;
}
function getRegistry(callback) {
$.getJSON("/rest/search/registry", function(registry){
if(callback) callback(registry);
});
}
function | (callback) {
$.getJSON("/rest/search/setting/quicksearch", function(setting){
if(callback) callback(setting);
});
}
function setWaitingStatus(status) {
if (status){
window['isSearching'] = true;
$(txtQuickSearchQuery_id).addClass("loadding");
if ($.browser.msie && parseInt($.browser.version, 10) == 8) {
$(quickSearchResult_id).show();
}else{
var width = Math.min($(quickSearchResult_id).width(), $(window).width() - $(txtQuickSearchQuery_id).offset().left - 20);
$(quickSearchResult_id).width(width);
$(quickSearchResult_id).show();
}
}else {
window['isSearching'] = false;
}
}
function quickSearch() {
var query = $(txtQuickSearchQuery_id).val();
setWaitingStatus(true);
var types = QUICKSEARCH_SETTING.searchTypes.join(","); //search for the types specified in quick search setting only
var searchParams = {
searchContext: {
siteName:eXo.env.portal.portalName
},
q: query,
sites: QUICKSEARCH_SETTING.searchCurrentSiteOnly ? eXo.env.portal.portalName : "all",
types: types,
offset: 0,
limit: QUICKSEARCH_SETTING.resultsPerPage,
sort: "relevancy",
order: "desc"
};
// get results of all search types in a map
$.getJSON("/rest/search", searchParams, function(resultMap){
var rows = []; //one row per type
index = 0;
$.each(SEARCH_TYPES, function(i, searchType){
var results = resultMap[searchType]; //get all results of this type
if(results && 0!=$(results).size()) { //show the type with result only
//results.map(function(result){result.type = searchType;}); //assign type for each result
results = results.sort(function(a,b){
return byRelevancyDESC(a,b);
});
$.map(results, function(result){result.type = searchType;}); //assign type for each result
var cell = []; //the cell contains results of this type (in the quick search result table)
$.each(results, function(i, result){
index = index + 1;
cell.push(renderQuickSearchResult(result, index)); //add this result to the cell
});
var row = QUICKSEARCH_TABLE_ROW_TEMPLATE.replace(/%{type}/g,eXo.ecm.WCMUtils.getBundle("quicksearch.type." + CONNECTORS[searchType].displayName , eXo.env.portal.language)).replace(/%{results}/g, cell.join(""));
rows.push(row);
}
});
var messageRow = rows.length==0 ? QUICKSEARCH_NO_RESULT.replace(/%{query}/, XSSUtils.sanitizeString(query)) : QUICKSEARCH_SEE_ALL;
$(quickSearchResult_id).html(QUICKSEARCH_TABLE_TEMPLATE.replace(/%{resultRows}/, rows.join("")).replace(/%{messageRow}/g, messageRow));
if ($.browser.msie && parseInt($.browser.version, 10) == 8) {
$(quickSearchResult_id).show();
}else{
var width = Math.min($(quickSearchResult_id).width(), $(window).width() - $(txtQuickSearchQuery_id).offset().left - 20);
$(quickSearchResult_id).width(width);
$(quickSearchResult_id).show();
}
$(txtQuickSearchQuery_id).removeClass("loadding");
setWaitingStatus(false);
var searchPage = "/portal/"+eXo.env.portal.portalName+"/search";
$(seeAll_id).attr("href", searchPage +"?q="+query+"&types="+types); //the query to be passed to main search page
currentFocus = 0;
});
}
function renderQuickSearchResult(result, index) {
var query = $(txtQuickSearchQuery_id).val();
var terms = query.split(/\s+/g); //for highlighting
var avatar = "";
var line = "";
switch(result.type) {
case "event":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFEvent uiIconPLFLightGray");
break;
case "task":
var cssClass = "uiIconPLFTask" + result.taskStatus.toProperCase() + " uiIconPLFLightGray";
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClass);
break;
case "tasksInTasks":
var cssClass = "uiIconTick" + (result.completed ? ' uiIconBlue' : '');
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClass);
break;
case "file":
var cssClasses = $.map(result.fileType.split(/\s+/g), function(type){return "uiIcon16x16" + type}).join(" ");
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClasses);
break;
case "document":
var cssClasses = $.map(result.fileType.split(/\s+/g), function(type){return "uiIcon16x16Template" + type}).join(" ");
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClasses);
break;
case "post":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFDiscussion uiIconPLFLightGray");
break;
case "answer":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFAnswers uiIconPLFLightGray");
break;
case "wiki":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconWikiWiki uiIconWikiLightGray");
break;
case "page":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconEcmsTemplateDocument uiIconEcmsLightGrey");
break;
default:
line = OTHER_RESULT_TEMPLATE.replace(/%{imageSrc}/g, result.imageUrl);
}
var html = QUICKSEARCH_RESULT_TEMPLATE.
replace(/%{index}/g, index).
replace(/%{type}/g, result.type).
replace(/%{lineResult}/g, line).
replace(/%{url}/g, result.url).
replace(/%{title}/g, (result.title||"").highlight(terms)).
replace(/%{excerpt}/g, (result.excerpt||"").highlight(terms)).
replace(/%{detail}/g, (result.detail||"").highlight(terms)).
replace(/%{avatar}/g, avatar);
return html;
}
function byRelevancyDESC(b,a) {
if (a.relevancy < b.relevancy)
return -1;
if (a.relevancy > b.relevancy)
return 1;
return 0;
}
//*** Event handlers - Quick search ***
$(document).on("click",seeAll_id, function(){
window.location.href = generateAllResultsURL(); //open the main search page
$(quickSearchResult_id).hide();
});
$(txtQuickSearchQuery_id).keyup(function(e){
if(""==$(this).val()) {
$(quickSearchResult_id).hide();
return;
}
if(13==e.keyCode) {
$(seeAll_id).trigger("click"); //go to main search page if Enter is pressed
} else {
keypressed = true;
if (searchTimeout) {
clearTimeout(searchTimeout);
}
searchTimeout = setTimeout(searchWhenNoKeypress, DELAY_SEARCH_TIME);
//quickSearch(); //search for the text just being typed in
var currentVal = $(txtQuickSearchQuery_id).val();
if (!charDeletedIsEmpty(e,textVal, currentVal)){
$.each(mapKeyUp, function(key, value){
textVal = $(txtQuickSearchQuery_id).val();
});
}
}
});
//skip backspace and delete key
function charDeletedIsEmpty(key,textVal, currentVal){
//process backspace key
if (key.keyCode == 8 && textVal.trim() == currentVal.trim()){
return true;
}
//process delete key
if (key.keyCode == 46 && textVal.trim() == currentVal.trim()){
return true;
}
}
// catch ennter key when search is running
$(document).keyup(function (e) {
if (e.keyCode == 13 && window['isSearching'] && !$(txtQuickSearchQuery_id).is(':hidden') ) {
//$(quickSearchResult_id).focus();
isDefault = false;
$(linkQuickSearchQuery_id).trigger('click');
//$(linkQuickSearchQuery_id).click(); //go to main search page if Enter is pressed
}
});
$(document).keyup(function (e) {
if (e.keyCode == 13 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
var focusedId = $("*:focus").attr("id");
if (currentFocus > 0 && currentFocus <= index){
var link = $("#"+focusedId+" .name").attr('href');
window.open(link,"_self");
}
}
});
// catch arrow key
$(document).keyup(function (e) {
if (index >= 1){
if (e.keyCode == 40 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
if (currentFocus >= 1 && currentFocus < index){
var divClass = $('#quickSearchResult'+ currentFocus).attr('class').replace(" arrowResult", "");
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}
if (currentFocus < index){
currentFocus = currentFocus + 1;
$("#quickSearchResult"+currentFocus).focus();
var divClass = $('#quickSearchResult'+currentFocus).attr('class') + " arrowResult";
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}else if (currentFocus == index){
$("#quickSearchResult"+index).focus();
}
}
if (e.keyCode == 38 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
if (currentFocus > 1){
var divClass = $('#quickSearchResult'+ currentFocus).attr('class').replace(" arrowResult", "");
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}
if (currentFocus > 1){
currentFocus = currentFocus - 1;
$("#quickSearchResult"+currentFocus).focus();
var divClass = $('#quickSearchResult'+currentFocus).attr('class') + " arrowResult";
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}else if (currentFocus == 1){
$("#quickSearchResult"+currentFocus).focus();
}
}
}
});
//show the input search or go to the main search page when search link is clicked
$(linkQuickSearchQuery_id).click(function () {
if ($(txtQuickSearchQuery_id).is(':hidden')) {
$(txtQuickSearchQuery_id).val(value);
// $(txtQuickSearchQuery_id).css('color', '#555');
isDefault = true;
$(txtQuickSearchQuery_id).show();
$(txtQuickSearchQuery_id).focus();
}
else
if (isDefault == true) {
$(txtQuickSearchQuery_id).hide();
$(quickSearchResult_id).hide();
}
else {
//alert(window['isSearching']);
if(!window['isSearching']) {
$(seeAll_id).click(); //go to main search page if Enter is pressed
}else if (window['isSearching']){
$(linkQuickSearchQuery_id).attr("onclick","window.location.href='"+ generateAllResultsURL() + "'");
window['isSearching'] = false;
}
}
});
$(txtQuickSearchQuery_id).focus(function(){
$(this).val('');
// $(this).css('color', '#000');
isDefault = false;
});
//collapse the input search field when clicking outside the search box
$('body').click(function (evt) {
if ($(evt.target).parents('#ToolBarSearch').length == 0) {
// $(txtQuickSearchQuery_id).hide();
$(txtQuickSearchQuery_id).removeClass("showInputSearch");
$("#ToolBarSearch .uiIconPLF24x24Search").removeClass('uiIconCloseSearchBox');
$('#PlatformAdminToolbarContainer').removeClass('activeInputSearch');
$('#ToolBarSearch').find('input[type="text"]').removeClass('loadding');
$('body').removeClass('quickSearchDisplay');
$(quickSearchResult_id).hide();
}
});
//*** The entry point ***
// Load all needed configurations and settings from the service to prepare for the search
getRegistry(function(registry){
CONNECTORS = registry[0];
SEARCH_TYPES = registry[1];
getQuicksearchSetting(function(setting){
QUICKSEARCH_SETTING = setting;
});
});
function generateAllResultsURL() {
var query = $(txtQuickSearchQuery_id).val();
var types = QUICKSEARCH_SETTING.searchTypes.join(","); //search for the types specified in quick search setting only
var searchPage = "/portal/"+eXo.env.portal.portalName+"/search";
return searchPage + "?q="+query+"&types="+types;
}
//$ = jQuery; //undo .conflict();
}
//Function to be called when the quick search setting template is ready
window.initQuickSearchSetting = function(allMsg,alertOk,alertNotOk){
var CONNECTORS; //all registered SearchService connectors
var CHECKBOX_TEMPLATE = "\
<div class='control-group'> \
<div class='controls-full'> \
<span class='uiCheckbox'> \
<input type='checkbox' class='checkbox' name='%{name}' value='%{value}'> \
<span>%{text}</span> \
</span> \
</div> \
</div> \
";
function getSelectedTypes() {
var searchIn = [];
if($(":checkbox[name='searchInOption'][value='all']").is(":checked")) {
return "all";
} else {
$.each($(":checkbox[name='searchInOption'][value!='all']:checked"), function(){
searchIn.push(this.value);
});
if (searchIn.length==0){
return "false";
}
return searchIn.join(",");
}
}
// Call REST service to save the setting
$("#btnSave").click(function(){
var jqxhr = $.post("/rest/search/setting/quicksearch", {
resultsPerPage: $("#resultsPerPage").val(),
searchTypes: getSelectedTypes(),
searchCurrentSiteOnly: $("#searchCurrentSiteOnly").is(":checked")
});
jqxhr.complete(function(data) {
alert("ok"==data.responseText?alertOk:alertNotOk+data.responseText);
});
});
// Handler for the checkboxes
$(":checkbox[name='searchInOption']").live("click", function(){
if("all"==this.value){ //All checked
if($(this).is(":checked")) { // check/uncheck all
$(":checkbox[name='searchInOption']").attr('checked', true);
} else {
$(":checkbox[name='searchInOption']").attr('checked', false);
}
} else {
$(":checkbox[name='searchInOption'][value='all']").attr('checked', false); //uncheck All Sites
}
});
// Load all needed configurations and settings from the service to build the UI
$.getJSON("/rest/search/registry", function(registry){
CONNECTORS = registry[0];
var searchInOpts=[];
searchInOpts.push(CHECKBOX_TEMPLATE.
replace(/%{name}/g, "searchInOption").
replace(/%{value}/g, "all").
replace(/%{text}/g, allMsg));
$.each(registry[1], function(i, type){
if(CONNECTORS[type]) searchInOpts.push(CHECKBOX_TEMPLATE.
replace(/%{name}/g, "searchInOption").
replace(/%{value}/g, type).
replace(/%{text}/g, eXo.ecm.WCMUtils.getBundle("quicksearch.type." + CONNECTORS[type].displayName , eXo.env.portal.language)));
});
$("#lstSearchInOptions").html(searchInOpts.join(""));
// Display the previously saved (or default) quick search setting
$.getJSON("/rest/search/setting/quicksearch", function(setting){
if(-1 != $.inArray("all", setting.searchTypes)) {
$(":checkbox[name='searchInOption']").attr('checked', true);
} else {
$(":checkbox[name='searchInOption']").attr('checked', false);
$.each($(":checkbox[name='searchInOption']"), function(){
if(-1 != $.inArray(this.value, setting.searchTypes)) {
$(this).attr('checked', true);
}
});
}
$("#resultsPerPage").val(setting.resultsPerPage);
$("#searchCurrentSiteOnly").attr('checked', setting.searchCurrentSiteOnly);
});
});
}
})($);
| getQuicksearchSetting | identifier_name |
quicksearch.js | (function($){
// Function to be called when the quick search template is ready
window.initQuickSearch = function initQuickSearch(portletId,seeAllMsg, noResultMsg, searching) {
//*** Global variables ***
var CONNECTORS; //all registered SearchService connectors
var SEARCH_TYPES; //enabled search types
var QUICKSEARCH_SETTING; //quick search setting
var DELAY_SEARCH_TIME = 1000; // Search time delay
var txtQuickSearchQuery_id = "#adminkeyword-" + portletId;
var linkQuickSearchQuery_id = "#adminSearchLink-" + portletId;
var quickSearchResult_id = "#quickSearchResult-" + portletId;
var seeAll_id = "#seeAll-" + portletId;
var value = $(txtQuickSearchQuery_id).val();
var isDefault = false;
var isEnterKey = false;
window['isSearching'] = false;
var durationKeyup = 0;
var keypressed = false;
var skipKeyup = 0;
var textVal = "";
var firstBackSpace = true;
var index = 0;
var currentFocus = 0;
var searchTimeout;
//var skipKeyUp = [9,16,17,18,19,20,33,34,35,36,37,38,39,40,45,49];
var mapKeyUp = {"0":"48","1":"49","2":"50","3":"51","4":"52","5":"53","6":"54","7":"55","8":"56","9":"57",
"a":"65","b":"66","c":"67","d":"68","e":"69","f":"70","g":"71","h":"72","i":"73","j":"74",
"k":"75","l":"76","m":"77","n":"78","o":"79","p":"80","q":"81","r":"82","s":"83","t":"84",
"u":"85","v":"86","w":"87","x":"88","y":"89","z":"90","numpad 0":"96","numpad 1":"97","numpad 2":"98",
"numpad 3":"99","numpad 4":"100","numpad 5":"101","numpad 6":"102","numpad 7":"103", "backspace":"8", "delete":"46"};
/*var QUICKSEARCH_RESULT_TEMPLATE= " \
<div class='quickSearchResult %{type}' tabindex='%{index}' id='quickSearchResult%{index}'> \
<span class='avatar'> \
%{avatar} \
</span> \
<a href='%{url}' class='name'>%{title}</a> \
</div> \
";*///<div class='Excerpt Ellipsis'>%{excerpt}</div> \
var QUICKSEARCH_RESULT_TEMPLATE= "\
<div class='quickSearchResult %{type}' tabindex='%{index}' id='quickSearchResult%{index}' onkeydown='fireAEvent(event,this.id)'> \
%{lineResult}\
</div>";//<div class='Excerpt Ellipsis'>%{excerpt}</div> \
var LINE_RESULT_TEMPLATE = "\
<a href='%{url}'> \
<i class='%{cssClass}'></i> %{title}\
</a>";
var OTHER_RESULT_TEMPLATE = "\
<a href='%{url}' class='avatarTiny'><img src='%{imageSrc}'/>%{title}</a>\
";
var QUICKSEARCH_TABLE_TEMPLATE=" \
<table class='uiGrid table table-striped rounded-corners'> \
<col width='30%'> \ |
var QUICKSEARCH_TABLE_ROW_TEMPLATE=" \
<tr> \
<th> \
%{type} \
</th> \
<td> \
%{results} \
</td> \
</tr> \
";
var QUICKSEARCH_SEE_ALL=" \
<tr> \
<td colspan='2' class='message'> \
<a id='seeAll-" + portletId + "' class='' href='#'>"+seeAllMsg+"</a> \
</td> \
</tr> \
";
var QUICKSEARCH_NO_RESULT=" \
<tr> \
<td colspan='2' class='noResult'> \
<span id='seeAll-" + portletId + "' class='' href='#'>"+noResultMsg+" <strong>%{query}<strong></span> \
</td> \
</tr> \
";
var IMAGE_AVATAR_TEMPLATE = " \
<span class='avatar pull-left'> \
<img src='%{imageSrc}'> \
</span> \
";
var CSS_AVATAR_TEMPLATE = " \
<span class='avatar pull-left'> \
<i class='%{cssClass}'></i> \
</span> \
";
var EVENT_AVATAR_TEMPLATE = " \
<div class='calendarBox calendarBox-mini'> \
<div class='heading'> %{month} </div> \
<div class='content' style='margin-left: 0px;'> %{date} </div> \
</div> \
";
var TASK_AVATAR_TEMPLATE = " \
<i class='uiIconStatus-20-%{taskStatus}'></i> \
";
var QUICKSEARCH_WAITING_TEMPLATE=" \
<table class='uiGrid table table-hover table-striped rounded-corners'> \
<col width='30%'> \
<col width='70%'> \
<tr> \
<td colspan='2' class='noResult'> \
<span id='seeAll-" + portletId + "' class='' href='#'>"+searching+" </span> \
</td> \
</tr> \
</table> \
";
searchTimeout = setTimeout(searchWhenNoKeypress, DELAY_SEARCH_TIME);
function searchWhenNoKeypress() {
if (keypressed) {
quickSearch();
keypressed = false;
}
}
//*** Utility functions ***
String.prototype.toProperCase = function() {
return this.replace(/\w\S*/g, function(txt){return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();});
};
// Highlight the specified text in a string
String.prototype.highlight = function(words) {
var str = this;
for(var i=0; i<words.length; i++) {
if("" == words[i]) continue;
var regex;
if(isSpecialExpressionCharacter(words[i].charAt(0))) {
regex = new RegExp("(\\" + words[i] + ")", "gi");
} else {
regex = new RegExp("(" + words[i] + ")", "gi");
}
str = str.replace(regex, "<strong>$1</strong>");
}
return str;
};
function isSpecialExpressionCharacter(c) {
var specials = '`~!@#$%^&*()-=+{}[]\|;:\'"<>,./?';
for(var i = 0; i < specials.length; i++) {
if(c == specials.charAt(i)) {
return true;
}
}
return false;
}
function getRegistry(callback) {
$.getJSON("/rest/search/registry", function(registry){
if(callback) callback(registry);
});
}
function getQuicksearchSetting(callback) {
$.getJSON("/rest/search/setting/quicksearch", function(setting){
if(callback) callback(setting);
});
}
function setWaitingStatus(status) {
if (status){
window['isSearching'] = true;
$(txtQuickSearchQuery_id).addClass("loadding");
if ($.browser.msie && parseInt($.browser.version, 10) == 8) {
$(quickSearchResult_id).show();
}else{
var width = Math.min($(quickSearchResult_id).width(), $(window).width() - $(txtQuickSearchQuery_id).offset().left - 20);
$(quickSearchResult_id).width(width);
$(quickSearchResult_id).show();
}
}else {
window['isSearching'] = false;
}
}
function quickSearch() {
var query = $(txtQuickSearchQuery_id).val();
setWaitingStatus(true);
var types = QUICKSEARCH_SETTING.searchTypes.join(","); //search for the types specified in quick search setting only
var searchParams = {
searchContext: {
siteName:eXo.env.portal.portalName
},
q: query,
sites: QUICKSEARCH_SETTING.searchCurrentSiteOnly ? eXo.env.portal.portalName : "all",
types: types,
offset: 0,
limit: QUICKSEARCH_SETTING.resultsPerPage,
sort: "relevancy",
order: "desc"
};
// get results of all search types in a map
$.getJSON("/rest/search", searchParams, function(resultMap){
var rows = []; //one row per type
index = 0;
$.each(SEARCH_TYPES, function(i, searchType){
var results = resultMap[searchType]; //get all results of this type
if(results && 0!=$(results).size()) { //show the type with result only
//results.map(function(result){result.type = searchType;}); //assign type for each result
results = results.sort(function(a,b){
return byRelevancyDESC(a,b);
});
$.map(results, function(result){result.type = searchType;}); //assign type for each result
var cell = []; //the cell contains results of this type (in the quick search result table)
$.each(results, function(i, result){
index = index + 1;
cell.push(renderQuickSearchResult(result, index)); //add this result to the cell
});
var row = QUICKSEARCH_TABLE_ROW_TEMPLATE.replace(/%{type}/g,eXo.ecm.WCMUtils.getBundle("quicksearch.type." + CONNECTORS[searchType].displayName , eXo.env.portal.language)).replace(/%{results}/g, cell.join(""));
rows.push(row);
}
});
var messageRow = rows.length==0 ? QUICKSEARCH_NO_RESULT.replace(/%{query}/, XSSUtils.sanitizeString(query)) : QUICKSEARCH_SEE_ALL;
$(quickSearchResult_id).html(QUICKSEARCH_TABLE_TEMPLATE.replace(/%{resultRows}/, rows.join("")).replace(/%{messageRow}/g, messageRow));
if ($.browser.msie && parseInt($.browser.version, 10) == 8) {
$(quickSearchResult_id).show();
}else{
var width = Math.min($(quickSearchResult_id).width(), $(window).width() - $(txtQuickSearchQuery_id).offset().left - 20);
$(quickSearchResult_id).width(width);
$(quickSearchResult_id).show();
}
$(txtQuickSearchQuery_id).removeClass("loadding");
setWaitingStatus(false);
var searchPage = "/portal/"+eXo.env.portal.portalName+"/search";
$(seeAll_id).attr("href", searchPage +"?q="+query+"&types="+types); //the query to be passed to main search page
currentFocus = 0;
});
}
function renderQuickSearchResult(result, index) {
var query = $(txtQuickSearchQuery_id).val();
var terms = query.split(/\s+/g); //for highlighting
var avatar = "";
var line = "";
switch(result.type) {
case "event":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFEvent uiIconPLFLightGray");
break;
case "task":
var cssClass = "uiIconPLFTask" + result.taskStatus.toProperCase() + " uiIconPLFLightGray";
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClass);
break;
case "tasksInTasks":
var cssClass = "uiIconTick" + (result.completed ? ' uiIconBlue' : '');
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClass);
break;
case "file":
var cssClasses = $.map(result.fileType.split(/\s+/g), function(type){return "uiIcon16x16" + type}).join(" ");
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClasses);
break;
case "document":
var cssClasses = $.map(result.fileType.split(/\s+/g), function(type){return "uiIcon16x16Template" + type}).join(" ");
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClasses);
break;
case "post":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFDiscussion uiIconPLFLightGray");
break;
case "answer":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFAnswers uiIconPLFLightGray");
break;
case "wiki":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconWikiWiki uiIconWikiLightGray");
break;
case "page":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconEcmsTemplateDocument uiIconEcmsLightGrey");
break;
default:
line = OTHER_RESULT_TEMPLATE.replace(/%{imageSrc}/g, result.imageUrl);
}
var html = QUICKSEARCH_RESULT_TEMPLATE.
replace(/%{index}/g, index).
replace(/%{type}/g, result.type).
replace(/%{lineResult}/g, line).
replace(/%{url}/g, result.url).
replace(/%{title}/g, (result.title||"").highlight(terms)).
replace(/%{excerpt}/g, (result.excerpt||"").highlight(terms)).
replace(/%{detail}/g, (result.detail||"").highlight(terms)).
replace(/%{avatar}/g, avatar);
return html;
}
function byRelevancyDESC(b,a) {
if (a.relevancy < b.relevancy)
return -1;
if (a.relevancy > b.relevancy)
return 1;
return 0;
}
//*** Event handlers - Quick search ***
$(document).on("click",seeAll_id, function(){
window.location.href = generateAllResultsURL(); //open the main search page
$(quickSearchResult_id).hide();
});
$(txtQuickSearchQuery_id).keyup(function(e){
if(""==$(this).val()) {
$(quickSearchResult_id).hide();
return;
}
if(13==e.keyCode) {
$(seeAll_id).trigger("click"); //go to main search page if Enter is pressed
} else {
keypressed = true;
if (searchTimeout) {
clearTimeout(searchTimeout);
}
searchTimeout = setTimeout(searchWhenNoKeypress, DELAY_SEARCH_TIME);
//quickSearch(); //search for the text just being typed in
var currentVal = $(txtQuickSearchQuery_id).val();
if (!charDeletedIsEmpty(e,textVal, currentVal)){
$.each(mapKeyUp, function(key, value){
textVal = $(txtQuickSearchQuery_id).val();
});
}
}
});
//skip backspace and delete key
function charDeletedIsEmpty(key,textVal, currentVal){
//process backspace key
if (key.keyCode == 8 && textVal.trim() == currentVal.trim()){
return true;
}
//process delete key
if (key.keyCode == 46 && textVal.trim() == currentVal.trim()){
return true;
}
}
// catch ennter key when search is running
$(document).keyup(function (e) {
if (e.keyCode == 13 && window['isSearching'] && !$(txtQuickSearchQuery_id).is(':hidden') ) {
//$(quickSearchResult_id).focus();
isDefault = false;
$(linkQuickSearchQuery_id).trigger('click');
//$(linkQuickSearchQuery_id).click(); //go to main search page if Enter is pressed
}
});
$(document).keyup(function (e) {
if (e.keyCode == 13 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
var focusedId = $("*:focus").attr("id");
if (currentFocus > 0 && currentFocus <= index){
var link = $("#"+focusedId+" .name").attr('href');
window.open(link,"_self");
}
}
});
// catch arrow key
$(document).keyup(function (e) {
if (index >= 1){
if (e.keyCode == 40 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
if (currentFocus >= 1 && currentFocus < index){
var divClass = $('#quickSearchResult'+ currentFocus).attr('class').replace(" arrowResult", "");
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}
if (currentFocus < index){
currentFocus = currentFocus + 1;
$("#quickSearchResult"+currentFocus).focus();
var divClass = $('#quickSearchResult'+currentFocus).attr('class') + " arrowResult";
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}else if (currentFocus == index){
$("#quickSearchResult"+index).focus();
}
}
if (e.keyCode == 38 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
if (currentFocus > 1){
var divClass = $('#quickSearchResult'+ currentFocus).attr('class').replace(" arrowResult", "");
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}
if (currentFocus > 1){
currentFocus = currentFocus - 1;
$("#quickSearchResult"+currentFocus).focus();
var divClass = $('#quickSearchResult'+currentFocus).attr('class') + " arrowResult";
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}else if (currentFocus == 1){
$("#quickSearchResult"+currentFocus).focus();
}
}
}
});
//show the input search or go to the main search page when search link is clicked
$(linkQuickSearchQuery_id).click(function () {
if ($(txtQuickSearchQuery_id).is(':hidden')) {
$(txtQuickSearchQuery_id).val(value);
// $(txtQuickSearchQuery_id).css('color', '#555');
isDefault = true;
$(txtQuickSearchQuery_id).show();
$(txtQuickSearchQuery_id).focus();
}
else
if (isDefault == true) {
$(txtQuickSearchQuery_id).hide();
$(quickSearchResult_id).hide();
}
else {
//alert(window['isSearching']);
if(!window['isSearching']) {
$(seeAll_id).click(); //go to main search page if Enter is pressed
}else if (window['isSearching']){
$(linkQuickSearchQuery_id).attr("onclick","window.location.href='"+ generateAllResultsURL() + "'");
window['isSearching'] = false;
}
}
});
$(txtQuickSearchQuery_id).focus(function(){
$(this).val('');
// $(this).css('color', '#000');
isDefault = false;
});
//collapse the input search field when clicking outside the search box
$('body').click(function (evt) {
if ($(evt.target).parents('#ToolBarSearch').length == 0) {
// $(txtQuickSearchQuery_id).hide();
$(txtQuickSearchQuery_id).removeClass("showInputSearch");
$("#ToolBarSearch .uiIconPLF24x24Search").removeClass('uiIconCloseSearchBox');
$('#PlatformAdminToolbarContainer').removeClass('activeInputSearch');
$('#ToolBarSearch').find('input[type="text"]').removeClass('loadding');
$('body').removeClass('quickSearchDisplay');
$(quickSearchResult_id).hide();
}
});
//*** The entry point ***
// Load all needed configurations and settings from the service to prepare for the search
getRegistry(function(registry){
CONNECTORS = registry[0];
SEARCH_TYPES = registry[1];
getQuicksearchSetting(function(setting){
QUICKSEARCH_SETTING = setting;
});
});
function generateAllResultsURL() {
var query = $(txtQuickSearchQuery_id).val();
var types = QUICKSEARCH_SETTING.searchTypes.join(","); //search for the types specified in quick search setting only
var searchPage = "/portal/"+eXo.env.portal.portalName+"/search";
return searchPage + "?q="+query+"&types="+types;
}
//$ = jQuery; //undo .conflict();
}
//Function to be called when the quick search setting template is ready
window.initQuickSearchSetting = function(allMsg,alertOk,alertNotOk){
var CONNECTORS; //all registered SearchService connectors
var CHECKBOX_TEMPLATE = "\
<div class='control-group'> \
<div class='controls-full'> \
<span class='uiCheckbox'> \
<input type='checkbox' class='checkbox' name='%{name}' value='%{value}'> \
<span>%{text}</span> \
</span> \
</div> \
</div> \
";
function getSelectedTypes() {
var searchIn = [];
if($(":checkbox[name='searchInOption'][value='all']").is(":checked")) {
return "all";
} else {
$.each($(":checkbox[name='searchInOption'][value!='all']:checked"), function(){
searchIn.push(this.value);
});
if (searchIn.length==0){
return "false";
}
return searchIn.join(",");
}
}
// Call REST service to save the setting
$("#btnSave").click(function(){
var jqxhr = $.post("/rest/search/setting/quicksearch", {
resultsPerPage: $("#resultsPerPage").val(),
searchTypes: getSelectedTypes(),
searchCurrentSiteOnly: $("#searchCurrentSiteOnly").is(":checked")
});
jqxhr.complete(function(data) {
alert("ok"==data.responseText?alertOk:alertNotOk+data.responseText);
});
});
// Handler for the checkboxes
$(":checkbox[name='searchInOption']").live("click", function(){
if("all"==this.value){ //All checked
if($(this).is(":checked")) { // check/uncheck all
$(":checkbox[name='searchInOption']").attr('checked', true);
} else {
$(":checkbox[name='searchInOption']").attr('checked', false);
}
} else {
$(":checkbox[name='searchInOption'][value='all']").attr('checked', false); //uncheck All Sites
}
});
// Load all needed configurations and settings from the service to build the UI
$.getJSON("/rest/search/registry", function(registry){
CONNECTORS = registry[0];
var searchInOpts=[];
searchInOpts.push(CHECKBOX_TEMPLATE.
replace(/%{name}/g, "searchInOption").
replace(/%{value}/g, "all").
replace(/%{text}/g, allMsg));
$.each(registry[1], function(i, type){
if(CONNECTORS[type]) searchInOpts.push(CHECKBOX_TEMPLATE.
replace(/%{name}/g, "searchInOption").
replace(/%{value}/g, type).
replace(/%{text}/g, eXo.ecm.WCMUtils.getBundle("quicksearch.type." + CONNECTORS[type].displayName , eXo.env.portal.language)));
});
$("#lstSearchInOptions").html(searchInOpts.join(""));
// Display the previously saved (or default) quick search setting
$.getJSON("/rest/search/setting/quicksearch", function(setting){
if(-1 != $.inArray("all", setting.searchTypes)) {
$(":checkbox[name='searchInOption']").attr('checked', true);
} else {
$(":checkbox[name='searchInOption']").attr('checked', false);
$.each($(":checkbox[name='searchInOption']"), function(){
if(-1 != $.inArray(this.value, setting.searchTypes)) {
$(this).attr('checked', true);
}
});
}
$("#resultsPerPage").val(setting.resultsPerPage);
$("#searchCurrentSiteOnly").attr('checked', setting.searchCurrentSiteOnly);
});
});
}
})($); | <col width='70%'> \
%{resultRows} \
%{messageRow} \
</table> \
"; | random_line_split |
quicksearch.js | (function($){
// Function to be called when the quick search template is ready
window.initQuickSearch = function initQuickSearch(portletId,seeAllMsg, noResultMsg, searching) {
//*** Global variables ***
var CONNECTORS; //all registered SearchService connectors
var SEARCH_TYPES; //enabled search types
var QUICKSEARCH_SETTING; //quick search setting
var DELAY_SEARCH_TIME = 1000; // Search time delay
var txtQuickSearchQuery_id = "#adminkeyword-" + portletId;
var linkQuickSearchQuery_id = "#adminSearchLink-" + portletId;
var quickSearchResult_id = "#quickSearchResult-" + portletId;
var seeAll_id = "#seeAll-" + portletId;
var value = $(txtQuickSearchQuery_id).val();
var isDefault = false;
var isEnterKey = false;
window['isSearching'] = false;
var durationKeyup = 0;
var keypressed = false;
var skipKeyup = 0;
var textVal = "";
var firstBackSpace = true;
var index = 0;
var currentFocus = 0;
var searchTimeout;
//var skipKeyUp = [9,16,17,18,19,20,33,34,35,36,37,38,39,40,45,49];
var mapKeyUp = {"0":"48","1":"49","2":"50","3":"51","4":"52","5":"53","6":"54","7":"55","8":"56","9":"57",
"a":"65","b":"66","c":"67","d":"68","e":"69","f":"70","g":"71","h":"72","i":"73","j":"74",
"k":"75","l":"76","m":"77","n":"78","o":"79","p":"80","q":"81","r":"82","s":"83","t":"84",
"u":"85","v":"86","w":"87","x":"88","y":"89","z":"90","numpad 0":"96","numpad 1":"97","numpad 2":"98",
"numpad 3":"99","numpad 4":"100","numpad 5":"101","numpad 6":"102","numpad 7":"103", "backspace":"8", "delete":"46"};
/*var QUICKSEARCH_RESULT_TEMPLATE= " \
<div class='quickSearchResult %{type}' tabindex='%{index}' id='quickSearchResult%{index}'> \
<span class='avatar'> \
%{avatar} \
</span> \
<a href='%{url}' class='name'>%{title}</a> \
</div> \
";*///<div class='Excerpt Ellipsis'>%{excerpt}</div> \
var QUICKSEARCH_RESULT_TEMPLATE= "\
<div class='quickSearchResult %{type}' tabindex='%{index}' id='quickSearchResult%{index}' onkeydown='fireAEvent(event,this.id)'> \
%{lineResult}\
</div>";//<div class='Excerpt Ellipsis'>%{excerpt}</div> \
var LINE_RESULT_TEMPLATE = "\
<a href='%{url}'> \
<i class='%{cssClass}'></i> %{title}\
</a>";
var OTHER_RESULT_TEMPLATE = "\
<a href='%{url}' class='avatarTiny'><img src='%{imageSrc}'/>%{title}</a>\
";
var QUICKSEARCH_TABLE_TEMPLATE=" \
<table class='uiGrid table table-striped rounded-corners'> \
<col width='30%'> \
<col width='70%'> \
%{resultRows} \
%{messageRow} \
</table> \
";
var QUICKSEARCH_TABLE_ROW_TEMPLATE=" \
<tr> \
<th> \
%{type} \
</th> \
<td> \
%{results} \
</td> \
</tr> \
";
var QUICKSEARCH_SEE_ALL=" \
<tr> \
<td colspan='2' class='message'> \
<a id='seeAll-" + portletId + "' class='' href='#'>"+seeAllMsg+"</a> \
</td> \
</tr> \
";
var QUICKSEARCH_NO_RESULT=" \
<tr> \
<td colspan='2' class='noResult'> \
<span id='seeAll-" + portletId + "' class='' href='#'>"+noResultMsg+" <strong>%{query}<strong></span> \
</td> \
</tr> \
";
var IMAGE_AVATAR_TEMPLATE = " \
<span class='avatar pull-left'> \
<img src='%{imageSrc}'> \
</span> \
";
var CSS_AVATAR_TEMPLATE = " \
<span class='avatar pull-left'> \
<i class='%{cssClass}'></i> \
</span> \
";
var EVENT_AVATAR_TEMPLATE = " \
<div class='calendarBox calendarBox-mini'> \
<div class='heading'> %{month} </div> \
<div class='content' style='margin-left: 0px;'> %{date} </div> \
</div> \
";
var TASK_AVATAR_TEMPLATE = " \
<i class='uiIconStatus-20-%{taskStatus}'></i> \
";
var QUICKSEARCH_WAITING_TEMPLATE=" \
<table class='uiGrid table table-hover table-striped rounded-corners'> \
<col width='30%'> \
<col width='70%'> \
<tr> \
<td colspan='2' class='noResult'> \
<span id='seeAll-" + portletId + "' class='' href='#'>"+searching+" </span> \
</td> \
</tr> \
</table> \
";
searchTimeout = setTimeout(searchWhenNoKeypress, DELAY_SEARCH_TIME);
function searchWhenNoKeypress() {
if (keypressed) {
quickSearch();
keypressed = false;
}
}
//*** Utility functions ***
String.prototype.toProperCase = function() {
return this.replace(/\w\S*/g, function(txt){return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();});
};
// Highlight the specified text in a string
String.prototype.highlight = function(words) {
var str = this;
for(var i=0; i<words.length; i++) {
if("" == words[i]) continue;
var regex;
if(isSpecialExpressionCharacter(words[i].charAt(0))) {
regex = new RegExp("(\\" + words[i] + ")", "gi");
} else {
regex = new RegExp("(" + words[i] + ")", "gi");
}
str = str.replace(regex, "<strong>$1</strong>");
}
return str;
};
function isSpecialExpressionCharacter(c) {
var specials = '`~!@#$%^&*()-=+{}[]\|;:\'"<>,./?';
for(var i = 0; i < specials.length; i++) {
if(c == specials.charAt(i)) {
return true;
}
}
return false;
}
function getRegistry(callback) {
$.getJSON("/rest/search/registry", function(registry){
if(callback) callback(registry);
});
}
function getQuicksearchSetting(callback) {
$.getJSON("/rest/search/setting/quicksearch", function(setting){
if(callback) callback(setting);
});
}
function setWaitingStatus(status) {
if (status){
window['isSearching'] = true;
$(txtQuickSearchQuery_id).addClass("loadding");
if ($.browser.msie && parseInt($.browser.version, 10) == 8) {
$(quickSearchResult_id).show();
}else{
var width = Math.min($(quickSearchResult_id).width(), $(window).width() - $(txtQuickSearchQuery_id).offset().left - 20);
$(quickSearchResult_id).width(width);
$(quickSearchResult_id).show();
}
}else {
window['isSearching'] = false;
}
}
function quickSearch() {
var query = $(txtQuickSearchQuery_id).val();
setWaitingStatus(true);
var types = QUICKSEARCH_SETTING.searchTypes.join(","); //search for the types specified in quick search setting only
var searchParams = {
searchContext: {
siteName:eXo.env.portal.portalName
},
q: query,
sites: QUICKSEARCH_SETTING.searchCurrentSiteOnly ? eXo.env.portal.portalName : "all",
types: types,
offset: 0,
limit: QUICKSEARCH_SETTING.resultsPerPage,
sort: "relevancy",
order: "desc"
};
// get results of all search types in a map
$.getJSON("/rest/search", searchParams, function(resultMap){
var rows = []; //one row per type
index = 0;
$.each(SEARCH_TYPES, function(i, searchType){
var results = resultMap[searchType]; //get all results of this type
if(results && 0!=$(results).size()) { //show the type with result only
//results.map(function(result){result.type = searchType;}); //assign type for each result
results = results.sort(function(a,b){
return byRelevancyDESC(a,b);
});
$.map(results, function(result){result.type = searchType;}); //assign type for each result
var cell = []; //the cell contains results of this type (in the quick search result table)
$.each(results, function(i, result){
index = index + 1;
cell.push(renderQuickSearchResult(result, index)); //add this result to the cell
});
var row = QUICKSEARCH_TABLE_ROW_TEMPLATE.replace(/%{type}/g,eXo.ecm.WCMUtils.getBundle("quicksearch.type." + CONNECTORS[searchType].displayName , eXo.env.portal.language)).replace(/%{results}/g, cell.join(""));
rows.push(row);
}
});
var messageRow = rows.length==0 ? QUICKSEARCH_NO_RESULT.replace(/%{query}/, XSSUtils.sanitizeString(query)) : QUICKSEARCH_SEE_ALL;
$(quickSearchResult_id).html(QUICKSEARCH_TABLE_TEMPLATE.replace(/%{resultRows}/, rows.join("")).replace(/%{messageRow}/g, messageRow));
if ($.browser.msie && parseInt($.browser.version, 10) == 8) {
$(quickSearchResult_id).show();
}else{
var width = Math.min($(quickSearchResult_id).width(), $(window).width() - $(txtQuickSearchQuery_id).offset().left - 20);
$(quickSearchResult_id).width(width);
$(quickSearchResult_id).show();
}
$(txtQuickSearchQuery_id).removeClass("loadding");
setWaitingStatus(false);
var searchPage = "/portal/"+eXo.env.portal.portalName+"/search";
$(seeAll_id).attr("href", searchPage +"?q="+query+"&types="+types); //the query to be passed to main search page
currentFocus = 0;
});
}
function renderQuickSearchResult(result, index) {
var query = $(txtQuickSearchQuery_id).val();
var terms = query.split(/\s+/g); //for highlighting
var avatar = "";
var line = "";
switch(result.type) {
case "event":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFEvent uiIconPLFLightGray");
break;
case "task":
var cssClass = "uiIconPLFTask" + result.taskStatus.toProperCase() + " uiIconPLFLightGray";
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClass);
break;
case "tasksInTasks":
var cssClass = "uiIconTick" + (result.completed ? ' uiIconBlue' : '');
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClass);
break;
case "file":
var cssClasses = $.map(result.fileType.split(/\s+/g), function(type){return "uiIcon16x16" + type}).join(" ");
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClasses);
break;
case "document":
var cssClasses = $.map(result.fileType.split(/\s+/g), function(type){return "uiIcon16x16Template" + type}).join(" ");
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, cssClasses);
break;
case "post":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFDiscussion uiIconPLFLightGray");
break;
case "answer":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconPLFAnswers uiIconPLFLightGray");
break;
case "wiki":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconWikiWiki uiIconWikiLightGray");
break;
case "page":
line = LINE_RESULT_TEMPLATE.replace(/%{cssClass}/g, "uiIconEcmsTemplateDocument uiIconEcmsLightGrey");
break;
default:
line = OTHER_RESULT_TEMPLATE.replace(/%{imageSrc}/g, result.imageUrl);
}
var html = QUICKSEARCH_RESULT_TEMPLATE.
replace(/%{index}/g, index).
replace(/%{type}/g, result.type).
replace(/%{lineResult}/g, line).
replace(/%{url}/g, result.url).
replace(/%{title}/g, (result.title||"").highlight(terms)).
replace(/%{excerpt}/g, (result.excerpt||"").highlight(terms)).
replace(/%{detail}/g, (result.detail||"").highlight(terms)).
replace(/%{avatar}/g, avatar);
return html;
}
function byRelevancyDESC(b,a) {
if (a.relevancy < b.relevancy)
return -1;
if (a.relevancy > b.relevancy)
return 1;
return 0;
}
//*** Event handlers - Quick search ***
$(document).on("click",seeAll_id, function(){
window.location.href = generateAllResultsURL(); //open the main search page
$(quickSearchResult_id).hide();
});
$(txtQuickSearchQuery_id).keyup(function(e){
if(""==$(this).val()) {
$(quickSearchResult_id).hide();
return;
}
if(13==e.keyCode) {
$(seeAll_id).trigger("click"); //go to main search page if Enter is pressed
} else {
keypressed = true;
if (searchTimeout) {
clearTimeout(searchTimeout);
}
searchTimeout = setTimeout(searchWhenNoKeypress, DELAY_SEARCH_TIME);
//quickSearch(); //search for the text just being typed in
var currentVal = $(txtQuickSearchQuery_id).val();
if (!charDeletedIsEmpty(e,textVal, currentVal)){
$.each(mapKeyUp, function(key, value){
textVal = $(txtQuickSearchQuery_id).val();
});
}
}
});
//skip backspace and delete key
function charDeletedIsEmpty(key,textVal, currentVal){
//process backspace key
if (key.keyCode == 8 && textVal.trim() == currentVal.trim()){
return true;
}
//process delete key
if (key.keyCode == 46 && textVal.trim() == currentVal.trim()){
return true;
}
}
// catch ennter key when search is running
$(document).keyup(function (e) {
if (e.keyCode == 13 && window['isSearching'] && !$(txtQuickSearchQuery_id).is(':hidden') ) {
//$(quickSearchResult_id).focus();
isDefault = false;
$(linkQuickSearchQuery_id).trigger('click');
//$(linkQuickSearchQuery_id).click(); //go to main search page if Enter is pressed
}
});
$(document).keyup(function (e) {
if (e.keyCode == 13 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
var focusedId = $("*:focus").attr("id");
if (currentFocus > 0 && currentFocus <= index) |
}
});
// catch arrow key
$(document).keyup(function (e) {
if (index >= 1){
if (e.keyCode == 40 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
if (currentFocus >= 1 && currentFocus < index){
var divClass = $('#quickSearchResult'+ currentFocus).attr('class').replace(" arrowResult", "");
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}
if (currentFocus < index){
currentFocus = currentFocus + 1;
$("#quickSearchResult"+currentFocus).focus();
var divClass = $('#quickSearchResult'+currentFocus).attr('class') + " arrowResult";
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}else if (currentFocus == index){
$("#quickSearchResult"+index).focus();
}
}
if (e.keyCode == 38 && !$(txtQuickSearchQuery_id).is(':hidden') ) {
if (currentFocus > 1){
var divClass = $('#quickSearchResult'+ currentFocus).attr('class').replace(" arrowResult", "");
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}
if (currentFocus > 1){
currentFocus = currentFocus - 1;
$("#quickSearchResult"+currentFocus).focus();
var divClass = $('#quickSearchResult'+currentFocus).attr('class') + " arrowResult";
$('#quickSearchResult'+currentFocus).attr('class',divClass);
}else if (currentFocus == 1){
$("#quickSearchResult"+currentFocus).focus();
}
}
}
});
//show the input search or go to the main search page when search link is clicked
$(linkQuickSearchQuery_id).click(function () {
if ($(txtQuickSearchQuery_id).is(':hidden')) {
$(txtQuickSearchQuery_id).val(value);
// $(txtQuickSearchQuery_id).css('color', '#555');
isDefault = true;
$(txtQuickSearchQuery_id).show();
$(txtQuickSearchQuery_id).focus();
}
else
if (isDefault == true) {
$(txtQuickSearchQuery_id).hide();
$(quickSearchResult_id).hide();
}
else {
//alert(window['isSearching']);
if(!window['isSearching']) {
$(seeAll_id).click(); //go to main search page if Enter is pressed
}else if (window['isSearching']){
$(linkQuickSearchQuery_id).attr("onclick","window.location.href='"+ generateAllResultsURL() + "'");
window['isSearching'] = false;
}
}
});
$(txtQuickSearchQuery_id).focus(function(){
$(this).val('');
// $(this).css('color', '#000');
isDefault = false;
});
//collapse the input search field when clicking outside the search box
$('body').click(function (evt) {
if ($(evt.target).parents('#ToolBarSearch').length == 0) {
// $(txtQuickSearchQuery_id).hide();
$(txtQuickSearchQuery_id).removeClass("showInputSearch");
$("#ToolBarSearch .uiIconPLF24x24Search").removeClass('uiIconCloseSearchBox');
$('#PlatformAdminToolbarContainer').removeClass('activeInputSearch');
$('#ToolBarSearch').find('input[type="text"]').removeClass('loadding');
$('body').removeClass('quickSearchDisplay');
$(quickSearchResult_id).hide();
}
});
//*** The entry point ***
// Load all needed configurations and settings from the service to prepare for the search
getRegistry(function(registry){
CONNECTORS = registry[0];
SEARCH_TYPES = registry[1];
getQuicksearchSetting(function(setting){
QUICKSEARCH_SETTING = setting;
});
});
function generateAllResultsURL() {
var query = $(txtQuickSearchQuery_id).val();
var types = QUICKSEARCH_SETTING.searchTypes.join(","); //search for the types specified in quick search setting only
var searchPage = "/portal/"+eXo.env.portal.portalName+"/search";
return searchPage + "?q="+query+"&types="+types;
}
//$ = jQuery; //undo .conflict();
}
//Function to be called when the quick search setting template is ready
window.initQuickSearchSetting = function(allMsg,alertOk,alertNotOk){
var CONNECTORS; //all registered SearchService connectors
var CHECKBOX_TEMPLATE = "\
<div class='control-group'> \
<div class='controls-full'> \
<span class='uiCheckbox'> \
<input type='checkbox' class='checkbox' name='%{name}' value='%{value}'> \
<span>%{text}</span> \
</span> \
</div> \
</div> \
";
function getSelectedTypes() {
var searchIn = [];
if($(":checkbox[name='searchInOption'][value='all']").is(":checked")) {
return "all";
} else {
$.each($(":checkbox[name='searchInOption'][value!='all']:checked"), function(){
searchIn.push(this.value);
});
if (searchIn.length==0){
return "false";
}
return searchIn.join(",");
}
}
// Call REST service to save the setting
$("#btnSave").click(function(){
var jqxhr = $.post("/rest/search/setting/quicksearch", {
resultsPerPage: $("#resultsPerPage").val(),
searchTypes: getSelectedTypes(),
searchCurrentSiteOnly: $("#searchCurrentSiteOnly").is(":checked")
});
jqxhr.complete(function(data) {
alert("ok"==data.responseText?alertOk:alertNotOk+data.responseText);
});
});
// Handler for the checkboxes
$(":checkbox[name='searchInOption']").live("click", function(){
if("all"==this.value){ //All checked
if($(this).is(":checked")) { // check/uncheck all
$(":checkbox[name='searchInOption']").attr('checked', true);
} else {
$(":checkbox[name='searchInOption']").attr('checked', false);
}
} else {
$(":checkbox[name='searchInOption'][value='all']").attr('checked', false); //uncheck All Sites
}
});
// Load all needed configurations and settings from the service to build the UI
$.getJSON("/rest/search/registry", function(registry){
CONNECTORS = registry[0];
var searchInOpts=[];
searchInOpts.push(CHECKBOX_TEMPLATE.
replace(/%{name}/g, "searchInOption").
replace(/%{value}/g, "all").
replace(/%{text}/g, allMsg));
$.each(registry[1], function(i, type){
if(CONNECTORS[type]) searchInOpts.push(CHECKBOX_TEMPLATE.
replace(/%{name}/g, "searchInOption").
replace(/%{value}/g, type).
replace(/%{text}/g, eXo.ecm.WCMUtils.getBundle("quicksearch.type." + CONNECTORS[type].displayName , eXo.env.portal.language)));
});
$("#lstSearchInOptions").html(searchInOpts.join(""));
// Display the previously saved (or default) quick search setting
$.getJSON("/rest/search/setting/quicksearch", function(setting){
if(-1 != $.inArray("all", setting.searchTypes)) {
$(":checkbox[name='searchInOption']").attr('checked', true);
} else {
$(":checkbox[name='searchInOption']").attr('checked', false);
$.each($(":checkbox[name='searchInOption']"), function(){
if(-1 != $.inArray(this.value, setting.searchTypes)) {
$(this).attr('checked', true);
}
});
}
$("#resultsPerPage").val(setting.resultsPerPage);
$("#searchCurrentSiteOnly").attr('checked', setting.searchCurrentSiteOnly);
});
});
}
})($);
| {
var link = $("#"+focusedId+" .name").attr('href');
window.open(link,"_self");
} | conditional_block |
utils.py | from __future__ import unicode_literals
import os
import re
import requests
from functools import wraps
NULL_SUFFIX = '''_or_null'''
try:
string_type = basestring
except NameError: # Python 3, basestring causes NameError
string_type = str
def make_url(context, endingpoint):
BASE_URL = dereference_variables(context, '$BASE_URL')
if 'http' not in BASE_URL:
BASE_URL = 'http://{0}'.format(BASE_URL)
return '{0}{1}'.format(BASE_URL, endingpoint)
def dereference_variables(context, value):
variables = context.variables\
if hasattr(context, 'variables') else {}
for key in re.findall('\$+\w+', value):
var_name = key[1:]
value = value.replace(
key,
variables.get(
var_name,
os.environ.get(var_name, key)
)
)
return value
def dereference_arguments(f):
@wraps(f)
def wrapper(context, *args, **kwargs):
new_kwargs = {}
new_args = []
for key, value in kwargs.items():
new_kwargs[key] = dereference_variables(context.text, value)
for value in args:
new_args.append(dereference_variables(context.text, value))
context.processed_text = dereference_variables(context, context.text)\
if context.text else ''
return f(context, *new_args, **new_kwargs)
return wrapper
def compare_lists(expected_list, actual_list, path=None):
assert type(expected_list) is list,\
"Expected {0} is not a list".format(repr(expected_list))
assert type(actual_list) is list,\
"Actual {0} is not a list".format(repr(actual_list))
for i, item in enumerate(expected_list):
path = '{0}.{1}'.format(path, i) if path else str(i)
try:
actual_value = actual_list[i]
except ValueError:
actual_value = None
compare_values(item, actual_value, path=path)
def compare_dicts_structure(expected_dict, actual_dict):
'''
Make a comparison of the keys of the two dictionaries passed
and if they are not the same prepare an assertion message on
how they differ and raise the AssertionError
'''
msg_assert = "Keys/Properties of actual values do not match those of expected values"
msg_keys_in_act_but_not_exp = ""
msg_keys_in_exp_but_not_act = ""
if expected_dict.keys() == actual_dict.keys():
pass
else:
set_exp_keys = set(expected_dict.keys())
set_actual_keys = set(actual_dict.keys())
#Deal with actual keys not seen in expected
exp_keys_not_in_act = set_exp_keys - set_actual_keys
if exp_keys_not_in_act:
str_exp_keys_not_in_act = " ,".join(exp_keys_not_in_act)
msg_keys_in_exp_but_not_act = '''The following keys are in the expected values but not the actual values : {}'''.format(str_exp_keys_not_in_act)
#Deal with expected keys not seen in actual
act_keys_not_in_exp = set_actual_keys - set_exp_keys
if act_keys_not_in_exp:
str_act_keys_not_in_exp = " ,".join(act_keys_not_in_exp)
msg_keys_in_act_but_not_exp = '''The following keys are in the actual values but not the expected values : {}'''.format(str_act_keys_not_in_exp)
#Prepare a composite exception message and raise the Exception
if msg_keys_in_act_but_not_exp and msg_keys_in_exp_but_not_act:
msg_assert = '''{}. {} . {} .'''.format(msg_assert, msg_keys_in_act_but_not_exp, msg_keys_in_exp_but_not_act)
raise AssertionError (msg_assert)
elif msg_keys_in_act_but_not_exp:
msg_assert = '''{}. {} .'''.format(msg_assert, msg_keys_in_act_but_not_exp)
raise AssertionError (msg_assert)
elif msg_keys_in_exp_but_not_act:
msg_assert = '''{}. {} .'''.format(msg_assert, msg_keys_in_exp_but_not_act)
raise AssertionError (msg_assert)
def compare_dicts(expected_dict, actual_dict, strict=False, path=None):
assert type(expected_dict) is dict,\
"Expected {0} is not a dict".format(repr(expected_dict))
assert type(actual_dict) is dict,\
"Actual {0} is not a dict".format(repr(actual_dict))
if strict:
compare_dicts_structure(expected_dict, actual_dict)
for key in expected_dict:
expected_value = expected_dict[key]
actual_value = actual_dict.get(key, None)
path = '{0}.{1}'.format(path, key) if path else key
compare_values(expected_value, actual_value, strict=False, path=path)
def validate_value_iso_datetime(matchstr):
'''
Test whether the 'matchstr' argument matches an
ISO-8601.2019 format date/time. An example of such
a date/time is '2021-11-30T14:20:15'
'''
rgx_pttn = r"""[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}"""
# method 1: using a compile object
compile_obj = re.compile(rgx_pttn)
match_obj = compile_obj.search(matchstr)
if match_obj:
return True
else:
return False
def validate_value_iso_datetime_at_eoe(matchstr):
'''
Test whether the 'matchstr' argument matches an
ISO-8601.2019 format date/time and the value of the
date/time is '2300-01-01T00:00:00'
'''
rgx_pttn = r"""(?P<YYYY>[0-9]{4})
-
(?P<MM>[0-9]{2})
-
(?P<DD>[0-9]{2})
T
(?P<HH24>[0-9]{2})
:
(?P<MI>[0-9]{2})
:
(?P<S>[0-9]{2})"""
if validate_value_iso_datetime(matchstr):
compile_obj = re.compile(rgx_pttn, re.MULTILINE| re.VERBOSE)
match_obj = compile_obj.search(matchstr)
#
if ((match_obj.group('YYYY') == "2300") and
(match_obj.group('MM') == "01") and
(match_obj.group('DD') == "01") and
(match_obj.group('DD') == "01") and
(match_obj.group('HH24') == "00") and
(match_obj.group('MI') == "00") and
(match_obj.group('S') == "00")):
return True
else:
return False
else:
return False
def validate_value(validator, value):
'''
Test whether the 'value' argument matches one of a set
of pre-determined situations defined by the value of the
'validator' argument.
In addition deal with a special case where the validator
string is suffixed with a string to indicate that None
is a valid value
'''
# If the validator string ends with the 'NULL_SUFFIX'
# then trim the 'NULL_SUFFIX' off the end of the validator
# so that, for instance, ...
#
# 'integer_or_null'
#
# ... becomes ....
#
# 'integer'.
#
# In addition if the value is None (what null from JSON
# gets converted to) then return True, otherwise just let
# the normal validation take its course.
#
null_sfx_idx = -1 * len(NULL_SUFFIX)
if validator[null_sfx_idx : ] == NULL_SUFFIX:
#Trim the NULL_SUFFIX_ off the 'validator value'
validator = validator[ : null_sfx_idx]
#If the value is None, return True
if value==None:
return True
#Having dealt with the 'NULL_SUFFIX' special case now
#proceed with normal processing.
if validator == 'int':
return type(value) == int
if validator == 'float':
return type(value) == float
if validator == 'number':
return type(value) == int or type(value) == float
if validator == 'integer':
return type(value) == int
if validator == 'positive_integer':
return (type(value) == int and (value >= 0))
if validator == 'string':
return type(value) == str
if validator == 'string_and_not_empty':
return (type(value) == str and (len(value) > 0))
if validator == 'numeric_true_false':
return ((type(value) == int) and ((value==0) or (value==1)))
if validator == 'iso_date_time':
return validate_value_iso_datetime(value)
if validator == 'iso_date_time_at_eoe':
return validate_value_iso_datetime_at_eoe(value)
raise Exception('Unknown validator: {}'.format(validator))
def compare_values(expected_value, actual_value, strict=False, path=None):
validator_pattern = r'^<is_(.+)>$'
regex_pattern = r'^%(.+)%$'
if type(expected_value) is dict:
compare_dicts(expected_value, actual_value, strict=strict, path=path)
elif type(expected_value) is list:
compare_lists(expected_value, actual_value, path=path)
elif isinstance(expected_value, string_type) and re.match(regex_pattern, expected_value):
custom_regex = re.match(regex_pattern, expected_value).groups()[0]
if not re.match(custom_regex, actual_value or ''):
message = 'Expected {0} to match regex {1}'
params = [repr(actual_value), repr(expected_value)]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
elif isinstance(expected_value, string_type) and re.match(validator_pattern, expected_value):
validator_name = re.match(validator_pattern, expected_value).groups()[0]
is_valid = validate_value(validator_name, actual_value)
if not is_valid:
message = 'Expected {0} to match validator {1}'
params = [repr(actual_value), validator_name]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
else:
try:
assert expected_value == actual_value
except AssertionError:
message = 'Expected {0} to equal {1}'
params = [repr(actual_value), repr(expected_value)]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
def compare_contents(expected_value, actual_value):
if expected_value[0] == '%' and expected_value[-1] == '%':
assert re.search(expected_value.strip('%'), actual_value or ''),\
'Expected response to contain regex \'{0}\''.format(expected_value)
else:
assert expected_value in actual_value,\
'Expected response to contain text \'{0}\''.format(expected_value)
def do_request(context, method, endingpoint, body=None):
| fn = getattr(requests, method.lower())
kwargs = {}
if hasattr(context, 'request_headers'):
kwargs['headers'] = context.request_headers
if body:
kwargs['data'] = body
if hasattr(context, 'request_files'):
kwargs['files'] = context.request_files
context.response = fn(make_url(context, endingpoint), **kwargs) | identifier_body | |
utils.py | from __future__ import unicode_literals
import os
import re
import requests
from functools import wraps | try:
string_type = basestring
except NameError: # Python 3, basestring causes NameError
string_type = str
def make_url(context, endingpoint):
BASE_URL = dereference_variables(context, '$BASE_URL')
if 'http' not in BASE_URL:
BASE_URL = 'http://{0}'.format(BASE_URL)
return '{0}{1}'.format(BASE_URL, endingpoint)
def dereference_variables(context, value):
variables = context.variables\
if hasattr(context, 'variables') else {}
for key in re.findall('\$+\w+', value):
var_name = key[1:]
value = value.replace(
key,
variables.get(
var_name,
os.environ.get(var_name, key)
)
)
return value
def dereference_arguments(f):
@wraps(f)
def wrapper(context, *args, **kwargs):
new_kwargs = {}
new_args = []
for key, value in kwargs.items():
new_kwargs[key] = dereference_variables(context.text, value)
for value in args:
new_args.append(dereference_variables(context.text, value))
context.processed_text = dereference_variables(context, context.text)\
if context.text else ''
return f(context, *new_args, **new_kwargs)
return wrapper
def compare_lists(expected_list, actual_list, path=None):
assert type(expected_list) is list,\
"Expected {0} is not a list".format(repr(expected_list))
assert type(actual_list) is list,\
"Actual {0} is not a list".format(repr(actual_list))
for i, item in enumerate(expected_list):
path = '{0}.{1}'.format(path, i) if path else str(i)
try:
actual_value = actual_list[i]
except ValueError:
actual_value = None
compare_values(item, actual_value, path=path)
def compare_dicts_structure(expected_dict, actual_dict):
'''
Make a comparison of the keys of the two dictionaries passed
and if they are not the same prepare an assertion message on
how they differ and raise the AssertionError
'''
msg_assert = "Keys/Properties of actual values do not match those of expected values"
msg_keys_in_act_but_not_exp = ""
msg_keys_in_exp_but_not_act = ""
if expected_dict.keys() == actual_dict.keys():
pass
else:
set_exp_keys = set(expected_dict.keys())
set_actual_keys = set(actual_dict.keys())
#Deal with actual keys not seen in expected
exp_keys_not_in_act = set_exp_keys - set_actual_keys
if exp_keys_not_in_act:
str_exp_keys_not_in_act = " ,".join(exp_keys_not_in_act)
msg_keys_in_exp_but_not_act = '''The following keys are in the expected values but not the actual values : {}'''.format(str_exp_keys_not_in_act)
#Deal with expected keys not seen in actual
act_keys_not_in_exp = set_actual_keys - set_exp_keys
if act_keys_not_in_exp:
str_act_keys_not_in_exp = " ,".join(act_keys_not_in_exp)
msg_keys_in_act_but_not_exp = '''The following keys are in the actual values but not the expected values : {}'''.format(str_act_keys_not_in_exp)
#Prepare a composite exception message and raise the Exception
if msg_keys_in_act_but_not_exp and msg_keys_in_exp_but_not_act:
msg_assert = '''{}. {} . {} .'''.format(msg_assert, msg_keys_in_act_but_not_exp, msg_keys_in_exp_but_not_act)
raise AssertionError (msg_assert)
elif msg_keys_in_act_but_not_exp:
msg_assert = '''{}. {} .'''.format(msg_assert, msg_keys_in_act_but_not_exp)
raise AssertionError (msg_assert)
elif msg_keys_in_exp_but_not_act:
msg_assert = '''{}. {} .'''.format(msg_assert, msg_keys_in_exp_but_not_act)
raise AssertionError (msg_assert)
def compare_dicts(expected_dict, actual_dict, strict=False, path=None):
assert type(expected_dict) is dict,\
"Expected {0} is not a dict".format(repr(expected_dict))
assert type(actual_dict) is dict,\
"Actual {0} is not a dict".format(repr(actual_dict))
if strict:
compare_dicts_structure(expected_dict, actual_dict)
for key in expected_dict:
expected_value = expected_dict[key]
actual_value = actual_dict.get(key, None)
path = '{0}.{1}'.format(path, key) if path else key
compare_values(expected_value, actual_value, strict=False, path=path)
def validate_value_iso_datetime(matchstr):
'''
Test whether the 'matchstr' argument matches an
ISO-8601.2019 format date/time. An example of such
a date/time is '2021-11-30T14:20:15'
'''
rgx_pttn = r"""[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}"""
# method 1: using a compile object
compile_obj = re.compile(rgx_pttn)
match_obj = compile_obj.search(matchstr)
if match_obj:
return True
else:
return False
def validate_value_iso_datetime_at_eoe(matchstr):
'''
Test whether the 'matchstr' argument matches an
ISO-8601.2019 format date/time and the value of the
date/time is '2300-01-01T00:00:00'
'''
rgx_pttn = r"""(?P<YYYY>[0-9]{4})
-
(?P<MM>[0-9]{2})
-
(?P<DD>[0-9]{2})
T
(?P<HH24>[0-9]{2})
:
(?P<MI>[0-9]{2})
:
(?P<S>[0-9]{2})"""
if validate_value_iso_datetime(matchstr):
compile_obj = re.compile(rgx_pttn, re.MULTILINE| re.VERBOSE)
match_obj = compile_obj.search(matchstr)
#
if ((match_obj.group('YYYY') == "2300") and
(match_obj.group('MM') == "01") and
(match_obj.group('DD') == "01") and
(match_obj.group('DD') == "01") and
(match_obj.group('HH24') == "00") and
(match_obj.group('MI') == "00") and
(match_obj.group('S') == "00")):
return True
else:
return False
else:
return False
def validate_value(validator, value):
'''
Test whether the 'value' argument matches one of a set
of pre-determined situations defined by the value of the
'validator' argument.
In addition deal with a special case where the validator
string is suffixed with a string to indicate that None
is a valid value
'''
# If the validator string ends with the 'NULL_SUFFIX'
# then trim the 'NULL_SUFFIX' off the end of the validator
# so that, for instance, ...
#
# 'integer_or_null'
#
# ... becomes ....
#
# 'integer'.
#
# In addition if the value is None (what null from JSON
# gets converted to) then return True, otherwise just let
# the normal validation take its course.
#
null_sfx_idx = -1 * len(NULL_SUFFIX)
if validator[null_sfx_idx : ] == NULL_SUFFIX:
#Trim the NULL_SUFFIX_ off the 'validator value'
validator = validator[ : null_sfx_idx]
#If the value is None, return True
if value==None:
return True
#Having dealt with the 'NULL_SUFFIX' special case now
#proceed with normal processing.
if validator == 'int':
return type(value) == int
if validator == 'float':
return type(value) == float
if validator == 'number':
return type(value) == int or type(value) == float
if validator == 'integer':
return type(value) == int
if validator == 'positive_integer':
return (type(value) == int and (value >= 0))
if validator == 'string':
return type(value) == str
if validator == 'string_and_not_empty':
return (type(value) == str and (len(value) > 0))
if validator == 'numeric_true_false':
return ((type(value) == int) and ((value==0) or (value==1)))
if validator == 'iso_date_time':
return validate_value_iso_datetime(value)
if validator == 'iso_date_time_at_eoe':
return validate_value_iso_datetime_at_eoe(value)
raise Exception('Unknown validator: {}'.format(validator))
def compare_values(expected_value, actual_value, strict=False, path=None):
validator_pattern = r'^<is_(.+)>$'
regex_pattern = r'^%(.+)%$'
if type(expected_value) is dict:
compare_dicts(expected_value, actual_value, strict=strict, path=path)
elif type(expected_value) is list:
compare_lists(expected_value, actual_value, path=path)
elif isinstance(expected_value, string_type) and re.match(regex_pattern, expected_value):
custom_regex = re.match(regex_pattern, expected_value).groups()[0]
if not re.match(custom_regex, actual_value or ''):
message = 'Expected {0} to match regex {1}'
params = [repr(actual_value), repr(expected_value)]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
elif isinstance(expected_value, string_type) and re.match(validator_pattern, expected_value):
validator_name = re.match(validator_pattern, expected_value).groups()[0]
is_valid = validate_value(validator_name, actual_value)
if not is_valid:
message = 'Expected {0} to match validator {1}'
params = [repr(actual_value), validator_name]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
else:
try:
assert expected_value == actual_value
except AssertionError:
message = 'Expected {0} to equal {1}'
params = [repr(actual_value), repr(expected_value)]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
def compare_contents(expected_value, actual_value):
if expected_value[0] == '%' and expected_value[-1] == '%':
assert re.search(expected_value.strip('%'), actual_value or ''),\
'Expected response to contain regex \'{0}\''.format(expected_value)
else:
assert expected_value in actual_value,\
'Expected response to contain text \'{0}\''.format(expected_value)
def do_request(context, method, endingpoint, body=None):
fn = getattr(requests, method.lower())
kwargs = {}
if hasattr(context, 'request_headers'):
kwargs['headers'] = context.request_headers
if body:
kwargs['data'] = body
if hasattr(context, 'request_files'):
kwargs['files'] = context.request_files
context.response = fn(make_url(context, endingpoint), **kwargs) |
NULL_SUFFIX = '''_or_null'''
| random_line_split |
utils.py | from __future__ import unicode_literals
import os
import re
import requests
from functools import wraps
NULL_SUFFIX = '''_or_null'''
try:
string_type = basestring
except NameError: # Python 3, basestring causes NameError
string_type = str
def make_url(context, endingpoint):
BASE_URL = dereference_variables(context, '$BASE_URL')
if 'http' not in BASE_URL:
BASE_URL = 'http://{0}'.format(BASE_URL)
return '{0}{1}'.format(BASE_URL, endingpoint)
def dereference_variables(context, value):
variables = context.variables\
if hasattr(context, 'variables') else {}
for key in re.findall('\$+\w+', value):
var_name = key[1:]
value = value.replace(
key,
variables.get(
var_name,
os.environ.get(var_name, key)
)
)
return value
def dereference_arguments(f):
@wraps(f)
def wrapper(context, *args, **kwargs):
new_kwargs = {}
new_args = []
for key, value in kwargs.items():
new_kwargs[key] = dereference_variables(context.text, value)
for value in args:
new_args.append(dereference_variables(context.text, value))
context.processed_text = dereference_variables(context, context.text)\
if context.text else ''
return f(context, *new_args, **new_kwargs)
return wrapper
def compare_lists(expected_list, actual_list, path=None):
assert type(expected_list) is list,\
"Expected {0} is not a list".format(repr(expected_list))
assert type(actual_list) is list,\
"Actual {0} is not a list".format(repr(actual_list))
for i, item in enumerate(expected_list):
path = '{0}.{1}'.format(path, i) if path else str(i)
try:
actual_value = actual_list[i]
except ValueError:
actual_value = None
compare_values(item, actual_value, path=path)
def compare_dicts_structure(expected_dict, actual_dict):
'''
Make a comparison of the keys of the two dictionaries passed
and if they are not the same prepare an assertion message on
how they differ and raise the AssertionError
'''
msg_assert = "Keys/Properties of actual values do not match those of expected values"
msg_keys_in_act_but_not_exp = ""
msg_keys_in_exp_but_not_act = ""
if expected_dict.keys() == actual_dict.keys():
pass
else:
set_exp_keys = set(expected_dict.keys())
set_actual_keys = set(actual_dict.keys())
#Deal with actual keys not seen in expected
exp_keys_not_in_act = set_exp_keys - set_actual_keys
if exp_keys_not_in_act:
str_exp_keys_not_in_act = " ,".join(exp_keys_not_in_act)
msg_keys_in_exp_but_not_act = '''The following keys are in the expected values but not the actual values : {}'''.format(str_exp_keys_not_in_act)
#Deal with expected keys not seen in actual
act_keys_not_in_exp = set_actual_keys - set_exp_keys
if act_keys_not_in_exp:
str_act_keys_not_in_exp = " ,".join(act_keys_not_in_exp)
msg_keys_in_act_but_not_exp = '''The following keys are in the actual values but not the expected values : {}'''.format(str_act_keys_not_in_exp)
#Prepare a composite exception message and raise the Exception
if msg_keys_in_act_but_not_exp and msg_keys_in_exp_but_not_act:
msg_assert = '''{}. {} . {} .'''.format(msg_assert, msg_keys_in_act_but_not_exp, msg_keys_in_exp_but_not_act)
raise AssertionError (msg_assert)
elif msg_keys_in_act_but_not_exp:
msg_assert = '''{}. {} .'''.format(msg_assert, msg_keys_in_act_but_not_exp)
raise AssertionError (msg_assert)
elif msg_keys_in_exp_but_not_act:
msg_assert = '''{}. {} .'''.format(msg_assert, msg_keys_in_exp_but_not_act)
raise AssertionError (msg_assert)
def compare_dicts(expected_dict, actual_dict, strict=False, path=None):
assert type(expected_dict) is dict,\
"Expected {0} is not a dict".format(repr(expected_dict))
assert type(actual_dict) is dict,\
"Actual {0} is not a dict".format(repr(actual_dict))
if strict:
compare_dicts_structure(expected_dict, actual_dict)
for key in expected_dict:
expected_value = expected_dict[key]
actual_value = actual_dict.get(key, None)
path = '{0}.{1}'.format(path, key) if path else key
compare_values(expected_value, actual_value, strict=False, path=path)
def validate_value_iso_datetime(matchstr):
'''
Test whether the 'matchstr' argument matches an
ISO-8601.2019 format date/time. An example of such
a date/time is '2021-11-30T14:20:15'
'''
rgx_pttn = r"""[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}"""
# method 1: using a compile object
compile_obj = re.compile(rgx_pttn)
match_obj = compile_obj.search(matchstr)
if match_obj:
return True
else:
return False
def validate_value_iso_datetime_at_eoe(matchstr):
'''
Test whether the 'matchstr' argument matches an
ISO-8601.2019 format date/time and the value of the
date/time is '2300-01-01T00:00:00'
'''
rgx_pttn = r"""(?P<YYYY>[0-9]{4})
-
(?P<MM>[0-9]{2})
-
(?P<DD>[0-9]{2})
T
(?P<HH24>[0-9]{2})
:
(?P<MI>[0-9]{2})
:
(?P<S>[0-9]{2})"""
if validate_value_iso_datetime(matchstr):
compile_obj = re.compile(rgx_pttn, re.MULTILINE| re.VERBOSE)
match_obj = compile_obj.search(matchstr)
#
if ((match_obj.group('YYYY') == "2300") and
(match_obj.group('MM') == "01") and
(match_obj.group('DD') == "01") and
(match_obj.group('DD') == "01") and
(match_obj.group('HH24') == "00") and
(match_obj.group('MI') == "00") and
(match_obj.group('S') == "00")):
return True
else:
return False
else:
return False
def validate_value(validator, value):
'''
Test whether the 'value' argument matches one of a set
of pre-determined situations defined by the value of the
'validator' argument.
In addition deal with a special case where the validator
string is suffixed with a string to indicate that None
is a valid value
'''
# If the validator string ends with the 'NULL_SUFFIX'
# then trim the 'NULL_SUFFIX' off the end of the validator
# so that, for instance, ...
#
# 'integer_or_null'
#
# ... becomes ....
#
# 'integer'.
#
# In addition if the value is None (what null from JSON
# gets converted to) then return True, otherwise just let
# the normal validation take its course.
#
null_sfx_idx = -1 * len(NULL_SUFFIX)
if validator[null_sfx_idx : ] == NULL_SUFFIX:
#Trim the NULL_SUFFIX_ off the 'validator value'
validator = validator[ : null_sfx_idx]
#If the value is None, return True
if value==None:
return True
#Having dealt with the 'NULL_SUFFIX' special case now
#proceed with normal processing.
if validator == 'int':
return type(value) == int
if validator == 'float':
return type(value) == float
if validator == 'number':
return type(value) == int or type(value) == float
if validator == 'integer':
return type(value) == int
if validator == 'positive_integer':
return (type(value) == int and (value >= 0))
if validator == 'string':
return type(value) == str
if validator == 'string_and_not_empty':
return (type(value) == str and (len(value) > 0))
if validator == 'numeric_true_false':
return ((type(value) == int) and ((value==0) or (value==1)))
if validator == 'iso_date_time':
return validate_value_iso_datetime(value)
if validator == 'iso_date_time_at_eoe':
return validate_value_iso_datetime_at_eoe(value)
raise Exception('Unknown validator: {}'.format(validator))
def compare_values(expected_value, actual_value, strict=False, path=None):
validator_pattern = r'^<is_(.+)>$'
regex_pattern = r'^%(.+)%$'
if type(expected_value) is dict:
compare_dicts(expected_value, actual_value, strict=strict, path=path)
elif type(expected_value) is list:
compare_lists(expected_value, actual_value, path=path)
elif isinstance(expected_value, string_type) and re.match(regex_pattern, expected_value):
custom_regex = re.match(regex_pattern, expected_value).groups()[0]
if not re.match(custom_regex, actual_value or ''):
message = 'Expected {0} to match regex {1}'
params = [repr(actual_value), repr(expected_value)]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
elif isinstance(expected_value, string_type) and re.match(validator_pattern, expected_value):
validator_name = re.match(validator_pattern, expected_value).groups()[0]
is_valid = validate_value(validator_name, actual_value)
if not is_valid:
message = 'Expected {0} to match validator {1}'
params = [repr(actual_value), validator_name]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
else:
try:
assert expected_value == actual_value
except AssertionError:
message = 'Expected {0} to equal {1}'
params = [repr(actual_value), repr(expected_value)]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
def compare_contents(expected_value, actual_value):
if expected_value[0] == '%' and expected_value[-1] == '%':
assert re.search(expected_value.strip('%'), actual_value or ''),\
'Expected response to contain regex \'{0}\''.format(expected_value)
else:
assert expected_value in actual_value,\
'Expected response to contain text \'{0}\''.format(expected_value)
def | (context, method, endingpoint, body=None):
fn = getattr(requests, method.lower())
kwargs = {}
if hasattr(context, 'request_headers'):
kwargs['headers'] = context.request_headers
if body:
kwargs['data'] = body
if hasattr(context, 'request_files'):
kwargs['files'] = context.request_files
context.response = fn(make_url(context, endingpoint), **kwargs)
| do_request | identifier_name |
utils.py | from __future__ import unicode_literals
import os
import re
import requests
from functools import wraps
NULL_SUFFIX = '''_or_null'''
try:
string_type = basestring
except NameError: # Python 3, basestring causes NameError
string_type = str
def make_url(context, endingpoint):
BASE_URL = dereference_variables(context, '$BASE_URL')
if 'http' not in BASE_URL:
BASE_URL = 'http://{0}'.format(BASE_URL)
return '{0}{1}'.format(BASE_URL, endingpoint)
def dereference_variables(context, value):
variables = context.variables\
if hasattr(context, 'variables') else {}
for key in re.findall('\$+\w+', value):
var_name = key[1:]
value = value.replace(
key,
variables.get(
var_name,
os.environ.get(var_name, key)
)
)
return value
def dereference_arguments(f):
@wraps(f)
def wrapper(context, *args, **kwargs):
new_kwargs = {}
new_args = []
for key, value in kwargs.items():
new_kwargs[key] = dereference_variables(context.text, value)
for value in args:
new_args.append(dereference_variables(context.text, value))
context.processed_text = dereference_variables(context, context.text)\
if context.text else ''
return f(context, *new_args, **new_kwargs)
return wrapper
def compare_lists(expected_list, actual_list, path=None):
assert type(expected_list) is list,\
"Expected {0} is not a list".format(repr(expected_list))
assert type(actual_list) is list,\
"Actual {0} is not a list".format(repr(actual_list))
for i, item in enumerate(expected_list):
path = '{0}.{1}'.format(path, i) if path else str(i)
try:
actual_value = actual_list[i]
except ValueError:
actual_value = None
compare_values(item, actual_value, path=path)
def compare_dicts_structure(expected_dict, actual_dict):
'''
Make a comparison of the keys of the two dictionaries passed
and if they are not the same prepare an assertion message on
how they differ and raise the AssertionError
'''
msg_assert = "Keys/Properties of actual values do not match those of expected values"
msg_keys_in_act_but_not_exp = ""
msg_keys_in_exp_but_not_act = ""
if expected_dict.keys() == actual_dict.keys():
pass
else:
set_exp_keys = set(expected_dict.keys())
set_actual_keys = set(actual_dict.keys())
#Deal with actual keys not seen in expected
exp_keys_not_in_act = set_exp_keys - set_actual_keys
if exp_keys_not_in_act:
str_exp_keys_not_in_act = " ,".join(exp_keys_not_in_act)
msg_keys_in_exp_but_not_act = '''The following keys are in the expected values but not the actual values : {}'''.format(str_exp_keys_not_in_act)
#Deal with expected keys not seen in actual
act_keys_not_in_exp = set_actual_keys - set_exp_keys
if act_keys_not_in_exp:
str_act_keys_not_in_exp = " ,".join(act_keys_not_in_exp)
msg_keys_in_act_but_not_exp = '''The following keys are in the actual values but not the expected values : {}'''.format(str_act_keys_not_in_exp)
#Prepare a composite exception message and raise the Exception
if msg_keys_in_act_but_not_exp and msg_keys_in_exp_but_not_act:
msg_assert = '''{}. {} . {} .'''.format(msg_assert, msg_keys_in_act_but_not_exp, msg_keys_in_exp_but_not_act)
raise AssertionError (msg_assert)
elif msg_keys_in_act_but_not_exp:
msg_assert = '''{}. {} .'''.format(msg_assert, msg_keys_in_act_but_not_exp)
raise AssertionError (msg_assert)
elif msg_keys_in_exp_but_not_act:
msg_assert = '''{}. {} .'''.format(msg_assert, msg_keys_in_exp_but_not_act)
raise AssertionError (msg_assert)
def compare_dicts(expected_dict, actual_dict, strict=False, path=None):
assert type(expected_dict) is dict,\
"Expected {0} is not a dict".format(repr(expected_dict))
assert type(actual_dict) is dict,\
"Actual {0} is not a dict".format(repr(actual_dict))
if strict:
compare_dicts_structure(expected_dict, actual_dict)
for key in expected_dict:
expected_value = expected_dict[key]
actual_value = actual_dict.get(key, None)
path = '{0}.{1}'.format(path, key) if path else key
compare_values(expected_value, actual_value, strict=False, path=path)
def validate_value_iso_datetime(matchstr):
'''
Test whether the 'matchstr' argument matches an
ISO-8601.2019 format date/time. An example of such
a date/time is '2021-11-30T14:20:15'
'''
rgx_pttn = r"""[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}"""
# method 1: using a compile object
compile_obj = re.compile(rgx_pttn)
match_obj = compile_obj.search(matchstr)
if match_obj:
return True
else:
return False
def validate_value_iso_datetime_at_eoe(matchstr):
'''
Test whether the 'matchstr' argument matches an
ISO-8601.2019 format date/time and the value of the
date/time is '2300-01-01T00:00:00'
'''
rgx_pttn = r"""(?P<YYYY>[0-9]{4})
-
(?P<MM>[0-9]{2})
-
(?P<DD>[0-9]{2})
T
(?P<HH24>[0-9]{2})
:
(?P<MI>[0-9]{2})
:
(?P<S>[0-9]{2})"""
if validate_value_iso_datetime(matchstr):
compile_obj = re.compile(rgx_pttn, re.MULTILINE| re.VERBOSE)
match_obj = compile_obj.search(matchstr)
#
if ((match_obj.group('YYYY') == "2300") and
(match_obj.group('MM') == "01") and
(match_obj.group('DD') == "01") and
(match_obj.group('DD') == "01") and
(match_obj.group('HH24') == "00") and
(match_obj.group('MI') == "00") and
(match_obj.group('S') == "00")):
return True
else:
return False
else:
return False
def validate_value(validator, value):
'''
Test whether the 'value' argument matches one of a set
of pre-determined situations defined by the value of the
'validator' argument.
In addition deal with a special case where the validator
string is suffixed with a string to indicate that None
is a valid value
'''
# If the validator string ends with the 'NULL_SUFFIX'
# then trim the 'NULL_SUFFIX' off the end of the validator
# so that, for instance, ...
#
# 'integer_or_null'
#
# ... becomes ....
#
# 'integer'.
#
# In addition if the value is None (what null from JSON
# gets converted to) then return True, otherwise just let
# the normal validation take its course.
#
null_sfx_idx = -1 * len(NULL_SUFFIX)
if validator[null_sfx_idx : ] == NULL_SUFFIX:
#Trim the NULL_SUFFIX_ off the 'validator value'
validator = validator[ : null_sfx_idx]
#If the value is None, return True
if value==None:
return True
#Having dealt with the 'NULL_SUFFIX' special case now
#proceed with normal processing.
if validator == 'int':
return type(value) == int
if validator == 'float':
return type(value) == float
if validator == 'number':
return type(value) == int or type(value) == float
if validator == 'integer':
return type(value) == int
if validator == 'positive_integer':
return (type(value) == int and (value >= 0))
if validator == 'string':
return type(value) == str
if validator == 'string_and_not_empty':
|
if validator == 'numeric_true_false':
return ((type(value) == int) and ((value==0) or (value==1)))
if validator == 'iso_date_time':
return validate_value_iso_datetime(value)
if validator == 'iso_date_time_at_eoe':
return validate_value_iso_datetime_at_eoe(value)
raise Exception('Unknown validator: {}'.format(validator))
def compare_values(expected_value, actual_value, strict=False, path=None):
validator_pattern = r'^<is_(.+)>$'
regex_pattern = r'^%(.+)%$'
if type(expected_value) is dict:
compare_dicts(expected_value, actual_value, strict=strict, path=path)
elif type(expected_value) is list:
compare_lists(expected_value, actual_value, path=path)
elif isinstance(expected_value, string_type) and re.match(regex_pattern, expected_value):
custom_regex = re.match(regex_pattern, expected_value).groups()[0]
if not re.match(custom_regex, actual_value or ''):
message = 'Expected {0} to match regex {1}'
params = [repr(actual_value), repr(expected_value)]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
elif isinstance(expected_value, string_type) and re.match(validator_pattern, expected_value):
validator_name = re.match(validator_pattern, expected_value).groups()[0]
is_valid = validate_value(validator_name, actual_value)
if not is_valid:
message = 'Expected {0} to match validator {1}'
params = [repr(actual_value), validator_name]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
else:
try:
assert expected_value == actual_value
except AssertionError:
message = 'Expected {0} to equal {1}'
params = [repr(actual_value), repr(expected_value)]
if path:
message = message + ' at path {2}'
params.append(path)
raise AssertionError(message.format(*params))
def compare_contents(expected_value, actual_value):
if expected_value[0] == '%' and expected_value[-1] == '%':
assert re.search(expected_value.strip('%'), actual_value or ''),\
'Expected response to contain regex \'{0}\''.format(expected_value)
else:
assert expected_value in actual_value,\
'Expected response to contain text \'{0}\''.format(expected_value)
def do_request(context, method, endingpoint, body=None):
fn = getattr(requests, method.lower())
kwargs = {}
if hasattr(context, 'request_headers'):
kwargs['headers'] = context.request_headers
if body:
kwargs['data'] = body
if hasattr(context, 'request_files'):
kwargs['files'] = context.request_files
context.response = fn(make_url(context, endingpoint), **kwargs)
| return (type(value) == str and (len(value) > 0)) | conditional_block |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Overnet daemon for Fuchsia
#![deny(missing_docs)]
mod mdns;
use failure::{Error, ResultExt};
use fidl_fuchsia_overnet::{
MeshControllerRequest, MeshControllerRequestStream, OvernetListPeersResponder, OvernetRequest,
OvernetRequestStream, ServiceConsumerListPeersResponder, ServiceConsumerRequest,
ServiceConsumerRequestStream, ServicePublisherRequest, ServicePublisherRequestStream,
};
use fuchsia_async as fasync;
use fuchsia_component::server::ServiceFs;
use fuchsia_zircon as zx;
use futures::future::{abortable, AbortHandle};
use futures::prelude::*;
use overnet_core::{LinkId, Node, NodeId, NodeOptions, NodeRuntime, RouterTime, SendHandle};
use std::cell::RefCell;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV6};
use std::ops::Deref;
use std::rc::Rc;
use zx::AsHandleRef;
/// Identifier for a link as defined by overnetstack.
#[derive(Clone, Copy, Debug)]
enum AppLinkId {
Udp(SocketAddrV6),
}
/// Adapter of fasync::Time to RouterTime for overnet's core library.
#[derive(PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Debug)]
struct Time(fasync::Time);
impl RouterTime for Time {
type Duration = zx::Duration;
fn now() -> Self {
Time(fasync::Time::now())
}
fn after(time: Self, duration: zx::Duration) -> Self {
Self(time.0 + duration)
}
}
struct AppRuntime;
impl NodeRuntime for AppRuntime {
type Time = Time;
type LinkId = AppLinkId;
const IMPLEMENTATION: fidl_fuchsia_overnet_protocol::Implementation =
fidl_fuchsia_overnet_protocol::Implementation::OvernetStack;
fn handle_type(handle: &zx::Handle) -> Result<SendHandle, Error> {
match handle.basic_info()?.object_type {
zx::ObjectType::CHANNEL => Ok(SendHandle::Channel),
_ => failure::bail!("Handle type not proxyable {:?}", handle.basic_info()?.object_type),
}
}
fn spawn_local<F>(&mut self, future: F)
where
F: Future<Output = ()> + 'static,
{
fasync::spawn_local(future)
}
fn at(&mut self, t: Self::Time, f: impl FnOnce() + 'static) {
fasync::spawn_local(at(t.0, f))
}
fn router_link_id(&self, id: AppLinkId) -> LinkId<overnet_core::PhysLinkId<AppLinkId>> {
with_app_mut(|app| match id {
AppLinkId::Udp(addr) => {
app.udp_link_ids.get(&addr).copied().unwrap_or(LinkId::invalid())
}
})
}
fn send_on_link(&mut self, id: Self::LinkId, packet: &mut [u8]) -> Result<(), Error> {
match id {
AppLinkId::Udp(addr) => {
println!("UDP_SEND to:{} len:{}", addr, packet.len());
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("no udp socket"))?
.sock
.clone())
})?;
let sock = sock.deref().as_ref();
if let Err(e) = sock.send_to(packet, addr) {
if e.kind() == std::io::ErrorKind::BrokenPipe {
log::warn!("BrokenPipe on UDP socket: let's make a new one");
with_app_mut(|app| {
app.udp_socket.take();
app.udp_socket = Some(UdpSocketHolder::new(app.node_id)?);
Ok(())
})
} else {
Err(e.into())
}
} else {
Ok(())
}
}
}
}
}
struct UdpSocketHolder {
sock: Rc<fasync::net::UdpSocket>,
abort_publisher: AbortHandle,
}
impl UdpSocketHolder {
fn new(node_id: NodeId) -> Result<Self, Error> {
// Must not call with_app_mut here, as this is called from with_app_mut
let sock = std::net::UdpSocket::bind("[::]:0").context("Creating UDP socket")?;
let publisher =
mdns::publish(node_id, sock.local_addr().context("Getting UDP local address")?.port());
let sock = Rc::new(fasync::net::UdpSocket::from_socket(sock)?);
let (publisher, abort_publisher) = abortable(publisher);
fasync::spawn_local(async move {
let _ = publisher.await;
});
Ok(Self { sock, abort_publisher })
}
}
impl Drop for UdpSocketHolder {
fn drop(&mut self) {
self.abort_publisher.abort();
}
}
/// Global state for overnetstack.
struct App {
node_id: NodeId,
node: Node<AppRuntime>,
// TODO(ctiller): This state should be moved out into its own file.
/// Map socket addresses to udp link ids.
udp_link_ids: HashMap<SocketAddrV6, LinkId<overnet_core::PhysLinkId<AppLinkId>>>,
/// UDP socket to communicate over.
udp_socket: Option<UdpSocketHolder>,
}
thread_local! {
// Always access via with_app_mut
static APP: RefCell<App> = RefCell::new(App::new());
}
fn with_app_mut<R>(f: impl FnOnce(&mut App) -> R) -> R {
APP.with(|rcapp| f(&mut rcapp.borrow_mut()))
}
async fn at(when: fasync::Time, f: impl FnOnce()) {
fasync::Timer::new(when).await;
f();
}
impl App {
/// Create a new instance of App
fn | () -> App {
let node = Node::new(
AppRuntime,
NodeOptions::new()
.set_quic_server_key_file(Box::new("/pkg/data/cert.key".to_string()))
.set_quic_server_cert_file(Box::new("/pkg/data/cert.crt".to_string())),
)
.unwrap();
App { node_id: node.id(), node, udp_link_ids: HashMap::new(), udp_socket: None }
}
}
fn normalize_addr(addr: SocketAddr) -> SocketAddrV6 {
match addr {
SocketAddr::V6(a) => a,
SocketAddr::V4(a) => SocketAddrV6::new(a.ip().to_ipv6_mapped(), a.port(), 0, 0),
}
}
/// UDP read inner loop.
async fn read_udp_inner() -> Result<(), Error> {
let mut buf: [u8; 1500] = [0; 1500];
loop {
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("No udp socket to read from"))?
.sock
.clone())
})?;
let (length, sender) = sock.recv_from(&mut buf).await?;
println!("UDP_RECV from:{} len:{}", sender, length);
let sender = normalize_addr(sender);
with_app_mut(|app| -> Result<(), Error> {
if let Some(link_id) = app.udp_link_ids.get(&sender) {
app.node.queue_recv(*link_id, &mut buf[..length]);
} else {
log::warn!("No link for received packet {:?}", sender);
}
Ok(())
})?;
}
}
/// Read UDP socket until closed, logging errors.
async fn read_udp() {
if let Err(e) = read_udp_inner().await {
log::warn!("UDP read loop failed: {:?}", e);
}
}
/// Register a new UDP endpoint for some node_id.
fn register_udp(addr: SocketAddr, node_id: NodeId) -> Result<(), Error> {
with_app_mut(|app| {
app.node.mention_node(node_id);
let addr = normalize_addr(addr);
if app.udp_link_ids.get(&addr).is_none() {
let rtr_id = app.node.new_link(node_id, AppLinkId::Udp(addr))?;
println!("register peer: {} node_id={:?} rtr_id={:?}", addr, node_id, rtr_id);
app.udp_link_ids.insert(addr, rtr_id);
}
Ok(())
})
}
trait ListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error>;
}
impl ListPeersResponder for ServiceConsumerListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
impl ListPeersResponder for OvernetListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
async fn run_list_peers_inner(responder: impl ListPeersResponder) -> Result<(), Error> {
let mut peers = with_app_mut(|app| app.node.clone().list_peers()).await?;
responder.respond(&mut peers.iter_mut())?;
Ok(())
}
async fn run_list_peers(responder: impl ListPeersResponder) {
if let Err(e) = run_list_peers_inner(responder).await {
log::warn!("List peers gets error: {:?}", e);
}
}
async fn run_service_publisher_server(
mut stream: ServicePublisherRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServicePublisherRequest::PublishService { service_name, provider, .. } => {
app.node.register_service(service_name, provider)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e)
}
}
Ok(())
}
async fn run_service_consumer_server(
mut stream: ServiceConsumerRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServiceConsumerRequest::ListPeers { responder, .. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
ServiceConsumerRequest::ConnectToService { node, service_name, chan, .. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_mesh_controller_server(mut stream: MeshControllerRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
MeshControllerRequest::AttachSocketLink { socket, options, .. } => {
app.node.attach_socket_link(options.connection_label, socket)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_legacy_overnet_server(mut stream: OvernetRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
OvernetRequest::PublishService { service_name, provider, .. } => {
app.node.register_service(service_name, provider)
}
OvernetRequest::ListPeers { responder, .. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
OvernetRequest::ConnectToService { node, service_name, chan, .. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
enum IncomingService {
ServiceConsumer(ServiceConsumerRequestStream),
ServicePublisher(ServicePublisherRequestStream),
MeshController(MeshControllerRequestStream),
LegacyOvernet(OvernetRequestStream),
// ... more services here
}
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
fuchsia_syslog::init_with_tags(&["overnet"]).context("initialize logging")?;
let mut fs = ServiceFs::new_local();
let mut svc_dir = fs.dir("svc");
svc_dir.add_fidl_service(IncomingService::ServiceConsumer);
svc_dir.add_fidl_service(IncomingService::ServicePublisher);
svc_dir.add_fidl_service(IncomingService::MeshController);
svc_dir.add_fidl_service(IncomingService::LegacyOvernet);
fs.take_and_serve_directory_handle()?;
with_app_mut(|app| -> Result<(), Error> {
app.udp_socket = Some(UdpSocketHolder::new(app.node.id())?);
fasync::spawn_local(mdns::subscribe());
fasync::spawn_local(read_udp());
Ok(())
})
.context("Initializing UDP & MDNS")?;
const MAX_CONCURRENT: usize = 10_000;
fs.for_each_concurrent(MAX_CONCURRENT, |svcreq| match svcreq {
IncomingService::MeshController(stream) => {
run_mesh_controller_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServicePublisher(stream) => {
run_service_publisher_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServiceConsumer(stream) => {
run_service_consumer_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::LegacyOvernet(stream) => {
run_legacy_overnet_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
})
.await;
Ok(())
}
| new | identifier_name |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Overnet daemon for Fuchsia
#![deny(missing_docs)]
mod mdns;
use failure::{Error, ResultExt};
use fidl_fuchsia_overnet::{
MeshControllerRequest, MeshControllerRequestStream, OvernetListPeersResponder, OvernetRequest,
OvernetRequestStream, ServiceConsumerListPeersResponder, ServiceConsumerRequest,
ServiceConsumerRequestStream, ServicePublisherRequest, ServicePublisherRequestStream,
};
use fuchsia_async as fasync;
use fuchsia_component::server::ServiceFs;
use fuchsia_zircon as zx;
use futures::future::{abortable, AbortHandle};
use futures::prelude::*;
use overnet_core::{LinkId, Node, NodeId, NodeOptions, NodeRuntime, RouterTime, SendHandle};
use std::cell::RefCell;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV6};
use std::ops::Deref;
use std::rc::Rc;
use zx::AsHandleRef;
/// Identifier for a link as defined by overnetstack.
#[derive(Clone, Copy, Debug)]
enum AppLinkId {
Udp(SocketAddrV6),
}
/// Adapter of fasync::Time to RouterTime for overnet's core library.
#[derive(PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Debug)]
struct Time(fasync::Time);
impl RouterTime for Time {
type Duration = zx::Duration;
fn now() -> Self {
Time(fasync::Time::now())
}
fn after(time: Self, duration: zx::Duration) -> Self {
Self(time.0 + duration)
}
}
struct AppRuntime;
impl NodeRuntime for AppRuntime {
type Time = Time;
type LinkId = AppLinkId;
const IMPLEMENTATION: fidl_fuchsia_overnet_protocol::Implementation =
fidl_fuchsia_overnet_protocol::Implementation::OvernetStack;
fn handle_type(handle: &zx::Handle) -> Result<SendHandle, Error> {
match handle.basic_info()?.object_type {
zx::ObjectType::CHANNEL => Ok(SendHandle::Channel),
_ => failure::bail!("Handle type not proxyable {:?}", handle.basic_info()?.object_type),
}
}
fn spawn_local<F>(&mut self, future: F)
where
F: Future<Output = ()> + 'static,
{
fasync::spawn_local(future)
}
fn at(&mut self, t: Self::Time, f: impl FnOnce() + 'static) {
fasync::spawn_local(at(t.0, f))
}
fn router_link_id(&self, id: AppLinkId) -> LinkId<overnet_core::PhysLinkId<AppLinkId>> {
with_app_mut(|app| match id {
AppLinkId::Udp(addr) => {
app.udp_link_ids.get(&addr).copied().unwrap_or(LinkId::invalid())
}
})
}
fn send_on_link(&mut self, id: Self::LinkId, packet: &mut [u8]) -> Result<(), Error> {
match id {
AppLinkId::Udp(addr) => {
println!("UDP_SEND to:{} len:{}", addr, packet.len());
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("no udp socket"))?
.sock
.clone())
})?;
let sock = sock.deref().as_ref();
if let Err(e) = sock.send_to(packet, addr) {
if e.kind() == std::io::ErrorKind::BrokenPipe {
log::warn!("BrokenPipe on UDP socket: let's make a new one");
with_app_mut(|app| {
app.udp_socket.take();
app.udp_socket = Some(UdpSocketHolder::new(app.node_id)?);
Ok(())
})
} else {
Err(e.into())
}
} else {
Ok(())
}
}
}
}
}
struct UdpSocketHolder {
sock: Rc<fasync::net::UdpSocket>,
abort_publisher: AbortHandle,
}
impl UdpSocketHolder {
fn new(node_id: NodeId) -> Result<Self, Error> {
// Must not call with_app_mut here, as this is called from with_app_mut
let sock = std::net::UdpSocket::bind("[::]:0").context("Creating UDP socket")?;
let publisher =
mdns::publish(node_id, sock.local_addr().context("Getting UDP local address")?.port());
let sock = Rc::new(fasync::net::UdpSocket::from_socket(sock)?);
let (publisher, abort_publisher) = abortable(publisher);
fasync::spawn_local(async move {
let _ = publisher.await;
});
Ok(Self { sock, abort_publisher })
}
}
impl Drop for UdpSocketHolder {
fn drop(&mut self) {
self.abort_publisher.abort();
}
}
/// Global state for overnetstack.
struct App {
node_id: NodeId,
node: Node<AppRuntime>,
// TODO(ctiller): This state should be moved out into its own file.
/// Map socket addresses to udp link ids.
udp_link_ids: HashMap<SocketAddrV6, LinkId<overnet_core::PhysLinkId<AppLinkId>>>,
/// UDP socket to communicate over.
udp_socket: Option<UdpSocketHolder>,
}
thread_local! {
// Always access via with_app_mut
static APP: RefCell<App> = RefCell::new(App::new());
}
fn with_app_mut<R>(f: impl FnOnce(&mut App) -> R) -> R |
async fn at(when: fasync::Time, f: impl FnOnce()) {
fasync::Timer::new(when).await;
f();
}
impl App {
/// Create a new instance of App
fn new() -> App {
let node = Node::new(
AppRuntime,
NodeOptions::new()
.set_quic_server_key_file(Box::new("/pkg/data/cert.key".to_string()))
.set_quic_server_cert_file(Box::new("/pkg/data/cert.crt".to_string())),
)
.unwrap();
App { node_id: node.id(), node, udp_link_ids: HashMap::new(), udp_socket: None }
}
}
fn normalize_addr(addr: SocketAddr) -> SocketAddrV6 {
match addr {
SocketAddr::V6(a) => a,
SocketAddr::V4(a) => SocketAddrV6::new(a.ip().to_ipv6_mapped(), a.port(), 0, 0),
}
}
/// UDP read inner loop.
async fn read_udp_inner() -> Result<(), Error> {
let mut buf: [u8; 1500] = [0; 1500];
loop {
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("No udp socket to read from"))?
.sock
.clone())
})?;
let (length, sender) = sock.recv_from(&mut buf).await?;
println!("UDP_RECV from:{} len:{}", sender, length);
let sender = normalize_addr(sender);
with_app_mut(|app| -> Result<(), Error> {
if let Some(link_id) = app.udp_link_ids.get(&sender) {
app.node.queue_recv(*link_id, &mut buf[..length]);
} else {
log::warn!("No link for received packet {:?}", sender);
}
Ok(())
})?;
}
}
/// Read UDP socket until closed, logging errors.
async fn read_udp() {
if let Err(e) = read_udp_inner().await {
log::warn!("UDP read loop failed: {:?}", e);
}
}
/// Register a new UDP endpoint for some node_id.
fn register_udp(addr: SocketAddr, node_id: NodeId) -> Result<(), Error> {
with_app_mut(|app| {
app.node.mention_node(node_id);
let addr = normalize_addr(addr);
if app.udp_link_ids.get(&addr).is_none() {
let rtr_id = app.node.new_link(node_id, AppLinkId::Udp(addr))?;
println!("register peer: {} node_id={:?} rtr_id={:?}", addr, node_id, rtr_id);
app.udp_link_ids.insert(addr, rtr_id);
}
Ok(())
})
}
trait ListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error>;
}
impl ListPeersResponder for ServiceConsumerListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
impl ListPeersResponder for OvernetListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
async fn run_list_peers_inner(responder: impl ListPeersResponder) -> Result<(), Error> {
let mut peers = with_app_mut(|app| app.node.clone().list_peers()).await?;
responder.respond(&mut peers.iter_mut())?;
Ok(())
}
async fn run_list_peers(responder: impl ListPeersResponder) {
if let Err(e) = run_list_peers_inner(responder).await {
log::warn!("List peers gets error: {:?}", e);
}
}
async fn run_service_publisher_server(
mut stream: ServicePublisherRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServicePublisherRequest::PublishService { service_name, provider, .. } => {
app.node.register_service(service_name, provider)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e)
}
}
Ok(())
}
async fn run_service_consumer_server(
mut stream: ServiceConsumerRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServiceConsumerRequest::ListPeers { responder, .. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
ServiceConsumerRequest::ConnectToService { node, service_name, chan, .. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_mesh_controller_server(mut stream: MeshControllerRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
MeshControllerRequest::AttachSocketLink { socket, options, .. } => {
app.node.attach_socket_link(options.connection_label, socket)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_legacy_overnet_server(mut stream: OvernetRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
OvernetRequest::PublishService { service_name, provider, .. } => {
app.node.register_service(service_name, provider)
}
OvernetRequest::ListPeers { responder, .. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
OvernetRequest::ConnectToService { node, service_name, chan, .. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
enum IncomingService {
ServiceConsumer(ServiceConsumerRequestStream),
ServicePublisher(ServicePublisherRequestStream),
MeshController(MeshControllerRequestStream),
LegacyOvernet(OvernetRequestStream),
// ... more services here
}
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
fuchsia_syslog::init_with_tags(&["overnet"]).context("initialize logging")?;
let mut fs = ServiceFs::new_local();
let mut svc_dir = fs.dir("svc");
svc_dir.add_fidl_service(IncomingService::ServiceConsumer);
svc_dir.add_fidl_service(IncomingService::ServicePublisher);
svc_dir.add_fidl_service(IncomingService::MeshController);
svc_dir.add_fidl_service(IncomingService::LegacyOvernet);
fs.take_and_serve_directory_handle()?;
with_app_mut(|app| -> Result<(), Error> {
app.udp_socket = Some(UdpSocketHolder::new(app.node.id())?);
fasync::spawn_local(mdns::subscribe());
fasync::spawn_local(read_udp());
Ok(())
})
.context("Initializing UDP & MDNS")?;
const MAX_CONCURRENT: usize = 10_000;
fs.for_each_concurrent(MAX_CONCURRENT, |svcreq| match svcreq {
IncomingService::MeshController(stream) => {
run_mesh_controller_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServicePublisher(stream) => {
run_service_publisher_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServiceConsumer(stream) => {
run_service_consumer_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::LegacyOvernet(stream) => {
run_legacy_overnet_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
})
.await;
Ok(())
}
| {
APP.with(|rcapp| f(&mut rcapp.borrow_mut()))
} | identifier_body |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Overnet daemon for Fuchsia
#![deny(missing_docs)]
mod mdns;
use failure::{Error, ResultExt};
use fidl_fuchsia_overnet::{
MeshControllerRequest, MeshControllerRequestStream, OvernetListPeersResponder, OvernetRequest,
OvernetRequestStream, ServiceConsumerListPeersResponder, ServiceConsumerRequest,
ServiceConsumerRequestStream, ServicePublisherRequest, ServicePublisherRequestStream,
};
use fuchsia_async as fasync;
use fuchsia_component::server::ServiceFs;
use fuchsia_zircon as zx;
use futures::future::{abortable, AbortHandle};
use futures::prelude::*;
use overnet_core::{LinkId, Node, NodeId, NodeOptions, NodeRuntime, RouterTime, SendHandle};
use std::cell::RefCell;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV6};
use std::ops::Deref;
use std::rc::Rc;
use zx::AsHandleRef;
/// Identifier for a link as defined by overnetstack.
#[derive(Clone, Copy, Debug)]
enum AppLinkId {
Udp(SocketAddrV6),
}
/// Adapter of fasync::Time to RouterTime for overnet's core library.
#[derive(PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Debug)]
struct Time(fasync::Time);
impl RouterTime for Time {
type Duration = zx::Duration;
fn now() -> Self {
Time(fasync::Time::now())
}
fn after(time: Self, duration: zx::Duration) -> Self {
Self(time.0 + duration)
}
}
struct AppRuntime;
impl NodeRuntime for AppRuntime {
type Time = Time;
type LinkId = AppLinkId;
const IMPLEMENTATION: fidl_fuchsia_overnet_protocol::Implementation =
fidl_fuchsia_overnet_protocol::Implementation::OvernetStack;
fn handle_type(handle: &zx::Handle) -> Result<SendHandle, Error> {
match handle.basic_info()?.object_type {
zx::ObjectType::CHANNEL => Ok(SendHandle::Channel),
_ => failure::bail!("Handle type not proxyable {:?}", handle.basic_info()?.object_type),
}
}
fn spawn_local<F>(&mut self, future: F)
where
F: Future<Output = ()> + 'static,
{
fasync::spawn_local(future)
}
fn at(&mut self, t: Self::Time, f: impl FnOnce() + 'static) {
fasync::spawn_local(at(t.0, f))
}
fn router_link_id(&self, id: AppLinkId) -> LinkId<overnet_core::PhysLinkId<AppLinkId>> {
with_app_mut(|app| match id {
AppLinkId::Udp(addr) => {
app.udp_link_ids.get(&addr).copied().unwrap_or(LinkId::invalid())
}
})
}
fn send_on_link(&mut self, id: Self::LinkId, packet: &mut [u8]) -> Result<(), Error> {
match id {
AppLinkId::Udp(addr) => {
println!("UDP_SEND to:{} len:{}", addr, packet.len());
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("no udp socket"))?
.sock
.clone())
})?;
let sock = sock.deref().as_ref();
if let Err(e) = sock.send_to(packet, addr) {
if e.kind() == std::io::ErrorKind::BrokenPipe {
log::warn!("BrokenPipe on UDP socket: let's make a new one");
with_app_mut(|app| {
app.udp_socket.take();
app.udp_socket = Some(UdpSocketHolder::new(app.node_id)?);
Ok(())
})
} else {
Err(e.into())
}
} else {
Ok(())
}
}
}
}
}
struct UdpSocketHolder {
sock: Rc<fasync::net::UdpSocket>,
abort_publisher: AbortHandle,
}
impl UdpSocketHolder {
fn new(node_id: NodeId) -> Result<Self, Error> {
// Must not call with_app_mut here, as this is called from with_app_mut
let sock = std::net::UdpSocket::bind("[::]:0").context("Creating UDP socket")?;
let publisher =
mdns::publish(node_id, sock.local_addr().context("Getting UDP local address")?.port());
let sock = Rc::new(fasync::net::UdpSocket::from_socket(sock)?);
let (publisher, abort_publisher) = abortable(publisher);
fasync::spawn_local(async move {
let _ = publisher.await;
});
Ok(Self { sock, abort_publisher })
}
}
impl Drop for UdpSocketHolder {
fn drop(&mut self) {
self.abort_publisher.abort();
}
}
/// Global state for overnetstack.
struct App {
node_id: NodeId,
node: Node<AppRuntime>,
// TODO(ctiller): This state should be moved out into its own file.
/// Map socket addresses to udp link ids.
udp_link_ids: HashMap<SocketAddrV6, LinkId<overnet_core::PhysLinkId<AppLinkId>>>,
/// UDP socket to communicate over.
udp_socket: Option<UdpSocketHolder>,
}
thread_local! {
// Always access via with_app_mut
static APP: RefCell<App> = RefCell::new(App::new());
}
fn with_app_mut<R>(f: impl FnOnce(&mut App) -> R) -> R {
APP.with(|rcapp| f(&mut rcapp.borrow_mut()))
}
async fn at(when: fasync::Time, f: impl FnOnce()) {
fasync::Timer::new(when).await;
f();
}
impl App {
/// Create a new instance of App
fn new() -> App {
let node = Node::new(
AppRuntime,
NodeOptions::new()
.set_quic_server_key_file(Box::new("/pkg/data/cert.key".to_string()))
.set_quic_server_cert_file(Box::new("/pkg/data/cert.crt".to_string())),
)
.unwrap();
App { node_id: node.id(), node, udp_link_ids: HashMap::new(), udp_socket: None }
}
}
fn normalize_addr(addr: SocketAddr) -> SocketAddrV6 {
match addr {
SocketAddr::V6(a) => a,
SocketAddr::V4(a) => SocketAddrV6::new(a.ip().to_ipv6_mapped(), a.port(), 0, 0),
}
}
/// UDP read inner loop.
async fn read_udp_inner() -> Result<(), Error> {
let mut buf: [u8; 1500] = [0; 1500];
loop {
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("No udp socket to read from"))?
.sock
.clone())
})?;
let (length, sender) = sock.recv_from(&mut buf).await?;
println!("UDP_RECV from:{} len:{}", sender, length);
let sender = normalize_addr(sender);
with_app_mut(|app| -> Result<(), Error> {
if let Some(link_id) = app.udp_link_ids.get(&sender) {
app.node.queue_recv(*link_id, &mut buf[..length]);
} else {
log::warn!("No link for received packet {:?}", sender);
}
Ok(())
})?;
}
}
/// Read UDP socket until closed, logging errors.
async fn read_udp() {
if let Err(e) = read_udp_inner().await {
log::warn!("UDP read loop failed: {:?}", e);
}
}
/// Register a new UDP endpoint for some node_id.
fn register_udp(addr: SocketAddr, node_id: NodeId) -> Result<(), Error> {
with_app_mut(|app| {
app.node.mention_node(node_id);
let addr = normalize_addr(addr);
if app.udp_link_ids.get(&addr).is_none() {
let rtr_id = app.node.new_link(node_id, AppLinkId::Udp(addr))?;
println!("register peer: {} node_id={:?} rtr_id={:?}", addr, node_id, rtr_id);
app.udp_link_ids.insert(addr, rtr_id);
}
Ok(())
})
}
trait ListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error>;
}
impl ListPeersResponder for ServiceConsumerListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
impl ListPeersResponder for OvernetListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
async fn run_list_peers_inner(responder: impl ListPeersResponder) -> Result<(), Error> {
let mut peers = with_app_mut(|app| app.node.clone().list_peers()).await?;
responder.respond(&mut peers.iter_mut())?;
Ok(())
}
async fn run_list_peers(responder: impl ListPeersResponder) {
if let Err(e) = run_list_peers_inner(responder).await {
log::warn!("List peers gets error: {:?}", e);
}
}
async fn run_service_publisher_server(
mut stream: ServicePublisherRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServicePublisherRequest::PublishService { service_name, provider, .. } => {
app.node.register_service(service_name, provider)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e)
}
}
Ok(())
}
async fn run_service_consumer_server(
mut stream: ServiceConsumerRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServiceConsumerRequest::ListPeers { responder, .. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
ServiceConsumerRequest::ConnectToService { node, service_name, chan, .. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_mesh_controller_server(mut stream: MeshControllerRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
MeshControllerRequest::AttachSocketLink { socket, options, .. } => {
app.node.attach_socket_link(options.connection_label, socket)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_legacy_overnet_server(mut stream: OvernetRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
OvernetRequest::PublishService { service_name, provider, .. } => {
app.node.register_service(service_name, provider)
}
OvernetRequest::ListPeers { responder, .. } => |
OvernetRequest::ConnectToService { node, service_name, chan, .. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
enum IncomingService {
ServiceConsumer(ServiceConsumerRequestStream),
ServicePublisher(ServicePublisherRequestStream),
MeshController(MeshControllerRequestStream),
LegacyOvernet(OvernetRequestStream),
// ... more services here
}
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
fuchsia_syslog::init_with_tags(&["overnet"]).context("initialize logging")?;
let mut fs = ServiceFs::new_local();
let mut svc_dir = fs.dir("svc");
svc_dir.add_fidl_service(IncomingService::ServiceConsumer);
svc_dir.add_fidl_service(IncomingService::ServicePublisher);
svc_dir.add_fidl_service(IncomingService::MeshController);
svc_dir.add_fidl_service(IncomingService::LegacyOvernet);
fs.take_and_serve_directory_handle()?;
with_app_mut(|app| -> Result<(), Error> {
app.udp_socket = Some(UdpSocketHolder::new(app.node.id())?);
fasync::spawn_local(mdns::subscribe());
fasync::spawn_local(read_udp());
Ok(())
})
.context("Initializing UDP & MDNS")?;
const MAX_CONCURRENT: usize = 10_000;
fs.for_each_concurrent(MAX_CONCURRENT, |svcreq| match svcreq {
IncomingService::MeshController(stream) => {
run_mesh_controller_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServicePublisher(stream) => {
run_service_publisher_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServiceConsumer(stream) => {
run_service_consumer_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::LegacyOvernet(stream) => {
run_legacy_overnet_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
})
.await;
Ok(())
}
| {
fasync::spawn_local(run_list_peers(responder));
Ok(())
} | conditional_block |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Overnet daemon for Fuchsia
#![deny(missing_docs)]
mod mdns;
use failure::{Error, ResultExt};
use fidl_fuchsia_overnet::{
MeshControllerRequest, MeshControllerRequestStream, OvernetListPeersResponder, OvernetRequest,
OvernetRequestStream, ServiceConsumerListPeersResponder, ServiceConsumerRequest,
ServiceConsumerRequestStream, ServicePublisherRequest, ServicePublisherRequestStream,
};
use fuchsia_async as fasync;
use fuchsia_component::server::ServiceFs;
use fuchsia_zircon as zx;
use futures::future::{abortable, AbortHandle};
use futures::prelude::*;
use overnet_core::{LinkId, Node, NodeId, NodeOptions, NodeRuntime, RouterTime, SendHandle};
use std::cell::RefCell;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV6};
use std::ops::Deref;
use std::rc::Rc;
use zx::AsHandleRef;
/// Identifier for a link as defined by overnetstack.
#[derive(Clone, Copy, Debug)]
enum AppLinkId {
Udp(SocketAddrV6),
}
/// Adapter of fasync::Time to RouterTime for overnet's core library.
#[derive(PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Debug)]
struct Time(fasync::Time);
impl RouterTime for Time {
type Duration = zx::Duration;
fn now() -> Self {
Time(fasync::Time::now())
}
fn after(time: Self, duration: zx::Duration) -> Self {
Self(time.0 + duration)
}
}
struct AppRuntime;
impl NodeRuntime for AppRuntime {
type Time = Time;
type LinkId = AppLinkId;
const IMPLEMENTATION: fidl_fuchsia_overnet_protocol::Implementation =
fidl_fuchsia_overnet_protocol::Implementation::OvernetStack;
fn handle_type(handle: &zx::Handle) -> Result<SendHandle, Error> {
match handle.basic_info()?.object_type {
zx::ObjectType::CHANNEL => Ok(SendHandle::Channel),
_ => failure::bail!("Handle type not proxyable {:?}", handle.basic_info()?.object_type),
}
}
fn spawn_local<F>(&mut self, future: F)
where
F: Future<Output = ()> + 'static,
{
fasync::spawn_local(future)
}
fn at(&mut self, t: Self::Time, f: impl FnOnce() + 'static) {
fasync::spawn_local(at(t.0, f))
}
fn router_link_id(&self, id: AppLinkId) -> LinkId<overnet_core::PhysLinkId<AppLinkId>> {
with_app_mut(|app| match id {
AppLinkId::Udp(addr) => {
app.udp_link_ids.get(&addr).copied().unwrap_or(LinkId::invalid())
}
})
}
fn send_on_link(&mut self, id: Self::LinkId, packet: &mut [u8]) -> Result<(), Error> {
match id {
AppLinkId::Udp(addr) => {
println!("UDP_SEND to:{} len:{}", addr, packet.len());
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("no udp socket"))?
.sock
.clone())
})?;
let sock = sock.deref().as_ref();
if let Err(e) = sock.send_to(packet, addr) {
if e.kind() == std::io::ErrorKind::BrokenPipe {
log::warn!("BrokenPipe on UDP socket: let's make a new one");
with_app_mut(|app| {
app.udp_socket.take();
app.udp_socket = Some(UdpSocketHolder::new(app.node_id)?);
Ok(())
})
} else {
Err(e.into())
}
} else {
Ok(())
}
}
}
}
}
struct UdpSocketHolder {
sock: Rc<fasync::net::UdpSocket>,
abort_publisher: AbortHandle,
}
impl UdpSocketHolder {
fn new(node_id: NodeId) -> Result<Self, Error> {
// Must not call with_app_mut here, as this is called from with_app_mut
let sock = std::net::UdpSocket::bind("[::]:0").context("Creating UDP socket")?;
let publisher =
mdns::publish(node_id, sock.local_addr().context("Getting UDP local address")?.port());
let sock = Rc::new(fasync::net::UdpSocket::from_socket(sock)?);
let (publisher, abort_publisher) = abortable(publisher);
fasync::spawn_local(async move {
let _ = publisher.await;
});
Ok(Self { sock, abort_publisher })
}
}
impl Drop for UdpSocketHolder {
fn drop(&mut self) {
self.abort_publisher.abort();
}
}
/// Global state for overnetstack.
struct App {
node_id: NodeId,
node: Node<AppRuntime>,
// TODO(ctiller): This state should be moved out into its own file.
/// Map socket addresses to udp link ids.
udp_link_ids: HashMap<SocketAddrV6, LinkId<overnet_core::PhysLinkId<AppLinkId>>>,
/// UDP socket to communicate over.
udp_socket: Option<UdpSocketHolder>,
}
thread_local! {
// Always access via with_app_mut
static APP: RefCell<App> = RefCell::new(App::new());
}
fn with_app_mut<R>(f: impl FnOnce(&mut App) -> R) -> R {
APP.with(|rcapp| f(&mut rcapp.borrow_mut()))
}
async fn at(when: fasync::Time, f: impl FnOnce()) {
fasync::Timer::new(when).await;
f();
}
impl App {
/// Create a new instance of App
fn new() -> App {
let node = Node::new(
AppRuntime,
NodeOptions::new()
.set_quic_server_key_file(Box::new("/pkg/data/cert.key".to_string()))
.set_quic_server_cert_file(Box::new("/pkg/data/cert.crt".to_string())),
)
.unwrap();
App { node_id: node.id(), node, udp_link_ids: HashMap::new(), udp_socket: None }
}
}
fn normalize_addr(addr: SocketAddr) -> SocketAddrV6 {
match addr {
SocketAddr::V6(a) => a,
SocketAddr::V4(a) => SocketAddrV6::new(a.ip().to_ipv6_mapped(), a.port(), 0, 0),
}
}
/// UDP read inner loop.
async fn read_udp_inner() -> Result<(), Error> {
let mut buf: [u8; 1500] = [0; 1500];
loop {
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("No udp socket to read from"))?
.sock
.clone())
})?;
let (length, sender) = sock.recv_from(&mut buf).await?;
println!("UDP_RECV from:{} len:{}", sender, length);
let sender = normalize_addr(sender);
with_app_mut(|app| -> Result<(), Error> {
if let Some(link_id) = app.udp_link_ids.get(&sender) {
app.node.queue_recv(*link_id, &mut buf[..length]);
} else {
log::warn!("No link for received packet {:?}", sender);
}
Ok(())
})?;
}
}
/// Read UDP socket until closed, logging errors.
async fn read_udp() {
if let Err(e) = read_udp_inner().await {
log::warn!("UDP read loop failed: {:?}", e);
}
}
/// Register a new UDP endpoint for some node_id.
fn register_udp(addr: SocketAddr, node_id: NodeId) -> Result<(), Error> {
with_app_mut(|app| {
app.node.mention_node(node_id);
let addr = normalize_addr(addr);
if app.udp_link_ids.get(&addr).is_none() {
let rtr_id = app.node.new_link(node_id, AppLinkId::Udp(addr))?;
println!("register peer: {} node_id={:?} rtr_id={:?}", addr, node_id, rtr_id);
app.udp_link_ids.insert(addr, rtr_id);
}
Ok(())
}) | trait ListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error>;
}
impl ListPeersResponder for ServiceConsumerListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
impl ListPeersResponder for OvernetListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
async fn run_list_peers_inner(responder: impl ListPeersResponder) -> Result<(), Error> {
let mut peers = with_app_mut(|app| app.node.clone().list_peers()).await?;
responder.respond(&mut peers.iter_mut())?;
Ok(())
}
async fn run_list_peers(responder: impl ListPeersResponder) {
if let Err(e) = run_list_peers_inner(responder).await {
log::warn!("List peers gets error: {:?}", e);
}
}
async fn run_service_publisher_server(
mut stream: ServicePublisherRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServicePublisherRequest::PublishService { service_name, provider, .. } => {
app.node.register_service(service_name, provider)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e)
}
}
Ok(())
}
async fn run_service_consumer_server(
mut stream: ServiceConsumerRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServiceConsumerRequest::ListPeers { responder, .. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
ServiceConsumerRequest::ConnectToService { node, service_name, chan, .. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_mesh_controller_server(mut stream: MeshControllerRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
MeshControllerRequest::AttachSocketLink { socket, options, .. } => {
app.node.attach_socket_link(options.connection_label, socket)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_legacy_overnet_server(mut stream: OvernetRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
OvernetRequest::PublishService { service_name, provider, .. } => {
app.node.register_service(service_name, provider)
}
OvernetRequest::ListPeers { responder, .. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
OvernetRequest::ConnectToService { node, service_name, chan, .. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
enum IncomingService {
ServiceConsumer(ServiceConsumerRequestStream),
ServicePublisher(ServicePublisherRequestStream),
MeshController(MeshControllerRequestStream),
LegacyOvernet(OvernetRequestStream),
// ... more services here
}
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
fuchsia_syslog::init_with_tags(&["overnet"]).context("initialize logging")?;
let mut fs = ServiceFs::new_local();
let mut svc_dir = fs.dir("svc");
svc_dir.add_fidl_service(IncomingService::ServiceConsumer);
svc_dir.add_fidl_service(IncomingService::ServicePublisher);
svc_dir.add_fidl_service(IncomingService::MeshController);
svc_dir.add_fidl_service(IncomingService::LegacyOvernet);
fs.take_and_serve_directory_handle()?;
with_app_mut(|app| -> Result<(), Error> {
app.udp_socket = Some(UdpSocketHolder::new(app.node.id())?);
fasync::spawn_local(mdns::subscribe());
fasync::spawn_local(read_udp());
Ok(())
})
.context("Initializing UDP & MDNS")?;
const MAX_CONCURRENT: usize = 10_000;
fs.for_each_concurrent(MAX_CONCURRENT, |svcreq| match svcreq {
IncomingService::MeshController(stream) => {
run_mesh_controller_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServicePublisher(stream) => {
run_service_publisher_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServiceConsumer(stream) => {
run_service_consumer_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::LegacyOvernet(stream) => {
run_legacy_overnet_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
})
.await;
Ok(())
} | }
| random_line_split |
init.go | package model
import (
"github.com/gin-gonic/gin"
"github.com/terryli1643/apidemo/libs/datasource"
log "github.com/terryli1643/apidemo/libs/logger"
)
// swagger:model
type (
Device int
UserType int
AccountState int
MerchantState int
SettlementRecordState int
RecordType int
BankAccountState int
AmountFormat int
TradingMode int
FundchangeType int
FundSourceType int
RemarkType int
PayerState int
PayerType int
TerminalType int
Location int
OrderType int
Switch int
PayerProfitsState int
ChannelState int
OperateAccountType int
RateState int
ChannelType int
Channel int
RateChannel int
OrderState int
PaymentState int
AccountType int
OperateAccountOrderType int
FinancialSubjectType int
OperateAccountOrderState int
FillType int
FillState int
TransferType int
Online int
ParamType int
ParamState int
PushState int
BuildInType int
MatchState int
OperationType int
SourceType int
BuildInMerchant int
Domain int
FillchangeType int
FillTypeForMerchant int
IsAgent int
CompleteState int
TypeJpush int
IsFill int
ErrType int
QueryTimeType int
QRType int
IsPointType int
PointUporDown int
FixedCode int
AgentRecalculateProfitState int
OrderSettled int
YesOrNo int
PayerComplainState int
)
const (
_ AgentRecalculateProfitState = iota
AgentRecalculateProfitStateSettle //未发放
AgentRecalculateProfitStateAllSettled //全部已发放
)
const (
_ OrderSettled = iota
Unsettle
Settled
)
const (
RoleAnonymous = "ROLE_ANONYMOUS" // 陌生人
RoleAdmin = "ROLE_ADMIN" // 管理员
)
type IEnum interface {
Val() int
}
const (
IDSpaceUser = "user"
IDSpaceOrder = "order"
IDSpaceRecommendCode = "recommendcode"
IDSpaceReq = "req"
IDSPaceRole = "role"
)
const (
USD = "USD" //美元
EUR = "EUR" //欧元
JPY = "JPY" //日元
GBP = "GBP" //英镑
CHF = "CHF" //瑞士法郎
AUD = "AUD" //澳大利亚元
NZD = "NZD" //新西兰元
CAD = "CAD" //加拿大元
CNY = "CNY" //人民币
RUB = "RUB" //卢比
HKD = "HKD" //港币
IDR = "IDR" //印尼盾
KRW = "KRW" //韩国元
SAR = "SAR" //亚尔
THB = "THB" //泰铢
)
const (
_ Device = iota
PC //来源终端: 电脑 1
IOS //来源终端: 苹果手机 2
Android //来源终端: 安卓手机 3
)
const (
_ YesOrNo = iota
Yes
No
)
const (
_ OperationType = iota
OperationTypeQuery //查询
OperationTypeCreate //添加
OperationTypeUpdate //编辑
OperationTypeDelete //删除
OperationTypeUnknown //未知
)
const (
_ SourceType = iota
CLIENT //来源终端: 后台系统 1
APP //来源终端: APP 2
MerchantAPI //来源终端: 商户API 3
ExeAPI //来源终端: ExeAPI 4
)
const (
_ TypeJpush = iota
GoodsNotice //来源终端: 后台系统 1
ApprovalNotice //来源终端: APP 2
OfflineNotice //来源终端: 商户API 3
)
const (
_ IsFill = iota
IsFillNo //不是填报 1
IsFillYes //是填报 2
)
const (
_ FixedCode = iota
FixedCodeNo //不是固码 1
FixedCodeYes //固定二维码 2
)
const (
_ PointUporDown = iota
Up //
Down //
)
const (
_ IsPointType = iota
IsPointTypeNo //不是小数点形式 1
IsPointTypeYes //是小数点形式 2
)
const (
_ BuildInMerchant = iota
BuildInMerchantYes // 内置商户
BuildInMerchantNo // 真实商户
)
const (
_ AccountState = iota
AccountEnable // 正常 1
AccountDisable // 帐号冻结 2
)
const (
_ CompleteState = iota
Complete //绑定 1
UnComplete //未完成 2
CompleteStateDetete //删除 3
)
const (
_ Domain = iota
DomainHt // 后台 1
DomainAPI // api 2
DomainAPP // APP 3
)
const (
_ OperateAccountOrderType = iota
OperateAccountOrderTypeConverted // 内转 1
OperateAccountOrderTypeOutSide //填报 2
)
const (
_ TransferType = iota
TransferTypeIn // 转入 1
TransferTypeOut //转出 2
)
const (
_ Online = iota
POnline // 在线 1
POffline // 离线 2
PDropline // 掉线 3
)
const (
_ ParamType = iota
BizParam //业务参数
SysParam //系统参数
)
const (
_ ParamState = iota
ParamEnable
ParamDisable
)
const (
_ Location = iota
CHINA //中国 1
TAIWAN //台湾 2
XIANGGANG //香港 3
US //美国 4
VIETNAM //越南 5
THAILAND //泰国 6
KOREA //韩国 7
)
const (
_ BankAccountState = iota
BankAccountStateCreate //待审核 1
BankAccountStateEnable // 审核通过 2
BankAccountStateDisable // 审核不通过 3
)
const (
_ FinancialSubjectType = iota
FinancialSubjectTypeForeign // 外汇支出 1
FinancialSubjectTypeAmount // 费用支出 2
FinancialSubjectTypeFinancialInjection // 财务注入 3
FinancialSubjectTypeAdditionalIncome // 额外收入 4
)
const (
_ QueryTimeType = iota
Today // 今天 1
Yesterday // 昨天 2
LastSevenDays // 最近七天 3
LastOneMonth // 最近一月 4
)
const (
_ OperateAccountType = iota
OperateAccountTypeSh //商户卡 1
OperateAccountTypeZj //资金卡 2
OperateAccountTypeSf //收付卡 3
)
const (
_ UserType = iota
UserTypeMerchent //商户 1
UserTypePayer //付客 2
UserTypeOperateAcc //财务账户 3
UserTypeAdmin //管理员 4
UserTypeAgent //付客代理 5
)
const (
_ IsAgent = iota
IsAgentY //代理1
IsAgentN //非代理2
)
const (
_ RateState = iota
RateStateEnable //启用 1
RateStateDisable //禁用 2
)
const (
_ ChannelType = iota
ChannelTypeWeChat //微信
ChannelTypeAlipay //支付宝
ChannelTypeBank //银行
ChannelTypeJy //金燕E商
ChannelTypeYSF //云闪付
)
const (
AlipayH5 = 1 //支付宝H5
Alipay = 2 //支付宝
AlipayKs = 3 //支付宝快速
AlipayCard = 4 //支付宝卡收
UNIONPAY = 5 //云闪付
JY = 6 //金燕e商
WeiXin = 7 //微信
STUNIONPAY = 8 //st云闪付
CMBC = 1001 //中国民生银行
ICBC = 1002 //中国工商银行
BOC = 1003 //中国银行
BOCOM = 1004 //交通银行
PINGAN = 1005 //中国平安银行
CMB = 1006 //招商银行
ABC = 1007 //中国农业银行
CCB = 1008 //中国建设银行
PSBC = 1009 //中国邮政储蓄银行
CEBB = 1010 //中国光大银行
CIB = 1011 //兴业银行
SPDB = 1012 //浦发银行
CGB = 1013 //广发银行
CITIC = 1014 //中信银行
HXB = 1015 //华夏银行
BCCB = 1016 //北京银行
BOSC = 1017 //上海银行
GZCB = 1018 //广州银行
CZB = 1019 //网商银行
BANK = 2000 //银行
)
var channelNameMap = map[int]string{
AlipayH5: "支付宝H5",
Alipay: "支付宝",
AlipayKs: "支付宝快速",
AlipayCard: "支付宝转卡",
UNIONPAY: "云闪付",
JY: "金燕e商",
WeiXin: "微信",
STUNIONPAY: "ST云闪付",
CMBC: "民生银行",
ICBC: "工商银行",
BOC: "中国银行",
BOCOM: "交通银行",
PINGAN: "平安银行",
CMB: "招商银行",
ABC: "农业银行",
CCB: "建设银行",
PSBC: "邮政储蓄银行",
CEBB: "光大银行",
CIB: "兴业银行",
SPDB: "浦发银行",
CGB: "广发银行",
CITIC: "中信银行",
HXB: "华夏银行",
BCCB: "北京银行",
BOSC: "上海银行",
GZCB: "广州银行",
CZB: "网商银行",
BANK: "银行",
}
var channelValueMap = map[string]int{
"AlipayH5": AlipayH5,
"Alipay": Alipay,
"AlipayKs": AlipayKs,
"AlipayCard": AlipayCard,
"CMBC": CMBC,
"ICBC": ICBC,
"BOC": BOC,
"BOCOM": BOCOM,
"PINGAN": PINGAN,
"CMB": CMB,
"ABC": ABC,
"CCB": CCB,
"PSBC": PSBC,
"CEBB": CEBB,
"CIB": CIB,
"SPDB": SPDB,
"CGB": CGB,
"CITIC": CITIC,
"HXB": HXB,
"BCCB": BCCB,
"BOSC": BOSC,
"GZCB": GZCB,
"CZB": CZB,
"UNIONPAY": UNIONPAY,
"STUNIONPAY": STUNIONPAY,
"JY": JY,
"WECHAT": WeiXin,
"BANK": BANK,
}
var channelCodeMap = map[int]string{
AlipayH5: "AlipayH5",
Alipay: "Alipay",
AlipayKs: "AlipayKs",
AlipayCard: "AlipayCard",
CMBC: "CMBC",
ICBC: "ICBC",
BOC: "BOC",
BOCOM: "BOCOM",
PINGAN: "PINGAN",
CMB: "CMB",
ABC: "ABC",
CCB: "CCB",
PSBC: "PSBC",
CEBB: "CEBB",
CIB: "CIB",
SPDB: "SPDB",
CGB: "CGB",
CITIC: "CITIC",
HXB: "HXB",
BCCB: "BCCB",
BOSC: "BOSC",
GZCB: "GZCB",
CZB: "CZB",
UNIONPAY: "UNIONPAY",
STUNIONPAY: "STUNIONPAY",
JY: "JY",
WeiXin: "WECHAT",
BANK: "BANK",
}
func (channel Channel) ToChannelName() string {
ch := channelNameMap[int(channel)]
return ch
}
func (channel Channel) ToChannelCode() string {
ch := channelCodeMap[int(channel)]
return ch
}
func ToChannel(channelCode string) Channel {
ch := channelValueMap[channelCode]
return Channel(ch)
}
func (rateChannel RateChannel) ToChannelName() string {
ch := channelNameMap[int(rateChannel)]
return ch
}
func PkgToBankCode(bankname string) string {
channelMap := map[string]string{}
channelMap["com.eg.android.AlipayGphone"] = "Alipay"
channelMap["com.tencent.mm"] = "WeiXin"
channelMap["com.chinamworld.bocmbci"] = "BOC"
channelMap["cmb.pb"] = "CMB"
channelMap["com.mybank.android.phone"] = "CZB"
channelMap["com.chinamworld.main"] = "CCB"
channelMap["com.icbc"] = "ICBC"
channelMap["com.android.bankabc"] = "ABC"
channelMap["com.bankcomm.Bankcomm"] = "BOCOM"
channelMap["com.unionpay"] = "UNIONPAY"
channelMap["com.hnnx.sh.mbank"] = "JY"
channelMap["com.yitong.mbank.psbc"] = "PSBC"
channelMap["cn.com.spdb.mobilebank.per"] = "SPDB"
ch := channelMap[bankname]
return ch
}
func (ch | hannel == AlipayH5 {
return ChannelTypeAlipay
}
if channel == Alipay {
return ChannelTypeAlipay
}
if channel == AlipayKs {
return ChannelTypeAlipay
}
if channel == JY {
return ChannelTypeJy
}
if channel == AlipayCard {
return ChannelTypeBank
}
if channel == UNIONPAY {
return ChannelTypeYSF
}
if channel == STUNIONPAY {
return ChannelTypeYSF
}
if channel == WeiXin {
return ChannelTypeWeChat
}
return ChannelTypeBank
}
func (channel Channel) ToRateChannel() RateChannel {
if channel > 1000 {
return BANK
}
return RateChannel(channel)
}
func (channel RateChannel) ToChannelType() ChannelType {
if channel == AlipayH5 {
return ChannelTypeAlipay
}
if channel == WeiXin {
return ChannelTypeWeChat
}
if channel == Alipay {
return ChannelTypeAlipay
}
if channel == AlipayKs {
return ChannelTypeAlipay
}
if channel == AlipayCard {
return ChannelTypeBank
}
if channel == JY {
return ChannelTypeJy
}
if channel == UNIONPAY {
return ChannelTypeYSF
}
if channel == STUNIONPAY {
return ChannelTypeYSF
}
if channel == WeiXin {
return ChannelTypeWeChat
}
return ChannelTypeBank
}
const (
_ PayerState = iota
PayerStateEnable //启用 1
PayerStateDisable //禁用 2
)
const (
_ ChannelState = iota
ChannelStateEnable //启用 1
ChannelStateDisable //禁用 2
)
const (
_ PayerProfitsState = iota
PayerProfitsStateEnable //启用 1
PayerProfitsStateDisable //禁用 2
)
const (
_ PayerType = iota
PayerTypeMember //会员 1
PayerTypeVIP //VIP 2
)
const (
_ TerminalType = iota
TerminalTypeRrf //rrfApp 1
TerminalTypeDg //代购app 2
)
const (
_ OperateAccountOrderState = iota
OperateAccountOrderStateCreate //新建
OperateAccountOrderStateSuccess //审核通过 1
OperateAccountOrderStateFail //审核不通过 2
)
const (
_ SettlementRecordState = iota
SettlementRecordStatePending //未处理 1
SettlementRecordStateProcessing //处理中 2
SettlementRecordStatePreCompleted //预完成 3
SettlementRecordStatePreClose //预关闭 4
SettlementRecordStateComplete //已完成 5
SettlementRecordStateClose //已关闭 6
)
const (
_ RecordType = iota
RecordTypeInjection //注资 1
RecordTypeSettlement //结算 2
)
const (
_ TradingMode = iota
TradingModeByHand //手动 1
TradingModeByAPI //API 2
)
const (
_ FillchangeType = iota
FillIncrease //增加余额
FillDecrease //减少余额
FillFrezeIncrease //增加冻结
FillFrezeDecrease //减少冻结
)
const (
_ FundchangeType = iota
BalanceIncrease //账户余额增加 1
BalanceDecrease //账户余额减少 2
FrozenIncrease //账户冻结余额增加 3
FrozenDecrease //账户冻结余额减少 4
Frozen //冻结 5
UnFrozen //解冻 6
)
const (
_ FundSourceType = iota
FundSourceTypeDeposit //订单充值 1 (已废弃)
FundSourceTypeWithdraw //订单提现 2 (已废弃)
FundSourceTypeExeRecord //异常记录 3 (已废弃)
FundSourceTypeSettlement //结算/注资 4 (已废弃)
FundSourceTypeFill //付客填报 5 (已废弃)
FundSourceTypeManual //人工填报 6 (已废弃)
FundSourceTypeAgent //代理收益 7 (已废弃)
FundSourceTypeDepositMBI //商户充值 8
FundSourceTypeDepositFeeMBD //商户充值手续费 9
FundSourceTypeMerchantAgentDepositProfitMBI //商户代理充值收益 10
FundSourceTypePayerAssignedOrderPFR //付客接单冻结 11
FundSourceTypePayerAssignedOrderPUF //付客接单解冻 12
FundSourceTypePayerReceivedMoneySuccessPBD //付客收款成功 13
FundSourceTypeDepositPayerProfitPBI //付客接单收益 14
FundSourceTypeDepositPayerAgentProfitPBI //付客代理收益 15
FundSourceTypeMatchExeptionFailedPFR //流水未匹配 16
FundSourceTypeManualExeptionHandlePUF //人工异常处理解冻 17
FundSourceTypeOperateAccountOBD //扣减内部财务账户金额 18
FundSourceTypeOperateAccountOBI //增加内部财务账户金额 19
FundSourceTypeCreateMerchantWithdrawMFR //商户提现 20
FundSourceTypeCreateMerchantWithdrawMUF //商户提现冻结 21
FundSourceTypeCreateMerchantWithdrawSuccessMBD //商户提现扣款 22
FundSourceTypeCreateMerchantWithdrawFeeMBD //商户提现手续费 23
FundSourceTypeMerchantAgentWithdrawProfitMBI //商户提现代理收益 24
FundSourceTypePayerSeckillingOrderPBI //付客秒宝贝 25
FundSourceTypePayerSeckillingOrderProfitPBI //付客秒宝贝收益 26
FundSourceTypePayerSeckillingOrderAgentProfitPBI //付客代理秒宝贝收益 27
FundSourceTypeCreateMerchantSettlementMFR //商户结算冻结 28
FundSourceTypeCreateMerchantSettlementMUF //商户结算解冻 29
FundSourceTypeCreateMerchantSettlementSuccessMBD //商户结算扣款 30
FundSourceTypeCreateMerchantSettlementFeeMFR //商户结算手续费冻结 31
FundSourceTypeCreateMerchantSettlementFeeMUF //商户结算手续费解冻 32
FundSourceTypeCreateMerchantSettlementFeeMBD //商户结算手续费 33
FundSourceTypeCreateMerchantInjectionMBI //商户注资加款 34
FundSourceTypeManualAdjustMerchantBanlanceMBI //人工商户余额账户增加 35
FundSourceTypeManualAdjustMerchantBanlanceMBD //人工商户余额账户减少 36
FundSourceTypeManualAdjustMerchantFreezeMFI //人工商户冻结账户增加 37
FundSourceTypeManualAdjustMerchantFreezeMFD //人工商户冻结账户减少 38
FundSourceTypeManualAdjustPayerBanlancePBI //付客填报调整增加 39
FundSourceTypeManualAdjustPayerBanlancePBD //付客填报调整减少 40
FundSourceTypeManualAdjustPayerFreezePFI //付客填报调整冻结增加 41
FundSourceTypeManualAdjustPayerFreezePFD //付客填报调整冻结减少 42
FundSourceTypeManualActivityPayerBanlancePBI //付客填报活动增加 43
FundSourceTypeManualActivityPayerBanlancePBD //付客填报活动减少 44
FundSourceTypeManualActivityPayerBanlancePFI //付客填报活动冻结增加 45
FundSourceTypeManualActivityPayerBanlancePFD //付客填报活动冻结减少 46
FundSourceTypeTransferOutPayerBanlancePBD //付客转出 47
FundSourceTypeTransferInPayerBanlancePBI //付客转入 48
FundSourceTypePayerDepositPBI //付客充值 49
FundSourceTypeCreatePayerWithdrawPFR //付客提现冻结 50
FundSourceTypePayerWithdrawPUF //付客提现解冻 51
FundSourceTypePayerWithdrawSuccessPBD //付客提现扣款 52
FundSourceTypePayerWithdrawFeePBD //付客提现手续费 53
FundSourceTypeOperateAccountInOBI //财务账号转入 54
FundSourceTypeOperateAccountOutOFR //财务账号转出冻结 55
FundSourceTypeOperateAccountOutOUF //财务账号转出解冻 56
FundSourceTypeOperateAccountOutOBD //财务账号转出 57
)
const (
_ RemarkType = iota
RemarkTypePayer //付客 1
RemarkTypeOrder //订单 2
RemarkTypeOperateAcc //运营账户 3
RemarkTypeFill //填报备注 4
RemarkTypeExceptionRec //异常记录 5
RemarkTypeFillImg //填报备注 6
)
const (
_ OrderType = iota
OrderTypeDeposit //充值 收入 1
OrderTypeWithdraw //提现 支出 2
)
const (
_ OrderState = iota
OrderStatesFreeze //冻结 1
OrderStatesNew //未处理 2
OrderStatesProcessing //处理中3
OrderStatesPreComplete //预完成4
OrderStatesPreClose //预关闭5
OrderStatesCompleted //完成6
OrderStatesPartialCompleted //部分完成7
OrderStatesClose //关闭8
OrderStatesCloseByPayer //付客关闭9
)
const (
_ PaymentState = iota
PaymentStatesNew //初始状态 , 冻结三方帐号资金
PaymentStatesSettleFailed //结算失败
PaymentStatesSettled //已结算, 三方扣款
PaymentStatesAbandoned //丢弃的, 解冻三方帐号资金
)
const (
_ AccountType = iota
TypeThirdparty //三方
TypeBank //银行卡
TypeMerchant //商户
TypeQrCode //二维码
)
const (
_ Switch = iota
On //开
Off //关
)
const (
_ FillType = iota
Activity //活动
Adjust //调整
Transfer //转账
)
const (
_ FillState = iota
FillStatePending //未处理 1
FillStateProcessing //处理中 2
FillStateComplete //已完成 3
FillStateClose //已关闭 4
)
const (
_ FillTypeForMerchant = iota
FillPayment //付款
FillGathering //收款
)
const (
_ PushState = iota
Pushing //推送中 1
PushSuccess //推送成功2
PushFailed //推送失败3
)
const (
_ BuildInType = iota
BuildIn //内置类型 1
Real //真实类型 2
)
const (
_ MatchState = iota
MatchedNew //新建 1
AutoMatched //自动匹配 2
NotMatch //未匹配 3
MatchedClosed //已关闭 4
ManualMatched //人工匹配 5
ExcepitonMatch //异常 6
SmsMatch //需要短信匹配 7
)
const (
_ ErrType = iota
MerchantHuabei //商户版花呗
WrongProofNo //错误版流水号
OutofMemory //内存不足
)
const (
Unlimited AmountFormat = iota // 无限制
Integer // 整数
TwoDicemal // 两位小数
FixedAmount // 固定金额
)
const (
_ PayerComplainState = iota
PayerComplainStateInit //未处理
PayerComplainStateHandled //已处理
PayerComplainStateClosed //已关闭
)
const (
QRRawType = "raw" //原生二维码
QRPlatformType = "platform" //新二维码
)
const (
RequestResolveFailed gin.ErrorType = 401 //请求解析失败
MerchantResolveFailed gin.ErrorType = 402 //商户解析失败
SginVerifyFailed gin.ErrorType = 403 //请求校验失败
IPVerifyFailed gin.ErrorType = 404 //IP校验失败
QueryChannelListFailed gin.ErrorType = 9000 //查询通道失败
QueryOrderFailed gin.ErrorType = 9001 //查询订单失败
WithdrawVerifyFailed gin.ErrorType = 9002 //提现请求失败
ThirdPartyFailed gin.ErrorType = 10001 //三方支付请求失败
ChannelIsNotSupported gin.ErrorType = 10002 //请求的通道不被支持
OrderVerifyFailed gin.ErrorType = 10003 //订单验证失败
MerchantInsufficientBalance gin.ErrorType = 10003 //商户余额不足
OrderFailedApprove gin.ErrorType = 10004 //提现订单被拒
OrderCreateFailed gin.ErrorType = 10005 //创建订单失败
OrderRefuse gin.ErrorType = 10006 //拒绝支付
)
func InitialModels() {
log.Info("Register models")
t := []interface{}{
new(Admin),
}
datasource.RegisterModels(t...)
}
| annel Channel) ToChannelType() ChannelType {
if c | identifier_body |
init.go | package model
import (
"github.com/gin-gonic/gin"
"github.com/terryli1643/apidemo/libs/datasource"
log "github.com/terryli1643/apidemo/libs/logger"
)
// swagger:model
type (
Device int
UserType int
AccountState int
MerchantState int
SettlementRecordState int
RecordType int
BankAccountState int
AmountFormat int
TradingMode int
FundchangeType int
FundSourceType int
RemarkType int
PayerState int
PayerType int
TerminalType int
Location int
OrderType int
Switch int
PayerProfitsState int
ChannelState int
OperateAccountType int
RateState int
ChannelType int
Channel int
RateChannel int
OrderState int
PaymentState int
AccountType int
OperateAccountOrderType int
FinancialSubjectType int
OperateAccountOrderState int
FillType int
FillState int
TransferType int
Online int
ParamType int
ParamState int
PushState int
BuildInType int
MatchState int
OperationType int
SourceType int
BuildInMerchant int
Domain int
FillchangeType int
FillTypeForMerchant int
IsAgent int
CompleteState int
TypeJpush int
IsFill int
ErrType int
QueryTimeType int
QRType int
IsPointType int
PointUporDown int
FixedCode int
AgentRecalculateProfitState int
OrderSettled int
YesOrNo int
PayerComplainState int
)
const (
_ AgentRecalculateProfitState = iota
AgentRecalculateProfitStateSettle //未发放
AgentRecalculateProfitStateAllSettled //全部已发放
)
const (
_ OrderSettled = iota
Unsettle
Settled
)
const (
RoleAnonymous = "ROLE_ANONYMOUS" // 陌生人
RoleAdmin = "ROLE_ADMIN" // 管理员
)
type IEnum interface {
Val() int
}
const (
IDSpaceUser = "user"
IDSpaceOrder = "order"
IDSpaceRecommendCode = "recommendcode"
IDSpaceReq = "req"
IDSPaceRole = "role"
)
const (
USD = "USD" //美元
EUR = "EUR" //欧元
JPY = "JPY" //日元
GBP = "GBP" //英镑
CHF = "CHF" //瑞士法郎
AUD = "AUD" //澳大利亚元
NZD = "NZD" //新西兰元
CAD = "CAD" //加拿大元
CNY = "CNY" //人民币
RUB = "RUB" //卢比
HKD = "HKD" //港币
IDR = "IDR" //印尼盾
KRW = "KRW" //韩国元
SAR = "SAR" //亚尔
THB = "THB" //泰铢
)
const (
_ Device = iota
PC //来源终端: 电脑 1
IOS //来源终端: 苹果手机 2
Android //来源终端: 安卓手机 3
)
const (
_ YesOrNo = iota
Yes
No
)
const (
_ OperationType = iota
OperationTypeQuery //查询
OperationTypeCreate //添加
OperationTypeUpdate //编辑
OperationTypeDelete //删除
OperationTypeUnknown //未知
)
const (
_ SourceType = iota
CLIENT //来源终端: 后台系统 1
APP //来源终端: APP 2
MerchantAPI //来源终端: 商户API 3
ExeAPI //来源终端: ExeAPI 4
)
const (
_ TypeJpush = iota
GoodsNotice //来源终端: 后台系统 1
ApprovalNotice //来源终端: APP 2
OfflineNotice //来源终端: 商户API 3
)
const (
_ IsFill = iota
IsFillNo //不是填报 1
IsFillYes //是填报 2
)
const (
_ FixedCode = iota
FixedCodeNo //不是固码 1
FixedCodeYes //固定二维码 2
)
const (
_ PointUporDown = iota
Up //
Down //
)
const (
_ IsPointType = iota
IsPointTypeNo //不是小数点形式 1
IsPointTypeYes //是小数点形式 2
)
const (
_ BuildInMerchant = iota
BuildInMerchantYes // 内置商户
BuildInMerchantNo // 真实商户
)
const (
_ AccountState = iota
AccountEnable // 正常 1
AccountDisable // 帐号冻结 2
)
const (
_ CompleteState = iota
Complete //绑定 1
UnComplete //未完成 2
CompleteStateDetete //删除 3
)
const (
_ Domain = iota
DomainHt // 后台 1
DomainAPI // api 2
DomainAPP // APP 3
)
const (
_ OperateAccountOrderType = iota
OperateAccountOrderTypeConverted // 内转 1
OperateAccountOrderTypeOutSide //填报 2
)
const (
_ TransferType = iota
TransferTypeIn // 转入 1
TransferTypeOut //转出 2
)
const (
_ Online = iota
POnline // 在线 1
POffline // 离线 2
PDropline // 掉线 3
)
const (
_ ParamType = iota
BizParam //业务参数
SysParam //系统参数
)
const (
_ ParamState = iota
ParamEnable
ParamDisable
)
const (
_ Location = iota
CHINA //中国 1
TAIWAN //台湾 2
XIANGGANG //香港 3
US //美国 4
VIETNAM //越南 5
THAILAND //泰国 6
KOREA //韩国 7
)
const (
_ BankAccountState = iota
BankAccountStateCreate //待审核 1
BankAccountStateEnable // 审核通过 2
BankAccountStateDisable // 审核不通过 3
)
const (
_ FinancialSubjectType = iota
FinancialSubjectTypeForeign // 外汇支出 1
FinancialSubjectTypeAmount // 费用支出 2
FinancialSubjectTypeFinancialInjection // 财务注入 3
FinancialSubjectTypeAdditionalIncome // 额外收入 4
)
const (
_ QueryTimeType = iota
Today // 今天 1
Yesterday // 昨天 2
LastSevenDays // 最近七天 3
LastOneMonth // 最近一月 4
)
const (
_ OperateAccountType = iota
OperateAccountTypeSh //商户卡 1
OperateAccountTypeZj //资金卡 2
OperateAccountTypeSf //收付卡 3
)
const (
_ UserType = iota
UserTypeMerchent //商户 1
UserTypePayer //付客 2
UserTypeOperateAcc //财务账户 3
UserTypeAdmin //管理员 4
UserTypeAgent //付客代理 5
)
const (
_ IsAgent = iota
IsAgentY //代理1
IsAgentN //非代理2
)
const (
_ RateState = iota
RateStateEnable //启用 1
RateStateDisable //禁用 2
)
const (
_ ChannelType = iota
ChannelTypeWeChat //微信
ChannelTypeAlipay //支付宝
ChannelTypeBank //银行
ChannelTypeJy //金燕E商
ChannelTypeYSF //云闪付
)
const (
AlipayH5 = 1 //支付宝H5
Alipay = 2 //支付宝
AlipayKs = 3 //支付宝快速
AlipayCard = 4 //支付宝卡收
UNIONPAY = 5 //云闪付
JY = 6 //金燕e商
WeiXin = 7 //微信
STUNIONPAY = 8 //st云闪付
CMBC = 1001 //中国民生银行
ICBC = 1002 //中国工商银行
BOC = 1003 //中国银行
BOCOM = 1004 //交通银行
PINGAN = 1005 //中国平安银行
CMB = 1006 //招商银行
ABC = 1007 //中国农业银行
CCB = 1008 //中国建设银行
PSBC = 1009 //中国邮政储蓄银行
CEBB = 1010 //中国光大银行
CIB = 1011 //兴业银行
SPDB = 1012 //浦发银行
CGB = 1013 //广发银行
CITIC = 1014 //中信银行
HXB = 1015 //华夏银行
BCCB = 1016 //北京银行
BOSC = 1017 //上海银行
GZCB = 1018 //广州银行
CZB = 1019 //网商银行
BANK = 2000 //银行
)
var channelNameMap = map[int]string{
AlipayH5: "支付宝H5",
Alipay: "支付宝",
AlipayKs: "支付宝快速",
AlipayCard: "支付宝转卡",
UNIONPAY: "云闪付",
JY: "金燕e商",
WeiXin: "微信",
STUNIONPAY: "ST云闪付",
CMBC: "民生银行",
ICBC: "工商银行",
BOC: "中国银行",
BOCOM: "交通银行",
PINGAN: "平安银行",
CMB: "招商银行",
ABC: "农业银行",
CCB: "建设银行",
PSBC: "邮政储蓄银行",
CEBB: "光大银行",
CIB: "兴业银行",
SPDB: "浦发银行",
CGB: "广发银行",
CITIC: "中信银行",
HXB: "华夏银行",
BCCB: "北京银行",
BOSC: "上海银行",
GZCB: "广州银行",
CZB: "网商银行",
BANK: "银行",
}
var channelValueMap = map[string]int{
"AlipayH5": AlipayH5,
"Alipay": Alipay,
"AlipayKs": AlipayKs,
"AlipayCard": AlipayCard,
"CMBC": CMBC,
"ICBC": ICBC,
"BOC": BOC,
"BOCOM": BOCOM,
"PINGAN": PINGAN,
"CMB": CMB,
"ABC": ABC,
"CCB": CCB,
"PSBC": PSBC,
"CEBB": CEBB,
"CIB": CIB,
"SPDB": SPDB,
"CGB": CGB,
"CITIC": CITIC,
"HXB": HXB,
"BCCB": BCCB,
"BOSC": BOSC,
"GZCB": GZCB,
"CZB": CZB,
"UNIONPAY": UNIONPAY,
"STUNIONPAY": STUNIONPAY,
"JY": JY,
"WECHAT": WeiXin,
"BANK": BANK,
}
var channelCodeMap = map[int]string{
AlipayH5: "AlipayH5",
Alipay: "Alipay",
AlipayKs: "AlipayKs",
AlipayCard: "AlipayCard",
CMBC: "CMBC", | PINGAN: "PINGAN",
CMB: "CMB",
ABC: "ABC",
CCB: "CCB",
PSBC: "PSBC",
CEBB: "CEBB",
CIB: "CIB",
SPDB: "SPDB",
CGB: "CGB",
CITIC: "CITIC",
HXB: "HXB",
BCCB: "BCCB",
BOSC: "BOSC",
GZCB: "GZCB",
CZB: "CZB",
UNIONPAY: "UNIONPAY",
STUNIONPAY: "STUNIONPAY",
JY: "JY",
WeiXin: "WECHAT",
BANK: "BANK",
}
func (channel Channel) ToChannelName() string {
ch := channelNameMap[int(channel)]
return ch
}
func (channel Channel) ToChannelCode() string {
ch := channelCodeMap[int(channel)]
return ch
}
func ToChannel(channelCode string) Channel {
ch := channelValueMap[channelCode]
return Channel(ch)
}
func (rateChannel RateChannel) ToChannelName() string {
ch := channelNameMap[int(rateChannel)]
return ch
}
func PkgToBankCode(bankname string) string {
channelMap := map[string]string{}
channelMap["com.eg.android.AlipayGphone"] = "Alipay"
channelMap["com.tencent.mm"] = "WeiXin"
channelMap["com.chinamworld.bocmbci"] = "BOC"
channelMap["cmb.pb"] = "CMB"
channelMap["com.mybank.android.phone"] = "CZB"
channelMap["com.chinamworld.main"] = "CCB"
channelMap["com.icbc"] = "ICBC"
channelMap["com.android.bankabc"] = "ABC"
channelMap["com.bankcomm.Bankcomm"] = "BOCOM"
channelMap["com.unionpay"] = "UNIONPAY"
channelMap["com.hnnx.sh.mbank"] = "JY"
channelMap["com.yitong.mbank.psbc"] = "PSBC"
channelMap["cn.com.spdb.mobilebank.per"] = "SPDB"
ch := channelMap[bankname]
return ch
}
func (channel Channel) ToChannelType() ChannelType {
if channel == AlipayH5 {
return ChannelTypeAlipay
}
if channel == Alipay {
return ChannelTypeAlipay
}
if channel == AlipayKs {
return ChannelTypeAlipay
}
if channel == JY {
return ChannelTypeJy
}
if channel == AlipayCard {
return ChannelTypeBank
}
if channel == UNIONPAY {
return ChannelTypeYSF
}
if channel == STUNIONPAY {
return ChannelTypeYSF
}
if channel == WeiXin {
return ChannelTypeWeChat
}
return ChannelTypeBank
}
func (channel Channel) ToRateChannel() RateChannel {
if channel > 1000 {
return BANK
}
return RateChannel(channel)
}
func (channel RateChannel) ToChannelType() ChannelType {
if channel == AlipayH5 {
return ChannelTypeAlipay
}
if channel == WeiXin {
return ChannelTypeWeChat
}
if channel == Alipay {
return ChannelTypeAlipay
}
if channel == AlipayKs {
return ChannelTypeAlipay
}
if channel == AlipayCard {
return ChannelTypeBank
}
if channel == JY {
return ChannelTypeJy
}
if channel == UNIONPAY {
return ChannelTypeYSF
}
if channel == STUNIONPAY {
return ChannelTypeYSF
}
if channel == WeiXin {
return ChannelTypeWeChat
}
return ChannelTypeBank
}
const (
_ PayerState = iota
PayerStateEnable //启用 1
PayerStateDisable //禁用 2
)
const (
_ ChannelState = iota
ChannelStateEnable //启用 1
ChannelStateDisable //禁用 2
)
const (
_ PayerProfitsState = iota
PayerProfitsStateEnable //启用 1
PayerProfitsStateDisable //禁用 2
)
const (
_ PayerType = iota
PayerTypeMember //会员 1
PayerTypeVIP //VIP 2
)
const (
_ TerminalType = iota
TerminalTypeRrf //rrfApp 1
TerminalTypeDg //代购app 2
)
const (
_ OperateAccountOrderState = iota
OperateAccountOrderStateCreate //新建
OperateAccountOrderStateSuccess //审核通过 1
OperateAccountOrderStateFail //审核不通过 2
)
const (
_ SettlementRecordState = iota
SettlementRecordStatePending //未处理 1
SettlementRecordStateProcessing //处理中 2
SettlementRecordStatePreCompleted //预完成 3
SettlementRecordStatePreClose //预关闭 4
SettlementRecordStateComplete //已完成 5
SettlementRecordStateClose //已关闭 6
)
const (
_ RecordType = iota
RecordTypeInjection //注资 1
RecordTypeSettlement //结算 2
)
const (
_ TradingMode = iota
TradingModeByHand //手动 1
TradingModeByAPI //API 2
)
const (
_ FillchangeType = iota
FillIncrease //增加余额
FillDecrease //减少余额
FillFrezeIncrease //增加冻结
FillFrezeDecrease //减少冻结
)
const (
_ FundchangeType = iota
BalanceIncrease //账户余额增加 1
BalanceDecrease //账户余额减少 2
FrozenIncrease //账户冻结余额增加 3
FrozenDecrease //账户冻结余额减少 4
Frozen //冻结 5
UnFrozen //解冻 6
)
const (
_ FundSourceType = iota
FundSourceTypeDeposit //订单充值 1 (已废弃)
FundSourceTypeWithdraw //订单提现 2 (已废弃)
FundSourceTypeExeRecord //异常记录 3 (已废弃)
FundSourceTypeSettlement //结算/注资 4 (已废弃)
FundSourceTypeFill //付客填报 5 (已废弃)
FundSourceTypeManual //人工填报 6 (已废弃)
FundSourceTypeAgent //代理收益 7 (已废弃)
FundSourceTypeDepositMBI //商户充值 8
FundSourceTypeDepositFeeMBD //商户充值手续费 9
FundSourceTypeMerchantAgentDepositProfitMBI //商户代理充值收益 10
FundSourceTypePayerAssignedOrderPFR //付客接单冻结 11
FundSourceTypePayerAssignedOrderPUF //付客接单解冻 12
FundSourceTypePayerReceivedMoneySuccessPBD //付客收款成功 13
FundSourceTypeDepositPayerProfitPBI //付客接单收益 14
FundSourceTypeDepositPayerAgentProfitPBI //付客代理收益 15
FundSourceTypeMatchExeptionFailedPFR //流水未匹配 16
FundSourceTypeManualExeptionHandlePUF //人工异常处理解冻 17
FundSourceTypeOperateAccountOBD //扣减内部财务账户金额 18
FundSourceTypeOperateAccountOBI //增加内部财务账户金额 19
FundSourceTypeCreateMerchantWithdrawMFR //商户提现 20
FundSourceTypeCreateMerchantWithdrawMUF //商户提现冻结 21
FundSourceTypeCreateMerchantWithdrawSuccessMBD //商户提现扣款 22
FundSourceTypeCreateMerchantWithdrawFeeMBD //商户提现手续费 23
FundSourceTypeMerchantAgentWithdrawProfitMBI //商户提现代理收益 24
FundSourceTypePayerSeckillingOrderPBI //付客秒宝贝 25
FundSourceTypePayerSeckillingOrderProfitPBI //付客秒宝贝收益 26
FundSourceTypePayerSeckillingOrderAgentProfitPBI //付客代理秒宝贝收益 27
FundSourceTypeCreateMerchantSettlementMFR //商户结算冻结 28
FundSourceTypeCreateMerchantSettlementMUF //商户结算解冻 29
FundSourceTypeCreateMerchantSettlementSuccessMBD //商户结算扣款 30
FundSourceTypeCreateMerchantSettlementFeeMFR //商户结算手续费冻结 31
FundSourceTypeCreateMerchantSettlementFeeMUF //商户结算手续费解冻 32
FundSourceTypeCreateMerchantSettlementFeeMBD //商户结算手续费 33
FundSourceTypeCreateMerchantInjectionMBI //商户注资加款 34
FundSourceTypeManualAdjustMerchantBanlanceMBI //人工商户余额账户增加 35
FundSourceTypeManualAdjustMerchantBanlanceMBD //人工商户余额账户减少 36
FundSourceTypeManualAdjustMerchantFreezeMFI //人工商户冻结账户增加 37
FundSourceTypeManualAdjustMerchantFreezeMFD //人工商户冻结账户减少 38
FundSourceTypeManualAdjustPayerBanlancePBI //付客填报调整增加 39
FundSourceTypeManualAdjustPayerBanlancePBD //付客填报调整减少 40
FundSourceTypeManualAdjustPayerFreezePFI //付客填报调整冻结增加 41
FundSourceTypeManualAdjustPayerFreezePFD //付客填报调整冻结减少 42
FundSourceTypeManualActivityPayerBanlancePBI //付客填报活动增加 43
FundSourceTypeManualActivityPayerBanlancePBD //付客填报活动减少 44
FundSourceTypeManualActivityPayerBanlancePFI //付客填报活动冻结增加 45
FundSourceTypeManualActivityPayerBanlancePFD //付客填报活动冻结减少 46
FundSourceTypeTransferOutPayerBanlancePBD //付客转出 47
FundSourceTypeTransferInPayerBanlancePBI //付客转入 48
FundSourceTypePayerDepositPBI //付客充值 49
FundSourceTypeCreatePayerWithdrawPFR //付客提现冻结 50
FundSourceTypePayerWithdrawPUF //付客提现解冻 51
FundSourceTypePayerWithdrawSuccessPBD //付客提现扣款 52
FundSourceTypePayerWithdrawFeePBD //付客提现手续费 53
FundSourceTypeOperateAccountInOBI //财务账号转入 54
FundSourceTypeOperateAccountOutOFR //财务账号转出冻结 55
FundSourceTypeOperateAccountOutOUF //财务账号转出解冻 56
FundSourceTypeOperateAccountOutOBD //财务账号转出 57
)
const (
_ RemarkType = iota
RemarkTypePayer //付客 1
RemarkTypeOrder //订单 2
RemarkTypeOperateAcc //运营账户 3
RemarkTypeFill //填报备注 4
RemarkTypeExceptionRec //异常记录 5
RemarkTypeFillImg //填报备注 6
)
const (
_ OrderType = iota
OrderTypeDeposit //充值 收入 1
OrderTypeWithdraw //提现 支出 2
)
const (
_ OrderState = iota
OrderStatesFreeze //冻结 1
OrderStatesNew //未处理 2
OrderStatesProcessing //处理中3
OrderStatesPreComplete //预完成4
OrderStatesPreClose //预关闭5
OrderStatesCompleted //完成6
OrderStatesPartialCompleted //部分完成7
OrderStatesClose //关闭8
OrderStatesCloseByPayer //付客关闭9
)
const (
_ PaymentState = iota
PaymentStatesNew //初始状态 , 冻结三方帐号资金
PaymentStatesSettleFailed //结算失败
PaymentStatesSettled //已结算, 三方扣款
PaymentStatesAbandoned //丢弃的, 解冻三方帐号资金
)
const (
_ AccountType = iota
TypeThirdparty //三方
TypeBank //银行卡
TypeMerchant //商户
TypeQrCode //二维码
)
const (
_ Switch = iota
On //开
Off //关
)
const (
_ FillType = iota
Activity //活动
Adjust //调整
Transfer //转账
)
const (
_ FillState = iota
FillStatePending //未处理 1
FillStateProcessing //处理中 2
FillStateComplete //已完成 3
FillStateClose //已关闭 4
)
const (
_ FillTypeForMerchant = iota
FillPayment //付款
FillGathering //收款
)
const (
_ PushState = iota
Pushing //推送中 1
PushSuccess //推送成功2
PushFailed //推送失败3
)
const (
_ BuildInType = iota
BuildIn //内置类型 1
Real //真实类型 2
)
const (
_ MatchState = iota
MatchedNew //新建 1
AutoMatched //自动匹配 2
NotMatch //未匹配 3
MatchedClosed //已关闭 4
ManualMatched //人工匹配 5
ExcepitonMatch //异常 6
SmsMatch //需要短信匹配 7
)
const (
_ ErrType = iota
MerchantHuabei //商户版花呗
WrongProofNo //错误版流水号
OutofMemory //内存不足
)
const (
Unlimited AmountFormat = iota // 无限制
Integer // 整数
TwoDicemal // 两位小数
FixedAmount // 固定金额
)
const (
_ PayerComplainState = iota
PayerComplainStateInit //未处理
PayerComplainStateHandled //已处理
PayerComplainStateClosed //已关闭
)
const (
QRRawType = "raw" //原生二维码
QRPlatformType = "platform" //新二维码
)
const (
RequestResolveFailed gin.ErrorType = 401 //请求解析失败
MerchantResolveFailed gin.ErrorType = 402 //商户解析失败
SginVerifyFailed gin.ErrorType = 403 //请求校验失败
IPVerifyFailed gin.ErrorType = 404 //IP校验失败
QueryChannelListFailed gin.ErrorType = 9000 //查询通道失败
QueryOrderFailed gin.ErrorType = 9001 //查询订单失败
WithdrawVerifyFailed gin.ErrorType = 9002 //提现请求失败
ThirdPartyFailed gin.ErrorType = 10001 //三方支付请求失败
ChannelIsNotSupported gin.ErrorType = 10002 //请求的通道不被支持
OrderVerifyFailed gin.ErrorType = 10003 //订单验证失败
MerchantInsufficientBalance gin.ErrorType = 10003 //商户余额不足
OrderFailedApprove gin.ErrorType = 10004 //提现订单被拒
OrderCreateFailed gin.ErrorType = 10005 //创建订单失败
OrderRefuse gin.ErrorType = 10006 //拒绝支付
)
func InitialModels() {
log.Info("Register models")
t := []interface{}{
new(Admin),
}
datasource.RegisterModels(t...)
} | ICBC: "ICBC",
BOC: "BOC",
BOCOM: "BOCOM", | random_line_split |
init.go | package model
import (
"github.com/gin-gonic/gin"
"github.com/terryli1643/apidemo/libs/datasource"
log "github.com/terryli1643/apidemo/libs/logger"
)
// swagger:model
type (
Device int
UserType int
AccountState int
MerchantState int
SettlementRecordState int
RecordType int
BankAccountState int
AmountFormat int
TradingMode int
FundchangeType int
FundSourceType int
RemarkType int
PayerState int
PayerType int
TerminalType int
Location int
OrderType int
Switch int
PayerProfitsState int
ChannelState int
OperateAccountType int
RateState int
ChannelType int
Channel int
RateChannel int
OrderState int
PaymentState int
AccountType int
OperateAccountOrderType int
FinancialSubjectType int
OperateAccountOrderState int
FillType int
FillState int
TransferType int
Online int
ParamType int
ParamState int
PushState int
BuildInType int
MatchState int
OperationType int
SourceType int
BuildInMerchant int
Domain int
FillchangeType int
FillTypeForMerchant int
IsAgent int
CompleteState int
TypeJpush int
IsFill int
ErrType int
QueryTimeType int
QRType int
IsPointType int
PointUporDown int
FixedCode int
AgentRecalculateProfitState int
OrderSettled int
YesOrNo int
PayerComplainState int
)
const (
_ AgentRecalculateProfitState = iota
AgentRecalculateProfitStateSettle //未发放
AgentRecalculateProfitStateAllSettled //全部已发放
)
const (
_ OrderSettled = iota
Unsettle
Settled
)
const (
RoleAnonymous = "ROLE_ANONYMOUS" // 陌生人
RoleAdmin = "ROLE_ADMIN" // 管理员
)
type IEnum interface {
Val() int
}
const (
IDSpaceUser = "user"
IDSpaceOrder = "order"
IDSpaceRecommendCode = "recommendcode"
IDSpaceReq = "req"
IDSPaceRole = "role"
)
const (
USD = "USD" //美元
EUR = "EUR" //欧元
JPY = "JPY" //日元
GBP = "GBP" //英镑
CHF = "CHF" //瑞士法郎
AUD = "AUD" //澳大利亚元
NZD = "NZD" //新西兰元
CAD = "CAD" //加拿大元
CNY = "CNY" //人民币
RUB = "RUB" //卢比
HKD = "HKD" //港币
IDR = "IDR" //印尼盾
KRW = "KRW" //韩国元
SAR = "SAR" //亚尔
THB = "THB" //泰铢
)
const (
_ Device = iota
PC //来源终端: 电脑 1
IOS //来源终端: 苹果手机 2
Android //来源终端: 安卓手机 3
)
const (
_ YesOrNo = iota
Yes
No
)
const (
_ OperationType = iota
OperationTypeQuery //查询
OperationTypeCreate //添加
OperationTypeUpdate //编辑
OperationTypeDelete //删除
OperationTypeUnknown //未知
)
const (
_ SourceType = iota
CLIENT //来源终端: 后台系统 1
APP //来源终端: APP 2
MerchantAPI //来源终端: 商户API 3
ExeAPI //来源终端: ExeAPI 4
)
const (
_ TypeJpush = iota
GoodsNotice //来源终端: 后台系统 1
ApprovalNotice //来源终端: APP 2
OfflineNotice //来源终端: 商户API 3
)
const (
_ IsFill = iota
IsFillNo //不是填报 1
IsFillYes //是填报 2
)
const (
_ FixedCode = iota
FixedCodeNo //不是固码 1
FixedCodeYes //固定二维码 2
)
const (
_ PointUporDown = iota
Up //
Down //
)
const (
_ IsPointType = iota
IsPointTypeNo //不是小数点形式 1
IsPointTypeYes //是小数点形式 2
)
const (
_ BuildInMerchant = iota
BuildInMerchantYes // 内置商户
BuildInMerchantNo // 真实商户
)
const (
_ AccountState = iota
AccountEnable // 正常 1
AccountDisable // 帐号冻结 2
)
const (
_ CompleteState = iota
Complete //绑定 1
UnComplete //未完成 2
CompleteStateDetete //删除 3
)
const (
_ Domain = iota
DomainHt // 后台 1
DomainAPI // api 2
DomainAPP // APP 3
)
const (
_ OperateAccountOrderType = iota
OperateAccountOrderTypeConverted // 内转 1
OperateAccountOrderTypeOutSide //填报 2
)
const (
_ TransferType = iota
TransferTypeIn // 转入 1
TransferTypeOut //转出 2
)
const (
_ Online = iota
POnline // 在线 1
POffline // 离线 2
PDropline // 掉线 3
)
const (
_ ParamType = iota
BizParam //业务参数
SysParam //系统参数
)
const (
_ ParamState = iota
ParamEnable
ParamDisable
)
const (
_ Location = iota
CHINA //中国 1
TAIWAN //台湾 2
XIANGGANG //香港 3
US //美国 4
VIETNAM //越南 5
THAILAND //泰国 6
KOREA //韩国 7
)
const (
_ BankAccountState = iota
BankAccountStateCreate //待审核 1
BankAccountStateEnable // 审核通过 2
BankAccountStateDisable // 审核不通过 3
)
const (
_ FinancialSubjectType = iota
FinancialSubjectTypeForeign // 外汇支出 1
FinancialSubjectTypeAmount // 费用支出 2
FinancialSubjectTypeFinancialInjection // 财务注入 3
FinancialSubjectTypeAdditionalIncome // 额外收入 4
)
const (
_ QueryTimeType = iota
Today // 今天 1
Yesterday // 昨天 2
LastSevenDays // 最近七天 3
LastOneMonth // 最近一月 4
)
const (
_ OperateAccountType = iota
OperateAccountTypeSh //商户卡 1
OperateAccountTypeZj //资金卡 2
OperateAccountTypeSf //收付卡 3
)
const (
_ UserType = iota
UserTypeMerchent //商户 1
UserTypePayer //付客 2
UserTypeOperateAcc //财务账户 3
UserTypeAdmin //管理员 4
UserTypeAgent //付客代理 5
)
const (
_ IsAgent = iota
IsAgentY //代理1
IsAgentN //非代理2
)
const (
_ RateState = iota
RateStateEnable //启用 1
RateStateDisable //禁用 2
)
const (
_ ChannelType = iota
ChannelTypeWeChat //微信
ChannelTypeAlipay //支付宝
ChannelTypeBank //银行
ChannelTypeJy //金燕E商
ChannelTypeYSF //云闪付
)
const (
AlipayH5 = 1 //支付宝H5
Alipay = 2 //支付宝
AlipayKs = 3 //支付宝快速
AlipayCard = 4 //支付宝卡收
UNIONPAY = 5 //云闪付
JY = 6 //金燕e商
WeiXin = 7 //微信
STUNIONPAY = 8 //st云闪付
CMBC = 1001 //中国民生银行
ICBC = 1002 //中国工商银行
BOC = 1003 //中国银行
BOCOM = 1004 //交通银行
PINGAN = 1005 //中国平安银行
CMB = 1006 //招商银行
ABC = 1007 //中国农业银行
CCB = 1008 //中国建设银行
PSBC = 1009 //中国邮政储蓄银行
CEBB = 1010 //中国光大银行
CIB = 1011 //兴业银行
SPDB = 1012 //浦发银行
CGB = 1013 //广发银行
CITIC = 1014 //中信银行
HXB = 1015 //华夏银行
BCCB = 1016 //北京银行
BOSC = 1017 //上海银行
GZCB = 1018 //广州银行
CZB = 1019 //网商银行
BANK = 2000 //银行
)
var channelNameMap = map[int]string{
AlipayH5: "支付宝H5",
Alipay: "支付宝",
AlipayKs: "支付宝快速",
AlipayCard: "支付宝转卡",
UNIONPAY: "云闪付",
JY: "金燕e商",
WeiXin: "微信",
STUNIONPAY: "ST云闪付",
CMBC: "民生银行",
ICBC: "工商银行",
BOC: "中国银行",
BOCOM: "交通银行",
PINGAN: "平安银行",
CMB: "招商银行",
ABC: "农业银行",
CCB: "建设银行",
PSBC: "邮政储蓄银行",
CEBB: "光大银行",
CIB: "兴业银行",
SPDB: "浦发银行",
CGB: "广发银行",
CITIC: "中信银行",
HXB: "华夏银行",
BCCB: "北京银行",
BOSC: "上海银行",
GZCB: "广州银行",
CZB: "网商银行",
BANK: "银行",
}
var channelValueMap = map[string]int{
"AlipayH5": AlipayH5,
"Alipay": Alipay,
"AlipayKs": AlipayKs,
"AlipayCard": AlipayCard,
"CMBC": CMBC,
"ICBC": ICBC,
"BOC": BOC,
"BOCOM": BOCOM,
"PINGAN": PINGAN,
"CMB": CMB,
"ABC": ABC,
"CCB": CCB,
"PSBC": PSBC,
"CEBB": CEBB,
"CIB": CIB,
"SPDB": SPDB,
"CGB": CGB,
"CITIC": CITIC,
"HXB": HXB,
"BCCB": BCCB,
"BOSC": BOSC,
"GZCB": GZCB,
"CZB": CZB,
"UNIONPAY": UNIONPAY,
"STUNIONPAY": STUNIONPAY,
"JY": JY,
"WECHAT": WeiXin,
"BANK": BANK,
}
var channelCodeMap = map[int]string{
AlipayH5: "AlipayH5",
Alipay: "Alipay",
AlipayKs: "AlipayKs",
AlipayCard: "AlipayCard",
CMBC: "CMBC",
ICBC: "ICBC",
BOC: "BOC",
BOCOM: "BOCOM",
PINGAN: "PINGAN",
CMB: "CMB",
ABC: "ABC",
CCB: "CCB",
PSBC: "PSBC",
CEBB: "CEBB",
CIB: "CIB",
SPDB: "SPDB",
CGB: "CGB",
CITIC: "CITIC",
HXB: "HXB",
BCCB: "BCCB",
BOSC: "BOSC",
GZCB: "GZCB",
CZB: "CZB",
UNIONPAY: "UNIONPAY",
STUNIONPAY: "STUNIONPAY",
JY: "JY",
WeiXin: "WECHAT",
BANK: "BANK",
}
func (channel Channel) ToChannelName() string {
ch := channelNameMap[int(channel)]
return ch
}
func (channel Channel) ToChannelCode() string {
ch := channelCodeMap[int(channel)]
return ch
}
func ToChannel(channelCode string) Channel {
ch := channelValueMap[channelCode]
return Channel(ch)
}
func (rateChannel RateChannel) ToChannelName() string {
ch := channelNameMap[int(rateChannel)]
return ch
}
func PkgToBankCode(bankname string) string {
channelMap := map[string]string{}
channelMap["com.eg.android.AlipayGphone"] = "Alipay"
channelMap["com.tencent.mm"] = "WeiXin"
channelMap["com.chinamworld.bocmbci"] = "BOC"
channelMap["cmb.pb"] = "CMB"
channelMap["com.mybank.android.phone"] = "CZB"
channelMap["com.chinamworld.main"] = "CCB"
channelMap["com.icbc"] = "ICBC"
channelMap["com.android.bankabc"] = "ABC"
channelMap["com.bankcomm.Bankcomm"] = "BOCOM"
channelMap["com.unionpay"] = "UNIONPAY"
channelMap["com.hnnx.sh.mbank"] = "JY"
channelMap["com.yitong.mbank.psbc"] = "PSBC"
channelMap["cn.com.spdb.mobilebank.per"] = "SPDB"
ch := channelMap[bankname]
return ch
}
func (channel Channel) ToChannelType() ChannelType {
if channel == AlipayH5 {
return ChannelTypeAlipay
}
if channel == Alipay {
return ChannelTypeAlipay
}
if channel == AlipayKs {
return ChannelTypeAlipay
}
if channel == JY {
return ChannelTypeJy
}
if channel == AlipayCard {
return ChannelTypeBank
}
if channel == UNIONPAY {
return ChannelTypeYSF
}
if channel == STUNIONPAY {
return ChannelTypeYSF
}
if channel == WeiXin {
return ChannelTypeWeChat
}
return ChannelTypeBank
}
func (channel Channel) ToRateChannel() RateChannel {
if channel > 1000 {
return BANK
}
return RateChannel(channel)
}
func (channel RateChannel) ToChannelType() ChannelType {
if channel == AlipayH5 {
return ChannelTypeAlipay
}
if channel == WeiXin {
return ChannelTypeWeChat
}
if channel == Alipay {
return ChannelTypeAlipay
}
if channel == AlipayKs {
return ChannelTypeAlipay
}
if channel == AlipayCard {
return ChannelTypeBank
}
if channel == JY {
return ChannelTypeJy
}
if channel == UNIONPAY {
return ChannelTypeYSF
}
if channel == STUNIONPAY {
return ChannelTypeYSF
}
if channel == WeiXin {
return ChannelTypeWeChat
}
return ChannelTypeBank
}
const (
_ PayerState = iota
PayerStateEnable //启用 1
PayerStateDisable //禁用 2
)
const (
_ ChannelState = iota
ChannelStateEnable //启用 1
ChannelStateDisable //禁用 2
)
const (
_ PayerProfitsState = iota
PayerProfitsStateEnable //启用 1
PayerProfitsStateDisable //禁用 2
)
const (
_ PayerType = iota
PayerTypeMember //会员 1
PayerTypeVIP //VIP 2
)
const (
_ TerminalType = iota
TerminalTypeRrf //rrfApp 1
TerminalTypeDg //代购app 2
)
const (
_ OperateAccountOrderState = iota
OperateAccountOrderStateCreate //新建
OperateAccountOrderStateSuccess //审核通过 1
OperateAccountOrderStateFail //审核不通过 2
)
const (
_ SettlementRecordState = iota
SettlementRecordStatePending //未处理 1
SettlementRecordStateProcessing //处理中 2
SettlementRecordStatePreCompleted //预完成 3
SettlementRecordStatePreClose //预关闭 4
SettlementRecordStateComplete //已完成 5
SettlementRecordStateClose //已关闭 6
)
const (
_ RecordType = iota
RecordTypeInjection //注资 1
RecordTypeSettlement //结算 2
)
const (
_ TradingMode = iota
TradingModeByHand //手动 1
TradingModeByAPI //API 2
)
const (
_ FillchangeType = iota
FillIncrease //增加余额
FillDecrease //减少余额
FillFrezeIncrease //增加冻结
FillFrezeDecrease //减少冻结
)
const (
_ FundchangeType = iota
BalanceIncrease //账户余额增加 1
BalanceDecrease //账户余额减少 2
FrozenIncrease //账户冻结余额增加 3
FrozenDecrease //账户冻结余额减少 4
Frozen //冻结 5
UnFrozen //解冻 6
)
const (
_ FundSourceType = iota
FundSourceTypeDeposit //订单充值 1 (已废弃)
FundSourceTypeWithdraw //订单提现 2 (已废弃)
FundSourceTypeExeRecord //异常记录 3 (已废弃)
FundSourceTypeSettlement //结算/注资 4 (已废弃)
FundSourceTypeFill //付客填报 5 (已废弃)
FundSourceTypeManual //人工填报 6 (已废弃)
FundSourceTypeAgent //代理收益 7 (已废弃)
FundSourceTypeDepositMBI //商户充值 8
FundSourceTypeDepositFeeMBD //商户充值手续费 9
FundSourceTypeMerchantAgentDepositProfitMBI //商户代理充值收益 10
FundSourceTypePayerAssignedOrderPFR //付客接单冻结 11
FundSourceTypePayerAssignedOrderPUF //付客接单解冻 12
FundSourceTypePayerReceivedMoneySuccessPBD //付客收款成功 13
FundSourceTypeDepositPayerProfitPBI //付客接单收益 14
FundSourceTypeDepositPayerAgentProfitPBI //付客代理收益 15
FundSourceTypeMatchExeptionFailedPFR //流水未匹配 16
FundSourceTypeManualExeptionHandlePUF //人工异常处理解冻 17
FundSourceTypeOperateAccountOBD //扣减内部财务账户金额 18
FundSourceTypeOperateAccountOBI //增加内部财务账户金额 19
FundSourceTypeCreateMerchantWithdrawMFR //商户提现 20
FundSourceTypeCreateMerchantWithdrawMUF //商户提现冻结 21
FundSourceTypeCreateMerchantWithdrawSuccessMBD //商户提现扣款 22
FundSourceTypeCreateMerchantWithdrawFeeMBD //商户提现手续费 23
FundSourceTypeMerchantAgentWithdrawProfitMBI //商户提现代理收益 24
FundSourceTypePayerSeckillingOrderPBI //付客秒宝贝 25
FundSourceTypePayerSeckillingOrderProfitPBI //付客秒宝贝收益 26
FundSourceTypePayerSeckillingOrderAgentProfitPBI //付客代理秒宝贝收益 27
FundSourceTypeCreateMerchantSettlementMFR //商户结算冻结 28
FundSourceTypeCreateMerchantSettlementMUF //商户结算解冻 29
FundSourceTypeCreateMerchantSettlementSuccessMBD //商户结算扣款 30
FundSourceTypeCreateMerchantSettlementFeeMFR //商户结算手续费冻结 31
FundSourceTypeCreateMerchantSettlementFeeMUF //商户结算手续费解冻 32
FundSourceTypeCreateMerchantSettlementFeeMBD //商户结算手续费 33
FundSourceTypeCreateMerchantInjectionMBI //商户注资加款 34
FundSourceTypeManualAdjustMerchantBanlanceMBI //人工商户余额账户增加 35
FundSourceTypeManualAdjustMerchantBanlanceMBD //人工商户余额账户减少 36
FundSourceTypeManualAdjustMerchantFreezeMFI //人工商户冻结账户增加 37
FundSourceTypeManualAdjustMerchantFreezeMFD //人工商户冻结账户减少 38
FundSourceTypeManualAdjustPayerBanlancePBI //付客填报调整增加 39
FundSourceTypeManualAdjustPayerBanlancePBD //付客填报调整减少 40
FundSourceTypeManualAdjustPayerFreezePFI //付客填报调整冻结增加 41
FundSourceTypeManualAdjustPayerFreezePFD //付客填报调整冻结减少 42
FundSourceTypeManualActivityPayerBanlancePBI //付客填报活动增加 43
FundSourceTypeManualActivityPayerBanlancePBD //付客填报活动减少 44
FundSourceTypeManualActivityPayerBanlancePFI //付客填报活动冻结增加 45
FundSourceTypeManualActivityPayerBanlancePFD //付客填报活动冻结减少 46
FundSourceTypeTransferOutPayerBanlancePBD //付客转出 47
FundSourceTypeTransferInPayerBanlancePBI //付客转入 48
FundSourceTypePayerDepositPBI //付客充值 49
FundSourceTypeCreatePayerWithdrawPFR //付客提现冻结 50
FundSourceTypePayerWithdrawPUF //付客提现解冻 51
FundSourceTypePayerWithdrawSuccessPBD //付客提现扣款 52
FundSourceTypePayerWithdrawFeePBD //付客提现手续费 53
FundSourceTypeOperateAccountInOBI //财务账号转入 54
FundSourceTypeOperateAccountOutOFR //财务账号转出冻结 55
FundSourceTypeOperateAccountOutOUF //财务账号转出解冻 56
FundSourceTypeOperateAccountOutOBD //财务账号转出 57
)
const (
_ RemarkType = iota
RemarkTypePayer //付客 1
RemarkTypeOrder //订单 2
RemarkTypeOperateAcc //运营账户 3
RemarkTypeFill //填报备注 4
RemarkTypeExceptionRec //异常记录 5
RemarkTypeFillImg //填报备注 6
)
const (
_ OrderType = iota
OrderTypeDeposit //充值 收入 1
OrderTypeWithdraw //提现 支出 2
)
const (
_ OrderState = iota
OrderStatesFreeze //冻结 1
OrderStatesNew //未处理 2
OrderStatesProcessing //处理中3
OrderStatesPreComplete //预完成4
OrderStatesPreClose //预关闭5
OrderStatesCompleted //完成6
OrderStatesPartialCompleted //部分完成7
OrderStatesClose //关闭8
OrderStatesCloseByPayer //付客关闭9
)
const (
_ PaymentState = iota
PaymentStatesNew //初始状态 , 冻结三方帐号资金
PaymentStatesSettleFailed //结算失败
PaymentStatesSettled //已结算, 三方扣款
PaymentStatesAbandoned //丢弃的, 解冻三方帐号资金
)
const (
_ AccountType = iota
TypeThirdparty //三方
TypeBank //银行卡
TypeMerchant //商户
TypeQrCode //二维码
)
const (
_ Switch = iota
On //开
Off //关
)
const (
_ FillType = iota
Activity //活动
Adjust //调整
Transfer //转账
)
const (
_ FillState = iota
FillStatePending //未处理 1
FillStateProcessing //处理中 2
FillStateComplete //已完成 3
FillStateClose //已关闭 4
)
const (
_ FillTypeForMerchant = iota
FillPayment //付款
FillGathering //收款
)
const (
_ PushState = iota
Pushing //推送中 1
PushSuccess //推送成功2
PushFailed //推送失败3
)
const (
_ BuildInType = iota
BuildIn //内置类型 1
Real //真实类型 2
)
const (
_ MatchState = iota
MatchedNew //新建 1
AutoMatched //自动匹配 2
NotMatch //未匹配 3
MatchedClosed //已关闭 4
ManualMatched //人工匹配 5
ExcepitonMatch //异常 6
SmsMatch //需要短信匹配 7
)
const (
_ ErrType = iota
MerchantHuabei //商户版花呗
WrongProofNo //错误版流水号
OutofMemory //内存不足
)
const (
Unlimited AmountFormat = iota // 无限制
Integer // 整数
TwoDicemal // 两位小数
FixedAmount // 固定金额
)
const (
_ PayerComplainState = iota
PayerComplainStateInit //未处理
PayerComplainStateHandled //已处理
PayerComplainStateClosed //已关闭
)
const (
QRRawType = "raw" //原生二维码
QRPlatformType = "platform" //新二维码
)
const (
RequestResolveFailed gin.ErrorType = 401 //请求解析失败
MerchantResolveFailed gin.ErrorType = 402 //商户解析失败
SginVerifyFailed gin.ErrorType = 403 //请求校验失败
IPVerifyFailed gin.ErrorType = 404 //IP校验失败
QueryChannelListFailed gin.ErrorType = 9000 //查询通道失败
QueryOrderFailed gin.ErrorType = 9001 //查询订单失败
WithdrawVerifyFailed gin.ErrorType = 9002 //提现请求失败
ThirdPartyFailed gin.ErrorType = 10001 //三方支付请求失败
ChannelIsNotSupported gin.ErrorType = 10002 //请求的通道不被支持
OrderVerifyFailed gin.ErrorType = 10003 //订单验证失败
MerchantInsufficientBalance gin.ErrorType = 10003 //商户余额不足
OrderFailedApprove gin.ErrorType = 10004 //提现订单被拒
OrderCreateFailed gin.ErrorType = 10005 //创建订单失败
OrderRefuse gin.ErrorType = 10006 //拒绝支付
)
func InitialModels() {
log.Info("Register models")
t := []interface{}{
new(Admin),
}
datasource.RegisterModels(t...)
}
| identifier_name | ||
init.go | package model
import (
"github.com/gin-gonic/gin"
"github.com/terryli1643/apidemo/libs/datasource"
log "github.com/terryli1643/apidemo/libs/logger"
)
// swagger:model
type (
Device int
UserType int
AccountState int
MerchantState int
SettlementRecordState int
RecordType int
BankAccountState int
AmountFormat int
TradingMode int
FundchangeType int
FundSourceType int
RemarkType int
PayerState int
PayerType int
TerminalType int
Location int
OrderType int
Switch int
PayerProfitsState int
ChannelState int
OperateAccountType int
RateState int
ChannelType int
Channel int
RateChannel int
OrderState int
PaymentState int
AccountType int
OperateAccountOrderType int
FinancialSubjectType int
OperateAccountOrderState int
FillType int
FillState int
TransferType int
Online int
ParamType int
ParamState int
PushState int
BuildInType int
MatchState int
OperationType int
SourceType int
BuildInMerchant int
Domain int
FillchangeType int
FillTypeForMerchant int
IsAgent int
CompleteState int
TypeJpush int
IsFill int
ErrType int
QueryTimeType int
QRType int
IsPointType int
PointUporDown int
FixedCode int
AgentRecalculateProfitState int
OrderSettled int
YesOrNo int
PayerComplainState int
)
const (
_ AgentRecalculateProfitState = iota
AgentRecalculateProfitStateSettle //未发放
AgentRecalculateProfitStateAllSettled //全部已发放
)
const (
_ OrderSettled = iota
Unsettle
Settled
)
const (
RoleAnonymous = "ROLE_ANONYMOUS" // 陌生人
RoleAdmin = "ROLE_ADMIN" // 管理员
)
type IEnum interface {
Val() int
}
const (
IDSpaceUser = "user"
IDSpaceOrder = "order"
IDSpaceRecommendCode = "recommendcode"
IDSpaceReq = "req"
IDSPaceRole = "role"
)
const (
USD = "USD" //美元
EUR = "EUR" //欧元
JPY = "JPY" //日元
GBP = "GBP" //英镑
CHF = "CHF" //瑞士法郎
AUD = "AUD" //澳大利亚元
NZD = "NZD" //新西兰元
CAD = "CAD" //加拿大元
CNY = "CNY" //人民币
RUB = "RUB" //卢比
HKD = "HKD" //港币
IDR = "IDR" //印尼盾
KRW = "KRW" //韩国元
SAR = "SAR" //亚尔
THB = "THB" //泰铢
)
const (
_ Device = iota
PC //来源终端: 电脑 1
IOS //来源终端: 苹果手机 2
Android //来源终端: 安卓手机 3
)
const (
_ YesOrNo = iota
Yes
No
)
const (
_ OperationType = iota
OperationTypeQuery //查询
OperationTypeCreate //添加
OperationTypeUpdate //编辑
OperationTypeDelete //删除
OperationTypeUnknown //未知
)
const (
_ SourceType = iota
CLIENT //来源终端: 后台系统 1
APP //来源终端: APP 2
MerchantAPI //来源终端: 商户API 3
ExeAPI //来源终端: ExeAPI 4
)
const (
_ TypeJpush = iota
GoodsNotice //来源终端: 后台系统 1
ApprovalNotice //来源终端: APP 2
OfflineNotice //来源终端: 商户API 3
)
const (
_ IsFill = iota
IsFillNo //不是填报 1
IsFillYes //是填报 2
)
const (
_ FixedCode = iota
FixedCodeNo //不是固码 1
FixedCodeYes //固定二维码 2
)
const (
_ PointUporDown = iota
Up //
Down //
)
const (
_ IsPointType = iota
IsPointTypeNo //不是小数点形式 1
IsPointTypeYes //是小数点形式 2
)
const (
_ BuildInMerchant = iota
BuildInMerchantYes // 内置商户
BuildInMerchantNo // 真实商户
)
const (
_ AccountState = iota
AccountEnable // 正常 1
AccountDisable // 帐号冻结 2
)
const (
_ CompleteState = iota
Complete //绑定 1
UnComplete //未完成 2
CompleteStateDetete //删除 3
)
const (
_ Domain = iota
DomainHt // 后台 1
DomainAPI // api 2
DomainAPP // APP 3
)
const (
_ OperateAccountOrderType = iota
OperateAccountOrderTypeConverted // 内转 1
OperateAccountOrderTypeOutSide //填报 2
)
const (
_ TransferType = iota
TransferTypeIn // 转入 1
TransferTypeOut //转出 2
)
const (
_ Online = iota
POnline // 在线 1
POffline // 离线 2
PDropline // 掉线 3
)
const (
_ ParamType = iota
BizParam //业务参数
SysParam //系统参数
)
const (
_ ParamState = iota
ParamEnable
ParamDisable
)
const (
_ Location = iota
CHINA //中国 1
TAIWAN //台湾 2
XIANGGANG //香港 3
US //美国 4
VIETNAM //越南 5
THAILAND //泰国 6
KOREA //韩国 7
)
const (
_ BankAccountState = iota
BankAccountStateCreate //待审核 1
BankAccountStateEnable // 审核通过 2
BankAccountStateDisable // 审核不通过 3
)
const (
_ FinancialSubjectType = iota
FinancialSubjectTypeForeign // 外汇支出 1
FinancialSubjectTypeAmount // 费用支出 2
FinancialSubjectTypeFinancialInjection // 财务注入 3
FinancialSubjectTypeAdditionalIncome // 额外收入 4
)
const (
_ QueryTimeType = iota
Today // 今天 1
Yesterday // 昨天 2
LastSevenDays // 最近七天 3
LastOneMonth // 最近一月 4
)
const (
_ OperateAccountType = iota
OperateAccountTypeSh //商户卡 1
OperateAccountTypeZj //资金卡 2
OperateAccountTypeSf //收付卡 3
)
const (
_ UserType = iota
UserTypeMerchent //商户 1
UserTypePayer //付客 2
UserTypeOperateAcc //财务账户 3
UserTypeAdmin //管理员 4
UserTypeAgent //付客代理 5
)
const (
_ IsAgent = iota
IsAgentY //代理1
IsAgentN //非代理2
)
const (
_ RateState = iota
RateStateEnable //启用 1
RateStateDisable //禁用 2
)
const (
_ ChannelType = iota
ChannelTypeWeChat //微信
ChannelTypeAlipay //支付宝
ChannelTypeBank //银行
ChannelTypeJy //金燕E商
ChannelTypeYSF //云闪付
)
const (
AlipayH5 = 1 //支付宝H5
Alipay = 2 //支付宝
AlipayKs = 3 //支付宝快速
AlipayCard = 4 //支付宝卡收
UNIONPAY = 5 //云闪付
JY = 6 //金燕e商
WeiXin = 7 //微信
STUNIONPAY = 8 //st云闪付
CMBC = 1001 //中国民生银行
ICBC = 1002 //中国工商银行
BOC = 1003 //中国银行
BOCOM = 1004 //交通银行
PINGAN = 1005 //中国平安银行
CMB = 1006 //招商银行
ABC = 1007 //中国农业银行
CCB = 1008 //中国建设银行
PSBC = 1009 //中国邮政储蓄银行
CEBB = 1010 //中国光大银行
CIB = 1011 //兴业银行
SPDB = 1012 //浦发银行
CGB = 1013 //广发银行
CITIC = 1014 //中信银行
HXB = 1015 //华夏银行
BCCB = 1016 //北京银行
BOSC = 1017 //上海银行
GZCB = 1018 //广州银行
CZB = 1019 //网商银行
BANK = 2000 //银行
)
var channelNameMap = map[int]string{
AlipayH5: "支付宝H5",
Alipay: "支付宝",
AlipayKs: "支付宝快速",
AlipayCard: "支付宝转卡",
UNIONPAY: "云闪付",
JY: "金燕e商",
WeiXin: "微信",
STUNIONPAY: "ST云闪付",
CMBC: "民生银行",
ICBC: "工商银行",
BOC: "中国银行",
BOCOM: "交通银行",
PINGAN: "平安银行",
CMB: "招商银行",
ABC: "农业银行",
CCB: "建设银行",
PSBC: "邮政储蓄银行",
CEBB: "光大银行",
CIB: "兴业银行",
SPDB: "浦发银行",
CGB: "广发银行",
CITIC: "中信银行",
HXB: "华夏银行",
BCCB: "北京银行",
BOSC: "上海银行",
GZCB: "广州银行",
CZB: "网商银行",
BANK: "银行",
}
var channelValueMap = map[string]int{
"AlipayH5": AlipayH5,
"Alipay": Alipay,
"AlipayKs": AlipayKs,
"AlipayCard": AlipayCard,
"CMBC": CMBC,
"ICBC": ICBC,
"BOC": BOC,
"BOCOM": BOCOM,
"PINGAN": PINGAN,
"CMB": CMB,
"ABC": ABC,
"CCB": CCB,
"PSBC": PSBC,
"CEBB": CEBB,
"CIB": CIB,
"SPDB": SPDB,
"CGB": CGB,
"CITIC": CITIC,
"HXB": HXB,
"BCCB": BCCB,
"BOSC": BOSC,
"GZCB": GZCB,
"CZB": CZB,
"UNIONPAY": UNIONPAY,
"STUNIONPAY": STUNIONPAY,
"JY": JY,
"WECHAT": WeiXin,
"BANK": BANK,
}
var channelCodeMap = map[int]string{
AlipayH5: "AlipayH5",
Alipay: "Alipay",
AlipayKs: "AlipayKs",
AlipayCard: "AlipayCard",
CMBC: "CMBC",
ICBC: "ICBC",
BOC: "BOC",
BOCOM: "BOCOM",
PINGAN: "PINGAN",
CMB: "CMB",
ABC: "ABC",
CCB: "CCB",
PSBC: "PSBC",
CEBB: "CEBB",
CIB: "CIB",
SPDB: "SPDB",
CGB: "CGB",
CITIC: "CITIC",
HXB: "HXB",
BCCB: "BCCB",
BOSC: "BOSC",
GZCB: "GZCB",
CZB: "CZB",
UNIONPAY: "UNIONPAY",
STUNIONPAY: "STUNIONPAY",
JY: "JY",
WeiXin: "WECHAT",
BANK: "BANK",
}
func (channel Channel) ToChannelName() string {
ch := channelNameMap[int(channel)]
return ch
}
func (channel Channel) ToChannelCode() string {
ch := channelCodeMap[int(channel)]
return ch
}
func ToChannel(channelCode string) Channel {
ch := channelValueMap[channelCode]
return Channel(ch)
}
func (rateChannel RateChannel) ToChannelName() string {
ch := channelNameMap[int(rateChannel)]
return ch
}
func PkgToBankCode(bankname string) string {
channelMap := map[string]string{}
channelMap["com.eg.android.AlipayGphone"] = "Alipay"
channelMap["com.tencent.mm"] = "WeiXin"
channelMap["com.chinamworld.bocmbci"] = "BOC"
channelMap["cmb.pb"] = "CMB"
channelMap["com.mybank.android.phone"] = "CZB"
channelMap["com.chinamworld.main"] = "CCB"
channelMap["com.icbc"] = "ICBC"
channelMap["com.android.bankabc"] = "ABC"
channelMap["com.bankcomm.Bankcomm"] = "BOCOM"
channelMap["com.unionpay"] = "UNIONPAY"
channelMap["com.hnnx.sh.mbank"] = "JY"
channelMap["com.yitong.mbank.psbc"] = "PSBC"
channelMap["cn.com.spdb.mobilebank.per"] = "SPDB"
ch := channelMap[bankname]
return ch
}
func (channel Channel) ToChannelType() ChannelType {
if channel == AlipayH5 {
return ChannelTypeAlipay
}
if channel == Alipay {
return ChannelTypeAlipay
}
if channel == AlipayKs {
return ChannelTypeAlipay
}
if channel == JY {
return ChannelTypeJy
}
if channel == AlipayCard {
return ChannelTypeBank
}
if channel == UNIONPAY {
return ChannelTypeYSF
}
if channel == STUNIONPAY {
return ChannelTypeYSF
}
if channel == WeiXin {
return ChannelTypeWeChat
}
return ChannelTypeBank
}
func (channel Channel) ToRateChannel() RateChannel {
if channel > 1000 {
return BANK
}
return RateChannel(channel)
}
func (channel RateChannel) ToChannelType() ChannelType {
if channel == AlipayH5 {
return ChannelTypeAlipay
}
if channel == WeiXin {
return ChannelTypeWeChat
}
if channel == Alipay {
return ChannelTypeAlipay
}
if channel == AlipayKs {
return ChannelTypeAlipay
}
if channel == AlipayCard {
return ChannelTypeBank
}
if channel == JY {
return ChannelTypeJy
}
if channel == UNIONPAY {
return ChannelTypeYSF
}
if channel == STUNIONPAY {
return ChannelTypeYSF
}
if channel == WeiXin {
return ChannelTypeWeChat
}
return ChannelTypeBank
}
const (
_ PayerState = iota
PayerStateEnable //启用 1
PayerStateDisable //禁用 2
)
const (
_ ChannelState = iota
ChannelStateEnable //启用 1
ChannelStateDisable //禁用 2
)
const (
_ PayerProfitsState = iota
PayerProfitsStateEnable //启用 1
PayerProfitsStateDisable //禁用 2
)
const (
_ | a
PayerTypeMember //会员 1
PayerTypeVIP //VIP 2
)
const (
_ TerminalType = iota
TerminalTypeRrf //rrfApp 1
TerminalTypeDg //代购app 2
)
const (
_ OperateAccountOrderState = iota
OperateAccountOrderStateCreate //新建
OperateAccountOrderStateSuccess //审核通过 1
OperateAccountOrderStateFail //审核不通过 2
)
const (
_ SettlementRecordState = iota
SettlementRecordStatePending //未处理 1
SettlementRecordStateProcessing //处理中 2
SettlementRecordStatePreCompleted //预完成 3
SettlementRecordStatePreClose //预关闭 4
SettlementRecordStateComplete //已完成 5
SettlementRecordStateClose //已关闭 6
)
const (
_ RecordType = iota
RecordTypeInjection //注资 1
RecordTypeSettlement //结算 2
)
const (
_ TradingMode = iota
TradingModeByHand //手动 1
TradingModeByAPI //API 2
)
const (
_ FillchangeType = iota
FillIncrease //增加余额
FillDecrease //减少余额
FillFrezeIncrease //增加冻结
FillFrezeDecrease //减少冻结
)
const (
_ FundchangeType = iota
BalanceIncrease //账户余额增加 1
BalanceDecrease //账户余额减少 2
FrozenIncrease //账户冻结余额增加 3
FrozenDecrease //账户冻结余额减少 4
Frozen //冻结 5
UnFrozen //解冻 6
)
const (
_ FundSourceType = iota
FundSourceTypeDeposit //订单充值 1 (已废弃)
FundSourceTypeWithdraw //订单提现 2 (已废弃)
FundSourceTypeExeRecord //异常记录 3 (已废弃)
FundSourceTypeSettlement //结算/注资 4 (已废弃)
FundSourceTypeFill //付客填报 5 (已废弃)
FundSourceTypeManual //人工填报 6 (已废弃)
FundSourceTypeAgent //代理收益 7 (已废弃)
FundSourceTypeDepositMBI //商户充值 8
FundSourceTypeDepositFeeMBD //商户充值手续费 9
FundSourceTypeMerchantAgentDepositProfitMBI //商户代理充值收益 10
FundSourceTypePayerAssignedOrderPFR //付客接单冻结 11
FundSourceTypePayerAssignedOrderPUF //付客接单解冻 12
FundSourceTypePayerReceivedMoneySuccessPBD //付客收款成功 13
FundSourceTypeDepositPayerProfitPBI //付客接单收益 14
FundSourceTypeDepositPayerAgentProfitPBI //付客代理收益 15
FundSourceTypeMatchExeptionFailedPFR //流水未匹配 16
FundSourceTypeManualExeptionHandlePUF //人工异常处理解冻 17
FundSourceTypeOperateAccountOBD //扣减内部财务账户金额 18
FundSourceTypeOperateAccountOBI //增加内部财务账户金额 19
FundSourceTypeCreateMerchantWithdrawMFR //商户提现 20
FundSourceTypeCreateMerchantWithdrawMUF //商户提现冻结 21
FundSourceTypeCreateMerchantWithdrawSuccessMBD //商户提现扣款 22
FundSourceTypeCreateMerchantWithdrawFeeMBD //商户提现手续费 23
FundSourceTypeMerchantAgentWithdrawProfitMBI //商户提现代理收益 24
FundSourceTypePayerSeckillingOrderPBI //付客秒宝贝 25
FundSourceTypePayerSeckillingOrderProfitPBI //付客秒宝贝收益 26
FundSourceTypePayerSeckillingOrderAgentProfitPBI //付客代理秒宝贝收益 27
FundSourceTypeCreateMerchantSettlementMFR //商户结算冻结 28
FundSourceTypeCreateMerchantSettlementMUF //商户结算解冻 29
FundSourceTypeCreateMerchantSettlementSuccessMBD //商户结算扣款 30
FundSourceTypeCreateMerchantSettlementFeeMFR //商户结算手续费冻结 31
FundSourceTypeCreateMerchantSettlementFeeMUF //商户结算手续费解冻 32
FundSourceTypeCreateMerchantSettlementFeeMBD //商户结算手续费 33
FundSourceTypeCreateMerchantInjectionMBI //商户注资加款 34
FundSourceTypeManualAdjustMerchantBanlanceMBI //人工商户余额账户增加 35
FundSourceTypeManualAdjustMerchantBanlanceMBD //人工商户余额账户减少 36
FundSourceTypeManualAdjustMerchantFreezeMFI //人工商户冻结账户增加 37
FundSourceTypeManualAdjustMerchantFreezeMFD //人工商户冻结账户减少 38
FundSourceTypeManualAdjustPayerBanlancePBI //付客填报调整增加 39
FundSourceTypeManualAdjustPayerBanlancePBD //付客填报调整减少 40
FundSourceTypeManualAdjustPayerFreezePFI //付客填报调整冻结增加 41
FundSourceTypeManualAdjustPayerFreezePFD //付客填报调整冻结减少 42
FundSourceTypeManualActivityPayerBanlancePBI //付客填报活动增加 43
FundSourceTypeManualActivityPayerBanlancePBD //付客填报活动减少 44
FundSourceTypeManualActivityPayerBanlancePFI //付客填报活动冻结增加 45
FundSourceTypeManualActivityPayerBanlancePFD //付客填报活动冻结减少 46
FundSourceTypeTransferOutPayerBanlancePBD //付客转出 47
FundSourceTypeTransferInPayerBanlancePBI //付客转入 48
FundSourceTypePayerDepositPBI //付客充值 49
FundSourceTypeCreatePayerWithdrawPFR //付客提现冻结 50
FundSourceTypePayerWithdrawPUF //付客提现解冻 51
FundSourceTypePayerWithdrawSuccessPBD //付客提现扣款 52
FundSourceTypePayerWithdrawFeePBD //付客提现手续费 53
FundSourceTypeOperateAccountInOBI //财务账号转入 54
FundSourceTypeOperateAccountOutOFR //财务账号转出冻结 55
FundSourceTypeOperateAccountOutOUF //财务账号转出解冻 56
FundSourceTypeOperateAccountOutOBD //财务账号转出 57
)
const (
_ RemarkType = iota
RemarkTypePayer //付客 1
RemarkTypeOrder //订单 2
RemarkTypeOperateAcc //运营账户 3
RemarkTypeFill //填报备注 4
RemarkTypeExceptionRec //异常记录 5
RemarkTypeFillImg //填报备注 6
)
const (
_ OrderType = iota
OrderTypeDeposit //充值 收入 1
OrderTypeWithdraw //提现 支出 2
)
const (
_ OrderState = iota
OrderStatesFreeze //冻结 1
OrderStatesNew //未处理 2
OrderStatesProcessing //处理中3
OrderStatesPreComplete //预完成4
OrderStatesPreClose //预关闭5
OrderStatesCompleted //完成6
OrderStatesPartialCompleted //部分完成7
OrderStatesClose //关闭8
OrderStatesCloseByPayer //付客关闭9
)
const (
_ PaymentState = iota
PaymentStatesNew //初始状态 , 冻结三方帐号资金
PaymentStatesSettleFailed //结算失败
PaymentStatesSettled //已结算, 三方扣款
PaymentStatesAbandoned //丢弃的, 解冻三方帐号资金
)
const (
_ AccountType = iota
TypeThirdparty //三方
TypeBank //银行卡
TypeMerchant //商户
TypeQrCode //二维码
)
const (
_ Switch = iota
On //开
Off //关
)
const (
_ FillType = iota
Activity //活动
Adjust //调整
Transfer //转账
)
const (
_ FillState = iota
FillStatePending //未处理 1
FillStateProcessing //处理中 2
FillStateComplete //已完成 3
FillStateClose //已关闭 4
)
const (
_ FillTypeForMerchant = iota
FillPayment //付款
FillGathering //收款
)
const (
_ PushState = iota
Pushing //推送中 1
PushSuccess //推送成功2
PushFailed //推送失败3
)
const (
_ BuildInType = iota
BuildIn //内置类型 1
Real //真实类型 2
)
const (
_ MatchState = iota
MatchedNew //新建 1
AutoMatched //自动匹配 2
NotMatch //未匹配 3
MatchedClosed //已关闭 4
ManualMatched //人工匹配 5
ExcepitonMatch //异常 6
SmsMatch //需要短信匹配 7
)
const (
_ ErrType = iota
MerchantHuabei //商户版花呗
WrongProofNo //错误版流水号
OutofMemory //内存不足
)
const (
Unlimited AmountFormat = iota // 无限制
Integer // 整数
TwoDicemal // 两位小数
FixedAmount // 固定金额
)
const (
_ PayerComplainState = iota
PayerComplainStateInit //未处理
PayerComplainStateHandled //已处理
PayerComplainStateClosed //已关闭
)
const (
QRRawType = "raw" //原生二维码
QRPlatformType = "platform" //新二维码
)
const (
RequestResolveFailed gin.ErrorType = 401 //请求解析失败
MerchantResolveFailed gin.ErrorType = 402 //商户解析失败
SginVerifyFailed gin.ErrorType = 403 //请求校验失败
IPVerifyFailed gin.ErrorType = 404 //IP校验失败
QueryChannelListFailed gin.ErrorType = 9000 //查询通道失败
QueryOrderFailed gin.ErrorType = 9001 //查询订单失败
WithdrawVerifyFailed gin.ErrorType = 9002 //提现请求失败
ThirdPartyFailed gin.ErrorType = 10001 //三方支付请求失败
ChannelIsNotSupported gin.ErrorType = 10002 //请求的通道不被支持
OrderVerifyFailed gin.ErrorType = 10003 //订单验证失败
MerchantInsufficientBalance gin.ErrorType = 10003 //商户余额不足
OrderFailedApprove gin.ErrorType = 10004 //提现订单被拒
OrderCreateFailed gin.ErrorType = 10005 //创建订单失败
OrderRefuse gin.ErrorType = 10006 //拒绝支付
)
func InitialModels() {
log.Info("Register models")
t := []interface{}{
new(Admin),
}
datasource.RegisterModels(t...)
}
| PayerType = iot | conditional_block |
lwp_nav.js |
// Time-stamp: "2007-04-16 02:31:52 AKDT sburke@cpan.org"
var Contents_Order = [
'index', "Perl & LWP",
'intro', "Introduction to the 2007 online edition",
'foreword', "Foreword (by Gisle Aas)",
'ch00_01', "Preface",
'ch00_02', "Structure of This Book",
'ch00_03', "Order of Chapters",
'ch00_04', "Important Standards Documents",
'ch00_05', "Conventions Used in This Book",
'ch00_06', "Comments & Questions",
'ch00_07', "Acknowledgments",
'ch01_01', "Introduction to Web Automation",
'ch01_02', "History of LWP",
'ch01_03', "Installing LWP",
'ch01_04', "Words of Caution",
'ch01_05', "LWP in Action",
'ch02_01', "Web Basics",
'ch02_02', "An HTTP Transaction",
'ch02_03', "LWP::Simple",
'ch02_04', "Fetching Documents Without LWP::Simple",
'ch02_05', "Example: AltaVista",
'ch02_06', "HTTP POST",
'ch02_07', "Example: Babelfish",
'ch03_01', "The LWP Class Model",
'ch03_02', "Programming with LWP Classes",
'ch03_03', "Inside the do_GET and do_POST Functions",
'ch03_04', "User Agents",
'ch03_05', "HTTP::Response Objects",
'ch03_06', "LWP Classes: Behind the Scenes",
'ch04_01', "URLs",
'ch04_02', "Relative URLs",
'ch04_03', "Converting Absolute URLs to Relative",
'ch04_04', "Converting Relative URLs to Absolute",
'ch05_01', "Forms",
'ch05_02', "LWP and GET Requests",
'ch05_03', "Automating Form Analysis",
'ch05_04', "Idiosyncrasies of HTML Forms",
'ch05_05', "POST Example: License Plates",
'ch05_06', "POST Example: ABEBooks.com",
'ch05_07', "File Uploads",
'ch05_08', "Limits on Forms",
'ch06_01', "Simple HTML Processing with Regular Expressions",
'ch06_02', "Regular Expression Techniques",
'ch06_03', "Troubleshooting",
'ch06_04', "When Regular Expressions Aren't Enough",
'ch06_05', "Example: Extracting Links from a Bookmark File",
'ch06_06', "Example: Extracting Links from Arbitrary HTML",
'ch06_07', "Example: Extracting Temperatures from Weather Underground",
'ch07_01', "HTML Processing with Tokens",
'ch07_02', "Basic HTML::TokeParser Use",
'ch07_03', "Individual Tokens",
'ch07_04', "Token Sequences",
'ch07_05', "More HTML::TokeParser Methods",
'ch07_06', "Using Extracted Text",
'ch08_01', "Tokenizing Walkthrough",
'ch08_02', "Getting the Data",
'ch08_03', "Inspecting the HTML",
'ch08_04', "First Code",
'ch08_05', "Narrowing In",
'ch08_06', "Rewrite for Features",
'ch08_07', "Alternatives",
'ch09_01', "HTML Processing with Trees",
'ch09_02', "HTML::TreeBuilder",
'ch09_03', "Processing",
'ch09_04', "Example: BBC News",
'ch09_05', "Example: Fresh Air",
'ch10_01', "Modifying HTML with Trees",
'ch10_02', "Deleting Images",
'ch10_03', "Detaching and Reattaching",
'ch10_04', "Attaching in Another Tree",
'ch10_05', "Creating New Elements",
'ch11_01', "Cookies, Authentication, and Advanced Requests",
'ch11_02', "Adding Extra Request Header Lines",
'ch11_03', "Authentication",
'ch11_04', "An HTTP Authentication Example:The Unicode Mailing Archive",
'ch12_01', "Spiders",
'ch12_02', "A User Agent for Robots",
'ch12_03', "Example: A Link-Checking Spider",
'ch12_04', "Ideas for Further Expansion",
'appa_01', "LWP Modules",
'appb_01', "HTTP Status Codes",
'appb_02', "200s: Successful",
'appb_03', "300s: Redirection",
'appb_04', "400s: Client Errors",
'appb_05', "500s: Server Errors",
'appc_01', "Common MIME Types",
'appd_01', "Language Tags",
'appe_01', "Common Content Encodings",
'appf_01', "ASCII Table",
'appg_01', "User's View of Object-Oriented Modules",
'appg_02', "Modules and Their Functional Interfaces",
'appg_03', "Modules with Object-Oriented Interfaces",
'appg_04', "What Can You Do with Objects?",
'appg_05', "What's in an Object?",
'appg_06', "What Is an Object Value?",
'appg_07', "So Why Do Some Modules Use Objects?",
'appg_08', "The Gory Details",
'colophon', "Colophon",
'copyrght', "Copyright",
'i-index', "Perl & LWP: Index",
'i-idx_0', "Index: Symbols & Numbers",
'i-idx_a', "Index: A",
'i-idx_b', "Index: B",
'i-idx_c', "Index: C",
'i-idx_d', "Index: D",
'i-idx_e', "Index: E",
'i-idx_f', "Index: F",
'i-idx_g', "Index: G",
'i-idx_h', "Index: H",
'i-idx_i', "Index: I",
'i-idx_j', "Index: J",
'i-idx_k', "Index: K",
'i-idx_l', "Index: L",
'i-idx_m', "Index: M",
'i-idx_n', "Index: N",
'i-idx_o', "Index: O",
'i-idx_p', "Index: P",
'i-idx_q', "Index: Q",
'i-idx_r', "Index: R",
'i-idx_s', "Index: S",
'i-idx_t', "Index: T",
'i-idx_u', "Index: U",
'i-idx_v', "Index: V",
'i-idx_w', "Index: W",
'i-idx_x', "Index: X",
'i-idx_y', "Index: Y",
'i-idx_z', "Index: Z"
];
//======================================================================
if(window.lwp_pageid) {
init_page(window.lwp_pageid);
} else {
window.status = "LWP book nav error: Couldn't find page ID!";
}
//======================================================================
function pid2url (pid) {
var url = pid.toString().replace(/^i-/,"") + ".htm";
if( pid_is_indexy( lwp_pageid || '') ) {
if( pid_is_indexy(pid) ) {
; // we're both in ./index
} else {
url = "../" + url;
}
} else {
if( pid_is_indexy(pid) ) {
url = "index/" + url;
} else {
; // we're both not in ./index
}
}
url = url.replace( /\bindex\.htm$/, 'index.html' );
return url;
}
function pid_is_chapter (pid) {
//if( (/^i-/).test(pid) ) return false;
if( ( /_01$/ ).test(pid) ) return true;
if( ( /_/ ).test(pid) ) return false;
return true;
}
function pid_is_indexy (pid) {
return( (/^i-/).test(pid) );
}
//======================================================================
var Prev_url, Prev_title, Up_url, Up_title, Next_url, Next_title;
var Next_is_chapter;
var Am_indexy, Root;
function init_pointers (page_pid) {
var prev_id, prev_title, prev_chapter_id, prev_chapter_title;
Am_indexy = pid_is_indexy(page_pid);
Root = Am_indexy ? "../" : "./";
Up_url = Root + "index.html";
Up_title = "Table of Contents";
Next_is_chapter = false;
for(var i = 0; i < Contents_Order.length; i+= 2) {
var this_pid = Contents_Order[i ];
var this_title = Contents_Order[i+1];
var am_chapter = pid_is_chapter(this_pid);
if(this_pid == page_pid) {
//alert("I'm " + page_pid + " = @" + i.toString());
if(i == Contents_Order.length - 2) {
;// special case: last page.
} else {
Next_is_chapter = pid_is_chapter(Contents_Order[i+2]);
Next_url = pid2url( Contents_Order[i+2] );
Next_title = Contents_Order[i+3] ;
}
if(prev_id) {
Prev_url = pid2url( prev_id );
Prev_title = prev_title;
}
if(!am_chapter) {
Up_url = pid2url( prev_chapter_id );
Up_title = prev_chapter_title;
}
break;
} else {
; // anything special to do?
}
prev_id = this_pid;
prev_title = this_title;
if(am_chapter) {
prev_chapter_id = this_pid;
prev_chapter_title = this_title;
}
}
return;
}
//======================================================================
function init_page (pid) {
init_pointers(pid);
init_head();
if(pid != "index") make_top_navbar();
return;
}
function init_head () {
var head;
var u = "favicon.png";
var atts = { 'rel':'icon', 'type': "image/png"};
if( document.getElementsByTagName ) { // DOM
head = document.getElementsByTagName('head').item(0);
} else if ( document.all ) { // MSIE horror
head = document.all['head'];
u = 'favicon.ico';
atts['type'] = 'image/vnd.microsoft.icon';
} else {
return;
}
atts['href'] = Root + u;
graft(head, ['link', atts]);
/* Too flaky just now:
and if we do put this in, make a cookie persist it across pages.
graft(head, ['link', {
'rel':"alternate stylesheet", 'type':"text/css",
'title': "white-on-black",
'href': (Root + "lwp_bw.css") }
]);
*/
return;
}
function _button (parent, myclass, desc, url, key, text) {
if(url) {
var atts = { 'href': url };
if(key) {
atts.accesskey = key;
atts.title = "alt-" + key +
" or click, to go to this " + desc + " section";
} else {
atts.title =
"click to go to this " + desc + " section";
}
parent.push( ['td.' + myclass, ['a', atts, text ] ] );
} else {
parent.push( ['td.' + myclass+ '.blank'] );
}
return;
}
function make_top_navbar () {
if(Am_indexy) return;
var div = ["tr"];
if( Prev_url && Prev_url == Up_url) Prev_url = '';
_button(div, 'prevlink', 'previous', Prev_url, 'p', Prev_title );
_button(div, 'uplink', 'higher' , Up_url , 'u', Up_title );
_button(div, 'nextlink', 'next' , Next_url, 'n', Next_title );
if(div.length > 1)
graft( document.body, ['table.navbar.topnavbar', ['tbody', div]] );
return;
}
//======================================================================
function | () {
var div = ["p.morelink"];
if(Am_indexy) return;
if(Next_url) {
if( Next_is_chapter ) {
div.push( "The next chapter is: " );
div.push( ['a', {'href': Next_url}, Next_title] );
div.push( ['br'] );
div.push( "or go up to " );
div.push( ['a', {'href': "index.html"}, "the Table of Contents"] );
} else {
div.push( "Continue to section: " );
div.push( ['a', {'href': Next_url}, Next_title] );
}
} else {
graft( document.body, ['h3.the_end', "The End"] );
}
if(div.length > 1) graft( document.body, div );
return;
}
//======================================================================
function make_bottom_lastmod () {
if(!window.LastUpdate) return;
graft( document.body, ["p.morelink", "Last update: " + LastUpdate]);
return;
}
//======================================================================
function endpage () {
// do anything? Add stylesheets?
if(!window.lwp_pageid) return;
if(window.lwp_pageid == "index") {
make_bottom_lastmod();
} else {
make_bottom_navbar();
}
return true;
}
//======================================================================
// Library functions of mine ...
function complaining () {
var _ = [];
for(var i = 0; i < arguments.length; i++) { _.push(arguments[i]) }
_ = _.join("");
if(! _.length) out = "Unknown error!?!";
void alert(_);
return new Error(_,_);
}
function id (name,doc) { // find element with the given ID, else exception.
var object = id_try(name,doc);
if( ! object ) throw complaining("Failed to find element with id='"
+ name + "' in " + (doc || document).location );
return object;
}
function id_try (name,doc) {
var object = (doc || document).getElementById(name);
return object;
}
function graft (parent, t, doc) {
// graft( somenode, [ "I like ", ['em', { 'class':"stuff" },"stuff"], " oboy!"] )
//if(!doc) doc = parent.ownerDocument ? parent.ownerDocument : document;
doc = (doc || parent.ownerDocument || document);
var e;
if(t == undefined) {
if(parent == undefined) throw complaining("Can't graft an undefined value");
} else if(t.constructor == String) {
e = doc.createTextNode( t );
} else if(t.length == 0) {
e = doc.createElement( "span" );
e.setAttribute( "class", "fromEmptyLOL" );
} else {
for(var i = 0; i < t.length; i++) {
if( i == 0 && t[i].constructor == String ) {
var snared;
snared = t[i].match( /^([a-z][a-z0-9]*)\.([^\s]+)$/i );
if( snared ) {
e = doc.createElement( snared[1] );
e.setAttribute( 'class', snared[2].replace(/\./g, " "));
continue;
}
snared = t[i].match( /^([a-z][a-z0-9]*)$/i );
if( snared ) {
e = doc.createElement( snared[1] ); // but no class
continue;
}
// Otherwise:
e = doc.createElement( "span" );
e.setAttribute( "class", "namelessFromLOL" );
}
if( t[i] == undefined ) {
throw complaining("Can't graft an undefined value in a list!");
} else if( t[i].constructor == String || t[i].constructor == Array ) {
graft( e, t[i], doc );
} else if( t[i].constructor == Number ) {
graft( e, t[i].toString(), doc );
} else if( t[i].constructor == Object ) {
// turn this hash's properties:values into attributes of this element
for(var k in t[i]) e.setAttribute( k, t[i][k] );
} else {
// TODO: make it accept already-made DOM nodes? by checking "nodeType" in i?
throw complaining( "Object " + t[i] + " is inscrutable as an graft arglet." );
}
}
}
parent.appendChild( e );
return e;
}
//======================================================================
| make_bottom_navbar | identifier_name |
lwp_nav.js |
// Time-stamp: "2007-04-16 02:31:52 AKDT sburke@cpan.org"
var Contents_Order = [
'index', "Perl & LWP",
'intro', "Introduction to the 2007 online edition",
'foreword', "Foreword (by Gisle Aas)",
'ch00_01', "Preface",
'ch00_02', "Structure of This Book",
'ch00_03', "Order of Chapters",
'ch00_04', "Important Standards Documents",
'ch00_05', "Conventions Used in This Book",
'ch00_06', "Comments & Questions",
'ch00_07', "Acknowledgments",
'ch01_01', "Introduction to Web Automation",
'ch01_02', "History of LWP",
'ch01_03', "Installing LWP",
'ch01_04', "Words of Caution",
'ch01_05', "LWP in Action",
'ch02_01', "Web Basics",
'ch02_02', "An HTTP Transaction",
'ch02_03', "LWP::Simple",
'ch02_04', "Fetching Documents Without LWP::Simple",
'ch02_05', "Example: AltaVista",
'ch02_06', "HTTP POST",
'ch02_07', "Example: Babelfish",
'ch03_01', "The LWP Class Model",
'ch03_02', "Programming with LWP Classes",
'ch03_03', "Inside the do_GET and do_POST Functions",
'ch03_04', "User Agents",
'ch03_05', "HTTP::Response Objects",
'ch03_06', "LWP Classes: Behind the Scenes",
'ch04_01', "URLs",
'ch04_02', "Relative URLs",
'ch04_03', "Converting Absolute URLs to Relative",
'ch04_04', "Converting Relative URLs to Absolute",
'ch05_01', "Forms",
'ch05_02', "LWP and GET Requests",
'ch05_03', "Automating Form Analysis",
'ch05_04', "Idiosyncrasies of HTML Forms",
'ch05_05', "POST Example: License Plates",
'ch05_06', "POST Example: ABEBooks.com",
'ch05_07', "File Uploads",
'ch05_08', "Limits on Forms",
'ch06_01', "Simple HTML Processing with Regular Expressions",
'ch06_02', "Regular Expression Techniques",
'ch06_03', "Troubleshooting",
'ch06_04', "When Regular Expressions Aren't Enough",
'ch06_05', "Example: Extracting Links from a Bookmark File",
'ch06_06', "Example: Extracting Links from Arbitrary HTML",
'ch06_07', "Example: Extracting Temperatures from Weather Underground",
'ch07_01', "HTML Processing with Tokens",
'ch07_02', "Basic HTML::TokeParser Use",
'ch07_03', "Individual Tokens",
'ch07_04', "Token Sequences",
'ch07_05', "More HTML::TokeParser Methods",
'ch07_06', "Using Extracted Text",
'ch08_01', "Tokenizing Walkthrough",
'ch08_02', "Getting the Data",
'ch08_03', "Inspecting the HTML",
'ch08_04', "First Code",
'ch08_05', "Narrowing In",
'ch08_06', "Rewrite for Features",
'ch08_07', "Alternatives",
'ch09_01', "HTML Processing with Trees",
'ch09_02', "HTML::TreeBuilder",
'ch09_03', "Processing",
'ch09_04', "Example: BBC News",
'ch09_05', "Example: Fresh Air",
'ch10_01', "Modifying HTML with Trees",
'ch10_02', "Deleting Images",
'ch10_03', "Detaching and Reattaching",
'ch10_04', "Attaching in Another Tree",
'ch10_05', "Creating New Elements",
'ch11_01', "Cookies, Authentication, and Advanced Requests",
'ch11_02', "Adding Extra Request Header Lines",
'ch11_03', "Authentication",
'ch11_04', "An HTTP Authentication Example:The Unicode Mailing Archive",
'ch12_01', "Spiders",
'ch12_02', "A User Agent for Robots",
'ch12_03', "Example: A Link-Checking Spider",
'ch12_04', "Ideas for Further Expansion",
'appa_01', "LWP Modules",
'appb_01', "HTTP Status Codes",
'appb_02', "200s: Successful",
'appb_03', "300s: Redirection",
'appb_04', "400s: Client Errors",
'appb_05', "500s: Server Errors",
'appc_01', "Common MIME Types",
'appd_01', "Language Tags",
'appe_01', "Common Content Encodings",
'appf_01', "ASCII Table",
'appg_01', "User's View of Object-Oriented Modules",
'appg_02', "Modules and Their Functional Interfaces",
'appg_03', "Modules with Object-Oriented Interfaces",
'appg_04', "What Can You Do with Objects?",
'appg_05', "What's in an Object?",
'appg_06', "What Is an Object Value?",
'appg_07', "So Why Do Some Modules Use Objects?",
'appg_08', "The Gory Details",
'colophon', "Colophon",
'copyrght', "Copyright",
'i-index', "Perl & LWP: Index",
'i-idx_0', "Index: Symbols & Numbers",
'i-idx_a', "Index: A",
'i-idx_b', "Index: B",
'i-idx_c', "Index: C",
'i-idx_d', "Index: D",
'i-idx_e', "Index: E",
'i-idx_f', "Index: F",
'i-idx_g', "Index: G",
'i-idx_h', "Index: H",
'i-idx_i', "Index: I",
'i-idx_j', "Index: J",
'i-idx_k', "Index: K",
'i-idx_l', "Index: L",
'i-idx_m', "Index: M",
'i-idx_n', "Index: N",
'i-idx_o', "Index: O",
'i-idx_p', "Index: P",
'i-idx_q', "Index: Q",
'i-idx_r', "Index: R",
'i-idx_s', "Index: S",
'i-idx_t', "Index: T",
'i-idx_u', "Index: U",
'i-idx_v', "Index: V",
'i-idx_w', "Index: W",
'i-idx_x', "Index: X",
'i-idx_y', "Index: Y",
'i-idx_z', "Index: Z"
];
//======================================================================
if(window.lwp_pageid) {
init_page(window.lwp_pageid);
} else {
window.status = "LWP book nav error: Couldn't find page ID!";
}
//======================================================================
function pid2url (pid) {
var url = pid.toString().replace(/^i-/,"") + ".htm";
if( pid_is_indexy( lwp_pageid || '') ) {
if( pid_is_indexy(pid) ) {
; // we're both in ./index
} else |
} else {
if( pid_is_indexy(pid) ) {
url = "index/" + url;
} else {
; // we're both not in ./index
}
}
url = url.replace( /\bindex\.htm$/, 'index.html' );
return url;
}
function pid_is_chapter (pid) {
//if( (/^i-/).test(pid) ) return false;
if( ( /_01$/ ).test(pid) ) return true;
if( ( /_/ ).test(pid) ) return false;
return true;
}
function pid_is_indexy (pid) {
return( (/^i-/).test(pid) );
}
//======================================================================
var Prev_url, Prev_title, Up_url, Up_title, Next_url, Next_title;
var Next_is_chapter;
var Am_indexy, Root;
function init_pointers (page_pid) {
var prev_id, prev_title, prev_chapter_id, prev_chapter_title;
Am_indexy = pid_is_indexy(page_pid);
Root = Am_indexy ? "../" : "./";
Up_url = Root + "index.html";
Up_title = "Table of Contents";
Next_is_chapter = false;
for(var i = 0; i < Contents_Order.length; i+= 2) {
var this_pid = Contents_Order[i ];
var this_title = Contents_Order[i+1];
var am_chapter = pid_is_chapter(this_pid);
if(this_pid == page_pid) {
//alert("I'm " + page_pid + " = @" + i.toString());
if(i == Contents_Order.length - 2) {
;// special case: last page.
} else {
Next_is_chapter = pid_is_chapter(Contents_Order[i+2]);
Next_url = pid2url( Contents_Order[i+2] );
Next_title = Contents_Order[i+3] ;
}
if(prev_id) {
Prev_url = pid2url( prev_id );
Prev_title = prev_title;
}
if(!am_chapter) {
Up_url = pid2url( prev_chapter_id );
Up_title = prev_chapter_title;
}
break;
} else {
; // anything special to do?
}
prev_id = this_pid;
prev_title = this_title;
if(am_chapter) {
prev_chapter_id = this_pid;
prev_chapter_title = this_title;
}
}
return;
}
//======================================================================
function init_page (pid) {
init_pointers(pid);
init_head();
if(pid != "index") make_top_navbar();
return;
}
function init_head () {
var head;
var u = "favicon.png";
var atts = { 'rel':'icon', 'type': "image/png"};
if( document.getElementsByTagName ) { // DOM
head = document.getElementsByTagName('head').item(0);
} else if ( document.all ) { // MSIE horror
head = document.all['head'];
u = 'favicon.ico';
atts['type'] = 'image/vnd.microsoft.icon';
} else {
return;
}
atts['href'] = Root + u;
graft(head, ['link', atts]);
/* Too flaky just now:
and if we do put this in, make a cookie persist it across pages.
graft(head, ['link', {
'rel':"alternate stylesheet", 'type':"text/css",
'title': "white-on-black",
'href': (Root + "lwp_bw.css") }
]);
*/
return;
}
function _button (parent, myclass, desc, url, key, text) {
if(url) {
var atts = { 'href': url };
if(key) {
atts.accesskey = key;
atts.title = "alt-" + key +
" or click, to go to this " + desc + " section";
} else {
atts.title =
"click to go to this " + desc + " section";
}
parent.push( ['td.' + myclass, ['a', atts, text ] ] );
} else {
parent.push( ['td.' + myclass+ '.blank'] );
}
return;
}
function make_top_navbar () {
if(Am_indexy) return;
var div = ["tr"];
if( Prev_url && Prev_url == Up_url) Prev_url = '';
_button(div, 'prevlink', 'previous', Prev_url, 'p', Prev_title );
_button(div, 'uplink', 'higher' , Up_url , 'u', Up_title );
_button(div, 'nextlink', 'next' , Next_url, 'n', Next_title );
if(div.length > 1)
graft( document.body, ['table.navbar.topnavbar', ['tbody', div]] );
return;
}
//======================================================================
function make_bottom_navbar () {
var div = ["p.morelink"];
if(Am_indexy) return;
if(Next_url) {
if( Next_is_chapter ) {
div.push( "The next chapter is: " );
div.push( ['a', {'href': Next_url}, Next_title] );
div.push( ['br'] );
div.push( "or go up to " );
div.push( ['a', {'href': "index.html"}, "the Table of Contents"] );
} else {
div.push( "Continue to section: " );
div.push( ['a', {'href': Next_url}, Next_title] );
}
} else {
graft( document.body, ['h3.the_end', "The End"] );
}
if(div.length > 1) graft( document.body, div );
return;
}
//======================================================================
function make_bottom_lastmod () {
if(!window.LastUpdate) return;
graft( document.body, ["p.morelink", "Last update: " + LastUpdate]);
return;
}
//======================================================================
function endpage () {
// do anything? Add stylesheets?
if(!window.lwp_pageid) return;
if(window.lwp_pageid == "index") {
make_bottom_lastmod();
} else {
make_bottom_navbar();
}
return true;
}
//======================================================================
// Library functions of mine ...
function complaining () {
var _ = [];
for(var i = 0; i < arguments.length; i++) { _.push(arguments[i]) }
_ = _.join("");
if(! _.length) out = "Unknown error!?!";
void alert(_);
return new Error(_,_);
}
function id (name,doc) { // find element with the given ID, else exception.
var object = id_try(name,doc);
if( ! object ) throw complaining("Failed to find element with id='"
+ name + "' in " + (doc || document).location );
return object;
}
function id_try (name,doc) {
var object = (doc || document).getElementById(name);
return object;
}
function graft (parent, t, doc) {
// graft( somenode, [ "I like ", ['em', { 'class':"stuff" },"stuff"], " oboy!"] )
//if(!doc) doc = parent.ownerDocument ? parent.ownerDocument : document;
doc = (doc || parent.ownerDocument || document);
var e;
if(t == undefined) {
if(parent == undefined) throw complaining("Can't graft an undefined value");
} else if(t.constructor == String) {
e = doc.createTextNode( t );
} else if(t.length == 0) {
e = doc.createElement( "span" );
e.setAttribute( "class", "fromEmptyLOL" );
} else {
for(var i = 0; i < t.length; i++) {
if( i == 0 && t[i].constructor == String ) {
var snared;
snared = t[i].match( /^([a-z][a-z0-9]*)\.([^\s]+)$/i );
if( snared ) {
e = doc.createElement( snared[1] );
e.setAttribute( 'class', snared[2].replace(/\./g, " "));
continue;
}
snared = t[i].match( /^([a-z][a-z0-9]*)$/i );
if( snared ) {
e = doc.createElement( snared[1] ); // but no class
continue;
}
// Otherwise:
e = doc.createElement( "span" );
e.setAttribute( "class", "namelessFromLOL" );
}
if( t[i] == undefined ) {
throw complaining("Can't graft an undefined value in a list!");
} else if( t[i].constructor == String || t[i].constructor == Array ) {
graft( e, t[i], doc );
} else if( t[i].constructor == Number ) {
graft( e, t[i].toString(), doc );
} else if( t[i].constructor == Object ) {
// turn this hash's properties:values into attributes of this element
for(var k in t[i]) e.setAttribute( k, t[i][k] );
} else {
// TODO: make it accept already-made DOM nodes? by checking "nodeType" in i?
throw complaining( "Object " + t[i] + " is inscrutable as an graft arglet." );
}
}
}
parent.appendChild( e );
return e;
}
//======================================================================
| {
url = "../" + url;
} | conditional_block |
lwp_nav.js | // Time-stamp: "2007-04-16 02:31:52 AKDT sburke@cpan.org"
var Contents_Order = [
'index', "Perl & LWP",
'intro', "Introduction to the 2007 online edition",
'foreword', "Foreword (by Gisle Aas)",
'ch00_01', "Preface",
'ch00_02', "Structure of This Book",
'ch00_03', "Order of Chapters",
'ch00_04', "Important Standards Documents",
'ch00_05', "Conventions Used in This Book",
'ch00_06', "Comments & Questions",
'ch00_07', "Acknowledgments",
'ch01_01', "Introduction to Web Automation",
'ch01_02', "History of LWP",
'ch01_03', "Installing LWP",
'ch01_04', "Words of Caution",
'ch01_05', "LWP in Action",
'ch02_01', "Web Basics",
'ch02_02', "An HTTP Transaction",
'ch02_03', "LWP::Simple",
'ch02_04', "Fetching Documents Without LWP::Simple",
'ch02_05', "Example: AltaVista",
'ch02_06', "HTTP POST",
'ch02_07', "Example: Babelfish",
'ch03_01', "The LWP Class Model",
'ch03_02', "Programming with LWP Classes",
'ch03_03', "Inside the do_GET and do_POST Functions",
'ch03_04', "User Agents",
'ch03_05', "HTTP::Response Objects",
'ch03_06', "LWP Classes: Behind the Scenes",
'ch04_01', "URLs",
'ch04_02', "Relative URLs",
'ch04_03', "Converting Absolute URLs to Relative",
'ch04_04', "Converting Relative URLs to Absolute",
'ch05_01', "Forms",
'ch05_02', "LWP and GET Requests",
'ch05_03', "Automating Form Analysis",
'ch05_04', "Idiosyncrasies of HTML Forms",
'ch05_05', "POST Example: License Plates",
'ch05_06', "POST Example: ABEBooks.com",
'ch05_07', "File Uploads",
'ch05_08', "Limits on Forms",
'ch06_01', "Simple HTML Processing with Regular Expressions",
'ch06_02', "Regular Expression Techniques",
'ch06_03', "Troubleshooting",
'ch06_04', "When Regular Expressions Aren't Enough",
'ch06_05', "Example: Extracting Links from a Bookmark File",
'ch06_06', "Example: Extracting Links from Arbitrary HTML",
'ch06_07', "Example: Extracting Temperatures from Weather Underground",
'ch07_01', "HTML Processing with Tokens",
'ch07_02', "Basic HTML::TokeParser Use",
'ch07_03', "Individual Tokens",
'ch07_04', "Token Sequences",
'ch07_05', "More HTML::TokeParser Methods",
'ch07_06', "Using Extracted Text",
'ch08_01', "Tokenizing Walkthrough",
'ch08_02', "Getting the Data",
'ch08_03', "Inspecting the HTML",
'ch08_04', "First Code",
'ch08_05', "Narrowing In",
'ch08_06', "Rewrite for Features",
'ch08_07', "Alternatives",
'ch09_01', "HTML Processing with Trees",
'ch09_02', "HTML::TreeBuilder",
'ch09_03', "Processing",
'ch09_04', "Example: BBC News",
'ch09_05', "Example: Fresh Air",
'ch10_01', "Modifying HTML with Trees",
'ch10_02', "Deleting Images",
'ch10_03', "Detaching and Reattaching",
'ch10_04', "Attaching in Another Tree",
'ch10_05', "Creating New Elements",
'ch11_01', "Cookies, Authentication, and Advanced Requests",
'ch11_02', "Adding Extra Request Header Lines",
'ch11_03', "Authentication",
'ch11_04', "An HTTP Authentication Example:The Unicode Mailing Archive",
'ch12_01', "Spiders",
'ch12_02', "A User Agent for Robots",
'ch12_03', "Example: A Link-Checking Spider",
'ch12_04', "Ideas for Further Expansion",
'appa_01', "LWP Modules",
'appb_01', "HTTP Status Codes",
'appb_02', "200s: Successful",
'appb_03', "300s: Redirection",
'appb_04', "400s: Client Errors",
'appb_05', "500s: Server Errors",
'appc_01', "Common MIME Types",
'appd_01', "Language Tags",
'appe_01', "Common Content Encodings",
'appf_01', "ASCII Table",
'appg_01', "User's View of Object-Oriented Modules",
'appg_02', "Modules and Their Functional Interfaces",
'appg_03', "Modules with Object-Oriented Interfaces",
'appg_04', "What Can You Do with Objects?",
'appg_05', "What's in an Object?",
'appg_06', "What Is an Object Value?",
'appg_07', "So Why Do Some Modules Use Objects?",
'appg_08', "The Gory Details",
'colophon', "Colophon",
'copyrght', "Copyright",
'i-index', "Perl & LWP: Index",
'i-idx_0', "Index: Symbols & Numbers",
'i-idx_a', "Index: A",
'i-idx_b', "Index: B",
'i-idx_c', "Index: C",
'i-idx_d', "Index: D",
'i-idx_e', "Index: E",
'i-idx_f', "Index: F",
'i-idx_g', "Index: G",
'i-idx_h', "Index: H",
'i-idx_i', "Index: I",
'i-idx_j', "Index: J",
'i-idx_k', "Index: K",
'i-idx_l', "Index: L",
'i-idx_m', "Index: M",
'i-idx_n', "Index: N",
'i-idx_o', "Index: O",
'i-idx_p', "Index: P",
'i-idx_q', "Index: Q",
'i-idx_r', "Index: R",
'i-idx_s', "Index: S",
'i-idx_t', "Index: T",
'i-idx_u', "Index: U",
'i-idx_v', "Index: V",
'i-idx_w', "Index: W",
'i-idx_x', "Index: X",
'i-idx_y', "Index: Y",
'i-idx_z', "Index: Z"
];
//======================================================================
if(window.lwp_pageid) {
init_page(window.lwp_pageid);
} else {
window.status = "LWP book nav error: Couldn't find page ID!";
}
//======================================================================
function pid2url (pid) {
var url = pid.toString().replace(/^i-/,"") + ".htm";
if( pid_is_indexy( lwp_pageid || '') ) {
if( pid_is_indexy(pid) ) {
; // we're both in ./index
} else {
url = "../" + url;
}
} else {
if( pid_is_indexy(pid) ) {
url = "index/" + url;
} else {
; // we're both not in ./index
}
}
url = url.replace( /\bindex\.htm$/, 'index.html' );
return url;
}
function pid_is_chapter (pid) {
//if( (/^i-/).test(pid) ) return false;
if( ( /_01$/ ).test(pid) ) return true;
if( ( /_/ ).test(pid) ) return false;
return true;
}
function pid_is_indexy (pid) {
return( (/^i-/).test(pid) );
}
//======================================================================
var Prev_url, Prev_title, Up_url, Up_title, Next_url, Next_title;
var Next_is_chapter;
var Am_indexy, Root;
function init_pointers (page_pid) {
var prev_id, prev_title, prev_chapter_id, prev_chapter_title;
Am_indexy = pid_is_indexy(page_pid);
Root = Am_indexy ? "../" : "./";
Up_url = Root + "index.html";
Up_title = "Table of Contents";
Next_is_chapter = false;
for(var i = 0; i < Contents_Order.length; i+= 2) {
var this_pid = Contents_Order[i ];
var this_title = Contents_Order[i+1];
var am_chapter = pid_is_chapter(this_pid);
if(this_pid == page_pid) {
//alert("I'm " + page_pid + " = @" + i.toString());
if(i == Contents_Order.length - 2) {
;// special case: last page.
} else {
Next_is_chapter = pid_is_chapter(Contents_Order[i+2]);
Next_url = pid2url( Contents_Order[i+2] );
Next_title = Contents_Order[i+3] ;
}
if(prev_id) {
Prev_url = pid2url( prev_id );
Prev_title = prev_title;
}
if(!am_chapter) {
Up_url = pid2url( prev_chapter_id );
Up_title = prev_chapter_title;
}
break;
} else {
; // anything special to do?
}
prev_id = this_pid;
prev_title = this_title;
if(am_chapter) {
prev_chapter_id = this_pid;
prev_chapter_title = this_title;
}
}
return;
}
//======================================================================
function init_page (pid) {
init_pointers(pid);
init_head();
if(pid != "index") make_top_navbar();
return;
}
function init_head () {
var head;
var u = "favicon.png";
var atts = { 'rel':'icon', 'type': "image/png"};
if( document.getElementsByTagName ) { // DOM
head = document.getElementsByTagName('head').item(0);
} else if ( document.all ) { // MSIE horror
head = document.all['head'];
u = 'favicon.ico';
atts['type'] = 'image/vnd.microsoft.icon';
} else {
return;
}
atts['href'] = Root + u;
graft(head, ['link', atts]);
/* Too flaky just now:
and if we do put this in, make a cookie persist it across pages.
graft(head, ['link', {
'rel':"alternate stylesheet", 'type':"text/css",
'title': "white-on-black",
'href': (Root + "lwp_bw.css") }
]);
*/
return;
}
function _button (parent, myclass, desc, url, key, text) {
if(url) {
var atts = { 'href': url };
if(key) {
atts.accesskey = key;
atts.title = "alt-" + key +
" or click, to go to this " + desc + " section";
} else {
atts.title =
"click to go to this " + desc + " section";
}
parent.push( ['td.' + myclass, ['a', atts, text ] ] );
} else {
parent.push( ['td.' + myclass+ '.blank'] );
}
return;
}
function make_top_navbar () {
if(Am_indexy) return;
var div = ["tr"];
if( Prev_url && Prev_url == Up_url) Prev_url = '';
_button(div, 'prevlink', 'previous', Prev_url, 'p', Prev_title );
_button(div, 'uplink', 'higher' , Up_url , 'u', Up_title );
_button(div, 'nextlink', 'next' , Next_url, 'n', Next_title );
if(div.length > 1)
graft( document.body, ['table.navbar.topnavbar', ['tbody', div]] );
return;
}
//======================================================================
function make_bottom_navbar () {
var div = ["p.morelink"];
if(Am_indexy) return;
if(Next_url) {
if( Next_is_chapter ) {
div.push( "The next chapter is: " );
div.push( ['a', {'href': Next_url}, Next_title] );
div.push( ['br'] );
div.push( "or go up to " );
div.push( ['a', {'href': "index.html"}, "the Table of Contents"] );
} else {
div.push( "Continue to section: " );
div.push( ['a', {'href': Next_url}, Next_title] );
}
} else {
graft( document.body, ['h3.the_end', "The End"] );
}
if(div.length > 1) graft( document.body, div );
return;
}
//======================================================================
function make_bottom_lastmod () {
if(!window.LastUpdate) return;
graft( document.body, ["p.morelink", "Last update: " + LastUpdate]);
return;
}
//======================================================================
function endpage () {
// do anything? Add stylesheets?
if(!window.lwp_pageid) return;
if(window.lwp_pageid == "index") {
make_bottom_lastmod();
} else {
make_bottom_navbar();
}
return true;
}
//======================================================================
// Library functions of mine ...
function complaining () {
var _ = [];
for(var i = 0; i < arguments.length; i++) { _.push(arguments[i]) }
_ = _.join("");
if(! _.length) out = "Unknown error!?!";
void alert(_);
return new Error(_,_);
}
function id (name,doc) { // find element with the given ID, else exception.
var object = id_try(name,doc);
if( ! object ) throw complaining("Failed to find element with id='"
+ name + "' in " + (doc || document).location );
return object;
}
function id_try (name,doc) {
var object = (doc || document).getElementById(name);
return object;
}
| doc = (doc || parent.ownerDocument || document);
var e;
if(t == undefined) {
if(parent == undefined) throw complaining("Can't graft an undefined value");
} else if(t.constructor == String) {
e = doc.createTextNode( t );
} else if(t.length == 0) {
e = doc.createElement( "span" );
e.setAttribute( "class", "fromEmptyLOL" );
} else {
for(var i = 0; i < t.length; i++) {
if( i == 0 && t[i].constructor == String ) {
var snared;
snared = t[i].match( /^([a-z][a-z0-9]*)\.([^\s]+)$/i );
if( snared ) {
e = doc.createElement( snared[1] );
e.setAttribute( 'class', snared[2].replace(/\./g, " "));
continue;
}
snared = t[i].match( /^([a-z][a-z0-9]*)$/i );
if( snared ) {
e = doc.createElement( snared[1] ); // but no class
continue;
}
// Otherwise:
e = doc.createElement( "span" );
e.setAttribute( "class", "namelessFromLOL" );
}
if( t[i] == undefined ) {
throw complaining("Can't graft an undefined value in a list!");
} else if( t[i].constructor == String || t[i].constructor == Array ) {
graft( e, t[i], doc );
} else if( t[i].constructor == Number ) {
graft( e, t[i].toString(), doc );
} else if( t[i].constructor == Object ) {
// turn this hash's properties:values into attributes of this element
for(var k in t[i]) e.setAttribute( k, t[i][k] );
} else {
// TODO: make it accept already-made DOM nodes? by checking "nodeType" in i?
throw complaining( "Object " + t[i] + " is inscrutable as an graft arglet." );
}
}
}
parent.appendChild( e );
return e;
}
//====================================================================== | function graft (parent, t, doc) {
// graft( somenode, [ "I like ", ['em', { 'class':"stuff" },"stuff"], " oboy!"] )
//if(!doc) doc = parent.ownerDocument ? parent.ownerDocument : document; | random_line_split |
lwp_nav.js |
// Time-stamp: "2007-04-16 02:31:52 AKDT sburke@cpan.org"
var Contents_Order = [
'index', "Perl & LWP",
'intro', "Introduction to the 2007 online edition",
'foreword', "Foreword (by Gisle Aas)",
'ch00_01', "Preface",
'ch00_02', "Structure of This Book",
'ch00_03', "Order of Chapters",
'ch00_04', "Important Standards Documents",
'ch00_05', "Conventions Used in This Book",
'ch00_06', "Comments & Questions",
'ch00_07', "Acknowledgments",
'ch01_01', "Introduction to Web Automation",
'ch01_02', "History of LWP",
'ch01_03', "Installing LWP",
'ch01_04', "Words of Caution",
'ch01_05', "LWP in Action",
'ch02_01', "Web Basics",
'ch02_02', "An HTTP Transaction",
'ch02_03', "LWP::Simple",
'ch02_04', "Fetching Documents Without LWP::Simple",
'ch02_05', "Example: AltaVista",
'ch02_06', "HTTP POST",
'ch02_07', "Example: Babelfish",
'ch03_01', "The LWP Class Model",
'ch03_02', "Programming with LWP Classes",
'ch03_03', "Inside the do_GET and do_POST Functions",
'ch03_04', "User Agents",
'ch03_05', "HTTP::Response Objects",
'ch03_06', "LWP Classes: Behind the Scenes",
'ch04_01', "URLs",
'ch04_02', "Relative URLs",
'ch04_03', "Converting Absolute URLs to Relative",
'ch04_04', "Converting Relative URLs to Absolute",
'ch05_01', "Forms",
'ch05_02', "LWP and GET Requests",
'ch05_03', "Automating Form Analysis",
'ch05_04', "Idiosyncrasies of HTML Forms",
'ch05_05', "POST Example: License Plates",
'ch05_06', "POST Example: ABEBooks.com",
'ch05_07', "File Uploads",
'ch05_08', "Limits on Forms",
'ch06_01', "Simple HTML Processing with Regular Expressions",
'ch06_02', "Regular Expression Techniques",
'ch06_03', "Troubleshooting",
'ch06_04', "When Regular Expressions Aren't Enough",
'ch06_05', "Example: Extracting Links from a Bookmark File",
'ch06_06', "Example: Extracting Links from Arbitrary HTML",
'ch06_07', "Example: Extracting Temperatures from Weather Underground",
'ch07_01', "HTML Processing with Tokens",
'ch07_02', "Basic HTML::TokeParser Use",
'ch07_03', "Individual Tokens",
'ch07_04', "Token Sequences",
'ch07_05', "More HTML::TokeParser Methods",
'ch07_06', "Using Extracted Text",
'ch08_01', "Tokenizing Walkthrough",
'ch08_02', "Getting the Data",
'ch08_03', "Inspecting the HTML",
'ch08_04', "First Code",
'ch08_05', "Narrowing In",
'ch08_06', "Rewrite for Features",
'ch08_07', "Alternatives",
'ch09_01', "HTML Processing with Trees",
'ch09_02', "HTML::TreeBuilder",
'ch09_03', "Processing",
'ch09_04', "Example: BBC News",
'ch09_05', "Example: Fresh Air",
'ch10_01', "Modifying HTML with Trees",
'ch10_02', "Deleting Images",
'ch10_03', "Detaching and Reattaching",
'ch10_04', "Attaching in Another Tree",
'ch10_05', "Creating New Elements",
'ch11_01', "Cookies, Authentication, and Advanced Requests",
'ch11_02', "Adding Extra Request Header Lines",
'ch11_03', "Authentication",
'ch11_04', "An HTTP Authentication Example:The Unicode Mailing Archive",
'ch12_01', "Spiders",
'ch12_02', "A User Agent for Robots",
'ch12_03', "Example: A Link-Checking Spider",
'ch12_04', "Ideas for Further Expansion",
'appa_01', "LWP Modules",
'appb_01', "HTTP Status Codes",
'appb_02', "200s: Successful",
'appb_03', "300s: Redirection",
'appb_04', "400s: Client Errors",
'appb_05', "500s: Server Errors",
'appc_01', "Common MIME Types",
'appd_01', "Language Tags",
'appe_01', "Common Content Encodings",
'appf_01', "ASCII Table",
'appg_01', "User's View of Object-Oriented Modules",
'appg_02', "Modules and Their Functional Interfaces",
'appg_03', "Modules with Object-Oriented Interfaces",
'appg_04', "What Can You Do with Objects?",
'appg_05', "What's in an Object?",
'appg_06', "What Is an Object Value?",
'appg_07', "So Why Do Some Modules Use Objects?",
'appg_08', "The Gory Details",
'colophon', "Colophon",
'copyrght', "Copyright",
'i-index', "Perl & LWP: Index",
'i-idx_0', "Index: Symbols & Numbers",
'i-idx_a', "Index: A",
'i-idx_b', "Index: B",
'i-idx_c', "Index: C",
'i-idx_d', "Index: D",
'i-idx_e', "Index: E",
'i-idx_f', "Index: F",
'i-idx_g', "Index: G",
'i-idx_h', "Index: H",
'i-idx_i', "Index: I",
'i-idx_j', "Index: J",
'i-idx_k', "Index: K",
'i-idx_l', "Index: L",
'i-idx_m', "Index: M",
'i-idx_n', "Index: N",
'i-idx_o', "Index: O",
'i-idx_p', "Index: P",
'i-idx_q', "Index: Q",
'i-idx_r', "Index: R",
'i-idx_s', "Index: S",
'i-idx_t', "Index: T",
'i-idx_u', "Index: U",
'i-idx_v', "Index: V",
'i-idx_w', "Index: W",
'i-idx_x', "Index: X",
'i-idx_y', "Index: Y",
'i-idx_z', "Index: Z"
];
//======================================================================
if(window.lwp_pageid) {
init_page(window.lwp_pageid);
} else {
window.status = "LWP book nav error: Couldn't find page ID!";
}
//======================================================================
function pid2url (pid) |
function pid_is_chapter (pid) {
//if( (/^i-/).test(pid) ) return false;
if( ( /_01$/ ).test(pid) ) return true;
if( ( /_/ ).test(pid) ) return false;
return true;
}
function pid_is_indexy (pid) {
return( (/^i-/).test(pid) );
}
//======================================================================
var Prev_url, Prev_title, Up_url, Up_title, Next_url, Next_title;
var Next_is_chapter;
var Am_indexy, Root;
function init_pointers (page_pid) {
var prev_id, prev_title, prev_chapter_id, prev_chapter_title;
Am_indexy = pid_is_indexy(page_pid);
Root = Am_indexy ? "../" : "./";
Up_url = Root + "index.html";
Up_title = "Table of Contents";
Next_is_chapter = false;
for(var i = 0; i < Contents_Order.length; i+= 2) {
var this_pid = Contents_Order[i ];
var this_title = Contents_Order[i+1];
var am_chapter = pid_is_chapter(this_pid);
if(this_pid == page_pid) {
//alert("I'm " + page_pid + " = @" + i.toString());
if(i == Contents_Order.length - 2) {
;// special case: last page.
} else {
Next_is_chapter = pid_is_chapter(Contents_Order[i+2]);
Next_url = pid2url( Contents_Order[i+2] );
Next_title = Contents_Order[i+3] ;
}
if(prev_id) {
Prev_url = pid2url( prev_id );
Prev_title = prev_title;
}
if(!am_chapter) {
Up_url = pid2url( prev_chapter_id );
Up_title = prev_chapter_title;
}
break;
} else {
; // anything special to do?
}
prev_id = this_pid;
prev_title = this_title;
if(am_chapter) {
prev_chapter_id = this_pid;
prev_chapter_title = this_title;
}
}
return;
}
//======================================================================
function init_page (pid) {
init_pointers(pid);
init_head();
if(pid != "index") make_top_navbar();
return;
}
function init_head () {
var head;
var u = "favicon.png";
var atts = { 'rel':'icon', 'type': "image/png"};
if( document.getElementsByTagName ) { // DOM
head = document.getElementsByTagName('head').item(0);
} else if ( document.all ) { // MSIE horror
head = document.all['head'];
u = 'favicon.ico';
atts['type'] = 'image/vnd.microsoft.icon';
} else {
return;
}
atts['href'] = Root + u;
graft(head, ['link', atts]);
/* Too flaky just now:
and if we do put this in, make a cookie persist it across pages.
graft(head, ['link', {
'rel':"alternate stylesheet", 'type':"text/css",
'title': "white-on-black",
'href': (Root + "lwp_bw.css") }
]);
*/
return;
}
function _button (parent, myclass, desc, url, key, text) {
if(url) {
var atts = { 'href': url };
if(key) {
atts.accesskey = key;
atts.title = "alt-" + key +
" or click, to go to this " + desc + " section";
} else {
atts.title =
"click to go to this " + desc + " section";
}
parent.push( ['td.' + myclass, ['a', atts, text ] ] );
} else {
parent.push( ['td.' + myclass+ '.blank'] );
}
return;
}
function make_top_navbar () {
if(Am_indexy) return;
var div = ["tr"];
if( Prev_url && Prev_url == Up_url) Prev_url = '';
_button(div, 'prevlink', 'previous', Prev_url, 'p', Prev_title );
_button(div, 'uplink', 'higher' , Up_url , 'u', Up_title );
_button(div, 'nextlink', 'next' , Next_url, 'n', Next_title );
if(div.length > 1)
graft( document.body, ['table.navbar.topnavbar', ['tbody', div]] );
return;
}
//======================================================================
function make_bottom_navbar () {
var div = ["p.morelink"];
if(Am_indexy) return;
if(Next_url) {
if( Next_is_chapter ) {
div.push( "The next chapter is: " );
div.push( ['a', {'href': Next_url}, Next_title] );
div.push( ['br'] );
div.push( "or go up to " );
div.push( ['a', {'href': "index.html"}, "the Table of Contents"] );
} else {
div.push( "Continue to section: " );
div.push( ['a', {'href': Next_url}, Next_title] );
}
} else {
graft( document.body, ['h3.the_end', "The End"] );
}
if(div.length > 1) graft( document.body, div );
return;
}
//======================================================================
function make_bottom_lastmod () {
if(!window.LastUpdate) return;
graft( document.body, ["p.morelink", "Last update: " + LastUpdate]);
return;
}
//======================================================================
function endpage () {
// do anything? Add stylesheets?
if(!window.lwp_pageid) return;
if(window.lwp_pageid == "index") {
make_bottom_lastmod();
} else {
make_bottom_navbar();
}
return true;
}
//======================================================================
// Library functions of mine ...
function complaining () {
var _ = [];
for(var i = 0; i < arguments.length; i++) { _.push(arguments[i]) }
_ = _.join("");
if(! _.length) out = "Unknown error!?!";
void alert(_);
return new Error(_,_);
}
function id (name,doc) { // find element with the given ID, else exception.
var object = id_try(name,doc);
if( ! object ) throw complaining("Failed to find element with id='"
+ name + "' in " + (doc || document).location );
return object;
}
function id_try (name,doc) {
var object = (doc || document).getElementById(name);
return object;
}
function graft (parent, t, doc) {
// graft( somenode, [ "I like ", ['em', { 'class':"stuff" },"stuff"], " oboy!"] )
//if(!doc) doc = parent.ownerDocument ? parent.ownerDocument : document;
doc = (doc || parent.ownerDocument || document);
var e;
if(t == undefined) {
if(parent == undefined) throw complaining("Can't graft an undefined value");
} else if(t.constructor == String) {
e = doc.createTextNode( t );
} else if(t.length == 0) {
e = doc.createElement( "span" );
e.setAttribute( "class", "fromEmptyLOL" );
} else {
for(var i = 0; i < t.length; i++) {
if( i == 0 && t[i].constructor == String ) {
var snared;
snared = t[i].match( /^([a-z][a-z0-9]*)\.([^\s]+)$/i );
if( snared ) {
e = doc.createElement( snared[1] );
e.setAttribute( 'class', snared[2].replace(/\./g, " "));
continue;
}
snared = t[i].match( /^([a-z][a-z0-9]*)$/i );
if( snared ) {
e = doc.createElement( snared[1] ); // but no class
continue;
}
// Otherwise:
e = doc.createElement( "span" );
e.setAttribute( "class", "namelessFromLOL" );
}
if( t[i] == undefined ) {
throw complaining("Can't graft an undefined value in a list!");
} else if( t[i].constructor == String || t[i].constructor == Array ) {
graft( e, t[i], doc );
} else if( t[i].constructor == Number ) {
graft( e, t[i].toString(), doc );
} else if( t[i].constructor == Object ) {
// turn this hash's properties:values into attributes of this element
for(var k in t[i]) e.setAttribute( k, t[i][k] );
} else {
// TODO: make it accept already-made DOM nodes? by checking "nodeType" in i?
throw complaining( "Object " + t[i] + " is inscrutable as an graft arglet." );
}
}
}
parent.appendChild( e );
return e;
}
//======================================================================
| {
var url = pid.toString().replace(/^i-/,"") + ".htm";
if( pid_is_indexy( lwp_pageid || '') ) {
if( pid_is_indexy(pid) ) {
; // we're both in ./index
} else {
url = "../" + url;
}
} else {
if( pid_is_indexy(pid) ) {
url = "index/" + url;
} else {
; // we're both not in ./index
}
}
url = url.replace( /\bindex\.htm$/, 'index.html' );
return url;
} | identifier_body |
lib.rs | //! This crate provides the [`quote!`] macro for turning Rust syntax tree data
//! structures into tokens of source code.
//!
//! [`quote!`]: macro.quote.html
//!
//! Procedural macros in Rust receive a stream of tokens as input, execute
//! arbitrary Rust code to determine how to manipulate those tokens, and produce
//! a stream of tokens to hand back to the compiler to compile into the caller's
//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens
//! to return to the compiler.
//!
//! The idea of quasi-quoting is that we write *code* that we treat as *data*.
//! Within the `quote!` macro, we can write what looks like code to our text
//! editor or IDE. We get all the benefits of the editor's brace matching,
//! syntax highlighting, indentation, and maybe autocompletion. But rather than
//! compiling that as code into the current crate, we can treat it as data, pass
//! it around, mutate it, and eventually hand it back to the compiler as tokens
//! to compile into the macro caller's crate.
//!
//! This crate is motivated by the procedural macro use case, but it is a
//! general-purpose Rust quasi-quoting library and is not specific to procedural
//! macros.
//!
//! # Example
//!
//! The following quasi-quoted block of code is something you might find in [a]
//! procedural macro having to do with data structure serialization. The `#var`
//! syntax performs interpolation of runtime variables into the quoted tokens.
//! Check out the documentation of the [`quote!`] macro for more detail about
//! the syntax. See also the [`quote_spanned!`] macro which is important for
//! implementing hygienic procedural macros.
//!
//! [a]: https://serde.rs/
//! [`quote_spanned!`]: macro.quote_spanned.html
//!
//! ```edition2018
//! # use quote::quote;
//! #
//! # let generics = "";
//! # let where_clause = "";
//! # let field_ty = "";
//! # let item_ty = "";
//! # let path = "";
//! # let value = "";
//! #
//! let tokens = quote! {
//! struct SerializeWith #generics #where_clause {
//! value: &'a #field_ty,
//! phantom: core::marker::PhantomData<#item_ty>,
//! }
//!
//! impl #generics serde::Serialize for SerializeWith #generics #where_clause {
//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//! where
//! S: serde::Serializer,
//! {
//! #path(self.value, serializer)
//! }
//! }
//!
//! SerializeWith {
//! value: #value,
//! phantom: core::marker::PhantomData::<#item_ty>,
//! }
//! };
//! ```
use proc_macro_hack::proc_macro_hack;
mod repeat;
pub use self::repeat::*;
pub use quote::ToTokens;
pub use quote::TokenStreamExt;
/// The whole point.
///
/// Performs variable interpolation against the input and produces it as
/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use
/// `into()` to build a `TokenStream`.
///
/// [`TokenStream`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.TokenStream.html
///
/// # Interpolation
///
/// Variable interpolation is done with `#var` (similar to `$var` in
/// `macro_rules!` macros). This grabs the `var` variable that is currently in
/// scope and inserts it in that location in the output tokens. Any type
/// implementing the [`ToTokens`] trait can be interpolated. This includes most
/// Rust primitive types as well as most of the syntax tree types from the [Syn]
/// crate.
///
/// [`ToTokens`]: trait.ToTokens.html
/// [Syn]: https://github.com/dtolnay/syn
///
/// Repetition is done using `#(...)*` or `#(...),*` again similar to
/// `macro_rules!`. This iterates through the elements of any variable
/// interpolated within the repetition and inserts a copy of the repetition
/// body for each one.
///
/// - `#(#var)*` — no separators
/// - `#(#var),*` — the character before the asterisk is used as a separator
/// - `#( struct #var; )*` — the repetition can contain other tokens
/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
/// - `#(let #var = self.#var;)*` - the same variable can be used more than once
///
/// The [`proc_quote::Repeat`](https://docs.rs/proc-quote/0/proc_quote/trait.Repeat.html)
/// trait defines which types are allowed to be interpolated inside a repition pattern.
///
/// Which types that implement the following traits *do* `Repeat`:
/// - [`Iterator<T>`] consumes the iterator, iterating through every element.
/// - <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">`Borrow<[T]>`</a>
/// (includes [`Vec`], [`array`], and [`slice`]) iterates with the [`slice::iter`] method,
/// thus not consuming the original data.
/// - [`ToTokens`], interpolates the variable in every iteration.
///
/// Which types *do NOT* `Repeat`:
/// - [`IntoIterator`], to avoid ambiguity (Ex. "Which behavior would have been used for [`Vec`],
/// which implements both [`IntoIterator`] and <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">
/// `Borrow<[T]>`</a>?"; "Which behavior would have been used for [`TokenStream`], which implements both
/// [`IntoIterator`] and [`ToTokens`]?"). To use the iterator, you may call [`IntoIterator::into_iter`]
/// explicitly.
/// - Ambiguous types that implement at least two of the `Repeat` traits. In the very unlikely case
/// this happens, disambiguate the type by wrapping it under some structure that only implements the
/// trait you desire to use.
///
/// [`Iterator<T>`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
/// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
/// [`array`]: https://doc.rust-lang.org/std/primitive.array.html
/// [`slice`]: https://doc.rust-lang.org/std/slice/index.html
/// [`slice::iter`]: https://doc.rust-lang.org/std/primitive.slice.html#method.iter
/// [`ToTokens`]: https://docs.rs/proc-quote/0/proc_quote/trait.ToTokens.html
/// [`IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
/// [`IntoIterator::into_iter`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html#tymethod.into_iter
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote!`
/// invocation are spanned with [`Span::call_site()`].
///
/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.Span.html#method.call_site
///
/// A different span can be provided through the [`quote_spanned!`] macro.
///
/// [`quote_spanned!`]: macro.quote_spanned.html
///
/// # Return type
///
/// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
/// Meanwhile Rust procedural macros are expected to return the type
/// `proc_macro::TokenStream`.
///
/// The difference between the two types is that `proc_macro` types are entirely
/// specific to procedural macros and cannot ever exist in code outside of a
/// procedural macro, while `proc_macro2` types may exist anywhere including
/// tests and non-macro code like main.rs and build.rs. This is why even the
/// procedural macro ecosystem is largely built around `proc_macro2`, because
/// that ensures the libraries are unit testable and accessible in non-macro
/// contexts.
///
/// There is a [`From`]-conversion in both directions so returning the output of
/// `quote!` from a procedural macro usually looks like `tokens.into()` or
/// `proc_macro::TokenStream::from(tokens)`.
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
///
/// # Examples
///
/// ## Procedural macro
///
/// The structure of a basic procedural macro is as follows. Refer to the [Syn]
/// crate for further useful guidance on using `quote!` as part of a procedural
/// macro.
///
/// [Syn]: https://github.com/dtolnay/syn
///
/// ```edition2018
/// # #[cfg(any())]
/// extern crate proc_macro;
/// # use proc_macro2 as proc_macro;
///
/// use proc_macro::TokenStream;
/// use quote::quote;
///
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// #[proc_macro_derive(HeapSize)]
/// # };
/// pub fn derive_heap_size(input: TokenStream) -> TokenStream {
/// // Parse the input and figure out what implementation to generate...
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let name = /* ... */;
/// let expr = /* ... */;
/// # };
/// #
/// # let name = 0;
/// # let expr = 0;
///
/// let expanded = quote! {
/// // The generated impl.
/// impl heapsize::HeapSize for #name {
/// fn heap_size_of_children(&self) -> usize {
/// #expr
/// }
/// }
/// };
///
/// // Hand the output tokens back to the compiler.
/// TokenStream::from(expanded)
/// }
/// ```
///
/// ## Combining quoted fragments
///
/// Usually you don't end up constructing an entire final `TokenStream` in one
/// piece. Different parts may come from different helper functions. The tokens
/// produced by `quote!` themselves implement `ToTokens` and so can be
/// interpolated into later `quote!` invocations to build up a final result.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// let type_definition = quote! {...};
/// let methods = quote! {...};
///
/// let tokens = quote! {
/// #type_definition
/// #methods
/// };
/// ```
///
/// ## Constructing identifiers
///
/// Suppose we have an identifier `ident` which came from somewhere in a macro
/// input and we need to modify it in some way for the macro output. Let's
/// consider prepending the identifier with an underscore.
///
/// Simply interpolating the identifier next to an underscore will not have the
/// behavior of concatenating them. The underscore and the identifier will
/// continue to be two separate tokens as if you had written `_ x`.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// // incorrect
/// quote! {
/// let mut _#ident = 0;
/// }
/// # ;
/// ```
///
/// The solution is to perform token-level manipulations using the APIs provided
/// by Syn and proc-macro2.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// let concatenated = format!("_{}", ident);
/// let varname = syn::Ident::new(&concatenated, ident.span());
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// ## Making method calls
///
/// Let's say our macro requires some type specified in the macro input to have
/// a constructor called `new`. We have the type in a variable called
/// `field_type` of type `syn::Type` and want to invoke the constructor.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// // incorrect
/// quote! {
/// let value = #field_type::new();
/// }
/// # ;
/// ```
///
/// This works only sometimes. If `field_type` is `String`, the expanded code
/// contains `String::new()` which is fine. But if `field_type` is something
/// like `Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid
/// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
/// but for macros often the following is more convenient.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type>::new();
/// }
/// # ;
/// ```
///
/// This expands to `<Vec<i32>>::new()` which behaves correctly.
///
/// A similar pattern is appropriate for trait methods.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type as core::default::Default>::default();
/// }
/// # ;
/// ```
#[proc_macro_hack]
pub use proc_quote_impl::quote;
/// Same as `quote!`, but applies a given span to all tokens originating within
/// the macro invocation.
///
/// # Syntax
///
/// A span expression of type [`Span`], followed by `=>`, followed by the tokens
/// to quote. The span expression should be brief -- use a variable for anything
/// more than a few characters. There should be no space before the `=>` token.
///
/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html
///
/// ```edition2018
/// # use proc_macro2::Span;
/// # use quote::quote_spanned;
/// #
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let span = /* ... */;
/// # };
/// # let span = Span::call_site();
/// # let init = 0;
///
/// // On one line, use parentheses.
/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init)));
///
/// // On multiple lines, place the span at the top and use braces.
/// let tokens = quote_spanned! {span=>
/// Box::into_raw(Box::new(#init))
/// };
/// ```
///
/// The lack of space before the `=>` should look jarring to Rust programmers
/// and this is intentional. The formatting is designed to be visibly
/// off-balance and draw the eye a particular way, due to the span expression
/// being evaluated in the context of the procedural macro and the remaining
/// tokens being evaluated in the generated code.
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
/// invocation are spanned with the given span argument.
///
/// # Example
///
/// The following procedural macro code uses `quote_spanned!` to assert that a
/// particular Rust type implements the [`Sync`] trait so that references can be
/// safely shared between threads.
///
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
///
/// ```edition2018
/// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
/// # use proc_macro2::{Span, TokenStream};
/// #
/// # struct Type;
/// #
/// # impl Type {
/// # fn span(&self) -> Span {
/// # Span::call_site()
/// # }
/// # }
/// #
/// # impl ToTokens for Type {
/// # fn to_tokens(&self, _tokens: &mut TokenStream) {}
/// # }
/// #
/// # let ty = Type;
/// # let call_site = Span::call_site();
/// #
/// let ty_span = ty.span();
/// let assert_sync = quote_spanned! {ty_span=>
/// struct _AssertSync where #ty: Sync;
/// };
/// ```
///
/// If the assertion fails, the user will see an error like the following. The
/// input span of their type is hightlighted in the error.
///
/// ```text
/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
/// --> src/main.rs:10:21
/// |
/// 10 | static ref PTR: *const () = &();
/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely
/// ```
///
/// In this example it is important for the where-clause to be spanned with the
/// line/column information of the user's input type so that error messages are
/// placed appropriately by the compiler. But it is also incredibly important
/// that `Sync` resolves at the macro definition site and not the macro call
/// site. If we resolve `Sync` at the same span that the user's type is going to
/// be resolved, then they could bypass our check by defining their own trait
/// named `Sync` that is implemented for their type.
#[proc_macro_hack]
pub use proc_quote_impl::quote_spanned;
// Not public API.
#[doc(hidden)]
pub mod __rt {
use super::*;
pub use proc_macro2::*;
pub fn append_ident(stream: &mut TokenStream, ident: &str, span: Span) {
// TODO(blocked on rust-lang/rust#54723)
// https://github.com/rust-lang/rust/issues/54723
// Use `new_raw` once it's stabilized
// stream.append(Ident::new_raw(ident, span));
match syn::parse_str::<Ident>(ident) {
Ok(mut ident) => {
ident.set_span(span);
stream.append(ident);
}
Err(_) => stream.append(Ident::new(ident, span)),
}
}
pub fn append_punct(stream: &mut TokenStream, punct: char, spacing: Spacing, span: Span) {
let mut punct = Punct::new(punct, spacing);
punct.set_span(span);
stream.append(punct);
}
pub fn append_stringified_tokens(stream: &mut TokenStream, s: &str, span: Span) {
| b fn append_to_tokens<T: ToTokens>(stream: &mut TokenStream, to_tokens: &T) {
to_tokens.to_tokens(stream);
}
pub fn append_group(
stream: &mut TokenStream,
inner: TokenStream,
delimiter: Delimiter,
span: Span,
) {
let mut group = Group::new(delimiter, inner);
group.set_span(span);
stream.append(group);
}
}
| let s: TokenStream = s.parse().expect("invalid token stream");
stream.extend(s.into_iter().map(|mut t| {
t.set_span(span);
t
}));
}
pu | identifier_body |
lib.rs | //! This crate provides the [`quote!`] macro for turning Rust syntax tree data
//! structures into tokens of source code.
//!
//! [`quote!`]: macro.quote.html
//!
//! Procedural macros in Rust receive a stream of tokens as input, execute
//! arbitrary Rust code to determine how to manipulate those tokens, and produce
//! a stream of tokens to hand back to the compiler to compile into the caller's
//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens
//! to return to the compiler.
//!
//! The idea of quasi-quoting is that we write *code* that we treat as *data*.
//! Within the `quote!` macro, we can write what looks like code to our text
//! editor or IDE. We get all the benefits of the editor's brace matching,
//! syntax highlighting, indentation, and maybe autocompletion. But rather than
//! compiling that as code into the current crate, we can treat it as data, pass
//! it around, mutate it, and eventually hand it back to the compiler as tokens
//! to compile into the macro caller's crate.
//!
//! This crate is motivated by the procedural macro use case, but it is a
//! general-purpose Rust quasi-quoting library and is not specific to procedural
//! macros.
//!
//! # Example
//!
//! The following quasi-quoted block of code is something you might find in [a]
//! procedural macro having to do with data structure serialization. The `#var`
//! syntax performs interpolation of runtime variables into the quoted tokens.
//! Check out the documentation of the [`quote!`] macro for more detail about
//! the syntax. See also the [`quote_spanned!`] macro which is important for
//! implementing hygienic procedural macros.
//!
//! [a]: https://serde.rs/
//! [`quote_spanned!`]: macro.quote_spanned.html
//!
//! ```edition2018
//! # use quote::quote;
//! #
//! # let generics = "";
//! # let where_clause = "";
//! # let field_ty = "";
//! # let item_ty = "";
//! # let path = "";
//! # let value = "";
//! #
//! let tokens = quote! {
//! struct SerializeWith #generics #where_clause {
//! value: &'a #field_ty,
//! phantom: core::marker::PhantomData<#item_ty>,
//! }
//!
//! impl #generics serde::Serialize for SerializeWith #generics #where_clause {
//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//! where
//! S: serde::Serializer,
//! {
//! #path(self.value, serializer)
//! }
//! }
//!
//! SerializeWith {
//! value: #value,
//! phantom: core::marker::PhantomData::<#item_ty>,
//! }
//! };
//! ```
use proc_macro_hack::proc_macro_hack;
mod repeat;
pub use self::repeat::*;
pub use quote::ToTokens;
pub use quote::TokenStreamExt;
/// The whole point.
///
/// Performs variable interpolation against the input and produces it as
/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use
/// `into()` to build a `TokenStream`.
///
/// [`TokenStream`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.TokenStream.html
///
/// # Interpolation
///
/// Variable interpolation is done with `#var` (similar to `$var` in
/// `macro_rules!` macros). This grabs the `var` variable that is currently in
/// scope and inserts it in that location in the output tokens. Any type
/// implementing the [`ToTokens`] trait can be interpolated. This includes most
/// Rust primitive types as well as most of the syntax tree types from the [Syn]
/// crate.
///
/// [`ToTokens`]: trait.ToTokens.html
/// [Syn]: https://github.com/dtolnay/syn
///
/// Repetition is done using `#(...)*` or `#(...),*` again similar to
/// `macro_rules!`. This iterates through the elements of any variable
/// interpolated within the repetition and inserts a copy of the repetition
/// body for each one.
///
/// - `#(#var)*` — no separators
/// - `#(#var),*` — the character before the asterisk is used as a separator
/// - `#( struct #var; )*` — the repetition can contain other tokens
/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
/// - `#(let #var = self.#var;)*` - the same variable can be used more than once
///
/// The [`proc_quote::Repeat`](https://docs.rs/proc-quote/0/proc_quote/trait.Repeat.html)
/// trait defines which types are allowed to be interpolated inside a repition pattern.
///
/// Which types that implement the following traits *do* `Repeat`:
/// - [`Iterator<T>`] consumes the iterator, iterating through every element.
/// - <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">`Borrow<[T]>`</a>
/// (includes [`Vec`], [`array`], and [`slice`]) iterates with the [`slice::iter`] method,
/// thus not consuming the original data.
/// - [`ToTokens`], interpolates the variable in every iteration.
///
/// Which types *do NOT* `Repeat`:
/// - [`IntoIterator`], to avoid ambiguity (Ex. "Which behavior would have been used for [`Vec`],
/// which implements both [`IntoIterator`] and <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">
/// `Borrow<[T]>`</a>?"; "Which behavior would have been used for [`TokenStream`], which implements both
/// [`IntoIterator`] and [`ToTokens`]?"). To use the iterator, you may call [`IntoIterator::into_iter`]
/// explicitly.
/// - Ambiguous types that implement at least two of the `Repeat` traits. In the very unlikely case
/// this happens, disambiguate the type by wrapping it under some structure that only implements the
/// trait you desire to use.
///
/// [`Iterator<T>`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
/// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
/// [`array`]: https://doc.rust-lang.org/std/primitive.array.html
/// [`slice`]: https://doc.rust-lang.org/std/slice/index.html
/// [`slice::iter`]: https://doc.rust-lang.org/std/primitive.slice.html#method.iter
/// [`ToTokens`]: https://docs.rs/proc-quote/0/proc_quote/trait.ToTokens.html
/// [`IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
/// [`IntoIterator::into_iter`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html#tymethod.into_iter
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote!`
/// invocation are spanned with [`Span::call_site()`].
///
/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.Span.html#method.call_site
///
/// A different span can be provided through the [`quote_spanned!`] macro.
///
/// [`quote_spanned!`]: macro.quote_spanned.html
///
/// # Return type
///
/// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
/// Meanwhile Rust procedural macros are expected to return the type
/// `proc_macro::TokenStream`.
///
/// The difference between the two types is that `proc_macro` types are entirely
/// specific to procedural macros and cannot ever exist in code outside of a
/// procedural macro, while `proc_macro2` types may exist anywhere including
/// tests and non-macro code like main.rs and build.rs. This is why even the
/// procedural macro ecosystem is largely built around `proc_macro2`, because
/// that ensures the libraries are unit testable and accessible in non-macro
/// contexts.
///
/// There is a [`From`]-conversion in both directions so returning the output of
/// `quote!` from a procedural macro usually looks like `tokens.into()` or
/// `proc_macro::TokenStream::from(tokens)`.
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
///
/// # Examples
///
/// ## Procedural macro
///
/// The structure of a basic procedural macro is as follows. Refer to the [Syn]
/// crate for further useful guidance on using `quote!` as part of a procedural
/// macro.
///
/// [Syn]: https://github.com/dtolnay/syn
///
/// ```edition2018
/// # #[cfg(any())]
/// extern crate proc_macro;
/// # use proc_macro2 as proc_macro;
///
/// use proc_macro::TokenStream;
/// use quote::quote;
///
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// #[proc_macro_derive(HeapSize)]
/// # };
/// pub fn derive_heap_size(input: TokenStream) -> TokenStream {
/// // Parse the input and figure out what implementation to generate...
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let name = /* ... */;
/// let expr = /* ... */;
/// # };
/// #
/// # let name = 0;
/// # let expr = 0;
///
/// let expanded = quote! {
/// // The generated impl.
/// impl heapsize::HeapSize for #name {
/// fn heap_size_of_children(&self) -> usize {
/// #expr
/// }
/// }
/// };
///
/// // Hand the output tokens back to the compiler.
/// TokenStream::from(expanded)
/// }
/// ```
///
/// ## Combining quoted fragments
///
/// Usually you don't end up constructing an entire final `TokenStream` in one
/// piece. Different parts may come from different helper functions. The tokens
/// produced by `quote!` themselves implement `ToTokens` and so can be
/// interpolated into later `quote!` invocations to build up a final result.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// let type_definition = quote! {...};
/// let methods = quote! {...};
///
/// let tokens = quote! {
/// #type_definition
/// #methods
/// };
/// ```
///
/// ## Constructing identifiers
///
/// Suppose we have an identifier `ident` which came from somewhere in a macro
/// input and we need to modify it in some way for the macro output. Let's
/// consider prepending the identifier with an underscore.
///
/// Simply interpolating the identifier next to an underscore will not have the
/// behavior of concatenating them. The underscore and the identifier will
/// continue to be two separate tokens as if you had written `_ x`.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// // incorrect
/// quote! {
/// let mut _#ident = 0;
/// }
/// # ;
/// ```
///
/// The solution is to perform token-level manipulations using the APIs provided
/// by Syn and proc-macro2.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// let concatenated = format!("_{}", ident);
/// let varname = syn::Ident::new(&concatenated, ident.span());
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// ## Making method calls
///
/// Let's say our macro requires some type specified in the macro input to have
/// a constructor called `new`. We have the type in a variable called
/// `field_type` of type `syn::Type` and want to invoke the constructor.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// // incorrect
/// quote! {
/// let value = #field_type::new();
/// }
/// # ;
/// ```
///
/// This works only sometimes. If `field_type` is `String`, the expanded code
/// contains `String::new()` which is fine. But if `field_type` is something
/// like `Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid
/// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
/// but for macros often the following is more convenient.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type>::new();
/// }
/// # ;
/// ```
///
/// This expands to `<Vec<i32>>::new()` which behaves correctly.
///
/// A similar pattern is appropriate for trait methods.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type as core::default::Default>::default();
/// }
/// # ;
/// ```
#[proc_macro_hack]
pub use proc_quote_impl::quote;
/// Same as `quote!`, but applies a given span to all tokens originating within
/// the macro invocation.
///
/// # Syntax
///
/// A span expression of type [`Span`], followed by `=>`, followed by the tokens
/// to quote. The span expression should be brief -- use a variable for anything
/// more than a few characters. There should be no space before the `=>` token.
///
/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html
///
/// ```edition2018
/// # use proc_macro2::Span;
/// # use quote::quote_spanned;
/// #
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let span = /* ... */;
/// # };
/// # let span = Span::call_site();
/// # let init = 0;
///
/// // On one line, use parentheses.
/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init)));
///
/// // On multiple lines, place the span at the top and use braces.
/// let tokens = quote_spanned! {span=>
/// Box::into_raw(Box::new(#init))
/// };
/// ```
///
/// The lack of space before the `=>` should look jarring to Rust programmers
/// and this is intentional. The formatting is designed to be visibly
/// off-balance and draw the eye a particular way, due to the span expression
/// being evaluated in the context of the procedural macro and the remaining
/// tokens being evaluated in the generated code.
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
/// invocation are spanned with the given span argument.
///
/// # Example
///
/// The following procedural macro code uses `quote_spanned!` to assert that a
/// particular Rust type implements the [`Sync`] trait so that references can be
/// safely shared between threads.
///
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
///
/// ```edition2018
/// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
/// # use proc_macro2::{Span, TokenStream};
/// #
/// # struct Type;
/// #
/// # impl Type {
/// # fn span(&self) -> Span {
/// # Span::call_site()
/// # }
/// # }
/// #
/// # impl ToTokens for Type {
/// # fn to_tokens(&self, _tokens: &mut TokenStream) {}
/// # }
/// #
/// # let ty = Type;
/// # let call_site = Span::call_site();
/// #
/// let ty_span = ty.span();
/// let assert_sync = quote_spanned! {ty_span=>
/// struct _AssertSync where #ty: Sync;
/// };
/// ```
///
/// If the assertion fails, the user will see an error like the following. The
/// input span of their type is hightlighted in the error.
///
/// ```text
/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
/// --> src/main.rs:10:21
/// |
/// 10 | static ref PTR: *const () = &();
/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely
/// ```
///
/// In this example it is important for the where-clause to be spanned with the
/// line/column information of the user's input type so that error messages are
/// placed appropriately by the compiler. But it is also incredibly important
/// that `Sync` resolves at the macro definition site and not the macro call
/// site. If we resolve `Sync` at the same span that the user's type is going to
/// be resolved, then they could bypass our check by defining their own trait
/// named `Sync` that is implemented for their type.
#[proc_macro_hack]
pub use proc_quote_impl::quote_spanned;
// Not public API.
#[doc(hidden)]
pub mod __rt {
use super::*;
pub use proc_macro2::*;
pub fn append_ident(stream: &mut TokenStream, ident: &str, span: Span) {
// TODO(blocked on rust-lang/rust#54723)
// https://github.com/rust-lang/rust/issues/54723
// Use `new_raw` once it's stabilized
// stream.append(Ident::new_raw(ident, span));
match syn::parse_str::<Ident>(ident) {
Ok(mut ident) => {
ident.set_span(span);
stream.append(ident);
}
Err(_) => stream.append(Ident::new(ident, span)),
}
}
pub fn append_p | &mut TokenStream, punct: char, spacing: Spacing, span: Span) {
let mut punct = Punct::new(punct, spacing);
punct.set_span(span);
stream.append(punct);
}
pub fn append_stringified_tokens(stream: &mut TokenStream, s: &str, span: Span) {
let s: TokenStream = s.parse().expect("invalid token stream");
stream.extend(s.into_iter().map(|mut t| {
t.set_span(span);
t
}));
}
pub fn append_to_tokens<T: ToTokens>(stream: &mut TokenStream, to_tokens: &T) {
to_tokens.to_tokens(stream);
}
pub fn append_group(
stream: &mut TokenStream,
inner: TokenStream,
delimiter: Delimiter,
span: Span,
) {
let mut group = Group::new(delimiter, inner);
group.set_span(span);
stream.append(group);
}
}
| unct(stream: | identifier_name |
lib.rs | //! This crate provides the [`quote!`] macro for turning Rust syntax tree data
//! structures into tokens of source code.
//!
//! [`quote!`]: macro.quote.html
//!
//! Procedural macros in Rust receive a stream of tokens as input, execute
//! arbitrary Rust code to determine how to manipulate those tokens, and produce
//! a stream of tokens to hand back to the compiler to compile into the caller's
//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens
//! to return to the compiler.
//!
//! The idea of quasi-quoting is that we write *code* that we treat as *data*.
//! Within the `quote!` macro, we can write what looks like code to our text
//! editor or IDE. We get all the benefits of the editor's brace matching,
//! syntax highlighting, indentation, and maybe autocompletion. But rather than
//! compiling that as code into the current crate, we can treat it as data, pass
//! it around, mutate it, and eventually hand it back to the compiler as tokens
//! to compile into the macro caller's crate.
//!
//! This crate is motivated by the procedural macro use case, but it is a
//! general-purpose Rust quasi-quoting library and is not specific to procedural
//! macros.
//!
//! # Example
//!
//! The following quasi-quoted block of code is something you might find in [a]
//! procedural macro having to do with data structure serialization. The `#var`
//! syntax performs interpolation of runtime variables into the quoted tokens.
//! Check out the documentation of the [`quote!`] macro for more detail about
//! the syntax. See also the [`quote_spanned!`] macro which is important for
//! implementing hygienic procedural macros.
//!
//! [a]: https://serde.rs/
//! [`quote_spanned!`]: macro.quote_spanned.html
//!
//! ```edition2018
//! # use quote::quote;
//! #
//! # let generics = "";
//! # let where_clause = "";
//! # let field_ty = "";
//! # let item_ty = "";
//! # let path = "";
//! # let value = "";
//! #
//! let tokens = quote! {
//! struct SerializeWith #generics #where_clause {
//! value: &'a #field_ty,
//! phantom: core::marker::PhantomData<#item_ty>,
//! }
//!
//! impl #generics serde::Serialize for SerializeWith #generics #where_clause {
//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//! where
//! S: serde::Serializer,
//! {
//! #path(self.value, serializer)
//! }
//! }
//!
//! SerializeWith {
//! value: #value,
//! phantom: core::marker::PhantomData::<#item_ty>,
//! }
//! };
//! ```
use proc_macro_hack::proc_macro_hack;
mod repeat;
pub use self::repeat::*;
pub use quote::ToTokens;
pub use quote::TokenStreamExt;
/// The whole point.
///
/// Performs variable interpolation against the input and produces it as
/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use
/// `into()` to build a `TokenStream`.
///
/// [`TokenStream`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.TokenStream.html
///
/// # Interpolation
///
/// Variable interpolation is done with `#var` (similar to `$var` in
/// `macro_rules!` macros). This grabs the `var` variable that is currently in
/// scope and inserts it in that location in the output tokens. Any type
/// implementing the [`ToTokens`] trait can be interpolated. This includes most
/// Rust primitive types as well as most of the syntax tree types from the [Syn]
/// crate.
///
/// [`ToTokens`]: trait.ToTokens.html
/// [Syn]: https://github.com/dtolnay/syn
///
/// Repetition is done using `#(...)*` or `#(...),*` again similar to
/// `macro_rules!`. This iterates through the elements of any variable
/// interpolated within the repetition and inserts a copy of the repetition
/// body for each one.
///
/// - `#(#var)*` — no separators
/// - `#(#var),*` — the character before the asterisk is used as a separator
/// - `#( struct #var; )*` — the repetition can contain other tokens
/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
/// - `#(let #var = self.#var;)*` - the same variable can be used more than once
///
/// The [`proc_quote::Repeat`](https://docs.rs/proc-quote/0/proc_quote/trait.Repeat.html)
/// trait defines which types are allowed to be interpolated inside a repetition pattern.
///
/// Types that implement the following traits *do* `Repeat`:
/// - [`Iterator<T>`] consumes the iterator, iterating through every element.
/// - <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">`Borrow<[T]>`</a>
/// (includes [`Vec`], [`array`], and [`slice`]) iterates with the [`slice::iter`] method,
/// thus not consuming the original data.
/// - [`ToTokens`], interpolates the variable in every iteration.
///
/// Which types *do NOT* `Repeat`:
/// - [`IntoIterator`], to avoid ambiguity (Ex. "Which behavior would have been used for [`Vec`],
/// which implements both [`IntoIterator`] and <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">
/// `Borrow<[T]>`</a>?"; "Which behavior would have been used for [`TokenStream`], which implements both
/// [`IntoIterator`] and [`ToTokens`]?"). To use the iterator, you may call [`IntoIterator::into_iter`]
/// explicitly.
/// - Ambiguous types that implement at least two of the `Repeat` traits. In the very unlikely case
/// this happens, disambiguate the type by wrapping it under some structure that only implements the
/// trait you desire to use.
///
/// [`Iterator<T>`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
/// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
/// [`array`]: https://doc.rust-lang.org/std/primitive.array.html
/// [`slice`]: https://doc.rust-lang.org/std/slice/index.html
/// [`slice::iter`]: https://doc.rust-lang.org/std/primitive.slice.html#method.iter
/// [`ToTokens`]: https://docs.rs/proc-quote/0/proc_quote/trait.ToTokens.html
/// [`IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
/// [`IntoIterator::into_iter`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html#tymethod.into_iter
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote!`
/// invocation are spanned with [`Span::call_site()`].
///
/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.Span.html#method.call_site
///
/// A different span can be provided through the [`quote_spanned!`] macro.
///
/// [`quote_spanned!`]: macro.quote_spanned.html
///
/// # Return type
///
/// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
/// Meanwhile Rust procedural macros are expected to return the type
/// `proc_macro::TokenStream`.
///
/// The difference between the two types is that `proc_macro` types are entirely
/// specific to procedural macros and cannot ever exist in code outside of a
/// procedural macro, while `proc_macro2` types may exist anywhere including
/// tests and non-macro code like main.rs and build.rs. This is why even the
/// procedural macro ecosystem is largely built around `proc_macro2`, because
/// that ensures the libraries are unit testable and accessible in non-macro
/// contexts.
///
/// There is a [`From`]-conversion in both directions so returning the output of
/// `quote!` from a procedural macro usually looks like `tokens.into()` or
/// `proc_macro::TokenStream::from(tokens)`.
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
///
/// # Examples
///
/// ## Procedural macro
///
/// The structure of a basic procedural macro is as follows. Refer to the [Syn]
/// crate for further useful guidance on using `quote!` as part of a procedural
/// macro.
///
/// [Syn]: https://github.com/dtolnay/syn
///
/// ```edition2018
/// # #[cfg(any())]
/// extern crate proc_macro;
/// # use proc_macro2 as proc_macro;
///
/// use proc_macro::TokenStream;
/// use quote::quote;
///
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// #[proc_macro_derive(HeapSize)]
/// # };
/// pub fn derive_heap_size(input: TokenStream) -> TokenStream {
/// // Parse the input and figure out what implementation to generate...
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let name = /* ... */;
/// let expr = /* ... */;
/// # };
/// #
/// # let name = 0;
/// # let expr = 0;
///
/// let expanded = quote! {
/// // The generated impl.
/// impl heapsize::HeapSize for #name {
/// fn heap_size_of_children(&self) -> usize {
/// #expr
/// }
/// }
/// };
///
/// // Hand the output tokens back to the compiler.
/// TokenStream::from(expanded)
/// }
/// ```
///
/// ## Combining quoted fragments
///
/// Usually you don't end up constructing an entire final `TokenStream` in one
/// piece. Different parts may come from different helper functions. The tokens
/// produced by `quote!` themselves implement `ToTokens` and so can be
/// interpolated into later `quote!` invocations to build up a final result.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// let type_definition = quote! {...};
/// let methods = quote! {...};
///
/// let tokens = quote! {
/// #type_definition
/// #methods
/// };
/// ```
///
/// ## Constructing identifiers
///
/// Suppose we have an identifier `ident` which came from somewhere in a macro
/// input and we need to modify it in some way for the macro output. Let's
/// consider prepending the identifier with an underscore.
///
/// Simply interpolating the identifier next to an underscore will not have the
/// behavior of concatenating them. The underscore and the identifier will
/// continue to be two separate tokens as if you had written `_ x`.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// // incorrect | /// # ;
/// ```
///
/// The solution is to perform token-level manipulations using the APIs provided
/// by Syn and proc-macro2.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// let concatenated = format!("_{}", ident);
/// let varname = syn::Ident::new(&concatenated, ident.span());
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// ## Making method calls
///
/// Let's say our macro requires some type specified in the macro input to have
/// a constructor called `new`. We have the type in a variable called
/// `field_type` of type `syn::Type` and want to invoke the constructor.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// // incorrect
/// quote! {
/// let value = #field_type::new();
/// }
/// # ;
/// ```
///
/// This works only sometimes. If `field_type` is `String`, the expanded code
/// contains `String::new()` which is fine. But if `field_type` is something
/// like `Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid
/// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
/// but for macros often the following is more convenient.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type>::new();
/// }
/// # ;
/// ```
///
/// This expands to `<Vec<i32>>::new()` which behaves correctly.
///
/// A similar pattern is appropriate for trait methods.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type as core::default::Default>::default();
/// }
/// # ;
/// ```
#[proc_macro_hack]
pub use proc_quote_impl::quote;
/// Same as `quote!`, but applies a given span to all tokens originating within
/// the macro invocation.
///
/// # Syntax
///
/// A span expression of type [`Span`], followed by `=>`, followed by the tokens
/// to quote. The span expression should be brief -- use a variable for anything
/// more than a few characters. There should be no space before the `=>` token.
///
/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html
///
/// ```edition2018
/// # use proc_macro2::Span;
/// # use quote::quote_spanned;
/// #
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let span = /* ... */;
/// # };
/// # let span = Span::call_site();
/// # let init = 0;
///
/// // On one line, use parentheses.
/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init)));
///
/// // On multiple lines, place the span at the top and use braces.
/// let tokens = quote_spanned! {span=>
/// Box::into_raw(Box::new(#init))
/// };
/// ```
///
/// The lack of space before the `=>` should look jarring to Rust programmers
/// and this is intentional. The formatting is designed to be visibly
/// off-balance and draw the eye a particular way, due to the span expression
/// being evaluated in the context of the procedural macro and the remaining
/// tokens being evaluated in the generated code.
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
/// invocation are spanned with the given span argument.
///
/// # Example
///
/// The following procedural macro code uses `quote_spanned!` to assert that a
/// particular Rust type implements the [`Sync`] trait so that references can be
/// safely shared between threads.
///
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
///
/// ```edition2018
/// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
/// # use proc_macro2::{Span, TokenStream};
/// #
/// # struct Type;
/// #
/// # impl Type {
/// # fn span(&self) -> Span {
/// # Span::call_site()
/// # }
/// # }
/// #
/// # impl ToTokens for Type {
/// # fn to_tokens(&self, _tokens: &mut TokenStream) {}
/// # }
/// #
/// # let ty = Type;
/// # let call_site = Span::call_site();
/// #
/// let ty_span = ty.span();
/// let assert_sync = quote_spanned! {ty_span=>
/// struct _AssertSync where #ty: Sync;
/// };
/// ```
///
/// If the assertion fails, the user will see an error like the following. The
/// input span of their type is highlighted in the error.
///
/// ```text
/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
/// --> src/main.rs:10:21
/// |
/// 10 | static ref PTR: *const () = &();
/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely
/// ```
///
/// In this example it is important for the where-clause to be spanned with the
/// line/column information of the user's input type so that error messages are
/// placed appropriately by the compiler. But it is also incredibly important
/// that `Sync` resolves at the macro definition site and not the macro call
/// site. If we resolve `Sync` at the same span that the user's type is going to
/// be resolved, then they could bypass our check by defining their own trait
/// named `Sync` that is implemented for their type.
#[proc_macro_hack]
pub use proc_quote_impl::quote_spanned;
// Not public API.
#[doc(hidden)]
pub mod __rt {
use super::*;
pub use proc_macro2::*;
/// Appends `ident` to `stream` as an `Ident` token carrying `span`.
///
/// The string is first parsed through `syn` — presumably so raw
/// identifiers (`r#ident`) round-trip, as the TODO about `Ident::new_raw`
/// suggests; confirm once `new_raw` is stabilized. On parse failure the
/// string is appended as a plain `Ident` (which panics on invalid input).
pub fn append_ident(stream: &mut TokenStream, ident: &str, span: Span) {
// TODO(blocked on rust-lang/rust#54723)
// https://github.com/rust-lang/rust/issues/54723
// Use `new_raw` once it's stabilized
// stream.append(Ident::new_raw(ident, span));
match syn::parse_str::<Ident>(ident) {
Ok(mut ident) => {
// Re-span the parsed identifier before appending.
ident.set_span(span);
stream.append(ident);
}
Err(_) => stream.append(Ident::new(ident, span)),
}
}
/// Appends a single punctuation character `punct` with the given
/// `spacing` (joint/alone) and `span` to `stream`.
pub fn append_punct(stream: &mut TokenStream, punct: char, spacing: Spacing, span: Span) {
let mut punct = Punct::new(punct, spacing);
punct.set_span(span);
stream.append(punct);
}
/// Parses `s` as a `TokenStream` and appends every token to `stream`,
/// re-spanning each top-level token with `span`.
///
/// Panics (via `expect`) if `s` is not a valid token stream.
pub fn append_stringified_tokens(stream: &mut TokenStream, s: &str, span: Span) {
let s: TokenStream = s.parse().expect("invalid token stream");
stream.extend(s.into_iter().map(|mut t| {
t.set_span(span);
t
}));
}
/// Appends `to_tokens` to `stream` via its `ToTokens` implementation.
pub fn append_to_tokens<T: ToTokens>(stream: &mut TokenStream, to_tokens: &T) {
to_tokens.to_tokens(stream);
}
/// Wraps `inner` in a `Group` delimited by `delimiter`, sets the group's
/// span to `span` and appends it to `stream`.
pub fn append_group(
stream: &mut TokenStream,
inner: TokenStream,
delimiter: Delimiter,
span: Span,
) {
let mut group = Group::new(delimiter, inner);
group.set_span(span);
stream.append(group);
}
} | /// quote! {
/// let mut _#ident = 0;
/// } | random_line_split |
miningManager.go | // Copyright (c) The Tellor Authors.
// Licensed under the MIT License.
package ops
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"os"
"strconv"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
tellorCommon "github.com/tellor-io/telliot/pkg/common"
"github.com/tellor-io/telliot/pkg/config"
"github.com/tellor-io/telliot/pkg/contracts"
"github.com/tellor-io/telliot/pkg/db"
"github.com/tellor-io/telliot/pkg/logging"
"github.com/tellor-io/telliot/pkg/pow"
"github.com/tellor-io/telliot/pkg/rpc"
"github.com/tellor-io/telliot/pkg/tracker"
)
// WorkSource provides the next PoW challenge to mine.
// The boolean result reports whether the work can be submitted instantly
// (see the tasker implementation for the exact semantics).
type WorkSource interface {
GetWork() (*pow.Work, bool)
}
// SolutionSink submits a mined solution to the oracle contract and
// returns the resulting transaction.
type SolutionSink interface {
Submit(context.Context, *pow.Result) (*types.Transaction, error)
}
// MiningMgr manages mining, submitting a solution and requesting data.
// In the tellor contract a solution is saved in slots where a value is valid only when it has 5 confirmed slots.
// The manager tracks tx costs and when profitThreshold is set it skips any transactions below the profit threshold.
// The profit is calculated the same way as in the Tellor contract.
// Transaction cost for submitting in each slot might be different so because of this
// the manager needs to complete few transaction to gather the tx cost for each slot.
type MiningMgr struct {
exitCh chan os.Signal
logger log.Logger
Running bool
ethClient contracts.ETHClient
group *pow.MiningGroup
tasker WorkSource
solHandler SolutionSink
// solutionPending holds the last unsubmitted solution so it can be
// retried while no new challenge is available.
solutionPending *pow.Result
database db.DataServerProxy
contractInstance *contracts.ITellor
cfg *config.Config
// Channels connecting the manager with the mining group.
toMineInput chan *pow.Work
solutionOutput chan *pow.Result
// Prometheus metrics for submissions.
submitCount prometheus.Counter
submitFailCount prometheus.Counter
submitProfit *prometheus.GaugeVec
submitCost *prometheus.GaugeVec
submitReward *prometheus.GaugeVec
}
// CreateMiningManager is the MiningMgr constructor.
// It wires up the mining group, an ETH client, the tasker that fetches
// work and the handler that submits solutions, and registers the
// Prometheus submission metrics.
func CreateMiningManager(
logger log.Logger,
exitCh chan os.Signal,
cfg *config.Config,
database db.DataServerProxy,
contract *contracts.ITellor,
account *rpc.Account,
) (*MiningMgr, error) {
group, err := pow.SetupMiningGroup(logger, cfg, exitCh)
if err != nil {
return nil, errors.Wrap(err, "setup miners")
}
client, err := rpc.NewClient(logger, cfg, os.Getenv(config.NodeURLEnvName))
if err != nil {
return nil, errors.Wrap(err, "creating client")
}
// NOTE(review): a fresh contract instance is created here and used for
// the tasker/state reads, while the passed-in `contract` is used for the
// submitter below — confirm this split is intentional.
contractInstance, err := contracts.NewITellor(client)
if err != nil {
return nil, errors.Wrap(err, "getting addresses")
}
logger, err = logging.ApplyFilter(*cfg, ComponentName, logger)
if err != nil {
return nil, errors.Wrap(err, "apply filter logger")
}
submitter := NewSubmitter(logger, cfg, client, contract, account)
mng := &MiningMgr{
exitCh: exitCh,
logger: log.With(logger, "component", ComponentName),
Running: false,
group: group,
tasker: nil,
solutionPending: nil,
solHandler: nil,
contractInstance: contractInstance,
cfg: cfg,
database: database,
ethClient: client,
toMineInput: make(chan *pow.Work),
solutionOutput: make(chan *pow.Result),
submitCount: promauto.NewCounter(prometheus.CounterOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_total",
Help: "The total number of submitted solutions",
}),
submitFailCount: promauto.NewCounter(prometheus.CounterOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_fails_total",
Help: "The total number of failed submission",
}),
submitProfit: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_profit",
Help: "The current submit profit in percents",
},
[]string{"slot"},
),
submitCost: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_cost",
Help: "The current submit cost in 1e18 eth",
},
[]string{"slot"},
),
submitReward: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_reward",
Help: "The current reward in 1e18 eth",
},
[]string{"slot"},
),
}
// The tasker and solution handler are created after the struct so they
// can share the freshly created contract instance and database.
mng.tasker = pow.CreateTasker(logger, cfg, mng.contractInstance, database)
mng.solHandler = pow.CreateSolutionHandler(cfg, logger, submitter, database)
return mng, nil
}
// Start will start the mining run loop.
// It blocks until a signal arrives on exitCh, coordinating the tasker,
// the mining group and the solution submitter. A found solution is kept
// as pending and retried until a submission succeeds or a new challenge
// replaces it.
func (mgr *MiningMgr) Start(ctx context.Context) {
mgr.Running = true
ticker := time.NewTicker(mgr.cfg.Mine.MiningInterruptCheckInterval.Duration)
// Start the mining group.
go mgr.group.Mine(mgr.toMineInput, mgr.solutionOutput)
for {
select {
// Boss wants us to quit for the day.
case <-mgr.exitCh:
mgr.Running = false
return
// Found a solution.
case solution := <-mgr.solutionOutput:
// There is no new challenge so resend any pending solution.
if solution == nil {
if mgr.solutionPending == nil {
continue
}
solution = mgr.solutionPending
var ids []int64
for _, id := range mgr.solutionPending.Work.Challenge.RequestIDs {
ids = append(ids, id.Int64())
}
level.Debug(mgr.logger).Log("msg", "re-submitting a pending solution", "reqIDs", fmt.Sprintf("%+v", ids))
}
// Set this solution as pending so that if
// any of the checks below fail and will be retried
// when there is no new challenge.
mgr.solutionPending = solution
// The profit error is deliberately inspected only when a threshold is
// configured; the call itself is unconditional to keep metrics fresh.
profitPercent, err := mgr.profit() // Call it regardless of whether we use so that is sets the exposed metrics.
if mgr.cfg.Mine.ProfitThreshold > 0 {
if err != nil {
level.Error(mgr.logger).Log("msg", "submit solution profit check", "err", err)
continue
}
if profitPercent < int64(mgr.cfg.Mine.ProfitThreshold) {
level.Debug(mgr.logger).Log("msg", "transaction not profitable, so will wait for the next cycle")
continue
}
}
// Rate-limit submissions; on a lookup error submission proceeds anyway.
lastSubmit, err := mgr.lastSubmit()
if err != nil {
level.Error(mgr.logger).Log("msg", "checking last submit time", "err", err)
} else if lastSubmit < mgr.cfg.Mine.MinSubmitPeriod.Duration {
level.Debug(mgr.logger).Log("msg", "min transaction submit threshold hasn't passed", "minSubmitPeriod", mgr.cfg.Mine.MinSubmitPeriod, "lastSubmit", lastSubmit)
continue
}
tx, err := mgr.solHandler.Submit(ctx, solution)
if err != nil {
level.Error(mgr.logger).Log("msg", "submiting a solution", "err", err)
mgr.submitFailCount.Inc()
continue
}
level.Debug(mgr.logger).Log("msg", "submited a solution", "txHash", tx.Hash().String())
mgr.saveGasUsed(ctx, tx)
mgr.submitCount.Inc()
// A solution has been submitted so the
// pending solution doesn't matter here any more so reset it.
mgr.solutionPending = nil
// Time to check for a new challenge.
case <-ticker.C:
mgr.newWork()
}
}
}
// newWork is a non-blocking worker that forwards new work to the pow
// workers, or signals the run loop (by sending nil) when the challenge
// hasn't changed so any pending solution can be re-submitted.
func (mgr *MiningMgr) newWork() {
	go func() {
		work, instantSubmit := mgr.tasker.GetWork()

		// instantSubmit means 15 mins have passed so the difficulty is
		// now zero and any nonce is accepted, so skip the miner entirely.
		if instantSubmit {
			mgr.solutionOutput <- &pow.Result{Work: work, Nonce: "anything will work"}
			return
		}

		// Even nil work is forwarded to indicate that no new challenge
		// is available.
		if work == nil {
			mgr.solutionOutput <- nil
			return
		}

		reqIDs := make([]int64, 0, len(work.Challenge.RequestIDs))
		for _, reqID := range work.Challenge.RequestIDs {
			reqIDs = append(reqIDs, reqID.Int64())
		}
		level.Debug(mgr.logger).Log("msg", "sending new chalenge for mining", "reqIDs", fmt.Sprintf("%+v", reqIDs))
		mgr.toMineInput <- work
	}()
}
// lastSubmit returns how long ago this miner last submitted a solution,
// read from the contract storage keyed by the miner's address.
func (mgr *MiningMgr) lastSubmit() (time.Duration, error) {
	// The contract keys the timestamp by the address left-padded to
	// 32 bytes (24 zero hex chars + the 20-byte address without "0x").
	address := "000000000000000000000000" + mgr.cfg.PublicAddress[2:]
	decoded, err := hex.DecodeString(address)
	if err != nil {
		return 0, errors.Wrap(err, "decoding address")
	}
	last, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256(decoded))
	if err != nil {
		return 0, errors.Wrapf(err, "getting last submit time for:%v", mgr.cfg.PublicAddress)
	}
	// The miner has never submitted so use a timestamp at the beginning of
	// unix time, which reads as "a very long time ago". After this mapping
	// the timestamp is always positive, so the elapsed time is always
	// computed (the original `lastInt > 0` branch was dead code).
	if last.Int64() == 0 {
		last.SetInt64(1)
	}
	return time.Since(time.Unix(last.Int64(), 0)), nil
}
// currentReward returns the current TRB reward (time-based reward plus the
// miner's share of tips) converted to ETH.
// TODO[Krasi] This is a duplicate code from the tellor conract so
// Should add `currentReward` func to the contract to avoid this code duplication.
// Tracking issue https://github.com/tellor-io/TellorCore/issues/101
func (mgr *MiningMgr) currentReward() (*big.Int, error) {
	timeOfLastNewValue, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_TIME_OF_LAST_NEW_VALUE")))
	if err != nil {
		// Wrap instead of errors.New so the underlying cause isn't lost.
		return nil, errors.Wrap(err, "getting _TIME_OF_LAST_NEW_VALUE")
	}
	totalTips, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_CURRENT_TOTAL_TIPS")))
	if err != nil {
		return nil, errors.Wrap(err, "getting _CURRENT_TOTAL_TIPS")
	}
	timeDiff := big.NewInt(time.Now().Unix() - timeOfLastNewValue.Int64())
	trb := big.NewInt(1e18)
	rewardPerSec := big.NewInt(0).Div(trb, big.NewInt(300)) // 1 TRB every 5 minutes so total reward is timeDiff multiplied by reward per second.
	rewardTRB := big.NewInt(0).Mul(rewardPerSec, timeDiff)

	singleMinerTip := big.NewInt(0).Div(totalTips, big.NewInt(10)) // Half of the tips are burned(remain in the contract) to reduce inflation.
	rewardWithTips := big.NewInt(0).Add(singleMinerTip, rewardTRB)

	// FIX: the original compared pointers (`rewardWithTips == big.NewInt(0)`),
	// which is never true; compare the value instead.
	if rewardWithTips.Sign() == 0 {
		return big.NewInt(0), nil
	}

	return mgr.convertTRBtoETH(rewardWithTips)
}
func (mgr *MiningMgr) convertTRBtoETH(trb *big.Int) (*big.Int, error) {
val, err := mgr.database.Get(db.QueriedValuePrefix + strconv.Itoa(tracker.RequestID_TRB_ETH))
if err != nil {
return nil, errors.New("getting the trb price from the db")
}
if len(val) == 0 |
priceTRB, err := hexutil.DecodeBig(string(val))
if err != nil {
return nil, errors.New("decoding trb price from the db")
}
wei := big.NewInt(tellorCommon.WEI)
precisionUpscale := big.NewInt(0).Div(wei, big.NewInt(tracker.PSRs[tracker.RequestID_TRB_ETH].Granularity()))
priceTRB.Mul(priceTRB, precisionUpscale)
eth := big.NewInt(0).Mul(priceTRB, trb)
eth.Div(eth, big.NewInt(1e18))
return eth, nil
}
// gasUsed returns the recorded gas used for the upcoming slot's
// transaction together with that slot number. It returns zero gas when
// the db has no record for the slot yet.
func (mgr *MiningMgr) gasUsed() (*big.Int, *big.Int, error) {
	slotNum, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_SLOT_PROGRESS")))
	if err != nil {
		return nil, nil, errors.Wrap(err, "getting _SLOT_PROGRESS")
	}
	// This is the price for the last transaction so increment +1
	// to get the price for next slot transaction.
	// Slots numbers should be from 0 to 4 so
	// use mod of 5 in order to save 5 as slot 0.
	slotNum.Add(slotNum, big.NewInt(1)).Mod(slotNum, big.NewInt(5))

	txID := tellorCommon.PriceTXs + slotNum.String()
	gas, err := mgr.database.Get(txID)
	if err != nil {
		// Wrap instead of errors.New so the underlying cause isn't lost.
		return nil, nil, errors.Wrap(err, "getting the tx eth cost from the db")
	}

	// No price record in the db yet.
	if gas == nil {
		return big.NewInt(0), slotNum, nil
	}

	return big.NewInt(0).SetBytes(gas), slotNum, nil
}
// saveGasUsed calculates the price for a given slot.
// Since the transaction doesn't include the slot number it gets the slot number
// as soon as the transaction passes and
// saves it in the database for profit calculations.
// TODO[Krasi] To be more detirministic and simplify this
// should get the `_SLOT_PROGRESS` and `gasUsed` from the `NonceSubmitted` event.
// At the moment there is a slight chance of a race condition if
// another transaction has passed between checking the transaction cost and
// checking the `_SLOT_PROGRESS`
// Tracking issue https://github.com/tellor-io/TellorCore/issues/101
func (mgr *MiningMgr) saveGasUsed(ctx context.Context, tx *types.Transaction) {
	go func(tx *types.Transaction) {
		receipt, err := bind.WaitMined(ctx, mgr.ethClient, tx)
		if err != nil {
			level.Error(mgr.logger).Log("msg", "transaction result for calculating transaction cost", "err", err)
			// FIX: without this return the nil receipt is dereferenced below.
			return
		}
		if receipt.Status != 1 {
			mgr.submitFailCount.Inc()
			level.Error(mgr.logger).Log("msg", "unsuccessful submitSolution transaction, not saving the tx cost in the db", "txHash", receipt.TxHash.String())
			return
		}
		gasUsed := big.NewInt(int64(receipt.GasUsed))
		slotNum, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_SLOT_PROGRESS")))
		if err != nil {
			level.Error(mgr.logger).Log("msg", "getting _SLOT_PROGRESS for calculating transaction cost", "err", err)
			// FIX: slotNum is nil on error so slotNum.String() below would panic.
			return
		}
		txID := tellorCommon.PriceTXs + slotNum.String()
		err = mgr.database.Put(txID, gasUsed.Bytes())
		if err != nil {
			level.Error(mgr.logger).Log("msg", "saving transaction cost", "err", err)
		}
		level.Debug(mgr.logger).Log("msg", "saved transaction gas used", "txHash", receipt.TxHash.String(), "amount", gasUsed.Int64(), "slot", slotNum.Int64())
	}(tx)
}
// profit returns the profit in percents and updates the exposed
// profit/cost/reward metrics for the upcoming slot.
// When the transaction cost is unknown it returns -100 so
// that the caller can decide how to handle.
// Transaction cost is zero when the manager hasn't done any transactions yet.
// Each transaction cost is known for any sequential transactions.
func (mgr *MiningMgr) profit() (int64, error) {
	gasUsed, slotNum, err := mgr.gasUsed()
	if err != nil {
		return 0, errors.Wrap(err, "getting TX cost")
	}
	if gasUsed.Int64() == 0 {
		level.Debug(mgr.logger).Log("msg", "profit checking:no data for gas used", "slot", slotNum)
		return -100, nil
	}
	gasPrice, err := mgr.ethClient.SuggestGasPrice(context.Background())
	if err != nil {
		return 0, errors.Wrap(err, "getting gas price")
	}
	reward, err := mgr.currentReward()
	if err != nil {
		return 0, errors.Wrap(err, "getting current rewards")
	}
	txCost := gasPrice.Mul(gasPrice, gasUsed)
	// Guard against division by zero below (possible when the node
	// suggests a zero gas price).
	if txCost.Int64() == 0 {
		return 0, errors.New("transaction cost is zero")
	}
	profit := big.NewInt(0).Sub(reward, txCost)
	profitPercent := int64(float64(profit.Int64()) / float64(txCost.Int64()) * 100)

	level.Debug(mgr.logger).Log(
		"msg", "profit checking",
		"reward", fmt.Sprintf("%.2e", float64(reward.Int64())),
		"txCost", fmt.Sprintf("%.2e", float64(txCost.Int64())),
		"slot", slotNum,
		"profit", fmt.Sprintf("%.2e", float64(profit.Int64())),
		"profitMargin", profitPercent,
		"profitThreshold", mgr.cfg.Mine.ProfitThreshold,
	)

	// GaugeVec.With already returns a prometheus.Gauge, so the original
	// `.(prometheus.Gauge)` assertions were redundant.
	slot := strconv.Itoa(int(slotNum.Int64()))
	mgr.submitProfit.With(prometheus.Labels{"slot": slot}).Set(float64(profitPercent))
	mgr.submitCost.With(prometheus.Labels{"slot": slot}).Set(float64(txCost.Int64()))
	mgr.submitReward.With(prometheus.Labels{"slot": slot}).Set(float64(reward.Int64()))
	return profitPercent, nil
}
| {
return nil, errors.New("the db doesn't have the trb price")
} | conditional_block |
miningManager.go | // Copyright (c) The Tellor Authors.
// Licensed under the MIT License.
package ops
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"os"
"strconv"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
tellorCommon "github.com/tellor-io/telliot/pkg/common"
"github.com/tellor-io/telliot/pkg/config"
"github.com/tellor-io/telliot/pkg/contracts"
"github.com/tellor-io/telliot/pkg/db"
"github.com/tellor-io/telliot/pkg/logging"
"github.com/tellor-io/telliot/pkg/pow"
"github.com/tellor-io/telliot/pkg/rpc"
"github.com/tellor-io/telliot/pkg/tracker"
)
type WorkSource interface {
GetWork() (*pow.Work, bool)
}
type SolutionSink interface {
Submit(context.Context, *pow.Result) (*types.Transaction, error)
}
// MiningMgr manages mining, submiting a solution and requesting data.
// In the tellor contract a solution is saved in slots where a value is valid only when it has 5 confirmed slots.
// The manager tracks tx costs and profitThreshold is set it skips any transactions below the profit threshold.
// The profit is calculated the same way as in the Tellor contract.
// Transaction cost for submitting in each slot might be different so because of this
// the manager needs to complete few transaction to gather the tx cost for each slot.
type MiningMgr struct {
exitCh chan os.Signal
logger log.Logger
Running bool
ethClient contracts.ETHClient
group *pow.MiningGroup
tasker WorkSource
solHandler SolutionSink
solutionPending *pow.Result
database db.DataServerProxy
contractInstance *contracts.ITellor
cfg *config.Config
toMineInput chan *pow.Work
solutionOutput chan *pow.Result
submitCount prometheus.Counter
submitFailCount prometheus.Counter
submitProfit *prometheus.GaugeVec
submitCost *prometheus.GaugeVec
submitReward *prometheus.GaugeVec
}
// CreateMiningManager is the MiningMgr constructor.
func CreateMiningManager(
logger log.Logger,
exitCh chan os.Signal,
cfg *config.Config,
database db.DataServerProxy,
contract *contracts.ITellor,
account *rpc.Account,
) (*MiningMgr, error) {
group, err := pow.SetupMiningGroup(logger, cfg, exitCh)
if err != nil {
return nil, errors.Wrap(err, "setup miners")
}
client, err := rpc.NewClient(logger, cfg, os.Getenv(config.NodeURLEnvName))
if err != nil {
return nil, errors.Wrap(err, "creating client")
}
contractInstance, err := contracts.NewITellor(client)
if err != nil {
return nil, errors.Wrap(err, "getting addresses")
}
logger, err = logging.ApplyFilter(*cfg, ComponentName, logger)
if err != nil {
return nil, errors.Wrap(err, "apply filter logger")
}
submitter := NewSubmitter(logger, cfg, client, contract, account)
mng := &MiningMgr{
exitCh: exitCh,
logger: log.With(logger, "component", ComponentName),
Running: false,
group: group,
tasker: nil,
solutionPending: nil,
solHandler: nil,
contractInstance: contractInstance,
cfg: cfg,
database: database,
ethClient: client,
toMineInput: make(chan *pow.Work),
solutionOutput: make(chan *pow.Result),
submitCount: promauto.NewCounter(prometheus.CounterOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_total",
Help: "The total number of submitted solutions",
}),
submitFailCount: promauto.NewCounter(prometheus.CounterOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_fails_total",
Help: "The total number of failed submission",
}),
submitProfit: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_profit",
Help: "The current submit profit in percents",
},
[]string{"slot"},
),
submitCost: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_cost",
Help: "The current submit cost in 1e18 eth",
},
[]string{"slot"},
),
submitReward: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_reward",
Help: "The current reward in 1e18 eth",
},
[]string{"slot"},
),
}
mng.tasker = pow.CreateTasker(logger, cfg, mng.contractInstance, database)
mng.solHandler = pow.CreateSolutionHandler(cfg, logger, submitter, database)
return mng, nil
}
// Start will start the mining run loop.
func (mgr *MiningMgr) Start(ctx context.Context) {
mgr.Running = true
ticker := time.NewTicker(mgr.cfg.Mine.MiningInterruptCheckInterval.Duration)
// Start the mining group.
go mgr.group.Mine(mgr.toMineInput, mgr.solutionOutput)
for {
select {
// Boss wants us to quit for the day.
case <-mgr.exitCh:
mgr.Running = false
return
// Found a solution.
case solution := <-mgr.solutionOutput:
// There is no new challenge so resend any pending solution.
if solution == nil {
if mgr.solutionPending == nil {
continue
}
solution = mgr.solutionPending
var ids []int64
for _, id := range mgr.solutionPending.Work.Challenge.RequestIDs {
ids = append(ids, id.Int64()) | // any of the checks below fail and will be retried
// when there is no new challenge.
mgr.solutionPending = solution
profitPercent, err := mgr.profit() // Call it regardless of whether we use so that is sets the exposed metrics.
if mgr.cfg.Mine.ProfitThreshold > 0 {
if err != nil {
level.Error(mgr.logger).Log("msg", "submit solution profit check", "err", err)
continue
}
if profitPercent < int64(mgr.cfg.Mine.ProfitThreshold) {
level.Debug(mgr.logger).Log("msg", "transaction not profitable, so will wait for the next cycle")
continue
}
}
lastSubmit, err := mgr.lastSubmit()
if err != nil {
level.Error(mgr.logger).Log("msg", "checking last submit time", "err", err)
} else if lastSubmit < mgr.cfg.Mine.MinSubmitPeriod.Duration {
level.Debug(mgr.logger).Log("msg", "min transaction submit threshold hasn't passed", "minSubmitPeriod", mgr.cfg.Mine.MinSubmitPeriod, "lastSubmit", lastSubmit)
continue
}
tx, err := mgr.solHandler.Submit(ctx, solution)
if err != nil {
level.Error(mgr.logger).Log("msg", "submiting a solution", "err", err)
mgr.submitFailCount.Inc()
continue
}
level.Debug(mgr.logger).Log("msg", "submited a solution", "txHash", tx.Hash().String())
mgr.saveGasUsed(ctx, tx)
mgr.submitCount.Inc()
// A solution has been submitted so the
// pending solution doesn't matter here any more so reset it.
mgr.solutionPending = nil
// Time to check for a new challenge.
case <-ticker.C:
mgr.newWork()
}
}
}
// newWork is non blocking worker that sends new work to the pow workers
// or re-sends a current pending solution to the submitter when the challenge hasn't changes.
func (mgr *MiningMgr) newWork() {
go func() {
// instantSubmit means 15 mins have passed so
// the difficulty now is zero and any solution/nonce will work so
// can just submit without sending to the miner.
work, instantSubmit := mgr.tasker.GetWork()
if instantSubmit {
mgr.solutionOutput <- &pow.Result{Work: work, Nonce: "anything will work"}
} else {
// It sends even nil work to indicate that no new challenge is available.
if work == nil {
mgr.solutionOutput <- nil
return
}
var ids []int64
for _, id := range work.Challenge.RequestIDs {
ids = append(ids, id.Int64())
}
level.Debug(mgr.logger).Log("msg", "sending new chalenge for mining", "reqIDs", fmt.Sprintf("%+v", ids))
mgr.toMineInput <- work
}
}()
}
func (mgr *MiningMgr) lastSubmit() (time.Duration, error) {
address := "000000000000000000000000" + mgr.cfg.PublicAddress[2:]
decoded, err := hex.DecodeString(address)
if err != nil {
return 0, errors.Wrapf(err, "decoding address")
}
last, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256(decoded))
if err != nil {
return 0, errors.Wrapf(err, "getting last submit time for:%v", mgr.cfg.PublicAddress)
}
// The Miner has never submitted so put a timestamp at the beginning of unix time.
if last.Int64() == 0 {
last.Set(big.NewInt(1))
}
lastInt := last.Int64()
now := time.Now()
var lastSubmit time.Duration
if lastInt > 0 {
tm := time.Unix(lastInt, 0)
lastSubmit = now.Sub(tm)
}
return lastSubmit, nil
}
// currentReward returns the current TRB rewards converted to ETH.
// TODO[Krasi] This is a duplicate code from the tellor conract so
// Should add `currentReward` func to the contract to avoid this code duplication.
// Tracking issue https://github.com/tellor-io/TellorCore/issues/101
func (mgr *MiningMgr) currentReward() (*big.Int, error) {
timeOfLastNewValue, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_TIME_OF_LAST_NEW_VALUE")))
if err != nil {
return nil, errors.New("getting _TIME_OF_LAST_NEW_VALUE")
}
totalTips, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_CURRENT_TOTAL_TIPS")))
if err != nil {
return nil, errors.New("getting _CURRENT_TOTAL_TIPS")
}
timeDiff := big.NewInt(time.Now().Unix() - timeOfLastNewValue.Int64())
trb := big.NewInt(1e18)
rewardPerSec := big.NewInt(0).Div(trb, big.NewInt(300)) // 1 TRB every 5 minutes so total reward is timeDiff multiplied by reward per second.
rewardTRB := big.NewInt(0).Mul(rewardPerSec, timeDiff)
singleMinerTip := big.NewInt(0).Div(totalTips, big.NewInt(10)) // Half of the tips are burned(remain in the contract) to reduce inflation.
rewardWithTips := big.NewInt(0).Add(singleMinerTip, rewardTRB)
if rewardWithTips == big.NewInt(0) {
return big.NewInt(0), nil
}
return mgr.convertTRBtoETH(rewardWithTips)
}
func (mgr *MiningMgr) convertTRBtoETH(trb *big.Int) (*big.Int, error) {
val, err := mgr.database.Get(db.QueriedValuePrefix + strconv.Itoa(tracker.RequestID_TRB_ETH))
if err != nil {
return nil, errors.New("getting the trb price from the db")
}
if len(val) == 0 {
return nil, errors.New("the db doesn't have the trb price")
}
priceTRB, err := hexutil.DecodeBig(string(val))
if err != nil {
return nil, errors.New("decoding trb price from the db")
}
wei := big.NewInt(tellorCommon.WEI)
precisionUpscale := big.NewInt(0).Div(wei, big.NewInt(tracker.PSRs[tracker.RequestID_TRB_ETH].Granularity()))
priceTRB.Mul(priceTRB, precisionUpscale)
eth := big.NewInt(0).Mul(priceTRB, trb)
eth.Div(eth, big.NewInt(1e18))
return eth, nil
}
func (mgr *MiningMgr) gasUsed() (*big.Int, *big.Int, error) {
slotNum, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_SLOT_PROGRESS")))
if err != nil {
return nil, nil, errors.Wrap(err, "getting _SLOT_PROGRESS")
}
// This is the price for the last transaction so increment +1
// to get the price for next slot transaction.
// Slots numbers should be from 0 to 4 so
// use mod of 5 in order to save 5 as slot 0.
slotNum.Add(slotNum, big.NewInt(1)).Mod(slotNum, big.NewInt(5))
txID := tellorCommon.PriceTXs + slotNum.String()
gas, err := mgr.database.Get(txID)
if err != nil {
return nil, nil, errors.New("getting the tx eth cost from the db")
}
// No price record in the db yet.
if gas == nil {
return big.NewInt(0), slotNum, nil
}
return big.NewInt(0).SetBytes(gas), slotNum, nil
}
// saveGasUsed calculates the price for a given slot.
// Since the transaction doesn't include the slot number it gets the slot number
// as soon as the transaction passes and
// saves it in the database for profit calculations.
// TODO[Krasi] To be more detirministic and simplify this
// should get the `_SLOT_PROGRESS` and `gasUsed` from the `NonceSubmitted` event.
// At the moment there is a slight chance of a race condition if
// another transaction has passed between checking the transaction cost and
// checking the `_SLOT_PROGRESS`
// Tracking issue https://github.com/tellor-io/TellorCore/issues/101
func (mgr *MiningMgr) saveGasUsed(ctx context.Context, tx *types.Transaction) {
go func(tx *types.Transaction) {
receipt, err := bind.WaitMined(ctx, mgr.ethClient, tx)
if err != nil {
level.Error(mgr.logger).Log("msg", "transaction result for calculating transaction cost", "err", err)
}
if receipt.Status != 1 {
mgr.submitFailCount.Inc()
level.Error(mgr.logger).Log("msg", "unsuccessful submitSolution transaction, not saving the tx cost in the db", "txHash", receipt.TxHash.String())
return
}
gasUsed := big.NewInt(int64(receipt.GasUsed))
slotNum, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_SLOT_PROGRESS")))
if err != nil {
level.Error(mgr.logger).Log("msg", "getting _SLOT_PROGRESS for calculating transaction cost", "err", err)
}
txID := tellorCommon.PriceTXs + slotNum.String()
err = mgr.database.Put(txID, gasUsed.Bytes())
if err != nil {
level.Error(mgr.logger).Log("msg", "saving transaction cost", "err", err)
}
level.Debug(mgr.logger).Log("msg", "saved transaction gas used", "txHash", receipt.TxHash.String(), "amount", gasUsed.Int64(), "slot", slotNum.Int64())
}(tx)
}
// profit returns the profit in percents.
// When the transaction cost is unknown it returns -1 so
// that the caller can decide how to handle.
// Transaction cost is zero when the manager hasn't done any transactions yet.
// Each transaction cost is known for any siquential transactions.
func (mgr *MiningMgr) profit() (int64, error) {
gasUsed, slotNum, err := mgr.gasUsed()
if err != nil {
return 0, errors.Wrap(err, "getting TX cost")
}
if gasUsed.Int64() == 0 {
level.Debug(mgr.logger).Log("msg", "profit checking:no data for gas used", "slot", slotNum)
return -100, nil
}
gasPrice, err := mgr.ethClient.SuggestGasPrice(context.Background())
if err != nil {
return 0, errors.Wrap(err, "getting gas price")
}
reward, err := mgr.currentReward()
if err != nil {
return 0, errors.Wrap(err, "getting current rewards")
}
txCost := gasPrice.Mul(gasPrice, gasUsed)
profit := big.NewInt(0).Sub(reward, txCost)
profitPercentFloat := float64(profit.Int64()) / float64(txCost.Int64()) * 100
profitPercent := int64(profitPercentFloat)
level.Debug(mgr.logger).Log(
"msg", "profit checking",
"reward", fmt.Sprintf("%.2e", float64(reward.Int64())),
"txCost", fmt.Sprintf("%.2e", float64(txCost.Int64())),
"slot", slotNum,
"profit", fmt.Sprintf("%.2e", float64(profit.Int64())),
"profitMargin", profitPercent,
"profitThreshold", mgr.cfg.Mine.ProfitThreshold,
)
mgr.submitProfit.With(prometheus.Labels{"slot": strconv.Itoa(int(slotNum.Int64()))}).(prometheus.Gauge).Set(float64(profitPercent))
mgr.submitCost.With(prometheus.Labels{"slot": strconv.Itoa(int(slotNum.Int64()))}).(prometheus.Gauge).Set(float64(txCost.Int64()))
mgr.submitReward.With(prometheus.Labels{"slot": strconv.Itoa(int(slotNum.Int64()))}).(prometheus.Gauge).Set(float64(reward.Int64()))
return profitPercent, nil
} | }
level.Debug(mgr.logger).Log("msg", "re-submitting a pending solution", "reqIDs", fmt.Sprintf("%+v", ids))
}
// Set this solution as pending so that if | random_line_split |
miningManager.go | // Copyright (c) The Tellor Authors.
// Licensed under the MIT License.
package ops
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"os"
"strconv"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
tellorCommon "github.com/tellor-io/telliot/pkg/common"
"github.com/tellor-io/telliot/pkg/config"
"github.com/tellor-io/telliot/pkg/contracts"
"github.com/tellor-io/telliot/pkg/db"
"github.com/tellor-io/telliot/pkg/logging"
"github.com/tellor-io/telliot/pkg/pow"
"github.com/tellor-io/telliot/pkg/rpc"
"github.com/tellor-io/telliot/pkg/tracker"
)
type WorkSource interface {
GetWork() (*pow.Work, bool)
}
type SolutionSink interface {
Submit(context.Context, *pow.Result) (*types.Transaction, error)
}
// MiningMgr manages mining, submiting a solution and requesting data.
// In the tellor contract a solution is saved in slots where a value is valid only when it has 5 confirmed slots.
// The manager tracks tx costs and profitThreshold is set it skips any transactions below the profit threshold.
// The profit is calculated the same way as in the Tellor contract.
// Transaction cost for submitting in each slot might be different so because of this
// the manager needs to complete few transaction to gather the tx cost for each slot.
type MiningMgr struct {
exitCh chan os.Signal
logger log.Logger
Running bool
ethClient contracts.ETHClient
group *pow.MiningGroup
tasker WorkSource
solHandler SolutionSink
solutionPending *pow.Result
database db.DataServerProxy
contractInstance *contracts.ITellor
cfg *config.Config
toMineInput chan *pow.Work
solutionOutput chan *pow.Result
submitCount prometheus.Counter
submitFailCount prometheus.Counter
submitProfit *prometheus.GaugeVec
submitCost *prometheus.GaugeVec
submitReward *prometheus.GaugeVec
}
// CreateMiningManager is the MiningMgr constructor.
func CreateMiningManager(
logger log.Logger,
exitCh chan os.Signal,
cfg *config.Config,
database db.DataServerProxy,
contract *contracts.ITellor,
account *rpc.Account,
) (*MiningMgr, error) {
group, err := pow.SetupMiningGroup(logger, cfg, exitCh)
if err != nil {
return nil, errors.Wrap(err, "setup miners")
}
client, err := rpc.NewClient(logger, cfg, os.Getenv(config.NodeURLEnvName))
if err != nil {
return nil, errors.Wrap(err, "creating client")
}
contractInstance, err := contracts.NewITellor(client)
if err != nil {
return nil, errors.Wrap(err, "getting addresses")
}
logger, err = logging.ApplyFilter(*cfg, ComponentName, logger)
if err != nil {
return nil, errors.Wrap(err, "apply filter logger")
}
submitter := NewSubmitter(logger, cfg, client, contract, account)
mng := &MiningMgr{
exitCh: exitCh,
logger: log.With(logger, "component", ComponentName),
Running: false,
group: group,
tasker: nil,
solutionPending: nil,
solHandler: nil,
contractInstance: contractInstance,
cfg: cfg,
database: database,
ethClient: client,
toMineInput: make(chan *pow.Work),
solutionOutput: make(chan *pow.Result),
submitCount: promauto.NewCounter(prometheus.CounterOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_total",
Help: "The total number of submitted solutions",
}),
submitFailCount: promauto.NewCounter(prometheus.CounterOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_fails_total",
Help: "The total number of failed submission",
}),
submitProfit: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_profit",
Help: "The current submit profit in percents",
},
[]string{"slot"},
),
submitCost: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_cost",
Help: "The current submit cost in 1e18 eth",
},
[]string{"slot"},
),
submitReward: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_reward",
Help: "The current reward in 1e18 eth",
},
[]string{"slot"},
),
}
mng.tasker = pow.CreateTasker(logger, cfg, mng.contractInstance, database)
mng.solHandler = pow.CreateSolutionHandler(cfg, logger, submitter, database)
return mng, nil
}
// Start will start the mining run loop.
func (mgr *MiningMgr) Start(ctx context.Context) {
mgr.Running = true
ticker := time.NewTicker(mgr.cfg.Mine.MiningInterruptCheckInterval.Duration)
// Start the mining group.
go mgr.group.Mine(mgr.toMineInput, mgr.solutionOutput)
for {
select {
// Boss wants us to quit for the day.
case <-mgr.exitCh:
mgr.Running = false
return
// Found a solution.
case solution := <-mgr.solutionOutput:
// There is no new challenge so resend any pending solution.
if solution == nil {
if mgr.solutionPending == nil {
continue
}
solution = mgr.solutionPending
var ids []int64
for _, id := range mgr.solutionPending.Work.Challenge.RequestIDs {
ids = append(ids, id.Int64())
}
level.Debug(mgr.logger).Log("msg", "re-submitting a pending solution", "reqIDs", fmt.Sprintf("%+v", ids))
}
// Set this solution as pending so that if
// any of the checks below fail and will be retried
// when there is no new challenge.
mgr.solutionPending = solution
profitPercent, err := mgr.profit() // Call it regardless of whether we use so that is sets the exposed metrics.
if mgr.cfg.Mine.ProfitThreshold > 0 {
if err != nil {
level.Error(mgr.logger).Log("msg", "submit solution profit check", "err", err)
continue
}
if profitPercent < int64(mgr.cfg.Mine.ProfitThreshold) {
level.Debug(mgr.logger).Log("msg", "transaction not profitable, so will wait for the next cycle")
continue
}
}
lastSubmit, err := mgr.lastSubmit()
if err != nil {
level.Error(mgr.logger).Log("msg", "checking last submit time", "err", err)
} else if lastSubmit < mgr.cfg.Mine.MinSubmitPeriod.Duration {
level.Debug(mgr.logger).Log("msg", "min transaction submit threshold hasn't passed", "minSubmitPeriod", mgr.cfg.Mine.MinSubmitPeriod, "lastSubmit", lastSubmit)
continue
}
tx, err := mgr.solHandler.Submit(ctx, solution)
if err != nil {
level.Error(mgr.logger).Log("msg", "submiting a solution", "err", err)
mgr.submitFailCount.Inc()
continue
}
level.Debug(mgr.logger).Log("msg", "submited a solution", "txHash", tx.Hash().String())
mgr.saveGasUsed(ctx, tx)
mgr.submitCount.Inc()
// A solution has been submitted so the
// pending solution doesn't matter here any more so reset it.
mgr.solutionPending = nil
// Time to check for a new challenge.
case <-ticker.C:
mgr.newWork()
}
}
}
// newWork is non blocking worker that sends new work to the pow workers
// or re-sends a current pending solution to the submitter when the challenge hasn't changes.
func (mgr *MiningMgr) newWork() {
go func() {
// instantSubmit means 15 mins have passed so
// the difficulty now is zero and any solution/nonce will work so
// can just submit without sending to the miner.
work, instantSubmit := mgr.tasker.GetWork()
if instantSubmit {
mgr.solutionOutput <- &pow.Result{Work: work, Nonce: "anything will work"}
} else {
// It sends even nil work to indicate that no new challenge is available.
if work == nil {
mgr.solutionOutput <- nil
return
}
var ids []int64
for _, id := range work.Challenge.RequestIDs {
ids = append(ids, id.Int64())
}
level.Debug(mgr.logger).Log("msg", "sending new chalenge for mining", "reqIDs", fmt.Sprintf("%+v", ids))
mgr.toMineInput <- work
}
}()
}
func (mgr *MiningMgr) | () (time.Duration, error) {
address := "000000000000000000000000" + mgr.cfg.PublicAddress[2:]
decoded, err := hex.DecodeString(address)
if err != nil {
return 0, errors.Wrapf(err, "decoding address")
}
last, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256(decoded))
if err != nil {
return 0, errors.Wrapf(err, "getting last submit time for:%v", mgr.cfg.PublicAddress)
}
// The Miner has never submitted so put a timestamp at the beginning of unix time.
if last.Int64() == 0 {
last.Set(big.NewInt(1))
}
lastInt := last.Int64()
now := time.Now()
var lastSubmit time.Duration
if lastInt > 0 {
tm := time.Unix(lastInt, 0)
lastSubmit = now.Sub(tm)
}
return lastSubmit, nil
}
// currentReward returns the current TRB rewards converted to ETH.
// TODO[Krasi] This is a duplicate code from the tellor conract so
// Should add `currentReward` func to the contract to avoid this code duplication.
// Tracking issue https://github.com/tellor-io/TellorCore/issues/101
func (mgr *MiningMgr) currentReward() (*big.Int, error) {
timeOfLastNewValue, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_TIME_OF_LAST_NEW_VALUE")))
if err != nil {
return nil, errors.New("getting _TIME_OF_LAST_NEW_VALUE")
}
totalTips, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_CURRENT_TOTAL_TIPS")))
if err != nil {
return nil, errors.New("getting _CURRENT_TOTAL_TIPS")
}
timeDiff := big.NewInt(time.Now().Unix() - timeOfLastNewValue.Int64())
trb := big.NewInt(1e18)
rewardPerSec := big.NewInt(0).Div(trb, big.NewInt(300)) // 1 TRB every 5 minutes so total reward is timeDiff multiplied by reward per second.
rewardTRB := big.NewInt(0).Mul(rewardPerSec, timeDiff)
singleMinerTip := big.NewInt(0).Div(totalTips, big.NewInt(10)) // Half of the tips are burned(remain in the contract) to reduce inflation.
rewardWithTips := big.NewInt(0).Add(singleMinerTip, rewardTRB)
if rewardWithTips == big.NewInt(0) {
return big.NewInt(0), nil
}
return mgr.convertTRBtoETH(rewardWithTips)
}
func (mgr *MiningMgr) convertTRBtoETH(trb *big.Int) (*big.Int, error) {
val, err := mgr.database.Get(db.QueriedValuePrefix + strconv.Itoa(tracker.RequestID_TRB_ETH))
if err != nil {
return nil, errors.New("getting the trb price from the db")
}
if len(val) == 0 {
return nil, errors.New("the db doesn't have the trb price")
}
priceTRB, err := hexutil.DecodeBig(string(val))
if err != nil {
return nil, errors.New("decoding trb price from the db")
}
wei := big.NewInt(tellorCommon.WEI)
precisionUpscale := big.NewInt(0).Div(wei, big.NewInt(tracker.PSRs[tracker.RequestID_TRB_ETH].Granularity()))
priceTRB.Mul(priceTRB, precisionUpscale)
eth := big.NewInt(0).Mul(priceTRB, trb)
eth.Div(eth, big.NewInt(1e18))
return eth, nil
}
func (mgr *MiningMgr) gasUsed() (*big.Int, *big.Int, error) {
slotNum, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_SLOT_PROGRESS")))
if err != nil {
return nil, nil, errors.Wrap(err, "getting _SLOT_PROGRESS")
}
// This is the price for the last transaction so increment +1
// to get the price for next slot transaction.
// Slots numbers should be from 0 to 4 so
// use mod of 5 in order to save 5 as slot 0.
slotNum.Add(slotNum, big.NewInt(1)).Mod(slotNum, big.NewInt(5))
txID := tellorCommon.PriceTXs + slotNum.String()
gas, err := mgr.database.Get(txID)
if err != nil {
return nil, nil, errors.New("getting the tx eth cost from the db")
}
// No price record in the db yet.
if gas == nil {
return big.NewInt(0), slotNum, nil
}
return big.NewInt(0).SetBytes(gas), slotNum, nil
}
// saveGasUsed calculates the price for a given slot.
// Since the transaction doesn't include the slot number it gets the slot number
// as soon as the transaction passes and
// saves it in the database for profit calculations.
// TODO[Krasi] To be more detirministic and simplify this
// should get the `_SLOT_PROGRESS` and `gasUsed` from the `NonceSubmitted` event.
// At the moment there is a slight chance of a race condition if
// another transaction has passed between checking the transaction cost and
// checking the `_SLOT_PROGRESS`
// Tracking issue https://github.com/tellor-io/TellorCore/issues/101
func (mgr *MiningMgr) saveGasUsed(ctx context.Context, tx *types.Transaction) {
go func(tx *types.Transaction) {
receipt, err := bind.WaitMined(ctx, mgr.ethClient, tx)
if err != nil {
level.Error(mgr.logger).Log("msg", "transaction result for calculating transaction cost", "err", err)
}
if receipt.Status != 1 {
mgr.submitFailCount.Inc()
level.Error(mgr.logger).Log("msg", "unsuccessful submitSolution transaction, not saving the tx cost in the db", "txHash", receipt.TxHash.String())
return
}
gasUsed := big.NewInt(int64(receipt.GasUsed))
slotNum, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_SLOT_PROGRESS")))
if err != nil {
level.Error(mgr.logger).Log("msg", "getting _SLOT_PROGRESS for calculating transaction cost", "err", err)
}
txID := tellorCommon.PriceTXs + slotNum.String()
err = mgr.database.Put(txID, gasUsed.Bytes())
if err != nil {
level.Error(mgr.logger).Log("msg", "saving transaction cost", "err", err)
}
level.Debug(mgr.logger).Log("msg", "saved transaction gas used", "txHash", receipt.TxHash.String(), "amount", gasUsed.Int64(), "slot", slotNum.Int64())
}(tx)
}
// profit returns the profit in percents.
// When the transaction cost is unknown it returns -1 so
// that the caller can decide how to handle.
// Transaction cost is zero when the manager hasn't done any transactions yet.
// Each transaction cost is known for any siquential transactions.
func (mgr *MiningMgr) profit() (int64, error) {
gasUsed, slotNum, err := mgr.gasUsed()
if err != nil {
return 0, errors.Wrap(err, "getting TX cost")
}
if gasUsed.Int64() == 0 {
level.Debug(mgr.logger).Log("msg", "profit checking:no data for gas used", "slot", slotNum)
return -100, nil
}
gasPrice, err := mgr.ethClient.SuggestGasPrice(context.Background())
if err != nil {
return 0, errors.Wrap(err, "getting gas price")
}
reward, err := mgr.currentReward()
if err != nil {
return 0, errors.Wrap(err, "getting current rewards")
}
txCost := gasPrice.Mul(gasPrice, gasUsed)
profit := big.NewInt(0).Sub(reward, txCost)
profitPercentFloat := float64(profit.Int64()) / float64(txCost.Int64()) * 100
profitPercent := int64(profitPercentFloat)
level.Debug(mgr.logger).Log(
"msg", "profit checking",
"reward", fmt.Sprintf("%.2e", float64(reward.Int64())),
"txCost", fmt.Sprintf("%.2e", float64(txCost.Int64())),
"slot", slotNum,
"profit", fmt.Sprintf("%.2e", float64(profit.Int64())),
"profitMargin", profitPercent,
"profitThreshold", mgr.cfg.Mine.ProfitThreshold,
)
mgr.submitProfit.With(prometheus.Labels{"slot": strconv.Itoa(int(slotNum.Int64()))}).(prometheus.Gauge).Set(float64(profitPercent))
mgr.submitCost.With(prometheus.Labels{"slot": strconv.Itoa(int(slotNum.Int64()))}).(prometheus.Gauge).Set(float64(txCost.Int64()))
mgr.submitReward.With(prometheus.Labels{"slot": strconv.Itoa(int(slotNum.Int64()))}).(prometheus.Gauge).Set(float64(reward.Int64()))
return profitPercent, nil
}
| lastSubmit | identifier_name |
miningManager.go | // Copyright (c) The Tellor Authors.
// Licensed under the MIT License.
package ops
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"os"
"strconv"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
tellorCommon "github.com/tellor-io/telliot/pkg/common"
"github.com/tellor-io/telliot/pkg/config"
"github.com/tellor-io/telliot/pkg/contracts"
"github.com/tellor-io/telliot/pkg/db"
"github.com/tellor-io/telliot/pkg/logging"
"github.com/tellor-io/telliot/pkg/pow"
"github.com/tellor-io/telliot/pkg/rpc"
"github.com/tellor-io/telliot/pkg/tracker"
)
type WorkSource interface {
GetWork() (*pow.Work, bool)
}
type SolutionSink interface {
Submit(context.Context, *pow.Result) (*types.Transaction, error)
}
// MiningMgr manages mining, submiting a solution and requesting data.
// In the tellor contract a solution is saved in slots where a value is valid only when it has 5 confirmed slots.
// The manager tracks tx costs and profitThreshold is set it skips any transactions below the profit threshold.
// The profit is calculated the same way as in the Tellor contract.
// Transaction cost for submitting in each slot might be different so because of this
// the manager needs to complete few transaction to gather the tx cost for each slot.
type MiningMgr struct {
exitCh chan os.Signal
logger log.Logger
Running bool
ethClient contracts.ETHClient
group *pow.MiningGroup
tasker WorkSource
solHandler SolutionSink
solutionPending *pow.Result
database db.DataServerProxy
contractInstance *contracts.ITellor
cfg *config.Config
toMineInput chan *pow.Work
solutionOutput chan *pow.Result
submitCount prometheus.Counter
submitFailCount prometheus.Counter
submitProfit *prometheus.GaugeVec
submitCost *prometheus.GaugeVec
submitReward *prometheus.GaugeVec
}
// CreateMiningManager is the MiningMgr constructor.
func CreateMiningManager(
logger log.Logger,
exitCh chan os.Signal,
cfg *config.Config,
database db.DataServerProxy,
contract *contracts.ITellor,
account *rpc.Account,
) (*MiningMgr, error) {
group, err := pow.SetupMiningGroup(logger, cfg, exitCh)
if err != nil {
return nil, errors.Wrap(err, "setup miners")
}
client, err := rpc.NewClient(logger, cfg, os.Getenv(config.NodeURLEnvName))
if err != nil {
return nil, errors.Wrap(err, "creating client")
}
contractInstance, err := contracts.NewITellor(client)
if err != nil {
return nil, errors.Wrap(err, "getting addresses")
}
logger, err = logging.ApplyFilter(*cfg, ComponentName, logger)
if err != nil {
return nil, errors.Wrap(err, "apply filter logger")
}
submitter := NewSubmitter(logger, cfg, client, contract, account)
mng := &MiningMgr{
exitCh: exitCh,
logger: log.With(logger, "component", ComponentName),
Running: false,
group: group,
tasker: nil,
solutionPending: nil,
solHandler: nil,
contractInstance: contractInstance,
cfg: cfg,
database: database,
ethClient: client,
toMineInput: make(chan *pow.Work),
solutionOutput: make(chan *pow.Result),
submitCount: promauto.NewCounter(prometheus.CounterOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_total",
Help: "The total number of submitted solutions",
}),
submitFailCount: promauto.NewCounter(prometheus.CounterOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_fails_total",
Help: "The total number of failed submission",
}),
submitProfit: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_profit",
Help: "The current submit profit in percents",
},
[]string{"slot"},
),
submitCost: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_cost",
Help: "The current submit cost in 1e18 eth",
},
[]string{"slot"},
),
submitReward: promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "telliot",
Subsystem: "mining",
Name: "submit_reward",
Help: "The current reward in 1e18 eth",
},
[]string{"slot"},
),
}
mng.tasker = pow.CreateTasker(logger, cfg, mng.contractInstance, database)
mng.solHandler = pow.CreateSolutionHandler(cfg, logger, submitter, database)
return mng, nil
}
// Start will start the mining run loop.
func (mgr *MiningMgr) Start(ctx context.Context) {
mgr.Running = true
ticker := time.NewTicker(mgr.cfg.Mine.MiningInterruptCheckInterval.Duration)
// Start the mining group.
go mgr.group.Mine(mgr.toMineInput, mgr.solutionOutput)
for {
select {
// Boss wants us to quit for the day.
case <-mgr.exitCh:
mgr.Running = false
return
// Found a solution.
case solution := <-mgr.solutionOutput:
// There is no new challenge so resend any pending solution.
if solution == nil {
if mgr.solutionPending == nil {
continue
}
solution = mgr.solutionPending
var ids []int64
for _, id := range mgr.solutionPending.Work.Challenge.RequestIDs {
ids = append(ids, id.Int64())
}
level.Debug(mgr.logger).Log("msg", "re-submitting a pending solution", "reqIDs", fmt.Sprintf("%+v", ids))
}
// Set this solution as pending so that if
// any of the checks below fail and will be retried
// when there is no new challenge.
mgr.solutionPending = solution
profitPercent, err := mgr.profit() // Call it regardless of whether we use so that is sets the exposed metrics.
if mgr.cfg.Mine.ProfitThreshold > 0 {
if err != nil {
level.Error(mgr.logger).Log("msg", "submit solution profit check", "err", err)
continue
}
if profitPercent < int64(mgr.cfg.Mine.ProfitThreshold) {
level.Debug(mgr.logger).Log("msg", "transaction not profitable, so will wait for the next cycle")
continue
}
}
lastSubmit, err := mgr.lastSubmit()
if err != nil {
level.Error(mgr.logger).Log("msg", "checking last submit time", "err", err)
} else if lastSubmit < mgr.cfg.Mine.MinSubmitPeriod.Duration {
level.Debug(mgr.logger).Log("msg", "min transaction submit threshold hasn't passed", "minSubmitPeriod", mgr.cfg.Mine.MinSubmitPeriod, "lastSubmit", lastSubmit)
continue
}
tx, err := mgr.solHandler.Submit(ctx, solution)
if err != nil {
level.Error(mgr.logger).Log("msg", "submiting a solution", "err", err)
mgr.submitFailCount.Inc()
continue
}
level.Debug(mgr.logger).Log("msg", "submited a solution", "txHash", tx.Hash().String())
mgr.saveGasUsed(ctx, tx)
mgr.submitCount.Inc()
// A solution has been submitted so the
// pending solution doesn't matter here any more so reset it.
mgr.solutionPending = nil
// Time to check for a new challenge.
case <-ticker.C:
mgr.newWork()
}
}
}
// newWork is non blocking worker that sends new work to the pow workers
// or re-sends a current pending solution to the submitter when the challenge hasn't changes.
func (mgr *MiningMgr) newWork() {
go func() {
// instantSubmit means 15 mins have passed so
// the difficulty now is zero and any solution/nonce will work so
// can just submit without sending to the miner.
work, instantSubmit := mgr.tasker.GetWork()
if instantSubmit {
mgr.solutionOutput <- &pow.Result{Work: work, Nonce: "anything will work"}
} else {
// It sends even nil work to indicate that no new challenge is available.
if work == nil {
mgr.solutionOutput <- nil
return
}
var ids []int64
for _, id := range work.Challenge.RequestIDs {
ids = append(ids, id.Int64())
}
level.Debug(mgr.logger).Log("msg", "sending new chalenge for mining", "reqIDs", fmt.Sprintf("%+v", ids))
mgr.toMineInput <- work
}
}()
}
func (mgr *MiningMgr) lastSubmit() (time.Duration, error) {
address := "000000000000000000000000" + mgr.cfg.PublicAddress[2:]
decoded, err := hex.DecodeString(address)
if err != nil {
return 0, errors.Wrapf(err, "decoding address")
}
last, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256(decoded))
if err != nil {
return 0, errors.Wrapf(err, "getting last submit time for:%v", mgr.cfg.PublicAddress)
}
// The Miner has never submitted so put a timestamp at the beginning of unix time.
if last.Int64() == 0 {
last.Set(big.NewInt(1))
}
lastInt := last.Int64()
now := time.Now()
var lastSubmit time.Duration
if lastInt > 0 {
tm := time.Unix(lastInt, 0)
lastSubmit = now.Sub(tm)
}
return lastSubmit, nil
}
// currentReward returns the current TRB rewards converted to ETH.
// TODO[Krasi] This is a duplicate code from the tellor conract so
// Should add `currentReward` func to the contract to avoid this code duplication.
// Tracking issue https://github.com/tellor-io/TellorCore/issues/101
func (mgr *MiningMgr) currentReward() (*big.Int, error) {
timeOfLastNewValue, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_TIME_OF_LAST_NEW_VALUE")))
if err != nil {
return nil, errors.New("getting _TIME_OF_LAST_NEW_VALUE")
}
totalTips, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_CURRENT_TOTAL_TIPS")))
if err != nil {
return nil, errors.New("getting _CURRENT_TOTAL_TIPS")
}
timeDiff := big.NewInt(time.Now().Unix() - timeOfLastNewValue.Int64())
trb := big.NewInt(1e18)
rewardPerSec := big.NewInt(0).Div(trb, big.NewInt(300)) // 1 TRB every 5 minutes so total reward is timeDiff multiplied by reward per second.
rewardTRB := big.NewInt(0).Mul(rewardPerSec, timeDiff)
singleMinerTip := big.NewInt(0).Div(totalTips, big.NewInt(10)) // Half of the tips are burned(remain in the contract) to reduce inflation.
rewardWithTips := big.NewInt(0).Add(singleMinerTip, rewardTRB)
if rewardWithTips == big.NewInt(0) {
return big.NewInt(0), nil
}
return mgr.convertTRBtoETH(rewardWithTips)
}
func (mgr *MiningMgr) convertTRBtoETH(trb *big.Int) (*big.Int, error) {
val, err := mgr.database.Get(db.QueriedValuePrefix + strconv.Itoa(tracker.RequestID_TRB_ETH))
if err != nil {
return nil, errors.New("getting the trb price from the db")
}
if len(val) == 0 {
return nil, errors.New("the db doesn't have the trb price")
}
priceTRB, err := hexutil.DecodeBig(string(val))
if err != nil {
return nil, errors.New("decoding trb price from the db")
}
wei := big.NewInt(tellorCommon.WEI)
precisionUpscale := big.NewInt(0).Div(wei, big.NewInt(tracker.PSRs[tracker.RequestID_TRB_ETH].Granularity()))
priceTRB.Mul(priceTRB, precisionUpscale)
eth := big.NewInt(0).Mul(priceTRB, trb)
eth.Div(eth, big.NewInt(1e18))
return eth, nil
}
func (mgr *MiningMgr) gasUsed() (*big.Int, *big.Int, error) {
slotNum, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_SLOT_PROGRESS")))
if err != nil {
return nil, nil, errors.Wrap(err, "getting _SLOT_PROGRESS")
}
// This is the price for the last transaction so increment +1
// to get the price for next slot transaction.
// Slots numbers should be from 0 to 4 so
// use mod of 5 in order to save 5 as slot 0.
slotNum.Add(slotNum, big.NewInt(1)).Mod(slotNum, big.NewInt(5))
txID := tellorCommon.PriceTXs + slotNum.String()
gas, err := mgr.database.Get(txID)
if err != nil {
return nil, nil, errors.New("getting the tx eth cost from the db")
}
// No price record in the db yet.
if gas == nil {
return big.NewInt(0), slotNum, nil
}
return big.NewInt(0).SetBytes(gas), slotNum, nil
}
// saveGasUsed calculates the price for a given slot.
// Since the transaction doesn't include the slot number it gets the slot number
// as soon as the transaction passes and
// saves it in the database for profit calculations.
// TODO[Krasi] To be more detirministic and simplify this
// should get the `_SLOT_PROGRESS` and `gasUsed` from the `NonceSubmitted` event.
// At the moment there is a slight chance of a race condition if
// another transaction has passed between checking the transaction cost and
// checking the `_SLOT_PROGRESS`
// Tracking issue https://github.com/tellor-io/TellorCore/issues/101
func (mgr *MiningMgr) saveGasUsed(ctx context.Context, tx *types.Transaction) |
// profit returns the profit in percents.
// When the transaction cost is unknown it returns -1 so
// that the caller can decide how to handle.
// Transaction cost is zero when the manager hasn't done any transactions yet.
// Each transaction cost is known for any siquential transactions.
func (mgr *MiningMgr) profit() (int64, error) {
gasUsed, slotNum, err := mgr.gasUsed()
if err != nil {
return 0, errors.Wrap(err, "getting TX cost")
}
if gasUsed.Int64() == 0 {
level.Debug(mgr.logger).Log("msg", "profit checking:no data for gas used", "slot", slotNum)
return -100, nil
}
gasPrice, err := mgr.ethClient.SuggestGasPrice(context.Background())
if err != nil {
return 0, errors.Wrap(err, "getting gas price")
}
reward, err := mgr.currentReward()
if err != nil {
return 0, errors.Wrap(err, "getting current rewards")
}
txCost := gasPrice.Mul(gasPrice, gasUsed)
profit := big.NewInt(0).Sub(reward, txCost)
profitPercentFloat := float64(profit.Int64()) / float64(txCost.Int64()) * 100
profitPercent := int64(profitPercentFloat)
level.Debug(mgr.logger).Log(
"msg", "profit checking",
"reward", fmt.Sprintf("%.2e", float64(reward.Int64())),
"txCost", fmt.Sprintf("%.2e", float64(txCost.Int64())),
"slot", slotNum,
"profit", fmt.Sprintf("%.2e", float64(profit.Int64())),
"profitMargin", profitPercent,
"profitThreshold", mgr.cfg.Mine.ProfitThreshold,
)
mgr.submitProfit.With(prometheus.Labels{"slot": strconv.Itoa(int(slotNum.Int64()))}).(prometheus.Gauge).Set(float64(profitPercent))
mgr.submitCost.With(prometheus.Labels{"slot": strconv.Itoa(int(slotNum.Int64()))}).(prometheus.Gauge).Set(float64(txCost.Int64()))
mgr.submitReward.With(prometheus.Labels{"slot": strconv.Itoa(int(slotNum.Int64()))}).(prometheus.Gauge).Set(float64(reward.Int64()))
return profitPercent, nil
}
| {
go func(tx *types.Transaction) {
receipt, err := bind.WaitMined(ctx, mgr.ethClient, tx)
if err != nil {
level.Error(mgr.logger).Log("msg", "transaction result for calculating transaction cost", "err", err)
}
if receipt.Status != 1 {
mgr.submitFailCount.Inc()
level.Error(mgr.logger).Log("msg", "unsuccessful submitSolution transaction, not saving the tx cost in the db", "txHash", receipt.TxHash.String())
return
}
gasUsed := big.NewInt(int64(receipt.GasUsed))
slotNum, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte("_SLOT_PROGRESS")))
if err != nil {
level.Error(mgr.logger).Log("msg", "getting _SLOT_PROGRESS for calculating transaction cost", "err", err)
}
txID := tellorCommon.PriceTXs + slotNum.String()
err = mgr.database.Put(txID, gasUsed.Bytes())
if err != nil {
level.Error(mgr.logger).Log("msg", "saving transaction cost", "err", err)
}
level.Debug(mgr.logger).Log("msg", "saved transaction gas used", "txHash", receipt.TxHash.String(), "amount", gasUsed.Int64(), "slot", slotNum.Int64())
}(tx)
} | identifier_body |
spell_parser.py | import math
import sys, os
from os import remove, close, path, name
import re
import datetime
# check dependent modules
from importlib import util
req_spec = util.find_spec("requests")
req_found = req_spec is not None
if (not req_found):
print('"requests" module not found, install using the command "pip install requests"')
sys.exit()
col_spec = util.find_spec("colorama")
col_found = col_spec is not None
if (not col_found):
print('"colorama" module not found, install using the command "pip install colorama"')
sys.exit()
# end check modules
class WindowsInhibitor:
ES_CONTINUOUS = 0x80000000
ES_SYSTEM_REQUIRED = 0x00000001
def __init__(self):
pass
def Inhibit(self):
import ctypes
#print("Preventing Windows from going to sleep")
ctypes.windll.kernel32.SetThreadExecutionState(
WindowsInhibitor.ES_CONTINUOUS | \
WindowsInhibitor.ES_SYSTEM_REQUIRED)
def Uninhibit(self):
import ctypes
#print("Allowing Windows to go to sleep")
ctypes.windll.kernel32.SetThreadExecutionState(
WindowsInhibitor.ES_CONTINUOUS)
class File():
def __init__(self, path_):
self.path = path_
self.lines = 0
def Exists(self):
return os.path.isfile(self.path)
def Write(self, text):
f = open(self.path,'w', encoding="utf8")
f.write(text)
f.close()
def WriteAppend(self, text):
f = open(self.path,'a', encoding="utf8")
f.write(text)
f.close()
def ReadFile(self):
f = open(self.path, "r", encoding="utf8")
contents = f.readlines()
f.close()
return contents
def WriteLines(self, linesArray):
f = open(self.path,'w', encoding="utf8")
for line in linesArray:
f.write(line)
f.close()
def ReplacePattern(self, pattern, subst):
from tempfile import mkstemp
from shutil import move
while not self.CheckFileAccess():
pass
#Create temp file
fh, abs_path = mkstemp()
with open(abs_path,'w', encoding="utf8") as new_file:
with open(self.path, encoding="utf8") as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
close(fh)
#Remove original file
remove(self.path)
#Move new file
move(abs_path, self.path)
def DeleteLine(self, lineNum):
from tempfile import mkstemp
from shutil import move
while not self.CheckFileAccess():
pass
#Create temp file
fh, abs_path = mkstemp()
with open(abs_path,'w', encoding="utf8") as new_file:
with open(self.path, encoding="utf8") as old_file:
for lineno, line in enumerate(old_file, 1):
if lineno != lineNum:
new_file.write(line)
close(fh)
#Remove original file
remove(self.path)
#Move new file
move(abs_path, self.path)
def CheckFileAccess(self):
if os.path.exists(self.path):
try:
os.rename(self.path, self.path)
#print('Access on file "' + self.path +'" is available!')
return True
except OSError as e:
pass
#print('Access-error on file "' + self.path + '"! \n' + str(e))
return False
def GetNumberOfLines(self):
f = open(self.path, 'r', encoding="utf8")
lines = 0
for line in f:
lines += 1
f.close()
self.lines = lines
return lines
def InsertInLine(self, index, value):
f = open(self.path, "r", encoding="utf8")
contents = f.readlines()
f.close()
contents.insert(index, value)
contents = "".join(contents)
self.Write(contents)
class Parser():
def __init__(self, args):
from colorama import init
init()
#If Windows then prevent sleep
self.osSleep = None
if os.name == 'nt':
self.osSleep = WindowsInhibitor()
self.osSleep.Inhibit()
# INIT
print("--------------------------")
print(" \033[33mSpellNameLocalized WoW Api Parser\033[0m")
print("--------------------------")
self.localesList = ["en_US","es_MX","pt_BR","de_DE","es_ES","fr_FR","it_IT","ru_RU","ko_KR","zh_TW","zh_CN"]
self.eachLangcheck = []
self.rangeStart = 25
self.rangeEnd = 26
self.processStarted = False
self.lastItemID = 0
self.lastSavedID = 0
self.minimumPrints = False
self.baseUrl = 'https://eu.api.blizzard.com/data/wow/spell/%d'
self.files = []
self.dbFile = File("parser_progress.txt")
#Color flags
self.ADDED = "32"
self.REPLACED = "36"
self.UNTOUCHED = "30;1"
self.REMOVED = "31"
self.NOT_FOUND = "33"
#Create locale files if missing
for i in range(len(self.localesList)):
locale = self.localesList[i];
path = 'SpellLocales/' + locale + '.lua'
init = 'INL_Spells.%s = {\n}' % (locale.replace("_", ""))
self.files.append(File(path))
if not self.files[i].Exists():
self.files[i].Write(init);
self.files[i].GetNumberOfLines();
self.eachLangcheck.append(0);
#Check dbFile file
if not self.dbFile.Exists():
self.PrintError("E", 'File "parser_progress.txt" does not exist.')
self.Exit()
#Config
if len(args) <= 1:
self.PrintArgs()
self.Exit()
else:
self.Config(args)
if self.rangeEnd <= self.rangeStart:
self.PrintError("E", "rangeStart can't be less or equal than rangeEnd")
self.Exit()
self.lastSavedID = self.rangeStart
#Utils
self.utils = Utils()
self.utils.PlaySound(2000, 250, 1)
#restart after error
self.Command = ""
self.Continue = True
try:
os.system("chcp 65001 > nul")
except:
pass
def Exit(self):
try:
if self.osSleep:
self.osSleep.Uninhibit()
if self.Continue:
print(self.Command)
if self.Command != "":
os.system(self.Command)
else:
sys.exit()
except KeyboardInterrupt:
sys.exit()
except:
sys.exit()
raise
def PrintArgs(self):
print(" \033[36m(i) HELP\033[0m: Arguments: spells_parser.py [rangeStart][rangeEnd][DisablePrints]")
print(" Example: spells_parser.py 15649 150000 False")
print("--------------------------")
print(" Color codes displayed while parsing:")
print(" \033[%smGreen\033[0m: New spell added" % self.ADDED)
print(" \033[%smRed\033[0m: Spell deleted (The spell was in the file, but not in the API)" % self.REMOVED)
print(" \033[%smYellow\033[0m: Spell not found (The spell was not present in the file and the API)" % self.NOT_FOUND)
print(" \033[%smBlue\033[0m: Spell replaced (A different version of the spell was found in the API)" % self.REPLACED)
print(" \033[%smBlack\033[0m: Spell not changed (The spell in the file is the same as the found in the API)" % self.UNTOUCHED)
print("--------------------------")
def PrintConfig(self):
print(" Config:")
print("\tRange Start: \033[36m%i\033[0m" % (self.rangeStart))
print("\tRange End: \033[36m%i\033[0m" % (self.rangeEnd))
print("\tMinimum Prints: \033[36m%s\033[0m" % (self.minimumPrints))
print("--------------------------")
def PrintError(self, type, message):
t = ""
if type == "E":
t = "\033[31m/!\\ ERROR\033[0m"
self.utils.PlaySound(200, 200, 3)
elif type == "W":
t = "\033[33m/!\\ WARNING\033[0m"
self.utils.PlaySound(200, 200, 2)
print(" %s: %s" % (t, message))
print("--------------------------")
def Config(self, args):
if len(args) >= 2:
if args[1] is not None:
self.rangeStart = int(args[1])
if len(args) >= 3:
if args[2] is not None:
self.rangeEnd = int(args[2])
if len(args) >= 4:
if args[3] is not None:
self.minimumPrints = args[3].lower() == 'true'
def SaveIndexes(self):
dbFilename = "spell_parser";
self.Command = "%s.py %i %i" % (dbFilename, self.lastItemID, self.rangeEnd)
print(" \033[35m%s.py %i %i\033[0m" % (dbFilename, self.lastItemID, self.rangeEnd))
self.dbFile.ReplacePattern("%s.py %i %i" % (dbFilename, self.lastSavedID, self.rangeEnd), self.Command)
def FindInFile(self, fileIndex, item):
contents = self.files[fileIndex].ReadFile()
return self.FindInContents(item, 2, self.files[fileIndex].lines -1, contents)
def FindInContents(self, item, minI, maxI, contents):
guess = int(math.floor(minI + (maxI - minI) / 2))
#print ("guess: %d | min: %d | max: %d" % (guess, minI, maxI))
if maxI >= minI:
guessed_line = contents[guess-1]
#print ("guess: %d | min: %d | max: %d | guessed_line: %s" % (guess, minI, maxI, guessed_line[:-1]))
m = re.search('(\d{1,7})', guessed_line)
if m is None:
return [1, False]
guessed_ID = int(m.group(0))
#print("ID: %d" % guessed_ID)
if guessed_ID == item:
#print ("END | guess: %d | line: %s" % (guess-1, contents[guess-1]))
return [guess-1, True]
if guessed_ID < item:
#print ("ID: %d < item: %s" % (guessed_ID, item))
return self.FindInContents(item, guess + 1, maxI, contents)
else:
#print ("ID: %d > item: %s" % (guessed_ID, item))
return self.FindInContents(item, minI, guess - 1, contents)
else:
#print ("END | %d NOT FOUND at pos %d" % (item, guess))
return [guess-1, False]
def GetNameFromLine(self, fileIndex, lineIndex):
contents = self.files[fileIndex].ReadFile()
m = re.search(r'"(.+?)(?<!\\)"', contents[lineIndex])
if(m is not None):
return m.group(0)
return '""'
def ReplaceNameFromLine(self, fileIndex, lineIndex, newText):
contents = self.files[fileIndex].ReadFile()
self.files[fileIndex].ReplacePattern(contents[lineIndex], newText)
def PrintItem(self, itemID):
now = datetime.datetime.now()
time = now.strftime('%H:%M:%S')
if not self.minimumPrints:
data = "|"
for localeIndex in range(len(self.localesList)):
data = "%s\033[%sm%s\033[0m|" % (data, self.eachLangcheck[localeIndex], self.localesList[localeIndex])
print(" %s - #%i - [%s]" % (time, itemID, data))
else:
if itemID % 50 == 0:
print(" %s - \033[36m#%i\033[0m" % (time, itemID))
def Run(self):
import json
reqs = Requests()
reqs.GetToken()
error = False
addedItems = 0
replacedItems = 0
isReplaced = False
try:
import requests, datetime
print(" \033[32mStarting\033[0m spells parser")
print("--------------------------")
self.lastItemID = self.rangeStart
params = dict(
namespace="static-eu",
access_token=reqs.token
)
self.processStarted = True
while self.lastItemID < self.rangeEnd-1 and not error:
try:
#REQUEST AND PARSE ITEMS
for itemID in range(self.lastItemID, self.rangeEnd):
self.lastItemID = itemID
url = self.baseUrl % (itemID)
reqs.MakeRequest(url, params)
req_status_code = reqs.GetRequestStatusCode()
data = reqs.GetData()
if data != "Downstream Error":
data = json.loads(data)
if req_status_code == 200:
if 'name' not in data:
print(data)
self.PrintError("E", "No 'name' field in data")
print("--------------------------")
error = True
break
for localeIndex in range(len(self.localesList)):
locale = self.localesList[localeIndex];
nameDict = data["name"]
if locale not in nameDict:
self.eachLangcheck[localeIndex] = self.NOT_FOUND;
else:
name = nameDict[locale]
name = name.replace('"', '\\"')
name = name.replace('\r\n', '')
name = name.replace('\n', '')
luaString = ' {%i,"%s"},\n' % (itemID, name)
result = self.FindInFile(localeIndex, itemID)
exists = result[1]
if not exists:
self.files[localeIndex].InsertInLine(result[0]+1, luaString)
self.files[localeIndex].lines += 1
addedItems += 1
self.eachLangcheck[localeIndex] = self.ADDED;
else:
current_name = self.GetNameFromLine(localeIndex, result[0])
new_name = '"%s"' % (name)
if current_name != new_name:
isReplaced = True
self.ReplaceNameFromLine(localeIndex, result[0], luaString)
replacedItems += 1
self.eachLangcheck[localeIndex] = self.REPLACED;
else:
isReplaced = False
self.eachLangcheck[localeIndex] = self.UNTOUCHED;
self.PrintItem(itemID)
else:
if req_status_code == 404:
if 'detail' not in data:
self.PrintError("E", "404 Error: No reason found")
error = True
break
else:
for localeIndex in range(len(self.localesList)):
result = self.FindInFile(localeIndex, itemID)
exists = result[1]
if exists:
self.files[localeIndex].DeleteLine(result[0]+1)
self.eachLangcheck[localeIndex] = self.REMOVED;
else:
self.eachLangcheck[localeIndex] = self.NOT_FOUND;
self.PrintItem(itemID)
elif req_status_code == 504:
self.PrintError("E", "504 Error: Gateway timeout")
error = True
break
else:
self.PrintError("E", "%i Error: Unknown error code" % (req_status_code)) #504
error = True
break
else:
print(" %s - \033[33m#%i\033[0m - %s" % (time, itemID, "Downstream Error"))
if itemID % 50 == 0:
print(" New indexes saved:")
self.SaveIndexes()
self.lastSavedID = itemID
except KeyboardInterrupt:
error = True
raise
except requests.exceptions.ConnectionError:
error = True
raise
except IOError:
error = True
raise
except ValueError:
error = True
raise
except:
print("--------------------------")
self.PrintError("E", "Unknown Error")
error = True
raise
if not error:
self.lastItemID += 1
if self.lastItemID >= self.rangeEnd-1:
self.Continue = False
print("--------------------------")
except KeyboardInterrupt:
print("--------------------------")
if self.processStarted:
self.PrintError("W", "Process interrupted by the user")
else:
self.PrintError("W", "Process interrupted by the user before starting")
self.Continue = False
except requests.exceptions.ConnectionError:
print("--------------------------")
self.PrintError("E", "There was a problem with the Internet connection.")
except IOError:
print("--------------------------")
self.PrintError("E", "There was a problem with the file access.")
except ValueError:
print("--------------------------")
self.PrintError("E", "No JSON object could be decoded // ValueError")
except:
print("--------------------------")
self.PrintError("E", "Unknown Error")
raise
finally:
print(" New indexes saved:")
self.SaveIndexes()
print("--------------------------")
print(" Stats:")
print("\tSpells parsed: \033[36m%i\033[0m" % (self.lastItemID - self.rangeStart))
print("\tNew spells added: \033[36m%i\033[0m" % (addedItems))
print("\tReplaced spells: \033[36m%i\033[0m" % (replacedItems))
print("--------------------------")
print(" \033[32mProcess Finished\033[0m")
print("--------------------------")
self.utils.PlaySound(2000, 250, 1)
class Utils():
def __init__(self):
self.Mute = False
pass
def PlaySound(self, frequency, duration, repetitions):
if not self.Mute:
try:
import winsound
for x in range(1, repetitions+1):
winsound.Beep(frequency, duration)
except:
pass
class Requests():
def __init__(self):
self.client_id_file = "client_id.key"
self.client_secret_file = "client_secret.key"
self.token = ""
def GetToken(self):
try:
import requests, json
url = "https://eu.battle.net/oauth/token"
params = dict(
grant_type="client_credentials",
client_id=self.ReadClientID(),
client_secret=self.ReadClientSecret()
)
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
if resp.status_code == 200:
if 'access_token' not in data:
print(data)
self.PrintError("E", "No 'name' field in data")
print("--------------------------")
self.token = data["access_token"]
except ValueError:
print("--------------------------")
self.PrintError("E", "No JSON TOKEN object could be decoded")
except requests.exceptions.ConnectionError:
print("--------------------------")
self.PrintError("E", "There was a problem with the Internet connection.")
def ReadClientID(self):
if os.path.isfile(self.client_id_file):
fa = open(self.client_id_file,'r', encoding="utf8")
client_id = fa.readline()
fa.close()
return client_id
def ReadClientSecret(self):
if os.path.isfile(self.client_secret_file):
fa = open(self.client_secret_file,'r', encoding="utf8")
client_secret = fa.readline()
fa.close()
return client_secret
def MakeRequest(self, url, params):
import requests
self.resp = requests.get(url=url, params=params)
def GetRequestStatusCode(self):
return self.resp.status_code
def GetData(self):
return self.resp.text
def PrintError(self, type, message):
t = "" | t = "\033[31m/!\\ ERROR\033[0m"
# self.utils.PlaySound(200, 200, 3)
elif type == "W":
t = "\033[33m/!\\ WARNING\033[0m"
# self.utils.PlaySound(200, 200, 2)
print(" %s: %s" % (t, message))
print("--------------------------")
''' ****************
PROGRAM START
**************** '''
parser = Parser(sys.argv)
parser.PrintConfig()
parser.Run()
parser.Exit() | if type == "E": | random_line_split |
spell_parser.py | import math
import sys, os
from os import remove, close, path, name
import re
import datetime
# check dependent modules
from importlib import util
req_spec = util.find_spec("requests")
req_found = req_spec is not None
if (not req_found):
print('"requests" module not found, install using the command "pip install requests"')
sys.exit()
col_spec = util.find_spec("colorama")
col_found = col_spec is not None
if (not col_found):
print('"colorama" module not found, install using the command "pip install colorama"')
sys.exit()
# end check modules
class WindowsInhibitor:
ES_CONTINUOUS = 0x80000000
ES_SYSTEM_REQUIRED = 0x00000001
def __init__(self):
pass
def Inhibit(self):
import ctypes
#print("Preventing Windows from going to sleep")
ctypes.windll.kernel32.SetThreadExecutionState(
WindowsInhibitor.ES_CONTINUOUS | \
WindowsInhibitor.ES_SYSTEM_REQUIRED)
def Uninhibit(self):
import ctypes
#print("Allowing Windows to go to sleep")
ctypes.windll.kernel32.SetThreadExecutionState(
WindowsInhibitor.ES_CONTINUOUS)
class File():
def __init__(self, path_):
self.path = path_
self.lines = 0
def Exists(self):
return os.path.isfile(self.path)
def Write(self, text):
f = open(self.path,'w', encoding="utf8")
f.write(text)
f.close()
def WriteAppend(self, text):
f = open(self.path,'a', encoding="utf8")
f.write(text)
f.close()
def ReadFile(self):
f = open(self.path, "r", encoding="utf8")
contents = f.readlines()
f.close()
return contents
def WriteLines(self, linesArray):
f = open(self.path,'w', encoding="utf8")
for line in linesArray:
f.write(line)
f.close()
def ReplacePattern(self, pattern, subst):
from tempfile import mkstemp
from shutil import move
while not self.CheckFileAccess():
pass
#Create temp file
fh, abs_path = mkstemp()
with open(abs_path,'w', encoding="utf8") as new_file:
with open(self.path, encoding="utf8") as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
close(fh)
#Remove original file
remove(self.path)
#Move new file
move(abs_path, self.path)
def DeleteLine(self, lineNum):
from tempfile import mkstemp
from shutil import move
while not self.CheckFileAccess():
pass
#Create temp file
fh, abs_path = mkstemp()
with open(abs_path,'w', encoding="utf8") as new_file:
with open(self.path, encoding="utf8") as old_file:
for lineno, line in enumerate(old_file, 1):
if lineno != lineNum:
new_file.write(line)
close(fh)
#Remove original file
remove(self.path)
#Move new file
move(abs_path, self.path)
def CheckFileAccess(self):
if os.path.exists(self.path):
try:
os.rename(self.path, self.path)
#print('Access on file "' + self.path +'" is available!')
return True
except OSError as e:
pass
#print('Access-error on file "' + self.path + '"! \n' + str(e))
return False
def GetNumberOfLines(self):
f = open(self.path, 'r', encoding="utf8")
lines = 0
for line in f:
lines += 1
f.close()
self.lines = lines
return lines
def InsertInLine(self, index, value):
f = open(self.path, "r", encoding="utf8")
contents = f.readlines()
f.close()
contents.insert(index, value)
contents = "".join(contents)
self.Write(contents)
class Parser():
def __init__(self, args):
from colorama import init
init()
#If Windows then prevent sleep
self.osSleep = None
if os.name == 'nt':
self.osSleep = WindowsInhibitor()
self.osSleep.Inhibit()
# INIT
print("--------------------------")
print(" \033[33mSpellNameLocalized WoW Api Parser\033[0m")
print("--------------------------")
self.localesList = ["en_US","es_MX","pt_BR","de_DE","es_ES","fr_FR","it_IT","ru_RU","ko_KR","zh_TW","zh_CN"]
self.eachLangcheck = []
self.rangeStart = 25
self.rangeEnd = 26
self.processStarted = False
self.lastItemID = 0
self.lastSavedID = 0
self.minimumPrints = False
self.baseUrl = 'https://eu.api.blizzard.com/data/wow/spell/%d'
self.files = []
self.dbFile = File("parser_progress.txt")
#Color flags
self.ADDED = "32"
self.REPLACED = "36"
self.UNTOUCHED = "30;1"
self.REMOVED = "31"
self.NOT_FOUND = "33"
#Create locale files if missing
for i in range(len(self.localesList)):
locale = self.localesList[i];
path = 'SpellLocales/' + locale + '.lua'
init = 'INL_Spells.%s = {\n}' % (locale.replace("_", ""))
self.files.append(File(path))
if not self.files[i].Exists():
self.files[i].Write(init);
self.files[i].GetNumberOfLines();
self.eachLangcheck.append(0);
#Check dbFile file
if not self.dbFile.Exists():
self.PrintError("E", 'File "parser_progress.txt" does not exist.')
self.Exit()
#Config
if len(args) <= 1:
self.PrintArgs()
self.Exit()
else:
self.Config(args)
if self.rangeEnd <= self.rangeStart:
self.PrintError("E", "rangeStart can't be less or equal than rangeEnd")
self.Exit()
self.lastSavedID = self.rangeStart
#Utils
self.utils = Utils()
self.utils.PlaySound(2000, 250, 1)
#restart after error
self.Command = ""
self.Continue = True
try:
os.system("chcp 65001 > nul")
except:
pass
def Exit(self):
try:
if self.osSleep:
self.osSleep.Uninhibit()
if self.Continue:
print(self.Command)
if self.Command != "":
os.system(self.Command)
else:
sys.exit()
except KeyboardInterrupt:
sys.exit()
except:
sys.exit()
raise
def PrintArgs(self):
print(" \033[36m(i) HELP\033[0m: Arguments: spells_parser.py [rangeStart][rangeEnd][DisablePrints]")
print(" Example: spells_parser.py 15649 150000 False")
print("--------------------------")
print(" Color codes displayed while parsing:")
print(" \033[%smGreen\033[0m: New spell added" % self.ADDED)
print(" \033[%smRed\033[0m: Spell deleted (The spell was in the file, but not in the API)" % self.REMOVED)
print(" \033[%smYellow\033[0m: Spell not found (The spell was not present in the file and the API)" % self.NOT_FOUND)
print(" \033[%smBlue\033[0m: Spell replaced (A different version of the spell was found in the API)" % self.REPLACED)
print(" \033[%smBlack\033[0m: Spell not changed (The spell in the file is the same as the found in the API)" % self.UNTOUCHED)
print("--------------------------")
def PrintConfig(self):
print(" Config:")
print("\tRange Start: \033[36m%i\033[0m" % (self.rangeStart))
print("\tRange End: \033[36m%i\033[0m" % (self.rangeEnd))
print("\tMinimum Prints: \033[36m%s\033[0m" % (self.minimumPrints))
print("--------------------------")
def PrintError(self, type, message):
t = ""
if type == "E":
t = "\033[31m/!\\ ERROR\033[0m"
self.utils.PlaySound(200, 200, 3)
elif type == "W":
t = "\033[33m/!\\ WARNING\033[0m"
self.utils.PlaySound(200, 200, 2)
print(" %s: %s" % (t, message))
print("--------------------------")
def Config(self, args):
if len(args) >= 2:
|
if len(args) >= 3:
if args[2] is not None:
self.rangeEnd = int(args[2])
if len(args) >= 4:
if args[3] is not None:
self.minimumPrints = args[3].lower() == 'true'
def SaveIndexes(self):
dbFilename = "spell_parser";
self.Command = "%s.py %i %i" % (dbFilename, self.lastItemID, self.rangeEnd)
print(" \033[35m%s.py %i %i\033[0m" % (dbFilename, self.lastItemID, self.rangeEnd))
self.dbFile.ReplacePattern("%s.py %i %i" % (dbFilename, self.lastSavedID, self.rangeEnd), self.Command)
def FindInFile(self, fileIndex, item):
contents = self.files[fileIndex].ReadFile()
return self.FindInContents(item, 2, self.files[fileIndex].lines -1, contents)
def FindInContents(self, item, minI, maxI, contents):
guess = int(math.floor(minI + (maxI - minI) / 2))
#print ("guess: %d | min: %d | max: %d" % (guess, minI, maxI))
if maxI >= minI:
guessed_line = contents[guess-1]
#print ("guess: %d | min: %d | max: %d | guessed_line: %s" % (guess, minI, maxI, guessed_line[:-1]))
m = re.search('(\d{1,7})', guessed_line)
if m is None:
return [1, False]
guessed_ID = int(m.group(0))
#print("ID: %d" % guessed_ID)
if guessed_ID == item:
#print ("END | guess: %d | line: %s" % (guess-1, contents[guess-1]))
return [guess-1, True]
if guessed_ID < item:
#print ("ID: %d < item: %s" % (guessed_ID, item))
return self.FindInContents(item, guess + 1, maxI, contents)
else:
#print ("ID: %d > item: %s" % (guessed_ID, item))
return self.FindInContents(item, minI, guess - 1, contents)
else:
#print ("END | %d NOT FOUND at pos %d" % (item, guess))
return [guess-1, False]
def GetNameFromLine(self, fileIndex, lineIndex):
contents = self.files[fileIndex].ReadFile()
m = re.search(r'"(.+?)(?<!\\)"', contents[lineIndex])
if(m is not None):
return m.group(0)
return '""'
def ReplaceNameFromLine(self, fileIndex, lineIndex, newText):
contents = self.files[fileIndex].ReadFile()
self.files[fileIndex].ReplacePattern(contents[lineIndex], newText)
def PrintItem(self, itemID):
now = datetime.datetime.now()
time = now.strftime('%H:%M:%S')
if not self.minimumPrints:
data = "|"
for localeIndex in range(len(self.localesList)):
data = "%s\033[%sm%s\033[0m|" % (data, self.eachLangcheck[localeIndex], self.localesList[localeIndex])
print(" %s - #%i - [%s]" % (time, itemID, data))
else:
if itemID % 50 == 0:
print(" %s - \033[36m#%i\033[0m" % (time, itemID))
def Run(self):
import json
reqs = Requests()
reqs.GetToken()
error = False
addedItems = 0
replacedItems = 0
isReplaced = False
try:
import requests, datetime
print(" \033[32mStarting\033[0m spells parser")
print("--------------------------")
self.lastItemID = self.rangeStart
params = dict(
namespace="static-eu",
access_token=reqs.token
)
self.processStarted = True
while self.lastItemID < self.rangeEnd-1 and not error:
try:
#REQUEST AND PARSE ITEMS
for itemID in range(self.lastItemID, self.rangeEnd):
self.lastItemID = itemID
url = self.baseUrl % (itemID)
reqs.MakeRequest(url, params)
req_status_code = reqs.GetRequestStatusCode()
data = reqs.GetData()
if data != "Downstream Error":
data = json.loads(data)
if req_status_code == 200:
if 'name' not in data:
print(data)
self.PrintError("E", "No 'name' field in data")
print("--------------------------")
error = True
break
for localeIndex in range(len(self.localesList)):
locale = self.localesList[localeIndex];
nameDict = data["name"]
if locale not in nameDict:
self.eachLangcheck[localeIndex] = self.NOT_FOUND;
else:
name = nameDict[locale]
name = name.replace('"', '\\"')
name = name.replace('\r\n', '')
name = name.replace('\n', '')
luaString = ' {%i,"%s"},\n' % (itemID, name)
result = self.FindInFile(localeIndex, itemID)
exists = result[1]
if not exists:
self.files[localeIndex].InsertInLine(result[0]+1, luaString)
self.files[localeIndex].lines += 1
addedItems += 1
self.eachLangcheck[localeIndex] = self.ADDED;
else:
current_name = self.GetNameFromLine(localeIndex, result[0])
new_name = '"%s"' % (name)
if current_name != new_name:
isReplaced = True
self.ReplaceNameFromLine(localeIndex, result[0], luaString)
replacedItems += 1
self.eachLangcheck[localeIndex] = self.REPLACED;
else:
isReplaced = False
self.eachLangcheck[localeIndex] = self.UNTOUCHED;
self.PrintItem(itemID)
else:
if req_status_code == 404:
if 'detail' not in data:
self.PrintError("E", "404 Error: No reason found")
error = True
break
else:
for localeIndex in range(len(self.localesList)):
result = self.FindInFile(localeIndex, itemID)
exists = result[1]
if exists:
self.files[localeIndex].DeleteLine(result[0]+1)
self.eachLangcheck[localeIndex] = self.REMOVED;
else:
self.eachLangcheck[localeIndex] = self.NOT_FOUND;
self.PrintItem(itemID)
elif req_status_code == 504:
self.PrintError("E", "504 Error: Gateway timeout")
error = True
break
else:
self.PrintError("E", "%i Error: Unknown error code" % (req_status_code)) #504
error = True
break
else:
print(" %s - \033[33m#%i\033[0m - %s" % (time, itemID, "Downstream Error"))
if itemID % 50 == 0:
print(" New indexes saved:")
self.SaveIndexes()
self.lastSavedID = itemID
except KeyboardInterrupt:
error = True
raise
except requests.exceptions.ConnectionError:
error = True
raise
except IOError:
error = True
raise
except ValueError:
error = True
raise
except:
print("--------------------------")
self.PrintError("E", "Unknown Error")
error = True
raise
if not error:
self.lastItemID += 1
if self.lastItemID >= self.rangeEnd-1:
self.Continue = False
print("--------------------------")
except KeyboardInterrupt:
print("--------------------------")
if self.processStarted:
self.PrintError("W", "Process interrupted by the user")
else:
self.PrintError("W", "Process interrupted by the user before starting")
self.Continue = False
except requests.exceptions.ConnectionError:
print("--------------------------")
self.PrintError("E", "There was a problem with the Internet connection.")
except IOError:
print("--------------------------")
self.PrintError("E", "There was a problem with the file access.")
except ValueError:
print("--------------------------")
self.PrintError("E", "No JSON object could be decoded // ValueError")
except:
print("--------------------------")
self.PrintError("E", "Unknown Error")
raise
finally:
print(" New indexes saved:")
self.SaveIndexes()
print("--------------------------")
print(" Stats:")
print("\tSpells parsed: \033[36m%i\033[0m" % (self.lastItemID - self.rangeStart))
print("\tNew spells added: \033[36m%i\033[0m" % (addedItems))
print("\tReplaced spells: \033[36m%i\033[0m" % (replacedItems))
print("--------------------------")
print(" \033[32mProcess Finished\033[0m")
print("--------------------------")
self.utils.PlaySound(2000, 250, 1)
class Utils():
def __init__(self):
self.Mute = False
pass
def PlaySound(self, frequency, duration, repetitions):
if not self.Mute:
try:
import winsound
for x in range(1, repetitions+1):
winsound.Beep(frequency, duration)
except:
pass
class Requests():
def __init__(self):
self.client_id_file = "client_id.key"
self.client_secret_file = "client_secret.key"
self.token = ""
def GetToken(self):
try:
import requests, json
url = "https://eu.battle.net/oauth/token"
params = dict(
grant_type="client_credentials",
client_id=self.ReadClientID(),
client_secret=self.ReadClientSecret()
)
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
if resp.status_code == 200:
if 'access_token' not in data:
print(data)
self.PrintError("E", "No 'name' field in data")
print("--------------------------")
self.token = data["access_token"]
except ValueError:
print("--------------------------")
self.PrintError("E", "No JSON TOKEN object could be decoded")
except requests.exceptions.ConnectionError:
print("--------------------------")
self.PrintError("E", "There was a problem with the Internet connection.")
def ReadClientID(self):
if os.path.isfile(self.client_id_file):
fa = open(self.client_id_file,'r', encoding="utf8")
client_id = fa.readline()
fa.close()
return client_id
def ReadClientSecret(self):
if os.path.isfile(self.client_secret_file):
fa = open(self.client_secret_file,'r', encoding="utf8")
client_secret = fa.readline()
fa.close()
return client_secret
def MakeRequest(self, url, params):
import requests
self.resp = requests.get(url=url, params=params)
def GetRequestStatusCode(self):
return self.resp.status_code
def GetData(self):
return self.resp.text
def PrintError(self, type, message):
t = ""
if type == "E":
t = "\033[31m/!\\ ERROR\033[0m"
# self.utils.PlaySound(200, 200, 3)
elif type == "W":
t = "\033[33m/!\\ WARNING\033[0m"
# self.utils.PlaySound(200, 200, 2)
print(" %s: %s" % (t, message))
print("--------------------------")
''' ****************
PROGRAM START
**************** '''
parser = Parser(sys.argv)
parser.PrintConfig()
parser.Run()
parser.Exit() | if args[1] is not None:
self.rangeStart = int(args[1]) | conditional_block |
spell_parser.py | import math
import sys, os
from os import remove, close, path, name
import re
import datetime
# check dependent modules
from importlib import util
req_spec = util.find_spec("requests")
req_found = req_spec is not None
if (not req_found):
print('"requests" module not found, install using the command "pip install requests"')
sys.exit()
col_spec = util.find_spec("colorama")
col_found = col_spec is not None
if (not col_found):
print('"colorama" module not found, install using the command "pip install colorama"')
sys.exit()
# end check modules
class WindowsInhibitor:
ES_CONTINUOUS = 0x80000000
ES_SYSTEM_REQUIRED = 0x00000001
def __init__(self):
pass
def Inhibit(self):
import ctypes
#print("Preventing Windows from going to sleep")
ctypes.windll.kernel32.SetThreadExecutionState(
WindowsInhibitor.ES_CONTINUOUS | \
WindowsInhibitor.ES_SYSTEM_REQUIRED)
def Uninhibit(self):
import ctypes
#print("Allowing Windows to go to sleep")
ctypes.windll.kernel32.SetThreadExecutionState(
WindowsInhibitor.ES_CONTINUOUS)
class File():
def __init__(self, path_):
self.path = path_
self.lines = 0
def Exists(self):
return os.path.isfile(self.path)
def Write(self, text):
f = open(self.path,'w', encoding="utf8")
f.write(text)
f.close()
def WriteAppend(self, text):
f = open(self.path,'a', encoding="utf8")
f.write(text)
f.close()
def ReadFile(self):
f = open(self.path, "r", encoding="utf8")
contents = f.readlines()
f.close()
return contents
def WriteLines(self, linesArray):
f = open(self.path,'w', encoding="utf8")
for line in linesArray:
f.write(line)
f.close()
def ReplacePattern(self, pattern, subst):
from tempfile import mkstemp
from shutil import move
while not self.CheckFileAccess():
pass
#Create temp file
fh, abs_path = mkstemp()
with open(abs_path,'w', encoding="utf8") as new_file:
with open(self.path, encoding="utf8") as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
close(fh)
#Remove original file
remove(self.path)
#Move new file
move(abs_path, self.path)
def DeleteLine(self, lineNum):
from tempfile import mkstemp
from shutil import move
while not self.CheckFileAccess():
pass
#Create temp file
fh, abs_path = mkstemp()
with open(abs_path,'w', encoding="utf8") as new_file:
with open(self.path, encoding="utf8") as old_file:
for lineno, line in enumerate(old_file, 1):
if lineno != lineNum:
new_file.write(line)
close(fh)
#Remove original file
remove(self.path)
#Move new file
move(abs_path, self.path)
def CheckFileAccess(self):
if os.path.exists(self.path):
try:
os.rename(self.path, self.path)
#print('Access on file "' + self.path +'" is available!')
return True
except OSError as e:
pass
#print('Access-error on file "' + self.path + '"! \n' + str(e))
return False
def GetNumberOfLines(self):
f = open(self.path, 'r', encoding="utf8")
lines = 0
for line in f:
lines += 1
f.close()
self.lines = lines
return lines
def InsertInLine(self, index, value):
f = open(self.path, "r", encoding="utf8")
contents = f.readlines()
f.close()
contents.insert(index, value)
contents = "".join(contents)
self.Write(contents)
class Parser():
def __init__(self, args):
from colorama import init
init()
#If Windows then prevent sleep
self.osSleep = None
if os.name == 'nt':
self.osSleep = WindowsInhibitor()
self.osSleep.Inhibit()
# INIT
print("--------------------------")
print(" \033[33mSpellNameLocalized WoW Api Parser\033[0m")
print("--------------------------")
self.localesList = ["en_US","es_MX","pt_BR","de_DE","es_ES","fr_FR","it_IT","ru_RU","ko_KR","zh_TW","zh_CN"]
self.eachLangcheck = []
self.rangeStart = 25
self.rangeEnd = 26
self.processStarted = False
self.lastItemID = 0
self.lastSavedID = 0
self.minimumPrints = False
self.baseUrl = 'https://eu.api.blizzard.com/data/wow/spell/%d'
self.files = []
self.dbFile = File("parser_progress.txt")
#Color flags
self.ADDED = "32"
self.REPLACED = "36"
self.UNTOUCHED = "30;1"
self.REMOVED = "31"
self.NOT_FOUND = "33"
#Create locale files if missing
for i in range(len(self.localesList)):
locale = self.localesList[i];
path = 'SpellLocales/' + locale + '.lua'
init = 'INL_Spells.%s = {\n}' % (locale.replace("_", ""))
self.files.append(File(path))
if not self.files[i].Exists():
self.files[i].Write(init);
self.files[i].GetNumberOfLines();
self.eachLangcheck.append(0);
#Check dbFile file
if not self.dbFile.Exists():
self.PrintError("E", 'File "parser_progress.txt" does not exist.')
self.Exit()
#Config
if len(args) <= 1:
self.PrintArgs()
self.Exit()
else:
self.Config(args)
if self.rangeEnd <= self.rangeStart:
self.PrintError("E", "rangeStart can't be less or equal than rangeEnd")
self.Exit()
self.lastSavedID = self.rangeStart
#Utils
self.utils = Utils()
self.utils.PlaySound(2000, 250, 1)
#restart after error
self.Command = ""
self.Continue = True
try:
os.system("chcp 65001 > nul")
except:
pass
def Exit(self):
try:
if self.osSleep:
self.osSleep.Uninhibit()
if self.Continue:
print(self.Command)
if self.Command != "":
os.system(self.Command)
else:
sys.exit()
except KeyboardInterrupt:
sys.exit()
except:
sys.exit()
raise
def PrintArgs(self):
print(" \033[36m(i) HELP\033[0m: Arguments: spells_parser.py [rangeStart][rangeEnd][DisablePrints]")
print(" Example: spells_parser.py 15649 150000 False")
print("--------------------------")
print(" Color codes displayed while parsing:")
print(" \033[%smGreen\033[0m: New spell added" % self.ADDED)
print(" \033[%smRed\033[0m: Spell deleted (The spell was in the file, but not in the API)" % self.REMOVED)
print(" \033[%smYellow\033[0m: Spell not found (The spell was not present in the file and the API)" % self.NOT_FOUND)
print(" \033[%smBlue\033[0m: Spell replaced (A different version of the spell was found in the API)" % self.REPLACED)
print(" \033[%smBlack\033[0m: Spell not changed (The spell in the file is the same as the found in the API)" % self.UNTOUCHED)
print("--------------------------")
def PrintConfig(self):
print(" Config:")
print("\tRange Start: \033[36m%i\033[0m" % (self.rangeStart))
print("\tRange End: \033[36m%i\033[0m" % (self.rangeEnd))
print("\tMinimum Prints: \033[36m%s\033[0m" % (self.minimumPrints))
print("--------------------------")
def PrintError(self, type, message):
t = ""
if type == "E":
t = "\033[31m/!\\ ERROR\033[0m"
self.utils.PlaySound(200, 200, 3)
elif type == "W":
t = "\033[33m/!\\ WARNING\033[0m"
self.utils.PlaySound(200, 200, 2)
print(" %s: %s" % (t, message))
print("--------------------------")
def Config(self, args):
if len(args) >= 2:
if args[1] is not None:
self.rangeStart = int(args[1])
if len(args) >= 3:
if args[2] is not None:
self.rangeEnd = int(args[2])
if len(args) >= 4:
if args[3] is not None:
self.minimumPrints = args[3].lower() == 'true'
def SaveIndexes(self):
dbFilename = "spell_parser";
self.Command = "%s.py %i %i" % (dbFilename, self.lastItemID, self.rangeEnd)
print(" \033[35m%s.py %i %i\033[0m" % (dbFilename, self.lastItemID, self.rangeEnd))
self.dbFile.ReplacePattern("%s.py %i %i" % (dbFilename, self.lastSavedID, self.rangeEnd), self.Command)
def FindInFile(self, fileIndex, item):
contents = self.files[fileIndex].ReadFile()
return self.FindInContents(item, 2, self.files[fileIndex].lines -1, contents)
def FindInContents(self, item, minI, maxI, contents):
guess = int(math.floor(minI + (maxI - minI) / 2))
#print ("guess: %d | min: %d | max: %d" % (guess, minI, maxI))
if maxI >= minI:
guessed_line = contents[guess-1]
#print ("guess: %d | min: %d | max: %d | guessed_line: %s" % (guess, minI, maxI, guessed_line[:-1]))
m = re.search('(\d{1,7})', guessed_line)
if m is None:
return [1, False]
guessed_ID = int(m.group(0))
#print("ID: %d" % guessed_ID)
if guessed_ID == item:
#print ("END | guess: %d | line: %s" % (guess-1, contents[guess-1]))
return [guess-1, True]
if guessed_ID < item:
#print ("ID: %d < item: %s" % (guessed_ID, item))
return self.FindInContents(item, guess + 1, maxI, contents)
else:
#print ("ID: %d > item: %s" % (guessed_ID, item))
return self.FindInContents(item, minI, guess - 1, contents)
else:
#print ("END | %d NOT FOUND at pos %d" % (item, guess))
return [guess-1, False]
def GetNameFromLine(self, fileIndex, lineIndex):
contents = self.files[fileIndex].ReadFile()
m = re.search(r'"(.+?)(?<!\\)"', contents[lineIndex])
if(m is not None):
return m.group(0)
return '""'
def ReplaceNameFromLine(self, fileIndex, lineIndex, newText):
contents = self.files[fileIndex].ReadFile()
self.files[fileIndex].ReplacePattern(contents[lineIndex], newText)
def PrintItem(self, itemID):
now = datetime.datetime.now()
time = now.strftime('%H:%M:%S')
if not self.minimumPrints:
data = "|"
for localeIndex in range(len(self.localesList)):
data = "%s\033[%sm%s\033[0m|" % (data, self.eachLangcheck[localeIndex], self.localesList[localeIndex])
print(" %s - #%i - [%s]" % (time, itemID, data))
else:
if itemID % 50 == 0:
print(" %s - \033[36m#%i\033[0m" % (time, itemID))
def Run(self):
import json
reqs = Requests()
reqs.GetToken()
error = False
addedItems = 0
replacedItems = 0
isReplaced = False
try:
import requests, datetime
print(" \033[32mStarting\033[0m spells parser")
print("--------------------------")
self.lastItemID = self.rangeStart
params = dict(
namespace="static-eu",
access_token=reqs.token
)
self.processStarted = True
while self.lastItemID < self.rangeEnd-1 and not error:
try:
#REQUEST AND PARSE ITEMS
for itemID in range(self.lastItemID, self.rangeEnd):
self.lastItemID = itemID
url = self.baseUrl % (itemID)
reqs.MakeRequest(url, params)
req_status_code = reqs.GetRequestStatusCode()
data = reqs.GetData()
if data != "Downstream Error":
data = json.loads(data)
if req_status_code == 200:
if 'name' not in data:
print(data)
self.PrintError("E", "No 'name' field in data")
print("--------------------------")
error = True
break
for localeIndex in range(len(self.localesList)):
locale = self.localesList[localeIndex];
nameDict = data["name"]
if locale not in nameDict:
self.eachLangcheck[localeIndex] = self.NOT_FOUND;
else:
name = nameDict[locale]
name = name.replace('"', '\\"')
name = name.replace('\r\n', '')
name = name.replace('\n', '')
luaString = ' {%i,"%s"},\n' % (itemID, name)
result = self.FindInFile(localeIndex, itemID)
exists = result[1]
if not exists:
self.files[localeIndex].InsertInLine(result[0]+1, luaString)
self.files[localeIndex].lines += 1
addedItems += 1
self.eachLangcheck[localeIndex] = self.ADDED;
else:
current_name = self.GetNameFromLine(localeIndex, result[0])
new_name = '"%s"' % (name)
if current_name != new_name:
isReplaced = True
self.ReplaceNameFromLine(localeIndex, result[0], luaString)
replacedItems += 1
self.eachLangcheck[localeIndex] = self.REPLACED;
else:
isReplaced = False
self.eachLangcheck[localeIndex] = self.UNTOUCHED;
self.PrintItem(itemID)
else:
if req_status_code == 404:
if 'detail' not in data:
self.PrintError("E", "404 Error: No reason found")
error = True
break
else:
for localeIndex in range(len(self.localesList)):
result = self.FindInFile(localeIndex, itemID)
exists = result[1]
if exists:
self.files[localeIndex].DeleteLine(result[0]+1)
self.eachLangcheck[localeIndex] = self.REMOVED;
else:
self.eachLangcheck[localeIndex] = self.NOT_FOUND;
self.PrintItem(itemID)
elif req_status_code == 504:
self.PrintError("E", "504 Error: Gateway timeout")
error = True
break
else:
self.PrintError("E", "%i Error: Unknown error code" % (req_status_code)) #504
error = True
break
else:
print(" %s - \033[33m#%i\033[0m - %s" % (time, itemID, "Downstream Error"))
if itemID % 50 == 0:
print(" New indexes saved:")
self.SaveIndexes()
self.lastSavedID = itemID
except KeyboardInterrupt:
error = True
raise
except requests.exceptions.ConnectionError:
error = True
raise
except IOError:
error = True
raise
except ValueError:
error = True
raise
except:
print("--------------------------")
self.PrintError("E", "Unknown Error")
error = True
raise
if not error:
self.lastItemID += 1
if self.lastItemID >= self.rangeEnd-1:
self.Continue = False
print("--------------------------")
except KeyboardInterrupt:
print("--------------------------")
if self.processStarted:
self.PrintError("W", "Process interrupted by the user")
else:
self.PrintError("W", "Process interrupted by the user before starting")
self.Continue = False
except requests.exceptions.ConnectionError:
print("--------------------------")
self.PrintError("E", "There was a problem with the Internet connection.")
except IOError:
print("--------------------------")
self.PrintError("E", "There was a problem with the file access.")
except ValueError:
print("--------------------------")
self.PrintError("E", "No JSON object could be decoded // ValueError")
except:
print("--------------------------")
self.PrintError("E", "Unknown Error")
raise
finally:
print(" New indexes saved:")
self.SaveIndexes()
print("--------------------------")
print(" Stats:")
print("\tSpells parsed: \033[36m%i\033[0m" % (self.lastItemID - self.rangeStart))
print("\tNew spells added: \033[36m%i\033[0m" % (addedItems))
print("\tReplaced spells: \033[36m%i\033[0m" % (replacedItems))
print("--------------------------")
print(" \033[32mProcess Finished\033[0m")
print("--------------------------")
self.utils.PlaySound(2000, 250, 1)
class Utils():
def __init__(self):
self.Mute = False
pass
def PlaySound(self, frequency, duration, repetitions):
if not self.Mute:
try:
import winsound
for x in range(1, repetitions+1):
winsound.Beep(frequency, duration)
except:
pass
class Requests():
def __init__(self):
self.client_id_file = "client_id.key"
self.client_secret_file = "client_secret.key"
self.token = ""
def GetToken(self):
try:
import requests, json
url = "https://eu.battle.net/oauth/token"
params = dict(
grant_type="client_credentials",
client_id=self.ReadClientID(),
client_secret=self.ReadClientSecret()
)
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
if resp.status_code == 200:
if 'access_token' not in data:
print(data)
self.PrintError("E", "No 'name' field in data")
print("--------------------------")
self.token = data["access_token"]
except ValueError:
print("--------------------------")
self.PrintError("E", "No JSON TOKEN object could be decoded")
except requests.exceptions.ConnectionError:
print("--------------------------")
self.PrintError("E", "There was a problem with the Internet connection.")
def ReadClientID(self):
|
def ReadClientSecret(self):
if os.path.isfile(self.client_secret_file):
fa = open(self.client_secret_file,'r', encoding="utf8")
client_secret = fa.readline()
fa.close()
return client_secret
def MakeRequest(self, url, params):
import requests
self.resp = requests.get(url=url, params=params)
def GetRequestStatusCode(self):
return self.resp.status_code
def GetData(self):
return self.resp.text
def PrintError(self, type, message):
t = ""
if type == "E":
t = "\033[31m/!\\ ERROR\033[0m"
# self.utils.PlaySound(200, 200, 3)
elif type == "W":
t = "\033[33m/!\\ WARNING\033[0m"
# self.utils.PlaySound(200, 200, 2)
print(" %s: %s" % (t, message))
print("--------------------------")
''' ****************
PROGRAM START
**************** '''
parser = Parser(sys.argv)
parser.PrintConfig()
parser.Run()
parser.Exit() | if os.path.isfile(self.client_id_file):
fa = open(self.client_id_file,'r', encoding="utf8")
client_id = fa.readline()
fa.close()
return client_id | identifier_body |
spell_parser.py | import math
import sys, os
from os import remove, close, path, name
import re
import datetime
# check dependent modules
from importlib import util
req_spec = util.find_spec("requests")
req_found = req_spec is not None
if (not req_found):
print('"requests" module not found, install using the command "pip install requests"')
sys.exit()
col_spec = util.find_spec("colorama")
col_found = col_spec is not None
if (not col_found):
print('"colorama" module not found, install using the command "pip install colorama"')
sys.exit()
# end check modules
class WindowsInhibitor:
ES_CONTINUOUS = 0x80000000
ES_SYSTEM_REQUIRED = 0x00000001
def __init__(self):
pass
def Inhibit(self):
import ctypes
#print("Preventing Windows from going to sleep")
ctypes.windll.kernel32.SetThreadExecutionState(
WindowsInhibitor.ES_CONTINUOUS | \
WindowsInhibitor.ES_SYSTEM_REQUIRED)
def Uninhibit(self):
import ctypes
#print("Allowing Windows to go to sleep")
ctypes.windll.kernel32.SetThreadExecutionState(
WindowsInhibitor.ES_CONTINUOUS)
class File():
def __init__(self, path_):
self.path = path_
self.lines = 0
def Exists(self):
return os.path.isfile(self.path)
def Write(self, text):
f = open(self.path,'w', encoding="utf8")
f.write(text)
f.close()
def WriteAppend(self, text):
f = open(self.path,'a', encoding="utf8")
f.write(text)
f.close()
def ReadFile(self):
f = open(self.path, "r", encoding="utf8")
contents = f.readlines()
f.close()
return contents
def WriteLines(self, linesArray):
f = open(self.path,'w', encoding="utf8")
for line in linesArray:
f.write(line)
f.close()
def ReplacePattern(self, pattern, subst):
from tempfile import mkstemp
from shutil import move
while not self.CheckFileAccess():
pass
#Create temp file
fh, abs_path = mkstemp()
with open(abs_path,'w', encoding="utf8") as new_file:
with open(self.path, encoding="utf8") as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
close(fh)
#Remove original file
remove(self.path)
#Move new file
move(abs_path, self.path)
def DeleteLine(self, lineNum):
from tempfile import mkstemp
from shutil import move
while not self.CheckFileAccess():
pass
#Create temp file
fh, abs_path = mkstemp()
with open(abs_path,'w', encoding="utf8") as new_file:
with open(self.path, encoding="utf8") as old_file:
for lineno, line in enumerate(old_file, 1):
if lineno != lineNum:
new_file.write(line)
close(fh)
#Remove original file
remove(self.path)
#Move new file
move(abs_path, self.path)
def CheckFileAccess(self):
if os.path.exists(self.path):
try:
os.rename(self.path, self.path)
#print('Access on file "' + self.path +'" is available!')
return True
except OSError as e:
pass
#print('Access-error on file "' + self.path + '"! \n' + str(e))
return False
def GetNumberOfLines(self):
f = open(self.path, 'r', encoding="utf8")
lines = 0
for line in f:
lines += 1
f.close()
self.lines = lines
return lines
def InsertInLine(self, index, value):
f = open(self.path, "r", encoding="utf8")
contents = f.readlines()
f.close()
contents.insert(index, value)
contents = "".join(contents)
self.Write(contents)
class | ():
def __init__(self, args):
from colorama import init
init()
#If Windows then prevent sleep
self.osSleep = None
if os.name == 'nt':
self.osSleep = WindowsInhibitor()
self.osSleep.Inhibit()
# INIT
print("--------------------------")
print(" \033[33mSpellNameLocalized WoW Api Parser\033[0m")
print("--------------------------")
self.localesList = ["en_US","es_MX","pt_BR","de_DE","es_ES","fr_FR","it_IT","ru_RU","ko_KR","zh_TW","zh_CN"]
self.eachLangcheck = []
self.rangeStart = 25
self.rangeEnd = 26
self.processStarted = False
self.lastItemID = 0
self.lastSavedID = 0
self.minimumPrints = False
self.baseUrl = 'https://eu.api.blizzard.com/data/wow/spell/%d'
self.files = []
self.dbFile = File("parser_progress.txt")
#Color flags
self.ADDED = "32"
self.REPLACED = "36"
self.UNTOUCHED = "30;1"
self.REMOVED = "31"
self.NOT_FOUND = "33"
#Create locale files if missing
for i in range(len(self.localesList)):
locale = self.localesList[i];
path = 'SpellLocales/' + locale + '.lua'
init = 'INL_Spells.%s = {\n}' % (locale.replace("_", ""))
self.files.append(File(path))
if not self.files[i].Exists():
self.files[i].Write(init);
self.files[i].GetNumberOfLines();
self.eachLangcheck.append(0);
#Check dbFile file
if not self.dbFile.Exists():
self.PrintError("E", 'File "parser_progress.txt" does not exist.')
self.Exit()
#Config
if len(args) <= 1:
self.PrintArgs()
self.Exit()
else:
self.Config(args)
if self.rangeEnd <= self.rangeStart:
self.PrintError("E", "rangeStart can't be less or equal than rangeEnd")
self.Exit()
self.lastSavedID = self.rangeStart
#Utils
self.utils = Utils()
self.utils.PlaySound(2000, 250, 1)
#restart after error
self.Command = ""
self.Continue = True
try:
os.system("chcp 65001 > nul")
except:
pass
def Exit(self):
try:
if self.osSleep:
self.osSleep.Uninhibit()
if self.Continue:
print(self.Command)
if self.Command != "":
os.system(self.Command)
else:
sys.exit()
except KeyboardInterrupt:
sys.exit()
except:
sys.exit()
raise
def PrintArgs(self):
print(" \033[36m(i) HELP\033[0m: Arguments: spells_parser.py [rangeStart][rangeEnd][DisablePrints]")
print(" Example: spells_parser.py 15649 150000 False")
print("--------------------------")
print(" Color codes displayed while parsing:")
print(" \033[%smGreen\033[0m: New spell added" % self.ADDED)
print(" \033[%smRed\033[0m: Spell deleted (The spell was in the file, but not in the API)" % self.REMOVED)
print(" \033[%smYellow\033[0m: Spell not found (The spell was not present in the file and the API)" % self.NOT_FOUND)
print(" \033[%smBlue\033[0m: Spell replaced (A different version of the spell was found in the API)" % self.REPLACED)
print(" \033[%smBlack\033[0m: Spell not changed (The spell in the file is the same as the found in the API)" % self.UNTOUCHED)
print("--------------------------")
def PrintConfig(self):
print(" Config:")
print("\tRange Start: \033[36m%i\033[0m" % (self.rangeStart))
print("\tRange End: \033[36m%i\033[0m" % (self.rangeEnd))
print("\tMinimum Prints: \033[36m%s\033[0m" % (self.minimumPrints))
print("--------------------------")
def PrintError(self, type, message):
t = ""
if type == "E":
t = "\033[31m/!\\ ERROR\033[0m"
self.utils.PlaySound(200, 200, 3)
elif type == "W":
t = "\033[33m/!\\ WARNING\033[0m"
self.utils.PlaySound(200, 200, 2)
print(" %s: %s" % (t, message))
print("--------------------------")
def Config(self, args):
if len(args) >= 2:
if args[1] is not None:
self.rangeStart = int(args[1])
if len(args) >= 3:
if args[2] is not None:
self.rangeEnd = int(args[2])
if len(args) >= 4:
if args[3] is not None:
self.minimumPrints = args[3].lower() == 'true'
def SaveIndexes(self):
dbFilename = "spell_parser";
self.Command = "%s.py %i %i" % (dbFilename, self.lastItemID, self.rangeEnd)
print(" \033[35m%s.py %i %i\033[0m" % (dbFilename, self.lastItemID, self.rangeEnd))
self.dbFile.ReplacePattern("%s.py %i %i" % (dbFilename, self.lastSavedID, self.rangeEnd), self.Command)
def FindInFile(self, fileIndex, item):
contents = self.files[fileIndex].ReadFile()
return self.FindInContents(item, 2, self.files[fileIndex].lines -1, contents)
def FindInContents(self, item, minI, maxI, contents):
guess = int(math.floor(minI + (maxI - minI) / 2))
#print ("guess: %d | min: %d | max: %d" % (guess, minI, maxI))
if maxI >= minI:
guessed_line = contents[guess-1]
#print ("guess: %d | min: %d | max: %d | guessed_line: %s" % (guess, minI, maxI, guessed_line[:-1]))
m = re.search('(\d{1,7})', guessed_line)
if m is None:
return [1, False]
guessed_ID = int(m.group(0))
#print("ID: %d" % guessed_ID)
if guessed_ID == item:
#print ("END | guess: %d | line: %s" % (guess-1, contents[guess-1]))
return [guess-1, True]
if guessed_ID < item:
#print ("ID: %d < item: %s" % (guessed_ID, item))
return self.FindInContents(item, guess + 1, maxI, contents)
else:
#print ("ID: %d > item: %s" % (guessed_ID, item))
return self.FindInContents(item, minI, guess - 1, contents)
else:
#print ("END | %d NOT FOUND at pos %d" % (item, guess))
return [guess-1, False]
def GetNameFromLine(self, fileIndex, lineIndex):
contents = self.files[fileIndex].ReadFile()
m = re.search(r'"(.+?)(?<!\\)"', contents[lineIndex])
if(m is not None):
return m.group(0)
return '""'
def ReplaceNameFromLine(self, fileIndex, lineIndex, newText):
contents = self.files[fileIndex].ReadFile()
self.files[fileIndex].ReplacePattern(contents[lineIndex], newText)
def PrintItem(self, itemID):
now = datetime.datetime.now()
time = now.strftime('%H:%M:%S')
if not self.minimumPrints:
data = "|"
for localeIndex in range(len(self.localesList)):
data = "%s\033[%sm%s\033[0m|" % (data, self.eachLangcheck[localeIndex], self.localesList[localeIndex])
print(" %s - #%i - [%s]" % (time, itemID, data))
else:
if itemID % 50 == 0:
print(" %s - \033[36m#%i\033[0m" % (time, itemID))
def Run(self):
import json
reqs = Requests()
reqs.GetToken()
error = False
addedItems = 0
replacedItems = 0
isReplaced = False
try:
import requests, datetime
print(" \033[32mStarting\033[0m spells parser")
print("--------------------------")
self.lastItemID = self.rangeStart
params = dict(
namespace="static-eu",
access_token=reqs.token
)
self.processStarted = True
while self.lastItemID < self.rangeEnd-1 and not error:
try:
#REQUEST AND PARSE ITEMS
for itemID in range(self.lastItemID, self.rangeEnd):
self.lastItemID = itemID
url = self.baseUrl % (itemID)
reqs.MakeRequest(url, params)
req_status_code = reqs.GetRequestStatusCode()
data = reqs.GetData()
if data != "Downstream Error":
data = json.loads(data)
if req_status_code == 200:
if 'name' not in data:
print(data)
self.PrintError("E", "No 'name' field in data")
print("--------------------------")
error = True
break
for localeIndex in range(len(self.localesList)):
locale = self.localesList[localeIndex];
nameDict = data["name"]
if locale not in nameDict:
self.eachLangcheck[localeIndex] = self.NOT_FOUND;
else:
name = nameDict[locale]
name = name.replace('"', '\\"')
name = name.replace('\r\n', '')
name = name.replace('\n', '')
luaString = ' {%i,"%s"},\n' % (itemID, name)
result = self.FindInFile(localeIndex, itemID)
exists = result[1]
if not exists:
self.files[localeIndex].InsertInLine(result[0]+1, luaString)
self.files[localeIndex].lines += 1
addedItems += 1
self.eachLangcheck[localeIndex] = self.ADDED;
else:
current_name = self.GetNameFromLine(localeIndex, result[0])
new_name = '"%s"' % (name)
if current_name != new_name:
isReplaced = True
self.ReplaceNameFromLine(localeIndex, result[0], luaString)
replacedItems += 1
self.eachLangcheck[localeIndex] = self.REPLACED;
else:
isReplaced = False
self.eachLangcheck[localeIndex] = self.UNTOUCHED;
self.PrintItem(itemID)
else:
if req_status_code == 404:
if 'detail' not in data:
self.PrintError("E", "404 Error: No reason found")
error = True
break
else:
for localeIndex in range(len(self.localesList)):
result = self.FindInFile(localeIndex, itemID)
exists = result[1]
if exists:
self.files[localeIndex].DeleteLine(result[0]+1)
self.eachLangcheck[localeIndex] = self.REMOVED;
else:
self.eachLangcheck[localeIndex] = self.NOT_FOUND;
self.PrintItem(itemID)
elif req_status_code == 504:
self.PrintError("E", "504 Error: Gateway timeout")
error = True
break
else:
self.PrintError("E", "%i Error: Unknown error code" % (req_status_code)) #504
error = True
break
else:
print(" %s - \033[33m#%i\033[0m - %s" % (time, itemID, "Downstream Error"))
if itemID % 50 == 0:
print(" New indexes saved:")
self.SaveIndexes()
self.lastSavedID = itemID
except KeyboardInterrupt:
error = True
raise
except requests.exceptions.ConnectionError:
error = True
raise
except IOError:
error = True
raise
except ValueError:
error = True
raise
except:
print("--------------------------")
self.PrintError("E", "Unknown Error")
error = True
raise
if not error:
self.lastItemID += 1
if self.lastItemID >= self.rangeEnd-1:
self.Continue = False
print("--------------------------")
except KeyboardInterrupt:
print("--------------------------")
if self.processStarted:
self.PrintError("W", "Process interrupted by the user")
else:
self.PrintError("W", "Process interrupted by the user before starting")
self.Continue = False
except requests.exceptions.ConnectionError:
print("--------------------------")
self.PrintError("E", "There was a problem with the Internet connection.")
except IOError:
print("--------------------------")
self.PrintError("E", "There was a problem with the file access.")
except ValueError:
print("--------------------------")
self.PrintError("E", "No JSON object could be decoded // ValueError")
except:
print("--------------------------")
self.PrintError("E", "Unknown Error")
raise
finally:
print(" New indexes saved:")
self.SaveIndexes()
print("--------------------------")
print(" Stats:")
print("\tSpells parsed: \033[36m%i\033[0m" % (self.lastItemID - self.rangeStart))
print("\tNew spells added: \033[36m%i\033[0m" % (addedItems))
print("\tReplaced spells: \033[36m%i\033[0m" % (replacedItems))
print("--------------------------")
print(" \033[32mProcess Finished\033[0m")
print("--------------------------")
self.utils.PlaySound(2000, 250, 1)
class Utils():
def __init__(self):
self.Mute = False
pass
def PlaySound(self, frequency, duration, repetitions):
if not self.Mute:
try:
import winsound
for x in range(1, repetitions+1):
winsound.Beep(frequency, duration)
except:
pass
class Requests():
def __init__(self):
self.client_id_file = "client_id.key"
self.client_secret_file = "client_secret.key"
self.token = ""
def GetToken(self):
try:
import requests, json
url = "https://eu.battle.net/oauth/token"
params = dict(
grant_type="client_credentials",
client_id=self.ReadClientID(),
client_secret=self.ReadClientSecret()
)
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
if resp.status_code == 200:
if 'access_token' not in data:
print(data)
self.PrintError("E", "No 'name' field in data")
print("--------------------------")
self.token = data["access_token"]
except ValueError:
print("--------------------------")
self.PrintError("E", "No JSON TOKEN object could be decoded")
except requests.exceptions.ConnectionError:
print("--------------------------")
self.PrintError("E", "There was a problem with the Internet connection.")
def ReadClientID(self):
if os.path.isfile(self.client_id_file):
fa = open(self.client_id_file,'r', encoding="utf8")
client_id = fa.readline()
fa.close()
return client_id
def ReadClientSecret(self):
if os.path.isfile(self.client_secret_file):
fa = open(self.client_secret_file,'r', encoding="utf8")
client_secret = fa.readline()
fa.close()
return client_secret
def MakeRequest(self, url, params):
import requests
self.resp = requests.get(url=url, params=params)
def GetRequestStatusCode(self):
return self.resp.status_code
def GetData(self):
return self.resp.text
def PrintError(self, type, message):
t = ""
if type == "E":
t = "\033[31m/!\\ ERROR\033[0m"
# self.utils.PlaySound(200, 200, 3)
elif type == "W":
t = "\033[33m/!\\ WARNING\033[0m"
# self.utils.PlaySound(200, 200, 2)
print(" %s: %s" % (t, message))
print("--------------------------")
''' ****************
PROGRAM START
**************** '''
parser = Parser(sys.argv)
parser.PrintConfig()
parser.Run()
parser.Exit() | Parser | identifier_name |
tpl.rs | //! TeX templating
//!
//! The `tpl` module contains a way of constructing a TeX-document programmatically. It ensures
//! documents are well-formed syntactically, but not semantically (e.g. it is possible to express
//! documents that contain multiple `\documentclass` macro calls inside the document but not a
//! `\begin{foo}` without a matching `\end`).
//!
//! As a result of this deliberate limitation, the API is fairly simple. The core module offers the
//! entire abstraction through the `TexElement` trait, while the `elements` module contains
//! syntactic sugar for building documents quickly.
//!
//! ## "Hello, world" using `TexElement` directly.
//!
//! ```rust
//! use texrender::tpl::{Args, BeginEndBlock, Group, IntoTexElement, MacroCall, OptArgs, RawTex,
//! TexElement, Text};
//!
//! let doctype = MacroCall::new("documentclass",
//! OptArgs::single("12pt"),
//! Args::single("article"));
//! let mut contents: Vec<Box<dyn TexElement>> = Vec::new();
//! contents.push(Box::new(MacroCall::new("section",
//! Default::default(),
//! Args::single("Hello, world"))));
//! contents.push("This is fun & easy.".into_tex_element());
//! let document = BeginEndBlock::new("document", Default::default(), Default::default(), contents);
//! let tex = Group::new(vec![Box::new(doctype) as Box<dyn TexElement>, Box::new(document)]);
//! let output = tex.render().expect("rendering failed");
//! assert_eq!(output,
//! "\\documentclass[12pt]{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! While this form uses no macros, it is rather inconvenient to write. Luckily there is an
//! alternative:
//!
//! ## "Hello, world" using elements and macros.
//!
//! ```rust
//! use texrender::elems;
//! use texrender::tpl::TexElement;
//! use texrender::tpl::elements::{N, doc, document, documentclass, section};
//!
//! let tex = doc(elems!(
//! documentclass(elems!(), "article"),
//! document(elems!(
//! section("Hello, world"),
//! "This is fun & easy."
//! ))
//! ));
//!
//! let output = tex.render().expect("rendering failed");
//!
//! assert_eq!(output,
//! "\\documentclass{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! Element functions like `section` above typically cover most use cases, while not preventing the
//! u ser to drop back to the raw functions above. The `elems` macro conveniently boxes and
//! type-erases children, while `N` can be used for "no arguments" for both args and optargs.
#[macro_use]
pub mod macros;
pub mod elements;
use std::fmt::Debug;
use std::io::Write;
use std::{io, string};
/// Renderable Tex element.
pub trait TexElement: Debug {
/// Type-erases a `TexElement`.
fn boxed(self) -> Box<dyn TexElement>
where
Self: Sized + 'static,
{
Box::new(self) as Box<dyn TexElement>
}
/// Renders the element into a string.
///
/// May return an error if a non-utf8 element has been given.
fn render(&self) -> Result<String, string::FromUtf8Error> {
let mut buffer: Vec<u8> = Vec::new();
self.write_tex(&mut buffer)
.expect("should always be able to write to in-memory buffer");
String::from_utf8(buffer)
}
/// Writes a rendering of the element to the given writer.
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()>;
}
/// Conversion trait for various types.
///
/// Used for primitive conversions of various types directly into tex elements. Implementations
/// include:
///
/// * `Box<dyn TexElement>` are passed through unchanged.
/// * Any other `TexElement` will be boxed.
/// * `str` and `String` are converted to escaped `Text` elements.
/// * Any number (`u8`, ...) is converted to escaped `Text` using display.
/// * A `Vec<Box<dyn TexElement>>` is converted into a `Group`.
/// * The unit type `()` is converted into an empty element.
pub trait IntoTexElement {
/// Converts the given element into a `TexElement`.
fn into_tex_element(self) -> Box<dyn TexElement>;
}
impl IntoTexElement for Box<dyn TexElement> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self
}
}
impl<'a> IntoTexElement for &'a str {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self.to_owned().into_tex_element()
}
}
impl IntoTexElement for String {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(self))
}
}
impl IntoTexElement for () {
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(RawTex(Vec::new()))
}
}
impl<T: TexElement + Sized + 'static> IntoTexElement for T {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(self)
}
}
impl IntoTexElement for Vec<Box<dyn TexElement>> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Group::new(self))
}
}
macro_rules! using_display {
($ty:ty) => {
impl IntoTexElement for $ty {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(format!("{}", self)))
}
}
};
}
using_display!(u8);
using_display!(u16);
using_display!(u32);
using_display!(u64);
using_display!(u128);
using_display!(i8);
using_display!(i16);
using_display!(i32);
using_display!(i64);
using_display!(i128);
using_display!(f32);
using_display!(f64);
/// Writes a list of tex elements to a stream with a separator.
pub fn write_list<'a, I>(writer: &mut dyn Write, separator: &str, iter: I) -> io::Result<()>
where
I: Iterator<Item = &'a Box<dyn TexElement>> + 'a,
{
for (idx, arg) in iter.enumerate() {
if idx != 0 {
writer.write_all(separator.as_bytes())?;
}
arg.write_tex(writer)?;
}
Ok(())
}
/// A raw, unescaped piece of tex code.
///
/// Tex is not guaranteed to be UTF-8 encoded, thus `RawTex` internally keeps bytes. The value will
/// be inserted into the document without any escaping. The value is unchecked, thus it is possible
/// to create syntactically incorrect invalid documents using this element.
#[derive(Clone, Debug)]
pub struct RawTex(Vec<u8>);
impl RawTex {
/// Crates a new raw tex element from a string.
#[inline]
pub fn new(raw: Vec<u8>) -> Self {
RawTex(raw)
}
}
impl TexElement for RawTex {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(self.0.as_slice())
}
}
/// A text string.
///
/// Text strings will be escaped before insertion.
#[derive(Clone, Debug)]
pub struct Text(String);
impl Text {
/// Creates a new text string.
#[inline]
pub fn new(raw: String) -> Self {
Text(raw)
}
}
impl TexElement for Text {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
crate::tex_escape::write_escaped(writer, &self.0)
}
}
/// A set of optional arguments.
///
/// Optional arguments in LaTeX are typically denoted using square brackets and comma-separated.
#[derive(Debug, Default)]
pub struct OptArgs(Vec<Box<dyn TexElement>>);
impl OptArgs {
/// Creates a new set of optional arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
OptArgs(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self |
}
impl TexElement for OptArgs {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if !self.0.is_empty() {
writer.write_all(b"[")?;
write_list(writer, ",", self.0.iter())?;
writer.write_all(b"]")?;
}
Ok(())
}
}
/// A set of arguments.
///
/// Each argument is enclosed by curly braces when rendered, otherwise arguments are just
/// concatenated.
#[derive(Debug, Default)]
pub struct Args(Vec<Box<dyn TexElement>>);
impl Args {
/// Creates a new set of arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
Args(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
Args(vec![elem.into_tex_element()])
}
}
impl TexElement for Args {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if !self.0.is_empty() {
writer.write_all(b"{")?;
write_list(writer, "}{", self.0.iter())?;
writer.write_all(b"}")?;
}
Ok(())
}
}
/// A TeX-macro invocation.
///
/// This is the typical `\macroname[opt1]{arg1}{arg2}` call that is common in latex documents.
#[derive(Debug)]
pub struct MacroCall {
/// Name of the instruction.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Mandatory arguments.
args: Args,
/// Whether or not to append a newline afterwards.
newline: bool,
}
impl MacroCall {
/// Creates a new macro call.
///
/// The resulting call will end with a newline when output.
pub fn new<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: true,
}
}
/// Creates a new inline macro call.
///
/// Does not end with a newline.
pub fn new_inline<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: false,
}
}
}
impl TexElement for MacroCall {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(br"\")?;
self.ident.write_tex(writer)?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
if self.newline {
writer.write_all(b"\n")?;
}
Ok(())
}
}
/// A block with a begin and end instruction.
///
/// Begin-end blocks usually start with a `\begin{blockname}` and end with `\end{blockname}`.
#[derive(Debug)]
pub struct BeginEndBlock {
/// The identifier for the block.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Actual arguments.
args: Args,
/// Child elements of the block.
children: Vec<Box<dyn TexElement>>,
}
impl BeginEndBlock {
/// Creates a new begin/end block.
pub fn new<T: IntoTexElement>(
ident: T,
opt_args: OptArgs,
args: Args,
children: Vec<Box<dyn TexElement>>,
) -> Self {
BeginEndBlock {
ident: ident.into_tex_element(),
opt_args,
args,
children,
}
}
}
impl TexElement for BeginEndBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"\\begin{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}")?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
writer.write_all(b"\n")?;
for child in &self.children {
child.write_tex(writer)?;
}
writer.write_all(b"\n\\end{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}\n")?;
Ok(())
}
}
/// An anonymous block.
///
/// Anonymous blocks are other elements enclosed in curly braces when output.
#[derive(Debug)]
pub struct AnonymousBlock(Vec<Box<dyn TexElement>>);
impl AnonymousBlock {
/// Creates a new anonymous block.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
AnonymousBlock(elems)
}
}
impl TexElement for AnonymousBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"{")?;
for child in &self.0 {
child.write_tex(writer)?;
}
writer.write_all(b"}")?;
Ok(())
}
}
/// Grouping of elements.
///
/// Groups multiple elements together; when output they are written in order, without any characters
/// added.
#[derive(Debug)]
pub struct Group(Vec<Box<dyn TexElement>>);
impl Group {
/// Creates a new group.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
Group(elems)
}
}
impl TexElement for Group {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
for child in &self.0 {
child.write_tex(writer)?;
}
Ok(())
}
}
/// Table row.
///
/// Multiple elements joined by ` & ` when rendered.
#[derive(Debug)]
pub struct TableRow(Vec<Box<dyn TexElement>>);
impl TableRow {
/// Creates a new table row.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
TableRow(elems)
}
}
impl TexElement for TableRow {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
write_list(writer, " & ", self.0.iter())?;
writer.write_all(b"\\\\\n")
}
}
| {
OptArgs(vec![elem.into_tex_element()])
} | identifier_body |
tpl.rs | //! TeX templating
//!
//! The `tpl` module contains a way of constructing a TeX-document programmatically. It ensures
//! documents are well-formed syntactically, but not semantically (e.g. it is possible to express
//! documents that contain multiple `\documentclass` macro calls inside the document but not a
//! `\begin{foo}` without a matching `\end`).
//!
//! As a result of this deliberate limitation, the API is fairly simple. The core module offers the
//! entire abstraction through the `TexElement` trait, while the `elements` module contains
//! syntactic sugar for building documents quickly.
//!
//! ## "Hello, world" using `TexElement` directly.
//!
//! ```rust
//! use texrender::tpl::{Args, BeginEndBlock, Group, IntoTexElement, MacroCall, OptArgs, RawTex,
//! TexElement, Text};
//!
//! let doctype = MacroCall::new("documentclass",
//! OptArgs::single("12pt"),
//! Args::single("article"));
//! let mut contents: Vec<Box<dyn TexElement>> = Vec::new();
//! contents.push(Box::new(MacroCall::new("section",
//! Default::default(),
//! Args::single("Hello, world"))));
//! contents.push("This is fun & easy.".into_tex_element());
//! let document = BeginEndBlock::new("document", Default::default(), Default::default(), contents);
//! let tex = Group::new(vec![Box::new(doctype) as Box<dyn TexElement>, Box::new(document)]);
//! let output = tex.render().expect("rendering failed");
//! assert_eq!(output,
//! "\\documentclass[12pt]{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! While this form uses no macros, it is rather inconvenient to write. Luckily there is an
//! alternative:
//!
//! ## "Hello, world" using elements and macros.
//!
//! ```rust
//! use texrender::elems;
//! use texrender::tpl::TexElement;
//! use texrender::tpl::elements::{N, doc, document, documentclass, section};
//!
//! let tex = doc(elems!(
//! documentclass(elems!(), "article"),
//! document(elems!(
//! section("Hello, world"),
//! "This is fun & easy."
//! ))
//! ));
//!
//! let output = tex.render().expect("rendering failed");
//!
//! assert_eq!(output,
//! "\\documentclass{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! Element functions like `section` above typically cover most use cases, while not preventing the
//! u ser to drop back to the raw functions above. The `elems` macro conveniently boxes and
//! type-erases children, while `N` can be used for "no arguments" for both args and optargs.
#[macro_use]
pub mod macros;
pub mod elements;
use std::fmt::Debug;
use std::io::Write;
use std::{io, string};
/// Renderable Tex element.
pub trait TexElement: Debug {
/// Type-erases a `TexElement`.
fn boxed(self) -> Box<dyn TexElement>
where
Self: Sized + 'static,
{
Box::new(self) as Box<dyn TexElement>
}
/// Renders the element into a string.
///
/// May return an error if a non-utf8 element has been given.
fn render(&self) -> Result<String, string::FromUtf8Error> {
let mut buffer: Vec<u8> = Vec::new();
self.write_tex(&mut buffer)
.expect("should always be able to write to in-memory buffer");
String::from_utf8(buffer)
}
/// Writes a rendering of the element to the given writer.
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()>;
}
/// Conversion trait for various types.
///
/// Used for primitive conversions of various types directly into tex elements. Implementations
/// include:
///
/// * `Box<dyn TexElement>` are passed through unchanged.
/// * Any other `TexElement` will be boxed.
/// * `str` and `String` are converted to escaped `Text` elements.
/// * Any number (`u8`, ...) is converted to escaped `Text` using display.
/// * A `Vec<Box<dyn TexElement>>` is converted into a `Group`.
/// * The unit type `()` is converted into an empty element.
pub trait IntoTexElement {
/// Converts the given element into a `TexElement`.
fn into_tex_element(self) -> Box<dyn TexElement>;
}
impl IntoTexElement for Box<dyn TexElement> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self
}
}
impl<'a> IntoTexElement for &'a str {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self.to_owned().into_tex_element()
}
}
impl IntoTexElement for String {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(self))
}
}
impl IntoTexElement for () {
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(RawTex(Vec::new()))
}
}
impl<T: TexElement + Sized + 'static> IntoTexElement for T {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(self)
}
}
impl IntoTexElement for Vec<Box<dyn TexElement>> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Group::new(self))
}
}
macro_rules! using_display {
($ty:ty) => {
impl IntoTexElement for $ty {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(format!("{}", self)))
}
}
};
}
using_display!(u8);
using_display!(u16);
using_display!(u32);
using_display!(u64);
using_display!(u128);
using_display!(i8);
using_display!(i16);
using_display!(i32);
using_display!(i64);
using_display!(i128);
using_display!(f32);
using_display!(f64);
/// Writes a list of tex elements to a stream with a separator.
pub fn write_list<'a, I>(writer: &mut dyn Write, separator: &str, iter: I) -> io::Result<()>
where
I: Iterator<Item = &'a Box<dyn TexElement>> + 'a,
{
for (idx, arg) in iter.enumerate() {
if idx != 0 {
writer.write_all(separator.as_bytes())?;
}
arg.write_tex(writer)?;
}
Ok(())
}
/// A raw, unescaped piece of tex code.
///
/// Tex is not guaranteed to be UTF-8 encoded, thus `RawTex` internally keeps bytes. The value will
/// be inserted into the document without any escaping. The value is unchecked, thus it is possible
/// to create syntactically incorrect invalid documents using this element.
#[derive(Clone, Debug)]
pub struct RawTex(Vec<u8>);
impl RawTex {
/// Crates a new raw tex element from a string.
#[inline]
pub fn new(raw: Vec<u8>) -> Self {
RawTex(raw)
}
}
impl TexElement for RawTex {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(self.0.as_slice())
}
}
/// A text string.
///
/// Text strings will be escaped before insertion.
#[derive(Clone, Debug)]
pub struct Text(String);
impl Text {
/// Creates a new text string.
#[inline]
pub fn new(raw: String) -> Self {
Text(raw)
}
}
impl TexElement for Text {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
crate::tex_escape::write_escaped(writer, &self.0)
}
}
/// A set of optional arguments.
///
/// Optional arguments in LaTeX are typically denoted using square brackets and comma-separated.
#[derive(Debug, Default)]
pub struct OptArgs(Vec<Box<dyn TexElement>>);
impl OptArgs {
/// Creates a new set of optional arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
OptArgs(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
OptArgs(vec![elem.into_tex_element()])
}
}
impl TexElement for OptArgs {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if !self.0.is_empty() {
writer.write_all(b"[")?;
write_list(writer, ",", self.0.iter())?;
writer.write_all(b"]")?;
}
Ok(())
}
}
/// A set of arguments.
///
/// Each argument is enclosed by curly braces when rendered, otherwise arguments are just
/// concatenated.
#[derive(Debug, Default)]
pub struct Args(Vec<Box<dyn TexElement>>);
impl Args {
/// Creates a new set of arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
Args(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
Args(vec![elem.into_tex_element()])
}
}
impl TexElement for Args {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if !self.0.is_empty() {
writer.write_all(b"{")?;
write_list(writer, "}{", self.0.iter())?;
writer.write_all(b"}")?;
}
Ok(())
}
}
/// A TeX-macro invocation.
///
/// This is the typical `\macroname[opt1]{arg1}{arg2}` call that is common in latex documents.
#[derive(Debug)]
pub struct MacroCall {
/// Name of the instruction.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Mandatory arguments.
args: Args,
/// Whether or not to append a newline afterwards.
newline: bool,
}
impl MacroCall {
/// Creates a new macro call.
///
/// The resulting call will end with a newline when output.
pub fn new<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: true,
}
}
/// Creates a new inline macro call.
///
/// Does not end with a newline.
pub fn new_inline<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: false,
}
}
}
impl TexElement for MacroCall {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(br"\")?;
self.ident.write_tex(writer)?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
if self.newline {
writer.write_all(b"\n")?;
}
Ok(())
}
}
/// A block with a begin and end instruction.
///
/// Begin-end blocks usually start with a `\begin{blockname}` and end with `\end{blockname}`.
#[derive(Debug)]
pub struct BeginEndBlock {
/// The identifier for the block.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Actual arguments.
args: Args,
/// Child elements of the block.
children: Vec<Box<dyn TexElement>>,
}
impl BeginEndBlock {
/// Creates a new begin/end block.
pub fn new<T: IntoTexElement>(
ident: T,
opt_args: OptArgs,
args: Args,
children: Vec<Box<dyn TexElement>>,
) -> Self {
BeginEndBlock {
ident: ident.into_tex_element(),
opt_args,
args,
children,
}
}
}
impl TexElement for BeginEndBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"\\begin{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}")?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
writer.write_all(b"\n")?;
for child in &self.children {
child.write_tex(writer)?;
}
writer.write_all(b"\n\\end{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}\n")?;
Ok(())
}
}
/// An anonymous block.
///
/// Anonymous blocks are other elements enclosed in curly braces when output.
#[derive(Debug)]
pub struct AnonymousBlock(Vec<Box<dyn TexElement>>);
impl AnonymousBlock {
/// Creates a new anonymous block.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
AnonymousBlock(elems)
}
}
impl TexElement for AnonymousBlock {
fn | (&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"{")?;
for child in &self.0 {
child.write_tex(writer)?;
}
writer.write_all(b"}")?;
Ok(())
}
}
/// Grouping of elements.
///
/// Groups multiple elements together; when output they are written in order, without any characters
/// added.
#[derive(Debug)]
pub struct Group(Vec<Box<dyn TexElement>>);
impl Group {
/// Creates a new group.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
Group(elems)
}
}
impl TexElement for Group {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
for child in &self.0 {
child.write_tex(writer)?;
}
Ok(())
}
}
/// Table row.
///
/// Multiple elements joined by ` & ` when rendered.
#[derive(Debug)]
pub struct TableRow(Vec<Box<dyn TexElement>>);
impl TableRow {
/// Creates a new table row.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
TableRow(elems)
}
}
impl TexElement for TableRow {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
write_list(writer, " & ", self.0.iter())?;
writer.write_all(b"\\\\\n")
}
}
| write_tex | identifier_name |
tpl.rs | //! TeX templating
//!
//! The `tpl` module contains a way of constructing a TeX-document programmatically. It ensures
//! documents are well-formed syntactically, but not semantically (e.g. it is possible to express
//! documents that contain multiple `\documentclass` macro calls inside the document but not a
//! `\begin{foo}` without a matching `\end`).
//!
//! As a result of this deliberate limitation, the API is fairly simple. The core module offers the
//! entire abstraction through the `TexElement` trait, while the `elements` module contains
//! syntactic sugar for building documents quickly.
//!
//! ## "Hello, world" using `TexElement` directly.
//!
//! ```rust
//! use texrender::tpl::{Args, BeginEndBlock, Group, IntoTexElement, MacroCall, OptArgs, RawTex,
//! TexElement, Text};
//!
//! let doctype = MacroCall::new("documentclass",
//! OptArgs::single("12pt"),
//! Args::single("article"));
//! let mut contents: Vec<Box<dyn TexElement>> = Vec::new();
//! contents.push(Box::new(MacroCall::new("section",
//! Default::default(),
//! Args::single("Hello, world"))));
//! contents.push("This is fun & easy.".into_tex_element());
//! let document = BeginEndBlock::new("document", Default::default(), Default::default(), contents);
//! let tex = Group::new(vec![Box::new(doctype) as Box<dyn TexElement>, Box::new(document)]);
//! let output = tex.render().expect("rendering failed");
//! assert_eq!(output,
//! "\\documentclass[12pt]{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! While this form uses no macros, it is rather inconvenient to write. Luckily there is an
//! alternative:
//!
//! ## "Hello, world" using elements and macros.
//!
//! ```rust
//! use texrender::elems;
//! use texrender::tpl::TexElement;
//! use texrender::tpl::elements::{N, doc, document, documentclass, section};
//!
//! let tex = doc(elems!(
//! documentclass(elems!(), "article"),
//! document(elems!(
//! section("Hello, world"),
//! "This is fun & easy."
//! ))
//! ));
//!
//! let output = tex.render().expect("rendering failed");
//!
//! assert_eq!(output,
//! "\\documentclass{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! Element functions like `section` above typically cover most use cases, while not preventing the
//! u ser to drop back to the raw functions above. The `elems` macro conveniently boxes and
//! type-erases children, while `N` can be used for "no arguments" for both args and optargs.
#[macro_use]
pub mod macros;
pub mod elements;
use std::fmt::Debug;
use std::io::Write;
use std::{io, string};
/// Renderable Tex element.
pub trait TexElement: Debug {
/// Type-erases a `TexElement`.
fn boxed(self) -> Box<dyn TexElement>
where
Self: Sized + 'static,
{
Box::new(self) as Box<dyn TexElement>
}
/// Renders the element into a string.
///
/// May return an error if a non-utf8 element has been given.
fn render(&self) -> Result<String, string::FromUtf8Error> {
let mut buffer: Vec<u8> = Vec::new();
self.write_tex(&mut buffer)
.expect("should always be able to write to in-memory buffer");
String::from_utf8(buffer)
}
/// Writes a rendering of the element to the given writer.
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()>;
}
/// Conversion trait for various types.
///
/// Used for primitive conversions of various types directly into tex elements. Implementations
/// include:
///
/// * `Box<dyn TexElement>` are passed through unchanged.
/// * Any other `TexElement` will be boxed.
/// * `str` and `String` are converted to escaped `Text` elements.
/// * Any number (`u8`, ...) is converted to escaped `Text` using display.
/// * A `Vec<Box<dyn TexElement>>` is converted into a `Group`.
/// * The unit type `()` is converted into an empty element.
pub trait IntoTexElement {
/// Converts the given element into a `TexElement`.
fn into_tex_element(self) -> Box<dyn TexElement>;
}
impl IntoTexElement for Box<dyn TexElement> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self
}
}
impl<'a> IntoTexElement for &'a str {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self.to_owned().into_tex_element()
}
}
impl IntoTexElement for String {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(self))
}
}
impl IntoTexElement for () {
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(RawTex(Vec::new()))
}
}
impl<T: TexElement + Sized + 'static> IntoTexElement for T {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(self)
}
}
impl IntoTexElement for Vec<Box<dyn TexElement>> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Group::new(self))
}
}
macro_rules! using_display {
($ty:ty) => {
impl IntoTexElement for $ty {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(format!("{}", self)))
}
}
};
}
using_display!(u8);
using_display!(u16);
using_display!(u32);
using_display!(u64);
using_display!(u128);
using_display!(i8);
using_display!(i16);
using_display!(i32);
using_display!(i64);
using_display!(i128);
using_display!(f32);
using_display!(f64);
/// Writes a list of tex elements to a stream with a separator.
pub fn write_list<'a, I>(writer: &mut dyn Write, separator: &str, iter: I) -> io::Result<()>
where
I: Iterator<Item = &'a Box<dyn TexElement>> + 'a,
{
for (idx, arg) in iter.enumerate() {
if idx != 0 {
writer.write_all(separator.as_bytes())?;
}
arg.write_tex(writer)?;
}
Ok(())
}
/// A raw, unescaped piece of tex code.
///
/// Tex is not guaranteed to be UTF-8 encoded, thus `RawTex` internally keeps bytes. The value will
/// be inserted into the document without any escaping. The value is unchecked, thus it is possible
/// to create syntactically incorrect invalid documents using this element.
#[derive(Clone, Debug)]
pub struct RawTex(Vec<u8>);
impl RawTex {
/// Crates a new raw tex element from a string.
#[inline]
pub fn new(raw: Vec<u8>) -> Self {
RawTex(raw)
}
}
impl TexElement for RawTex {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(self.0.as_slice())
}
}
/// A text string.
///
/// Text strings will be escaped before insertion.
#[derive(Clone, Debug)]
pub struct Text(String);
impl Text {
/// Creates a new text string.
#[inline]
pub fn new(raw: String) -> Self {
Text(raw)
}
}
impl TexElement for Text {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
crate::tex_escape::write_escaped(writer, &self.0)
}
}
/// A set of optional arguments.
///
/// Optional arguments in LaTeX are typically denoted using square brackets and comma-separated.
#[derive(Debug, Default)]
pub struct OptArgs(Vec<Box<dyn TexElement>>);
impl OptArgs {
/// Creates a new set of optional arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
OptArgs(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
OptArgs(vec![elem.into_tex_element()])
}
}
impl TexElement for OptArgs {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if !self.0.is_empty() {
writer.write_all(b"[")?;
write_list(writer, ",", self.0.iter())?;
writer.write_all(b"]")?;
}
Ok(())
}
}
/// A set of arguments.
///
/// Each argument is enclosed by curly braces when rendered, otherwise arguments are just
/// concatenated.
#[derive(Debug, Default)]
pub struct Args(Vec<Box<dyn TexElement>>);
impl Args {
/// Creates a new set of arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
Args(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
Args(vec![elem.into_tex_element()])
}
}
impl TexElement for Args {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if !self.0.is_empty() {
writer.write_all(b"{")?;
write_list(writer, "}{", self.0.iter())?;
writer.write_all(b"}")?;
}
Ok(())
}
}
/// A TeX-macro invocation.
///
/// This is the typical `\macroname[opt1]{arg1}{arg2}` call that is common in latex documents.
#[derive(Debug)]
pub struct MacroCall {
/// Name of the instruction.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Mandatory arguments.
args: Args,
/// Whether or not to append a newline afterwards.
newline: bool,
}
impl MacroCall {
/// Creates a new macro call.
///
/// The resulting call will end with a newline when output.
pub fn new<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: true,
}
}
/// Creates a new inline macro call.
///
/// Does not end with a newline.
pub fn new_inline<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args, | newline: false,
}
}
}
impl TexElement for MacroCall {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(br"\")?;
self.ident.write_tex(writer)?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
if self.newline {
writer.write_all(b"\n")?;
}
Ok(())
}
}
/// A block with a begin and end instruction.
///
/// Begin-end blocks usually start with a `\begin{blockname}` and end with `\end{blockname}`.
#[derive(Debug)]
pub struct BeginEndBlock {
/// The identifier for the block.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Actual arguments.
args: Args,
/// Child elements of the block.
children: Vec<Box<dyn TexElement>>,
}
impl BeginEndBlock {
/// Creates a new begin/end block.
pub fn new<T: IntoTexElement>(
ident: T,
opt_args: OptArgs,
args: Args,
children: Vec<Box<dyn TexElement>>,
) -> Self {
BeginEndBlock {
ident: ident.into_tex_element(),
opt_args,
args,
children,
}
}
}
impl TexElement for BeginEndBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"\\begin{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}")?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
writer.write_all(b"\n")?;
for child in &self.children {
child.write_tex(writer)?;
}
writer.write_all(b"\n\\end{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}\n")?;
Ok(())
}
}
/// An anonymous block.
///
/// Anonymous blocks are other elements enclosed in curly braces when output.
#[derive(Debug)]
pub struct AnonymousBlock(Vec<Box<dyn TexElement>>);
impl AnonymousBlock {
/// Creates a new anonymous block.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
AnonymousBlock(elems)
}
}
impl TexElement for AnonymousBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"{")?;
for child in &self.0 {
child.write_tex(writer)?;
}
writer.write_all(b"}")?;
Ok(())
}
}
/// Grouping of elements.
///
/// Groups multiple elements together; when output they are written in order, without any characters
/// added.
#[derive(Debug)]
pub struct Group(Vec<Box<dyn TexElement>>);
impl Group {
/// Creates a new group.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
Group(elems)
}
}
impl TexElement for Group {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
for child in &self.0 {
child.write_tex(writer)?;
}
Ok(())
}
}
/// Table row.
///
/// Multiple elements joined by ` & ` when rendered.
#[derive(Debug)]
pub struct TableRow(Vec<Box<dyn TexElement>>);
impl TableRow {
/// Creates a new table row.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
TableRow(elems)
}
}
impl TexElement for TableRow {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
write_list(writer, " & ", self.0.iter())?;
writer.write_all(b"\\\\\n")
}
} | args, | random_line_split |
tpl.rs | //! TeX templating
//!
//! The `tpl` module contains a way of constructing a TeX-document programmatically. It ensures
//! documents are well-formed syntactically, but not semantically (e.g. it is possible to express
//! documents that contain multiple `\documentclass` macro calls inside the document but not a
//! `\begin{foo}` without a matching `\end`).
//!
//! As a result of this deliberate limitation, the API is fairly simple. The core module offers the
//! entire abstraction through the `TexElement` trait, while the `elements` module contains
//! syntactic sugar for building documents quickly.
//!
//! ## "Hello, world" using `TexElement` directly.
//!
//! ```rust
//! use texrender::tpl::{Args, BeginEndBlock, Group, IntoTexElement, MacroCall, OptArgs, RawTex,
//! TexElement, Text};
//!
//! let doctype = MacroCall::new("documentclass",
//! OptArgs::single("12pt"),
//! Args::single("article"));
//! let mut contents: Vec<Box<dyn TexElement>> = Vec::new();
//! contents.push(Box::new(MacroCall::new("section",
//! Default::default(),
//! Args::single("Hello, world"))));
//! contents.push("This is fun & easy.".into_tex_element());
//! let document = BeginEndBlock::new("document", Default::default(), Default::default(), contents);
//! let tex = Group::new(vec![Box::new(doctype) as Box<dyn TexElement>, Box::new(document)]);
//! let output = tex.render().expect("rendering failed");
//! assert_eq!(output,
//! "\\documentclass[12pt]{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! While this form uses no macros, it is rather inconvenient to write. Luckily there is an
//! alternative:
//!
//! ## "Hello, world" using elements and macros.
//!
//! ```rust
//! use texrender::elems;
//! use texrender::tpl::TexElement;
//! use texrender::tpl::elements::{N, doc, document, documentclass, section};
//!
//! let tex = doc(elems!(
//! documentclass(elems!(), "article"),
//! document(elems!(
//! section("Hello, world"),
//! "This is fun & easy."
//! ))
//! ));
//!
//! let output = tex.render().expect("rendering failed");
//!
//! assert_eq!(output,
//! "\\documentclass{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! Element functions like `section` above typically cover most use cases, while not preventing the
//! u ser to drop back to the raw functions above. The `elems` macro conveniently boxes and
//! type-erases children, while `N` can be used for "no arguments" for both args and optargs.
#[macro_use]
pub mod macros;
pub mod elements;
use std::fmt::Debug;
use std::io::Write;
use std::{io, string};
/// Renderable Tex element.
pub trait TexElement: Debug {
/// Type-erases a `TexElement`.
fn boxed(self) -> Box<dyn TexElement>
where
Self: Sized + 'static,
{
Box::new(self) as Box<dyn TexElement>
}
/// Renders the element into a string.
///
/// May return an error if a non-utf8 element has been given.
fn render(&self) -> Result<String, string::FromUtf8Error> {
let mut buffer: Vec<u8> = Vec::new();
self.write_tex(&mut buffer)
.expect("should always be able to write to in-memory buffer");
String::from_utf8(buffer)
}
/// Writes a rendering of the element to the given writer.
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()>;
}
/// Conversion trait for various types.
///
/// Used for primitive conversions of various types directly into tex elements. Implementations
/// include:
///
/// * `Box<dyn TexElement>` are passed through unchanged.
/// * Any other `TexElement` will be boxed.
/// * `str` and `String` are converted to escaped `Text` elements.
/// * Any number (`u8`, ...) is converted to escaped `Text` using display.
/// * A `Vec<Box<dyn TexElement>>` is converted into a `Group`.
/// * The unit type `()` is converted into an empty element.
pub trait IntoTexElement {
/// Converts the given element into a `TexElement`.
fn into_tex_element(self) -> Box<dyn TexElement>;
}
impl IntoTexElement for Box<dyn TexElement> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self
}
}
impl<'a> IntoTexElement for &'a str {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self.to_owned().into_tex_element()
}
}
impl IntoTexElement for String {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(self))
}
}
impl IntoTexElement for () {
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(RawTex(Vec::new()))
}
}
impl<T: TexElement + Sized + 'static> IntoTexElement for T {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(self)
}
}
impl IntoTexElement for Vec<Box<dyn TexElement>> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Group::new(self))
}
}
macro_rules! using_display {
($ty:ty) => {
impl IntoTexElement for $ty {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(format!("{}", self)))
}
}
};
}
using_display!(u8);
using_display!(u16);
using_display!(u32);
using_display!(u64);
using_display!(u128);
using_display!(i8);
using_display!(i16);
using_display!(i32);
using_display!(i64);
using_display!(i128);
using_display!(f32);
using_display!(f64);
/// Writes a list of tex elements to a stream with a separator.
pub fn write_list<'a, I>(writer: &mut dyn Write, separator: &str, iter: I) -> io::Result<()>
where
I: Iterator<Item = &'a Box<dyn TexElement>> + 'a,
{
for (idx, arg) in iter.enumerate() {
if idx != 0 |
arg.write_tex(writer)?;
}
Ok(())
}
/// A raw, unescaped piece of tex code.
///
/// Tex is not guaranteed to be UTF-8 encoded, thus `RawTex` internally keeps bytes. The value will
/// be inserted into the document without any escaping. The value is unchecked, thus it is possible
/// to create syntactically incorrect invalid documents using this element.
#[derive(Clone, Debug)]
pub struct RawTex(Vec<u8>);
impl RawTex {
/// Crates a new raw tex element from a string.
#[inline]
pub fn new(raw: Vec<u8>) -> Self {
RawTex(raw)
}
}
impl TexElement for RawTex {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(self.0.as_slice())
}
}
/// A text string.
///
/// Text strings will be escaped before insertion.
#[derive(Clone, Debug)]
pub struct Text(String);
impl Text {
/// Creates a new text string.
#[inline]
pub fn new(raw: String) -> Self {
Text(raw)
}
}
impl TexElement for Text {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
crate::tex_escape::write_escaped(writer, &self.0)
}
}
/// A set of optional arguments.
///
/// Optional arguments in LaTeX are typically denoted using square brackets and comma-separated.
#[derive(Debug, Default)]
pub struct OptArgs(Vec<Box<dyn TexElement>>);
impl OptArgs {
/// Creates a new set of optional arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
OptArgs(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
OptArgs(vec![elem.into_tex_element()])
}
}
impl TexElement for OptArgs {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if !self.0.is_empty() {
writer.write_all(b"[")?;
write_list(writer, ",", self.0.iter())?;
writer.write_all(b"]")?;
}
Ok(())
}
}
/// A set of arguments.
///
/// Each argument is enclosed by curly braces when rendered, otherwise arguments are just
/// concatenated.
#[derive(Debug, Default)]
pub struct Args(Vec<Box<dyn TexElement>>);
impl Args {
/// Creates a new set of arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
Args(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
Args(vec![elem.into_tex_element()])
}
}
impl TexElement for Args {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if !self.0.is_empty() {
writer.write_all(b"{")?;
write_list(writer, "}{", self.0.iter())?;
writer.write_all(b"}")?;
}
Ok(())
}
}
/// A TeX-macro invocation.
///
/// This is the typical `\macroname[opt1]{arg1}{arg2}` call that is common in latex documents.
#[derive(Debug)]
pub struct MacroCall {
/// Name of the instruction.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Mandatory arguments.
args: Args,
/// Whether or not to append a newline afterwards.
newline: bool,
}
impl MacroCall {
/// Creates a new macro call.
///
/// The resulting call will end with a newline when output.
pub fn new<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: true,
}
}
/// Creates a new inline macro call.
///
/// Does not end with a newline.
pub fn new_inline<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: false,
}
}
}
impl TexElement for MacroCall {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(br"\")?;
self.ident.write_tex(writer)?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
if self.newline {
writer.write_all(b"\n")?;
}
Ok(())
}
}
/// A block with a begin and end instruction.
///
/// Begin-end blocks usually start with a `\begin{blockname}` and end with `\end{blockname}`.
#[derive(Debug)]
pub struct BeginEndBlock {
/// The identifier for the block.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Actual arguments.
args: Args,
/// Child elements of the block.
children: Vec<Box<dyn TexElement>>,
}
impl BeginEndBlock {
/// Creates a new begin/end block.
pub fn new<T: IntoTexElement>(
ident: T,
opt_args: OptArgs,
args: Args,
children: Vec<Box<dyn TexElement>>,
) -> Self {
BeginEndBlock {
ident: ident.into_tex_element(),
opt_args,
args,
children,
}
}
}
impl TexElement for BeginEndBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"\\begin{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}")?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
writer.write_all(b"\n")?;
for child in &self.children {
child.write_tex(writer)?;
}
writer.write_all(b"\n\\end{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}\n")?;
Ok(())
}
}
/// An anonymous block.
///
/// Anonymous blocks are other elements enclosed in curly braces when output.
#[derive(Debug)]
pub struct AnonymousBlock(Vec<Box<dyn TexElement>>);
impl AnonymousBlock {
/// Creates a new anonymous block.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
AnonymousBlock(elems)
}
}
impl TexElement for AnonymousBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"{")?;
for child in &self.0 {
child.write_tex(writer)?;
}
writer.write_all(b"}")?;
Ok(())
}
}
/// Grouping of elements.
///
/// Groups multiple elements together; when output they are written in order, without any characters
/// added.
#[derive(Debug)]
pub struct Group(Vec<Box<dyn TexElement>>);
impl Group {
/// Creates a new group.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
Group(elems)
}
}
impl TexElement for Group {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
for child in &self.0 {
child.write_tex(writer)?;
}
Ok(())
}
}
/// Table row.
///
/// Multiple elements joined by ` & ` when rendered.
#[derive(Debug)]
pub struct TableRow(Vec<Box<dyn TexElement>>);
impl TableRow {
/// Creates a new table row.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
TableRow(elems)
}
}
impl TexElement for TableRow {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
write_list(writer, " & ", self.0.iter())?;
writer.write_all(b"\\\\\n")
}
}
| {
writer.write_all(separator.as_bytes())?;
} | conditional_block |
parser.go | package histweet
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
)
const (
timeLayout = "02-Jan-2006"
)
// Tokens for terminals of the Twitter rule parser grammar
//
// All token regular expressions _must_ start with ^ to ensure
// that the match is computed from the current position in the
// stream.
var Tokens = map[tokenKind]string{
tokenIdent: "^[a-zA-Z_]+",
tokenNumber: "^[0-9]+",
tokenString: `^"[^\"]*"`,
tokenAge: `^\s*([0-9]+[ymd])?([0-9]+[ymd])?([0-9]+[ymd])`,
tokenTime: `^\d\d-\w\w\w-\d\d\d\d`,
tokenLparen: `^\(`,
tokenRparen: `^\)`,
tokenOr: `^\|\|`,
tokenAnd: "^&&",
tokenGte: "^>=",
tokenGt: "^>",
tokenLte: "^<=",
tokenLt: "^<",
tokenEq: "^==",
tokenNeq: "^!=",
tokenIn: "^~",
tokenNotIn: "^!~",
}
type nodeKind int
// Types of parser nodes
const (
nodeCond nodeKind = iota
nodeLogical
)
// parseNode represents a single node in the parse tree.
//
// Each node has a kind, which is one of: "expr", "logical", or "cond".
// Logical nodes indicate that the node's two children are connected by a
// logical operation (&& or ||). Expr nodes indicate one or more expressions or
// conditions, tied by logical operators.
//
// If the node is a condition (cond) node, the rule field will contains the logic
// required to evaluate a match for a given tweet.
//
// Parsers can only be used once; to re-use a parser, make sure to call the
// Reset() method.
type parseNode struct {
kind nodeKind
op tokenKind
rule *RuleTweet
left *parseNode
right *parseNode
}
func (node *parseNode) String() string {
return fmt.Sprintf("Kind: %d, Op: %d, Rule: %+v", node.kind, node.op, node.rule)
}
// ParsedRule represents a single parsed Rule as a tree of parseNodes.
type ParsedRule struct {
root *parseNode
numNodes int
}
func evalInternal(tweet *Tweet, node *parseNode) bool {
switch node.kind {
case nodeCond:
return tweet.IsMatch(node.rule)
case nodeLogical:
left := evalInternal(tweet, node.left)
right := evalInternal(tweet, node.right)
switch node.op {
case tokenAnd:
return left && right
case tokenOr:
return left || right
default:
panic(fmt.Sprintf("Unexpected logical op: %d\n", node.op))
}
default:
panic(fmt.Sprintf("Unexpected node type: %d", node.kind))
}
}
// Eval walks the parse tree and evaluates each condition against
// the given Tweet. Returns true if the Tweet matches all of the rules.
func (rule *ParsedRule) Eval(tweet *Tweet) bool {
return evalInternal(tweet, rule.root)
}
// Parser is a simple parser for tweet deletion rule strings.
//
// Examples:
//
// - age > 3d
// - age > 10m3d || likes == 0
// - (likes > 10 && retweets > 3) || (text ~ "hello, world!")
// - retweets >= 3 && time <= "10 May 2020"
//
// Grammar:
//
// Expr <- ( Expr ) | Cond [Logical Expr]?
// Cond <- Ident Op Literal
// Logical <- Or | And
// Op <- Gt | Gte | Lt | Lte | Eq | Neq | In | NotIn
// Literal <- Number | String | Age | Time
//
// Ident := [A-Za-z0-9_]+
// Number := [0-9]+
// String := " [^"]* "
// Age := ^\s*([0-9]+[ymd])?([0-9]+[ymd])?([0-9]+[ymd])
// Time := \d\d-\w\w\w-\d\d\d\d
// Lparen := (
// Rparen := )
// Or := ||
// And := &&
// Gt := >
// Gte := >=
// Lt := <
// Lte := <=
// Eq := ==
// Neq := !=
// In := ~
// NotIn := !~
type Parser struct {
lexer *lexer
// Pointer to the current token
currToken *token
// Tree of parse nodes
rule *ParsedRule
}
// ParserError represents errors hit during rule parsing
type ParserError struct {
msg string
pos int
kind tokenKind
val string
// TODO: Add line
}
func (err *ParserError) Error() string {
return fmt.Sprintf("%s: \"%s\" (%s) (at col %d)", err.msg, err.val, err.kind.ToString(), err.pos+1)
}
func newParserError(msg string, token *token) *ParserError {
return &ParserError{
msg: msg,
pos: token.pos,
kind: token.kind,
val: token.val,
}
}
// Verifies that current token is of the specified `kind`,
// returns it, and reads in the next token
func (parser *Parser) match(kind tokenKind) (*token, error) {
currToken := parser.currToken
// If the current token is not a match, return the token for
// error reporting purposes. Do not consume the token.
if currToken.kind != kind {
return currToken, fmt.Errorf(`Unexpected token - found: "%s", expected: "%s"`,
currToken.kind.ToString(), kind.ToString())
}
token, err := parser.lexer.nextToken()
if err != nil {
return nil, err
}
parser.currToken = token
return currToken, nil
}
func (parser *Parser) expr() (*parseNode, error) {
var node *parseNode
var err error
for {
token := parser.currToken
// TODO(aksiksi): Handle the case of a non-logical expression that follows
// an expression or cond
switch token.kind {
// Nested expression
case tokenLparen:
_, err = parser.match(tokenLparen)
if err != nil {
return nil, err
}
// Parse the internal expression and return the resulting node
node, err = parser.expr()
if err != nil {
return nil, err
}
token, err = parser.match(tokenRparen)
if err != nil {
return nil, err
}
// Conditional expression
case tokenIdent:
node, err = parser.cond()
if err != nil {
return nil, err
}
// Logical/binary expression
case tokenAnd, tokenOr:
// Logical expresion with no preceding expression is invalid
if node == nil {
return nil, fmt.Errorf("Unexpected logical operator at %d: %s", token.pos, token.kind.ToString())
}
op, err := parser.logical()
if err != nil {
return nil, err
}
newNode, err := parser.expr()
if err != nil {
return nil, err
}
node = &parseNode{
kind: nodeLogical,
op: op.kind,
rule: nil,
left: node,
right: newNode,
}
default:
return node, nil
}
parser.rule.numNodes++
}
}
func (parser *Parser) cond() (*parseNode, error) {
ident, err := parser.ident()
if err != nil {
return nil, err
}
op, err1 := parser.op()
if err1 != nil {
return nil, err1
}
literal, err2 := parser.literal()
if err2 != nil {
return nil, err2
}
// Build the rule
rule := &RuleTweet{}
switch ident.val {
case "age":
if literal.kind != tokenAge {
return nil, newParserError("Invalid literal for \"age\"", literal)
}
time, err3 := convertAgeToTime(literal.val)
if err3 != nil {
return nil, newParserError("Invalid format for \"age\"", literal)
}
switch op.kind {
case tokenGt, tokenGte:
rule.Before = time
case tokenLt, tokenLte:
rule.After = time
default:
return nil, newParserError("Invalid operator for \"age\"", op)
}
case "text":
if literal.kind != tokenString {
return nil, newParserError("Invalid literal for \"text\"", literal)
}
switch op.kind {
case tokenIn, tokenNotIn:
// Gotcha: the literal contains quotes - remove them before building the regexp
pat := strings.Replace(literal.val, "\"", "", 2)
rule.Match = regexp.MustCompile(pat)
rule.IsNegativeMatch = (op.kind == tokenNotIn)
default:
return nil, newParserError("Invalid operator for \"text\"", op)
}
case "created":
if literal.kind != tokenTime {
return nil, newParserError("Invalid literal for \"created\"", literal)
}
time, err4 := time.Parse(timeLayout, literal.val)
if err4 != nil {
return nil, newParserError("Invalid time format for \"created\"", literal)
}
switch op.kind {
case tokenGt, tokenGte:
rule.Before = time
case tokenLt, tokenLte:
rule.After = time
default:
return nil, newParserError("Invalid operator for \"created\"", op)
}
case "likes":
if literal.kind != tokenNumber {
return nil, newParserError("Invalid literal for \"likes\"", literal)
}
num, err := strconv.Atoi(literal.val)
if err != nil {
return nil, newParserError("Invalid number for \"likes\"", literal)
}
rule.Likes = num
switch op.kind {
case tokenGt:
rule.LikesComparator = comparatorGt
case tokenGte:
rule.LikesComparator = comparatorGte
case tokenLt:
rule.LikesComparator = comparatorLt
case tokenLte:
rule.LikesComparator = comparatorLte
case tokenEq:
rule.LikesComparator = comparatorEq
case tokenNeq:
rule.LikesComparator = comparatorNeq
default:
return nil, newParserError("Invalid operator for \"likes\"", op)
}
case "retweets":
if literal.kind != tokenNumber {
return nil, newParserError("Invalid literal for \"retweets\"", literal)
}
num, err := strconv.Atoi(literal.val)
if err != nil {
return nil, newParserError("Invalid number for \"retweets\"", literal)
}
rule.Retweets = num
switch op.kind {
case tokenGt:
rule.RetweetsComparator = comparatorGt
case tokenGte:
rule.RetweetsComparator = comparatorGte
case tokenLt:
rule.RetweetsComparator = comparatorLt
case tokenLte:
rule.RetweetsComparator = comparatorLte
case tokenEq:
rule.RetweetsComparator = comparatorEq
case tokenNeq:
rule.RetweetsComparator = comparatorNeq
default:
return nil, newParserError("Invalid operator for \"retweets\"", op)
}
default:
return nil, newParserError("Invalid identifier", ident)
}
node := &parseNode{
kind: nodeCond,
rule: rule,
op: op.kind,
}
return node, nil
}
func (parser *Parser) ident() (*token, error) {
token, err := parser.match(tokenIdent)
if err != nil {
return nil, err
}
return token, nil
}
func (parser *Parser) logical() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenAnd, tokenOr:
token, err := parser.match(token.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid operator for logical expression", token)
}
}
func (parser *Parser) op() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenLt, tokenLte, tokenGt, tokenGte, tokenEq, tokenNeq, tokenIn, tokenNotIn:
token, err := parser.match(parser.currToken.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid comparison operator", token)
}
}
func (parser *Parser) literal() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenString, tokenNumber, tokenAge, tokenTime:
token, err := parser.match(token.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid literal", token)
}
}
func toStringHelper(p *parseNode, depth int, output *strings.Builder) {
if p == nil {
return
}
s := fmt.Sprintf("depth = %d, %s\n", depth, p)
output.WriteString(s)
toStringHelper(p.left, depth+1, output)
toStringHelper(p.right, depth+1, output)
}
// ToString walks the parse tree and outputs it in string form
func (rule *ParsedRule) | () string {
var output strings.Builder
toStringHelper(rule.root, 0, &output)
return output.String()
}
// NewParser builds a new Parser from the input
func NewParser(input string) *Parser {
lexer := newLexer(Tokens, input)
parser := &Parser{
lexer: lexer,
rule: &ParsedRule{},
}
return parser
}
// Reset this Parser to a clean state with the provided input
func (parser *Parser) Reset(input string) {
parser.lexer.Reset()
parser.rule = &ParsedRule{}
}
// Parse is the entry point for parser
func (parser *Parser) Parse() (*ParsedRule, error) {
// Prior to parsing a rule, check for unbalanced parens
err := checkUnbalancedParens(parser.lexer.input)
if err != nil {
return nil, err
}
token, err := parser.lexer.nextToken()
if err != nil {
return nil, err
}
parser.currToken = token
node, err := parser.expr()
// Set the root to the returned root
parser.rule.root = node
return parser.rule, err
}
// Parse is the entry point to the rule parser infra.
// Users of the library should only be using this function.
func Parse(input string) (*ParsedRule, error) {
parser := NewParser(input)
rule, err := parser.Parse()
if err != nil {
return nil, err
}
return rule, nil
}
| ToString | identifier_name |
parser.go | package histweet
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
)
const (
timeLayout = "02-Jan-2006"
)
// Tokens for terminals of the Twitter rule parser grammar
//
// All token regular expressions _must_ start with ^ to ensure
// that the match is computed from the current position in the
// stream.
var Tokens = map[tokenKind]string{
tokenIdent: "^[a-zA-Z_]+",
tokenNumber: "^[0-9]+",
tokenString: `^"[^\"]*"`,
tokenAge: `^\s*([0-9]+[ymd])?([0-9]+[ymd])?([0-9]+[ymd])`,
tokenTime: `^\d\d-\w\w\w-\d\d\d\d`,
tokenLparen: `^\(`,
tokenRparen: `^\)`,
tokenOr: `^\|\|`,
tokenAnd: "^&&",
tokenGte: "^>=",
tokenGt: "^>",
tokenLte: "^<=",
tokenLt: "^<",
tokenEq: "^==",
tokenNeq: "^!=",
tokenIn: "^~",
tokenNotIn: "^!~",
}
type nodeKind int
// Types of parser nodes
const (
nodeCond nodeKind = iota
nodeLogical
)
// parseNode represents a single node in the parse tree.
//
// Each node has a kind, which is one of: "expr", "logical", or "cond".
// Logical nodes indicate that the node's two children are connected by a
// logical operation (&& or ||). Expr nodes indicate one or more expressions or
// conditions, tied by logical operators.
//
// If the node is a condition (cond) node, the rule field will contains the logic
// required to evaluate a match for a given tweet.
//
// Parsers can only be used once; to re-use a parser, make sure to call the
// Reset() method.
type parseNode struct {
kind nodeKind
op tokenKind
rule *RuleTweet
left *parseNode
right *parseNode
}
func (node *parseNode) String() string {
return fmt.Sprintf("Kind: %d, Op: %d, Rule: %+v", node.kind, node.op, node.rule)
}
// ParsedRule represents a single parsed Rule as a tree of parseNodes.
type ParsedRule struct {
root *parseNode
numNodes int
}
func evalInternal(tweet *Tweet, node *parseNode) bool {
switch node.kind {
case nodeCond:
return tweet.IsMatch(node.rule)
case nodeLogical:
left := evalInternal(tweet, node.left)
right := evalInternal(tweet, node.right)
switch node.op {
case tokenAnd:
return left && right
case tokenOr:
return left || right
default:
panic(fmt.Sprintf("Unexpected logical op: %d\n", node.op))
}
default:
panic(fmt.Sprintf("Unexpected node type: %d", node.kind))
}
}
// Eval walks the parse tree and evaluates each condition against
// the given Tweet. Returns true if the Tweet matches all of the rules.
func (rule *ParsedRule) Eval(tweet *Tweet) bool {
return evalInternal(tweet, rule.root)
}
// Parser is a simple parser for tweet deletion rule strings.
//
// Examples:
//
// - age > 3d
// - age > 10m3d || likes == 0
// - (likes > 10 && retweets > 3) || (text ~ "hello, world!")
// - retweets >= 3 && time <= "10 May 2020"
//
// Grammar:
//
// Expr <- ( Expr ) | Cond [Logical Expr]?
// Cond <- Ident Op Literal
// Logical <- Or | And
// Op <- Gt | Gte | Lt | Lte | Eq | Neq | In | NotIn
// Literal <- Number | String | Age | Time
//
// Ident := [A-Za-z0-9_]+
// Number := [0-9]+
// String := " [^"]* "
// Age := ^\s*([0-9]+[ymd])?([0-9]+[ymd])?([0-9]+[ymd])
// Time := \d\d-\w\w\w-\d\d\d\d
// Lparen := (
// Rparen := )
// Or := ||
// And := &&
// Gt := >
// Gte := >=
// Lt := <
// Lte := <=
// Eq := ==
// Neq := !=
// In := ~
// NotIn := !~
type Parser struct {
lexer *lexer
// Pointer to the current token
currToken *token
// Tree of parse nodes
rule *ParsedRule
}
// ParserError represents errors hit during rule parsing
type ParserError struct {
msg string
pos int
kind tokenKind
val string
// TODO: Add line
}
func (err *ParserError) Error() string {
return fmt.Sprintf("%s: \"%s\" (%s) (at col %d)", err.msg, err.val, err.kind.ToString(), err.pos+1)
}
func newParserError(msg string, token *token) *ParserError {
return &ParserError{
msg: msg,
pos: token.pos,
kind: token.kind,
val: token.val,
}
}
// Verifies that current token is of the specified `kind`,
// returns it, and reads in the next token
func (parser *Parser) match(kind tokenKind) (*token, error) {
currToken := parser.currToken
// If the current token is not a match, return the token for
// error reporting purposes. Do not consume the token.
if currToken.kind != kind {
return currToken, fmt.Errorf(`Unexpected token - found: "%s", expected: "%s"`,
currToken.kind.ToString(), kind.ToString())
}
token, err := parser.lexer.nextToken()
if err != nil |
parser.currToken = token
return currToken, nil
}
func (parser *Parser) expr() (*parseNode, error) {
var node *parseNode
var err error
for {
token := parser.currToken
// TODO(aksiksi): Handle the case of a non-logical expression that follows
// an expression or cond
switch token.kind {
// Nested expression
case tokenLparen:
_, err = parser.match(tokenLparen)
if err != nil {
return nil, err
}
// Parse the internal expression and return the resulting node
node, err = parser.expr()
if err != nil {
return nil, err
}
token, err = parser.match(tokenRparen)
if err != nil {
return nil, err
}
// Conditional expression
case tokenIdent:
node, err = parser.cond()
if err != nil {
return nil, err
}
// Logical/binary expression
case tokenAnd, tokenOr:
// Logical expresion with no preceding expression is invalid
if node == nil {
return nil, fmt.Errorf("Unexpected logical operator at %d: %s", token.pos, token.kind.ToString())
}
op, err := parser.logical()
if err != nil {
return nil, err
}
newNode, err := parser.expr()
if err != nil {
return nil, err
}
node = &parseNode{
kind: nodeLogical,
op: op.kind,
rule: nil,
left: node,
right: newNode,
}
default:
return node, nil
}
parser.rule.numNodes++
}
}
func (parser *Parser) cond() (*parseNode, error) {
ident, err := parser.ident()
if err != nil {
return nil, err
}
op, err1 := parser.op()
if err1 != nil {
return nil, err1
}
literal, err2 := parser.literal()
if err2 != nil {
return nil, err2
}
// Build the rule
rule := &RuleTweet{}
switch ident.val {
case "age":
if literal.kind != tokenAge {
return nil, newParserError("Invalid literal for \"age\"", literal)
}
time, err3 := convertAgeToTime(literal.val)
if err3 != nil {
return nil, newParserError("Invalid format for \"age\"", literal)
}
switch op.kind {
case tokenGt, tokenGte:
rule.Before = time
case tokenLt, tokenLte:
rule.After = time
default:
return nil, newParserError("Invalid operator for \"age\"", op)
}
case "text":
if literal.kind != tokenString {
return nil, newParserError("Invalid literal for \"text\"", literal)
}
switch op.kind {
case tokenIn, tokenNotIn:
// Gotcha: the literal contains quotes - remove them before building the regexp
pat := strings.Replace(literal.val, "\"", "", 2)
rule.Match = regexp.MustCompile(pat)
rule.IsNegativeMatch = (op.kind == tokenNotIn)
default:
return nil, newParserError("Invalid operator for \"text\"", op)
}
case "created":
if literal.kind != tokenTime {
return nil, newParserError("Invalid literal for \"created\"", literal)
}
time, err4 := time.Parse(timeLayout, literal.val)
if err4 != nil {
return nil, newParserError("Invalid time format for \"created\"", literal)
}
switch op.kind {
case tokenGt, tokenGte:
rule.Before = time
case tokenLt, tokenLte:
rule.After = time
default:
return nil, newParserError("Invalid operator for \"created\"", op)
}
case "likes":
if literal.kind != tokenNumber {
return nil, newParserError("Invalid literal for \"likes\"", literal)
}
num, err := strconv.Atoi(literal.val)
if err != nil {
return nil, newParserError("Invalid number for \"likes\"", literal)
}
rule.Likes = num
switch op.kind {
case tokenGt:
rule.LikesComparator = comparatorGt
case tokenGte:
rule.LikesComparator = comparatorGte
case tokenLt:
rule.LikesComparator = comparatorLt
case tokenLte:
rule.LikesComparator = comparatorLte
case tokenEq:
rule.LikesComparator = comparatorEq
case tokenNeq:
rule.LikesComparator = comparatorNeq
default:
return nil, newParserError("Invalid operator for \"likes\"", op)
}
case "retweets":
if literal.kind != tokenNumber {
return nil, newParserError("Invalid literal for \"retweets\"", literal)
}
num, err := strconv.Atoi(literal.val)
if err != nil {
return nil, newParserError("Invalid number for \"retweets\"", literal)
}
rule.Retweets = num
switch op.kind {
case tokenGt:
rule.RetweetsComparator = comparatorGt
case tokenGte:
rule.RetweetsComparator = comparatorGte
case tokenLt:
rule.RetweetsComparator = comparatorLt
case tokenLte:
rule.RetweetsComparator = comparatorLte
case tokenEq:
rule.RetweetsComparator = comparatorEq
case tokenNeq:
rule.RetweetsComparator = comparatorNeq
default:
return nil, newParserError("Invalid operator for \"retweets\"", op)
}
default:
return nil, newParserError("Invalid identifier", ident)
}
node := &parseNode{
kind: nodeCond,
rule: rule,
op: op.kind,
}
return node, nil
}
func (parser *Parser) ident() (*token, error) {
token, err := parser.match(tokenIdent)
if err != nil {
return nil, err
}
return token, nil
}
func (parser *Parser) logical() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenAnd, tokenOr:
token, err := parser.match(token.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid operator for logical expression", token)
}
}
func (parser *Parser) op() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenLt, tokenLte, tokenGt, tokenGte, tokenEq, tokenNeq, tokenIn, tokenNotIn:
token, err := parser.match(parser.currToken.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid comparison operator", token)
}
}
func (parser *Parser) literal() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenString, tokenNumber, tokenAge, tokenTime:
token, err := parser.match(token.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid literal", token)
}
}
func toStringHelper(p *parseNode, depth int, output *strings.Builder) {
if p == nil {
return
}
s := fmt.Sprintf("depth = %d, %s\n", depth, p)
output.WriteString(s)
toStringHelper(p.left, depth+1, output)
toStringHelper(p.right, depth+1, output)
}
// ToString walks the parse tree and outputs it in string form
func (rule *ParsedRule) ToString() string {
var output strings.Builder
toStringHelper(rule.root, 0, &output)
return output.String()
}
// NewParser builds a new Parser from the input
func NewParser(input string) *Parser {
lexer := newLexer(Tokens, input)
parser := &Parser{
lexer: lexer,
rule: &ParsedRule{},
}
return parser
}
// Reset this Parser to a clean state with the provided input
func (parser *Parser) Reset(input string) {
parser.lexer.Reset()
parser.rule = &ParsedRule{}
}
// Parse is the entry point for parser
func (parser *Parser) Parse() (*ParsedRule, error) {
// Prior to parsing a rule, check for unbalanced parens
err := checkUnbalancedParens(parser.lexer.input)
if err != nil {
return nil, err
}
token, err := parser.lexer.nextToken()
if err != nil {
return nil, err
}
parser.currToken = token
node, err := parser.expr()
// Set the root to the returned root
parser.rule.root = node
return parser.rule, err
}
// Parse is the entry point to the rule parser infra.
// Users of the library should only be using this function.
func Parse(input string) (*ParsedRule, error) {
parser := NewParser(input)
rule, err := parser.Parse()
if err != nil {
return nil, err
}
return rule, nil
}
| {
return nil, err
} | conditional_block |
parser.go | package histweet
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
)
const (
timeLayout = "02-Jan-2006"
)
// Tokens for terminals of the Twitter rule parser grammar
//
// All token regular expressions _must_ start with ^ to ensure
// that the match is computed from the current position in the
// stream.
var Tokens = map[tokenKind]string{
tokenIdent: "^[a-zA-Z_]+",
tokenNumber: "^[0-9]+",
tokenString: `^"[^\"]*"`,
tokenAge: `^\s*([0-9]+[ymd])?([0-9]+[ymd])?([0-9]+[ymd])`,
tokenTime: `^\d\d-\w\w\w-\d\d\d\d`,
tokenLparen: `^\(`,
tokenRparen: `^\)`,
tokenOr: `^\|\|`,
tokenAnd: "^&&",
tokenGte: "^>=",
tokenGt: "^>",
tokenLte: "^<=",
tokenLt: "^<",
tokenEq: "^==",
tokenNeq: "^!=",
tokenIn: "^~",
tokenNotIn: "^!~",
}
type nodeKind int
// Types of parser nodes
const (
nodeCond nodeKind = iota
nodeLogical
)
// parseNode represents a single node in the parse tree.
//
// Each node has a kind, which is one of: "expr", "logical", or "cond".
// Logical nodes indicate that the node's two children are connected by a
// logical operation (&& or ||). Expr nodes indicate one or more expressions or
// conditions, tied by logical operators.
//
// If the node is a condition (cond) node, the rule field will contains the logic
// required to evaluate a match for a given tweet.
//
// Parsers can only be used once; to re-use a parser, make sure to call the
// Reset() method.
type parseNode struct {
kind nodeKind
op tokenKind
rule *RuleTweet
left *parseNode
right *parseNode
}
func (node *parseNode) String() string {
return fmt.Sprintf("Kind: %d, Op: %d, Rule: %+v", node.kind, node.op, node.rule)
}
// ParsedRule represents a single parsed Rule as a tree of parseNodes.
type ParsedRule struct {
root *parseNode
numNodes int
}
func evalInternal(tweet *Tweet, node *parseNode) bool |
// Eval walks the parse tree and evaluates each condition against
// the given Tweet. Returns true if the Tweet matches all of the rules.
func (rule *ParsedRule) Eval(tweet *Tweet) bool {
return evalInternal(tweet, rule.root)
}
// Parser is a simple parser for tweet deletion rule strings.
//
// Examples:
//
// - age > 3d
// - age > 10m3d || likes == 0
// - (likes > 10 && retweets > 3) || (text ~ "hello, world!")
// - retweets >= 3 && time <= "10 May 2020"
//
// Grammar:
//
// Expr <- ( Expr ) | Cond [Logical Expr]?
// Cond <- Ident Op Literal
// Logical <- Or | And
// Op <- Gt | Gte | Lt | Lte | Eq | Neq | In | NotIn
// Literal <- Number | String | Age | Time
//
// Ident := [A-Za-z0-9_]+
// Number := [0-9]+
// String := " [^"]* "
// Age := ^\s*([0-9]+[ymd])?([0-9]+[ymd])?([0-9]+[ymd])
// Time := \d\d-\w\w\w-\d\d\d\d
// Lparen := (
// Rparen := )
// Or := ||
// And := &&
// Gt := >
// Gte := >=
// Lt := <
// Lte := <=
// Eq := ==
// Neq := !=
// In := ~
// NotIn := !~
type Parser struct {
lexer *lexer
// Pointer to the current token
currToken *token
// Tree of parse nodes
rule *ParsedRule
}
// ParserError represents errors hit during rule parsing
type ParserError struct {
msg string
pos int
kind tokenKind
val string
// TODO: Add line
}
func (err *ParserError) Error() string {
return fmt.Sprintf("%s: \"%s\" (%s) (at col %d)", err.msg, err.val, err.kind.ToString(), err.pos+1)
}
func newParserError(msg string, token *token) *ParserError {
return &ParserError{
msg: msg,
pos: token.pos,
kind: token.kind,
val: token.val,
}
}
// Verifies that current token is of the specified `kind`,
// returns it, and reads in the next token
func (parser *Parser) match(kind tokenKind) (*token, error) {
currToken := parser.currToken
// If the current token is not a match, return the token for
// error reporting purposes. Do not consume the token.
if currToken.kind != kind {
return currToken, fmt.Errorf(`Unexpected token - found: "%s", expected: "%s"`,
currToken.kind.ToString(), kind.ToString())
}
token, err := parser.lexer.nextToken()
if err != nil {
return nil, err
}
parser.currToken = token
return currToken, nil
}
func (parser *Parser) expr() (*parseNode, error) {
var node *parseNode
var err error
for {
token := parser.currToken
// TODO(aksiksi): Handle the case of a non-logical expression that follows
// an expression or cond
switch token.kind {
// Nested expression
case tokenLparen:
_, err = parser.match(tokenLparen)
if err != nil {
return nil, err
}
// Parse the internal expression and return the resulting node
node, err = parser.expr()
if err != nil {
return nil, err
}
token, err = parser.match(tokenRparen)
if err != nil {
return nil, err
}
// Conditional expression
case tokenIdent:
node, err = parser.cond()
if err != nil {
return nil, err
}
// Logical/binary expression
case tokenAnd, tokenOr:
// Logical expresion with no preceding expression is invalid
if node == nil {
return nil, fmt.Errorf("Unexpected logical operator at %d: %s", token.pos, token.kind.ToString())
}
op, err := parser.logical()
if err != nil {
return nil, err
}
newNode, err := parser.expr()
if err != nil {
return nil, err
}
node = &parseNode{
kind: nodeLogical,
op: op.kind,
rule: nil,
left: node,
right: newNode,
}
default:
return node, nil
}
parser.rule.numNodes++
}
}
func (parser *Parser) cond() (*parseNode, error) {
ident, err := parser.ident()
if err != nil {
return nil, err
}
op, err1 := parser.op()
if err1 != nil {
return nil, err1
}
literal, err2 := parser.literal()
if err2 != nil {
return nil, err2
}
// Build the rule
rule := &RuleTweet{}
switch ident.val {
case "age":
if literal.kind != tokenAge {
return nil, newParserError("Invalid literal for \"age\"", literal)
}
time, err3 := convertAgeToTime(literal.val)
if err3 != nil {
return nil, newParserError("Invalid format for \"age\"", literal)
}
switch op.kind {
case tokenGt, tokenGte:
rule.Before = time
case tokenLt, tokenLte:
rule.After = time
default:
return nil, newParserError("Invalid operator for \"age\"", op)
}
case "text":
if literal.kind != tokenString {
return nil, newParserError("Invalid literal for \"text\"", literal)
}
switch op.kind {
case tokenIn, tokenNotIn:
// Gotcha: the literal contains quotes - remove them before building the regexp
pat := strings.Replace(literal.val, "\"", "", 2)
rule.Match = regexp.MustCompile(pat)
rule.IsNegativeMatch = (op.kind == tokenNotIn)
default:
return nil, newParserError("Invalid operator for \"text\"", op)
}
case "created":
if literal.kind != tokenTime {
return nil, newParserError("Invalid literal for \"created\"", literal)
}
time, err4 := time.Parse(timeLayout, literal.val)
if err4 != nil {
return nil, newParserError("Invalid time format for \"created\"", literal)
}
switch op.kind {
case tokenGt, tokenGte:
rule.Before = time
case tokenLt, tokenLte:
rule.After = time
default:
return nil, newParserError("Invalid operator for \"created\"", op)
}
case "likes":
if literal.kind != tokenNumber {
return nil, newParserError("Invalid literal for \"likes\"", literal)
}
num, err := strconv.Atoi(literal.val)
if err != nil {
return nil, newParserError("Invalid number for \"likes\"", literal)
}
rule.Likes = num
switch op.kind {
case tokenGt:
rule.LikesComparator = comparatorGt
case tokenGte:
rule.LikesComparator = comparatorGte
case tokenLt:
rule.LikesComparator = comparatorLt
case tokenLte:
rule.LikesComparator = comparatorLte
case tokenEq:
rule.LikesComparator = comparatorEq
case tokenNeq:
rule.LikesComparator = comparatorNeq
default:
return nil, newParserError("Invalid operator for \"likes\"", op)
}
case "retweets":
if literal.kind != tokenNumber {
return nil, newParserError("Invalid literal for \"retweets\"", literal)
}
num, err := strconv.Atoi(literal.val)
if err != nil {
return nil, newParserError("Invalid number for \"retweets\"", literal)
}
rule.Retweets = num
switch op.kind {
case tokenGt:
rule.RetweetsComparator = comparatorGt
case tokenGte:
rule.RetweetsComparator = comparatorGte
case tokenLt:
rule.RetweetsComparator = comparatorLt
case tokenLte:
rule.RetweetsComparator = comparatorLte
case tokenEq:
rule.RetweetsComparator = comparatorEq
case tokenNeq:
rule.RetweetsComparator = comparatorNeq
default:
return nil, newParserError("Invalid operator for \"retweets\"", op)
}
default:
return nil, newParserError("Invalid identifier", ident)
}
node := &parseNode{
kind: nodeCond,
rule: rule,
op: op.kind,
}
return node, nil
}
func (parser *Parser) ident() (*token, error) {
token, err := parser.match(tokenIdent)
if err != nil {
return nil, err
}
return token, nil
}
func (parser *Parser) logical() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenAnd, tokenOr:
token, err := parser.match(token.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid operator for logical expression", token)
}
}
func (parser *Parser) op() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenLt, tokenLte, tokenGt, tokenGte, tokenEq, tokenNeq, tokenIn, tokenNotIn:
token, err := parser.match(parser.currToken.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid comparison operator", token)
}
}
func (parser *Parser) literal() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenString, tokenNumber, tokenAge, tokenTime:
token, err := parser.match(token.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid literal", token)
}
}
func toStringHelper(p *parseNode, depth int, output *strings.Builder) {
if p == nil {
return
}
s := fmt.Sprintf("depth = %d, %s\n", depth, p)
output.WriteString(s)
toStringHelper(p.left, depth+1, output)
toStringHelper(p.right, depth+1, output)
}
// ToString walks the parse tree and outputs it in string form
func (rule *ParsedRule) ToString() string {
var output strings.Builder
toStringHelper(rule.root, 0, &output)
return output.String()
}
// NewParser builds a new Parser from the input
func NewParser(input string) *Parser {
lexer := newLexer(Tokens, input)
parser := &Parser{
lexer: lexer,
rule: &ParsedRule{},
}
return parser
}
// Reset this Parser to a clean state with the provided input
func (parser *Parser) Reset(input string) {
parser.lexer.Reset()
parser.rule = &ParsedRule{}
}
// Parse is the entry point for parser
func (parser *Parser) Parse() (*ParsedRule, error) {
// Prior to parsing a rule, check for unbalanced parens
err := checkUnbalancedParens(parser.lexer.input)
if err != nil {
return nil, err
}
token, err := parser.lexer.nextToken()
if err != nil {
return nil, err
}
parser.currToken = token
node, err := parser.expr()
// Set the root to the returned root
parser.rule.root = node
return parser.rule, err
}
// Parse is the entry point to the rule parser infra.
// Users of the library should only be using this function.
func Parse(input string) (*ParsedRule, error) {
parser := NewParser(input)
rule, err := parser.Parse()
if err != nil {
return nil, err
}
return rule, nil
}
| {
switch node.kind {
case nodeCond:
return tweet.IsMatch(node.rule)
case nodeLogical:
left := evalInternal(tweet, node.left)
right := evalInternal(tweet, node.right)
switch node.op {
case tokenAnd:
return left && right
case tokenOr:
return left || right
default:
panic(fmt.Sprintf("Unexpected logical op: %d\n", node.op))
}
default:
panic(fmt.Sprintf("Unexpected node type: %d", node.kind))
}
} | identifier_body |
parser.go | package histweet
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
)
const (
timeLayout = "02-Jan-2006"
)
// Tokens for terminals of the Twitter rule parser grammar
//
// All token regular expressions _must_ start with ^ to ensure
// that the match is computed from the current position in the
// stream.
var Tokens = map[tokenKind]string{
tokenIdent: "^[a-zA-Z_]+",
tokenNumber: "^[0-9]+",
tokenString: `^"[^\"]*"`,
tokenAge: `^\s*([0-9]+[ymd])?([0-9]+[ymd])?([0-9]+[ymd])`,
tokenTime: `^\d\d-\w\w\w-\d\d\d\d`,
tokenLparen: `^\(`,
tokenRparen: `^\)`,
tokenOr: `^\|\|`,
tokenAnd: "^&&",
tokenGte: "^>=",
tokenGt: "^>",
tokenLte: "^<=",
tokenLt: "^<",
tokenEq: "^==",
tokenNeq: "^!=",
tokenIn: "^~",
tokenNotIn: "^!~",
}
type nodeKind int
// Types of parser nodes
const (
nodeCond nodeKind = iota
nodeLogical
)
// parseNode represents a single node in the parse tree.
//
// Each node has a kind, which is one of: "expr", "logical", or "cond".
// Logical nodes indicate that the node's two children are connected by a
// logical operation (&& or ||). Expr nodes indicate one or more expressions or
// conditions, tied by logical operators.
//
// If the node is a condition (cond) node, the rule field will contains the logic
// required to evaluate a match for a given tweet.
//
// Parsers can only be used once; to re-use a parser, make sure to call the
// Reset() method.
type parseNode struct {
kind nodeKind
op tokenKind
rule *RuleTweet
left *parseNode
right *parseNode
}
func (node *parseNode) String() string {
return fmt.Sprintf("Kind: %d, Op: %d, Rule: %+v", node.kind, node.op, node.rule)
}
// ParsedRule represents a single parsed Rule as a tree of parseNodes.
type ParsedRule struct {
root *parseNode
numNodes int
}
func evalInternal(tweet *Tweet, node *parseNode) bool {
switch node.kind {
case nodeCond:
return tweet.IsMatch(node.rule)
case nodeLogical:
left := evalInternal(tweet, node.left)
right := evalInternal(tweet, node.right)
switch node.op {
case tokenAnd:
return left && right
case tokenOr:
return left || right
default:
panic(fmt.Sprintf("Unexpected logical op: %d\n", node.op))
}
default:
panic(fmt.Sprintf("Unexpected node type: %d", node.kind))
}
}
// Eval walks the parse tree and evaluates each condition against
// the given Tweet. Returns true if the Tweet matches all of the rules.
func (rule *ParsedRule) Eval(tweet *Tweet) bool {
return evalInternal(tweet, rule.root)
}
// Parser is a simple parser for tweet deletion rule strings.
//
// Examples:
//
// - age > 3d
// - age > 10m3d || likes == 0
// - (likes > 10 && retweets > 3) || (text ~ "hello, world!")
// - retweets >= 3 && time <= "10 May 2020"
//
// Grammar:
//
// Expr <- ( Expr ) | Cond [Logical Expr]?
// Cond <- Ident Op Literal
// Logical <- Or | And
// Op <- Gt | Gte | Lt | Lte | Eq | Neq | In | NotIn
// Literal <- Number | String | Age | Time
//
// Ident := [A-Za-z0-9_]+
// Number := [0-9]+
// String := " [^"]* "
// Age := ^\s*([0-9]+[ymd])?([0-9]+[ymd])?([0-9]+[ymd])
// Time := \d\d-\w\w\w-\d\d\d\d
// Lparen := (
// Rparen := )
// Or := ||
// And := &&
// Gt := >
// Gte := >=
// Lt := <
// Lte := <=
// Eq := ==
// Neq := !=
// In := ~
// NotIn := !~
type Parser struct {
lexer *lexer
// Pointer to the current token
currToken *token
| }
// ParserError represents errors hit during rule parsing
type ParserError struct {
msg string
pos int
kind tokenKind
val string
// TODO: Add line
}
func (err *ParserError) Error() string {
return fmt.Sprintf("%s: \"%s\" (%s) (at col %d)", err.msg, err.val, err.kind.ToString(), err.pos+1)
}
func newParserError(msg string, token *token) *ParserError {
return &ParserError{
msg: msg,
pos: token.pos,
kind: token.kind,
val: token.val,
}
}
// Verifies that current token is of the specified `kind`,
// returns it, and reads in the next token
func (parser *Parser) match(kind tokenKind) (*token, error) {
currToken := parser.currToken
// If the current token is not a match, return the token for
// error reporting purposes. Do not consume the token.
if currToken.kind != kind {
return currToken, fmt.Errorf(`Unexpected token - found: "%s", expected: "%s"`,
currToken.kind.ToString(), kind.ToString())
}
token, err := parser.lexer.nextToken()
if err != nil {
return nil, err
}
parser.currToken = token
return currToken, nil
}
func (parser *Parser) expr() (*parseNode, error) {
var node *parseNode
var err error
for {
token := parser.currToken
// TODO(aksiksi): Handle the case of a non-logical expression that follows
// an expression or cond
switch token.kind {
// Nested expression
case tokenLparen:
_, err = parser.match(tokenLparen)
if err != nil {
return nil, err
}
// Parse the internal expression and return the resulting node
node, err = parser.expr()
if err != nil {
return nil, err
}
token, err = parser.match(tokenRparen)
if err != nil {
return nil, err
}
// Conditional expression
case tokenIdent:
node, err = parser.cond()
if err != nil {
return nil, err
}
// Logical/binary expression
case tokenAnd, tokenOr:
// Logical expresion with no preceding expression is invalid
if node == nil {
return nil, fmt.Errorf("Unexpected logical operator at %d: %s", token.pos, token.kind.ToString())
}
op, err := parser.logical()
if err != nil {
return nil, err
}
newNode, err := parser.expr()
if err != nil {
return nil, err
}
node = &parseNode{
kind: nodeLogical,
op: op.kind,
rule: nil,
left: node,
right: newNode,
}
default:
return node, nil
}
parser.rule.numNodes++
}
}
func (parser *Parser) cond() (*parseNode, error) {
ident, err := parser.ident()
if err != nil {
return nil, err
}
op, err1 := parser.op()
if err1 != nil {
return nil, err1
}
literal, err2 := parser.literal()
if err2 != nil {
return nil, err2
}
// Build the rule
rule := &RuleTweet{}
switch ident.val {
case "age":
if literal.kind != tokenAge {
return nil, newParserError("Invalid literal for \"age\"", literal)
}
time, err3 := convertAgeToTime(literal.val)
if err3 != nil {
return nil, newParserError("Invalid format for \"age\"", literal)
}
switch op.kind {
case tokenGt, tokenGte:
rule.Before = time
case tokenLt, tokenLte:
rule.After = time
default:
return nil, newParserError("Invalid operator for \"age\"", op)
}
case "text":
if literal.kind != tokenString {
return nil, newParserError("Invalid literal for \"text\"", literal)
}
switch op.kind {
case tokenIn, tokenNotIn:
// Gotcha: the literal contains quotes - remove them before building the regexp
pat := strings.Replace(literal.val, "\"", "", 2)
rule.Match = regexp.MustCompile(pat)
rule.IsNegativeMatch = (op.kind == tokenNotIn)
default:
return nil, newParserError("Invalid operator for \"text\"", op)
}
case "created":
if literal.kind != tokenTime {
return nil, newParserError("Invalid literal for \"created\"", literal)
}
time, err4 := time.Parse(timeLayout, literal.val)
if err4 != nil {
return nil, newParserError("Invalid time format for \"created\"", literal)
}
switch op.kind {
case tokenGt, tokenGte:
rule.Before = time
case tokenLt, tokenLte:
rule.After = time
default:
return nil, newParserError("Invalid operator for \"created\"", op)
}
case "likes":
if literal.kind != tokenNumber {
return nil, newParserError("Invalid literal for \"likes\"", literal)
}
num, err := strconv.Atoi(literal.val)
if err != nil {
return nil, newParserError("Invalid number for \"likes\"", literal)
}
rule.Likes = num
switch op.kind {
case tokenGt:
rule.LikesComparator = comparatorGt
case tokenGte:
rule.LikesComparator = comparatorGte
case tokenLt:
rule.LikesComparator = comparatorLt
case tokenLte:
rule.LikesComparator = comparatorLte
case tokenEq:
rule.LikesComparator = comparatorEq
case tokenNeq:
rule.LikesComparator = comparatorNeq
default:
return nil, newParserError("Invalid operator for \"likes\"", op)
}
case "retweets":
if literal.kind != tokenNumber {
return nil, newParserError("Invalid literal for \"retweets\"", literal)
}
num, err := strconv.Atoi(literal.val)
if err != nil {
return nil, newParserError("Invalid number for \"retweets\"", literal)
}
rule.Retweets = num
switch op.kind {
case tokenGt:
rule.RetweetsComparator = comparatorGt
case tokenGte:
rule.RetweetsComparator = comparatorGte
case tokenLt:
rule.RetweetsComparator = comparatorLt
case tokenLte:
rule.RetweetsComparator = comparatorLte
case tokenEq:
rule.RetweetsComparator = comparatorEq
case tokenNeq:
rule.RetweetsComparator = comparatorNeq
default:
return nil, newParserError("Invalid operator for \"retweets\"", op)
}
default:
return nil, newParserError("Invalid identifier", ident)
}
node := &parseNode{
kind: nodeCond,
rule: rule,
op: op.kind,
}
return node, nil
}
func (parser *Parser) ident() (*token, error) {
token, err := parser.match(tokenIdent)
if err != nil {
return nil, err
}
return token, nil
}
func (parser *Parser) logical() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenAnd, tokenOr:
token, err := parser.match(token.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid operator for logical expression", token)
}
}
func (parser *Parser) op() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenLt, tokenLte, tokenGt, tokenGte, tokenEq, tokenNeq, tokenIn, tokenNotIn:
token, err := parser.match(parser.currToken.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid comparison operator", token)
}
}
func (parser *Parser) literal() (*token, error) {
token := parser.currToken
switch token.kind {
case tokenString, tokenNumber, tokenAge, tokenTime:
token, err := parser.match(token.kind)
if err != nil {
return nil, err
}
return token, nil
default:
return nil, newParserError("Invalid literal", token)
}
}
func toStringHelper(p *parseNode, depth int, output *strings.Builder) {
if p == nil {
return
}
s := fmt.Sprintf("depth = %d, %s\n", depth, p)
output.WriteString(s)
toStringHelper(p.left, depth+1, output)
toStringHelper(p.right, depth+1, output)
}
// ToString walks the parse tree and outputs it in string form
func (rule *ParsedRule) ToString() string {
var output strings.Builder
toStringHelper(rule.root, 0, &output)
return output.String()
}
// NewParser builds a new Parser from the input
func NewParser(input string) *Parser {
lexer := newLexer(Tokens, input)
parser := &Parser{
lexer: lexer,
rule: &ParsedRule{},
}
return parser
}
// Reset this Parser to a clean state with the provided input
func (parser *Parser) Reset(input string) {
parser.lexer.Reset()
parser.rule = &ParsedRule{}
}
// Parse is the entry point for parser
func (parser *Parser) Parse() (*ParsedRule, error) {
// Prior to parsing a rule, check for unbalanced parens
err := checkUnbalancedParens(parser.lexer.input)
if err != nil {
return nil, err
}
token, err := parser.lexer.nextToken()
if err != nil {
return nil, err
}
parser.currToken = token
node, err := parser.expr()
// Set the root to the returned root
parser.rule.root = node
return parser.rule, err
}
// Parse is the entry point to the rule parser infra.
// Users of the library should only be using this function.
func Parse(input string) (*ParsedRule, error) {
parser := NewParser(input)
rule, err := parser.Parse()
if err != nil {
return nil, err
}
return rule, nil
} | // Tree of parse nodes
rule *ParsedRule | random_line_split |
all_13.js | var searchData=
[
['table',['table',['../table_8c.html#a90098db02448ef8e3de8c811dcc37522',1,'table.c']]],
['table_2ec',['table.c',['../table_8c.html',1,'']]],
['table_2eh',['table.h',['../table_8h.html',1,'']]],
['table_5fatk_5faccept',['TABLE_ATK_ACCEPT',['../table_8h.html#a2670187e338d8b1b08f699ceb5a7e90c',1,'table.h']]],
['table_5fatk_5faccept_5flng',['TABLE_ATK_ACCEPT_LNG',['../table_8h.html#abe6c60026b1f1980e83e5759b5246c5e',1,'table.h']]],
['table_5fatk_5fchunked',['TABLE_ATK_CHUNKED',['../table_8h.html#a89772e3bed797cb16a658f935bc8c37b',1,'table.h']]],
['table_5fatk_5fcloudflare_5fnginx',['TABLE_ATK_CLOUDFLARE_NGINX',['../table_8h.html#a260fbfdad3b4d569ea5bf904538e0628',1,'table.h']]],
['table_5fatk_5fconnection_5fhdr',['TABLE_ATK_CONNECTION_HDR',['../table_8h.html#afdae87bf33d9c2f2407b37fab6e91628',1,'table.h']]],
['table_5fatk_5fcontent_5flength_5fhdr',['TABLE_ATK_CONTENT_LENGTH_HDR',['../table_8h.html#a98de50549fdb839f2ba413f44e0d3e6a',1,'table.h']]],
['table_5fatk_5fcontent_5ftype',['TABLE_ATK_CONTENT_TYPE',['../table_8h.html#a1f919690118832e6a9c5680ca6ab8686',1,'table.h']]],
['table_5fatk_5fdosarrest',['TABLE_ATK_DOSARREST',['../table_8h.html#a3e6171ef594c03bf2526e8d43c9fbdb3',1,'table.h']]],
['table_5fatk_5fkeep_5falive',['TABLE_ATK_KEEP_ALIVE',['../table_8h.html#af12a034b04caa3e4ac948b0ef4761da6',1,'table.h']]],
['table_5fatk_5fkeep_5falive_5fhdr',['TABLE_ATK_KEEP_ALIVE_HDR',['../table_8h.html#a1665384941722febe6d2a8b1e58655be',1,'table.h']]],
['table_5fatk_5flocation_5fhdr',['TABLE_ATK_LOCATION_HDR',['../table_8h.html#a0fcf8e231e646aaed0d7fcb495da1935',1,'table.h']]],
['table_5fatk_5fnserv',['TABLE_ATK_NSERV',['../table_8h.html#ae618e3e3e5bb6a8224428fb0eb024c50',1,'table.h']]],
['table_5fatk_5frefresh_5fhdr',['TABLE_ATK_REFRESH_HDR',['../table_8h.html#a56932ae77dd79f16be9f28341dfd438f',1,'table.h']]],
['table_5fatk_5fresolver',['TABLE_ATK_RESOLVER',['../table_8h.html#abb4c0b3796ea4bde2bf4f25538cc3f5e',1,'table.h']]],
['table_5fatk_5fset_5fcookie',['TABLE_ATK_SET_COOKIE',['../table_8h.html#aa5dbdc8879922e71d9a920cc8bc91657',1,'table.h']]],
['table_5fatk_5fset_5fcookie_5fhdr',['TABLE_ATK_SET_COOKIE_HDR',['../table_8h.html#a31f1bf8dcf162592001058f12362eb55',1,'table.h']]],
['table_5fatk_5ftransfer_5fencoding_5fhdr',['TABLE_ATK_TRANSFER_ENCODING_HDR',['../table_8h.html#a6bb03316c5d32d6053734ac6ceec2d82',1,'table.h']]],
['table_5fatk_5fvse',['TABLE_ATK_VSE',['../table_8h.html#a2b2b737a21de0ee9d38eda3b2771d355',1,'table.h']]],
['table_5fcnc_5fdomain',['TABLE_CNC_DOMAIN',['../table_8h.html#a2f99458af0cd9449b6d8db50dc047f7c',1,'table.h']]],
['table_5fcnc_5fport',['TABLE_CNC_PORT',['../table_8h.html#a01ffacd2c4e44391a63f204bafbd1343',1,'table.h']]],
['table_5fexec_5fsuccess',['TABLE_EXEC_SUCCESS',['../table_8h.html#aad0424e600948a4dc855c3e7d33cf6a7',1,'table.h']]],
['table_5fhttp_5ffive',['TABLE_HTTP_FIVE',['../table_8h.html#ac1a27c372d2f9a9c055a1bc0196c8764',1,'table.h']]],
['table_5fhttp_5ffour',['TABLE_HTTP_FOUR',['../table_8h.html#aa990a86d534b2e4181148de832c921c5',1,'table.h']]],
['table_5fhttp_5fone',['TABLE_HTTP_ONE',['../table_8h.html#ab6dfc5137f16deb497a130057eb69830',1,'table.h']]],
['table_5fhttp_5fthree',['TABLE_HTTP_THREE',['../table_8h.html#af1d5bc3f0c5007adc2bb968ab21fda6c',1,'table.h']]],
['table_5fhttp_5ftwo',['TABLE_HTTP_TWO',['../table_8h.html#a99cbff873fe834c6ee8c966fd88c6bdc',1,'table.h']]],
['table_5finit',['table_init',['../table_8c.html#a79eb587b0c1d1a11eb5e75134ca01261',1,'table_init(void): table.c'],['../table_8h.html#a79eb587b0c1d1a11eb5e75134ca01261',1,'table_init(void): table.c']]],
['table_5fkey',['table_key',['../table_8c.html#afaef6e8f1a3f106fc5d74bd5f48fa3dd',1,'table.c']]],
['table_5fkiller_5fanime',['TABLE_KILLER_ANIME',['../table_8h.html#a3e70100afacdeb9574b9c019e528be40',1,'table.h']]],
['table_5fkiller_5fdeleted',['TABLE_KILLER_DELETED',['../table_8h.html#a6e6330e2db4184e52ac16bafd3f255cb',1,'table.h']]],
['table_5fkiller_5fexe',['TABLE_KILLER_EXE',['../table_8h.html#a2bdc9d197e52b92ed4a6f76e021080e8',1,'table.h']]],
['table_5fkiller_5ffd',['TABLE_KILLER_FD',['../table_8h.html#a7f1a329c7e10e06e16395b7e6e4db2c4',1,'table.h']]],
['table_5fkiller_5fproc',['TABLE_KILLER_PROC',['../table_8h.html#a9695f2b8287ebe5d3cded5348e2f0008',1,'table.h']]],
['table_5fkiller_5fsafe',['TABLE_KILLER_SAFE',['../table_8h.html#a69a175119e08f13e47c51aa3235e77aa',1,'table.h']]],
['table_5fkiller_5fstatus',['TABLE_KILLER_STATUS',['../table_8h.html#a6fc91c8fe5c6fbada7b70c474499bf91',1,'table.h']]],
['table_5flock_5fval',['table_lock_val',['../table_8c.html#a0c03e9a9ebea1c0e6b98b969b56825b2',1,'table_lock_val(uint8_t id): table.c'],['../table_8h.html#a399eee42b057585a6c4ba6efc19c277c',1,'table_lock_val(uint8_t): table.c']]],
['table_5fmax_5fkeys',['TABLE_MAX_KEYS',['../table_8h.html#a434d46cfbdf47b6e1f14d83c29ac4b32',1,'table.h']]],
['table_5fmem_5fqbot',['TABLE_MEM_QBOT',['../table_8h.html#a2fab0a4fd817beaaf5aaa64d12570e41',1,'table.h']]],
['table_5fmem_5fqbot2',['TABLE_MEM_QBOT2',['../table_8h.html#a84a4a40867454fe210ae41b54f714ba3',1,'table.h']]],
['table_5fmem_5fqbot3',['TABLE_MEM_QBOT3',['../table_8h.html#a986eb7cd7fffbcff55e548a0d15711ae',1,'table.h']]],
['table_5fmem_5fremaiten',['TABLE_MEM_REMAITEN',['../table_8h.html#ad354533a1b16515e68349532d17fe5f6',1,'table.h']]],
['table_5fmem_5fupx',['TABLE_MEM_UPX',['../table_8h.html#a98b99a3ec29f08ad36d1829a3946d07b',1,'table.h']]],
['table_5fmem_5fzollard',['TABLE_MEM_ZOLLARD',['../table_8h.html#a11dfa91d25c285d4426a07f3fbae19d9',1,'table.h']]],
['table_5fprocess_5fargv',['TABLE_PROCESS_ARGV',['../table_8h.html#abbd6d1bac086452cf5d2798533849e70',1,'table.h']]],
['table_5fretrieve_5fval',['table_retrieve_val',['../table_8c.html#a960bec3b47b287c42b06c26eb370ed67',1,'table_retrieve_val(int id, int *len): table.c'],['../table_8h.html#a66ca9372f9f5d6d89326044287496af6',1,'table_retrieve_val(int, int *): table.c']]],
['table_5fscan_5fcb_5fdomain',['TABLE_SCAN_CB_DOMAIN',['../table_8h.html#a4e6214d35394b8de3a0eb9ed3f4d74a5',1,'table.h']]],
['table_5fscan_5fcb_5fport',['TABLE_SCAN_CB_PORT',['../table_8h.html#a61681f81c214ba8539ebd133b4eaaa35',1,'table.h']]],
['table_5fscan_5fenable',['TABLE_SCAN_ENABLE',['../table_8h.html#a577985acedbf4e923d0fb33980a3ed0d',1,'table.h']]],
['table_5fscan_5fkill_5f9',['TABLE_SCAN_KILL_9',['../table_8h.html#a83eb24725e4d17e9d86983db8f952cfd',1,'table.h']]],
['table_5fscan_5fncorrect',['TABLE_SCAN_NCORRECT',['../table_8h.html#ae37c352aff727690678d80789e9153a6',1,'table.h']]],
['table_5fscan_5fps',['TABLE_SCAN_PS',['../table_8h.html#a0fe8afccf2d4bf1cbfc53b271491cbb7',1,'table.h']]],
['table_5fscan_5fquery',['TABLE_SCAN_QUERY',['../table_8h.html#a573abc111267f05189a98e4745fc8e60',1,'table.h']]],
['table_5fscan_5fresp',['TABLE_SCAN_RESP',['../table_8h.html#a79e035097ac55fe0a2533ca035b07102',1,'table.h']]],
['table_5fscan_5fsh',['TABLE_SCAN_SH',['../table_8h.html#a580e3d9937b4ebf6cf286fd2cd0820a7',1,'table.h']]],
['table_5fscan_5fshell',['TABLE_SCAN_SHELL',['../table_8h.html#a1ef90a6398b8126ff3a500acd6d6b566',1,'table.h']]],
['table_5fscan_5fsystem',['TABLE_SCAN_SYSTEM',['../table_8h.html#a778dd0ebf9da419861e8352b5c13046c',1,'table.h']]],
['table_5funlock_5fval',['table_unlock_val',['../table_8c.html#aa5a64b3cddd9889fca3114b5a87c402e',1,'table_unlock_val(uint8_t id): table.c'],['../table_8h.html#a945ccf4140ff645cd4b7b2db917a12de',1,'table_unlock_val(uint8_t): table.c']]],
['table_5fvalue',['table_value',['../structtable__value.html',1,'']]],
['telnet_5farm_5fsubtype',['TELNET_ARM_SUBTYPE',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba821472fcea41f8e6cedc4c9153123851',1,'connection']]],
['telnet_5fcheck_5flogin',['TELNET_CHECK_LOGIN',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55baf592f402b74ddbe8d0ef5f9c639a083e',1,'connection']]],
['telnet_5fcleanup',['TELNET_CLEANUP',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55bae78cffc409b9ed638432c86ff3d2a3d5',1,'connection']]],
['telnet_5fclosed',['TELNET_CLOSED',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55baa1b4bff41eac9b0771879a7445e2e625',1,'connection']]],
['telnet_5fconnecting',['TELNET_CONNECTING',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba8998e127095176ae14edef8bde7299c9',1,'connection']]],
['telnet_5fcopy_5fecho',['TELNET_COPY_ECHO',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba717896f9bee5fc1d743b4644031ee815',1,'connection']]],
['telnet_5fdetect_5farch',['TELNET_DETECT_ARCH',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba752653bedd6abb27e63c5f22a85359bb',1,'connection']]],
['telnet_5finfo',['telnet_info',['../structtelnet__info.html',1,'']]],
['telnet_5finfo_2ec',['telnet_info.c',['../telnet__info_8c.html',1,'']]],
['telnet_5finfo_2eh',['telnet_info.h',['../telnet__info_8h.html',1,'']]],
['telnet_5finfo_5fnew',['telnet_info_new',['../telnet__info_8h.html#acce353c62f3a116e721646242182c337',1,'telnet_info_new(char *user, char *pass, char *arch, ipv4_t addr, port_t port, struct telnet_info *info): telnet_info.c'],['../telnet__info_8c.html#acce353c62f3a116e721646242182c337',1,'telnet_info_new(char *user, char *pass, char *arch, ipv4_t addr, port_t port, struct telnet_info *info): telnet_info.c']]],
['telnet_5finfo_5fparse',['telnet_info_parse',['../telnet__info_8h.html#ae0743363f8b8250176612170f00abe8f',1,'telnet_info_parse(char *str, struct telnet_info *out): telnet_info.c'],['../telnet__info_8c.html#ae0743363f8b8250176612170f00abe8f',1,'telnet_info_parse(char *str, struct telnet_info *out): telnet_info.c']]],
['telnet_5fparse_5fmounts',['TELNET_PARSE_MOUNTS',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55bae48fd3d9abd51f1ea571568362f75d6e',1,'connection']]],
['telnet_5fparse_5fps',['TELNET_PARSE_PS',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba8f2fbda0323cd96ef8613fba917824ce',1,'connection']]],
['telnet_5fpass_5fprompt',['TELNET_PASS_PROMPT',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba069eb41c505a98e20f9b8165532a1b1e',1,'connection']]],
['telnet_5fread_5fiacs',['TELNET_READ_IACS',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba8b8b1dabcf7f403ea393c6dab70b6c57',1,'connection']]],
['telnet_5fread_5fwriteable',['TELNET_READ_WRITEABLE',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55bafbb752f7bdf2ba25f10aa223bcdb2612',1,'connection']]],
['telnet_5frun_5fbinary',['TELNET_RUN_BINARY',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba8b7546ff9ea83cbca6f571cb2b610982',1,'connection']]],
['telnet_5fupload_5fecho',['TELNET_UPLOAD_ECHO',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba9228ad22627e4d005e4f5b0d29cfeefc',1,'connection']]],
['telnet_5fupload_5fmethods',['TELNET_UPLOAD_METHODS',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55baf7c3ce71da3ce76dea8983fe4953047d',1,'connection']]],
['telnet_5fupload_5ftftp',['TELNET_UPLOAD_TFTP',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55bac22061615917ac32ad622371da5aebc3',1,'connection']]],
['telnet_5fupload_5fwget',['TELNET_UPLOAD_WGET',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55babe5eda2fcf7fc14a2de30f126d52d0de',1,'connection']]],
['telnet_5fuser_5fprompt',['TELNET_USER_PROMPT',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba2a51f1b5fa2595ce04dbdba371555378',1,'connection']]],
['telnet_5fverify_5flogin',['TELNET_VERIFY_LOGIN',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55bae4406ee9adc88ace91975587f0c9774d',1,'connection']]],
['telnet_5fwaitpass_5fprompt',['TELNET_WAITPASS_PROMPT',['../structconnection.html#a06fc87d81c62e9abb8790b6e5713c55ba0cbee3610b4a9c9ccad087a9c85d1dd5',1,'connection']]],
['tftp_5fhost_5fip',['tftp_host_ip',['../structserver.html#a084b7a7e8a34f91c71fa99a1985f8e3a',1,'server']]],
['thread',['thread',['../structserver__worker.html#a01f75a9ad916f63a94e06a27635ba278',1,'server_worker']]],
['thread_5fid',['thread_id',['../structserver__worker.html#a2a8bb64b8b47a31561ece0650b3cdc6e',1,'server_worker']]],
['timed_5fout',['timed_out',['../single__load_8c.html#a9ae97c4e98713a8e03d2dc33d07248d5',1,'single_load.c']]],
['timeout',['timeout',['../structconnection.html#a7f1ad43d3bf79b40bc39dbb5a6c3a5ae',1,'connection']]],
['to_5fsend',['to_send',['../structattack__cfnull__state.html#a17ed86462464729ba8f66f8488aaf329',1,'attack_cfnull_state']]],
['to_5fthrd',['to_thrd',['../structserver.html#a672086e622555f69db447ee87ffb0dee',1,'server']]], | ['token_5fresponse',['TOKEN_RESPONSE',['../loader_2src_2headers_2includes_8h.html#ae23e7bab8d63782c87258cdf5c6b3efd',1,'includes.h']]],
['token_5fverify',['TOKEN_VERIFY',['../single__load_8c.html#aa2b7fda41691fd8bf8cdfb06ca803451',1,'single_load.c']]],
['total_5fechoes',['total_echoes',['../structserver.html#a5dc4c264e773d0bada48651a52c59c47',1,'server']]],
['total_5ffailures',['total_failures',['../structserver.html#a9876401c17a01e030d0d1c0cccb081b3',1,'server']]],
['total_5finput',['total_input',['../structserver.html#a6272db0d7f3ee3172f5908fc6c53f52c',1,'server']]],
['total_5flogins',['total_logins',['../structserver.html#a775d7dfa5b4752f9de7d4b1a4f848562',1,'server']]],
['total_5fsuccesses',['total_successes',['../structserver.html#a4077e0ba638cb87bfaa0412929cd8cf6',1,'server']]],
['total_5ftftps',['total_tftps',['../structserver.html#a366af7f3a5ea2f51a94717763cc60533',1,'server']]],
['total_5fwgets',['total_wgets',['../structserver.html#aba6bc4241f091f9fe65a221e47fcae4f',1,'server']]],
['tries',['tries',['../structscanner__connection.html#a5522305c1de4f07a6c6feb7275863f8f',1,'scanner_connection']]],
['true',['TRUE',['../loader_2src_2headers_2includes_8h.html#aa8cecfc5c5c054d2875c03e77b7be15d',1,'TRUE(): includes.h'],['../mirai_2bot_2includes_8h.html#aa8cecfc5c5c054d2875c03e77b7be15d',1,'TRUE(): includes.h']]],
['ttl',['ttl',['../structdns__resource.html#a48b9f3382e0929bbd75cda2bf2838126',1,'dns_resource::ttl()'],['../protocol_8h.html#a48b9f3382e0929bbd75cda2bf2838126',1,'ttl(): protocol.h']]],
['type',['type',['../structdns__resource.html#acb5cfd209ba75c853d03f701e7f91679',1,'dns_resource::type()'],['../protocol_8h.html#acb5cfd209ba75c853d03f701e7f91679',1,'type(): protocol.h']]]
]; | ['token',['TOKEN',['../single__load_8c.html#a5c3a83864bf5991d09aa5c2abb911bf0',1,'single_load.c']]],
['token_5fquery',['TOKEN_QUERY',['../loader_2src_2headers_2includes_8h.html#a5584596b6561bf1ef1c462d5032c6e12',1,'includes.h']]], | random_line_split |
battle.js | /**
* @license
* Copyright 2013 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview Creates an pond for avatars to compete in.
* @author fraser@google.com (Neil Fraser)
*/
'use strict';
goog.provide('Pond.Battle');
goog.require('Blockly.utils.Coordinate');
goog.require('Blockly.utils.math');
goog.require('Pond.Avatar');
/**
* List of avatars in this battle.
* @type !Array<!Pond.Avatar>
*/
Pond.Battle.AVATARS = [];
/**
* Ordered list of avatar with the best avatar first.
* @type !Array<!Pond.Avatar>
*/
Pond.Battle.RANK = [];
/**
* List of events to be visualized.
* @type !Array<!Object>
*/
Pond.Battle.EVENTS = [];
/**
* List of missiles in flight.
* @type !Array<!Object>
*/
Pond.Battle.MISSILES = [];
/**
* Speed of game execution.
*/
Pond.Battle.GAME_FPS = 50;
/**
* Speed of avatar execution.
*/
Pond.Battle.STATEMENTS_PER_FRAME = 100;
/**
* Number of seconds it takes to reload the cannon.
*/
Pond.Battle.RELOAD_TIME = .5;
/**
* The avatar currently executing code.
* @type Pond.Avatar
*/
Pond.Battle.currentAvatar = null;
/**
* Speed of avatar movement.
*/
Pond.Battle.AVATAR_SPEED = 1;
/**
* Speed of missile movement.
*/
Pond.Battle.MISSILE_SPEED = 3;
/**
* Rate of acceleration.
*/
Pond.Battle.ACCELERATION = 5;
/**
* Center to center distance for avatars to collide.
*/
Pond.Battle.COLLISION_RADIUS = 5;
/**
* Damage from worst-case collision.
*/
Pond.Battle.COLLISION_DAMAGE = 3;
/**
* PID of executing task.
*/
Pond.Battle.pid = 0;
/**
* Time to end the battle.
* @private
*/
Pond.Battle.endTime_ = 0;
/**
* Number of ticks used in battle.
*/
Pond.Battle.ticks = 0;
/**
* Time limit of game (in milliseconds).
*/
Pond.Battle.TIME_LIMIT = 5 * 60 * 1000;
/**
* Callback function for end of game.
* @type Function
*/
Pond.Battle.doneCallback_ = null;
/**
* Stop and reset the battle.
*/
Pond.Battle.reset = function() {
clearTimeout(Pond.Battle.pid);
Pond.Battle.EVENTS.length = 0;
Pond.Battle.MISSILES.length = 0;
Pond.Battle.RANK.length = 0;
Pond.Battle.ticks = 0;
for (const avatar of Pond.Battle.AVATARS) {
avatar.reset();
}
};
/**
* Start the battle executing. Avatars should already be added.
* @param {Function} doneCallback Function to call when game ends.
*/
Pond.Battle.start = function(doneCallback) {
Pond.Battle.doneCallback_ = doneCallback;
Pond.Battle.endTime_ = Date.now() + Pond.Battle.TIME_LIMIT;
console.log('Starting battle with ' + Pond.Battle.AVATARS.length +
' avatars.');
for (const avatar of Pond.Battle.AVATARS) {
try {
avatar.initInterpreter();
} catch (e) {
console.log(avatar + ' fails to load: ' + e);
avatar.die();
}
}
Pond.Battle.update();
};
/**
* Update the avatars states.
*/
Pond.Battle.update = function() {
// Execute a few statements.
Pond.Battle.updateInterpreters_();
// Update state of all missiles.
Pond.Battle.updateMissiles_();
// Update state of all avatars.
Pond.Battle.updateAvatars_();
if (Pond.Battle.AVATARS.length <= Pond.Battle.RANK.length + 1) {
// Game over because there are less than two avatars.
// Schedule the game to end in a second.
Pond.Battle.endTime_ = Math.min(Pond.Battle.endTime_, Date.now() + 1000);
}
if (Date.now() > Pond.Battle.endTime_) {
// Timeout reached. End the game.
Pond.Battle.stop();
} else {
// Do it all again in a moment.
Pond.Battle.pid =
setTimeout(Pond.Battle.update, 1000 / Pond.Battle.GAME_FPS);
}
};
Pond.Battle.stop = function() {
// Add the survivors to the ranks based on their damage.
const survivors = [];
for (const avatar of Pond.Battle.AVATARS) {
if (!avatar.dead) {
survivors.push(avatar);
}
}
const survivorCount = survivors.length;
survivors.sort(function(a, b) {return a.damage - b.damage;});
while (survivors.length) {
Pond.Battle.RANK.unshift(survivors.pop());
}
// Fire any callback.
Pond.Battle.doneCallback_ && Pond.Battle.doneCallback_(survivorCount);
};
/**
* Update state of all missiles.
* @private
*/
Pond.Battle.updateMissiles_ = function() {
for (let i = Pond.Battle.MISSILES.length - 1; i >= 0; i--) {
const missile = Pond.Battle.MISSILES[i];
missile.progress += Pond.Battle.MISSILE_SPEED;
let maxDamage = 0;
if (missile.range - missile.progress < Pond.Battle.MISSILE_SPEED / 2) {
// Boom.
Pond.Battle.MISSILES.splice(i, 1);
// Damage any avatar in range.
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
const range = Blockly.utils.Coordinate.distance(avatar.loc, missile.endLoc);
const damage = (1 - range / 4) * 10;
if (damage > 0) {
avatar.addDamage(damage);
maxDamage = Math.max(maxDamage, damage);
}
}
Pond.Battle.EVENTS.push({'type': 'BOOM', 'damage': maxDamage,
'x': missile.endLoc.x, 'y': missile.endLoc.y});
}
}
};
/**
* Update state of all avatars.
* @private
*/
Pond.Battle.updateAvatars_ = function() {
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
// Accelerate or decelerate.
if (avatar.speed < avatar.desiredSpeed) {
avatar.speed = Math.min(avatar.speed + Pond.Battle.ACCELERATION,
avatar.desiredSpeed);
} else if (avatar.speed > avatar.desiredSpeed) {
avatar.speed = Math.max(avatar.speed - Pond.Battle.ACCELERATION,
avatar.desiredSpeed);
}
// Move.
if (avatar.speed > 0) {
const tuple = Pond.Battle.closestNeighbour(avatar);
const closestBefore = tuple[1];
const angleRadians = Blockly.utils.math.toRadians(avatar.degree);
const speed = avatar.speed / 100 * Pond.Battle.AVATAR_SPEED;
const dx = Math.cos(angleRadians) * speed;
const dy = Math.sin(angleRadians) * speed;
avatar.loc.x += dx;
avatar.loc.y += dy;
if (avatar.loc.x < 0 || avatar.loc.x > 100 ||
avatar.loc.y < 0 || avatar.loc.y > 100) {
// Collision with wall.
avatar.loc.x = Blockly.utils.math.clamp(avatar.loc.x, 0, 100);
avatar.loc.y = Blockly.utils.math.clamp(avatar.loc.y, 0, 100);
const damage = avatar.speed / 100 * Pond.Battle.COLLISION_DAMAGE;
avatar.addDamage(damage);
avatar.speed = 0;
avatar.desiredSpeed = 0;
Pond.Battle.EVENTS.push(
{'type': 'CRASH', 'avatar': avatar, 'damage': damage});
} else |
}
}
};
/**
* Let the avatars think.
* @private
*/
Pond.Battle.updateInterpreters_ = function() {
for (let i = 0; i < Pond.Battle.STATEMENTS_PER_FRAME; i++) {
Pond.Battle.ticks++;
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
Pond.Battle.currentAvatar = avatar;
try {
avatar.interpreter.step();
} catch (e) {
console.log(avatar + ' throws an error: ' + e);
avatar.die();
}
Pond.Battle.currentAvatar = null;
}
}
};
/**
* Inject the Pond API into a JavaScript interpreter.
* @param {!Interpreter} interpreter The JS-Interpreter.
* @param {!Interpreter.Object} globalObject Global object.
*/
Pond.Battle.initInterpreter = function(interpreter, globalObject) {
// API
let wrapper;
wrapper = function(value) {
// Restrict logging to just numbers so that the console doesn't fill up
// with 'problematic' messages when running 3rd party ducks.
console.log(Pond.Battle.currentAvatar.name + ' logs: ' + Number(value));
};
wrap('log');
wrapper = function(degree, resolution) {
return Pond.Battle.currentAvatar.scan(degree, resolution);
};
wrap('scan');
wrapper = function(degree, range) {
return Pond.Battle.currentAvatar.cannon(degree, range);
};
wrap('cannon');
wrapper = function(degree, speed) {
Pond.Battle.currentAvatar.drive(degree, speed);
};
wrap('drive');
wrap('swim');
wrapper = function() {
Pond.Battle.currentAvatar.stop();
};
wrap('stop');
wrapper = function() {
return Pond.Battle.currentAvatar.damage;
};
wrap('damage');
wrapper = function() {
return 100 - Pond.Battle.currentAvatar.damage;
};
wrap('health');
wrapper = function() {
return Pond.Battle.currentAvatar.speed;
};
wrap('speed');
wrapper = function() {
return Pond.Battle.currentAvatar.loc.x;
};
wrap('loc_x');
wrap('getX');
wrapper = function() {
return Pond.Battle.currentAvatar.loc.y;
};
wrap('loc_y');
wrap('getY');
function wrap(name) {
interpreter.setProperty(globalObject, name,
interpreter.createNativeFunction(wrapper, false));
}
const myMath = interpreter.getProperty(globalObject, 'Math');
if (myMath) {
wrapper = function(number) {
return Math.sin(Blockly.utils.math.toRadians(number));
};
wrapMath('sin_deg');
wrapper = function(number) {
return Math.cos(Blockly.utils.math.toRadians(number));
};
wrapMath('cos_deg');
wrapper = function(number) {
return Math.tan(Blockly.utils.math.toRadians(number));
};
wrapMath('tan_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.asin(number));
};
wrapMath('asin_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.acos(number));
};
wrapMath('acos_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.atan(number));
};
wrapMath('atan_deg');
function wrapMath(name) {
interpreter.setProperty(myMath, name,
interpreter.createNativeFunction(wrapper, false));
}
}
};
/**
* Finds the distance between the given avatar and its nearest neighbour.
* @param {!Pond.Avatar} avatar The avatar to find distances from.
* @returns {!Array} Tuple of closest avatar and distance to that avatar.
*/
Pond.Battle.closestNeighbour = function(avatar) {
let closest = null;
let distance = Infinity;
for (const neighbour of Pond.Battle.AVATARS) {
if (!neighbour.dead && avatar !== neighbour) {
const thisDistance = Math.min(distance,
Blockly.utils.Coordinate.distance(avatar.loc, neighbour.loc));
if (thisDistance < distance) {
distance = thisDistance;
closest = neighbour;
}
}
}
return [closest, distance];
};
| {
const tuple = Pond.Battle.closestNeighbour(avatar);
const [neighbour, closestAfter] = tuple;
if (closestAfter < Pond.Battle.COLLISION_RADIUS &&
closestBefore > closestAfter) {
// Collision with another avatar.
avatar.loc.x -= dx;
avatar.loc.y -= dy;
const damage = Math.max(avatar.speed, neighbour.speed) / 100 *
Pond.Battle.COLLISION_DAMAGE;
avatar.addDamage(damage);
avatar.speed = 0;
avatar.desiredSpeed = 0;
neighbour.addDamage(damage);
neighbour.speed = 0;
neighbour.desiredSpeed = 0;
Pond.Battle.EVENTS.push(
{'type': 'CRASH', 'avatar': avatar, 'damage': damage},
{'type': 'CRASH', 'avatar': neighbour, 'damage': damage});
}
} | conditional_block |
battle.js | /**
* @license
* Copyright 2013 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview Creates an pond for avatars to compete in.
* @author fraser@google.com (Neil Fraser)
*/
'use strict';
goog.provide('Pond.Battle');
goog.require('Blockly.utils.Coordinate');
goog.require('Blockly.utils.math');
goog.require('Pond.Avatar');
/**
* List of avatars in this battle.
* @type !Array<!Pond.Avatar>
*/
Pond.Battle.AVATARS = [];
/**
* Ordered list of avatar with the best avatar first.
* @type !Array<!Pond.Avatar>
*/
Pond.Battle.RANK = [];
/**
* List of events to be visualized.
* @type !Array<!Object>
*/
Pond.Battle.EVENTS = [];
/**
* List of missiles in flight.
* @type !Array<!Object>
*/
Pond.Battle.MISSILES = [];
/**
* Speed of game execution.
*/
Pond.Battle.GAME_FPS = 50;
/**
* Speed of avatar execution.
*/
Pond.Battle.STATEMENTS_PER_FRAME = 100;
/**
* Number of seconds it takes to reload the cannon.
*/
Pond.Battle.RELOAD_TIME = .5;
/**
* The avatar currently executing code.
* @type Pond.Avatar
*/
Pond.Battle.currentAvatar = null;
/**
* Speed of avatar movement.
*/
Pond.Battle.AVATAR_SPEED = 1;
/**
* Speed of missile movement.
*/
Pond.Battle.MISSILE_SPEED = 3;
/**
* Rate of acceleration.
*/
Pond.Battle.ACCELERATION = 5;
/**
* Center to center distance for avatars to collide.
*/
Pond.Battle.COLLISION_RADIUS = 5;
/**
* Damage from worst-case collision.
*/
Pond.Battle.COLLISION_DAMAGE = 3;
/**
* PID of executing task.
*/
Pond.Battle.pid = 0;
/**
* Time to end the battle.
* @private
*/
Pond.Battle.endTime_ = 0;
/**
* Number of ticks used in battle.
*/
Pond.Battle.ticks = 0;
/**
* Time limit of game (in milliseconds).
*/
Pond.Battle.TIME_LIMIT = 5 * 60 * 1000;
/**
* Callback function for end of game.
* @type Function
*/
Pond.Battle.doneCallback_ = null;
/**
* Stop and reset the battle.
*/
Pond.Battle.reset = function() {
clearTimeout(Pond.Battle.pid);
Pond.Battle.EVENTS.length = 0;
Pond.Battle.MISSILES.length = 0;
Pond.Battle.RANK.length = 0;
Pond.Battle.ticks = 0;
for (const avatar of Pond.Battle.AVATARS) {
avatar.reset();
}
};
/**
* Start the battle executing. Avatars should already be added.
* @param {Function} doneCallback Function to call when game ends.
*/
Pond.Battle.start = function(doneCallback) {
Pond.Battle.doneCallback_ = doneCallback;
Pond.Battle.endTime_ = Date.now() + Pond.Battle.TIME_LIMIT;
console.log('Starting battle with ' + Pond.Battle.AVATARS.length +
' avatars.');
for (const avatar of Pond.Battle.AVATARS) {
try {
avatar.initInterpreter();
} catch (e) {
console.log(avatar + ' fails to load: ' + e);
avatar.die();
}
}
Pond.Battle.update();
};
/**
* Update the avatars states.
*/
Pond.Battle.update = function() {
// Execute a few statements.
Pond.Battle.updateInterpreters_();
// Update state of all missiles.
Pond.Battle.updateMissiles_();
// Update state of all avatars.
Pond.Battle.updateAvatars_();
if (Pond.Battle.AVATARS.length <= Pond.Battle.RANK.length + 1) {
// Game over because there are less than two avatars.
// Schedule the game to end in a second.
Pond.Battle.endTime_ = Math.min(Pond.Battle.endTime_, Date.now() + 1000);
}
if (Date.now() > Pond.Battle.endTime_) {
// Timeout reached. End the game.
Pond.Battle.stop();
} else {
// Do it all again in a moment.
Pond.Battle.pid =
setTimeout(Pond.Battle.update, 1000 / Pond.Battle.GAME_FPS);
}
};
Pond.Battle.stop = function() {
// Add the survivors to the ranks based on their damage.
const survivors = [];
for (const avatar of Pond.Battle.AVATARS) {
if (!avatar.dead) {
survivors.push(avatar);
}
}
const survivorCount = survivors.length;
survivors.sort(function(a, b) {return a.damage - b.damage;});
while (survivors.length) {
Pond.Battle.RANK.unshift(survivors.pop());
}
// Fire any callback.
Pond.Battle.doneCallback_ && Pond.Battle.doneCallback_(survivorCount);
};
/**
* Update state of all missiles.
* @private
*/
Pond.Battle.updateMissiles_ = function() {
for (let i = Pond.Battle.MISSILES.length - 1; i >= 0; i--) {
const missile = Pond.Battle.MISSILES[i];
missile.progress += Pond.Battle.MISSILE_SPEED;
let maxDamage = 0;
if (missile.range - missile.progress < Pond.Battle.MISSILE_SPEED / 2) {
// Boom.
Pond.Battle.MISSILES.splice(i, 1);
// Damage any avatar in range.
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
const range = Blockly.utils.Coordinate.distance(avatar.loc, missile.endLoc);
const damage = (1 - range / 4) * 10;
if (damage > 0) {
avatar.addDamage(damage);
maxDamage = Math.max(maxDamage, damage);
}
}
Pond.Battle.EVENTS.push({'type': 'BOOM', 'damage': maxDamage,
'x': missile.endLoc.x, 'y': missile.endLoc.y});
}
}
};
/**
* Update state of all avatars.
* @private
*/
Pond.Battle.updateAvatars_ = function() {
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
// Accelerate or decelerate.
if (avatar.speed < avatar.desiredSpeed) {
avatar.speed = Math.min(avatar.speed + Pond.Battle.ACCELERATION,
avatar.desiredSpeed);
} else if (avatar.speed > avatar.desiredSpeed) {
avatar.speed = Math.max(avatar.speed - Pond.Battle.ACCELERATION,
avatar.desiredSpeed);
}
// Move.
if (avatar.speed > 0) {
const tuple = Pond.Battle.closestNeighbour(avatar);
const closestBefore = tuple[1];
const angleRadians = Blockly.utils.math.toRadians(avatar.degree);
const speed = avatar.speed / 100 * Pond.Battle.AVATAR_SPEED;
const dx = Math.cos(angleRadians) * speed;
const dy = Math.sin(angleRadians) * speed;
avatar.loc.x += dx;
avatar.loc.y += dy;
if (avatar.loc.x < 0 || avatar.loc.x > 100 ||
avatar.loc.y < 0 || avatar.loc.y > 100) {
// Collision with wall.
avatar.loc.x = Blockly.utils.math.clamp(avatar.loc.x, 0, 100);
avatar.loc.y = Blockly.utils.math.clamp(avatar.loc.y, 0, 100);
const damage = avatar.speed / 100 * Pond.Battle.COLLISION_DAMAGE;
avatar.addDamage(damage);
avatar.speed = 0;
avatar.desiredSpeed = 0;
Pond.Battle.EVENTS.push(
{'type': 'CRASH', 'avatar': avatar, 'damage': damage});
} else {
const tuple = Pond.Battle.closestNeighbour(avatar);
const [neighbour, closestAfter] = tuple;
if (closestAfter < Pond.Battle.COLLISION_RADIUS &&
closestBefore > closestAfter) {
// Collision with another avatar.
avatar.loc.x -= dx;
avatar.loc.y -= dy;
const damage = Math.max(avatar.speed, neighbour.speed) / 100 *
Pond.Battle.COLLISION_DAMAGE;
avatar.addDamage(damage);
avatar.speed = 0;
avatar.desiredSpeed = 0;
neighbour.addDamage(damage);
neighbour.speed = 0;
neighbour.desiredSpeed = 0;
Pond.Battle.EVENTS.push(
{'type': 'CRASH', 'avatar': avatar, 'damage': damage},
{'type': 'CRASH', 'avatar': neighbour, 'damage': damage});
}
}
}
}
};
/**
* Let the avatars think.
* @private
*/
Pond.Battle.updateInterpreters_ = function() {
for (let i = 0; i < Pond.Battle.STATEMENTS_PER_FRAME; i++) {
Pond.Battle.ticks++;
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
Pond.Battle.currentAvatar = avatar;
try {
avatar.interpreter.step();
} catch (e) {
console.log(avatar + ' throws an error: ' + e);
avatar.die();
}
Pond.Battle.currentAvatar = null;
}
}
};
/**
* Inject the Pond API into a JavaScript interpreter.
* @param {!Interpreter} interpreter The JS-Interpreter.
* @param {!Interpreter.Object} globalObject Global object.
*/
Pond.Battle.initInterpreter = function(interpreter, globalObject) {
// API
let wrapper;
wrapper = function(value) {
// Restrict logging to just numbers so that the console doesn't fill up
// with 'problematic' messages when running 3rd party ducks.
console.log(Pond.Battle.currentAvatar.name + ' logs: ' + Number(value));
};
wrap('log');
wrapper = function(degree, resolution) {
return Pond.Battle.currentAvatar.scan(degree, resolution);
};
wrap('scan');
wrapper = function(degree, range) {
return Pond.Battle.currentAvatar.cannon(degree, range);
};
wrap('cannon');
wrapper = function(degree, speed) {
Pond.Battle.currentAvatar.drive(degree, speed);
};
wrap('drive');
wrap('swim');
wrapper = function() {
Pond.Battle.currentAvatar.stop();
};
wrap('stop');
wrapper = function() {
return Pond.Battle.currentAvatar.damage;
};
wrap('damage');
wrapper = function() {
return 100 - Pond.Battle.currentAvatar.damage;
};
wrap('health');
wrapper = function() {
return Pond.Battle.currentAvatar.speed;
};
wrap('speed');
wrapper = function() {
return Pond.Battle.currentAvatar.loc.x;
};
wrap('loc_x');
wrap('getX');
wrapper = function() {
return Pond.Battle.currentAvatar.loc.y;
};
wrap('loc_y');
wrap('getY');
function wrap(name) {
interpreter.setProperty(globalObject, name,
interpreter.createNativeFunction(wrapper, false));
}
const myMath = interpreter.getProperty(globalObject, 'Math');
if (myMath) {
wrapper = function(number) {
return Math.sin(Blockly.utils.math.toRadians(number));
};
wrapMath('sin_deg');
wrapper = function(number) {
return Math.cos(Blockly.utils.math.toRadians(number));
};
wrapMath('cos_deg');
wrapper = function(number) {
return Math.tan(Blockly.utils.math.toRadians(number));
};
wrapMath('tan_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.asin(number));
};
wrapMath('asin_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.acos(number));
};
wrapMath('acos_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.atan(number));
};
wrapMath('atan_deg');
function | (name) {
interpreter.setProperty(myMath, name,
interpreter.createNativeFunction(wrapper, false));
}
}
};
/**
* Finds the distance between the given avatar and its nearest neighbour.
* @param {!Pond.Avatar} avatar The avatar to find distances from.
* @returns {!Array} Tuple of closest avatar and distance to that avatar.
*/
Pond.Battle.closestNeighbour = function(avatar) {
let closest = null;
let distance = Infinity;
for (const neighbour of Pond.Battle.AVATARS) {
if (!neighbour.dead && avatar !== neighbour) {
const thisDistance = Math.min(distance,
Blockly.utils.Coordinate.distance(avatar.loc, neighbour.loc));
if (thisDistance < distance) {
distance = thisDistance;
closest = neighbour;
}
}
}
return [closest, distance];
};
| wrapMath | identifier_name |
battle.js | /**
* @license
* Copyright 2013 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview Creates an pond for avatars to compete in.
* @author fraser@google.com (Neil Fraser)
*/
'use strict';
goog.provide('Pond.Battle');
goog.require('Blockly.utils.Coordinate');
goog.require('Blockly.utils.math');
goog.require('Pond.Avatar');
/**
* List of avatars in this battle.
* @type !Array<!Pond.Avatar>
*/
Pond.Battle.AVATARS = [];
/**
* Ordered list of avatar with the best avatar first.
* @type !Array<!Pond.Avatar>
*/
Pond.Battle.RANK = [];
/**
* List of events to be visualized.
* @type !Array<!Object>
*/
Pond.Battle.EVENTS = [];
/**
* List of missiles in flight.
* @type !Array<!Object>
*/
Pond.Battle.MISSILES = [];
/**
* Speed of game execution.
*/
Pond.Battle.GAME_FPS = 50;
/**
* Speed of avatar execution.
*/
Pond.Battle.STATEMENTS_PER_FRAME = 100;
/**
* Number of seconds it takes to reload the cannon.
*/
Pond.Battle.RELOAD_TIME = .5;
/**
* The avatar currently executing code.
* @type Pond.Avatar
*/
Pond.Battle.currentAvatar = null;
/**
* Speed of avatar movement.
*/
Pond.Battle.AVATAR_SPEED = 1;
/**
* Speed of missile movement.
*/
Pond.Battle.MISSILE_SPEED = 3;
/**
* Rate of acceleration.
*/
Pond.Battle.ACCELERATION = 5;
/**
* Center to center distance for avatars to collide.
*/
Pond.Battle.COLLISION_RADIUS = 5;
/**
* Damage from worst-case collision.
*/
Pond.Battle.COLLISION_DAMAGE = 3;
/**
* PID of executing task.
*/
Pond.Battle.pid = 0;
/**
* Time to end the battle.
* @private
*/
Pond.Battle.endTime_ = 0;
/**
* Number of ticks used in battle.
*/
Pond.Battle.ticks = 0;
/**
* Time limit of game (in milliseconds).
*/
Pond.Battle.TIME_LIMIT = 5 * 60 * 1000;
/**
* Callback function for end of game.
* @type Function
*/
Pond.Battle.doneCallback_ = null;
/**
* Stop and reset the battle.
*/
Pond.Battle.reset = function() {
clearTimeout(Pond.Battle.pid);
Pond.Battle.EVENTS.length = 0;
Pond.Battle.MISSILES.length = 0;
Pond.Battle.RANK.length = 0;
Pond.Battle.ticks = 0;
for (const avatar of Pond.Battle.AVATARS) {
avatar.reset();
}
};
/**
* Start the battle executing. Avatars should already be added.
* @param {Function} doneCallback Function to call when game ends.
*/
Pond.Battle.start = function(doneCallback) {
Pond.Battle.doneCallback_ = doneCallback;
Pond.Battle.endTime_ = Date.now() + Pond.Battle.TIME_LIMIT;
console.log('Starting battle with ' + Pond.Battle.AVATARS.length +
' avatars.');
for (const avatar of Pond.Battle.AVATARS) {
try {
avatar.initInterpreter();
} catch (e) {
console.log(avatar + ' fails to load: ' + e);
avatar.die();
}
}
Pond.Battle.update();
};
/**
* Update the avatars states.
*/
Pond.Battle.update = function() {
// Execute a few statements.
Pond.Battle.updateInterpreters_();
// Update state of all missiles.
Pond.Battle.updateMissiles_();
// Update state of all avatars.
Pond.Battle.updateAvatars_();
if (Pond.Battle.AVATARS.length <= Pond.Battle.RANK.length + 1) {
// Game over because there are less than two avatars.
// Schedule the game to end in a second.
Pond.Battle.endTime_ = Math.min(Pond.Battle.endTime_, Date.now() + 1000);
}
if (Date.now() > Pond.Battle.endTime_) {
// Timeout reached. End the game.
Pond.Battle.stop();
} else {
// Do it all again in a moment.
Pond.Battle.pid =
setTimeout(Pond.Battle.update, 1000 / Pond.Battle.GAME_FPS);
}
};
Pond.Battle.stop = function() {
// Add the survivors to the ranks based on their damage.
const survivors = [];
for (const avatar of Pond.Battle.AVATARS) {
if (!avatar.dead) {
survivors.push(avatar);
}
}
const survivorCount = survivors.length;
survivors.sort(function(a, b) {return a.damage - b.damage;});
while (survivors.length) {
Pond.Battle.RANK.unshift(survivors.pop());
}
// Fire any callback.
Pond.Battle.doneCallback_ && Pond.Battle.doneCallback_(survivorCount);
};
/**
* Update state of all missiles.
* @private
*/
Pond.Battle.updateMissiles_ = function() {
for (let i = Pond.Battle.MISSILES.length - 1; i >= 0; i--) {
const missile = Pond.Battle.MISSILES[i];
missile.progress += Pond.Battle.MISSILE_SPEED;
let maxDamage = 0;
if (missile.range - missile.progress < Pond.Battle.MISSILE_SPEED / 2) {
// Boom.
Pond.Battle.MISSILES.splice(i, 1);
// Damage any avatar in range.
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
const range = Blockly.utils.Coordinate.distance(avatar.loc, missile.endLoc);
const damage = (1 - range / 4) * 10;
if (damage > 0) {
avatar.addDamage(damage);
maxDamage = Math.max(maxDamage, damage);
}
}
Pond.Battle.EVENTS.push({'type': 'BOOM', 'damage': maxDamage,
'x': missile.endLoc.x, 'y': missile.endLoc.y});
}
}
};
/**
* Update state of all avatars.
* @private
*/
Pond.Battle.updateAvatars_ = function() {
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
// Accelerate or decelerate.
if (avatar.speed < avatar.desiredSpeed) {
avatar.speed = Math.min(avatar.speed + Pond.Battle.ACCELERATION,
avatar.desiredSpeed);
} else if (avatar.speed > avatar.desiredSpeed) {
avatar.speed = Math.max(avatar.speed - Pond.Battle.ACCELERATION,
avatar.desiredSpeed);
}
// Move.
if (avatar.speed > 0) {
const tuple = Pond.Battle.closestNeighbour(avatar);
const closestBefore = tuple[1];
const angleRadians = Blockly.utils.math.toRadians(avatar.degree);
const speed = avatar.speed / 100 * Pond.Battle.AVATAR_SPEED;
const dx = Math.cos(angleRadians) * speed;
const dy = Math.sin(angleRadians) * speed;
avatar.loc.x += dx;
avatar.loc.y += dy;
if (avatar.loc.x < 0 || avatar.loc.x > 100 ||
avatar.loc.y < 0 || avatar.loc.y > 100) {
// Collision with wall.
avatar.loc.x = Blockly.utils.math.clamp(avatar.loc.x, 0, 100);
avatar.loc.y = Blockly.utils.math.clamp(avatar.loc.y, 0, 100);
const damage = avatar.speed / 100 * Pond.Battle.COLLISION_DAMAGE;
avatar.addDamage(damage);
avatar.speed = 0;
avatar.desiredSpeed = 0;
Pond.Battle.EVENTS.push(
{'type': 'CRASH', 'avatar': avatar, 'damage': damage});
} else {
const tuple = Pond.Battle.closestNeighbour(avatar);
const [neighbour, closestAfter] = tuple;
if (closestAfter < Pond.Battle.COLLISION_RADIUS &&
closestBefore > closestAfter) {
// Collision with another avatar.
avatar.loc.x -= dx;
avatar.loc.y -= dy;
const damage = Math.max(avatar.speed, neighbour.speed) / 100 *
Pond.Battle.COLLISION_DAMAGE;
avatar.addDamage(damage);
avatar.speed = 0;
avatar.desiredSpeed = 0;
neighbour.addDamage(damage);
neighbour.speed = 0;
neighbour.desiredSpeed = 0;
Pond.Battle.EVENTS.push(
{'type': 'CRASH', 'avatar': avatar, 'damage': damage},
{'type': 'CRASH', 'avatar': neighbour, 'damage': damage});
}
}
}
}
};
/**
* Let the avatars think.
* @private
*/
Pond.Battle.updateInterpreters_ = function() {
for (let i = 0; i < Pond.Battle.STATEMENTS_PER_FRAME; i++) {
Pond.Battle.ticks++;
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
Pond.Battle.currentAvatar = avatar;
try {
avatar.interpreter.step();
} catch (e) {
console.log(avatar + ' throws an error: ' + e);
avatar.die();
}
Pond.Battle.currentAvatar = null;
}
}
};
/**
* Inject the Pond API into a JavaScript interpreter.
* @param {!Interpreter} interpreter The JS-Interpreter.
* @param {!Interpreter.Object} globalObject Global object.
*/
Pond.Battle.initInterpreter = function(interpreter, globalObject) {
// API
let wrapper;
wrapper = function(value) {
// Restrict logging to just numbers so that the console doesn't fill up
// with 'problematic' messages when running 3rd party ducks.
console.log(Pond.Battle.currentAvatar.name + ' logs: ' + Number(value));
};
wrap('log');
wrapper = function(degree, resolution) {
return Pond.Battle.currentAvatar.scan(degree, resolution);
};
wrap('scan');
wrapper = function(degree, range) {
return Pond.Battle.currentAvatar.cannon(degree, range);
};
wrap('cannon');
wrapper = function(degree, speed) {
Pond.Battle.currentAvatar.drive(degree, speed);
};
wrap('drive');
wrap('swim');
wrapper = function() {
Pond.Battle.currentAvatar.stop();
};
wrap('stop');
wrapper = function() {
return Pond.Battle.currentAvatar.damage;
};
wrap('damage');
wrapper = function() {
return 100 - Pond.Battle.currentAvatar.damage;
};
wrap('health');
wrapper = function() {
return Pond.Battle.currentAvatar.speed;
};
wrap('speed');
wrapper = function() {
return Pond.Battle.currentAvatar.loc.x;
};
wrap('loc_x');
wrap('getX');
wrapper = function() {
return Pond.Battle.currentAvatar.loc.y;
};
wrap('loc_y');
wrap('getY');
function wrap(name) |
const myMath = interpreter.getProperty(globalObject, 'Math');
if (myMath) {
wrapper = function(number) {
return Math.sin(Blockly.utils.math.toRadians(number));
};
wrapMath('sin_deg');
wrapper = function(number) {
return Math.cos(Blockly.utils.math.toRadians(number));
};
wrapMath('cos_deg');
wrapper = function(number) {
return Math.tan(Blockly.utils.math.toRadians(number));
};
wrapMath('tan_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.asin(number));
};
wrapMath('asin_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.acos(number));
};
wrapMath('acos_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.atan(number));
};
wrapMath('atan_deg');
function wrapMath(name) {
interpreter.setProperty(myMath, name,
interpreter.createNativeFunction(wrapper, false));
}
}
};
/**
* Finds the distance between the given avatar and its nearest neighbour.
* @param {!Pond.Avatar} avatar The avatar to find distances from.
* @returns {!Array} Tuple of closest avatar and distance to that avatar.
*/
Pond.Battle.closestNeighbour = function(avatar) {
let closest = null;
let distance = Infinity;
for (const neighbour of Pond.Battle.AVATARS) {
if (!neighbour.dead && avatar !== neighbour) {
const thisDistance = Math.min(distance,
Blockly.utils.Coordinate.distance(avatar.loc, neighbour.loc));
if (thisDistance < distance) {
distance = thisDistance;
closest = neighbour;
}
}
}
return [closest, distance];
};
| {
interpreter.setProperty(globalObject, name,
interpreter.createNativeFunction(wrapper, false));
} | identifier_body |
battle.js | /**
* @license
* Copyright 2013 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview Creates an pond for avatars to compete in.
* @author fraser@google.com (Neil Fraser)
*/
'use strict';
goog.provide('Pond.Battle');
goog.require('Blockly.utils.Coordinate');
goog.require('Blockly.utils.math');
goog.require('Pond.Avatar');
/**
* List of avatars in this battle.
* @type !Array<!Pond.Avatar>
*/
Pond.Battle.AVATARS = [];
/**
* Ordered list of avatar with the best avatar first.
* @type !Array<!Pond.Avatar>
*/
Pond.Battle.RANK = [];
/**
* List of events to be visualized.
* @type !Array<!Object>
*/
Pond.Battle.EVENTS = [];
/**
* List of missiles in flight.
* @type !Array<!Object>
*/
Pond.Battle.MISSILES = [];
/**
* Speed of game execution.
*/
Pond.Battle.GAME_FPS = 50;
/**
* Speed of avatar execution.
*/
Pond.Battle.STATEMENTS_PER_FRAME = 100;
/**
* Number of seconds it takes to reload the cannon.
*/
Pond.Battle.RELOAD_TIME = .5;
/**
* The avatar currently executing code.
* @type Pond.Avatar
*/
Pond.Battle.currentAvatar = null;
/**
* Speed of avatar movement.
*/
Pond.Battle.AVATAR_SPEED = 1;
/**
* Speed of missile movement.
*/
Pond.Battle.MISSILE_SPEED = 3;
/**
* Rate of acceleration.
*/
Pond.Battle.ACCELERATION = 5;
/**
* Center to center distance for avatars to collide.
*/
Pond.Battle.COLLISION_RADIUS = 5;
/**
* Damage from worst-case collision.
*/
Pond.Battle.COLLISION_DAMAGE = 3;
/**
* PID of executing task.
*/
Pond.Battle.pid = 0;
/**
* Time to end the battle.
* @private
*/
Pond.Battle.endTime_ = 0;
/**
* Number of ticks used in battle.
*/
Pond.Battle.ticks = 0;
/**
* Time limit of game (in milliseconds).
*/
Pond.Battle.TIME_LIMIT = 5 * 60 * 1000;
/**
* Callback function for end of game.
* @type Function
*/
Pond.Battle.doneCallback_ = null;
/**
* Stop and reset the battle.
*/
Pond.Battle.reset = function() {
clearTimeout(Pond.Battle.pid);
Pond.Battle.EVENTS.length = 0;
Pond.Battle.MISSILES.length = 0;
Pond.Battle.RANK.length = 0;
Pond.Battle.ticks = 0;
for (const avatar of Pond.Battle.AVATARS) {
avatar.reset();
}
};
/**
* Start the battle executing. Avatars should already be added.
* @param {Function} doneCallback Function to call when game ends.
*/
Pond.Battle.start = function(doneCallback) {
Pond.Battle.doneCallback_ = doneCallback;
Pond.Battle.endTime_ = Date.now() + Pond.Battle.TIME_LIMIT;
console.log('Starting battle with ' + Pond.Battle.AVATARS.length +
' avatars.');
for (const avatar of Pond.Battle.AVATARS) {
try {
avatar.initInterpreter();
} catch (e) {
console.log(avatar + ' fails to load: ' + e);
avatar.die();
}
}
Pond.Battle.update();
};
/**
* Update the avatars states.
*/
Pond.Battle.update = function() {
// Execute a few statements.
Pond.Battle.updateInterpreters_();
// Update state of all missiles.
Pond.Battle.updateMissiles_();
// Update state of all avatars.
Pond.Battle.updateAvatars_();
if (Pond.Battle.AVATARS.length <= Pond.Battle.RANK.length + 1) {
// Game over because there are less than two avatars.
// Schedule the game to end in a second.
Pond.Battle.endTime_ = Math.min(Pond.Battle.endTime_, Date.now() + 1000);
}
if (Date.now() > Pond.Battle.endTime_) {
// Timeout reached. End the game.
Pond.Battle.stop();
} else {
// Do it all again in a moment.
Pond.Battle.pid =
setTimeout(Pond.Battle.update, 1000 / Pond.Battle.GAME_FPS);
}
};
Pond.Battle.stop = function() {
// Add the survivors to the ranks based on their damage.
const survivors = [];
for (const avatar of Pond.Battle.AVATARS) {
if (!avatar.dead) {
survivors.push(avatar);
}
}
const survivorCount = survivors.length;
survivors.sort(function(a, b) {return a.damage - b.damage;});
while (survivors.length) {
Pond.Battle.RANK.unshift(survivors.pop());
}
// Fire any callback.
Pond.Battle.doneCallback_ && Pond.Battle.doneCallback_(survivorCount);
};
/**
* Update state of all missiles.
* @private
*/
Pond.Battle.updateMissiles_ = function() {
for (let i = Pond.Battle.MISSILES.length - 1; i >= 0; i--) {
const missile = Pond.Battle.MISSILES[i];
missile.progress += Pond.Battle.MISSILE_SPEED;
let maxDamage = 0;
if (missile.range - missile.progress < Pond.Battle.MISSILE_SPEED / 2) {
// Boom.
Pond.Battle.MISSILES.splice(i, 1);
// Damage any avatar in range.
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
const range = Blockly.utils.Coordinate.distance(avatar.loc, missile.endLoc);
const damage = (1 - range / 4) * 10;
if (damage > 0) {
avatar.addDamage(damage);
maxDamage = Math.max(maxDamage, damage);
}
}
Pond.Battle.EVENTS.push({'type': 'BOOM', 'damage': maxDamage,
'x': missile.endLoc.x, 'y': missile.endLoc.y});
}
}
};
/**
* Update state of all avatars.
* @private
*/
Pond.Battle.updateAvatars_ = function() {
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
// Accelerate or decelerate.
if (avatar.speed < avatar.desiredSpeed) {
avatar.speed = Math.min(avatar.speed + Pond.Battle.ACCELERATION,
avatar.desiredSpeed);
} else if (avatar.speed > avatar.desiredSpeed) {
avatar.speed = Math.max(avatar.speed - Pond.Battle.ACCELERATION,
avatar.desiredSpeed);
}
// Move.
if (avatar.speed > 0) {
const tuple = Pond.Battle.closestNeighbour(avatar);
const closestBefore = tuple[1];
const angleRadians = Blockly.utils.math.toRadians(avatar.degree);
const speed = avatar.speed / 100 * Pond.Battle.AVATAR_SPEED;
const dx = Math.cos(angleRadians) * speed;
const dy = Math.sin(angleRadians) * speed;
avatar.loc.x += dx;
avatar.loc.y += dy;
if (avatar.loc.x < 0 || avatar.loc.x > 100 ||
avatar.loc.y < 0 || avatar.loc.y > 100) {
// Collision with wall.
avatar.loc.x = Blockly.utils.math.clamp(avatar.loc.x, 0, 100);
avatar.loc.y = Blockly.utils.math.clamp(avatar.loc.y, 0, 100);
const damage = avatar.speed / 100 * Pond.Battle.COLLISION_DAMAGE;
avatar.addDamage(damage);
avatar.speed = 0;
avatar.desiredSpeed = 0;
Pond.Battle.EVENTS.push(
{'type': 'CRASH', 'avatar': avatar, 'damage': damage});
} else {
const tuple = Pond.Battle.closestNeighbour(avatar);
const [neighbour, closestAfter] = tuple;
if (closestAfter < Pond.Battle.COLLISION_RADIUS &&
closestBefore > closestAfter) {
// Collision with another avatar.
avatar.loc.x -= dx;
avatar.loc.y -= dy;
const damage = Math.max(avatar.speed, neighbour.speed) / 100 *
Pond.Battle.COLLISION_DAMAGE;
avatar.addDamage(damage);
avatar.speed = 0;
avatar.desiredSpeed = 0;
neighbour.addDamage(damage);
neighbour.speed = 0;
neighbour.desiredSpeed = 0;
Pond.Battle.EVENTS.push(
{'type': 'CRASH', 'avatar': avatar, 'damage': damage},
{'type': 'CRASH', 'avatar': neighbour, 'damage': damage});
}
}
}
}
};
/**
* Let the avatars think.
* @private
*/
Pond.Battle.updateInterpreters_ = function() {
for (let i = 0; i < Pond.Battle.STATEMENTS_PER_FRAME; i++) {
Pond.Battle.ticks++;
for (const avatar of Pond.Battle.AVATARS) {
if (avatar.dead) {
continue;
}
Pond.Battle.currentAvatar = avatar;
try {
avatar.interpreter.step();
} catch (e) {
console.log(avatar + ' throws an error: ' + e);
avatar.die();
}
Pond.Battle.currentAvatar = null;
} | * @param {!Interpreter} interpreter The JS-Interpreter.
* @param {!Interpreter.Object} globalObject Global object.
*/
Pond.Battle.initInterpreter = function(interpreter, globalObject) {
// API
let wrapper;
wrapper = function(value) {
// Restrict logging to just numbers so that the console doesn't fill up
// with 'problematic' messages when running 3rd party ducks.
console.log(Pond.Battle.currentAvatar.name + ' logs: ' + Number(value));
};
wrap('log');
wrapper = function(degree, resolution) {
return Pond.Battle.currentAvatar.scan(degree, resolution);
};
wrap('scan');
wrapper = function(degree, range) {
return Pond.Battle.currentAvatar.cannon(degree, range);
};
wrap('cannon');
wrapper = function(degree, speed) {
Pond.Battle.currentAvatar.drive(degree, speed);
};
wrap('drive');
wrap('swim');
wrapper = function() {
Pond.Battle.currentAvatar.stop();
};
wrap('stop');
wrapper = function() {
return Pond.Battle.currentAvatar.damage;
};
wrap('damage');
wrapper = function() {
return 100 - Pond.Battle.currentAvatar.damage;
};
wrap('health');
wrapper = function() {
return Pond.Battle.currentAvatar.speed;
};
wrap('speed');
wrapper = function() {
return Pond.Battle.currentAvatar.loc.x;
};
wrap('loc_x');
wrap('getX');
wrapper = function() {
return Pond.Battle.currentAvatar.loc.y;
};
wrap('loc_y');
wrap('getY');
function wrap(name) {
interpreter.setProperty(globalObject, name,
interpreter.createNativeFunction(wrapper, false));
}
const myMath = interpreter.getProperty(globalObject, 'Math');
if (myMath) {
wrapper = function(number) {
return Math.sin(Blockly.utils.math.toRadians(number));
};
wrapMath('sin_deg');
wrapper = function(number) {
return Math.cos(Blockly.utils.math.toRadians(number));
};
wrapMath('cos_deg');
wrapper = function(number) {
return Math.tan(Blockly.utils.math.toRadians(number));
};
wrapMath('tan_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.asin(number));
};
wrapMath('asin_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.acos(number));
};
wrapMath('acos_deg');
wrapper = function(number) {
return Blockly.utils.math.toDegrees(Math.atan(number));
};
wrapMath('atan_deg');
function wrapMath(name) {
interpreter.setProperty(myMath, name,
interpreter.createNativeFunction(wrapper, false));
}
}
};
/**
* Finds the distance between the given avatar and its nearest neighbour.
* @param {!Pond.Avatar} avatar The avatar to find distances from.
* @returns {!Array} Tuple of closest avatar and distance to that avatar.
*/
Pond.Battle.closestNeighbour = function(avatar) {
let closest = null;
let distance = Infinity;
for (const neighbour of Pond.Battle.AVATARS) {
if (!neighbour.dead && avatar !== neighbour) {
const thisDistance = Math.min(distance,
Blockly.utils.Coordinate.distance(avatar.loc, neighbour.loc));
if (thisDistance < distance) {
distance = thisDistance;
closest = neighbour;
}
}
}
return [closest, distance];
}; | }
};
/**
* Inject the Pond API into a JavaScript interpreter. | random_line_split |
0_gatherer.go | package clusterconfig
import (
"context"
"fmt"
"reflect"
"runtime"
"sort"
"strings"
"time"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"github.com/openshift/insights-operator/pkg/anonymization"
"github.com/openshift/insights-operator/pkg/record"
"github.com/openshift/insights-operator/pkg/recorder"
"github.com/openshift/insights-operator/pkg/utils"
)
// gatherMetadata contains general information about collected data
type gatherMetadata struct {
// info about gathering functions
StatusReports []gathererStatusReport `json:"status_reports"`
MemoryAlloc uint64 `json:"memory_alloc_bytes"`
Uptime float64 `json:"uptime_seconds"`
// shows if obfuscation(hiding IPs and cluster domain) is enabled
IsGlobalObfuscationEnabled bool `json:"is_global_obfuscation_enabled"`
}
// gathererStatusReport contains general information about specific gatherer function
type gathererStatusReport struct {
Name string `json:"name"`
Duration time.Duration `json:"duration_in_ms"`
RecordsCount int `json:"records_count"`
Errors []string `json:"errors"`
}
// Gatherer is a driving instance invoking collection of data
type Gatherer struct {
ctx context.Context
gatherKubeConfig *rest.Config
gatherProtoKubeConfig *rest.Config
metricsGatherKubeConfig *rest.Config
anonymizer *anonymization.Anonymizer
startTime time.Time
}
type gatherResult struct {
records []record.Record
errors []error
}
type gatherFunction func(g *Gatherer, c chan<- gatherResult)
type gathering struct {
function gatherFunction
canFail bool
}
func important(function gatherFunction) gathering {
return gathering{function, false}
}
func failable(function gatherFunction) gathering {
return gathering{function, true}
}
const gatherAll = "ALL"
var gatherFunctions = map[string]gathering{
"pdbs": important(GatherPodDisruptionBudgets),
"metrics": failable(GatherMostRecentMetrics),
"operators": important(GatherClusterOperators),
"operators_pods_and_events": important(GatherClusterOperatorPodsAndEvents),
"container_images": important(GatherContainerImages),
"workload_info": failable(GatherWorkloadInfo),
"nodes": important(GatherNodes),
"config_maps": failable(GatherConfigMaps),
"version": important(GatherClusterVersion),
"infrastructures": important(GatherClusterInfrastructure),
"networks": important(GatherClusterNetwork),
"authentication": important(GatherClusterAuthentication),
"image_registries": important(GatherClusterImageRegistry),
"image_pruners": important(GatherClusterImagePruner),
"feature_gates": important(GatherClusterFeatureGates),
"oauths": important(GatherClusterOAuth),
"ingress": important(GatherClusterIngress),
"proxies": important(GatherClusterProxy),
"certificate_signing_requests": important(GatherCertificateSigningRequests),
"crds": important(GatherCRD),
"host_subnets": important(GatherHostSubnet),
"machine_sets": important(GatherMachineSet),
"install_plans": important(GatherInstallPlans),
"service_accounts": important(GatherServiceAccounts),
"machine_config_pools": important(GatherMachineConfigPool),
"container_runtime_configs": important(GatherContainerRuntimeConfig),
"netnamespaces": important(GatherNetNamespace),
"openshift_apiserver_operator_logs": failable(GatherOpenShiftAPIServerOperatorLogs),
"openshift_sdn_logs": failable(GatherOpenshiftSDNLogs),
"openshift_sdn_controller_logs": failable(GatherOpenshiftSDNControllerLogs),
"openshift_authentication_logs": failable(GatherOpenshiftAuthenticationLogs),
"sap_config": failable(GatherSAPConfig),
"sap_license_management_logs": failable(GatherSAPVsystemIptablesLogs),
"sap_pods": failable(GatherSAPPods),
"sap_datahubs": failable(GatherSAPDatahubs),
"olm_operators": failable(GatherOLMOperators),
"pod_network_connectivity_checks": failable(GatherPNCC),
}
// New creates new Gatherer
func New(
gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig *rest.Config, anonymizer *anonymization.Anonymizer,
) *Gatherer {
return &Gatherer{
gatherKubeConfig: gatherKubeConfig,
gatherProtoKubeConfig: gatherProtoKubeConfig,
metricsGatherKubeConfig: metricsGatherKubeConfig,
anonymizer: anonymizer,
startTime: time.Now(),
}
}
// GatherInfo from reflection
type GatherInfo struct {
name string
result gatherResult
function gatherFunction
canFail bool
rvString string
}
// NewGatherInfo that holds reflection information
func NewGatherInfo(gather string, rv reflect.Value) *GatherInfo {
gatherFunc := gatherFunctions[gather].function
return &GatherInfo{
name: runtime.FuncForPC(reflect.ValueOf(gatherFunc).Pointer()).Name(),
result: rv.Interface().(gatherResult),
function: gatherFunc,
canFail: gatherFunctions[gather].canFail,
rvString: rv.String(),
}
}
// Gather is hosting and calling all the recording functions
func (g *Gatherer) Gather(ctx context.Context, gatherList []string, rec recorder.Interface) error {
g.ctx = ctx
var errors []string
var gatherReport gatherMetadata
if len(gatherList) == 0 {
errors = append(errors, "no gather functions are specified to run")
}
if utils.StringInSlice(gatherAll, gatherList) {
gatherList = fullGatherList()
}
// Starts the gathers in Go routines
cases, starts, err := g.startGathering(gatherList, &errors)
if err != nil {
return err
}
// Gets the info from the Go routines
for range gatherList {
chosen, value, _ := reflect.Select(cases)
// The chosen channel has been closed, so zero out the channel to disable the case
cases[chosen].Chan = reflect.ValueOf(nil)
gather := gatherList[chosen]
gi := NewGatherInfo(gather, value)
statusReport, errorsReport := createStatusReport(gi, rec, starts[chosen])
if len(errorsReport) > 0 {
errors = append(errors, errorsReport...)
}
gatherReport.StatusReports = append(gatherReport.StatusReports, statusReport)
}
// if obfuscation is enabled, we want to know it from the archive
gatherReport.IsGlobalObfuscationEnabled = g.anonymizer != nil
// fill in performance related data to the report
var m runtime.MemStats
runtime.ReadMemStats(&m)
gatherReport.MemoryAlloc = m.HeapAlloc
gatherReport.Uptime = time.Since(g.startTime).Truncate(time.Millisecond).Seconds()
// records the report
if err := recordGatherReport(rec, gatherReport); err != nil {
errors = append(errors, fmt.Sprintf("unable to record io status reports: %v", err))
}
if len(errors) > 0 {
return sumErrors(errors)
}
return nil
}
func createStatusReport(gather *GatherInfo, rec recorder.Interface, starts time.Time) (statusReport gathererStatusReport, errors []string) {
elapsed := time.Since(starts).Truncate(time.Millisecond)
klog.V(4).Infof("Gather %s took %s to process %d records", gather.name, elapsed, len(gather.result.records))
shortName := strings.Replace(gather.name, "github.com/openshift/insights-operator/pkg/gather/", "", 1)
statusReport = gathererStatusReport{
Name: shortName,
Duration: time.Duration(elapsed.Milliseconds()),
RecordsCount: len(gather.result.records),
Errors: extractErrors(gather.result.errors),
}
if gather.canFail {
for _, err := range gather.result.errors {
klog.V(5).Infof("Couldn't gather %s' received following error: %s\n", gather.name, err.Error())
}
} else {
errors = extractErrors(gather.result.errors)
}
errors = append(errors, recordStatusReport(rec, gather.result.records)...)
klog.V(5).Infof("Read from %s's channel and received %s\n", gather.name, gather.rvString)
return statusReport, errors
}
func recordStatusReport(rec recorder.Interface, records []record.Record) []string {
var errors []string
for _, r := range records {
if err := rec.Record(r); err != nil {
errors = append(errors, fmt.Sprintf("unable to record %s: %v", r.Name, err))
continue
}
}
return errors
}
// startGathering runs each gather function in its own goroutine.
// Every gather function is given its own channel to send back the results.
// 1. return value: `cases` list, used for dynamically reading from the channels.
// 2. return value: `starts` list, contains the start time of each gather function.
// Unknown gather IDs are reported into *errors and skipped.
func (g *Gatherer) startGathering(gatherList []string, errors *[]string) ([]reflect.SelectCase, []time.Time, error) {
	// Pre-size to the upper bound; unknown IDs may leave some capacity unused.
	cases := make([]reflect.SelectCase, 0, len(gatherList))
	starts := make([]time.Time, 0, len(gatherList))
	// Starts the gathers in Go routines
	for _, gatherID := range gatherList {
		gather, ok := gatherFunctions[gatherID]
		if !ok {
			*errors = append(*errors, fmt.Sprintf("unknown gatherId in config: %s", gatherID))
			continue
		}
		// Read the function only after the lookup is known to have succeeded
		// (previously it was read from the zero value before the ok check).
		gFn := gather.function
		channel := make(chan gatherResult)
		cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(channel)})
		gatherName := runtime.FuncForPC(reflect.ValueOf(gFn).Pointer()).Name()
		klog.V(5).Infof("Gathering %s", gatherName)
		starts = append(starts, time.Now())
		go gFn(g, channel)
		// Abort the whole run if the context was cancelled meanwhile.
		if err := g.ctx.Err(); err != nil {
			return nil, nil, err
		}
	}
	return cases, starts, nil
}
// recordGatherReport stores the gather metadata in the archive as the
// "insights-operator/gathers" record, JSON-encoded.
func recordGatherReport(rec recorder.Interface, metadata gatherMetadata) error {
	return rec.Record(record.Record{
		Name: "insights-operator/gathers",
		Item: record.JSONMarshaller{Object: metadata},
	})
}
func extractErrors(errors []error) []string {
var errStrings []string
for _, err := range errors {
errStrings = append(errStrings, err.Error())
}
return errStrings
}
func sumErrors(errors []string) error |
// fullGatherList returns the IDs of every registered gather function.
// Map iteration order is random, so the order of the result is unspecified.
func fullGatherList() []string {
	ids := make([]string, 0, len(gatherFunctions))
	for id := range gatherFunctions {
		ids = append(ids, id)
	}
	return ids
}
| {
sort.Strings(errors)
errors = utils.UniqueStrings(errors)
return fmt.Errorf("%s", strings.Join(errors, ", "))
} | identifier_body |
0_gatherer.go | package clusterconfig
import (
"context"
"fmt"
"reflect"
"runtime"
"sort"
"strings"
"time"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"github.com/openshift/insights-operator/pkg/anonymization"
"github.com/openshift/insights-operator/pkg/record"
"github.com/openshift/insights-operator/pkg/recorder"
"github.com/openshift/insights-operator/pkg/utils"
)
// gatherMetadata contains general information about collected data
// produced by one run of Gatherer.Gather; it is serialized into the archive
// as the "insights-operator/gathers" record (see recordGatherReport).
type gatherMetadata struct {
// info about gathering functions
StatusReports []gathererStatusReport `json:"status_reports"`
// MemoryAlloc is runtime.MemStats.HeapAlloc sampled at the end of the
// gather run, in bytes.
MemoryAlloc uint64 `json:"memory_alloc_bytes"`
// Uptime is the time elapsed since the Gatherer was created, in seconds
// (truncated to millisecond precision before conversion).
Uptime float64 `json:"uptime_seconds"`
// shows if obfuscation(hiding IPs and cluster domain) is enabled
IsGlobalObfuscationEnabled bool `json:"is_global_obfuscation_enabled"`
}
// gathererStatusReport contains general information about specific gatherer function
type gathererStatusReport struct {
// Name is the gather function's name with the repository prefix stripped
// (see createStatusReport).
Name string `json:"name"`
// Duration holds the elapsed wall-clock time of the gather function.
// NOTE(review): createStatusReport stores time.Duration(elapsed.Milliseconds()),
// i.e. the millisecond count reinterpreted in nanosecond units; the JSON tag
// documents the serialized value as milliseconds, matching that choice.
Duration time.Duration `json:"duration_in_ms"`
// RecordsCount is the number of records the gather function produced.
RecordsCount int `json:"records_count"`
// Errors lists the messages of all errors the gather function reported,
// including tolerated ones from canFail gatherers.
Errors []string `json:"errors"`
}
// Gatherer is a driving instance invoking collection of data.
// ctx is set per Gather call; the three rest.Config fields are the client
// configurations the individual gather functions use; anonymizer is nil when
// global obfuscation is disabled; startTime feeds the Uptime metric in the
// gather metadata.
type Gatherer struct {
ctx context.Context
gatherKubeConfig *rest.Config
gatherProtoKubeConfig *rest.Config
metricsGatherKubeConfig *rest.Config
anonymizer *anonymization.Anonymizer
startTime time.Time
}
// gatherResult is the value a gather function sends back on its channel:
// the records it produced plus any errors encountered while producing them.
type gatherResult struct {
records []record.Record
errors []error
}
// gatherFunction is the signature of a single gather routine; the routine
// reports its outcome by sending a gatherResult on c.
// NOTE(review): Gather performs exactly one receive per started gatherer,
// so each implementation is expected to send exactly once — confirm.
type gatherFunction func(g *Gatherer, c chan<- gatherResult)
// gathering pairs a gather function with its failure policy
// (see important and failable).
type gathering struct {
function gatherFunction
canFail bool
}
func | (function gatherFunction) gathering {
return gathering{function, false}
}
// failable wraps a gather function whose errors are tolerated: failures are
// logged and reported in the status, but do not fail the overall gather run.
func failable(fn gatherFunction) gathering {
	return gathering{function: fn, canFail: true}
}
// gatherAll is the sentinel gather ID that expands to every registered
// gather function (see fullGatherList and Gather).
const gatherAll = "ALL"
// gatherFunctions maps a configurable gather ID to its implementation.
// important(...) entries fail the run on error; failable(...) entries only
// log and report their errors.
var gatherFunctions = map[string]gathering{
"pdbs": important(GatherPodDisruptionBudgets),
"metrics": failable(GatherMostRecentMetrics),
"operators": important(GatherClusterOperators),
"operators_pods_and_events": important(GatherClusterOperatorPodsAndEvents),
"container_images": important(GatherContainerImages),
"workload_info": failable(GatherWorkloadInfo),
"nodes": important(GatherNodes),
"config_maps": failable(GatherConfigMaps),
"version": important(GatherClusterVersion),
"infrastructures": important(GatherClusterInfrastructure),
"networks": important(GatherClusterNetwork),
"authentication": important(GatherClusterAuthentication),
"image_registries": important(GatherClusterImageRegistry),
"image_pruners": important(GatherClusterImagePruner),
"feature_gates": important(GatherClusterFeatureGates),
"oauths": important(GatherClusterOAuth),
"ingress": important(GatherClusterIngress),
"proxies": important(GatherClusterProxy),
"certificate_signing_requests": important(GatherCertificateSigningRequests),
"crds": important(GatherCRD),
"host_subnets": important(GatherHostSubnet),
"machine_sets": important(GatherMachineSet),
"install_plans": important(GatherInstallPlans),
"service_accounts": important(GatherServiceAccounts),
"machine_config_pools": important(GatherMachineConfigPool),
"container_runtime_configs": important(GatherContainerRuntimeConfig),
"netnamespaces": important(GatherNetNamespace),
"openshift_apiserver_operator_logs": failable(GatherOpenShiftAPIServerOperatorLogs),
"openshift_sdn_logs": failable(GatherOpenshiftSDNLogs),
"openshift_sdn_controller_logs": failable(GatherOpenshiftSDNControllerLogs),
"openshift_authentication_logs": failable(GatherOpenshiftAuthenticationLogs),
"sap_config": failable(GatherSAPConfig),
"sap_license_management_logs": failable(GatherSAPVsystemIptablesLogs),
"sap_pods": failable(GatherSAPPods),
"sap_datahubs": failable(GatherSAPDatahubs),
"olm_operators": failable(GatherOLMOperators),
"pod_network_connectivity_checks": failable(GatherPNCC),
}
// New creates new Gatherer with the given client configurations and optional
// anonymizer (nil disables the obfuscation flag in the gather metadata).
// startTime is captured here so Uptime measures the gatherer's lifetime.
func New(
gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig *rest.Config, anonymizer *anonymization.Anonymizer,
) *Gatherer {
return &Gatherer{
gatherKubeConfig: gatherKubeConfig,
gatherProtoKubeConfig: gatherProtoKubeConfig,
metricsGatherKubeConfig: metricsGatherKubeConfig,
anonymizer: anonymizer,
startTime: time.Now(),
}
}
// GatherInfo from reflection: bundles everything known about one finished
// gather function — its resolved function name, the result received from its
// channel, the function itself, its failure policy, and the received
// reflect.Value rendered as a string (used for debug logging).
type GatherInfo struct {
name string
result gatherResult
function gatherFunction
canFail bool
rvString string
}
// NewGatherInfo builds the reflection-derived summary of a completed gather:
// the implementation's fully qualified function name, the gatherResult read
// from the received channel value rv, and the registered failure policy.
// rv must hold a gatherResult (the type every gather function sends).
func NewGatherInfo(gather string, rv reflect.Value) *GatherInfo {
	// Single map lookup instead of the previous duplicated lookups.
	entry := gatherFunctions[gather]
	return &GatherInfo{
		name:     runtime.FuncForPC(reflect.ValueOf(entry.function).Pointer()).Name(),
		result:   rv.Interface().(gatherResult),
		function: entry.function,
		canFail:  entry.canFail,
		rvString: rv.String(),
	}
}
// Gather is hosting and calling all the recording functions.
// It expands the "ALL" sentinel, starts the requested gather functions
// concurrently, collects each result as it arrives, records the gathered
// data plus a metadata report through rec, and returns one aggregated error
// for everything that went wrong.
func (g *Gatherer) Gather(ctx context.Context, gatherList []string, rec recorder.Interface) error {
g.ctx = ctx
var errors []string
var gatherReport gatherMetadata
if len(gatherList) == 0 {
errors = append(errors, "no gather functions are specified to run")
}
// "ALL" expands to every registered gather function.
if utils.StringInSlice(gatherAll, gatherList) {
gatherList = fullGatherList()
}
// Starts the gathers in Go routines
cases, starts, err := g.startGathering(gatherList, &errors)
if err != nil {
return err
}
// Gets the info from the Go routines
// NOTE(review): startGathering skips unknown gather IDs, so `cases` can be
// shorter than gatherList; this loop still runs len(gatherList) times and
// indexes gatherList[chosen], which would misalign names and eventually
// block on all-nil channels — confirm gatherList entries are always valid.
for range gatherList {
chosen, value, _ := reflect.Select(cases)
// The chosen channel has been closed, so zero out the channel to disable the case
cases[chosen].Chan = reflect.ValueOf(nil)
gather := gatherList[chosen]
gi := NewGatherInfo(gather, value)
statusReport, errorsReport := createStatusReport(gi, rec, starts[chosen])
if len(errorsReport) > 0 {
errors = append(errors, errorsReport...)
}
gatherReport.StatusReports = append(gatherReport.StatusReports, statusReport)
}
// if obfuscation is enabled, we want to know it from the archive
gatherReport.IsGlobalObfuscationEnabled = g.anonymizer != nil
// fill in performance related data to the report
var m runtime.MemStats
runtime.ReadMemStats(&m)
gatherReport.MemoryAlloc = m.HeapAlloc
gatherReport.Uptime = time.Since(g.startTime).Truncate(time.Millisecond).Seconds()
// records the report
if err := recordGatherReport(rec, gatherReport); err != nil {
errors = append(errors, fmt.Sprintf("unable to record io status reports: %v", err))
}
if len(errors) > 0 {
return sumErrors(errors)
}
return nil
}
// createStatusReport builds the per-gatherer status entry for the archive
// metadata, records the gathered data via rec, and returns any error
// messages that should fail the overall run.
// For canFail gatherers the gather errors are still included in the report's
// Errors field but only logged, never returned; recording failures are
// always returned regardless of the failure policy.
func createStatusReport(gather *GatherInfo, rec recorder.Interface, starts time.Time) (statusReport gathererStatusReport, errors []string) {
elapsed := time.Since(starts).Truncate(time.Millisecond)
klog.V(4).Infof("Gather %s took %s to process %d records", gather.name, elapsed, len(gather.result.records))
// Strip the repository prefix so the report carries a compact name.
shortName := strings.Replace(gather.name, "github.com/openshift/insights-operator/pkg/gather/", "", 1)
statusReport = gathererStatusReport{
Name: shortName,
// NOTE(review): this stores the raw millisecond count reinterpreted as a
// time.Duration (nanosecond unit); the "duration_in_ms" JSON tag matches
// that encoding choice — confirm before "fixing" it.
Duration: time.Duration(elapsed.Milliseconds()),
RecordsCount: len(gather.result.records),
Errors: extractErrors(gather.result.errors),
}
if gather.canFail {
// Tolerated failures: log each error but do not propagate it.
for _, err := range gather.result.errors {
klog.V(5).Infof("Couldn't gather %s' received following error: %s\n", gather.name, err.Error())
}
} else {
errors = extractErrors(gather.result.errors)
}
// Recording failures are always fatal for the run.
errors = append(errors, recordStatusReport(rec, gather.result.records)...)
klog.V(5).Infof("Read from %s's channel and received %s\n", gather.name, gather.rvString)
return statusReport, errors
}
// recordStatusReport persists every record through the recorder and collects
// a message for each record that could not be stored. It never stops early:
// all records are attempted even after a failure.
func recordStatusReport(rec recorder.Interface, records []record.Record) []string {
	var failures []string
	for i := range records {
		if err := rec.Record(records[i]); err != nil {
			failures = append(failures, fmt.Sprintf("unable to record %s: %v", records[i].Name, err))
		}
	}
	return failures
}
// startGathering runs each gather function in its own goroutine.
// Every gather function is given its own channel to send back the results.
// 1. return value: `cases` list, used for dynamically reading from the channels.
// 2. return value: `starts` list, contains the start time of each gather function.
// Unknown gather IDs are reported into *errors and skipped.
func (g *Gatherer) startGathering(gatherList []string, errors *[]string) ([]reflect.SelectCase, []time.Time, error) {
	// Pre-size to the upper bound; unknown IDs may leave some capacity unused.
	cases := make([]reflect.SelectCase, 0, len(gatherList))
	starts := make([]time.Time, 0, len(gatherList))
	// Starts the gathers in Go routines
	for _, gatherID := range gatherList {
		gather, ok := gatherFunctions[gatherID]
		if !ok {
			*errors = append(*errors, fmt.Sprintf("unknown gatherId in config: %s", gatherID))
			continue
		}
		// Read the function only after the lookup is known to have succeeded
		// (previously it was read from the zero value before the ok check).
		gFn := gather.function
		channel := make(chan gatherResult)
		cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(channel)})
		gatherName := runtime.FuncForPC(reflect.ValueOf(gFn).Pointer()).Name()
		klog.V(5).Infof("Gathering %s", gatherName)
		starts = append(starts, time.Now())
		go gFn(g, channel)
		// Abort the whole run if the context was cancelled meanwhile.
		if err := g.ctx.Err(); err != nil {
			return nil, nil, err
		}
	}
	return cases, starts, nil
}
// recordGatherReport stores the gather metadata in the archive as the
// "insights-operator/gathers" record, JSON-encoded.
func recordGatherReport(rec recorder.Interface, metadata gatherMetadata) error {
	return rec.Record(record.Record{
		Name: "insights-operator/gathers",
		Item: record.JSONMarshaller{Object: metadata},
	})
}
func extractErrors(errors []error) []string {
var errStrings []string
for _, err := range errors {
errStrings = append(errStrings, err.Error())
}
return errStrings
}
// sumErrors flattens the collected error messages into a single error value.
// The messages are sorted and de-duplicated first, so the resulting error
// text is deterministic regardless of gather completion order.
// Note: the input slice is sorted in place.
func sumErrors(errors []string) error {
sort.Strings(errors)
errors = utils.UniqueStrings(errors)
return fmt.Errorf("%s", strings.Join(errors, ", "))
}
// fullGatherList returns the IDs of every registered gather function.
// Map iteration order is random, so the order of the result is unspecified.
func fullGatherList() []string {
	ids := make([]string, 0, len(gatherFunctions))
	for id := range gatherFunctions {
		ids = append(ids, id)
	}
	return ids
}
| important | identifier_name |
0_gatherer.go | package clusterconfig
import (
"context"
"fmt"
"reflect"
"runtime"
"sort"
"strings"
"time"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"github.com/openshift/insights-operator/pkg/anonymization"
"github.com/openshift/insights-operator/pkg/record"
"github.com/openshift/insights-operator/pkg/recorder"
"github.com/openshift/insights-operator/pkg/utils"
)
// gatherMetadata contains general information about collected data
type gatherMetadata struct {
// info about gathering functions
StatusReports []gathererStatusReport `json:"status_reports"`
MemoryAlloc uint64 `json:"memory_alloc_bytes"`
Uptime float64 `json:"uptime_seconds"`
// shows if obfuscation(hiding IPs and cluster domain) is enabled
IsGlobalObfuscationEnabled bool `json:"is_global_obfuscation_enabled"`
}
// gathererStatusReport contains general information about specific gatherer function
type gathererStatusReport struct {
Name string `json:"name"`
Duration time.Duration `json:"duration_in_ms"`
RecordsCount int `json:"records_count"`
Errors []string `json:"errors"`
}
// Gatherer is a driving instance invoking collection of data
type Gatherer struct {
ctx context.Context
gatherKubeConfig *rest.Config
gatherProtoKubeConfig *rest.Config
metricsGatherKubeConfig *rest.Config
anonymizer *anonymization.Anonymizer
startTime time.Time
}
type gatherResult struct {
records []record.Record
errors []error
}
type gatherFunction func(g *Gatherer, c chan<- gatherResult)
type gathering struct {
function gatherFunction
canFail bool
}
func important(function gatherFunction) gathering {
return gathering{function, false}
}
func failable(function gatherFunction) gathering {
return gathering{function, true}
}
const gatherAll = "ALL"
var gatherFunctions = map[string]gathering{
"pdbs": important(GatherPodDisruptionBudgets),
"metrics": failable(GatherMostRecentMetrics),
"operators": important(GatherClusterOperators),
"operators_pods_and_events": important(GatherClusterOperatorPodsAndEvents),
"container_images": important(GatherContainerImages),
"workload_info": failable(GatherWorkloadInfo),
"nodes": important(GatherNodes),
"config_maps": failable(GatherConfigMaps),
"version": important(GatherClusterVersion),
"infrastructures": important(GatherClusterInfrastructure),
"networks": important(GatherClusterNetwork),
"authentication": important(GatherClusterAuthentication),
"image_registries": important(GatherClusterImageRegistry),
"image_pruners": important(GatherClusterImagePruner),
"feature_gates": important(GatherClusterFeatureGates),
"oauths": important(GatherClusterOAuth),
"ingress": important(GatherClusterIngress),
"proxies": important(GatherClusterProxy),
"certificate_signing_requests": important(GatherCertificateSigningRequests),
"crds": important(GatherCRD),
"host_subnets": important(GatherHostSubnet),
"machine_sets": important(GatherMachineSet),
"install_plans": important(GatherInstallPlans),
"service_accounts": important(GatherServiceAccounts),
"machine_config_pools": important(GatherMachineConfigPool), | "openshift_sdn_controller_logs": failable(GatherOpenshiftSDNControllerLogs),
"openshift_authentication_logs": failable(GatherOpenshiftAuthenticationLogs),
"sap_config": failable(GatherSAPConfig),
"sap_license_management_logs": failable(GatherSAPVsystemIptablesLogs),
"sap_pods": failable(GatherSAPPods),
"sap_datahubs": failable(GatherSAPDatahubs),
"olm_operators": failable(GatherOLMOperators),
"pod_network_connectivity_checks": failable(GatherPNCC),
}
// New creates new Gatherer
func New(
gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig *rest.Config, anonymizer *anonymization.Anonymizer,
) *Gatherer {
return &Gatherer{
gatherKubeConfig: gatherKubeConfig,
gatherProtoKubeConfig: gatherProtoKubeConfig,
metricsGatherKubeConfig: metricsGatherKubeConfig,
anonymizer: anonymizer,
startTime: time.Now(),
}
}
// GatherInfo from reflection
type GatherInfo struct {
name string
result gatherResult
function gatherFunction
canFail bool
rvString string
}
// NewGatherInfo that holds reflection information
func NewGatherInfo(gather string, rv reflect.Value) *GatherInfo {
gatherFunc := gatherFunctions[gather].function
return &GatherInfo{
name: runtime.FuncForPC(reflect.ValueOf(gatherFunc).Pointer()).Name(),
result: rv.Interface().(gatherResult),
function: gatherFunc,
canFail: gatherFunctions[gather].canFail,
rvString: rv.String(),
}
}
// Gather is hosting and calling all the recording functions
func (g *Gatherer) Gather(ctx context.Context, gatherList []string, rec recorder.Interface) error {
g.ctx = ctx
var errors []string
var gatherReport gatherMetadata
if len(gatherList) == 0 {
errors = append(errors, "no gather functions are specified to run")
}
if utils.StringInSlice(gatherAll, gatherList) {
gatherList = fullGatherList()
}
// Starts the gathers in Go routines
cases, starts, err := g.startGathering(gatherList, &errors)
if err != nil {
return err
}
// Gets the info from the Go routines
for range gatherList {
chosen, value, _ := reflect.Select(cases)
// The chosen channel has been closed, so zero out the channel to disable the case
cases[chosen].Chan = reflect.ValueOf(nil)
gather := gatherList[chosen]
gi := NewGatherInfo(gather, value)
statusReport, errorsReport := createStatusReport(gi, rec, starts[chosen])
if len(errorsReport) > 0 {
errors = append(errors, errorsReport...)
}
gatherReport.StatusReports = append(gatherReport.StatusReports, statusReport)
}
// if obfuscation is enabled, we want to know it from the archive
gatherReport.IsGlobalObfuscationEnabled = g.anonymizer != nil
// fill in performance related data to the report
var m runtime.MemStats
runtime.ReadMemStats(&m)
gatherReport.MemoryAlloc = m.HeapAlloc
gatherReport.Uptime = time.Since(g.startTime).Truncate(time.Millisecond).Seconds()
// records the report
if err := recordGatherReport(rec, gatherReport); err != nil {
errors = append(errors, fmt.Sprintf("unable to record io status reports: %v", err))
}
if len(errors) > 0 {
return sumErrors(errors)
}
return nil
}
func createStatusReport(gather *GatherInfo, rec recorder.Interface, starts time.Time) (statusReport gathererStatusReport, errors []string) {
elapsed := time.Since(starts).Truncate(time.Millisecond)
klog.V(4).Infof("Gather %s took %s to process %d records", gather.name, elapsed, len(gather.result.records))
shortName := strings.Replace(gather.name, "github.com/openshift/insights-operator/pkg/gather/", "", 1)
statusReport = gathererStatusReport{
Name: shortName,
Duration: time.Duration(elapsed.Milliseconds()),
RecordsCount: len(gather.result.records),
Errors: extractErrors(gather.result.errors),
}
if gather.canFail {
for _, err := range gather.result.errors {
klog.V(5).Infof("Couldn't gather %s' received following error: %s\n", gather.name, err.Error())
}
} else {
errors = extractErrors(gather.result.errors)
}
errors = append(errors, recordStatusReport(rec, gather.result.records)...)
klog.V(5).Infof("Read from %s's channel and received %s\n", gather.name, gather.rvString)
return statusReport, errors
}
func recordStatusReport(rec recorder.Interface, records []record.Record) []string {
var errors []string
for _, r := range records {
if err := rec.Record(r); err != nil {
errors = append(errors, fmt.Sprintf("unable to record %s: %v", r.Name, err))
continue
}
}
return errors
}
// Runs each gather functions in a goroutine.
// Every gather function is given its own channel to send back the results.
// 1. return value: `cases` list, used for dynamically reading from the channels.
// 2. return value: `starts` list, contains that start time of each gather function.
func (g *Gatherer) startGathering(gatherList []string, errors *[]string) ([]reflect.SelectCase, []time.Time, error) {
var cases []reflect.SelectCase
var starts []time.Time
// Starts the gathers in Go routines
for _, gatherID := range gatherList {
gather, ok := gatherFunctions[gatherID]
gFn := gather.function
if !ok {
*errors = append(*errors, fmt.Sprintf("unknown gatherId in config: %s", gatherID))
continue
}
channel := make(chan gatherResult)
cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(channel)})
gatherName := runtime.FuncForPC(reflect.ValueOf(gFn).Pointer()).Name()
klog.V(5).Infof("Gathering %s", gatherName)
starts = append(starts, time.Now())
go gFn(g, channel)
if err := g.ctx.Err(); err != nil {
return nil, nil, err
}
}
return cases, starts, nil
}
func recordGatherReport(rec recorder.Interface, metadata gatherMetadata) error {
r := record.Record{Name: "insights-operator/gathers", Item: record.JSONMarshaller{Object: metadata}}
return rec.Record(r)
}
func extractErrors(errors []error) []string {
var errStrings []string
for _, err := range errors {
errStrings = append(errStrings, err.Error())
}
return errStrings
}
func sumErrors(errors []string) error {
sort.Strings(errors)
errors = utils.UniqueStrings(errors)
return fmt.Errorf("%s", strings.Join(errors, ", "))
}
func fullGatherList() []string {
gatherList := make([]string, 0, len(gatherFunctions))
for k := range gatherFunctions {
gatherList = append(gatherList, k)
}
return gatherList
} | "container_runtime_configs": important(GatherContainerRuntimeConfig),
"netnamespaces": important(GatherNetNamespace),
"openshift_apiserver_operator_logs": failable(GatherOpenShiftAPIServerOperatorLogs),
"openshift_sdn_logs": failable(GatherOpenshiftSDNLogs), | random_line_split |
0_gatherer.go | package clusterconfig
import (
"context"
"fmt"
"reflect"
"runtime"
"sort"
"strings"
"time"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"github.com/openshift/insights-operator/pkg/anonymization"
"github.com/openshift/insights-operator/pkg/record"
"github.com/openshift/insights-operator/pkg/recorder"
"github.com/openshift/insights-operator/pkg/utils"
)
// gatherMetadata contains general information about collected data
type gatherMetadata struct {
// info about gathering functions
StatusReports []gathererStatusReport `json:"status_reports"`
MemoryAlloc uint64 `json:"memory_alloc_bytes"`
Uptime float64 `json:"uptime_seconds"`
// shows if obfuscation(hiding IPs and cluster domain) is enabled
IsGlobalObfuscationEnabled bool `json:"is_global_obfuscation_enabled"`
}
// gathererStatusReport contains general information about specific gatherer function
type gathererStatusReport struct {
Name string `json:"name"`
Duration time.Duration `json:"duration_in_ms"`
RecordsCount int `json:"records_count"`
Errors []string `json:"errors"`
}
// Gatherer is a driving instance invoking collection of data
type Gatherer struct {
ctx context.Context
gatherKubeConfig *rest.Config
gatherProtoKubeConfig *rest.Config
metricsGatherKubeConfig *rest.Config
anonymizer *anonymization.Anonymizer
startTime time.Time
}
type gatherResult struct {
records []record.Record
errors []error
}
type gatherFunction func(g *Gatherer, c chan<- gatherResult)
type gathering struct {
function gatherFunction
canFail bool
}
func important(function gatherFunction) gathering {
return gathering{function, false}
}
func failable(function gatherFunction) gathering {
return gathering{function, true}
}
const gatherAll = "ALL"
var gatherFunctions = map[string]gathering{
"pdbs": important(GatherPodDisruptionBudgets),
"metrics": failable(GatherMostRecentMetrics),
"operators": important(GatherClusterOperators),
"operators_pods_and_events": important(GatherClusterOperatorPodsAndEvents),
"container_images": important(GatherContainerImages),
"workload_info": failable(GatherWorkloadInfo),
"nodes": important(GatherNodes),
"config_maps": failable(GatherConfigMaps),
"version": important(GatherClusterVersion),
"infrastructures": important(GatherClusterInfrastructure),
"networks": important(GatherClusterNetwork),
"authentication": important(GatherClusterAuthentication),
"image_registries": important(GatherClusterImageRegistry),
"image_pruners": important(GatherClusterImagePruner),
"feature_gates": important(GatherClusterFeatureGates),
"oauths": important(GatherClusterOAuth),
"ingress": important(GatherClusterIngress),
"proxies": important(GatherClusterProxy),
"certificate_signing_requests": important(GatherCertificateSigningRequests),
"crds": important(GatherCRD),
"host_subnets": important(GatherHostSubnet),
"machine_sets": important(GatherMachineSet),
"install_plans": important(GatherInstallPlans),
"service_accounts": important(GatherServiceAccounts),
"machine_config_pools": important(GatherMachineConfigPool),
"container_runtime_configs": important(GatherContainerRuntimeConfig),
"netnamespaces": important(GatherNetNamespace),
"openshift_apiserver_operator_logs": failable(GatherOpenShiftAPIServerOperatorLogs),
"openshift_sdn_logs": failable(GatherOpenshiftSDNLogs),
"openshift_sdn_controller_logs": failable(GatherOpenshiftSDNControllerLogs),
"openshift_authentication_logs": failable(GatherOpenshiftAuthenticationLogs),
"sap_config": failable(GatherSAPConfig),
"sap_license_management_logs": failable(GatherSAPVsystemIptablesLogs),
"sap_pods": failable(GatherSAPPods),
"sap_datahubs": failable(GatherSAPDatahubs),
"olm_operators": failable(GatherOLMOperators),
"pod_network_connectivity_checks": failable(GatherPNCC),
}
// New creates new Gatherer
func New(
gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig *rest.Config, anonymizer *anonymization.Anonymizer,
) *Gatherer {
return &Gatherer{
gatherKubeConfig: gatherKubeConfig,
gatherProtoKubeConfig: gatherProtoKubeConfig,
metricsGatherKubeConfig: metricsGatherKubeConfig,
anonymizer: anonymizer,
startTime: time.Now(),
}
}
// GatherInfo from reflection
type GatherInfo struct {
name string
result gatherResult
function gatherFunction
canFail bool
rvString string
}
// NewGatherInfo that holds reflection information
func NewGatherInfo(gather string, rv reflect.Value) *GatherInfo {
gatherFunc := gatherFunctions[gather].function
return &GatherInfo{
name: runtime.FuncForPC(reflect.ValueOf(gatherFunc).Pointer()).Name(),
result: rv.Interface().(gatherResult),
function: gatherFunc,
canFail: gatherFunctions[gather].canFail,
rvString: rv.String(),
}
}
// Gather is hosting and calling all the recording functions
func (g *Gatherer) Gather(ctx context.Context, gatherList []string, rec recorder.Interface) error {
g.ctx = ctx
var errors []string
var gatherReport gatherMetadata
if len(gatherList) == 0 {
errors = append(errors, "no gather functions are specified to run")
}
if utils.StringInSlice(gatherAll, gatherList) {
gatherList = fullGatherList()
}
// Starts the gathers in Go routines
cases, starts, err := g.startGathering(gatherList, &errors)
if err != nil {
return err
}
// Gets the info from the Go routines
for range gatherList {
chosen, value, _ := reflect.Select(cases)
// The chosen channel has been closed, so zero out the channel to disable the case
cases[chosen].Chan = reflect.ValueOf(nil)
gather := gatherList[chosen]
gi := NewGatherInfo(gather, value)
statusReport, errorsReport := createStatusReport(gi, rec, starts[chosen])
if len(errorsReport) > 0 {
errors = append(errors, errorsReport...)
}
gatherReport.StatusReports = append(gatherReport.StatusReports, statusReport)
}
// if obfuscation is enabled, we want to know it from the archive
gatherReport.IsGlobalObfuscationEnabled = g.anonymizer != nil
// fill in performance related data to the report
var m runtime.MemStats
runtime.ReadMemStats(&m)
gatherReport.MemoryAlloc = m.HeapAlloc
gatherReport.Uptime = time.Since(g.startTime).Truncate(time.Millisecond).Seconds()
// records the report
if err := recordGatherReport(rec, gatherReport); err != nil {
errors = append(errors, fmt.Sprintf("unable to record io status reports: %v", err))
}
if len(errors) > 0 {
return sumErrors(errors)
}
return nil
}
func createStatusReport(gather *GatherInfo, rec recorder.Interface, starts time.Time) (statusReport gathererStatusReport, errors []string) {
elapsed := time.Since(starts).Truncate(time.Millisecond)
klog.V(4).Infof("Gather %s took %s to process %d records", gather.name, elapsed, len(gather.result.records))
shortName := strings.Replace(gather.name, "github.com/openshift/insights-operator/pkg/gather/", "", 1)
statusReport = gathererStatusReport{
Name: shortName,
Duration: time.Duration(elapsed.Milliseconds()),
RecordsCount: len(gather.result.records),
Errors: extractErrors(gather.result.errors),
}
if gather.canFail {
for _, err := range gather.result.errors |
} else {
errors = extractErrors(gather.result.errors)
}
errors = append(errors, recordStatusReport(rec, gather.result.records)...)
klog.V(5).Infof("Read from %s's channel and received %s\n", gather.name, gather.rvString)
return statusReport, errors
}
func recordStatusReport(rec recorder.Interface, records []record.Record) []string {
var errors []string
for _, r := range records {
if err := rec.Record(r); err != nil {
errors = append(errors, fmt.Sprintf("unable to record %s: %v", r.Name, err))
continue
}
}
return errors
}
// Runs each gather functions in a goroutine.
// Every gather function is given its own channel to send back the results.
// 1. return value: `cases` list, used for dynamically reading from the channels.
// 2. return value: `starts` list, contains that start time of each gather function.
func (g *Gatherer) startGathering(gatherList []string, errors *[]string) ([]reflect.SelectCase, []time.Time, error) {
var cases []reflect.SelectCase
var starts []time.Time
// Starts the gathers in Go routines
for _, gatherID := range gatherList {
gather, ok := gatherFunctions[gatherID]
gFn := gather.function
if !ok {
*errors = append(*errors, fmt.Sprintf("unknown gatherId in config: %s", gatherID))
continue
}
channel := make(chan gatherResult)
cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(channel)})
gatherName := runtime.FuncForPC(reflect.ValueOf(gFn).Pointer()).Name()
klog.V(5).Infof("Gathering %s", gatherName)
starts = append(starts, time.Now())
go gFn(g, channel)
if err := g.ctx.Err(); err != nil {
return nil, nil, err
}
}
return cases, starts, nil
}
func recordGatherReport(rec recorder.Interface, metadata gatherMetadata) error {
r := record.Record{Name: "insights-operator/gathers", Item: record.JSONMarshaller{Object: metadata}}
return rec.Record(r)
}
func extractErrors(errors []error) []string {
var errStrings []string
for _, err := range errors {
errStrings = append(errStrings, err.Error())
}
return errStrings
}
func sumErrors(errors []string) error {
sort.Strings(errors)
errors = utils.UniqueStrings(errors)
return fmt.Errorf("%s", strings.Join(errors, ", "))
}
func fullGatherList() []string {
gatherList := make([]string, 0, len(gatherFunctions))
for k := range gatherFunctions {
gatherList = append(gatherList, k)
}
return gatherList
}
| {
klog.V(5).Infof("Couldn't gather %s' received following error: %s\n", gather.name, err.Error())
} | conditional_block |
delegated_credentials.go | // Copyright 2020-2021 Cloudflare, Inc. All rights reserved. Use of this source code
// is governed by a BSD-style license that can be found in the LICENSE file.
package tls
// Delegated Credentials for TLS
// (https://tools.ietf.org/html/draft-ietf-tls-subcerts) is an IETF Internet
// draft and proposed TLS extension. If the client or server supports this
// extension, then the server or client may use a "delegated credential" as the
// signing key in the handshake. A delegated credential is a short lived
// public/secret key pair delegated to the peer by an entity trusted by the
// corresponding peer. This allows a reverse proxy to terminate a TLS connection
// on behalf of the entity. Credentials can't be revoked; in order to
// mitigate risk in case the reverse proxy is compromised, the credential is only
// valid for a short time (days, hours, or even minutes).
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"golang.org/x/crypto/cryptobyte"
)
const (
// In the absence of an application profile standard specifying otherwise,
// the maximum validity period is set to 7 days.
dcMaxTTLSeconds = 60 * 60 * 24 * 7
dcMaxTTL = time.Duration(dcMaxTTLSeconds * time.Second)
// Maximum serialized sizes of the credential's public key and signature.
// NOTE(review): the values suggest 24-bit and 16-bit length prefixes in
// the wire format — confirm against the (un)marshalling code.
dcMaxPubLen = (1 << 24) - 1 // Bytes
dcMaxSignatureLen = (1 << 16) - 1 // Bytes
)
const (
// undefinedSignatureScheme is the zero "not set" value for a SignatureScheme.
undefinedSignatureScheme SignatureScheme = 0x0000
)
// extensionDelegatedCredential is the OID (1.3.6.1.4.1.44363.44) of the
// X.509 DelegationUsage extension that marks a certificate as permitted to
// issue delegated credentials.
var extensionDelegatedCredential = []int{1, 3, 6, 1, 4, 1, 44363, 44}
| // The certificate must contains the digitalSignature KeyUsage.
if (cert.KeyUsage & x509.KeyUsageDigitalSignature) == 0 {
return false
}
// Check that the certificate has the DelegationUsage extension and that
// it's marked as non-critical (See Section 4.2 of RFC5280).
for _, extension := range cert.Extensions {
if extension.Id.Equal(extensionDelegatedCredential) {
if extension.Critical {
return false
}
return true
}
}
return false
}
// isExpired reports whether the credential's validity interval has ended.
// The interval ends at the delegator certificate's notBefore ('start') plus
// dc.cred.validTime; the credential counts as expired from that instant on.
func (dc *DelegatedCredential) isExpired(start, now time.Time) bool {
	expiry := start.Add(dc.cred.validTime)
	return now.Equal(expiry) || now.After(expiry)
}
// invalidTTL returns true if the credential's validity period is longer than the
// maximum permitted. This is defined by the certificate's notBefore field
// ('start') plus the dc.validTime, minus the current time ('now').
func (dc *DelegatedCredential) invalidTTL(start, now time.Time) bool {
// The latest permitted expiry is dcMaxTTL past 'now'; measured from
// 'start' that is (now - start) + dcMaxTTL. Rounding to whole seconds
// matches the second granularity of the encoded validTime.
return dc.cred.validTime > (now.Sub(start) + dcMaxTTL).Round(time.Second)
}
// credential stores the public components of a Delegated Credential.
type credential struct {
// The amount of time for which the credential is valid. Specifically, the
// the credential expires 'validTime' seconds after the 'notBefore' of the
// delegation certificate. The delegator shall not issue Delegated
// Credentials that are valid for more than 7 days from the current time.
//
// When this data structure is serialized, this value is converted to a
// uint32 representing the duration in seconds.
validTime time.Duration
// The signature scheme associated with the credential public key.
// This is expected to be the same as the CertificateVerify.algorithm
// sent by the client or server.
expCertVerfAlgo SignatureScheme
// The credential's public key.
publicKey crypto.PublicKey
}
// DelegatedCredential stores a Delegated Credential with the credential and its
// signature.
type DelegatedCredential struct {
// The serialized form of the Delegated Credential.
raw []byte
// Cred stores the public components of a Delegated Credential.
cred *credential
// The signature scheme used to sign the Delegated Credential.
algorithm SignatureScheme
// The Credential's delegation: a signature that binds the credential to
// the end-entity certificate's public key.
signature []byte
}
// marshalPublicKeyInfo returns a DER encoded PublicKeyInfo
// from a Delegated Credential (as defined in the X.509 standard).
// The following key types are currently supported: *ecdsa.PublicKey
// and ed25519.PublicKey. Unsupported key types result in an error.
// rsa.PublicKey is not supported as defined by the draft.
func (cred *credential) marshalPublicKeyInfo() ([]byte, error) {
switch cred.expCertVerfAlgo {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512,
Ed25519:
rawPub, err := x509.MarshalPKIXPublicKey(cred.publicKey)
if err != nil {
return nil, err
}
return rawPub, nil
default:
return nil, fmt.Errorf("tls: unsupported signature scheme: 0x%04x", cred.expCertVerfAlgo)
}
}
// marshal encodes the credential struct of the Delegated Credential.
func (cred *credential) marshal() ([]byte, error) {
var b cryptobyte.Builder
b.AddUint32(uint32(cred.validTime / time.Second))
b.AddUint16(uint16(cred.expCertVerfAlgo))
// Encode the public key
rawPub, err := cred.marshalPublicKeyInfo()
if err != nil {
return nil, err
}
// Assert that the public key encoding is no longer than 2^24-1 bytes.
if len(rawPub) > dcMaxPubLen {
return nil, errors.New("tls: public key length exceeds 2^24-1 limit")
}
b.AddUint24(uint32(len(rawPub)))
b.AddBytes(rawPub)
raw := b.BytesOrPanic()
return raw, nil
}
// unmarshalCredential decodes serialized bytes and returns a credential, if possible.
func unmarshalCredential(raw []byte) (*credential, error) {
if len(raw) < 10 {
return nil, errors.New("tls: Delegated Credential is not valid: invalid length")
}
s := cryptobyte.String(raw)
var t uint32
if !s.ReadUint32(&t) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
validTime := time.Duration(t) * time.Second
var pubAlgo uint16
if !s.ReadUint16(&pubAlgo) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
algo := SignatureScheme(pubAlgo)
var pubLen uint32
s.ReadUint24(&pubLen)
pubKey, err := x509.ParsePKIXPublicKey(s)
if err != nil {
return nil, err
}
return &credential{validTime, algo, pubKey}, nil
}
// getCredentialLen returns the number of bytes comprising the serialized
// credential struct inside the Delegated Credential.
func getCredentialLen(raw []byte) (int, error) {
if len(raw) < 10 {
return 0, errors.New("tls: Delegated Credential is not valid")
}
var read []byte
s := cryptobyte.String(raw)
s.ReadBytes(&read, 6)
var pubLen uint32
s.ReadUint24(&pubLen)
if !(pubLen > 0) {
return 0, errors.New("tls: Delegated Credential is not valid")
}
raw = raw[6:]
if len(raw) < int(pubLen) {
return 0, errors.New("tls: Delegated Credential is not valid")
}
return 9 + int(pubLen), nil
}
// getHash maps the SignatureScheme to its corresponding hash function.
func getHash(scheme SignatureScheme) crypto.Hash {
switch scheme {
case ECDSAWithP256AndSHA256:
return crypto.SHA256
case ECDSAWithP384AndSHA384:
return crypto.SHA384
case ECDSAWithP521AndSHA512:
return crypto.SHA512
case Ed25519:
return directSigning
case PKCS1WithSHA256, PSSWithSHA256:
return crypto.SHA256
case PSSWithSHA384:
return crypto.SHA384
case PSSWithSHA512:
return crypto.SHA512
default:
return 0 //Unknown hash function
}
}
// getECDSACurve maps the SignatureScheme to its corresponding ecdsa elliptic.Curve.
func getECDSACurve(scheme SignatureScheme) elliptic.Curve {
switch scheme {
case ECDSAWithP256AndSHA256:
return elliptic.P256()
case ECDSAWithP384AndSHA384:
return elliptic.P384()
case ECDSAWithP521AndSHA512:
return elliptic.P521()
default:
return nil
}
}
// prepareDelegationSignatureInput returns the message that the delegator is going to sign.
func prepareDelegationSignatureInput(hash crypto.Hash, cred *credential, dCert []byte, algo SignatureScheme, isClient bool) ([]byte, error) {
header := make([]byte, 64)
for i := range header {
header[i] = 0x20
}
var context string
if !isClient {
context = "TLS, server delegated credentials\x00"
} else {
context = "TLS, client delegated credentials\x00"
}
rawCred, err := cred.marshal()
if err != nil {
return nil, err
}
var rawAlgo [2]byte
binary.BigEndian.PutUint16(rawAlgo[:], uint16(algo))
if hash == directSigning {
b := &bytes.Buffer{}
b.Write(header)
io.WriteString(b, context)
b.Write(dCert)
b.Write(rawCred)
b.Write(rawAlgo[:])
return b.Bytes(), nil
}
h := hash.New()
h.Write(header)
io.WriteString(h, context)
h.Write(dCert)
h.Write(rawCred)
h.Write(rawAlgo[:])
return h.Sum(nil), nil
}
// Extract the algorithm used to sign the Delegated Credential from the
// end-entity (leaf) certificate.
func getSignatureAlgorithm(cert *Certificate) (SignatureScheme, error) {
switch sk := cert.PrivateKey.(type) {
case *ecdsa.PrivateKey:
pk := sk.Public().(*ecdsa.PublicKey)
curveName := pk.Curve.Params().Name
certAlg := cert.Leaf.PublicKeyAlgorithm
if certAlg == x509.ECDSA && curveName == "P-256" {
return ECDSAWithP256AndSHA256, nil
} else if certAlg == x509.ECDSA && curveName == "P-384" {
return ECDSAWithP384AndSHA384, nil
} else if certAlg == x509.ECDSA && curveName == "P-521" {
return ECDSAWithP521AndSHA512, nil
} else {
return undefinedSignatureScheme, fmt.Errorf("using curve %s for %s is not supported", curveName, cert.Leaf.SignatureAlgorithm)
}
case ed25519.PrivateKey:
return Ed25519, nil
case *rsa.PrivateKey:
// If the certificate has the RSAEncryption OID there are a number of valid signature schemes that may sign the DC.
// In the absence of better information, we make a reasonable choice.
return PSSWithSHA256, nil
default:
return undefinedSignatureScheme, fmt.Errorf("tls: unsupported algorithm for signing Delegated Credential")
}
}
// NewDelegatedCredential creates a new Delegated Credential using 'cert' for
// delegation, depending if the caller is the client or the server (defined by
// 'isClient'). It generates a public/private key pair for the provided signature
// algorithm ('pubAlgo') and it defines a validity interval (defined
// by 'cert.Leaf.notBefore' and 'validTime'). It signs the Delegated Credential
// using 'cert.PrivateKey'.
func NewDelegatedCredential(cert *Certificate, pubAlgo SignatureScheme, validTime time.Duration, isClient bool) (*DelegatedCredential, crypto.PrivateKey, error) {
// The granularity of DC validity is seconds.
validTime = validTime.Round(time.Second)
// Parse the leaf certificate if needed.
var err error
if cert.Leaf == nil {
if len(cert.Certificate[0]) == 0 {
return nil, nil, errors.New("tls: missing leaf certificate for Delegated Credential")
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, nil, err
}
}
// Check that the leaf certificate can be used for delegation.
if !isValidForDelegation(cert.Leaf) {
return nil, nil, errors.New("tls: certificate not authorized for delegation")
}
sigAlgo, err := getSignatureAlgorithm(cert)
if err != nil {
return nil, nil, err
}
// Generate the Delegated Credential key pair based on the provided scheme
var privK crypto.PrivateKey
var pubK crypto.PublicKey
switch pubAlgo {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512:
privK, err = ecdsa.GenerateKey(getECDSACurve(pubAlgo), rand.Reader)
if err != nil {
return nil, nil, err
}
pubK = privK.(*ecdsa.PrivateKey).Public()
case Ed25519:
pubK, privK, err = ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, nil, err
}
default:
return nil, nil, fmt.Errorf("tls: unsupported algorithm for Delegated Credential: %s", pubAlgo)
}
// Prepare the credential for signing
hash := getHash(sigAlgo)
credential := &credential{validTime, pubAlgo, pubK}
values, err := prepareDelegationSignatureInput(hash, credential, cert.Leaf.Raw, sigAlgo, isClient)
if err != nil {
return nil, nil, err
}
var sig []byte
switch sk := cert.PrivateKey.(type) {
case *ecdsa.PrivateKey:
opts := crypto.SignerOpts(hash)
sig, err = sk.Sign(rand.Reader, values, opts)
if err != nil {
return nil, nil, err
}
case ed25519.PrivateKey:
opts := crypto.SignerOpts(hash)
sig, err = sk.Sign(rand.Reader, values, opts)
if err != nil {
return nil, nil, err
}
case *rsa.PrivateKey:
opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash,
Hash: hash}
sig, err = rsa.SignPSS(rand.Reader, sk, hash, values, opts)
if err != nil {
return nil, nil, err
}
default:
return nil, nil, fmt.Errorf("tls: unsupported key type for Delegated Credential")
}
if len(sig) > dcMaxSignatureLen {
return nil, nil, errors.New("tls: unable to create a Delegated Credential")
}
return &DelegatedCredential{
cred: credential,
algorithm: sigAlgo,
signature: sig,
}, privK, nil
}
// Validate validates the Delegated Credential by checking that the signature is
// valid, that it hasn't expired, and that the TTL is valid. It also checks that
// certificate can be used for delegation.
func (dc *DelegatedCredential) Validate(cert *x509.Certificate, isClient bool, now time.Time, certVerifyMsg *certificateVerifyMsg) bool {
if dc.isExpired(cert.NotBefore, now) {
return false
}
if dc.invalidTTL(cert.NotBefore, now) {
return false
}
if dc.cred.expCertVerfAlgo != certVerifyMsg.signatureAlgorithm {
return false
}
if !isValidForDelegation(cert) {
return false
}
hash := getHash(dc.algorithm)
in, err := prepareDelegationSignatureInput(hash, dc.cred, cert.Raw, dc.algorithm, isClient)
if err != nil {
return false
}
switch dc.algorithm {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512:
pk, ok := cert.PublicKey.(*ecdsa.PublicKey)
if !ok {
return false
}
return ecdsa.VerifyASN1(pk, in, dc.signature)
case Ed25519:
pk, ok := cert.PublicKey.(ed25519.PublicKey)
if !ok {
return false
}
return ed25519.Verify(pk, in, dc.signature)
case PSSWithSHA256,
PSSWithSHA384,
PSSWithSHA512:
pk, ok := cert.PublicKey.(*rsa.PublicKey)
if !ok {
return false
}
hash := getHash(dc.algorithm)
return rsa.VerifyPSS(pk, hash, in, dc.signature, nil) == nil
default:
return false
}
}
// Marshal encodes a DelegatedCredential structure. It also sets dc.Raw to that
// encoding.
func (dc *DelegatedCredential) Marshal() ([]byte, error) {
if len(dc.signature) > dcMaxSignatureLen {
return nil, errors.New("tls: delegated credential is not valid")
}
if len(dc.signature) == 0 {
return nil, errors.New("tls: delegated credential has no signature")
}
raw, err := dc.cred.marshal()
if err != nil {
return nil, err
}
var b cryptobyte.Builder
b.AddBytes(raw)
b.AddUint16(uint16(dc.algorithm))
b.AddUint16(uint16(len(dc.signature)))
b.AddBytes(dc.signature)
dc.raw = b.BytesOrPanic()
return dc.raw, nil
}
// UnmarshalDelegatedCredential decodes a DelegatedCredential structure.
func UnmarshalDelegatedCredential(raw []byte) (*DelegatedCredential, error) {
rawCredentialLen, err := getCredentialLen(raw)
if err != nil {
return nil, err
}
credential, err := unmarshalCredential(raw[:rawCredentialLen])
if err != nil {
return nil, err
}
raw = raw[rawCredentialLen:]
if len(raw) < 4 {
return nil, errors.New("tls: Delegated Credential is not valid")
}
s := cryptobyte.String(raw)
var algo uint16
if !s.ReadUint16(&algo) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
var rawSignatureLen uint16
if !s.ReadUint16(&rawSignatureLen) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
var sig []byte
if !s.ReadBytes(&sig, int(rawSignatureLen)) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
return &DelegatedCredential{
cred: credential,
algorithm: SignatureScheme(algo),
signature: sig,
}, nil
} | // isValidForDelegation returns true if a certificate can be used for Delegated
// Credentials.
func isValidForDelegation(cert *x509.Certificate) bool {
// Check that the digitalSignature key usage is set. | random_line_split |
delegated_credentials.go | // Copyright 2020-2021 Cloudflare, Inc. All rights reserved. Use of this source code
// is governed by a BSD-style license that can be found in the LICENSE file.
package tls
// Delegated Credentials for TLS
// (https://tools.ietf.org/html/draft-ietf-tls-subcerts) is an IETF Internet
// draft and proposed TLS extension. If the client or server supports this
// extension, then the server or client may use a "delegated credential" as the
// signing key in the handshake. A delegated credential is a short lived
// public/secret key pair delegated to the peer by an entity trusted by the
// corresponding peer. This allows a reverse proxy to terminate a TLS connection
// on behalf of the entity. Credentials can't be revoked; in order to
// mitigate risk in case the reverse proxy is compromised, the credential is only
// valid for a short time (days, hours, or even minutes).
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"golang.org/x/crypto/cryptobyte"
)
const (
// In the absence of an application profile standard specifying otherwise,
// the maximum validity period is set to 7 days.
dcMaxTTLSeconds = 60 * 60 * 24 * 7
dcMaxTTL = time.Duration(dcMaxTTLSeconds * time.Second)
dcMaxPubLen = (1 << 24) - 1 // Bytes
dcMaxSignatureLen = (1 << 16) - 1 // Bytes
)
const (
undefinedSignatureScheme SignatureScheme = 0x0000
)
var extensionDelegatedCredential = []int{1, 3, 6, 1, 4, 1, 44363, 44}
// isValidForDelegation returns true if a certificate can be used for Delegated
// Credentials.
func isValidForDelegation(cert *x509.Certificate) bool {
// Check that the digitalSignature key usage is set.
// The certificate must contains the digitalSignature KeyUsage.
if (cert.KeyUsage & x509.KeyUsageDigitalSignature) == 0 {
return false
}
// Check that the certificate has the DelegationUsage extension and that
// it's marked as non-critical (See Section 4.2 of RFC5280).
for _, extension := range cert.Extensions {
if extension.Id.Equal(extensionDelegatedCredential) {
if extension.Critical {
return false
}
return true
}
}
return false
}
// isExpired returns true if the credential has expired. The end of the validity
// interval is defined as the delegator certificate's notBefore field ('start')
// plus dc.cred.validTime seconds. This function simply checks that the current time
// ('now') is before the end of the validity interval.
func (dc *DelegatedCredential) isExpired(start, now time.Time) bool {
end := start.Add(dc.cred.validTime)
return !now.Before(end)
}
// invalidTTL returns true if the credential's validity period is longer than the
// maximum permitted. This is defined by the certificate's notBefore field
// ('start') plus the dc.validTime, minus the current time ('now').
func (dc *DelegatedCredential) invalidTTL(start, now time.Time) bool {
return dc.cred.validTime > (now.Sub(start) + dcMaxTTL).Round(time.Second)
}
// credential stores the public components of a Delegated Credential.
type credential struct {
// The amount of time for which the credential is valid. Specifically, the
// the credential expires 'validTime' seconds after the 'notBefore' of the
// delegation certificate. The delegator shall not issue Delegated
// Credentials that are valid for more than 7 days from the current time.
//
// When this data structure is serialized, this value is converted to a
// uint32 representing the duration in seconds.
validTime time.Duration
// The signature scheme associated with the credential public key.
// This is expected to be the same as the CertificateVerify.algorithm
// sent by the client or server.
expCertVerfAlgo SignatureScheme
// The credential's public key.
publicKey crypto.PublicKey
}
// DelegatedCredential stores a Delegated Credential with the credential and its
// signature.
type DelegatedCredential struct {
// The serialized form of the Delegated Credential.
raw []byte
// Cred stores the public components of a Delegated Credential.
cred *credential
// The signature scheme used to sign the Delegated Credential.
algorithm SignatureScheme
// The Credential's delegation: a signature that binds the credential to
// the end-entity certificate's public key.
signature []byte
}
// marshalPublicKeyInfo returns a DER encoded PublicKeyInfo
// from a Delegated Credential (as defined in the X.509 standard).
// The following key types are currently supported: *ecdsa.PublicKey
// and ed25519.PublicKey. Unsupported key types result in an error.
// rsa.PublicKey is not supported as defined by the draft.
func (cred *credential) marshalPublicKeyInfo() ([]byte, error) {
switch cred.expCertVerfAlgo {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512,
Ed25519:
rawPub, err := x509.MarshalPKIXPublicKey(cred.publicKey)
if err != nil {
return nil, err
}
return rawPub, nil
default:
return nil, fmt.Errorf("tls: unsupported signature scheme: 0x%04x", cred.expCertVerfAlgo)
}
}
// marshal encodes the credential struct of the Delegated Credential.
func (cred *credential) marshal() ([]byte, error) {
var b cryptobyte.Builder
b.AddUint32(uint32(cred.validTime / time.Second))
b.AddUint16(uint16(cred.expCertVerfAlgo))
// Encode the public key
rawPub, err := cred.marshalPublicKeyInfo()
if err != nil {
return nil, err
}
// Assert that the public key encoding is no longer than 2^24-1 bytes.
if len(rawPub) > dcMaxPubLen {
return nil, errors.New("tls: public key length exceeds 2^24-1 limit")
}
b.AddUint24(uint32(len(rawPub)))
b.AddBytes(rawPub)
raw := b.BytesOrPanic()
return raw, nil
}
// unmarshalCredential decodes serialized bytes and returns a credential, if possible.
func unmarshalCredential(raw []byte) (*credential, error) {
if len(raw) < 10 {
return nil, errors.New("tls: Delegated Credential is not valid: invalid length")
}
s := cryptobyte.String(raw)
var t uint32
if !s.ReadUint32(&t) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
validTime := time.Duration(t) * time.Second
var pubAlgo uint16
if !s.ReadUint16(&pubAlgo) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
algo := SignatureScheme(pubAlgo)
var pubLen uint32
s.ReadUint24(&pubLen)
pubKey, err := x509.ParsePKIXPublicKey(s)
if err != nil {
return nil, err
}
return &credential{validTime, algo, pubKey}, nil
}
// getCredentialLen returns the number of bytes comprising the serialized
// credential struct inside the Delegated Credential.
func getCredentialLen(raw []byte) (int, error) {
if len(raw) < 10 {
return 0, errors.New("tls: Delegated Credential is not valid")
}
var read []byte
s := cryptobyte.String(raw)
s.ReadBytes(&read, 6)
var pubLen uint32
s.ReadUint24(&pubLen)
if !(pubLen > 0) {
return 0, errors.New("tls: Delegated Credential is not valid")
}
raw = raw[6:]
if len(raw) < int(pubLen) {
return 0, errors.New("tls: Delegated Credential is not valid")
}
return 9 + int(pubLen), nil
}
// getHash maps the SignatureScheme to its corresponding hash function.
func getHash(scheme SignatureScheme) crypto.Hash {
switch scheme {
case ECDSAWithP256AndSHA256:
return crypto.SHA256
case ECDSAWithP384AndSHA384:
return crypto.SHA384
case ECDSAWithP521AndSHA512:
return crypto.SHA512
case Ed25519:
return directSigning
case PKCS1WithSHA256, PSSWithSHA256:
return crypto.SHA256
case PSSWithSHA384:
return crypto.SHA384
case PSSWithSHA512:
return crypto.SHA512
default:
return 0 //Unknown hash function
}
}
// getECDSACurve maps the SignatureScheme to its corresponding ecdsa elliptic.Curve.
func getECDSACurve(scheme SignatureScheme) elliptic.Curve {
switch scheme {
case ECDSAWithP256AndSHA256:
return elliptic.P256()
case ECDSAWithP384AndSHA384:
return elliptic.P384()
case ECDSAWithP521AndSHA512:
return elliptic.P521()
default:
return nil
}
}
// prepareDelegationSignatureInput returns the message that the delegator is going to sign.
func prepareDelegationSignatureInput(hash crypto.Hash, cred *credential, dCert []byte, algo SignatureScheme, isClient bool) ([]byte, error) {
header := make([]byte, 64)
for i := range header {
header[i] = 0x20
}
var context string
if !isClient {
context = "TLS, server delegated credentials\x00"
} else {
context = "TLS, client delegated credentials\x00"
}
rawCred, err := cred.marshal()
if err != nil {
return nil, err
}
var rawAlgo [2]byte
binary.BigEndian.PutUint16(rawAlgo[:], uint16(algo))
if hash == directSigning {
b := &bytes.Buffer{}
b.Write(header)
io.WriteString(b, context)
b.Write(dCert)
b.Write(rawCred)
b.Write(rawAlgo[:])
return b.Bytes(), nil
}
h := hash.New()
h.Write(header)
io.WriteString(h, context)
h.Write(dCert)
h.Write(rawCred)
h.Write(rawAlgo[:])
return h.Sum(nil), nil
}
// Extract the algorithm used to sign the Delegated Credential from the
// end-entity (leaf) certificate.
func getSignatureAlgorithm(cert *Certificate) (SignatureScheme, error) {
switch sk := cert.PrivateKey.(type) {
case *ecdsa.PrivateKey:
pk := sk.Public().(*ecdsa.PublicKey)
curveName := pk.Curve.Params().Name
certAlg := cert.Leaf.PublicKeyAlgorithm
if certAlg == x509.ECDSA && curveName == "P-256" {
return ECDSAWithP256AndSHA256, nil
} else if certAlg == x509.ECDSA && curveName == "P-384" {
return ECDSAWithP384AndSHA384, nil
} else if certAlg == x509.ECDSA && curveName == "P-521" {
return ECDSAWithP521AndSHA512, nil
} else {
return undefinedSignatureScheme, fmt.Errorf("using curve %s for %s is not supported", curveName, cert.Leaf.SignatureAlgorithm)
}
case ed25519.PrivateKey:
return Ed25519, nil
case *rsa.PrivateKey:
// If the certificate has the RSAEncryption OID there are a number of valid signature schemes that may sign the DC.
// In the absence of better information, we make a reasonable choice.
return PSSWithSHA256, nil
default:
return undefinedSignatureScheme, fmt.Errorf("tls: unsupported algorithm for signing Delegated Credential")
}
}
// NewDelegatedCredential creates a new Delegated Credential using 'cert' for
// delegation, depending if the caller is the client or the server (defined by
// 'isClient'). It generates a public/private key pair for the provided signature
// algorithm ('pubAlgo') and it defines a validity interval (defined
// by 'cert.Leaf.notBefore' and 'validTime'). It signs the Delegated Credential
// using 'cert.PrivateKey'.
func NewDelegatedCredential(cert *Certificate, pubAlgo SignatureScheme, validTime time.Duration, isClient bool) (*DelegatedCredential, crypto.PrivateKey, error) {
// The granularity of DC validity is seconds.
validTime = validTime.Round(time.Second)
// Parse the leaf certificate if needed.
var err error
if cert.Leaf == nil {
if len(cert.Certificate[0]) == 0 {
return nil, nil, errors.New("tls: missing leaf certificate for Delegated Credential")
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, nil, err
}
}
// Check that the leaf certificate can be used for delegation.
if !isValidForDelegation(cert.Leaf) {
return nil, nil, errors.New("tls: certificate not authorized for delegation")
}
sigAlgo, err := getSignatureAlgorithm(cert)
if err != nil {
return nil, nil, err
}
// Generate the Delegated Credential key pair based on the provided scheme
var privK crypto.PrivateKey
var pubK crypto.PublicKey
switch pubAlgo {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512:
privK, err = ecdsa.GenerateKey(getECDSACurve(pubAlgo), rand.Reader)
if err != nil {
return nil, nil, err
}
pubK = privK.(*ecdsa.PrivateKey).Public()
case Ed25519:
pubK, privK, err = ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, nil, err
}
default:
return nil, nil, fmt.Errorf("tls: unsupported algorithm for Delegated Credential: %s", pubAlgo)
}
// Prepare the credential for signing
hash := getHash(sigAlgo)
credential := &credential{validTime, pubAlgo, pubK}
values, err := prepareDelegationSignatureInput(hash, credential, cert.Leaf.Raw, sigAlgo, isClient)
if err != nil {
return nil, nil, err
}
var sig []byte
switch sk := cert.PrivateKey.(type) {
case *ecdsa.PrivateKey:
opts := crypto.SignerOpts(hash)
sig, err = sk.Sign(rand.Reader, values, opts)
if err != nil {
return nil, nil, err
}
case ed25519.PrivateKey:
opts := crypto.SignerOpts(hash)
sig, err = sk.Sign(rand.Reader, values, opts)
if err != nil {
return nil, nil, err
}
case *rsa.PrivateKey:
opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash,
Hash: hash}
sig, err = rsa.SignPSS(rand.Reader, sk, hash, values, opts)
if err != nil {
return nil, nil, err
}
default:
return nil, nil, fmt.Errorf("tls: unsupported key type for Delegated Credential")
}
if len(sig) > dcMaxSignatureLen {
return nil, nil, errors.New("tls: unable to create a Delegated Credential")
}
return &DelegatedCredential{
cred: credential,
algorithm: sigAlgo,
signature: sig,
}, privK, nil
}
// Validate validates the Delegated Credential by checking that the signature is
// valid, that it hasn't expired, and that the TTL is valid. It also checks that
// certificate can be used for delegation.
func (dc *DelegatedCredential) Validate(cert *x509.Certificate, isClient bool, now time.Time, certVerifyMsg *certificateVerifyMsg) bool {
if dc.isExpired(cert.NotBefore, now) {
return false
}
if dc.invalidTTL(cert.NotBefore, now) {
return false
}
if dc.cred.expCertVerfAlgo != certVerifyMsg.signatureAlgorithm {
return false
}
if !isValidForDelegation(cert) {
return false
}
hash := getHash(dc.algorithm)
in, err := prepareDelegationSignatureInput(hash, dc.cred, cert.Raw, dc.algorithm, isClient)
if err != nil {
return false
}
switch dc.algorithm {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512:
pk, ok := cert.PublicKey.(*ecdsa.PublicKey)
if !ok {
return false
}
return ecdsa.VerifyASN1(pk, in, dc.signature)
case Ed25519:
pk, ok := cert.PublicKey.(ed25519.PublicKey)
if !ok {
return false
}
return ed25519.Verify(pk, in, dc.signature)
case PSSWithSHA256,
PSSWithSHA384,
PSSWithSHA512:
pk, ok := cert.PublicKey.(*rsa.PublicKey)
if !ok {
return false
}
hash := getHash(dc.algorithm)
return rsa.VerifyPSS(pk, hash, in, dc.signature, nil) == nil
default:
return false
}
}
// Marshal encodes a DelegatedCredential structure. It also sets dc.Raw to that
// encoding.
func (dc *DelegatedCredential) Marshal() ([]byte, error) {
if len(dc.signature) > dcMaxSignatureLen {
return nil, errors.New("tls: delegated credential is not valid")
}
if len(dc.signature) == 0 {
return nil, errors.New("tls: delegated credential has no signature")
}
raw, err := dc.cred.marshal()
if err != nil {
return nil, err
}
var b cryptobyte.Builder
b.AddBytes(raw)
b.AddUint16(uint16(dc.algorithm))
b.AddUint16(uint16(len(dc.signature)))
b.AddBytes(dc.signature)
dc.raw = b.BytesOrPanic()
return dc.raw, nil
}
// UnmarshalDelegatedCredential decodes a DelegatedCredential structure.
func UnmarshalDelegatedCredential(raw []byte) (*DelegatedCredential, error) {
rawCredentialLen, err := getCredentialLen(raw)
if err != nil {
return nil, err
}
credential, err := unmarshalCredential(raw[:rawCredentialLen])
if err != nil {
return nil, err
}
raw = raw[rawCredentialLen:]
if len(raw) < 4 {
return nil, errors.New("tls: Delegated Credential is not valid")
}
s := cryptobyte.String(raw)
var algo uint16
if !s.ReadUint16(&algo) |
var rawSignatureLen uint16
if !s.ReadUint16(&rawSignatureLen) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
var sig []byte
if !s.ReadBytes(&sig, int(rawSignatureLen)) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
return &DelegatedCredential{
cred: credential,
algorithm: SignatureScheme(algo),
signature: sig,
}, nil
}
| {
return nil, errors.New("tls: Delegated Credential is not valid")
} | conditional_block |
delegated_credentials.go | // Copyright 2020-2021 Cloudflare, Inc. All rights reserved. Use of this source code
// is governed by a BSD-style license that can be found in the LICENSE file.
package tls
// Delegated Credentials for TLS
// (https://tools.ietf.org/html/draft-ietf-tls-subcerts) is an IETF Internet
// draft and proposed TLS extension. If the client or server supports this
// extension, then the server or client may use a "delegated credential" as the
// signing key in the handshake. A delegated credential is a short lived
// public/secret key pair delegated to the peer by an entity trusted by the
// corresponding peer. This allows a reverse proxy to terminate a TLS connection
// on behalf of the entity. Credentials can't be revoked; in order to
// mitigate risk in case the reverse proxy is compromised, the credential is only
// valid for a short time (days, hours, or even minutes).
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"golang.org/x/crypto/cryptobyte"
)
const (
// In the absence of an application profile standard specifying otherwise,
// the maximum validity period is set to 7 days.
dcMaxTTLSeconds = 60 * 60 * 24 * 7
dcMaxTTL = time.Duration(dcMaxTTLSeconds * time.Second)
dcMaxPubLen = (1 << 24) - 1 // Bytes
dcMaxSignatureLen = (1 << 16) - 1 // Bytes
)
const (
undefinedSignatureScheme SignatureScheme = 0x0000
)
var extensionDelegatedCredential = []int{1, 3, 6, 1, 4, 1, 44363, 44}
// isValidForDelegation returns true if a certificate can be used for Delegated
// Credentials.
func isValidForDelegation(cert *x509.Certificate) bool {
// Check that the digitalSignature key usage is set.
// The certificate must contains the digitalSignature KeyUsage.
if (cert.KeyUsage & x509.KeyUsageDigitalSignature) == 0 {
return false
}
// Check that the certificate has the DelegationUsage extension and that
// it's marked as non-critical (See Section 4.2 of RFC5280).
for _, extension := range cert.Extensions {
if extension.Id.Equal(extensionDelegatedCredential) {
if extension.Critical {
return false
}
return true
}
}
return false
}
// isExpired returns true if the credential has expired. The end of the validity
// interval is defined as the delegator certificate's notBefore field ('start')
// plus dc.cred.validTime seconds. This function simply checks that the current time
// ('now') is before the end of the validity interval.
func (dc *DelegatedCredential) isExpired(start, now time.Time) bool {
end := start.Add(dc.cred.validTime)
return !now.Before(end)
}
// invalidTTL returns true if the credential's validity period is longer than the
// maximum permitted. This is defined by the certificate's notBefore field
// ('start') plus the dc.validTime, minus the current time ('now').
func (dc *DelegatedCredential) invalidTTL(start, now time.Time) bool |
// credential stores the public components of a Delegated Credential.
type credential struct {
// The amount of time for which the credential is valid. Specifically, the
// the credential expires 'validTime' seconds after the 'notBefore' of the
// delegation certificate. The delegator shall not issue Delegated
// Credentials that are valid for more than 7 days from the current time.
//
// When this data structure is serialized, this value is converted to a
// uint32 representing the duration in seconds.
validTime time.Duration
// The signature scheme associated with the credential public key.
// This is expected to be the same as the CertificateVerify.algorithm
// sent by the client or server.
expCertVerfAlgo SignatureScheme
// The credential's public key.
publicKey crypto.PublicKey
}
// DelegatedCredential stores a Delegated Credential with the credential and its
// signature.
type DelegatedCredential struct {
// The serialized form of the Delegated Credential.
raw []byte
// Cred stores the public components of a Delegated Credential.
cred *credential
// The signature scheme used to sign the Delegated Credential.
algorithm SignatureScheme
// The Credential's delegation: a signature that binds the credential to
// the end-entity certificate's public key.
signature []byte
}
// marshalPublicKeyInfo returns a DER encoded PublicKeyInfo
// from a Delegated Credential (as defined in the X.509 standard).
// The following key types are currently supported: *ecdsa.PublicKey
// and ed25519.PublicKey. Unsupported key types result in an error.
// rsa.PublicKey is not supported as defined by the draft.
func (cred *credential) marshalPublicKeyInfo() ([]byte, error) {
switch cred.expCertVerfAlgo {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512,
Ed25519:
rawPub, err := x509.MarshalPKIXPublicKey(cred.publicKey)
if err != nil {
return nil, err
}
return rawPub, nil
default:
return nil, fmt.Errorf("tls: unsupported signature scheme: 0x%04x", cred.expCertVerfAlgo)
}
}
// marshal encodes the credential struct of the Delegated Credential.
func (cred *credential) marshal() ([]byte, error) {
var b cryptobyte.Builder
b.AddUint32(uint32(cred.validTime / time.Second))
b.AddUint16(uint16(cred.expCertVerfAlgo))
// Encode the public key
rawPub, err := cred.marshalPublicKeyInfo()
if err != nil {
return nil, err
}
// Assert that the public key encoding is no longer than 2^24-1 bytes.
if len(rawPub) > dcMaxPubLen {
return nil, errors.New("tls: public key length exceeds 2^24-1 limit")
}
b.AddUint24(uint32(len(rawPub)))
b.AddBytes(rawPub)
raw := b.BytesOrPanic()
return raw, nil
}
// unmarshalCredential decodes serialized bytes and returns a credential, if possible.
func unmarshalCredential(raw []byte) (*credential, error) {
if len(raw) < 10 {
return nil, errors.New("tls: Delegated Credential is not valid: invalid length")
}
s := cryptobyte.String(raw)
var t uint32
if !s.ReadUint32(&t) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
validTime := time.Duration(t) * time.Second
var pubAlgo uint16
if !s.ReadUint16(&pubAlgo) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
algo := SignatureScheme(pubAlgo)
var pubLen uint32
s.ReadUint24(&pubLen)
pubKey, err := x509.ParsePKIXPublicKey(s)
if err != nil {
return nil, err
}
return &credential{validTime, algo, pubKey}, nil
}
// getCredentialLen returns the number of bytes comprising the serialized
// credential struct inside the Delegated Credential.
func getCredentialLen(raw []byte) (int, error) {
if len(raw) < 10 {
return 0, errors.New("tls: Delegated Credential is not valid")
}
var read []byte
s := cryptobyte.String(raw)
s.ReadBytes(&read, 6)
var pubLen uint32
s.ReadUint24(&pubLen)
if !(pubLen > 0) {
return 0, errors.New("tls: Delegated Credential is not valid")
}
raw = raw[6:]
if len(raw) < int(pubLen) {
return 0, errors.New("tls: Delegated Credential is not valid")
}
return 9 + int(pubLen), nil
}
// getHash maps the SignatureScheme to its corresponding hash function.
func getHash(scheme SignatureScheme) crypto.Hash {
switch scheme {
case ECDSAWithP256AndSHA256:
return crypto.SHA256
case ECDSAWithP384AndSHA384:
return crypto.SHA384
case ECDSAWithP521AndSHA512:
return crypto.SHA512
case Ed25519:
return directSigning
case PKCS1WithSHA256, PSSWithSHA256:
return crypto.SHA256
case PSSWithSHA384:
return crypto.SHA384
case PSSWithSHA512:
return crypto.SHA512
default:
return 0 //Unknown hash function
}
}
// getECDSACurve maps the SignatureScheme to its corresponding ecdsa elliptic.Curve.
func getECDSACurve(scheme SignatureScheme) elliptic.Curve {
switch scheme {
case ECDSAWithP256AndSHA256:
return elliptic.P256()
case ECDSAWithP384AndSHA384:
return elliptic.P384()
case ECDSAWithP521AndSHA512:
return elliptic.P521()
default:
return nil
}
}
// prepareDelegationSignatureInput returns the message that the delegator is going to sign.
func prepareDelegationSignatureInput(hash crypto.Hash, cred *credential, dCert []byte, algo SignatureScheme, isClient bool) ([]byte, error) {
header := make([]byte, 64)
for i := range header {
header[i] = 0x20
}
var context string
if !isClient {
context = "TLS, server delegated credentials\x00"
} else {
context = "TLS, client delegated credentials\x00"
}
rawCred, err := cred.marshal()
if err != nil {
return nil, err
}
var rawAlgo [2]byte
binary.BigEndian.PutUint16(rawAlgo[:], uint16(algo))
if hash == directSigning {
b := &bytes.Buffer{}
b.Write(header)
io.WriteString(b, context)
b.Write(dCert)
b.Write(rawCred)
b.Write(rawAlgo[:])
return b.Bytes(), nil
}
h := hash.New()
h.Write(header)
io.WriteString(h, context)
h.Write(dCert)
h.Write(rawCred)
h.Write(rawAlgo[:])
return h.Sum(nil), nil
}
// Extract the algorithm used to sign the Delegated Credential from the
// end-entity (leaf) certificate.
func getSignatureAlgorithm(cert *Certificate) (SignatureScheme, error) {
switch sk := cert.PrivateKey.(type) {
case *ecdsa.PrivateKey:
pk := sk.Public().(*ecdsa.PublicKey)
curveName := pk.Curve.Params().Name
certAlg := cert.Leaf.PublicKeyAlgorithm
if certAlg == x509.ECDSA && curveName == "P-256" {
return ECDSAWithP256AndSHA256, nil
} else if certAlg == x509.ECDSA && curveName == "P-384" {
return ECDSAWithP384AndSHA384, nil
} else if certAlg == x509.ECDSA && curveName == "P-521" {
return ECDSAWithP521AndSHA512, nil
} else {
return undefinedSignatureScheme, fmt.Errorf("using curve %s for %s is not supported", curveName, cert.Leaf.SignatureAlgorithm)
}
case ed25519.PrivateKey:
return Ed25519, nil
case *rsa.PrivateKey:
// If the certificate has the RSAEncryption OID there are a number of valid signature schemes that may sign the DC.
// In the absence of better information, we make a reasonable choice.
return PSSWithSHA256, nil
default:
return undefinedSignatureScheme, fmt.Errorf("tls: unsupported algorithm for signing Delegated Credential")
}
}
// NewDelegatedCredential creates a new Delegated Credential using 'cert' for
// delegation, depending if the caller is the client or the server (defined by
// 'isClient'). It generates a public/private key pair for the provided signature
// algorithm ('pubAlgo') and it defines a validity interval (defined
// by 'cert.Leaf.notBefore' and 'validTime'). It signs the Delegated Credential
// using 'cert.PrivateKey'.
func NewDelegatedCredential(cert *Certificate, pubAlgo SignatureScheme, validTime time.Duration, isClient bool) (*DelegatedCredential, crypto.PrivateKey, error) {
// The granularity of DC validity is seconds.
validTime = validTime.Round(time.Second)
// Parse the leaf certificate if needed.
var err error
if cert.Leaf == nil {
if len(cert.Certificate[0]) == 0 {
return nil, nil, errors.New("tls: missing leaf certificate for Delegated Credential")
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, nil, err
}
}
// Check that the leaf certificate can be used for delegation.
if !isValidForDelegation(cert.Leaf) {
return nil, nil, errors.New("tls: certificate not authorized for delegation")
}
sigAlgo, err := getSignatureAlgorithm(cert)
if err != nil {
return nil, nil, err
}
// Generate the Delegated Credential key pair based on the provided scheme
var privK crypto.PrivateKey
var pubK crypto.PublicKey
switch pubAlgo {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512:
privK, err = ecdsa.GenerateKey(getECDSACurve(pubAlgo), rand.Reader)
if err != nil {
return nil, nil, err
}
pubK = privK.(*ecdsa.PrivateKey).Public()
case Ed25519:
pubK, privK, err = ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, nil, err
}
default:
return nil, nil, fmt.Errorf("tls: unsupported algorithm for Delegated Credential: %s", pubAlgo)
}
// Prepare the credential for signing
hash := getHash(sigAlgo)
credential := &credential{validTime, pubAlgo, pubK}
values, err := prepareDelegationSignatureInput(hash, credential, cert.Leaf.Raw, sigAlgo, isClient)
if err != nil {
return nil, nil, err
}
var sig []byte
switch sk := cert.PrivateKey.(type) {
case *ecdsa.PrivateKey:
opts := crypto.SignerOpts(hash)
sig, err = sk.Sign(rand.Reader, values, opts)
if err != nil {
return nil, nil, err
}
case ed25519.PrivateKey:
opts := crypto.SignerOpts(hash)
sig, err = sk.Sign(rand.Reader, values, opts)
if err != nil {
return nil, nil, err
}
case *rsa.PrivateKey:
opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash,
Hash: hash}
sig, err = rsa.SignPSS(rand.Reader, sk, hash, values, opts)
if err != nil {
return nil, nil, err
}
default:
return nil, nil, fmt.Errorf("tls: unsupported key type for Delegated Credential")
}
if len(sig) > dcMaxSignatureLen {
return nil, nil, errors.New("tls: unable to create a Delegated Credential")
}
return &DelegatedCredential{
cred: credential,
algorithm: sigAlgo,
signature: sig,
}, privK, nil
}
// Validate validates the Delegated Credential by checking that the signature is
// valid, that it hasn't expired, and that the TTL is valid. It also checks that
// certificate can be used for delegation.
func (dc *DelegatedCredential) Validate(cert *x509.Certificate, isClient bool, now time.Time, certVerifyMsg *certificateVerifyMsg) bool {
if dc.isExpired(cert.NotBefore, now) {
return false
}
if dc.invalidTTL(cert.NotBefore, now) {
return false
}
if dc.cred.expCertVerfAlgo != certVerifyMsg.signatureAlgorithm {
return false
}
if !isValidForDelegation(cert) {
return false
}
hash := getHash(dc.algorithm)
in, err := prepareDelegationSignatureInput(hash, dc.cred, cert.Raw, dc.algorithm, isClient)
if err != nil {
return false
}
switch dc.algorithm {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512:
pk, ok := cert.PublicKey.(*ecdsa.PublicKey)
if !ok {
return false
}
return ecdsa.VerifyASN1(pk, in, dc.signature)
case Ed25519:
pk, ok := cert.PublicKey.(ed25519.PublicKey)
if !ok {
return false
}
return ed25519.Verify(pk, in, dc.signature)
case PSSWithSHA256,
PSSWithSHA384,
PSSWithSHA512:
pk, ok := cert.PublicKey.(*rsa.PublicKey)
if !ok {
return false
}
hash := getHash(dc.algorithm)
return rsa.VerifyPSS(pk, hash, in, dc.signature, nil) == nil
default:
return false
}
}
// Marshal encodes a DelegatedCredential structure. It also sets dc.Raw to that
// encoding.
func (dc *DelegatedCredential) Marshal() ([]byte, error) {
if len(dc.signature) > dcMaxSignatureLen {
return nil, errors.New("tls: delegated credential is not valid")
}
if len(dc.signature) == 0 {
return nil, errors.New("tls: delegated credential has no signature")
}
raw, err := dc.cred.marshal()
if err != nil {
return nil, err
}
var b cryptobyte.Builder
b.AddBytes(raw)
b.AddUint16(uint16(dc.algorithm))
b.AddUint16(uint16(len(dc.signature)))
b.AddBytes(dc.signature)
dc.raw = b.BytesOrPanic()
return dc.raw, nil
}
// UnmarshalDelegatedCredential decodes a DelegatedCredential structure.
func UnmarshalDelegatedCredential(raw []byte) (*DelegatedCredential, error) {
rawCredentialLen, err := getCredentialLen(raw)
if err != nil {
return nil, err
}
credential, err := unmarshalCredential(raw[:rawCredentialLen])
if err != nil {
return nil, err
}
raw = raw[rawCredentialLen:]
if len(raw) < 4 {
return nil, errors.New("tls: Delegated Credential is not valid")
}
s := cryptobyte.String(raw)
var algo uint16
if !s.ReadUint16(&algo) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
var rawSignatureLen uint16
if !s.ReadUint16(&rawSignatureLen) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
var sig []byte
if !s.ReadBytes(&sig, int(rawSignatureLen)) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
return &DelegatedCredential{
cred: credential,
algorithm: SignatureScheme(algo),
signature: sig,
}, nil
}
| {
return dc.cred.validTime > (now.Sub(start) + dcMaxTTL).Round(time.Second)
} | identifier_body |
delegated_credentials.go | // Copyright 2020-2021 Cloudflare, Inc. All rights reserved. Use of this source code
// is governed by a BSD-style license that can be found in the LICENSE file.
package tls
// Delegated Credentials for TLS
// (https://tools.ietf.org/html/draft-ietf-tls-subcerts) is an IETF Internet
// draft and proposed TLS extension. If the client or server supports this
// extension, then the server or client may use a "delegated credential" as the
// signing key in the handshake. A delegated credential is a short lived
// public/secret key pair delegated to the peer by an entity trusted by the
// corresponding peer. This allows a reverse proxy to terminate a TLS connection
// on behalf of the entity. Credentials can't be revoked; in order to
// mitigate risk in case the reverse proxy is compromised, the credential is only
// valid for a short time (days, hours, or even minutes).
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"golang.org/x/crypto/cryptobyte"
)
const (
// In the absence of an application profile standard specifying otherwise,
// the maximum validity period is set to 7 days.
dcMaxTTLSeconds = 60 * 60 * 24 * 7
dcMaxTTL = time.Duration(dcMaxTTLSeconds * time.Second)
dcMaxPubLen = (1 << 24) - 1 // Bytes
dcMaxSignatureLen = (1 << 16) - 1 // Bytes
)
const (
undefinedSignatureScheme SignatureScheme = 0x0000
)
var extensionDelegatedCredential = []int{1, 3, 6, 1, 4, 1, 44363, 44}
// isValidForDelegation returns true if a certificate can be used for Delegated
// Credentials.
func isValidForDelegation(cert *x509.Certificate) bool {
// Check that the digitalSignature key usage is set.
// The certificate must contains the digitalSignature KeyUsage.
if (cert.KeyUsage & x509.KeyUsageDigitalSignature) == 0 {
return false
}
// Check that the certificate has the DelegationUsage extension and that
// it's marked as non-critical (See Section 4.2 of RFC5280).
for _, extension := range cert.Extensions {
if extension.Id.Equal(extensionDelegatedCredential) {
if extension.Critical {
return false
}
return true
}
}
return false
}
// isExpired returns true if the credential has expired. The end of the validity
// interval is defined as the delegator certificate's notBefore field ('start')
// plus dc.cred.validTime seconds. This function simply checks that the current time
// ('now') is before the end of the validity interval.
func (dc *DelegatedCredential) isExpired(start, now time.Time) bool {
end := start.Add(dc.cred.validTime)
return !now.Before(end)
}
// invalidTTL returns true if the credential's validity period is longer than the
// maximum permitted. This is defined by the certificate's notBefore field
// ('start') plus the dc.validTime, minus the current time ('now').
func (dc *DelegatedCredential) | (start, now time.Time) bool {
return dc.cred.validTime > (now.Sub(start) + dcMaxTTL).Round(time.Second)
}
// credential stores the public components of a Delegated Credential.
type credential struct {
// The amount of time for which the credential is valid. Specifically, the
// the credential expires 'validTime' seconds after the 'notBefore' of the
// delegation certificate. The delegator shall not issue Delegated
// Credentials that are valid for more than 7 days from the current time.
//
// When this data structure is serialized, this value is converted to a
// uint32 representing the duration in seconds.
validTime time.Duration
// The signature scheme associated with the credential public key.
// This is expected to be the same as the CertificateVerify.algorithm
// sent by the client or server.
expCertVerfAlgo SignatureScheme
// The credential's public key.
publicKey crypto.PublicKey
}
// DelegatedCredential stores a Delegated Credential with the credential and its
// signature.
type DelegatedCredential struct {
// The serialized form of the Delegated Credential.
raw []byte
// Cred stores the public components of a Delegated Credential.
cred *credential
// The signature scheme used to sign the Delegated Credential.
algorithm SignatureScheme
// The Credential's delegation: a signature that binds the credential to
// the end-entity certificate's public key.
signature []byte
}
// marshalPublicKeyInfo returns a DER encoded PublicKeyInfo
// from a Delegated Credential (as defined in the X.509 standard).
// The following key types are currently supported: *ecdsa.PublicKey
// and ed25519.PublicKey. Unsupported key types result in an error.
// rsa.PublicKey is not supported as defined by the draft.
func (cred *credential) marshalPublicKeyInfo() ([]byte, error) {
switch cred.expCertVerfAlgo {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512,
Ed25519:
rawPub, err := x509.MarshalPKIXPublicKey(cred.publicKey)
if err != nil {
return nil, err
}
return rawPub, nil
default:
return nil, fmt.Errorf("tls: unsupported signature scheme: 0x%04x", cred.expCertVerfAlgo)
}
}
// marshal encodes the credential struct of the Delegated Credential.
func (cred *credential) marshal() ([]byte, error) {
var b cryptobyte.Builder
b.AddUint32(uint32(cred.validTime / time.Second))
b.AddUint16(uint16(cred.expCertVerfAlgo))
// Encode the public key
rawPub, err := cred.marshalPublicKeyInfo()
if err != nil {
return nil, err
}
// Assert that the public key encoding is no longer than 2^24-1 bytes.
if len(rawPub) > dcMaxPubLen {
return nil, errors.New("tls: public key length exceeds 2^24-1 limit")
}
b.AddUint24(uint32(len(rawPub)))
b.AddBytes(rawPub)
raw := b.BytesOrPanic()
return raw, nil
}
// unmarshalCredential decodes serialized bytes and returns a credential, if possible.
func unmarshalCredential(raw []byte) (*credential, error) {
if len(raw) < 10 {
return nil, errors.New("tls: Delegated Credential is not valid: invalid length")
}
s := cryptobyte.String(raw)
var t uint32
if !s.ReadUint32(&t) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
validTime := time.Duration(t) * time.Second
var pubAlgo uint16
if !s.ReadUint16(&pubAlgo) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
algo := SignatureScheme(pubAlgo)
var pubLen uint32
s.ReadUint24(&pubLen)
pubKey, err := x509.ParsePKIXPublicKey(s)
if err != nil {
return nil, err
}
return &credential{validTime, algo, pubKey}, nil
}
// getCredentialLen returns the number of bytes comprising the serialized
// credential struct inside the Delegated Credential.
func getCredentialLen(raw []byte) (int, error) {
if len(raw) < 10 {
return 0, errors.New("tls: Delegated Credential is not valid")
}
var read []byte
s := cryptobyte.String(raw)
s.ReadBytes(&read, 6)
var pubLen uint32
s.ReadUint24(&pubLen)
if !(pubLen > 0) {
return 0, errors.New("tls: Delegated Credential is not valid")
}
raw = raw[6:]
if len(raw) < int(pubLen) {
return 0, errors.New("tls: Delegated Credential is not valid")
}
return 9 + int(pubLen), nil
}
// getHash maps the SignatureScheme to its corresponding hash function.
func getHash(scheme SignatureScheme) crypto.Hash {
switch scheme {
case ECDSAWithP256AndSHA256:
return crypto.SHA256
case ECDSAWithP384AndSHA384:
return crypto.SHA384
case ECDSAWithP521AndSHA512:
return crypto.SHA512
case Ed25519:
return directSigning
case PKCS1WithSHA256, PSSWithSHA256:
return crypto.SHA256
case PSSWithSHA384:
return crypto.SHA384
case PSSWithSHA512:
return crypto.SHA512
default:
return 0 //Unknown hash function
}
}
// getECDSACurve maps the SignatureScheme to its corresponding ecdsa elliptic.Curve.
func getECDSACurve(scheme SignatureScheme) elliptic.Curve {
switch scheme {
case ECDSAWithP256AndSHA256:
return elliptic.P256()
case ECDSAWithP384AndSHA384:
return elliptic.P384()
case ECDSAWithP521AndSHA512:
return elliptic.P521()
default:
return nil
}
}
// prepareDelegationSignatureInput returns the message that the delegator is going to sign.
func prepareDelegationSignatureInput(hash crypto.Hash, cred *credential, dCert []byte, algo SignatureScheme, isClient bool) ([]byte, error) {
header := make([]byte, 64)
for i := range header {
header[i] = 0x20
}
var context string
if !isClient {
context = "TLS, server delegated credentials\x00"
} else {
context = "TLS, client delegated credentials\x00"
}
rawCred, err := cred.marshal()
if err != nil {
return nil, err
}
var rawAlgo [2]byte
binary.BigEndian.PutUint16(rawAlgo[:], uint16(algo))
if hash == directSigning {
b := &bytes.Buffer{}
b.Write(header)
io.WriteString(b, context)
b.Write(dCert)
b.Write(rawCred)
b.Write(rawAlgo[:])
return b.Bytes(), nil
}
h := hash.New()
h.Write(header)
io.WriteString(h, context)
h.Write(dCert)
h.Write(rawCred)
h.Write(rawAlgo[:])
return h.Sum(nil), nil
}
// Extract the algorithm used to sign the Delegated Credential from the
// end-entity (leaf) certificate.
func getSignatureAlgorithm(cert *Certificate) (SignatureScheme, error) {
switch sk := cert.PrivateKey.(type) {
case *ecdsa.PrivateKey:
pk := sk.Public().(*ecdsa.PublicKey)
curveName := pk.Curve.Params().Name
certAlg := cert.Leaf.PublicKeyAlgorithm
if certAlg == x509.ECDSA && curveName == "P-256" {
return ECDSAWithP256AndSHA256, nil
} else if certAlg == x509.ECDSA && curveName == "P-384" {
return ECDSAWithP384AndSHA384, nil
} else if certAlg == x509.ECDSA && curveName == "P-521" {
return ECDSAWithP521AndSHA512, nil
} else {
return undefinedSignatureScheme, fmt.Errorf("using curve %s for %s is not supported", curveName, cert.Leaf.SignatureAlgorithm)
}
case ed25519.PrivateKey:
return Ed25519, nil
case *rsa.PrivateKey:
// If the certificate has the RSAEncryption OID there are a number of valid signature schemes that may sign the DC.
// In the absence of better information, we make a reasonable choice.
return PSSWithSHA256, nil
default:
return undefinedSignatureScheme, fmt.Errorf("tls: unsupported algorithm for signing Delegated Credential")
}
}
// NewDelegatedCredential creates a new Delegated Credential using 'cert' for
// delegation, depending if the caller is the client or the server (defined by
// 'isClient'). It generates a public/private key pair for the provided signature
// algorithm ('pubAlgo') and it defines a validity interval (defined
// by 'cert.Leaf.notBefore' and 'validTime'). It signs the Delegated Credential
// using 'cert.PrivateKey'.
func NewDelegatedCredential(cert *Certificate, pubAlgo SignatureScheme, validTime time.Duration, isClient bool) (*DelegatedCredential, crypto.PrivateKey, error) {
// The granularity of DC validity is seconds.
validTime = validTime.Round(time.Second)
// Parse the leaf certificate if needed.
var err error
if cert.Leaf == nil {
if len(cert.Certificate[0]) == 0 {
return nil, nil, errors.New("tls: missing leaf certificate for Delegated Credential")
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, nil, err
}
}
// Check that the leaf certificate can be used for delegation.
if !isValidForDelegation(cert.Leaf) {
return nil, nil, errors.New("tls: certificate not authorized for delegation")
}
sigAlgo, err := getSignatureAlgorithm(cert)
if err != nil {
return nil, nil, err
}
// Generate the Delegated Credential key pair based on the provided scheme
var privK crypto.PrivateKey
var pubK crypto.PublicKey
switch pubAlgo {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512:
privK, err = ecdsa.GenerateKey(getECDSACurve(pubAlgo), rand.Reader)
if err != nil {
return nil, nil, err
}
pubK = privK.(*ecdsa.PrivateKey).Public()
case Ed25519:
pubK, privK, err = ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, nil, err
}
default:
return nil, nil, fmt.Errorf("tls: unsupported algorithm for Delegated Credential: %s", pubAlgo)
}
// Prepare the credential for signing
hash := getHash(sigAlgo)
credential := &credential{validTime, pubAlgo, pubK}
values, err := prepareDelegationSignatureInput(hash, credential, cert.Leaf.Raw, sigAlgo, isClient)
if err != nil {
return nil, nil, err
}
var sig []byte
switch sk := cert.PrivateKey.(type) {
case *ecdsa.PrivateKey:
opts := crypto.SignerOpts(hash)
sig, err = sk.Sign(rand.Reader, values, opts)
if err != nil {
return nil, nil, err
}
case ed25519.PrivateKey:
opts := crypto.SignerOpts(hash)
sig, err = sk.Sign(rand.Reader, values, opts)
if err != nil {
return nil, nil, err
}
case *rsa.PrivateKey:
opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash,
Hash: hash}
sig, err = rsa.SignPSS(rand.Reader, sk, hash, values, opts)
if err != nil {
return nil, nil, err
}
default:
return nil, nil, fmt.Errorf("tls: unsupported key type for Delegated Credential")
}
if len(sig) > dcMaxSignatureLen {
return nil, nil, errors.New("tls: unable to create a Delegated Credential")
}
return &DelegatedCredential{
cred: credential,
algorithm: sigAlgo,
signature: sig,
}, privK, nil
}
// Validate validates the Delegated Credential by checking that the signature is
// valid, that it hasn't expired, and that the TTL is valid. It also checks that
// certificate can be used for delegation.
func (dc *DelegatedCredential) Validate(cert *x509.Certificate, isClient bool, now time.Time, certVerifyMsg *certificateVerifyMsg) bool {
if dc.isExpired(cert.NotBefore, now) {
return false
}
if dc.invalidTTL(cert.NotBefore, now) {
return false
}
if dc.cred.expCertVerfAlgo != certVerifyMsg.signatureAlgorithm {
return false
}
if !isValidForDelegation(cert) {
return false
}
hash := getHash(dc.algorithm)
in, err := prepareDelegationSignatureInput(hash, dc.cred, cert.Raw, dc.algorithm, isClient)
if err != nil {
return false
}
switch dc.algorithm {
case ECDSAWithP256AndSHA256,
ECDSAWithP384AndSHA384,
ECDSAWithP521AndSHA512:
pk, ok := cert.PublicKey.(*ecdsa.PublicKey)
if !ok {
return false
}
return ecdsa.VerifyASN1(pk, in, dc.signature)
case Ed25519:
pk, ok := cert.PublicKey.(ed25519.PublicKey)
if !ok {
return false
}
return ed25519.Verify(pk, in, dc.signature)
case PSSWithSHA256,
PSSWithSHA384,
PSSWithSHA512:
pk, ok := cert.PublicKey.(*rsa.PublicKey)
if !ok {
return false
}
hash := getHash(dc.algorithm)
return rsa.VerifyPSS(pk, hash, in, dc.signature, nil) == nil
default:
return false
}
}
// Marshal encodes a DelegatedCredential structure. It also sets dc.Raw to that
// encoding.
func (dc *DelegatedCredential) Marshal() ([]byte, error) {
if len(dc.signature) > dcMaxSignatureLen {
return nil, errors.New("tls: delegated credential is not valid")
}
if len(dc.signature) == 0 {
return nil, errors.New("tls: delegated credential has no signature")
}
raw, err := dc.cred.marshal()
if err != nil {
return nil, err
}
var b cryptobyte.Builder
b.AddBytes(raw)
b.AddUint16(uint16(dc.algorithm))
b.AddUint16(uint16(len(dc.signature)))
b.AddBytes(dc.signature)
dc.raw = b.BytesOrPanic()
return dc.raw, nil
}
// UnmarshalDelegatedCredential decodes a DelegatedCredential structure.
func UnmarshalDelegatedCredential(raw []byte) (*DelegatedCredential, error) {
rawCredentialLen, err := getCredentialLen(raw)
if err != nil {
return nil, err
}
credential, err := unmarshalCredential(raw[:rawCredentialLen])
if err != nil {
return nil, err
}
raw = raw[rawCredentialLen:]
if len(raw) < 4 {
return nil, errors.New("tls: Delegated Credential is not valid")
}
s := cryptobyte.String(raw)
var algo uint16
if !s.ReadUint16(&algo) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
var rawSignatureLen uint16
if !s.ReadUint16(&rawSignatureLen) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
var sig []byte
if !s.ReadBytes(&sig, int(rawSignatureLen)) {
return nil, errors.New("tls: Delegated Credential is not valid")
}
return &DelegatedCredential{
cred: credential,
algorithm: SignatureScheme(algo),
signature: sig,
}, nil
}
| invalidTTL | identifier_name |
run.py | # x = 6
# print(x/2)
# print(type(x))
# print(type(x/2))
# y = "hello"
# z = " world"
# print(y+z)
# print(type(y+z))
# p = input()
# p = int(p)
# #p = float(p)
# print(p*2)
# print(type(p*2))
# x = "asddfafdafads"
# x = x[1:4]
# print(x)
# x = 67
# if x == 5:
# print("x jest inne")
# else:
# print("jakie?")
# print("gotowe")
# x = 6 < 7
# print(x)
# print(type(x))
# y = 1
# print(bool(y))
# # LISTS
# produkty = ["ser","mleko","parówki","pizza"]
# # appending to the end of list
# produkty.append("pomidor")
# # insert in the middle of list with parameters index
# produkty.insert(2,"ciastko")
# print(produkty)
# print(produkty[1:3])
# print(type(produkty))
# # clearing all list elements
# produkty.clear()
# print(1, produkty)
# # counting hom many specific elements are on list
# x = produkty.count("pomidor")
# print(x)
# wiecej_produktow = ["kawa","sos"]
# produkty.extend(wiecej_produktow)
# produkty.pop(0)
# produkty.remove("sos")
# print(produkty)
# # sorting in alphabetical order
# produkty.sort()
# print(produkty)
# # reverse list
# produkty.reverse()
# # copying list
# produkty2 = produkty.copy()
# print(produkty2)
# # TUPLES tupli nie można edytować (immutable)
# przybory = ("dlugopis","linijka","kredka")
# print(przybory)
# # słowniki DICTIONARIES
# person = {"wiek":20, "imię":"ania", "nazwisko":"kowalska"}
# print(person)
# print(person["wiek"])
# keys = person.keys()
# print(keys)
# # WHILE LOOP pętla while
# i = 0
# while i <= 10:
# print(i)
# i += 1
# print("koniec")
# i = 10
# while i>0:
# print(i)
# i -= 1
# print("koniec")
# suma = 0
# while True:
# print("wpisz liczbę")
# x = input()
# suma += int(x)
# print(suma)
# lista = ["a","b","c","d","e","f","g"]
# for litera in lista:
# print(litera)
# if litera == "e":
# print("to jest e!")
# for i in range(10,30,2):
# print(i)
# fruits = ["apple", "pear", "banana", "orange", "apple"]
# # first one is enumerating number:
# for i, fruit in enumerate(fruits):
# print("Sprawdzam {}".format(i))
# if i == 3:
# break
# print(i)
# print(fruit)
# print("koniec")
# print(enumerate(fruits))
# x = "Hello {}"
# y = x.format("world!")
# print(y)
# fruits = ["bee", "pear", "banana", "orange", "strawberry"]
# for fruit in fruits:
# if fruit == "pear": continue
# print(fruit)
# if fruit == "banana": break
# fruits = ["bee", "pear", "banana", "orange", "strawberry"]
# if "apple" in fruits:
# print("znaleziono jabłko")
# elif "orange" in fruits:
# print("nie znaleziono jabłka, ale mamy pomarańczę")
# else:
# print("nic nie znaleziono")
# while True:
# liczba = int(input())
# if liczba > 20 and liczba < 80:
# print("liczba jest większa od 20, ale mniejsza od 80")
# elif liczba >100 or liczba > 80:
# print("liczba jest większa od 100 lub od 80")
# else:
# print("nic")
# # TIME czas
# import time
# print("Start")
# time.sleep(2)
# # stop pojawi się po 2 sekundach
# print("Stop")
# timer = time.time()
# time.sleep(2)
# d = time.time() - timer
# print(d)
# timer = time.time()
# timer2 = time.time()
# timer3 = time.time()
# while True:
# if time.time() - timer > 2:
# print("minelo 2 sekundy")
# timer = time.time()
# if time.time() - timer2 >1:
# print("minela 1 sekunda")
# timer2 = time.time()
# if time.time() - timer3 >5:
# break
#
# import datetime
# teraz = datetime.datetime.now()
# print(str(teraz.hour)+" : "+str(teraz.minute))
# print(str(teraz.strftime("%H:%M %d.%m.%Y")))
# FUNCTIONS funkcje
# def printme(liczba):
# print("hello")
# print(liczba)
# printme(5)
# def mnoz(a,b=4):
# return a*b
# wynik = mnoz(2)
# print(wynik)
# # IMPORT FILE
# import new
# new.test("hello")
# new.test("121")
# # import function from file
# from new import test
# test("hello")
# test("121")
# # import all from file
# from new import *
# test("another one")
# def testOne():
# print("Test one run.py")
# # import function with same name as another name
# from new import testOne as testOneNew
# testOne()
# testOneNew()
# files, tribes
# r - read, r+ - read+write, w - also create, w+, a, a+
# f = open("plik.txt","w")
# f.write("test1")
# f.close()
# \n - write in new line
# f = open("plik.txt","r")
# # print(f.read())
# # print(f.readlines())
# print(f.readline())
# f.close()
# f = open("plik.txt","r")
# for line in f.readlines():
# print(line)
# f.close()
# # folders
# import os
# lista = os.listdir(".")
# print(lista)
# for item in os.listdir("."):
# # print(item)
# if os.path.isfile(item):
# # .format put in curly braces item from ()
# print("{} is a file".format(item))
# elif os.path.isdir("."):
# print("{} is a folder".format(item))
# else:
# print("{} is not a folder or file".format(item))
# # creating folder
# import os
# os.mkdir("newFolder")
# # rename folder
# os.rename("newFolder","bestFolder")
# # remove directory - os.rmdir("directory"); remove file - os.remove("file")
# os.rmdir("bestFolder")
# open("test2.txt","w").close()
# # making directories
# import os
# path="pliki/11/data.txt"
# print(os.path.dirname(path))
# print(os.path.basepath(path))
# print(os.path.abspath(path))
# os.makedirs(os.path.dirname(path))
# open(path,"w").close()
# # exceptions
# try:
# file = open("tekst.txt","r+")
# file.write("tester")
# file.close()
# # if file can't be created exception works:
# except FileNotFoundError as e:
# print("wystąpił błąd z plikiem")
# print(e)
#
# try:
# file2 = open("plik.txt","r")
# file2.write("xero")
# file.close()
# except FileNotFoundError as e:
# print("wystąpił błąd z plikiem")
# print(e)
# except:
# print("some other error")
# # OBJECTS
# class Calculator():
# def __init__(self):
# print("init")
# #declaring self parametr
# self.liczba=10
# def __del__(self):
# print("del")
# def __str__(self):
# return "hello"
# def __len__(self):
# return 6
# def __bool__(self):
# return False
# def dodaj(self, a, b):
# wynik = a+b
# print(wynik)
# def odejmij(self, a, b):
# wynik = a-b
# print(wynik)
# my_calculator = Calculator()
# my_calculator.dodaj(2,3)
# test = Calculator()
# # this del deletes test variable:
# del test
# print(test)
# test3 = Calculator()
# test3.dodaj(3,3)
# # this prints hello (__str__) when converting to string
# print(test3)
# if test3:
# # __bool__ store boolean value of class
# print("True")
# else:
# print("Falseeee")
# calc = Calculator()
# calc.liczba = 10
# calc.liczba+=5
# print(calc.liczba)
#
# calc2 = Calculator()
# calc2.liczba+=5
# print(calc2.liczba)
# class Calculator2:
# def __init__(self):
# self.ostatni_wynik = 0
#
# def dodaj(self, a, b):
# wynik = a+b
# self.ostatni_wynik = wynik
# print(wynik)
# def odejmij(self, a, b):
# wynik = a - b
# self.ostatni_wynik = wynik
# print(wynik)
#
# calc21 = Calculator2()
# calc21.dodaj(3,2)
# calc21.dodaj(10,5)
# calc21.odejmij(17,9)
# print("Ostatni wynik:{}".format(calc21.ostatni_wynik))
# # inheritence
# class Parent():
# def __init__(self):
# print("Parent init")
# def parent(self):
# print("Parent parent")
# def poke(self):
# print("Parent poked")
# parent = Parent()
# parent.parent()
# parent.poke()
# class Child(Parent):
# def __init__(self):
# # super is realising parent __init__ function
# super().__init__()
# print("Child init")
# def poke(self):
# super().poke()
# print("Child poked")
# child = Child()
# child.poke()
# child.parent()
# class Person():
# def __init__(self, name):
# self.name = name
# self.surname = "Kwiatkowski"
# self.age = 25
# class Employee(Person):
# def __init__(self, position):
# super().__init__("Tomek")
# self.position = position
# self.specialization = "Python"
# class Client(Person):
# def __init__(self, name):
# super().__init__(name)
# self.ordered = "website"
# worker = Employee("programmer")
# print(worker.name)
# print(worker.position)
# print(worker.specialization)
# worker2 = Employee("designer")
# print(worker2.position)
# buyer = Client("Marta")
# print(buyer.name)
# # Exceptions
# class TooColdException(Exception):
# def __init__(self, temp):
# super().__init__("Temperature {} is below absolute zero.".format(temp))
# def celsius_to_kelvin(temp):
# if temp < -273:
# raise TooColdException(temp)
# return temp+273
# # try:
# # print(celsius_to_kelvin(-300))
# # except TooColdException:
# # print("Too cold")
#
# print(celsius_to_kelvin(-300))
# ************TRAVERSY MEDIA CRASH COURSE
"""
this is
multiline
comments
"""
# # this prints multiline string
# print("""fsdfsdfsd
# sdsdfdsf
# sdfsdfsfds""")
# print("hello"[2:4])
# # this above printing ll
# print(2,3,4,"string")
# # printing in new line
# print("line1\nline2\nline3")
# # variables and data types
# greeting = "hello world"
# print(greeting)
# myStr = "hello"
# myInt = 5
# myFloat = 2.3
# print(type(myFloat))
# myList = [1, 2, 3, "abc"]
# myDictionary = {"a":1,"b":2,"c":3}
# print(type(myList), myList)
# print(type(myDictionary), myDictionary)
# print(myList[3])
# print(myDictionary["a"])
# # CONDITIONALS
# x = 4;
# # basic if
# if x < 6:
# print("this is true")
# print("only here it will be works")
# # if and else
# if x > 6:
# print("your number is greater than 6")
# else:
# print("your number is lower than 6")
# # elif
# color = "blue"
# if color == "red":
# print("color is red")
# elif color == "blue":
# print("color is blue")
# else:
# print("color is different")
# # nested if
# if color == "blue":
# if x <10:
# print("color is blue and x is lower than 10")
# # logical operators
# if color == "blue" and x <10:
# print("true")
# # LOOPs
# # FOR LOOP
# people = ["jonh","barry","bob","alf"]
# for person in people:
# print("current person: ", person)
# for i in range(len(people)):
# print("current person: ", people[i])
# for i in range(1,10):
# print(i)
# # while loop
# count = 0
# while count < 10:
# print("count: ", count)
# if count == 5:
# break
# count = count + 1
# # functions
# # default name is barry, but when you put different function output will be different
# def sayHello(name = "barry"):
# print("hello", name)
#
# sayHello("alf")
#
# def getSum(num1,num2):
# total = num1 + num2
# return total
#
# numSum = getSum(3,4)
# print(numSum)
#
# # scope
# def addOneToNum(num):
# num = num + 1
# print("value inside a function", num)
# return num
#
# num = 5
# addOneToNum(num)
# print("value outside a function", num)
#
# def addOneToList(myList):
# myList.append(4)
# print("myList inside function: ", myList)
#
# myList = [1,2,3]
# addOneToList(myList)
# print("myList outside function:", myList)
# # string functions
# myStr = "Hello World!"
#
# # only first letter is capitalize
# print(myStr.capitalize())
#
# # first letter is lower, other are capitalize
# print(myStr.swapcase())
#
# # get length
# print(len(myStr))
#
# # replace | # print(myStr.replace("World","Everyone"))
#
# # count - counting nummber of something
# sub = "l"
# print(myStr.count(sub))
#
# # startswith() - checking if something starts with signs
# print(myStr.startswith("Hello"))
#
# # endswith() - checking if string ends with parameter substring
# print(myStr.endswith("!"))
#
# # split to list
# print(myStr.split())
#
# # find - checking position of substring
# print(myStr.find("lo"))
#
# # index
# print(myStr.index("l"))
#
# # is alphanumeric - checking if string is alphanumeric
# print(myStr.isalpha())
#
# # new line
# print("Andy\nRobbins")
#
# #quotation mark
# print("Andy\"Dfen")
#
# # upper case
# avc = "dsfsdfs"
# print(avc.upper())
#
# # length of string
# print(len(avc))
#
# # index of element
# print(avc.index("f"))
#
# # replacing some words/letters
# print(avc.replace("sf","xxx"))
# WORKING ON MODULES
# # you need to have file new.py with function sayHello
# import new
# new.sayHello("mark")
# # you import only one function
# from new import sayGoodbye
# sayGoodbye("andy")
# # OPENING FILES
# openingVariable = open("plik.txt","w")
# # printing name of file
# print("Name: ", openingVariable.name)
# # checking if file is closed
# print("Is Closed: ",openingVariable.closed)
# # checking mode of file
# print("Mode of file: ", openingVariable.mode)
# openingVariable.write("Python is ok")
# openingVariable.write("Javascript is better")
# openingVariable.close()
#
# # opening again and write something replace the previous text
# openingVariable = open("plik.txt","w")
# openingVariable.write("i don't like java")
# openingVariable.close()
#
# # using append mode - letter "a" - append new text,
# openingVariable = open("plik.txt","a")
# openingVariable.write(" python is the best")
# openingVariable.close()
#
# # using "r+" mode you can read variable and print it out
# openingVariable = open("plik.txt", "r+")
# readingVariable = openingVariable.read(10)
# # reading only first 10 characters when you put 10 in read()
# print(readingVariable)
# # CLASSES & OBJECTS
# class Person:
# __name = ''
# __email = ''
#
# def __init__(self, name, email):
# self.__name = name
# self.__email = email
#
# def set_name(self, name):
# self.__name = name
#
# def get_name(self):
# return self.__name
#
# def set_email(self, email):
# self.__email = email
#
# def get_email(self):
# return self.__email
#
# def toString(self):
# return "{} can be contacted at {}".format(self.__name, self.__email)
#
# # brad = Person('Brad Traversy', 'brad@gmail.com')
# # print(brad.get_name())
# # print(brad.toString())
# #
# # andy = Person("Andy", "andy@gmail.com")
# # print(andy.get_name())
# # inherit class person
# class Customer(Person):
# __balance = 0
#
# def __init__(self, name, email, balance):
# self.__name = name
# self.__email = email
# self.__balance = balance
# super(Customer, self).__init__(name, email)
#
# def set_balance(self, balance):
# self.__balance = balance
#
# def get_balance(self):
# return self.__balance
#
# def toString(self):
# return "{} has a balance of {}, and can be contacted at {}".format(self.__name, self.__balance, self.__email)
#
# john = Customer("john","jj@gdfg.gdf",200)
# print(john.toString())
# john.set_balance(434)
# print(john.get_balance())
# infoshare test
# a,b = 1,2
# a=b
# print(a)
# b+=a
# print(a)
#
# s = "Python or Java?"
# print(s[-5] + s[1:6])
#
# print(s[-5])
#
# a,b,c = None,True,False
# if a and b or c:
# print(1)
# elif b:
# print(2)
# elif c:
# print(3)
# else:
# print(4)
# for a in "ABC":
# for b in a:
# print(a,b)
#
# x = 1
# while x<10:
# x + 1
# print(x)
# def funkcja1():
# pass
# def funkcja2(None):
# return None
# def funkcja3(a=None, b):
# pass
# def funkcj4(a, b):
# return a
#
#
# class Language(object):
# def show(self):
# print("i use {0}".format(self.ide))
#
# class Pycharm(Language):
# ide = "Pycharm"
#
# class Eclipse(Language):
# ide = "Eclipse"
#
# class Finalide(Eclipse,Pycharm):
# pass
#
# print(Finalide().show())
#
#
# class Str2(str):
# def __init__(self,a):
# if a and len(a)<3:
# raise ValueError
# Str2("Pi")
# # ************************FREECODECAMP
# # numbers
# p = -5
# # absolute number
# print(abs(p))
# # p^3
# print(pow(p,3))
# # greatest number from parameters
# print(max(2,3))
# #rounding down the number
# print(round(4.4))
#
# # importing some additional functions
# from math import *
# # rounding to the nearest down number
# print(floor(3.4))
# # rounding to the nearest up number
# print(ceil(3.4))
#
# print(sqrt(144))
# # INPUT FROM USER
# name = input("Enter your name: ")
# age = input("Enter your age: ")
# print("Hello {}".format(name)+" Your age is "+age)
# # simple calculator, adding two numbers
# a = input("Enter first number: ")
# b = input("Enter second number: ")
# # float() converting to integer type
# result = float(a)+float(b)
# print(result)
# # CLASSES and OBJECTS ONE MORE TIME
# class Student:
# def __init__(self, name, major, gpa, is_on_probation):
# self.name = name
# self.major = major
# self.gpa = gpa
# self.is_on_probation = is_on_probation
#
# def is_good_student(self):
# if self.gpa > 4.2:
# return True
# else:
# return False
#
#
# student1 = Student("Andy","Math",4.4,True)
# student2 = Student("Mark","Physics",4.1,False)
#
# print(student1.name)
# print(student2.is_good_student())
# # QUESTION GAME
# question_prompts =[
# "What color are apples: \n A. Red \n B. Purple \n C.Yellow \n\n",
# "What color are bananas: \n A. Black \n B. Yellow \n C. White \n\n",
# "What color are pears: \n A. Purple \n B. Grey \n C. Green \n\n"
# ]
#
# class Question:
# def __init__(self, prompt, answer):
# self.prompt = prompt
# self.answer = answer
#
# questions = [
# Question(question_prompts[0],"a"),
# Question(question_prompts[1],"b"),
# Question(question_prompts[2],"c")
# ]
#
# def run_test(questions):
# score = 0
# for question in questions:
# answer = input(question.prompt)
# if answer == question.answer:
# score += 1
# print("You got: "+str(score) + "/" + str(len(questions)))
#
# run_test(questions) | random_line_split | |
minijail.rs | // Copyright (c) 2020 ESRLabs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{
process::{
exit_handle, waitpid, Error, ExitHandleWait, ExitStatus, Pid, ENV_NAME, ENV_VERSION,
},
state::Container,
OutputStream,
};
use crate::runtime::{Event, EventTx};
use futures::channel::oneshot;
use itertools::Itertools;
use log::{debug, error, info, trace, warn, Level};
use nix::{
fcntl::{self, fcntl, OFlag},
sys::signal,
unistd::{self, chown, pipe},
};
use npk::manifest::{Dev, Mount, MountFlag};
use std::{
fmt, iter, ops,
os::unix::prelude::RawFd,
path::{Path, PathBuf},
pin::Pin,
task::{Context, Poll},
};
use tokio::{
fs,
io::{self, unix::AsyncFd, AsyncBufReadExt, AsyncRead, AsyncWriteExt, ReadBuf},
select, task, time,
};
// We need a Send + Sync version of minijail::Minijail
struct MinijailHandle(::minijail::Minijail);
unsafe impl Send for MinijailHandle {}
unsafe impl Sync for MinijailHandle {}
impl ops::Deref for MinijailHandle {
type Target = ::minijail::Minijail;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ops::DerefMut for MinijailHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Debug)]
pub struct Minijail {
log_fd: i32,
event_tx: EventTx,
run_dir: PathBuf,
data_dir: PathBuf,
uid: u32,
gid: u32,
}
impl Minijail {
pub(crate) fn new(
event_tx: EventTx,
run_dir: &Path,
data_dir: &Path,
uid: u32,
gid: u32,
) -> Result<Minijail, Error> {
let pipe = AsyncPipe::new()?;
let log_fd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
// Spawn a task that forwards logs from minijail to the rust logger.
task::spawn(async move {
while let Ok(Some(line)) = lines.next_line().await {
let l = line.split_whitespace().skip(2).collect::<String>();
match line.chars().next() {
Some('D') => debug!("{}", l),
Some('I') => info!("{}", l),
Some('W') => warn!("{}", l),
Some('E') => error!("{}", l),
_ => trace!("{}", line),
}
}
});
let minijail_log_level = match log::max_level().to_level().unwrap_or(Level::Warn) {
Level::Error => 3,
Level::Warn => 4,
Level::Info => 6,
Level::Debug => 7,
Level::Trace => i32::MAX,
};
::minijail::Minijail::log_to_fd(log_fd, minijail_log_level as i32);
Ok(Minijail {
event_tx,
run_dir: run_dir.into(),
data_dir: data_dir.into(),
uid,
gid,
log_fd,
})
}
pub(crate) fn shutdown(&self) -> Result<(), Error> {
// Just make clippy happy
if false {
Err(Error::Stop)
} else {
Ok(())
}
}
pub(crate) async fn start(&self, container: &Container) -> Result<Process, Error> {
let root = &container.root;
let manifest = &container.manifest;
let mut jail = MinijailHandle(::minijail::Minijail::new().map_err(Error::Minijail)?);
let init = manifest
.init
.as_ref()
.ok_or_else(|| Error::Start("Cannot start a resource".to_string()))?;
let tmpdir = tempfile::TempDir::new()
.map_err(|e| Error::Io(format!("Failed to create tmpdir for {}", manifest.name), e))?;
let tmpdir_path = tmpdir.path();
// Dump seccomp config to process tmpdir. This is a subject to be changed since
// minijail provides a API to configure seccomp without writing to a file.
// TODO: configure seccomp via API instead of a file
if let Some(ref seccomp) = container.manifest.seccomp |
// Configure UID
jail.change_uid(self.uid);
// Configure PID
jail.change_gid(self.gid);
// Update the capability mask if specified
if let Some(capabilities) = &manifest.capabilities {
// TODO: the capabilities should be passed as an array
jail.update_caps(&capabilities.join(" "))
.map_err(Error::Minijail)?;
}
// Update the supplementary group list if specified
if let Some(suppl_groups) = &manifest.suppl_groups {
// TODO: the groups should be passed an array
jail.update_suppl_groups(&suppl_groups.join(" "))
.map_err(Error::Minijail)?;
}
// TODO: Do not use pid namespace because of multithreadding
// issues discovered by minijail. See libminijail.c for details.
// Make the process enter a pid namespace
//jail.namespace_pids();
// Make the process enter a vfs namespace
jail.namespace_vfs();
// Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
// in the kernel source tree for an explanation of the parameters.
jail.no_new_privs();
// Set chroot dir for process
jail.enter_chroot(&root.as_path())?;
// Make the application the init process
jail.run_as_init();
self.setup_mounts(&mut jail, container).await?;
// Arguments
let args = manifest.args.clone().unwrap_or_default();
let init_str = init.display().to_string();
let argv: Vec<&str> = iter::once(init_str.as_str())
.chain(args.iter().map(|s| s.as_str()))
.collect();
// Create environment for process. Set data directory, container name and version
let mut env = manifest.env.clone().unwrap_or_default();
env.insert(ENV_NAME.to_string(), manifest.name.to_string());
env.insert(ENV_VERSION.to_string(), manifest.version.to_string());
let env = env
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<String>>();
let env = env.iter().map(|a| a.as_str()).collect::<Vec<&str>>();
debug!(
"Executing \"{}{}{}\"",
init.display(),
if args.len() > 1 { " " } else { "" },
argv.iter().skip(1).join(" ")
);
let stdout =
CaptureOutput::new(OutputStream::Stdout, &manifest.name, self.event_tx.clone()).await?;
let stderr =
CaptureOutput::new(OutputStream::Stderr, &manifest.name, self.event_tx.clone()).await?;
// Prevent minijail to close the log fd so that errors aren't missed
let log_fd = (self.log_fd, self.log_fd);
let pid = jail.run_remap_env_preload(
&init.as_path(),
&[(stdout.0, 1), (stderr.0, 2), log_fd],
&argv,
&env,
false,
)? as u32;
let (exit_handle_signal, exit_handle_wait) = exit_handle();
// Spawn a task thats waits for the child to exit
waitpid(
&manifest.name,
pid,
exit_handle_signal,
self.event_tx.clone(),
)
.await;
Ok(Process {
pid,
_jail: jail,
_tmpdir: tmpdir,
_stdout: stdout,
_stderr: stderr,
exit_handle_wait,
})
}
async fn setup_mounts(
&self,
jail: &mut MinijailHandle,
container: &Container,
) -> Result<(), Error> {
let proc = Path::new("/proc");
jail.mount_bind(&proc, &proc, false)
.map_err(Error::Minijail)?;
jail.remount_proc_readonly();
// If there's no explicit mount for /dev add a minimal variant
if !container
.manifest
.mounts
.contains_key(&PathBuf::from("/dev"))
{
debug!("Mounting minimal /dev");
jail.mount_dev();
}
for (target, mount) in &container.manifest.mounts {
match &mount {
Mount::Bind { host, flags } => {
if !&host.exists() {
warn!(
"Cannot bind mount nonexitent source {} to {}",
host.display(),
target.display()
);
continue;
}
let rw = flags.contains(&MountFlag::Rw);
debug!(
"Mounting {} on {}{}",
host.display(),
target.display(),
if rw { " (rw)" } else { "" }
);
jail.mount_bind(&host, &target, rw)
.map_err(Error::Minijail)?;
}
Mount::Persist => {
let dir = self.data_dir.join(&container.manifest.name);
if !dir.exists() {
debug!("Creating {}", dir.display());
fs::create_dir_all(&dir).await.map_err(|e| {
Error::Io(format!("Failed to create {}", dir.display()), e)
})?;
}
debug!("Chowning {} to {}:{}", dir.display(), self.uid, self.gid);
chown(
dir.as_os_str(),
Some(unistd::Uid::from_raw(self.uid)),
Some(unistd::Gid::from_raw(self.gid)),
)
.map_err(|e| {
Error::Os(
format!(
"Failed to chown {} to {}:{}",
dir.display(),
self.uid,
self.gid
),
e,
)
})?;
debug!("Mounting {} on {}", dir.display(), target.display(),);
jail.mount_bind(&dir, &target, true)
.map_err(Error::Minijail)?;
}
Mount::Resource { name, version, dir } => {
let src = {
// Join the source of the resource container with the mount dir
let resource_root = self.run_dir.join(&name).join(&version.to_string());
let dir = dir
.strip_prefix("/")
.map(|d| resource_root.join(d))
.unwrap_or(resource_root);
if !dir.exists() {
return Err(Error::Start(format!(
"Resource folder {} is missing",
dir.display()
)));
}
dir
};
debug!("Mounting {} on {}", src.display(), target.display());
jail.mount_bind(&src, &target, false)
.map_err(Error::Minijail)?;
}
Mount::Tmpfs { size } => {
debug!(
"Mounting tmpfs with size {} on {}",
bytesize::ByteSize::b(*size),
target.display()
);
let data = format!("size={},mode=1777", size);
jail.mount_with_data(&Path::new("none"), &target, "tmpfs", 0, &data)
.map_err(Error::Minijail)?;
}
Mount::Dev { r#type } => {
match r#type {
// The Full mount of /dev is a simple rw bind mount of /dev
Dev::Full => {
let dev = Path::new("/dev");
jail.mount_bind(&dev, &dev, true).map_err(Error::Minijail)?;
}
}
}
}
}
Ok(())
}
}
pub(crate) struct Process {
/// PID of this process
pid: u32,
/// Handle to a libminijail configuration
_jail: MinijailHandle,
/// Temporary directory created in the systems tmp folder.
/// This directory holds process instance specific data that needs
/// to be dumped to disk for startup. e.g seccomp config (TODO)
_tmpdir: tempfile::TempDir,
/// Captured stdout output
_stdout: CaptureOutput,
/// Captured stderr output
_stderr: CaptureOutput,
/// Rx part of the exit handle of this process
exit_handle_wait: ExitHandleWait,
}
impl fmt::Debug for Process {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Process").field("pid", &self.pid).finish()
}
}
impl Process {
pub fn pid(&self) -> Pid {
self.pid
}
pub async fn stop(&mut self, timeout: time::Duration) -> Result<ExitStatus, Error> {
// Send a SIGTERM to the application. If the application does not terminate with a timeout
// it is SIGKILLed.
let sigterm = signal::Signal::SIGTERM;
signal::kill(unistd::Pid::from_raw(self.pid as i32), Some(sigterm))
.map_err(|e| Error::Os(format!("Failed to SIGTERM {}", self.pid), e))?;
let timeout = Box::pin(time::sleep(timeout));
let exited = Box::pin(self.exit_handle_wait.recv());
let pid = self.pid;
Ok(select! {
s = exited => {
s.expect("Internal channel error during process termination") // This is the happy path...
},
_ = timeout => {
signal::kill(unistd::Pid::from_raw(pid as i32), Some(signal::Signal::SIGKILL))
.map_err(|e| Error::Os("Failed to kill process".to_string(), e))?;
ExitStatus::Signaled(signal::Signal::SIGKILL)
}
})
}
}
struct AsyncPipe {
inner: AsyncFd<std::fs::File>,
writefd: i32,
}
impl AsyncPipe {
fn new() -> Result<AsyncPipe, Error> {
let (readfd, writefd) =
pipe().map_err(|e| Error::Os("Failed to create pipe".to_string(), e))?;
let mut flags = OFlag::from_bits(fcntl(readfd, fcntl::FcntlArg::F_GETFL).unwrap()).unwrap();
flags.set(OFlag::O_NONBLOCK, true);
fcntl(readfd, fcntl::FcntlArg::F_SETFL(flags)).expect("Failed to configure pipe fd");
let pipe =
unsafe { <std::fs::File as std::os::unix::prelude::FromRawFd>::from_raw_fd(readfd) };
let inner = AsyncFd::new(pipe).map_err(|e| Error::Io("Async fd".to_string(), e))?;
Ok(AsyncPipe { inner, writefd })
}
fn writefd(&self) -> RawFd {
self.writefd
}
}
impl AsyncRead for AsyncPipe {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let mut guard = futures::ready!(self.inner.poll_read_ready(cx))?;
match guard
.try_io(|inner| std::io::Read::read(&mut inner.get_ref(), buf.initialized_mut()))
{
Ok(Ok(n)) => {
buf.advance(n);
break Poll::Ready(Ok(()));
}
Ok(Err(e)) => break Poll::Ready(Err(e)),
Err(_would_block) => continue,
}
}
}
}
// Capture output of a child process. Create a pipe and spawn a task that forwards each line to
// the main loop. When this struct is dropped the internal spawned tasks are stopped.
#[derive(Debug)]
struct CaptureOutput(i32, oneshot::Sender<()>);
impl CaptureOutput {
pub async fn new(
stream: OutputStream,
tag: &str,
event_tx: EventTx,
) -> Result<CaptureOutput, Error> {
let pipe = AsyncPipe::new()?;
let writefd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
let tag = tag.to_string();
let (tx, mut rx) = oneshot::channel();
debug!("Starting stream capture of {} on {:?}", tag, stream);
task::spawn(async move {
loop {
select! {
_ = &mut rx => break,
line = lines.next_line() => {
if let Ok(Some(line)) = line {
let event = Event::ChildOutput {
name: tag.clone(),
stream: stream.clone(),
line,
};
event_tx.send(event).await.ok();
} else {
break;
}
}
}
}
debug!("Stopped stream capture of {} on {:?}", tag, stream);
});
Ok(CaptureOutput(writefd, tx))
}
}
| {
let seccomp_config = tmpdir_path.join("seccomp");
let mut f = fs::File::create(&seccomp_config)
.await
.map_err(|e| Error::Io("Failed to create seccomp configuraiton".to_string(), e))?;
let s = itertools::join(seccomp.iter().map(|(k, v)| format!("{}: {}", k, v)), "\n");
f.write_all(s.as_bytes())
.await
.map_err(|e| Error::Io("Failed to write seccomp configuraiton".to_string(), e))?;
// Temporary disabled
// Must be called before parse_seccomp_filters
// jail.log_seccomp_filter_failures();
// let p: std::path::PathBuf = seccomp_config.into();
// jail.parse_seccomp_filters(p.as_path())
// .context("Failed parse seccomp config")?;
// jail.use_seccomp_filter();
} | conditional_block |
minijail.rs | // Copyright (c) 2020 ESRLabs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{
process::{
exit_handle, waitpid, Error, ExitHandleWait, ExitStatus, Pid, ENV_NAME, ENV_VERSION,
},
state::Container,
OutputStream,
};
use crate::runtime::{Event, EventTx};
use futures::channel::oneshot;
use itertools::Itertools;
use log::{debug, error, info, trace, warn, Level};
use nix::{
fcntl::{self, fcntl, OFlag},
sys::signal,
unistd::{self, chown, pipe},
};
use npk::manifest::{Dev, Mount, MountFlag};
use std::{
fmt, iter, ops,
os::unix::prelude::RawFd,
path::{Path, PathBuf},
pin::Pin,
task::{Context, Poll},
};
use tokio::{
fs,
io::{self, unix::AsyncFd, AsyncBufReadExt, AsyncRead, AsyncWriteExt, ReadBuf},
select, task, time,
};
// We need a Send + Sync version of minijail::Minijail
struct MinijailHandle(::minijail::Minijail);
unsafe impl Send for MinijailHandle {}
unsafe impl Sync for MinijailHandle {}
impl ops::Deref for MinijailHandle {
type Target = ::minijail::Minijail;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ops::DerefMut for MinijailHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Debug)]
pub struct Minijail {
log_fd: i32,
event_tx: EventTx,
run_dir: PathBuf,
data_dir: PathBuf,
uid: u32,
gid: u32,
}
impl Minijail {
pub(crate) fn new(
event_tx: EventTx,
run_dir: &Path,
data_dir: &Path,
uid: u32,
gid: u32,
) -> Result<Minijail, Error> {
let pipe = AsyncPipe::new()?;
let log_fd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
// Spawn a task that forwards logs from minijail to the rust logger.
task::spawn(async move {
while let Ok(Some(line)) = lines.next_line().await {
let l = line.split_whitespace().skip(2).collect::<String>();
match line.chars().next() {
Some('D') => debug!("{}", l),
Some('I') => info!("{}", l),
Some('W') => warn!("{}", l),
Some('E') => error!("{}", l),
_ => trace!("{}", line),
}
}
});
let minijail_log_level = match log::max_level().to_level().unwrap_or(Level::Warn) {
Level::Error => 3,
Level::Warn => 4,
Level::Info => 6,
Level::Debug => 7,
Level::Trace => i32::MAX,
};
::minijail::Minijail::log_to_fd(log_fd, minijail_log_level as i32);
Ok(Minijail {
event_tx,
run_dir: run_dir.into(),
data_dir: data_dir.into(),
uid,
gid,
log_fd,
})
}
pub(crate) fn | (&self) -> Result<(), Error> {
// Just make clippy happy
if false {
Err(Error::Stop)
} else {
Ok(())
}
}
pub(crate) async fn start(&self, container: &Container) -> Result<Process, Error> {
let root = &container.root;
let manifest = &container.manifest;
let mut jail = MinijailHandle(::minijail::Minijail::new().map_err(Error::Minijail)?);
let init = manifest
.init
.as_ref()
.ok_or_else(|| Error::Start("Cannot start a resource".to_string()))?;
let tmpdir = tempfile::TempDir::new()
.map_err(|e| Error::Io(format!("Failed to create tmpdir for {}", manifest.name), e))?;
let tmpdir_path = tmpdir.path();
// Dump seccomp config to process tmpdir. This is a subject to be changed since
// minijail provides a API to configure seccomp without writing to a file.
// TODO: configure seccomp via API instead of a file
if let Some(ref seccomp) = container.manifest.seccomp {
let seccomp_config = tmpdir_path.join("seccomp");
let mut f = fs::File::create(&seccomp_config)
.await
.map_err(|e| Error::Io("Failed to create seccomp configuraiton".to_string(), e))?;
let s = itertools::join(seccomp.iter().map(|(k, v)| format!("{}: {}", k, v)), "\n");
f.write_all(s.as_bytes())
.await
.map_err(|e| Error::Io("Failed to write seccomp configuraiton".to_string(), e))?;
// Temporary disabled
// Must be called before parse_seccomp_filters
// jail.log_seccomp_filter_failures();
// let p: std::path::PathBuf = seccomp_config.into();
// jail.parse_seccomp_filters(p.as_path())
// .context("Failed parse seccomp config")?;
// jail.use_seccomp_filter();
}
// Configure UID
jail.change_uid(self.uid);
// Configure PID
jail.change_gid(self.gid);
// Update the capability mask if specified
if let Some(capabilities) = &manifest.capabilities {
// TODO: the capabilities should be passed as an array
jail.update_caps(&capabilities.join(" "))
.map_err(Error::Minijail)?;
}
// Update the supplementary group list if specified
if let Some(suppl_groups) = &manifest.suppl_groups {
// TODO: the groups should be passed an array
jail.update_suppl_groups(&suppl_groups.join(" "))
.map_err(Error::Minijail)?;
}
// TODO: Do not use pid namespace because of multithreadding
// issues discovered by minijail. See libminijail.c for details.
// Make the process enter a pid namespace
//jail.namespace_pids();
// Make the process enter a vfs namespace
jail.namespace_vfs();
// Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
// in the kernel source tree for an explanation of the parameters.
jail.no_new_privs();
// Set chroot dir for process
jail.enter_chroot(&root.as_path())?;
// Make the application the init process
jail.run_as_init();
self.setup_mounts(&mut jail, container).await?;
// Arguments
let args = manifest.args.clone().unwrap_or_default();
let init_str = init.display().to_string();
let argv: Vec<&str> = iter::once(init_str.as_str())
.chain(args.iter().map(|s| s.as_str()))
.collect();
// Create environment for process. Set data directory, container name and version
let mut env = manifest.env.clone().unwrap_or_default();
env.insert(ENV_NAME.to_string(), manifest.name.to_string());
env.insert(ENV_VERSION.to_string(), manifest.version.to_string());
let env = env
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<String>>();
let env = env.iter().map(|a| a.as_str()).collect::<Vec<&str>>();
debug!(
"Executing \"{}{}{}\"",
init.display(),
if args.len() > 1 { " " } else { "" },
argv.iter().skip(1).join(" ")
);
let stdout =
CaptureOutput::new(OutputStream::Stdout, &manifest.name, self.event_tx.clone()).await?;
let stderr =
CaptureOutput::new(OutputStream::Stderr, &manifest.name, self.event_tx.clone()).await?;
// Prevent minijail to close the log fd so that errors aren't missed
let log_fd = (self.log_fd, self.log_fd);
let pid = jail.run_remap_env_preload(
&init.as_path(),
&[(stdout.0, 1), (stderr.0, 2), log_fd],
&argv,
&env,
false,
)? as u32;
let (exit_handle_signal, exit_handle_wait) = exit_handle();
// Spawn a task thats waits for the child to exit
waitpid(
&manifest.name,
pid,
exit_handle_signal,
self.event_tx.clone(),
)
.await;
Ok(Process {
pid,
_jail: jail,
_tmpdir: tmpdir,
_stdout: stdout,
_stderr: stderr,
exit_handle_wait,
})
}
async fn setup_mounts(
&self,
jail: &mut MinijailHandle,
container: &Container,
) -> Result<(), Error> {
let proc = Path::new("/proc");
jail.mount_bind(&proc, &proc, false)
.map_err(Error::Minijail)?;
jail.remount_proc_readonly();
// If there's no explicit mount for /dev add a minimal variant
if !container
.manifest
.mounts
.contains_key(&PathBuf::from("/dev"))
{
debug!("Mounting minimal /dev");
jail.mount_dev();
}
for (target, mount) in &container.manifest.mounts {
match &mount {
Mount::Bind { host, flags } => {
if !&host.exists() {
warn!(
"Cannot bind mount nonexitent source {} to {}",
host.display(),
target.display()
);
continue;
}
let rw = flags.contains(&MountFlag::Rw);
debug!(
"Mounting {} on {}{}",
host.display(),
target.display(),
if rw { " (rw)" } else { "" }
);
jail.mount_bind(&host, &target, rw)
.map_err(Error::Minijail)?;
}
Mount::Persist => {
let dir = self.data_dir.join(&container.manifest.name);
if !dir.exists() {
debug!("Creating {}", dir.display());
fs::create_dir_all(&dir).await.map_err(|e| {
Error::Io(format!("Failed to create {}", dir.display()), e)
})?;
}
debug!("Chowning {} to {}:{}", dir.display(), self.uid, self.gid);
chown(
dir.as_os_str(),
Some(unistd::Uid::from_raw(self.uid)),
Some(unistd::Gid::from_raw(self.gid)),
)
.map_err(|e| {
Error::Os(
format!(
"Failed to chown {} to {}:{}",
dir.display(),
self.uid,
self.gid
),
e,
)
})?;
debug!("Mounting {} on {}", dir.display(), target.display(),);
jail.mount_bind(&dir, &target, true)
.map_err(Error::Minijail)?;
}
Mount::Resource { name, version, dir } => {
let src = {
// Join the source of the resource container with the mount dir
let resource_root = self.run_dir.join(&name).join(&version.to_string());
let dir = dir
.strip_prefix("/")
.map(|d| resource_root.join(d))
.unwrap_or(resource_root);
if !dir.exists() {
return Err(Error::Start(format!(
"Resource folder {} is missing",
dir.display()
)));
}
dir
};
debug!("Mounting {} on {}", src.display(), target.display());
jail.mount_bind(&src, &target, false)
.map_err(Error::Minijail)?;
}
Mount::Tmpfs { size } => {
debug!(
"Mounting tmpfs with size {} on {}",
bytesize::ByteSize::b(*size),
target.display()
);
let data = format!("size={},mode=1777", size);
jail.mount_with_data(&Path::new("none"), &target, "tmpfs", 0, &data)
.map_err(Error::Minijail)?;
}
Mount::Dev { r#type } => {
match r#type {
// The Full mount of /dev is a simple rw bind mount of /dev
Dev::Full => {
let dev = Path::new("/dev");
jail.mount_bind(&dev, &dev, true).map_err(Error::Minijail)?;
}
}
}
}
}
Ok(())
}
}
pub(crate) struct Process {
/// PID of this process
pid: u32,
/// Handle to a libminijail configuration
_jail: MinijailHandle,
/// Temporary directory created in the systems tmp folder.
/// This directory holds process instance specific data that needs
/// to be dumped to disk for startup. e.g seccomp config (TODO)
_tmpdir: tempfile::TempDir,
/// Captured stdout output
_stdout: CaptureOutput,
/// Captured stderr output
_stderr: CaptureOutput,
/// Rx part of the exit handle of this process
exit_handle_wait: ExitHandleWait,
}
impl fmt::Debug for Process {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Process").field("pid", &self.pid).finish()
}
}
impl Process {
pub fn pid(&self) -> Pid {
self.pid
}
pub async fn stop(&mut self, timeout: time::Duration) -> Result<ExitStatus, Error> {
// Send a SIGTERM to the application. If the application does not terminate with a timeout
// it is SIGKILLed.
let sigterm = signal::Signal::SIGTERM;
signal::kill(unistd::Pid::from_raw(self.pid as i32), Some(sigterm))
.map_err(|e| Error::Os(format!("Failed to SIGTERM {}", self.pid), e))?;
let timeout = Box::pin(time::sleep(timeout));
let exited = Box::pin(self.exit_handle_wait.recv());
let pid = self.pid;
Ok(select! {
s = exited => {
s.expect("Internal channel error during process termination") // This is the happy path...
},
_ = timeout => {
signal::kill(unistd::Pid::from_raw(pid as i32), Some(signal::Signal::SIGKILL))
.map_err(|e| Error::Os("Failed to kill process".to_string(), e))?;
ExitStatus::Signaled(signal::Signal::SIGKILL)
}
})
}
}
struct AsyncPipe {
inner: AsyncFd<std::fs::File>,
writefd: i32,
}
impl AsyncPipe {
fn new() -> Result<AsyncPipe, Error> {
let (readfd, writefd) =
pipe().map_err(|e| Error::Os("Failed to create pipe".to_string(), e))?;
let mut flags = OFlag::from_bits(fcntl(readfd, fcntl::FcntlArg::F_GETFL).unwrap()).unwrap();
flags.set(OFlag::O_NONBLOCK, true);
fcntl(readfd, fcntl::FcntlArg::F_SETFL(flags)).expect("Failed to configure pipe fd");
let pipe =
unsafe { <std::fs::File as std::os::unix::prelude::FromRawFd>::from_raw_fd(readfd) };
let inner = AsyncFd::new(pipe).map_err(|e| Error::Io("Async fd".to_string(), e))?;
Ok(AsyncPipe { inner, writefd })
}
fn writefd(&self) -> RawFd {
self.writefd
}
}
impl AsyncRead for AsyncPipe {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let mut guard = futures::ready!(self.inner.poll_read_ready(cx))?;
match guard
.try_io(|inner| std::io::Read::read(&mut inner.get_ref(), buf.initialized_mut()))
{
Ok(Ok(n)) => {
buf.advance(n);
break Poll::Ready(Ok(()));
}
Ok(Err(e)) => break Poll::Ready(Err(e)),
Err(_would_block) => continue,
}
}
}
}
// Capture output of a child process. Create a pipe and spawn a task that forwards each line to
// the main loop. When this struct is dropped the internal spawned tasks are stopped.
#[derive(Debug)]
struct CaptureOutput(i32, oneshot::Sender<()>);
impl CaptureOutput {
pub async fn new(
stream: OutputStream,
tag: &str,
event_tx: EventTx,
) -> Result<CaptureOutput, Error> {
let pipe = AsyncPipe::new()?;
let writefd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
let tag = tag.to_string();
let (tx, mut rx) = oneshot::channel();
debug!("Starting stream capture of {} on {:?}", tag, stream);
task::spawn(async move {
loop {
select! {
_ = &mut rx => break,
line = lines.next_line() => {
if let Ok(Some(line)) = line {
let event = Event::ChildOutput {
name: tag.clone(),
stream: stream.clone(),
line,
};
event_tx.send(event).await.ok();
} else {
break;
}
}
}
}
debug!("Stopped stream capture of {} on {:?}", tag, stream);
});
Ok(CaptureOutput(writefd, tx))
}
}
| shutdown | identifier_name |
minijail.rs | // Copyright (c) 2020 ESRLabs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{
process::{
exit_handle, waitpid, Error, ExitHandleWait, ExitStatus, Pid, ENV_NAME, ENV_VERSION,
},
state::Container,
OutputStream,
};
use crate::runtime::{Event, EventTx};
use futures::channel::oneshot;
use itertools::Itertools;
use log::{debug, error, info, trace, warn, Level};
use nix::{
fcntl::{self, fcntl, OFlag},
sys::signal,
unistd::{self, chown, pipe},
};
use npk::manifest::{Dev, Mount, MountFlag};
use std::{
fmt, iter, ops,
os::unix::prelude::RawFd,
path::{Path, PathBuf},
pin::Pin,
task::{Context, Poll},
};
use tokio::{
fs,
io::{self, unix::AsyncFd, AsyncBufReadExt, AsyncRead, AsyncWriteExt, ReadBuf},
select, task, time,
};
// We need a Send + Sync version of minijail::Minijail
struct MinijailHandle(::minijail::Minijail);
unsafe impl Send for MinijailHandle {}
unsafe impl Sync for MinijailHandle {}
impl ops::Deref for MinijailHandle {
type Target = ::minijail::Minijail;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ops::DerefMut for MinijailHandle {
fn deref_mut(&mut self) -> &mut Self::Target |
}
#[derive(Debug)]
pub struct Minijail {
log_fd: i32,
event_tx: EventTx,
run_dir: PathBuf,
data_dir: PathBuf,
uid: u32,
gid: u32,
}
impl Minijail {
pub(crate) fn new(
event_tx: EventTx,
run_dir: &Path,
data_dir: &Path,
uid: u32,
gid: u32,
) -> Result<Minijail, Error> {
let pipe = AsyncPipe::new()?;
let log_fd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
// Spawn a task that forwards logs from minijail to the rust logger.
task::spawn(async move {
while let Ok(Some(line)) = lines.next_line().await {
let l = line.split_whitespace().skip(2).collect::<String>();
match line.chars().next() {
Some('D') => debug!("{}", l),
Some('I') => info!("{}", l),
Some('W') => warn!("{}", l),
Some('E') => error!("{}", l),
_ => trace!("{}", line),
}
}
});
let minijail_log_level = match log::max_level().to_level().unwrap_or(Level::Warn) {
Level::Error => 3,
Level::Warn => 4,
Level::Info => 6,
Level::Debug => 7,
Level::Trace => i32::MAX,
};
::minijail::Minijail::log_to_fd(log_fd, minijail_log_level as i32);
Ok(Minijail {
event_tx,
run_dir: run_dir.into(),
data_dir: data_dir.into(),
uid,
gid,
log_fd,
})
}
pub(crate) fn shutdown(&self) -> Result<(), Error> {
// Just make clippy happy
if false {
Err(Error::Stop)
} else {
Ok(())
}
}
pub(crate) async fn start(&self, container: &Container) -> Result<Process, Error> {
let root = &container.root;
let manifest = &container.manifest;
let mut jail = MinijailHandle(::minijail::Minijail::new().map_err(Error::Minijail)?);
let init = manifest
.init
.as_ref()
.ok_or_else(|| Error::Start("Cannot start a resource".to_string()))?;
let tmpdir = tempfile::TempDir::new()
.map_err(|e| Error::Io(format!("Failed to create tmpdir for {}", manifest.name), e))?;
let tmpdir_path = tmpdir.path();
// Dump seccomp config to process tmpdir. This is a subject to be changed since
// minijail provides a API to configure seccomp without writing to a file.
// TODO: configure seccomp via API instead of a file
if let Some(ref seccomp) = container.manifest.seccomp {
let seccomp_config = tmpdir_path.join("seccomp");
let mut f = fs::File::create(&seccomp_config)
.await
.map_err(|e| Error::Io("Failed to create seccomp configuraiton".to_string(), e))?;
let s = itertools::join(seccomp.iter().map(|(k, v)| format!("{}: {}", k, v)), "\n");
f.write_all(s.as_bytes())
.await
.map_err(|e| Error::Io("Failed to write seccomp configuraiton".to_string(), e))?;
// Temporary disabled
// Must be called before parse_seccomp_filters
// jail.log_seccomp_filter_failures();
// let p: std::path::PathBuf = seccomp_config.into();
// jail.parse_seccomp_filters(p.as_path())
// .context("Failed parse seccomp config")?;
// jail.use_seccomp_filter();
}
// Configure UID
jail.change_uid(self.uid);
// Configure PID
jail.change_gid(self.gid);
// Update the capability mask if specified
if let Some(capabilities) = &manifest.capabilities {
// TODO: the capabilities should be passed as an array
jail.update_caps(&capabilities.join(" "))
.map_err(Error::Minijail)?;
}
// Update the supplementary group list if specified
if let Some(suppl_groups) = &manifest.suppl_groups {
// TODO: the groups should be passed an array
jail.update_suppl_groups(&suppl_groups.join(" "))
.map_err(Error::Minijail)?;
}
// TODO: Do not use pid namespace because of multithreadding
// issues discovered by minijail. See libminijail.c for details.
// Make the process enter a pid namespace
//jail.namespace_pids();
// Make the process enter a vfs namespace
jail.namespace_vfs();
// Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
// in the kernel source tree for an explanation of the parameters.
jail.no_new_privs();
// Set chroot dir for process
jail.enter_chroot(&root.as_path())?;
// Make the application the init process
jail.run_as_init();
self.setup_mounts(&mut jail, container).await?;
// Arguments
let args = manifest.args.clone().unwrap_or_default();
let init_str = init.display().to_string();
let argv: Vec<&str> = iter::once(init_str.as_str())
.chain(args.iter().map(|s| s.as_str()))
.collect();
// Create environment for process. Set data directory, container name and version
let mut env = manifest.env.clone().unwrap_or_default();
env.insert(ENV_NAME.to_string(), manifest.name.to_string());
env.insert(ENV_VERSION.to_string(), manifest.version.to_string());
let env = env
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<String>>();
let env = env.iter().map(|a| a.as_str()).collect::<Vec<&str>>();
debug!(
"Executing \"{}{}{}\"",
init.display(),
if args.len() > 1 { " " } else { "" },
argv.iter().skip(1).join(" ")
);
let stdout =
CaptureOutput::new(OutputStream::Stdout, &manifest.name, self.event_tx.clone()).await?;
let stderr =
CaptureOutput::new(OutputStream::Stderr, &manifest.name, self.event_tx.clone()).await?;
// Prevent minijail to close the log fd so that errors aren't missed
let log_fd = (self.log_fd, self.log_fd);
let pid = jail.run_remap_env_preload(
&init.as_path(),
&[(stdout.0, 1), (stderr.0, 2), log_fd],
&argv,
&env,
false,
)? as u32;
let (exit_handle_signal, exit_handle_wait) = exit_handle();
// Spawn a task thats waits for the child to exit
waitpid(
&manifest.name,
pid,
exit_handle_signal,
self.event_tx.clone(),
)
.await;
Ok(Process {
pid,
_jail: jail,
_tmpdir: tmpdir,
_stdout: stdout,
_stderr: stderr,
exit_handle_wait,
})
}
async fn setup_mounts(
&self,
jail: &mut MinijailHandle,
container: &Container,
) -> Result<(), Error> {
let proc = Path::new("/proc");
jail.mount_bind(&proc, &proc, false)
.map_err(Error::Minijail)?;
jail.remount_proc_readonly();
// If there's no explicit mount for /dev add a minimal variant
if !container
.manifest
.mounts
.contains_key(&PathBuf::from("/dev"))
{
debug!("Mounting minimal /dev");
jail.mount_dev();
}
for (target, mount) in &container.manifest.mounts {
match &mount {
Mount::Bind { host, flags } => {
if !&host.exists() {
warn!(
"Cannot bind mount nonexitent source {} to {}",
host.display(),
target.display()
);
continue;
}
let rw = flags.contains(&MountFlag::Rw);
debug!(
"Mounting {} on {}{}",
host.display(),
target.display(),
if rw { " (rw)" } else { "" }
);
jail.mount_bind(&host, &target, rw)
.map_err(Error::Minijail)?;
}
Mount::Persist => {
let dir = self.data_dir.join(&container.manifest.name);
if !dir.exists() {
debug!("Creating {}", dir.display());
fs::create_dir_all(&dir).await.map_err(|e| {
Error::Io(format!("Failed to create {}", dir.display()), e)
})?;
}
debug!("Chowning {} to {}:{}", dir.display(), self.uid, self.gid);
chown(
dir.as_os_str(),
Some(unistd::Uid::from_raw(self.uid)),
Some(unistd::Gid::from_raw(self.gid)),
)
.map_err(|e| {
Error::Os(
format!(
"Failed to chown {} to {}:{}",
dir.display(),
self.uid,
self.gid
),
e,
)
})?;
debug!("Mounting {} on {}", dir.display(), target.display(),);
jail.mount_bind(&dir, &target, true)
.map_err(Error::Minijail)?;
}
Mount::Resource { name, version, dir } => {
let src = {
// Join the source of the resource container with the mount dir
let resource_root = self.run_dir.join(&name).join(&version.to_string());
let dir = dir
.strip_prefix("/")
.map(|d| resource_root.join(d))
.unwrap_or(resource_root);
if !dir.exists() {
return Err(Error::Start(format!(
"Resource folder {} is missing",
dir.display()
)));
}
dir
};
debug!("Mounting {} on {}", src.display(), target.display());
jail.mount_bind(&src, &target, false)
.map_err(Error::Minijail)?;
}
Mount::Tmpfs { size } => {
debug!(
"Mounting tmpfs with size {} on {}",
bytesize::ByteSize::b(*size),
target.display()
);
let data = format!("size={},mode=1777", size);
jail.mount_with_data(&Path::new("none"), &target, "tmpfs", 0, &data)
.map_err(Error::Minijail)?;
}
Mount::Dev { r#type } => {
match r#type {
// The Full mount of /dev is a simple rw bind mount of /dev
Dev::Full => {
let dev = Path::new("/dev");
jail.mount_bind(&dev, &dev, true).map_err(Error::Minijail)?;
}
}
}
}
}
Ok(())
}
}
pub(crate) struct Process {
/// PID of this process
pid: u32,
/// Handle to a libminijail configuration
_jail: MinijailHandle,
/// Temporary directory created in the systems tmp folder.
/// This directory holds process instance specific data that needs
/// to be dumped to disk for startup. e.g seccomp config (TODO)
_tmpdir: tempfile::TempDir,
/// Captured stdout output
_stdout: CaptureOutput,
/// Captured stderr output
_stderr: CaptureOutput,
/// Rx part of the exit handle of this process
exit_handle_wait: ExitHandleWait,
}
impl fmt::Debug for Process {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Process").field("pid", &self.pid).finish()
}
}
impl Process {
pub fn pid(&self) -> Pid {
self.pid
}
pub async fn stop(&mut self, timeout: time::Duration) -> Result<ExitStatus, Error> {
// Send a SIGTERM to the application. If the application does not terminate with a timeout
// it is SIGKILLed.
let sigterm = signal::Signal::SIGTERM;
signal::kill(unistd::Pid::from_raw(self.pid as i32), Some(sigterm))
.map_err(|e| Error::Os(format!("Failed to SIGTERM {}", self.pid), e))?;
let timeout = Box::pin(time::sleep(timeout));
let exited = Box::pin(self.exit_handle_wait.recv());
let pid = self.pid;
Ok(select! {
s = exited => {
s.expect("Internal channel error during process termination") // This is the happy path...
},
_ = timeout => {
signal::kill(unistd::Pid::from_raw(pid as i32), Some(signal::Signal::SIGKILL))
.map_err(|e| Error::Os("Failed to kill process".to_string(), e))?;
ExitStatus::Signaled(signal::Signal::SIGKILL)
}
})
}
}
struct AsyncPipe {
inner: AsyncFd<std::fs::File>,
writefd: i32,
}
impl AsyncPipe {
fn new() -> Result<AsyncPipe, Error> {
let (readfd, writefd) =
pipe().map_err(|e| Error::Os("Failed to create pipe".to_string(), e))?;
let mut flags = OFlag::from_bits(fcntl(readfd, fcntl::FcntlArg::F_GETFL).unwrap()).unwrap();
flags.set(OFlag::O_NONBLOCK, true);
fcntl(readfd, fcntl::FcntlArg::F_SETFL(flags)).expect("Failed to configure pipe fd");
let pipe =
unsafe { <std::fs::File as std::os::unix::prelude::FromRawFd>::from_raw_fd(readfd) };
let inner = AsyncFd::new(pipe).map_err(|e| Error::Io("Async fd".to_string(), e))?;
Ok(AsyncPipe { inner, writefd })
}
fn writefd(&self) -> RawFd {
self.writefd
}
}
impl AsyncRead for AsyncPipe {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let mut guard = futures::ready!(self.inner.poll_read_ready(cx))?;
match guard
.try_io(|inner| std::io::Read::read(&mut inner.get_ref(), buf.initialized_mut()))
{
Ok(Ok(n)) => {
buf.advance(n);
break Poll::Ready(Ok(()));
}
Ok(Err(e)) => break Poll::Ready(Err(e)),
Err(_would_block) => continue,
}
}
}
}
// Capture output of a child process. Create a pipe and spawn a task that forwards each line to
// the main loop. When this struct is dropped the internal spawned tasks are stopped.
#[derive(Debug)]
struct CaptureOutput(i32, oneshot::Sender<()>);
impl CaptureOutput {
pub async fn new(
stream: OutputStream,
tag: &str,
event_tx: EventTx,
) -> Result<CaptureOutput, Error> {
let pipe = AsyncPipe::new()?;
let writefd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
let tag = tag.to_string();
let (tx, mut rx) = oneshot::channel();
debug!("Starting stream capture of {} on {:?}", tag, stream);
task::spawn(async move {
loop {
select! {
_ = &mut rx => break,
line = lines.next_line() => {
if let Ok(Some(line)) = line {
let event = Event::ChildOutput {
name: tag.clone(),
stream: stream.clone(),
line,
};
event_tx.send(event).await.ok();
} else {
break;
}
}
}
}
debug!("Stopped stream capture of {} on {:?}", tag, stream);
});
Ok(CaptureOutput(writefd, tx))
}
}
| {
&mut self.0
} | identifier_body |
minijail.rs | // Copyright (c) 2020 ESRLabs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{
process::{
exit_handle, waitpid, Error, ExitHandleWait, ExitStatus, Pid, ENV_NAME, ENV_VERSION,
},
state::Container,
OutputStream,
};
use crate::runtime::{Event, EventTx};
use futures::channel::oneshot;
use itertools::Itertools;
use log::{debug, error, info, trace, warn, Level};
use nix::{
fcntl::{self, fcntl, OFlag},
sys::signal,
unistd::{self, chown, pipe},
};
use npk::manifest::{Dev, Mount, MountFlag};
use std::{
fmt, iter, ops,
os::unix::prelude::RawFd,
path::{Path, PathBuf},
pin::Pin,
task::{Context, Poll},
};
use tokio::{
fs,
io::{self, unix::AsyncFd, AsyncBufReadExt, AsyncRead, AsyncWriteExt, ReadBuf},
select, task, time,
};
// We need a Send + Sync version of minijail::Minijail
struct MinijailHandle(::minijail::Minijail);
unsafe impl Send for MinijailHandle {}
unsafe impl Sync for MinijailHandle {}
impl ops::Deref for MinijailHandle {
type Target = ::minijail::Minijail;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ops::DerefMut for MinijailHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Debug)]
pub struct Minijail {
log_fd: i32,
event_tx: EventTx,
run_dir: PathBuf,
data_dir: PathBuf,
uid: u32,
gid: u32,
}
impl Minijail {
pub(crate) fn new(
event_tx: EventTx,
run_dir: &Path,
data_dir: &Path,
uid: u32,
gid: u32,
) -> Result<Minijail, Error> {
let pipe = AsyncPipe::new()?;
let log_fd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
// Spawn a task that forwards logs from minijail to the rust logger.
task::spawn(async move {
while let Ok(Some(line)) = lines.next_line().await {
let l = line.split_whitespace().skip(2).collect::<String>();
match line.chars().next() {
Some('D') => debug!("{}", l),
Some('I') => info!("{}", l),
Some('W') => warn!("{}", l),
Some('E') => error!("{}", l),
_ => trace!("{}", line),
}
}
});
let minijail_log_level = match log::max_level().to_level().unwrap_or(Level::Warn) {
Level::Error => 3,
Level::Warn => 4,
Level::Info => 6,
Level::Debug => 7,
Level::Trace => i32::MAX,
};
::minijail::Minijail::log_to_fd(log_fd, minijail_log_level as i32);
Ok(Minijail {
event_tx,
run_dir: run_dir.into(),
data_dir: data_dir.into(),
uid,
gid,
log_fd,
})
}
pub(crate) fn shutdown(&self) -> Result<(), Error> {
// Just make clippy happy
if false {
Err(Error::Stop)
} else {
Ok(())
}
}
pub(crate) async fn start(&self, container: &Container) -> Result<Process, Error> {
let root = &container.root;
let manifest = &container.manifest;
let mut jail = MinijailHandle(::minijail::Minijail::new().map_err(Error::Minijail)?);
let init = manifest
.init
.as_ref()
.ok_or_else(|| Error::Start("Cannot start a resource".to_string()))?;
let tmpdir = tempfile::TempDir::new()
.map_err(|e| Error::Io(format!("Failed to create tmpdir for {}", manifest.name), e))?;
let tmpdir_path = tmpdir.path();
// Dump seccomp config to process tmpdir. This is a subject to be changed since
// minijail provides a API to configure seccomp without writing to a file.
// TODO: configure seccomp via API instead of a file
if let Some(ref seccomp) = container.manifest.seccomp {
let seccomp_config = tmpdir_path.join("seccomp");
let mut f = fs::File::create(&seccomp_config)
.await
.map_err(|e| Error::Io("Failed to create seccomp configuraiton".to_string(), e))?;
let s = itertools::join(seccomp.iter().map(|(k, v)| format!("{}: {}", k, v)), "\n");
f.write_all(s.as_bytes())
.await
.map_err(|e| Error::Io("Failed to write seccomp configuraiton".to_string(), e))?;
// Temporary disabled
// Must be called before parse_seccomp_filters
// jail.log_seccomp_filter_failures();
// let p: std::path::PathBuf = seccomp_config.into();
// jail.parse_seccomp_filters(p.as_path())
// .context("Failed parse seccomp config")?;
// jail.use_seccomp_filter();
}
// Configure UID
jail.change_uid(self.uid);
// Configure PID
jail.change_gid(self.gid);
// Update the capability mask if specified
if let Some(capabilities) = &manifest.capabilities {
// TODO: the capabilities should be passed as an array
jail.update_caps(&capabilities.join(" "))
.map_err(Error::Minijail)?;
}
// Update the supplementary group list if specified
if let Some(suppl_groups) = &manifest.suppl_groups {
// TODO: the groups should be passed an array
jail.update_suppl_groups(&suppl_groups.join(" "))
.map_err(Error::Minijail)?;
}
// TODO: Do not use pid namespace because of multithreadding
// issues discovered by minijail. See libminijail.c for details.
// Make the process enter a pid namespace
//jail.namespace_pids();
// Make the process enter a vfs namespace
jail.namespace_vfs();
// Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
// in the kernel source tree for an explanation of the parameters.
jail.no_new_privs();
// Set chroot dir for process
jail.enter_chroot(&root.as_path())?;
// Make the application the init process
jail.run_as_init();
self.setup_mounts(&mut jail, container).await?;
// Arguments
let args = manifest.args.clone().unwrap_or_default();
let init_str = init.display().to_string();
let argv: Vec<&str> = iter::once(init_str.as_str())
.chain(args.iter().map(|s| s.as_str()))
.collect();
// Create environment for process. Set data directory, container name and version
let mut env = manifest.env.clone().unwrap_or_default();
env.insert(ENV_NAME.to_string(), manifest.name.to_string());
env.insert(ENV_VERSION.to_string(), manifest.version.to_string());
let env = env
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<String>>();
let env = env.iter().map(|a| a.as_str()).collect::<Vec<&str>>();
debug!(
"Executing \"{}{}{}\"",
init.display(),
if args.len() > 1 { " " } else { "" },
argv.iter().skip(1).join(" ")
);
let stdout =
CaptureOutput::new(OutputStream::Stdout, &manifest.name, self.event_tx.clone()).await?;
let stderr =
CaptureOutput::new(OutputStream::Stderr, &manifest.name, self.event_tx.clone()).await?;
// Prevent minijail to close the log fd so that errors aren't missed
let log_fd = (self.log_fd, self.log_fd);
let pid = jail.run_remap_env_preload(
&init.as_path(),
&[(stdout.0, 1), (stderr.0, 2), log_fd],
&argv,
&env,
false,
)? as u32;
let (exit_handle_signal, exit_handle_wait) = exit_handle();
// Spawn a task thats waits for the child to exit
waitpid(
&manifest.name,
pid,
exit_handle_signal,
self.event_tx.clone(),
)
.await;
Ok(Process {
pid,
_jail: jail,
_tmpdir: tmpdir,
_stdout: stdout,
_stderr: stderr,
exit_handle_wait,
})
}
| let proc = Path::new("/proc");
jail.mount_bind(&proc, &proc, false)
.map_err(Error::Minijail)?;
jail.remount_proc_readonly();
// If there's no explicit mount for /dev add a minimal variant
if !container
.manifest
.mounts
.contains_key(&PathBuf::from("/dev"))
{
debug!("Mounting minimal /dev");
jail.mount_dev();
}
for (target, mount) in &container.manifest.mounts {
match &mount {
Mount::Bind { host, flags } => {
if !&host.exists() {
warn!(
"Cannot bind mount nonexitent source {} to {}",
host.display(),
target.display()
);
continue;
}
let rw = flags.contains(&MountFlag::Rw);
debug!(
"Mounting {} on {}{}",
host.display(),
target.display(),
if rw { " (rw)" } else { "" }
);
jail.mount_bind(&host, &target, rw)
.map_err(Error::Minijail)?;
}
Mount::Persist => {
let dir = self.data_dir.join(&container.manifest.name);
if !dir.exists() {
debug!("Creating {}", dir.display());
fs::create_dir_all(&dir).await.map_err(|e| {
Error::Io(format!("Failed to create {}", dir.display()), e)
})?;
}
debug!("Chowning {} to {}:{}", dir.display(), self.uid, self.gid);
chown(
dir.as_os_str(),
Some(unistd::Uid::from_raw(self.uid)),
Some(unistd::Gid::from_raw(self.gid)),
)
.map_err(|e| {
Error::Os(
format!(
"Failed to chown {} to {}:{}",
dir.display(),
self.uid,
self.gid
),
e,
)
})?;
debug!("Mounting {} on {}", dir.display(), target.display(),);
jail.mount_bind(&dir, &target, true)
.map_err(Error::Minijail)?;
}
Mount::Resource { name, version, dir } => {
let src = {
// Join the source of the resource container with the mount dir
let resource_root = self.run_dir.join(&name).join(&version.to_string());
let dir = dir
.strip_prefix("/")
.map(|d| resource_root.join(d))
.unwrap_or(resource_root);
if !dir.exists() {
return Err(Error::Start(format!(
"Resource folder {} is missing",
dir.display()
)));
}
dir
};
debug!("Mounting {} on {}", src.display(), target.display());
jail.mount_bind(&src, &target, false)
.map_err(Error::Minijail)?;
}
Mount::Tmpfs { size } => {
debug!(
"Mounting tmpfs with size {} on {}",
bytesize::ByteSize::b(*size),
target.display()
);
let data = format!("size={},mode=1777", size);
jail.mount_with_data(&Path::new("none"), &target, "tmpfs", 0, &data)
.map_err(Error::Minijail)?;
}
Mount::Dev { r#type } => {
match r#type {
// The Full mount of /dev is a simple rw bind mount of /dev
Dev::Full => {
let dev = Path::new("/dev");
jail.mount_bind(&dev, &dev, true).map_err(Error::Minijail)?;
}
}
}
}
}
Ok(())
}
}
pub(crate) struct Process {
/// PID of this process
pid: u32,
/// Handle to a libminijail configuration
_jail: MinijailHandle,
/// Temporary directory created in the systems tmp folder.
/// This directory holds process instance specific data that needs
/// to be dumped to disk for startup. e.g seccomp config (TODO)
_tmpdir: tempfile::TempDir,
/// Captured stdout output
_stdout: CaptureOutput,
/// Captured stderr output
_stderr: CaptureOutput,
/// Rx part of the exit handle of this process
exit_handle_wait: ExitHandleWait,
}
impl fmt::Debug for Process {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Process").field("pid", &self.pid).finish()
}
}
impl Process {
pub fn pid(&self) -> Pid {
self.pid
}
pub async fn stop(&mut self, timeout: time::Duration) -> Result<ExitStatus, Error> {
// Send a SIGTERM to the application. If the application does not terminate with a timeout
// it is SIGKILLed.
let sigterm = signal::Signal::SIGTERM;
signal::kill(unistd::Pid::from_raw(self.pid as i32), Some(sigterm))
.map_err(|e| Error::Os(format!("Failed to SIGTERM {}", self.pid), e))?;
let timeout = Box::pin(time::sleep(timeout));
let exited = Box::pin(self.exit_handle_wait.recv());
let pid = self.pid;
Ok(select! {
s = exited => {
s.expect("Internal channel error during process termination") // This is the happy path...
},
_ = timeout => {
signal::kill(unistd::Pid::from_raw(pid as i32), Some(signal::Signal::SIGKILL))
.map_err(|e| Error::Os("Failed to kill process".to_string(), e))?;
ExitStatus::Signaled(signal::Signal::SIGKILL)
}
})
}
}
struct AsyncPipe {
inner: AsyncFd<std::fs::File>,
writefd: i32,
}
impl AsyncPipe {
fn new() -> Result<AsyncPipe, Error> {
let (readfd, writefd) =
pipe().map_err(|e| Error::Os("Failed to create pipe".to_string(), e))?;
let mut flags = OFlag::from_bits(fcntl(readfd, fcntl::FcntlArg::F_GETFL).unwrap()).unwrap();
flags.set(OFlag::O_NONBLOCK, true);
fcntl(readfd, fcntl::FcntlArg::F_SETFL(flags)).expect("Failed to configure pipe fd");
let pipe =
unsafe { <std::fs::File as std::os::unix::prelude::FromRawFd>::from_raw_fd(readfd) };
let inner = AsyncFd::new(pipe).map_err(|e| Error::Io("Async fd".to_string(), e))?;
Ok(AsyncPipe { inner, writefd })
}
fn writefd(&self) -> RawFd {
self.writefd
}
}
impl AsyncRead for AsyncPipe {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let mut guard = futures::ready!(self.inner.poll_read_ready(cx))?;
match guard
.try_io(|inner| std::io::Read::read(&mut inner.get_ref(), buf.initialized_mut()))
{
Ok(Ok(n)) => {
buf.advance(n);
break Poll::Ready(Ok(()));
}
Ok(Err(e)) => break Poll::Ready(Err(e)),
Err(_would_block) => continue,
}
}
}
}
// Capture output of a child process. Create a pipe and spawn a task that forwards each line to
// the main loop. When this struct is dropped the internal spawned tasks are stopped.
#[derive(Debug)]
struct CaptureOutput(i32, oneshot::Sender<()>);
impl CaptureOutput {
pub async fn new(
stream: OutputStream,
tag: &str,
event_tx: EventTx,
) -> Result<CaptureOutput, Error> {
let pipe = AsyncPipe::new()?;
let writefd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
let tag = tag.to_string();
let (tx, mut rx) = oneshot::channel();
debug!("Starting stream capture of {} on {:?}", tag, stream);
task::spawn(async move {
loop {
select! {
_ = &mut rx => break,
line = lines.next_line() => {
if let Ok(Some(line)) = line {
let event = Event::ChildOutput {
name: tag.clone(),
stream: stream.clone(),
line,
};
event_tx.send(event).await.ok();
} else {
break;
}
}
}
}
debug!("Stopped stream capture of {} on {:?}", tag, stream);
});
Ok(CaptureOutput(writefd, tx))
}
} | async fn setup_mounts(
&self,
jail: &mut MinijailHandle,
container: &Container,
) -> Result<(), Error> { | random_line_split |
mod.rs | use std::error::Error as StdError;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::collections::HashSet;
use std::fs::File;
use digest::{Digest, FixedOutput};
use nix::fcntl::{fcntl, open, OFlag};
use nix::sys::stat::Mode;
use nix::unistd::{execveat, fork, lseek64, write, ForkResult, Gid, Pid, Uid, Whence};
use sha2::{Sha256, Sha512};
use sha3::{Sha3_512, Keccak512};
use tracing::{event, Level};
use users::{get_group_by_name, get_user_by_name};
use yscloud_config_model::ImageType;
use memfd::{MemFd, MemFdOptions, SealFlag};
use owned_fd::{OwnedFd, IntoOwnedFd};
use super::posix_imp::relabel_file_descriptors;
pub use super::posix_imp::run_reified;
use crate::{Void, AppPreforkConfiguration};
pub mod arch;
pub mod seccomp;
pub mod unshare;
pub mod container;
pub mod mount;
pub struct ContainerImage {
file: PathBuf,
mounts: Vec<(PathBuf, PathBuf)>,
}
impl ContainerImage {
pub fn start(&self) -> io::Result<Void> {
return Err(io::Error::new(io::ErrorKind::Other, "containers not yet implemented"));
}
}
#[derive(Debug)]
pub struct Executable {
file: OwnedFd,
}
impl fmt::Display for Executable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "fd+executable:{}", self.file.as_raw_fd())
}
}
impl AsRawFd for Executable {
fn as_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
fn nix_error_to_io_error(err: nix::Error) -> io::Error {
match err {
nix::Error::Sys(syserr) => io::Error::from_raw_os_error(syserr as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::Other, "Invalid path (nix)"),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::Other, "Invalid UTF-8 (nix)"),
nix::Error::UnsupportedOperation => {
io::Error::new(io::ErrorKind::Other, "Unsupported operation (nix)")
}
}
}
pub struct ExecutableFactory {
storage: File,
expected_path: PathBuf,
disk_linked: bool,
}
impl ExecutableFactory {
pub fn new_unspecified(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
Self::new_in_memory(name, capacity)
}
pub fn new_in_memory(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
let mut mem_fd = MemFdOptions::new()
.cloexec(true)
.allow_sealing(true)
.with_capacity(capacity)
.set_mode(Mode::S_IRWXU | Mode::S_IRGRP | Mode::S_IXGRP | Mode::S_IROTH | Mode::S_IXOTH)
.open(name)
.map_err(|e| nix_error_to_io_error(e))?;
mem_fd
.seal(SealFlag::F_SEAL_SEAL | SealFlag::F_SEAL_SHRINK | SealFlag::F_SEAL_GROW)
.map_err(|e| nix_error_to_io_error(e))?;
Ok(ExecutableFactory {
storage: mem_fd.into(),
expected_path: Default::default(),
disk_linked: false,
})
}
pub fn new_on_disk(name: &str, capacity: i64, root: &Path) -> io::Result<ExecutableFactory> {
let mut full_path = root.to_owned();
full_path.push(name);
let storage = File::create(&full_path)?;
Ok(ExecutableFactory {
storage,
expected_path: full_path,
disk_linked: true,
})
}
pub fn finalize_container(self) -> ContainerImage {
assert!(self.disk_linked, "must be disk-linked");
ContainerImage {
file: self.expected_path,
mounts: Vec::new(),
}
}
pub fn finalize_executable(self) -> Executable {
Executable {
file: self.storage.into_owned_fd(),
}
}
}
impl io::Write for ExecutableFactory {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.storage.write(data)
}
fn flush(&mut self) -> io::Result<()> {
self.storage.flush()
}
}
impl Executable {
pub fn open<P>(path: P) -> io::Result<Executable>
where
P: AsRef<Path>,
{
let path: &Path = path.as_ref();
let artifact_file = open(path, OFlag::O_RDONLY | OFlag::O_CLOEXEC, Mode::empty())
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok(Executable {
file: unsafe { OwnedFd::from_raw_fd(artifact_file) },
})
}
pub fn execute(&self, arguments: &[&CStr], env: &[&CStr]) -> io::Result<Void> {
use nix::fcntl::{AtFlags, FcntlArg, FdFlag};
fcntl(self.file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let program_name = CString::new("").unwrap();
execveat(
self.file.as_raw_fd(),
&program_name,
arguments,
env,
AtFlags::AT_EMPTY_PATH,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// successful invokations of execveat don't return.
unreachable!();
}
}
pub const EXTENSION: &str = "";
#[cfg(target_arch = "x86_64")]
pub const PLATFORM_TRIPLES: &[&str] = &[
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-unknown-linux",
];
pub fn keep_hook(c: &AppPreforkConfiguration, keep_map: &mut [bool]) {
keep_map[c.artifact.0.as_raw_fd() as usize] = true;
}
pub trait SandboxingStrategy {
fn preexec(&self) -> io::Result<()>;
}
impl SandboxingStrategy for () {
fn preexec(&self) -> io::Result<()> {
Ok(())
}
}
pub struct UserChangeStrategy {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
fn io_other<E>(e: E) -> io::Error
where | io::Error::new(io::ErrorKind::Other, e)
}
impl SandboxingStrategy for UserChangeStrategy {
fn preexec(&self) -> io::Result<()> {
if let Some(ref wd) = self.workdir {
std::fs::create_dir_all(wd)?;
nix::unistd::chown(wd, self.set_user, self.set_group)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
}
// seccomp::setup(&self.seccomp_props).unwrap();
if let Some(ref wd) = self.workdir {
event!(Level::INFO, "setting cwd = {}", wd.display());
nix::unistd::chdir(wd).map_err(io_other)?;
}
if !self.seccomp_props.contains("*") {
if !self.seccomp_props.contains("@filesystem") {
unshare::restrict_filesystem()?;
}
if !self.seccomp_props.contains("@network") {
unshare::restrict_network()?;
}
}
if let Some(gid) = self.set_group {
event!(Level::INFO, "setting gid = {:?}", gid);
nix::unistd::setgid(gid).map_err(io_other)?;
}
if let Some(uid) = self.set_user {
event!(Level::INFO, "setting uid = {:?}", uid);
nix::unistd::setuid(uid).map_err(io_other)?;
}
Ok(())
}
}
pub struct ExecExtras {
sandboxing_strategy: Option<Arc<dyn SandboxingStrategy>>,
}
impl ExecExtras {
pub fn builder() -> ExecExtrasBuilder {
let mut builder: ExecExtrasBuilder = Default::default();
builder.seccomp_props.insert("@network");
builder.seccomp_props.insert("*");
builder
}
}
#[derive(Default)]
pub struct ExecExtrasBuilder {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
impl ExecExtrasBuilder {
pub fn set_user(&mut self, name: &str) -> io::Result<()> {
let uid = get_user_by_name(name).ok_or_else(|| {
let msg = format!("unknown user {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_user = Some(Uid::from_raw(uid.uid()));
Ok(())
}
pub fn set_group(&mut self, name: &str) -> io::Result<()> {
let gid = get_group_by_name(name).ok_or_else(|| {
let msg = format!("unknown group {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_group = Some(Gid::from_raw(gid.gid()));
Ok(())
}
pub fn set_workdir(&mut self, workdir: &Path) -> io::Result<()> {
self.workdir = Some(workdir.to_owned());
Ok(())
}
pub fn clear_seccomp_permission(&mut self) {
self.seccomp_props.clear();
}
pub fn add_seccomp_permission(&mut self, value: &'static str) {
self.seccomp_props.insert(value);
}
pub fn build(&self) -> ExecExtras {
let mut sandboxing_strategy = None;
if self.set_user.is_some() || self.set_group.is_some() {
let obj: Box<dyn SandboxingStrategy> = Box::new(UserChangeStrategy {
workdir: self.workdir.clone(),
set_user: self.set_user.clone(),
set_group: self.set_group.clone(),
seccomp_props: self.seccomp_props.clone(),
});
sandboxing_strategy = Some(obj.into());
}
ExecExtras {
sandboxing_strategy,
}
}
}
fn exec_artifact_child(ext: &ExecExtras, c: &AppPreforkConfiguration) -> io::Result<Void> {
let package_id = c.package_id.clone();
let app_config = relabel_file_descriptors(&c)?;
let tmpfile = open(
"/tmp",
OFlag::O_RDWR | OFlag::O_TMPFILE,
Mode::S_IRUSR | Mode::S_IWUSR,
)
.map_err(|e| {
event!(Level::WARN, "error opening temporary: {:?}", e);
io_other(e)
})?;
let data = serde_json::to_string(&app_config)?;
let data_len =
write(tmpfile, data.as_bytes()).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
lseek64(tmpfile, 0, Whence::SeekSet)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
assert_eq!(data_len, data.len());
let tmpfile = format!("{}\0", tmpfile);
let arguments: &[&CStr] = &[
CStr::from_bytes_with_nul(b"yscloud-executable\0").unwrap(),
CStr::from_bytes_with_nul(b"--config-fd\0").unwrap(),
CStr::from_bytes_with_nul(tmpfile.as_bytes()).unwrap(),
];
event!(
Level::INFO,
"running {} {:?} -- {}",
package_id,
arguments,
data
);
if let Some(ref sandbox) = ext.sandboxing_strategy {
sandbox.preexec()?;
}
let env: &[&CStr] = &[
CStr::from_bytes_with_nul(b"RUST_BACKTRACE=1\0").unwrap(),
CStr::from_bytes_with_nul(b"YSCLOUD=1\0").unwrap(),
];
c.artifact.execute(arguments, env)?;
unreachable!();
}
pub fn exec_artifact(e: &ExecExtras, c: AppPreforkConfiguration) -> io::Result<Pid> {
match unsafe { fork() } {
Ok(ForkResult::Child) => {
if let Err(err) = exec_artifact_child(e, &c) {
event!(Level::WARN, "failed to execute: {:?}", err);
std::process::exit(1);
} else {
unreachable!();
}
}
Ok(ForkResult::Parent { child, .. }) => Ok(child),
Err(err) => Err(io::Error::new(io::ErrorKind::Other, err)),
}
} | E: Into<Box<dyn StdError + Send + Sync>>,
{ | random_line_split |
mod.rs | use std::error::Error as StdError;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::collections::HashSet;
use std::fs::File;
use digest::{Digest, FixedOutput};
use nix::fcntl::{fcntl, open, OFlag};
use nix::sys::stat::Mode;
use nix::unistd::{execveat, fork, lseek64, write, ForkResult, Gid, Pid, Uid, Whence};
use sha2::{Sha256, Sha512};
use sha3::{Sha3_512, Keccak512};
use tracing::{event, Level};
use users::{get_group_by_name, get_user_by_name};
use yscloud_config_model::ImageType;
use memfd::{MemFd, MemFdOptions, SealFlag};
use owned_fd::{OwnedFd, IntoOwnedFd};
use super::posix_imp::relabel_file_descriptors;
pub use super::posix_imp::run_reified;
use crate::{Void, AppPreforkConfiguration};
pub mod arch;
pub mod seccomp;
pub mod unshare;
pub mod container;
pub mod mount;
pub struct ContainerImage {
file: PathBuf,
mounts: Vec<(PathBuf, PathBuf)>,
}
impl ContainerImage {
pub fn start(&self) -> io::Result<Void> {
return Err(io::Error::new(io::ErrorKind::Other, "containers not yet implemented"));
}
}
#[derive(Debug)]
pub struct Executable {
file: OwnedFd,
}
impl fmt::Display for Executable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "fd+executable:{}", self.file.as_raw_fd())
}
}
impl AsRawFd for Executable {
fn as_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
fn nix_error_to_io_error(err: nix::Error) -> io::Error {
match err {
nix::Error::Sys(syserr) => io::Error::from_raw_os_error(syserr as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::Other, "Invalid path (nix)"),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::Other, "Invalid UTF-8 (nix)"),
nix::Error::UnsupportedOperation => {
io::Error::new(io::ErrorKind::Other, "Unsupported operation (nix)")
}
}
}
pub struct ExecutableFactory {
storage: File,
expected_path: PathBuf,
disk_linked: bool,
}
impl ExecutableFactory {
pub fn new_unspecified(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
Self::new_in_memory(name, capacity)
}
pub fn new_in_memory(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
let mut mem_fd = MemFdOptions::new()
.cloexec(true)
.allow_sealing(true)
.with_capacity(capacity)
.set_mode(Mode::S_IRWXU | Mode::S_IRGRP | Mode::S_IXGRP | Mode::S_IROTH | Mode::S_IXOTH)
.open(name)
.map_err(|e| nix_error_to_io_error(e))?;
mem_fd
.seal(SealFlag::F_SEAL_SEAL | SealFlag::F_SEAL_SHRINK | SealFlag::F_SEAL_GROW)
.map_err(|e| nix_error_to_io_error(e))?;
Ok(ExecutableFactory {
storage: mem_fd.into(),
expected_path: Default::default(),
disk_linked: false,
})
}
pub fn new_on_disk(name: &str, capacity: i64, root: &Path) -> io::Result<ExecutableFactory> {
let mut full_path = root.to_owned();
full_path.push(name);
let storage = File::create(&full_path)?;
Ok(ExecutableFactory {
storage,
expected_path: full_path,
disk_linked: true,
})
}
pub fn | (self) -> ContainerImage {
assert!(self.disk_linked, "must be disk-linked");
ContainerImage {
file: self.expected_path,
mounts: Vec::new(),
}
}
pub fn finalize_executable(self) -> Executable {
Executable {
file: self.storage.into_owned_fd(),
}
}
}
impl io::Write for ExecutableFactory {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.storage.write(data)
}
fn flush(&mut self) -> io::Result<()> {
self.storage.flush()
}
}
impl Executable {
pub fn open<P>(path: P) -> io::Result<Executable>
where
P: AsRef<Path>,
{
let path: &Path = path.as_ref();
let artifact_file = open(path, OFlag::O_RDONLY | OFlag::O_CLOEXEC, Mode::empty())
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok(Executable {
file: unsafe { OwnedFd::from_raw_fd(artifact_file) },
})
}
pub fn execute(&self, arguments: &[&CStr], env: &[&CStr]) -> io::Result<Void> {
use nix::fcntl::{AtFlags, FcntlArg, FdFlag};
fcntl(self.file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let program_name = CString::new("").unwrap();
execveat(
self.file.as_raw_fd(),
&program_name,
arguments,
env,
AtFlags::AT_EMPTY_PATH,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// successful invokations of execveat don't return.
unreachable!();
}
}
pub const EXTENSION: &str = "";
#[cfg(target_arch = "x86_64")]
pub const PLATFORM_TRIPLES: &[&str] = &[
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-unknown-linux",
];
pub fn keep_hook(c: &AppPreforkConfiguration, keep_map: &mut [bool]) {
keep_map[c.artifact.0.as_raw_fd() as usize] = true;
}
pub trait SandboxingStrategy {
fn preexec(&self) -> io::Result<()>;
}
impl SandboxingStrategy for () {
fn preexec(&self) -> io::Result<()> {
Ok(())
}
}
pub struct UserChangeStrategy {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
fn io_other<E>(e: E) -> io::Error
where
E: Into<Box<dyn StdError + Send + Sync>>,
{
io::Error::new(io::ErrorKind::Other, e)
}
impl SandboxingStrategy for UserChangeStrategy {
fn preexec(&self) -> io::Result<()> {
if let Some(ref wd) = self.workdir {
std::fs::create_dir_all(wd)?;
nix::unistd::chown(wd, self.set_user, self.set_group)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
}
// seccomp::setup(&self.seccomp_props).unwrap();
if let Some(ref wd) = self.workdir {
event!(Level::INFO, "setting cwd = {}", wd.display());
nix::unistd::chdir(wd).map_err(io_other)?;
}
if !self.seccomp_props.contains("*") {
if !self.seccomp_props.contains("@filesystem") {
unshare::restrict_filesystem()?;
}
if !self.seccomp_props.contains("@network") {
unshare::restrict_network()?;
}
}
if let Some(gid) = self.set_group {
event!(Level::INFO, "setting gid = {:?}", gid);
nix::unistd::setgid(gid).map_err(io_other)?;
}
if let Some(uid) = self.set_user {
event!(Level::INFO, "setting uid = {:?}", uid);
nix::unistd::setuid(uid).map_err(io_other)?;
}
Ok(())
}
}
pub struct ExecExtras {
sandboxing_strategy: Option<Arc<dyn SandboxingStrategy>>,
}
impl ExecExtras {
pub fn builder() -> ExecExtrasBuilder {
let mut builder: ExecExtrasBuilder = Default::default();
builder.seccomp_props.insert("@network");
builder.seccomp_props.insert("*");
builder
}
}
#[derive(Default)]
pub struct ExecExtrasBuilder {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
impl ExecExtrasBuilder {
pub fn set_user(&mut self, name: &str) -> io::Result<()> {
let uid = get_user_by_name(name).ok_or_else(|| {
let msg = format!("unknown user {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_user = Some(Uid::from_raw(uid.uid()));
Ok(())
}
pub fn set_group(&mut self, name: &str) -> io::Result<()> {
let gid = get_group_by_name(name).ok_or_else(|| {
let msg = format!("unknown group {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_group = Some(Gid::from_raw(gid.gid()));
Ok(())
}
pub fn set_workdir(&mut self, workdir: &Path) -> io::Result<()> {
self.workdir = Some(workdir.to_owned());
Ok(())
}
pub fn clear_seccomp_permission(&mut self) {
self.seccomp_props.clear();
}
pub fn add_seccomp_permission(&mut self, value: &'static str) {
self.seccomp_props.insert(value);
}
pub fn build(&self) -> ExecExtras {
let mut sandboxing_strategy = None;
if self.set_user.is_some() || self.set_group.is_some() {
let obj: Box<dyn SandboxingStrategy> = Box::new(UserChangeStrategy {
workdir: self.workdir.clone(),
set_user: self.set_user.clone(),
set_group: self.set_group.clone(),
seccomp_props: self.seccomp_props.clone(),
});
sandboxing_strategy = Some(obj.into());
}
ExecExtras {
sandboxing_strategy,
}
}
}
fn exec_artifact_child(ext: &ExecExtras, c: &AppPreforkConfiguration) -> io::Result<Void> {
let package_id = c.package_id.clone();
let app_config = relabel_file_descriptors(&c)?;
let tmpfile = open(
"/tmp",
OFlag::O_RDWR | OFlag::O_TMPFILE,
Mode::S_IRUSR | Mode::S_IWUSR,
)
.map_err(|e| {
event!(Level::WARN, "error opening temporary: {:?}", e);
io_other(e)
})?;
let data = serde_json::to_string(&app_config)?;
let data_len =
write(tmpfile, data.as_bytes()).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
lseek64(tmpfile, 0, Whence::SeekSet)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
assert_eq!(data_len, data.len());
let tmpfile = format!("{}\0", tmpfile);
let arguments: &[&CStr] = &[
CStr::from_bytes_with_nul(b"yscloud-executable\0").unwrap(),
CStr::from_bytes_with_nul(b"--config-fd\0").unwrap(),
CStr::from_bytes_with_nul(tmpfile.as_bytes()).unwrap(),
];
event!(
Level::INFO,
"running {} {:?} -- {}",
package_id,
arguments,
data
);
if let Some(ref sandbox) = ext.sandboxing_strategy {
sandbox.preexec()?;
}
let env: &[&CStr] = &[
CStr::from_bytes_with_nul(b"RUST_BACKTRACE=1\0").unwrap(),
CStr::from_bytes_with_nul(b"YSCLOUD=1\0").unwrap(),
];
c.artifact.execute(arguments, env)?;
unreachable!();
}
pub fn exec_artifact(e: &ExecExtras, c: AppPreforkConfiguration) -> io::Result<Pid> {
match unsafe { fork() } {
Ok(ForkResult::Child) => {
if let Err(err) = exec_artifact_child(e, &c) {
event!(Level::WARN, "failed to execute: {:?}", err);
std::process::exit(1);
} else {
unreachable!();
}
}
Ok(ForkResult::Parent { child, .. }) => Ok(child),
Err(err) => Err(io::Error::new(io::ErrorKind::Other, err)),
}
}
| finalize_container | identifier_name |
mod.rs | use std::error::Error as StdError;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::collections::HashSet;
use std::fs::File;
use digest::{Digest, FixedOutput};
use nix::fcntl::{fcntl, open, OFlag};
use nix::sys::stat::Mode;
use nix::unistd::{execveat, fork, lseek64, write, ForkResult, Gid, Pid, Uid, Whence};
use sha2::{Sha256, Sha512};
use sha3::{Sha3_512, Keccak512};
use tracing::{event, Level};
use users::{get_group_by_name, get_user_by_name};
use yscloud_config_model::ImageType;
use memfd::{MemFd, MemFdOptions, SealFlag};
use owned_fd::{OwnedFd, IntoOwnedFd};
use super::posix_imp::relabel_file_descriptors;
pub use super::posix_imp::run_reified;
use crate::{Void, AppPreforkConfiguration};
pub mod arch;
pub mod seccomp;
pub mod unshare;
pub mod container;
pub mod mount;
pub struct ContainerImage {
file: PathBuf,
mounts: Vec<(PathBuf, PathBuf)>,
}
impl ContainerImage {
pub fn start(&self) -> io::Result<Void> {
return Err(io::Error::new(io::ErrorKind::Other, "containers not yet implemented"));
}
}
#[derive(Debug)]
pub struct Executable {
file: OwnedFd,
}
impl fmt::Display for Executable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "fd+executable:{}", self.file.as_raw_fd())
}
}
impl AsRawFd for Executable {
fn as_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
fn nix_error_to_io_error(err: nix::Error) -> io::Error {
match err {
nix::Error::Sys(syserr) => io::Error::from_raw_os_error(syserr as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::Other, "Invalid path (nix)"),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::Other, "Invalid UTF-8 (nix)"),
nix::Error::UnsupportedOperation => {
io::Error::new(io::ErrorKind::Other, "Unsupported operation (nix)")
}
}
}
pub struct ExecutableFactory {
storage: File,
expected_path: PathBuf,
disk_linked: bool,
}
impl ExecutableFactory {
pub fn new_unspecified(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
Self::new_in_memory(name, capacity)
}
pub fn new_in_memory(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
let mut mem_fd = MemFdOptions::new()
.cloexec(true)
.allow_sealing(true)
.with_capacity(capacity)
.set_mode(Mode::S_IRWXU | Mode::S_IRGRP | Mode::S_IXGRP | Mode::S_IROTH | Mode::S_IXOTH)
.open(name)
.map_err(|e| nix_error_to_io_error(e))?;
mem_fd
.seal(SealFlag::F_SEAL_SEAL | SealFlag::F_SEAL_SHRINK | SealFlag::F_SEAL_GROW)
.map_err(|e| nix_error_to_io_error(e))?;
Ok(ExecutableFactory {
storage: mem_fd.into(),
expected_path: Default::default(),
disk_linked: false,
})
}
pub fn new_on_disk(name: &str, capacity: i64, root: &Path) -> io::Result<ExecutableFactory> {
let mut full_path = root.to_owned();
full_path.push(name);
let storage = File::create(&full_path)?;
Ok(ExecutableFactory {
storage,
expected_path: full_path,
disk_linked: true,
})
}
pub fn finalize_container(self) -> ContainerImage {
assert!(self.disk_linked, "must be disk-linked");
ContainerImage {
file: self.expected_path,
mounts: Vec::new(),
}
}
pub fn finalize_executable(self) -> Executable {
Executable {
file: self.storage.into_owned_fd(),
}
}
}
impl io::Write for ExecutableFactory {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.storage.write(data)
}
fn flush(&mut self) -> io::Result<()> {
self.storage.flush()
}
}
impl Executable {
pub fn open<P>(path: P) -> io::Result<Executable>
where
P: AsRef<Path>,
{
let path: &Path = path.as_ref();
let artifact_file = open(path, OFlag::O_RDONLY | OFlag::O_CLOEXEC, Mode::empty())
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok(Executable {
file: unsafe { OwnedFd::from_raw_fd(artifact_file) },
})
}
pub fn execute(&self, arguments: &[&CStr], env: &[&CStr]) -> io::Result<Void> {
use nix::fcntl::{AtFlags, FcntlArg, FdFlag};
fcntl(self.file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let program_name = CString::new("").unwrap();
execveat(
self.file.as_raw_fd(),
&program_name,
arguments,
env,
AtFlags::AT_EMPTY_PATH,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// successful invokations of execveat don't return.
unreachable!();
}
}
pub const EXTENSION: &str = "";
#[cfg(target_arch = "x86_64")]
pub const PLATFORM_TRIPLES: &[&str] = &[
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-unknown-linux",
];
pub fn keep_hook(c: &AppPreforkConfiguration, keep_map: &mut [bool]) {
keep_map[c.artifact.0.as_raw_fd() as usize] = true;
}
pub trait SandboxingStrategy {
fn preexec(&self) -> io::Result<()>;
}
impl SandboxingStrategy for () {
fn preexec(&self) -> io::Result<()> {
Ok(())
}
}
pub struct UserChangeStrategy {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
fn io_other<E>(e: E) -> io::Error
where
E: Into<Box<dyn StdError + Send + Sync>>,
{
io::Error::new(io::ErrorKind::Other, e)
}
impl SandboxingStrategy for UserChangeStrategy {
fn preexec(&self) -> io::Result<()> {
if let Some(ref wd) = self.workdir {
std::fs::create_dir_all(wd)?;
nix::unistd::chown(wd, self.set_user, self.set_group)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
}
// seccomp::setup(&self.seccomp_props).unwrap();
if let Some(ref wd) = self.workdir {
event!(Level::INFO, "setting cwd = {}", wd.display());
nix::unistd::chdir(wd).map_err(io_other)?;
}
if !self.seccomp_props.contains("*") {
if !self.seccomp_props.contains("@filesystem") {
unshare::restrict_filesystem()?;
}
if !self.seccomp_props.contains("@network") {
unshare::restrict_network()?;
}
}
if let Some(gid) = self.set_group {
event!(Level::INFO, "setting gid = {:?}", gid);
nix::unistd::setgid(gid).map_err(io_other)?;
}
if let Some(uid) = self.set_user {
event!(Level::INFO, "setting uid = {:?}", uid);
nix::unistd::setuid(uid).map_err(io_other)?;
}
Ok(())
}
}
pub struct ExecExtras {
sandboxing_strategy: Option<Arc<dyn SandboxingStrategy>>,
}
impl ExecExtras {
pub fn builder() -> ExecExtrasBuilder {
let mut builder: ExecExtrasBuilder = Default::default();
builder.seccomp_props.insert("@network");
builder.seccomp_props.insert("*");
builder
}
}
#[derive(Default)]
pub struct ExecExtrasBuilder {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
impl ExecExtrasBuilder {
pub fn set_user(&mut self, name: &str) -> io::Result<()> {
let uid = get_user_by_name(name).ok_or_else(|| {
let msg = format!("unknown user {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_user = Some(Uid::from_raw(uid.uid()));
Ok(())
}
pub fn set_group(&mut self, name: &str) -> io::Result<()> {
let gid = get_group_by_name(name).ok_or_else(|| {
let msg = format!("unknown group {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_group = Some(Gid::from_raw(gid.gid()));
Ok(())
}
pub fn set_workdir(&mut self, workdir: &Path) -> io::Result<()> {
self.workdir = Some(workdir.to_owned());
Ok(())
}
pub fn clear_seccomp_permission(&mut self) {
self.seccomp_props.clear();
}
pub fn add_seccomp_permission(&mut self, value: &'static str) {
self.seccomp_props.insert(value);
}
pub fn build(&self) -> ExecExtras {
let mut sandboxing_strategy = None;
if self.set_user.is_some() || self.set_group.is_some() {
let obj: Box<dyn SandboxingStrategy> = Box::new(UserChangeStrategy {
workdir: self.workdir.clone(),
set_user: self.set_user.clone(),
set_group: self.set_group.clone(),
seccomp_props: self.seccomp_props.clone(),
});
sandboxing_strategy = Some(obj.into());
}
ExecExtras {
sandboxing_strategy,
}
}
}
fn exec_artifact_child(ext: &ExecExtras, c: &AppPreforkConfiguration) -> io::Result<Void> {
let package_id = c.package_id.clone();
let app_config = relabel_file_descriptors(&c)?;
let tmpfile = open(
"/tmp",
OFlag::O_RDWR | OFlag::O_TMPFILE,
Mode::S_IRUSR | Mode::S_IWUSR,
)
.map_err(|e| {
event!(Level::WARN, "error opening temporary: {:?}", e);
io_other(e)
})?;
let data = serde_json::to_string(&app_config)?;
let data_len =
write(tmpfile, data.as_bytes()).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
lseek64(tmpfile, 0, Whence::SeekSet)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
assert_eq!(data_len, data.len());
let tmpfile = format!("{}\0", tmpfile);
let arguments: &[&CStr] = &[
CStr::from_bytes_with_nul(b"yscloud-executable\0").unwrap(),
CStr::from_bytes_with_nul(b"--config-fd\0").unwrap(),
CStr::from_bytes_with_nul(tmpfile.as_bytes()).unwrap(),
];
event!(
Level::INFO,
"running {} {:?} -- {}",
package_id,
arguments,
data
);
if let Some(ref sandbox) = ext.sandboxing_strategy {
sandbox.preexec()?;
}
let env: &[&CStr] = &[
CStr::from_bytes_with_nul(b"RUST_BACKTRACE=1\0").unwrap(),
CStr::from_bytes_with_nul(b"YSCLOUD=1\0").unwrap(),
];
c.artifact.execute(arguments, env)?;
unreachable!();
}
pub fn exec_artifact(e: &ExecExtras, c: AppPreforkConfiguration) -> io::Result<Pid> {
match unsafe { fork() } {
Ok(ForkResult::Child) => |
Ok(ForkResult::Parent { child, .. }) => Ok(child),
Err(err) => Err(io::Error::new(io::ErrorKind::Other, err)),
}
}
| {
if let Err(err) = exec_artifact_child(e, &c) {
event!(Level::WARN, "failed to execute: {:?}", err);
std::process::exit(1);
} else {
unreachable!();
}
} | conditional_block |
mod.rs | use std::error::Error as StdError;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::collections::HashSet;
use std::fs::File;
use digest::{Digest, FixedOutput};
use nix::fcntl::{fcntl, open, OFlag};
use nix::sys::stat::Mode;
use nix::unistd::{execveat, fork, lseek64, write, ForkResult, Gid, Pid, Uid, Whence};
use sha2::{Sha256, Sha512};
use sha3::{Sha3_512, Keccak512};
use tracing::{event, Level};
use users::{get_group_by_name, get_user_by_name};
use yscloud_config_model::ImageType;
use memfd::{MemFd, MemFdOptions, SealFlag};
use owned_fd::{OwnedFd, IntoOwnedFd};
use super::posix_imp::relabel_file_descriptors;
pub use super::posix_imp::run_reified;
use crate::{Void, AppPreforkConfiguration};
pub mod arch;
pub mod seccomp;
pub mod unshare;
pub mod container;
pub mod mount;
pub struct ContainerImage {
file: PathBuf,
mounts: Vec<(PathBuf, PathBuf)>,
}
impl ContainerImage {
pub fn start(&self) -> io::Result<Void> {
return Err(io::Error::new(io::ErrorKind::Other, "containers not yet implemented"));
}
}
#[derive(Debug)]
pub struct Executable {
file: OwnedFd,
}
impl fmt::Display for Executable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "fd+executable:{}", self.file.as_raw_fd())
}
}
impl AsRawFd for Executable {
fn as_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
fn nix_error_to_io_error(err: nix::Error) -> io::Error {
match err {
nix::Error::Sys(syserr) => io::Error::from_raw_os_error(syserr as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::Other, "Invalid path (nix)"),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::Other, "Invalid UTF-8 (nix)"),
nix::Error::UnsupportedOperation => {
io::Error::new(io::ErrorKind::Other, "Unsupported operation (nix)")
}
}
}
pub struct ExecutableFactory {
storage: File,
expected_path: PathBuf,
disk_linked: bool,
}
impl ExecutableFactory {
pub fn new_unspecified(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
Self::new_in_memory(name, capacity)
}
pub fn new_in_memory(name: &str, capacity: i64) -> io::Result<ExecutableFactory> |
pub fn new_on_disk(name: &str, capacity: i64, root: &Path) -> io::Result<ExecutableFactory> {
let mut full_path = root.to_owned();
full_path.push(name);
let storage = File::create(&full_path)?;
Ok(ExecutableFactory {
storage,
expected_path: full_path,
disk_linked: true,
})
}
pub fn finalize_container(self) -> ContainerImage {
assert!(self.disk_linked, "must be disk-linked");
ContainerImage {
file: self.expected_path,
mounts: Vec::new(),
}
}
pub fn finalize_executable(self) -> Executable {
Executable {
file: self.storage.into_owned_fd(),
}
}
}
impl io::Write for ExecutableFactory {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.storage.write(data)
}
fn flush(&mut self) -> io::Result<()> {
self.storage.flush()
}
}
impl Executable {
pub fn open<P>(path: P) -> io::Result<Executable>
where
P: AsRef<Path>,
{
let path: &Path = path.as_ref();
let artifact_file = open(path, OFlag::O_RDONLY | OFlag::O_CLOEXEC, Mode::empty())
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok(Executable {
file: unsafe { OwnedFd::from_raw_fd(artifact_file) },
})
}
pub fn execute(&self, arguments: &[&CStr], env: &[&CStr]) -> io::Result<Void> {
use nix::fcntl::{AtFlags, FcntlArg, FdFlag};
fcntl(self.file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let program_name = CString::new("").unwrap();
execveat(
self.file.as_raw_fd(),
&program_name,
arguments,
env,
AtFlags::AT_EMPTY_PATH,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// successful invokations of execveat don't return.
unreachable!();
}
}
pub const EXTENSION: &str = "";
#[cfg(target_arch = "x86_64")]
pub const PLATFORM_TRIPLES: &[&str] = &[
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-unknown-linux",
];
pub fn keep_hook(c: &AppPreforkConfiguration, keep_map: &mut [bool]) {
keep_map[c.artifact.0.as_raw_fd() as usize] = true;
}
pub trait SandboxingStrategy {
fn preexec(&self) -> io::Result<()>;
}
impl SandboxingStrategy for () {
fn preexec(&self) -> io::Result<()> {
Ok(())
}
}
pub struct UserChangeStrategy {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
fn io_other<E>(e: E) -> io::Error
where
E: Into<Box<dyn StdError + Send + Sync>>,
{
io::Error::new(io::ErrorKind::Other, e)
}
impl SandboxingStrategy for UserChangeStrategy {
fn preexec(&self) -> io::Result<()> {
if let Some(ref wd) = self.workdir {
std::fs::create_dir_all(wd)?;
nix::unistd::chown(wd, self.set_user, self.set_group)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
}
// seccomp::setup(&self.seccomp_props).unwrap();
if let Some(ref wd) = self.workdir {
event!(Level::INFO, "setting cwd = {}", wd.display());
nix::unistd::chdir(wd).map_err(io_other)?;
}
if !self.seccomp_props.contains("*") {
if !self.seccomp_props.contains("@filesystem") {
unshare::restrict_filesystem()?;
}
if !self.seccomp_props.contains("@network") {
unshare::restrict_network()?;
}
}
if let Some(gid) = self.set_group {
event!(Level::INFO, "setting gid = {:?}", gid);
nix::unistd::setgid(gid).map_err(io_other)?;
}
if let Some(uid) = self.set_user {
event!(Level::INFO, "setting uid = {:?}", uid);
nix::unistd::setuid(uid).map_err(io_other)?;
}
Ok(())
}
}
pub struct ExecExtras {
sandboxing_strategy: Option<Arc<dyn SandboxingStrategy>>,
}
impl ExecExtras {
pub fn builder() -> ExecExtrasBuilder {
let mut builder: ExecExtrasBuilder = Default::default();
builder.seccomp_props.insert("@network");
builder.seccomp_props.insert("*");
builder
}
}
#[derive(Default)]
pub struct ExecExtrasBuilder {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
impl ExecExtrasBuilder {
pub fn set_user(&mut self, name: &str) -> io::Result<()> {
let uid = get_user_by_name(name).ok_or_else(|| {
let msg = format!("unknown user {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_user = Some(Uid::from_raw(uid.uid()));
Ok(())
}
pub fn set_group(&mut self, name: &str) -> io::Result<()> {
let gid = get_group_by_name(name).ok_or_else(|| {
let msg = format!("unknown group {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_group = Some(Gid::from_raw(gid.gid()));
Ok(())
}
pub fn set_workdir(&mut self, workdir: &Path) -> io::Result<()> {
self.workdir = Some(workdir.to_owned());
Ok(())
}
pub fn clear_seccomp_permission(&mut self) {
self.seccomp_props.clear();
}
pub fn add_seccomp_permission(&mut self, value: &'static str) {
self.seccomp_props.insert(value);
}
pub fn build(&self) -> ExecExtras {
let mut sandboxing_strategy = None;
if self.set_user.is_some() || self.set_group.is_some() {
let obj: Box<dyn SandboxingStrategy> = Box::new(UserChangeStrategy {
workdir: self.workdir.clone(),
set_user: self.set_user.clone(),
set_group: self.set_group.clone(),
seccomp_props: self.seccomp_props.clone(),
});
sandboxing_strategy = Some(obj.into());
}
ExecExtras {
sandboxing_strategy,
}
}
}
fn exec_artifact_child(ext: &ExecExtras, c: &AppPreforkConfiguration) -> io::Result<Void> {
let package_id = c.package_id.clone();
let app_config = relabel_file_descriptors(&c)?;
let tmpfile = open(
"/tmp",
OFlag::O_RDWR | OFlag::O_TMPFILE,
Mode::S_IRUSR | Mode::S_IWUSR,
)
.map_err(|e| {
event!(Level::WARN, "error opening temporary: {:?}", e);
io_other(e)
})?;
let data = serde_json::to_string(&app_config)?;
let data_len =
write(tmpfile, data.as_bytes()).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
lseek64(tmpfile, 0, Whence::SeekSet)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
assert_eq!(data_len, data.len());
let tmpfile = format!("{}\0", tmpfile);
let arguments: &[&CStr] = &[
CStr::from_bytes_with_nul(b"yscloud-executable\0").unwrap(),
CStr::from_bytes_with_nul(b"--config-fd\0").unwrap(),
CStr::from_bytes_with_nul(tmpfile.as_bytes()).unwrap(),
];
event!(
Level::INFO,
"running {} {:?} -- {}",
package_id,
arguments,
data
);
if let Some(ref sandbox) = ext.sandboxing_strategy {
sandbox.preexec()?;
}
let env: &[&CStr] = &[
CStr::from_bytes_with_nul(b"RUST_BACKTRACE=1\0").unwrap(),
CStr::from_bytes_with_nul(b"YSCLOUD=1\0").unwrap(),
];
c.artifact.execute(arguments, env)?;
unreachable!();
}
pub fn exec_artifact(e: &ExecExtras, c: AppPreforkConfiguration) -> io::Result<Pid> {
match unsafe { fork() } {
Ok(ForkResult::Child) => {
if let Err(err) = exec_artifact_child(e, &c) {
event!(Level::WARN, "failed to execute: {:?}", err);
std::process::exit(1);
} else {
unreachable!();
}
}
Ok(ForkResult::Parent { child, .. }) => Ok(child),
Err(err) => Err(io::Error::new(io::ErrorKind::Other, err)),
}
}
| {
let mut mem_fd = MemFdOptions::new()
.cloexec(true)
.allow_sealing(true)
.with_capacity(capacity)
.set_mode(Mode::S_IRWXU | Mode::S_IRGRP | Mode::S_IXGRP | Mode::S_IROTH | Mode::S_IXOTH)
.open(name)
.map_err(|e| nix_error_to_io_error(e))?;
mem_fd
.seal(SealFlag::F_SEAL_SEAL | SealFlag::F_SEAL_SHRINK | SealFlag::F_SEAL_GROW)
.map_err(|e| nix_error_to_io_error(e))?;
Ok(ExecutableFactory {
storage: mem_fd.into(),
expected_path: Default::default(),
disk_linked: false,
})
} | identifier_body |
expressDataHandling.js | import qs from "qs";
import axios from "axios"
import xml2json from './xml2json'
const expressDataHandling = (cargo,trackingResponse)=>{
// console.log(trackingResponse);
let result = {
tracking:"",
pieces:[], //{tracking,atd,eta,ata,status}
origin:"",
destination:"",
atd:"", //string
eta:"",
ata:"", // string
status:"", // string 已送达 正在运送
checkPoints:[],
responseErrors:{}, // {message,tracking}
comment:""
};
if (cargo.forwarder==="DHL"){
return new Promise((resolve,reject)=>{
result.forwarder = "DHL";
// DHL API拿到的日期格式 "Monday, November 02, 2020 18:37"或者"Monday, November 02, 2020 " ,定义getDate函数方便转化
let getDate = (dateStr)=>{
let monthList={"January":0,"February":1,"March":2,"April":3,"May":4,"June":5,"July":6,"August":7,"September":8,"October":9,"November":10,"December":11};
let Y = dateStr.split(",")[2].split(" ")[1]; // 年 2020
let h,m;
if (dateStr.split(",")[2].split(" ").length===2 || dateStr.split(",")[2].split(" ")[2]===""){
h=m="";
}else {
h = dateStr.split(",")[2].split(" ")[2].split(":")[0]; // 时 18
m = dateStr.split(",")[2].split(" ")[2].split(":")[1]; // 分 37
}
let M = monthList[dateStr.split(",")[1].split(" ")[1]]; // 月 monthList["November"]
let D = dateStr.split(",")[1].split(" ")[2]; // 天 02
return new Date(Y,M,D,h,m)
};
// 错误运单号是返回
if (trackingResponse.hasOwnProperty("errors")){
result.responseErrors={
message:trackingResponse.errors[0].message,
tracking:trackingResponse.errors[0].id
}
}else {
let dhl = trackingResponse.results[0];
result.tracking = dhl.id;
result.pieces = [];
dhl.pieces.pIds.forEach(item=>{
result.pieces.push({tracking:item,status:"",ata:"",eta:""})
});
result.origin = dhl.origin.value.replace("CHAOSHAN & HUIZHOU AREA - ","");
result.destination = dhl.destination.value;
let firstCheckPoint = dhl.checkpoints.find(item=>item.counter===1);
result.atd = getDate(firstCheckPoint.date+firstCheckPoint.time);
if (dhl.hasOwnProperty("delivery") && dhl.delivery.code==="101"){ // 101是已送达 102是正在运送
result.ata = getDate(dhl.description);
result.status = "已送达"
}else{
result.status = "正在运送";
result.eta = dhl.edd.hasOwnProperty("date")?getDate(dhl.edd.date):""
}
result.checkPoints = dhl.checkpoints;
result.checkPoints.forEach(item=>{
item.actDate=getDate(item.date+item.time);
item.status = item.description;
item.scanLocation = item.location;
})
//[{counter:"44",description:"清关中",time:"21:40",date: "星期三, 十一月 25, 2020",location: "LEIPZIG - GERMANY"}]
}
resolve(result)
})
}else if (cargo.forwarder==="FEDEX"){
return new Promise((resolve,reject)=>{
result.forwarder = "FEDEX";
let fedex = trackingResponse.TrackPackagesResponse.packageList[0];
if (fedex.isInvalid){
result.responseErrors={
message:"This tracking number cannot be found. Please check the number or contact the sender.",
tracking:cargo.tracking
};
resolve(result)
}else{
result.tracking = fedex.trackingNbr;
result.origin = `${fedex.shipperCity} ${fedex.shipperCntryCD}`;
result.destination = `${fedex.recipientCity} ${fedex.recipientCntryCD}`;
result.atd = new Date(fedex.tenderedDt);
result.eta = fedex.hasOwnProperty("displayEstDeliveryDt")&&fedex.displayEstDeliveryDt!==""?new Date(fedex.displayEstDeliveryDt):"";
result.checkPoints = fedex.scanEventList;
result.checkPoints.forEach(item=>item.actDate=new Date(item.date+"T"+item.time+item.gmtOffset));
// 有关联包裹时请求关联包裹的信息
if (fedex.hasAssociatedShipments){
let req = {};
req.url="https://www.fedex.com/trackingCal/track";
req.method="POST";
req.headers={'content-type':'application/x-www-form-urlencoded' };
req.data=qs.stringify({
action:"getAssociatedShipments",
data:`{"AssociatedShipmentRequest":{"appType":"WTRK","appDeviceType":"DESKTOP","uniqueKey":"","processingParameters":{},"masterTrackingNumberInfo":{"trackingNumberInfo":{"trackingNumber":"${result.tracking}","trackingQualifier":"","trackingCarrier":""},"associatedType":"MPS"}}}`,
locale:"zh_CN",
version:"1",
format:"json"
});
axios(req).then(res=>{
let relatedPackage = res.data.AssociatedShipmentsResponse.associatedShipmentList;
let flag = false; // 如果发现任意一个包裹未送达就改为true
// console.log("relatedPackage",relatedPackage);
relatedPackage.forEach(item=>{
result.pieces.push({
tracking:item.trackingNbr,
atd:new Date(item.tendDt),
status:item.keyStatus,
ata:item.actDeliveryDt?new Date(item.actDeliveryDt):"",
eta:item.estDeliveryDt?new Date(item.estDeliveryDt):"",
});
if (item.keyStatus!=="已送达"){
flag = true;
result.status = "部分子件未送达";
result.eta = item.estDeliveryDt>result.eta?new Date(item.estDeliveryDt):result.eta
}
});
if (!flag){ // 没有不是"已送达"状态的包裹说明都送达了
result.status = "已送达";
result.ata = fedex.actDeliveryDt?new Date(fedex.actDeliveryDt):""
}
resolve(result);
})
}else{
// 没有关联包裹,只有单独一个包裹的:
if (fedex.isDelivered){
result.ata = new Date(fedex.actDeliveryDt);
result.status = "已送达"
}else{
result.eta = new Date(fedex.estDeliveryDt);
result.status = "正在运送"
}
resolve(result)
}
}
})
}else if (cargo.forwarder==="UPS"){
return new Promise((resolve,reject)=>{
result.forwarder = "UPS";
let upsGetDate = (str1,str2)=>{
// str1 20201130
// str2 120000
let Y = str1.slice(0,4); // 年 2020
let M = str1.slice(4,6)-1; // 月 11
let D = str1.slice(6); // 日 30
let h = str2?str2.slice(0,2):""; // 时 12
return new Date(Y,M,D,h)
};
// xml字符串转xml对象
let xmlObj = xml2json.getXmlObject(trackingResponse);
// xml对象转json对象
let jsonObj = xml2json.xmlToJson(xmlObj);
if (jsonObj.TrackResponse.Response.hasOwnProperty("Error")){
result.responseErrors={
message:jsonObj.TrackResponse.Response.Error.ErrorDescription["#text"],
tracking:cargo.tracking
};
resolve(result);
}else{
let ups = jsonObj.TrackResponse.Shipment;
result.tracking = ups.ShipmentIdentificationNumber["#text"];
result.origin = `${ups.Shipper.Address.City["#text"]} ${ups.Shipper.Address.CountryCode["#text"]}`;
result.destination = `${ups.ShipTo.Address.City["#text"]} ${ups.ShipTo.Address.CountryCode["#text"]} `;
result.atd = upsGetDate(ups.PickupDate["#text"]);
result.eta = ups.Package.hasOwnProperty("RescheduledDeliveryDate")?
// ups.Package.RescheduledDeliveryDate 20201130
// ups.Package.RescheduledDeliveryTime 120000
upsGetDate(ups.Package.RescheduledDeliveryDate["#text"],ups.Package.RescheduledDeliveryTime["#text"]):"";
if (ups.Package.Activity[0].Status.StatusType.Code["#text"]==="D"){
| result.status = "已送达";
result.ata = new Date(ups.Package.Activity[0].GMTDate["#text"]+"T"+ups.Package.Activity[0].GMTTime["#text"]+ups.Package.Activity[0].GMTOffset["#text"])
}else{
result.status = "正在运送"
}
result.checkPoints = ups.Package.Activity;
result.checkPoints.forEach(item=>{
item.actDate=new Date(item.GMTDate["#text"]+"T"+item.GMTTime["#text"]+item.GMTOffset["#text"]);
item.status = item.Status.StatusType.Description["#text"];
item.scanLocation = item.ActivityLocation.Address.hasOwnProperty("City")?
item.ActivityLocation.Address.hasOwnProperty("CountryCode")?item.ActivityLocation.Address.City["#text"]+" "+item.ActivityLocation.Address.CountryCode["#text"]:item.ActivityLocation.Address.City["#text"]
:""
});
// 关联包裹
let req = {};
req.url="https://onlinetools.ups.com/rest/Track";
req.data={
Security: {
UPSServiceAccessToken: {
AccessLicenseNumber: "2C89134F4D10E2D8"
},
UsernameToken: {
Username: "CST6636EY",
Password: "123456Aa"
}
},
TrackRequest: {
InquiryNumber: result.tracking,
Request: {
RequestAction: "Track",
RequestOption: "activity",
SubVersion: "1907",
ReferenceNumber:{"Value": ""}
}
}
};
// 二次请求 拿到关联包裹的状态
axios.post(req.url,req.data).then(res=>{
// console.log("ups二次请求关联包裹",res);
res.data.TrackResponse.Shipment.Package.forEach(item=>{
result.pieces.push({
tracking:item.TrackingNumber,
atd:upsGetDate(res.data.TrackResponse.Shipment.PickupDate,""),
status:item.hasOwnProperty("DeliveryDate")?"已送达":"正在运送",
ata:item.hasOwnProperty("DeliveryDate")?new Date(upsGetDate(item.DeliveryDate,"")):"",
eta:item.hasOwnProperty("DeliveryDetail")?new Date(upsGetDate(item.DeliveryDetail.Date,item.DeliveryDetail.Time)):"",
current:{
location:item.Activity[0].ActivityLocation.Address.City + " " + item.Activity[0].ActivityLocation.Address.CountryCode,
date:new Date(item.Activity[0].GMTDate+"T"+item.Activity[0].GMTTime+item.Activity[0].GMTOffset),
status:item.Activity[0].Status.Description
}
})
});
resolve(result);
});
}
})
}
};
export default expressDataHandling | random_line_split | |
expressDataHandling.js | import qs from "qs";
import axios from "axios"
import xml2json from './xml2json'
const expressDataHandling = (cargo,trackingResponse)=>{
// console.log(trackingResponse);
let result = {
tracking:"",
pieces:[], //{tracking,atd,eta,ata,status}
origin:"",
destination:"",
atd:"", //string
eta:"",
ata:"", // string
status:"", // string 已送达 正在运送
checkPoints:[],
responseErrors:{}, // {message,tracking}
comment:""
};
if (cargo.forwarder==="DHL"){
return new Promise((resolve,reject)=>{
result.forwarder = "DHL";
// DHL API拿到的日期格式 "Monday, November 02, 2020 18:37"或者"Monday, November 02, 2020 " ,定义getDate函数方便转化
let getDate = (dateStr)=>{
let monthList={"January":0,"February":1,"March":2,"April":3,"May":4,"June":5,"July":6,"August":7,"September":8,"October":9,"November":10,"December":11};
let Y = dateStr.split(",")[2].split(" ")[1]; // 年 2020
let h,m;
if (dateStr.split(",")[2].split(" ").length===2 || dateStr.split(",")[2].split(" ")[2]===""){
h=m="";
}else {
h = dateStr.split(",")[2].split(" ")[2].split(":")[0]; // 时 18
m = dateStr.split(",")[2].split(" ")[2].split(":")[1]; // 分 37
}
let M = monthList[dateStr.split(",")[1].split(" ")[1]]; // 月 monthList["November"]
let D = dateStr.split(",")[1].split(" ")[2]; // 天 02
return new Date(Y,M,D,h,m)
};
// 错误运单号是返回
if (trackingResponse.hasOwnProperty("errors")){
result.responseErrors={
message:trackingResponse.errors[0].message,
tracking:trackingResponse.errors[0].id
}
}else {
let dhl = trackingResponse.results[0];
result.tracking = dhl.id;
result.pieces = [];
dhl.pieces.pIds.forEach(item=>{
result.pieces.push({tracking:item,status:"",ata:"",eta:""})
});
result.origin = dhl.origin.value.replace("CHAOSHAN & HUIZHOU AREA - ","");
result.destination = dhl.destination.value;
let firstCheckPoint = dhl.checkpoints.find(item=>item.counter===1);
result.atd = getDate(firstCheckPoint.date+firstCheckPoint.time);
if (dhl.hasOwnProperty("delivery") && dhl.delivery.code==="101"){ // 101是已送达 102是正在运送
result.ata = getDate(dhl.description);
result.status = "已送达"
}else{
result.status = "正在运送";
result.eta = dhl.edd.hasOwnProperty("date")?getDate(dhl.edd.date):""
}
result.checkPoints = dhl.checkpoints;
result.checkPoints.forEach(item=>{
item.actDate=getDate(item.date+item.time);
item.status = item.description;
item.scanLocation = item.location;
})
//[{counter:"44",description:"清关中",time:"21:40",date: "星期三, 十一月 25, 2020",location: "LEIPZIG - GERMANY"}]
}
resolve(result)
})
}else if (cargo.forwarder==="FEDEX"){
return new Promise((resolve,reject)=>{
result.forwarder = "FEDEX";
let fedex = trackingResponse.TrackPackagesResponse.packageList[0];
if (fedex.isInvalid){
result.responseErrors={
message:"This tracking number cannot be found. Please check the number or contact the sender.",
tracking:cargo.tracking
};
resolve(result)
}else{
result.tracking = fedex.trackingNbr;
result.origin = `${fedex.shipperCity} ${fedex.shipperCntryCD}`;
result.destination = `${fedex.recipientCity} ${fedex.recipientCntryCD}`;
result.atd = new Date(fedex.tenderedDt);
result.eta = fedex.hasOwnProperty("displayEstDeliveryDt")&&fedex.displayEstDeliveryDt!==""?new Date(fedex.displayEstDeliveryDt):"";
result.checkPoints = fedex.scanEventList;
result.checkPoints.forEach(item=>item.actDate=new Date(item.date+"T"+item.time+item.gmtOffset));
// 有关联包裹时请求关联包裹的信息
if (fedex.hasAssociatedShipments){
let req = {};
req.url="https://www.fedex.com/trackingCal/track";
req.method="POST";
req.headers={'content-type':'application/x-www-form-urlencoded' };
req.data=qs.stringify({
action:"getAssociatedShipments",
data:`{"AssociatedShipmentRequest":{"appType":"WTRK","appDeviceType":"DESKTOP","uniqueKey":"","processingParameters":{},"masterTrackingNumberInfo":{"trackingNumberInfo":{"trackingNumber":"${result.tracking}","trackingQualifier":"","trackingCarrier":""},"associatedType":"MPS"}}}`,
locale:"zh_CN",
version:"1",
format:"json"
});
axios(req).then(res=>{
let relatedPackage = res.data.AssociatedShipmentsResponse.associatedShipmentList;
let flag = false; // 如果发现任意一个包裹未送达就改为true
// console.log("relatedPackage",relatedPackage);
relatedPackage.forEach(item=>{
result.pieces.push({
tracking:item.trackingNbr,
atd:new Date(item.tendDt),
status:item.keyStatus,
ata:item.actDeliveryDt?new Date(item.actDeliveryDt):"",
eta:item.estDeliveryDt?new Date(item.estDeliveryDt):"",
});
if (item.keyStatus!=="已送达"){
flag = true;
result.status = "部分子件未送达";
result.eta = item.estDeliveryDt>result.eta?new Date(item.estDeliveryDt):result.eta
}
});
if (!flag){ // 没有不是"已送达"状态的包裹说明都送达了
result.status = "已送达";
result.ata = fedex.actDeliveryDt?new Date(fedex.actDeliveryDt):""
}
resolve(result);
})
}else{
// 没有关联包裹,只有单独一个包裹的:
if (fedex.isDelivered){
result.ata = new Date(fedex.actDeliveryDt);
result.status = "已送达"
}else{
result.eta = new Date(fedex.estDeliveryDt);
result.status = "正在运送"
}
resolve(result)
}
}
})
}else if (cargo.forwarder==="UPS"){
return new Promise((resolve,reject)=>{
result.forwarder = "UPS";
let upsGetDate = (str1,str2)=>{
// str1 20201130
// str2 120000
let Y = str1.slice(0,4); // 年 2020
let M = str1.slice(4,6)-1; // 月 11
let D = str1.slice(6); // 日 30
let h = str2?str2.slice(0,2):""; // 时 12
return new Date(Y,M,D,h)
};
// xml字符串转xml对象
let xmlObj = xml2json.getXmlObject(trackingResponse);
// xml对象转json对象
let jsonObj = xml2json.xmlToJson(xmlObj);
if (jsonObj.TrackResponse.Response.hasOwnProperty("Error")){
result.responseErrors={
message:jsonObj.TrackResponse.Response.Error.ErrorDescription["#text"],
tracking:cargo.tracking
};
resolve(result);
}else{
let ups = jsonObj.TrackResponse.Shipment;
| ext"]} ${ups.ShipTo.Address.CountryCode["#text"]} `;
result.atd = upsGetDate(ups.PickupDate["#text"]);
result.eta = ups.Package.hasOwnProperty("RescheduledDeliveryDate")?
// ups.Package.RescheduledDeliveryDate 20201130
// ups.Package.RescheduledDeliveryTime 120000
upsGetDate(ups.Package.RescheduledDeliveryDate["#text"],ups.Package.RescheduledDeliveryTime["#text"]):"";
if (ups.Package.Activity[0].Status.StatusType.Code["#text"]==="D"){
result.status = "已送达";
result.ata = new Date(ups.Package.Activity[0].GMTDate["#text"]+"T"+ups.Package.Activity[0].GMTTime["#text"]+ups.Package.Activity[0].GMTOffset["#text"])
}else{
result.status = "正在运送"
}
result.checkPoints = ups.Package.Activity;
result.checkPoints.forEach(item=>{
item.actDate=new Date(item.GMTDate["#text"]+"T"+item.GMTTime["#text"]+item.GMTOffset["#text"]);
item.status = item.Status.StatusType.Description["#text"];
item.scanLocation = item.ActivityLocation.Address.hasOwnProperty("City")?
item.ActivityLocation.Address.hasOwnProperty("CountryCode")?item.ActivityLocation.Address.City["#text"]+" "+item.ActivityLocation.Address.CountryCode["#text"]:item.ActivityLocation.Address.City["#text"]
:""
});
// 关联包裹
let req = {};
req.url="https://onlinetools.ups.com/rest/Track";
req.data={
Security: {
UPSServiceAccessToken: {
AccessLicenseNumber: "2C89134F4D10E2D8"
},
UsernameToken: {
Username: "CST6636EY",
Password: "123456Aa"
}
},
TrackRequest: {
InquiryNumber: result.tracking,
Request: {
RequestAction: "Track",
RequestOption: "activity",
SubVersion: "1907",
ReferenceNumber:{"Value": ""}
}
}
};
// 二次请求 拿到关联包裹的状态
axios.post(req.url,req.data).then(res=>{
// console.log("ups二次请求关联包裹",res);
res.data.TrackResponse.Shipment.Package.forEach(item=>{
result.pieces.push({
tracking:item.TrackingNumber,
atd:upsGetDate(res.data.TrackResponse.Shipment.PickupDate,""),
status:item.hasOwnProperty("DeliveryDate")?"已送达":"正在运送",
ata:item.hasOwnProperty("DeliveryDate")?new Date(upsGetDate(item.DeliveryDate,"")):"",
eta:item.hasOwnProperty("DeliveryDetail")?new Date(upsGetDate(item.DeliveryDetail.Date,item.DeliveryDetail.Time)):"",
current:{
location:item.Activity[0].ActivityLocation.Address.City + " " + item.Activity[0].ActivityLocation.Address.CountryCode,
date:new Date(item.Activity[0].GMTDate+"T"+item.Activity[0].GMTTime+item.Activity[0].GMTOffset),
status:item.Activity[0].Status.Description
}
})
});
resolve(result);
});
}
})
}
};
export default expressDataHandling | result.tracking = ups.ShipmentIdentificationNumber["#text"];
result.origin = `${ups.Shipper.Address.City["#text"]} ${ups.Shipper.Address.CountryCode["#text"]}`;
result.destination = `${ups.ShipTo.Address.City["#t | conditional_block |
preprocess.py | # analyze.py
# Takes json data from BBench and PMC data from Powmon
# and generates a machine-learning friendly output file
# that summarizes the data
# Written by Will Sumner
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import sys,glob
from copy import deepcopy
import pmc
import process_json as pj
# Global values
badConfigs = [(0,0),(1,4),(2,4),(1,2),(2,2),(1,1),(2,1),(1,0),(2,0),(0,2)] # configs we will skip
badLittle = [3] # configs we will skip
badBig = [3] # configs we will skip
coreConfigs = []
for little in range(5): # 0 - 4
for big in range(5):
if ((little,big) in badConfigs) or (little in badLittle) or (big in badBig):
continue
coreConfigs.append(str(little)+"l-"+str(big)+"b")
govConfigs = ["ii"] #["ip","pi","pp","ii"]
allLoadTypes = ['navigationStart', 'fetchStart', 'domainLookupStart',
'domainLookupEnd', 'connectStart', 'connectEnd',
#'secureConnectionStart', # this entry was not tested
'requestStart',
'responseStart', 'responseEnd', 'domLoading',
'domInteractive', 'domContentLoadedEventStart',
'domContentLoadedEventEnd', 'domComplete',
'loadEventStart', 'loadEventEnd' ]
loadTypes = ['navigationStart', 'requestStart', 'domLoading', 'domComplete', 'loadEventEnd' ]
#loadTypes = allLoadTypes
phases = loadTypes[0:len(loadTypes)-1]
phasesSimple = ['Setup Connection','Download Page','Process Page','Run Dynamic Content']
#phasesSimple = allLoadTypes
phaseMap = dict(zip(phases,phasesSimple))
sites = [ 'amazon', 'bbc', 'cnn',
'craigslist', 'ebay', 'espn',
'google', 'msn', 'slashdot',
'twitter', 'youtube']
powmon_sample_period = 100.0 # sample period is 100ms
verboseGlobal = True
def printv(string):
global verboseGlobal
if verboseGlobal:
print(string)
def indexTimestamp(timestamp,timestampArr):
count = 0
while(count < len(timestampArr) and timestampArr[count] < timestamp):
count +=1
return count
def timestampInterval(start,end,timestampArr):
start = indexTimestamp(start,timestampArr)
end = indexTimestamp(end,timestampArr)
if end < start:
return (-1,-1)
return (start,end+1) # plus 1 for python indexing
def filterZeros(data): # remove less than 0 data
return data[data >= 0]
def filterOutliers(data, m): # remove outliers
return data[abs(data - np.mean(data)) < m * np.std(data)]
def cleanupEntry(entry,maxStds):
return filterZeros(filterOutliers(entry,maxStds))
def cleanupData(data,maxStds=3):
for coreConfig in coreConfigs:
for govConfig in govConfigs:
for site in sites:
for phase in phases:
for matrixType in ["loadtime","energy"]:
data[coreConfig][govConfig][site][phase][matrixType] = \
cleanupEntry(data[coreConfig][govConfig][site][phase][matrixType],maxStds)
def parseAndCalcEnergy(filePrefix="sim-data-", iterations=10,cleanData=False,verbose=False):
global verboseGlobal
verboseGlobal = verbose
if filePrefix[-1] != '-': # some quick error checking
filePrefix += "-"
pmcDir = "powmon-data/"
jsonDir = "json-data/"
pmcPrefix = pmcDir + filePrefix
jsonPrefix = jsonDir + filePrefix
# Layout for the data: # TODO fix this --- really bad design, maybe try to leverage numpy multidim arrays more
# websiteData[coreConfig][govConfig][siteName][loadTimeType][iteration]['energy'|'loadtime'] -> npArray of values
baseContainer = {'energy':np.zeros((iterations,)), 'loadtime': np.zeros((iterations,))}
byLoadType = dict(zip(phases,[deepcopy(baseContainer) for phase in phases]))
bySite = dict(zip(sites,[deepcopy(byLoadType) for site in sites]))
byGov = dict(zip(govConfigs,[deepcopy(bySite) for config in govConfigs]))
websiteData = dict(zip(coreConfigs,[deepcopy(byGov) for config in coreConfigs]))
knownCoreConfigs = []
maxIterations = 0
warnedIterations = False
for coreConfig in coreConfigs:
pmcFile = pmcPrefix + coreConfig + "-"
jsonFilePrefix = jsonPrefix + coreConfig + "-"
for govConfig in govConfigs:
pmcFilePrefix = pmcPrefix + coreConfig + "-" + govConfig + "-"
jsonFilePrefix = jsonPrefix + coreConfig + "-" + govConfig + "-"
pmcFiles = glob.glob(pmcFilePrefix+"*") # just use pmc files to get id
ids = []
for index,f in enumerate(pmcFiles):
ids.append(pmcFiles[index].split("-")[-1]) # id is last field
if len(ids) != 0: # we found a file!
if not coreConfig in knownCoreConfigs:
knownCoreConfigs.append(coreConfig) # track which core configs we've found
for fileIndex,fileID in enumerate(ids): # for each pair of data files
iteration = fileIndex
if (iteration >= iterations):
if (not(warnedIterations)):
print("Warning: additional iteration data found, skipping.")
warnedIterations = True
break # stop if we can't hold anymore data, TODO allow for dynamic number of files
pmcFile = pmcFiles[fileIndex]
jsonFile = jsonFilePrefix + fileID + ".json" # look at same id'd json file
printv("on file " + pmcFile)
printv("with file " + jsonFile)
try:
pmcData = pmc.readPMCData(pmcFile) # ndarray
except IOError as e:
print(e)
continue
try:
jsonData = pj.readSeleniumData(jsonFile) # dict of mixed types
except IOError as e:
print(e)
continue
energyThreshold = 0.01
for site in sites:
for index,phase in enumerate(phases):
loadtime = jsonData['timestamps'][site][0][loadTypes[index+1]][0] - jsonData['timestamps'][site][0][phase][0]
websiteData[coreConfig][govConfig][site][phase]['loadtime'][iteration] = loadtime
| websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = -100
continue
start,end = timestampInterval(int(jsonData['timestamps'][site][0][phase][0]),
int(jsonData['timestamps'][site][0][loadTypes[index+1]][0]),
pmcData['Time_Milliseconds'])
if start == -1 or end == -1: # error getting timestamp
printv("unable to calculate timestamps in phase " + phase + ", skipping...")
continue
if (start == end-1 and end < len(pmcData['Power_A7'])): # time interval is lower than our powmon recorded, estimate
scaleFactor = loadtime/powmon_sample_period
minPower = min(pmcData['Power_A7'][start-1],pmcData['Power_A7'][end])
maxPower = max(pmcData['Power_A7'][start-1],pmcData['Power_A7'][end])
energyLittle = (minPower + 0.5*(maxPower-minPower)) * scaleFactor * (pmcData['Time_Milliseconds'][end] - pmcData['Time_Milliseconds'][start])
minPower = min(pmcData['Power_A15'][start-1],pmcData['Power_A15'][end])
maxPower = max(pmcData['Power_A15'][start-1],pmcData['Power_A15'][end])
energyBig = (minPower + 0.5*(maxPower-minPower)) * scaleFactor * (pmcData['Time_Milliseconds'][end] - pmcData['Time_Milliseconds'][start])
energy = energyBig + energyLittle
if energy <= energyThreshold:
printv("In phase: " + phase)
printv(str(energy) + " energy calculated from (" + str(minPower) + \
" * 0.5*(" + str(maxPower) + "-" + str(minPower) + ")) * " + str(scaleFactor))
printv("scaleFactor = " + str(loadtime) + "/" + str(powmon_sample_period))
printv("loadtime = " + str(jsonData['timestamps'][site][0][loadTypes[index+1]][0]) + " - " + \
str(jsonData['timestamps'][site][0][phase][0]))
if loadtime == 0: # if we didn't get any meaningful data because of a low loadtime
energy = -100 # make sure it gets filtered out
elif start == end -1: # edge case where data is not available
printv("edge case found with phase" + phase)
energy = -100
else:
energy = pmc.calcEnergy(pmcData['Power_A7'][start:end], pmcData['Time_Milliseconds'][start:end])
energy += pmc.calcEnergy(pmcData['Power_A15'][start:end], pmcData['Time_Milliseconds'][start:end])
if energy <= energyThreshold:
printv(str(energy) + " energy calculated from regular integration")
printv(start)
printv(end)
printv(pmcData['Power_A7'][start:end])
printv(pmcData['Power_A15'][start:end])
printv(pmcData['Time_Milliseconds'][start:end])
if (start != end): # if we didn't do an approximation
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = energy
else:
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = \
energy*(loadtime/powmon_sample_period)
maxIterations = max(fileIndex,maxIterations)
if cleanData:
cleanupData(websiteData,maxStds=3)
return (websiteData,knownCoreConfigs,maxIterations)
def avgMatrix(timeAndEnergy,iterStart=0,iterStop=0): # update to avged matrix
if iterStop == 0:
iterStop = len(timeAndEnergy['4l-4b']['ii']['amazon']['navigationStart']['energy'])
for coreConfig in coreConfigs:
for phase in phases:
for govConfig in govConfigs:
for site in sites:
timeAndEnergy[coreConfig][govConfig][site][phase]['energy'] = \
np.mean(timeAndEnergy[coreConfig][govConfig][site][phase]['energy'][iterStart:iterStop])
timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'] = \
np.mean(timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'][iterStart:iterStop])
def extractIter(timeAndEnergy,iteration): # TODO refactor timeAndEnergy organization - this is too hardcoded
baseContainer = {'energy':np.zeros((1,)), 'loadtime': np.zeros((1,))}
byLoadType = dict(zip(phases,[deepcopy(baseContainer) for phase in phases]))
bySite = dict(zip(sites,[deepcopy(byLoadType) for site in sites]))
byGov = dict(zip(govConfigs,[deepcopy(bySite) for config in govConfigs]))
iterData = dict(zip(coreConfigs,[deepcopy(byGov) for config in coreConfigs]))
for coreConfig in coreConfigs:
for phase in phases:
for govConfig in govConfigs:
for site in sites:
iterData[coreConfig][govConfig][site][phase]['energy'] = \
timeAndEnergy[coreConfig][govConfig][site][phase]['energy'][iteration]
iterData[coreConfig][govConfig][site][phase]['loadtime'] = \
timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'][iteration]
return iterData
def writeData(data,filename):
pj.writeData(data,filename)
def readData(filename):
return pj.readData(filename)
if __name__ == "__main__":
print("Processing data...")
data,foundConfigs,maxIterations = parseAndCalcEnergy(filePrefix="sim-data-",iterations=27,cleanData=False)
dataFilename = "sim-data/sim-data-processed.json"
iter0 = extractIter(data,0)
iter1 = extractIter(data,1)
if sys.flags.interactive: # we are in an interactive shell
print("Running in interactive mode: type 'data' to see values generated by this file")
else:
print("Writing data to file...")
pj.writeData([data,foundConfigs,maxIterations],dataFilename,indent=1)
print("Note: Running this file with python -i will allow you to interact directly with the data") | if loadtime == 0: # don't waste time on 0 energies | random_line_split |
preprocess.py | # analyze.py
# Takes json data from BBench and PMC data from Powmon
# and generates a machine-learning friendly output file
# that summarizes the data
# Written by Will Sumner
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import sys,glob
from copy import deepcopy
import pmc
import process_json as pj
# Global values
badConfigs = [(0,0),(1,4),(2,4),(1,2),(2,2),(1,1),(2,1),(1,0),(2,0),(0,2)] # configs we will skip
badLittle = [3] # configs we will skip
badBig = [3] # configs we will skip
coreConfigs = []
for little in range(5): # 0 - 4
for big in range(5):
if ((little,big) in badConfigs) or (little in badLittle) or (big in badBig):
continue
coreConfigs.append(str(little)+"l-"+str(big)+"b")
govConfigs = ["ii"] #["ip","pi","pp","ii"]
allLoadTypes = ['navigationStart', 'fetchStart', 'domainLookupStart',
'domainLookupEnd', 'connectStart', 'connectEnd',
#'secureConnectionStart', # this entry was not tested
'requestStart',
'responseStart', 'responseEnd', 'domLoading',
'domInteractive', 'domContentLoadedEventStart',
'domContentLoadedEventEnd', 'domComplete',
'loadEventStart', 'loadEventEnd' ]
loadTypes = ['navigationStart', 'requestStart', 'domLoading', 'domComplete', 'loadEventEnd' ]
#loadTypes = allLoadTypes
phases = loadTypes[0:len(loadTypes)-1]
phasesSimple = ['Setup Connection','Download Page','Process Page','Run Dynamic Content']
#phasesSimple = allLoadTypes
phaseMap = dict(zip(phases,phasesSimple))
sites = [ 'amazon', 'bbc', 'cnn',
'craigslist', 'ebay', 'espn',
'google', 'msn', 'slashdot',
'twitter', 'youtube']
powmon_sample_period = 100.0 # sample period is 100ms
verboseGlobal = True
def printv(string):
global verboseGlobal
if verboseGlobal:
print(string)
def indexTimestamp(timestamp,timestampArr):
count = 0
while(count < len(timestampArr) and timestampArr[count] < timestamp):
count +=1
return count
def timestampInterval(start,end,timestampArr):
start = indexTimestamp(start,timestampArr)
end = indexTimestamp(end,timestampArr)
if end < start:
return (-1,-1)
return (start,end+1) # plus 1 for python indexing
def filterZeros(data): # remove less than 0 data
return data[data >= 0]
def filterOutliers(data, m): # remove outliers
return data[abs(data - np.mean(data)) < m * np.std(data)]
def cleanupEntry(entry,maxStds):
return filterZeros(filterOutliers(entry,maxStds))
def cleanupData(data,maxStds=3):
for coreConfig in coreConfigs:
for govConfig in govConfigs:
for site in sites:
for phase in phases:
for matrixType in ["loadtime","energy"]:
data[coreConfig][govConfig][site][phase][matrixType] = \
cleanupEntry(data[coreConfig][govConfig][site][phase][matrixType],maxStds)
def parseAndCalcEnergy(filePrefix="sim-data-", iterations=10,cleanData=False,verbose=False):
|
def avgMatrix(timeAndEnergy,iterStart=0,iterStop=0): # update to avged matrix
if iterStop == 0:
iterStop = len(timeAndEnergy['4l-4b']['ii']['amazon']['navigationStart']['energy'])
for coreConfig in coreConfigs:
for phase in phases:
for govConfig in govConfigs:
for site in sites:
timeAndEnergy[coreConfig][govConfig][site][phase]['energy'] = \
np.mean(timeAndEnergy[coreConfig][govConfig][site][phase]['energy'][iterStart:iterStop])
timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'] = \
np.mean(timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'][iterStart:iterStop])
def extractIter(timeAndEnergy,iteration): # TODO refactor timeAndEnergy organization - this is too hardcoded
baseContainer = {'energy':np.zeros((1,)), 'loadtime': np.zeros((1,))}
byLoadType = dict(zip(phases,[deepcopy(baseContainer) for phase in phases]))
bySite = dict(zip(sites,[deepcopy(byLoadType) for site in sites]))
byGov = dict(zip(govConfigs,[deepcopy(bySite) for config in govConfigs]))
iterData = dict(zip(coreConfigs,[deepcopy(byGov) for config in coreConfigs]))
for coreConfig in coreConfigs:
for phase in phases:
for govConfig in govConfigs:
for site in sites:
iterData[coreConfig][govConfig][site][phase]['energy'] = \
timeAndEnergy[coreConfig][govConfig][site][phase]['energy'][iteration]
iterData[coreConfig][govConfig][site][phase]['loadtime'] = \
timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'][iteration]
return iterData
def writeData(data,filename):
pj.writeData(data,filename)
def readData(filename):
return pj.readData(filename)
if __name__ == "__main__":
print("Processing data...")
data,foundConfigs,maxIterations = parseAndCalcEnergy(filePrefix="sim-data-",iterations=27,cleanData=False)
dataFilename = "sim-data/sim-data-processed.json"
iter0 = extractIter(data,0)
iter1 = extractIter(data,1)
if sys.flags.interactive: # we are in an interactive shell
print("Running in interactive mode: type 'data' to see values generated by this file")
else:
print("Writing data to file...")
pj.writeData([data,foundConfigs,maxIterations],dataFilename,indent=1)
print("Note: Running this file with python -i will allow you to interact directly with the data")
| global verboseGlobal
verboseGlobal = verbose
if filePrefix[-1] != '-': # some quick error checking
filePrefix += "-"
pmcDir = "powmon-data/"
jsonDir = "json-data/"
pmcPrefix = pmcDir + filePrefix
jsonPrefix = jsonDir + filePrefix
# Layout for the data: # TODO fix this --- really bad design, maybe try to leverage numpy multidim arrays more
# websiteData[coreConfig][govConfig][siteName][loadTimeType][iteration]['energy'|'loadtime'] -> npArray of values
baseContainer = {'energy':np.zeros((iterations,)), 'loadtime': np.zeros((iterations,))}
byLoadType = dict(zip(phases,[deepcopy(baseContainer) for phase in phases]))
bySite = dict(zip(sites,[deepcopy(byLoadType) for site in sites]))
byGov = dict(zip(govConfigs,[deepcopy(bySite) for config in govConfigs]))
websiteData = dict(zip(coreConfigs,[deepcopy(byGov) for config in coreConfigs]))
knownCoreConfigs = []
maxIterations = 0
warnedIterations = False
for coreConfig in coreConfigs:
pmcFile = pmcPrefix + coreConfig + "-"
jsonFilePrefix = jsonPrefix + coreConfig + "-"
for govConfig in govConfigs:
pmcFilePrefix = pmcPrefix + coreConfig + "-" + govConfig + "-"
jsonFilePrefix = jsonPrefix + coreConfig + "-" + govConfig + "-"
pmcFiles = glob.glob(pmcFilePrefix+"*") # just use pmc files to get id
ids = []
for index,f in enumerate(pmcFiles):
ids.append(pmcFiles[index].split("-")[-1]) # id is last field
if len(ids) != 0: # we found a file!
if not coreConfig in knownCoreConfigs:
knownCoreConfigs.append(coreConfig) # track which core configs we've found
for fileIndex,fileID in enumerate(ids): # for each pair of data files
iteration = fileIndex
if (iteration >= iterations):
if (not(warnedIterations)):
print("Warning: additional iteration data found, skipping.")
warnedIterations = True
break # stop if we can't hold anymore data, TODO allow for dynamic number of files
pmcFile = pmcFiles[fileIndex]
jsonFile = jsonFilePrefix + fileID + ".json" # look at same id'd json file
printv("on file " + pmcFile)
printv("with file " + jsonFile)
try:
pmcData = pmc.readPMCData(pmcFile) # ndarray
except IOError as e:
print(e)
continue
try:
jsonData = pj.readSeleniumData(jsonFile) # dict of mixed types
except IOError as e:
print(e)
continue
energyThreshold = 0.01
for site in sites:
for index,phase in enumerate(phases):
loadtime = jsonData['timestamps'][site][0][loadTypes[index+1]][0] - jsonData['timestamps'][site][0][phase][0]
websiteData[coreConfig][govConfig][site][phase]['loadtime'][iteration] = loadtime
if loadtime == 0: # don't waste time on 0 energies
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = -100
continue
start,end = timestampInterval(int(jsonData['timestamps'][site][0][phase][0]),
int(jsonData['timestamps'][site][0][loadTypes[index+1]][0]),
pmcData['Time_Milliseconds'])
if start == -1 or end == -1: # error getting timestamp
printv("unable to calculate timestamps in phase " + phase + ", skipping...")
continue
if (start == end-1 and end < len(pmcData['Power_A7'])): # time interval is lower than our powmon recorded, estimate
scaleFactor = loadtime/powmon_sample_period
minPower = min(pmcData['Power_A7'][start-1],pmcData['Power_A7'][end])
maxPower = max(pmcData['Power_A7'][start-1],pmcData['Power_A7'][end])
energyLittle = (minPower + 0.5*(maxPower-minPower)) * scaleFactor * (pmcData['Time_Milliseconds'][end] - pmcData['Time_Milliseconds'][start])
minPower = min(pmcData['Power_A15'][start-1],pmcData['Power_A15'][end])
maxPower = max(pmcData['Power_A15'][start-1],pmcData['Power_A15'][end])
energyBig = (minPower + 0.5*(maxPower-minPower)) * scaleFactor * (pmcData['Time_Milliseconds'][end] - pmcData['Time_Milliseconds'][start])
energy = energyBig + energyLittle
if energy <= energyThreshold:
printv("In phase: " + phase)
printv(str(energy) + " energy calculated from (" + str(minPower) + \
" * 0.5*(" + str(maxPower) + "-" + str(minPower) + ")) * " + str(scaleFactor))
printv("scaleFactor = " + str(loadtime) + "/" + str(powmon_sample_period))
printv("loadtime = " + str(jsonData['timestamps'][site][0][loadTypes[index+1]][0]) + " - " + \
str(jsonData['timestamps'][site][0][phase][0]))
if loadtime == 0: # if we didn't get any meaningful data because of a low loadtime
energy = -100 # make sure it gets filtered out
elif start == end -1: # edge case where data is not available
printv("edge case found with phase" + phase)
energy = -100
else:
energy = pmc.calcEnergy(pmcData['Power_A7'][start:end], pmcData['Time_Milliseconds'][start:end])
energy += pmc.calcEnergy(pmcData['Power_A15'][start:end], pmcData['Time_Milliseconds'][start:end])
if energy <= energyThreshold:
printv(str(energy) + " energy calculated from regular integration")
printv(start)
printv(end)
printv(pmcData['Power_A7'][start:end])
printv(pmcData['Power_A15'][start:end])
printv(pmcData['Time_Milliseconds'][start:end])
if (start != end): # if we didn't do an approximation
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = energy
else:
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = \
energy*(loadtime/powmon_sample_period)
maxIterations = max(fileIndex,maxIterations)
if cleanData:
cleanupData(websiteData,maxStds=3)
return (websiteData,knownCoreConfigs,maxIterations) | identifier_body |
preprocess.py | # analyze.py
# Takes json data from BBench and PMC data from Powmon
# and generates a machine-learning friendly output file
# that summarizes the data
# Written by Will Sumner
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import sys,glob
from copy import deepcopy
import pmc
import process_json as pj
# Global values
badConfigs = [(0,0),(1,4),(2,4),(1,2),(2,2),(1,1),(2,1),(1,0),(2,0),(0,2)] # configs we will skip
badLittle = [3] # configs we will skip
badBig = [3] # configs we will skip
coreConfigs = []
for little in range(5): # 0 - 4
for big in range(5):
if ((little,big) in badConfigs) or (little in badLittle) or (big in badBig):
continue
coreConfigs.append(str(little)+"l-"+str(big)+"b")
govConfigs = ["ii"] #["ip","pi","pp","ii"]
allLoadTypes = ['navigationStart', 'fetchStart', 'domainLookupStart',
'domainLookupEnd', 'connectStart', 'connectEnd',
#'secureConnectionStart', # this entry was not tested
'requestStart',
'responseStart', 'responseEnd', 'domLoading',
'domInteractive', 'domContentLoadedEventStart',
'domContentLoadedEventEnd', 'domComplete',
'loadEventStart', 'loadEventEnd' ]
loadTypes = ['navigationStart', 'requestStart', 'domLoading', 'domComplete', 'loadEventEnd' ]
#loadTypes = allLoadTypes
phases = loadTypes[0:len(loadTypes)-1]
phasesSimple = ['Setup Connection','Download Page','Process Page','Run Dynamic Content']
#phasesSimple = allLoadTypes
phaseMap = dict(zip(phases,phasesSimple))
sites = [ 'amazon', 'bbc', 'cnn',
'craigslist', 'ebay', 'espn',
'google', 'msn', 'slashdot',
'twitter', 'youtube']
powmon_sample_period = 100.0 # sample period is 100ms
verboseGlobal = True
def printv(string):
global verboseGlobal
if verboseGlobal:
print(string)
def indexTimestamp(timestamp,timestampArr):
count = 0
while(count < len(timestampArr) and timestampArr[count] < timestamp):
count +=1
return count
def timestampInterval(start,end,timestampArr):
start = indexTimestamp(start,timestampArr)
end = indexTimestamp(end,timestampArr)
if end < start:
return (-1,-1)
return (start,end+1) # plus 1 for python indexing
def filterZeros(data): # remove less than 0 data
return data[data >= 0]
def filterOutliers(data, m): # remove outliers
return data[abs(data - np.mean(data)) < m * np.std(data)]
def cleanupEntry(entry,maxStds):
return filterZeros(filterOutliers(entry,maxStds))
def cleanupData(data,maxStds=3):
for coreConfig in coreConfigs:
for govConfig in govConfigs:
for site in sites:
for phase in phases:
for matrixType in ["loadtime","energy"]:
data[coreConfig][govConfig][site][phase][matrixType] = \
cleanupEntry(data[coreConfig][govConfig][site][phase][matrixType],maxStds)
def parseAndCalcEnergy(filePrefix="sim-data-", iterations=10,cleanData=False,verbose=False):
global verboseGlobal
verboseGlobal = verbose
if filePrefix[-1] != '-': # some quick error checking
filePrefix += "-"
pmcDir = "powmon-data/"
jsonDir = "json-data/"
pmcPrefix = pmcDir + filePrefix
jsonPrefix = jsonDir + filePrefix
# Layout for the data: # TODO fix this --- really bad design, maybe try to leverage numpy multidim arrays more
# websiteData[coreConfig][govConfig][siteName][loadTimeType][iteration]['energy'|'loadtime'] -> npArray of values
baseContainer = {'energy':np.zeros((iterations,)), 'loadtime': np.zeros((iterations,))}
byLoadType = dict(zip(phases,[deepcopy(baseContainer) for phase in phases]))
bySite = dict(zip(sites,[deepcopy(byLoadType) for site in sites]))
byGov = dict(zip(govConfigs,[deepcopy(bySite) for config in govConfigs]))
websiteData = dict(zip(coreConfigs,[deepcopy(byGov) for config in coreConfigs]))
knownCoreConfigs = []
maxIterations = 0
warnedIterations = False
for coreConfig in coreConfigs:
pmcFile = pmcPrefix + coreConfig + "-"
jsonFilePrefix = jsonPrefix + coreConfig + "-"
for govConfig in govConfigs:
pmcFilePrefix = pmcPrefix + coreConfig + "-" + govConfig + "-"
jsonFilePrefix = jsonPrefix + coreConfig + "-" + govConfig + "-"
pmcFiles = glob.glob(pmcFilePrefix+"*") # just use pmc files to get id
ids = []
for index,f in enumerate(pmcFiles):
ids.append(pmcFiles[index].split("-")[-1]) # id is last field
if len(ids) != 0: # we found a file!
if not coreConfig in knownCoreConfigs:
knownCoreConfigs.append(coreConfig) # track which core configs we've found
for fileIndex,fileID in enumerate(ids): # for each pair of data files
iteration = fileIndex
if (iteration >= iterations):
if (not(warnedIterations)):
print("Warning: additional iteration data found, skipping.")
warnedIterations = True
break # stop if we can't hold anymore data, TODO allow for dynamic number of files
pmcFile = pmcFiles[fileIndex]
jsonFile = jsonFilePrefix + fileID + ".json" # look at same id'd json file
printv("on file " + pmcFile)
printv("with file " + jsonFile)
try:
pmcData = pmc.readPMCData(pmcFile) # ndarray
except IOError as e:
print(e)
continue
try:
jsonData = pj.readSeleniumData(jsonFile) # dict of mixed types
except IOError as e:
print(e)
continue
energyThreshold = 0.01
for site in sites:
for index,phase in enumerate(phases):
loadtime = jsonData['timestamps'][site][0][loadTypes[index+1]][0] - jsonData['timestamps'][site][0][phase][0]
websiteData[coreConfig][govConfig][site][phase]['loadtime'][iteration] = loadtime
if loadtime == 0: # don't waste time on 0 energies
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = -100
continue
start,end = timestampInterval(int(jsonData['timestamps'][site][0][phase][0]),
int(jsonData['timestamps'][site][0][loadTypes[index+1]][0]),
pmcData['Time_Milliseconds'])
if start == -1 or end == -1: # error getting timestamp
printv("unable to calculate timestamps in phase " + phase + ", skipping...")
continue
if (start == end-1 and end < len(pmcData['Power_A7'])): # time interval is lower than our powmon recorded, estimate
scaleFactor = loadtime/powmon_sample_period
minPower = min(pmcData['Power_A7'][start-1],pmcData['Power_A7'][end])
maxPower = max(pmcData['Power_A7'][start-1],pmcData['Power_A7'][end])
energyLittle = (minPower + 0.5*(maxPower-minPower)) * scaleFactor * (pmcData['Time_Milliseconds'][end] - pmcData['Time_Milliseconds'][start])
minPower = min(pmcData['Power_A15'][start-1],pmcData['Power_A15'][end])
maxPower = max(pmcData['Power_A15'][start-1],pmcData['Power_A15'][end])
energyBig = (minPower + 0.5*(maxPower-minPower)) * scaleFactor * (pmcData['Time_Milliseconds'][end] - pmcData['Time_Milliseconds'][start])
energy = energyBig + energyLittle
if energy <= energyThreshold:
printv("In phase: " + phase)
printv(str(energy) + " energy calculated from (" + str(minPower) + \
" * 0.5*(" + str(maxPower) + "-" + str(minPower) + ")) * " + str(scaleFactor))
printv("scaleFactor = " + str(loadtime) + "/" + str(powmon_sample_period))
printv("loadtime = " + str(jsonData['timestamps'][site][0][loadTypes[index+1]][0]) + " - " + \
str(jsonData['timestamps'][site][0][phase][0]))
if loadtime == 0: # if we didn't get any meaningful data because of a low loadtime
energy = -100 # make sure it gets filtered out
elif start == end -1: # edge case where data is not available
printv("edge case found with phase" + phase)
energy = -100
else:
energy = pmc.calcEnergy(pmcData['Power_A7'][start:end], pmcData['Time_Milliseconds'][start:end])
energy += pmc.calcEnergy(pmcData['Power_A15'][start:end], pmcData['Time_Milliseconds'][start:end])
if energy <= energyThreshold:
printv(str(energy) + " energy calculated from regular integration")
printv(start)
printv(end)
printv(pmcData['Power_A7'][start:end])
printv(pmcData['Power_A15'][start:end])
printv(pmcData['Time_Milliseconds'][start:end])
if (start != end): # if we didn't do an approximation
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = energy
else:
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = \
energy*(loadtime/powmon_sample_period)
maxIterations = max(fileIndex,maxIterations)
if cleanData:
cleanupData(websiteData,maxStds=3)
return (websiteData,knownCoreConfigs,maxIterations)
def avgMatrix(timeAndEnergy,iterStart=0,iterStop=0): # update to avged matrix
if iterStop == 0:
iterStop = len(timeAndEnergy['4l-4b']['ii']['amazon']['navigationStart']['energy'])
for coreConfig in coreConfigs:
for phase in phases:
for govConfig in govConfigs:
for site in sites:
timeAndEnergy[coreConfig][govConfig][site][phase]['energy'] = \
np.mean(timeAndEnergy[coreConfig][govConfig][site][phase]['energy'][iterStart:iterStop])
timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'] = \
np.mean(timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'][iterStart:iterStop])
def extractIter(timeAndEnergy,iteration): # TODO refactor timeAndEnergy organization - this is too hardcoded
baseContainer = {'energy':np.zeros((1,)), 'loadtime': np.zeros((1,))}
byLoadType = dict(zip(phases,[deepcopy(baseContainer) for phase in phases]))
bySite = dict(zip(sites,[deepcopy(byLoadType) for site in sites]))
byGov = dict(zip(govConfigs,[deepcopy(bySite) for config in govConfigs]))
iterData = dict(zip(coreConfigs,[deepcopy(byGov) for config in coreConfigs]))
for coreConfig in coreConfigs:
for phase in phases:
for govConfig in govConfigs:
for site in sites:
iterData[coreConfig][govConfig][site][phase]['energy'] = \
timeAndEnergy[coreConfig][govConfig][site][phase]['energy'][iteration]
iterData[coreConfig][govConfig][site][phase]['loadtime'] = \
timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'][iteration]
return iterData
def writeData(data,filename):
pj.writeData(data,filename)
def | (filename):
return pj.readData(filename)
if __name__ == "__main__":
print("Processing data...")
data,foundConfigs,maxIterations = parseAndCalcEnergy(filePrefix="sim-data-",iterations=27,cleanData=False)
dataFilename = "sim-data/sim-data-processed.json"
iter0 = extractIter(data,0)
iter1 = extractIter(data,1)
if sys.flags.interactive: # we are in an interactive shell
print("Running in interactive mode: type 'data' to see values generated by this file")
else:
print("Writing data to file...")
pj.writeData([data,foundConfigs,maxIterations],dataFilename,indent=1)
print("Note: Running this file with python -i will allow you to interact directly with the data")
| readData | identifier_name |
preprocess.py | # analyze.py
# Takes json data from BBench and PMC data from Powmon
# and generates a machine-learning friendly output file
# that summarizes the data
# Written by Will Sumner
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import sys,glob
from copy import deepcopy
import pmc
import process_json as pj
# Global values
badConfigs = [(0,0),(1,4),(2,4),(1,2),(2,2),(1,1),(2,1),(1,0),(2,0),(0,2)] # configs we will skip
badLittle = [3] # configs we will skip
badBig = [3] # configs we will skip
coreConfigs = []
for little in range(5): # 0 - 4
for big in range(5):
if ((little,big) in badConfigs) or (little in badLittle) or (big in badBig):
continue
coreConfigs.append(str(little)+"l-"+str(big)+"b")
govConfigs = ["ii"] #["ip","pi","pp","ii"]
allLoadTypes = ['navigationStart', 'fetchStart', 'domainLookupStart',
'domainLookupEnd', 'connectStart', 'connectEnd',
#'secureConnectionStart', # this entry was not tested
'requestStart',
'responseStart', 'responseEnd', 'domLoading',
'domInteractive', 'domContentLoadedEventStart',
'domContentLoadedEventEnd', 'domComplete',
'loadEventStart', 'loadEventEnd' ]
loadTypes = ['navigationStart', 'requestStart', 'domLoading', 'domComplete', 'loadEventEnd' ]
#loadTypes = allLoadTypes
phases = loadTypes[0:len(loadTypes)-1]
phasesSimple = ['Setup Connection','Download Page','Process Page','Run Dynamic Content']
#phasesSimple = allLoadTypes
phaseMap = dict(zip(phases,phasesSimple))
sites = [ 'amazon', 'bbc', 'cnn',
'craigslist', 'ebay', 'espn',
'google', 'msn', 'slashdot',
'twitter', 'youtube']
powmon_sample_period = 100.0 # sample period is 100ms
verboseGlobal = True
def printv(string):
global verboseGlobal
if verboseGlobal:
print(string)
def indexTimestamp(timestamp,timestampArr):
count = 0
while(count < len(timestampArr) and timestampArr[count] < timestamp):
count +=1
return count
def timestampInterval(start,end,timestampArr):
start = indexTimestamp(start,timestampArr)
end = indexTimestamp(end,timestampArr)
if end < start:
return (-1,-1)
return (start,end+1) # plus 1 for python indexing
def filterZeros(data): # remove less than 0 data
return data[data >= 0]
def filterOutliers(data, m): # remove outliers
return data[abs(data - np.mean(data)) < m * np.std(data)]
def cleanupEntry(entry,maxStds):
return filterZeros(filterOutliers(entry,maxStds))
def cleanupData(data,maxStds=3):
for coreConfig in coreConfigs:
for govConfig in govConfigs:
for site in sites:
for phase in phases:
for matrixType in ["loadtime","energy"]:
data[coreConfig][govConfig][site][phase][matrixType] = \
cleanupEntry(data[coreConfig][govConfig][site][phase][matrixType],maxStds)
def parseAndCalcEnergy(filePrefix="sim-data-", iterations=10,cleanData=False,verbose=False):
global verboseGlobal
verboseGlobal = verbose
if filePrefix[-1] != '-': # some quick error checking
filePrefix += "-"
pmcDir = "powmon-data/"
jsonDir = "json-data/"
pmcPrefix = pmcDir + filePrefix
jsonPrefix = jsonDir + filePrefix
# Layout for the data: # TODO fix this --- really bad design, maybe try to leverage numpy multidim arrays more
# websiteData[coreConfig][govConfig][siteName][loadTimeType][iteration]['energy'|'loadtime'] -> npArray of values
baseContainer = {'energy':np.zeros((iterations,)), 'loadtime': np.zeros((iterations,))}
byLoadType = dict(zip(phases,[deepcopy(baseContainer) for phase in phases]))
bySite = dict(zip(sites,[deepcopy(byLoadType) for site in sites]))
byGov = dict(zip(govConfigs,[deepcopy(bySite) for config in govConfigs]))
websiteData = dict(zip(coreConfigs,[deepcopy(byGov) for config in coreConfigs]))
knownCoreConfigs = []
maxIterations = 0
warnedIterations = False
for coreConfig in coreConfigs:
pmcFile = pmcPrefix + coreConfig + "-"
jsonFilePrefix = jsonPrefix + coreConfig + "-"
for govConfig in govConfigs:
pmcFilePrefix = pmcPrefix + coreConfig + "-" + govConfig + "-"
jsonFilePrefix = jsonPrefix + coreConfig + "-" + govConfig + "-"
pmcFiles = glob.glob(pmcFilePrefix+"*") # just use pmc files to get id
ids = []
for index,f in enumerate(pmcFiles):
ids.append(pmcFiles[index].split("-")[-1]) # id is last field
if len(ids) != 0: # we found a file!
if not coreConfig in knownCoreConfigs:
knownCoreConfigs.append(coreConfig) # track which core configs we've found
for fileIndex,fileID in enumerate(ids): # for each pair of data files
iteration = fileIndex
if (iteration >= iterations):
|
pmcFile = pmcFiles[fileIndex]
jsonFile = jsonFilePrefix + fileID + ".json" # look at same id'd json file
printv("on file " + pmcFile)
printv("with file " + jsonFile)
try:
pmcData = pmc.readPMCData(pmcFile) # ndarray
except IOError as e:
print(e)
continue
try:
jsonData = pj.readSeleniumData(jsonFile) # dict of mixed types
except IOError as e:
print(e)
continue
energyThreshold = 0.01
for site in sites:
for index,phase in enumerate(phases):
loadtime = jsonData['timestamps'][site][0][loadTypes[index+1]][0] - jsonData['timestamps'][site][0][phase][0]
websiteData[coreConfig][govConfig][site][phase]['loadtime'][iteration] = loadtime
if loadtime == 0: # don't waste time on 0 energies
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = -100
continue
start,end = timestampInterval(int(jsonData['timestamps'][site][0][phase][0]),
int(jsonData['timestamps'][site][0][loadTypes[index+1]][0]),
pmcData['Time_Milliseconds'])
if start == -1 or end == -1: # error getting timestamp
printv("unable to calculate timestamps in phase " + phase + ", skipping...")
continue
if (start == end-1 and end < len(pmcData['Power_A7'])): # time interval is lower than our powmon recorded, estimate
scaleFactor = loadtime/powmon_sample_period
minPower = min(pmcData['Power_A7'][start-1],pmcData['Power_A7'][end])
maxPower = max(pmcData['Power_A7'][start-1],pmcData['Power_A7'][end])
energyLittle = (minPower + 0.5*(maxPower-minPower)) * scaleFactor * (pmcData['Time_Milliseconds'][end] - pmcData['Time_Milliseconds'][start])
minPower = min(pmcData['Power_A15'][start-1],pmcData['Power_A15'][end])
maxPower = max(pmcData['Power_A15'][start-1],pmcData['Power_A15'][end])
energyBig = (minPower + 0.5*(maxPower-minPower)) * scaleFactor * (pmcData['Time_Milliseconds'][end] - pmcData['Time_Milliseconds'][start])
energy = energyBig + energyLittle
if energy <= energyThreshold:
printv("In phase: " + phase)
printv(str(energy) + " energy calculated from (" + str(minPower) + \
" * 0.5*(" + str(maxPower) + "-" + str(minPower) + ")) * " + str(scaleFactor))
printv("scaleFactor = " + str(loadtime) + "/" + str(powmon_sample_period))
printv("loadtime = " + str(jsonData['timestamps'][site][0][loadTypes[index+1]][0]) + " - " + \
str(jsonData['timestamps'][site][0][phase][0]))
if loadtime == 0: # if we didn't get any meaningful data because of a low loadtime
energy = -100 # make sure it gets filtered out
elif start == end -1: # edge case where data is not available
printv("edge case found with phase" + phase)
energy = -100
else:
energy = pmc.calcEnergy(pmcData['Power_A7'][start:end], pmcData['Time_Milliseconds'][start:end])
energy += pmc.calcEnergy(pmcData['Power_A15'][start:end], pmcData['Time_Milliseconds'][start:end])
if energy <= energyThreshold:
printv(str(energy) + " energy calculated from regular integration")
printv(start)
printv(end)
printv(pmcData['Power_A7'][start:end])
printv(pmcData['Power_A15'][start:end])
printv(pmcData['Time_Milliseconds'][start:end])
if (start != end): # if we didn't do an approximation
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = energy
else:
websiteData[coreConfig][govConfig][site][phase]['energy'][iteration] = \
energy*(loadtime/powmon_sample_period)
maxIterations = max(fileIndex,maxIterations)
if cleanData:
cleanupData(websiteData,maxStds=3)
return (websiteData,knownCoreConfigs,maxIterations)
def avgMatrix(timeAndEnergy,iterStart=0,iterStop=0): # update to avged matrix
if iterStop == 0:
iterStop = len(timeAndEnergy['4l-4b']['ii']['amazon']['navigationStart']['energy'])
for coreConfig in coreConfigs:
for phase in phases:
for govConfig in govConfigs:
for site in sites:
timeAndEnergy[coreConfig][govConfig][site][phase]['energy'] = \
np.mean(timeAndEnergy[coreConfig][govConfig][site][phase]['energy'][iterStart:iterStop])
timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'] = \
np.mean(timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'][iterStart:iterStop])
def extractIter(timeAndEnergy,iteration): # TODO refactor timeAndEnergy organization - this is too hardcoded
baseContainer = {'energy':np.zeros((1,)), 'loadtime': np.zeros((1,))}
byLoadType = dict(zip(phases,[deepcopy(baseContainer) for phase in phases]))
bySite = dict(zip(sites,[deepcopy(byLoadType) for site in sites]))
byGov = dict(zip(govConfigs,[deepcopy(bySite) for config in govConfigs]))
iterData = dict(zip(coreConfigs,[deepcopy(byGov) for config in coreConfigs]))
for coreConfig in coreConfigs:
for phase in phases:
for govConfig in govConfigs:
for site in sites:
iterData[coreConfig][govConfig][site][phase]['energy'] = \
timeAndEnergy[coreConfig][govConfig][site][phase]['energy'][iteration]
iterData[coreConfig][govConfig][site][phase]['loadtime'] = \
timeAndEnergy[coreConfig][govConfig][site][phase]['loadtime'][iteration]
return iterData
def writeData(data,filename):
pj.writeData(data,filename)
def readData(filename):
return pj.readData(filename)
if __name__ == "__main__":
print("Processing data...")
data,foundConfigs,maxIterations = parseAndCalcEnergy(filePrefix="sim-data-",iterations=27,cleanData=False)
dataFilename = "sim-data/sim-data-processed.json"
iter0 = extractIter(data,0)
iter1 = extractIter(data,1)
if sys.flags.interactive: # we are in an interactive shell
print("Running in interactive mode: type 'data' to see values generated by this file")
else:
print("Writing data to file...")
pj.writeData([data,foundConfigs,maxIterations],dataFilename,indent=1)
print("Note: Running this file with python -i will allow you to interact directly with the data")
| if (not(warnedIterations)):
print("Warning: additional iteration data found, skipping.")
warnedIterations = True
break # stop if we can't hold anymore data, TODO allow for dynamic number of files | conditional_block |
Terminals.js | dojo.require("dojo._base.html");
function getTerminals() {
dojo.xhrGet({
url: "Terminals.ashx",
preventCache: 1,
handleAs: "json",
error: function() { console.error('Error retrieving terminal data.'); },
load: function (responseObject, ioArgs) {
dojo.empty("TerminalLegendListDiv");
var TerminalLegendList = dojo.byId("TerminalLegendListDiv");
var term_ddlist = dojo.byId("ddlist_terminalAdmin");
var ListContents = "";
for (var i = 0; i < responseObject.LegendItemList.length; i++) {
var myLegendItem = responseObject.LegendItemList[i];
var TerminalLegendListItem = CreateLegendListDivContents(myLegendItem, "background-position:3px 1px;");
ListContents = (ListContents + TerminalLegendListItem);
}
TerminalLegendList.innerHTML = "<table border=\"0\" cellpadding=\"0\" cellspacing=\"0\">" + ListContents + "</table>";
if (term_ddlist != null) {
//clear dropdown options values because they are being added below...
removeAllOptions(term_ddlist);
addOption(term_ddlist, "For a Terminal...", "0");
for (var count = 0; count < responseObject.FeedContentList.length; count++) {
//add dropdown option values to admin tool for updating terminal cartography...
addOption(term_ddlist, responseObject.FeedContentList[count].Terminal.TerminalName, responseObject.FeedContentList[count].Terminal.TerminalID);
}
}
currentTerminalsJson = responseObject.FeedContentList;
currentTerminalsIcon = responseObject.LegendItemList[0].Icon;
//Wait until the zoom levels are completely loaded (in getVessels) before plotting the terminals
setTimeout("RebuildTerminalLayer(map.getLevel())", 500);
}
});
}
/**
* terminals are rebuild when ajax call is made or when user zooms in
*/
function | (zoomLevel) {
// clear the terminal layer or we'll get duplicate graphic objects stacked up on top of each other
terminalLayer.clear();
// set up terminal icon
var iconJSON = { "icon": currentTerminalsIcon, "h": 16, "w": 16, "xOffSet": 0, "yOffSet": 0 };
if ((currentTerminalsJson != null) && (currentTerminalsIcon != null)) {
// iterate through terminals and create markers
for (var count = 0; count < currentTerminalsJson.length; count++) {
var currentTerminal = currentTerminalsJson[count].Terminal;
var currentCamsForTerminal = currentTerminalsJson[count].FerryCameras;
var graphic = CreateMapGraphicTerm(currentTerminal, currentCamsForTerminal, iconJSON, zoomLevel);
terminalLayer.add(graphic);
}
}
}
/**
* creates terminal markers
*/
function CreateMapGraphicTerm(MapMarkerAsJSON, CamArrayAsJSON, iconJSON, zoomLevel) {
// places vessels and labels with offset from actual lat lon location - if nessecary.
var symbol = new esri.symbol.PictureMarkerSymbol(iconJSON.icon, iconJSON.w, iconJSON.h);
symbol.setOffset(iconJSON.xOffSet, iconJSON.yOffSet);
// popup content
var TemplateJSON = CreateInfoTemplateJSONTerm(MapMarkerAsJSON, CamArrayAsJSON);
var info = new esri.InfoTemplate(TemplateJSON);
// point location
var point = null;
// attempt to find a customized lat long for the given zoom level
for (var loccount = 0; loccount < MapMarkerAsJSON.GISZoomLocations.length; loccount++) {
var zoomloc = MapMarkerAsJSON.GISZoomLocations[loccount];
if (zoomloc.Zm == zoomLevel) point = new esri.geometry.Point(zoomloc.Lon, zoomloc.Lat, mySpatialReference);
}
// if no custom lat long for the given zoom level is found, use default values
if (point == null) point = new esri.geometry.Point(MapMarkerAsJSON.Longitude, MapMarkerAsJSON.Latitude, mySpatialReference);
point = esri.geometry.geographicToWebMercator(point);
// set TerminalID into an attribute. I set this value into a global "selectedTerminal" var if the infoWindow is opened - which allows me to keep the infoWindow open upon AJAX refresh.
var attribute = { "TerminalID": MapMarkerAsJSON.TerminalID, "TerminalName": MapMarkerAsJSON.TerminalName };
var graphic = new esri.Graphic(point, symbol, attribute, info);
MoveAndUpdateInfoTemplateTerm(MapMarkerAsJSON, CamArrayAsJSON, graphic, TemplateJSON);
return graphic;
}
function MoveAndUpdateInfoTemplateTerm(MapMarkerAsJSON, CamArrayAsJSON, graphic, TemplateJSON) {
if (selectedTerminal == MapMarkerAsJSON.TerminalID) {
map.infoWindow.coords = map.toScreen(graphic.geometry);
map.infoWindow.move(map.toScreen(graphic.geometry));
map.infoWindow.setContent(TemplateJSON.content);
map.infoWindow.setTitle(TemplateJSON.title);
if (selectedTerminalTab == 1) {
//Wait times - not the default
var waittimelinkid = "waittimelink" + MapMarkerAsJSON.TerminalID;
InfoLinkClickedTerm(waittimelinkid, MapMarkerAsJSON.TerminalID);
}
}
}
/**
* builds out the html for the template Title and Contents - when you click a vessel on the map, this displays in the window that pops open.
*/
function CreateInfoTemplateJSONTerm(MapMarkerAsJSON, CamArrayAsJSON) {
var InfoTemplateContents = CreateInfoTemplateContentsTerm(MapMarkerAsJSON, CamArrayAsJSON);
var TitleName = MapMarkerAsJSON.TerminalName;
var InfoTemplateTitle = ("<div id='termPopTitle' class='popTitle'>" + TitleName + "</div>");
var TemplateJSON = { "title": InfoTemplateTitle, "content": InfoTemplateContents };
return TemplateJSON;
}
function InfoLinkClickedTerm(linkid, terminalid) {
// called when the "Cameras" or "Wait Time" links are clicked in the info window popup
var camlink = 'camlink' + terminalid.toString();
var camlayer = 'camlayer' + terminalid.toString();
var waittimelink = 'waittimelink' + terminalid.toString();
var waittimelayer = 'waittimelayer' + terminalid.toString();
if ((dojo.byId(camlink) != null) && (dojo.byId(camlayer) != null) && (dojo.byId(waittimelink) != null) && (dojo.byId(waittimelayer) != null)) {
if (linkid == 'camlink' + terminalid.toString()) {
// the cam link was clicked
dojo.removeClass(camlink, 'infolink');
dojo.addClass(camlink, 'infolinkactive');
dojo.removeClass(camlayer, 'infolayer');
dojo.addClass(camlayer, 'infolayeractive');
dojo.removeClass(waittimelink, 'infolinkactive');
dojo.addClass(waittimelink, 'infolink');
dojo.removeClass(waittimelayer, 'infolayeractive');
dojo.addClass(waittimelayer, 'infolayer');
selectedTerminalTab = 0;
} else {
// the wait time link was clicked
dojo.removeClass(camlink, 'infolinkactive');
dojo.addClass(camlink, 'infolink');
dojo.removeClass(camlayer, 'infolayeractive');
dojo.addClass(camlayer, 'infolayer');
dojo.removeClass(waittimelink, 'infolink');
dojo.addClass(waittimelink, 'infolinkactive');
dojo.removeClass(waittimelayer, 'infolayer');
dojo.addClass(waittimelayer, 'infolayeractive');
selectedTerminalTab = 1;
}
}
}
/**
* builds out the html for the popup terminal content
*/
function CreateInfoTemplateContentsTerm(MapMarkerAsJSON, CamArrayAsJSON) {
InfoTemplateContents = "<div id='termPopContainer'>";
var waittimelinkclass = 'infolinkactive';
var waittimelayerclass = 'infolayeractive';
if ((CamArrayAsJSON != null) && (CamArrayAsJSON.length > 0)) {
waittimelinkclass = 'infolink';
waittimelayerclass = 'infolayer';
InfoTemplateContents += "<a onclick='return InfoLinkClickedTerm(this.id, " + MapMarkerAsJSON.TerminalID.toString() + ");' id='camlink" + MapMarkerAsJSON.TerminalID.toString() + "' class='infolinkactive'>Cameras</a> | ";
}
if ((MapMarkerAsJSON.WaitTimes != null) && (MapMarkerAsJSON.WaitTimes.length > 0))
InfoTemplateContents += "<a onclick='return InfoLinkClickedTerm(this.id, " + MapMarkerAsJSON.TerminalID.toString() + ");' id='waittimelink" + MapMarkerAsJSON.TerminalID.toString() + "' class='" + waittimelinkclass + "'>Wait Time</a> | ";
InfoTemplateContents += "<a class='infolink' href='./TerminalDetail.aspx?terminalid=" + MapMarkerAsJSON.TerminalID + "'>More »</a>";
InfoTemplateContents += "<br /><br />";
if ((CamArrayAsJSON != null) && (CamArrayAsJSON.length > 0)) {
InfoTemplateContents += "<div id='camlayer" + MapMarkerAsJSON.TerminalID.toString() + "' class='infolayeractive'>";
// iterate through and show cams for each terminal in the popup
for (var count = 0; count < CamArrayAsJSON.length; count++) {
var currentCam = CamArrayAsJSON[count];
if (count > 0)
InfoTemplateContents += "<br /><br />";
if ((currentCam.CamOwner != "") && (currentCam.CamOwner != null) && (currentCam.OwnerURL != "") && (currentCam.OwnerURL != null))
InfoTemplateContents += "<strong>" + currentCam.Title + " (<a href='" + currentCam.OwnerURL + "' target='_blank'>" + currentCam.CamOwner + "</a>)</strong><a href='" + currentCam.OwnerURL + "' target='_blank'><img width='200' style='border: 1px solid #787878;' alt='" + currentCam.Title + "' src='" + currentCam.ImgURL + "' /></a>";
else
InfoTemplateContents += "<strong>" + currentCam.Title + "</strong>" +
"<a href='./TerminalDetail.aspx?terminalid=" + MapMarkerAsJSON.TerminalID + "#cam" + currentCam.CamID + "'><img style='border: 1px solid #787878;' src='" + currentCam.ImgURL + "?" + new Date().getTime() + "' width='200' /></a>";
}
InfoTemplateContents += "</div>";
}
// add wait time info and a link for more info
if ((MapMarkerAsJSON.WaitTimes != null) && (MapMarkerAsJSON.WaitTimes.length > 0)) {
InfoTemplateContents += "<div id='waittimelayer" + MapMarkerAsJSON.TerminalID.toString() + "' class='" + waittimelayerclass + "'>";
for (var waitcount = 0; waitcount < MapMarkerAsJSON.WaitTimes.length; waitcount++) {
if (waitcount > 0) InfoTemplateContents += "<br /><br />";
var waititem = MapMarkerAsJSON.WaitTimes[waitcount];
if ((waititem.RouteName != null) && (waititem.RouteName != '')) InfoTemplateContents += "<strong>" + waititem.RouteName + "</strong> - ";
InfoTemplateContents += StripHtml(waititem.WaitTimeNotes);
if ((waititem.WaitTimeLastUpdated != null) && (waititem.WaitTimeLastUpdated != '')) InfoTemplateContents += "<br />[Last Updated: " + wsfFormatDate(waititem.WaitTimeLastUpdated) + "]";
}
InfoTemplateContents += "</div>";
}
InfoTemplateContents += "</div>";
return InfoTemplateContents;
} | RebuildTerminalLayer | identifier_name |
Terminals.js | dojo.require("dojo._base.html");
function getTerminals() {
dojo.xhrGet({
url: "Terminals.ashx",
preventCache: 1,
handleAs: "json",
error: function() { console.error('Error retrieving terminal data.'); },
load: function (responseObject, ioArgs) {
dojo.empty("TerminalLegendListDiv");
var TerminalLegendList = dojo.byId("TerminalLegendListDiv");
var term_ddlist = dojo.byId("ddlist_terminalAdmin");
var ListContents = "";
for (var i = 0; i < responseObject.LegendItemList.length; i++) {
var myLegendItem = responseObject.LegendItemList[i];
var TerminalLegendListItem = CreateLegendListDivContents(myLegendItem, "background-position:3px 1px;");
ListContents = (ListContents + TerminalLegendListItem);
}
TerminalLegendList.innerHTML = "<table border=\"0\" cellpadding=\"0\" cellspacing=\"0\">" + ListContents + "</table>";
if (term_ddlist != null) {
//clear dropdown options values because they are being added below...
removeAllOptions(term_ddlist);
addOption(term_ddlist, "For a Terminal...", "0");
for (var count = 0; count < responseObject.FeedContentList.length; count++) {
//add dropdown option values to admin tool for updating terminal cartography...
addOption(term_ddlist, responseObject.FeedContentList[count].Terminal.TerminalName, responseObject.FeedContentList[count].Terminal.TerminalID);
}
}
currentTerminalsJson = responseObject.FeedContentList;
currentTerminalsIcon = responseObject.LegendItemList[0].Icon;
//Wait until the zoom levels are completely loaded (in getVessels) before plotting the terminals
setTimeout("RebuildTerminalLayer(map.getLevel())", 500);
}
});
}
/**
* terminals are rebuild when ajax call is made or when user zooms in
*/
function RebuildTerminalLayer(zoomLevel) {
// clear the terminal layer or we'll get duplicate graphic objects stacked up on top of each other
terminalLayer.clear();
// set up terminal icon
var iconJSON = { "icon": currentTerminalsIcon, "h": 16, "w": 16, "xOffSet": 0, "yOffSet": 0 };
if ((currentTerminalsJson != null) && (currentTerminalsIcon != null)) {
// iterate through terminals and create markers
for (var count = 0; count < currentTerminalsJson.length; count++) {
var currentTerminal = currentTerminalsJson[count].Terminal;
var currentCamsForTerminal = currentTerminalsJson[count].FerryCameras;
var graphic = CreateMapGraphicTerm(currentTerminal, currentCamsForTerminal, iconJSON, zoomLevel);
terminalLayer.add(graphic);
}
}
}
/**
* creates terminal markers
*/
function CreateMapGraphicTerm(MapMarkerAsJSON, CamArrayAsJSON, iconJSON, zoomLevel) {
// places vessels and labels with offset from actual lat lon location - if nessecary.
var symbol = new esri.symbol.PictureMarkerSymbol(iconJSON.icon, iconJSON.w, iconJSON.h);
symbol.setOffset(iconJSON.xOffSet, iconJSON.yOffSet);
// popup content
var TemplateJSON = CreateInfoTemplateJSONTerm(MapMarkerAsJSON, CamArrayAsJSON);
var info = new esri.InfoTemplate(TemplateJSON);
// point location
var point = null;
// attempt to find a customized lat long for the given zoom level
for (var loccount = 0; loccount < MapMarkerAsJSON.GISZoomLocations.length; loccount++) {
var zoomloc = MapMarkerAsJSON.GISZoomLocations[loccount];
if (zoomloc.Zm == zoomLevel) point = new esri.geometry.Point(zoomloc.Lon, zoomloc.Lat, mySpatialReference);
}
// if no custom lat long for the given zoom level is found, use default values
if (point == null) point = new esri.geometry.Point(MapMarkerAsJSON.Longitude, MapMarkerAsJSON.Latitude, mySpatialReference);
point = esri.geometry.geographicToWebMercator(point);
// set TerminalID into an attribute. I set this value into a global "selectedTerminal" var if the infoWindow is opened - which allows me to keep the infoWindow open upon AJAX refresh.
var attribute = { "TerminalID": MapMarkerAsJSON.TerminalID, "TerminalName": MapMarkerAsJSON.TerminalName };
var graphic = new esri.Graphic(point, symbol, attribute, info);
MoveAndUpdateInfoTemplateTerm(MapMarkerAsJSON, CamArrayAsJSON, graphic, TemplateJSON);
return graphic;
}
function MoveAndUpdateInfoTemplateTerm(MapMarkerAsJSON, CamArrayAsJSON, graphic, TemplateJSON) {
if (selectedTerminal == MapMarkerAsJSON.TerminalID) {
map.infoWindow.coords = map.toScreen(graphic.geometry);
map.infoWindow.move(map.toScreen(graphic.geometry));
map.infoWindow.setContent(TemplateJSON.content);
map.infoWindow.setTitle(TemplateJSON.title);
if (selectedTerminalTab == 1) {
//Wait times - not the default
var waittimelinkid = "waittimelink" + MapMarkerAsJSON.TerminalID;
InfoLinkClickedTerm(waittimelinkid, MapMarkerAsJSON.TerminalID);
}
}
}
/**
* builds out the html for the template Title and Contents - when you click a vessel on the map, this displays in the window that pops open.
*/
function CreateInfoTemplateJSONTerm(MapMarkerAsJSON, CamArrayAsJSON) {
var InfoTemplateContents = CreateInfoTemplateContentsTerm(MapMarkerAsJSON, CamArrayAsJSON);
var TitleName = MapMarkerAsJSON.TerminalName;
var InfoTemplateTitle = ("<div id='termPopTitle' class='popTitle'>" + TitleName + "</div>");
var TemplateJSON = { "title": InfoTemplateTitle, "content": InfoTemplateContents };
return TemplateJSON;
}
function InfoLinkClickedTerm(linkid, terminalid) {
// called when the "Cameras" or "Wait Time" links are clicked in the info window popup
var camlink = 'camlink' + terminalid.toString();
var camlayer = 'camlayer' + terminalid.toString();
var waittimelink = 'waittimelink' + terminalid.toString();
var waittimelayer = 'waittimelayer' + terminalid.toString();
if ((dojo.byId(camlink) != null) && (dojo.byId(camlayer) != null) && (dojo.byId(waittimelink) != null) && (dojo.byId(waittimelayer) != null)) {
if (linkid == 'camlink' + terminalid.toString()) {
// the cam link was clicked
dojo.removeClass(camlink, 'infolink');
dojo.addClass(camlink, 'infolinkactive');
dojo.removeClass(camlayer, 'infolayer');
dojo.addClass(camlayer, 'infolayeractive');
dojo.removeClass(waittimelink, 'infolinkactive');
dojo.addClass(waittimelink, 'infolink');
dojo.removeClass(waittimelayer, 'infolayeractive');
dojo.addClass(waittimelayer, 'infolayer');
selectedTerminalTab = 0;
} else {
// the wait time link was clicked
dojo.removeClass(camlink, 'infolinkactive');
dojo.addClass(camlink, 'infolink');
dojo.removeClass(camlayer, 'infolayeractive');
dojo.addClass(camlayer, 'infolayer');
dojo.removeClass(waittimelink, 'infolink');
dojo.addClass(waittimelink, 'infolinkactive');
dojo.removeClass(waittimelayer, 'infolayer');
dojo.addClass(waittimelayer, 'infolayeractive');
selectedTerminalTab = 1;
}
}
}
/**
* builds out the html for the popup terminal content
*/
function CreateInfoTemplateContentsTerm(MapMarkerAsJSON, CamArrayAsJSON) {
InfoTemplateContents = "<div id='termPopContainer'>";
var waittimelinkclass = 'infolinkactive';
var waittimelayerclass = 'infolayeractive';
if ((CamArrayAsJSON != null) && (CamArrayAsJSON.length > 0)) {
waittimelinkclass = 'infolink';
waittimelayerclass = 'infolayer';
InfoTemplateContents += "<a onclick='return InfoLinkClickedTerm(this.id, " + MapMarkerAsJSON.TerminalID.toString() + ");' id='camlink" + MapMarkerAsJSON.TerminalID.toString() + "' class='infolinkactive'>Cameras</a> | ";
}
if ((MapMarkerAsJSON.WaitTimes != null) && (MapMarkerAsJSON.WaitTimes.length > 0))
InfoTemplateContents += "<a onclick='return InfoLinkClickedTerm(this.id, " + MapMarkerAsJSON.TerminalID.toString() + ");' id='waittimelink" + MapMarkerAsJSON.TerminalID.toString() + "' class='" + waittimelinkclass + "'>Wait Time</a> | ";
InfoTemplateContents += "<a class='infolink' href='./TerminalDetail.aspx?terminalid=" + MapMarkerAsJSON.TerminalID + "'>More »</a>";
InfoTemplateContents += "<br /><br />";
if ((CamArrayAsJSON != null) && (CamArrayAsJSON.length > 0)) |
// add wait time info and a link for more info
if ((MapMarkerAsJSON.WaitTimes != null) && (MapMarkerAsJSON.WaitTimes.length > 0)) {
InfoTemplateContents += "<div id='waittimelayer" + MapMarkerAsJSON.TerminalID.toString() + "' class='" + waittimelayerclass + "'>";
for (var waitcount = 0; waitcount < MapMarkerAsJSON.WaitTimes.length; waitcount++) {
if (waitcount > 0) InfoTemplateContents += "<br /><br />";
var waititem = MapMarkerAsJSON.WaitTimes[waitcount];
if ((waititem.RouteName != null) && (waititem.RouteName != '')) InfoTemplateContents += "<strong>" + waititem.RouteName + "</strong> - ";
InfoTemplateContents += StripHtml(waititem.WaitTimeNotes);
if ((waititem.WaitTimeLastUpdated != null) && (waititem.WaitTimeLastUpdated != '')) InfoTemplateContents += "<br />[Last Updated: " + wsfFormatDate(waititem.WaitTimeLastUpdated) + "]";
}
InfoTemplateContents += "</div>";
}
InfoTemplateContents += "</div>";
return InfoTemplateContents;
} | {
InfoTemplateContents += "<div id='camlayer" + MapMarkerAsJSON.TerminalID.toString() + "' class='infolayeractive'>";
// iterate through and show cams for each terminal in the popup
for (var count = 0; count < CamArrayAsJSON.length; count++) {
var currentCam = CamArrayAsJSON[count];
if (count > 0)
InfoTemplateContents += "<br /><br />";
if ((currentCam.CamOwner != "") && (currentCam.CamOwner != null) && (currentCam.OwnerURL != "") && (currentCam.OwnerURL != null))
InfoTemplateContents += "<strong>" + currentCam.Title + " (<a href='" + currentCam.OwnerURL + "' target='_blank'>" + currentCam.CamOwner + "</a>)</strong><a href='" + currentCam.OwnerURL + "' target='_blank'><img width='200' style='border: 1px solid #787878;' alt='" + currentCam.Title + "' src='" + currentCam.ImgURL + "' /></a>";
else
InfoTemplateContents += "<strong>" + currentCam.Title + "</strong>" +
"<a href='./TerminalDetail.aspx?terminalid=" + MapMarkerAsJSON.TerminalID + "#cam" + currentCam.CamID + "'><img style='border: 1px solid #787878;' src='" + currentCam.ImgURL + "?" + new Date().getTime() + "' width='200' /></a>";
}
InfoTemplateContents += "</div>";
} | conditional_block |
Terminals.js | dojo.require("dojo._base.html");
function getTerminals() |
/**
* terminals are rebuild when ajax call is made or when user zooms in
*/
function RebuildTerminalLayer(zoomLevel) {
// clear the terminal layer or we'll get duplicate graphic objects stacked up on top of each other
terminalLayer.clear();
// set up terminal icon
var iconJSON = { "icon": currentTerminalsIcon, "h": 16, "w": 16, "xOffSet": 0, "yOffSet": 0 };
if ((currentTerminalsJson != null) && (currentTerminalsIcon != null)) {
// iterate through terminals and create markers
for (var count = 0; count < currentTerminalsJson.length; count++) {
var currentTerminal = currentTerminalsJson[count].Terminal;
var currentCamsForTerminal = currentTerminalsJson[count].FerryCameras;
var graphic = CreateMapGraphicTerm(currentTerminal, currentCamsForTerminal, iconJSON, zoomLevel);
terminalLayer.add(graphic);
}
}
}
/**
* creates terminal markers
*/
function CreateMapGraphicTerm(MapMarkerAsJSON, CamArrayAsJSON, iconJSON, zoomLevel) {
// places vessels and labels with offset from actual lat lon location - if nessecary.
var symbol = new esri.symbol.PictureMarkerSymbol(iconJSON.icon, iconJSON.w, iconJSON.h);
symbol.setOffset(iconJSON.xOffSet, iconJSON.yOffSet);
// popup content
var TemplateJSON = CreateInfoTemplateJSONTerm(MapMarkerAsJSON, CamArrayAsJSON);
var info = new esri.InfoTemplate(TemplateJSON);
// point location
var point = null;
// attempt to find a customized lat long for the given zoom level
for (var loccount = 0; loccount < MapMarkerAsJSON.GISZoomLocations.length; loccount++) {
var zoomloc = MapMarkerAsJSON.GISZoomLocations[loccount];
if (zoomloc.Zm == zoomLevel) point = new esri.geometry.Point(zoomloc.Lon, zoomloc.Lat, mySpatialReference);
}
// if no custom lat long for the given zoom level is found, use default values
if (point == null) point = new esri.geometry.Point(MapMarkerAsJSON.Longitude, MapMarkerAsJSON.Latitude, mySpatialReference);
point = esri.geometry.geographicToWebMercator(point);
// set TerminalID into an attribute. I set this value into a global "selectedTerminal" var if the infoWindow is opened - which allows me to keep the infoWindow open upon AJAX refresh.
var attribute = { "TerminalID": MapMarkerAsJSON.TerminalID, "TerminalName": MapMarkerAsJSON.TerminalName };
var graphic = new esri.Graphic(point, symbol, attribute, info);
MoveAndUpdateInfoTemplateTerm(MapMarkerAsJSON, CamArrayAsJSON, graphic, TemplateJSON);
return graphic;
}
function MoveAndUpdateInfoTemplateTerm(MapMarkerAsJSON, CamArrayAsJSON, graphic, TemplateJSON) {
if (selectedTerminal == MapMarkerAsJSON.TerminalID) {
map.infoWindow.coords = map.toScreen(graphic.geometry);
map.infoWindow.move(map.toScreen(graphic.geometry));
map.infoWindow.setContent(TemplateJSON.content);
map.infoWindow.setTitle(TemplateJSON.title);
if (selectedTerminalTab == 1) {
//Wait times - not the default
var waittimelinkid = "waittimelink" + MapMarkerAsJSON.TerminalID;
InfoLinkClickedTerm(waittimelinkid, MapMarkerAsJSON.TerminalID);
}
}
}
/**
* builds out the html for the template Title and Contents - when you click a vessel on the map, this displays in the window that pops open.
*/
function CreateInfoTemplateJSONTerm(MapMarkerAsJSON, CamArrayAsJSON) {
var InfoTemplateContents = CreateInfoTemplateContentsTerm(MapMarkerAsJSON, CamArrayAsJSON);
var TitleName = MapMarkerAsJSON.TerminalName;
var InfoTemplateTitle = ("<div id='termPopTitle' class='popTitle'>" + TitleName + "</div>");
var TemplateJSON = { "title": InfoTemplateTitle, "content": InfoTemplateContents };
return TemplateJSON;
}
function InfoLinkClickedTerm(linkid, terminalid) {
// called when the "Cameras" or "Wait Time" links are clicked in the info window popup
var camlink = 'camlink' + terminalid.toString();
var camlayer = 'camlayer' + terminalid.toString();
var waittimelink = 'waittimelink' + terminalid.toString();
var waittimelayer = 'waittimelayer' + terminalid.toString();
if ((dojo.byId(camlink) != null) && (dojo.byId(camlayer) != null) && (dojo.byId(waittimelink) != null) && (dojo.byId(waittimelayer) != null)) {
if (linkid == 'camlink' + terminalid.toString()) {
// the cam link was clicked
dojo.removeClass(camlink, 'infolink');
dojo.addClass(camlink, 'infolinkactive');
dojo.removeClass(camlayer, 'infolayer');
dojo.addClass(camlayer, 'infolayeractive');
dojo.removeClass(waittimelink, 'infolinkactive');
dojo.addClass(waittimelink, 'infolink');
dojo.removeClass(waittimelayer, 'infolayeractive');
dojo.addClass(waittimelayer, 'infolayer');
selectedTerminalTab = 0;
} else {
// the wait time link was clicked
dojo.removeClass(camlink, 'infolinkactive');
dojo.addClass(camlink, 'infolink');
dojo.removeClass(camlayer, 'infolayeractive');
dojo.addClass(camlayer, 'infolayer');
dojo.removeClass(waittimelink, 'infolink');
dojo.addClass(waittimelink, 'infolinkactive');
dojo.removeClass(waittimelayer, 'infolayer');
dojo.addClass(waittimelayer, 'infolayeractive');
selectedTerminalTab = 1;
}
}
}
/**
* builds out the html for the popup terminal content
*/
function CreateInfoTemplateContentsTerm(MapMarkerAsJSON, CamArrayAsJSON) {
InfoTemplateContents = "<div id='termPopContainer'>";
var waittimelinkclass = 'infolinkactive';
var waittimelayerclass = 'infolayeractive';
if ((CamArrayAsJSON != null) && (CamArrayAsJSON.length > 0)) {
waittimelinkclass = 'infolink';
waittimelayerclass = 'infolayer';
InfoTemplateContents += "<a onclick='return InfoLinkClickedTerm(this.id, " + MapMarkerAsJSON.TerminalID.toString() + ");' id='camlink" + MapMarkerAsJSON.TerminalID.toString() + "' class='infolinkactive'>Cameras</a> | ";
}
if ((MapMarkerAsJSON.WaitTimes != null) && (MapMarkerAsJSON.WaitTimes.length > 0))
InfoTemplateContents += "<a onclick='return InfoLinkClickedTerm(this.id, " + MapMarkerAsJSON.TerminalID.toString() + ");' id='waittimelink" + MapMarkerAsJSON.TerminalID.toString() + "' class='" + waittimelinkclass + "'>Wait Time</a> | ";
InfoTemplateContents += "<a class='infolink' href='./TerminalDetail.aspx?terminalid=" + MapMarkerAsJSON.TerminalID + "'>More »</a>";
InfoTemplateContents += "<br /><br />";
if ((CamArrayAsJSON != null) && (CamArrayAsJSON.length > 0)) {
InfoTemplateContents += "<div id='camlayer" + MapMarkerAsJSON.TerminalID.toString() + "' class='infolayeractive'>";
// iterate through and show cams for each terminal in the popup
for (var count = 0; count < CamArrayAsJSON.length; count++) {
var currentCam = CamArrayAsJSON[count];
if (count > 0)
InfoTemplateContents += "<br /><br />";
if ((currentCam.CamOwner != "") && (currentCam.CamOwner != null) && (currentCam.OwnerURL != "") && (currentCam.OwnerURL != null))
InfoTemplateContents += "<strong>" + currentCam.Title + " (<a href='" + currentCam.OwnerURL + "' target='_blank'>" + currentCam.CamOwner + "</a>)</strong><a href='" + currentCam.OwnerURL + "' target='_blank'><img width='200' style='border: 1px solid #787878;' alt='" + currentCam.Title + "' src='" + currentCam.ImgURL + "' /></a>";
else
InfoTemplateContents += "<strong>" + currentCam.Title + "</strong>" +
"<a href='./TerminalDetail.aspx?terminalid=" + MapMarkerAsJSON.TerminalID + "#cam" + currentCam.CamID + "'><img style='border: 1px solid #787878;' src='" + currentCam.ImgURL + "?" + new Date().getTime() + "' width='200' /></a>";
}
InfoTemplateContents += "</div>";
}
// add wait time info and a link for more info
if ((MapMarkerAsJSON.WaitTimes != null) && (MapMarkerAsJSON.WaitTimes.length > 0)) {
InfoTemplateContents += "<div id='waittimelayer" + MapMarkerAsJSON.TerminalID.toString() + "' class='" + waittimelayerclass + "'>";
for (var waitcount = 0; waitcount < MapMarkerAsJSON.WaitTimes.length; waitcount++) {
if (waitcount > 0) InfoTemplateContents += "<br /><br />";
var waititem = MapMarkerAsJSON.WaitTimes[waitcount];
if ((waititem.RouteName != null) && (waititem.RouteName != '')) InfoTemplateContents += "<strong>" + waititem.RouteName + "</strong> - ";
InfoTemplateContents += StripHtml(waititem.WaitTimeNotes);
if ((waititem.WaitTimeLastUpdated != null) && (waititem.WaitTimeLastUpdated != '')) InfoTemplateContents += "<br />[Last Updated: " + wsfFormatDate(waititem.WaitTimeLastUpdated) + "]";
}
InfoTemplateContents += "</div>";
}
InfoTemplateContents += "</div>";
return InfoTemplateContents;
} | {
dojo.xhrGet({
url: "Terminals.ashx",
preventCache: 1,
handleAs: "json",
error: function() { console.error('Error retrieving terminal data.'); },
load: function (responseObject, ioArgs) {
dojo.empty("TerminalLegendListDiv");
var TerminalLegendList = dojo.byId("TerminalLegendListDiv");
var term_ddlist = dojo.byId("ddlist_terminalAdmin");
var ListContents = "";
for (var i = 0; i < responseObject.LegendItemList.length; i++) {
var myLegendItem = responseObject.LegendItemList[i];
var TerminalLegendListItem = CreateLegendListDivContents(myLegendItem, "background-position:3px 1px;");
ListContents = (ListContents + TerminalLegendListItem);
}
TerminalLegendList.innerHTML = "<table border=\"0\" cellpadding=\"0\" cellspacing=\"0\">" + ListContents + "</table>";
if (term_ddlist != null) {
//clear dropdown options values because they are being added below...
removeAllOptions(term_ddlist);
addOption(term_ddlist, "For a Terminal...", "0");
for (var count = 0; count < responseObject.FeedContentList.length; count++) {
//add dropdown option values to admin tool for updating terminal cartography...
addOption(term_ddlist, responseObject.FeedContentList[count].Terminal.TerminalName, responseObject.FeedContentList[count].Terminal.TerminalID);
}
}
currentTerminalsJson = responseObject.FeedContentList;
currentTerminalsIcon = responseObject.LegendItemList[0].Icon;
//Wait until the zoom levels are completely loaded (in getVessels) before plotting the terminals
setTimeout("RebuildTerminalLayer(map.getLevel())", 500);
}
});
} | identifier_body |
Terminals.js | dojo.require("dojo._base.html");
function getTerminals() {
dojo.xhrGet({
url: "Terminals.ashx",
preventCache: 1,
handleAs: "json",
error: function() { console.error('Error retrieving terminal data.'); },
load: function (responseObject, ioArgs) {
dojo.empty("TerminalLegendListDiv");
var TerminalLegendList = dojo.byId("TerminalLegendListDiv");
var term_ddlist = dojo.byId("ddlist_terminalAdmin");
var ListContents = "";
for (var i = 0; i < responseObject.LegendItemList.length; i++) {
var myLegendItem = responseObject.LegendItemList[i];
var TerminalLegendListItem = CreateLegendListDivContents(myLegendItem, "background-position:3px 1px;");
ListContents = (ListContents + TerminalLegendListItem);
}
TerminalLegendList.innerHTML = "<table border=\"0\" cellpadding=\"0\" cellspacing=\"0\">" + ListContents + "</table>";
if (term_ddlist != null) {
//clear dropdown options values because they are being added below...
removeAllOptions(term_ddlist);
addOption(term_ddlist, "For a Terminal...", "0");
for (var count = 0; count < responseObject.FeedContentList.length; count++) {
//add dropdown option values to admin tool for updating terminal cartography...
addOption(term_ddlist, responseObject.FeedContentList[count].Terminal.TerminalName, responseObject.FeedContentList[count].Terminal.TerminalID);
}
}
currentTerminalsJson = responseObject.FeedContentList;
currentTerminalsIcon = responseObject.LegendItemList[0].Icon;
//Wait until the zoom levels are completely loaded (in getVessels) before plotting the terminals
setTimeout("RebuildTerminalLayer(map.getLevel())", 500);
}
});
}
/**
* terminals are rebuild when ajax call is made or when user zooms in
*/
function RebuildTerminalLayer(zoomLevel) {
// clear the terminal layer or we'll get duplicate graphic objects stacked up on top of each other
terminalLayer.clear();
// set up terminal icon
var iconJSON = { "icon": currentTerminalsIcon, "h": 16, "w": 16, "xOffSet": 0, "yOffSet": 0 };
if ((currentTerminalsJson != null) && (currentTerminalsIcon != null)) {
// iterate through terminals and create markers
for (var count = 0; count < currentTerminalsJson.length; count++) {
var currentTerminal = currentTerminalsJson[count].Terminal;
var currentCamsForTerminal = currentTerminalsJson[count].FerryCameras;
var graphic = CreateMapGraphicTerm(currentTerminal, currentCamsForTerminal, iconJSON, zoomLevel);
terminalLayer.add(graphic);
}
}
}
/**
* creates terminal markers
*/
function CreateMapGraphicTerm(MapMarkerAsJSON, CamArrayAsJSON, iconJSON, zoomLevel) {
// places vessels and labels with offset from actual lat lon location - if nessecary.
var symbol = new esri.symbol.PictureMarkerSymbol(iconJSON.icon, iconJSON.w, iconJSON.h);
symbol.setOffset(iconJSON.xOffSet, iconJSON.yOffSet);
// popup content
var TemplateJSON = CreateInfoTemplateJSONTerm(MapMarkerAsJSON, CamArrayAsJSON);
var info = new esri.InfoTemplate(TemplateJSON);
// point location
var point = null;
// attempt to find a customized lat long for the given zoom level
for (var loccount = 0; loccount < MapMarkerAsJSON.GISZoomLocations.length; loccount++) {
var zoomloc = MapMarkerAsJSON.GISZoomLocations[loccount];
if (zoomloc.Zm == zoomLevel) point = new esri.geometry.Point(zoomloc.Lon, zoomloc.Lat, mySpatialReference);
}
// if no custom lat long for the given zoom level is found, use default values
if (point == null) point = new esri.geometry.Point(MapMarkerAsJSON.Longitude, MapMarkerAsJSON.Latitude, mySpatialReference);
point = esri.geometry.geographicToWebMercator(point);
// set TerminalID into an attribute. I set this value into a global "selectedTerminal" var if the infoWindow is opened - which allows me to keep the infoWindow open upon AJAX refresh.
var attribute = { "TerminalID": MapMarkerAsJSON.TerminalID, "TerminalName": MapMarkerAsJSON.TerminalName };
var graphic = new esri.Graphic(point, symbol, attribute, info);
MoveAndUpdateInfoTemplateTerm(MapMarkerAsJSON, CamArrayAsJSON, graphic, TemplateJSON);
return graphic;
}
function MoveAndUpdateInfoTemplateTerm(MapMarkerAsJSON, CamArrayAsJSON, graphic, TemplateJSON) {
if (selectedTerminal == MapMarkerAsJSON.TerminalID) {
map.infoWindow.coords = map.toScreen(graphic.geometry);
map.infoWindow.move(map.toScreen(graphic.geometry));
map.infoWindow.setContent(TemplateJSON.content);
map.infoWindow.setTitle(TemplateJSON.title);
if (selectedTerminalTab == 1) {
//Wait times - not the default
var waittimelinkid = "waittimelink" + MapMarkerAsJSON.TerminalID; |
/**
* builds out the html for the template Title and Contents - when you click a vessel on the map, this displays in the window that pops open.
*/
function CreateInfoTemplateJSONTerm(MapMarkerAsJSON, CamArrayAsJSON) {
var InfoTemplateContents = CreateInfoTemplateContentsTerm(MapMarkerAsJSON, CamArrayAsJSON);
var TitleName = MapMarkerAsJSON.TerminalName;
var InfoTemplateTitle = ("<div id='termPopTitle' class='popTitle'>" + TitleName + "</div>");
var TemplateJSON = { "title": InfoTemplateTitle, "content": InfoTemplateContents };
return TemplateJSON;
}
function InfoLinkClickedTerm(linkid, terminalid) {
// called when the "Cameras" or "Wait Time" links are clicked in the info window popup
var camlink = 'camlink' + terminalid.toString();
var camlayer = 'camlayer' + terminalid.toString();
var waittimelink = 'waittimelink' + terminalid.toString();
var waittimelayer = 'waittimelayer' + terminalid.toString();
if ((dojo.byId(camlink) != null) && (dojo.byId(camlayer) != null) && (dojo.byId(waittimelink) != null) && (dojo.byId(waittimelayer) != null)) {
if (linkid == 'camlink' + terminalid.toString()) {
// the cam link was clicked
dojo.removeClass(camlink, 'infolink');
dojo.addClass(camlink, 'infolinkactive');
dojo.removeClass(camlayer, 'infolayer');
dojo.addClass(camlayer, 'infolayeractive');
dojo.removeClass(waittimelink, 'infolinkactive');
dojo.addClass(waittimelink, 'infolink');
dojo.removeClass(waittimelayer, 'infolayeractive');
dojo.addClass(waittimelayer, 'infolayer');
selectedTerminalTab = 0;
} else {
// the wait time link was clicked
dojo.removeClass(camlink, 'infolinkactive');
dojo.addClass(camlink, 'infolink');
dojo.removeClass(camlayer, 'infolayeractive');
dojo.addClass(camlayer, 'infolayer');
dojo.removeClass(waittimelink, 'infolink');
dojo.addClass(waittimelink, 'infolinkactive');
dojo.removeClass(waittimelayer, 'infolayer');
dojo.addClass(waittimelayer, 'infolayeractive');
selectedTerminalTab = 1;
}
}
}
/**
* builds out the html for the popup terminal content
*/
function CreateInfoTemplateContentsTerm(MapMarkerAsJSON, CamArrayAsJSON) {
InfoTemplateContents = "<div id='termPopContainer'>";
var waittimelinkclass = 'infolinkactive';
var waittimelayerclass = 'infolayeractive';
if ((CamArrayAsJSON != null) && (CamArrayAsJSON.length > 0)) {
waittimelinkclass = 'infolink';
waittimelayerclass = 'infolayer';
InfoTemplateContents += "<a onclick='return InfoLinkClickedTerm(this.id, " + MapMarkerAsJSON.TerminalID.toString() + ");' id='camlink" + MapMarkerAsJSON.TerminalID.toString() + "' class='infolinkactive'>Cameras</a> | ";
}
if ((MapMarkerAsJSON.WaitTimes != null) && (MapMarkerAsJSON.WaitTimes.length > 0))
InfoTemplateContents += "<a onclick='return InfoLinkClickedTerm(this.id, " + MapMarkerAsJSON.TerminalID.toString() + ");' id='waittimelink" + MapMarkerAsJSON.TerminalID.toString() + "' class='" + waittimelinkclass + "'>Wait Time</a> | ";
InfoTemplateContents += "<a class='infolink' href='./TerminalDetail.aspx?terminalid=" + MapMarkerAsJSON.TerminalID + "'>More »</a>";
InfoTemplateContents += "<br /><br />";
if ((CamArrayAsJSON != null) && (CamArrayAsJSON.length > 0)) {
InfoTemplateContents += "<div id='camlayer" + MapMarkerAsJSON.TerminalID.toString() + "' class='infolayeractive'>";
// iterate through and show cams for each terminal in the popup
for (var count = 0; count < CamArrayAsJSON.length; count++) {
var currentCam = CamArrayAsJSON[count];
if (count > 0)
InfoTemplateContents += "<br /><br />";
if ((currentCam.CamOwner != "") && (currentCam.CamOwner != null) && (currentCam.OwnerURL != "") && (currentCam.OwnerURL != null))
InfoTemplateContents += "<strong>" + currentCam.Title + " (<a href='" + currentCam.OwnerURL + "' target='_blank'>" + currentCam.CamOwner + "</a>)</strong><a href='" + currentCam.OwnerURL + "' target='_blank'><img width='200' style='border: 1px solid #787878;' alt='" + currentCam.Title + "' src='" + currentCam.ImgURL + "' /></a>";
else
InfoTemplateContents += "<strong>" + currentCam.Title + "</strong>" +
"<a href='./TerminalDetail.aspx?terminalid=" + MapMarkerAsJSON.TerminalID + "#cam" + currentCam.CamID + "'><img style='border: 1px solid #787878;' src='" + currentCam.ImgURL + "?" + new Date().getTime() + "' width='200' /></a>";
}
InfoTemplateContents += "</div>";
}
// add wait time info and a link for more info
if ((MapMarkerAsJSON.WaitTimes != null) && (MapMarkerAsJSON.WaitTimes.length > 0)) {
InfoTemplateContents += "<div id='waittimelayer" + MapMarkerAsJSON.TerminalID.toString() + "' class='" + waittimelayerclass + "'>";
for (var waitcount = 0; waitcount < MapMarkerAsJSON.WaitTimes.length; waitcount++) {
if (waitcount > 0) InfoTemplateContents += "<br /><br />";
var waititem = MapMarkerAsJSON.WaitTimes[waitcount];
if ((waititem.RouteName != null) && (waititem.RouteName != '')) InfoTemplateContents += "<strong>" + waititem.RouteName + "</strong> - ";
InfoTemplateContents += StripHtml(waititem.WaitTimeNotes);
if ((waititem.WaitTimeLastUpdated != null) && (waititem.WaitTimeLastUpdated != '')) InfoTemplateContents += "<br />[Last Updated: " + wsfFormatDate(waititem.WaitTimeLastUpdated) + "]";
}
InfoTemplateContents += "</div>";
}
InfoTemplateContents += "</div>";
return InfoTemplateContents;
} | InfoLinkClickedTerm(waittimelinkid, MapMarkerAsJSON.TerminalID);
}
}
} | random_line_split |
simulate.py | """Routines for running the scheduler in simulation mode."""
import os.path
import configparser
import logging
import numpy as np
import astropy.coordinates as coord
from astropy.time import Time
import astropy.units as u
from .TelescopeStateMachine import TelescopeStateMachine
from .Scheduler import Scheduler
from .QueueManager import GreedyQueueManager, QueueEmptyError
from .QueueManager import calc_pool_stats, calc_queue_stats
from .configuration import SchedulerConfiguration, QueueConfiguration
from .constants import BASE_DIR, P48_loc
from .utils import block_index
# check aggressively for setting with copy
import pandas as pd
pd.options.mode.chained_assignment = 'raise' # default='warn'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger("transitions").setLevel(logging.WARNING)
logging.getLogger("sklearn_pandas").setLevel(logging.WARNING)
logging.getLogger("gurobipy").setLevel(logging.INFO)
logging.getLogger("ztf_sim.field_selection_functions").setLevel(logging.INFO)
def simulate(scheduler_config_file, sim_config_file,
scheduler_config_path = BASE_DIR + '../../ztf_survey_configuration/',
sim_config_path = BASE_DIR+'../config/',
output_path = BASE_DIR+'../sims/',
profile=False, raise_queue_empty=False, fallback=True,
time_limit = 30*u.second):
if profile:
try:
from pyinstrument import Profiler
except ImportError:
print('Error importing pyinstrument')
profile = False
sim_config = configparser.ConfigParser()
sim_config_file_fullpath = os.path.join(sim_config_path, sim_config_file)
sim_config.read(sim_config_file_fullpath)
# load config parameters into local variables
start_time = sim_config['simulation']['start_time']
try:
weather_year = sim_config['simulation']['weather_year']
except KeyError:
weather_year = None
if (weather_year.lower() == "none"):
|
else:
weather_year = int(weather_year)
survey_duration = \
sim_config['simulation'].getfloat('survey_duration_days') * u.day
# set up Scheduler
scheduler_config_file_fullpath = \
os.path.join(scheduler_config_path, scheduler_config_file)
scheduler = Scheduler(scheduler_config_file_fullpath,
sim_config_file_fullpath, output_path = output_path)
run_name = scheduler.scheduler_config.config['run_name']
if profile:
if survey_duration > 1. * u.day:
print("Don't profile long runs: 25% overhead")
profile = False
else:
profiler = Profiler()
profiler.start()
survey_start_time = Time(start_time, scale='utc', location=P48_loc)
tel = TelescopeStateMachine(
current_time=survey_start_time,
historical_observability_year=weather_year)
# logging. wipe out existing log.
logfile=os.path.join(output_path,f'{run_name}_log.txt')
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.INFO)
logger.addHandler(fh)
# initialize to a low value so we start by assigning nightly requests
current_night_mjd = 0
while tel.current_time < (survey_start_time + survey_duration):
# check if it is a new night and reload queue with new requests
if np.floor(tel.current_time.mjd) > current_night_mjd:
# use the state machine to allow us to skip weathered out nights
#if tel.check_if_ready():
scheduler.obs_log.prev_obs = None
block_use = scheduler.find_block_use_tonight(
tel.current_time)
timed_obs_count = scheduler.count_timed_observations_tonight()
# clobber old missed_obs queue with an empty one
scheduler.add_queue('missed_obs',
GreedyQueueManager('missed_obs',
QueueConfiguration(BASE_DIR+'../sims/missed_obs.json')),
clobber=True)
scheduler.queues['default'].missed_obs_queue = scheduler.queues['missed_obs']
scheduler.queues['default'].assign_nightly_requests(
tel.current_state_dict(),
scheduler.obs_log, block_use = block_use,
timed_obs_count = timed_obs_count, time_limit = time_limit,
skymaps = None)
current_night_mjd = np.floor(tel.current_time.mjd)
# log pool stats
logger.info(calc_pool_stats(
scheduler.queues['default'].rp.pool, intro="Nightly requests initialized"))
if tel.check_if_ready():
current_state = tel.current_state_dict()
#scheduler.check_for_TOO_queue_and_switch(current_state['current_time'])
scheduler.check_for_timed_queue_and_switch(current_state['current_time'])
# get coords
try:
next_obs = scheduler.Q.next_obs(current_state,
scheduler.obs_log)
assert(next_obs['request_id'] in scheduler.Q.queue.index)
except QueueEmptyError:
if scheduler.Q.queue_name != 'default':
logger.info(f"Queue {scheduler.Q.queue_name} empty! Switching to default queue.")
scheduler.set_queue('default')
try:
next_obs = scheduler.Q.next_obs(current_state,
scheduler.obs_log)
assert(next_obs['request_id'] in scheduler.Q.queue.index)
except QueueEmptyError:
logger.info("Default queue empty! Trying missed_obs queue...")
try:
next_obs = scheduler.queues['missed_obs'].next_obs(
current_state, scheduler.obs_log)
except QueueEmptyError:
logger.info("missed_obs queue empty! Trying fallback queue...")
if fallback and 'fallback' in scheduler.queues:
next_obs = scheduler.queues['fallback'].next_obs(
current_state, scheduler.obs_log)
else:
logger.info("No fallback queue defined!")
raise QueueEmptyError
else:
logger.info("Default queue empty! Trying missed_obs queue...")
try:
next_obs = scheduler.queues['missed_obs'].next_obs(
current_state, scheduler.obs_log)
except QueueEmptyError:
if fallback and 'fallback' in scheduler.queues:
logger.info("Default queue empty! Trying fallback queue...")
next_obs = scheduler.queues['fallback'].next_obs(
current_state, scheduler.obs_log)
elif not raise_queue_empty:
logger.info("Queue empty! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
else:
logger.info(calc_queue_stats(
scheduler.Q.queue, current_state,
intro="Queue returned no next_obs. Current queue status:"))
logger.info(calc_pool_stats(
scheduler.Q.rp.pool, intro="Current pool status:"))
raise QueueEmptyError
# try to change filters, if needed
if next_obs['target_filter_id'] != current_state['current_filter_id']:
if not tel.start_filter_change(next_obs['target_filter_id']):
logger.info("Filter change failure! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
# try to slew to the next target
if not tel.start_slew(coord.SkyCoord(next_obs['target_ra'] * u.deg,
next_obs['target_dec'] * u.deg)):
tel.set_cant_observe()
# "missed history": http://ops2.lsst.org/docs/current/architecture.html#output-tables
logger.info("Failure slewing to {}, {}! Waiting...".format
(next_obs['target_ra'] * u.deg, next_obs['target_dec'] * u.deg))
scheduler.obs_log.prev_obs = None
tel.wait()
continue
# try to expose
if not tel.start_exposing(next_obs['target_exposure_time']):
tel.set_cant_observe()
logger.info("Exposure failure! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
else:
# exposure completed successfully. now
# a) store exposure information in pointing history sqlite db
current_state = tel.current_state_dict()
scheduler.obs_log.log_pointing(current_state, next_obs)
# b) remove completed request_id from the pool and the queue
logger.info(next_obs)
assert(next_obs['request_id'] in scheduler.queues[next_obs['queue_name']].queue.index)
scheduler.queues[next_obs['queue_name']].remove_requests(next_obs['request_id'])
else:
scheduler.obs_log.prev_obs = None
tel.set_cant_observe()
tel.wait()
if profile:
profiler.stop()
print(profiler.output_text(str=True, color=True))
with open(os.path.join(output_path,f'{run_name}_profile.txt'), 'w') as f:
f.write(profiler.output_text())
| weather_year = None | conditional_block |
simulate.py | """Routines for running the scheduler in simulation mode."""
import os.path
import configparser
import logging
import numpy as np
import astropy.coordinates as coord
from astropy.time import Time
import astropy.units as u
from .TelescopeStateMachine import TelescopeStateMachine
from .Scheduler import Scheduler
from .QueueManager import GreedyQueueManager, QueueEmptyError
from .QueueManager import calc_pool_stats, calc_queue_stats
from .configuration import SchedulerConfiguration, QueueConfiguration
from .constants import BASE_DIR, P48_loc
from .utils import block_index
# check aggressively for setting with copy
import pandas as pd
pd.options.mode.chained_assignment = 'raise' # default='warn'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger("transitions").setLevel(logging.WARNING)
logging.getLogger("sklearn_pandas").setLevel(logging.WARNING)
logging.getLogger("gurobipy").setLevel(logging.INFO)
logging.getLogger("ztf_sim.field_selection_functions").setLevel(logging.INFO)
def simulate(scheduler_config_file, sim_config_file,
scheduler_config_path = BASE_DIR + '../../ztf_survey_configuration/',
sim_config_path = BASE_DIR+'../config/',
output_path = BASE_DIR+'../sims/',
profile=False, raise_queue_empty=False, fallback=True,
time_limit = 30*u.second):
if profile:
try:
from pyinstrument import Profiler
except ImportError:
print('Error importing pyinstrument')
profile = False
sim_config = configparser.ConfigParser()
sim_config_file_fullpath = os.path.join(sim_config_path, sim_config_file)
sim_config.read(sim_config_file_fullpath)
# load config parameters into local variables
start_time = sim_config['simulation']['start_time']
try:
weather_year = sim_config['simulation']['weather_year']
except KeyError:
weather_year = None
if (weather_year.lower() == "none"):
weather_year = None
else:
weather_year = int(weather_year)
survey_duration = \
sim_config['simulation'].getfloat('survey_duration_days') * u.day
# set up Scheduler
scheduler_config_file_fullpath = \
os.path.join(scheduler_config_path, scheduler_config_file)
scheduler = Scheduler(scheduler_config_file_fullpath,
sim_config_file_fullpath, output_path = output_path)
run_name = scheduler.scheduler_config.config['run_name']
if profile:
if survey_duration > 1. * u.day:
print("Don't profile long runs: 25% overhead")
profile = False
else:
profiler = Profiler()
profiler.start()
survey_start_time = Time(start_time, scale='utc', location=P48_loc)
tel = TelescopeStateMachine(
current_time=survey_start_time,
historical_observability_year=weather_year)
# logging. wipe out existing log.
logfile=os.path.join(output_path,f'{run_name}_log.txt')
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.INFO)
logger.addHandler(fh)
# initialize to a low value so we start by assigning nightly requests
current_night_mjd = 0
while tel.current_time < (survey_start_time + survey_duration):
# check if it is a new night and reload queue with new requests
if np.floor(tel.current_time.mjd) > current_night_mjd:
# use the state machine to allow us to skip weathered out nights
#if tel.check_if_ready():
scheduler.obs_log.prev_obs = None
block_use = scheduler.find_block_use_tonight(
tel.current_time)
timed_obs_count = scheduler.count_timed_observations_tonight()
# clobber old missed_obs queue with an empty one
scheduler.add_queue('missed_obs',
GreedyQueueManager('missed_obs',
QueueConfiguration(BASE_DIR+'../sims/missed_obs.json')),
clobber=True)
scheduler.queues['default'].missed_obs_queue = scheduler.queues['missed_obs']
scheduler.queues['default'].assign_nightly_requests(
tel.current_state_dict(),
scheduler.obs_log, block_use = block_use,
timed_obs_count = timed_obs_count, time_limit = time_limit,
skymaps = None)
current_night_mjd = np.floor(tel.current_time.mjd)
# log pool stats
logger.info(calc_pool_stats(
scheduler.queues['default'].rp.pool, intro="Nightly requests initialized"))
if tel.check_if_ready():
current_state = tel.current_state_dict()
#scheduler.check_for_TOO_queue_and_switch(current_state['current_time'])
scheduler.check_for_timed_queue_and_switch(current_state['current_time'])
# get coords
try:
next_obs = scheduler.Q.next_obs(current_state,
scheduler.obs_log)
assert(next_obs['request_id'] in scheduler.Q.queue.index)
except QueueEmptyError:
if scheduler.Q.queue_name != 'default':
logger.info(f"Queue {scheduler.Q.queue_name} empty! Switching to default queue.")
scheduler.set_queue('default')
try:
next_obs = scheduler.Q.next_obs(current_state,
scheduler.obs_log)
assert(next_obs['request_id'] in scheduler.Q.queue.index)
except QueueEmptyError:
logger.info("Default queue empty! Trying missed_obs queue...")
try:
next_obs = scheduler.queues['missed_obs'].next_obs(
current_state, scheduler.obs_log)
except QueueEmptyError:
logger.info("missed_obs queue empty! Trying fallback queue...")
if fallback and 'fallback' in scheduler.queues:
next_obs = scheduler.queues['fallback'].next_obs(
current_state, scheduler.obs_log)
else:
logger.info("No fallback queue defined!")
raise QueueEmptyError
else:
logger.info("Default queue empty! Trying missed_obs queue...")
try:
next_obs = scheduler.queues['missed_obs'].next_obs(
current_state, scheduler.obs_log)
except QueueEmptyError:
if fallback and 'fallback' in scheduler.queues:
logger.info("Default queue empty! Trying fallback queue...")
next_obs = scheduler.queues['fallback'].next_obs(
current_state, scheduler.obs_log)
elif not raise_queue_empty:
logger.info("Queue empty! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
else:
logger.info(calc_queue_stats(
scheduler.Q.queue, current_state,
intro="Queue returned no next_obs. Current queue status:"))
logger.info(calc_pool_stats(
scheduler.Q.rp.pool, intro="Current pool status:"))
raise QueueEmptyError
# try to change filters, if needed
if next_obs['target_filter_id'] != current_state['current_filter_id']:
if not tel.start_filter_change(next_obs['target_filter_id']):
logger.info("Filter change failure! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
# try to slew to the next target
if not tel.start_slew(coord.SkyCoord(next_obs['target_ra'] * u.deg,
next_obs['target_dec'] * u.deg)):
tel.set_cant_observe()
# "missed history": http://ops2.lsst.org/docs/current/architecture.html#output-tables |
# try to expose
if not tel.start_exposing(next_obs['target_exposure_time']):
tel.set_cant_observe()
logger.info("Exposure failure! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
else:
# exposure completed successfully. now
# a) store exposure information in pointing history sqlite db
current_state = tel.current_state_dict()
scheduler.obs_log.log_pointing(current_state, next_obs)
# b) remove completed request_id from the pool and the queue
logger.info(next_obs)
assert(next_obs['request_id'] in scheduler.queues[next_obs['queue_name']].queue.index)
scheduler.queues[next_obs['queue_name']].remove_requests(next_obs['request_id'])
else:
scheduler.obs_log.prev_obs = None
tel.set_cant_observe()
tel.wait()
if profile:
profiler.stop()
print(profiler.output_text(str=True, color=True))
with open(os.path.join(output_path,f'{run_name}_profile.txt'), 'w') as f:
f.write(profiler.output_text()) | logger.info("Failure slewing to {}, {}! Waiting...".format
(next_obs['target_ra'] * u.deg, next_obs['target_dec'] * u.deg))
scheduler.obs_log.prev_obs = None
tel.wait()
continue | random_line_split |
simulate.py | """Routines for running the scheduler in simulation mode."""
import os.path
import configparser
import logging
import numpy as np
import astropy.coordinates as coord
from astropy.time import Time
import astropy.units as u
from .TelescopeStateMachine import TelescopeStateMachine
from .Scheduler import Scheduler
from .QueueManager import GreedyQueueManager, QueueEmptyError
from .QueueManager import calc_pool_stats, calc_queue_stats
from .configuration import SchedulerConfiguration, QueueConfiguration
from .constants import BASE_DIR, P48_loc
from .utils import block_index
# check aggressively for setting with copy
import pandas as pd
pd.options.mode.chained_assignment = 'raise' # default='warn'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger("transitions").setLevel(logging.WARNING)
logging.getLogger("sklearn_pandas").setLevel(logging.WARNING)
logging.getLogger("gurobipy").setLevel(logging.INFO)
logging.getLogger("ztf_sim.field_selection_functions").setLevel(logging.INFO)
def | (scheduler_config_file, sim_config_file,
scheduler_config_path = BASE_DIR + '../../ztf_survey_configuration/',
sim_config_path = BASE_DIR+'../config/',
output_path = BASE_DIR+'../sims/',
profile=False, raise_queue_empty=False, fallback=True,
time_limit = 30*u.second):
if profile:
try:
from pyinstrument import Profiler
except ImportError:
print('Error importing pyinstrument')
profile = False
sim_config = configparser.ConfigParser()
sim_config_file_fullpath = os.path.join(sim_config_path, sim_config_file)
sim_config.read(sim_config_file_fullpath)
# load config parameters into local variables
start_time = sim_config['simulation']['start_time']
try:
weather_year = sim_config['simulation']['weather_year']
except KeyError:
weather_year = None
if (weather_year.lower() == "none"):
weather_year = None
else:
weather_year = int(weather_year)
survey_duration = \
sim_config['simulation'].getfloat('survey_duration_days') * u.day
# set up Scheduler
scheduler_config_file_fullpath = \
os.path.join(scheduler_config_path, scheduler_config_file)
scheduler = Scheduler(scheduler_config_file_fullpath,
sim_config_file_fullpath, output_path = output_path)
run_name = scheduler.scheduler_config.config['run_name']
if profile:
if survey_duration > 1. * u.day:
print("Don't profile long runs: 25% overhead")
profile = False
else:
profiler = Profiler()
profiler.start()
survey_start_time = Time(start_time, scale='utc', location=P48_loc)
tel = TelescopeStateMachine(
current_time=survey_start_time,
historical_observability_year=weather_year)
# logging. wipe out existing log.
logfile=os.path.join(output_path,f'{run_name}_log.txt')
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.INFO)
logger.addHandler(fh)
# initialize to a low value so we start by assigning nightly requests
current_night_mjd = 0
while tel.current_time < (survey_start_time + survey_duration):
# check if it is a new night and reload queue with new requests
if np.floor(tel.current_time.mjd) > current_night_mjd:
# use the state machine to allow us to skip weathered out nights
#if tel.check_if_ready():
scheduler.obs_log.prev_obs = None
block_use = scheduler.find_block_use_tonight(
tel.current_time)
timed_obs_count = scheduler.count_timed_observations_tonight()
# clobber old missed_obs queue with an empty one
scheduler.add_queue('missed_obs',
GreedyQueueManager('missed_obs',
QueueConfiguration(BASE_DIR+'../sims/missed_obs.json')),
clobber=True)
scheduler.queues['default'].missed_obs_queue = scheduler.queues['missed_obs']
scheduler.queues['default'].assign_nightly_requests(
tel.current_state_dict(),
scheduler.obs_log, block_use = block_use,
timed_obs_count = timed_obs_count, time_limit = time_limit,
skymaps = None)
current_night_mjd = np.floor(tel.current_time.mjd)
# log pool stats
logger.info(calc_pool_stats(
scheduler.queues['default'].rp.pool, intro="Nightly requests initialized"))
if tel.check_if_ready():
current_state = tel.current_state_dict()
#scheduler.check_for_TOO_queue_and_switch(current_state['current_time'])
scheduler.check_for_timed_queue_and_switch(current_state['current_time'])
# get coords
try:
next_obs = scheduler.Q.next_obs(current_state,
scheduler.obs_log)
assert(next_obs['request_id'] in scheduler.Q.queue.index)
except QueueEmptyError:
if scheduler.Q.queue_name != 'default':
logger.info(f"Queue {scheduler.Q.queue_name} empty! Switching to default queue.")
scheduler.set_queue('default')
try:
next_obs = scheduler.Q.next_obs(current_state,
scheduler.obs_log)
assert(next_obs['request_id'] in scheduler.Q.queue.index)
except QueueEmptyError:
logger.info("Default queue empty! Trying missed_obs queue...")
try:
next_obs = scheduler.queues['missed_obs'].next_obs(
current_state, scheduler.obs_log)
except QueueEmptyError:
logger.info("missed_obs queue empty! Trying fallback queue...")
if fallback and 'fallback' in scheduler.queues:
next_obs = scheduler.queues['fallback'].next_obs(
current_state, scheduler.obs_log)
else:
logger.info("No fallback queue defined!")
raise QueueEmptyError
else:
logger.info("Default queue empty! Trying missed_obs queue...")
try:
next_obs = scheduler.queues['missed_obs'].next_obs(
current_state, scheduler.obs_log)
except QueueEmptyError:
if fallback and 'fallback' in scheduler.queues:
logger.info("Default queue empty! Trying fallback queue...")
next_obs = scheduler.queues['fallback'].next_obs(
current_state, scheduler.obs_log)
elif not raise_queue_empty:
logger.info("Queue empty! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
else:
logger.info(calc_queue_stats(
scheduler.Q.queue, current_state,
intro="Queue returned no next_obs. Current queue status:"))
logger.info(calc_pool_stats(
scheduler.Q.rp.pool, intro="Current pool status:"))
raise QueueEmptyError
# try to change filters, if needed
if next_obs['target_filter_id'] != current_state['current_filter_id']:
if not tel.start_filter_change(next_obs['target_filter_id']):
logger.info("Filter change failure! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
# try to slew to the next target
if not tel.start_slew(coord.SkyCoord(next_obs['target_ra'] * u.deg,
next_obs['target_dec'] * u.deg)):
tel.set_cant_observe()
# "missed history": http://ops2.lsst.org/docs/current/architecture.html#output-tables
logger.info("Failure slewing to {}, {}! Waiting...".format
(next_obs['target_ra'] * u.deg, next_obs['target_dec'] * u.deg))
scheduler.obs_log.prev_obs = None
tel.wait()
continue
# try to expose
if not tel.start_exposing(next_obs['target_exposure_time']):
tel.set_cant_observe()
logger.info("Exposure failure! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
else:
# exposure completed successfully. now
# a) store exposure information in pointing history sqlite db
current_state = tel.current_state_dict()
scheduler.obs_log.log_pointing(current_state, next_obs)
# b) remove completed request_id from the pool and the queue
logger.info(next_obs)
assert(next_obs['request_id'] in scheduler.queues[next_obs['queue_name']].queue.index)
scheduler.queues[next_obs['queue_name']].remove_requests(next_obs['request_id'])
else:
scheduler.obs_log.prev_obs = None
tel.set_cant_observe()
tel.wait()
if profile:
profiler.stop()
print(profiler.output_text(str=True, color=True))
with open(os.path.join(output_path,f'{run_name}_profile.txt'), 'w') as f:
f.write(profiler.output_text())
| simulate | identifier_name |
simulate.py | """Routines for running the scheduler in simulation mode."""
import os.path
import configparser
import logging
import numpy as np
import astropy.coordinates as coord
from astropy.time import Time
import astropy.units as u
from .TelescopeStateMachine import TelescopeStateMachine
from .Scheduler import Scheduler
from .QueueManager import GreedyQueueManager, QueueEmptyError
from .QueueManager import calc_pool_stats, calc_queue_stats
from .configuration import SchedulerConfiguration, QueueConfiguration
from .constants import BASE_DIR, P48_loc
from .utils import block_index
# check aggressively for setting with copy
import pandas as pd
pd.options.mode.chained_assignment = 'raise' # default='warn'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger("transitions").setLevel(logging.WARNING)
logging.getLogger("sklearn_pandas").setLevel(logging.WARNING)
logging.getLogger("gurobipy").setLevel(logging.INFO)
logging.getLogger("ztf_sim.field_selection_functions").setLevel(logging.INFO)
def simulate(scheduler_config_file, sim_config_file,
scheduler_config_path = BASE_DIR + '../../ztf_survey_configuration/',
sim_config_path = BASE_DIR+'../config/',
output_path = BASE_DIR+'../sims/',
profile=False, raise_queue_empty=False, fallback=True,
time_limit = 30*u.second):
| if profile:
try:
from pyinstrument import Profiler
except ImportError:
print('Error importing pyinstrument')
profile = False
sim_config = configparser.ConfigParser()
sim_config_file_fullpath = os.path.join(sim_config_path, sim_config_file)
sim_config.read(sim_config_file_fullpath)
# load config parameters into local variables
start_time = sim_config['simulation']['start_time']
try:
weather_year = sim_config['simulation']['weather_year']
except KeyError:
weather_year = None
if (weather_year.lower() == "none"):
weather_year = None
else:
weather_year = int(weather_year)
survey_duration = \
sim_config['simulation'].getfloat('survey_duration_days') * u.day
# set up Scheduler
scheduler_config_file_fullpath = \
os.path.join(scheduler_config_path, scheduler_config_file)
scheduler = Scheduler(scheduler_config_file_fullpath,
sim_config_file_fullpath, output_path = output_path)
run_name = scheduler.scheduler_config.config['run_name']
if profile:
if survey_duration > 1. * u.day:
print("Don't profile long runs: 25% overhead")
profile = False
else:
profiler = Profiler()
profiler.start()
survey_start_time = Time(start_time, scale='utc', location=P48_loc)
tel = TelescopeStateMachine(
current_time=survey_start_time,
historical_observability_year=weather_year)
# logging. wipe out existing log.
logfile=os.path.join(output_path,f'{run_name}_log.txt')
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.INFO)
logger.addHandler(fh)
# initialize to a low value so we start by assigning nightly requests
current_night_mjd = 0
while tel.current_time < (survey_start_time + survey_duration):
# check if it is a new night and reload queue with new requests
if np.floor(tel.current_time.mjd) > current_night_mjd:
# use the state machine to allow us to skip weathered out nights
#if tel.check_if_ready():
scheduler.obs_log.prev_obs = None
block_use = scheduler.find_block_use_tonight(
tel.current_time)
timed_obs_count = scheduler.count_timed_observations_tonight()
# clobber old missed_obs queue with an empty one
scheduler.add_queue('missed_obs',
GreedyQueueManager('missed_obs',
QueueConfiguration(BASE_DIR+'../sims/missed_obs.json')),
clobber=True)
scheduler.queues['default'].missed_obs_queue = scheduler.queues['missed_obs']
scheduler.queues['default'].assign_nightly_requests(
tel.current_state_dict(),
scheduler.obs_log, block_use = block_use,
timed_obs_count = timed_obs_count, time_limit = time_limit,
skymaps = None)
current_night_mjd = np.floor(tel.current_time.mjd)
# log pool stats
logger.info(calc_pool_stats(
scheduler.queues['default'].rp.pool, intro="Nightly requests initialized"))
if tel.check_if_ready():
current_state = tel.current_state_dict()
#scheduler.check_for_TOO_queue_and_switch(current_state['current_time'])
scheduler.check_for_timed_queue_and_switch(current_state['current_time'])
# get coords
try:
next_obs = scheduler.Q.next_obs(current_state,
scheduler.obs_log)
assert(next_obs['request_id'] in scheduler.Q.queue.index)
except QueueEmptyError:
if scheduler.Q.queue_name != 'default':
logger.info(f"Queue {scheduler.Q.queue_name} empty! Switching to default queue.")
scheduler.set_queue('default')
try:
next_obs = scheduler.Q.next_obs(current_state,
scheduler.obs_log)
assert(next_obs['request_id'] in scheduler.Q.queue.index)
except QueueEmptyError:
logger.info("Default queue empty! Trying missed_obs queue...")
try:
next_obs = scheduler.queues['missed_obs'].next_obs(
current_state, scheduler.obs_log)
except QueueEmptyError:
logger.info("missed_obs queue empty! Trying fallback queue...")
if fallback and 'fallback' in scheduler.queues:
next_obs = scheduler.queues['fallback'].next_obs(
current_state, scheduler.obs_log)
else:
logger.info("No fallback queue defined!")
raise QueueEmptyError
else:
logger.info("Default queue empty! Trying missed_obs queue...")
try:
next_obs = scheduler.queues['missed_obs'].next_obs(
current_state, scheduler.obs_log)
except QueueEmptyError:
if fallback and 'fallback' in scheduler.queues:
logger.info("Default queue empty! Trying fallback queue...")
next_obs = scheduler.queues['fallback'].next_obs(
current_state, scheduler.obs_log)
elif not raise_queue_empty:
logger.info("Queue empty! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
else:
logger.info(calc_queue_stats(
scheduler.Q.queue, current_state,
intro="Queue returned no next_obs. Current queue status:"))
logger.info(calc_pool_stats(
scheduler.Q.rp.pool, intro="Current pool status:"))
raise QueueEmptyError
# try to change filters, if needed
if next_obs['target_filter_id'] != current_state['current_filter_id']:
if not tel.start_filter_change(next_obs['target_filter_id']):
logger.info("Filter change failure! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
# try to slew to the next target
if not tel.start_slew(coord.SkyCoord(next_obs['target_ra'] * u.deg,
next_obs['target_dec'] * u.deg)):
tel.set_cant_observe()
# "missed history": http://ops2.lsst.org/docs/current/architecture.html#output-tables
logger.info("Failure slewing to {}, {}! Waiting...".format
(next_obs['target_ra'] * u.deg, next_obs['target_dec'] * u.deg))
scheduler.obs_log.prev_obs = None
tel.wait()
continue
# try to expose
if not tel.start_exposing(next_obs['target_exposure_time']):
tel.set_cant_observe()
logger.info("Exposure failure! Waiting...")
scheduler.obs_log.prev_obs = None
tel.wait()
continue
else:
# exposure completed successfully. now
# a) store exposure information in pointing history sqlite db
current_state = tel.current_state_dict()
scheduler.obs_log.log_pointing(current_state, next_obs)
# b) remove completed request_id from the pool and the queue
logger.info(next_obs)
assert(next_obs['request_id'] in scheduler.queues[next_obs['queue_name']].queue.index)
scheduler.queues[next_obs['queue_name']].remove_requests(next_obs['request_id'])
else:
scheduler.obs_log.prev_obs = None
tel.set_cant_observe()
tel.wait()
if profile:
profiler.stop()
print(profiler.output_text(str=True, color=True))
with open(os.path.join(output_path,f'{run_name}_profile.txt'), 'w') as f:
f.write(profiler.output_text()) | identifier_body | |
systems.rs | // будем юзать Behaviour Tree AI
//Главный скрипт работает по системе выбор-условие-действие (selector-condition-action).
//Выбор — своеобразный аналог оператора switch() в языках программирования.
// В «теле» элемента выбора происходит выбор одного из заданных наборов действий
// в зависимости от условия.
//Условие — проверка истинности заданного условия.
// Используется в начале каждого набора действий внутри элемента выбора.
// Если условие истинно — выполняется данный набор действий и выбор завершается.
// Если нет — происходит переход к следующему набору действий
//Действие — скрипт, запускающий другой скрипт (действие) с заданными параметрами.
// *В BT AI существует понятие базовых действий.
/*
SelectorBegin('AI Role 1');
SequenceBegin('Атака');
//видим врага и знаем его id
Condition(SeeEnemy && Enemy>0);
//смомтрим на него
Action( Action_LookAt, point_direction(x,y, Enemy.x, Enemy.y));
//стреляем в сторону врага 2 раза
Action( Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SelectorBegin('Подходим на оптимальное растояние');
//или
SequenceBegin('Враг слишком далеко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)>256);
Action(Action_MoveTo, Enemy.x-lengthdir_x(128, direction), Enemy.y-lengthdir_y(128, direction), highSpeed);
SequenceEnd();
//или
SequenceBegin('Враг слишком близко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)<64);
//идем назад
Action(Action_MoveTo, x-lengthdir_x(64, direction), y-lengthdir_y(64, direction), highSpeed);
SequenceEnd();
SequenceBegin('маневр');
//иначе просто маневрируем, чтобы сложнее было попасть
Action( Action_MoveTo, x+irandom_range(-64, 64), y+irandom_range(-64, 64), highSpeed);
SequenceEnd();
SelectorEnd();
//стреляем в сторону врага 4 раза
Action(Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SequenceEnd();
SelectorEnd();
*/
//Selector — оператор выбора набора действий
//Sequence — набор действий
//Condition — проверка условия
//Action — действие. вызов скрипта(первый аргумент) с параметрами (остальные аргументы)
/*
http://www.pvsm.ru/robototehnika/161885/print/
Узлы BT называют [10] задачами или поведениями. Каждая задача может иметь четыре состояния:
«Успех», если задача выполнена успешно;
- выкинуть нахер, заменитьт ошибкой. «Неудача», если условие не выполнено или задача, по какой-то причине, невыполнима;
«В работе», если задача запущена в работу и ожидает завершения
«Ошибка», если в программе возникает неизвестная ошибка.
Результат работы любого узла всегда передается родительскому узлу, расположенному на уровень выше.
Дерево просматривается с самого верхнего узла – корня. От него производится поиск в глубину начиная
с левой ветви дерева. Если у одного узла есть несколько подзадач, они исполняются слева направо.
Среди узлов выделяют следующие типы:
-действие (action),
-узел исполнения последовательности (sequence),
-параллельный узел (parallel),
-селектор (selector),
-условие (condition),
-инвертор (inverter).
Действие представляет собой запись переменных или какое-либо движение.
Узлы последовательностей поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Неудача», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Успех».
Узлы параллельных действий исполняют поведения дочерних узлов до тех пор,
пока заданное количество из них не вернет статусы «Неудача» или «Успех».
Селекторы поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Успех», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Неудача».
Условия содержат критерий, по которому определяется исход, и переменную.
Например, условие «Есть ли в этой комнате человек?» перебирает все объекты в комнате
и сравнивает их с переменной «Человек».
Узлы инверсии выполняют функцию оператора NOT.
*/
use tinyecs::*;
use time::{PreciseTime, Duration};
use WORLD_SPEED;
use ::manager::components::Position;
use ::monster::components::MonsterClass;
use ::monster::components::MonsterAttributes;
use ::monster::components::SelectionTree;
use ::monster::components::BehaviourEvent;
use ::monster::components::BehaviourState;
/// Система восприятия
pub struct _PerceptionSystem;
// тут типа чекает окружение, и помечает объекты что попадают в поле зения.
impl System for _PerceptionSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass)
}
fn process_one(&mut self, _entity: &mut Entity) {
// здесь тоже меняются события.
// сканируем вокруг, может есть еда или вода или др. монстр или ОБОРИГЕН!
}
}
/// Выбиральщик состояний дерева поведения
// используя код программатора SelectionTree, переключает состояния.
pub struct SelectorSystem;
impl System for SelectorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, SelectionTree, BehaviourEvent, BehaviourState)
}
// fn process_no_entities(&mut self) {
// println!("instaced buffer render system must work, but no entities!");
// }
// fn process_no_data(&mut self) {
// println!("instaced buffer render system must work, but no data!");
// }
fn process_one(&mut self, entity: &mut Entity) {
let mut selection_tree = entity.get_component::<SelectionTree>();
let mut behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let len = selection_tree.selector.len();
if len > 0 {
// ткущий узел.
if selection_tree.curr_selector < 0i32 {
selection_tree.curr_selector = 0i32;
println!("ошибка/инициализация текущего указателя, теперь он {}", 0i32);
} else {
/*event, state
let sel = vec![[6, 2], [5, 1]];*/
let index: usize = selection_tree.curr_selector as usize;
let curr_cell = selection_tree.selector[index]; //[6, 2]
let v_event = curr_cell[0];
let v_state = curr_cell[1];
// проверить нет ли ошибки в селекторе/программаторе. или первый запуск/инициализация.
let curr_event = behaviour_event.event; // считываем текущий событие/event
if curr_event == v_event {
// меняем состояние, на соответствующее.
behaviour_state.state = v_state;
println!("обнаружено событие {}", v_event);
println!("переключаю состояние на {}", v_state);
// сдвигаем curr_selector, переходим к сл. ячейке.
let shl: i32 = (len - 1) as i32;
if selection_tree.curr_selector < shl { selection_tree.curr_selector += 1; } else {
selection_tree.curr_selector = 0;
}
}
}
}
}
}
/// Активатор. Приводит в действие.
// считывает состояние и выполняет его, либо продолжает выполнение.
// система поведения.
pub struct BehaviorSystem {
pub behavior_time: PreciseTime,
}
impl System for BehaviorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, BehaviourState, Position)
}
fn process_one(&mut self, entity: &mut Entity) {
// смотрит текущее состояние и выполняет действие.
// тут заставляем монстра ходить, пить, искать.
// 0. Инициализация, ошибка.
// 1. Сон. Монстр ждет, в этот момент с ним ничего не происходит.
// 2. Бодрствование. Случайное перемещение по полигону.
// 3. Поиск пищи.
// 4. Поиск воды.
// 5. Прием пищи.
// 6. Прием воды.
// 7. Перемещение к цели.
// 8. Проверка достижения цели.
if self.behavior_time.to(PreciseTime::now()) > Duration::seconds(5 * WORLD_SPEED) {
let behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let mut position = entity.get_component::<Position>();
match behaviour_state.state {
1 => {
println!("...zzz...");
},
2 => {
// тут заставляем монстра ходить туда-сюда, бесцельно, куда подует)
if position.x > (position.y * 2f32) && position.y < 139f32 {
position.y += 1f32;
} else if position.x < 139f32 {
position.x += 1f32;
}
println!("x:{}, y:{}", position.x, position.y);
/* движение по овальной траектории.
x = x_+x_radius*cos(r_ang+2);
y = y_+y_radius*sin(r_ang);
X1 = X + R * 0.5;
Y1 = Y + 1.3 * R * 0.8;
0.5 это синус 30
0.8 это косинус 30
R - хз. например 20 клеток.
X, Y - это текущие координаты.
*/
// let x1: f32 = position.x + 20f32 * 0.5;
// let y1: f32 = position.y + 1.3f32 * 20f32 *0.8;
// position.x = x1;
// position.y = y1;
// println!("x:{}, y:{}", position.x, position.y);
},
_ => {},
}
// фиксируем текущее время
self.behavior_time = PreciseTime::now();
}
}
}
/// Генерация событий
// Создаем события, проверяем параметры.
pub struct EventSystem {
pub event_time: PreciseTime,
pub event_last: u32,
// 0 - инициализация/ошибка
}
impl System for EventSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterAttributes, BehaviourEvent)
}
fn process_one(&mut self, entity: &mut Entity) {
// 0. Инициализация, ошибка.
// 1. Обнаружена еда.
// 2. Обнаружена вода.
// 3. Наступил голод.
// 4. Наступила жажда.
// 5. Утомился.
// 6. Нет событий.
// 7. Монстр насытился.
// 8. Монстр напился.
if self.event_time.to(PreciseTime::now()) > Duration::seconds(WORLD_SPEED) {
let mut behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let monster_attr = entity.get_component::<MonsterAttributes>(); // события
if behaviour_event.event == 0 {
// проверяем ошибки/инициализация
behaviour_event.event = 6;
println!("ошибка/инициализация текущего события, теперь он {}", 6);
} else if monster_attr.power < 960 && self.event_last != 5 {
behaviour_event.event = 5; // наступает событие - УСТАЛ
self.event_last = behaviour_event.event;
println!("Новое событие: монстр устал.");
} else if monster_attr.power > 990 && self.event_last != 6 {
behaviour_event.event = 6;
self.event_last = behaviour_event.event;
println!("Новое событие: монстр отдохнул.");
}
// фиксируем текущее время
self.event_time = PreciseTime::now();
}
}
}
| conditional_block | ||
systems.rs | // будем юзать Behaviour Tree AI
//Главный скрипт работает по системе выбор-условие-действие (selector-condition-action).
//Выбор — своеобразный аналог оператора switch() в языках программирования.
// В «теле» элемента выбора происходит выбор одного из заданных наборов действий
// в зависимости от условия.
//Условие — проверка истинности заданного условия.
// Используется в начале каждого набора действий внутри элемента выбора.
// Если условие истинно — выполняется данный набор действий и выбор завершается.
// Если нет — происходит переход к следующему набору действий
//Действие — скрипт, запускающий другой скрипт (действие) с заданными параметрами.
// *В BT AI существует понятие базовых действий.
/*
SelectorBegin('AI Role 1');
SequenceBegin('Атака');
//видим врага и знаем его id
Condition(SeeEnemy && Enemy>0);
//смомтрим на него
Action( Action_LookAt, point_direction(x,y, Enemy.x, Enemy.y));
//стреляем в сторону врага 2 раза
Action( Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SelectorBegin('Подходим на оптимальное растояние');
//или
SequenceBegin('Враг слишком далеко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)>256);
Action(Action_MoveTo, Enemy.x-lengthdir_x(128, direction), Enemy.y-lengthdir_y(128, direction), highSpeed);
SequenceEnd();
//или
SequenceBegin('Враг слишком близко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)<64);
//идем назад
Action(Action_MoveTo, x-lengthdir_x(64, direction), y-lengthdir_y(64, direction), highSpeed);
SequenceEnd();
SequenceBegin('маневр');
//иначе просто маневрируем, чтобы сложнее было попасть
Action( Action_MoveTo, x+irandom_range(-64, 64), y+irandom_range(-64, 64), highSpeed);
SequenceEnd();
SelectorEnd();
//стреляем в сторону врага 4 раза
Action(Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SequenceEnd();
SelectorEnd();
*/
//Selector — оператор выбора набора действий
//Sequence — набор действий
//Condition — проверка условия
//Action — действие. вызов скрипта(первый аргумент) с параметрами (остальные аргументы)
/*
http://www.pvsm.ru/robototehnika/161885/print/
Узлы BT называют [10] задачами или поведениями. Каждая задача может иметь четыре состояния:
«Успех», если задача выполнена успешно;
- выкинуть нахер, заменитьт ошибкой. «Неудача», если условие не выполнено или задача, по какой-то причине, невыполнима;
«В работе», если задача запущена в работу и ожидает завершения
«Ошибка», если в программе возникает неизвестная ошибка.
Результат работы любого узла всегда передается родительскому узлу, расположенному на уровень выше.
Дерево просматривается с самого верхнего узла – корня. От него производится поиск в глубину начиная
с левой ветви дерева. Если у одного узла есть несколько подзадач, они исполняются слева направо.
Среди узлов выделяют следующие типы:
-действие (action),
-узел исполнения последовательности (sequence),
-параллельный узел (parallel),
-селектор (selector),
-условие (condition),
-инвертор (inverter).
Действие представляет собой запись переменных или какое-либо движение.
Узлы последовательностей поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Неудача», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Успех».
Узлы параллельных действий исполняют поведения дочерних узлов до тех пор,
пока заданное количество из них не вернет статусы «Неудача» или «Успех».
Селекторы поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Успех», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Неудача».
Условия содержат критерий, по которому определяется исход, и переменную.
Например, условие «Есть ли в этой комнате человек?» перебирает все объекты в комнате
и сравнивает их с переменной «Человек».
Узлы инверсии выполняют функцию оператора NOT.
*/
use tinyecs::*;
use time::{PreciseTime, Duration};
use WORLD_SPEED;
use ::manager::components::Position;
use ::monster::components::MonsterClass;
use ::monster::components::MonsterAttributes;
use ::monster::components::SelectionTree;
use ::monster::components::BehaviourEvent;
use ::monster::components::BehaviourState;
/// Система восприятия
pub struct _PerceptionSystem;
// тут типа чекает окружение, и помечает объекты что попадают в поле зения.
impl System for _PerceptionSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass)
}
fn process_one(&mut self, _entity: &mut Entity) {
// здесь тоже меняются события.
// сканируем вокруг, может есть еда или вода или др. монстр или ОБОРИГЕН!
}
}
/// Выбиральщик состояний дерева поведения
// используя код программатора SelectionTree, переключает состояния.
pub struct SelectorSystem;
impl System for SelectorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, SelectionTree, BehaviourEvent, BehaviourState)
}
// fn process_no_entities(&mut self) {
// println!("instaced buffer render system must work, but no entities!");
// }
// fn process_no_data(&mut self) {
// println!("instaced buffer render system must work, but no data!");
// }
fn process_one(&mut self, entity: &mut Entity) {
let mut selection_tree = entity.get_component::<SelectionTree>();
let mut behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let len = selection_tree.selector.len();
if len > 0 {
// ткущий узел.
if selection_tree.curr_selector < 0i32 {
selection_tree.curr_selector = 0i32;
println!("ошибка/инициализация текущего указателя, теперь он {}", 0i32);
} else {
/*event, state
let sel = vec![[6, 2], [5, 1]];*/
let index: usize = selection_tree.curr_selector as usize;
let curr_cell = selection_tree.selector[index]; //[6, 2]
let v_event = curr_cell[0];
let v_state = curr_cell[1];
// проверить нет ли ошибки в селекторе/программаторе. или первый запуск/инициализация.
let curr_event = behaviour_event.event; // считываем текущий событие/event
if curr_event == v_event {
// меняем состояние, на соответствующее.
behaviour_state.state = v_state;
println!("обнаружено событие {}", v_event);
println!("переключаю состояние на {}", v_state);
// сдвигаем curr_selector, переходим к сл. ячейке.
let shl: i32 = (len - 1) as i32;
if selection_tree.curr_selector < shl { selection_tree.curr_selector += 1; } else {
selection_tree.curr_selector = 0;
}
}
}
}
}
}
/// Активатор. Приводит в действие.
// считывает состояние и выполняет его, либо продолжает выполнение.
// система поведения.
pub struct BehaviorSystem {
pub behavior_time: PreciseTime,
}
impl System for BehaviorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, BehaviourState, Position)
}
fn process_one(&mut self, entity: &mut Entity) {
// смотрит текущее состояние и выполняет действие.
// тут заставляем монстра ходить, пить, искать.
// 0. Инициализация, ошибка.
// 1. Сон. Монстр ждет, в этот момент с ним ничего не происходит.
// 2. Бодрствование. Случайное перемещение по полигону.
// 3. Поиск пищи.
// 4. Поиск воды.
// 5. Прием пищи.
// 6. Прием воды.
// 7. Перемещение к цели.
// 8. Проверка достижения цели.
if self.behavior_time.to(PreciseTime::now()) > Duration::seconds(5 * WORLD_SPEED) {
let behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let mut position = entity.get_component::<Position>();
match behaviour_state.state {
1 => {
println!("...zzz...");
},
2 => {
// тут заставляем монстра ходить туда-сюда, бесцельно, куда подует)
if position.x > (position.y * 2f32) && position.y < 139f32 {
position.y += 1f32;
} else if position.x < 139f32 {
position.x += 1f32;
}
println!("x:{}, y:{}", position.x, position.y);
/* движение по овальной траектории.
x = x_+x_radius*cos(r_ang+2);
y = y_+y_radius*sin(r_ang);
X1 = X + R * 0.5;
Y1 = Y + 1.3 * R * 0.8;
0.5 это синус 30
0.8 это косинус 30
R - хз. например 20 клеток.
X, Y - это текущие координаты.
*/
// let x1: f32 = position.x + 20f32 * 0.5;
// let y1: f32 = position.y + 1.3f32 * 20f32 *0.8;
// position.x = x1;
// position.y = y1;
// println!("x:{}, y:{}", position.x, position.y);
},
_ => {},
}
// фиксируем текущее время
self.behavior_time = PreciseTime::now();
}
}
}
/// Генерация событий
// Создаем события, проверяем параметры.
pub struct EventSystem {
pub event_time: PreciseTime,
pub event_last: u32,
// 0 - инициализация/ошибка
}
impl System for EventSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterAttributes, BehaviourEvent)
}
fn process_one(&mut self, entity: &mut Entity) {
// 0. Инициализация, ошибка.
// 1. Обнаружена еда.
// 2. Обнаружена вода.
// 3. Наступил голод.
// 4. Наступила жажда.
// 5. Утомился.
// 6. Нет событий.
// 7. Монстр насытился.
// 8. Монстр напился.
if self.event_time.to(PreciseTime::now()) > Duration::seconds(WORLD_SPEED) {
let mut behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let monster_attr = entity.get_component::<MonsterAttributes>(); // события
if behaviour_event.event == 0 {
// проверяем ошибки/инициализация
behaviour_event.event = 6;
println!("ошибка/инициализация текущего события, теперь он {}", 6);
} else if monster_attr.power < 960 && self.event_last != 5 {
behaviour_event.event = 5; // наступает событие - УСТАЛ
self.event_last = behaviour_event.event;
println!("Новое событие: монстр устал.");
} else if monster_attr.power > 990 && self.event_last != 6 {
behaviour_event.event = 6;
self.event_last = behaviour_event.event;
println!("Новое событие: монстр отдохнул.");
}
// фиксируем текущее время
self.event_time = PreciseTime::now();
}
}
}
| identifier_body | ||
systems.rs | // будем юзать Behaviour Tree AI
//Главный скрипт работает по системе выбор-условие-действие (selector-condition-action).
//Выбор — своеобразный аналог оператора switch() в языках программирования.
// В «теле» элемента выбора происходит выбор одного из заданных наборов действий
// в зависимости от условия.
//Условие — проверка истинности заданного условия.
// Используется в начале каждого набора действий внутри элемента выбора.
// Если условие истинно — выполняется данный набор действий и выбор завершается.
// Если нет — происходит переход к следующему набору действий
//Действие — скрипт, запускающий другой скрипт (действие) с заданными параметрами.
// *В BT AI существует понятие базовых действий.
/*
SelectorBegin('AI Role 1');
SequenceBegin('Атака');
//видим врага и знаем его id
Condition(SeeEnemy && Enemy>0);
//смомтрим на него
Action( Action_LookAt, point_direction(x,y, Enemy.x, Enemy.y));
//стреляем в сторону врага 2 раза
Action( Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SelectorBegin('Подходим на оптимальное растояние');
//или
SequenceBegin('Враг слишком далеко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)>256);
Action(Action_MoveTo, Enemy.x-lengthdir_x(128, direction), Enemy.y-lengthdir_y(128, direction), highSpeed);
SequenceEnd();
//или
SequenceBegin('Враг слишком близко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)<64);
//идем назад
Action(Action_MoveTo, x-lengthdir_x(64, direction), y-lengthdir_y(64, direction), highSpeed);
SequenceEnd();
SequenceBegin('маневр');
//иначе просто маневрируем, чтобы сложнее было попасть
Action( Action_MoveTo, x+irandom_range(-64, 64), y+irandom_range(-64, 64), highSpeed);
SequenceEnd();
SelectorEnd();
//стреляем в сторону врага 4 раза
Action(Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SequenceEnd();
SelectorEnd();
*/
//Selector — оператор выбора набора действий
//Sequence — набор действий
//Condition — проверка условия
//Action — действие. вызов скрипта(первый аргумент) с параметрами (остальные аргументы)
/*
http://www.pvsm.ru/robototehnika/161885/print/
Узлы BT называют [10] задачами или поведениями. Каждая задача может иметь четыре состояния:
«Успех», если задача выполнена успешно;
- выкинуть, заменить ошибкой. «Неудача», если условие не выполнено или задача, по какой-то причине, невыполнима;
«В работе», если задача запущена в работу и ожидает завершения
«Ошибка», если в программе возникает неизвестная ошибка.
Результат работы любого узла всегда передается родительскому узлу, расположенному на уровень выше.
Дерево просматривается с самого верхнего узла – корня. От него производится поиск в глубину начиная
с левой ветви дерева. Если у одного узла есть несколько подзадач, они исполняются слева направо.
Среди узлов выделяют следующие типы:
-действие (action),
-узел исполнения последовательности (sequence),
-параллельный узел (parallel),
-селектор (selector),
-условие (condition),
-инвертор (inverter).
Действие представляет собой запись переменных или какое-либо движение.
Узлы последовательностей поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Неудача», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Успех».
Узлы параллельных действий исполняют поведения дочерних узлов до тех пор,
пока заданное количество из них не вернет статусы «Неудача» или «Успех».
Селекторы поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Успех», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Неудача».
Условия содержат критерий, по которому определяется исход, и переменную.
Например, условие «Есть ли в этой комнате человек?» перебирает все объекты в комнате
и сравнивает их с переменной «Человек».
Узлы инверсии выполняют функцию оператора NOT.
*/
use tinyecs::*;
use time::{PreciseTime, Duration};
use WORLD_SPEED;
use ::manager::components::Position;
use ::monster::components::MonsterClass;
use ::monster::components::MonsterAttributes;
use ::monster::components::SelectionTree;
use ::monster::components::BehaviourEvent;
use ::monster::components::BehaviourState;
/// Perception system (currently an unused stub — note the leading underscore).
///
/// Intended to scan a monster's surroundings and mark objects that fall
/// into its field of view (food, water, other monsters, natives).
pub struct _PerceptionSystem;
// NOTE(review): process_one is empty; confirm this system is actually
// registered with the world before relying on it.
impl System for _PerceptionSystem {
    /// Matches every entity that carries a MonsterClass component.
    fn aspect(&self) -> Aspect {
        aspect_all!(MonsterClass)
    }
    /// Per-entity hook; intentionally empty for now.
    fn process_one(&mut self, _entity: &mut Entity) {
        // Events would also be raised from here.
        // Scan the area: there may be food, water, another monster, or a native!
    }
}
/// Behaviour-tree state selector.
///
/// Walks the SelectionTree "program" (a list of [event, state] cells) and
/// switches a monster's BehaviourState when the pending BehaviourEvent
/// matches the current cell.
pub struct SelectorSystem;
impl System for SelectorSystem {
    /// Runs on entities that have a class, a selector program, an event
    /// slot and a behaviour state.
    fn aspect(&self) -> Aspect {
        aspect_all!(MonsterClass, SelectionTree, BehaviourEvent, BehaviourState)
    }

    /// Advances the selector program: when the current cell's event matches
    /// the entity's pending event, enter that cell's state and step the
    /// cursor, wrapping back to the first cell after the last one.
    fn process_one(&mut self, entity: &mut Entity) {
        let mut program = entity.get_component::<SelectionTree>();
        let mut state = entity.get_component::<BehaviourState>(); // current state
        let event = entity.get_component::<BehaviourEvent>(); // pending event

        let cells = program.selector.len();
        if cells == 0 {
            return; // empty program — nothing to select
        }

        if program.curr_selector < 0i32 {
            // Error / first run: reset the cursor to the first cell.
            program.curr_selector = 0i32;
            println!("ошибка/инициализация текущего указателя, теперь он {}", 0i32);
            return;
        }

        // Each cell is a pair: [event to wait for, state to enter],
        // e.g. a program of vec![[6, 2], [5, 1]].
        let cell = program.selector[program.curr_selector as usize];
        let wanted_event = cell[0];
        let next_state = cell[1];

        if event.event != wanted_event {
            return; // keep waiting on this cell
        }

        // Matching event observed: switch the behaviour state.
        state.state = next_state;
        println!("обнаружено событие {}", wanted_event);
        println!("переключаю состояние на {}", next_state);

        // Step to the next cell, wrapping around at the end of the program.
        let last = (cells - 1) as i32;
        if program.curr_selector < last {
            program.curr_selector += 1;
        } else {
            program.curr_selector = 0;
        }
    }
}
/// Activator: reads a monster's current BehaviourState and performs
/// (or keeps performing) the corresponding action.
// Behaviour system: maps state code -> concrete action (sleep, wander, ...).
pub struct BehaviorSystem {
    // Timestamp of the last tick; actions run at most once per
    // 5 * WORLD_SPEED seconds.
    pub behavior_time: PreciseTime,
}
impl System for BehaviorSystem {
    /// Only entities that have a class, a behaviour state and a position.
    fn aspect(&self) -> Aspect {
        aspect_all!(MonsterClass, BehaviourState, Position)
    }

    /// Executes the action that corresponds to the entity's current state.
    ///
    /// State codes:
    ///   0 - init / error            5 - eating
    ///   1 - sleep (nothing happens) 6 - drinking
    ///   2 - awake: wander the area  7 - moving towards a target
    ///   3 - looking for food        8 - checking target reached
    ///   4 - looking for water
    fn process_one(&mut self, entity: &mut Entity) {
        // Throttle: act at most once every 5 * WORLD_SPEED seconds.
        if self.behavior_time.to(PreciseTime::now()) <= Duration::seconds(5 * WORLD_SPEED) {
            return;
        }

        let state = entity.get_component::<BehaviourState>(); // current state
        let mut pos = entity.get_component::<Position>();

        match state.state {
            1 => {
                // Asleep: do nothing.
                println!("...zzz...");
            },
            2 => {
                // Aimless wandering: drift within the 139x139 area,
                // stepping up when x outruns 2*y, otherwise stepping right.
                if pos.x > (pos.y * 2f32) && pos.y < 139f32 {
                    pos.y += 1f32;
                } else if pos.x < 139f32 {
                    pos.x += 1f32;
                }
                println!("x:{}, y:{}", pos.x, pos.y);
                // An elliptical-path variant (x = cx + rx*cos(a), y = cy + ry*sin(a))
                // was sketched here previously but is not active.
            },
            _ => {},
        }

        // Remember when this tick ran.
        self.behavior_time = PreciseTime::now();
    }
}
/// Event generator: inspects monster attributes and raises behaviour events.
pub struct EventSystem {
    // Timestamp of the last scan; events are generated at most once per
    // WORLD_SPEED seconds.
    pub event_time: PreciseTime,
    // Last event raised (0 = init/error); used to avoid raising the same
    // event twice in a row.
    pub event_last: u32,
}
impl System for EventSystem {
    /// Entities that carry attributes and an event slot.
    fn aspect(&self) -> Aspect {
        aspect_all!(MonsterAttributes, BehaviourEvent)
    }

    /// Derives behaviour events from the monster's attributes.
    ///
    /// Event codes:
    ///   0 - init / error        5 - got tired
    ///   1 - food spotted        6 - no events / rested
    ///   2 - water spotted       7 - monster is full
    ///   3 - got hungry          8 - monster has drunk
    ///   4 - got thirsty
    fn process_one(&mut self, entity: &mut Entity) {
        // Throttle: scan at most once every WORLD_SPEED seconds.
        if self.event_time.to(PreciseTime::now()) <= Duration::seconds(WORLD_SPEED) {
            return;
        }

        let mut event = entity.get_component::<BehaviourEvent>(); // event slot
        let attrs = entity.get_component::<MonsterAttributes>(); // attributes

        if event.event == 0 {
            // Error / first run: start from the neutral "no events" code.
            event.event = 6;
            println!("ошибка/инициализация текущего события, теперь он {}", 6);
        } else if attrs.power < 960 && self.event_last != 5 {
            // Power fell below the threshold: the monster is tired.
            event.event = 5;
            self.event_last = event.event;
            println!("Новое событие: монстр устал.");
        } else if attrs.power > 990 && self.event_last != 6 {
            // Power recovered: the monster is rested again.
            event.event = 6;
            self.event_last = event.event;
            println!("Новое событие: монстр отдохнул.");
        }

        // Remember when this scan ran.
        self.event_time = PreciseTime::now();
    }
}
| жда.
| identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.