file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.go | .Redirect(w, r, url, http.StatusFound)
}
// oauth2callback is the handler to which Google's OAuth service redirects the
// user after they have granted the appropriate permissions.
func oauth2callbackHandler(w http.ResponseWriter, r *http.Request) {
// Create an oauth transport with a urlfetch.Transport embedded inside.
t := &oauth.Transport{Config: config(r.Host)}
// Exchange the code for access and refresh tokens.
tok, err := t.Exchange(r.FormValue("code"))
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: exchange")
return
}
o, err := oauth2.New(t.Client())
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: oauth get")
return
}
u, err := o.Userinfo.Get().Do()
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: userinfo get")
return
}
userId := fmt.Sprintf("%s_%s", strings.Split(clientId, ".")[0], u.Id)
if err = storeUserID(w, r, userId); err != nil {
w.WriteHeader(500)
LogPrintf("oauth: store userid")
return | w.WriteHeader(500)
LogPrintf("oauth: json marshal")
return
}
storeCredential(userId, tok, string(userSer))
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func SetupHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("setup: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(401)
LogPrintf("setup: auth")
return
}
setupUser(r, t.Client(), userId)
}
// signout Revokes access for the user and removes the associated credentials from the datastore.
func signoutHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("signout: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(500)
LogPrintf("signout: auth")
return
}
req, err := http.NewRequest("GET", fmt.Sprintf(revokeEndpointFmt, t.Token.RefreshToken), nil)
response, err := http.DefaultClient.Do(req)
if err != nil {
w.WriteHeader(500)
LogPrintf("signout: revoke")
return
}
defer response.Body.Close()
storeUserID(w, r, "")
deleteCredential(userId)
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func sendImageCard(image string, text string, svc *mirror.Service) {
nt := &mirror.TimelineItem{
SpeakableText: text,
MenuItems: []*mirror.MenuItem{&mirror.MenuItem{Action: "READ_ALOUD"}, &mirror.MenuItem{Action: "DELETE"}},
Html: "<img src=\"attachment:0\" width=\"100%\" height=\"100%\">",
Notification: &mirror.NotificationConfig{Level: "DEFAULT"},
}
req := svc.Timeline.Insert(nt)
req.Media(strings.NewReader(image))
_, err := req.Do()
if err != nil {
LogPrintf("sendimage: insert")
return
}
}
func getImageAttachment(conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem) ([]byte, error) {
a, err := svc.Timeline.Attachments.Get(t.Id, t.Attachments[0].Id).Do()
if err != nil {
LogPrintf("getattachment: metadata")
return nil, err
}
req, err := http.NewRequest("GET", a.ContentUrl, nil)
if err != nil {
LogPrintf("getattachment: http")
return nil, err
}
resp, err := trans.RoundTrip(req)
if err != nil {
LogPrintf("getattachment: content")
return nil, err
}
defer resp.Body.Close()
imageData, err := ioutil.ReadAll(resp.Body)
if err != nil {
LogPrintf("getattachment: body")
return nil, err
}
return imageData, nil
}
func notifyOpenGlass(conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem, userId string) {
if !hasFlagSingle(userId, "flags", "user_openglass") {
LogPrintf("openglass: flag user_openglass")
return
}
var err error
flags, err := getUserFlags(userId, "uflags")
if err != nil {
LogPrintf("openglass: uflags")
return
}
if t.Attachments != nil && len(t.Attachments) > 0 {
imageData, err := getImageAttachment(conn, svc, trans, t)
if err != nil {
LogPrintf("openglass: attachment")
return
}
imageRow, err := PicarusApiImageUpload(conn, imageData)
if err != nil {
LogPrintf("openglass: picarus upload")
return
}
pushUserListTrim(userId, "images", imageRow, maxImages)
PicarusApiRowThumb(conn, imageRow)
if hasFlag(flags, "match_memento") {
mementoMatches, _, err := matchMementoImage(conn, imageRow, userId)
if err != nil {
LogPrintf("openglass: memento match")
} else {
for row, note := range mementoMatches {
m, err := conn.GetRow("images", row, []string{picarus.B64Dec(glassImageModel)})
if err != nil {
LogPrintf("openglass: memento get thumb")
continue
}
sendImageCard(m[picarus.B64Dec(glassImageModel)], note, svc)
}
}
}
if hasFlag(flags, "location") && hasFlag(flags, "location:streetview") {
//searchData, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(locationModel))
}
if err != nil {
LogPrintf("openglass: image search")
}
// Warped image example
var imageWarped string
if hasFlag(flags, "warp") {
imageWarped, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(homographyModel))
if err != nil {
LogPrintf("openglass: image warp")
imageWarped = ""
} else {
sendImageCard(imageWarped, "", svc)
}
}
// If there is a caption, send it to the annotation task
if len(t.Text) > 0 {
if hasFlag(flags, "crowdqa") {
imageType := "full"
if strings.HasPrefix(t.Text, "augmented ") {
if len(imageWarped) > 0 {
imageWarpedData := []byte(imageWarped)
imageRowWarped, err := PicarusApiImageUpload(conn, imageWarpedData)
PicarusApiRowThumb(conn, imageRowWarped)
if err != nil {
LogPrintf("openglass: warp image upload")
} else {
imageRow = imageRowWarped
imageData = imageWarpedData
imageType = "augmented"
}
}
t.Text = t.Text[10:] // Remove "augmented "
}
_, err = conn.PatchRow("images", imageRow, map[string]string{"meta:question": t.Text, "meta:openglass_user": userId,
"meta:openglass_image_type": imageType}, map[string][]byte{})
if err != nil {
LogPrintf("openglass: patch image")
return
}
// TODO: Here is where we would resize the image, we can do that later
_, err = conn.PostRow("jobs", annotationTask, map[string]string{"action": "io/annotation/sync"})
if err != nil {
LogPrintf("openglass: sync annotations")
return
}
}
} else {
if hasFlag(flags, "predict") {
confHTML := "<article><section><ul class=\"text-x-small\">"
menuItems := []*mirror.MenuItem{}
for modelName, modelRow := range predictionModels {
confMsgpack, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(modelRow))
if err != nil {
LogPrintf("openglass: predict")
return
}
var value float64
err = msgpack.Unmarshal([]byte(confMsgpack), &value, nil)
if err != nil {
LogPrintf("openglass: predict msgpack")
return
}
confHTML = confHTML + fmt.Sprintf("<li>%s: %f</li>", modelName, value)
menuItems = append(menuItems, &mirror.MenuItem{Action: "CUSTOM", Id: modelName + " 1", Values: []*mirror.MenuValue{&mirror.MenuValue{DisplayName: modelName, IconUrl: fullUrl + "/static/icon_plus.png"}} | }
userSer, err := json.Marshal(u)
if err != nil { | random_line_split |
main.go | (w, r, url, http.StatusFound)
}
// oauth2callback is the handler to which Google's OAuth service redirects the
// user after they have granted the appropriate permissions.
func oauth2callbackHandler(w http.ResponseWriter, r *http.Request) {
// Create an oauth transport with a urlfetch.Transport embedded inside.
t := &oauth.Transport{Config: config(r.Host)}
// Exchange the code for access and refresh tokens.
tok, err := t.Exchange(r.FormValue("code"))
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: exchange")
return
}
o, err := oauth2.New(t.Client())
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: oauth get")
return
}
u, err := o.Userinfo.Get().Do()
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: userinfo get")
return
}
userId := fmt.Sprintf("%s_%s", strings.Split(clientId, ".")[0], u.Id)
if err = storeUserID(w, r, userId); err != nil {
w.WriteHeader(500)
LogPrintf("oauth: store userid")
return
}
userSer, err := json.Marshal(u)
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: json marshal")
return
}
storeCredential(userId, tok, string(userSer))
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func SetupHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("setup: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(401)
LogPrintf("setup: auth")
return
}
setupUser(r, t.Client(), userId)
}
// signout Revokes access for the user and removes the associated credentials from the datastore.
func signoutHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("signout: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(500)
LogPrintf("signout: auth")
return
}
req, err := http.NewRequest("GET", fmt.Sprintf(revokeEndpointFmt, t.Token.RefreshToken), nil)
response, err := http.DefaultClient.Do(req)
if err != nil {
w.WriteHeader(500)
LogPrintf("signout: revoke")
return
}
defer response.Body.Close()
storeUserID(w, r, "")
deleteCredential(userId)
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func sendImageCard(image string, text string, svc *mirror.Service) {
nt := &mirror.TimelineItem{
SpeakableText: text,
MenuItems: []*mirror.MenuItem{&mirror.MenuItem{Action: "READ_ALOUD"}, &mirror.MenuItem{Action: "DELETE"}},
Html: "<img src=\"attachment:0\" width=\"100%\" height=\"100%\">",
Notification: &mirror.NotificationConfig{Level: "DEFAULT"},
}
req := svc.Timeline.Insert(nt)
req.Media(strings.NewReader(image))
_, err := req.Do()
if err != nil {
LogPrintf("sendimage: insert")
return
}
}
func | (conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem) ([]byte, error) {
a, err := svc.Timeline.Attachments.Get(t.Id, t.Attachments[0].Id).Do()
if err != nil {
LogPrintf("getattachment: metadata")
return nil, err
}
req, err := http.NewRequest("GET", a.ContentUrl, nil)
if err != nil {
LogPrintf("getattachment: http")
return nil, err
}
resp, err := trans.RoundTrip(req)
if err != nil {
LogPrintf("getattachment: content")
return nil, err
}
defer resp.Body.Close()
imageData, err := ioutil.ReadAll(resp.Body)
if err != nil {
LogPrintf("getattachment: body")
return nil, err
}
return imageData, nil
}
func notifyOpenGlass(conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem, userId string) {
if !hasFlagSingle(userId, "flags", "user_openglass") {
LogPrintf("openglass: flag user_openglass")
return
}
var err error
flags, err := getUserFlags(userId, "uflags")
if err != nil {
LogPrintf("openglass: uflags")
return
}
if t.Attachments != nil && len(t.Attachments) > 0 {
imageData, err := getImageAttachment(conn, svc, trans, t)
if err != nil {
LogPrintf("openglass: attachment")
return
}
imageRow, err := PicarusApiImageUpload(conn, imageData)
if err != nil {
LogPrintf("openglass: picarus upload")
return
}
pushUserListTrim(userId, "images", imageRow, maxImages)
PicarusApiRowThumb(conn, imageRow)
if hasFlag(flags, "match_memento") {
mementoMatches, _, err := matchMementoImage(conn, imageRow, userId)
if err != nil {
LogPrintf("openglass: memento match")
} else {
for row, note := range mementoMatches {
m, err := conn.GetRow("images", row, []string{picarus.B64Dec(glassImageModel)})
if err != nil {
LogPrintf("openglass: memento get thumb")
continue
}
sendImageCard(m[picarus.B64Dec(glassImageModel)], note, svc)
}
}
}
if hasFlag(flags, "location") && hasFlag(flags, "location:streetview") {
//searchData, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(locationModel))
}
if err != nil {
LogPrintf("openglass: image search")
}
// Warped image example
var imageWarped string
if hasFlag(flags, "warp") {
imageWarped, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(homographyModel))
if err != nil {
LogPrintf("openglass: image warp")
imageWarped = ""
} else {
sendImageCard(imageWarped, "", svc)
}
}
// If there is a caption, send it to the annotation task
if len(t.Text) > 0 {
if hasFlag(flags, "crowdqa") {
imageType := "full"
if strings.HasPrefix(t.Text, "augmented ") {
if len(imageWarped) > 0 {
imageWarpedData := []byte(imageWarped)
imageRowWarped, err := PicarusApiImageUpload(conn, imageWarpedData)
PicarusApiRowThumb(conn, imageRowWarped)
if err != nil {
LogPrintf("openglass: warp image upload")
} else {
imageRow = imageRowWarped
imageData = imageWarpedData
imageType = "augmented"
}
}
t.Text = t.Text[10:] // Remove "augmented "
}
_, err = conn.PatchRow("images", imageRow, map[string]string{"meta:question": t.Text, "meta:openglass_user": userId,
"meta:openglass_image_type": imageType}, map[string][]byte{})
if err != nil {
LogPrintf("openglass: patch image")
return
}
// TODO: Here is where we would resize the image, we can do that later
_, err = conn.PostRow("jobs", annotationTask, map[string]string{"action": "io/annotation/sync"})
if err != nil {
LogPrintf("openglass: sync annotations")
return
}
}
} else {
if hasFlag(flags, "predict") {
confHTML := "<article><section><ul class=\"text-x-small\">"
menuItems := []*mirror.MenuItem{}
for modelName, modelRow := range predictionModels {
confMsgpack, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(modelRow))
if err != nil {
LogPrintf("openglass: predict")
return
}
var value float64
err = msgpack.Unmarshal([]byte(confMsgpack), &value, nil)
if err != nil {
LogPrintf("openglass: predict msgpack")
return
}
confHTML = confHTML + fmt.Sprintf("<li>%s: %f</li>", modelName, value)
menuItems = append(menuItems, &mirror.MenuItem{Action: "CUSTOM", Id: modelName + " 1", Values: []*mirror.MenuValue{&mirror.MenuValue{DisplayName: modelName, IconUrl: fullUrl + "/static/icon_plus.png | getImageAttachment | identifier_name |
snapshots.go | "Content upload reference to use",
},
cli.BoolFlag{
Name: "keep",
Usage: "Keep diff content. up to creator to delete it.",
},
}, commands.LabelFlag),
Action: func(context *cli.Context) error {
var (
idA = context.Args().First()
idB = context.Args().Get(1)
)
if idA == "" {
return errors.New("snapshot id must be provided")
}
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
ctx, done, err := client.WithLease(ctx)
if err != nil {
return err
}
defer done(ctx)
var desc ocispec.Descriptor
labels := commands.LabelArgs(context.StringSlice("label"))
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
if context.Bool("keep") {
labels["containerd.io/gc.root"] = time.Now().UTC().Format(time.RFC3339)
}
opts := []diff.Opt{
diff.WithMediaType(context.String("media-type")),
diff.WithReference(context.String("ref")),
diff.WithLabels(labels),
}
// SOURCE_DATE_EPOCH is propagated via the ctx, so no need to specify diff.WithSourceDateEpoch here
if idB == "" {
desc, err = rootfs.CreateDiff(ctx, idA, snapshotter, client.DiffService(), opts...)
if err != nil {
return err
}
} else {
desc, err = withMounts(ctx, idA, snapshotter, func(a []mount.Mount) (ocispec.Descriptor, error) {
return withMounts(ctx, idB, snapshotter, func(b []mount.Mount) (ocispec.Descriptor, error) {
return client.DiffService().Compare(ctx, a, b, opts...)
})
})
if err != nil {
return err
}
}
ra, err := client.ContentStore().ReaderAt(ctx, desc)
if err != nil {
return err
}
defer ra.Close()
_, err = io.Copy(os.Stdout, content.NewReader(ra))
return err
},
}
func withMounts(ctx gocontext.Context, id string, sn snapshots.Snapshotter, f func(mounts []mount.Mount) (ocispec.Descriptor, error)) (ocispec.Descriptor, error) {
var mounts []mount.Mount
info, err := sn.Stat(ctx, id)
if err != nil {
return ocispec.Descriptor{}, err
}
if info.Kind == snapshots.KindActive {
mounts, err = sn.Mounts(ctx, id)
if err != nil {
return ocispec.Descriptor{}, err
}
} else {
key := fmt.Sprintf("%s-view-key", id)
mounts, err = sn.View(ctx, key, id)
if err != nil {
return ocispec.Descriptor{}, err
}
defer sn.Remove(ctx, key)
}
return f(mounts)
}
var usageCommand = cli.Command{
Name: "usage",
Usage: "Usage snapshots",
ArgsUsage: "[flags] [<key>, ...]",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "b",
Usage: "Display size in bytes",
},
},
Action: func(context *cli.Context) error {
var displaySize func(int64) string
if context.Bool("b") {
displaySize = func(s int64) string {
return strconv.FormatInt(s, 10)
}
} else {
displaySize = func(s int64) string {
return progress.Bytes(s).String()
}
}
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
var (
snapshotter = client.SnapshotService(context.GlobalString("snapshotter"))
tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, ' ', 0)
)
fmt.Fprintln(tw, "KEY\tSIZE\tINODES\t")
if context.NArg() == 0 {
if err := snapshotter.Walk(ctx, func(ctx gocontext.Context, info snapshots.Info) error {
usage, err := snapshotter.Usage(ctx, info.Name)
if err != nil {
return err
}
fmt.Fprintf(tw, "%v\t%s\t%d\t\n", info.Name, displaySize(usage.Size), usage.Inodes)
return nil
}); err != nil {
return err
}
} else {
for _, id := range context.Args() {
usage, err := snapshotter.Usage(ctx, id)
if err != nil {
return err
}
fmt.Fprintf(tw, "%v\t%s\t%d\t\n", id, displaySize(usage.Size), usage.Inodes)
}
}
return tw.Flush()
},
}
var removeCommand = cli.Command{
Name: "delete",
Aliases: []string{"del", "remove", "rm"},
ArgsUsage: "<key> [<key>, ...]",
Usage: "Remove snapshots",
Action: func(context *cli.Context) error {
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
for _, key := range context.Args() {
err = snapshotter.Remove(ctx, key)
if err != nil {
return fmt.Errorf("failed to remove %q: %w", key, err)
}
}
return nil
},
}
var prepareCommand = cli.Command{
Name: "prepare",
Usage: "Prepare a snapshot from a committed snapshot", | Flags: []cli.Flag{
cli.StringFlag{
Name: "target, t",
Usage: "Mount target path, will print mount, if provided",
},
cli.BoolFlag{
Name: "mounts",
Usage: "Print out snapshot mounts as JSON",
},
},
Action: func(context *cli.Context) error {
if narg := context.NArg(); narg < 1 || narg > 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.String("target")
key = context.Args().Get(0)
parent = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
mounts, err := snapshotter.Prepare(ctx, key, parent, snapshots.WithLabels(labels))
if err != nil {
return err
}
if target != "" {
printMounts(target, mounts)
}
if context.Bool("mounts") {
commands.PrintAsJSON(mounts)
}
return nil
},
}
var viewCommand = cli.Command{
Name: "view",
Usage: "Create a read-only snapshot from a committed snapshot",
ArgsUsage: "[flags] <key> [<parent>]",
Flags: []cli.Flag{
cli.StringFlag{
Name: "target, t",
Usage: "Mount target path, will print mount, if provided",
},
cli.BoolFlag{
Name: "mounts",
Usage: "Print out snapshot mounts as JSON",
},
},
Action: func(context *cli.Context) error {
if narg := context.NArg(); narg < 1 || narg > 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.String("target")
key = context.Args().Get(0)
parent = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
mounts, err := snapshotter.View(ctx, key, parent)
if err != nil {
return err
}
if target != "" {
printMounts(target, mounts)
}
if context.Bool("mounts") {
commands.PrintAsJSON(mounts)
}
return nil
},
}
var mountCommand = cli.Command{
Name: "mounts",
Aliases: []string{"m", "mount"},
Usage: "Mount gets mount commands for the snapshots",
ArgsUsage: "<target> <key>",
Action: func(context *cli.Context) error {
if context.NArg() != 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.Args().Get(0)
key | ArgsUsage: "[flags] <key> [<parent>]", | random_line_split |
snapshots.go | "target, t",
Usage: "Mount target path, will print mount, if provided",
},
cli.BoolFlag{
Name: "mounts",
Usage: "Print out snapshot mounts as JSON",
},
},
Action: func(context *cli.Context) error {
if narg := context.NArg(); narg < 1 || narg > 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.String("target")
key = context.Args().Get(0)
parent = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
mounts, err := snapshotter.View(ctx, key, parent)
if err != nil {
return err
}
if target != "" {
printMounts(target, mounts)
}
if context.Bool("mounts") {
commands.PrintAsJSON(mounts)
}
return nil
},
}
var mountCommand = cli.Command{
Name: "mounts",
Aliases: []string{"m", "mount"},
Usage: "Mount gets mount commands for the snapshots",
ArgsUsage: "<target> <key>",
Action: func(context *cli.Context) error {
if context.NArg() != 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.Args().Get(0)
key = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
mounts, err := snapshotter.Mounts(ctx, key)
if err != nil {
return err
}
printMounts(target, mounts)
return nil
},
}
var commitCommand = cli.Command{
Name: "commit",
Usage: "Commit an active snapshot into the provided name",
ArgsUsage: "<key> <active>",
Action: func(context *cli.Context) error {
if context.NArg() != 2 {
return cli.ShowSubcommandHelp(context)
}
var (
key = context.Args().Get(0)
active = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
return snapshotter.Commit(ctx, key, active, snapshots.WithLabels(labels))
},
}
var treeCommand = cli.Command{
Name: "tree",
Usage: "Display tree view of snapshot branches",
Action: func(context *cli.Context) error {
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
var (
snapshotter = client.SnapshotService(context.GlobalString("snapshotter"))
tree = newSnapshotTree()
)
if err := snapshotter.Walk(ctx, func(ctx gocontext.Context, info snapshots.Info) error {
// Get or create node and add node details
tree.add(info)
return nil
}); err != nil {
return err
}
printTree(tree)
return nil
},
}
var infoCommand = cli.Command{
Name: "info",
Usage: "Get info about a snapshot",
ArgsUsage: "<key>",
Action: func(context *cli.Context) error {
if context.NArg() != 1 {
return cli.ShowSubcommandHelp(context)
}
key := context.Args().Get(0)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
info, err := snapshotter.Stat(ctx, key)
if err != nil {
return err
}
commands.PrintAsJSON(info)
return nil
},
}
var setLabelCommand = cli.Command{
Name: "label",
Usage: "Add labels to content",
ArgsUsage: "<name> [<label>=<value> ...]",
Description: "labels snapshots in the snapshotter",
Action: func(context *cli.Context) error {
key, labels := commands.ObjectWithLabelArgs(context)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
info := snapshots.Info{
Name: key,
Labels: map[string]string{},
}
var paths []string
for k, v := range labels {
paths = append(paths, fmt.Sprintf("labels.%s", k))
if v != "" {
info.Labels[k] = v
}
}
// Nothing updated, do no clear
if len(paths) == 0 {
info, err = snapshotter.Stat(ctx, info.Name)
} else {
info, err = snapshotter.Update(ctx, info, paths...)
}
if err != nil {
return err
}
var labelStrings []string
for k, v := range info.Labels {
labelStrings = append(labelStrings, fmt.Sprintf("%s=%s", k, v))
}
fmt.Println(strings.Join(labelStrings, ","))
return nil
},
}
var unpackCommand = cli.Command{
Name: "unpack",
Usage: "Unpack applies layers from a manifest to a snapshot",
ArgsUsage: "[flags] <digest>",
Flags: commands.SnapshotterFlags,
Action: func(context *cli.Context) error {
dgst, err := digest.Parse(context.Args().First())
if err != nil {
return err
}
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
log.G(ctx).Debugf("unpacking layers from manifest %s", dgst.String())
// TODO: Support unpack by name
images, err := client.ListImages(ctx)
if err != nil {
return err
}
var unpacked bool
for _, image := range images {
if image.Target().Digest == dgst {
fmt.Printf("unpacking %s (%s)...", dgst, image.Target().MediaType)
if err := image.Unpack(ctx, context.String("snapshotter")); err != nil {
fmt.Println()
return err
}
fmt.Println("done")
unpacked = true
break
}
}
if !unpacked {
return errors.New("manifest not found")
}
// TODO: Get rootfs from Image
//log.G(ctx).Infof("chain ID: %s", chainID.String())
return nil
},
}
type snapshotTree struct {
nodes []*snapshotTreeNode
index map[string]*snapshotTreeNode
}
func newSnapshotTree() *snapshotTree {
return &snapshotTree{
index: make(map[string]*snapshotTreeNode),
}
}
type snapshotTreeNode struct {
info snapshots.Info
children []string
}
func (st *snapshotTree) add(info snapshots.Info) *snapshotTreeNode {
entry, ok := st.index[info.Name]
if !ok {
entry = &snapshotTreeNode{info: info}
st.nodes = append(st.nodes, entry)
st.index[info.Name] = entry
} else {
entry.info = info // update info if we created placeholder
}
if info.Parent != "" {
pn := st.get(info.Parent)
if pn == nil {
// create a placeholder
pn = st.add(snapshots.Info{Name: info.Parent})
}
pn.children = append(pn.children, info.Name)
}
return entry
}
func (st *snapshotTree) get(name string) *snapshotTreeNode {
return st.index[name]
}
func printTree(st *snapshotTree) {
for _, node := range st.nodes {
// Print for root(parent-less) nodes only
if node.info.Parent == "" {
printNode(node.info.Name, st, 0)
}
}
}
func printNode(name string, tree *snapshotTree, level int) {
node := tree.index[name]
prefix := strings.Repeat(" ", level)
if level > 0 {
prefix += "\\_"
}
fmt.Printf(prefix+" %s\n", node.info.Name)
level++
for _, child := range node.children {
printNode(child, tree, level)
}
}
func printMounts(target string, mounts []mount.Mount) | {
// FIXME: This is specific to Unix
for _, m := range mounts {
fmt.Printf("mount -t %s %s %s -o %s\n", m.Type, m.Source, filepath.Join(target, m.Target), strings.Join(m.Options, ","))
}
} | identifier_body | |
snapshots.go | )
}
return nil
},
}
var viewCommand = cli.Command{
Name: "view",
Usage: "Create a read-only snapshot from a committed snapshot",
ArgsUsage: "[flags] <key> [<parent>]",
Flags: []cli.Flag{
cli.StringFlag{
Name: "target, t",
Usage: "Mount target path, will print mount, if provided",
},
cli.BoolFlag{
Name: "mounts",
Usage: "Print out snapshot mounts as JSON",
},
},
Action: func(context *cli.Context) error {
if narg := context.NArg(); narg < 1 || narg > 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.String("target")
key = context.Args().Get(0)
parent = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
mounts, err := snapshotter.View(ctx, key, parent)
if err != nil {
return err
}
if target != "" {
printMounts(target, mounts)
}
if context.Bool("mounts") {
commands.PrintAsJSON(mounts)
}
return nil
},
}
var mountCommand = cli.Command{
Name: "mounts",
Aliases: []string{"m", "mount"},
Usage: "Mount gets mount commands for the snapshots",
ArgsUsage: "<target> <key>",
Action: func(context *cli.Context) error {
if context.NArg() != 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.Args().Get(0)
key = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
mounts, err := snapshotter.Mounts(ctx, key)
if err != nil {
return err
}
printMounts(target, mounts)
return nil
},
}
var commitCommand = cli.Command{
Name: "commit",
Usage: "Commit an active snapshot into the provided name",
ArgsUsage: "<key> <active>",
Action: func(context *cli.Context) error {
if context.NArg() != 2 {
return cli.ShowSubcommandHelp(context)
}
var (
key = context.Args().Get(0)
active = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
return snapshotter.Commit(ctx, key, active, snapshots.WithLabels(labels))
},
}
var treeCommand = cli.Command{
Name: "tree",
Usage: "Display tree view of snapshot branches",
Action: func(context *cli.Context) error {
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
var (
snapshotter = client.SnapshotService(context.GlobalString("snapshotter"))
tree = newSnapshotTree()
)
if err := snapshotter.Walk(ctx, func(ctx gocontext.Context, info snapshots.Info) error {
// Get or create node and add node details
tree.add(info)
return nil
}); err != nil {
return err
}
printTree(tree)
return nil
},
}
var infoCommand = cli.Command{
Name: "info",
Usage: "Get info about a snapshot",
ArgsUsage: "<key>",
Action: func(context *cli.Context) error {
if context.NArg() != 1 {
return cli.ShowSubcommandHelp(context)
}
key := context.Args().Get(0)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
info, err := snapshotter.Stat(ctx, key)
if err != nil {
return err
}
commands.PrintAsJSON(info)
return nil
},
}
var setLabelCommand = cli.Command{
Name: "label",
Usage: "Add labels to content",
ArgsUsage: "<name> [<label>=<value> ...]",
Description: "labels snapshots in the snapshotter",
Action: func(context *cli.Context) error {
key, labels := commands.ObjectWithLabelArgs(context)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
info := snapshots.Info{
Name: key,
Labels: map[string]string{},
}
var paths []string
for k, v := range labels {
paths = append(paths, fmt.Sprintf("labels.%s", k))
if v != "" {
info.Labels[k] = v
}
}
// Nothing updated, do no clear
if len(paths) == 0 {
info, err = snapshotter.Stat(ctx, info.Name)
} else {
info, err = snapshotter.Update(ctx, info, paths...)
}
if err != nil {
return err
}
var labelStrings []string
for k, v := range info.Labels {
labelStrings = append(labelStrings, fmt.Sprintf("%s=%s", k, v))
}
fmt.Println(strings.Join(labelStrings, ","))
return nil
},
}
var unpackCommand = cli.Command{
Name: "unpack",
Usage: "Unpack applies layers from a manifest to a snapshot",
ArgsUsage: "[flags] <digest>",
Flags: commands.SnapshotterFlags,
Action: func(context *cli.Context) error {
dgst, err := digest.Parse(context.Args().First())
if err != nil {
return err
}
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
log.G(ctx).Debugf("unpacking layers from manifest %s", dgst.String())
// TODO: Support unpack by name
images, err := client.ListImages(ctx)
if err != nil {
return err
}
var unpacked bool
for _, image := range images {
if image.Target().Digest == dgst {
fmt.Printf("unpacking %s (%s)...", dgst, image.Target().MediaType)
if err := image.Unpack(ctx, context.String("snapshotter")); err != nil {
fmt.Println()
return err
}
fmt.Println("done")
unpacked = true
break
}
}
if !unpacked {
return errors.New("manifest not found")
}
// TODO: Get rootfs from Image
//log.G(ctx).Infof("chain ID: %s", chainID.String())
return nil
},
}
type snapshotTree struct {
nodes []*snapshotTreeNode
index map[string]*snapshotTreeNode
}
func newSnapshotTree() *snapshotTree {
return &snapshotTree{
index: make(map[string]*snapshotTreeNode),
}
}
type snapshotTreeNode struct {
info snapshots.Info
children []string
}
func (st *snapshotTree) add(info snapshots.Info) *snapshotTreeNode {
entry, ok := st.index[info.Name]
if !ok {
entry = &snapshotTreeNode{info: info}
st.nodes = append(st.nodes, entry)
st.index[info.Name] = entry
} else {
entry.info = info // update info if we created placeholder
}
if info.Parent != "" {
pn := st.get(info.Parent)
if pn == nil {
// create a placeholder
pn = st.add(snapshots.Info{Name: info.Parent})
}
pn.children = append(pn.children, info.Name)
}
return entry
}
func (st *snapshotTree) get(name string) *snapshotTreeNode {
return st.index[name]
}
func printTree(st *snapshotTree) {
for _, node := range st.nodes {
// Print for root(parent-less) nodes only
if node.info.Parent == "" {
printNode(node.info.Name, st, 0)
}
}
}
func printNode(name string, tree *snapshotTree, level int) {
node := tree.index[name]
prefix := strings.Repeat(" ", level)
if level > 0 {
prefix += "\\_"
}
fmt.Printf(prefix+" %s\n", node.info.Name)
level++
for _, child := range node.children {
printNode(child, tree, level)
}
}
func | printMounts | identifier_name | |
snapshots.go | (ctx, id)
if err != nil {
return ocispec.Descriptor{}, err
}
if info.Kind == snapshots.KindActive {
mounts, err = sn.Mounts(ctx, id)
if err != nil {
return ocispec.Descriptor{}, err
}
} else {
key := fmt.Sprintf("%s-view-key", id)
mounts, err = sn.View(ctx, key, id)
if err != nil {
return ocispec.Descriptor{}, err
}
defer sn.Remove(ctx, key)
}
return f(mounts)
}
var usageCommand = cli.Command{
Name: "usage",
Usage: "Usage snapshots",
ArgsUsage: "[flags] [<key>, ...]",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "b",
Usage: "Display size in bytes",
},
},
Action: func(context *cli.Context) error {
var displaySize func(int64) string
if context.Bool("b") {
displaySize = func(s int64) string {
return strconv.FormatInt(s, 10)
}
} else {
displaySize = func(s int64) string {
return progress.Bytes(s).String()
}
}
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
var (
snapshotter = client.SnapshotService(context.GlobalString("snapshotter"))
tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, ' ', 0)
)
fmt.Fprintln(tw, "KEY\tSIZE\tINODES\t")
if context.NArg() == 0 {
if err := snapshotter.Walk(ctx, func(ctx gocontext.Context, info snapshots.Info) error {
usage, err := snapshotter.Usage(ctx, info.Name)
if err != nil {
return err
}
fmt.Fprintf(tw, "%v\t%s\t%d\t\n", info.Name, displaySize(usage.Size), usage.Inodes)
return nil
}); err != nil {
return err
}
} else {
for _, id := range context.Args() {
usage, err := snapshotter.Usage(ctx, id)
if err != nil {
return err
}
fmt.Fprintf(tw, "%v\t%s\t%d\t\n", id, displaySize(usage.Size), usage.Inodes)
}
}
return tw.Flush()
},
}
var removeCommand = cli.Command{
Name: "delete",
Aliases: []string{"del", "remove", "rm"},
ArgsUsage: "<key> [<key>, ...]",
Usage: "Remove snapshots",
Action: func(context *cli.Context) error {
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
for _, key := range context.Args() {
err = snapshotter.Remove(ctx, key)
if err != nil {
return fmt.Errorf("failed to remove %q: %w", key, err)
}
}
return nil
},
}
var prepareCommand = cli.Command{
Name: "prepare",
Usage: "Prepare a snapshot from a committed snapshot",
ArgsUsage: "[flags] <key> [<parent>]",
Flags: []cli.Flag{
cli.StringFlag{
Name: "target, t",
Usage: "Mount target path, will print mount, if provided",
},
cli.BoolFlag{
Name: "mounts",
Usage: "Print out snapshot mounts as JSON",
},
},
Action: func(context *cli.Context) error {
if narg := context.NArg(); narg < 1 || narg > 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.String("target")
key = context.Args().Get(0)
parent = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
mounts, err := snapshotter.Prepare(ctx, key, parent, snapshots.WithLabels(labels))
if err != nil {
return err
}
if target != "" {
printMounts(target, mounts)
}
if context.Bool("mounts") {
commands.PrintAsJSON(mounts)
}
return nil
},
}
var viewCommand = cli.Command{
Name: "view",
Usage: "Create a read-only snapshot from a committed snapshot",
ArgsUsage: "[flags] <key> [<parent>]",
Flags: []cli.Flag{
cli.StringFlag{
Name: "target, t",
Usage: "Mount target path, will print mount, if provided",
},
cli.BoolFlag{
Name: "mounts",
Usage: "Print out snapshot mounts as JSON",
},
},
Action: func(context *cli.Context) error {
if narg := context.NArg(); narg < 1 || narg > 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.String("target")
key = context.Args().Get(0)
parent = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
mounts, err := snapshotter.View(ctx, key, parent)
if err != nil {
return err
}
if target != "" {
printMounts(target, mounts)
}
if context.Bool("mounts") {
commands.PrintAsJSON(mounts)
}
return nil
},
}
var mountCommand = cli.Command{
Name: "mounts",
Aliases: []string{"m", "mount"},
Usage: "Mount gets mount commands for the snapshots",
ArgsUsage: "<target> <key>",
Action: func(context *cli.Context) error {
if context.NArg() != 2 {
return cli.ShowSubcommandHelp(context)
}
var (
target = context.Args().Get(0)
key = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
mounts, err := snapshotter.Mounts(ctx, key)
if err != nil {
return err
}
printMounts(target, mounts)
return nil
},
}
var commitCommand = cli.Command{
Name: "commit",
Usage: "Commit an active snapshot into the provided name",
ArgsUsage: "<key> <active>",
Action: func(context *cli.Context) error {
if context.NArg() != 2 {
return cli.ShowSubcommandHelp(context)
}
var (
key = context.Args().Get(0)
active = context.Args().Get(1)
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
return snapshotter.Commit(ctx, key, active, snapshots.WithLabels(labels))
},
}
var treeCommand = cli.Command{
Name: "tree",
Usage: "Display tree view of snapshot branches",
Action: func(context *cli.Context) error {
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
var (
snapshotter = client.SnapshotService(context.GlobalString("snapshotter"))
tree = newSnapshotTree()
)
if err := snapshotter.Walk(ctx, func(ctx gocontext.Context, info snapshots.Info) error {
// Get or create node and add node details
tree.add(info)
return nil
}); err != nil {
return err
}
printTree(tree)
return nil
},
}
var infoCommand = cli.Command{
Name: "info",
Usage: "Get info about a snapshot",
ArgsUsage: "<key>",
Action: func(context *cli.Context) error {
if context.NArg() != 1 {
return cli.ShowSubcommandHelp(context)
}
key := context.Args().Get(0)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil | {
return err
} | conditional_block | |
server.py | shake( | v):
key = base64.b64encode(hashlib.sha1(v + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11').digest())
response = 'HTTP/1.1 101 Switching Protocols\r\n' \
'Upgrade: websocket\r\n' \
'Connection: Upgrade\r\n' \
'Sec-WebSocket-Accept:' + key + '\r\n\r\n'
conn.send(response)
self.socket_list.add(conn)
# 超时时长, 文件名, 缓存大小
self.session[conn.fileno()] = dict(buffer='', length=0, no=0)
self.ws_send(conn, 'init')
def ws_process(self, conn, size=1024*1024):
data = conn.recv(size)
sesskey = conn.fileno()
if sesskey not in self.session or 'buffer' not in self.session[sesskey]:
self.ws_send(conn, 'session error!')
if conn in self.socket_list:
self.socket_list.remove(conn)
conn.close()
return
self.session[sesskey]['buffer'] += data
# 可能关闭连接,销毁session
while sesskey in self.session and self.session[sesskey]['buffer']:
if self.session[sesskey]['length'] == 0:
b = self.session[sesskey]['buffer']
if len(b) < 14:
break
len_flag = ord(b[1]) & 127 # 数据长度
if len_flag == 126:
self.session[sesskey]['length'] = ord(b[2]) * 256 + ord(b[3]) + 8
elif len_flag == 127:
self.session[sesskey]['length'] = reduce(lambda y, z: y * 256 + z, map(lambda x: ord(x), b[2:9])) + 14
else:
self.session[sesskey]['length'] = len_flag + 6
# logging.info("length %d, buffer %d" % (self.session[sesskey]['length'], len(self.session[sesskey]['buffer'])))
if self.session[sesskey]['length'] <= len(self.session[sesskey]['buffer']) \
and self.session[sesskey]['length'] != 0:
# 处理完整包
pack_data = self.session[sesskey]['buffer'][:self.session[sesskey]['length']]
if len(self.session[sesskey]['buffer']) > self.session[sesskey]['length']:
self.session[sesskey]['buffer'] = self.session[sesskey]['buffer'][self.session[sesskey]['length']:]
else:
self.session[sesskey]['buffer'] = ''
self.session[sesskey]['length'] = 0
self.package_process(conn, pack_data)
else:
break
def package_process(self, conn, data):
# logging.info(data)
FIN = ord(data[0]) & 128 # 结束位
Opcode = ord(data[0]) & 112 # 操作码
is_mask = ord(data[1]) & 128 # 是否加掩码
len_flag = ord(data[1]) & 127 # 数据长度
if len_flag == 126:
mask = data[4:8]
length = ord(data[2]) * 256 + ord(data[3])
raw = data[8:]
elif len_flag == 127:
mask = data[10:14]
raw = data[14:]
length = reduce(lambda y, z: y * 256 + z, map(lambda x: ord(x), data[2:9]))
else:
mask = data[2:6]
raw = data[6:]
length = len_flag
ret = ''
for cnt, d in enumerate(raw):
ret += chr(ord(d) ^ ord(mask[cnt % 4]))
if not ret:
pass
# logging.debug("frame info FIN %d Opcode %d mask %d length %d " % (FIN, Opcode, is_mask, length))
# hexstr = binascii.b2a_hex(data)
# bsstr = bin(int(hexstr, 16))[2:]
# logging.debug(bsstr)
sesskey = conn.fileno()
session = self.session[sesskey]
if not ret or ret is False:
# if conn in self.socket_list:
# self.socket_list.remove(conn)
logging.info("ignore empty msg")
self.ws_send(conn, 'empty:%d' % session['no'])
# conn.close()
return
try:
# logging.info(ret[:10])
msg = self.params_data(ret)
except Exception, e:
# logging.exception(e)
logging.debug("error:%d" % session['no'])
self.ws_send(conn, "error:%d" % session['no'])
return
if "a" in msg:
if msg['a'] == 'init':
self.session[sesskey]['name'] = msg['name']
# self.ws_send(conn, 'ok:0')
self.session[sesskey]['filebuffer'] = []
self.session[sesskey]['no'] = 0
self.session[sesskey]['file'] = open(
os.path.join(os.path.dirname(__file__), 'upload', msg['name']), 'ab')
elif msg['a'] == 'ping':
self.ws_send(conn, "ok:%d" % (self.session[sesskey]['no']))
elif msg['a'] == 'f':
logging.info('a %s s %d e %d n %d' % (msg['a'], msg['s'], msg['e'], msg['n']))
start, end = msg['s'], msg['e']
length = end - start
if msg['n'] != session['no']:
if msg['n'] < session['no']:
logging.info('already msg %d' % msg['n'])
self.ws_send(conn, 'already:%d' % msg['n'])
else:
logging.info("ignore msg %d %d" % (msg['n'], session['no']))
self.ws_send(conn, "retry:%d" % (session['no']))
elif length != len(msg['d']):
logging.info("error length msg %d %d" % (length, len(msg['d'])))
self.ws_send(conn, "retry:%d" % (msg['n']))
else:
self.session[sesskey]['filebuffer'].append(msg['d'])
self.session[sesskey]['no'] += 1
# logging.info('ok msg %d' % msg['n'])
# 每1M写入一次
if len(session['filebuffer']) > 128:
for i in session['filebuffer']:
self.session[sesskey]['file'].write(i)
self.session[sesskey]['filebuffer'] = []
self.ws_send(conn, "ok:%d" % (msg['n']))
elif msg['a'] == 'over':
for i in session['filebuffer']:
self.session[sesskey]['file'].write(i)
self.session[sesskey]['filebuffer'] = []
self.session[sesskey]['file'].close()
logging.info("over")
self.ws_send(conn, "over")
elif msg['a'] == 'check':
logging.info("check file md5 : %s" % msg['hash'])
with open(os.path.join(os.path.dirname(__file__), 'upload', session['name']), 'rb') as f:
md5 = md5_for_file(f)
logging.info(md5)
self.ws_send(conn, "check:%s" % md5)
elif msg['a'] == 'closed':
logging.info("closed")
self.ws_close(conn)
@staticmethod
def ws_send(conn, data):
head = '\x81'
if len(data) < 126:
head += struct.pack('B', len(data))
elif len(data) <= 0xFFFF:
head += struct.pack('!BH', 126, len(data))
else:
head += struct.pack('!BQ', 127, len(data))
conn.send(head + data)
def ws_close(self, conn):
msg = '\x88\x00'
conn.send(msg)
fileno = conn.fileno()
logging.info("close conn %d" % fileno)
if fileno in self.session:
self.session.pop(fileno)
if conn in self.socket_list:
self.socket_list.remove(conn)
def protocol(self, conn):
data = conn.recv(8192)
is_ws = False
query = data.split('\r\n\r\n')[0].split('\r\n')
head = query[0].split(' ')
path = '/'
if len(head) > 2:
path = head[1]
logging.info(path)
for line in query[1:]:
k, v = line.split(': ')
# 带key头,为ws连接
| self, conn, | identifier_name |
server.py | shake(self, conn, v):
key = base64.b64encode(hashlib.sha1(v + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11').digest())
response = 'HTTP/1.1 101 Switching Protocols\r\n' \
'Upgrade: websocket\r\n' \
'Connection: Upgrade\r\n' \
'Sec-WebSocket-Accept:' + key + '\r\n\r\n'
conn.send(response)
self.socket_list.add(conn)
# 超时时长, 文件名, 缓存大小
self.session[conn.fileno()] = dict(buffer='', length=0, no=0)
self.ws_send(conn, 'init')
def ws_process(self, conn, size=1024*1024):
data = conn.recv(size)
sesskey = conn.fileno()
if sesskey not in self.session or 'buffer' not in self.session[sesskey]:
self.ws_send(conn, 'session error!')
if conn in self.socket_list:
self.socket_list.remove(conn)
conn.close()
return
self.session[sesskey]['buffer'] += data
# 可能关闭连接,销毁session
while sesskey in self.session and self.session[sesskey]['buffer']:
if self.session[sesskey]['length'] == 0:
b = self.session[sesskey]['buffer']
if len(b) < 14:
break
len_flag = ord(b[1]) & 127 # 数据长度
if len_flag == 126:
self.session[sesskey]['length'] = ord(b[2]) * 256 + ord(b[3]) + 8
elif len_flag == 127:
self.session[sesskey]['length'] = reduce(lambda y, z: y * 256 + z, map(lambda x: ord(x), b[2:9])) + 14
else:
self.session[sesskey]['length'] = len_flag + 6
# logging.info("length %d, buffer %d" % (self.session[sesskey]['length'], len(self.session[sesskey]['buffer'])))
if self.session[sesskey]['length'] <= len(self.session[sesskey]['buffer']) \
and self.session[sesskey]['length'] != 0:
# 处理完整包
pack_data = self.session[sesskey]['buffer'][:self.session[sesskey]['length']]
if len(self.session[sesskey]['buffer']) > self.session[sesskey]['length']:
self.session[sesskey]['buffer'] = self.session[sesskey]['buffer'][self.session[sesskey]['length']:]
else:
self.session[sesskey]['buffer'] = ''
self.session[sesskey]['length'] = 0
self.package_process(conn, pack_data)
else:
break
def package_process(self, conn, data):
# logging.info(data)
FIN = ord(data[0]) & 128 # 结束位
Opcode = ord(data[0]) & 112 # 操作码
is_mask = ord(data[1]) & 128 # 是否加掩码
len_flag = ord(data[1]) & 127 # 数据长度
if len_flag == 126:
mask = data[4:8]
length = ord(data[2]) * 256 + ord(data[3])
raw = data[8:]
elif len_flag == 127:
mask = data[10:14]
| length = len_flag
ret = ''
for cnt, d in enumerate(raw):
ret += chr(ord(d) ^ ord(mask[cnt % 4]))
if not ret:
pass
# logging.debug("frame info FIN %d Opcode %d mask %d length %d " % (FIN, Opcode, is_mask, length))
# hexstr = binascii.b2a_hex(data)
# bsstr = bin(int(hexstr, 16))[2:]
# logging.debug(bsstr)
sesskey = conn.fileno()
session = self.session[sesskey]
if not ret or ret is False:
# if conn in self.socket_list:
# self.socket_list.remove(conn)
logging.info("ignore empty msg")
self.ws_send(conn, 'empty:%d' % session['no'])
# conn.close()
return
try:
# logging.info(ret[:10])
msg = self.params_data(ret)
except Exception, e:
# logging.exception(e)
logging.debug("error:%d" % session['no'])
self.ws_send(conn, "error:%d" % session['no'])
return
if "a" in msg:
if msg['a'] == 'init':
self.session[sesskey]['name'] = msg['name']
# self.ws_send(conn, 'ok:0')
self.session[sesskey]['filebuffer'] = []
self.session[sesskey]['no'] = 0
self.session[sesskey]['file'] = open(
os.path.join(os.path.dirname(__file__), 'upload', msg['name']), 'ab')
elif msg['a'] == 'ping':
self.ws_send(conn, "ok:%d" % (self.session[sesskey]['no']))
elif msg['a'] == 'f':
logging.info('a %s s %d e %d n %d' % (msg['a'], msg['s'], msg['e'], msg['n']))
start, end = msg['s'], msg['e']
length = end - start
if msg['n'] != session['no']:
if msg['n'] < session['no']:
logging.info('already msg %d' % msg['n'])
self.ws_send(conn, 'already:%d' % msg['n'])
else:
logging.info("ignore msg %d %d" % (msg['n'], session['no']))
self.ws_send(conn, "retry:%d" % (session['no']))
elif length != len(msg['d']):
logging.info("error length msg %d %d" % (length, len(msg['d'])))
self.ws_send(conn, "retry:%d" % (msg['n']))
else:
self.session[sesskey]['filebuffer'].append(msg['d'])
self.session[sesskey]['no'] += 1
# logging.info('ok msg %d' % msg['n'])
# 每1M写入一次
if len(session['filebuffer']) > 128:
for i in session['filebuffer']:
self.session[sesskey]['file'].write(i)
self.session[sesskey]['filebuffer'] = []
self.ws_send(conn, "ok:%d" % (msg['n']))
elif msg['a'] == 'over':
for i in session['filebuffer']:
self.session[sesskey]['file'].write(i)
self.session[sesskey]['filebuffer'] = []
self.session[sesskey]['file'].close()
logging.info("over")
self.ws_send(conn, "over")
elif msg['a'] == 'check':
logging.info("check file md5 : %s" % msg['hash'])
with open(os.path.join(os.path.dirname(__file__), 'upload', session['name']), 'rb') as f:
md5 = md5_for_file(f)
logging.info(md5)
self.ws_send(conn, "check:%s" % md5)
elif msg['a'] == 'closed':
logging.info("closed")
self.ws_close(conn)
@staticmethod
def ws_send(conn, data):
head = '\x81'
if len(data) < 126:
head += struct.pack('B', len(data))
elif len(data) <= 0xFFFF:
head += struct.pack('!BH', 126, len(data))
else:
head += struct.pack('!BQ', 127, len(data))
conn.send(head + data)
def ws_close(self, conn):
msg = '\x88\x00'
conn.send(msg)
fileno = conn.fileno()
logging.info("close conn %d" % fileno)
if fileno in self.session:
self.session.pop(fileno)
if conn in self.socket_list:
self.socket_list.remove(conn)
def protocol(self, conn):
data = conn.recv(8192)
is_ws = False
query = data.split('\r\n\r\n')[0].split('\r\n')
head = query[0].split(' ')
path = '/'
if len(head) > 2:
path = head[1]
logging.info(path)
for line in query[1:]:
k, v = line.split(': ')
# 带key头,为ws连接
| raw = data[14:]
length = reduce(lambda y, z: y * 256 + z, map(lambda x: ord(x), data[2:9]))
else:
mask = data[2:6]
raw = data[6:]
| random_line_split |
server.py | shake(self, conn, v):
key = base64.b64encode(hashlib.sha1(v + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11').digest())
response = 'HTTP/1.1 101 Switching Protocols\r\n' \
'Upgrade: websocket\r\n' \
'Connection: Upgrade\r\n' \
'Sec-WebSocket-Accept:' + key + '\r\n\r\n'
conn.send(response)
self.socket_list.add(conn)
# 超时时长, 文件名, 缓存大小
self.session[conn.fileno()] = dict(buffer='', length=0, no=0)
self.ws_send(conn, 'init')
def ws_process(self, conn, size=1024*1024):
data = conn.recv(size)
sesskey = conn.fileno()
if sesskey not in self.session or 'buffer' not in self.session[sesskey]:
self.ws_send(conn, 'session error!')
if conn in self.socket_list:
self.socket_list.remove(conn)
conn.close()
return
self.session[sesskey]['buffer'] += data
# 可能关闭连接,销毁session
while sesskey in self.session and self.session[sesskey]['buffer']:
if self.session[sesskey]['length'] == 0:
b = self.session[sesskey]['buffer']
if len(b) < 14:
break
len_flag = ord(b[1]) & 127 # 数据长度
if len_flag == 126:
self.session[sesskey]['length'] = ord(b[2]) * 256 + ord(b[3]) + 8
elif len_flag == 127:
self.session[sesskey]['length'] = reduce(lambda y, z: y * 256 + z, map(lambda x: ord(x), b[2:9])) + 14
else:
self.session[sesskey]['length'] = len_flag + 6
# logging.info("length %d, buffer %d" % (self.session[sesskey]['length'], len(self.session[sesskey]['buffer'])))
if self.session[sesskey]['length'] <= len(self.session[sesskey]['buffer']) \
and self.session[sesskey]['length'] != 0:
# 处理完整包
pack_data = self.session[sesskey]['buffer'][:self.session[sesskey]['length']]
if len(self.session[sesskey]['buffer']) > self.session[sesskey]['length']:
self.session[sesskey]['buffer'] = self.session[sesskey]['buffer'][self.session[sesskey]['length']:]
else:
self.session[sesskey]['buffer'] = ''
self.session[sesskey]['length'] = 0
self.package_process(conn, pack_data)
else:
break
def package_process(self, conn, data):
# logging.info(data)
FIN = ord(data[0]) & 128 # 结束位
Opcode = ord(data[0]) & 112 # 操作码
is_mask = ord(data[1]) & 128 # 是否加掩码
len_flag = ord(data[1]) & 127 # 数据长度
if len_flag == 126:
mask = data[4:8]
length = ord(data[2]) * 256 + ord(data[3])
raw = data[8:]
elif len_flag == 127:
mask = data[10:14]
raw = data[14:]
length = reduce(lambda y, z: y * 256 + z, map(lambda x: ord(x), data[2:9]))
else:
mask = data[2:6]
raw = data[6:]
length = len_flag
ret = ''
for cnt, d in enumerate(raw):
ret += chr(ord(d) ^ ord(mask[cnt % 4]))
if not ret:
pass
# logging.debug("frame info FIN %d Opcode %d mask %d length %d " % (FIN, Opcode, is_mask, length))
# hexstr = binascii.b2a_hex(data)
# bsstr = bin(int(hexstr, 16))[2:]
# logging.debug(bsstr)
sesskey = conn.fileno()
session = self.session[sesskey]
if not ret or ret is False:
# if conn in self.socket_list:
# self.socket_list.remove(conn)
logging.info("ignore empty msg")
self.ws_send(conn, 'empty:%d' % session['no'])
# conn.close()
return
try:
# logging.info(ret[:10])
msg = self.params_data(ret)
except Exception, e:
# logging.exception(e)
logging.debug("error:%d" % session['no'])
self.ws_send(conn, "error:%d" % session['no'])
return
if "a" in msg:
if msg['a'] == 'init':
self.session[sesskey]['name'] = msg['name']
# self.ws_send(conn, 'ok:0')
| ey]['no']))
elif msg['a'] == 'f':
logging.info('a %s s %d e %d n %d' % (msg['a'], msg['s'], msg['e'], msg['n']))
start, end = msg['s'], msg['e']
length = end - start
if msg['n'] != session['no']:
if msg['n'] < session['no']:
logging.info('already msg %d' % msg['n'])
self.ws_send(conn, 'already:%d' % msg['n'])
else:
logging.info("ignore msg %d %d" % (msg['n'], session['no']))
self.ws_send(conn, "retry:%d" % (session['no']))
elif length != len(msg['d']):
logging.info("error length msg %d %d" % (length, len(msg['d'])))
self.ws_send(conn, "retry:%d" % (msg['n']))
else:
self.session[sesskey]['filebuffer'].append(msg['d'])
self.session[sesskey]['no'] += 1
# logging.info('ok msg %d' % msg['n'])
# 每1M写入一次
if len(session['filebuffer']) > 128:
for i in session['filebuffer']:
self.session[sesskey]['file'].write(i)
self.session[sesskey]['filebuffer'] = []
self.ws_send(conn, "ok:%d" % (msg['n']))
elif msg['a'] == 'over':
for i in session['filebuffer']:
self.session[sesskey]['file'].write(i)
self.session[sesskey]['filebuffer'] = []
self.session[sesskey]['file'].close()
logging.info("over")
self.ws_send(conn, "over")
elif msg['a'] == 'check':
logging.info("check file md5 : %s" % msg['hash'])
with open(os.path.join(os.path.dirname(__file__), 'upload', session['name']), 'rb') as f:
md5 = md5_for_file(f)
logging.info(md5)
self.ws_send(conn, "check:%s" % md5)
elif msg['a'] == 'closed':
logging.info("closed")
self.ws_close(conn)
@staticmethod
def ws_send(conn, data):
head = '\x81'
if len(data) < 126:
head += struct.pack('B', len(data))
elif len(data) <= 0xFFFF:
head += struct.pack('!BH', 126, len(data))
else:
head += struct.pack('!BQ', 127, len(data))
conn.send(head + data)
def ws_close(self, conn):
msg = '\x88\x00'
conn.send(msg)
fileno = conn.fileno()
logging.info("close conn %d" % fileno)
if fileno in self.session:
self.session.pop(fileno)
if conn in self.socket_list:
self.socket_list.remove(conn)
def protocol(self, conn):
data = conn.recv(8192)
is_ws = False
query = data.split('\r\n\r\n')[0].split('\r\n')
head = query[0].split(' ')
path = '/'
if len(head) > 2:
path = head[1]
logging.info(path)
for line in query[1:]:
k, v = line.split(': ')
# 带key头,为ws连接
| self.session[sesskey]['filebuffer'] = []
self.session[sesskey]['no'] = 0
self.session[sesskey]['file'] = open(
os.path.join(os.path.dirname(__file__), 'upload', msg['name']), 'ab')
elif msg['a'] == 'ping':
self.ws_send(conn, "ok:%d" % (self.session[sessk | conditional_block |
server.py | Server:
socket = None
socket_list = set()
port = 7000
buffersize = 1024*1024
timeout = 20
content = dict()
session = dict()
def __init__(self):
filelist = ['test.html', 'upload.js', 'spark-md5.min.js']
for i in filelist:
with open(i, 'r') as f:
self.content[i] = f.read()
def wshandshake(self, conn, v):
key = base64.b64encode(hashlib.sha1(v + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11').digest())
response = 'HTTP/1.1 101 Switching Protocols\r\n' \
'Upgrade: websocket\r\n' \
'Connection: Upgrade\r\n' \
'Sec-WebSocket-Accept:' + key + '\r\n\r\n'
conn.send(response)
self.socket_list.add(conn)
# 超时时长, 文件名, 缓存大小
self.session[conn.fileno()] = dict(buffer='', length=0, no=0)
self.ws_send(conn, 'init')
def ws_process(self, conn, size=1024*1024):
data = conn.recv(size)
sesskey = conn.fileno()
if sesskey not in self.session or 'buffer' not in self.session[sesskey]:
self.ws_send(conn, 'session error!')
if conn in self.socket_list:
self.socket_list.remove(conn)
conn.close()
return
self.session[sesskey]['buffer'] += data
# 可能关闭连接,销毁session
while sesskey in self.session and self.session[sesskey]['buffer']:
if self.session[sesskey]['length'] == 0:
b = self.session[sesskey]['buffer']
if len(b) < 14:
break
len_flag = ord(b[1]) & 127 # 数据长度
if len_flag == 126:
self.session[sesskey]['length'] = ord(b[2]) * 256 + ord(b[3]) + 8
elif len_flag == 127:
self.session[sesskey]['length'] = reduce(lambda y, z: y * 256 + z, map(lambda x: ord(x), b[2:9])) + 14
else:
self.session[sesskey]['length'] = len_flag + 6
# logging.info("length %d, buffer %d" % (self.session[sesskey]['length'], len(self.session[sesskey]['buffer'])))
if self.session[sesskey]['length'] <= len(self.session[sesskey]['buffer']) \
and self.session[sesskey]['length'] != 0:
# 处理完整包
pack_data = self.session[sesskey]['buffer'][:self.session[sesskey]['length']]
if len(self.session[sesskey]['buffer']) > self.session[sesskey]['length']:
self.session[sesskey]['buffer'] = self.session[sesskey]['buffer'][self.session[sesskey]['length']:]
else:
self.session[sesskey]['buffer'] = ''
self.session[sesskey]['length'] = 0
self.package_process(conn, pack_data)
else:
break
def package_process(self, conn, data):
# logging.info(data)
FIN = ord(data[0]) & 128 # 结束位
Opcode = ord(data[0]) & 112 # 操作码
is_mask = ord(data[1]) & 128 # 是否加掩码
len_flag = ord(data[1]) & 127 # 数据长度
if len_flag == 126:
mask = data[4:8]
length = ord(data[2]) * 256 + ord(data[3])
raw = data[8:]
elif len_flag == 127:
mask = data[10:14]
raw = data[14:]
length = reduce(lambda y, z: y * 256 + z, map(lambda x: ord(x), data[2:9]))
else:
mask = data[2:6]
raw = data[6:]
length = len_flag
ret = ''
for cnt, d in enumerate(raw):
ret += chr(ord(d) ^ ord(mask[cnt % 4]))
if not ret:
pass
# logging.debug("frame info FIN %d Opcode %d mask %d length %d " % (FIN, Opcode, is_mask, length))
# hexstr = binascii.b2a_hex(data)
# bsstr = bin(int(hexstr, 16))[2:]
# logging.debug(bsstr)
sesskey = conn.fileno()
session = self.session[sesskey]
if not ret or ret is False:
# if conn in self.socket_list:
# self.socket_list.remove(conn)
logging.info("ignore empty msg")
self.ws_send(conn, 'empty:%d' % session['no'])
# conn.close()
return
try:
# logging.info(ret[:10])
msg = self.params_data(ret)
except Exception, e:
# logging.exception(e)
logging.debug("error:%d" % session['no'])
self.ws_send(conn, "error:%d" % session['no'])
return
if "a" in msg:
if msg['a'] == 'init':
self.session[sesskey]['name'] = msg['name']
# self.ws_send(conn, 'ok:0')
self.session[sesskey]['filebuffer'] = []
self.session[sesskey]['no'] = 0
self.session[sesskey]['file'] = open(
os.path.join(os.path.dirname(__file__), 'upload', msg['name']), 'ab')
elif msg['a'] == 'ping':
self.ws_send(conn, "ok:%d" % (self.session[sesskey]['no']))
elif msg['a'] == 'f':
logging.info('a %s s %d e %d n %d' % (msg['a'], msg['s'], msg['e'], msg['n']))
start, end = msg['s'], msg['e']
length = end - start
if msg['n'] != session['no']:
if msg['n'] < session['no']:
logging.info('already msg %d' % msg['n'])
self.ws_send(conn, 'already:%d' % msg['n'])
else:
logging.info("ignore msg %d %d" % (msg['n'], session['no']))
self.ws_send(conn, "retry:%d" % (session['no']))
elif length != len(msg['d']):
logging.info("error length msg %d %d" % (length, len(msg['d'])))
self.ws_send(conn, "retry:%d" % (msg['n']))
else:
self.session[sesskey]['filebuffer'].append(msg['d'])
self.session[sesskey]['no'] += 1
# logging.info('ok msg %d' % msg['n'])
# 每1M写入一次
if len(session['filebuffer']) > 128:
for i in session['filebuffer']:
self.session[sesskey]['file'].write(i)
self.session[sesskey]['filebuffer'] = []
self.ws_send(conn, "ok:%d" % (msg['n']))
elif msg['a'] == 'over':
for i in session['filebuffer']:
self.session[sesskey]['file'].write(i)
self.session[sesskey]['filebuffer'] = []
self.session[sesskey]['file'].close()
logging.info("over")
self.ws_send(conn, "over")
elif msg['a'] == 'check':
logging.info("check file md5 : %s" % msg['hash'])
with open(os.path.join(os.path.dirname(__file__), 'upload', session['name']), 'rb') as f:
md5 = md5_for_file(f)
logging.info(md5)
self.ws_send(conn, "check:%s" % md5)
elif msg['a'] == 'closed':
logging.info("closed")
self.ws_close(conn)
@staticmethod
def ws_send(conn, data):
head = '\x81'
if len(data) < 126:
head += struct.pack('B', len(data))
elif len(data) <= 0xFFFF:
head += struct.pack('!BH', 126, len(data))
else:
head += struct.pack('!BQ', 127, len(data))
conn.send(head + data)
def ws_close(self, conn):
msg = '\x88\x00'
conn.send(msg)
fileno = conn.fileno()
logging | b.md5()
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
class | identifier_body | |
rol_common.js | fdid = $(this).parent().find(".folder_id").val();
var fdname = $(this).parent().find(".folder_name").val();
var parent = $(this).parent();
setTimeout(function(){
//添加焦点事件
parent.addClass("left_nav_infolink_high");
var edit_div = $('#float_edit');
edit_div.css("display","block");
edit_div.css("top",top);
edit_div.css("left",left);
edit_div.find(".folder_id").val(fdid);
edit_div.find(".folder_name").val(fdname);
},100);
});
//需要编辑文件夹的,高亮显示,同时显示编辑图片
left_nav.find(".edit_folder_div").mouseover(function(){
$(this).addClass("infolink_hover");
$(this).find(".edit_folder").show();
});
//需要编辑文件夹的,移除高亮显示,同时不显示编辑图片
left_nav.find(".edit_folder_div").mouseout(function(){
$(this).find(".edit_folder").hide();
$(this).removeClass("infolink_hover");
});
}
//显示操作正确信息
function show_yes_tips(tip_id,msg,time)
{
if(!time || time < 3000){
time = 3000;
}
$.scmtips.show("success",msg,null,time);
}
//显示操作错误信息
function show_wrong_tips(tip_id,msg,time)
{
if(!time || time <3000){
time = 3000;
}
$.scmtips.show("error",msg, null,3000);
}
//杰青近五年代表性论著操作错误信息
function show_unlimit_wrong_tips(tip_id,msg)
{
$.scmtips.show("error",msg, null,3000);
} | function show_msg_tips_newdiv(type,msg,id){
if(!type || !msg)
return;
var time=3000;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg,null,time);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg,null,time);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg,null,time);
}
function show_msg_tips(type,msg,width){
if(!type || !msg)
return;
var time=1000;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg, width,time);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg, width,time);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg, width,time);
}
function rol_show_msg_tips(type,msg,rowCount){
if(!type || !msg)
return;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg);
}
//手动关闭显示的操作消息
function close_msg_tips(){
$("#tip_msg_box").hide();
}
//替换HMTL特殊字符,注意替换顺序
function covertHmtl(str)
{
str = str.replace(/\&/gi,"&");
str = str.replace(/\>/gi,">");
str = str.replace(/\</gi,"<");
str = str.replace(/\n/gi,"<br/>");
str = str.replace(/\s/gi," ");
return str;
}
//将textarea转换成span,过滤掉特殊字符
function refreshTextArea()
{
var objs = $(".rep_textarea");
for(var i = 0; i < objs.size(); i++)
{
var tag=objs[i];
var p = tag.parentNode;
if(!p) p = document;
if(/\r(\n)?/g.test(tag.value)==true)
{
newTag = getSpan(tag.value.replace(/\r(\n)?/g,"<br>"));
}
else
{
newTag = getSpan(tag.value);
}
p.replaceChild(newTag,tag);
}
}
function getSpan(text)
{
var node = document.createElement("span");
node.innerHTML=text+" ";
return node;
}
//帮助信息
function bindHelps()
{
$(".help_prompt").bind("click",function(){
var help_prompt_left = $(this).find(".help_prompt_left");
if(help_prompt_left.is(":visible")){
help_prompt_left.hide();
$(this).find(".help_prompt_left1").show();
$(this).find(".shear_head-down_opt").hide();
$(this).find(".shear_head-up_opt").show();
}else{
help_prompt_left.show();
$(this).find(".help_prompt_left1").hide();
$(this).find(".shear_head-down_opt").show();
$(this).find(".shear_head-up_opt").hide();
}
});
$(".help_prompt").each(function(){
var isShow = $(this).find(".help_prompt_left1").is(":visible")? true : false;
if(isShow){
$(this).find(".shear_head-down_opt").hide();
$(this).find(".shear_head-up_opt").show();
}else{
$(this).find(".shear_head-down_opt").show();
$(this).find(".shear_head-up_opt").hide();
}
});
//链接不进行事件冒泡
$(".help_prompt").find(".help_prompt_left1 a").bind("click",function(event){
stopBubble(event);
});
}
//显示隐藏检索条件
function view_search(){
var search_block = $(".search_block");
if(!search_block.is(":hidden")){
$("#isSearchShow").val(0);
$("#view_search_block_link").show();
$("#hide_search_block_link").hide();
search_block.hide();
}else{
$("#view_search_block_link").hide();
$("#hide_search_block_link").show();
$("#isSearchShow").val(1);
search_block.show();
}
}
//验证邮件格式是否合法
function isEmail(email) {
return /^((([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+(\.([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+)*)|((\x22)((((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(([\x01-\x08\x0b\x0c\x0e-\x1f\x7f]|\x21|[\x23-\x5b]|[\x5d-\x7e]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(\\([\x01-\x09\x0b\x0c\x0d-\x7f]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]))))*(((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(\x22)))@((([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.)+(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.?$/i.test(email);
}
//产生数字下拉,例如年度等
function genNumDescOption(start,end,select_id){
if(start > end |
//在弹出框中显示提示信息 | random_line_split |
rol_common.js | nav_open");
}
}
//左边菜单展开,关闭替换图标
function replaceImg(obj){
var img = $(obj).find("img");
var src = img.attr("src");
if(src.indexOf('open')>0){
img.attr("src",src.replace("open","close"));
}else{
img.attr("src",src.replace("close","open"));
}
}
//打开左侧菜单,根据设置的.info样式
function open_left_nav(){
$("#left_nav").find(".info")
.each(function(){
$(this).parent().find(">div:first-child").trigger("click");
});
}
//左侧菜单鼠标事件
function bind_left_nav_mouse()
{
//鼠标在上面高亮显示
$("#left_nav div.infolink").mouseover(function(){
$(this).addClass("infolink_hover");
});
//鼠标移走移除高亮
$("#left_nav div.infolink").mouseout(function(){
$(this).removeClass("infolink_hover");
});
}
//左侧菜单编辑文件夹
function bind_left_nav_folder()
{
var left_nav = $("#left_nav");
left_nav.find('div.edit_folder').click(function(){
//失去焦点,需要隐藏带hidden_out_div class的层
$(document).bind("click",hidden_outeditfd_div);
var top = $(this).offset().top;
var left = $(this).offset().left;
var fdid = $(this).parent().find(".folder_id").val();
var fdname = $(this).parent().find(".folder_name").val();
var parent = $(this).parent();
setTimeout(function(){
//添加焦点事件
parent.addClass("left_nav_infolink_high");
var edit_div = $('#float_edit');
edit_div.css("display","block");
edit_div.css("top",top);
edit_div.css("left",left);
edit_div.find(".folder_id").val(fdid);
edit_div.find(".folder_name").val(fdname);
},100);
});
//需要编辑文件夹的,高亮显示,同时显示编辑图片
left_nav.find(".edit_folder_div").mouseover(function(){
$(this).addClass("infolink_hover");
$(this).find(".edit_folder").show();
});
//需要编辑文件夹的,移除高亮显示,同时不显示编辑图片
left_nav.find(".edit_folder_div").mouseout(function(){
$(this).find(".edit_folder").hide();
$(this).removeClass("infolink_hover");
});
}
//显示操作正确信息
function show_yes_tips(tip_id,msg,time)
{
if(!time || time < 3000){
time = 3000;
}
$.scmtips.show("success",msg,null,time);
}
//显示操作错误信息
function show_wrong_tips(tip_id,msg,time)
{
if(!time || time <3000){
time = 3000;
}
$.scmtips.show("error",msg, null,3000);
}
//杰青近五年代表性论著操作错误信息
function show_unlimit_wrong_tips(tip_id,msg)
{
$.scmtips.show("error",msg, null,3000);
}
//在弹出框中显示提示信息
function show_msg_tips_newdiv(type,msg,id){
if(!type || !msg)
return;
var time=3000;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg,null,time);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg,null,time);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg,null,time);
}
function show_msg_tips(type,msg,width){
if(!type || !msg)
return;
var time=1000;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg, width,time);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg, width,time);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg, width,time);
}
function rol_show_msg_tips(type,msg,rowCount){
if(!type || !msg)
return;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg);
}
//手动关闭显示的操作消息
function close_msg_tips(){
$("#tip_msg_box").hide();
}
//替换HMTL特殊字符,注意替换顺序
function covertHmtl(str)
{
str = str.replace(/\&/gi,"&");
str = str.replace(/\>/gi,">");
str = str.replace(/\</gi,"<");
str = str.replace(/\n/gi,"<br/>");
str = str.replace(/\s/gi," ");
return str;
}
//将textarea转换成span,过滤掉特殊字符
function refreshTextArea()
{
var objs = $(".rep_textarea");
for(var i = 0; i < objs.size(); i++)
{
var tag=objs[i];
var p = tag.parentNode;
if(!p) p = document;
if(/\r(\n)?/g.test(tag.value)==true)
{
newTag = getSpan(tag.value.replace(/\r(\n)?/g,"<br>"));
}
else
{
newTag = getSpan(tag.value);
}
p.replaceChild(newTag,tag);
}
}
function getSpan(text)
{
var node = document.createElement("span");
node.innerHTML=text+" ";
return node;
}
//帮助信息
function bindHelps()
{
$(".help_prompt").bind("click",function(){
var help_prompt_left = $(this).find(".help_prompt_left");
if(help_prompt_left.is(":visible")){
help_prompt_left.hide();
$(this).find(".help_prompt_left1").show();
$(this).find(".shear_head-down_opt").hide();
$(this).find(".shear_head-up_opt").show();
}else{
help_prompt_left.show();
$(this).find(".help_prompt_left1").hide();
$(this).find(".shear_head-down_opt").show();
$(this).find(".shear_head-up_opt").hide();
}
});
$(".help_prompt").each(function(){
var isShow = $(this).find(".help_prompt_left1").is(":visible")? true : false;
if(isShow){
$(this).find(".shear_head-down_opt").hide();
$(this).find(".shear_head-up_opt").show();
}else{
$(this).find(".shear_head-down_opt").show();
$(this).find(".shear_head-up_opt").hide();
}
});
//链接不进行事件冒泡
$(".help_prompt").find(".help_prompt_left1 a").bind("click",function(event){
stopBubble(event);
});
}
//显示隐藏检索条件
function view_search(){
var search_block = $(".search_block");
if(!search_block.is(":hidden")){
$("#isSearchShow").val(0);
$("#view_search_block_link").show();
$("#hide_search_block_link").hide();
search_block.hide();
}else{
$("#view_search_block_link").hide();
$("#hide_search_block_link").show();
$("#isSearchShow").val(1);
search_block.show();
}
}
//验证邮件格式是否合法
function isEmail(email) {
return /^((([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+(\.([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+)*)|((\x22)((((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(([\x01-\x08\x0b\x0c\x0e-\x1f\x7f]|\x21|[\x23-\x5b]|[\x5d-\x7e]| | $(obj).css("zIndex",1000);
$(obj).css("height",$(obj).find(".float_div_content").height()+70)
},100);
}else{
$(obj).hide();
}
}
//左菜单展开,关闭
function switch_left_nav(obj){
var div = $(obj).parent();
if($(obj).hasClass("left_nav_open"))
{
div.find(">div:not(div:first-child)").hide();
$(obj).removeClass("left_nav_open");
}else{
div.find(">div").show();
$(obj).addClass("left_ | conditional_block | |
rol_common.js | fdid = $(this).parent().find(".folder_id").val();
var fdname = $(this).parent().find(".folder_name").val();
var parent = $(this).parent();
setTimeout(function(){
//添加焦点事件
parent.addClass("left_nav_infolink_high");
var edit_div = $('#float_edit');
edit_div.css("display","block");
edit_div.css("top",top);
edit_div.css("left",left);
edit_div.find(".folder_id").val(fdid);
edit_div.find(".folder_name").val(fdname);
},100);
});
//需要编辑文件夹的,高亮显示,同时显示编辑图片
left_nav.find(".edit_folder_div").mouseover(function(){
$(this).addClass("infolink_hover");
$(this).find(".edit_folder").show();
});
//需要编辑文件夹的,移除高亮显示,同时不显示编辑图片
left_nav.find(".edit_folder_div").mouseout(function(){
$(this).find(".edit_folder").hide();
$(this).removeClass("infolink_hover");
});
}
//显示操作正确信息
function show_yes_tips(tip_id,msg,time)
{
if(!time || time < 3000){
time = 3000;
}
$.scmtips.show("success",msg,null,time);
}
//显示操作错误信息
function show_wrong_tips(tip_id,msg,time)
{
if(!time || time <3000){
time = 3000;
}
$.scmtips.show("error",msg, null,3000);
}
//杰青近五年代表性论著操作错误信息
function show_unlimit_wrong_tips(tip_id,msg)
{
$.scmtips.show("error",msg, null,3000);
}
//在弹出框中显示提示信息
function show_msg_tips_newdiv(type,msg,id){
if(!type || !msg)
return;
var time=3000;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg,null,time);
if('warn'==type || 'warning'==type)
$.s | rror",msg,null,time);
}
function show_msg_tips(type,msg,width){
if(!type || !msg)
return;
var time=1000;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg, width,time);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg, width,time);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg, width,time);
}
function rol_show_msg_tips(type,msg,rowCount){
if(!type || !msg)
return;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg);
}
//手动关闭显示的操作消息
function close_msg_tips(){
$("#tip_msg_box").hide();
}
//替换HMTL特殊字符,注意替换顺序
function covertHmtl(str)
{
str = str.replace(/\&/gi,"&");
str = str.replace(/\>/gi,">");
str = str.replace(/\</gi,"<");
str = str.replace(/\n/gi,"<br/>");
str = str.replace(/\s/gi," ");
return str;
}
//将textarea转换成span,过滤掉特殊字符
function refreshTextArea()
{
var objs = $(".rep_textarea");
for(var i = 0; i < objs.size(); i++)
{
var tag=objs[i];
var p = tag.parentNode;
if(!p) p = document;
if(/\r(\n)?/g.test(tag.value)==true)
{
newTag = getSpan(tag.value.replace(/\r(\n)?/g,"<br>"));
}
else
{
newTag = getSpan(tag.value);
}
p.replaceChild(newTag,tag);
}
}
function getSpan(text)
{
var node = document.createElement("span");
node.innerHTML=text+" ";
return node;
}
//帮助信息
function bindHelps()
{
$(".help_prompt").bind("click",function(){
var help_prompt_left = $(this).find(".help_prompt_left");
if(help_prompt_left.is(":visible")){
help_prompt_left.hide();
$(this).find(".help_prompt_left1").show();
$(this).find(".shear_head-down_opt").hide();
$(this).find(".shear_head-up_opt").show();
}else{
help_prompt_left.show();
$(this).find(".help_prompt_left1").hide();
$(this).find(".shear_head-down_opt").show();
$(this).find(".shear_head-up_opt").hide();
}
});
$(".help_prompt").each(function(){
var isShow = $(this).find(".help_prompt_left1").is(":visible")? true : false;
if(isShow){
$(this).find(".shear_head-down_opt").hide();
$(this).find(".shear_head-up_opt").show();
}else{
$(this).find(".shear_head-down_opt").show();
$(this).find(".shear_head-up_opt").hide();
}
});
//链接不进行事件冒泡
$(".help_prompt").find(".help_prompt_left1 a").bind("click",function(event){
stopBubble(event);
});
}
//显示隐藏检索条件
function view_search(){
var search_block = $(".search_block");
if(!search_block.is(":hidden")){
$("#isSearchShow").val(0);
$("#view_search_block_link").show();
$("#hide_search_block_link").hide();
search_block.hide();
}else{
$("#view_search_block_link").hide();
$("#hide_search_block_link").show();
$("#isSearchShow").val(1);
search_block.show();
}
}
//验证邮件格式是否合法
function isEmail(email) {
return /^((([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+(\.([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+)*)|((\x22)((((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(([\x01-\x08\x0b\x0c\x0e-\x1f\x7f]|\x21|[\x23-\x5b]|[\x5d-\x7e]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(\\([\x01-\x09\x0b\x0c\x0d-\x7f]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]))))*(((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(\x22)))@((([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.)+(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.?$/i.test(email);
}
//产生数字下拉,例如年度等
function genNumDescOption(start,end,select_id){
if(start | cmtips.show("warn",msg,null,time);
if('error'==type || 'wrong'==type)
$.scmtips.show("e | identifier_body |
rol_common.js | ,null,time);
}
function show_msg_tips(type,msg,width){
if(!type || !msg)
return;
var time=1000;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg, width,time);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg, width,time);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg, width,time);
}
function rol_show_msg_tips(type,msg,rowCount){
if(!type || !msg)
return;
if('success'==type || 'yes'==type)
$.scmtips.show("success",msg);
if('warn'==type || 'warning'==type)
$.scmtips.show("warn",msg);
if('error'==type || 'wrong'==type)
$.scmtips.show("error",msg);
}
//手动关闭显示的操作消息
function close_msg_tips(){
$("#tip_msg_box").hide();
}
//替换HMTL特殊字符,注意替换顺序
function covertHmtl(str)
{
str = str.replace(/\&/gi,"&");
str = str.replace(/\>/gi,">");
str = str.replace(/\</gi,"<");
str = str.replace(/\n/gi,"<br/>");
str = str.replace(/\s/gi," ");
return str;
}
//将textarea转换成span,过滤掉特殊字符
function refreshTextArea()
{
var objs = $(".rep_textarea");
for(var i = 0; i < objs.size(); i++)
{
var tag=objs[i];
var p = tag.parentNode;
if(!p) p = document;
if(/\r(\n)?/g.test(tag.value)==true)
{
newTag = getSpan(tag.value.replace(/\r(\n)?/g,"<br>"));
}
else
{
newTag = getSpan(tag.value);
}
p.replaceChild(newTag,tag);
}
}
function getSpan(text)
{
var node = document.createElement("span");
node.innerHTML=text+" ";
return node;
}
//帮助信息
function bindHelps()
{
$(".help_prompt").bind("click",function(){
var help_prompt_left = $(this).find(".help_prompt_left");
if(help_prompt_left.is(":visible")){
help_prompt_left.hide();
$(this).find(".help_prompt_left1").show();
$(this).find(".shear_head-down_opt").hide();
$(this).find(".shear_head-up_opt").show();
}else{
help_prompt_left.show();
$(this).find(".help_prompt_left1").hide();
$(this).find(".shear_head-down_opt").show();
$(this).find(".shear_head-up_opt").hide();
}
});
$(".help_prompt").each(function(){
var isShow = $(this).find(".help_prompt_left1").is(":visible")? true : false;
if(isShow){
$(this).find(".shear_head-down_opt").hide();
$(this).find(".shear_head-up_opt").show();
}else{
$(this).find(".shear_head-down_opt").show();
$(this).find(".shear_head-up_opt").hide();
}
});
//链接不进行事件冒泡
$(".help_prompt").find(".help_prompt_left1 a").bind("click",function(event){
stopBubble(event);
});
}
//显示隐藏检索条件
function view_search(){
var search_block = $(".search_block");
if(!search_block.is(":hidden")){
$("#isSearchShow").val(0);
$("#view_search_block_link").show();
$("#hide_search_block_link").hide();
search_block.hide();
}else{
$("#view_search_block_link").hide();
$("#hide_search_block_link").show();
$("#isSearchShow").val(1);
search_block.show();
}
}
//验证邮件格式是否合法
function isEmail(email) {
return /^((([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+(\.([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+)*)|((\x22)((((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(([\x01-\x08\x0b\x0c\x0e-\x1f\x7f]|\x21|[\x23-\x5b]|[\x5d-\x7e]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(\\([\x01-\x09\x0b\x0c\x0d-\x7f]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]))))*(((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(\x22)))@((([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.)+(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.?$/i.test(email);
}
//产生数字下拉,例如年度等
function genNumDescOption(start,end,select_id){
if(start > end){
var tmp = start;
start = end;
end = tmp;
}
var select = $("#"+select_id);
for(;end >= start;end--){
var option = $("<option value='"+end+"' select=''>"+end+"</option>");
select.append(option);
}
}
//判断是否是不是中文
function isChinStr(s){
var regu = "[\w\W]*[\u4e00-\u9fa5][\w\W]*";
var re = new RegExp(regu);
if (s=="")
return false;
if (re.test(s)) {
return true;
}else{
return false;
}
}
//判断是否是数字
function isNumStr(s) {
var patrn=/^[0-9]{1,20}$/;
if (!patrn.exec(s)) return false ;
return true ;
}
//字母、数字、下划线
function isCharsOrNum(s) {
var patrn=/^(\w)$/;
if (!patrn.exec(s))
return false;
return true ;
}
//字母、数字
function isChrOrNum(s){
var patrn=/^[A-Za-z0-9]+$/;
if (!patrn.exec(s)) return false ;
return true ;
}
//是否是中文或者是英文和数字
function isParamStr(s){
var flag = false;
if(isChinStr(s))
flag =true;
if(isChrOrNum(s))
flag =true;
return flag;
}
//限制textarea最多输入
function setTextareaMaxLength(maxLength){
$("textarea").keyup(function(){
var area=$(this).val();
if(area.length>maxLength){
$(this).val(area.substring(0,maxLength));
}
});
$("textarea").blur(function(){
var area=$(this).val();
if(area.length>maxLength){
$(this).val(area.substring(0,maxLength));
}
});
}
//判断是否是特殊字符
function isSpecial(s){
var str = '",.;[]{}+=|\*&^%$#@!~()-/?<>';
var flag = false;
if($.trim(s).length>0){
for(var i=0;i<str.length;i++){
if(s.indexOf(str.charAt(i))>=0){
flag=true;
| break;
| identifier_name | |
ranker_ltr.py | ):
"""
Trains a model and saves it to a file.
- This function currently only supports GBRT.
Args:
inss: erd.ml.CERInstances, train instances
model_file: A file to save the model. For None value, the model will not be saved
Returns:
ranker, the learned model
"""
config = self.config
if model_file is not None:
config['save_model'] = model_file
if feat_imp_file is not None:
config['save_feature_imp'] = feat_imp_file
self.ml.config = config
ranker = self.ml.train_model(inss)
return ranker
def cross_validate(self, inss, num_folds, folds_name=None, gen_folds=False):
"""
Performs k-fold cross validation.
:param inss: erd.ml.CERInstances
:param num_folds: int, number of folds
:param folds_name: file name for saving the folds. It adds a postfix to the file name.
e.g. "./output/res/erd-ltr" -> "./output/res/erd-ltr-f1-train.json"
:return All of instances ranked by cross validation
"""
kcv = CrossValidation(num_folds, inss, self.train, self.rank_inss)
# loads/generates folds
if gen_folds:
kcv.create_folds(group_by="session")
if folds_name is not None:
kcv.save_folds(folds_name)
else:
kcv.load_folds(folds_name)
# Cross validation
inss = kcv.run()
inss.__class__ = CERInstances
for ins in inss.get_all():
ins.__class__ = CERInstance
return inss
def rank_inss(self, inss, model=None):
"""
Ranks the instances using the given trained model.
:param inss: erd.ml.CERInstances
:return erd.ml.CERInstances, ranked instances
"""
if model is None: # Done for CV call_back_test method
|
return self.ml.apply_model(inss, model)
def rank_queries(self, queries, time_log_file=None): # commonness_th, filter=True,
"""
Ranks entities for the given queries using the trained model.
:param queries: a dictionary, {q_id: q_content, ...}
:param time_log_file: file name to save time log
:return erd.ml.CERInstances, Ranked instances
"""
print "Ranking queries ..."
total_time = 0.0
s_t = datetime.now() # start time
inss_list = [] # list of Instances
# Ranks queries
for q_id, q_content in queries.iteritems():
query = Query(q_id, q_content)
q_inss = self.rank_query(query)
if len(q_inss.get_all()) == 0:
print "==================================================="
print "No candidate entity for query " + q_id + ", " + q_content
print "==================================================="
inss_list.append(q_inss)
# time log
e_t = datetime.now()
diff = e_t - s_t
total_time += diff.total_seconds()
time_log = "Execution time(min):\t" + str(round(total_time/60, 4)) + "\n"
time_log += "Avg. time per query:\t" + str(round(total_time/len(queries), 4)) + "\n"
print time_log
# open(time_log_file + ".timelog", "w").write(time_log)
# print "Time log:\t" + time_log_file + ".timelog"
return CERInstances.concatenate_inss(inss_list)
def rank_query(self, query):
"""
Generates ranking score for entities related to the given query.
:param query: query.Query
:return erd.ml.CERInstances
"""
q_inss = CERInstances.gen_instances(query, self.commonness_th, sf_source=self.sf_source, filter=self.filter)
RankerLTR.add_features(q_inss, self.commonness_th, self.sf_source)
self.rank_inss(q_inss)
return q_inss
@staticmethod
def add_features(inss, commonness_th, sf_source):
print "Extracting features ..."
i = 0
for ins in inss.get_all():
ins.features = RankerLTR.get_features(ins, commonness_th, sf_source)
i += 1
if i % 1000.0 == 0:
print "Features are generated until instance " + str(ins.id)
return inss
@staticmethod
def get_features(ins, commonness_th, sf_source):
"""
Concatenate all features.
:param ins: ml.Instance
"""
all_ftrs = {}
# --- mention features ---
mention_ftr = MentionFeat(ins.mention, sf_source)
all_ftrs['len'] = mention_ftr.mention_len()
all_ftrs['ntem'] = mention_ftr.ntem()
all_ftrs['smil'] = mention_ftr.smil()
all_ftrs['matches'] = ins.matches if ins.matches is not None else mention_ftr.matches(commonness_th)
all_ftrs['len_ratio'] = mention_ftr.len_ratio(Query.preprocess(ins.q_content))
# --- entity features ---
en_ftr = EntityFeat(ins.en_id)
all_ftrs['redirects'] = en_ftr.redirects()
all_ftrs['links'] = en_ftr.links()
# --- entity-mention features ---
en_mention_ftr = EntityMentionFeat(ins.en_id, ins.mention)
all_ftrs['commonness'] = ins.commonness
all_ftrs['mct'] = en_mention_ftr.mct()
all_ftrs['tcm'] = en_mention_ftr.tcm()
all_ftrs['tem'] = en_mention_ftr.tem()
all_ftrs['pos1'] = en_mention_ftr.pos1()
all_ftrs.update(RankerLTR.__lm_scores(ins.en_id, ins.mention, "m"))
# --- entity-query features ---
en_query_ftr = EntityMentionFeat(ins.en_id, ins.q_content)
all_ftrs['qct'] = en_query_ftr.mct()
all_ftrs['tcq'] = en_query_ftr.tcm()
all_ftrs['teq'] = en_query_ftr.tem()
mlm_tc = QuerySimFeat(ins.q_content).nllr_mlm_score(ins.en_id, {'names': 0.2, 'contents': 0.8}) # mlm_score
all_ftrs['mlm-tc'] = mlm_tc if mlm_tc is not None else 0
all_ftrs.update(RankerLTR.__lm_scores(ins.en_id, ins.q_content, "q"))
return all_ftrs
@staticmethod
def __lm_scores(en_id, txt, prefix):
""" Calculates all LM scores. """
feat_field_dict = {'title': econfig.TITLE, 'sAbs': econfig.SHORT_ABS, 'lAbs': econfig.LONG_ABS,
'links': econfig.WIKILINKS, 'cats': econfig.CATEGORIES, 'catchall': Lucene.FIELDNAME_CONTENTS}
ftr_extractor = QuerySimFeat(txt)
scores = dict()
for feature_name, field in feat_field_dict.iteritems():
lm_score = ftr_extractor.nllr_lm_score(en_id, field) # lm_score(en_id, field)
scores[prefix + feature_name] = lm_score if lm_score is not None else 0
return scores
def main(args):
"""
Required args for training: -train -cer -t <int> -l <int> -in <train_set_name.json>
Required args for cross validation: -cv -cer -d <data_name> -c <commonness> -t <int> -l <int>
Required args for ranking: -rank -ltr -m <model_file>
Valid args for ranking: -d <data_name> -qid <str> -query <str> -c <commonness>
"""
settings_str = "-ltr-t" + str(args.tree)
model_name = ""
if args.depth is not None:
settings_str += "-d" + str(args.depth)
model_name = "gbrt"
elif args.maxfeat is not None:
settings_str += "-m" + str(args.maxfeat)
model_name = "rf"
ml_config = {'model': model_name,
'parameters': {'tree': args.tree, 'depth': args.depth, 'maxfeat': args.maxfeat}}
# ==== Train ====
if args.train:
train_inss = CERInstances.from_json(args.input)
file_name = args.input[:args.input.rfind(".json")] + settings_str
ranker_ltr = RankerLTR(config=ml_config) # model_name, tree=args.tree, depth=args.depth, max_features=args.maxfeat)
ranker_ltr.train(train_inss, model_file=file_name + ".model", feat_imp_file=file_name + "-feat_imp.txt")
# ==== Cross Validation ====
elif args.cv:
in_file_name = args.input[:args.input.rfind(".json")]
cv_in | model = self.model | conditional_block |
ranker_ltr.py | ):
"""
Trains a model and saves it to a file.
- This function currently only supports GBRT.
Args:
inss: erd.ml.CERInstances, train instances
model_file: A file to save the model. For None value, the model will not be saved
Returns:
ranker, the learned model
"""
config = self.config
if model_file is not None:
config['save_model'] = model_file
if feat_imp_file is not None:
config['save_feature_imp'] = feat_imp_file
self.ml.config = config
ranker = self.ml.train_model(inss)
return ranker
def cross_validate(self, inss, num_folds, folds_name=None, gen_folds=False):
"""
Performs k-fold cross validation.
:param inss: erd.ml.CERInstances
:param num_folds: int, number of folds
:param folds_name: file name for saving the folds. It adds a postfix to the file name.
e.g. "./output/res/erd-ltr" -> "./output/res/erd-ltr-f1-train.json"
:return All of instances ranked by cross validation
"""
kcv = CrossValidation(num_folds, inss, self.train, self.rank_inss)
# loads/generates folds
if gen_folds:
kcv.create_folds(group_by="session")
if folds_name is not None:
kcv.save_folds(folds_name)
else:
kcv.load_folds(folds_name)
# Cross validation
inss = kcv.run()
inss.__class__ = CERInstances
for ins in inss.get_all():
ins.__class__ = CERInstance
return inss
def rank_inss(self, inss, model=None):
"""
Ranks the instances using the given trained model.
:param inss: erd.ml.CERInstances
:return erd.ml.CERInstances, ranked instances
"""
if model is None: # Done for CV call_back_test method
model = self.model
return self.ml.apply_model(inss, model)
def | (self, queries, time_log_file=None): # commonness_th, filter=True,
"""
Ranks entities for the given queries using the trained model.
:param queries: a dictionary, {q_id: q_content, ...}
:param time_log_file: file name to save time log
:return erd.ml.CERInstances, Ranked instances
"""
print "Ranking queries ..."
total_time = 0.0
s_t = datetime.now() # start time
inss_list = [] # list of Instances
# Ranks queries
for q_id, q_content in queries.iteritems():
query = Query(q_id, q_content)
q_inss = self.rank_query(query)
if len(q_inss.get_all()) == 0:
print "==================================================="
print "No candidate entity for query " + q_id + ", " + q_content
print "==================================================="
inss_list.append(q_inss)
# time log
e_t = datetime.now()
diff = e_t - s_t
total_time += diff.total_seconds()
time_log = "Execution time(min):\t" + str(round(total_time/60, 4)) + "\n"
time_log += "Avg. time per query:\t" + str(round(total_time/len(queries), 4)) + "\n"
print time_log
# open(time_log_file + ".timelog", "w").write(time_log)
# print "Time log:\t" + time_log_file + ".timelog"
return CERInstances.concatenate_inss(inss_list)
def rank_query(self, query):
"""
Generates ranking score for entities related to the given query.
:param query: query.Query
:return erd.ml.CERInstances
"""
q_inss = CERInstances.gen_instances(query, self.commonness_th, sf_source=self.sf_source, filter=self.filter)
RankerLTR.add_features(q_inss, self.commonness_th, self.sf_source)
self.rank_inss(q_inss)
return q_inss
@staticmethod
def add_features(inss, commonness_th, sf_source):
print "Extracting features ..."
i = 0
for ins in inss.get_all():
ins.features = RankerLTR.get_features(ins, commonness_th, sf_source)
i += 1
if i % 1000.0 == 0:
print "Features are generated until instance " + str(ins.id)
return inss
@staticmethod
def get_features(ins, commonness_th, sf_source):
"""
Concatenate all features.
:param ins: ml.Instance
"""
all_ftrs = {}
# --- mention features ---
mention_ftr = MentionFeat(ins.mention, sf_source)
all_ftrs['len'] = mention_ftr.mention_len()
all_ftrs['ntem'] = mention_ftr.ntem()
all_ftrs['smil'] = mention_ftr.smil()
all_ftrs['matches'] = ins.matches if ins.matches is not None else mention_ftr.matches(commonness_th)
all_ftrs['len_ratio'] = mention_ftr.len_ratio(Query.preprocess(ins.q_content))
# --- entity features ---
en_ftr = EntityFeat(ins.en_id)
all_ftrs['redirects'] = en_ftr.redirects()
all_ftrs['links'] = en_ftr.links()
# --- entity-mention features ---
en_mention_ftr = EntityMentionFeat(ins.en_id, ins.mention)
all_ftrs['commonness'] = ins.commonness
all_ftrs['mct'] = en_mention_ftr.mct()
all_ftrs['tcm'] = en_mention_ftr.tcm()
all_ftrs['tem'] = en_mention_ftr.tem()
all_ftrs['pos1'] = en_mention_ftr.pos1()
all_ftrs.update(RankerLTR.__lm_scores(ins.en_id, ins.mention, "m"))
# --- entity-query features ---
en_query_ftr = EntityMentionFeat(ins.en_id, ins.q_content)
all_ftrs['qct'] = en_query_ftr.mct()
all_ftrs['tcq'] = en_query_ftr.tcm()
all_ftrs['teq'] = en_query_ftr.tem()
mlm_tc = QuerySimFeat(ins.q_content).nllr_mlm_score(ins.en_id, {'names': 0.2, 'contents': 0.8}) # mlm_score
all_ftrs['mlm-tc'] = mlm_tc if mlm_tc is not None else 0
all_ftrs.update(RankerLTR.__lm_scores(ins.en_id, ins.q_content, "q"))
return all_ftrs
@staticmethod
def __lm_scores(en_id, txt, prefix):
""" Calculates all LM scores. """
feat_field_dict = {'title': econfig.TITLE, 'sAbs': econfig.SHORT_ABS, 'lAbs': econfig.LONG_ABS,
'links': econfig.WIKILINKS, 'cats': econfig.CATEGORIES, 'catchall': Lucene.FIELDNAME_CONTENTS}
ftr_extractor = QuerySimFeat(txt)
scores = dict()
for feature_name, field in feat_field_dict.iteritems():
lm_score = ftr_extractor.nllr_lm_score(en_id, field) # lm_score(en_id, field)
scores[prefix + feature_name] = lm_score if lm_score is not None else 0
return scores
def main(args):
"""
Required args for training: -train -cer -t <int> -l <int> -in <train_set_name.json>
Required args for cross validation: -cv -cer -d <data_name> -c <commonness> -t <int> -l <int>
Required args for ranking: -rank -ltr -m <model_file>
Valid args for ranking: -d <data_name> -qid <str> -query <str> -c <commonness>
"""
settings_str = "-ltr-t" + str(args.tree)
model_name = ""
if args.depth is not None:
settings_str += "-d" + str(args.depth)
model_name = "gbrt"
elif args.maxfeat is not None:
settings_str += "-m" + str(args.maxfeat)
model_name = "rf"
ml_config = {'model': model_name,
'parameters': {'tree': args.tree, 'depth': args.depth, 'maxfeat': args.maxfeat}}
# ==== Train ====
if args.train:
train_inss = CERInstances.from_json(args.input)
file_name = args.input[:args.input.rfind(".json")] + settings_str
ranker_ltr = RankerLTR(config=ml_config) # model_name, tree=args.tree, depth=args.depth, max_features=args.maxfeat)
ranker_ltr.train(train_inss, model_file=file_name + ".model", feat_imp_file=file_name + "-feat_imp.txt")
# ==== Cross Validation ====
elif args.cv:
in_file_name = args.input[:args.input.rfind(".json")]
cv | rank_queries | identifier_name |
ranker_ltr.py | ):
"""
Trains a model and saves it to a file.
- This function currently only supports GBRT.
Args:
inss: erd.ml.CERInstances, train instances
model_file: A file to save the model. For None value, the model will not be saved
Returns:
ranker, the learned model
"""
config = self.config
if model_file is not None:
config['save_model'] = model_file
if feat_imp_file is not None:
config['save_feature_imp'] = feat_imp_file
self.ml.config = config
ranker = self.ml.train_model(inss)
return ranker
def cross_validate(self, inss, num_folds, folds_name=None, gen_folds=False):
"""
Performs k-fold cross validation.
:param inss: erd.ml.CERInstances
:param num_folds: int, number of folds
:param folds_name: file name for saving the folds. It adds a postfix to the file name.
e.g. "./output/res/erd-ltr" -> "./output/res/erd-ltr-f1-train.json"
:return All of instances ranked by cross validation
"""
kcv = CrossValidation(num_folds, inss, self.train, self.rank_inss)
# loads/generates folds
if gen_folds:
kcv.create_folds(group_by="session")
if folds_name is not None:
kcv.save_folds(folds_name)
else:
kcv.load_folds(folds_name)
# Cross validation
inss = kcv.run()
inss.__class__ = CERInstances
for ins in inss.get_all():
ins.__class__ = CERInstance
return inss
def rank_inss(self, inss, model=None):
"""
Ranks the instances using the given trained model.
:param inss: erd.ml.CERInstances
:return erd.ml.CERInstances, ranked instances
"""
if model is None: # Done for CV call_back_test method
model = self.model
return self.ml.apply_model(inss, model)
def rank_queries(self, queries, time_log_file=None): # commonness_th, filter=True,
| inss_list.append(q_inss)
# time log
e_t = datetime.now()
diff = e_t - s_t
total_time += diff.total_seconds()
time_log = "Execution time(min):\t" + str(round(total_time/60, 4)) + "\n"
time_log += "Avg. time per query:\t" + str(round(total_time/len(queries), 4)) + "\n"
print time_log
# open(time_log_file + ".timelog", "w").write(time_log)
# print "Time log:\t" + time_log_file + ".timelog"
return CERInstances.concatenate_inss(inss_list)
def rank_query(self, query):
"""
Generates ranking score for entities related to the given query.
:param query: query.Query
:return erd.ml.CERInstances
"""
q_inss = CERInstances.gen_instances(query, self.commonness_th, sf_source=self.sf_source, filter=self.filter)
RankerLTR.add_features(q_inss, self.commonness_th, self.sf_source)
self.rank_inss(q_inss)
return q_inss
@staticmethod
def add_features(inss, commonness_th, sf_source):
print "Extracting features ..."
i = 0
for ins in inss.get_all():
ins.features = RankerLTR.get_features(ins, commonness_th, sf_source)
i += 1
if i % 1000.0 == 0:
print "Features are generated until instance " + str(ins.id)
return inss
@staticmethod
def get_features(ins, commonness_th, sf_source):
"""
Concatenate all features.
:param ins: ml.Instance
"""
all_ftrs = {}
# --- mention features ---
mention_ftr = MentionFeat(ins.mention, sf_source)
all_ftrs['len'] = mention_ftr.mention_len()
all_ftrs['ntem'] = mention_ftr.ntem()
all_ftrs['smil'] = mention_ftr.smil()
all_ftrs['matches'] = ins.matches if ins.matches is not None else mention_ftr.matches(commonness_th)
all_ftrs['len_ratio'] = mention_ftr.len_ratio(Query.preprocess(ins.q_content))
# --- entity features ---
en_ftr = EntityFeat(ins.en_id)
all_ftrs['redirects'] = en_ftr.redirects()
all_ftrs['links'] = en_ftr.links()
# --- entity-mention features ---
en_mention_ftr = EntityMentionFeat(ins.en_id, ins.mention)
all_ftrs['commonness'] = ins.commonness
all_ftrs['mct'] = en_mention_ftr.mct()
all_ftrs['tcm'] = en_mention_ftr.tcm()
all_ftrs['tem'] = en_mention_ftr.tem()
all_ftrs['pos1'] = en_mention_ftr.pos1()
all_ftrs.update(RankerLTR.__lm_scores(ins.en_id, ins.mention, "m"))
# --- entity-query features ---
en_query_ftr = EntityMentionFeat(ins.en_id, ins.q_content)
all_ftrs['qct'] = en_query_ftr.mct()
all_ftrs['tcq'] = en_query_ftr.tcm()
all_ftrs['teq'] = en_query_ftr.tem()
mlm_tc = QuerySimFeat(ins.q_content).nllr_mlm_score(ins.en_id, {'names': 0.2, 'contents': 0.8}) # mlm_score
all_ftrs['mlm-tc'] = mlm_tc if mlm_tc is not None else 0
all_ftrs.update(RankerLTR.__lm_scores(ins.en_id, ins.q_content, "q"))
return all_ftrs
@staticmethod
def __lm_scores(en_id, txt, prefix):
""" Calculates all LM scores. """
feat_field_dict = {'title': econfig.TITLE, 'sAbs': econfig.SHORT_ABS, 'lAbs': econfig.LONG_ABS,
'links': econfig.WIKILINKS, 'cats': econfig.CATEGORIES, 'catchall': Lucene.FIELDNAME_CONTENTS}
ftr_extractor = QuerySimFeat(txt)
scores = dict()
for feature_name, field in feat_field_dict.iteritems():
lm_score = ftr_extractor.nllr_lm_score(en_id, field) # lm_score(en_id, field)
scores[prefix + feature_name] = lm_score if lm_score is not None else 0
return scores
def main(args):
"""
Required args for training: -train -cer -t <int> -l <int> -in <train_set_name.json>
Required args for cross validation: -cv -cer -d <data_name> -c <commonness> -t <int> -l <int>
Required args for ranking: -rank -ltr -m <model_file>
Valid args for ranking: -d <data_name> -qid <str> -query <str> -c <commonness>
"""
settings_str = "-ltr-t" + str(args.tree)
model_name = ""
if args.depth is not None:
settings_str += "-d" + str(args.depth)
model_name = "gbrt"
elif args.maxfeat is not None:
settings_str += "-m" + str(args.maxfeat)
model_name = "rf"
ml_config = {'model': model_name,
'parameters': {'tree': args.tree, 'depth': args.depth, 'maxfeat': args.maxfeat}}
# ==== Train ====
if args.train:
train_inss = CERInstances.from_json(args.input)
file_name = args.input[:args.input.rfind(".json")] + settings_str
ranker_ltr = RankerLTR(config=ml_config) # model_name, tree=args.tree, depth=args.depth, max_features=args.maxfeat)
ranker_ltr.train(train_inss, model_file=file_name + ".model", feat_imp_file=file_name + "-feat_imp.txt")
# ==== Cross Validation ====
elif args.cv:
in_file_name = args.input[:args.input.rfind(".json")]
cv_inss | """
Ranks entities for the given queries using the trained model.
:param queries: a dictionary, {q_id: q_content, ...}
:param time_log_file: file name to save time log
:return erd.ml.CERInstances, Ranked instances
"""
print "Ranking queries ..."
total_time = 0.0
s_t = datetime.now() # start time
inss_list = [] # list of Instances
# Ranks queries
for q_id, q_content in queries.iteritems():
query = Query(q_id, q_content)
q_inss = self.rank_query(query)
if len(q_inss.get_all()) == 0:
print "==================================================="
print "No candidate entity for query " + q_id + ", " + q_content
print "===================================================" | identifier_body |
ranker_ltr.py | model: the trained model
"""
def __init__(self, commonness_th=None, sf_source=None, filter=True, model=None, config={}):
self.commonness_th = commonness_th
self.sf_source = sf_source
self.filter = filter
self.config = config
self.model = model
self.ml = ML(config) #if config is not None else None
def train(self, inss, model_file=None, feat_imp_file=None):
"""
Trains a model and saves it to a file.
- This function currently only supports GBRT.
Args:
inss: erd.ml.CERInstances, train instances
model_file: A file to save the model. For None value, the model will not be saved
Returns:
ranker, the learned model
"""
config = self.config
if model_file is not None:
config['save_model'] = model_file
if feat_imp_file is not None:
config['save_feature_imp'] = feat_imp_file
self.ml.config = config
ranker = self.ml.train_model(inss)
return ranker
def cross_validate(self, inss, num_folds, folds_name=None, gen_folds=False):
"""
Performs k-fold cross validation.
:param inss: erd.ml.CERInstances
:param num_folds: int, number of folds
:param folds_name: file name for saving the folds. It adds a postfix to the file name.
e.g. "./output/res/erd-ltr" -> "./output/res/erd-ltr-f1-train.json"
:return All of instances ranked by cross validation
"""
kcv = CrossValidation(num_folds, inss, self.train, self.rank_inss)
# loads/generates folds
if gen_folds:
kcv.create_folds(group_by="session")
if folds_name is not None:
kcv.save_folds(folds_name)
else:
kcv.load_folds(folds_name)
# Cross validation
inss = kcv.run()
inss.__class__ = CERInstances
for ins in inss.get_all():
ins.__class__ = CERInstance
return inss
def rank_inss(self, inss, model=None):
"""
Ranks the instances using the given trained model.
:param inss: erd.ml.CERInstances
:return erd.ml.CERInstances, ranked instances
"""
if model is None: # Done for CV call_back_test method
model = self.model
return self.ml.apply_model(inss, model)
def rank_queries(self, queries, time_log_file=None): # commonness_th, filter=True,
"""
Ranks entities for the given queries using the trained model.
:param queries: a dictionary, {q_id: q_content, ...}
:param time_log_file: file name to save time log
:return erd.ml.CERInstances, Ranked instances
"""
print "Ranking queries ..."
total_time = 0.0
s_t = datetime.now() # start time
inss_list = [] # list of Instances
# Ranks queries
for q_id, q_content in queries.iteritems():
query = Query(q_id, q_content)
q_inss = self.rank_query(query)
if len(q_inss.get_all()) == 0:
print "==================================================="
print "No candidate entity for query " + q_id + ", " + q_content
print "==================================================="
inss_list.append(q_inss)
# time log
e_t = datetime.now()
diff = e_t - s_t
total_time += diff.total_seconds()
time_log = "Execution time(min):\t" + str(round(total_time/60, 4)) + "\n"
time_log += "Avg. time per query:\t" + str(round(total_time/len(queries), 4)) + "\n"
print time_log
# open(time_log_file + ".timelog", "w").write(time_log)
# print "Time log:\t" + time_log_file + ".timelog"
return CERInstances.concatenate_inss(inss_list)
def rank_query(self, query):
"""
Generates ranking score for entities related to the given query.
:param query: query.Query
:return erd.ml.CERInstances
"""
q_inss = CERInstances.gen_instances(query, self.commonness_th, sf_source=self.sf_source, filter=self.filter)
RankerLTR.add_features(q_inss, self.commonness_th, self.sf_source)
self.rank_inss(q_inss)
return q_inss
@staticmethod
def add_features(inss, commonness_th, sf_source):
print "Extracting features ..."
i = 0
for ins in inss.get_all():
ins.features = RankerLTR.get_features(ins, commonness_th, sf_source)
i += 1
if i % 1000.0 == 0:
print "Features are generated until instance " + str(ins.id)
return inss
@staticmethod
def get_features(ins, commonness_th, sf_source):
"""
Concatenate all features.
:param ins: ml.Instance
"""
all_ftrs = {}
# --- mention features ---
mention_ftr = MentionFeat(ins.mention, sf_source)
all_ftrs['len'] = mention_ftr.mention_len()
all_ftrs['ntem'] = mention_ftr.ntem()
all_ftrs['smil'] = mention_ftr.smil()
all_ftrs['matches'] = ins.matches if ins.matches is not None else mention_ftr.matches(commonness_th)
all_ftrs['len_ratio'] = mention_ftr.len_ratio(Query.preprocess(ins.q_content))
# --- entity features ---
en_ftr = EntityFeat(ins.en_id)
all_ftrs['redirects'] = en_ftr.redirects()
all_ftrs['links'] = en_ftr.links()
# --- entity-mention features ---
en_mention_ftr = EntityMentionFeat(ins.en_id, ins.mention)
all_ftrs['commonness'] = ins.commonness
all_ftrs['mct'] = en_mention_ftr.mct()
all_ftrs['tcm'] = en_mention_ftr.tcm()
all_ftrs['tem'] = en_mention_ftr.tem()
all_ftrs['pos1'] = en_mention_ftr.pos1()
all_ftrs.update(RankerLTR.__lm_scores(ins.en_id, ins.mention, "m"))
# --- entity-query features ---
en_query_ftr = EntityMentionFeat(ins.en_id, ins.q_content)
all_ftrs['qct'] = en_query_ftr.mct()
all_ftrs['tcq'] = en_query_ftr.tcm()
all_ftrs['teq'] = en_query_ftr.tem()
mlm_tc = QuerySimFeat(ins.q_content).nllr_mlm_score(ins.en_id, {'names': 0.2, 'contents': 0.8}) # mlm_score
all_ftrs['mlm-tc'] = mlm_tc if mlm_tc is not None else 0
all_ftrs.update(RankerLTR.__lm_scores(ins.en_id, ins.q_content, "q"))
return all_ftrs
@staticmethod
def __lm_scores(en_id, txt, prefix):
""" Calculates all LM scores. """
feat_field_dict = {'title': econfig.TITLE, 'sAbs': econfig.SHORT_ABS, 'lAbs': econfig.LONG_ABS,
'links': econfig.WIKILINKS, 'cats': econfig.CATEGORIES, 'catchall': Lucene.FIELDNAME_CONTENTS}
ftr_extractor = QuerySimFeat(txt)
scores = dict()
for feature_name, field in feat_field_dict.iteritems():
lm_score = ftr_extractor.nllr_lm_score(en_id, field) # lm_score(en_id, field)
scores[prefix + feature_name] = lm_score if lm_score is not None else 0
return scores
def main(args):
"""
Required args for training: -train -cer -t <int> -l <int> -in <train_set_name.json>
Required args for cross validation: -cv -cer -d <data_name> -c <commonness> -t <int> -l <int>
Required args for ranking: -rank -ltr -m <model_file>
Valid args for ranking: -d <data_name> -qid <str> -query <str> -c <commonness>
"""
settings_str = "-ltr-t" + str(args.tree)
model_name = ""
if args.depth is not None:
settings_str += "-d" + str(args.depth)
model_name = "gbrt"
elif args.maxfeat is not None:
settings_str += "-m" + str(args.maxfeat)
model_name = "rf"
ml_config = {'model': model_name,
'parameters': {'tree': args.tree, 'depth': args.depth, 'maxfeat': args.maxfeat}}
# ==== Train ====
if args.train:
train_inss = CERInstances.from_json(args.input)
file_name = args.input[:args | random_line_split | ||
opentuna-stack.ts | extends cdk.Stack {
constructor(scope: cdk.Construct, id: string, props: OpenTunaStackProps) {
super(scope, id, props);
const stack = cdk.Stack.of(this);
const domainName = this.node.tryGetContext('domainName');
const domainZoneName = this.node.tryGetContext('domainZone');
const iamCertId = this.node.tryGetContext('iamCertId');
let useHTTPS = false;
let domainZone: r53.IHostedZone | undefined;
// ACM or IAM certificate
let cloudfrontCert: acm.Certificate | string | null = null;
if (domainName && domainZoneName) {
domainZone = r53.HostedZone.fromLookup(this, 'HostedZone', {
domainName: domainZoneName,
});
useHTTPS = true;
if (iamCertId !== undefined) {
// Use IAM first when specified
cloudfrontCert = iamCertId;
} else if (!stack.region.startsWith('cn-')) {
// Try to use ACM certificate in us-east-1 for CloudFront
cloudfrontCert = new acm.DnsValidatedCertificate(this, 'CloudFrontCertificate', {
domainName: domainName,
hostedZone: domainZone,
validation: acm.CertificateValidation.fromDns(domainZone),
region: 'us-east-1',
});
} else {
throw new Error('You must specify iamCertId context for cn regions');
}
}
const vpc = ec2.Vpc.fromLookup(this, `VPC-${props.vpcId}`, {
vpcId: props.vpcId,
});
const assetBucket = new s3.Bucket(this, `OpenTunaAssets`, {
removalPolicy: cdk.RemovalPolicy.DESTROY,
});
// setup bucket for rubygems
const tunaRepoBucket = new s3.Bucket(this, 'TunaRepoBucket');
// CloudWatch dashboard
const dashboard = new cloudwatch.Dashboard(this, 'Dashboard', {
dashboardName: 'OpenTUNA-Dashboard',
});
const tunaManagerSG = new ec2.SecurityGroup(this, "TunaManagerSG", {
vpc,
description: "SG of Tuna Manager",
allowAllOutbound: true,
});
const tunaManagerALBSG = new ec2.SecurityGroup(this, "TunaManagerALBSG", {
vpc,
description: "SG of ALB of Tuna Manager",
allowAllOutbound: false,
});
const tunaWorkerSG = new ec2.SecurityGroup(this, "TunaWorkerSG", {
vpc,
description: "SG of Tuna Worker",
allowAllOutbound: true,
});
const externalALBSG = new ec2.SecurityGroup(this, "ExternalALBSG", {
vpc,
description: "SG of External ALB",
allowAllOutbound: false,
});
const externalALB = new elbv2.ApplicationLoadBalancer(this, "ExternalALB", {
vpc,
securityGroup: externalALBSG,
internetFacing: true,
http2Enabled: useHTTPS,
});
dashboard.addWidgets(new cloudwatch.GraphWidget({
title: 'ALB Processed Data',
left: [externalALB.metricProcessedBytes({
label: 'Bytes per minute',
period: cdk.Duration.minutes(1),
})]
}), new cloudwatch.GraphWidget({
title: 'ALB Connections',
left: [externalALB.metricNewConnectionCount({
label: 'New',
period: cdk.Duration.minutes(1),
}), externalALB.metricActiveConnectionCount({
label: 'Active',
period: cdk.Duration.minutes(1),
}), externalALB.metricRejectedConnectionCount({
label: 'Rejected',
period: cdk.Duration.minutes(1),
})]
}), new cloudwatch.GraphWidget({
title: 'ALB HTTP Code from Target',
left: [externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_2XX_COUNT, {
label: '2XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_3XX_COUNT, {
label: '3XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_4XX_COUNT, {
label: '4XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_5XX_COUNT, {
label: '5XX',
period: cdk.Duration.minutes(1),
})]
}));
let cert: acm.Certificate | undefined;
if (useHTTPS) {
cert = new acm.Certificate(this, 'Certificate', {
domainName: domainName,
subjectAlternativeNames: [`${stack.region}.${domainName}`],
validation: acm.CertificateValidation.fromDns(domainZone),
});
}
const defaultALBPort: number = useHTTPS ? 443 : 80;
const defaultALBListener = externalALB.addListener(`DefaultPort-${defaultALBPort}`, {
protocol: useHTTPS ? elbv2.ApplicationProtocol.HTTPS : elbv2.ApplicationProtocol.HTTP,
port: defaultALBPort,
open: true,
certificates: cert ? [cert] : undefined,
sslPolicy: useHTTPS ? elbv2.SslPolicy.RECOMMENDED : undefined,
});
let httpOnlyALBListener: elbv2.ApplicationListener | undefined;
if (useHTTPS) {
// redirect HTTP to HTTPS
httpOnlyALBListener = externalALB.addListener(`DefaultPort-80`, {
protocol: elbv2.ApplicationProtocol.HTTP,
port: 80,
open: true,
defaultAction: elbv2.ListenerAction.redirect({
port: '443',
protocol: elbv2.ApplicationProtocol.HTTPS,
permanent: true,
}),
});
new r53.ARecord(this, 'ALBCustomDomain', {
zone: domainZone!,
recordName: `${stack.region}.${domainName}`,
ttl: cdk.Duration.minutes(5),
target: r53.RecordTarget.fromAlias(new alias.LoadBalancerTarget(externalALB)),
});
}
// Tunasync Manager stack
const tunaManagerStack = new TunaManagerStack(this, 'TunaManagerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
tunaManagerSG,
tunaManagerALBSG,
timeout: cdk.Duration.minutes(10),
assetBucket,
});
const managerUrl = `http://${tunaManagerStack.managerALB.loadBalancerDnsName}:${tunaManagerStack.managerPort}`;
// Tunasync Worker stack
const tunaWorkerStack = new TunaWorkerStack(this, 'TunaWorkerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
managerUrl,
timeout: cdk.Duration.minutes(10),
tunaWorkerSG,
assetBucket,
tunaRepoBucket,
});
tunaManagerALBSG.connections.allowFrom(tunaWorkerSG, ec2.Port.tcp(tunaManagerStack.managerPort), 'Access from tuna worker');
tunaWorkerSG.connections.allowFrom(tunaManagerSG, ec2.Port.tcp(tunaWorkerStack.workerPort), 'Access from tuna manager');
const ecsCluster = new ecs.Cluster(this, `ECSCluster`, {
vpc,
});
// Content Server stack
const contentServerStack = new ContentServerStack(this, 'ContentServerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
ecsCluster,
listener: defaultALBListener,
httpOnlyListener: httpOnlyALBListener,
dashboard,
});
// Web Portal stack
const webPortalStack = new WebPortalStack(this, 'WebPortalStack', {
vpc,
externalALBListener: defaultALBListener,
ecsCluster,
tunaManagerASG: tunaManagerStack.managerASG,
tunaManagerALBTargetGroup: tunaManagerStack.managerALBTargetGroup,
fileSystemId: props.fileSystemId,
fileSystemSGId: props.fileSystemSGId,
});
tunaManagerSG.connections.allowFrom(externalALBSG, ec2.Port.tcp(80), 'Allow external ALB to access tuna manager');
// Monitor stack
const monitorStack = new MonitorStack(this, 'MonitorStack', {
vpc,
domainName,
notifyTopic: props.notifyTopic,
tunaManagerUrl: managerUrl,
tunaManagerALBSG,
});
let commonBehaviorConfig = {
// special handling for redirections
forwardedValues: {
headers: ['Host'],
queryString: true,
},
// default 1 day cache
defaultTtl: cdk.Duration.days(1),
};
// origin access identity for s3 bucket
const oai = new cloudfront.OriginAccessIdentity(this, 'TunaRepoOAI');
tunaRepoBucket.grantRead(oai);
// CloudFront as cdn
let cloudfrontProps = {
originConfigs: [{
customOriginSource: {
domainName: useHTTPS ? `${stack.region}.${domainName}` : external | OpentunaStack | identifier_name | |
opentuna-stack.ts | const tunaManagerALBSG = new ec2.SecurityGroup(this, "TunaManagerALBSG", {
vpc,
description: "SG of ALB of Tuna Manager",
allowAllOutbound: false,
});
const tunaWorkerSG = new ec2.SecurityGroup(this, "TunaWorkerSG", {
vpc,
description: "SG of Tuna Worker",
allowAllOutbound: true,
});
const externalALBSG = new ec2.SecurityGroup(this, "ExternalALBSG", {
vpc,
description: "SG of External ALB",
allowAllOutbound: false,
});
const externalALB = new elbv2.ApplicationLoadBalancer(this, "ExternalALB", {
vpc,
securityGroup: externalALBSG,
internetFacing: true,
http2Enabled: useHTTPS,
});
dashboard.addWidgets(new cloudwatch.GraphWidget({
title: 'ALB Processed Data',
left: [externalALB.metricProcessedBytes({
label: 'Bytes per minute',
period: cdk.Duration.minutes(1),
})]
}), new cloudwatch.GraphWidget({
title: 'ALB Connections',
left: [externalALB.metricNewConnectionCount({
label: 'New',
period: cdk.Duration.minutes(1),
}), externalALB.metricActiveConnectionCount({
label: 'Active',
period: cdk.Duration.minutes(1),
}), externalALB.metricRejectedConnectionCount({
label: 'Rejected',
period: cdk.Duration.minutes(1),
})]
}), new cloudwatch.GraphWidget({
title: 'ALB HTTP Code from Target',
left: [externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_2XX_COUNT, {
label: '2XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_3XX_COUNT, {
label: '3XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_4XX_COUNT, {
label: '4XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_5XX_COUNT, {
label: '5XX',
period: cdk.Duration.minutes(1),
})]
}));
let cert: acm.Certificate | undefined;
if (useHTTPS) {
cert = new acm.Certificate(this, 'Certificate', {
domainName: domainName,
subjectAlternativeNames: [`${stack.region}.${domainName}`],
validation: acm.CertificateValidation.fromDns(domainZone),
});
}
const defaultALBPort: number = useHTTPS ? 443 : 80;
const defaultALBListener = externalALB.addListener(`DefaultPort-${defaultALBPort}`, {
protocol: useHTTPS ? elbv2.ApplicationProtocol.HTTPS : elbv2.ApplicationProtocol.HTTP,
port: defaultALBPort,
open: true,
certificates: cert ? [cert] : undefined,
sslPolicy: useHTTPS ? elbv2.SslPolicy.RECOMMENDED : undefined,
});
let httpOnlyALBListener: elbv2.ApplicationListener | undefined;
if (useHTTPS) {
// redirect HTTP to HTTPS
httpOnlyALBListener = externalALB.addListener(`DefaultPort-80`, {
protocol: elbv2.ApplicationProtocol.HTTP,
port: 80,
open: true,
defaultAction: elbv2.ListenerAction.redirect({
port: '443',
protocol: elbv2.ApplicationProtocol.HTTPS,
permanent: true,
}),
});
new r53.ARecord(this, 'ALBCustomDomain', {
zone: domainZone!,
recordName: `${stack.region}.${domainName}`,
ttl: cdk.Duration.minutes(5),
target: r53.RecordTarget.fromAlias(new alias.LoadBalancerTarget(externalALB)),
});
}
// Tunasync Manager stack
const tunaManagerStack = new TunaManagerStack(this, 'TunaManagerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
tunaManagerSG,
tunaManagerALBSG,
timeout: cdk.Duration.minutes(10),
assetBucket,
});
const managerUrl = `http://${tunaManagerStack.managerALB.loadBalancerDnsName}:${tunaManagerStack.managerPort}`;
// Tunasync Worker stack
const tunaWorkerStack = new TunaWorkerStack(this, 'TunaWorkerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
managerUrl,
timeout: cdk.Duration.minutes(10),
tunaWorkerSG,
assetBucket,
tunaRepoBucket,
});
tunaManagerALBSG.connections.allowFrom(tunaWorkerSG, ec2.Port.tcp(tunaManagerStack.managerPort), 'Access from tuna worker');
tunaWorkerSG.connections.allowFrom(tunaManagerSG, ec2.Port.tcp(tunaWorkerStack.workerPort), 'Access from tuna manager');
const ecsCluster = new ecs.Cluster(this, `ECSCluster`, {
vpc,
});
// Content Server stack
const contentServerStack = new ContentServerStack(this, 'ContentServerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
ecsCluster,
listener: defaultALBListener,
httpOnlyListener: httpOnlyALBListener,
dashboard,
});
// Web Portal stack
const webPortalStack = new WebPortalStack(this, 'WebPortalStack', {
vpc,
externalALBListener: defaultALBListener,
ecsCluster,
tunaManagerASG: tunaManagerStack.managerASG,
tunaManagerALBTargetGroup: tunaManagerStack.managerALBTargetGroup,
fileSystemId: props.fileSystemId,
fileSystemSGId: props.fileSystemSGId,
});
tunaManagerSG.connections.allowFrom(externalALBSG, ec2.Port.tcp(80), 'Allow external ALB to access tuna manager');
// Monitor stack
const monitorStack = new MonitorStack(this, 'MonitorStack', {
vpc,
domainName,
notifyTopic: props.notifyTopic,
tunaManagerUrl: managerUrl,
tunaManagerALBSG,
});
let commonBehaviorConfig = {
// special handling for redirections
forwardedValues: {
headers: ['Host'],
queryString: true,
},
// default 1 day cache
defaultTtl: cdk.Duration.days(1),
};
// origin access identity for s3 bucket
const oai = new cloudfront.OriginAccessIdentity(this, 'TunaRepoOAI');
tunaRepoBucket.grantRead(oai);
// CloudFront as cdn
let cloudfrontProps = {
originConfigs: [{
customOriginSource: {
domainName: useHTTPS ? `${stack.region}.${domainName}` : externalALB.loadBalancerDnsName,
originProtocolPolicy: cloudfront.OriginProtocolPolicy.MATCH_VIEWER
},
behaviors: [{
...commonBehaviorConfig,
isDefaultBehavior: true,
}, {
...commonBehaviorConfig,
pathPattern: '/debian/*',
}, {
...commonBehaviorConfig,
pathPattern: '/debian-security/*',
}, {
...commonBehaviorConfig,
pathPattern: '/ubuntu/*',
}, {
...commonBehaviorConfig,
// 5min cache for tunasync status
pathPattern: '/jobs',
defaultTtl: cdk.Duration.minutes(5),
}],
}, {
s3OriginSource: {
s3BucketSource: tunaRepoBucket,
originAccessIdentity: oai,
},
behaviors: [{
pathPattern: '/rubygems/gems/*',
// 1w cache for gem specs
defaultTtl: cdk.Duration.days(7),
}, {
pathPattern: '/rubygems/*',
// 1h cache for index files
defaultTtl: cdk.Duration.minutes(60),
}]
}],
defaultRootObject: '',
errorConfigurations: [
{
errorCode: 500,
errorCachingMinTtl: 30,
},
{
errorCode: 502,
errorCachingMinTtl: 0,
},
{
errorCode: 503,
errorCachingMinTtl: 0,
},
{
errorCode: 404,
errorCachingMinTtl: 3600,
responseCode: 404,
responsePagePath: '/404.html',
}
],
} as cloudfront.CloudFrontWebDistributionProps;
if (useHTTPS) {
// when https is enabled
cloudfrontProps = {
httpVersion: cloudfront.HttpVersion.HTTP2,
viewerProtocolPolicy: cloudfront.ViewerProtocolPolicy.REDIRECT_TO_HTTPS,
...cloudfrontProps
};
if (cloudfrontCert instanceof acm.DnsValidatedCertificate) | {
// ACM cert
cloudfrontProps = {
aliasConfiguration: {
acmCertRef: cloudfrontCert.certificateArn,
names: [domainName],
},
...cloudfrontProps
}
} | conditional_block | |
opentuna-stack.ts | } else if (!stack.region.startsWith('cn-')) {
// Try to use ACM certificate in us-east-1 for CloudFront
cloudfrontCert = new acm.DnsValidatedCertificate(this, 'CloudFrontCertificate', {
domainName: domainName,
hostedZone: domainZone,
validation: acm.CertificateValidation.fromDns(domainZone),
region: 'us-east-1',
});
} else {
throw new Error('You must specify iamCertId context for cn regions');
}
}
const vpc = ec2.Vpc.fromLookup(this, `VPC-${props.vpcId}`, {
vpcId: props.vpcId,
});
const assetBucket = new s3.Bucket(this, `OpenTunaAssets`, {
removalPolicy: cdk.RemovalPolicy.DESTROY,
});
// setup bucket for rubygems
const tunaRepoBucket = new s3.Bucket(this, 'TunaRepoBucket');
// CloudWatch dashboard
const dashboard = new cloudwatch.Dashboard(this, 'Dashboard', {
dashboardName: 'OpenTUNA-Dashboard',
});
const tunaManagerSG = new ec2.SecurityGroup(this, "TunaManagerSG", {
vpc,
description: "SG of Tuna Manager",
allowAllOutbound: true,
});
const tunaManagerALBSG = new ec2.SecurityGroup(this, "TunaManagerALBSG", {
vpc,
description: "SG of ALB of Tuna Manager",
allowAllOutbound: false,
});
const tunaWorkerSG = new ec2.SecurityGroup(this, "TunaWorkerSG", {
vpc,
description: "SG of Tuna Worker",
allowAllOutbound: true,
});
const externalALBSG = new ec2.SecurityGroup(this, "ExternalALBSG", {
vpc,
description: "SG of External ALB",
allowAllOutbound: false,
});
const externalALB = new elbv2.ApplicationLoadBalancer(this, "ExternalALB", {
vpc,
securityGroup: externalALBSG,
internetFacing: true,
http2Enabled: useHTTPS,
});
dashboard.addWidgets(new cloudwatch.GraphWidget({
title: 'ALB Processed Data',
left: [externalALB.metricProcessedBytes({
label: 'Bytes per minute',
period: cdk.Duration.minutes(1),
})]
}), new cloudwatch.GraphWidget({
title: 'ALB Connections',
left: [externalALB.metricNewConnectionCount({
label: 'New',
period: cdk.Duration.minutes(1),
}), externalALB.metricActiveConnectionCount({
label: 'Active',
period: cdk.Duration.minutes(1),
}), externalALB.metricRejectedConnectionCount({
label: 'Rejected',
period: cdk.Duration.minutes(1),
})]
}), new cloudwatch.GraphWidget({
title: 'ALB HTTP Code from Target',
left: [externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_2XX_COUNT, {
label: '2XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_3XX_COUNT, {
label: '3XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_4XX_COUNT, {
label: '4XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_5XX_COUNT, {
label: '5XX',
period: cdk.Duration.minutes(1),
})]
}));
let cert: acm.Certificate | undefined;
if (useHTTPS) {
cert = new acm.Certificate(this, 'Certificate', {
domainName: domainName,
subjectAlternativeNames: [`${stack.region}.${domainName}`],
validation: acm.CertificateValidation.fromDns(domainZone),
});
}
const defaultALBPort: number = useHTTPS ? 443 : 80;
const defaultALBListener = externalALB.addListener(`DefaultPort-${defaultALBPort}`, {
protocol: useHTTPS ? elbv2.ApplicationProtocol.HTTPS : elbv2.ApplicationProtocol.HTTP,
port: defaultALBPort,
open: true,
certificates: cert ? [cert] : undefined,
sslPolicy: useHTTPS ? elbv2.SslPolicy.RECOMMENDED : undefined,
});
let httpOnlyALBListener: elbv2.ApplicationListener | undefined;
if (useHTTPS) {
// redirect HTTP to HTTPS
httpOnlyALBListener = externalALB.addListener(`DefaultPort-80`, {
protocol: elbv2.ApplicationProtocol.HTTP,
port: 80,
open: true,
defaultAction: elbv2.ListenerAction.redirect({
port: '443',
protocol: elbv2.ApplicationProtocol.HTTPS,
permanent: true,
}),
});
new r53.ARecord(this, 'ALBCustomDomain', {
zone: domainZone!,
recordName: `${stack.region}.${domainName}`,
ttl: cdk.Duration.minutes(5),
target: r53.RecordTarget.fromAlias(new alias.LoadBalancerTarget(externalALB)),
});
}
// Tunasync Manager stack
const tunaManagerStack = new TunaManagerStack(this, 'TunaManagerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
tunaManagerSG,
tunaManagerALBSG,
timeout: cdk.Duration.minutes(10),
assetBucket,
});
const managerUrl = `http://${tunaManagerStack.managerALB.loadBalancerDnsName}:${tunaManagerStack.managerPort}`;
// Tunasync Worker stack
const tunaWorkerStack = new TunaWorkerStack(this, 'TunaWorkerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
managerUrl,
timeout: cdk.Duration.minutes(10),
tunaWorkerSG,
assetBucket,
tunaRepoBucket,
});
tunaManagerALBSG.connections.allowFrom(tunaWorkerSG, ec2.Port.tcp(tunaManagerStack.managerPort), 'Access from tuna worker');
tunaWorkerSG.connections.allowFrom(tunaManagerSG, ec2.Port.tcp(tunaWorkerStack.workerPort), 'Access from tuna manager');
const ecsCluster = new ecs.Cluster(this, `ECSCluster`, {
vpc,
});
// Content Server stack
const contentServerStack = new ContentServerStack(this, 'ContentServerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
ecsCluster,
listener: defaultALBListener,
httpOnlyListener: httpOnlyALBListener,
dashboard,
});
// Web Portal stack
const webPortalStack = new WebPortalStack(this, 'WebPortalStack', {
vpc,
externalALBListener: defaultALBListener,
ecsCluster,
tunaManagerASG: tunaManagerStack.managerASG,
tunaManagerALBTargetGroup: tunaManagerStack.managerALBTargetGroup,
fileSystemId: props.fileSystemId,
fileSystemSGId: props.fileSystemSGId,
});
tunaManagerSG.connections.allowFrom(externalALBSG, ec2.Port.tcp(80), 'Allow external ALB to access tuna manager');
// Monitor stack
const monitorStack = new MonitorStack(this, 'MonitorStack', {
vpc,
domainName,
notifyTopic: props.notifyTopic,
tunaManagerUrl: managerUrl,
tunaManagerALBSG,
});
let commonBehaviorConfig = {
// special handling for redirections
forwardedValues: {
headers: ['Host'],
queryString: true,
},
// default 1 day cache
defaultTtl: cdk.Duration.days(1),
};
// origin access identity for s3 bucket
const oai = new cloudfront.OriginAccessIdentity(this, 'TunaRepoOAI');
tunaRepoBucket.grantRead(oai);
// CloudFront as cdn
let cloudfrontProps = {
originConfigs: [{
customOriginSource: {
domainName: useHTTPS ? `${stack.region}.${domainName}` : externalALB.loadBalancerDnsName,
originProtocolPolicy: cloudfront.OriginProtocolPolicy.MATCH_VIEWER
},
behaviors: [{
| {
super(scope, id, props);
const stack = cdk.Stack.of(this);
const domainName = this.node.tryGetContext('domainName');
const domainZoneName = this.node.tryGetContext('domainZone');
const iamCertId = this.node.tryGetContext('iamCertId');
let useHTTPS = false;
let domainZone: r53.IHostedZone | undefined;
// ACM or IAM certificate
let cloudfrontCert: acm.Certificate | string | null = null;
if (domainName && domainZoneName) {
domainZone = r53.HostedZone.fromLookup(this, 'HostedZone', {
domainName: domainZoneName,
});
useHTTPS = true;
if (iamCertId !== undefined) {
// Use IAM first when specified
cloudfrontCert = iamCertId; | identifier_body | |
opentuna-stack.ts | cloudfrontCert: acm.Certificate | string | null = null;
if (domainName && domainZoneName) {
domainZone = r53.HostedZone.fromLookup(this, 'HostedZone', {
domainName: domainZoneName,
});
useHTTPS = true;
if (iamCertId !== undefined) {
// Use IAM first when specified
cloudfrontCert = iamCertId;
} else if (!stack.region.startsWith('cn-')) {
// Try to use ACM certificate in us-east-1 for CloudFront
cloudfrontCert = new acm.DnsValidatedCertificate(this, 'CloudFrontCertificate', {
domainName: domainName,
hostedZone: domainZone,
validation: acm.CertificateValidation.fromDns(domainZone),
region: 'us-east-1',
});
} else {
throw new Error('You must specify iamCertId context for cn regions');
}
}
const vpc = ec2.Vpc.fromLookup(this, `VPC-${props.vpcId}`, {
vpcId: props.vpcId,
});
const assetBucket = new s3.Bucket(this, `OpenTunaAssets`, {
removalPolicy: cdk.RemovalPolicy.DESTROY,
});
// setup bucket for rubygems
const tunaRepoBucket = new s3.Bucket(this, 'TunaRepoBucket');
// CloudWatch dashboard
const dashboard = new cloudwatch.Dashboard(this, 'Dashboard', {
dashboardName: 'OpenTUNA-Dashboard',
}); | allowAllOutbound: true,
});
const tunaManagerALBSG = new ec2.SecurityGroup(this, "TunaManagerALBSG", {
vpc,
description: "SG of ALB of Tuna Manager",
allowAllOutbound: false,
});
const tunaWorkerSG = new ec2.SecurityGroup(this, "TunaWorkerSG", {
vpc,
description: "SG of Tuna Worker",
allowAllOutbound: true,
});
const externalALBSG = new ec2.SecurityGroup(this, "ExternalALBSG", {
vpc,
description: "SG of External ALB",
allowAllOutbound: false,
});
const externalALB = new elbv2.ApplicationLoadBalancer(this, "ExternalALB", {
vpc,
securityGroup: externalALBSG,
internetFacing: true,
http2Enabled: useHTTPS,
});
dashboard.addWidgets(new cloudwatch.GraphWidget({
title: 'ALB Processed Data',
left: [externalALB.metricProcessedBytes({
label: 'Bytes per minute',
period: cdk.Duration.minutes(1),
})]
}), new cloudwatch.GraphWidget({
title: 'ALB Connections',
left: [externalALB.metricNewConnectionCount({
label: 'New',
period: cdk.Duration.minutes(1),
}), externalALB.metricActiveConnectionCount({
label: 'Active',
period: cdk.Duration.minutes(1),
}), externalALB.metricRejectedConnectionCount({
label: 'Rejected',
period: cdk.Duration.minutes(1),
})]
}), new cloudwatch.GraphWidget({
title: 'ALB HTTP Code from Target',
left: [externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_2XX_COUNT, {
label: '2XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_3XX_COUNT, {
label: '3XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_4XX_COUNT, {
label: '4XX',
period: cdk.Duration.minutes(1),
}), externalALB.metricHttpCodeTarget(elbv2.HttpCodeTarget.TARGET_5XX_COUNT, {
label: '5XX',
period: cdk.Duration.minutes(1),
})]
}));
let cert: acm.Certificate | undefined;
if (useHTTPS) {
cert = new acm.Certificate(this, 'Certificate', {
domainName: domainName,
subjectAlternativeNames: [`${stack.region}.${domainName}`],
validation: acm.CertificateValidation.fromDns(domainZone),
});
}
const defaultALBPort: number = useHTTPS ? 443 : 80;
const defaultALBListener = externalALB.addListener(`DefaultPort-${defaultALBPort}`, {
protocol: useHTTPS ? elbv2.ApplicationProtocol.HTTPS : elbv2.ApplicationProtocol.HTTP,
port: defaultALBPort,
open: true,
certificates: cert ? [cert] : undefined,
sslPolicy: useHTTPS ? elbv2.SslPolicy.RECOMMENDED : undefined,
});
let httpOnlyALBListener: elbv2.ApplicationListener | undefined;
if (useHTTPS) {
// redirect HTTP to HTTPS
httpOnlyALBListener = externalALB.addListener(`DefaultPort-80`, {
protocol: elbv2.ApplicationProtocol.HTTP,
port: 80,
open: true,
defaultAction: elbv2.ListenerAction.redirect({
port: '443',
protocol: elbv2.ApplicationProtocol.HTTPS,
permanent: true,
}),
});
new r53.ARecord(this, 'ALBCustomDomain', {
zone: domainZone!,
recordName: `${stack.region}.${domainName}`,
ttl: cdk.Duration.minutes(5),
target: r53.RecordTarget.fromAlias(new alias.LoadBalancerTarget(externalALB)),
});
}
// Tunasync Manager stack
const tunaManagerStack = new TunaManagerStack(this, 'TunaManagerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
tunaManagerSG,
tunaManagerALBSG,
timeout: cdk.Duration.minutes(10),
assetBucket,
});
const managerUrl = `http://${tunaManagerStack.managerALB.loadBalancerDnsName}:${tunaManagerStack.managerPort}`;
// Tunasync Worker stack
const tunaWorkerStack = new TunaWorkerStack(this, 'TunaWorkerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
managerUrl,
timeout: cdk.Duration.minutes(10),
tunaWorkerSG,
assetBucket,
tunaRepoBucket,
});
tunaManagerALBSG.connections.allowFrom(tunaWorkerSG, ec2.Port.tcp(tunaManagerStack.managerPort), 'Access from tuna worker');
tunaWorkerSG.connections.allowFrom(tunaManagerSG, ec2.Port.tcp(tunaWorkerStack.workerPort), 'Access from tuna manager');
const ecsCluster = new ecs.Cluster(this, `ECSCluster`, {
vpc,
});
// Content Server stack
const contentServerStack = new ContentServerStack(this, 'ContentServerStack', {
vpc,
fileSystemId: props.fileSystemId,
notifyTopic: props.notifyTopic,
ecsCluster,
listener: defaultALBListener,
httpOnlyListener: httpOnlyALBListener,
dashboard,
});
// Web Portal stack
const webPortalStack = new WebPortalStack(this, 'WebPortalStack', {
vpc,
externalALBListener: defaultALBListener,
ecsCluster,
tunaManagerASG: tunaManagerStack.managerASG,
tunaManagerALBTargetGroup: tunaManagerStack.managerALBTargetGroup,
fileSystemId: props.fileSystemId,
fileSystemSGId: props.fileSystemSGId,
});
tunaManagerSG.connections.allowFrom(externalALBSG, ec2.Port.tcp(80), 'Allow external ALB to access tuna manager');
// Monitor stack
const monitorStack = new MonitorStack(this, 'MonitorStack', {
vpc,
domainName,
notifyTopic: props.notifyTopic,
tunaManagerUrl: managerUrl,
tunaManagerALBSG,
});
let commonBehaviorConfig = {
// special handling for redirections
forwardedValues: {
headers: ['Host'],
queryString: true,
},
// default 1 day cache
defaultTtl: cdk.Duration.days(1),
};
// origin access identity for s3 bucket
const oai = new cloudfront.OriginAccessIdentity(this, 'TunaRepoOAI');
tunaRepoBucket.grantRead(oai);
// CloudFront as cdn
let cloudfrontProps = {
originConfigs: [{
customOriginSource: {
domainName: useHTTPS ? `${stack.region}.${domainName}` : externalALB.loadBalancerDnsName,
originProtocolPolicy: cloudfront.OriginProtocolPolicy.MATCH_VIEWER
},
behaviors: [{
...commonBehaviorConfig,
isDefaultBehavior: true,
}, {
...commonBehaviorConfig,
pathPattern: '/debian/*',
}, {
...commonBehaviorConfig,
pathPattern: '/debian-security/*',
}, {
...commonBehaviorConfig,
pathPattern: '/ubuntu/*',
}, {
...commonBehaviorConfig,
// 5min cache for tunasync status
pathPattern: '/jobs',
defaultT |
const tunaManagerSG = new ec2.SecurityGroup(this, "TunaManagerSG", {
vpc,
description: "SG of Tuna Manager", | random_line_split |
L.IM_RoutingControl.js | );transform:scale(-1.3, 1.3)"></i>'+
'</span>',
tooltip: 'right',
marker_style_origen: {
icon : '',
markerColor : 'green',
divColor:'transparent',
iconAnchor : new L.Point(14, 42),
iconSize : new L.Point(28, 42),
iconColor : '#000000',
prefix : 'fa',
isCanvas:false,
radius:6,
opacity:1,
weight : 2,
fillOpacity : 0.9,
color : "#ffffff",
fillColor :"transparent"
},
marker_style_desti: {
icon : '',
markerColor : 'red',
divColor:'transparent',
iconAnchor : new L.Point(14, 42),
iconSize : new L.Point(28, 42),
iconColor : '#000000',
prefix : 'fa',
isCanvas:false,
radius:6,
opacity:1,
weight : 2,
fillOpacity : 0.9,
color : "#ffffff",
fillColor :"transparent"
},
marker_style_intermig: {
icon : '',
markerColor : 'orange',
divColor:'transparent',
iconAnchor : new L.Point(14, 42),
iconSize : new L.Point(28, 42),
iconColor : '#000000',
prefix : 'fa',
isCanvas:false,
radius:6,
opacity:1,
weight : 2,
fillOpacity : 0.9,
color : "#ffffff",
fillColor :"transparent"
},
originTexts: {
title: "Càlcul de rutes",
btnStart: "Defineix com a origen",
btnEnd: "Defineix com a destí",
btnReverse: "Ruta inversa",
btnAdd: "Afegir punts",
start: "Inici",
end: "Destí"
},
texts: {
title: "Càlcul de rutes",
btnStart: "Defineix com a origen",
btnEnd: "Defineix com a destí",
btnReverse: "Ruta inversa",
btnAdd: "Afegir punts",
start: "Inici",
end: "Destí"
}
},
//TODO ver el tema del lang para poder cambiar el idioma del control
initialize: function(options) {
L.setOptions(this, options);
var self = this,
options = this.options,
lang = options.lang,
puntIntermig = L.AwesomeMarkers.icon(options.marker_style_intermig),
puntDesti = L.AwesomeMarkers.icon(options.marker_style_desti),
puntOrigen = L.AwesomeMarkers.icon(options.marker_style_origen);
this._reversablePlan = L.Routing.Plan.extend({
createGeocoders: function() {
var container = L.Routing.Plan.prototype.createGeocoders.call(this),
title = (window.lang) ? window.lang.translate(options.originTexts.btnReverse) : options.texts.btnReverse,
reverseButton = self._createButton('<span class="glyphicon glyphicon-sort" style="font-size:14px;"></span>', container, title, lang);
L.DomEvent.on(reverseButton, 'click', function() {
var waypoints = this.getWaypoints();
this.setWaypoints(waypoints.reverse());
}, this);
return container;
}
});
var createMarker = function(i, wp) {
var numWp = this._route.getWaypoints().length;
if(i == 0){
return L.marker(wp.latLng, {
draggable: true,
icon: puntOrigen
});
}
else if (i === (numWp - 1)){
| else {
return L.marker(wp.latLng, {
draggable: true,
icon: puntIntermig
});
}
};
this._plan = new this._reversablePlan([], {
geocoder: L.Control.Geocoder.icgc(),
routeWhileDragging: true,
language: lang,
createMarker: createMarker.bind(self)
});
//console.debug(lang);
this._route = L.Routing.control({
router: L.Routing.mapzen('mapzen-aMHsmLA', {
language: lang,
costing:'auto',
directions_options: {
language: lang
}
}),
formatter: new L.Routing.mapzenFormatter(),
routeWhileDragging: true,
plan: this._plan,
position: 'topleft',
language: lang,
showAlternatives: true,
lineOptions: {
styles: [
{color: '#00B3FD', opacity: 1, weight: 4},
]
},
altLineOptions:{
styles: [
{color: 'black', opacity: 1, weight: 2},
]
}
});
},
onAdd: function(map){
var self = this,
options = self.options,
stop = L.DomEvent.stopPropagation,
container = L.DomUtil.create('div', options.className);
container.id = options.id;
container.innerHTML = options.html;
container.title = options.title;
container.dataset.toggle = 'tooltip';
container.dataset.placement = options.tooltip;
container.dataset.langTitle = options.langTitle;
self._div = container;
self._map = map;
L.DomEvent
.on(container, 'click', stop)
.on(container, 'mousedown', stop)
.on(container, 'dblclick', stop)
.on(container, 'click', L.DomEvent.preventDefault)
.on(container, 'click', self._toggle, self);
return container;
},
hideBtn: function(){
var self = this;
$(self._div).hide();
},
showBtn: function(){
var self = this;
$(self._div).show();
},
show: function() {
L.DomUtil.removeClass(this._div, 'grisfort');
L.DomUtil.addClass(this._div, 'greenfort');
var _map = this._map,
options = this.options,
_texts = options.texts,
_route = this._route;
_map.fire('showRouting'); //to track ga events
_map.on('click', this._routingPopup, this);
_route.addTo(_map);
if(window.lang){
_texts.title = window.lang.translate(options.originTexts.title);
_texts.btnReverse = window.lang.translate(options.originTexts.btnReverse);
_texts.btnAdd = window.lang.translate(options.originTexts.btnAdd);
_texts.start = window.lang.translate(options.originTexts.start);
_texts.end = window.lang.translate(options.originTexts.end);
}
$('.leaflet-routing-geocoders').before( '<div class="div-routing-title"><span lang="ca" class="routing-title">'+_texts.title+'</span> <a href="http://www.liedman.net/leaflet-routing-machine/" target="_blank" class="div-routing-title" style="display:inline;"><span class="glyphicon glyphicon-info-sign white" style="font-size:14px;"></a></div>' );
$('.leaflet-routing-add-waypoint').attr('title',_texts.btnAdd);
$('.leaflet-routing-add-waypoint').attr('lang',options.lang);
$('.leaflet-routing-geocoder').first().find('input').attr('placeholder',_texts.start);
$('.leaflet-routing-geocoder').last().find('input').attr('placeholder',_texts.end);
var offset = $(this._div).offset();
jQuery('.leaflet-routing-container').css('top', (offset.top-60)+'px');
jQuery('.leaflet-routing-container').css('left', (offset.left + 35)+'px');
jQuery('.leaflet-routing-container').css('position','absolute');
jQuery('.leaflet-routing-container').css('z-index','100');
},
hide: function() {
var self = this,
_map = self._map,
_route = self._route;
L.DomUtil.removeClass(self._div, 'greenfort');
L.DomUtil.addClass(self._div, 'grisfort');
console.debug("AQUI");
try{
_route.removeFrom.call(_route,_map);
}catch(e){
console.debug(e);
}finally{
_map.off('click',self._routingPopup, self);
}
},
_toggle: function(e){
var collapsed = L.DomUtil.hasClass(this._div, 'grisfort');
this[collapsed ? 'show' : 'hide']();
},
_routingPopup: function(e) {
console.debug("routing");
var options = this.options,
_texts = options.texts;
if(window.lang){
_texts.title = window.lang.translate(options.originTexts.title);
_texts.btn | return L.marker(wp.latLng, {
draggable: true,
icon: puntDesti
});
}
| conditional_block |
L.IM_RoutingControl.js | '<i class="t-square-rounded" style="-webkit-transform:scale(1.25) scale(0.65) rotate(45deg);-moz-transform:scale(1.25) scale(0.65) rotate(45deg);transform:scale(1.25) scale(0.65) rotate(45deg)"></i>'+
'<i class="t-turn-90-l t-c-white" style="-webkit-transform:scale(-1.3, 1.3);-moz-transform:scale(-1.3, 1.3);transform:scale(-1.3, 1.3)"></i>'+
'</span>',
tooltip: 'right',
marker_style_origen: {
icon : '',
markerColor : 'green',
divColor:'transparent',
iconAnchor : new L.Point(14, 42),
iconSize : new L.Point(28, 42),
iconColor : '#000000',
prefix : 'fa',
isCanvas:false,
radius:6,
opacity:1,
weight : 2,
fillOpacity : 0.9,
color : "#ffffff",
fillColor :"transparent"
},
marker_style_desti: {
icon : '',
markerColor : 'red',
divColor:'transparent',
iconAnchor : new L.Point(14, 42),
iconSize : new L.Point(28, 42),
iconColor : '#000000',
prefix : 'fa',
isCanvas:false,
radius:6,
opacity:1,
weight : 2,
fillOpacity : 0.9,
color : "#ffffff",
fillColor :"transparent"
},
marker_style_intermig: {
icon : '',
markerColor : 'orange',
divColor:'transparent',
iconAnchor : new L.Point(14, 42),
iconSize : new L.Point(28, 42),
iconColor : '#000000',
prefix : 'fa',
isCanvas:false,
radius:6,
opacity:1,
weight : 2,
fillOpacity : 0.9,
color : "#ffffff",
fillColor :"transparent"
},
originTexts: {
title: "Càlcul de rutes",
btnStart: "Defineix com a origen",
btnEnd: "Defineix com a destí",
btnReverse: "Ruta inversa",
btnAdd: "Afegir punts",
start: "Inici",
end: "Destí"
},
texts: {
title: "Càlcul de rutes",
btnStart: "Defineix com a origen",
btnEnd: "Defineix com a destí",
btnReverse: "Ruta inversa",
btnAdd: "Afegir punts",
start: "Inici",
end: "Destí"
}
},
//TODO ver el tema del lang para poder cambiar el idioma del control
initialize: function(options) {
L.setOptions(this, options);
var self = this,
options = this.options,
lang = options.lang,
puntIntermig = L.AwesomeMarkers.icon(options.marker_style_intermig),
puntDesti = L.AwesomeMarkers.icon(options.marker_style_desti),
puntOrigen = L.AwesomeMarkers.icon(options.marker_style_origen);
this._reversablePlan = L.Routing.Plan.extend({
createGeocoders: function() {
var container = L.Routing.Plan.prototype.createGeocoders.call(this),
title = (window.lang) ? window.lang.translate(options.originTexts.btnReverse) : options.texts.btnReverse,
reverseButton = self._createButton('<span class="glyphicon glyphicon-sort" style="font-size:14px;"></span>', container, title, lang);
L.DomEvent.on(reverseButton, 'click', function() {
var waypoints = this.getWaypoints();
this.setWaypoints(waypoints.reverse());
}, this);
return container;
}
});
var createMarker = function(i, wp) {
var numWp = this._route.getWaypoints().length;
if(i == 0){
return L.marker(wp.latLng, {
draggable: true,
icon: puntOrigen
});
}
else if (i === (numWp - 1)){
return L.marker(wp.latLng, {
draggable: true,
icon: puntDesti
});
}
else {
return L.marker(wp.latLng, {
draggable: true,
icon: puntIntermig
});
}
};
this._plan = new this._reversablePlan([], {
geocoder: L.Control.Geocoder.icgc(),
routeWhileDragging: true,
language: lang,
createMarker: createMarker.bind(self)
});
//console.debug(lang);
this._route = L.Routing.control({
router: L.Routing.mapzen('mapzen-aMHsmLA', {
language: lang,
costing:'auto',
directions_options: {
language: lang
}
}),
formatter: new L.Routing.mapzenFormatter(),
routeWhileDragging: true,
plan: this._plan,
position: 'topleft',
language: lang,
showAlternatives: true,
lineOptions: {
styles: [
{color: '#00B3FD', opacity: 1, weight: 4},
]
},
altLineOptions:{
styles: [
{color: 'black', opacity: 1, weight: 2},
]
}
});
},
onAdd: function(map){
var self = this,
options = self.options,
stop = L.DomEvent.stopPropagation,
container = L.DomUtil.create('div', options.className);
container.id = options.id;
container.innerHTML = options.html;
container.title = options.title;
container.dataset.toggle = 'tooltip';
container.dataset.placement = options.tooltip;
container.dataset.langTitle = options.langTitle;
self._div = container;
self._map = map;
L.DomEvent
.on(container, 'click', stop)
.on(container, 'mousedown', stop)
.on(container, 'dblclick', stop)
.on(container, 'click', L.DomEvent.preventDefault)
.on(container, 'click', self._toggle, self);
return container;
},
hideBtn: function(){
var self = this;
$(self._div).hide();
},
showBtn: function(){
var self = this;
$(self._div).show();
},
show: function() {
L.DomUtil.removeClass(this._div, 'grisfort');
L.DomUtil.addClass(this._div, 'greenfort');
var _map = this._map,
options = this.options,
_texts = options.texts,
_route = this._route;
_map.fire('showRouting'); //to track ga events
_map.on('click', this._routingPopup, this);
_route.addTo(_map);
if(window.lang){
_texts.title = window.lang.translate(options.originTexts.title);
_texts.btnReverse = window.lang.translate(options.originTexts.btnReverse);
_texts.btnAdd = window.lang.translate(options.originTexts.btnAdd);
_texts.start = window.lang.translate(options.originTexts.start);
_texts.end = window.lang.translate(options.originTexts.end);
}
$('.leaflet-routing-geocoders').before( '<div class="div-routing-title"><span lang="ca" class="routing-title">'+_texts.title+'</span> <a href="http://www.liedman.net/leaflet-routing-machine/" target="_blank" class="div-routing-title" style="display:inline;"><span class="glyphicon glyphicon-info-sign white" style="font-size:14px;"></a></div>' );
$('.leaflet-routing-add-waypoint').attr('title',_texts.btnAdd);
$('.leaflet-routing-add-waypoint').attr('lang',options.lang);
$('.leaflet-routing-geocoder').first().find('input').attr('placeholder',_texts.start);
$('.leaflet-routing-geocoder').last().find('input').attr('placeholder',_texts.end);
var offset = $(this._div).offset();
jQuery('.leaflet-routing-container').css('top', (offset.top-60)+'px');
jQuery('.leaflet-routing-container').css('left', (offset.left + 35)+'px');
jQuery('.leaflet-routing-container').css('position','absolute');
jQuery('.leaflet-routing-container').css('z-index','100');
},
hide: function() {
var self = this,
_map = self._map,
_route = self._route;
L.DomUtil.removeClass(self._div, 'greenfort');
L.DomUtil.addClass(self._div, 'grisfort');
console.debug("AQUI");
try{
_route.removeFrom.call(_route,_map);
| random_line_split | ||
views.py |
from students.forms import CourseEnrollForm
from .models import Course, Module, Content, Subject
from .forms import ModuleFormSet
class OwnerMixin(object):
"""
Миксин переопределяющий метод get_queryset
во всех дочерних классах.
Может взаимодействовать со всеми моделями
у которых есть атрибут owner.
"""
def get_queryset(self):
"""
вернуть объекты созданные только текущим пользователем
"""
queryset = super(OwnerMixin, self).get_queryset()
return queryset.filter(owner=self.request.user)
class OwnerEditMixin(object):
"""
Миксин переопределяющий метод form_valid
во всех дочерних классах.
"""
def form_valid(self, form):
"""
С помощью этого метода при создании объекта(подтверждение формы)
задается владелец этого объекта.
"""
form.instance.owner = self.request.user
return super(OwnerEditMixin, self).form_valid(form)
class OwnerCourseMixin(OwnerMixin, LoginRequiredMixin):
"""
Указание модели для queryset во всех дочерних классах
"""
model = Course
class OwnerCourseEditMixin(OwnerCourseMixin, OwnerEditMixin):
"""
Миксин который должен использоватся в классах изменяющиюх
или создающих объекты модели Course
"""
# указание полей для форм дочерних классов
fields = ['subject', 'title', 'slug', 'overview']
# указание, куда будет перенаправлен пользователь
# после подтверждения формы.
# manage_course_list это имя URL в url.py
success_url = reverse_lazy('man |
c
lass CourseUpdateView(PermissionRequiredMixin, OwnerCourseEditMixin, UpdateView):
"""
Используется для изменения Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.change_course"
class CourseDeleteView(PermissionRequiredMixin, OwnerCourseMixin, DeleteView):
"""
Используется для удаления Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.delete_course"
# указание, куда будет перенаправлен пользователь
# после подтверждения формы.
# manage_course_list это имя URL в url.py
success_url = reverse_lazy('manage_course_list')
template_name = "courses/manage/course/delete.html"
class CourseModuleUpdateView(TemplateResponseMixin, View):
"""
Класс используется для добавления, обновления и удаления модулей
определенного курса.
--------------------
TemplateResponseMixin используется для отображения templates, для него
обязательно нужно указывать template_name или реализовать
метод get_template_names; имеет метод render_to_response
для отображения context в template
--------------------
View реализует метод dispatch, который анализирует response на метод запроса
и в зависимости от его типа отправляет его нужному методу (get(), post()...)
"""
template_name = "courses/manage/module/formset.html"
course = None
def get_formset(self, data=None):
return ModuleFormSet(instance=self.course, data=data)
def dispatch(self, request, pk):
# ищем определенный курс текущего пользователя
self.course = get_object_or_404(Course, id=pk, owner=request.user)
return super(CourseModuleUpdateView, self).dispatch(request, pk)
def get(self, request, *args, **kwargs):
# создаем пустой formset
formset = self.get_formset()
return self.render_to_response({'course': self.course,
'formset': formset})
def post(self, request, *args, **kwargs):
# создаем formset с данными
formset = self.get_formset(data=request.POST)
if formset.is_valid():
formset.save()
return redirect('manage_course_list')
return self.render_to_response({'course': self.course,
'formset': formset})
class ContentCreateUpdateView(TemplateResponseMixin, View):
module = None
model = None
obj = None
template_name = "courses/manage/content/form.html"
def get_model(self, model_name):
# если имя модели соответствует одному из имен моделей контента
# вернуть модель для app_label и model_name
if model_name in ['text', 'file', 'image', 'video']:
return apps.get_model(app_label="courses", model_name=model_name)
# если модель нам не подходит
return None
def get_form(self, model, *args, **kwargs):
# возвращает ModelForm для указаной model
# со всеми полями кроме тех что указаны в exclude
Form = modelform_factory(model, exclude=['owner'
'created',
'updated',
'order', 'owner'])
return Form(*args, **kwargs)
def dispatch(self, request, module_id, model_name, id=None):
# получаем модуль с которым будет асоциирован объект
self.module = get_object_or_404(Module,
id=module_id,
course__owner=request.user)
# получаем модель которая будет соответсвотать типу контента
self.model = self.get_model(model_name)
# если не None, то объект будет обновлен, иначе будет создан новый
if id:
self.obj = get_object_or_404(self.model,
id=id,
owner=request.user)
# вызываем метод родителя
return super(ContentCreateUpdateView, self).dispatch(request,
module_id,
model_name,
id)
def get(self, request, module_id, model_name, id=None):
# возвращаем форму для изменения экземпляра контента при self.obj!=None.
# при None, будт возвращена форма для создания экземпляра контента.
form = self.get_form(self.model, instance=self.obj)
return self.render_to_response({'form': form,
'object': self.obj})
def post(self, request, module_id, model_name, id=None):
# возвращаем форму с данными и файлами
form = self.get_form(self.model,
instance=self.obj,
data=request.POST,
files=request.FILES)
if form.is_valid():
# задаем владельцем контента текущего пользователя
obj = form.save(commit=False)
obj.owner = request.user
obj.save()
if not id:
# если id объекта не указан, создаем новый экземпляр
Content.objects.create(module=self.module, content_object=obj)
return redirect('module_content_list', self.module.id)
return self.render_to_response({'form': form, 'object': self.obj})
class ContentDeleteView(View):
def post(self, request, id):
content = get_object_or_404(Content,
id=id,
module__course__owner=request.user)
module = content.module
content.content_object.delete()
content.delete()
# возвращаемся к списку контента модуля
return redirect('module_content_list', module.id)
class ModuleContentListView(TemplateResponseMixin, View):
template_name = "courses/manage/module/content_list.html"
def get(self, request, module_id):
module = get_object_or_404(Module,
id=module_id,
course__owner=request.user)
return self.render_to_response({'module': module})
class ModuleOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
"""
CsrfExemptMixin освобождает запрос от csrf token'а.
JsonRequestResponseMixin - помещает правильно отформатированый
json запрос в request_json; также сериализирует response
"""
def post(self, request):
for id, order in self.request_json.items():
Module.objects.filter(id=id,
course__owner=request.user).update(order=order)
return self.render_json_response({'saved': 'OK'})
class ContentOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
"""
CsrfExemptMixin освобождает запрос от csrf token'а.
JsonRequestResponseMixin - помещает правильно отформатированый
json запрос в request_json; также сериализирует response
"""
def post(self, request):
for id, order in self.request_json.items():
print('id', id, ' -- ', order)
for id, order in self.request_json.items():
Content.objects.filter(id=id, module__course__owner=request.user). | age_course_list')
template_name = "courses/manage/course/form.html"
class ManageCourseListView(OwnerCourseMixin, ListView):
"""
Используя наследование от OwnerCourseMixin, ListView
этот класс также будет содержать все поля и методы из
OwnerCourseMixin, ListView, OwnerMixin
"""
template_name = "courses/manage/course/list.html"
class CourseCreateView(PermissionRequiredMixin, OwnerCourseEditMixin, CreateView):
"""
Используется для создания нового Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.add_course" | identifier_body |
views.py | from .models import Course, Module, Content, Subject
from .forms import ModuleFormSet
class OwnerMixin(object):
"""
Миксин переопределяющий метод get_queryset
во всех дочерних классах.
Может взаимодействовать со всеми моделями
у которых есть атрибут owner.
"""
def get_queryset(self):
"""
вернуть объекты созданные только текущим пользователем
"""
queryset = super(OwnerMixin, self).get_queryset()
return queryset.filter(owner=self.request.user)
class OwnerEditMixin(object):
"""
Миксин переопределяющий метод form_valid
во всех дочерних классах.
"""
def form_valid(self, form):
"""
С помощью этого метода при создании объекта(подтверждение формы)
задается владелец этого объекта.
"""
form.instance.owner = self.request.user
return super(OwnerEditMixin, self).form_valid(form)
class OwnerCourseMixin(OwnerMixin, LoginRequiredMixin):
"""
Указание модели для queryset во всех дочерних классах
"""
model = Course
class OwnerCourseEditMixin(OwnerCourseMixin, OwnerEditMixin):
"""
Миксин который должен использоватся в классах изменяющиюх
или создающих объекты модели Course
"""
# указание полей для форм дочерних классов
fields = ['subject', 'title', 'slug', 'overview']
# указание, куда будет перенаправлен пользователь
# после подтверждения формы.
# manage_course_list это имя URL в url.py
success_url = reverse_lazy('manage_course_list')
template_name = "courses/manage/course/form.html"
class ManageCourseListView(OwnerCourseMixin, ListView):
"""
Используя наследование от OwnerCourseMixin, ListView
этот класс также будет содержать все поля и методы из
OwnerCourseMixin, ListView, OwnerMixin
"""
template_name = "courses/manage/course/list.html"
class CourseCreateView(PermissionRequiredMixin, OwnerCourseEditMixin, CreateView):
"""
Используется для создания нового Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.add_course"
class CourseUpdateView(PermissionRequiredMixin, OwnerCourseEditMixin, UpdateView):
"""
Используется для изменения Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.change_course"
class CourseDeleteView(PermissionRequiredMixin, OwnerCourseMixin, DeleteView):
"""
Используется для удаления Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.delete_course"
# указание, куда будет перенаправлен пользователь
# после подтверждения формы.
# manage_course_list это имя URL в url.py
success_url = reverse_lazy('manage_course_list')
template_name = "courses/manage/course/delete.html"
class CourseModuleUpdateView(TemplateResponseMixin, View):
"""
Класс используется для добавления, обновления и удаления модулей
определенного курса.
--------------------
TemplateResponseMixin используется для отображения templates, для него
обязательно нужно указывать template_name или реализовать
метод get_template_names; имеет метод render_to_response
для отображения context в template
--------------------
View реализует метод dispatch, который анализирует response на метод запроса
и в зависимости от его типа отправляет его нужному методу (get(), post()...)
"""
template_name = "courses/manage/module/formset.html"
course = None
def get_formset(self, data=None):
return ModuleFormSet(instance=self.course, data=data)
def dispatch(self, request, pk):
# ищем определенный курс текущего пользователя
self.course = get_object_or_404(Course, id=pk, owner=request.user)
return super(CourseModuleUpdateView, self).dispatch(request, pk)
def get(self, request, *args, **kwargs):
# создаем пустой formset
formset = self.get_formset()
return self.render_to_response({'course': self.course,
'formset': formset})
def post(self, request, *args, **kwargs):
# создаем formset с данными
formset = self.get_formset(data=request.POST)
if formset.is_valid():
formset.save()
return redirect('manage_course_list')
return self.render_to_response({'course': self.course,
'formset': formset})
class ContentCreateUpdateView(TemplateResponseMixin, View):
module = None
model = None
obj = None
template_name = "courses/manage/content/form.html"
def get_model(self, model_name):
# если имя модели соответствует одному из имен моделей контента
# вернуть модель для app_label и model_name
if model_name in ['text', 'file', 'image', 'video']:
return apps.get_model(app_label="courses", model_name=model_name)
# если модель нам не подходит
return None
def get_form(self, model, *args, **kwargs):
# возвращает ModelForm для указаной model
# со всеми полями кроме тех что указаны в exclude
Form = modelform_factory(model, exclude=['owner'
'created',
'updated',
'order', 'owner'])
return Form(*args, **kwargs)
def dispatch(self, request, module_id, model_name, id=None):
# получаем модуль с которым будет асоциирован объект
self.module = get_object_or_404(Module,
id=module_id,
course__owner=request.user)
# получаем модель которая будет соответсвотать типу контента
self.model = self.get_model(model_name)
# если не None, то объект будет обновлен, иначе будет создан новый
if id:
self.obj = get_object_or_404(self.model,
id=id,
owner=request.user)
# вызываем метод родителя
return super(ContentCreateUpdateView, self).dispatch(request,
module_id,
model_name,
id)
def get(self, request, module_id, model_name, id=None):
# возвращаем форму для изменения экземпляра контента при self.obj!=None.
# при None, будт возвращена форма для создания экземпляра контента.
form = self.get_form(self.model, instance=self.obj)
return self.render_to_response({'form': form,
'object': self.obj})
def post(self, request, module_id, model_name, id=None):
# возвращаем форму с данными и файлами
form = self.get_form(self.model,
instance=self.obj,
data=request.POST,
files=request.FILES)
if form.is_valid():
# задаем владельцем контента текущего пользователя
obj = form.save(commit=False)
obj.owner = request.user
obj.save()
if not id:
# если id объекта не указан, создаем новый экземпляр
Content.objects.create(module=self.module, content_object=obj)
return redirect('module_content_list', self.module.id)
return self.render_to_response({'form': form, 'object': self.obj})
class ContentDeleteView(View):
def post(self, request, id):
content = get_object_or_404(Content,
id=id,
module__course__owner=request.user)
module = content.module
content.content_object.delete()
content.delete()
# возвращаемся к списку контента модуля
return redirect('module_content_list', module.id)
class ModuleContentListView(TemplateResponseMixin, View):
template_name = "courses/manage/module/content_list.html"
def get(self, request, module_id):
module = get_object_or_404(Module,
id=module_id,
course__owner=request.user)
return self.render_to_response({'module': module})
class ModuleOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
"""
CsrfExemptMixin освобождает запрос от csrf token'а.
JsonRequestResponseMixin - помещает правильно отформатированый
json запрос в request_json; также сериализирует response
"""
def post(self, request):
for id, order in self.request_json.items():
Module.objects.filter(id=id,
course__owner=request.user).update(order=order)
return self.render_json_response({'saved': 'OK'})
class ContentOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
"""
CsrfExemptMixin освобождает запрос от csrf token'а.
JsonRequestResponseMixin - помещает правильно отформатированый
json запрос в request_json; также с | ериализирует response
"""
def post(self, request):
for id, order in self.request_json.items():
print('id', id, ' -- ', order)
for id, order in self.request_json.items():
Content.objects.filter(id=id, module__course__owner=request.user).update(order=order)
return self.render_json_response({'sa | conditional_block | |
views.py | apps
from students.forms import CourseEnrollForm
from .models import Course, Module, Content, Subject
from .forms import ModuleFormSet
class OwnerMixin(object):
"""
Миксин переопределяющий метод get_queryset
во всех дочерних классах.
Может взаимодействовать со всеми моделями
у которых есть атрибут owner.
"""
def get_queryset(self):
"""
вернуть объекты созданные только текущим пользователем
"""
queryset = super(OwnerMixin, self).get_queryset()
return queryset.filter(owner=self.request.user)
class OwnerEditMixin(object):
"""
Миксин переопределяющий метод form_valid
во всех дочерних классах.
"""
def form_valid(self, form):
"""
С помощью этого метода при создании объекта(подтверждение формы)
задается владелец этого объекта.
"""
form.instance.owner = self.request.user
return super(OwnerEditMixin, self).form_valid(form)
class OwnerCourseMixin(OwnerMixin, LoginRequiredMixin):
"""
Указание модели для queryset во всех дочерних классах
"""
model = Course
class OwnerCourseEditMixin(OwnerCourseMixin, OwnerEditMixin):
"""
Миксин который должен использоватся в классах изменяющиюх
или создающих объекты модели Course
"""
# указание полей для форм дочерних классов
fields = ['subject', 'title', 'slug', 'overview']
# указание, куда будет перенаправлен пользователь
# после подтверждения формы.
# manage_course_list это имя URL в url.py
success_url = reverse_lazy('manage_course_list')
template_name = "courses/manage/course/form.html"
class ManageCourseListView(OwnerCourseMixin, ListView):
"""
Используя наследование от OwnerCourseMixin, ListView
этот класс также будет содержать все поля и методы из
OwnerCourseMixin, ListView, OwnerMixin
"""
template_name = "courses/manage/course/list.html"
class CourseCreateView(PermissionRequiredMixin, OwnerCourseEditMixin, CreateView):
"""
Используется для создания нового Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.add_course"
class CourseUpdateView(PermissionRequiredMixin, OwnerCourseEditMixin, UpdateView):
"""
Используется для изменения Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.change_course"
class CourseDeleteView(PermissionRequiredMixin, OwnerCourseMixin, DeleteView):
"""
Используется для удаления Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.delete_course"
# указание, куда будет перенаправлен пользователь
# после подтверждения формы.
# manage_course_list это имя URL в url.py
success_url = reverse_lazy('manage_course_list')
template_name = "courses/manage/course/delete.html"
class CourseModuleUpdateView(TemplateResponseMixin, View):
"""
Класс используется для добавления, обновления и удаления модулей
определенного курса.
--------------------
TemplateResponseMixin используется для отображения templates, для него
обязательно нужно указывать template_name или реализовать
метод get_template_names; имеет метод render_to_response
для отображения context в template
--------------------
View реализует метод dispatch, который анализирует response на метод запроса
и в зависимости от его типа отправляет его нужному методу (get(), post()...)
"""
template_name = "courses/manage/module/formset.html"
course = None
def get_formset(self, data=None):
return ModuleFormSet(instance=self.course, data=data)
def dispatch(self, request, pk):
# ищем определенный курс текущего пользователя
self.course = get_object_or_404(Course, id=pk, owner=request.user)
return super(CourseModuleUpdateView, self).dispatch(request, pk)
def get(self, request, *args, **kwargs):
# создаем пустой formset
formset = self.get_formset()
return self.render_to_response({'course': self.course,
'formset': formset})
def post(self, request, *args, **kwargs):
# создаем formset с данными
formset = self.get_formset(data=request.POST)
if formset.is_valid():
formset.save()
return redirect('manage_course_list')
return self.render_to_response({'course': self.course,
'formset': formset})
class ContentCreateUpdateView(TemplateResponseMixin, View):
module = None
model = None
obj = None
template_name = "courses/manage/content/form.html"
def get_model(self, model_name):
# если имя модели соответствует одному из имен моделей контента
# вернуть модель для app_label и model_name
if model_name in ['text', 'file', 'image', 'video']:
return apps.get_model(app_label="courses", model_name=model_name)
# если модель нам не подходит
return None
def get_form(self, model, *args, **kwargs):
# возвращает ModelForm для указаной model
# со всеми полями кроме тех что указаны в exclude
Form = modelform_factory(model, exclude=['owner'
'created',
'updated',
'order', 'owner'])
return Form(*args, **kwargs)
def dispatch(self, request, module_id, model_name, id=None):
# получаем модуль с которым будет асоциирован объект
self.module = get_object_or_404(Module,
id=module_id,
course__owner=request.user)
# получаем модель которая будет соответсвотать типу контента
self.model = self.get_model(model_name)
# если не None, то объект будет обновлен, иначе будет создан новый
if id:
self.obj = get_object_or_404(self.model,
id=id,
owner=request.user)
# вызываем метод родителя
return super(ContentCreateUpdateView, self).dispatch(request,
module_id,
model_name,
id)
def get(self, request, module_id, model_name, id=None):
# возвращаем форму для изменения экземпляра контента при self.obj!=None.
# при None, будт возвращена форма для создания экземпляра контента.
form = self.get_form(self.model, instance=self.obj)
return self.render_to_response({'form': form,
'object': self.obj})
def post(self, request, module_id, model_name, id=None):
# возвращаем форму с данными и файлами
form = self.get_form(self.model,
instance=self.obj,
data=request.POST,
files=request.FILES)
if form.is_valid():
# задаем владельцем контента текущего пользователя
obj = form.save(commit=False)
obj.owner = request.user
obj.save()
if not id:
# если id объекта не указан, создаем новый экземпляр
Content.objects.create(module=self.module, content_object=obj)
return redirect('module_content_list', self.module.id)
return self.render_to_response({'form': form, 'object': self.obj})
class ContentDeleteView(View):
def post(self, request, id):
content = get_object_or_404(Content, | content.content_object.delete()
content.delete()
# возвращаемся к списку контента модуля
return redirect('module_content_list', module.id)
class ModuleContentListView(TemplateResponseMixin, View):
template_name = "courses/manage/module/content_list.html"
def get(self, request, module_id):
module = get_object_or_404(Module,
id=module_id,
course__owner=request.user)
return self.render_to_response({'module': module})
class ModuleOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
"""
CsrfExemptMixin освобождает запрос от csrf token'а.
JsonRequestResponseMixin - помещает правильно отформатированый
json запрос в request_json; также сериализирует response
"""
def post(self, request):
for id, order in self.request_json.items():
Module.objects.filter(id=id,
course__owner=request.user).update(order=order)
return self.render_json_response({'saved': 'OK'})
class ContentOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
"""
CsrfExemptMixin освобождает запрос от csrf token'а.
JsonRequestResponseMixin - помещает правильно отформатированый
json запрос в request_json; также сериализирует response
"""
def post(self, request):
for id, order in self.request_json.items():
print('id', id, ' -- ', order)
for id, order in self.request_json.items():
Content.objects.filter(id=id, module__course__owner=request.user).update(order=order | id=id,
module__course__owner=request.user)
module = content.module | random_line_split |
views.py |
from students.forms import CourseEnrollForm
from .models import Course, Module, Content, Subject
from .forms import ModuleFormSet
class OwnerMixin(object):
"""
Миксин переопределяющий метод get_queryset
во всех дочерних классах.
Может взаимодействовать со всеми моделями
у которых есть атрибут owner.
"""
def get_queryset(self):
"""
вернуть объекты созданные только текущим пользователем
"""
queryset = super(OwnerMixin, self).get_queryset()
return queryset.filter(owner=self.request.user)
class OwnerEditMixin(object):
"""
Миксин переопределяющий метод form_valid
во всех дочерних классах.
"""
def form_valid(self, form):
"""
С помощью этого метода при создании объекта(подт | ы)
задается владелец этого объекта.
"""
form.instance.owner = self.request.user
return super(OwnerEditMixin, self).form_valid(form)
class OwnerCourseMixin(OwnerMixin, LoginRequiredMixin):
"""
Указание модели для queryset во всех дочерних классах
"""
model = Course
class OwnerCourseEditMixin(OwnerCourseMixin, OwnerEditMixin):
"""
Миксин который должен использоватся в классах изменяющиюх
или создающих объекты модели Course
"""
# указание полей для форм дочерних классов
fields = ['subject', 'title', 'slug', 'overview']
# указание, куда будет перенаправлен пользователь
# после подтверждения формы.
# manage_course_list это имя URL в url.py
success_url = reverse_lazy('manage_course_list')
template_name = "courses/manage/course/form.html"
class ManageCourseListView(OwnerCourseMixin, ListView):
"""
Используя наследование от OwnerCourseMixin, ListView
этот класс также будет содержать все поля и методы из
OwnerCourseMixin, ListView, OwnerMixin
"""
template_name = "courses/manage/course/list.html"
class CourseCreateView(PermissionRequiredMixin, OwnerCourseEditMixin, CreateView):
"""
Используется для создания нового Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.add_course"
class CourseUpdateView(PermissionRequiredMixin, OwnerCourseEditMixin, UpdateView):
"""
Используется для изменения Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.change_course"
class CourseDeleteView(PermissionRequiredMixin, OwnerCourseMixin, DeleteView):
"""
Используется для удаления Course
"""
# PermissionRequiredMixin проверяет если у пользователя указанный permission_required
permission_required = "courses.delete_course"
# указание, куда будет перенаправлен пользователь
# после подтверждения формы.
# manage_course_list это имя URL в url.py
success_url = reverse_lazy('manage_course_list')
template_name = "courses/manage/course/delete.html"
class CourseModuleUpdateView(TemplateResponseMixin, View):
"""
Класс используется для добавления, обновления и удаления модулей
определенного курса.
--------------------
TemplateResponseMixin используется для отображения templates, для него
обязательно нужно указывать template_name или реализовать
метод get_template_names; имеет метод render_to_response
для отображения context в template
--------------------
View реализует метод dispatch, который анализирует response на метод запроса
и в зависимости от его типа отправляет его нужному методу (get(), post()...)
"""
template_name = "courses/manage/module/formset.html"
course = None
def get_formset(self, data=None):
return ModuleFormSet(instance=self.course, data=data)
def dispatch(self, request, pk):
# ищем определенный курс текущего пользователя
self.course = get_object_or_404(Course, id=pk, owner=request.user)
return super(CourseModuleUpdateView, self).dispatch(request, pk)
def get(self, request, *args, **kwargs):
# создаем пустой formset
formset = self.get_formset()
return self.render_to_response({'course': self.course,
'formset': formset})
def post(self, request, *args, **kwargs):
# создаем formset с данными
formset = self.get_formset(data=request.POST)
if formset.is_valid():
formset.save()
return redirect('manage_course_list')
return self.render_to_response({'course': self.course,
'formset': formset})
class ContentCreateUpdateView(TemplateResponseMixin, View):
module = None
model = None
obj = None
template_name = "courses/manage/content/form.html"
def get_model(self, model_name):
# если имя модели соответствует одному из имен моделей контента
# вернуть модель для app_label и model_name
if model_name in ['text', 'file', 'image', 'video']:
return apps.get_model(app_label="courses", model_name=model_name)
# если модель нам не подходит
return None
def get_form(self, model, *args, **kwargs):
# возвращает ModelForm для указаной model
# со всеми полями кроме тех что указаны в exclude
Form = modelform_factory(model, exclude=['owner'
'created',
'updated',
'order', 'owner'])
return Form(*args, **kwargs)
def dispatch(self, request, module_id, model_name, id=None):
# получаем модуль с которым будет асоциирован объект
self.module = get_object_or_404(Module,
id=module_id,
course__owner=request.user)
# получаем модель которая будет соответсвотать типу контента
self.model = self.get_model(model_name)
# если не None, то объект будет обновлен, иначе будет создан новый
if id:
self.obj = get_object_or_404(self.model,
id=id,
owner=request.user)
# вызываем метод родителя
return super(ContentCreateUpdateView, self).dispatch(request,
module_id,
model_name,
id)
def get(self, request, module_id, model_name, id=None):
# возвращаем форму для изменения экземпляра контента при self.obj!=None.
# при None, будт возвращена форма для создания экземпляра контента.
form = self.get_form(self.model, instance=self.obj)
return self.render_to_response({'form': form,
'object': self.obj})
def post(self, request, module_id, model_name, id=None):
# возвращаем форму с данными и файлами
form = self.get_form(self.model,
instance=self.obj,
data=request.POST,
files=request.FILES)
if form.is_valid():
# задаем владельцем контента текущего пользователя
obj = form.save(commit=False)
obj.owner = request.user
obj.save()
if not id:
# если id объекта не указан, создаем новый экземпляр
Content.objects.create(module=self.module, content_object=obj)
return redirect('module_content_list', self.module.id)
return self.render_to_response({'form': form, 'object': self.obj})
class ContentDeleteView(View):
def post(self, request, id):
content = get_object_or_404(Content,
id=id,
module__course__owner=request.user)
module = content.module
content.content_object.delete()
content.delete()
# возвращаемся к списку контента модуля
return redirect('module_content_list', module.id)
class ModuleContentListView(TemplateResponseMixin, View):
template_name = "courses/manage/module/content_list.html"
def get(self, request, module_id):
module = get_object_or_404(Module,
id=module_id,
course__owner=request.user)
return self.render_to_response({'module': module})
class ModuleOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
"""
CsrfExemptMixin освобождает запрос от csrf token'а.
JsonRequestResponseMixin - помещает правильно отформатированый
json запрос в request_json; также сериализирует response
"""
def post(self, request):
for id, order in self.request_json.items():
Module.objects.filter(id=id,
course__owner=request.user).update(order=order)
return self.render_json_response({'saved': 'OK'})
class ContentOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
"""
CsrfExemptMixin освобождает запрос от csrf token'а.
JsonRequestResponseMixin - помещает правильно отформатированый
json запрос в request_json; также сериализирует response
"""
def post(self, request):
for id, order in self.request_json.items():
print('id', id, ' -- ', order)
for id, order in self.request_json.items():
Content.objects.filter(id=id, module__course__owner=request.user).update(order | верждение форм | identifier_name |
CKEditor_media_tab.js | /dialogs/image.js
*/
function _eatlas_media_frame_ckeditor_create_media_tab() {
// As defined in imageDialog function
var IMAGE = 1,
LINK = 2,
PREVIEW = 4,
CLEANUP = 8;
var IMAGESTYLE_CLASS_PREFIX = 'img__view_mode__';
var IMAGEID_CLASS_PREFIX = 'img__fid__';
var onMediaStyleChange = function() {
// This = input element.
var value = this.getValue(),
dialog = this.getDialog();
var enable = value && value != 'enlarge';
toggleInput(dialog, 'chkHideDesc', enable);
toggleInput(dialog, 'chkHideLicense', enable);
toggleInput(dialog, 'txtMediaTitle', enable);
toggleInput(dialog, 'txtMediaDescPrefix', enable);
toggleInput(dialog, 'txtMediaDescription', enable);
};
var onImageStyleChange = function() {
var newMediaStyle = this.getValue(),
dialog = this.getDialog();
if (!newMediaStyle) {
newMediaStyle = 'media_original'
}
// The media styles are inconsistent with image styles. It's okay
// with most of them, but a mapping has to be done for the
// 'hardcoded' one.
var newImageStyle = newMediaStyle;
if (newImageStyle === 'media_preview') {
newImageStyle = 'square_thumbnail';
} else if (newImageStyle === 'media_large') {
newImageStyle = 'large';
} else if (newImageStyle === 'media_original') {
newImageStyle = '';
}
// API http://docs.cksource.com/ckeditor_api/symbols/CKEDITOR.dialog.html
//
// pageId: 'info', 'media', 'Link', 'Upload', 'advanced'
// elementId:
// info:
// 'txtUrl' (cke_75_uiElement),
// 'browse' (cke_77_uiElement) (disabled 'Browse Server' button to the right of URL field),
// 'txtAlt' (cke_82_uiElement),
// 'txtWidth' (cke_85_uiElement),
// 'txtHeight' (cke_88_uiElement),
// undefined (cke_89_uiElement) (container for Width and Height),
// 'ratioLock' (cke_90_uiElement) (both lock and reset),
// 'txtBorder' (cke_94_uiElement),
// 'txtHSpace' (cke_97_uiElement),
// 'txtVSpace' (cke_100_uiElement),
// 'cmbAlign' (cke_103_uiElement),
// 'basic' (cke_105_uiElement) (container for Width, Height, Border, HSpace, VSpace and Alignment),
// 'htmlPreview' (cke_106_uiElement)
// media:
// 'lstImageStyle' (cke_113_uiElement),
// 'lstMediaStyle' (cke_116_uiElement),
// 'lstMediaLink' (...),
// 'txtMediaTitle' (...),
// 'txtMediaDescPrefix' (...),
// 'txtMediaDescription' (cke_119_uiElement),
// undefined (cke_120_uiElement) (metadata info),
// Link:
// 'txtUrl' (cke_125_uiElement),
// 'browse' (cke_127_uiElement) (disabled 'Browse Server' button to the right of URL field),
// 'cmbTarget' (cke_130_uiElement)
// Upload:
// 'upload' (cke_135_uiElement),
// 'uploadButton' (cke_137_uiElement)
// advanced:
// 'linkId' (cke_142_uiElement),
// 'cmbLangDir' (cke_145_uiElement),
// 'txtLangCode' (cke_148_uiElement),
// 'txtGenLongDescr' (cke_152_uiElement),
// 'txtGenClass' (cke_155_uiElement),
// 'txtGenTitle' (cke_158_uiElement),
// undefined (cke_159_uiElement) (container for Stylesheet Classes and Advisory Title),
// 'txtdlgGenStyle' (cke_162_uiElement)
//
// Snipet to display the mapping DOM ID => CKEditor element ID:
// dialog.foreach(function(el) {
// console.log('DOM ID: ' + el.domId + ' ID: ' + el.id);
// });
// *** CSS Classes ***
// Get actual image CSS Classes, as defined in the dialog field
// API: dialog.getValueOf(pageId, elementId);
var classes = dialog.getValueOf('advanced', 'txtGenClass');
classes = classes ? classes.split(/\s+/) : [];
// Remove previous 'image style' class and find the image ID
var newClasses = [];
for (var i=0, len=classes.length; i<len; i++) {
if (classes[i].substring(0, IMAGESTYLE_CLASS_PREFIX.length) !== IMAGESTYLE_CLASS_PREFIX) {
newClasses.push(classes[i]);
}
}
// Add new 'image style' class
newClasses.push(IMAGESTYLE_CLASS_PREFIX + newMediaStyle);
// Set the new image CSS Classes in the dialog field
// API: dialog.setValueOf(pageId, elementId, value);
dialog.setValueOf('advanced', 'txtGenClass', newClasses.join(' '));
// *** Image URL ***
// Async request to the file URL service (only works when logged)
// IMPORTANT: The Drupal API must be used to get the image URL
// because it need to add the "itok" token to the URL.
// That token has been added to avoid an easy DDoS.
// See: http://berk.es/2013/03/04/drupal-imagecache-security-vulnarability-with-ddos-attack-explained/
if (typeof this.fid !== 'undefined') {
$.getJSON("/eatlas_mediaframe_fileurl/" + this.fid + "/" + newImageStyle, function(json){
if (typeof json.url !== 'undefined') {
// Set the new image URL in the dialog field
var currentUrl = dialog.getValueOf('info', 'txtUrl');
if (currentUrl != json.url) {
dialog.setValueOf('info', 'txtUrl', json.url);
}
}
});
}
};
var getImageStyle = function(element) {
var classes = element.getAttribute('class');
if (!classes) {
return null;
}
classes = classes.split(/\s+/);
for (var i=0, len=classes.length; i < len; i++) {
if (classes[i].substring(0, IMAGESTYLE_CLASS_PREFIX.length) === IMAGESTYLE_CLASS_PREFIX) {
return classes[i].substring(IMAGESTYLE_CLASS_PREFIX.length);
}
}
};
var getMediaFileId = function(element) {
var classes = element.getAttribute('class');
if (!classes) {
return null;
}
classes = classes.split(/\s+/);
for (var i=0, len=classes.length; i < len; i++) {
if (classes[i].substring(0, IMAGEID_CLASS_PREFIX.length) === IMAGEID_CLASS_PREFIX) {
return classes[i].substring(IMAGEID_CLASS_PREFIX.length);
}
}
};
var toggleInput = function(dialog, inputID, active) {
var inputEl = dialog.getContentElement('media', inputID).getInputElement();
if (active) | else {
inputEl.setAttribute('readonly', true);
inputEl.addClass('disabled');
}
};
var imageStyles = [
['Original', 'media_original'],
['Link', 'media_link'],
['Preview', 'media_preview'],
['Large', 'media_large']
];
// NOTE: Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles is defined in eatlas_media_frame_filter.module
if (typeof Drupal.settings.eatlas_media_frame_filter === 'object' && typeof Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles === 'object') {
var customStyles = Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles;
for (customStyleId in customStyles) {
if (customStyles.hasOwnProperty(customStyleId)) {
imageStyles.push([customStyles[customStyleId], customStyleId]);
}
}
}
// CKEditor API: http://docs | {
inputEl.removeAttribute('readonly');
inputEl.removeClass('disabled');
} | conditional_block |
CKEditor_media_tab.js | /image/dialogs/image.js
*/
function _eatlas_media_frame_ckeditor_create_media_tab() {
// As defined in imageDialog function
var IMAGE = 1,
LINK = 2,
PREVIEW = 4,
CLEANUP = 8;
var IMAGESTYLE_CLASS_PREFIX = 'img__view_mode__';
var IMAGEID_CLASS_PREFIX = 'img__fid__';
var onMediaStyleChange = function() {
// This = input element.
var value = this.getValue(),
dialog = this.getDialog();
var enable = value && value != 'enlarge';
toggleInput(dialog, 'chkHideDesc', enable);
toggleInput(dialog, 'chkHideLicense', enable);
toggleInput(dialog, 'txtMediaTitle', enable);
toggleInput(dialog, 'txtMediaDescPrefix', enable);
toggleInput(dialog, 'txtMediaDescription', enable);
};
var onImageStyleChange = function() {
var newMediaStyle = this.getValue(),
dialog = this.getDialog();
if (!newMediaStyle) {
newMediaStyle = 'media_original'
}
// The media styles are inconsistent with image styles. It's okay
// with most of them, but a mapping has to be done for the
// 'hardcoded' one.
var newImageStyle = newMediaStyle;
if (newImageStyle === 'media_preview') {
newImageStyle = 'square_thumbnail';
} else if (newImageStyle === 'media_large') {
newImageStyle = 'large';
} else if (newImageStyle === 'media_original') {
newImageStyle = '';
}
// API http://docs.cksource.com/ckeditor_api/symbols/CKEDITOR.dialog.html
//
// pageId: 'info', 'media', 'Link', 'Upload', 'advanced'
// elementId:
// info:
// 'txtUrl' (cke_75_uiElement),
// 'browse' (cke_77_uiElement) (disabled 'Browse Server' button to the right of URL field),
// 'txtAlt' (cke_82_uiElement),
// 'txtWidth' (cke_85_uiElement),
// 'txtHeight' (cke_88_uiElement),
// undefined (cke_89_uiElement) (container for Width and Height),
// 'ratioLock' (cke_90_uiElement) (both lock and reset),
// 'txtBorder' (cke_94_uiElement),
// 'txtHSpace' (cke_97_uiElement),
// 'txtVSpace' (cke_100_uiElement),
// 'cmbAlign' (cke_103_uiElement),
// 'basic' (cke_105_uiElement) (container for Width, Height, Border, HSpace, VSpace and Alignment),
// 'htmlPreview' (cke_106_uiElement)
// media:
// 'lstImageStyle' (cke_113_uiElement),
// 'lstMediaStyle' (cke_116_uiElement),
// 'lstMediaLink' (...),
// 'txtMediaTitle' (...),
// 'txtMediaDescPrefix' (...),
// 'txtMediaDescription' (cke_119_uiElement),
// undefined (cke_120_uiElement) (metadata info),
// Link:
// 'txtUrl' (cke_125_uiElement),
// 'browse' (cke_127_uiElement) (disabled 'Browse Server' button to the right of URL field),
// 'cmbTarget' (cke_130_uiElement)
// Upload:
// 'upload' (cke_135_uiElement),
// 'uploadButton' (cke_137_uiElement)
// advanced:
// 'linkId' (cke_142_uiElement),
// 'cmbLangDir' (cke_145_uiElement),
// 'txtLangCode' (cke_148_uiElement),
// 'txtGenLongDescr' (cke_152_uiElement),
// 'txtGenClass' (cke_155_uiElement),
// 'txtGenTitle' (cke_158_uiElement),
// undefined (cke_159_uiElement) (container for Stylesheet Classes and Advisory Title),
// 'txtdlgGenStyle' (cke_162_uiElement)
//
// Snipet to display the mapping DOM ID => CKEditor element ID:
// dialog.foreach(function(el) {
// console.log('DOM ID: ' + el.domId + ' ID: ' + el.id);
// });
// *** CSS Classes ***
// Get actual image CSS Classes, as defined in the dialog field |
// Remove previous 'image style' class and find the image ID
var newClasses = [];
for (var i=0, len=classes.length; i<len; i++) {
if (classes[i].substring(0, IMAGESTYLE_CLASS_PREFIX.length) !== IMAGESTYLE_CLASS_PREFIX) {
newClasses.push(classes[i]);
}
}
// Add new 'image style' class
newClasses.push(IMAGESTYLE_CLASS_PREFIX + newMediaStyle);
// Set the new image CSS Classes in the dialog field
// API: dialog.setValueOf(pageId, elementId, value);
dialog.setValueOf('advanced', 'txtGenClass', newClasses.join(' '));
// *** Image URL ***
// Async request to the file URL service (only works when logged)
// IMPORTANT: The Drupal API must be used to get the image URL
// because it need to add the "itok" token to the URL.
// That token has been added to avoid an easy DDoS.
// See: http://berk.es/2013/03/04/drupal-imagecache-security-vulnarability-with-ddos-attack-explained/
if (typeof this.fid !== 'undefined') {
$.getJSON("/eatlas_mediaframe_fileurl/" + this.fid + "/" + newImageStyle, function(json){
if (typeof json.url !== 'undefined') {
// Set the new image URL in the dialog field
var currentUrl = dialog.getValueOf('info', 'txtUrl');
if (currentUrl != json.url) {
dialog.setValueOf('info', 'txtUrl', json.url);
}
}
});
}
};
var getImageStyle = function(element) {
var classes = element.getAttribute('class');
if (!classes) {
return null;
}
classes = classes.split(/\s+/);
for (var i=0, len=classes.length; i < len; i++) {
if (classes[i].substring(0, IMAGESTYLE_CLASS_PREFIX.length) === IMAGESTYLE_CLASS_PREFIX) {
return classes[i].substring(IMAGESTYLE_CLASS_PREFIX.length);
}
}
};
var getMediaFileId = function(element) {
var classes = element.getAttribute('class');
if (!classes) {
return null;
}
classes = classes.split(/\s+/);
for (var i=0, len=classes.length; i < len; i++) {
if (classes[i].substring(0, IMAGEID_CLASS_PREFIX.length) === IMAGEID_CLASS_PREFIX) {
return classes[i].substring(IMAGEID_CLASS_PREFIX.length);
}
}
};
var toggleInput = function(dialog, inputID, active) {
var inputEl = dialog.getContentElement('media', inputID).getInputElement();
if (active) {
inputEl.removeAttribute('readonly');
inputEl.removeClass('disabled');
} else {
inputEl.setAttribute('readonly', true);
inputEl.addClass('disabled');
}
};
var imageStyles = [
['Original', 'media_original'],
['Link', 'media_link'],
['Preview', 'media_preview'],
['Large', 'media_large']
];
// NOTE: Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles is defined in eatlas_media_frame_filter.module
if (typeof Drupal.settings.eatlas_media_frame_filter === 'object' && typeof Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles === 'object') {
var customStyles = Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles;
for (customStyleId in customStyles) {
if (customStyles.hasOwnProperty(customStyleId)) {
imageStyles.push([customStyles[customStyleId], customStyleId]);
}
}
}
// CKEditor API: http://docs | // API: dialog.getValueOf(pageId, elementId);
var classes = dialog.getValueOf('advanced', 'txtGenClass');
classes = classes ? classes.split(/\s+/) : []; | random_line_split |
CKEditor_media_tab.js | El = dialog.getContentElement('media', inputID).getInputElement();
if (active) {
inputEl.removeAttribute('readonly');
inputEl.removeClass('disabled');
} else {
inputEl.setAttribute('readonly', true);
inputEl.addClass('disabled');
}
};
var imageStyles = [
['Original', 'media_original'],
['Link', 'media_link'],
['Preview', 'media_preview'],
['Large', 'media_large']
];
// NOTE: Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles is defined in eatlas_media_frame_filter.module
if (typeof Drupal.settings.eatlas_media_frame_filter === 'object' && typeof Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles === 'object') {
var customStyles = Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles;
for (customStyleId in customStyles) {
if (customStyles.hasOwnProperty(customStyleId)) {
imageStyles.push([customStyles[customStyleId], customStyleId]);
}
}
}
// CKEditor API: http://docs.ckeditor.com/#!/api/CKEDITOR.dialog.definition.checkbox
return {
id: 'media',
label: 'Frame info',
padding: 0,
elements: [
{
id: 'lstImageStyle',
type: 'select',
label: 'Image style',
// NOTE: This CSS class hide the field when it's disabled.
className: 'eatlas-media-frame-filter-image-style-select',
items: imageStyles,
onChange: onImageStyleChange,
setup: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
var currentImageStyle = getImageStyle(element);
this.fid = getMediaFileId(element);
this.setValue(currentImageStyle);
// Disable the field when it's set to "Original"
// We don't want users to be able to choose something else
// but still give the ability to fix broken images.
if (currentImageStyle === 'media_original') {
this.disable();
}
}
},
commit: function(type, element) {}
}, {
// API: http://docs.cksource.com/ckeditor_api/symbols/CKEDITOR.dialog.definition.select.html
id: 'lstMediaStyle',
type: 'select',
label: 'Frame style',
items: [
['- No frame -', ''],
['Wikipedia style', 'wikipedia'],
['Info on image', 'onImage'],
['Tile', 'tile']
],
'default': '', // The default must match the default in "eatlas_media_frame_filter.module"
onChange: onMediaStyleChange,
setup: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
// noframe, with default true
var frameStyle = element.getAttribute('media_style');
if (frameStyle !== null) {
this.setValue(frameStyle);
}
}
},
commit: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
var frameStyle = this.getValue();
element.setAttribute('media_style', frameStyle);
}
}
}, {
// API: http://docs.cksource.com/ckeditor_api/symbols/CKEDITOR.dialog.definition.select.html
id: 'lstMediaLink',
type: 'select',
label: 'Link to media page',
items: [
['- No link to media page -', 'none'],
['Magnifier', 'magnifier'],
['Image linked to media page', 'direct']
],
'default': 'none',
setup: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
// noframe, with default true
var mediaLinkStyle = element.getAttribute('media_link');
if (mediaLinkStyle !== null) {
this.setValue(mediaLinkStyle);
}
}
},
commit: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
var mediaLinkStyle = this.getValue();
element.setAttribute('media_link', mediaLinkStyle);
}
}
}, {
id: 'chkHideDesc',
type: 'checkbox',
label: 'Hide description',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var hidedesc = element.getAttribute('media_hidedesc');
if (hidedesc) {
this.setValue(true);
}
}
},
commit: function(type, element) {
if (type == IMAGE) {
var hidedesc = this.getValue();
if (hidedesc) {
element.setAttribute('media_hidedesc', true);
} else {
element.removeAttribute('media_hidedesc');
}
}
}
}, {
id: 'chkHideLicense',
type: 'checkbox',
label: 'Hide license',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var hidelicense = element.getAttribute('media_hidelicense');
if (hidelicense) {
this.setValue(true);
}
}
},
commit: function(type, element) {
if (type == IMAGE) {
var hidelicense = this.getValue();
if (hidelicense) {
element.setAttribute('media_hidelicense', true);
} else {
element.removeAttribute('media_hidelicense');
}
}
}
}, {
id: 'txtMediaTitle',
type: 'text',
label: 'Title overwrite',
style: 'width: 100%',
'default': '',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var title = _decode(element.getAttribute('media_title'));
if (title) {
this.setValue(title);
}
}
},
commit: function(type, element) {
if (type == IMAGE) {
var title = _encode(this.getValue());
if (title) {
element.setAttribute('media_title', title);
} else {
element.removeAttribute('media_title');
}
}
}
}, {
id: 'txtMediaDescPrefix',
type: 'text',
label: 'Description prefix',
style: 'width: 100%',
'default': '',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var prefix = _decode(element.getAttribute('media_descprefix'));
if (prefix) {
this.setValue(prefix);
}
}
},
commit: function(type, element) {
if (type == IMAGE) {
var prefix = _encode(this.getValue());
if (prefix) {
element.setAttribute('media_descprefix', prefix);
} else {
element.removeAttribute('media_descprefix');
}
}
}
}, {
id: 'txtMediaDescription',
type: 'textarea',
label: 'Description overwrite',
style: 'width: 100%',
'default': '',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var description = _decode(element.getAttribute('media_description'));
if (description) {
this.setValue(description);
}
}
},
commit: function( type, element ) {
if (type == IMAGE) {
var description = _encode(this.getValue());
if (description) {
element.setAttribute('media_description', description);
} else {
element.removeAttribute('media_description');
}
}
}
}, {
type: 'html',
html: '<div role="presentation">' +
'<label>Metadata</label>' +
'<div id="eatlas_media_frame_info"><span class="loading">Loading...</span></div>' +
'</div>',
setup: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
var domElement = document.getElementById('eatlas_media_frame_info');
var fid = _eatlas_media_frame_ckeditor_get_fid(element);
if (fid !== null) {
domElement.innerHTML = '<iframe class="iframe-mediaframe-fileinfo" src="/?q=eatlas_mediaframe_fileinfo/' + fid + '"/>';
} else {
domElement.innerHTML = '<span class="noinfo">No information available</span>';
}
}
}
}
]
};
function _encode(htmlStr) {
// Create a in-memory div, set it's inner text (which jQuery automatically encodes)
// then grab the encoded contents back out. The div never exists on the page.
return $('<div/>').text(htmlStr).html();
}
function _decode(str) | {
return $('<div/>').html(str).text();
} | identifier_body | |
CKEditor_media_tab.js | };
var toggleInput = function(dialog, inputID, active) {
var inputEl = dialog.getContentElement('media', inputID).getInputElement();
if (active) {
inputEl.removeAttribute('readonly');
inputEl.removeClass('disabled');
} else {
inputEl.setAttribute('readonly', true);
inputEl.addClass('disabled');
}
};
var imageStyles = [
['Original', 'media_original'],
['Link', 'media_link'],
['Preview', 'media_preview'],
['Large', 'media_large']
];
// NOTE: Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles is defined in eatlas_media_frame_filter.module
if (typeof Drupal.settings.eatlas_media_frame_filter === 'object' && typeof Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles === 'object') {
var customStyles = Drupal.settings.eatlas_media_frame_filter.drupal_custom_image_styles;
for (customStyleId in customStyles) {
if (customStyles.hasOwnProperty(customStyleId)) {
imageStyles.push([customStyles[customStyleId], customStyleId]);
}
}
}
// CKEditor API: http://docs.ckeditor.com/#!/api/CKEDITOR.dialog.definition.checkbox
return {
id: 'media',
label: 'Frame info',
padding: 0,
elements: [
{
id: 'lstImageStyle',
type: 'select',
label: 'Image style',
// NOTE: This CSS class hide the field when it's disabled.
className: 'eatlas-media-frame-filter-image-style-select',
items: imageStyles,
onChange: onImageStyleChange,
setup: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
var currentImageStyle = getImageStyle(element);
this.fid = getMediaFileId(element);
this.setValue(currentImageStyle);
// Disable the field when it's set to "Original"
// We don't want users to be able to choose something else
// but still give the ability to fix broken images.
if (currentImageStyle === 'media_original') {
this.disable();
}
}
},
commit: function(type, element) {}
}, {
// API: http://docs.cksource.com/ckeditor_api/symbols/CKEDITOR.dialog.definition.select.html
id: 'lstMediaStyle',
type: 'select',
label: 'Frame style',
items: [
['- No frame -', ''],
['Wikipedia style', 'wikipedia'],
['Info on image', 'onImage'],
['Tile', 'tile']
],
'default': '', // The default must match the default in "eatlas_media_frame_filter.module"
onChange: onMediaStyleChange,
setup: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
// noframe, with default true
var frameStyle = element.getAttribute('media_style');
if (frameStyle !== null) {
this.setValue(frameStyle);
}
}
},
commit: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
var frameStyle = this.getValue();
element.setAttribute('media_style', frameStyle);
}
}
}, {
// API: http://docs.cksource.com/ckeditor_api/symbols/CKEDITOR.dialog.definition.select.html
id: 'lstMediaLink',
type: 'select',
label: 'Link to media page',
items: [
['- No link to media page -', 'none'],
['Magnifier', 'magnifier'],
['Image linked to media page', 'direct']
],
'default': 'none',
setup: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
// noframe, with default true
var mediaLinkStyle = element.getAttribute('media_link');
if (mediaLinkStyle !== null) {
this.setValue(mediaLinkStyle);
}
}
},
commit: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
var mediaLinkStyle = this.getValue();
element.setAttribute('media_link', mediaLinkStyle);
}
}
}, {
id: 'chkHideDesc',
type: 'checkbox',
label: 'Hide description',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var hidedesc = element.getAttribute('media_hidedesc');
if (hidedesc) {
this.setValue(true);
}
}
},
commit: function(type, element) {
if (type == IMAGE) {
var hidedesc = this.getValue();
if (hidedesc) {
element.setAttribute('media_hidedesc', true);
} else {
element.removeAttribute('media_hidedesc');
}
}
}
}, {
id: 'chkHideLicense',
type: 'checkbox',
label: 'Hide license',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var hidelicense = element.getAttribute('media_hidelicense');
if (hidelicense) {
this.setValue(true);
}
}
},
commit: function(type, element) {
if (type == IMAGE) {
var hidelicense = this.getValue();
if (hidelicense) {
element.setAttribute('media_hidelicense', true);
} else {
element.removeAttribute('media_hidelicense');
}
}
}
}, {
id: 'txtMediaTitle',
type: 'text',
label: 'Title overwrite',
style: 'width: 100%',
'default': '',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var title = _decode(element.getAttribute('media_title'));
if (title) {
this.setValue(title);
}
}
},
commit: function(type, element) {
if (type == IMAGE) {
var title = _encode(this.getValue());
if (title) {
element.setAttribute('media_title', title);
} else {
element.removeAttribute('media_title');
}
}
}
}, {
id: 'txtMediaDescPrefix',
type: 'text',
label: 'Description prefix',
style: 'width: 100%',
'default': '',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var prefix = _decode(element.getAttribute('media_descprefix'));
if (prefix) {
this.setValue(prefix);
}
}
},
commit: function(type, element) {
if (type == IMAGE) {
var prefix = _encode(this.getValue());
if (prefix) {
element.setAttribute('media_descprefix', prefix);
} else {
element.removeAttribute('media_descprefix');
}
}
}
}, {
id: 'txtMediaDescription',
type: 'textarea',
label: 'Description overwrite',
style: 'width: 100%',
'default': '',
readonly: true,
setup: function(type, element) {
if (type == IMAGE) {
var description = _decode(element.getAttribute('media_description'));
if (description) {
this.setValue(description);
}
}
},
commit: function( type, element ) {
if (type == IMAGE) {
var description = _encode(this.getValue());
if (description) {
element.setAttribute('media_description', description);
} else {
element.removeAttribute('media_description');
}
}
}
}, {
type: 'html',
html: '<div role="presentation">' +
'<label>Metadata</label>' +
'<div id="eatlas_media_frame_info"><span class="loading">Loading...</span></div>' +
'</div>',
setup: function(type, element) {
// element => CKEDITOR.dom.element
if (type == IMAGE) {
var domElement = document.getElementById('eatlas_media_frame_info');
var fid = _eatlas_media_frame_ckeditor_get_fid(element);
if (fid !== null) {
domElement.innerHTML = '<iframe class="iframe-mediaframe-fileinfo" src="/?q=eatlas_mediaframe_fileinfo/' + fid + '"/>';
} else {
domElement.innerHTML = '<span class="noinfo">No information available</span>';
}
}
}
}
]
};
function _encode(htmlStr) {
// Create a in-memory div, set it's inner text (which jQuery automatically encodes)
// then grab the encoded contents back out. The div never exists on the page.
return $('<div/>').text(htmlStr).html();
}
function | _decode | identifier_name | |
lib.rs | RuntimeOrigin> +
IsType<<<Self as frame_system::Config>::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>;
/// Weight information for extrinsics in this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// Batch of dispatches did not complete fully. Index of first failing dispatch given, as
/// well as the error.
BatchInterrupted { index: u32, error: DispatchError },
/// Batch of dispatches completed fully with no error.
BatchCompleted,
/// Batch of dispatches completed but has errors.
BatchCompletedWithErrors,
/// A single item within a Batch of dispatches has completed with no error.
ItemCompleted,
/// A single item within a Batch of dispatches has completed with error.
ItemFailed { error: DispatchError },
/// A call was dispatched.
DispatchedAs { result: DispatchResult },
}
// Align the call size to 1KB. As we are currently compiling the runtime for native/wasm
// the `size_of` of the `Call` can be different. To ensure that this don't leads to
// mismatches between native/wasm or to different metadata for the same runtime, we
// algin the call size. The value is chosen big enough to hopefully never reach it.
const CALL_ALIGN: u32 = 1024;
#[pallet::extra_constants]
impl<T: Config> Pallet<T> {
/// The limit on the number of batched calls.
fn batched_calls_limit() -> u32 |
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
// If you hit this error, you need to try to `Box` big dispatchable parameters.
assert!(
sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 <= CALL_ALIGN,
"Call enum size should be smaller than {} bytes.",
CALL_ALIGN,
);
}
}
#[pallet::error]
pub enum Error<T> {
/// Too many calls batched.
TooManyCalls,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Send a batch of dispatch calls.
///
/// May be called from any origin except `None`.
///
/// - `calls`: The calls to be dispatched from the same origin. The number of call must not
/// exceed the constant: `batched_calls_limit` (available in constant metadata).
///
/// If origin is root then the calls are dispatched without checking origin filter. (This
/// includes bypassing `frame_system::Config::BaseCallFilter`).
///
/// ## Complexity
/// - O(C) where C is the number of calls to be batched.
///
/// This will return `Ok` in all circumstances. To determine the success of the batch, an
/// event is deposited. If a call failed and the batch was interrupted, then the
/// `BatchInterrupted` event is deposited, along with the number of successful calls made
/// and the error of the failed call. If all were successful, then the `BatchCompleted`
/// event is deposited.
#[pallet::call_index(0)]
#[pallet::weight({
let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
let dispatch_weight = dispatch_infos.iter()
.map(|di| di.weight)
.fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight))
.saturating_add(T::WeightInfo::batch(calls.len() as u32));
let dispatch_class = {
let all_operational = dispatch_infos.iter()
.map(|di| di.class)
.all(|class| class == DispatchClass::Operational);
if all_operational {
DispatchClass::Operational
} else {
DispatchClass::Normal
}
};
(dispatch_weight, dispatch_class)
})]
pub fn batch(
origin: OriginFor<T>,
calls: Vec<<T as Config>::RuntimeCall>,
) -> DispatchResultWithPostInfo {
// Do not allow the `None` origin.
if ensure_none(origin.clone()).is_ok() {
return Err(BadOrigin.into())
}
let is_root = ensure_root(origin.clone()).is_ok();
let calls_len = calls.len();
ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls);
// Track the actual weight of each of the batch calls.
let mut weight = Weight::zero();
for (index, call) in calls.into_iter().enumerate() {
let info = call.get_dispatch_info();
// If origin is root, don't apply any dispatch filters; root can call anything.
let result = if is_root {
call.dispatch_bypass_filter(origin.clone())
} else {
call.dispatch(origin.clone())
};
// Add the weight of this call.
weight = weight.saturating_add(extract_actual_weight(&result, &info));
if let Err(e) = result {
Self::deposit_event(Event::BatchInterrupted {
index: index as u32,
error: e.error,
});
// Take the weight of this function itself into account.
let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32);
// Return the actual used weight + base_weight of this call.
return Ok(Some(base_weight + weight).into())
}
Self::deposit_event(Event::ItemCompleted);
}
Self::deposit_event(Event::BatchCompleted);
let base_weight = T::WeightInfo::batch(calls_len as u32);
Ok(Some(base_weight + weight).into())
}
/// Send a call through an indexed pseudonym of the sender.
///
/// Filter from origin are passed along. The call will be dispatched with an origin which
/// use the same filter as the origin of this call.
///
/// NOTE: If you need to ensure that any account-based filtering is not honored (i.e.
/// because you expect `proxy` to have been used prior in the call stack and you do not want
/// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`
/// in the Multisig pallet instead.
///
/// NOTE: Prior to version *12, this was called `as_limited_sub`.
///
/// The dispatch origin for this call must be _Signed_.
#[pallet::call_index(1)]
#[pallet::weight({
let dispatch_info = call.get_dispatch_info();
(
T::WeightInfo::as_derivative()
// AccountData for inner call origin accountdata.
.saturating_add(T::DbWeight::get().reads_writes(1, 1))
.saturating_add(dispatch_info.weight),
dispatch_info.class,
)
})]
pub fn as_derivative(
origin: OriginFor<T>,
index: u16,
call: Box<<T as Config>::RuntimeCall>,
) -> DispatchResultWithPostInfo {
let mut origin = origin;
let who = ensure_signed(origin.clone())?;
let pseudonym = Self::derivative_account_id(who, index);
origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym));
let info = call.get_dispatch_info();
let result = call.dispatch(origin);
// Always take into account the base weight of this call.
let mut weight = T::WeightInfo::as_derivative()
.saturating_add(T::DbWeight::get().reads_writes(1, 1));
// Add the real weight of the dispatch.
weight = weight.saturating_add(extract_actual_weight(&result, &info));
result
.map_err(|mut err| {
err.post_info = Some(weight).into();
err
})
.map(|_| Some(weight).into())
}
/// Send a batch of dispatch calls and atomically execute them.
/// The whole transaction will rollback and fail if any of the calls failed.
///
/// May be called from any origin except `None`.
///
/// - `calls`: The calls to be dispatched from the same origin. The number of call must not
/// exceed the constant: | {
let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION;
let call_size = ((sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 +
CALL_ALIGN - 1) / CALL_ALIGN) *
CALL_ALIGN;
// The margin to take into account vec doubling capacity.
let margin_factor = 3;
allocator_limit / margin_factor / call_size
} | identifier_body |
lib.rs | RuntimeOrigin> +
IsType<<<Self as frame_system::Config>::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>;
/// Weight information for extrinsics in this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// Batch of dispatches did not complete fully. Index of first failing dispatch given, as
/// well as the error.
BatchInterrupted { index: u32, error: DispatchError },
/// Batch of dispatches completed fully with no error.
BatchCompleted,
/// Batch of dispatches completed but has errors.
BatchCompletedWithErrors,
/// A single item within a Batch of dispatches has completed with no error.
ItemCompleted,
/// A single item within a Batch of dispatches has completed with error.
ItemFailed { error: DispatchError },
/// A call was dispatched.
DispatchedAs { result: DispatchResult },
}
// Align the call size to 1KB. As we are currently compiling the runtime for native/wasm
// the `size_of` of the `Call` can be different. To ensure that this don't leads to
// mismatches between native/wasm or to different metadata for the same runtime, we
// algin the call size. The value is chosen big enough to hopefully never reach it.
const CALL_ALIGN: u32 = 1024;
#[pallet::extra_constants]
impl<T: Config> Pallet<T> {
/// The limit on the number of batched calls.
fn batched_calls_limit() -> u32 {
let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION;
let call_size = ((sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 +
CALL_ALIGN - 1) / CALL_ALIGN) *
CALL_ALIGN;
// The margin to take into account vec doubling capacity.
let margin_factor = 3;
allocator_limit / margin_factor / call_size
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn | () {
// If you hit this error, you need to try to `Box` big dispatchable parameters.
assert!(
sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 <= CALL_ALIGN,
"Call enum size should be smaller than {} bytes.",
CALL_ALIGN,
);
}
}
#[pallet::error]
pub enum Error<T> {
/// Too many calls batched.
TooManyCalls,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Send a batch of dispatch calls.
///
/// May be called from any origin except `None`.
///
/// - `calls`: The calls to be dispatched from the same origin. The number of call must not
/// exceed the constant: `batched_calls_limit` (available in constant metadata).
///
/// If origin is root then the calls are dispatched without checking origin filter. (This
/// includes bypassing `frame_system::Config::BaseCallFilter`).
///
/// ## Complexity
/// - O(C) where C is the number of calls to be batched.
///
/// This will return `Ok` in all circumstances. To determine the success of the batch, an
/// event is deposited. If a call failed and the batch was interrupted, then the
/// `BatchInterrupted` event is deposited, along with the number of successful calls made
/// and the error of the failed call. If all were successful, then the `BatchCompleted`
/// event is deposited.
#[pallet::call_index(0)]
#[pallet::weight({
let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
let dispatch_weight = dispatch_infos.iter()
.map(|di| di.weight)
.fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight))
.saturating_add(T::WeightInfo::batch(calls.len() as u32));
let dispatch_class = {
let all_operational = dispatch_infos.iter()
.map(|di| di.class)
.all(|class| class == DispatchClass::Operational);
if all_operational {
DispatchClass::Operational
} else {
DispatchClass::Normal
}
};
(dispatch_weight, dispatch_class)
})]
pub fn batch(
origin: OriginFor<T>,
calls: Vec<<T as Config>::RuntimeCall>,
) -> DispatchResultWithPostInfo {
// Do not allow the `None` origin.
if ensure_none(origin.clone()).is_ok() {
return Err(BadOrigin.into())
}
let is_root = ensure_root(origin.clone()).is_ok();
let calls_len = calls.len();
ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls);
// Track the actual weight of each of the batch calls.
let mut weight = Weight::zero();
for (index, call) in calls.into_iter().enumerate() {
let info = call.get_dispatch_info();
// If origin is root, don't apply any dispatch filters; root can call anything.
let result = if is_root {
call.dispatch_bypass_filter(origin.clone())
} else {
call.dispatch(origin.clone())
};
// Add the weight of this call.
weight = weight.saturating_add(extract_actual_weight(&result, &info));
if let Err(e) = result {
Self::deposit_event(Event::BatchInterrupted {
index: index as u32,
error: e.error,
});
// Take the weight of this function itself into account.
let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32);
// Return the actual used weight + base_weight of this call.
return Ok(Some(base_weight + weight).into())
}
Self::deposit_event(Event::ItemCompleted);
}
Self::deposit_event(Event::BatchCompleted);
let base_weight = T::WeightInfo::batch(calls_len as u32);
Ok(Some(base_weight + weight).into())
}
/// Send a call through an indexed pseudonym of the sender.
///
/// Filter from origin are passed along. The call will be dispatched with an origin which
/// use the same filter as the origin of this call.
///
/// NOTE: If you need to ensure that any account-based filtering is not honored (i.e.
/// because you expect `proxy` to have been used prior in the call stack and you do not want
/// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`
/// in the Multisig pallet instead.
///
/// NOTE: Prior to version *12, this was called `as_limited_sub`.
///
/// The dispatch origin for this call must be _Signed_.
#[pallet::call_index(1)]
#[pallet::weight({
let dispatch_info = call.get_dispatch_info();
(
T::WeightInfo::as_derivative()
// AccountData for inner call origin accountdata.
.saturating_add(T::DbWeight::get().reads_writes(1, 1))
.saturating_add(dispatch_info.weight),
dispatch_info.class,
)
})]
pub fn as_derivative(
origin: OriginFor<T>,
index: u16,
call: Box<<T as Config>::RuntimeCall>,
) -> DispatchResultWithPostInfo {
let mut origin = origin;
let who = ensure_signed(origin.clone())?;
let pseudonym = Self::derivative_account_id(who, index);
origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym));
let info = call.get_dispatch_info();
let result = call.dispatch(origin);
// Always take into account the base weight of this call.
let mut weight = T::WeightInfo::as_derivative()
.saturating_add(T::DbWeight::get().reads_writes(1, 1));
// Add the real weight of the dispatch.
weight = weight.saturating_add(extract_actual_weight(&result, &info));
result
.map_err(|mut err| {
err.post_info = Some(weight).into();
err
})
.map(|_| Some(weight).into())
}
/// Send a batch of dispatch calls and atomically execute them.
/// The whole transaction will rollback and fail if any of the calls failed.
///
/// May be called from any origin except `None`.
///
/// - `calls`: The calls to be dispatched from the same origin. The number of call must not
/// exceed the constant: | integrity_test | identifier_name |
lib.rs | _constants]
impl<T: Config> Pallet<T> {
/// The limit on the number of batched calls.
fn batched_calls_limit() -> u32 {
let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION;
let call_size = ((sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 +
CALL_ALIGN - 1) / CALL_ALIGN) *
CALL_ALIGN;
// The margin to take into account vec doubling capacity.
let margin_factor = 3;
allocator_limit / margin_factor / call_size
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
// If you hit this error, you need to try to `Box` big dispatchable parameters.
assert!(
sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 <= CALL_ALIGN,
"Call enum size should be smaller than {} bytes.",
CALL_ALIGN,
);
}
}
#[pallet::error]
pub enum Error<T> {
/// Too many calls batched.
TooManyCalls,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Send a batch of dispatch calls.
///
/// May be called from any origin except `None`.
///
/// - `calls`: The calls to be dispatched from the same origin. The number of call must not
/// exceed the constant: `batched_calls_limit` (available in constant metadata).
///
/// If origin is root then the calls are dispatched without checking origin filter. (This
/// includes bypassing `frame_system::Config::BaseCallFilter`).
///
/// ## Complexity
/// - O(C) where C is the number of calls to be batched.
///
/// This will return `Ok` in all circumstances. To determine the success of the batch, an
/// event is deposited. If a call failed and the batch was interrupted, then the
/// `BatchInterrupted` event is deposited, along with the number of successful calls made
/// and the error of the failed call. If all were successful, then the `BatchCompleted`
/// event is deposited.
#[pallet::call_index(0)]
#[pallet::weight({
let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
let dispatch_weight = dispatch_infos.iter()
.map(|di| di.weight)
.fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight))
.saturating_add(T::WeightInfo::batch(calls.len() as u32));
let dispatch_class = {
let all_operational = dispatch_infos.iter()
.map(|di| di.class)
.all(|class| class == DispatchClass::Operational);
if all_operational {
DispatchClass::Operational
} else {
DispatchClass::Normal
}
};
(dispatch_weight, dispatch_class)
})]
pub fn batch(
origin: OriginFor<T>,
calls: Vec<<T as Config>::RuntimeCall>,
) -> DispatchResultWithPostInfo {
// Do not allow the `None` origin.
if ensure_none(origin.clone()).is_ok() {
return Err(BadOrigin.into())
}
let is_root = ensure_root(origin.clone()).is_ok();
let calls_len = calls.len();
ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls);
// Track the actual weight of each of the batch calls.
let mut weight = Weight::zero();
for (index, call) in calls.into_iter().enumerate() {
let info = call.get_dispatch_info();
// If origin is root, don't apply any dispatch filters; root can call anything.
let result = if is_root {
call.dispatch_bypass_filter(origin.clone())
} else {
call.dispatch(origin.clone())
};
// Add the weight of this call.
weight = weight.saturating_add(extract_actual_weight(&result, &info));
if let Err(e) = result {
Self::deposit_event(Event::BatchInterrupted {
index: index as u32,
error: e.error,
});
// Take the weight of this function itself into account.
let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32);
// Return the actual used weight + base_weight of this call.
return Ok(Some(base_weight + weight).into())
}
Self::deposit_event(Event::ItemCompleted);
}
Self::deposit_event(Event::BatchCompleted);
let base_weight = T::WeightInfo::batch(calls_len as u32);
Ok(Some(base_weight + weight).into())
}
/// Send a call through an indexed pseudonym of the sender.
///
/// Filter from origin are passed along. The call will be dispatched with an origin which
/// use the same filter as the origin of this call.
///
/// NOTE: If you need to ensure that any account-based filtering is not honored (i.e.
/// because you expect `proxy` to have been used prior in the call stack and you do not want
/// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`
/// in the Multisig pallet instead.
///
/// NOTE: Prior to version *12, this was called `as_limited_sub`.
///
/// The dispatch origin for this call must be _Signed_.
#[pallet::call_index(1)]
#[pallet::weight({
let dispatch_info = call.get_dispatch_info();
(
T::WeightInfo::as_derivative()
// AccountData for inner call origin accountdata.
.saturating_add(T::DbWeight::get().reads_writes(1, 1))
.saturating_add(dispatch_info.weight),
dispatch_info.class,
)
})]
pub fn as_derivative(
origin: OriginFor<T>,
index: u16,
call: Box<<T as Config>::RuntimeCall>,
) -> DispatchResultWithPostInfo {
let mut origin = origin;
let who = ensure_signed(origin.clone())?;
let pseudonym = Self::derivative_account_id(who, index);
origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym));
let info = call.get_dispatch_info();
let result = call.dispatch(origin);
// Always take into account the base weight of this call.
let mut weight = T::WeightInfo::as_derivative()
.saturating_add(T::DbWeight::get().reads_writes(1, 1));
// Add the real weight of the dispatch.
weight = weight.saturating_add(extract_actual_weight(&result, &info));
result
.map_err(|mut err| {
err.post_info = Some(weight).into();
err
})
.map(|_| Some(weight).into())
}
/// Send a batch of dispatch calls and atomically execute them.
/// The whole transaction will rollback and fail if any of the calls failed.
///
/// May be called from any origin except `None`.
///
/// - `calls`: The calls to be dispatched from the same origin. The number of call must not
/// exceed the constant: `batched_calls_limit` (available in constant metadata).
///
/// If origin is root then the calls are dispatched without checking origin filter. (This
/// includes bypassing `frame_system::Config::BaseCallFilter`).
///
/// ## Complexity
/// - O(C) where C is the number of calls to be batched.
#[pallet::call_index(2)]
#[pallet::weight({
let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
let dispatch_weight = dispatch_infos.iter()
.map(|di| di.weight)
.fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight))
.saturating_add(T::WeightInfo::batch_all(calls.len() as u32));
let dispatch_class = {
let all_operational = dispatch_infos.iter()
.map(|di| di.class)
.all(|class| class == DispatchClass::Operational);
if all_operational {
DispatchClass::Operational
} else {
DispatchClass::Normal
}
};
(dispatch_weight, dispatch_class)
})]
pub fn batch_all(
origin: OriginFor<T>,
calls: Vec<<T as Config>::RuntimeCall>,
) -> DispatchResultWithPostInfo {
// Do not allow the `None` origin.
if ensure_none(origin.clone()).is_ok() | {
return Err(BadOrigin.into())
} | conditional_block | |
lib.rs | >::RuntimeOrigin> +
IsType<<<Self as frame_system::Config>::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>;
/// Weight information for extrinsics in this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// Batch of dispatches did not complete fully. Index of first failing dispatch given, as
/// well as the error.
BatchInterrupted { index: u32, error: DispatchError },
/// Batch of dispatches completed fully with no error.
BatchCompleted,
/// Batch of dispatches completed but has errors.
BatchCompletedWithErrors,
/// A single item within a Batch of dispatches has completed with no error.
ItemCompleted,
/// A single item within a Batch of dispatches has completed with error.
ItemFailed { error: DispatchError },
/// A call was dispatched.
DispatchedAs { result: DispatchResult },
}
// Align the call size to 1KB. As we are currently compiling the runtime for native/wasm
// the `size_of` of the `Call` can be different. To ensure that this don't leads to
// mismatches between native/wasm or to different metadata for the same runtime, we
// algin the call size. The value is chosen big enough to hopefully never reach it.
const CALL_ALIGN: u32 = 1024;
#[pallet::extra_constants]
impl<T: Config> Pallet<T> {
/// The limit on the number of batched calls.
fn batched_calls_limit() -> u32 {
let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION;
let call_size = ((sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 +
CALL_ALIGN - 1) / CALL_ALIGN) *
CALL_ALIGN;
// The margin to take into account vec doubling capacity.
let margin_factor = 3;
allocator_limit / margin_factor / call_size
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
// If you hit this error, you need to try to `Box` big dispatchable parameters.
assert!(
sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 <= CALL_ALIGN,
"Call enum size should be smaller than {} bytes.",
CALL_ALIGN,
);
}
}
#[pallet::error]
pub enum Error<T> {
/// Too many calls batched.
TooManyCalls,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Send a batch of dispatch calls.
///
/// May be called from any origin except `None`.
///
/// - `calls`: The calls to be dispatched from the same origin. The number of call must not
/// exceed the constant: `batched_calls_limit` (available in constant metadata).
///
/// If origin is root then the calls are dispatched without checking origin filter. (This
/// includes bypassing `frame_system::Config::BaseCallFilter`).
///
/// ## Complexity | /// event is deposited. If a call failed and the batch was interrupted, then the
/// `BatchInterrupted` event is deposited, along with the number of successful calls made
/// and the error of the failed call. If all were successful, then the `BatchCompleted`
/// event is deposited.
#[pallet::call_index(0)]
#[pallet::weight({
let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>();
let dispatch_weight = dispatch_infos.iter()
.map(|di| di.weight)
.fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight))
.saturating_add(T::WeightInfo::batch(calls.len() as u32));
let dispatch_class = {
let all_operational = dispatch_infos.iter()
.map(|di| di.class)
.all(|class| class == DispatchClass::Operational);
if all_operational {
DispatchClass::Operational
} else {
DispatchClass::Normal
}
};
(dispatch_weight, dispatch_class)
})]
pub fn batch(
origin: OriginFor<T>,
calls: Vec<<T as Config>::RuntimeCall>,
) -> DispatchResultWithPostInfo {
// Do not allow the `None` origin.
if ensure_none(origin.clone()).is_ok() {
return Err(BadOrigin.into())
}
let is_root = ensure_root(origin.clone()).is_ok();
let calls_len = calls.len();
ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls);
// Track the actual weight of each of the batch calls.
let mut weight = Weight::zero();
for (index, call) in calls.into_iter().enumerate() {
let info = call.get_dispatch_info();
// If origin is root, don't apply any dispatch filters; root can call anything.
let result = if is_root {
call.dispatch_bypass_filter(origin.clone())
} else {
call.dispatch(origin.clone())
};
// Add the weight of this call.
weight = weight.saturating_add(extract_actual_weight(&result, &info));
if let Err(e) = result {
Self::deposit_event(Event::BatchInterrupted {
index: index as u32,
error: e.error,
});
// Take the weight of this function itself into account.
let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32);
// Return the actual used weight + base_weight of this call.
return Ok(Some(base_weight + weight).into())
}
Self::deposit_event(Event::ItemCompleted);
}
Self::deposit_event(Event::BatchCompleted);
let base_weight = T::WeightInfo::batch(calls_len as u32);
Ok(Some(base_weight + weight).into())
}
/// Send a call through an indexed pseudonym of the sender.
///
/// Filter from origin are passed along. The call will be dispatched with an origin which
/// use the same filter as the origin of this call.
///
/// NOTE: If you need to ensure that any account-based filtering is not honored (i.e.
/// because you expect `proxy` to have been used prior in the call stack and you do not want
/// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`
/// in the Multisig pallet instead.
///
/// NOTE: Prior to version *12, this was called `as_limited_sub`.
///
/// The dispatch origin for this call must be _Signed_.
#[pallet::call_index(1)]
#[pallet::weight({
let dispatch_info = call.get_dispatch_info();
(
T::WeightInfo::as_derivative()
// AccountData for inner call origin accountdata.
.saturating_add(T::DbWeight::get().reads_writes(1, 1))
.saturating_add(dispatch_info.weight),
dispatch_info.class,
)
})]
pub fn as_derivative(
origin: OriginFor<T>,
index: u16,
call: Box<<T as Config>::RuntimeCall>,
) -> DispatchResultWithPostInfo {
let mut origin = origin;
let who = ensure_signed(origin.clone())?;
let pseudonym = Self::derivative_account_id(who, index);
origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym));
let info = call.get_dispatch_info();
let result = call.dispatch(origin);
// Always take into account the base weight of this call.
let mut weight = T::WeightInfo::as_derivative()
.saturating_add(T::DbWeight::get().reads_writes(1, 1));
// Add the real weight of the dispatch.
weight = weight.saturating_add(extract_actual_weight(&result, &info));
result
.map_err(|mut err| {
err.post_info = Some(weight).into();
err
})
.map(|_| Some(weight).into())
}
/// Send a batch of dispatch calls and atomically execute them.
/// The whole transaction will rollback and fail if any of the calls failed.
///
/// May be called from any origin except `None`.
///
/// - `calls`: The calls to be dispatched from the same origin. The number of call must not
/// exceed the constant: `batched | /// - O(C) where C is the number of calls to be batched.
///
/// This will return `Ok` in all circumstances. To determine the success of the batch, an | random_line_split |
main.py | True,
'input_dim':2048}
else:
warnings.warn('=> You did not choose a global image representation method!')
representation = None # which for original vgg or alexnet
model = get_model(args.arch,
representation,
args.num_classes,
args.freezed_layer,
pretrained=args.pretrained)
# plot network
vizNet(model, args.modeldir)
# obtain learning rate
LR = Learning_rate_generater(args.lr_method, args.lr_params, args.epochs)
if args.pretrained:
params_list = [{'params': model.features.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay},]
params_list.append({'params': model.representation.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay})
params_list.append({'params': model.classifier.parameters(),
'lr': args.lr*args.classifier_factor,
'weight_decay': 0. if args.arch.startswith('vgg') else args.weight_decay})
else:
params_list = [{'params': model.features.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay},]
params_list.append({'params': model.representation.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay})
params_list.append({'params': model.classifier.parameters(),
'lr': args.lr*args.classifier_factor,
'weight_decay':args.weight_decay})
optimizer = torch.optim.SGD(params_list, lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.gpu is not None:
model = model.cuda(args.gpu)
elif args.distributed:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
else:
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
train_transforms, val_transforms = preprocess_strategy(args.benchmark)
train_dataset = datasets.ImageFolder(
traindir,
train_transforms)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, val_transforms),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion)
return
# make directory for storing checkpoint files
if os.path.exists(args.modeldir) is not True:
os.mkdir(args.modeldir)
stats_ = stats(args.modeldir, args.start_epoch)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, LR.lr_factor, epoch)
# train for one epoch
trainObj, top1, top5 = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
valObj, prec1, prec5 = validate(val_loader, model, criterion)
# update stats
stats_._update(trainObj, top1, top5, valObj, prec1, prec5)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
filename = []
if args.store_model_everyepoch:
filename.append(os.path.join(args.modeldir, 'net-epoch-%s.pth.tar' % (epoch + 1)))
else:
filename.append(os.path.join(args.modeldir, 'checkpoint.pth.tar'))
filename.append(os.path.join(args.modeldir, 'model_best.pth.tar'))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best, filename)
plot_curve(stats_, args.modeldir, True)
data = stats_
sio.savemat(os.path.join(args.modeldir,'stats.mat'), {'data':data})
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
return losses.avg, top1.avg, top5.avg
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top1.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return losses.avg, top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename[0])
if is_best:
shutil.copyfile(filename[0], filename[1])
class AverageMeter(object):
"""Computes and stores the average and current value"""
def | __init__ | identifier_name | |
main.py | .parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay})
params_list.append({'params': model.classifier.parameters(),
'lr': args.lr*args.classifier_factor,
'weight_decay': 0. if args.arch.startswith('vgg') else args.weight_decay})
else:
params_list = [{'params': model.features.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay},]
params_list.append({'params': model.representation.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay})
params_list.append({'params': model.classifier.parameters(),
'lr': args.lr*args.classifier_factor,
'weight_decay':args.weight_decay})
optimizer = torch.optim.SGD(params_list, lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.gpu is not None:
model = model.cuda(args.gpu)
elif args.distributed:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
else:
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
train_transforms, val_transforms = preprocess_strategy(args.benchmark)
train_dataset = datasets.ImageFolder(
traindir,
train_transforms)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, val_transforms),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion)
return
# make directory for storing checkpoint files
if os.path.exists(args.modeldir) is not True:
os.mkdir(args.modeldir)
stats_ = stats(args.modeldir, args.start_epoch)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, LR.lr_factor, epoch)
# train for one epoch
trainObj, top1, top5 = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
valObj, prec1, prec5 = validate(val_loader, model, criterion)
# update stats
stats_._update(trainObj, top1, top5, valObj, prec1, prec5)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
filename = []
if args.store_model_everyepoch:
filename.append(os.path.join(args.modeldir, 'net-epoch-%s.pth.tar' % (epoch + 1)))
else:
filename.append(os.path.join(args.modeldir, 'checkpoint.pth.tar'))
filename.append(os.path.join(args.modeldir, 'model_best.pth.tar'))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best, filename)
plot_curve(stats_, args.modeldir, True)
data = stats_
sio.savemat(os.path.join(args.modeldir,'stats.mat'), {'data':data})
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
return losses.avg, top1.avg, top5.avg
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top1.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return losses.avg, top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename[0])
if is_best:
shutil.copyfile(filename[0], filename[1])
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class Learning_rate_generater(object):
"""Generates a list of learning rate for each training epoch"""
def __init__(self, method, params, total_epoch):
if method == 'step':
lr_factor, lr = self.step(params, total_epoch)
elif method == 'log':
| lr_factor, lr = self.log(params, total_epoch) | conditional_block | |
main.py | -trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--modeldir', default=None, type=str,
help='director of checkpoint')
parser.add_argument('--representation', default=None, type=str,
help='define the representation method')
parser.add_argument('--num-classes', default=None, type=int,
help='define the number of classes')
parser.add_argument('--freezed-layer', default=None, type=int,
help='define the end of freezed layer')
parser.add_argument('--store-model-everyepoch', dest='store_model_everyepoch', action='store_true',
help='store checkpoint in every epoch')
parser.add_argument('--classifier-factor', default=None, type=int,
help='define the multiply factor of classifier')
parser.add_argument('--benchmark', default=None, type=str,
help='name of dataset')
best_prec1 = 0
def main():
global args, best_prec1
args = parser.parse_args()
print(args)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
if args.representation == 'GAvP':
representation = {'function':GAvP,
'input_dim':2048}
elif args.representation == 'MPNCOV':
representation = {'function':MPNCOV,
'iterNum':5,
'is_sqrt':True,
'is_vec':True,
'input_dim':2048,
'dimension_reduction':None if args.pretrained else 256}
elif args.representation == 'BCNN':
representation = {'function':BCNN,
'is_vec':True,
'input_dim':2048}
else:
warnings.warn('=> You did not choose a global image representation method!')
representation = None # which for original vgg or alexnet
model = get_model(args.arch,
representation,
args.num_classes,
args.freezed_layer,
pretrained=args.pretrained)
# plot network
vizNet(model, args.modeldir)
# obtain learning rate
LR = Learning_rate_generater(args.lr_method, args.lr_params, args.epochs)
if args.pretrained:
params_list = [{'params': model.features.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay},]
params_list.append({'params': model.representation.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay})
params_list.append({'params': model.classifier.parameters(),
'lr': args.lr*args.classifier_factor,
'weight_decay': 0. if args.arch.startswith('vgg') else args.weight_decay})
else:
params_list = [{'params': model.features.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay},]
params_list.append({'params': model.representation.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay})
params_list.append({'params': model.classifier.parameters(),
'lr': args.lr*args.classifier_factor,
'weight_decay':args.weight_decay})
optimizer = torch.optim.SGD(params_list, lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.gpu is not None:
model = model.cuda(args.gpu)
elif args.distributed:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
else:
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
train_transforms, val_transforms = preprocess_strategy(args.benchmark)
train_dataset = datasets.ImageFolder(
traindir,
train_transforms)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, val_transforms),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion)
return
# make directory for storing checkpoint files
if os.path.exists(args.modeldir) is not True:
os.mkdir(args.modeldir)
stats_ = stats(args.modeldir, args.start_epoch)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, LR.lr_factor, epoch)
# train for one epoch
trainObj, top1, top5 = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
valObj, prec1, prec5 = validate(val_loader, model, criterion)
# update stats
stats_._update(trainObj, top1, top5, valObj, prec1, prec5)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
filename = []
if args.store_model_everyepoch:
filename.append(os.path.join(args.modeldir, 'net-epoch-%s.pth.tar' % (epoch + 1)))
else:
filename.append(os.path.join(args.modeldir, 'checkpoint.pth.tar'))
filename.append(os.path.join(args.modeldir, 'model_best.pth.tar'))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best, filename)
plot_curve(stats_, args.modeldir, True)
data = stats_
sio.savemat(os.path.join(args.modeldir,'stats.mat'), {'data':data})
def train(train_loader, model, criterion, optimizer, epoch):
| loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data | batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input) | identifier_body |
main.py | -trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes') | help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--modeldir', default=None, type=str,
help='director of checkpoint')
parser.add_argument('--representation', default=None, type=str,
help='define the representation method')
parser.add_argument('--num-classes', default=None, type=int,
help='define the number of classes')
parser.add_argument('--freezed-layer', default=None, type=int,
help='define the end of freezed layer')
parser.add_argument('--store-model-everyepoch', dest='store_model_everyepoch', action='store_true',
help='store checkpoint in every epoch')
parser.add_argument('--classifier-factor', default=None, type=int,
help='define the multiply factor of classifier')
parser.add_argument('--benchmark', default=None, type=str,
help='name of dataset')
best_prec1 = 0
def main():
global args, best_prec1
args = parser.parse_args()
print(args)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
if args.representation == 'GAvP':
representation = {'function':GAvP,
'input_dim':2048}
elif args.representation == 'MPNCOV':
representation = {'function':MPNCOV,
'iterNum':5,
'is_sqrt':True,
'is_vec':True,
'input_dim':2048,
'dimension_reduction':None if args.pretrained else 256}
elif args.representation == 'BCNN':
representation = {'function':BCNN,
'is_vec':True,
'input_dim':2048}
else:
warnings.warn('=> You did not choose a global image representation method!')
representation = None # which for original vgg or alexnet
model = get_model(args.arch,
representation,
args.num_classes,
args.freezed_layer,
pretrained=args.pretrained)
# plot network
vizNet(model, args.modeldir)
# obtain learning rate
LR = Learning_rate_generater(args.lr_method, args.lr_params, args.epochs)
if args.pretrained:
params_list = [{'params': model.features.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay},]
params_list.append({'params': model.representation.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay})
params_list.append({'params': model.classifier.parameters(),
'lr': args.lr*args.classifier_factor,
'weight_decay': 0. if args.arch.startswith('vgg') else args.weight_decay})
else:
params_list = [{'params': model.features.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay},]
params_list.append({'params': model.representation.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay})
params_list.append({'params': model.classifier.parameters(),
'lr': args.lr*args.classifier_factor,
'weight_decay':args.weight_decay})
optimizer = torch.optim.SGD(params_list, lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.gpu is not None:
model = model.cuda(args.gpu)
elif args.distributed:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
else:
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
train_transforms, val_transforms = preprocess_strategy(args.benchmark)
train_dataset = datasets.ImageFolder(
traindir,
train_transforms)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, val_transforms),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion)
return
# make directory for storing checkpoint files
if os.path.exists(args.modeldir) is not True:
os.mkdir(args.modeldir)
stats_ = stats(args.modeldir, args.start_epoch)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, LR.lr_factor, epoch)
# train for one epoch
trainObj, top1, top5 = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
valObj, prec1, prec5 = validate(val_loader, model, criterion)
# update stats
stats_._update(trainObj, top1, top5, valObj, prec1, prec5)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
filename = []
if args.store_model_everyepoch:
filename.append(os.path.join(args.modeldir, 'net-epoch-%s.pth.tar' % (epoch + 1)))
else:
filename.append(os.path.join(args.modeldir, 'checkpoint.pth.tar'))
filename.append(os.path.join(args.modeldir, 'model_best.pth.tar'))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best, filename)
plot_curve(stats_, args.modeldir, True)
data = stats_
sio.savemat(os.path.join(args.modeldir,'stats.mat'), {'data':data})
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data | parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str, | random_line_split |
shlex.go | nil || b == nil {
return false
}
if a.tokenType != b.tokenType {
return false
}
return a.value == b.value
}
const (
RUNE_CHAR string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-,/@$*()+=><:;&^%~|!?[]{}"
RUNE_SPACE string = " \t\r\n"
RUNE_ESCAPING_QUOTE string = "\""
RUNE_NONESCAPING_QUOTE string = "'"
RUNE_ESCAPE = "\\"
RUNE_COMMENT = "#"
RUNETOKEN_UNKNOWN RuneTokenType = 0
RUNETOKEN_CHAR RuneTokenType = 1
RUNETOKEN_SPACE RuneTokenType = 2
RUNETOKEN_ESCAPING_QUOTE RuneTokenType = 3
RUNETOKEN_NONESCAPING_QUOTE RuneTokenType = 4
RUNETOKEN_ESCAPE RuneTokenType = 5
RUNETOKEN_COMMENT RuneTokenType = 6
RUNETOKEN_EOF RuneTokenType = 7
TOKEN_UNKNOWN TokenType = 0
TOKEN_WORD TokenType = 1
TOKEN_SPACE TokenType = 2
TOKEN_COMMENT TokenType = 3
STATE_START lexerState = 0
STATE_INWORD lexerState = 1
STATE_ESCAPING lexerState = 2
STATE_ESCAPING_QUOTED lexerState = 3
STATE_QUOTED_ESCAPING lexerState = 4
STATE_QUOTED lexerState = 5
STATE_COMMENT lexerState = 6
INITIAL_TOKEN_CAPACITY int = 100
)
/*
A type for classifying characters. This allows for different sorts of
classifiers - those accepting extended non-ascii chars, or strict posix
compatibility, for example.
*/
type TokenClassifier struct {
typeMap map[int32]RuneTokenType
}
func addRuneClass(typeMap *map[int32]RuneTokenType, runes string, tokenType RuneTokenType) {
for _, rune := range runes {
(*typeMap)[int32(rune)] = tokenType
}
}
/*
Create a new classifier for basic ASCII characters.
*/
func NewDefaultClassifier() *TokenClassifier |
func (classifier *TokenClassifier) ClassifyRune(rune int32) RuneTokenType {
return classifier.typeMap[rune]
}
/*
A type for turning an input stream in to a sequence of strings. Whitespace and
comments are skipped.
*/
type Lexer struct {
tokenizer *Tokenizer
}
/*
Create a new lexer.
*/
func NewLexer(r io.Reader) (*Lexer, error) {
tokenizer, err := NewTokenizer(r)
if err != nil {
return nil, err
}
lexer := &Lexer{tokenizer: tokenizer}
return lexer, nil
}
/*
Return the next word, and an error value. If there are no more words, the error
will be io.EOF.
*/
func (l *Lexer) NextWord() (string, error) {
var token *Token
var err error
for {
token, err = l.tokenizer.NextToken()
if err != nil {
return "", err
}
switch token.tokenType {
case TOKEN_WORD:
{
return token.value, nil
}
case TOKEN_COMMENT:
{
// skip comments
}
default:
{
panic(fmt.Sprintf("Unknown token type: %v", token.tokenType))
}
}
}
return "", io.EOF
}
/*
A type for turning an input stream in to a sequence of typed tokens.
*/
type Tokenizer struct {
input *bufio.Reader
classifier *TokenClassifier
}
/*
Create a new tokenizer.
*/
func NewTokenizer(r io.Reader) (*Tokenizer, error) {
input := bufio.NewReader(r)
classifier := NewDefaultClassifier()
tokenizer := &Tokenizer{
input: input,
classifier: classifier}
return tokenizer, nil
}
/*
Scan the stream for the next token.
This uses an internal state machine. It will panic if it encounters a character
which it does not know how to handle.
*/
func (t *Tokenizer) scanStream() (*Token, error) {
state := STATE_START
var tokenType TokenType
value := make([]int32, 0, INITIAL_TOKEN_CAPACITY)
var (
nextRune int32
nextRuneType RuneTokenType
err error
)
SCAN:
for {
nextRune, _, err = t.input.ReadRune()
nextRuneType = t.classifier.ClassifyRune(nextRune)
if err != nil {
if err == io.EOF {
nextRuneType = RUNETOKEN_EOF
err = nil
} else {
return nil, err
}
}
switch state {
case STATE_START: // no runes read yet
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
return nil, io.EOF
}
case RUNETOKEN_CHAR:
{
tokenType = TOKEN_WORD
value = append(value, nextRune)
state = STATE_INWORD
}
case RUNETOKEN_SPACE:
{
}
case RUNETOKEN_ESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
tokenType = TOKEN_WORD
state = STATE_ESCAPING
}
case RUNETOKEN_COMMENT:
{
tokenType = TOKEN_COMMENT
state = STATE_COMMENT
}
default:
{
return nil, errors.New(fmt.Sprintf("Unknown rune: %v", nextRune))
}
}
}
case STATE_INWORD: // in a regular word
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
case RUNETOKEN_SPACE:
{
t.input.UnreadRune()
break SCAN
}
case RUNETOKEN_ESCAPING_QUOTE:
{
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
state = STATE_ESCAPING
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING: // the next rune after an escape character
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_INWORD
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_QUOTED_ESCAPING
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_QUOTED_ESCAPING: // in escaping double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found when expecting closing quote.")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
| {
typeMap := map[int32]RuneTokenType{}
addRuneClass(&typeMap, RUNE_CHAR, RUNETOKEN_CHAR)
addRuneClass(&typeMap, RUNE_SPACE, RUNETOKEN_SPACE)
addRuneClass(&typeMap, RUNE_ESCAPING_QUOTE, RUNETOKEN_ESCAPING_QUOTE)
addRuneClass(&typeMap, RUNE_NONESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE)
addRuneClass(&typeMap, RUNE_ESCAPE, RUNETOKEN_ESCAPE)
addRuneClass(&typeMap, RUNE_COMMENT, RUNETOKEN_COMMENT)
return &TokenClassifier{
typeMap: typeMap}
} | identifier_body |
shlex.go | never equal another token.
*/
// Equal reports whether two tokens share the same type and value.
// A nil token never equals another token — not even another nil token.
func (a *Token) Equal(b *Token) bool {
	if a == nil || b == nil {
		return false
	}
	return a.tokenType == b.tokenType && a.value == b.value
}
const (
	// Rune sets recognized by the default classifier.
	RUNE_CHAR              string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-,/@$*()+=><:;&^%~|!?[]{}"
	RUNE_SPACE             string = " \t\r\n"
	RUNE_ESCAPING_QUOTE    string = "\""
	RUNE_NONESCAPING_QUOTE string = "'"
	RUNE_ESCAPE                   = "\\"
	RUNE_COMMENT                  = "#"

	// Classes assigned to individual runes by the classifier.
	// RUNETOKEN_UNKNOWN is deliberately the zero value, so unregistered
	// runes classify as unknown.
	RUNETOKEN_UNKNOWN           RuneTokenType = 0
	RUNETOKEN_CHAR              RuneTokenType = 1
	RUNETOKEN_SPACE             RuneTokenType = 2
	RUNETOKEN_ESCAPING_QUOTE    RuneTokenType = 3
	RUNETOKEN_NONESCAPING_QUOTE RuneTokenType = 4
	RUNETOKEN_ESCAPE            RuneTokenType = 5
	RUNETOKEN_COMMENT           RuneTokenType = 6
	RUNETOKEN_EOF               RuneTokenType = 7

	// Types of tokens produced by the Tokenizer.
	TOKEN_UNKNOWN TokenType = 0
	TOKEN_WORD    TokenType = 1
	TOKEN_SPACE   TokenType = 2
	TOKEN_COMMENT TokenType = 3

	// States of the tokenizer's internal scanning state machine.
	STATE_START           lexerState = 0
	STATE_INWORD          lexerState = 1
	STATE_ESCAPING        lexerState = 2
	STATE_ESCAPING_QUOTED lexerState = 3
	STATE_QUOTED_ESCAPING lexerState = 4
	STATE_QUOTED          lexerState = 5
	STATE_COMMENT         lexerState = 6

	// Initial capacity of the rune buffer used to accumulate a token.
	INITIAL_TOKEN_CAPACITY int = 100
)
/*
A type for classifying characters. This allows for different sorts of
classifiers - those accepting extended non-ascii chars, or strict posix
compatibility, for example.
*/
type TokenClassifier struct {
	typeMap map[int32]RuneTokenType // maps each known rune to its class
}
// addRuneClass registers every rune in runes under the given token type in
// typeMap. The pointer parameter is kept for compatibility with existing
// call sites, although a Go map is itself a reference type.
func addRuneClass(typeMap *map[int32]RuneTokenType, runes string, tokenType RuneTokenType) {
	// Name the loop variable r so it does not shadow the predeclared
	// identifier rune.
	for _, r := range runes {
		(*typeMap)[int32(r)] = tokenType
	}
}
// NewDefaultClassifier builds a classifier for basic ASCII input: word
// characters, whitespace, both quote styles, the escape character, and the
// comment marker.
func NewDefaultClassifier() *TokenClassifier {
	m := map[int32]RuneTokenType{}
	classes := []struct {
		runes     string
		tokenType RuneTokenType
	}{
		{RUNE_CHAR, RUNETOKEN_CHAR},
		{RUNE_SPACE, RUNETOKEN_SPACE},
		{RUNE_ESCAPING_QUOTE, RUNETOKEN_ESCAPING_QUOTE},
		{RUNE_NONESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE},
		{RUNE_ESCAPE, RUNETOKEN_ESCAPE},
		{RUNE_COMMENT, RUNETOKEN_COMMENT},
	}
	for _, c := range classes {
		addRuneClass(&m, c.runes, c.tokenType)
	}
	return &TokenClassifier{typeMap: m}
}
// ClassifyRune returns the token class of the given rune. Runes that were
// never registered map to the zero value, RUNETOKEN_UNKNOWN.
func (classifier *TokenClassifier) ClassifyRune(rune int32) RuneTokenType {
	return classifier.typeMap[rune]
}
/*
A type for turning an input stream in to a sequence of strings. Whitespace and
comments are skipped.
*/
type Lexer struct {
tokenizer *Tokenizer
}
/*
Create a new lexer.
*/
func NewLexer(r io.Reader) (*Lexer, error) {
tokenizer, err := NewTokenizer(r)
if err != nil {
return nil, err
}
lexer := &Lexer{tokenizer: tokenizer}
return lexer, nil
}
/*
Return the next word, and an error value. If there are no more words, the error
will be io.EOF.
*/
func (l *Lexer) NextWord() (string, error) {
var token *Token
var err error
for {
token, err = l.tokenizer.NextToken()
if err != nil {
return "", err
}
switch token.tokenType {
case TOKEN_WORD:
{
return token.value, nil
}
case TOKEN_COMMENT:
{
// skip comments
}
default:
{
panic(fmt.Sprintf("Unknown token type: %v", token.tokenType))
}
}
}
return "", io.EOF
}
/*
A type for turning an input stream in to a sequence of typed tokens.
*/
type Tokenizer struct {
input *bufio.Reader
classifier *TokenClassifier
}
/*
Create a new tokenizer.
*/
func NewTokenizer(r io.Reader) (*Tokenizer, error) {
input := bufio.NewReader(r)
classifier := NewDefaultClassifier()
tokenizer := &Tokenizer{
input: input,
classifier: classifier}
return tokenizer, nil
}
/*
Scan the stream for the next token.
This uses an internal state machine. It will panic if it encounters a character
which it does not know how to handle.
*/
func (t *Tokenizer) scanStream() (*Token, error) {
state := STATE_START
var tokenType TokenType
value := make([]int32, 0, INITIAL_TOKEN_CAPACITY)
var (
nextRune int32
nextRuneType RuneTokenType
err error
)
SCAN:
for {
nextRune, _, err = t.input.ReadRune()
nextRuneType = t.classifier.ClassifyRune(nextRune)
if err != nil {
if err == io.EOF {
nextRuneType = RUNETOKEN_EOF
err = nil
} else {
return nil, err
}
}
switch state {
case STATE_START: // no runes read yet
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
return nil, io.EOF
}
case RUNETOKEN_CHAR:
{
tokenType = TOKEN_WORD
value = append(value, nextRune)
state = STATE_INWORD
}
case RUNETOKEN_SPACE:
{
}
case RUNETOKEN_ESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
tokenType = TOKEN_WORD
state = STATE_ESCAPING
}
case RUNETOKEN_COMMENT:
{
tokenType = TOKEN_COMMENT
state = STATE_COMMENT
}
default:
{
return nil, errors.New(fmt.Sprintf("Unknown rune: %v", nextRune))
}
}
}
case STATE_INWORD: // in a regular word
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
case RUNETOKEN_SPACE:
{
t.input.UnreadRune()
break SCAN
}
case RUNETOKEN_ESCAPING_QUOTE:
{
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
state = STATE_ESCAPING
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING: // the next rune after an escape character
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_INWORD
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_QUOTED_ESCAPING
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_QUOTED_ESCAPING: // in escaping double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found when expecting closing quote.")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUN | random_line_split | ||
shlex.go | nil || b == nil {
return false
}
if a.tokenType != b.tokenType {
return false
}
return a.value == b.value
}
const (
RUNE_CHAR string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-,/@$*()+=><:;&^%~|!?[]{}"
RUNE_SPACE string = " \t\r\n"
RUNE_ESCAPING_QUOTE string = "\""
RUNE_NONESCAPING_QUOTE string = "'"
RUNE_ESCAPE = "\\"
RUNE_COMMENT = "#"
RUNETOKEN_UNKNOWN RuneTokenType = 0
RUNETOKEN_CHAR RuneTokenType = 1
RUNETOKEN_SPACE RuneTokenType = 2
RUNETOKEN_ESCAPING_QUOTE RuneTokenType = 3
RUNETOKEN_NONESCAPING_QUOTE RuneTokenType = 4
RUNETOKEN_ESCAPE RuneTokenType = 5
RUNETOKEN_COMMENT RuneTokenType = 6
RUNETOKEN_EOF RuneTokenType = 7
TOKEN_UNKNOWN TokenType = 0
TOKEN_WORD TokenType = 1
TOKEN_SPACE TokenType = 2
TOKEN_COMMENT TokenType = 3
STATE_START lexerState = 0
STATE_INWORD lexerState = 1
STATE_ESCAPING lexerState = 2
STATE_ESCAPING_QUOTED lexerState = 3
STATE_QUOTED_ESCAPING lexerState = 4
STATE_QUOTED lexerState = 5
STATE_COMMENT lexerState = 6
INITIAL_TOKEN_CAPACITY int = 100
)
/*
A type for classifying characters. This allows for different sorts of
classifiers - those accepting extended non-ascii chars, or strict posix
compatibility, for example.
*/
type TokenClassifier struct {
typeMap map[int32]RuneTokenType
}
func addRuneClass(typeMap *map[int32]RuneTokenType, runes string, tokenType RuneTokenType) {
for _, rune := range runes {
(*typeMap)[int32(rune)] = tokenType
}
}
/*
Create a new classifier for basic ASCII characters.
*/
func NewDefaultClassifier() *TokenClassifier {
typeMap := map[int32]RuneTokenType{}
addRuneClass(&typeMap, RUNE_CHAR, RUNETOKEN_CHAR)
addRuneClass(&typeMap, RUNE_SPACE, RUNETOKEN_SPACE)
addRuneClass(&typeMap, RUNE_ESCAPING_QUOTE, RUNETOKEN_ESCAPING_QUOTE)
addRuneClass(&typeMap, RUNE_NONESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE)
addRuneClass(&typeMap, RUNE_ESCAPE, RUNETOKEN_ESCAPE)
addRuneClass(&typeMap, RUNE_COMMENT, RUNETOKEN_COMMENT)
return &TokenClassifier{
typeMap: typeMap}
}
func (classifier *TokenClassifier) ClassifyRune(rune int32) RuneTokenType {
return classifier.typeMap[rune]
}
/*
A type for turning an input stream in to a sequence of strings. Whitespace and
comments are skipped.
*/
type Lexer struct {
tokenizer *Tokenizer
}
/*
Create a new lexer.
*/
func | (r io.Reader) (*Lexer, error) {
tokenizer, err := NewTokenizer(r)
if err != nil {
return nil, err
}
lexer := &Lexer{tokenizer: tokenizer}
return lexer, nil
}
/*
Return the next word, and an error value. If there are no more words, the error
will be io.EOF.
*/
func (l *Lexer) NextWord() (string, error) {
var token *Token
var err error
for {
token, err = l.tokenizer.NextToken()
if err != nil {
return "", err
}
switch token.tokenType {
case TOKEN_WORD:
{
return token.value, nil
}
case TOKEN_COMMENT:
{
// skip comments
}
default:
{
panic(fmt.Sprintf("Unknown token type: %v", token.tokenType))
}
}
}
return "", io.EOF
}
/*
A type for turning an input stream in to a sequence of typed tokens.
*/
type Tokenizer struct {
input *bufio.Reader
classifier *TokenClassifier
}
/*
Create a new tokenizer.
*/
func NewTokenizer(r io.Reader) (*Tokenizer, error) {
input := bufio.NewReader(r)
classifier := NewDefaultClassifier()
tokenizer := &Tokenizer{
input: input,
classifier: classifier}
return tokenizer, nil
}
/*
Scan the stream for the next token.
This uses an internal state machine. It will panic if it encounters a character
which it does not know how to handle.
*/
func (t *Tokenizer) scanStream() (*Token, error) {
state := STATE_START
var tokenType TokenType
value := make([]int32, 0, INITIAL_TOKEN_CAPACITY)
var (
nextRune int32
nextRuneType RuneTokenType
err error
)
SCAN:
for {
nextRune, _, err = t.input.ReadRune()
nextRuneType = t.classifier.ClassifyRune(nextRune)
if err != nil {
if err == io.EOF {
nextRuneType = RUNETOKEN_EOF
err = nil
} else {
return nil, err
}
}
switch state {
case STATE_START: // no runes read yet
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
return nil, io.EOF
}
case RUNETOKEN_CHAR:
{
tokenType = TOKEN_WORD
value = append(value, nextRune)
state = STATE_INWORD
}
case RUNETOKEN_SPACE:
{
}
case RUNETOKEN_ESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
tokenType = TOKEN_WORD
state = STATE_ESCAPING
}
case RUNETOKEN_COMMENT:
{
tokenType = TOKEN_COMMENT
state = STATE_COMMENT
}
default:
{
return nil, errors.New(fmt.Sprintf("Unknown rune: %v", nextRune))
}
}
}
case STATE_INWORD: // in a regular word
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
case RUNETOKEN_SPACE:
{
t.input.UnreadRune()
break SCAN
}
case RUNETOKEN_ESCAPING_QUOTE:
{
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
state = STATE_ESCAPING
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING: // the next rune after an escape character
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_INWORD
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_QUOTED_ESCAPING
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_QUOTED_ESCAPING: // in escaping double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found when expecting closing quote.")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
| NewLexer | identifier_name |
shlex.go | for turning an input stream in to a sequence of strings. Whitespace and
comments are skipped.
*/
type Lexer struct {
	tokenizer *Tokenizer // underlying typed-token stream being filtered
}
// NewLexer wraps r in a Lexer that yields shell-style words, skipping
// whitespace and comments.
func NewLexer(r io.Reader) (*Lexer, error) {
	t, err := NewTokenizer(r)
	if err != nil {
		return nil, err
	}
	return &Lexer{tokenizer: t}, nil
}
// NextWord returns the next word in the stream and an error value. When the
// input is exhausted the error is io.EOF. Comment tokens are skipped; an
// unexpected token type causes a panic, since it would indicate a bug in
// the tokenizer rather than bad input.
func (l *Lexer) NextWord() (string, error) {
	for {
		token, err := l.tokenizer.NextToken()
		if err != nil {
			return "", err
		}
		switch token.tokenType {
		case TOKEN_WORD:
			return token.value, nil
		case TOKEN_COMMENT:
			// Comments carry no words; keep scanning.
		default:
			panic(fmt.Sprintf("Unknown token type: %v", token.tokenType))
		}
	}
	// Note: the original had an unreachable `return "", io.EOF` after this
	// infinite loop; it has been removed (go vet flags unreachable code).
}
/*
A type for turning an input stream in to a sequence of typed tokens.
*/
type Tokenizer struct {
	input      *bufio.Reader    // buffered source being scanned
	classifier *TokenClassifier // maps each rune to its RuneTokenType
}
// NewTokenizer wraps r in a Tokenizer that uses the default ASCII
// classifier. The error result is always nil; it is kept for interface
// stability with existing callers.
func NewTokenizer(r io.Reader) (*Tokenizer, error) {
	return &Tokenizer{
		input:      bufio.NewReader(r),
		classifier: NewDefaultClassifier(),
	}, nil
}
// scanStream scans the stream for the next token using an internal state
// machine. It returns io.EOF when the input is exhausted before a token
// starts, and a descriptive error for an unterminated quote or escape.
// It panics only on an impossible internal state, which would indicate a
// bug in this package. (The original comment claimed it panics on unknown
// characters; those actually produce an error return.)
func (t *Tokenizer) scanStream() (*Token, error) {
	state := STATE_START
	var tokenType TokenType
	value := make([]int32, 0, INITIAL_TOKEN_CAPACITY)
	var (
		nextRune     int32
		nextRuneType RuneTokenType
		err          error
	)
SCAN:
	for {
		nextRune, _, err = t.input.ReadRune()
		nextRuneType = t.classifier.ClassifyRune(nextRune)
		if err != nil {
			if err == io.EOF {
				// Treat end of input as a pseudo-rune so each state can
				// decide whether EOF completes a token or is an error.
				nextRuneType = RUNETOKEN_EOF
				err = nil
			} else {
				return nil, err
			}
		}
		switch state {
		case STATE_START: // no runes read yet
			switch nextRuneType {
			case RUNETOKEN_EOF:
				return nil, io.EOF
			case RUNETOKEN_CHAR:
				tokenType = TOKEN_WORD
				value = append(value, nextRune)
				state = STATE_INWORD
			case RUNETOKEN_SPACE:
				// Skip leading whitespace.
			case RUNETOKEN_ESCAPING_QUOTE:
				tokenType = TOKEN_WORD
				state = STATE_QUOTED_ESCAPING
			case RUNETOKEN_NONESCAPING_QUOTE:
				tokenType = TOKEN_WORD
				state = STATE_QUOTED
			case RUNETOKEN_ESCAPE:
				tokenType = TOKEN_WORD
				state = STATE_ESCAPING
			case RUNETOKEN_COMMENT:
				tokenType = TOKEN_COMMENT
				state = STATE_COMMENT
			default:
				return nil, fmt.Errorf("Unknown rune: %v", nextRune)
			}
		case STATE_INWORD: // in a regular word
			switch nextRuneType {
			case RUNETOKEN_EOF:
				break SCAN
			case RUNETOKEN_CHAR, RUNETOKEN_COMMENT:
				value = append(value, nextRune)
			case RUNETOKEN_SPACE:
				// Whitespace ends the word; push it back for the next scan.
				t.input.UnreadRune()
				break SCAN
			case RUNETOKEN_ESCAPING_QUOTE:
				state = STATE_QUOTED_ESCAPING
			case RUNETOKEN_NONESCAPING_QUOTE:
				state = STATE_QUOTED
			case RUNETOKEN_ESCAPE:
				state = STATE_ESCAPING
			default:
				return nil, fmt.Errorf("Unknown rune: %v", nextRune)
			}
		case STATE_ESCAPING: // the next rune after an escape character
			switch nextRuneType {
			case RUNETOKEN_EOF:
				err = errors.New("EOF found after escape character")
				break SCAN
			case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
				// The escaped rune is taken literally.
				state = STATE_INWORD
				value = append(value, nextRune)
			default:
				return nil, fmt.Errorf("Unknown rune: %v", nextRune)
			}
		case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes
			switch nextRuneType {
			case RUNETOKEN_EOF:
				err = errors.New("EOF found after escape character")
				break SCAN
			case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
				state = STATE_QUOTED_ESCAPING
				value = append(value, nextRune)
			default:
				return nil, fmt.Errorf("Unknown rune: %v", nextRune)
			}
		case STATE_QUOTED_ESCAPING: // in escaping double quotes
			switch nextRuneType {
			case RUNETOKEN_EOF:
				err = errors.New("EOF found when expecting closing quote.")
				break SCAN
			case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT:
				value = append(value, nextRune)
			case RUNETOKEN_ESCAPING_QUOTE:
				state = STATE_INWORD
			case RUNETOKEN_ESCAPE:
				state = STATE_ESCAPING_QUOTED
			default:
				return nil, fmt.Errorf("Unknown rune: %v", nextRune)
			}
		case STATE_QUOTED: // in non-escaping single quotes
			switch nextRuneType {
			case RUNETOKEN_EOF:
				err = errors.New("EOF found when expecting closing quote.")
				break SCAN
			case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
				value = append(value, nextRune)
			case RUNETOKEN_NONESCAPING_QUOTE:
				state = STATE_INWORD
			default:
				return nil, fmt.Errorf("Unknown rune: %v", nextRune)
			}
		case STATE_COMMENT: // from a comment marker to the end of the line
			switch nextRuneType {
			case RUNETOKEN_EOF:
				break SCAN
			case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT, RUNETOKEN_NONESCAPING_QUOTE:
				value = append(value, nextRune)
			case RUNETOKEN_SPACE:
				if nextRune == '\n' {
					// A newline terminates the comment token.
					state = STATE_START
					break SCAN
				} else {
					value = append(value, nextRune)
				}
			default:
				return nil, fmt.Errorf("Unknown rune: %v", nextRune)
			}
		default:
			panic(fmt.Sprintf("Unexpected state: %v", state))
		}
	}
	token := &Token{
		tokenType: tokenType,
		value:     string(value)}
	return token, err
}
// NextToken returns the next token in the stream and an error value. When
// no more tokens are available the error is io.EOF.
func (t *Tokenizer) NextToken() (*Token, error) {
	return t.scanStream()
}
/*
Split a string in to a slice of strings, based upon shell-style rules for
quoting, escaping, and spaces.
*/
func Split(s string) ([]string, error) {
l, err := NewLexer(strings.NewReader(s))
if err != nil {
return nil, err
}
subStrings := []string{}
for | {
word, err := l.NextWord()
if err != nil {
if err == io.EOF {
return subStrings, nil
}
return subStrings, err
}
subStrings = append(subStrings, word)
} | conditional_block | |
mod.rs | sequences from `tail` until either (1) `tail` is
// exhausted, or (2) the display width of the result would exceed `display_width`.
//
// 3. If tail was exhausted, then contribute graphemes and ANSI escape sequences from `s` until the
// display_width of the result would exceed `display_width`.
pub fn truncate_str<'a>(s: &'a str, display_width: usize, tail: &str) -> Cow<'a, str> {
let items = ansi_strings_iterator(s).collect::<Vec<(&str, bool)>>();
let width = strip_ansi_codes_from_strings_iterator(items.iter().copied()).width();
if width <= display_width {
return Cow::from(s);
}
let result_tail = if !tail.is_empty() {
truncate_str(tail, display_width, "").to_string()
} else {
String::new()
};
let mut used = measure_text_width(&result_tail);
let mut result = String::new();
for (t, is_ansi) in items {
if !is_ansi {
for g in t.graphemes(true) {
let w = g.width();
if used + w > display_width {
result.push_str(&" ".repeat(display_width.saturating_sub(used)));
break;
}
result.push_str(g);
used += w;
}
} else {
result.push_str(t);
}
}
Cow::from(format!("{result}{result_tail}"))
}
pub fn parse_style_sections(s: &str) -> Vec<(ansi_term::Style, &str)> {
let mut sections = Vec::new();
let mut curr_style = Style::default();
for element in AnsiElementIterator::new(s) {
match element {
Element::Text(start, end) => sections.push((curr_style, &s[start..end])),
Element::Sgr(style, _, _) => curr_style = style,
_ => {}
}
}
sections
}
// Return the first CSI element, if any, as an `ansi_term::Style`.
pub fn parse_first_style(s: &str) -> Option<ansi_term::Style> {
AnsiElementIterator::new(s).find_map(|el| match el {
Element::Sgr(style, _, _) => Some(style),
_ => None,
})
}
pub fn string_starts_with_ansi_style_sequence(s: &str) -> bool {
AnsiElementIterator::new(s)
.next()
.map(|el| matches!(el, Element::Sgr(_, _, _)))
.unwrap_or(false)
}
/// Return string formed from a byte slice starting at byte position `start`, where the index
/// counts bytes in non-ANSI-escape-sequence content only. All ANSI escape sequences in the
/// original string are preserved.
pub fn ansi_preserving_slice(s: &str, start: usize) -> String {
AnsiElementIterator::new(s)
.scan(0, |index, element| {
// `index` is the index in non-ANSI-escape-sequence content.
Some(match element {
Element::Sgr(_, a, b) => &s[a..b],
Element::Csi(a, b) => &s[a..b],
Element::Esc(a, b) => &s[a..b],
Element::Osc(a, b) => &s[a..b],
Element::Text(a, b) => {
let i = *index;
*index += b - a;
if *index <= start {
// This text segment ends before start, so contributes no bytes.
""
} else if i > start {
// This section starts after `start`, so contributes all its bytes.
&s[a..b]
} else {
// This section contributes those bytes that are >= start
&s[(a + start - i)..b]
}
}
})
})
.join("")
}
/// Return the byte index in `s` of the i-th text byte in `s`. I.e. `i` counts
/// bytes in non-ANSI-escape-sequence content only.
pub fn ansi_preserving_index(s: &str, i: usize) -> Option<usize> {
let mut index = 0;
for element in AnsiElementIterator::new(s) {
if let Element::Text(a, b) = element {
index += b - a;
if index > i {
return Some(b - (index - i));
}
}
}
None
}
fn ansi_strings_iterator(s: &str) -> impl Iterator<Item = (&str, bool)> {
AnsiElementIterator::new(s).map(move |el| match el {
Element::Sgr(_, i, j) => (&s[i..j], true),
Element::Csi(i, j) => (&s[i..j], true),
Element::Esc(i, j) => (&s[i..j], true),
Element::Osc(i, j) => (&s[i..j], true),
Element::Text(i, j) => (&s[i..j], false),
})
}
fn strip_ansi_codes_from_strings_iterator<'a>(
strings: impl Iterator<Item = (&'a str, bool)>,
) -> String {
strings
.filter_map(|(el, is_ansi)| if !is_ansi { Some(el) } else { None })
.join("")
}
pub fn explain_ansi(line: &str, colorful: bool) -> String {
use crate::style::Style;
parse_style_sections(line)
.into_iter()
.map(|(ansi_term_style, s)| {
let style = Style {
ansi_term_style,
..Style::default()
};
if colorful {
format!("({}){}", style.to_painted_string(), style.paint(s))
} else {
format!("({style}){s}")
}
})
.collect()
}
#[cfg(test)]
mod tests {
use crate::ansi::ansi_preserving_index;
// Note that src/ansi/console_tests.rs contains additional test coverage for this module.
use super::{
ansi_preserving_slice, measure_text_width, parse_first_style,
string_starts_with_ansi_style_sequence, strip_ansi_codes, truncate_str,
};
#[test]
fn test_strip_ansi_codes() {
for s in &["src/ansi/mod.rs", "バー", "src/ansi/modバー.rs"] {
assert_eq!(strip_ansi_codes(s), *s);
}
assert_eq!(strip_ansi_codes("\x1b[31mバー\x1b[0m"), "バー");
}
#[test]
fn test_measure_text_width() {
assert_eq!(measure_text_width("src/ansi/mod.rs"), 15);
assert_eq!(measure_text_width("バー"), 4);
assert_eq!(measure_text_width("src/ansi/modバー.rs"), 19);
assert_eq!(measure_text_width("\x1b[31mバー\x1b[0m"), 4);
assert_eq!(measure_text_width("a\nb\n"), 2);
}
#[test]
fn test_strip_ansi_codes_osc_hyperlink() {
assert_eq!(strip_ansi_codes("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m\n"),
"src/ansi/mod.rs\n");
}
#[test]
fn test_measure_text_width_osc_hyperlink() {
assert_eq!(measure | asure_text_width_osc_hyperlink_non_ascii() {
assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/modバー.rs\x1b]8;;\x1b\\\x1b[0m"),
measure_text_width("src/ansi/modバー.rs"));
}
#[test]
fn test_parse_first_style() {
let minus_line_from_unconfigured_git = "\x1b[31m-____\x1b[m\n";
let style = parse_first_style(minus_line_from_unconfigured_git);
let expected_style = ansi_term::Style {
foreground: Some(ansi_term::Color::Red),
..ansi_term::Style::default()
};
assert_eq!(Some(expected_style), style);
}
#[test]
fn test_string_starts_with_ansi_escape_sequence() {
assert!(!string_starts_with_ansi_style_sequence(""));
assert!(!string_starts_with_ansi_style_sequence("-"));
assert!(string_starts_with_ansi_style_sequence(
"\x1b[31m-XXX\x1b[m\n"
));
assert!(string_starts_with_ansi_style_sequence("\x1b[32m+XXX"));
}
#[test]
fn test_ansi_preserving_slice_and_index() {
assert_eq!(ansi_preserving_slice("", | _text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m"),
measure_text_width("src/ansi/mod.rs"));
}
#[test]
fn test_me | identifier_body |
mod.rs | sequences from `tail` until either (1) `tail` is
// exhausted, or (2) the display width of the result would exceed `display_width`.
//
// 3. If tail was exhausted, then contribute graphemes and ANSI escape sequences from `s` until the
// display_width of the result would exceed `display_width`.
pub fn truncate_str<'a>(s: &'a str, display_width: usize, tail: &str) -> Cow<'a, str> {
let items = ansi_strings_iterator(s).collect::<Vec<(&str, bool)>>();
let width = strip_ansi_codes_from_strings_iterator(items.iter().copied()).width();
if width <= display_width {
return Cow::from(s);
}
let result_tail = if !tail.is_empty() {
truncate_str(tail, display_width, "").to_string()
} else {
String::new()
};
let mut used = measure_text_width(&result_tail);
let mut result = String::new();
for (t, is_ansi) in items {
if !is_ansi {
for g in t.graphemes(true) {
let w = g.width();
if used + w > display_width {
result.push_str(&" ".repeat(display_width.saturating_sub(used)));
break;
}
result.push_str(g);
used += w;
}
} else {
result.push_str(t);
}
}
Cow::from(format!("{result}{result_tail}"))
}
pub fn parse_style_sections(s: &str) -> Vec<(ansi_term::Style, &str)> {
let mut sections = Vec::new();
let mut curr_style = Style::default();
for element in AnsiElementIterator::new(s) {
match element {
Element::Text(start, end) => sections.push((curr_style, &s[start..end])),
Element::Sgr(style, _, _) => curr_style = style,
_ => {}
}
}
sections
}
// Return the first CSI element, if any, as an `ansi_term::Style`.
pub fn parse_first_style(s: &str) -> Option<ansi_term::Style> {
AnsiElementIterator::new(s).find_map(|el| match el {
Element::Sgr(style, _, _) => Some(style),
_ => None,
})
}
pub fn string_starts_with_ansi_style_sequence(s: &str) -> bool {
AnsiElementIterator::new(s)
.next()
.map(|el| matches!(el, Element::Sgr(_, _, _)))
.unwrap_or(false)
}
/// Return string formed from a byte slice starting at byte position `start`, where the index
/// counts bytes in non-ANSI-escape-sequence content only. All ANSI escape sequences in the
/// original string are preserved.
pub fn ansi_preserving_slice(s: &str, start: usize) -> String {
AnsiElementIterator::new(s)
.scan(0, |index, element| {
// `index` is the index in non-ANSI-escape-sequence content.
Some(match element {
Element::Sgr(_, a, b) => &s[a..b],
Element::Csi(a, b) => &s[a..b],
Element::Esc(a, b) => &s[a..b],
Element::Osc(a, b) => &s[a..b],
Element::Text(a, b) => {
let i = *index;
*index += b - a;
if *index <= start {
// This text segment ends before start, so contributes no bytes.
""
} else if i > start {
// This section starts after `start`, so contributes all its bytes.
&s[a..b]
} else {
// This section contributes those bytes that are >= start
&s[(a + start - i)..b]
}
}
})
})
.join("")
}
/// Return the byte index in `s` of the i-th text byte in `s`. I.e. `i` counts
/// bytes in non-ANSI-escape-sequence content only.
pub fn ansi_preserving_index(s: &str, i: usize) -> Option<usize> {
let mut index = 0;
for element in AnsiElementIterator::new(s) {
if let Element::Text(a, b) = element {
index += b - a;
if index > i {
return Some(b - (index - i));
}
}
}
None
}
fn ansi_strings_iterator(s: &str) -> impl Iterator<Item = (&str, bool)> {
AnsiElementIterator::new(s).map(move |el| match el {
Element::Sgr(_, i, j) => (&s[i..j], true),
Element::Csi(i, j) => (&s[i..j], true),
Element::Esc(i, j) => (&s[i..j], true),
Element::Osc(i, j) => (&s[i..j], true),
Element::Text(i, j) => (&s[i..j], false),
})
}
fn strip_ansi_codes_from_strings_iterator<'a>(
strings: impl Iterator<Item = (&'a str, bool)>,
) -> String {
strings
.filter_map(|(el, is_ansi)| if !is_ansi { Some(el) } else { None })
.join("")
}
pub fn explain_ansi(line: &str, colorful: bool) -> String {
use crate::style::Style;
parse_style_sections(line)
.into_iter()
.map(|(ansi_term_style, s)| {
let style = Style {
ansi_term_style,
..Style::default()
};
if colorful {
format!("({}){}", style.to_painted_string(), style.paint(s))
} else {
format!("({style}){s}")
}
})
.collect()
}
#[cfg(test)]
mod tests {
use crate::ansi::ansi_preserving_index;
// Note that src/ansi/console_tests.rs contains additional test coverage for this module.
use super::{
ansi_preserving_slice, measure_text_width, parse_first_style,
string_starts_with_ansi_style_sequence, strip_ansi_codes, truncate_str,
};
#[test]
fn test_strip_ansi_codes() {
for s in &["src/ansi/mod.rs", "バー", "src/ansi/modバー.rs"] {
assert_eq!(strip_ansi_codes(s), *s);
}
assert_eq!(strip_ansi_codes("\x1b[31mバー\x1b[0m"), "バー");
}
#[test]
fn test_measure_text_width() {
assert_eq!(measure_text_width("src/ansi/mod.rs"), 15);
assert_eq!(measure_text_width("バー"), 4);
assert_eq!(measure_text_width("src/ansi/modバー.rs"), 19);
assert_eq!(measure_text_width("\x1b[31mバー\x1b[0m"), 4);
assert_eq!(measure_text_width("a\nb\n"), 2);
}
#[test]
fn test_strip_ansi_codes_osc_hyperlink() {
assert_eq!(strip_ansi_codes("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m\n"),
"src/ansi/mod.rs\n");
}
#[test]
fn test_measure_text_width_osc_hyperlink() {
assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m"),
measure_text_width("src/ansi/mod.rs"));
}
#[test]
fn test_measure_text_width_osc_ | ure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/modバー.rs\x1b]8;;\x1b\\\x1b[0m"),
measure_text_width("src/ansi/modバー.rs"));
}
#[test]
fn test_parse_first_style() {
let minus_line_from_unconfigured_git = "\x1b[31m-____\x1b[m\n";
let style = parse_first_style(minus_line_from_unconfigured_git);
let expected_style = ansi_term::Style {
foreground: Some(ansi_term::Color::Red),
..ansi_term::Style::default()
};
assert_eq!(Some(expected_style), style);
}
#[test]
fn test_string_starts_with_ansi_escape_sequence() {
assert!(!string_starts_with_ansi_style_sequence(""));
assert!(!string_starts_with_ansi_style_sequence("-"));
assert!(string_starts_with_ansi_style_sequence(
"\x1b[31m-XXX\x1b[m\n"
));
assert!(string_starts_with_ansi_style_sequence("\x1b[32m+XXX"));
}
#[test]
fn test_ansi_preserving_slice_and_index() {
assert_eq!(ansi_preserving_slice | hyperlink_non_ascii() {
assert_eq!(meas | identifier_name |
mod.rs | sequences from `tail` until either (1) `tail` is
// exhausted, or (2) the display width of the result would exceed `display_width`.
//
// 3. If tail was exhausted, then contribute graphemes and ANSI escape sequences from `s` until the
// display_width of the result would exceed `display_width`.
pub fn truncate_str<'a>(s: &'a str, display_width: usize, tail: &str) -> Cow<'a, str> {
let items = ansi_strings_iterator(s).collect::<Vec<(&str, bool)>>();
let width = strip_ansi_codes_from_strings_iterator(items.iter().copied()).width();
if width <= display_width {
return Cow::from(s);
}
let result_tail = if !tail.is_empty() {
truncate_str(tail, display_width, "").to_string()
} else {
String::new()
};
let mut used = measure_text_width(&result_tail);
let mut result = String::new();
for (t, is_ansi) in items {
if !is_ansi {
for g in t.graphemes(true) {
let w = g.width();
if used + w > display_width {
result.push_str(&" ".repeat(display_width.saturating_sub(used)));
break;
}
result.push_str(g);
used += w;
}
} else |
}
Cow::from(format!("{result}{result_tail}"))
}
pub fn parse_style_sections(s: &str) -> Vec<(ansi_term::Style, &str)> {
let mut sections = Vec::new();
let mut curr_style = Style::default();
for element in AnsiElementIterator::new(s) {
match element {
Element::Text(start, end) => sections.push((curr_style, &s[start..end])),
Element::Sgr(style, _, _) => curr_style = style,
_ => {}
}
}
sections
}
// Return the first CSI element, if any, as an `ansi_term::Style`.
pub fn parse_first_style(s: &str) -> Option<ansi_term::Style> {
AnsiElementIterator::new(s).find_map(|el| match el {
Element::Sgr(style, _, _) => Some(style),
_ => None,
})
}
pub fn string_starts_with_ansi_style_sequence(s: &str) -> bool {
AnsiElementIterator::new(s)
.next()
.map(|el| matches!(el, Element::Sgr(_, _, _)))
.unwrap_or(false)
}
/// Return string formed from a byte slice starting at byte position `start`, where the index
/// counts bytes in non-ANSI-escape-sequence content only. All ANSI escape sequences in the
/// original string are preserved.
pub fn ansi_preserving_slice(s: &str, start: usize) -> String {
AnsiElementIterator::new(s)
.scan(0, |index, element| {
// `index` is the index in non-ANSI-escape-sequence content.
Some(match element {
Element::Sgr(_, a, b) => &s[a..b],
Element::Csi(a, b) => &s[a..b],
Element::Esc(a, b) => &s[a..b],
Element::Osc(a, b) => &s[a..b],
Element::Text(a, b) => {
let i = *index;
*index += b - a;
if *index <= start {
// This text segment ends before start, so contributes no bytes.
""
} else if i > start {
// This section starts after `start`, so contributes all its bytes.
&s[a..b]
} else {
// This section contributes those bytes that are >= start
&s[(a + start - i)..b]
}
}
})
})
.join("")
}
/// Return the byte index in `s` of the i-th text byte in `s`. I.e. `i` counts
/// bytes in non-ANSI-escape-sequence content only.
pub fn ansi_preserving_index(s: &str, i: usize) -> Option<usize> {
let mut index = 0;
for element in AnsiElementIterator::new(s) {
if let Element::Text(a, b) = element {
index += b - a;
if index > i {
return Some(b - (index - i));
}
}
}
None
}
fn ansi_strings_iterator(s: &str) -> impl Iterator<Item = (&str, bool)> {
AnsiElementIterator::new(s).map(move |el| match el {
Element::Sgr(_, i, j) => (&s[i..j], true),
Element::Csi(i, j) => (&s[i..j], true),
Element::Esc(i, j) => (&s[i..j], true),
Element::Osc(i, j) => (&s[i..j], true),
Element::Text(i, j) => (&s[i..j], false),
})
}
fn strip_ansi_codes_from_strings_iterator<'a>(
strings: impl Iterator<Item = (&'a str, bool)>,
) -> String {
strings
.filter_map(|(el, is_ansi)| if !is_ansi { Some(el) } else { None })
.join("")
}
pub fn explain_ansi(line: &str, colorful: bool) -> String {
use crate::style::Style;
parse_style_sections(line)
.into_iter()
.map(|(ansi_term_style, s)| {
let style = Style {
ansi_term_style,
..Style::default()
};
if colorful {
format!("({}){}", style.to_painted_string(), style.paint(s))
} else {
format!("({style}){s}")
}
})
.collect()
}
#[cfg(test)]
mod tests {
use crate::ansi::ansi_preserving_index;
// Note that src/ansi/console_tests.rs contains additional test coverage for this module.
use super::{
ansi_preserving_slice, measure_text_width, parse_first_style,
string_starts_with_ansi_style_sequence, strip_ansi_codes, truncate_str,
};
#[test]
fn test_strip_ansi_codes() {
for s in &["src/ansi/mod.rs", "バー", "src/ansi/modバー.rs"] {
assert_eq!(strip_ansi_codes(s), *s);
}
assert_eq!(strip_ansi_codes("\x1b[31mバー\x1b[0m"), "バー");
}
#[test]
fn test_measure_text_width() {
assert_eq!(measure_text_width("src/ansi/mod.rs"), 15);
assert_eq!(measure_text_width("バー"), 4);
assert_eq!(measure_text_width("src/ansi/modバー.rs"), 19);
assert_eq!(measure_text_width("\x1b[31mバー\x1b[0m"), 4);
assert_eq!(measure_text_width("a\nb\n"), 2);
}
#[test]
fn test_strip_ansi_codes_osc_hyperlink() {
assert_eq!(strip_ansi_codes("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m\n"),
"src/ansi/mod.rs\n");
}
#[test]
fn test_measure_text_width_osc_hyperlink() {
assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m"),
measure_text_width("src/ansi/mod.rs"));
}
#[test]
fn test_measure_text_width_osc_hyperlink_non_ascii() {
assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/modバー.rs\x1b]8;;\x1b\\\x1b[0m"),
measure_text_width("src/ansi/modバー.rs"));
}
#[test]
fn test_parse_first_style() {
let minus_line_from_unconfigured_git = "\x1b[31m-____\x1b[m\n";
let style = parse_first_style(minus_line_from_unconfigured_git);
let expected_style = ansi_term::Style {
foreground: Some(ansi_term::Color::Red),
..ansi_term::Style::default()
};
assert_eq!(Some(expected_style), style);
}
#[test]
fn test_string_starts_with_ansi_escape_sequence() {
assert!(!string_starts_with_ansi_style_sequence(""));
assert!(!string_starts_with_ansi_style_sequence("-"));
assert!(string_starts_with_ansi_style_sequence(
"\x1b[31m-XXX\x1b[m\n"
));
assert!(string_starts_with_ansi_style_sequence("\x1b[32m+XXX"));
}
#[test]
fn test_ansi_preserving_slice_and_index() {
assert_eq!(ansi_preserving_slice("", | {
result.push_str(t);
} | conditional_block |
mod.rs | escape sequences from `tail` until either (1) `tail` is
// exhausted, or (2) the display width of the result would exceed `display_width`.
//
// 3. If tail was exhausted, then contribute graphemes and ANSI escape sequences from `s` until the
// display_width of the result would exceed `display_width`.
pub fn truncate_str<'a>(s: &'a str, display_width: usize, tail: &str) -> Cow<'a, str> {
let items = ansi_strings_iterator(s).collect::<Vec<(&str, bool)>>();
let width = strip_ansi_codes_from_strings_iterator(items.iter().copied()).width();
if width <= display_width {
return Cow::from(s);
}
let result_tail = if !tail.is_empty() {
truncate_str(tail, display_width, "").to_string()
} else {
String::new()
};
let mut used = measure_text_width(&result_tail);
let mut result = String::new();
for (t, is_ansi) in items {
if !is_ansi {
for g in t.graphemes(true) {
let w = g.width();
if used + w > display_width {
result.push_str(&" ".repeat(display_width.saturating_sub(used)));
break;
}
result.push_str(g);
used += w;
}
} else {
result.push_str(t);
}
}
Cow::from(format!("{result}{result_tail}"))
}
pub fn parse_style_sections(s: &str) -> Vec<(ansi_term::Style, &str)> {
let mut sections = Vec::new();
let mut curr_style = Style::default();
for element in AnsiElementIterator::new(s) {
match element {
Element::Text(start, end) => sections.push((curr_style, &s[start..end])),
Element::Sgr(style, _, _) => curr_style = style,
_ => {}
}
}
sections
}
// Return the first CSI element, if any, as an `ansi_term::Style`.
pub fn parse_first_style(s: &str) -> Option<ansi_term::Style> {
AnsiElementIterator::new(s).find_map(|el| match el {
Element::Sgr(style, _, _) => Some(style),
_ => None,
})
}
pub fn string_starts_with_ansi_style_sequence(s: &str) -> bool {
AnsiElementIterator::new(s)
.next()
.map(|el| matches!(el, Element::Sgr(_, _, _)))
.unwrap_or(false)
}
/// Return string formed from a byte slice starting at byte position `start`, where the index
/// counts bytes in non-ANSI-escape-sequence content only. All ANSI escape sequences in the
/// original string are preserved.
pub fn ansi_preserving_slice(s: &str, start: usize) -> String {
AnsiElementIterator::new(s)
.scan(0, |index, element| {
// `index` is the index in non-ANSI-escape-sequence content.
Some(match element {
Element::Sgr(_, a, b) => &s[a..b],
Element::Csi(a, b) => &s[a..b],
Element::Esc(a, b) => &s[a..b],
Element::Osc(a, b) => &s[a..b],
Element::Text(a, b) => {
let i = *index;
*index += b - a;
if *index <= start {
// This text segment ends before start, so contributes no bytes.
""
} else if i > start {
// This section starts after `start`, so contributes all its bytes.
&s[a..b]
} else {
// This section contributes those bytes that are >= start
&s[(a + start - i)..b]
}
}
})
})
.join("")
}
/// Return the byte index in `s` of the i-th text byte in `s`. I.e. `i` counts
/// bytes in non-ANSI-escape-sequence content only.
pub fn ansi_preserving_index(s: &str, i: usize) -> Option<usize> {
let mut index = 0;
for element in AnsiElementIterator::new(s) {
if let Element::Text(a, b) = element {
index += b - a;
if index > i {
return Some(b - (index - i));
}
}
}
None
}
fn ansi_strings_iterator(s: &str) -> impl Iterator<Item = (&str, bool)> {
AnsiElementIterator::new(s).map(move |el| match el {
Element::Sgr(_, i, j) => (&s[i..j], true),
Element::Csi(i, j) => (&s[i..j], true),
Element::Esc(i, j) => (&s[i..j], true),
Element::Osc(i, j) => (&s[i..j], true),
Element::Text(i, j) => (&s[i..j], false),
})
}
fn strip_ansi_codes_from_strings_iterator<'a>(
strings: impl Iterator<Item = (&'a str, bool)>,
) -> String {
strings
.filter_map(|(el, is_ansi)| if !is_ansi { Some(el) } else { None })
.join("")
}
pub fn explain_ansi(line: &str, colorful: bool) -> String {
use crate::style::Style;
parse_style_sections(line)
.into_iter()
.map(|(ansi_term_style, s)| {
let style = Style {
ansi_term_style,
..Style::default()
};
if colorful {
format!("({}){}", style.to_painted_string(), style.paint(s))
} else {
format!("({style}){s}")
}
})
.collect()
}
#[cfg(test)]
mod tests {
use crate::ansi::ansi_preserving_index;
// Note that src/ansi/console_tests.rs contains additional test coverage for this module.
use super::{
ansi_preserving_slice, measure_text_width, parse_first_style,
string_starts_with_ansi_style_sequence, strip_ansi_codes, truncate_str,
};
#[test]
fn test_strip_ansi_codes() {
for s in &["src/ansi/mod.rs", "バー", "src/ansi/modバー.rs"] {
assert_eq!(strip_ansi_codes(s), *s);
}
assert_eq!(strip_ansi_codes("\x1b[31mバー\x1b[0m"), "バー");
}
#[test]
fn test_measure_text_width() {
assert_eq!(measure_text_width("src/ansi/mod.rs"), 15);
assert_eq!(measure_text_width("バー"), 4);
assert_eq!(measure_text_width("src/ansi/modバー.rs"), 19);
assert_eq!(measure_text_width("\x1b[31mバー\x1b[0m"), 4);
assert_eq!(measure_text_width("a\nb\n"), 2);
}
#[test]
fn test_strip_ansi_codes_osc_hyperlink() {
assert_eq!(strip_ansi_codes("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m\n"),
"src/ansi/mod.rs\n");
}
#[test]
fn test_measure_text_width_osc_hyperlink() {
assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m"), | measure_text_width("src/ansi/mod.rs"));
}
#[test]
fn test_measure_text_width_osc_hyperlink_non_ascii() {
assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/modバー.rs\x1b]8;;\x1b\\\x1b[0m"),
measure_text_width("src/ansi/modバー.rs"));
}
#[test]
fn test_parse_first_style() {
let minus_line_from_unconfigured_git = "\x1b[31m-____\x1b[m\n";
let style = parse_first_style(minus_line_from_unconfigured_git);
let expected_style = ansi_term::Style {
foreground: Some(ansi_term::Color::Red),
..ansi_term::Style::default()
};
assert_eq!(Some(expected_style), style);
}
#[test]
fn test_string_starts_with_ansi_escape_sequence() {
assert!(!string_starts_with_ansi_style_sequence(""));
assert!(!string_starts_with_ansi_style_sequence("-"));
assert!(string_starts_with_ansi_style_sequence(
"\x1b[31m-XXX\x1b[m\n"
));
assert!(string_starts_with_ansi_style_sequence("\x1b[32m+XXX"));
}
#[test]
fn test_ansi_preserving_slice_and_index() {
assert_eq!(ansi_preserving_slice("", | random_line_split | |
main.rs | : u32;
static __DATA_END: u32;
static mut __DATA_START: u32;
static mut __BSS_START: u32;
static mut __BSS_END: u32;
}
let data_load = &__DATA_LOAD;
let data_start = &mut __DATA_START;
let data_end = &__DATA_END;
let bss_start = &mut __BSS_START;
let bss_end = &__BSS_END;
r0::init_data(data_start, data_end, data_load);
r0::zero_bss(bss_start, bss_end);
stm32f7::heap::init();
// enable floating point unit
let scb = stm32f7::cortex_m::peripheral::scb_mut();
scb.cpacr.modify(|v| v | 0b1111 << 20);
asm!("DSB; ISB;"::::"volatile"); // pipeline flush
main(board::hw());
}
// WORKAROUND: rust compiler will inline & reorder fp instructions into
#[inline(never)] // reset() before the FPU is initialized
fn main(hw: board::Hardware) -> ! {
let board::Hardware {
rcc,
pwr,
flash,
fmc,
ltdc,
gpio_a,
gpio_b,
gpio_c,
gpio_d,
gpio_e,
gpio_f,
gpio_g,
gpio_h,
gpio_i,
gpio_j,
gpio_k,
spi_2,
i2c_3,
..
} = hw;
let mut gpio = Gpio::new(gpio_a,
gpio_b,
gpio_c,
gpio_d,
gpio_e,
gpio_f,
gpio_g,
gpio_h,
gpio_i,
gpio_j,
gpio_k);
system_clock::init(rcc, pwr, flash);
// Peripheral clock configuration
{
// enable all gpio ports
rcc.ahb1enr.update(|r| {
r.set_gpioaen(true);
r.set_gpioben(true);
r.set_gpiocen(true);
r.set_gpioden(true);
r.set_gpioeen(true);
r.set_gpiofen(true);
r.set_gpiogen(true);
r.set_gpiohen(true);
r.set_gpioien(true);
r.set_gpiojen(true);
r.set_gpioken(true);
});
// Enable SPI_2
rcc.apb1enr.update(|apb1enr| {
apb1enr.set_spi2en(true);
});
delay(1);
}
// i2c configuration
i2c::init_pins_and_clocks(rcc, &mut gpio);
let mut i2c_3 = i2c::init(i2c_3);
i2c_3.test_1();
i2c_3.test_2();
let mut temp_sensor = temp_sensor_init_spi2(&mut gpio, spi_2);
// init sdram (needed for display buffer)
sdram::init(rcc, fmc, &mut gpio);
let pwm_pin = (gpio::Port::PortI, gpio::Pin::Pin2);
let mut pwm_gpio = gpio.to_output(pwm_pin,
gpio::OutputType::PushPull,
gpio::OutputSpeed::High, | let drag_color = Color::from_hex(0x000000);
let grid_color = Color::from_hex(0x444444);
// lcd controller
let mut lcd = lcd::init(ltdc, rcc, &mut gpio);
touch::check_family_id(&mut i2c_3).unwrap();
loop {
SYSCLOCK.reset();
lcd.clear_screen();
lcd.set_background_color(Color::from_hex(0x000000));
let plot_font = Box::new(Font::new(TTF, 11).unwrap()).leak();
let rtval_font = Box::new(Font::new(TTF, 14).unwrap()).leak();
let mut plot = plot::Plot::new(model::Range::new(0f32, (20*60) as f32),
model::Range::new(0f32, 200f32),
plot_font,
rtval_font,
axis_color,
grid_color,
drag_color,
80, // drag timeout
);
plot.draw_axis(&mut lcd);
//let mut pid_controller = pid::PIDController::new(0.3f32, 0.0f32, 0.0f32);
//let mut pid_controller = pid::PIDController::new(0.1f32, 0.0f32, 0.3f32); // Definitely better than first, but overshooting
//let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.3f32); // Not much different
let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.6f32); // Not much different
let mut smoother = pid::Smoother::new(10);
let mut measurement_start_system_time = SYSCLOCK.get_ticks();
let mut last_measurement_system_time = SYSCLOCK.get_ticks();
let mut duty_cycle: usize = 0;
let mut temp = 20f32;
let mut state_button = state_button::StateButton::new(
Color::from_hex(0x222222),
Rect{origin: Point{x: 440, y: 0}, width: 40, height: 40}
);
state_button.render(&mut lcd);
let mut last_touch_event = None;
'mainloop: loop {
let ticks = SYSCLOCK.get_ticks();
let delta_measurement = time::delta_checked(&last_measurement_system_time, &ticks);
if delta_measurement.to_msecs() >= 500 {
let val = temp_sensor.read();
let measurement_time = time::delta_checked(&measurement_start_system_time, &ticks).to_secs();
let measurement = model::TimeTemp{
time: measurement_time, // TODO just integer divide here?
temp: val as f32,
};
match state_button.state() {
State::RUNNING => plot.add_measurement(measurement, &mut lcd),
State::RESETTED => {
plot.set_measurement(model::TimeTemp{time: 0f32, temp: measurement.temp}, &mut lcd);
plot.update_ramp_start(&mut lcd);
},
State::STOPPED => {},
}
if let State::RUNNING = state_button.state() {
smoother.push_value(val);
let smooth_temp = smoother.get_average();
let ramp_target_temp = plot.ramp().evaluate(measurement_time);
let error = ramp_target_temp - smooth_temp;
let pid_value = pid_controller.cycle(error, &delta_measurement);
duty_cycle = (util::clamp(pid_value, 0f32, 1f32) * 1000f32) as usize;
lcd.draw_point_color(
Point{
x: plot.transform_time(measurement_time),
y: plot::Plot::transform_ranges(model::Range{from: 0f32, to: 1f32}, plot::Y_PX_RANGE, pid_value)
}, Layer::Layer2, Color::from_hex(0x0000ff).to_argb1555());
//let pid_clamped = util::clamp(pid_value, 0f32, 1f32);
//temp += (pid_clamped - 0.3) * delta_measurement.to_secs() * 1.0;
} else {
duty_cycle = 0;
}
last_measurement_system_time = ticks;
}
pwm_gpio.set(ticks.to_msecs() % 1000 < duty_cycle);
// poll for new touch data
let mut touches = false;
for touch in &touch::touches(&mut i2c_3).unwrap() {
touches = true;
let touch = model::Touch{
location: Point{
x: touch.x,
y: touch.y
},
time: ticks
};
let touch_event = match last_touch_event {
Some(TouchDown(_)) | Some(TouchMove(_)) => TouchMove(touch),
None | Some(TouchUp(_)) => TouchDown(touch),
};
//Do not allow changing ramp in stopped state
match state_button.state() {
State::RUNNING | State::RESETTED =>
plot.handle_touch(touch_event, &mut lcd),
_ => {},
}
last_touch_event = Some(touch_event);
}
// Deliver touch-up events
if !touches && last_touch_event.is_some() {
let touch_event = match last_touch_event.unwrap() {
TouchDown(t) | TouchMove(t) if time::delta(&ticks,&t.time).to_msecs() > 200 => {
| gpio::Resistor::NoPull)
.expect("Could not configure pwm pin");
let axis_color = Color::from_hex(0xffffff); | random_line_split |
main.rs | u32;
static __DATA_END: u32;
static mut __DATA_START: u32;
static mut __BSS_START: u32;
static mut __BSS_END: u32;
}
let data_load = &__DATA_LOAD;
let data_start = &mut __DATA_START;
let data_end = &__DATA_END;
let bss_start = &mut __BSS_START;
let bss_end = &__BSS_END;
r0::init_data(data_start, data_end, data_load);
r0::zero_bss(bss_start, bss_end);
stm32f7::heap::init();
// enable floating point unit
let scb = stm32f7::cortex_m::peripheral::scb_mut();
scb.cpacr.modify(|v| v | 0b1111 << 20);
asm!("DSB; ISB;"::::"volatile"); // pipeline flush
main(board::hw());
}
// WORKAROUND: rust compiler will inline & reorder fp instructions into
#[inline(never)] // reset() before the FPU is initialized
fn | (hw: board::Hardware) -> ! {
let board::Hardware {
rcc,
pwr,
flash,
fmc,
ltdc,
gpio_a,
gpio_b,
gpio_c,
gpio_d,
gpio_e,
gpio_f,
gpio_g,
gpio_h,
gpio_i,
gpio_j,
gpio_k,
spi_2,
i2c_3,
..
} = hw;
let mut gpio = Gpio::new(gpio_a,
gpio_b,
gpio_c,
gpio_d,
gpio_e,
gpio_f,
gpio_g,
gpio_h,
gpio_i,
gpio_j,
gpio_k);
system_clock::init(rcc, pwr, flash);
// Peripheral clock configuration
{
// enable all gpio ports
rcc.ahb1enr.update(|r| {
r.set_gpioaen(true);
r.set_gpioben(true);
r.set_gpiocen(true);
r.set_gpioden(true);
r.set_gpioeen(true);
r.set_gpiofen(true);
r.set_gpiogen(true);
r.set_gpiohen(true);
r.set_gpioien(true);
r.set_gpiojen(true);
r.set_gpioken(true);
});
// Enable SPI_2
rcc.apb1enr.update(|apb1enr| {
apb1enr.set_spi2en(true);
});
delay(1);
}
// i2c configuration
i2c::init_pins_and_clocks(rcc, &mut gpio);
let mut i2c_3 = i2c::init(i2c_3);
i2c_3.test_1();
i2c_3.test_2();
let mut temp_sensor = temp_sensor_init_spi2(&mut gpio, spi_2);
// init sdram (needed for display buffer)
sdram::init(rcc, fmc, &mut gpio);
let pwm_pin = (gpio::Port::PortI, gpio::Pin::Pin2);
let mut pwm_gpio = gpio.to_output(pwm_pin,
gpio::OutputType::PushPull,
gpio::OutputSpeed::High,
gpio::Resistor::NoPull)
.expect("Could not configure pwm pin");
let axis_color = Color::from_hex(0xffffff);
let drag_color = Color::from_hex(0x000000);
let grid_color = Color::from_hex(0x444444);
// lcd controller
let mut lcd = lcd::init(ltdc, rcc, &mut gpio);
touch::check_family_id(&mut i2c_3).unwrap();
loop {
SYSCLOCK.reset();
lcd.clear_screen();
lcd.set_background_color(Color::from_hex(0x000000));
let plot_font = Box::new(Font::new(TTF, 11).unwrap()).leak();
let rtval_font = Box::new(Font::new(TTF, 14).unwrap()).leak();
let mut plot = plot::Plot::new(model::Range::new(0f32, (20*60) as f32),
model::Range::new(0f32, 200f32),
plot_font,
rtval_font,
axis_color,
grid_color,
drag_color,
80, // drag timeout
);
plot.draw_axis(&mut lcd);
//let mut pid_controller = pid::PIDController::new(0.3f32, 0.0f32, 0.0f32);
//let mut pid_controller = pid::PIDController::new(0.1f32, 0.0f32, 0.3f32); // Definitely better than first, but overshooting
//let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.3f32); // Not much different
let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.6f32); // Not much different
let mut smoother = pid::Smoother::new(10);
let mut measurement_start_system_time = SYSCLOCK.get_ticks();
let mut last_measurement_system_time = SYSCLOCK.get_ticks();
let mut duty_cycle: usize = 0;
let mut temp = 20f32;
let mut state_button = state_button::StateButton::new(
Color::from_hex(0x222222),
Rect{origin: Point{x: 440, y: 0}, width: 40, height: 40}
);
state_button.render(&mut lcd);
let mut last_touch_event = None;
'mainloop: loop {
let ticks = SYSCLOCK.get_ticks();
let delta_measurement = time::delta_checked(&last_measurement_system_time, &ticks);
if delta_measurement.to_msecs() >= 500 {
let val = temp_sensor.read();
let measurement_time = time::delta_checked(&measurement_start_system_time, &ticks).to_secs();
let measurement = model::TimeTemp{
time: measurement_time, // TODO just integer divide here?
temp: val as f32,
};
match state_button.state() {
State::RUNNING => plot.add_measurement(measurement, &mut lcd),
State::RESETTED => {
plot.set_measurement(model::TimeTemp{time: 0f32, temp: measurement.temp}, &mut lcd);
plot.update_ramp_start(&mut lcd);
},
State::STOPPED => {},
}
if let State::RUNNING = state_button.state() {
smoother.push_value(val);
let smooth_temp = smoother.get_average();
let ramp_target_temp = plot.ramp().evaluate(measurement_time);
let error = ramp_target_temp - smooth_temp;
let pid_value = pid_controller.cycle(error, &delta_measurement);
duty_cycle = (util::clamp(pid_value, 0f32, 1f32) * 1000f32) as usize;
lcd.draw_point_color(
Point{
x: plot.transform_time(measurement_time),
y: plot::Plot::transform_ranges(model::Range{from: 0f32, to: 1f32}, plot::Y_PX_RANGE, pid_value)
}, Layer::Layer2, Color::from_hex(0x0000ff).to_argb1555());
//let pid_clamped = util::clamp(pid_value, 0f32, 1f32);
//temp += (pid_clamped - 0.3) * delta_measurement.to_secs() * 1.0;
} else {
duty_cycle = 0;
}
last_measurement_system_time = ticks;
}
pwm_gpio.set(ticks.to_msecs() % 1000 < duty_cycle);
// poll for new touch data
let mut touches = false;
for touch in &touch::touches(&mut i2c_3).unwrap() {
touches = true;
let touch = model::Touch{
location: Point{
x: touch.x,
y: touch.y
},
time: ticks
};
let touch_event = match last_touch_event {
Some(TouchDown(_)) | Some(TouchMove(_)) => TouchMove(touch),
None | Some(TouchUp(_)) => TouchDown(touch),
};
//Do not allow changing ramp in stopped state
match state_button.state() {
State::RUNNING | State::RESETTED =>
plot.handle_touch(touch_event, &mut lcd),
_ => {},
}
last_touch_event = Some(touch_event);
}
// Deliver touch-up events
if !touches && last_touch_event.is_some() {
let touch_event = match last_touch_event.unwrap() {
TouchDown(t) | TouchMove(t) if time::delta(&ticks,&t.time).to_msecs() > 200 => {
| main | identifier_name |
main.rs | // enable floating point unit
let scb = stm32f7::cortex_m::peripheral::scb_mut();
scb.cpacr.modify(|v| v | 0b1111 << 20);
asm!("DSB; ISB;"::::"volatile"); // pipeline flush
main(board::hw());
}
// WORKAROUND: rust compiler will inline & reorder fp instructions into
#[inline(never)] // reset() before the FPU is initialized
fn main(hw: board::Hardware) -> ! {
let board::Hardware {
rcc,
pwr,
flash,
fmc,
ltdc,
gpio_a,
gpio_b,
gpio_c,
gpio_d,
gpio_e,
gpio_f,
gpio_g,
gpio_h,
gpio_i,
gpio_j,
gpio_k,
spi_2,
i2c_3,
..
} = hw;
let mut gpio = Gpio::new(gpio_a,
gpio_b,
gpio_c,
gpio_d,
gpio_e,
gpio_f,
gpio_g,
gpio_h,
gpio_i,
gpio_j,
gpio_k);
system_clock::init(rcc, pwr, flash);
// Peripheral clock configuration
{
// enable all gpio ports
rcc.ahb1enr.update(|r| {
r.set_gpioaen(true);
r.set_gpioben(true);
r.set_gpiocen(true);
r.set_gpioden(true);
r.set_gpioeen(true);
r.set_gpiofen(true);
r.set_gpiogen(true);
r.set_gpiohen(true);
r.set_gpioien(true);
r.set_gpiojen(true);
r.set_gpioken(true);
});
// Enable SPI_2
rcc.apb1enr.update(|apb1enr| {
apb1enr.set_spi2en(true);
});
delay(1);
}
// i2c configuration
i2c::init_pins_and_clocks(rcc, &mut gpio);
let mut i2c_3 = i2c::init(i2c_3);
i2c_3.test_1();
i2c_3.test_2();
let mut temp_sensor = temp_sensor_init_spi2(&mut gpio, spi_2);
// init sdram (needed for display buffer)
sdram::init(rcc, fmc, &mut gpio);
let pwm_pin = (gpio::Port::PortI, gpio::Pin::Pin2);
let mut pwm_gpio = gpio.to_output(pwm_pin,
gpio::OutputType::PushPull,
gpio::OutputSpeed::High,
gpio::Resistor::NoPull)
.expect("Could not configure pwm pin");
let axis_color = Color::from_hex(0xffffff);
let drag_color = Color::from_hex(0x000000);
let grid_color = Color::from_hex(0x444444);
// lcd controller
let mut lcd = lcd::init(ltdc, rcc, &mut gpio);
touch::check_family_id(&mut i2c_3).unwrap();
loop {
SYSCLOCK.reset();
lcd.clear_screen();
lcd.set_background_color(Color::from_hex(0x000000));
let plot_font = Box::new(Font::new(TTF, 11).unwrap()).leak();
let rtval_font = Box::new(Font::new(TTF, 14).unwrap()).leak();
let mut plot = plot::Plot::new(model::Range::new(0f32, (20*60) as f32),
model::Range::new(0f32, 200f32),
plot_font,
rtval_font,
axis_color,
grid_color,
drag_color,
80, // drag timeout
);
plot.draw_axis(&mut lcd);
//let mut pid_controller = pid::PIDController::new(0.3f32, 0.0f32, 0.0f32);
//let mut pid_controller = pid::PIDController::new(0.1f32, 0.0f32, 0.3f32); // Definitely better than first, but overshooting
//let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.3f32); // Not much different
let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.6f32); // Not much different
let mut smoother = pid::Smoother::new(10);
let mut measurement_start_system_time = SYSCLOCK.get_ticks();
let mut last_measurement_system_time = SYSCLOCK.get_ticks();
let mut duty_cycle: usize = 0;
let mut temp = 20f32;
let mut state_button = state_button::StateButton::new(
Color::from_hex(0x222222),
Rect{origin: Point{x: 440, y: 0}, width: 40, height: 40}
);
state_button.render(&mut lcd);
let mut last_touch_event = None;
'mainloop: loop {
let ticks = SYSCLOCK.get_ticks();
let delta_measurement = time::delta_checked(&last_measurement_system_time, &ticks);
if delta_measurement.to_msecs() >= 500 {
let val = temp_sensor.read();
let measurement_time = time::delta_checked(&measurement_start_system_time, &ticks).to_secs();
let measurement = model::TimeTemp{
time: measurement_time, // TODO just integer divide here?
temp: val as f32,
};
match state_button.state() {
State::RUNNING => plot.add_measurement(measurement, &mut lcd),
State::RESETTED => {
plot.set_measurement(model::TimeTemp{time: 0f32, temp: measurement.temp}, &mut lcd);
plot.update_ramp_start(&mut lcd);
},
State::STOPPED => {},
}
if let State::RUNNING = state_button.state() {
smoother.push_value(val);
let smooth_temp = smoother.get_average();
let ramp_target_temp = plot.ramp().evaluate(measurement_time);
let error = ramp_target_temp - smooth_temp;
let pid_value = pid_controller.cycle(error, &delta_measurement);
duty_cycle = (util::clamp(pid_value, 0f32, 1f32) * 1000f32) as usize;
lcd.draw_point_color(
Point{
x: plot.transform_time(measurement_time),
y: plot::Plot::transform_ranges(model::Range{from: 0f32, to: 1f32}, plot::Y_PX_RANGE, pid_value)
}, Layer::Layer2, Color::from_hex(0x0000ff).to_argb1555());
//let pid_clamped = util::clamp(pid_value, 0f32, 1f32);
//temp += (pid_clamped - 0.3) * delta_measurement.to_secs() * 1.0;
} else {
duty_cycle = 0;
}
last_measurement_system_time = ticks;
}
pwm_gpio.set(ticks.to_msecs() % 1000 < duty_cycle);
// poll for new touch data
let mut touches = false;
for touch in &touch::touches(&mut i2c_3).unwrap() {
touches = true;
let touch = model::Touch{
location: Point{
x: touch.x,
y: touch.y
},
time: ticks
};
let touch_event = match last_touch_event {
Some(TouchDown(_)) | Some(TouchMove(_)) => TouchMove(touch),
None | Some(TouchUp(_)) => TouchDown(touch),
};
//Do not allow changing ramp in stopped state
match state_button.state() {
State::RUNNING | State::RESETTED =>
plot.handle_touch(touch_event, &mut lcd),
_ => {},
}
last_touch_event = Some(touch_event);
}
// Deliver touch-up events
if !touches && last_touch_event.is_some() {
let touch_event = match last_touch_event.unwrap() {
TouchDown(t) | TouchMove(t) if time::delta(&ticks,&t.time | {
extern "C" {
static __DATA_LOAD: u32;
static __DATA_END: u32;
static mut __DATA_START: u32;
static mut __BSS_START: u32;
static mut __BSS_END: u32;
}
let data_load = &__DATA_LOAD;
let data_start = &mut __DATA_START;
let data_end = &__DATA_END;
let bss_start = &mut __BSS_START;
let bss_end = &__BSS_END;
r0::init_data(data_start, data_end, data_load);
r0::zero_bss(bss_start, bss_end);
stm32f7::heap::init();
| identifier_body | |
navtreeindex22.js | 8h.htm#gaeb0fca7dd680f3f8863edb56b5ce0e5b":[4,0,72,21],
"token-stack_8h.htm#gaeb9bc13c387e34deb35e981dd9c1a276":[4,0,72,10],
"token-stack_8h.htm#gaec433bb494f14daea12eb32616d685a8":[4,0,72,30],
"token-stack_8h.htm#gaed1a0a2b07a388b3c94cfc512dfa8335":[4,0,72,39],
"token-stack_8h.htm#gaee5596e09a93afc50340f794e61a64d4":[4,0,72,22],
"token-stack_8h.htm#gaf9abd8d0c4ff19e503e3477e4b6a863c":[4,0,72,3],
"token-stack_8h.htm#gafa42ac1c49b2e1c73cb194d2265a1940":[4,0,72,0],
"token-stack_8h.htm#gafdfa82d0d225df984972329f613947d2":[4,0,72,23],
"token-stack_8h_source.htm":[4,0,72],
"token_8h.htm":[4,0,73],
"token_8h.htm#ga1cdf205aa0e67b9508ce927b52a6875a":[4,0,73,0],
"token_8h.htm#ga632ff9591650d5721a447413a5d1505d":[4,0,73,4],
"token_8h.htm#ga7d7937bb8f438d0bd76a6d017d2db2fe":[4,0,73,1],
"token_8h.htm#ga8d7f03bfb03f29e528d36035ecbf4c23":[4,0,73,2],
"token_8h.htm#ga93c5e6c239ea037dc42a368b9a875244":[4,0,73,5],
"token_8h.htm#gaaea9227bf2ad0aba25b9eb01199c7fde":[4,0,73,6],
"token_8h.htm#gab7b75375d668ed0a061a3351e879a87e":[4,0,73,3],
"token_8h_source.htm":[4,0,73],
"trust-center_8h.htm":[4,0,75],
"trust-center_8h.htm#ga67f4fbc374dce84651af2c8c4d57a318":[4,0,75,0],
"trust-center_8h.htm#ga7965584d8894b1d609b6a688477baac1":[4,0,75,2],
"trust-center_8h.htm#gab5bb6cbd9eb86489736c265587283f10":[4,0,75,8],
"trust-center_8h.htm#gabc629fb2a3166dc1a75efec28c205f58":[4,0,75,3],
"trust-center_8h.htm#gac0c783a271b94329c30257e9b7bc60db":[4,0,75,9],
"trust-center_8h.htm#gad7df687b4305b8c264e13c6e48cadd69":[4,0,75,4],
"trust-center_8h.htm#gadaa9bc1ff2c7a6c6cedecb8b510ac058":[4,0,75,6],
"trust-center_8h.htm#gae8a8fea570328903562eb6cb45336ed7":[4,0,75,1],
"trust-center_8h.htm#gaea7b346ab4db5443316930328033285b":[4,0,75,5],
"trust-center_8h.htm#gaf74958c082b2b0324f2ba340c3af70df":[4,0,75,7],
"trust-center_8h_source.htm":[4,0,75],
"zigbee-device-common_8h.htm":[4,0,76],
"zigbee-device-common_8h.htm#ga19bfc6ee375a8a6b486cd28778fc2cb7":[4,0,76,10],
"zigbee-device-common_8h.htm#ga1b7070cabb35f428a4c381f82d14fa78":[4,0,76,15],
"zigbee-device-common_8h.htm#ga2e6a82dcf3c14945afc33300de676da7":[4,0,76,3],
"zigbee-device-common_8h.htm#ga3b2a0c15e9ac9c0c52e6ad8cf8ccd53b":[4,0,76,11],
"zigbee-device-common_8h.htm#ga59071ae3f0a8ad821cf367f482a40fa7":[4,0,76,4],
"zigbee-device-common_8h.htm#ga624f6f3c22a49ea82c3f871075cc453c":[4,0,76,7],
"zigbee-device-common_8h.htm#ga6856235a7e67c51062355eaafd19a65f":[4,0,76,5],
"zigbee-device-common_8h.htm#ga737bb3438c41f544593108abc1bd3450":[4,0,76,9],
"zigbee-device-common_8h.htm#ga7ba073238c67cc8af110065476237acc":[4,0,76,2],
"zigbee-device-common_8h.htm#ga88596cdf711341024c7a3a79fea3835b":[4,0,76,8],
"zigbee-device-common_8h.htm#gaa694c0ef35b71ae53f5467ddc147ceeb":[4,0,76,6],
"zigbee-device-common_8h.htm#gad0a0afbc86dbd58e2c0b5af9a98fab76":[4,0,76,1],
"zigbee-device-common_8h.htm#gae18ceee05ffa62dd1c68d4590eda41db":[4,0,76,12],
"zigbee-device-common_8h.htm#gae8dd77cbdab2e3d5ae35b120f844d888":[4,0,76,13],
"zigbee-device-common_8h.htm#gaf4fcdc0f3da658af9167bd512d7465e2":[4,0,76,14], | "zigbee-device-common_8h.htm#gaf8e641f05f5b8359571fa677ccb8c4b3":[4,0,76,0],
"zigbee-device-common_8h_source.htm":[4,0,76],
"zigbee-device-host_8h.htm":[4,0,77], | random_line_split | |
index.ts | utOptions: InputOptions, plugin: Plugin) {
if (plugin.options) return plugin.options(inputOptions) || inputOptions;
return inputOptions;
}
function getInputOptions(rawInputOptions: GenericConfigObject): any {
if (!rawInputOptions) {
throw new Error('You must supply an options object to rollup');
}
// inputOptions: input 从命令行或配置文件与默认配置合并
// deprecations: 过时的参数列表,过时的参数,仍然会写入正确的地方
// optionError: 错误信息
let { inputOptions, deprecations, optionError } = mergeOptions({
config: rawInputOptions,
deprecateConfig: { input: true }
});
// 如果存在错误信息,直接输出到终端
if (optionError) inputOptions.onwarn({ message: optionError, code: 'UNKNOWN_OPTION' });
// 如果存在过时参数,直接输出到终端
if (deprecations.length) addDeprecations(deprecations, inputOptions.onwarn);
// 检查是否存在一些 属性 如 transform 应放到插件里。(应该不会出现这种情况,因为inputOptions不可能会有)
checkInputOptions(inputOptions);
// 整理插件列表,过滤掉null,undefined等无效的插件,并确保是数组
const plugins = inputOptions.plugins;
inputOptions.plugins = Array.isArray(plugins)
? plugins.filter(Boolean)
: plugins
? [plugins]
: [];
// 依次调用每个插件的options方法
inputOptions = inputOptions.plugins.reduce(applyOptionHook, inputOptions);
// 实验代码分割,
if (!inputOptions.experimentalCodeSplitting) {
inputOptions.inlineDynamicImports = true; // 内联动态导入
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: '"manualChunks" option is only supported for experimentalCodeSplitting.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: '"optimizeChunks" option is only supported for experimentalCodeSplitting.'
});
if (inputOptions.input instanceof Array || typeof inputOptions.input === 'object')
error({
code: 'INVALID_OPTION',
message: 'Multiple inputs are only supported for experimentalCodeSplitting.'
});
}
// 内联动态导入
if (inputOptions.inlineDynamicImports) {
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: '"manualChunks" option is not supported for inlineDynamicImports.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: '"optimizeChunks" option is not supported for inlineDynamicImports.'
});
if (inputOptions.input instanceof Array || typeof inputOptions.input === 'object')
error({
code: 'INVALID_OPTION',
message: 'Multiple inputs are not supported for inlineDynamicImports.'
});
} else if (inputOptions.experimentalPreserveModules) {
// 实验保存模块
if (inputOptions.inlineDynamicImports)
error({
code: 'INVALID_OPTION',
message: `experimentalPreserveModules does not support the inlineDynamicImports option.`
});
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: 'experimentalPreserveModules does not support the manualChunks option.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: 'experimentalPreserveModules does not support the optimizeChunks option.'
});
}
return inputOptions;
}
let curWatcher: Watcher;
export function setWatcher(watcher: Watcher) {
curWatcher = watcher;
}
export default function rollup(
rawInputOptions: GenericConfigObject
): Promise<RollupSingleFileBuild | RollupBuild> {
try {
// 从命令行,配置文件,默认配置中获取配置信息,并调用每个插件的options方法
const inputOptions = getInputOptions(rawInputOptions);
// 当perf为true时,给插件的指定方法注入打印开始时间,结束时间
initialiseTimers(inputOptions);
const graph = new Graph(inputOptions, curWatcher);
curWatcher = undefined;
// remove the cache option from the memory after graph creation (cache is not used anymore)
const useCache = rawInputOptions.cache !== false;
delete inputOptions.cache;
delete rawInputOptions.cache;
timeStart('BUILD', 1);
return graph.pluginDriver
.hookParallel('buildStart')
.then(() =>
graph.build(
inputOptions.input,
inputOptions.manualChunks,
inputOptions.inlineDynamicImports,
inputOptions.experimentalPreserveModules
)
)
.then(
chunks =>
graph.pluginDriver.hookParallel('buildEnd').then(() => {
return chunks;
}),
err =>
graph.pluginDriver.hookParallel('buildEnd', [err]).then(() => {
throw err;
})
)
.then(chunks => {
timeEnd('BUILD', 1);
// TODO: deprecate legacy single chunk return
let singleChunk: Chunk | void;
const singleInput =
typeof inputOptions.input === 'string' ||
(inputOptions.input instanceof Array && inputOptions.input.length === 1);
//let imports: string[], exports: string[];
if (!inputOptions.experimentalPreserveModules) {
if (singleInput) {
for (const chunk of chunks) {
if (chunk.entryModule === undefined) continue;
if (singleChunk) {
singleChunk = undefined;
break;
}
singleChunk = chunk;
}
}
}
// ensure we only do one optimization pass per build
let optimized = false;
function generate(rawOutputOptions: GenericConfigObject, isWrite: boolean) {
const outputOptions = normalizeOutputOptions(inputOptions, rawOutputOptions);
if (inputOptions.experimentalCodeSplitting) {
if (typeof outputOptions.file === 'string' && typeof outputOptions.dir === 'string')
error({
code: 'INVALID_OPTION',
message:
'Build must set either output.file for a single-file build or output.dir when generating multiple chunks.'
});
if (chunks.length > 1) {
if (outputOptions.format === 'umd' || outputOptions.format === 'iife')
error({
code: 'INVALID_OPTION',
message:
'UMD and IIFE output formats are not supported with the experimentalCodeSplitting option.'
});
if (outputOptions.sourcemapFile)
error({
code: 'INVALID_OPTION',
message: '"sourcemapFile" is only supported for single-file builds.'
});
}
if (!singleChunk && typeof outputOptions.file === 'string')
error({
code: 'INVALID_OPTION',
message: singleInput
? 'When building a bundle using dynamic imports, the output.dir option must be used, not output.file. Alternatively set inlineDynamicImports: true to output a single file.'
: 'When building multiple entry point inputs, the output.dir option must be used, not output.file.'
});
}
if (!outputOptions.file && inputOptions.experimentalCodeSplitting)
singleChunk = undefined;
timeStart('GENERATE', 1);
// populate asset files into output
const assetFileNames = outputOptions.assetFileNames || 'assets/[name]-[hash][extname]';
const outputBundle: OutputBundle = graph.finaliseAssets(assetFileNames);
const inputBase = commondir(
chunks
.filter(chunk => chunk.entryModule && isAbsolute(chunk.entryModule.id))
.map(chunk => chunk.entryModule.id)
);
return graph.pluginDriver
.hookParallel('renderStart')
.then(() => createAddons(graph, outputOptions))
.then(addons => {
// pre-render all chunks
for (const chunk of chunks) {
if (!inputOptions.experimentalPreserveModules)
chunk.generateInternalExports(outputOptions);
if (chunk.isEntryModuleFacade)
chunk.exportMode = getExportMode(chunk, outputOptions);
}
for (const chunk of chunks) {
chunk.preRender(outputOptions, inputBase);
}
if (!optimized && inputOptions.optimizeChunks) {
optimizeChunks(chunks, outputOptions, inputOptions.chunkGroupingSize, inputBase);
optimized = true;
}
// name all chunks
const usedIds: Record<string, true> = {};
for (let i = 0; i < chunks.length; i++) {
const chunk = chunks[i];
if (chunk === singleChunk) {
singleChunk.id = basename(
outputOptions.file ||
(inputOptions.input instanceof Array
? inputOptions.input[0]
: <string>inputOptions.input)
);
} else if (inputOptions.experimentalPreserveModules) {
chunk.generateIdPreserveModules(inputBase, usedIds);
} else {
let pattern, patternName;
if (chunk.isEntryModuleFacade) {
pattern = outputOptions.entryFileNames || '[name].js';
patternName = 'output.entryFileNames';
} else {
pattern = outputOptions.chunkFileNames || | yOptionHook(inp | identifier_name | |
index.ts | (!rawInputOptions) {
throw new Error('You must supply an options object to rollup');
}
// inputOptions: input 从命令行或配置文件与默认配置合并
// deprecations: 过时的参数列表,过时的参数,仍然会写入正确的地方
// optionError: 错误信息
let { inputOptions, deprecations, optionError } = mergeOptions({
config: rawInputOptions,
deprecateConfig: { input: true }
});
// 如果存在错误信息,直接输出到终端
if (optionError) inputOptions.onwarn({ message: optionError, code: 'UNKNOWN_OPTION' });
// 如果存在过时参数,直接输出到终端
if (deprecations.length) addDeprecations(deprecations, inputOptions.onwarn);
// 检查是否存在一些 属性 如 transform 应放到插件里。(应该不会出现这种情况,因为inputOptions不可能会有)
checkInputOptions(inputOptions);
// 整理插件列表,过滤掉null,undefined等无效的插件,并确保是数组
const plugins = inputOptions.plugins;
inputOptions.plugins = Array.isArray(plugins)
? plugins.filter(Boolean)
: plugins
? [plugins]
: [];
// 依次调用每个插件的options方法
inputOptions = inputOptions.plugins.reduce(applyOptionHook, inputOptions);
// 实验代码分割,
if (!inputOptions.experimentalCodeSplitting) {
inputOptions.inlineDynamicImports = true; // 内联动态导入
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: '"manualChunks" option is only supported for experimentalCodeSplitting.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: '"optimizeChunks" option is only supported for experimentalCodeSplitting.'
});
if (inputOptions.input instanceof Array || typeof inputOptions.input === 'object')
error({
code: 'INVALID_OPTION',
message: 'Multiple inputs are only supported for experimentalCodeSplitting.'
});
}
// 内联动态导入
if (inputOptions.inlineDynamicImports) {
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: '"manualChunks" option is not supported for inlineDynamicImports.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: '"optimizeChunks" option is not supported for inlineDynamicImports.'
});
if (inputOptions.input instanceof Array || typeof inputOptions.input === 'object')
error({
code: 'INVALID_OPTION',
message: 'Multiple inputs are not supported for inlineDynamicImports.'
});
} else if (inputOptions.experimentalPreserveModules) {
// 实验保存模块
if (inputOptions.inlineDynamicImports)
error({
code: 'INVALID_OPTION',
message: `experimentalPreserveModules does not support the inlineDynamicImports option.`
});
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: 'experimentalPreserveModules does not support the manualChunks option.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: 'experimentalPreserveModules does not support the optimizeChunks option.'
});
}
return inputOptions;
}
let curWatcher: Watcher;
export function setWatcher(watcher: Watcher) {
curWatcher = watcher;
}
export default function rollup(
rawInputOptions: GenericConfigObject
): Promise<RollupSingleFileBuild | RollupBuild> {
try {
// 从命令行,配置文件,默认配置中获取配置信息,并调用每个插件的options方法
const inputOptions = getInputOptions(rawInputOptions);
// 当perf为true时,给插件的指定方法注入打印开始时间,结束时间
initialiseTimers(inputOptions);
const graph = new Graph(inputOptions, curWatcher);
curWatcher = undefined;
// remove the cache option from the memory after graph creation (cache is not used anymore)
const useCache = rawInputOptions.cache !== false;
delete inputOptions.cache;
delete rawInputOptions.cache;
timeStart('BUILD', 1);
return graph.pluginDriver
.hookParallel('buildStart')
.then(() =>
graph.build(
inputOptions.input,
inputOptions.manualChunks,
inputOptions.inlineDynamicImports,
inputOptions.experimentalPreserveModules
)
)
.then(
chunks =>
graph.pluginDriver.hookParallel('buildEnd').then(() => {
return chunks;
}),
err =>
graph.pluginDriver.hookParallel('buildEnd', [err]).then(() => {
throw err;
})
)
.then(chunks => {
timeEnd('BUILD', 1);
// TODO: deprecate legacy single chunk return
let singleChunk: Chunk | void;
const singleInput =
typeof inputOptions.input === 'string' ||
(inputOptions.input instanceof Array && inputOptions.input.length === 1);
//let imports: string[], exports: string[];
if (!inputOptions.experimentalPreserveModules) {
if (singleInput) {
for (const chunk of chunks) {
if (chunk.entryModule === undefined) continue;
if (singleChunk) {
singleChunk = undefined;
break;
}
singleChunk = chunk;
}
}
}
// ensure we only do one optimization pass per build
let optimized = false;
function generate(rawOutputOptions: GenericConfigObject, isWrite: boolean) {
const outputOptions = normalizeOutputOptions(inputOptions, rawOutputOptions);
if (inputOptions.experimentalCodeSplitting) {
if (typeof outputOptions.file === 'string' && typeof outputOptions.dir === 'string')
error({
code: 'INVALID_OPTION',
message:
'Build must set either output.file for a single-file build or output.dir when generating multiple chunks.'
});
if (chunks.length > 1) {
if (outputOptions.format === 'umd' || outputOptions.format === 'iife')
error({
code: 'INVALID_OPTION',
message:
'UMD and IIFE output formats are not supported with the experimentalCodeSplitting option.'
});
if (outputOptions.sourcemapFile)
error({
code: 'INVALID_OPTION',
message: '"sourcemapFile" is only supported for single-file builds.'
});
}
if (!singleChunk && typeof outputOptions.file === 'string')
error({
code: 'INVALID_OPTION',
message: singleInput
? 'When building a bundle using dynamic imports, the output.dir option must be used, not output.file. Alternatively set inlineDynamicImports: true to output a single file.'
: 'When building multiple entry point inputs, the output.dir option must be used, not output.file.'
});
}
if (!outputOptions.file && inputOptions.experimentalCodeSplitting)
singleChunk = undefined;
timeStart('GENERATE', 1);
// populate asset files into output
const assetFileNames = outputOptions.assetFileNames || 'assets/[name]-[hash][extname]';
const outputBundle: OutputBundle = graph.finaliseAssets(assetFileNames);
const inputBase = commondir(
chunks
.filter(chunk => chunk.entryModule && isAbsolute(chunk.entryModule.id))
.map(chunk => chunk.entryModule.id)
);
return graph.pluginDriver
.hookParallel('renderStart')
.then(() => createAddons(graph, outputOptions)) | .then(addons => {
// pre-render all chunks
for (const chunk of chunks) {
if (!inputOptions.experimentalPreserveModules)
chunk.generateInternalExports(outputOptions);
if (chunk.isEntryModuleFacade)
chunk.exportMode = getExportMode(chunk, outputOptions);
}
for (const chunk of chunks) {
chunk.preRender(outputOptions, inputBase);
}
if (!optimized && inputOptions.optimizeChunks) {
optimizeChunks(chunks, outputOptions, inputOptions.chunkGroupingSize, inputBase);
optimized = true;
}
// name all chunks
const usedIds: Record<string, true> = {};
for (let i = 0; i < chunks.length; i++) {
const chunk = chunks[i];
if (chunk === singleChunk) {
singleChunk.id = basename(
outputOptions.file ||
(inputOptions.input instanceof Array
? inputOptions.input[0]
: <string>inputOptions.input)
);
} else if (inputOptions.experimentalPreserveModules) {
chunk.generateIdPreserveModules(inputBase, usedIds);
} else {
let pattern, patternName;
if (chunk.isEntryModuleFacade) {
pattern = outputOptions.entryFileNames || '[name].js';
patternName = 'output.entryFileNames';
} else {
pattern = outputOptions.chunkFileNames || '[name]-[hash].js';
patternName = 'output.chunkFileNames';
}
chunk.generateId(pattern, patternName, addons, outputOptions, usedIds);
}
usedIds[chunk | random_line_split | |
index.ts | function checkInputOptions(options: InputOptions) {
if (options.transform || options.load || options.resolveId || options.resolveExternal) {
throw new Error(
'The `transform`, `load`, `resolveId` and `resolveExternal` options are deprecated in favour of a unified plugin API. See https://rollupjs.org/guide/en#plugins'
);
}
}
function checkOutputOptions(options: OutputOptions) {
if (<string>options.format === 'es6') {
error({
message: 'The `es6` output format is deprecated – use `es` instead',
url: `https://rollupjs.org/guide/en#output-format-f-format`
});
}
if (!options.format) {
error({
message: `You must specify output.format, which can be one of 'amd', 'cjs', 'system', 'esm', 'iife' or 'umd'`,
url: `https://rollupjs.org/guide/en#output-format-f-format`
});
}
if (options.moduleId) {
if (options.amd) throw new Error('Cannot have both output.amd and output.moduleId');
}
}
const throwAsyncGenerateError = {
get() {
throw new Error(`bundle.generate(...) now returns a Promise instead of a { code, map } object`);
}
};
function applyOptionHook(inputOptions: InputOptions, plugin: Plugin) {
if (plugin.options) return plugin.options(inputOptions) || inputOptions;
return inputOptions;
}
function getInputOptions(rawInputOptions: GenericConfigObject): any {
if (!rawInputOptions) {
throw new Error('You must supply an options object to rollup');
}
// inputOptions: input 从命令行或配置文件与默认配置合并
// deprecations: 过时的参数列表,过时的参数,仍然会写入正确的地方
// optionError: 错误信息
let { inputOptions, deprecations, optionError } = mergeOptions({
config: rawInputOptions,
deprecateConfig: { input: true }
});
// 如果存在错误信息,直接输出到终端
if (optionError) inputOptions.onwarn({ message: optionError, code: 'UNKNOWN_OPTION' });
// 如果存在过时参数,直接输出到终端
if (deprecations.length) addDeprecations(deprecations, inputOptions.onwarn);
// 检查是否存在一些 属性 如 transform 应放到插件里。(应该不会出现这种情况,因为inputOptions不可能会有)
checkInputOptions(inputOptions);
// 整理插件列表,过滤掉null,undefined等无效的插件,并确保是数组
const plugins = inputOptions.plugins;
inputOptions.plugins = Array.isArray(plugins)
? plugins.filter(Boolean)
: plugins
? [plugins]
: [];
// 依次调用每个插件的options方法
inputOptions = inputOptions.plugins.reduce(applyOptionHook, inputOptions);
// 实验代码分割,
if (!inputOptions.experimentalCodeSplitting) {
inputOptions.inlineDynamicImports = true; // 内联动态导入
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: '"manualChunks" option is only supported for experimentalCodeSplitting.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: '"optimizeChunks" option is only supported for experimentalCodeSplitting.'
});
if (inputOptions.input instanceof Array || typeof inputOptions.input === 'object')
error({
code: 'INVALID_OPTION',
message: 'Multiple inputs are only supported for experimentalCodeSplitting.'
});
}
// 内联动态导入
if (inputOptions.inlineDynamicImports) {
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: '"manualChunks" option is not supported for inlineDynamicImports.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: '"optimizeChunks" option is not supported for inlineDynamicImports.'
});
if (inputOptions.input instanceof Array || typeof inputOptions.input === 'object')
error({
code: 'INVALID_OPTION',
message: 'Multiple inputs are not supported for inlineDynamicImports.'
});
} else if (inputOptions.experimentalPreserveModules) {
// 实验保存模块
if (inputOptions.inlineDynamicImports)
error({
code: 'INVALID_OPTION',
message: `experimentalPreserveModules does not support the inlineDynamicImports option.`
});
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: 'experimentalPreserveModules does not support the manualChunks option.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: 'experimentalPreserveModules does not support the optimizeChunks option.'
});
}
return inputOptions;
}
let curWatcher: Watcher;
export function setWatcher(watcher: Watcher) {
curWatcher = watcher;
}
export default function rollup(
rawInputOptions: GenericConfigObject
): Promise<RollupSingleFileBuild | RollupBuild> {
try {
// 从命令行,配置文件,默认配置中获取配置信息,并调用每个插件的options方法
const inputOptions = getInputOptions(rawInputOptions);
// 当perf为true时,给插件的指定方法注入打印开始时间,结束时间
initialiseTimers(inputOptions);
const graph = new Graph(inputOptions, curWatcher);
curWatcher = undefined;
// remove the cache option from the memory after graph creation (cache is not used anymore)
const useCache = rawInputOptions.cache !== false;
delete inputOptions.cache;
delete rawInputOptions.cache;
timeStart('BUILD', 1);
return graph.pluginDriver
.hookParallel('buildStart')
.then(() =>
graph.build(
inputOptions.input,
inputOptions.manualChunks,
inputOptions.inlineDynamicImports,
inputOptions.experimentalPreserveModules
)
)
.then(
chunks =>
graph.pluginDriver.hookParallel('buildEnd').then(() => {
return chunks;
}),
err =>
graph.pluginDriver.hookParallel('buildEnd', [err]).then(() => {
throw err;
})
)
.then(chunks => {
timeEnd('BUILD', 1);
// TODO: deprecate legacy single chunk return
let singleChunk: Chunk | void;
const singleInput =
typeof inputOptions.input === 'string' ||
(inputOptions.input instanceof Array && inputOptions.input.length === 1);
//let imports: string[], exports: string[];
if (!inputOptions.experimentalPreserveModules) {
if (singleInput) {
for (const chunk of chunks) {
if (chunk.entryModule === undefined) continue;
if (singleChunk) {
singleChunk = undefined;
break;
}
singleChunk = chunk;
}
}
}
// ensure we only do one optimization pass per build
let optimized = false;
function generate(rawOutputOptions: GenericConfigObject, isWrite: boolean) {
const outputOptions = normalizeOutputOptions(inputOptions, rawOutputOptions);
if (inputOptions.experimentalCodeSplitting) {
if (typeof outputOptions.file === 'string' && typeof outputOptions.dir === 'string')
error({
code: 'INVALID_OPTION',
message:
'Build must set either output.file for a single-file build or output.dir when generating multiple chunks.'
});
if (chunks.length > 1) {
if (outputOptions.format === 'umd' || outputOptions.format === 'iife')
error({
code: 'INVALID_OPTION',
message:
'UMD and IIFE output formats are not supported with the experimentalCodeSplitting option.'
});
if (outputOptions.sourcemapFile)
error({
code: 'INVALID_OPTION',
message: '"sourcemapFile" is only supported for single-file builds.'
});
}
if (!singleChunk && typeof outputOptions.file === 'string')
error({
code: 'INVALID_OPTION',
message: singleInput
? 'When building a bundle using dynamic imports, the output.dir option must be used, not output.file. Alternatively set inlineDynamicImports: true to output a single file.'
: 'When building multiple entry point inputs, the output.dir option must be used, not output.file.'
});
}
if (!outputOptions.file && inputOptions.experimentalCodeSplitting)
singleChunk = undefined;
timeStart('GENERATE', 1);
// populate asset files into output
const assetFileNames = outputOptions.assetFileNames || 'assets/[name]-[hash][extname]';
const outputBundle: OutputBundle = graph.finaliseAssets(assetFileNames);
const inputBase = commondir(
chunks
.filter(chunk => chunk.entryModule && isAbsolute(chunk.entryModule.id))
.map(chunk => chunk.entry | {
const message = `The following options have been renamed — please update your config: ${deprecations
.map(option => `${option.old} -> ${option.new}`)
.join(', ')}`;
warn({
code: 'DEPRECATED_OPTIONS',
message,
deprecations
});
}
| identifier_body | |
index.ts |
function checkOutputOptions(options: OutputOptions) {
if (<string>options.format === 'es6') {
error({
message: 'The `es6` output format is deprecated – use `es` instead',
url: `https://rollupjs.org/guide/en#output-format-f-format`
});
}
if (!options.format) {
error({
message: `You must specify output.format, which can be one of 'amd', 'cjs', 'system', 'esm', 'iife' or 'umd'`,
url: `https://rollupjs.org/guide/en#output-format-f-format`
});
}
if (options.moduleId) {
if (options.amd) throw new Error('Cannot have both output.amd and output.moduleId');
}
}
const throwAsyncGenerateError = {
get() {
throw new Error(`bundle.generate(...) now returns a Promise instead of a { code, map } object`);
}
};
function applyOptionHook(inputOptions: InputOptions, plugin: Plugin) {
if (plugin.options) return plugin.options(inputOptions) || inputOptions;
return inputOptions;
}
function getInputOptions(rawInputOptions: GenericConfigObject): any {
if (!rawInputOptions) {
throw new Error('You must supply an options object to rollup');
}
// inputOptions: input 从命令行或配置文件与默认配置合并
// deprecations: 过时的参数列表,过时的参数,仍然会写入正确的地方
// optionError: 错误信息
let { inputOptions, deprecations, optionError } = mergeOptions({
config: rawInputOptions,
deprecateConfig: { input: true }
});
// 如果存在错误信息,直接输出到终端
if (optionError) inputOptions.onwarn({ message: optionError, code: 'UNKNOWN_OPTION' });
// 如果存在过时参数,直接输出到终端
if (deprecations.length) addDeprecations(deprecations, inputOptions.onwarn);
// 检查是否存在一些 属性 如 transform 应放到插件里。(应该不会出现这种情况,因为inputOptions不可能会有)
checkInputOptions(inputOptions);
// 整理插件列表,过滤掉null,undefined等无效的插件,并确保是数组
const plugins = inputOptions.plugins;
inputOptions.plugins = Array.isArray(plugins)
? plugins.filter(Boolean)
: plugins
? [plugins]
: [];
// 依次调用每个插件的options方法
inputOptions = inputOptions.plugins.reduce(applyOptionHook, inputOptions);
// 实验代码分割,
if (!inputOptions.experimentalCodeSplitting) {
inputOptions.inlineDynamicImports = true; // 内联动态导入
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: '"manualChunks" option is only supported for experimentalCodeSplitting.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: '"optimizeChunks" option is only supported for experimentalCodeSplitting.'
});
if (inputOptions.input instanceof Array || typeof inputOptions.input === 'object')
error({
code: 'INVALID_OPTION',
message: 'Multiple inputs are only supported for experimentalCodeSplitting.'
});
}
// 内联动态导入
if (inputOptions.inlineDynamicImports) {
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: '"manualChunks" option is not supported for inlineDynamicImports.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: '"optimizeChunks" option is not supported for inlineDynamicImports.'
});
if (inputOptions.input instanceof Array || typeof inputOptions.input === 'object')
error({
code: 'INVALID_OPTION',
message: 'Multiple inputs are not supported for inlineDynamicImports.'
});
} else if (inputOptions.experimentalPreserveModules) {
// 实验保存模块
if (inputOptions.inlineDynamicImports)
error({
code: 'INVALID_OPTION',
message: `experimentalPreserveModules does not support the inlineDynamicImports option.`
});
if (inputOptions.manualChunks)
error({
code: 'INVALID_OPTION',
message: 'experimentalPreserveModules does not support the manualChunks option.'
});
if (inputOptions.optimizeChunks)
error({
code: 'INVALID_OPTION',
message: 'experimentalPreserveModules does not support the optimizeChunks option.'
});
}
return inputOptions;
}
let curWatcher: Watcher;
export function setWatcher(watcher: Watcher) {
curWatcher = watcher;
}
export default function rollup(
rawInputOptions: GenericConfigObject
): Promise<RollupSingleFileBuild | RollupBuild> {
try {
// 从命令行,配置文件,默认配置中获取配置信息,并调用每个插件的options方法
const inputOptions = getInputOptions(rawInputOptions);
// 当perf为true时,给插件的指定方法注入打印开始时间,结束时间
initialiseTimers(inputOptions);
const graph = new Graph(inputOptions, curWatcher);
curWatcher = undefined;
// remove the cache option from the memory after graph creation (cache is not used anymore)
const useCache = rawInputOptions.cache !== false;
delete inputOptions.cache;
delete rawInputOptions.cache;
timeStart('BUILD', 1);
return graph.pluginDriver
.hookParallel('buildStart')
.then(() =>
graph.build(
inputOptions.input,
inputOptions.manualChunks,
inputOptions.inlineDynamicImports,
inputOptions.experimentalPreserveModules
)
)
.then(
chunks =>
graph.pluginDriver.hookParallel('buildEnd').then(() => {
return chunks;
}),
err =>
graph.pluginDriver.hookParallel('buildEnd', [err]).then(() => {
throw err;
})
)
.then(chunks => {
timeEnd('BUILD', 1);
// TODO: deprecate legacy single chunk return
let singleChunk: Chunk | void;
const singleInput =
typeof inputOptions.input === 'string' ||
(inputOptions.input instanceof Array && inputOptions.input.length === 1);
//let imports: string[], exports: string[];
if (!inputOptions.experimentalPreserveModules) {
if (singleInput) {
for (const chunk of chunks) {
if (chunk.entryModule === undefined) continue;
if (singleChunk) {
singleChunk = undefined;
break;
}
singleChunk = chunk;
}
}
}
// ensure we only do one optimization pass per build
let optimized = false;
function generate(rawOutputOptions: GenericConfigObject, isWrite: boolean) {
const outputOptions = normalizeOutputOptions(inputOptions, rawOutputOptions);
if (inputOptions.experimentalCodeSplitting) {
if (typeof outputOptions.file === 'string' && typeof outputOptions.dir === 'string')
error({
code: 'INVALID_OPTION',
message:
'Build must set either output.file for a single-file build or output.dir when generating multiple chunks.'
});
if (chunks.length > 1) {
if (outputOptions.format === 'umd' || outputOptions.format === 'iife')
error({
code: 'INVALID_OPTION',
message:
'UMD and IIFE output formats are not supported with the experimentalCodeSplitting option.'
});
if (outputOptions.sourcemapFile)
error({
code: 'INVALID_OPTION',
message: '"sourcemapFile" is only supported for single-file builds.'
});
}
if (!singleChunk && typeof outputOptions.file === 'string')
error({
code: 'INVALID_OPTION',
message: singleInput
? 'When building a bundle using dynamic imports, the output.dir option must be used, not output.file. Alternatively set inlineDynamicImports: true to output a single file.'
: 'When building multiple entry point inputs, the output.dir option must be used, not output.file.'
});
}
if (!outputOptions.file && inputOptions.experimentalCodeSplitting)
singleChunk = undefined;
timeStart('GENERATE', 1);
// populate asset files into output
const assetFileNames = outputOptions.assetFileNames || 'assets/[name]-[hash][extname]';
const outputBundle: OutputBundle = graph.finaliseAssets(assetFileNames);
const inputBase = commondir(
chunks
.filter(chunk => chunk.entryModule && isAbsolute(chunk.entryModule.id))
.map(chunk => chunk.entryModule.id)
);
return graph.pluginDriver
.hookParallel('renderStart')
.then(() => createAddons(graph, outputOptions))
.then(addons => {
// pre-render all chunks
for (const chunk of chunks) {
if (!inputOptions.experimentalPreserveModules)
chunk.generateInternalExports(outputOptions);
if (chunk.isEntryModuleFacade)
chunk.export | throw new Error(
'The `transform`, `load`, `resolveId` and `resolveExternal` options are deprecated in favour of a unified plugin API. See https://rollupjs.org/guide/en#plugins'
);
}
} | conditional_block | |
model_probabilistic.py | ization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_0_act = self.layer_0(self.x_ph)
layer_0_out = tf.layers.dropout(layer_0_act, rate = self.DROP, training = self.is_training)
self.layer_1 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_1_act = self.layer_1(layer_0_out)
layer_1_out = tf.layers.dropout(layer_1_act, rate = self.DROP, training = self.is_training)
self.layer_2 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_2_act = self.layer_2(layer_1_out)
layer_2_out = layer_2_act
self.layer_3 = tfp.layers.DenseLocalReparameterization(
self.targets_shape[1],
activation = out_activation,
)
layer_3_out = self.layer_3(layer_2_out)
self.net_out = layer_3_out
self.scales = tf.nn.softplus(tf.Variable(tf.zeros(1)))
self.y_pred = tf_dist.Normal(self.net_out, scale = self.scales)
def construct_inference(self):
self.is_graph_constructed = True
with self.graph.as_default():
self.kl = sum(self.layer_0.losses) / float(self.batch_size)
self.kl += sum(self.layer_1.losses) / float(self.batch_size)
self.kl += sum(self.layer_2.losses) / float(self.batch_size)
self.kl += sum(self.layer_3.losses) / float(self.batch_size)
self.reg_loss = - tf.reduce_mean( self.y_pred.log_prob(self.y_ph) )
self.loss = self.reg_loss + self.REG * self.kl
self.optimizer = tf.compat.v1.train.AdamOptimizer(self.LEARNING_RATE)
self.train_op = self.optimizer.minimize(self.loss)
self.init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
self.sess = tf.compat.v1.Session(graph = self.graph)
with self.sess.as_default():
self.sess.run(self.init_op)
def train(self, train_features, train_targets, valid_features, valid_targets, model_path, plot = False, targets = 'same'):
from sklearn.metrics import r2_score
if not os.path.isdir(model_path): os.mkdir(model_path)
logfile = open('%s/logfile.dat' % model_path, 'w')
logfile.close()
if not self.is_graph_constructed: self.construct_inference()
train_feat_scaled = self.get_scaled_features(train_features)
train_targ_scaled = self.get_scaled_targets(train_targets)
valid_feat_scaled = self.get_scaled_features(valid_features)
valid_targ_scaled = self.get_scaled_targets(valid_targets)
min_target, max_target = np.minimum(np.amin(train_targets, axis = 0), np.amin(valid_targets, axis = 0)), np.maximum(np.amax(train_targets, axis = 0), np.amax(valid_targets, axis = 0))
if targets == 'probs':
min_target = 1. / (1. + np.exp( - min_target))
max_target = 1. / (1. + np.exp( - max_target))
batch_train_gen = self._generator(train_feat_scaled, train_targ_scaled, self.batch_size)
batch_valid_gen = self._generator(valid_feat_scaled, valid_targ_scaled, self.batch_size)
train_errors, valid_errors = [], []
with self.graph.as_default():
with self.sess.as_default():
self.saver = tf.compat.v1.train.Saver()
if plot:
import matplotlib.pyplot as plt
import seaborn as sns
colors = sns.color_palette('RdYlGn', 4)
plt.ion()
plt.style.use('dark_background')
fig = plt.figure(figsize = (14, 5))
ax0 = plt.subplot2grid((1, 3), (0, 0))
ax1 = plt.subplot2grid((1, 3), (0, 1))
ax2 = plt.subplot2grid((1, 3), (0, 2))
for epoch in range(self.max_iter):
train_x, train_y = next(batch_train_gen)
valid_x, valid_y = next(batch_valid_gen)
self.sess.run(self.train_op, feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: True})
if epoch % 200 == 0:
valid_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: valid_x, self.is_training: False})
valid_y = self.get_raw_targets(valid_y)
valid_preds = self.get_raw_targets(valid_preds)
if targets == 'probs':
valid_y = 1. / (1. + np.exp( - valid_y))
valid_preds = 1. / (1. + np.exp( - valid_preds))
try:
valid_r2 = r2_score(valid_y, valid_preds)
except:
valid_r2 = np.nan
valid_errors.append(valid_r2)
_1_, _2_ = self.sess.run([self.reg_loss, self.kl], feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: False})
print('...', _1_, _2_)
train_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: train_x, self.is_training: False})
train_y = self.get_raw_targets(train_y)
train_preds = self.get_raw_targets(train_preds)
try:
train_r2 = r2_score(train_y, train_preds)
except:
train_r2 = np.nan
train_errors.append(train_r2)
if targets == 'probs':
train_y = 1. / (1. + np.exp( - train_y))
train_preds = 1. / (1. + np.exp( - train_preds))
logfile = open('%s/logfile.dat' % model_path, 'a')
logfile.write('%d\t%.5f\t%.5f\n' % (epoch, train_r2, valid_r2))
logfile.close()
# define break condition --> last improvement happened more than 100 epochs ago
max_r2_index = np.argmax(valid_errors)
if len(valid_errors) - max_r2_index > 100: break
if max_r2_index == len(valid_errors) - 1:
self.saver.save(self.sess, '%s/model.ckpt' % model_path)
new_line = 'EVALUATION: %d (%d)\t%.5f\t%.5f' % ( len(valid_errors) - max_r2_index, len(valid_errors), train_errors[-1], valid_errors[-1])
print(new_line)
if plot:
train_preds_scaled = train_preds
train_trues_scaled = train_y
valid_preds_scaled = valid_preds
valid_trues_scaled = valid_y
ax0.cla()
ax1.cla()
ax2.cla()
ax0.plot([min_target[0], max_target[0]], [min_target[0], max_target[0]], lw = 3, color = 'w', alpha = 0.5)
ax0.plot(train_trues_scaled[:, 0], train_preds_scaled[:, 0], marker = '.', ls = '', color = colors[-1], alpha = 0.5)
ax0.plot(valid_trues_scaled[:, 0], valid_preds_scaled[:, 0], marker = '.', ls = '', color = colors[0], alpha = 0.5)
if len(min_target) > 1:
ax1.plot([min_target[1], max_target[1]], [min_target[1], max_target[1]], lw = 3, color = 'w', alpha = 0.5)
ax1.plot(train_trues_scaled[:, 1], train_preds_scaled[:, 1], marker = '.', ls = '', color = colors[-1], alpha = 0.5)
ax1.plot(valid_trues_scaled[:, 1], valid_preds_scaled[:, 1], marker = '.', ls = '', color = colors[0], alpha = 0.5)
RANGE = 50
ax2.plot(np.arange(len(train_errors[-RANGE:])) + len(train_errors[-RANGE:]), train_errors[-RANGE:], lw = 3, color = colors[-1])
ax2.plot(np.arange(len(valid_errors[-RANGE:])) + len(valid_errors[-RANGE:]), valid_errors[-RANGE:], lw = 3, color = colors[0])
plt.pause(0.05)
def restore(self, model_path):
if not self.is_graph_constructed: self.construct_inference()
self.sess = tf.compat.v1.Session(graph = self.graph)
self.saver = tf.compat.v1.train.Saver()
try:
self.saver.restore(self.sess, model_path)
return True
except AttributeError:
return False
def predict(self, input_raw): |
input_scaled = self.get_scaled_features(input_raw)
| random_line_split | |
model_probabilistic.py | [key] for key in details}
self.features_shape = self.scaling['features_shape']
self.targets_shape = self.scaling['targets_shape']
def get_scaled_features(self, features):
if self.config['feature_rescaling'] == 'standardization':
scaled = (features - self.scaling['mean_features']) / self.scaling['std_features']
elif self.config['feature_rescaling'] == 'unit_cube':
scaled = (features - self.scaling['min_features']) / (self.scaling['max_features'] - self.scaling['min_features'])
return scaled
def get_scaled_targets(self, targets):
if self.config['target_rescaling'] == 'standardization':
scaled = (targets - self.scaling['mean_targets']) / self.scaling['std_targets']
elif self.config['target_rescaling'] == 'unit_cube':
scaled = (targets - self.scaling['min_targets']) / (self.scaling['max_targets'] - self.scaling['min_targets'])
elif self.config['target_rescaling'] == 'mean':
scaled = targets / self.scaling['mean_targets']
elif self.config['target_rescaling'] == 'same':
scaled = targets
return scaled
def get_raw_targets(self, targets):
if self.config['target_rescaling'] == 'standardization':
raw = targets * self.scaling['std_targets'] + self.scaling['mean_targets']
elif self.config['target_rescaling'] == 'unit_cube':
raw = (self.scaling['max_targets'] - self.scaling['min_targets']) * targets + self.scaling['min_targets']
elif self.config['target_rescaling'] == 'mean':
raw = targets * self.scaling['mean_targets']
elif self.config['target_rescaling'] == 'same':
|
return raw
def set_hyperparameters(self, hyperparam_dict):
for key, value in hyperparam_dict.items():
setattr(self, key, value)
def construct_graph(self):
act_funcs = {
'linear': lambda y: y,
'leaky_relu': lambda y: tf.nn.leaky_relu(y, 0.2),
'relu': lambda y: tf.nn.relu(y),
'softmax': lambda y: tf.nn.softmax(y),
'softplus': lambda y: tf.nn.softplus(y),
'softsign': lambda y: tf.nn.softsign(y),
'sigmoid': lambda y: tf.nn.sigmoid(y),
}
mlp_activation = act_funcs[self.ACT_FUNC]
out_activation = act_funcs[self.ACT_FUNC_OUT]
with self.graph.as_default():
with tf.name_scope(self.scope):
self.is_training = tf.compat.v1.placeholder(tf.bool, shape = ())
self.x_ph = tf.compat.v1.placeholder(tf.float32, [self.batch_size, self.features_shape[1]])
self.y_ph = tf.compat.v1.placeholder(tf.float32, [self.batch_size, self.targets_shape[1]])
self.layer_0 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_0_act = self.layer_0(self.x_ph)
layer_0_out = tf.layers.dropout(layer_0_act, rate = self.DROP, training = self.is_training)
self.layer_1 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_1_act = self.layer_1(layer_0_out)
layer_1_out = tf.layers.dropout(layer_1_act, rate = self.DROP, training = self.is_training)
self.layer_2 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_2_act = self.layer_2(layer_1_out)
layer_2_out = layer_2_act
self.layer_3 = tfp.layers.DenseLocalReparameterization(
self.targets_shape[1],
activation = out_activation,
)
layer_3_out = self.layer_3(layer_2_out)
self.net_out = layer_3_out
self.scales = tf.nn.softplus(tf.Variable(tf.zeros(1)))
self.y_pred = tf_dist.Normal(self.net_out, scale = self.scales)
def construct_inference(self):
self.is_graph_constructed = True
with self.graph.as_default():
self.kl = sum(self.layer_0.losses) / float(self.batch_size)
self.kl += sum(self.layer_1.losses) / float(self.batch_size)
self.kl += sum(self.layer_2.losses) / float(self.batch_size)
self.kl += sum(self.layer_3.losses) / float(self.batch_size)
self.reg_loss = - tf.reduce_mean( self.y_pred.log_prob(self.y_ph) )
self.loss = self.reg_loss + self.REG * self.kl
self.optimizer = tf.compat.v1.train.AdamOptimizer(self.LEARNING_RATE)
self.train_op = self.optimizer.minimize(self.loss)
self.init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
self.sess = tf.compat.v1.Session(graph = self.graph)
with self.sess.as_default():
self.sess.run(self.init_op)
def train(self, train_features, train_targets, valid_features, valid_targets, model_path, plot = False, targets = 'same'):
from sklearn.metrics import r2_score
if not os.path.isdir(model_path): os.mkdir(model_path)
logfile = open('%s/logfile.dat' % model_path, 'w')
logfile.close()
if not self.is_graph_constructed: self.construct_inference()
train_feat_scaled = self.get_scaled_features(train_features)
train_targ_scaled = self.get_scaled_targets(train_targets)
valid_feat_scaled = self.get_scaled_features(valid_features)
valid_targ_scaled = self.get_scaled_targets(valid_targets)
min_target, max_target = np.minimum(np.amin(train_targets, axis = 0), np.amin(valid_targets, axis = 0)), np.maximum(np.amax(train_targets, axis = 0), np.amax(valid_targets, axis = 0))
if targets == 'probs':
min_target = 1. / (1. + np.exp( - min_target))
max_target = 1. / (1. + np.exp( - max_target))
batch_train_gen = self._generator(train_feat_scaled, train_targ_scaled, self.batch_size)
batch_valid_gen = self._generator(valid_feat_scaled, valid_targ_scaled, self.batch_size)
train_errors, valid_errors = [], []
with self.graph.as_default():
with self.sess.as_default():
self.saver = tf.compat.v1.train.Saver()
if plot:
import matplotlib.pyplot as plt
import seaborn as sns
colors = sns.color_palette('RdYlGn', 4)
plt.ion()
plt.style.use('dark_background')
fig = plt.figure(figsize = (14, 5))
ax0 = plt.subplot2grid((1, 3), (0, 0))
ax1 = plt.subplot2grid((1, 3), (0, 1))
ax2 = plt.subplot2grid((1, 3), (0, 2))
for epoch in range(self.max_iter):
train_x, train_y = next(batch_train_gen)
valid_x, valid_y = next(batch_valid_gen)
self.sess.run(self.train_op, feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: True})
if epoch % 200 == 0:
valid_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: valid_x, self.is_training: False})
valid_y = self.get_raw_targets(valid_y)
valid_preds = self.get_raw_targets(valid_preds)
if targets == 'probs':
valid_y = 1. / (1. + np.exp( - valid_y))
valid_preds = 1. / (1. + np.exp( - valid_preds))
try:
valid_r2 = r2_score(valid_y, valid_preds)
except:
valid_r2 = np.nan
valid_errors.append(valid_r2)
_1_, _2_ = self.sess.run([self.reg_loss, self.kl], feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: False})
print('...', _1_, _2_)
train_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: train_x, self.is_training: False})
train_y = self.get_raw_targets(train_y)
train_preds = self.get_raw_targets(train_preds)
try:
train_r2 = r2_score(train_y, train_preds)
except:
train_r2 = np.nan
train_errors.append(train_r2)
if targets == 'probs':
train_y = 1. / (1. + np.exp( - train_y))
train_preds = 1. / (1. + np.exp( - train_preds))
logfile = open('%s/logfile.dat' % model_path, 'a | raw = targets | conditional_block |
model_probabilistic.py | self.targets_shape[1]])
self.layer_0 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_0_act = self.layer_0(self.x_ph)
layer_0_out = tf.layers.dropout(layer_0_act, rate = self.DROP, training = self.is_training)
self.layer_1 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_1_act = self.layer_1(layer_0_out)
layer_1_out = tf.layers.dropout(layer_1_act, rate = self.DROP, training = self.is_training)
self.layer_2 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_2_act = self.layer_2(layer_1_out)
layer_2_out = layer_2_act
self.layer_3 = tfp.layers.DenseLocalReparameterization(
self.targets_shape[1],
activation = out_activation,
)
layer_3_out = self.layer_3(layer_2_out)
self.net_out = layer_3_out
self.scales = tf.nn.softplus(tf.Variable(tf.zeros(1)))
self.y_pred = tf_dist.Normal(self.net_out, scale = self.scales)
def construct_inference(self):
self.is_graph_constructed = True
with self.graph.as_default():
self.kl = sum(self.layer_0.losses) / float(self.batch_size)
self.kl += sum(self.layer_1.losses) / float(self.batch_size)
self.kl += sum(self.layer_2.losses) / float(self.batch_size)
self.kl += sum(self.layer_3.losses) / float(self.batch_size)
self.reg_loss = - tf.reduce_mean( self.y_pred.log_prob(self.y_ph) )
self.loss = self.reg_loss + self.REG * self.kl
self.optimizer = tf.compat.v1.train.AdamOptimizer(self.LEARNING_RATE)
self.train_op = self.optimizer.minimize(self.loss)
self.init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
self.sess = tf.compat.v1.Session(graph = self.graph)
with self.sess.as_default():
self.sess.run(self.init_op)
def train(self, train_features, train_targets, valid_features, valid_targets, model_path, plot = False, targets = 'same'):
from sklearn.metrics import r2_score
if not os.path.isdir(model_path): os.mkdir(model_path)
logfile = open('%s/logfile.dat' % model_path, 'w')
logfile.close()
if not self.is_graph_constructed: self.construct_inference()
train_feat_scaled = self.get_scaled_features(train_features)
train_targ_scaled = self.get_scaled_targets(train_targets)
valid_feat_scaled = self.get_scaled_features(valid_features)
valid_targ_scaled = self.get_scaled_targets(valid_targets)
min_target, max_target = np.minimum(np.amin(train_targets, axis = 0), np.amin(valid_targets, axis = 0)), np.maximum(np.amax(train_targets, axis = 0), np.amax(valid_targets, axis = 0))
if targets == 'probs':
min_target = 1. / (1. + np.exp( - min_target))
max_target = 1. / (1. + np.exp( - max_target))
batch_train_gen = self._generator(train_feat_scaled, train_targ_scaled, self.batch_size)
batch_valid_gen = self._generator(valid_feat_scaled, valid_targ_scaled, self.batch_size)
train_errors, valid_errors = [], []
with self.graph.as_default():
with self.sess.as_default():
self.saver = tf.compat.v1.train.Saver()
if plot:
import matplotlib.pyplot as plt
import seaborn as sns
colors = sns.color_palette('RdYlGn', 4)
plt.ion()
plt.style.use('dark_background')
fig = plt.figure(figsize = (14, 5))
ax0 = plt.subplot2grid((1, 3), (0, 0))
ax1 = plt.subplot2grid((1, 3), (0, 1))
ax2 = plt.subplot2grid((1, 3), (0, 2))
for epoch in range(self.max_iter):
train_x, train_y = next(batch_train_gen)
valid_x, valid_y = next(batch_valid_gen)
self.sess.run(self.train_op, feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: True})
if epoch % 200 == 0:
valid_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: valid_x, self.is_training: False})
valid_y = self.get_raw_targets(valid_y)
valid_preds = self.get_raw_targets(valid_preds)
if targets == 'probs':
valid_y = 1. / (1. + np.exp( - valid_y))
valid_preds = 1. / (1. + np.exp( - valid_preds))
try:
valid_r2 = r2_score(valid_y, valid_preds)
except:
valid_r2 = np.nan
valid_errors.append(valid_r2)
_1_, _2_ = self.sess.run([self.reg_loss, self.kl], feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: False})
print('...', _1_, _2_)
train_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: train_x, self.is_training: False})
train_y = self.get_raw_targets(train_y)
train_preds = self.get_raw_targets(train_preds)
try:
train_r2 = r2_score(train_y, train_preds)
except:
train_r2 = np.nan
train_errors.append(train_r2)
if targets == 'probs':
train_y = 1. / (1. + np.exp( - train_y))
train_preds = 1. / (1. + np.exp( - train_preds))
logfile = open('%s/logfile.dat' % model_path, 'a')
logfile.write('%d\t%.5f\t%.5f\n' % (epoch, train_r2, valid_r2))
logfile.close()
# define break condition --> last improvement happened more than 100 epochs ago
max_r2_index = np.argmax(valid_errors)
if len(valid_errors) - max_r2_index > 100: break
if max_r2_index == len(valid_errors) - 1:
self.saver.save(self.sess, '%s/model.ckpt' % model_path)
new_line = 'EVALUATION: %d (%d)\t%.5f\t%.5f' % ( len(valid_errors) - max_r2_index, len(valid_errors), train_errors[-1], valid_errors[-1])
print(new_line)
if plot:
train_preds_scaled = train_preds
train_trues_scaled = train_y
valid_preds_scaled = valid_preds
valid_trues_scaled = valid_y
ax0.cla()
ax1.cla()
ax2.cla()
ax0.plot([min_target[0], max_target[0]], [min_target[0], max_target[0]], lw = 3, color = 'w', alpha = 0.5)
ax0.plot(train_trues_scaled[:, 0], train_preds_scaled[:, 0], marker = '.', ls = '', color = colors[-1], alpha = 0.5)
ax0.plot(valid_trues_scaled[:, 0], valid_preds_scaled[:, 0], marker = '.', ls = '', color = colors[0], alpha = 0.5)
if len(min_target) > 1:
ax1.plot([min_target[1], max_target[1]], [min_target[1], max_target[1]], lw = 3, color = 'w', alpha = 0.5)
ax1.plot(train_trues_scaled[:, 1], train_preds_scaled[:, 1], marker = '.', ls = '', color = colors[-1], alpha = 0.5)
ax1.plot(valid_trues_scaled[:, 1], valid_preds_scaled[:, 1], marker = '.', ls = '', color = colors[0], alpha = 0.5)
RANGE = 50
ax2.plot(np.arange(len(train_errors[-RANGE:])) + len(train_errors[-RANGE:]), train_errors[-RANGE:], lw = 3, color = colors[-1])
ax2.plot(np.arange(len(valid_errors[-RANGE:])) + len(valid_errors[-RANGE:]), valid_errors[-RANGE:], lw = 3, color = colors[0])
plt.pause(0.05)
def restore(self, model_path):
| if not self.is_graph_constructed: self.construct_inference()
self.sess = tf.compat.v1.Session(graph = self.graph)
self.saver = tf.compat.v1.train.Saver()
try:
self.saver.restore(self.sess, model_path)
return True
except AttributeError:
return False | identifier_body | |
model_probabilistic.py | (self, graph, dataset_details, config, scope, batch_size, max_iter = 10**8):
self.graph = graph
self.scope = scope
self.config = config
self.batch_size = batch_size
self.dataset_details = dataset_details
self.max_iter = max_iter
self.is_graph_constructed = False
self._read_scaling_details()
def _generator(self, features, targets, batch_size):
indices = np.arange(len(features))
while True:
np.random.shuffle(indices)
batch_features = features[indices[:batch_size]]
batch_targets = targets[indices[:batch_size]]
yield (batch_features, batch_targets)
def _read_scaling_details(self):
with open(self.dataset_details, 'rb') as content:
details = pickle.load(content)
self.scaling = {key: details[key] for key in details}
self.features_shape = self.scaling['features_shape']
self.targets_shape = self.scaling['targets_shape']
def get_scaled_features(self, features):
if self.config['feature_rescaling'] == 'standardization':
scaled = (features - self.scaling['mean_features']) / self.scaling['std_features']
elif self.config['feature_rescaling'] == 'unit_cube':
scaled = (features - self.scaling['min_features']) / (self.scaling['max_features'] - self.scaling['min_features'])
return scaled
def get_scaled_targets(self, targets):
if self.config['target_rescaling'] == 'standardization':
scaled = (targets - self.scaling['mean_targets']) / self.scaling['std_targets']
elif self.config['target_rescaling'] == 'unit_cube':
scaled = (targets - self.scaling['min_targets']) / (self.scaling['max_targets'] - self.scaling['min_targets'])
elif self.config['target_rescaling'] == 'mean':
scaled = targets / self.scaling['mean_targets']
elif self.config['target_rescaling'] == 'same':
scaled = targets
return scaled
def get_raw_targets(self, targets):
if self.config['target_rescaling'] == 'standardization':
raw = targets * self.scaling['std_targets'] + self.scaling['mean_targets']
elif self.config['target_rescaling'] == 'unit_cube':
raw = (self.scaling['max_targets'] - self.scaling['min_targets']) * targets + self.scaling['min_targets']
elif self.config['target_rescaling'] == 'mean':
raw = targets * self.scaling['mean_targets']
elif self.config['target_rescaling'] == 'same':
raw = targets
return raw
def set_hyperparameters(self, hyperparam_dict):
for key, value in hyperparam_dict.items():
setattr(self, key, value)
def construct_graph(self):
act_funcs = {
'linear': lambda y: y,
'leaky_relu': lambda y: tf.nn.leaky_relu(y, 0.2),
'relu': lambda y: tf.nn.relu(y),
'softmax': lambda y: tf.nn.softmax(y),
'softplus': lambda y: tf.nn.softplus(y),
'softsign': lambda y: tf.nn.softsign(y),
'sigmoid': lambda y: tf.nn.sigmoid(y),
}
mlp_activation = act_funcs[self.ACT_FUNC]
out_activation = act_funcs[self.ACT_FUNC_OUT]
with self.graph.as_default():
with tf.name_scope(self.scope):
self.is_training = tf.compat.v1.placeholder(tf.bool, shape = ())
self.x_ph = tf.compat.v1.placeholder(tf.float32, [self.batch_size, self.features_shape[1]])
self.y_ph = tf.compat.v1.placeholder(tf.float32, [self.batch_size, self.targets_shape[1]])
self.layer_0 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_0_act = self.layer_0(self.x_ph)
layer_0_out = tf.layers.dropout(layer_0_act, rate = self.DROP, training = self.is_training)
self.layer_1 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_1_act = self.layer_1(layer_0_out)
layer_1_out = tf.layers.dropout(layer_1_act, rate = self.DROP, training = self.is_training)
self.layer_2 = tfp.layers.DenseLocalReparameterization(
self.MLP_SIZE,
activation = mlp_activation,
)
layer_2_act = self.layer_2(layer_1_out)
layer_2_out = layer_2_act
self.layer_3 = tfp.layers.DenseLocalReparameterization(
self.targets_shape[1],
activation = out_activation,
)
layer_3_out = self.layer_3(layer_2_out)
self.net_out = layer_3_out
self.scales = tf.nn.softplus(tf.Variable(tf.zeros(1)))
self.y_pred = tf_dist.Normal(self.net_out, scale = self.scales)
def construct_inference(self):
self.is_graph_constructed = True
with self.graph.as_default():
self.kl = sum(self.layer_0.losses) / float(self.batch_size)
self.kl += sum(self.layer_1.losses) / float(self.batch_size)
self.kl += sum(self.layer_2.losses) / float(self.batch_size)
self.kl += sum(self.layer_3.losses) / float(self.batch_size)
self.reg_loss = - tf.reduce_mean( self.y_pred.log_prob(self.y_ph) )
self.loss = self.reg_loss + self.REG * self.kl
self.optimizer = tf.compat.v1.train.AdamOptimizer(self.LEARNING_RATE)
self.train_op = self.optimizer.minimize(self.loss)
self.init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
self.sess = tf.compat.v1.Session(graph = self.graph)
with self.sess.as_default():
self.sess.run(self.init_op)
def train(self, train_features, train_targets, valid_features, valid_targets, model_path, plot = False, targets = 'same'):
from sklearn.metrics import r2_score
if not os.path.isdir(model_path): os.mkdir(model_path)
logfile = open('%s/logfile.dat' % model_path, 'w')
logfile.close()
if not self.is_graph_constructed: self.construct_inference()
train_feat_scaled = self.get_scaled_features(train_features)
train_targ_scaled = self.get_scaled_targets(train_targets)
valid_feat_scaled = self.get_scaled_features(valid_features)
valid_targ_scaled = self.get_scaled_targets(valid_targets)
min_target, max_target = np.minimum(np.amin(train_targets, axis = 0), np.amin(valid_targets, axis = 0)), np.maximum(np.amax(train_targets, axis = 0), np.amax(valid_targets, axis = 0))
if targets == 'probs':
min_target = 1. / (1. + np.exp( - min_target))
max_target = 1. / (1. + np.exp( - max_target))
batch_train_gen = self._generator(train_feat_scaled, train_targ_scaled, self.batch_size)
batch_valid_gen = self._generator(valid_feat_scaled, valid_targ_scaled, self.batch_size)
train_errors, valid_errors = [], []
with self.graph.as_default():
with self.sess.as_default():
self.saver = tf.compat.v1.train.Saver()
if plot:
import matplotlib.pyplot as plt
import seaborn as sns
colors = sns.color_palette('RdYlGn', 4)
plt.ion()
plt.style.use('dark_background')
fig = plt.figure(figsize = (14, 5))
ax0 = plt.subplot2grid((1, 3), (0, 0))
ax1 = plt.subplot2grid((1, 3), (0, 1))
ax2 = plt.subplot2grid((1, 3), (0, 2))
for epoch in range(self.max_iter):
train_x, train_y = next(batch_train_gen)
valid_x, valid_y = next(batch_valid_gen)
self.sess.run(self.train_op, feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: True})
if epoch % 200 == 0:
valid_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: valid_x, self.is_training: False})
valid_y = self.get_raw_targets(valid_y)
valid_preds = self.get_raw_targets(valid_preds)
if targets == 'probs':
valid_y = 1. / (1. + np.exp( - valid_y))
valid_preds = 1. / (1. + np.exp( - valid_preds))
try:
valid_r2 = r2_score(valid_y, valid_preds)
except:
valid_r2 = np.nan
valid_errors.append(valid_r2)
_1_, _2_ = self.sess.run([self.reg_loss, self.kl], | __init__ | identifier_name | |
walk.py |
self.console_stream = console_stream
def printer(self, message, stream=False):
if not stream:
if self.console_output:
print('\t' + message)
else:
if self.console_stream:
print('\t' + message)
def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):
"""
Apply a function to each element in an iterable and return a result list.
:param func: A function that returns a value
:param iterable: A list or set of elements to be passed to the func as the singular parameter
:param process_name: Name of the process, for printing purposes only
:param cpus: Number of CPUs
:return: Result list
"""
with Timer('\t{0} ({1}) completed in'.format(process_name, str(func))):
pool = Pool(cpus)
vals = pool.map(func, iterable)
pool.close()
return vals
def md5_hash(file_path):
"""Open a file path and hash the contents."""
with open(file_path, 'rb') as fp:
return md5(fp.read()).hexdigest()
def md5_tuple(file_path):
"""Returns a file_path, hash tuple."""
return file_path, md5_hash(file_path)
def | (path_list):
"""Pool process file hashing."""
return pool_process(md5_tuple, path_list, 'MD5 hashing')
def remover(file_path):
"""Delete a file or directory path only if it exists."""
if os.path.isfile(file_path):
os.remove(file_path)
return True
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
return True
else:
return False
def creation_date(path_to_file, return_datetime=True):
"""
Retrieve a file's creation date.
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
:param path_to_file: File path
:param return_datetime: Bool, returns value in Datetime format
:return: Creation date
"""
if platform.system() == 'Windows':
created_at = os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
created_at = stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
created_at = stat.st_mtime
if return_datetime:
return datetime.fromtimestamp(created_at)
else:
return created_at
def creation_date_tuple(file_path):
"""Returns a (file_path, creation_date) tuple."""
return file_path, creation_date(file_path)
def pool_creation_date(path_list):
"""Pool process file creation dates."""
return pool_process(creation_date_tuple, path_list, 'File creation dates')
class DirPaths:
def __init__(self,
directory,
full_paths=False,
topdown=True,
to_include=None,
to_exclude=None,
min_level=0,
max_level=inf,
filters=None,
non_empty_folders=False,
parallelize=False,
pool_size=cpu_count(),
console_output=False,
console_stream=False,
hash_files=False):
"""
This class generates a list of either files and or folders within a root directory.
The walk method generates a directory list of files by walking the file tree top down or bottom up. The
files and folders method generate a list of files or folders in the top level of the tree.
:param directory: Starting directory file path
:param full_paths: Bool, when true full paths are concatenated to file paths list
:param topdown: Bool, when true walk method walks tree from the topdwon. When false tree is walked bottom up
:param to_include: None by default. List of filters acceptable to find within file path string return
:param to_exclude: None by default. List of filters NOT acceptable to return
:param min_level: 0 by default. Minimum directory level to save paths from
:param max_level: Infinity by default. Maximum directory level to save paths from
:param parallelize: Bool, when true pool processing is enabled within walk method
:param pool_size: Number of CPUs for pool processing, default is number of processors
:param console_output: Bool, when true console output is printed
:param console_stream: Bool, when true loops print live results
:param hash_files: Bool, when true walk() method return a dictionary file_paths and hashes
"""
self.timer = Timer()
self.full_paths = full_paths
self.topdown = topdown
# Exclude .DS_Store by default, set to_exclude to False to include .DS_Store
to_exclude = ['.DS_Store'] if to_exclude is None else to_exclude
if any(i for i in [to_include, to_exclude, filters]) or min_level != 0 or max_level != inf:
self.filters = PathFilters(to_include, to_exclude, min_level, max_level, filters, non_empty_folders)
else:
self.filters = False
self.console_output = console_output
self.console_stream = console_stream
self._hash_files = hash_files
self._printer = Printer(console_output, console_stream).printer
self._printer('DIRPATHS')
# Check that parallelization is enabled
if parallelize:
self.pool_size = pool_size
self.parallelize = parallelize
self.filepaths = []
# Check if directory is a singular (1) string or if it is a list of strings (multiple)
try:
self.directory = [str(directory)]
except TypeError:
self.directory = [str(dirs) for dirs in directory]
def __iter__(self):
return iter(list(self.filepaths))
def __str__(self):
return str(self.filepaths)
def __len__(self):
return len(self.filepaths)
def _get_filepaths(self):
"""Filters list of file paths to remove non-included, remove excluded files and concatenate full paths."""
self._printer(str(self.__len__()) + " file paths have been parsed in " + str(self.timer.end))
if self._hash_files:
return pool_hash(self.filepaths)
else:
return self.filepaths
def creation_dates(self, sort=True):
"""
Return a list of (file_path, creation_date) tuples created from list of walked paths.
:param sort: Bool, sorts file_paths on created_date from newest to oldest.
:return: List of (file_path, created_date) tuples.
"""
if not sort:
return pool_creation_date(self.filepaths)
else:
pcd = pool_creation_date(self.filepaths)
pcd.sort(key=itemgetter(1), reverse=True)
return pcd
def walk(self):
"""
Default file path retrieval function.
sprinter() - Generates file path list using pool processing and Queues
crawler() - Generates file path list using os.walk() in sequence
"""
if self.parallelize:
self.filepaths = Sprinter(self.directory, self.filters, self.full_paths, self.pool_size,
self._printer).sprinter()
else:
self.filepaths = Crawler(self.directory, self.filters, self.full_paths, self.topdown,
self._printer).crawler()
return self._get_filepaths()
def files(self):
"""Return list of files in root directory"""
self._printer('\tFiles Walk')
for directory in self.directory:
for path in os.listdir(directory):
full_path = os.path.join(directory, path)
if os.path.isfile(full_path):
if not path.startswith('.'):
self.filepaths.append(full_path)
return self._get_filepaths()
def folders(self):
"""Return list of folders in root directory"""
for directory in self.directory:
for path in os.listdir(directory):
full_path = os.path.join(directory, path)
if os.path.isdir(full_path):
if not path.startswith('.'):
self.filepaths.append(full_path)
return self._get_filepaths()
class DirTree:
def __init__(self, root, branches=None):
"""
Generate a tree dictionary of the contents of a root directory.
:param root: Starting directory
:param branches: List of function tuples used for filtering
"""
self.tree_dict = {}
self.directory = Path(root)
self.start = str(self.directory).rfind(os.sep) + 1
self.branches = branches
self.get()
def __iter__(self):
return iter(self.tree_dict.items())
def __str__(self):
return str(self.tree_dict)
@property
def dict(self):
return self.tree_dict
def _filter(self, folders, folder_or_file):
for index in range(0, len(folders)):
filters = self.branches[index][folder_or_file]
if filters:
exclude = filters.get
include = filters.get
if exclude and folders[index] in exclude:
return False
if include and folders[index] not in include:
return False
return True
def get(self):
"""
Generate path, dirs, files tuple for each path in directory. Executes filters if branches are not None
:return:
"""
for path, dirs | pool_hash | identifier_name |
walk.py |
self.console_stream = console_stream
def printer(self, message, stream=False):
if not stream:
if self.console_output:
print('\t' + message)
else:
if self.console_stream:
print('\t' + message)
def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):
"""
Apply a function to each element in an iterable and return a result list.
:param func: A function that returns a value
:param iterable: A list or set of elements to be passed to the func as the singular parameter
:param process_name: Name of the process, for printing purposes only
:param cpus: Number of CPUs
:return: Result list
"""
with Timer('\t{0} ({1}) completed in'.format(process_name, str(func))):
pool = Pool(cpus)
vals = pool.map(func, iterable)
pool.close()
return vals
def md5_hash(file_path):
"""Open a file path and hash the contents."""
with open(file_path, 'rb') as fp:
return md5(fp.read()).hexdigest()
def md5_tuple(file_path):
"""Returns a file_path, hash tuple."""
return file_path, md5_hash(file_path)
def pool_hash(path_list):
"""Pool process file hashing."""
return pool_process(md5_tuple, path_list, 'MD5 hashing')
def remover(file_path):
"""Delete a file or directory path only if it exists."""
if os.path.isfile(file_path):
os.remove(file_path)
return True
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
return True
else:
return False
def creation_date(path_to_file, return_datetime=True):
"""
Retrieve a file's creation date.
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
:param path_to_file: File path
:param return_datetime: Bool, returns value in Datetime format
:return: Creation date
"""
if platform.system() == 'Windows':
created_at = os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
created_at = stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
created_at = stat.st_mtime
if return_datetime:
return datetime.fromtimestamp(created_at)
else:
return created_at
def creation_date_tuple(file_path):
"""Returns a (file_path, creation_date) tuple."""
return file_path, creation_date(file_path)
def pool_creation_date(path_list):
|
class DirPaths:
def __init__(self,
directory,
full_paths=False,
topdown=True,
to_include=None,
to_exclude=None,
min_level=0,
max_level=inf,
filters=None,
non_empty_folders=False,
parallelize=False,
pool_size=cpu_count(),
console_output=False,
console_stream=False,
hash_files=False):
"""
This class generates a list of either files and or folders within a root directory.
The walk method generates a directory list of files by walking the file tree top down or bottom up. The
files and folders method generate a list of files or folders in the top level of the tree.
:param directory: Starting directory file path
:param full_paths: Bool, when true full paths are concatenated to file paths list
:param topdown: Bool, when true walk method walks tree from the topdwon. When false tree is walked bottom up
:param to_include: None by default. List of filters acceptable to find within file path string return
:param to_exclude: None by default. List of filters NOT acceptable to return
:param min_level: 0 by default. Minimum directory level to save paths from
:param max_level: Infinity by default. Maximum directory level to save paths from
:param parallelize: Bool, when true pool processing is enabled within walk method
:param pool_size: Number of CPUs for pool processing, default is number of processors
:param console_output: Bool, when true console output is printed
:param console_stream: Bool, when true loops print live results
:param hash_files: Bool, when true walk() method return a dictionary file_paths and hashes
"""
self.timer = Timer()
self.full_paths = full_paths
self.topdown = topdown
# Exclude .DS_Store by default, set to_exclude to False to include .DS_Store
to_exclude = ['.DS_Store'] if to_exclude is None else to_exclude
if any(i for i in [to_include, to_exclude, filters]) or min_level != 0 or max_level != inf:
self.filters = PathFilters(to_include, to_exclude, min_level, max_level, filters, non_empty_folders)
else:
self.filters = False
self.console_output = console_output
self.console_stream = console_stream
self._hash_files = hash_files
self._printer = Printer(console_output, console_stream).printer
self._printer('DIRPATHS')
# Check that parallelization is enabled
if parallelize:
self.pool_size = pool_size
self.parallelize = parallelize
self.filepaths = []
# Check if directory is a singular (1) string or if it is a list of strings (multiple)
try:
self.directory = [str(directory)]
except TypeError:
self.directory = [str(dirs) for dirs in directory]
def __iter__(self):
return iter(list(self.filepaths))
def __str__(self):
return str(self.filepaths)
def __len__(self):
return len(self.filepaths)
def _get_filepaths(self):
"""Filters list of file paths to remove non-included, remove excluded files and concatenate full paths."""
self._printer(str(self.__len__()) + " file paths have been parsed in " + str(self.timer.end))
if self._hash_files:
return pool_hash(self.filepaths)
else:
return self.filepaths
def creation_dates(self, sort=True):
"""
Return a list of (file_path, creation_date) tuples created from list of walked paths.
:param sort: Bool, sorts file_paths on created_date from newest to oldest.
:return: List of (file_path, created_date) tuples.
"""
if not sort:
return pool_creation_date(self.filepaths)
else:
pcd = pool_creation_date(self.filepaths)
pcd.sort(key=itemgetter(1), reverse=True)
return pcd
def walk(self):
"""
Default file path retrieval function.
sprinter() - Generates file path list using pool processing and Queues
crawler() - Generates file path list using os.walk() in sequence
"""
if self.parallelize:
self.filepaths = Sprinter(self.directory, self.filters, self.full_paths, self.pool_size,
self._printer).sprinter()
else:
self.filepaths = Crawler(self.directory, self.filters, self.full_paths, self.topdown,
self._printer).crawler()
return self._get_filepaths()
def files(self):
"""Return list of files in root directory"""
self._printer('\tFiles Walk')
for directory in self.directory:
for path in os.listdir(directory):
full_path = os.path.join(directory, path)
if os.path.isfile(full_path):
if not path.startswith('.'):
self.filepaths.append(full_path)
return self._get_filepaths()
def folders(self):
"""Return list of folders in root directory"""
for directory in self.directory:
for path in os.listdir(directory):
full_path = os.path.join(directory, path)
if os.path.isdir(full_path):
if not path.startswith('.'):
self.filepaths.append(full_path)
return self._get_filepaths()
class DirTree:
def __init__(self, root, branches=None):
"""
Generate a tree dictionary of the contents of a root directory.
:param root: Starting directory
:param branches: List of function tuples used for filtering
"""
self.tree_dict = {}
self.directory = Path(root)
self.start = str(self.directory).rfind(os.sep) + 1
self.branches = branches
self.get()
def __iter__(self):
return iter(self.tree_dict.items())
def __str__(self):
return str(self.tree_dict)
@property
def dict(self):
return self.tree_dict
def _filter(self, folders, folder_or_file):
for index in range(0, len(folders)):
filters = self.branches[index][folder_or_file]
if filters:
exclude = filters.get
include = filters.get
if exclude and folders[index] in exclude:
return False
if include and folders[index] not in include:
return False
return True
def get(self):
"""
Generate path, dirs, files tuple for each path in directory. Executes filters if branches are not None
:return:
"""
for path, dirs | """Pool process file creation dates."""
return pool_process(creation_date_tuple, path_list, 'File creation dates') | identifier_body |
walk.py |
self.console_stream = console_stream
def printer(self, message, stream=False):
if not stream:
if self.console_output:
print('\t' + message)
else:
if self.console_stream:
print('\t' + message)
def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):
"""
Apply a function to each element in an iterable and return a result list.
:param func: A function that returns a value
:param iterable: A list or set of elements to be passed to the func as the singular parameter
:param process_name: Name of the process, for printing purposes only
:param cpus: Number of CPUs
:return: Result list
"""
with Timer('\t{0} ({1}) completed in'.format(process_name, str(func))):
pool = Pool(cpus)
vals = pool.map(func, iterable)
pool.close()
return vals
def md5_hash(file_path):
"""Open a file path and hash the contents."""
with open(file_path, 'rb') as fp:
return md5(fp.read()).hexdigest()
def md5_tuple(file_path):
"""Returns a file_path, hash tuple."""
return file_path, md5_hash(file_path)
def pool_hash(path_list):
"""Pool process file hashing."""
return pool_process(md5_tuple, path_list, 'MD5 hashing')
def remover(file_path):
"""Delete a file or directory path only if it exists."""
if os.path.isfile(file_path):
os.remove(file_path)
return True
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
return True
else:
return False
def creation_date(path_to_file, return_datetime=True):
"""
Retrieve a file's creation date.
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
:param path_to_file: File path
:param return_datetime: Bool, returns value in Datetime format
:return: Creation date
"""
if platform.system() == 'Windows':
created_at = os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
created_at = stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
created_at = stat.st_mtime
if return_datetime:
return datetime.fromtimestamp(created_at)
else:
return created_at
def creation_date_tuple(file_path):
"""Returns a (file_path, creation_date) tuple."""
return file_path, creation_date(file_path)
def pool_creation_date(path_list):
"""Pool process file creation dates."""
return pool_process(creation_date_tuple, path_list, 'File creation dates')
class DirPaths:
def __init__(self,
directory,
full_paths=False,
topdown=True,
to_include=None,
to_exclude=None,
min_level=0,
max_level=inf,
filters=None,
non_empty_folders=False,
parallelize=False,
pool_size=cpu_count(),
console_output=False,
console_stream=False,
hash_files=False):
"""
This class generates a list of either files and or folders within a root directory.
The walk method generates a directory list of files by walking the file tree top down or bottom up. The
files and folders method generate a list of files or folders in the top level of the tree.
:param directory: Starting directory file path
:param full_paths: Bool, when true full paths are concatenated to file paths list
:param topdown: Bool, when true walk method walks tree from the topdwon. When false tree is walked bottom up
:param to_include: None by default. List of filters acceptable to find within file path string return
:param to_exclude: None by default. List of filters NOT acceptable to return
:param min_level: 0 by default. Minimum directory level to save paths from
:param max_level: Infinity by default. Maximum directory level to save paths from
:param parallelize: Bool, when true pool processing is enabled within walk method
:param pool_size: Number of CPUs for pool processing, default is number of processors
:param console_output: Bool, when true console output is printed
:param console_stream: Bool, when true loops print live results
:param hash_files: Bool, when true walk() method return a dictionary file_paths and hashes
"""
self.timer = Timer()
self.full_paths = full_paths
self.topdown = topdown
# Exclude .DS_Store by default, set to_exclude to False to include .DS_Store
to_exclude = ['.DS_Store'] if to_exclude is None else to_exclude
if any(i for i in [to_include, to_exclude, filters]) or min_level != 0 or max_level != inf:
|
else:
self.filters = False
self.console_output = console_output
self.console_stream = console_stream
self._hash_files = hash_files
self._printer = Printer(console_output, console_stream).printer
self._printer('DIRPATHS')
# Check that parallelization is enabled
if parallelize:
self.pool_size = pool_size
self.parallelize = parallelize
self.filepaths = []
# Check if directory is a singular (1) string or if it is a list of strings (multiple)
try:
self.directory = [str(directory)]
except TypeError:
self.directory = [str(dirs) for dirs in directory]
def __iter__(self):
return iter(list(self.filepaths))
def __str__(self):
return str(self.filepaths)
def __len__(self):
return len(self.filepaths)
def _get_filepaths(self):
"""Filters list of file paths to remove non-included, remove excluded files and concatenate full paths."""
self._printer(str(self.__len__()) + " file paths have been parsed in " + str(self.timer.end))
if self._hash_files:
return pool_hash(self.filepaths)
else:
return self.filepaths
def creation_dates(self, sort=True):
"""
Return a list of (file_path, creation_date) tuples created from list of walked paths.
:param sort: Bool, sorts file_paths on created_date from newest to oldest.
:return: List of (file_path, created_date) tuples.
"""
if not sort:
return pool_creation_date(self.filepaths)
else:
pcd = pool_creation_date(self.filepaths)
pcd.sort(key=itemgetter(1), reverse=True)
return pcd
def walk(self):
"""
Default file path retrieval function.
sprinter() - Generates file path list using pool processing and Queues
crawler() - Generates file path list using os.walk() in sequence
"""
if self.parallelize:
self.filepaths = Sprinter(self.directory, self.filters, self.full_paths, self.pool_size,
self._printer).sprinter()
else:
self.filepaths = Crawler(self.directory, self.filters, self.full_paths, self.topdown,
self._printer).crawler()
return self._get_filepaths()
def files(self):
"""Return list of files in root directory"""
self._printer('\tFiles Walk')
for directory in self.directory:
for path in os.listdir(directory):
full_path = os.path.join(directory, path)
if os.path.isfile(full_path):
if not path.startswith('.'):
self.filepaths.append(full_path)
return self._get_filepaths()
def folders(self):
"""Return list of folders in root directory"""
for directory in self.directory:
for path in os.listdir(directory):
full_path = os.path.join(directory, path)
if os.path.isdir(full_path):
if not path.startswith('.'):
self.filepaths.append(full_path)
return self._get_filepaths()
class DirTree:
def __init__(self, root, branches=None):
"""
Generate a tree dictionary of the contents of a root directory.
:param root: Starting directory
:param branches: List of function tuples used for filtering
"""
self.tree_dict = {}
self.directory = Path(root)
self.start = str(self.directory).rfind(os.sep) + 1
self.branches = branches
self.get()
def __iter__(self):
return iter(self.tree_dict.items())
def __str__(self):
return str(self.tree_dict)
@property
def dict(self):
return self.tree_dict
def _filter(self, folders, folder_or_file):
for index in range(0, len(folders)):
filters = self.branches[index][folder_or_file]
if filters:
exclude = filters.get
include = filters.get
if exclude and folders[index] in exclude:
return False
if include and folders[index] not in include:
return False
return True
def get(self):
"""
Generate path, dirs, files tuple for each path in directory. Executes filters if branches are not None
:return:
"""
for path, dirs | self.filters = PathFilters(to_include, to_exclude, min_level, max_level, filters, non_empty_folders) | conditional_block |
walk.py | _output
self.console_stream = console_stream
def printer(self, message, stream=False):
if not stream:
if self.console_output:
print('\t' + message)
else:
if self.console_stream:
print('\t' + message)
def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):
"""
Apply a function to each element in an iterable and return a result list.
:param func: A function that returns a value
:param iterable: A list or set of elements to be passed to the func as the singular parameter
:param process_name: Name of the process, for printing purposes only
:param cpus: Number of CPUs
:return: Result list
"""
with Timer('\t{0} ({1}) completed in'.format(process_name, str(func))):
pool = Pool(cpus)
vals = pool.map(func, iterable)
pool.close()
return vals
def md5_hash(file_path):
"""Open a file path and hash the contents."""
with open(file_path, 'rb') as fp:
return md5(fp.read()).hexdigest()
def md5_tuple(file_path):
"""Returns a file_path, hash tuple."""
return file_path, md5_hash(file_path)
def pool_hash(path_list):
"""Pool process file hashing."""
return pool_process(md5_tuple, path_list, 'MD5 hashing')
def remover(file_path):
"""Delete a file or directory path only if it exists."""
if os.path.isfile(file_path):
os.remove(file_path)
return True
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
return True
else:
return False
def creation_date(path_to_file, return_datetime=True):
"""
Retrieve a file's creation date.
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
:param path_to_file: File path
:param return_datetime: Bool, returns value in Datetime format
:return: Creation date
"""
if platform.system() == 'Windows':
created_at = os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
created_at = stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
created_at = stat.st_mtime
if return_datetime:
return datetime.fromtimestamp(created_at)
else:
return created_at
def creation_date_tuple(file_path):
    """Pair a file path with its creation date.

    :param file_path: File path
    :return: (file_path, creation_date) tuple
    """
    created = creation_date(file_path)
    return (file_path, created)
def pool_creation_date(path_list):
    """Look up creation dates for every path in path_list using a process pool.

    :param path_list: Iterable of file paths
    :return: List of (file_path, creation_date) tuples
    """
    return pool_process(creation_date_tuple, path_list, 'File creation dates')
class DirPaths:
    """Generate lists of files and/or folders within one or more root directories.

    walk() traverses the whole tree (top-down or bottom-up, optionally in
    parallel); files() and folders() only inspect the top level of each root.
    """

    def __init__(self,
                 directory,
                 full_paths=False,
                 topdown=True,
                 to_include=None,
                 to_exclude=None,
                 min_level=0,
                 max_level=inf,
                 filters=None,
                 non_empty_folders=False,
                 parallelize=False,
                 pool_size=cpu_count(),
                 console_output=False,
                 console_stream=False,
                 hash_files=False):
        """
        :param directory: Starting directory path, or an iterable of paths
        :param full_paths: Bool, when True full paths are concatenated to file paths list
        :param topdown: Bool, when True walk() walks the tree top-down, otherwise bottom-up
        :param to_include: None by default. List of filters acceptable to find within file path string
        :param to_exclude: None by default. List of filters NOT acceptable to return
        :param min_level: 0 by default. Minimum directory level to save paths from
        :param max_level: Infinity by default. Maximum directory level to save paths from
        :param filters: Optional extra rules forwarded to PathFilters
        :param non_empty_folders: Bool, restrict results to non-empty folders
        :param parallelize: Bool, when True pool processing is enabled within walk()
        :param pool_size: Number of CPUs for pool processing, default is number of processors
        :param console_output: Bool, when True console output is printed
        :param console_stream: Bool, when True loops print live results
        :param hash_files: Bool, when True walk() returns (file_path, hash) tuples
        """
        self.timer = Timer()
        self.full_paths = full_paths
        self.topdown = topdown
        # Exclude .DS_Store by default; pass a falsy to_exclude to keep it.
        to_exclude = ['.DS_Store'] if to_exclude is None else to_exclude
        if any(i for i in [to_include, to_exclude, filters]) or min_level != 0 or max_level != inf:
            self.filters = PathFilters(to_include, to_exclude, min_level, max_level, filters, non_empty_folders)
        else:
            self.filters = False
        self.console_output = console_output
        self.console_stream = console_stream
        self._hash_files = hash_files
        self._printer = Printer(console_output, console_stream).printer
        self._printer('DIRPATHS')
        # BUG FIX: always record both settings. walk() reads self.parallelize
        # unconditionally and self.pool_size when parallel, but the original
        # only assigned pool_size inside `if parallelize:`, leaving it unset
        # (AttributeError) in the sequential case.
        self.parallelize = parallelize
        self.pool_size = pool_size
        self.filepaths = []
        # Accept a single path or an iterable of paths.
        # BUG FIX: the original `try: [str(directory)] except TypeError` could
        # never fall through, because str() accepts lists too — a list of
        # roots was silently stringified into one bogus path like "['a', 'b']".
        if isinstance(directory, (str, bytes)) or not hasattr(directory, '__iter__'):
            self.directory = [str(directory)]
        else:
            self.directory = [str(dirs) for dirs in directory]

    def __iter__(self):
        return iter(list(self.filepaths))

    def __str__(self):
        return str(self.filepaths)

    def __len__(self):
        return len(self.filepaths)

    def _get_filepaths(self):
        """Report timing and return collected paths (hashed when requested)."""
        self._printer(str(self.__len__()) + " file paths have been parsed in " + str(self.timer.end))
        if self._hash_files:
            return pool_hash(self.filepaths)
        else:
            return self.filepaths

    def creation_dates(self, sort=True):
        """
        Return a list of (file_path, creation_date) tuples for the walked paths.

        :param sort: Bool, sort file_paths on created_date from newest to oldest.
        :return: List of (file_path, created_date) tuples.
        """
        if not sort:
            return pool_creation_date(self.filepaths)
        else:
            pcd = pool_creation_date(self.filepaths)
            pcd.sort(key=itemgetter(1), reverse=True)
            return pcd

    def walk(self):
        """
        Default file path retrieval function.

        Sprinter generates the path list with pool processing and queues;
        Crawler generates it sequentially with os.walk().
        """
        if self.parallelize:
            self.filepaths = Sprinter(self.directory, self.filters, self.full_paths, self.pool_size,
                                      self._printer).sprinter()
        else:
            self.filepaths = Crawler(self.directory, self.filters, self.full_paths, self.topdown,
                                     self._printer).crawler()
        return self._get_filepaths()

    def files(self):
        """Return list of non-hidden files in the top level of each root directory."""
        self._printer('\tFiles Walk')
        for directory in self.directory:
            for path in os.listdir(directory):
                full_path = os.path.join(directory, path)
                if os.path.isfile(full_path) and not path.startswith('.'):
                    self.filepaths.append(full_path)
        return self._get_filepaths()

    def folders(self):
        """Return list of non-hidden folders in the top level of each root directory."""
        for directory in self.directory:
            for path in os.listdir(directory):
                full_path = os.path.join(directory, path)
                if os.path.isdir(full_path) and not path.startswith('.'):
                    self.filepaths.append(full_path)
        return self._get_filepaths()
class DirTree:
def __init__(self, root, branches=None):
"""
Generate a tree dictionary of the contents of a root directory.
:param root: Starting directory
:param branches: List of function tuples used for filtering
"""
self.tree_dict = {}
self.directory = Path(root)
self.start = str(self.directory).rfind(os.sep) + 1
self.branches = branches
self.get()
| return str(self.tree_dict)
@property
def dict(self):
return self.tree_dict
def _filter(self, folders, folder_or_file):
for index in range(0, len(folders)):
filters = self.branches[index][folder_or_file]
if filters:
exclude = filters.get
include = filters.get
if exclude and folders[index] in exclude:
return False
if include and folders[index] not in include:
return False
return True
def get(self):
"""
Generate path, dirs, files tuple for each path in directory. Executes filters if branches are not None
:return:
"""
for path, dirs | def __iter__(self):
return iter(self.tree_dict.items())
def __str__(self): | random_line_split |
utils.py | 1].plot(range(total_epochs), bnn.fit_history.history["val_{}".format(this_metric)], '-o', label="validation")
axes[i+1].legend()
axes[i+1].set_ylabel(this_metric)
axes[i+1].set_xlabel("epoch")
plt.tight_layout()
return fig, axes
def make_1d2d(arr):
    """Reshape a 1-D array into a 2-D column vector of shape (n, 1)."""
    assert arr.ndim == 1
    return arr.reshape(-1, 1)
def onehot_encode_labels(y):
    """
    One-hot encode an integer label vector.

    The number of classes is assumed to be the largest value in y.

    :param y: array with shape (n_examples,)
    :return: array with shape (n_examples, n_classes)
    """
    column = y.reshape(y.shape[0], 1)
    encoder = OneHotEncoder(categories="auto", sparse=False)
    return encoder.fit_transform(column)
def get_roc_curves(variable_importances):
    """
    Calculate per-(method, n, p, repeat) ROC curves from a long-format table.

    # TODO: set row idx as variable
    :param variable_importances: A dataframe with the following columns:
        - method
        - n
        - p
        - repeat_idx
        - variable
        plus "value" (importance score) and "causal" (ground-truth label)
    :return: DataFrame with columns fpr, tpr, auc, method, n, p
    """
    frames = []
    base_fpr = np.linspace(0, 1, 101)  # Interpolate tpr (y-axis) at these fpr (x-axis) values
    for method in variable_importances["method"].unique():
        for n in variable_importances["n"].unique():
            for p in variable_importances["p"].unique():
                for repeat_idx in range(np.amax(variable_importances["repeat_idx"].unique() + 1)):
                    df = variable_importances.loc[
                        (variable_importances["method"] == method) &
                        (variable_importances["repeat_idx"] == repeat_idx) &
                        (variable_importances["n"] == n) &
                        (variable_importances["p"] == p)
                    ]
                    if len(df) == 0:
                        continue
                    preds, labels = df["value"].values, df["causal"].values.astype(float)
                    fpr, tpr, _ = roc_curve(labels, np.abs(preds))
                    interp_tpr = np.interp(base_fpr, fpr, tpr)
                    auroc = auc(fpr, tpr)
                    frames.append(pd.DataFrame({
                        "fpr": base_fpr, "tpr": interp_tpr, "auc": auroc,
                        "method": method, "n": n, "p": p
                    }))
    # PERF FIX: pd.concat inside the loop re-copies all accumulated rows on
    # every iteration (quadratic); collecting frames and concatenating once
    # is linear. An empty DataFrame is returned when nothing matched, as before.
    return pd.concat(frames) if frames else pd.DataFrame()
def load_mnist(fashion, onehot_encode=True, flatten_x=False, crop_x=0, classes=None):
    """
    Load the MNIST (or Fashion-MNIST) dataset.

    :param fashion: Boolean selecting Fashion-MNIST instead of digit MNIST
    :param onehot_encode: Boolean indicating whether to one-hot encode training
        and test labels (default True)
    :param flatten_x: Boolean indicating whether to flatten the training and
        test inputs to 2D arrays with shape (n_examples, image_size**2).
        If False, returned inputs have shape (n_examples, image_size, image_size)
        (default False)
    :param crop_x: Integer controlling the size of the border to be removed from
        the input images (default 0, meaning no cropping).
    :param classes: None to include all classes (default). Otherwise include a
        list of two integers that will be encoded as 0, 1 in the order they appear.
    :return: x_train, y_train, x_test, y_test: train and test inputs and labels.
        First dimension is always the number of examples
    """
    if not fashion:
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    else:
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
    # Scale pixel values from [0, 255] to [0, 1].
    x_train, x_test = x_train / 255.0, x_test / 255.0

    def crop(X, crop_size):
        # BUG FIX: the original asserted against the closed-over crop_x rather
        # than the crop_size parameter actually used for slicing, so the guard
        # was wrong for any direct call with a different crop_size.
        assert crop_size < X.shape[1] / 2
        assert crop_size < X.shape[2] / 2
        return X[:, crop_size:-crop_size, crop_size:-crop_size]

    if crop_x > 0:
        x_train = crop(x_train, crop_x)
        x_test = crop(x_test, crop_x)

    def flatten_image(X):
        # Flatten each (size, size) image to a single 1-D row.
        return X.reshape(X.shape[0], X.shape[1] * X.shape[1])

    if flatten_x:
        x_train = flatten_image(x_train)
        x_test = flatten_image(x_test)
    if onehot_encode:
        y_train = onehot_encode_labels(y_train)
        y_test = onehot_encode_labels(y_test)
    if classes is not None:
        # NOTE(review): this branch compares y == c0 elementwise, which assumes
        # integer labels; combining classes with onehot_encode=True looks
        # unsupported — confirm intended usage with callers.
        assert len(classes) == 2
        c0, c1 = classes
        train_idxs_to_keep = np.logical_or(y_train == c0, y_train == c1)
        x_train, y_train = x_train[train_idxs_to_keep, :], y_train[train_idxs_to_keep]
        test_idxs_to_keep = np.logical_or(y_test == c0, y_test == c1)
        x_test, y_test = x_test[test_idxs_to_keep, :], y_test[test_idxs_to_keep]
        y_train = (y_train == c1).astype(int)[:, np.newaxis]
        y_test = (y_test == c1).astype(int)[:, np.newaxis]
    return x_train, y_train, x_test, y_test
def make_square(arr):
    """
    Reshape a 1D array (or 2D array whose second dimension is 1) into a square 2D array.
    """
    assert arr.ndim == 1 or arr.ndim == 2, "array must be 1 or 2-D"
    if arr.ndim == 2:
        assert arr.shape[1] == 1, "If array is 2d then second dimension must be 1"
        arr = arr.reshape(arr.shape[0])
    side = int(arr.shape[0] ** 0.5)
    assert arr.shape[0] ** 0.5 == side, "array shape must be square (it is {})".format(arr.shape[0])
    return arr.reshape(side, side)
def accuracy_onehot(labels, preds):
    """
    Compute the accuracy of predictions using one-hot encoded labels.

    :param labels: array of labels with shape (n_examples, n_classes). Must be
        one-hot encoded or the result may be nonsense (this is not checked)
    :param preds: array of predictions with shape (n_examples, n_classes)
    :return: Accuracy as float in [0, 1]
    """
    assert labels.shape[0] == preds.shape[0]
    matches = np.argmax(preds, axis=1) == np.argmax(labels, axis=1)
    return np.sum(matches) / float(labels.shape[0])
def accuracy(labels, preds):
    """
    Compute the accuracy of predictions using integer labels.

    :param labels: array of integer labels with shape (n_examples,)
    :param preds: array of integer predictions, elementwise comparable with labels
    :return: Accuracy as float in [0, 1]
    """
    assert labels.shape[0] == preds.shape[0]
    n_correct = np.sum(preds == labels)
    return n_correct / float(labels.shape[0])
def get_nullify_idxs(original_size, border_size):
    """
    Get the indices of a flattened image that lie within border_size of the
    edge of an image (use to pass to the nullify argument in the RATE function).

    :param original_size: Integer giving the size of the image
    :param border_size: Integer giving the size of the border to be removed.
    :return: Array of (integer) indices that lie in the border.
    """
    assert border_size < original_size / 2, "Border too large to be removed from image of this size"
    mask = np.ones((original_size, original_size), dtype=bool)
    # Interior pixels survive; everything outside the interior is border.
    mask[border_size:-border_size, border_size:-border_size] = False
    return np.where(mask.reshape(-1))[0]
def idx2pixel(idx, image_size):
    """
    Get the 2D pixel location corresponding to the index of its flattened array.

    :param idx: integer index to be converted to a pixel location
    :param image_size: integer giving the size of the (square) image
    :return: i, j: the row and column of the pixel corresponding to idx
    """
    assert idx < image_size ** 2, "index {} too large for image size {}".format(idx, image_size)
    # Row-major layout: row = idx // size, col = idx % size. O(1) arithmetic
    # replaces the original's O(size**2) scratch-array round trip.
    i, j = divmod(idx, image_size)
    return i, j
def sampled_accuracies(pred_proba_samples, labels):
""" | pred_proba_samples: array of predicted probability samples with shape
(n_mc_samples, n_examples, n_classes)/(n_mc_samples, n_examples)
for multiclass/binary classification. (This is the shape returned by BNN_Classifier.predict).
labels: array of one-hot encoded labels with shape (n_examples, n_classes) for non-binary clasification
or (n_examples,1) for binary classification.
Returns:
Array of | Get the sampled accuracies over the entire test set from logit samples.
Args: | random_line_split |
utils.py | 1].plot(range(total_epochs), bnn.fit_history.history["val_{}".format(this_metric)], '-o', label="validation")
axes[i+1].legend()
axes[i+1].set_ylabel(this_metric)
axes[i+1].set_xlabel("epoch")
plt.tight_layout()
return fig, axes
def make_1d2d(arr):
assert arr.ndim == 1
return arr.reshape(arr.shape[0], 1)
def onehot_encode_labels(y):
"""
One-hot encode integer labels y. The number of classes is assumed to be
the largest value in y
Args:
y: array with shape (n_examples,)
Returns:
array with shape (n_examples, n_classes)
"""
return OneHotEncoder(categories="auto", sparse=False).fit_transform(y.reshape(y.shape[0],1))
def get_roc_curves(variable_importances):
"""
Calculate ROC curves
# TODO: set row idx as variable
Args:
variable_importances: A dataframe with the following columns:
- method
- n
- p
- repeat_idx
- variable
"""
roc_curve_df = pd.DataFrame()
base_fpr = np.linspace(0, 1, 101) # Interpolate tpr (y-axis) at these fpr (x-axis) values
for method in variable_importances["method"].unique():
for n in variable_importances["n"].unique():
for p in variable_importances["p"].unique():
|
return roc_curve_df
def load_mnist(fashion, onehot_encode=True, flatten_x=False, crop_x=0, classes=None):
"""
Load the MNIST dataset
Args:
onehot_encode: Boolean indicating whether to one-hot encode training
and test labels (default True)
flatten_x: Boolean indicating whether to flatten the training and
test inputs to 2D arrays with shape (n_examples, image_size**2).
If False, returned inputs have shape (n_examples, image_size, image_size
(default False)
crop_x: Integer controlling the size of the border to be removed from the input
images (default 0, meaning no cropping).
classes: None to include all classes (default). Otherwise include a list of two
integers that will be encoded as 0, 1 in the order they appear.
Returns:
x_train, y_train, x_test, y_test: train and test inputs and labels.
First dimension is always the number of examples
"""
if not fashion:
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
else:
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
def crop(X, crop_size):
assert crop_x < X.shape[1]/2
assert crop_x < X.shape[2]/2
return X[:,crop_size:-crop_size,crop_size:-crop_size]
if crop_x > 0:
x_train = crop(x_train, crop_x)
x_test = crop(x_test, crop_x)
# Flatten to 2d arrays (each example 1d)
def flatten_image(X):
return X.reshape(X.shape[0], X.shape[1]*X.shape[1])
if flatten_x:
x_train = flatten_image(x_train)
x_test = flatten_image(x_test)
if onehot_encode:
y_train = onehot_encode_labels(y_train)
y_test = onehot_encode_labels(y_test)
if classes is not None:
assert len(classes) == 2
c0, c1 = classes
train_idxs_to_keep = np.logical_or(y_train==c0, y_train==c1)
x_train, y_train = x_train[train_idxs_to_keep,:], y_train[train_idxs_to_keep]
test_idxs_to_keep = np.logical_or(y_test==c0, y_test==c1)
x_test, y_test = x_test[test_idxs_to_keep,:], y_test[test_idxs_to_keep]
y_train = (y_train==c1).astype(int)[:,np.newaxis]
y_test = (y_test==c1).astype(int)[:,np.newaxis]
return x_train, y_train, x_test, y_test
def make_square(arr):
"""
Reshape a 1D array (or 2D array with .shape[2]==1) into a square 2D array
"""
assert arr.ndim==1 or arr.ndim==2, "array must be 1 or 2-D"
if arr.ndim==2:
assert arr.shape[1]==1, "If array is 2d then second dimension must be 1"
arr = arr.reshape(arr.shape[0])
assert arr.shape[0]**0.5 == int(arr.shape[0]**0.5), "array shape must be square (it is {})".format(arr.shape[0])
return arr.reshape(int(arr.shape[0]**0.5), int(arr.shape[0]**0.5))
def accuracy_onehot(labels, preds):
"""
Compute the accuracy of predictions using one-hot encoded labels
Args:
labels: array of labels with shape (n_examples, n_classes). Must be one-hot encoded
or result may be nonsense (this is not checked)
preds: array of predictions with shape (n_examples, n_classes)
Returns:
Accuracy as float. Result is in [0,1]
"""
assert labels.shape[0]==preds.shape[0]
return np.sum(np.argmax(preds, axis=1) == np.argmax(labels, axis=1))/float(labels.shape[0])
def accuracy(labels, preds):
"""
Compute the accuracy of predictions using integer labels
Args:
labels: array of labels with shape (n_examples,)
preds: array of predictions with shape (n_examples, n_classes)
Returns:
Accuracy as float. Result is in [0,1]
"""
assert labels.shape[0]==preds.shape[0]
return np.sum(preds==labels)/float(labels.shape[0])
def get_nullify_idxs(original_size, border_size):
"""
Get the indices of a flattened image that lie within border_size of the
edge of an image (use to pass to nullify argument in RATE function)
Args:
original size: Integer giving the size of the image
border_size: Integer giving the size of the border to be removed.
Returns:
Array of (integer) indices that lie in the border.
"""
assert border_size < original_size/2, "Border too large to be removed from image of this size"
tmp = np.zeros((original_size, original_size), dtype=int)
tmp[:border_size,:] = 1
tmp[-border_size:,:] = 1
tmp[:,-border_size:] = 1
tmp[:,:border_size] = 1
tmp = tmp.reshape(tmp.shape[0]*tmp.shape[1])
return np.where(tmp==1)[0]
def idx2pixel(idx, image_size):
"""
Get the 2D pixel location corresponding to the index of its flattened array
Args:
idx: integer index to be converted to pixel location
image_size: integer giving size of the image
Returns:
i, j: the location of the pixel corresponding to idx
"""
assert idx < image_size**2, "index {} too large for image size {}".format(idx, image_size)
tmp = np.zeros(image_size**2)
tmp[idx] = 1
tmp = tmp.reshape(image_size, image_size)
i, j = np.where(tmp==1)
return i[0], j[0]
def sampled_accuracies(pred_proba_samples, labels):
"""
Get the sampled accuracies over the entire test set from logit samples.
Args:
pred_proba_samples: array of predicted probability samples with shape
(n_mc_samples, n_examples, n_classes)/(n_mc_samples, n_examples)
for multiclass/binary classification. (This is the shape returned by BNN_Classifier.predict).
labels: array of one-hot encoded labels with shape (n_examples, n_classes) for non-binary clasification
or (n_examples,1) for binary classification.
Returns:
Array | for repeat_idx in range(np.amax(variable_importances["repeat_idx"].unique()+1)):
df = variable_importances.loc[
(variable_importances["method"]==method) &
(variable_importances["repeat_idx"]==repeat_idx) &
(variable_importances["n"]==n) &
(variable_importances["p"]==p)
]
if len(df)==0:
continue
preds, labels = df["value"].values, df["causal"].values.astype(float)
fpr, tpr, _ = roc_curve(labels, np.abs(preds))
interp_tpr = np.interp(base_fpr, fpr, tpr)
auroc = auc(fpr, tpr)
roc_curve_df = pd.concat([
roc_curve_df,
pd.DataFrame({
"fpr" : base_fpr, "tpr" : interp_tpr, "auc" : auroc,
"method" : method, "n" : n, "p" : p
})
]) | conditional_block |
utils.py | if len(df)==0:
continue
preds, labels = df["value"].values, df["causal"].values.astype(float)
fpr, tpr, _ = roc_curve(labels, np.abs(preds))
interp_tpr = np.interp(base_fpr, fpr, tpr)
auroc = auc(fpr, tpr)
roc_curve_df = pd.concat([
roc_curve_df,
pd.DataFrame({
"fpr" : base_fpr, "tpr" : interp_tpr, "auc" : auroc,
"method" : method, "n" : n, "p" : p
})
])
return roc_curve_df
def load_mnist(fashion, onehot_encode=True, flatten_x=False, crop_x=0, classes=None):
"""
Load the MNIST dataset
Args:
onehot_encode: Boolean indicating whether to one-hot encode training
and test labels (default True)
flatten_x: Boolean indicating whether to flatten the training and
test inputs to 2D arrays with shape (n_examples, image_size**2).
If False, returned inputs have shape (n_examples, image_size, image_size
(default False)
crop_x: Integer controlling the size of the border to be removed from the input
images (default 0, meaning no cropping).
classes: None to include all classes (default). Otherwise include a list of two
integers that will be encoded as 0, 1 in the order they appear.
Returns:
x_train, y_train, x_test, y_test: train and test inputs and labels.
First dimension is always the number of examples
"""
if not fashion:
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
else:
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
def crop(X, crop_size):
assert crop_x < X.shape[1]/2
assert crop_x < X.shape[2]/2
return X[:,crop_size:-crop_size,crop_size:-crop_size]
if crop_x > 0:
x_train = crop(x_train, crop_x)
x_test = crop(x_test, crop_x)
# Flatten to 2d arrays (each example 1d)
def flatten_image(X):
return X.reshape(X.shape[0], X.shape[1]*X.shape[1])
if flatten_x:
x_train = flatten_image(x_train)
x_test = flatten_image(x_test)
if onehot_encode:
y_train = onehot_encode_labels(y_train)
y_test = onehot_encode_labels(y_test)
if classes is not None:
assert len(classes) == 2
c0, c1 = classes
train_idxs_to_keep = np.logical_or(y_train==c0, y_train==c1)
x_train, y_train = x_train[train_idxs_to_keep,:], y_train[train_idxs_to_keep]
test_idxs_to_keep = np.logical_or(y_test==c0, y_test==c1)
x_test, y_test = x_test[test_idxs_to_keep,:], y_test[test_idxs_to_keep]
y_train = (y_train==c1).astype(int)[:,np.newaxis]
y_test = (y_test==c1).astype(int)[:,np.newaxis]
return x_train, y_train, x_test, y_test
def make_square(arr):
"""
Reshape a 1D array (or 2D array with .shape[2]==1) into a square 2D array
"""
assert arr.ndim==1 or arr.ndim==2, "array must be 1 or 2-D"
if arr.ndim==2:
assert arr.shape[1]==1, "If array is 2d then second dimension must be 1"
arr = arr.reshape(arr.shape[0])
assert arr.shape[0]**0.5 == int(arr.shape[0]**0.5), "array shape must be square (it is {})".format(arr.shape[0])
return arr.reshape(int(arr.shape[0]**0.5), int(arr.shape[0]**0.5))
def accuracy_onehot(labels, preds):
"""
Compute the accuracy of predictions using one-hot encoded labels
Args:
labels: array of labels with shape (n_examples, n_classes). Must be one-hot encoded
or result may be nonsense (this is not checked)
preds: array of predictions with shape (n_examples, n_classes)
Returns:
Accuracy as float. Result is in [0,1]
"""
assert labels.shape[0]==preds.shape[0]
return np.sum(np.argmax(preds, axis=1) == np.argmax(labels, axis=1))/float(labels.shape[0])
def accuracy(labels, preds):
"""
Compute the accuracy of predictions using integer labels
Args:
labels: array of labels with shape (n_examples,)
preds: array of predictions with shape (n_examples, n_classes)
Returns:
Accuracy as float. Result is in [0,1]
"""
assert labels.shape[0]==preds.shape[0]
return np.sum(preds==labels)/float(labels.shape[0])
def get_nullify_idxs(original_size, border_size):
"""
Get the indices of a flattened image that lie within border_size of the
edge of an image (use to pass to nullify argument in RATE function)
Args:
original size: Integer giving the size of the image
border_size: Integer giving the size of the border to be removed.
Returns:
Array of (integer) indices that lie in the border.
"""
assert border_size < original_size/2, "Border too large to be removed from image of this size"
tmp = np.zeros((original_size, original_size), dtype=int)
tmp[:border_size,:] = 1
tmp[-border_size:,:] = 1
tmp[:,-border_size:] = 1
tmp[:,:border_size] = 1
tmp = tmp.reshape(tmp.shape[0]*tmp.shape[1])
return np.where(tmp==1)[0]
def idx2pixel(idx, image_size):
"""
Get the 2D pixel location corresponding to the index of its flattened array
Args:
idx: integer index to be converted to pixel location
image_size: integer giving size of the image
Returns:
i, j: the location of the pixel corresponding to idx
"""
assert idx < image_size**2, "index {} too large for image size {}".format(idx, image_size)
tmp = np.zeros(image_size**2)
tmp[idx] = 1
tmp = tmp.reshape(image_size, image_size)
i, j = np.where(tmp==1)
return i[0], j[0]
def sampled_accuracies(pred_proba_samples, labels):
"""
Get the sampled accuracies over the entire test set from logit samples.
Args:
pred_proba_samples: array of predicted probability samples with shape
(n_mc_samples, n_examples, n_classes)/(n_mc_samples, n_examples)
for multiclass/binary classification. (This is the shape returned by BNN_Classifier.predict).
labels: array of one-hot encoded labels with shape (n_examples, n_classes) for non-binary clasification
or (n_examples,1) for binary classification.
Returns:
Array of test accuracies for each round of MC samples with shape (n_mc_samples,)
"""
binary_labels = labels.shape[1]==1
assert pred_proba_samples.shape[1]==labels.shape[0], "Different number of examples in logit samples and labels"
if not binary_labels:
assert pred_proba_samples.shape[2]==labels.shape[1], "Different number of classes in logit samples and labels"
sampled_test_accuracies = np.sum(
np.argmax(pred_proba_samples, axis=2) == np.argmax(labels, axis=1)[:,np.newaxis], axis=1)/float(labels.shape[0])
else:
sampled_test_accuracies = np.sum((pred_proba_samples[:,:]>0.5) == labels[:,0], axis=1)/float(labels.shape[0])
return sampled_test_accuracies
def accuracy_hist(pred_proba_samples, labels):
    """
    Plot a histogram of sampled test-set accuracies.

    Just calls sampled_accuracies then plots the result.

    :return: the array of sampled accuracies
    """
    sampled_acc = sampled_accuracies(pred_proba_samples, labels)
    avg_accuracy = round(np.mean(sampled_acc) * 100, 3)
    print("average accuracy across " + str(pred_proba_samples.shape[0]) + " samples: " + str(avg_accuracy) + "%\n")
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.distplot(100 * sampled_acc, ax=ax, rug=True, kde=False)
    ax.set_xlabel("Test set accuracy (%)", fontsize=30)
    ax.set_ylabel("Frequency density", fontsize=30)
    ax.tick_params("both", labelsize=15)
    return sampled_acc
def rank_array(arr):
    """Rank the entries of a 1-D array from largest (rank 0) downwards."""
    assert arr.ndim == 1
    ranks = arr.shape[0] - rankdata(arr)
    return ranks.astype(int)
def reverse_ranks(rankarr):
    """Flip a 0-based rank array so the best rank becomes the worst and vice versa."""
    highest = rankarr.shape[0] - 1
    return highest - rankarr
def | compute_power | identifier_name | |
utils.py | 1].plot(range(total_epochs), bnn.fit_history.history["val_{}".format(this_metric)], '-o', label="validation")
axes[i+1].legend()
axes[i+1].set_ylabel(this_metric)
axes[i+1].set_xlabel("epoch")
plt.tight_layout()
return fig, axes
def make_1d2d(arr):
assert arr.ndim == 1
return arr.reshape(arr.shape[0], 1)
def onehot_encode_labels(y):
"""
One-hot encode integer labels y. The number of classes is assumed to be
the largest value in y
Args:
y: array with shape (n_examples,)
Returns:
array with shape (n_examples, n_classes)
"""
return OneHotEncoder(categories="auto", sparse=False).fit_transform(y.reshape(y.shape[0],1))
def get_roc_curves(variable_importances):
"""
Calculate ROC curves
# TODO: set row idx as variable
Args:
variable_importances: A dataframe with the following columns:
- method
- n
- p
- repeat_idx
- variable
"""
roc_curve_df = pd.DataFrame()
base_fpr = np.linspace(0, 1, 101) # Interpolate tpr (y-axis) at these fpr (x-axis) values
for method in variable_importances["method"].unique():
for n in variable_importances["n"].unique():
for p in variable_importances["p"].unique():
for repeat_idx in range(np.amax(variable_importances["repeat_idx"].unique()+1)):
df = variable_importances.loc[
(variable_importances["method"]==method) &
(variable_importances["repeat_idx"]==repeat_idx) &
(variable_importances["n"]==n) &
(variable_importances["p"]==p)
]
if len(df)==0:
continue
preds, labels = df["value"].values, df["causal"].values.astype(float)
fpr, tpr, _ = roc_curve(labels, np.abs(preds))
interp_tpr = np.interp(base_fpr, fpr, tpr)
auroc = auc(fpr, tpr)
roc_curve_df = pd.concat([
roc_curve_df,
pd.DataFrame({
"fpr" : base_fpr, "tpr" : interp_tpr, "auc" : auroc,
"method" : method, "n" : n, "p" : p
})
])
return roc_curve_df
def load_mnist(fashion, onehot_encode=True, flatten_x=False, crop_x=0, classes=None):
"""
Load the MNIST dataset
Args:
onehot_encode: Boolean indicating whether to one-hot encode training
and test labels (default True)
flatten_x: Boolean indicating whether to flatten the training and
test inputs to 2D arrays with shape (n_examples, image_size**2).
If False, returned inputs have shape (n_examples, image_size, image_size
(default False)
crop_x: Integer controlling the size of the border to be removed from the input
images (default 0, meaning no cropping).
classes: None to include all classes (default). Otherwise include a list of two
integers that will be encoded as 0, 1 in the order they appear.
Returns:
x_train, y_train, x_test, y_test: train and test inputs and labels.
First dimension is always the number of examples
"""
if not fashion:
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
else:
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
def crop(X, crop_size):
|
if crop_x > 0:
x_train = crop(x_train, crop_x)
x_test = crop(x_test, crop_x)
# Flatten to 2d arrays (each example 1d)
def flatten_image(X):
return X.reshape(X.shape[0], X.shape[1]*X.shape[1])
if flatten_x:
x_train = flatten_image(x_train)
x_test = flatten_image(x_test)
if onehot_encode:
y_train = onehot_encode_labels(y_train)
y_test = onehot_encode_labels(y_test)
if classes is not None:
assert len(classes) == 2
c0, c1 = classes
train_idxs_to_keep = np.logical_or(y_train==c0, y_train==c1)
x_train, y_train = x_train[train_idxs_to_keep,:], y_train[train_idxs_to_keep]
test_idxs_to_keep = np.logical_or(y_test==c0, y_test==c1)
x_test, y_test = x_test[test_idxs_to_keep,:], y_test[test_idxs_to_keep]
y_train = (y_train==c1).astype(int)[:,np.newaxis]
y_test = (y_test==c1).astype(int)[:,np.newaxis]
return x_train, y_train, x_test, y_test
def make_square(arr):
"""
Reshape a 1D array (or 2D array with .shape[2]==1) into a square 2D array
"""
assert arr.ndim==1 or arr.ndim==2, "array must be 1 or 2-D"
if arr.ndim==2:
assert arr.shape[1]==1, "If array is 2d then second dimension must be 1"
arr = arr.reshape(arr.shape[0])
assert arr.shape[0]**0.5 == int(arr.shape[0]**0.5), "array shape must be square (it is {})".format(arr.shape[0])
return arr.reshape(int(arr.shape[0]**0.5), int(arr.shape[0]**0.5))
def accuracy_onehot(labels, preds):
"""
Compute the accuracy of predictions using one-hot encoded labels
Args:
labels: array of labels with shape (n_examples, n_classes). Must be one-hot encoded
or result may be nonsense (this is not checked)
preds: array of predictions with shape (n_examples, n_classes)
Returns:
Accuracy as float. Result is in [0,1]
"""
assert labels.shape[0]==preds.shape[0]
return np.sum(np.argmax(preds, axis=1) == np.argmax(labels, axis=1))/float(labels.shape[0])
def accuracy(labels, preds):
"""
Compute the accuracy of predictions using integer labels
Args:
labels: array of labels with shape (n_examples,)
preds: array of predictions with shape (n_examples, n_classes)
Returns:
Accuracy as float. Result is in [0,1]
"""
assert labels.shape[0]==preds.shape[0]
return np.sum(preds==labels)/float(labels.shape[0])
def get_nullify_idxs(original_size, border_size):
"""
Get the indices of a flattened image that lie within border_size of the
edge of an image (use to pass to nullify argument in RATE function)
Args:
original size: Integer giving the size of the image
border_size: Integer giving the size of the border to be removed.
Returns:
Array of (integer) indices that lie in the border.
"""
assert border_size < original_size/2, "Border too large to be removed from image of this size"
tmp = np.zeros((original_size, original_size), dtype=int)
tmp[:border_size,:] = 1
tmp[-border_size:,:] = 1
tmp[:,-border_size:] = 1
tmp[:,:border_size] = 1
tmp = tmp.reshape(tmp.shape[0]*tmp.shape[1])
return np.where(tmp==1)[0]
def idx2pixel(idx, image_size):
"""
Get the 2D pixel location corresponding to the index of its flattened array
Args:
idx: integer index to be converted to pixel location
image_size: integer giving size of the image
Returns:
i, j: the location of the pixel corresponding to idx
"""
assert idx < image_size**2, "index {} too large for image size {}".format(idx, image_size)
tmp = np.zeros(image_size**2)
tmp[idx] = 1
tmp = tmp.reshape(image_size, image_size)
i, j = np.where(tmp==1)
return i[0], j[0]
def sampled_accuracies(pred_proba_samples, labels):
"""
Get the sampled accuracies over the entire test set from logit samples.
Args:
pred_proba_samples: array of predicted probability samples with shape
(n_mc_samples, n_examples, n_classes)/(n_mc_samples, n_examples)
for multiclass/binary classification. (This is the shape returned by BNN_Classifier.predict).
labels: array of one-hot encoded labels with shape (n_examples, n_classes) for non-binary clasification
or (n_examples,1) for binary classification.
Returns:
Array | assert crop_x < X.shape[1]/2
assert crop_x < X.shape[2]/2
return X[:,crop_size:-crop_size,crop_size:-crop_size] | identifier_body |
analysis.py | :param data: Data in which to detect outliers. Take care that n_samples > n_features ** 2 .
:type data: pandas.DataFrame
:param contamination: The amount of contamination of the data set, i.e. the proportion of outliers in the data set.
Range is (0, 0.5).
:type contamination: float
:returns: Decision on each row if it's an outlier. And contour array for drawing ellipse in graph.
:rtype: tuple[numpy.ndarray, numpy.ndarray]
"""
robust_cov = EllipticEnvelope(support_fraction=1., contamination=contamination)
outlyingness = robust_cov.fit_predict(data)
decision = (outlyingness-1).astype(bool)
# Visualisation.
xx, yy = np.meshgrid(np.linspace(0, 100, 101),
np.linspace(0, 100, 101))
z = robust_cov.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
return decision, z
#ToDo: remove blocks/sessions with sum mean way off.
#ToDo: remove sessions with less than 10 trials in any block.
def get_pca_data(dataframe):
""" Conduct Principal Component Analysis on 2D dataset.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:return: Explained variance, components and means.
:rtype: pandas.DataFrame
"""
# We don't reduce dimensionality, but overlay the 2 principal components in 2D.
pca = PCA(n_components=2)
x = dataframe[['df1', 'df2']].values
try:
# df1 and df2 have the same scale. No need to standardize. Standardizing might actually distort PCA here.
pca.fit(x)
except ValueError:
# Return empty.
df = pd.DataFrame(columns=['var_expl', 'var_expl_ratio', 'x', 'y', 'meanx', 'meany'])
else:
df = pd.DataFrame({'var_expl': pca.explained_variance_.T,
'var_expl_ratio': pca.explained_variance_ratio_.T * 100, # In percent
'x': pca.components_[:, 0],
'y': pca.components_[:, 1],
'meanx': pca.mean_[0],
'meany': pca.mean_[1],
},
index=[1, 2] # For designating principal components.
)
df.index.rename('PC', inplace=True)
return df
def get_pca_vectors(dataframe):
""" Get principal components as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Tabular PCA data.
:type dataframe: pandas.DataFrame
:return: Principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
vectors = list()
# Use the "components" to define the direction of the vectors,
# and the "explained variance" to define the squared-length of the vectors.
for idx, row in dataframe.iterrows():
|
return vectors
def get_pca_vectors_by(dataframe, by=None):
""" Get principal components for each group as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:param by: Column to group data by and return 2 vectors for each group.
:type by: str|list
:return: list of principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
vector_pairs = list()
if by is None:
pca_df = get_pca_data(dataframe)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
else:
grouped = dataframe.groupby(by)
for group, data in grouped:
pca_df = get_pca_data(data)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
# ToDo: Augment by groupby criteria.
return vector_pairs
def get_interior_angle(vec0, vec1):
""" Get the smaller angle between vec0 and vec1 in degrees.
:param vec0: Vector 0
:type vec0: numpy.ndarray
:param vec1: Vector 1
:type vec1: numpy.ndarray
:return: Interior angle between vector0 and vector1 in degrees.
:rtype: float
"""
angle = np.math.atan2(np.linalg.det([vec0, vec1]), np.dot(vec0, vec1))
degrees = abs(np.degrees(angle))
# Min and max should be between 0° an 90°.
degrees = min(degrees, 180.0 - degrees)
return degrees
def get_ucm_vec(p0=None, p1=None):
""" Returns 2D unit vector in direction of uncontrolled manifold. """
if p0 is None:
p0 = np.array([25, 100])
if p1 is None:
p1 = np.array([100, 25])
parallel = p1 - p0
parallel = parallel / np.linalg.norm(parallel) # Normalize.
return parallel
def get_orthogonal_vec2d(vec):
""" Get a vector that is orthogonal to vec and has same length.
:param vec: 2D Vector
:return: 2D Vector orthogonal to vec.
:rtype: numpy.ndarray
"""
ortho = np.array([-vec[1], vec[0]])
return ortho
def get_pc_ucm_angles(dataframe, vec_ucm):
""" Computes the interior angles between pca vectors and ucm parallel/orthogonal vectors.
:param dataframe: PCA data .
:type dataframe: pandas.DataFrame
:param vec_ucm: Vector parallel to UCM.
:type vec_ucm: numpy.ndarray
:return: Each angle between principal components and UCM parallel and orthogonal vector.
:rtype: pandas.DataFrame
"""
vec_ucm_ortho = get_orthogonal_vec2d(vec_ucm)
df_angles = pd.DataFrame(columns=['parallel', 'orthogonal'])
for idx, row in dataframe.iterrows():
angle_parallel = get_interior_angle(vec_ucm, row[['x', 'y']])
angle_ortho = get_interior_angle(vec_ucm_ortho, row[['x', 'y']])
df_angles.loc[idx] = [angle_parallel, angle_ortho]
df_angles[['parallel', 'orthogonal']] = df_angles[['parallel', 'orthogonal']].astype(float)
df_angles.insert(0, 'PC', dataframe['PC'])
return df_angles
def get_projections(points, vec_ucm):
""" Returns coefficients a and b in x = a*vec_ucm + b*vec_ortho with x being the difference of a data point and
the mean.
Projection is computed using a transformation matrix with ucm parallel and orthogonal vectors as basis.
:param points: Data of 2D points.
:type points: pandas.Dataframe
:param vec_ucm: Unit vector parallel to uncontrolled manifold.
:type vec_ucm: numpy.ndarray
:return: Array with projected lengths onto vector parallel to UCM as 'a', onto vector orthogonal to UCM as 'b'.
:rtype: pandas.Dataframe
"""
# Get the vector orthogonal to the UCM.
vec_ortho = get_orthogonal_vec2d(vec_ucm)
# Build a transformation matrix with vec_ucm and vec_ortho as new basis vectors.
A = np.vstack((vec_ucm, vec_ortho)).T # A is not an orthogonal projection matrix (A=A.T), but this works.
# Centralize the data. Analogous to calculating across trials deviation from average for each time step.
diffs = points - points.mean()
# For computational efficiency we shortcut the projection calculation with matrix multiplication.
# The actual math behind it:
# coeffs = vec_ucm.T@diff/np.sqrt(vec_ucm.T@vec_ucm), vec_ortho.T@diff/np.sqrt(vec_ortho.T@vec_ortho)
# Biased variance (normalized by (n-1)) of projection onto UCM vector:
# var_ucm = vec_ucm.T@np.cov(diffs, bias=True, rowvar=False)@vec_ucm/(vec_ucm.T@vec_ucm) # Rayleigh fraction.
coeffs = diffs@A
coeffs.columns = ['parallel', 'orthogonal']
return coeffs
def get_synergy_indices(variances, n=2, d=1):
"""
n: Number of degrees of freedom. In our case 2.
d: Dimensionality of performance variable. In our case a scalar (1).
Vucm = 1/N * 1/(n - d) * sum(ProjUCM**2)
Vort = | v = row[['x', 'y']].values * np.sqrt(row['var_expl']) * 3 # Scale up for better visibility.
mean = row[['meanx', 'meany']].values
mean_offset = (mean, mean + v)
vectors.append(mean_offset) | conditional_block |
analysis.py | frame with columns mean, var, count and column names of data as rows.
:rtype: pandas.Dataframe
"""
# There's a bug in pandas 1.0.4 where you can't use custom numpy functions in agg anymore (ValueError).
# Note that the variance of projections is usually divided by (n-d) for Vucm and d for Vort. Both are 1 in our case.
# Pandas default var returns unbiased population variance /(n-1). Doesn't make a difference for synergy indices.
f_var = lambda series: series.var(ddof=0)
f_var.__name__ = 'variance' # Column name gets function name.
f_avg = lambda series: series.abs().mean()
f_avg.__name__ = 'absolute average'
# When there're no data, return empty DataFrame with columns.
if data.empty:
if by:
data.set_index(by, drop=True, inplace=True)
col_idx = pd.MultiIndex.from_product([data.columns, [f_avg.__name__, 'mean', f_var.__name__]])
stats = pd.DataFrame(None, index=data.index, columns=col_idx)
stats['count'] = None
return stats
if not by:
stats = data.agg([f_avg, 'mean', f_var, 'count']).T
stats['count'] = stats['count'].astype(int)
else:
grouped = data.groupby(by)
stats = grouped.agg([f_avg, 'mean', f_var])
stats['count'] = grouped.size()
stats.dropna(inplace=True)
return stats
def get_statistics(df_trials, df_proj):
""" Calculate descriptive statistics for key values of the anaylsis.
:param df_trials: Data from joined table on trials.
:type df_trials: pandas.DataFrame
:param df_proj: Projections onto UCM and its orthogonal space.
:type df_proj: pandas.DataFrame
:return: Descriptive statistics and synergy indices.
:rtype: pandas.DataFrame
"""
groupers = ['user', 'session', 'condition', 'block', 'task']
try:
# Get only those trials we have the projections for, in the same order.
df_trials = df_trials.iloc[df_proj.index]
df_trials[groupers] = df_trials[groupers].astype('category')
except (KeyError, ValueError):
df_proj_stats = get_descriptive_stats(pd.DataFrame(columns=df_proj.columns))
df_dof_stats = get_descriptive_stats(pd.DataFrame(columns=df_trials.columns))
cov = pd.DataFrame(columns=[('df1,df2 covariance', '')])
else:
df_proj[groupers] = df_trials[groupers]
# Get statistic characteristics of absolute lengths.
df_proj_stats = get_descriptive_stats(df_proj, by=groupers)
# Clean-up to match data on degrees of freedom.
df_proj_stats.dropna(inplace=True)
df_dof_stats = get_descriptive_stats(df_trials[groupers + ['df1', 'df2', 'sum']], by=groupers)
# For degrees of freedom absolute average is the same as the mean, since there are no negative values.
df_dof_stats.drop('absolute average', axis='columns', level=1, inplace=True)
# Get covariance between degrees of freedom.
cov = df_trials.groupby(groupers).apply(lambda x: np.cov(x[['df1', 'df2']].T, ddof=0)[0, 1])
try:
cov = cov.to_frame(('df1,df2 covariance', '')) # MultiIndex.
except AttributeError: # In case cov is an empty Dataframe.
cov = pd.DataFrame(columns=pd.MultiIndex.from_tuples([('df1,df2 covariance', '')]))
# We now have 1 count column too many, since the projection statistics already has the identical column.
df_dof_stats.drop('count', axis='columns', level=0, inplace=True)
# For projections the mean is 0, since projections are from deviations from the mean. So we don't need to show it.
df_proj_stats.drop('mean', axis='columns', level=1, inplace=True)
# Get synergy indices based on projection variances we just calculated.
df_synergies = get_synergy_indices(df_proj_stats.xs('variance', level=1, axis='columns'))
# Before we merge dataframes, give this one a Multiindex, too.
df_synergies.columns = pd.MultiIndex.from_product([df_synergies.columns, ['']])
# Join the 3 statistics to be displayed in a single table.
df = pd.concat((df_dof_stats, cov, df_proj_stats, df_synergies), axis='columns')
return df
def wilcoxon_rank_test(data):
w, p = wilcoxon(data['parallel'], data['orthogonal'], alternative='greater')
return p < 0.05, w, p
def wide_to_long(df, stubs, suffixes, j):
""" Transforms a dataframe to long format, where the stubs are melted into a single column with name j and suffixes
into value columns. Filters for all columns that are a stubs+suffixes combination.
Keeps 'user', 'task' as id_vars. When an error is encountered an emtpy dataframe is returned.
:param df: Data in wide/mixed format.
:type df: pandas.DataFrame
:param stubs: First part of a column name. These names will be the values of the new column j.
:type stubs: list[str]
:param suffixes: Second part of a column name. These will be the new columns holding the respective values.
:type suffixes: str|list[str]
:param j: Name for new column containing stubs.
:type j: str
:return: Filtered Dataframe in long format.
:rtype: pandas.Dataframe
"""
if isinstance(suffixes, str):
suffixes = [suffixes]
# We want all stubs+suffix combinations as columns.
val_cols = [" ".join(x) for x in itertools.product(stubs, suffixes)]
try:
# Filter for data we want to plot.
df = df[['user', 'condition', 'block', 'task', *val_cols]]
# Reverse stub and suffix for long format. We want the measurements as columns, not the categories.
df.columns = [" ".join(x.split(" ")[::-1]) for x in df.columns]
long_df = pd.wide_to_long(df=df, stubnames=suffixes, i=['user', 'condition', 'block', 'task'],
j=j, sep=" ", suffix=f'(!?{"|".join(stubs)})')
long_df.reset_index(inplace=True)
except (KeyError, ValueError):
long_df = pd.DataFrame(columns=['user', 'condition', 'block', 'task', j, *suffixes])
long_df[['user', 'condition', 'block', 'task']] = long_df[['user', 'condition', 'block', 'task']].astype('category')
return long_df
def normality_test(df, columns, multivariate=False):
""" Tests whether there is considerable deviation from a normal distribution.
If no deviation could be detected, we don't know much about the distribution.
Independent normality tests use the Shapiro-Wilk method. Multivariate tests use the Henze-Zirkler multivariate
normality test.
:param df: Aggregated data containing Fisher-z-transformed synergy index.
:type df: pandas.DataFrame
:param columns: Which columns to test for normality deviation.
:type columns: list[str]
:param multivariate: Do multivariate normality testing?
:type multivariate: bool
:return: Normality test results.
:rtype: pandas.DataFrame
"""
if multivariate:
# Multivariate testing.
is_normal, p = df.groupby(['user', 'block'])[columns].apply(pg.multivariate_normality)
res = df.groupby(['user', 'block'])[['df1', 'df2']].apply(pg.multivariate_normality).apply(pd.Series)\
.rename(columns={0: 'normal', 1: 'p'})
else:
# We would want to minimize type II error rate, risk of not rejecting the null when it's false.
res = df.groupby(['user', 'block'])[columns].apply(pg.normality).unstack(level=2) # Shapiro-Wilk tests.
return res
def mixed_anova_synergy_index_z(dataframe):
"" | " 3 x (3) Two-way split-plot ANOVA with between-factor condition and within-factor block.
:param dataframe: Aggregated data containing Fisher-z-transformed synergy index.
:type dataframe: pandas.DataFrame
:return: mixed-design ANOVA results.
:rtype: pandas.DataFrame
"""
if dataframe['condition'].nunique() <= 1:
raise ValueError("ERROR: Between factor has insufficient number of levels.")
#ToDo: If there's only 1 condition, run ANOVA with one within factor instead.
if dataframe['block'].nunique() <= 1:
raise ValueError("ERROR: Between factor has insufficient number of levels.")
#ToDo: If there's only 1 block, run ANOVA with one between factor instead.
aov = pg.mixed_anova(data=dataframe, dv='dVz', within='block', subject='user', between='condition', correction=True)
return aov
| identifier_body | |
analysis.py | try:
# df1 and df2 have the same scale. No need to standardize. Standardizing might actually distort PCA here.
pca.fit(x)
except ValueError:
# Return empty.
df = pd.DataFrame(columns=['var_expl', 'var_expl_ratio', 'x', 'y', 'meanx', 'meany'])
else:
df = pd.DataFrame({'var_expl': pca.explained_variance_.T,
'var_expl_ratio': pca.explained_variance_ratio_.T * 100, # In percent
'x': pca.components_[:, 0],
'y': pca.components_[:, 1],
'meanx': pca.mean_[0],
'meany': pca.mean_[1],
},
index=[1, 2] # For designating principal components.
)
df.index.rename('PC', inplace=True)
return df
def get_pca_vectors(dataframe):
""" Get principal components as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Tabular PCA data.
:type dataframe: pandas.DataFrame
:return: Principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
vectors = list()
# Use the "components" to define the direction of the vectors,
# and the "explained variance" to define the squared-length of the vectors.
for idx, row in dataframe.iterrows():
v = row[['x', 'y']].values * np.sqrt(row['var_expl']) * 3 # Scale up for better visibility.
mean = row[['meanx', 'meany']].values
mean_offset = (mean, mean + v)
vectors.append(mean_offset)
return vectors
def get_pca_vectors_by(dataframe, by=None):
""" Get principal components for each group as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:param by: Column to group data by and return 2 vectors for each group.
:type by: str|list
:return: list of principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
vector_pairs = list()
if by is None:
pca_df = get_pca_data(dataframe)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
else:
grouped = dataframe.groupby(by)
for group, data in grouped:
pca_df = get_pca_data(data)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
# ToDo: Augment by groupby criteria.
return vector_pairs
def get_interior_angle(vec0, vec1):
""" Get the smaller angle between vec0 and vec1 in degrees.
:param vec0: Vector 0
:type vec0: numpy.ndarray
:param vec1: Vector 1
:type vec1: numpy.ndarray
:return: Interior angle between vector0 and vector1 in degrees.
:rtype: float
"""
angle = np.math.atan2(np.linalg.det([vec0, vec1]), np.dot(vec0, vec1))
degrees = abs(np.degrees(angle))
# Min and max should be between 0° an 90°.
degrees = min(degrees, 180.0 - degrees)
return degrees
def get_ucm_vec(p0=None, p1=None):
""" Returns 2D unit vector in direction of uncontrolled manifold. """
if p0 is None:
p0 = np.array([25, 100])
if p1 is None:
p1 = np.array([100, 25])
parallel = p1 - p0
parallel = parallel / np.linalg.norm(parallel) # Normalize.
return parallel
def get_orthogonal_vec2d(vec):
""" Get a vector that is orthogonal to vec and has same length.
:param vec: 2D Vector
:return: 2D Vector orthogonal to vec.
:rtype: numpy.ndarray
"""
ortho = np.array([-vec[1], vec[0]])
return ortho
def get_pc_ucm_angles(dataframe, vec_ucm):
""" Computes the interior angles between pca vectors and ucm parallel/orthogonal vectors.
:param dataframe: PCA data .
:type dataframe: pandas.DataFrame
:param vec_ucm: Vector parallel to UCM.
:type vec_ucm: numpy.ndarray
:return: Each angle between principal components and UCM parallel and orthogonal vector.
:rtype: pandas.DataFrame
"""
vec_ucm_ortho = get_orthogonal_vec2d(vec_ucm)
df_angles = pd.DataFrame(columns=['parallel', 'orthogonal'])
for idx, row in dataframe.iterrows():
angle_parallel = get_interior_angle(vec_ucm, row[['x', 'y']])
angle_ortho = get_interior_angle(vec_ucm_ortho, row[['x', 'y']])
df_angles.loc[idx] = [angle_parallel, angle_ortho]
df_angles[['parallel', 'orthogonal']] = df_angles[['parallel', 'orthogonal']].astype(float)
df_angles.insert(0, 'PC', dataframe['PC'])
return df_angles
def get_projections(points, vec_ucm):
""" Returns coefficients a and b in x = a*vec_ucm + b*vec_ortho with x being the difference of a data point and
the mean.
Projection is computed using a transformation matrix with ucm parallel and orthogonal vectors as basis.
:param points: Data of 2D points.
:type points: pandas.Dataframe
:param vec_ucm: Unit vector parallel to uncontrolled manifold.
:type vec_ucm: numpy.ndarray
:return: Array with projected lengths onto vector parallel to UCM as 'a', onto vector orthogonal to UCM as 'b'.
:rtype: pandas.Dataframe
"""
# Get the vector orthogonal to the UCM.
vec_ortho = get_orthogonal_vec2d(vec_ucm)
# Build a transformation matrix with vec_ucm and vec_ortho as new basis vectors.
A = np.vstack((vec_ucm, vec_ortho)).T # A is not an orthogonal projection matrix (A=A.T), but this works.
# Centralize the data. Analogous to calculating across trials deviation from average for each time step.
diffs = points - points.mean()
# For computational efficiency we shortcut the projection calculation with matrix multiplication.
# The actual math behind it:
# coeffs = vec_ucm.T@diff/np.sqrt(vec_ucm.T@vec_ucm), vec_ortho.T@diff/np.sqrt(vec_ortho.T@vec_ortho)
# Biased variance (normalized by (n-1)) of projection onto UCM vector:
# var_ucm = vec_ucm.T@np.cov(diffs, bias=True, rowvar=False)@vec_ucm/(vec_ucm.T@vec_ucm) # Rayleigh fraction.
coeffs = diffs@A
coeffs.columns = ['parallel', 'orthogonal']
return coeffs
def get_synergy_indices(variances, n=2, d=1):
"""
n: Number of degrees of freedom. In our case 2.
d: Dimensionality of performance variable. In our case a scalar (1).
Vucm = 1/N * 1/(n - d) * sum(ProjUCM**2)
Vort = 1/N * 1/(d) * sum(ProjORT**2)
Vtotal = 1/n * (d * Vort + (n-d) * Vucm) # Anull the weights on Vucm and Vort for the sum.
dV = (Vucm - Vort) / Vtotal
dV = n*(Vucm - Vort) / ((n - d)*Vucm + d*Vort)
Zhang (2008) without weighting Vucm, Vort and Vtotal first:
dV = n * (Vucm/(n - d) - Vort/d) / (Vucm + Vort)
dVz = 0.5*ln((n / d + dV) / (n / ((n - d) - dV))
dVz = 0.5*ln((2 + dV) / (2 - dV))
Reference: https://www.frontiersin.org/articles/10.3389/fnagi.2019.00032/full#supplementary-material
:param variances: Unweighted variances of parallel and orthogonal projections to the UCM.
:type variances: pandas.DataFrame
:param n: Number of degrees of freedom. Defaults to 2.
:type: int
:param d: Dimensionality of performance variable. Defaults to 1.
:type d: int
:returns: Synergy index, Fisher's z-transformed synergy index.
:rtype: pandas.DataFrame
""" | try:
dV = n * (variances['parallel']/(n-d) - variances['orthogonal']/d) \ | random_line_split | |
analysis.py | :param data: Data in which to detect outliers. Take care that n_samples > n_features ** 2 .
:type data: pandas.DataFrame
:param contamination: The amount of contamination of the data set, i.e. the proportion of outliers in the data set.
Range is (0, 0.5).
:type contamination: float
:returns: Decision on each row if it's an outlier. And contour array for drawing ellipse in graph.
:rtype: tuple[numpy.ndarray, numpy.ndarray]
"""
robust_cov = EllipticEnvelope(support_fraction=1., contamination=contamination)
outlyingness = robust_cov.fit_predict(data)
decision = (outlyingness-1).astype(bool)
# Visualisation.
xx, yy = np.meshgrid(np.linspace(0, 100, 101),
np.linspace(0, 100, 101))
z = robust_cov.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
return decision, z
#ToDo: remove blocks/sessions with sum mean way off.
#ToDo: remove sessions with less than 10 trials in any block.
def get_pca_data(dataframe):
""" Conduct Principal Component Analysis on 2D dataset.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:return: Explained variance, components and means.
:rtype: pandas.DataFrame
"""
# We don't reduce dimensionality, but overlay the 2 principal components in 2D.
pca = PCA(n_components=2)
x = dataframe[['df1', 'df2']].values
try:
# df1 and df2 have the same scale. No need to standardize. Standardizing might actually distort PCA here.
pca.fit(x)
except ValueError:
# Return empty.
df = pd.DataFrame(columns=['var_expl', 'var_expl_ratio', 'x', 'y', 'meanx', 'meany'])
else:
df = pd.DataFrame({'var_expl': pca.explained_variance_.T,
'var_expl_ratio': pca.explained_variance_ratio_.T * 100, # In percent
'x': pca.components_[:, 0],
'y': pca.components_[:, 1],
'meanx': pca.mean_[0],
'meany': pca.mean_[1],
},
index=[1, 2] # For designating principal components.
)
df.index.rename('PC', inplace=True)
return df
def | (dataframe):
""" Get principal components as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Tabular PCA data.
:type dataframe: pandas.DataFrame
:return: Principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
vectors = list()
# Use the "components" to define the direction of the vectors,
# and the "explained variance" to define the squared-length of the vectors.
for idx, row in dataframe.iterrows():
v = row[['x', 'y']].values * np.sqrt(row['var_expl']) * 3 # Scale up for better visibility.
mean = row[['meanx', 'meany']].values
mean_offset = (mean, mean + v)
vectors.append(mean_offset)
return vectors
def get_pca_vectors_by(dataframe, by=None):
""" Get principal components for each group as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:param by: Column to group data by and return 2 vectors for each group.
:type by: str|list
:return: list of principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
vector_pairs = list()
if by is None:
pca_df = get_pca_data(dataframe)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
else:
grouped = dataframe.groupby(by)
for group, data in grouped:
pca_df = get_pca_data(data)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
# ToDo: Augment by groupby criteria.
return vector_pairs
def get_interior_angle(vec0, vec1):
""" Get the smaller angle between vec0 and vec1 in degrees.
:param vec0: Vector 0
:type vec0: numpy.ndarray
:param vec1: Vector 1
:type vec1: numpy.ndarray
:return: Interior angle between vector0 and vector1 in degrees.
:rtype: float
"""
angle = np.math.atan2(np.linalg.det([vec0, vec1]), np.dot(vec0, vec1))
degrees = abs(np.degrees(angle))
# Min and max should be between 0° an 90°.
degrees = min(degrees, 180.0 - degrees)
return degrees
def get_ucm_vec(p0=None, p1=None):
""" Returns 2D unit vector in direction of uncontrolled manifold. """
if p0 is None:
p0 = np.array([25, 100])
if p1 is None:
p1 = np.array([100, 25])
parallel = p1 - p0
parallel = parallel / np.linalg.norm(parallel) # Normalize.
return parallel
def get_orthogonal_vec2d(vec):
""" Get a vector that is orthogonal to vec and has same length.
:param vec: 2D Vector
:return: 2D Vector orthogonal to vec.
:rtype: numpy.ndarray
"""
ortho = np.array([-vec[1], vec[0]])
return ortho
def get_pc_ucm_angles(dataframe, vec_ucm):
""" Computes the interior angles between pca vectors and ucm parallel/orthogonal vectors.
:param dataframe: PCA data .
:type dataframe: pandas.DataFrame
:param vec_ucm: Vector parallel to UCM.
:type vec_ucm: numpy.ndarray
:return: Each angle between principal components and UCM parallel and orthogonal vector.
:rtype: pandas.DataFrame
"""
vec_ucm_ortho = get_orthogonal_vec2d(vec_ucm)
df_angles = pd.DataFrame(columns=['parallel', 'orthogonal'])
for idx, row in dataframe.iterrows():
angle_parallel = get_interior_angle(vec_ucm, row[['x', 'y']])
angle_ortho = get_interior_angle(vec_ucm_ortho, row[['x', 'y']])
df_angles.loc[idx] = [angle_parallel, angle_ortho]
df_angles[['parallel', 'orthogonal']] = df_angles[['parallel', 'orthogonal']].astype(float)
df_angles.insert(0, 'PC', dataframe['PC'])
return df_angles
def get_projections(points, vec_ucm):
""" Returns coefficients a and b in x = a*vec_ucm + b*vec_ortho with x being the difference of a data point and
the mean.
Projection is computed using a transformation matrix with ucm parallel and orthogonal vectors as basis.
:param points: Data of 2D points.
:type points: pandas.Dataframe
:param vec_ucm: Unit vector parallel to uncontrolled manifold.
:type vec_ucm: numpy.ndarray
:return: Array with projected lengths onto vector parallel to UCM as 'a', onto vector orthogonal to UCM as 'b'.
:rtype: pandas.Dataframe
"""
# Get the vector orthogonal to the UCM.
vec_ortho = get_orthogonal_vec2d(vec_ucm)
# Build a transformation matrix with vec_ucm and vec_ortho as new basis vectors.
A = np.vstack((vec_ucm, vec_ortho)).T # A is not an orthogonal projection matrix (A=A.T), but this works.
# Centralize the data. Analogous to calculating across trials deviation from average for each time step.
diffs = points - points.mean()
# For computational efficiency we shortcut the projection calculation with matrix multiplication.
# The actual math behind it:
# coeffs = vec_ucm.T@diff/np.sqrt(vec_ucm.T@vec_ucm), vec_ortho.T@diff/np.sqrt(vec_ortho.T@vec_ortho)
# Biased variance (normalized by (n-1)) of projection onto UCM vector:
# var_ucm = vec_ucm.T@np.cov(diffs, bias=True, rowvar=False)@vec_ucm/(vec_ucm.T@vec_ucm) # Rayleigh fraction.
coeffs = diffs@A
coeffs.columns = ['parallel', 'orthogonal']
return coeffs
def get_synergy_indices(variances, n=2, d=1):
"""
n: Number of degrees of freedom. In our case 2.
d: Dimensionality of performance variable. In our case a scalar (1).
Vucm = 1/N * 1/(n - d) * sum(ProjUCM**2)
Vort = | get_pca_vectors | identifier_name |
system_information.rs | fn parts(&self) -> &'a UndefinedStruct {
self.parts
}
}
impl<'a> SMBiosSystemInformation<'a> {
    /// Manufacturer
    ///
    /// String at structure offset 0x04.
    pub fn manufacturer(&self) -> Option<String> {
        self.parts.get_field_string(0x04)
    }

    /// Product name
    ///
    /// String at structure offset 0x05.
    pub fn product_name(&self) -> Option<String> {
        self.parts.get_field_string(0x05)
    }

    /// Version
    ///
    /// String at structure offset 0x06.
    pub fn version(&self) -> Option<String> {
        self.parts.get_field_string(0x06)
    }

    /// Serial number
    ///
    /// String at structure offset 0x07.
    pub fn serial_number(&self) -> Option<String> {
        self.parts.get_field_string(0x07)
    }

    /// System UUID
    ///
    /// Reads the 16-byte field at offsets 0x08..0x18. `None` when the
    /// structure is too short to contain the field.
    pub fn uuid(&self) -> Option<SystemUuidData> {
        self.parts
            .get_field_data(0x08, 0x18)
            // The range above is exactly 0x10 bytes, so the conversion
            // to `[u8; 0x10]` cannot fail.
            .map(|raw| SystemUuidData::try_from(raw).expect("A GUID is 0x10 bytes"))
    }

    /// Wake-up type
    ///
    /// Identifies the event that caused the system to power up
    /// (byte at offset 0x18).
    pub fn wakeup_type(&self) -> Option<SystemWakeUpTypeData> {
        // Pass the conversion as a function reference; wrapping `from` in a
        // closure (`|raw| SystemWakeUpTypeData::from(raw)`) is redundant.
        self.parts.get_field_byte(0x18).map(SystemWakeUpTypeData::from)
    }

    /// SKU Number
    ///
    /// This text string identifies a particular computer
    /// configuration for sale. It is sometimes also
    /// called a product ID or purchase order number.
    /// This number is frequently found in existing
    /// fields, but there is no standard format.
    /// Typically for a given system board from a
    /// given OEM, there are tens of unique
    /// processor, memory, hard drive, and optical
    /// drive configurations.
    pub fn sku_number(&self) -> Option<String> {
        self.parts.get_field_string(0x19)
    }

    /// Family
    ///
    /// This text string identifies the family to which a
    /// particular computer belongs. A family refers to
    /// a set of computers that are similar but not
    /// identical from a hardware or software point of
    /// view. Typically, a family is composed of
    /// different computer models, which have
    /// different configurations and pricing points.
    /// Computers in the same family often have
    /// similar branding and cosmetic features.
    pub fn family(&self) -> Option<String> {
        self.parts.get_field_string(0x1A)
    }
}
impl fmt::Debug for SMBiosSystemInformation<'_> {
    /// Renders every parsed field (rather than the raw bytes) for
    /// human-readable debug output.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `any::type_name` keeps the printed struct name in sync with any
        // future rename of the type.
        fmt.debug_struct(any::type_name::<SMBiosSystemInformation<'_>>())
            .field("header", &self.parts.header)
            .field("manufacturer", &self.manufacturer())
            .field("product_name", &self.product_name())
            .field("version", &self.version())
            .field("serial_number", &self.serial_number())
            .field("uuid", &self.uuid())
            .field("wakeup_type", &self.wakeup_type())
            .field("sku_number", &self.sku_number())
            .field("family", &self.family())
            .finish()
    }
}
impl Serialize for SMBiosSystemInformation<'_> {
    /// Serializes the parsed fields as a 9-field struct.
    ///
    /// The field count passed to `serialize_struct` must match the number
    /// of `serialize_field` calls below; update both together.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut state = serializer.serialize_struct("SMBiosSystemInformation", 9)?;
        state.serialize_field("header", &self.parts.header)?;
        state.serialize_field("manufacturer", &self.manufacturer())?;
        state.serialize_field("product_name", &self.product_name())?;
        state.serialize_field("version", &self.version())?;
        state.serialize_field("serial_number", &self.serial_number())?;
        state.serialize_field("uuid", &self.uuid())?;
        state.serialize_field("wakeup_type", &self.wakeup_type())?;
        state.serialize_field("sku_number", &self.sku_number())?;
        state.serialize_field("family", &self.family())?;
        state.end()
    }
}
/// # System - UUID Data
#[derive(Serialize, Debug)]
pub enum SystemUuidData {
/// The ID is not currently present in the system, but it can be set
IdNotPresentButSettable,
/// The ID is not present in the system
IdNotPresent,
/// System UUID
Uuid(SystemUuid),
}
impl SystemUuidData {
fn new<'a>(array: &'a [u8; 0x10]) -> SystemUuidData {
if array.iter().all(|&x| x == 0) {
SystemUuidData::IdNotPresentButSettable
} else if array.iter().all(|&x| x == 0xFF) {
SystemUuidData::IdNotPresent
} else {
SystemUuidData::Uuid(SystemUuid::from(array))
}
}
}
impl<'a> TryFrom<&'a [u8]> for SystemUuidData {
type Error = TryFromSliceError;
fn try_from(raw: &'a [u8]) -> Result<Self, Self::Error> {
<&[u8; 0x10]>::try_from(raw).and_then(|array| Ok(SystemUuidData::new(array)))
}
}
impl fmt::Display for SystemUuidData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &*self {
SystemUuidData::IdNotPresent => write!(f, "IdNotPresent"),
SystemUuidData::IdNotPresentButSettable => write!(f, "IdNotPresentButSettable"),
SystemUuidData::Uuid(_system_uuid) => write!(f, "{}", &_system_uuid),
}
}
}
/// # System - UUID
#[derive(PartialEq, Eq)]
pub struct SystemUuid {
/// Raw byte array for this UUID
pub raw: [u8; 0x10],
}
impl SystemUuid {
/// Low field of the timestamp
pub fn time_low(&self) -> u32 {
u32::from_le_bytes(self.raw[..0x4].try_into().expect("incorrect size"))
}
/// Middle field of the timestamp
pub fn time_mid(&self) -> u16 {
u16::from_le_bytes(self.raw[0x4..0x6].try_into().expect("incorrect size"))
}
/// High field of the timestamp multiplexed with the version number
pub fn time_high_and_version(&self) -> u16 {
u16::from_le_bytes(self.raw[0x6..0x8].try_into().expect("incorrect size"))
}
/// High field of the clock sequence multiplexed with the variant
pub fn clock_seq_high_and_reserved(&self) -> u8 {
self.raw[0x8]
}
/// Low field of the clock sequence
pub fn clock_seq_low(&self) -> u8 {
self.raw[0x9]
}
/// Spatially unique node identifier
pub fn node(&self) -> &[u8; 6] {
self.raw[0xA..0x10].try_into().expect("incorrect size")
}
}
impl<'a> From<&'a [u8; 0x10]> for SystemUuid {
fn from(raw: &'a [u8; 0x10]) -> Self {
SystemUuid { raw: raw.clone() }
}
}
impl fmt::Display for SystemUuid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Example output:
// "00360FE7-D4D5-11E5-9C43-BC0000F00000"
// <TimeLow>-<TimeMid>-<TimeHiAndVersion>-<ClockSeqHiAndReserved><ClockSeqLow>-<Node[6]>
write!(
f,
"{:08X}-{:04X}-{:04X}-{:02X}{:02X}-",
self.time_low(),
self.time_mid(),
self.time_high_and_version(),
self.clock_seq_high_and_reserved(),
self.clock_seq_low()
)?;
self.node().iter().fold(Ok(()), |result, node_byte| {
result.and_then(|_| write!(f, "{:02X}", node_byte))
})
}
}
impl fmt::Debug for SystemUuid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self)
}
}
impl Serialize for SystemUuid {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(format!("{}", self).as_str())
}
}
/// # System - Wake-up Type Data
pub struct SystemWakeUpTypeData {
/// Raw value
///
/// _raw_ is most useful when _value_ is None.
/// This is most likely to occur when the standard was updated but
/// this library code has not been updated to match the current
/// standard.
pub raw: u8,
/// The contained [SystemWakeUpType] value
pub value: SystemWakeUpType | Self { parts }
}
| identifier_body | |
system_information.rs | up.
pub fn wakeup_type(&self) -> Option<SystemWakeUpTypeData> {
self.parts
.get_field_byte(0x18)
.map(|raw| SystemWakeUpTypeData::from(raw))
}
/// SKU Number
///
/// This text string identifies a particular computer
/// configuration for sale. It is sometimes also
/// called a product ID or purchase order number.
/// This number is frequently found in existing
/// fields, but there is no standard format.
/// Typically for a given system board from a
/// given OEM, there are tens of unique
/// processor, memory, hard drive, and optical
/// drive configurations.
pub fn sku_number(&self) -> Option<String> {
self.parts.get_field_string(0x19)
}
/// Family
///
/// This text string identifies the family to which a
/// particular computer belongs. A family refers to
/// a set of computers that are similar but not
/// identical from a hardware or software point of
/// view. Typically, a family is composed of
/// different computer models, which have
/// different configurations and pricing points.
/// Computers in the same family often have
/// similar branding and cosmetic features.
pub fn family(&self) -> Option<String> {
self.parts.get_field_string(0x1A)
}
}
impl fmt::Debug for SMBiosSystemInformation<'_> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct(any::type_name::<SMBiosSystemInformation<'_>>())
.field("header", &self.parts.header)
.field("manufacturer", &self.manufacturer())
.field("product_name", &self.product_name())
.field("version", &self.version())
.field("serial_number", &self.serial_number())
.field("uuid", &self.uuid())
.field("wakeup_type", &self.wakeup_type())
.field("sku_number", &self.sku_number())
.field("family", &self.family())
.finish()
}
}
impl Serialize for SMBiosSystemInformation<'_> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("SMBiosSystemInformation", 9)?;
state.serialize_field("header", &self.parts.header)?;
state.serialize_field("manufacturer", &self.manufacturer())?;
state.serialize_field("product_name", &self.product_name())?;
state.serialize_field("version", &self.version())?;
state.serialize_field("serial_number", &self.serial_number())?;
state.serialize_field("uuid", &self.uuid())?;
state.serialize_field("wakeup_type", &self.wakeup_type())?;
state.serialize_field("sku_number", &self.sku_number())?;
state.serialize_field("family", &self.family())?;
state.end()
}
}
/// # System - UUID Data
#[derive(Serialize, Debug)]
pub enum SystemUuidData {
/// The ID is not currently present in the system, but it can be set
IdNotPresentButSettable,
/// The ID is not present in the system
IdNotPresent,
/// System UUID
Uuid(SystemUuid),
}
impl SystemUuidData {
fn new<'a>(array: &'a [u8; 0x10]) -> SystemUuidData {
if array.iter().all(|&x| x == 0) {
SystemUuidData::IdNotPresentButSettable
} else if array.iter().all(|&x| x == 0xFF) {
SystemUuidData::IdNotPresent
} else {
SystemUuidData::Uuid(SystemUuid::from(array))
}
}
}
impl<'a> TryFrom<&'a [u8]> for SystemUuidData {
type Error = TryFromSliceError;
fn try_from(raw: &'a [u8]) -> Result<Self, Self::Error> {
<&[u8; 0x10]>::try_from(raw).and_then(|array| Ok(SystemUuidData::new(array)))
}
}
impl fmt::Display for SystemUuidData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &*self {
SystemUuidData::IdNotPresent => write!(f, "IdNotPresent"),
SystemUuidData::IdNotPresentButSettable => write!(f, "IdNotPresentButSettable"),
SystemUuidData::Uuid(_system_uuid) => write!(f, "{}", &_system_uuid),
}
}
}
/// # System - UUID
#[derive(PartialEq, Eq)]
pub struct SystemUuid {
/// Raw byte array for this UUID
pub raw: [u8; 0x10],
}
impl SystemUuid {
/// Low field of the timestamp
pub fn time_low(&self) -> u32 {
u32::from_le_bytes(self.raw[..0x4].try_into().expect("incorrect size"))
}
/// Middle field of the timestamp
pub fn time_mid(&self) -> u16 {
u16::from_le_bytes(self.raw[0x4..0x6].try_into().expect("incorrect size"))
}
/// High field of the timestamp multiplexed with the version number
pub fn time_high_and_version(&self) -> u16 {
u16::from_le_bytes(self.raw[0x6..0x8].try_into().expect("incorrect size"))
}
/// High field of the clock sequence multiplexed with the variant
pub fn clock_seq_high_and_reserved(&self) -> u8 {
self.raw[0x8]
}
/// Low field of the clock sequence
pub fn clock_seq_low(&self) -> u8 {
self.raw[0x9]
}
/// Spatially unique node identifier
pub fn node(&self) -> &[u8; 6] {
self.raw[0xA..0x10].try_into().expect("incorrect size")
}
}
impl<'a> From<&'a [u8; 0x10]> for SystemUuid {
fn from(raw: &'a [u8; 0x10]) -> Self {
SystemUuid { raw: raw.clone() }
}
}
impl fmt::Display for SystemUuid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Example output:
// "00360FE7-D4D5-11E5-9C43-BC0000F00000"
// <TimeLow>-<TimeMid>-<TimeHiAndVersion>-<ClockSeqHiAndReserved><ClockSeqLow>-<Node[6]>
write!(
f,
"{:08X}-{:04X}-{:04X}-{:02X}{:02X}-",
self.time_low(),
self.time_mid(),
self.time_high_and_version(),
self.clock_seq_high_and_reserved(),
self.clock_seq_low()
)?;
self.node().iter().fold(Ok(()), |result, node_byte| {
result.and_then(|_| write!(f, "{:02X}", node_byte))
})
}
}
impl fmt::Debug for SystemUuid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self)
}
}
impl Serialize for SystemUuid {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(format!("{}", self).as_str())
}
} | /// Raw value
///
/// _raw_ is most useful when _value_ is None.
/// This is most likely to occur when the standard was updated but
/// this library code has not been updated to match the current
/// standard.
pub raw: u8,
/// The contained [SystemWakeUpType] value
pub value: SystemWakeUpType,
}
impl fmt::Debug for SystemWakeUpTypeData {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct(any::type_name::<SystemWakeUpTypeData>())
.field("raw", &self.raw)
.field("value", &self.value)
.finish()
}
}
impl Serialize for SystemWakeUpTypeData {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("SystemWakeUpTypeData", 2)?;
state.serialize_field("raw", &self.raw)?;
state.serialize_field("value", &self.value)?;
state.end()
}
}
impl fmt::Display for SystemWakeUpTypeData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self.value {
SystemWakeUpType::None => write!(f, "{}", &self.raw),
_ => write!(f, "{:?}", &self.value),
}
}
}
impl Deref for SystemWakeUpTypeData {
|
/// # System - Wake-up Type Data
pub struct SystemWakeUpTypeData { | random_line_split |
system_information.rs | .
pub fn wakeup_type(&self) -> Option<SystemWakeUpTypeData> {
self.parts
.get_field_byte(0x18)
.map(|raw| SystemWakeUpTypeData::from(raw))
}
/// SKU Number
///
/// This text string identifies a particular computer
/// configuration for sale. It is sometimes also
/// called a product ID or purchase order number.
/// This number is frequently found in existing
/// fields, but there is no standard format.
/// Typically for a given system board from a
/// given OEM, there are tens of unique
/// processor, memory, hard drive, and optical
/// drive configurations.
pub fn sku_number(&self) -> Option<String> {
self.parts.get_field_string(0x19)
}
/// Family
///
/// This text string identifies the family to which a
/// particular computer belongs. A family refers to
/// a set of computers that are similar but not
/// identical from a hardware or software point of
/// view. Typically, a family is composed of
/// different computer models, which have
/// different configurations and pricing points.
/// Computers in the same family often have
/// similar branding and cosmetic features.
pub fn family(&self) -> Option<String> {
self.parts.get_field_string(0x1A)
}
}
impl fmt::Debug for SMBiosSystemInformation<'_> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct(any::type_name::<SMBiosSystemInformation<'_>>())
.field("header", &self.parts.header)
.field("manufacturer", &self.manufacturer())
.field("product_name", &self.product_name())
.field("version", &self.version())
.field("serial_number", &self.serial_number())
.field("uuid", &self.uuid())
.field("wakeup_type", &self.wakeup_type())
.field("sku_number", &self.sku_number())
.field("family", &self.family())
.finish()
}
}
impl Serialize for SMBiosSystemInformation<'_> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("SMBiosSystemInformation", 9)?;
state.serialize_field("header", &self.parts.header)?;
state.serialize_field("manufacturer", &self.manufacturer())?;
state.serialize_field("product_name", &self.product_name())?;
state.serialize_field("version", &self.version())?;
state.serialize_field("serial_number", &self.serial_number())?;
state.serialize_field("uuid", &self.uuid())?;
state.serialize_field("wakeup_type", &self.wakeup_type())?;
state.serialize_field("sku_number", &self.sku_number())?;
state.serialize_field("family", &self.family())?;
state.end()
}
}
/// # System - UUID Data
#[derive(Serialize, Debug)]
pub enum SystemUuidData {
/// The ID is not currently present in the system, but it can be set
IdNotPresentButSettable,
/// The ID is not present in the system
IdNotPresent,
/// System UUID
Uuid(SystemUuid),
}
impl SystemUuidData {
fn new<'a>(array: &'a [u8; 0x10]) -> SystemUuidData {
if array.iter().all(|&x| x == 0) {
SystemUuidData::IdNotPresentButSettable
} else if array.iter().all(|&x| x == 0xFF) {
SystemUuidData::IdNotPresent
} else {
SystemUuidData::Uuid(SystemUuid::from(array))
}
}
}
impl<'a> TryFrom<&'a [u8]> for SystemUuidData {
type Error = TryFromSliceError;
fn try_from(raw: &'a [u8]) -> Result<Self, Self::Error> {
<&[u8; 0x10]>::try_from(raw).and_then(|array| Ok(SystemUuidData::new(array)))
}
}
impl fmt::Display for SystemUuidData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &*self {
SystemUuidData::IdNotPresent => write!(f, "IdNotPresent"),
SystemUuidData::IdNotPresentButSettable => write!(f, "IdNotPresentButSettable"),
SystemUuidData::Uuid(_system_uuid) => write!(f, "{}", &_system_uuid),
}
}
}
/// # System - UUID
#[derive(PartialEq, Eq)]
pub struct SystemUuid {
/// Raw byte array for this UUID
pub raw: [u8; 0x10],
}
impl SystemUuid {
/// Low field of the timestamp
pub fn time_low(&self) -> u32 {
u32::from_le_bytes(self.raw[..0x4].try_into().expect("incorrect size"))
}
/// Middle field of the timestamp
pub fn time_mid(&self) -> u16 {
u16::from_le_bytes(self.raw[0x4..0x6].try_into().expect("incorrect size"))
}
/// High field of the timestamp multiplexed with the version number
pub fn time_high_and_version(&self) -> u16 {
u16::from_le_bytes(self.raw[0x6..0x8].try_into().expect("incorrect size"))
}
/// High field of the clock sequence multiplexed with the variant
pub fn clock_seq_high_and_reserved(&self) -> u8 {
self.raw[0x8]
}
/// Low field of the clock sequence
pub fn clock_seq_low(&self) -> u8 {
self.raw[0x9]
}
/// Spatially unique node identifier
pub fn node(&self) -> &[u8; 6] {
self.raw[0xA..0x10].try_into().expect("incorrect size")
}
}
impl<'a> From<&'a [u8; 0x10]> for SystemUuid {
fn from(raw: &'a [u8; 0x10]) -> Self {
SystemUuid { raw: raw.clone() }
}
}
impl fmt::Display for SystemUuid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Example output:
// "00360FE7-D4D5-11E5-9C43-BC0000F00000"
// <TimeLow>-<TimeMid>-<TimeHiAndVersion>-<ClockSeqHiAndReserved><ClockSeqLow>-<Node[6]>
write!(
f,
"{:08X}-{:04X}-{:04X}-{:02X}{:02X}-",
self.time_low(),
self.time_mid(),
self.time_high_and_version(),
self.clock_seq_high_and_reserved(),
self.clock_seq_low()
)?;
self.node().iter().fold(Ok(()), |result, node_byte| {
result.and_then(|_| write!(f, "{:02X}", node_byte))
})
}
}
impl fmt::Debug for SystemUuid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self)
}
}
impl Serialize for SystemUuid {
fn se | >(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(format!("{}", self).as_str())
}
}
/// # System - Wake-up Type Data
pub struct SystemWakeUpTypeData {
/// Raw value
///
/// _raw_ is most useful when _value_ is None.
/// This is most likely to occur when the standard was updated but
/// this library code has not been updated to match the current
/// standard.
pub raw: u8,
/// The contained [SystemWakeUpType] value
pub value: SystemWakeUpType,
}
impl fmt::Debug for SystemWakeUpTypeData {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct(any::type_name::<SystemWakeUpTypeData>())
.field("raw", &self.raw)
.field("value", &self.value)
.finish()
}
}
impl Serialize for SystemWakeUpTypeData {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("SystemWakeUpTypeData", 2)?;
state.serialize_field("raw", &self.raw)?;
state.serialize_field("value", &self.value)?;
state.end()
}
}
impl fmt::Display for SystemWakeUpTypeData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self.value {
SystemWakeUpType::None => write!(f, "{}", &self.raw),
_ => write!(f, "{:?}", &self.value),
}
}
}
impl Deref for SystemWakeUpTypeData {
| rialize<S | identifier_name |
loader.rs | = data.fullname;
self.source = data.source;
self.source_other = data.source_other;
}
}
struct ImageData {
filename: String,
fullname: Option<String>,
source: Option<String>,
source_other: Option<String>,
// align
// frameDuration
}
#[derive(Debug, Default)]
pub struct PackInfo {
name: String,
author: Option<String>,
description: Option<String>,
link: Option<String>
}
impl PackInfo {
fn new(name: &str) -> Self {
PackInfo {
name: name.to_owned(),
..Default::default()
}
}
}
pub fn load_respack<T: AsRef<Path>>(path: T, tx: Sender<LoadStatus>) -> Result<()> {
let path = path.as_ref();
let f = File::open(path)?;
let total_size = f.metadata()?.len();
tx.send(LoadStatus::TotalSize(total_size))?;
let mut archive = ZipArchive::new(f)?;
let mut images: HashMap<String, ImageLoader> = HashMap::new();
let mut audio: HashMap<String, _> = HashMap::new();
let mut song_data = Vec::new();
let mut image_data = Vec::new();
let mut pack_info = PackInfo::new(path.file_stem().and_then(OsStr::to_str).unwrap_or("???"));
let mut loaded_size = 0;
for i in 0..archive.len() {
let mut file = archive.by_index(i)?;
let path: PathBuf = file.name().into();
let size = file.compressed_size();
let name: &str = path.file_stem().and_then(OsStr::to_str).ok_or_else(|| "Bad path")?;
match path.extension().and_then(OsStr::to_str) {
Some("png") => {
let surface = {
let mut buffer = Vec::with_capacity(file.size() as usize);
file.read_to_end(&mut buffer)?;
let rwops = RWops::from_bytes(&buffer[..])?;
let surface = rwops.load_png()?;
Surface::from_surface(surface)?
};
let image = ImageLoader::new(name, surface);
images.insert(name.to_owned(), image);
}
Some("mp3") => {
let mut data = Vec::with_capacity(file.size() as usize);
file.read_to_end(&mut data)?;
let decoder = Mp3Decoder::new(Cursor::new(data));
let source = (Box::new(decoder) as Box<Source<Item = i16> + Send>).buffered();
audio.insert(name.to_owned(), source);
}
Some("xml") => {
parse_xml(file, &mut song_data, &mut image_data, &mut pack_info);
}
Some("") => {},
_ => println!("{:?}", path),
}
tx.send(LoadStatus::LoadSize(size))?;
loaded_size += size;
}
// Leftovers
tx.send(LoadStatus::LoadSize(total_size - loaded_size))?;
// Process songs
let songs: Vec<Song> = song_data
.into_iter()
.filter_map(|data| Song::new(data, &mut audio).ok())
.collect();
if !audio.is_empty() {
println!("Warning: Unused audio data {:?}", audio.keys());
}
// Process images
for image in image_data.into_iter() {
if let Some(loader) = images.get_mut(&image.filename) {
loader.add_data(image);
} else {
println!("Warning: Could not find image {}", image.filename);
}
}
tx.send(LoadStatus::Done(ResPack {
info: pack_info,
images: images.into_iter().map(|(_k, v)| v).collect(),
songs,
}))?;
Ok(())
}
// XML
// tempted to try and write a macro to handle this
// maybe if it grows some more
enum State {
Document,
Songs,
Song(Option<SongField>),
Images,
Image(Option<ImageField>),
Info(Option<InfoField>),
}
#[derive(Copy, Clone, Debug)]
enum SongField {
Title,
Source,
Rhythm,
Buildup,
BuildupRhythm,
}
#[derive(Copy, Clone, Debug)]
enum ImageField {
Source,
SourceOther,
FullName,
Align,
FrameDuration, // TODO: handle animations
}
#[derive(Copy, Clone, Debug)]
enum InfoField {
Name,
Author,
Description,
Link,
}
// based off code from stebalien on rust-lang
// ok this got ugly, clean it up
fn parse_xml(file: ZipFile, songs: &mut Vec<SongData>, images: &mut Vec<ImageData>, pack_info: &mut PackInfo) {
let mut reader = EventReader::new(BufReader::new(file));
let mut state = State::Document;
let mut song_name = None;
let mut song_title = None;
let mut song_source = None;
let mut song_rhythm = Vec::new();
let mut song_buildup = None;
let mut song_buildup_rhythm = Vec::new();
let mut image_filename = None;
let mut image_name = None;
let mut image_source = None;
let mut image_source_other = None;
// TODO: handle smart align
//let mut image_align = None;
while let Ok(event) = reader.next() {
state = match state {
State::Document => match event {
XmlEvent::StartDocument { .. } => State::Document,
XmlEvent::StartElement { name, .. } => match name.local_name.as_ref() {
"info" => State::Info(None),
"songs" => State::Songs,
"images" => State::Images,
_ => {
println!("Unknown xml tag {}", name.local_name);
xml_skip_tag(&mut reader).unwrap();
State::Document
}
},
XmlEvent::EndDocument => break,
_ => {
println!("Unexpected");
State::Document
}
},
State::Songs => match event {
XmlEvent::StartElement {
name, attributes, ..
} => {
if name.local_name != "song" {
panic!("Expected a song tag - got {}", name.local_name);
}
for attr in attributes.into_iter() {
if attr.name.local_name == "name" {
song_name = Some(attr.value);
break;
}
}
if song_name.is_none() {
panic!("Expected a song name");
}
State::Song(None)
}
XmlEvent::EndElement { .. } => State::Document,
XmlEvent::Whitespace(_) => State::Songs,
_ => {
println!("Expected a song tag - got {:?}", event);
State::Songs
}
},
State::Song(None) => match event {
XmlEvent::StartElement { ref name, .. } => match name.local_name.as_ref() {
"title" => State::Song(Some(SongField::Title)),
"source" => State::Song(Some(SongField::Source)),
"rhythm" => State::Song(Some(SongField::Rhythm)), | println!("Unknown song field {}", name.local_name);
xml_skip_tag(&mut reader).unwrap();
State::Song(None)
}
},
XmlEvent::EndElement { .. } => {
if song_rhythm.is_empty() {
// TODO: be graceful
panic!("Empty rhythm");
}
let song = SongData {
name: song_name.take().unwrap(),
title: song_title.take().unwrap(),
source: song_source.take(),
rhythm: std::mem::replace(&mut song_rhythm, Vec::new()),
buildup: song_buildup.take(),
buildup_rhythm: std::mem::replace(&mut song_buildup_rhythm, Vec::new()),
};
songs.push(song);
State::Songs
}
_ => State::Song(None),
},
State::Song(Some(field)) => match event {
XmlEvent::Characters(data) => {
match field {
SongField::Title => song_title = Some(data),
SongField::Source => song_source = Some(data),
SongField::Rhythm => {
if !data.is_ascii() {
panic!("Expected ascii characters in rhythm");
}
song_rhythm = data.chars().collect();
}
SongField::Buildup => song_buildup = Some(data),
SongField::BuildupRhythm => {
if !data.is_ascii() {
panic!("Expected ascii characters in rhythm");
}
if data.is_empty() {
panic!("Buildup rhythm empty!");
}
song_buildup_rhythm = data.chars().collect();
}
}
State::Song(Some(field))
}
XmlEvent::EndElement { .. } => State::Song(None),
_ => panic!("Expected data for tag {:?}", field),
},
State::Images => match event {
XmlEvent::StartElement {
name, attributes, ..
} => {
if name.local_name != "image" {
panic!("Expected an image tag - got {}", | "buildup" => State::Song(Some(SongField::Buildup)),
"buildupRhythm" => State::Song(Some(SongField::BuildupRhythm)),
_ => { | random_line_split |
loader.rs | {
pub info: PackInfo,
pub images: Vec<ImageLoader>,
pub songs: Vec<Song>,
}
pub struct ImageLoader {
//data: SurfaceContext
pub name: String,
pub fullname: Option<String>,
pub data: Surface,
pub source: Option<String>,
pub source_other: Option<String>,
}
pub struct SongData {
pub name: String,
pub title: String,
pub source: Option<String>,
pub rhythm: Vec<char>,
pub buildup: Option<String>,
pub buildup_rhythm: Vec<char>,
}
impl ImageLoader {
fn new(name: &str, buffer: Surface) -> Self {
ImageLoader {
name: name.to_owned(),
data: buffer,
fullname: None,
source: None,
source_other: None,
}
}
fn add_data(&mut self, data: ImageData) {
self.fullname = data.fullname;
self.source = data.source;
self.source_other = data.source_other;
}
}
struct ImageData {
filename: String,
fullname: Option<String>,
source: Option<String>,
source_other: Option<String>,
// align
// frameDuration
}
#[derive(Debug, Default)]
pub struct PackInfo {
name: String,
author: Option<String>,
description: Option<String>,
link: Option<String>
}
impl PackInfo {
fn new(name: &str) -> Self {
PackInfo {
name: name.to_owned(),
..Default::default()
}
}
}
pub fn load_respack<T: AsRef<Path>>(path: T, tx: Sender<LoadStatus>) -> Result<()> {
let path = path.as_ref();
let f = File::open(path)?;
let total_size = f.metadata()?.len();
tx.send(LoadStatus::TotalSize(total_size))?;
let mut archive = ZipArchive::new(f)?;
let mut images: HashMap<String, ImageLoader> = HashMap::new();
let mut audio: HashMap<String, _> = HashMap::new();
let mut song_data = Vec::new();
let mut image_data = Vec::new();
let mut pack_info = PackInfo::new(path.file_stem().and_then(OsStr::to_str).unwrap_or("???"));
let mut loaded_size = 0;
for i in 0..archive.len() {
let mut file = archive.by_index(i)?;
let path: PathBuf = file.name().into();
let size = file.compressed_size();
let name: &str = path.file_stem().and_then(OsStr::to_str).ok_or_else(|| "Bad path")?;
match path.extension().and_then(OsStr::to_str) {
Some("png") => {
let surface = {
let mut buffer = Vec::with_capacity(file.size() as usize);
file.read_to_end(&mut buffer)?;
let rwops = RWops::from_bytes(&buffer[..])?;
let surface = rwops.load_png()?;
Surface::from_surface(surface)?
};
let image = ImageLoader::new(name, surface);
images.insert(name.to_owned(), image);
}
Some("mp3") => {
let mut data = Vec::with_capacity(file.size() as usize);
file.read_to_end(&mut data)?;
let decoder = Mp3Decoder::new(Cursor::new(data));
let source = (Box::new(decoder) as Box<Source<Item = i16> + Send>).buffered();
audio.insert(name.to_owned(), source);
}
Some("xml") => {
parse_xml(file, &mut song_data, &mut image_data, &mut pack_info);
}
Some("") => {},
_ => println!("{:?}", path),
}
tx.send(LoadStatus::LoadSize(size))?;
loaded_size += size;
}
// Leftovers
tx.send(LoadStatus::LoadSize(total_size - loaded_size))?;
// Process songs
let songs: Vec<Song> = song_data
.into_iter()
.filter_map(|data| Song::new(data, &mut audio).ok())
.collect();
if !audio.is_empty() {
println!("Warning: Unused audio data {:?}", audio.keys());
}
// Process images
for image in image_data.into_iter() {
if let Some(loader) = images.get_mut(&image.filename) {
loader.add_data(image);
} else {
println!("Warning: Could not find image {}", image.filename);
}
}
tx.send(LoadStatus::Done(ResPack {
info: pack_info,
images: images.into_iter().map(|(_k, v)| v).collect(),
songs,
}))?;
Ok(())
}
// XML
// tempted to try and write a macro to handle this
// maybe if it grows some more
enum State {
Document,
Songs,
Song(Option<SongField>),
Images,
Image(Option<ImageField>),
Info(Option<InfoField>),
}
#[derive(Copy, Clone, Debug)]
enum SongField {
Title,
Source,
Rhythm,
Buildup,
BuildupRhythm,
}
#[derive(Copy, Clone, Debug)]
enum ImageField {
Source,
SourceOther,
FullName,
Align,
FrameDuration, // TODO: handle animations
}
#[derive(Copy, Clone, Debug)]
enum InfoField {
Name,
Author,
Description,
Link,
}
// based off code from stebalien on rust-lang
// ok this got ugly, clean it up
fn parse_xml(file: ZipFile, songs: &mut Vec<SongData>, images: &mut Vec<ImageData>, pack_info: &mut PackInfo) {
let mut reader = EventReader::new(BufReader::new(file));
let mut state = State::Document;
let mut song_name = None;
let mut song_title = None;
let mut song_source = None;
let mut song_rhythm = Vec::new();
let mut song_buildup = None;
let mut song_buildup_rhythm = Vec::new();
let mut image_filename = None;
let mut image_name = None;
let mut image_source = None;
let mut image_source_other = None;
// TODO: handle smart align
//let mut image_align = None;
while let Ok(event) = reader.next() {
state = match state {
State::Document => match event {
XmlEvent::StartDocument { .. } => State::Document,
XmlEvent::StartElement { name, .. } => match name.local_name.as_ref() {
"info" => State::Info(None),
"songs" => State::Songs,
"images" => State::Images,
_ => {
println!("Unknown xml tag {}", name.local_name);
xml_skip_tag(&mut reader).unwrap();
State::Document
}
},
XmlEvent::EndDocument => break,
_ => {
println!("Unexpected");
State::Document
}
},
State::Songs => match event {
XmlEvent::StartElement {
name, attributes, ..
} => {
if name.local_name != "song" {
panic!("Expected a song tag - got {}", name.local_name);
}
for attr in attributes.into_iter() {
if attr.name.local_name == "name" {
song_name = Some(attr.value);
break;
}
}
if song_name.is_none() {
panic!("Expected a song name");
}
State::Song(None)
}
XmlEvent::EndElement { .. } => State::Document,
XmlEvent::Whitespace(_) => State::Songs,
_ => {
println!("Expected a song tag - got {:?}", event);
State::Songs
}
},
State::Song(None) => match event {
XmlEvent::StartElement { ref name, .. } => match name.local_name.as_ref() {
"title" => State::Song(Some(SongField::Title)),
"source" => State::Song(Some(SongField::Source)),
"rhythm" => State::Song(Some(SongField::Rhythm)),
"buildup" => State::Song(Some(SongField::Buildup)),
"buildupRhythm" => State::Song(Some(SongField::BuildupRhythm)),
_ => {
println!("Unknown song field {}", name.local_name);
xml_skip_tag(&mut reader).unwrap();
State::Song(None)
}
},
XmlEvent::EndElement { .. } => {
if song_rhythm.is_empty() {
// TODO: be graceful
panic!("Empty rhythm");
}
let song = SongData {
name: song_name.take().unwrap(),
title: song_title.take().unwrap(),
source: song_source.take(),
rhythm: std::mem::replace(&mut song_rhythm, Vec::new()),
buildup: song_buildup.take(),
buildup_rhythm: std::mem::replace(&mut song_buildup_rhythm, Vec::new()),
};
songs.push(song);
State::Songs
}
_ => State::Song(None),
},
State::Song(Some(field)) => match event {
XmlEvent::Characters(data) => {
match field {
SongField::Title => song_title = Some(data),
SongField::Source => song_source = Some(data),
SongField::Rhythm => {
if !data.is_ascii() {
panic!("Expected ascii characters in rhythm");
}
s | ResPack | identifier_name | |
lib.rs | NewLiability(
T::Index,
TechnicsFor<T>,
EconomicsFor<T>,
T::AccountId,
T::AccountId,
),
/// Liability report published.
NewReport(T::Index, ReportFor<T>),
}
#[pallet::error]
pub enum Error<T> {
/// Agreement proof verification failed.
BadAgreementProof,
/// Report proof verification failed.
BadReportProof,
/// Wrong report sender account.
BadReportSender,
/// Liability already finalized.
AlreadyFinalized,
/// Real world oracle is not ready for this report.
OracleIsNotReady,
/// Unable to load agreement from storage.
AgreementNotFound,
}
#[pallet::storage]
#[pallet::getter(fn latest_index)]
/// [DEPRECATED] Latest liability index.
/// TODO: remove after mainnet upgrade (migrated by `on_runtime_upgrade`).
pub(super) type LatestIndex<T: Config> = StorageValue<_, T::Index>;
#[pallet::storage]
#[pallet::getter(fn next_index)]
/// Next liability index to be assigned by `create`.
pub(super) type NextIndex<T: Config> = StorageValue<_, T::Index>;
#[pallet::storage]
#[pallet::getter(fn agreement_of)]
/// Technical and economical parameters of liability, keyed by index.
pub(super) type AgreementOf<T: Config> = StorageMap<_, Twox64Concat, T::Index, T::Agreement>;
#[pallet::storage]
#[pallet::getter(fn report_of)]
/// Result of liability execution, keyed by index.
pub(super) type ReportOf<T: Config> = StorageMap<_, Twox64Concat, T::Index, ReportFor<T>>;
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
    // One-shot storage migration: move the deprecated `LatestIndex` value
    // into `NextIndex`. TODO: remove after mainnet upgrade.
    fn on_runtime_upgrade() -> Weight {
        // Only migrate when `NextIndex` has never been written, making the
        // migration idempotent across repeated runtime upgrades.
        if <NextIndex<T>>::get().is_none() {
            if let Some(index) = <LatestIndex<T>>::take() {
                <NextIndex<T>>::put(index)
            }
        }
        // NOTE(review): a constant weight of 1 likely underestimates the
        // storage reads/writes above — confirm against benchmarking.
        1
    }
}
/// The pallet type; dispatchables, storage and hooks are attached to it.
#[pallet::pallet]
#[pallet::generate_store(pub(super) trait Store)]
#[pallet::without_storage_info]
pub struct Pallet<T>(PhantomData<T>);
#[pallet::call]
impl<T: Config> Pallet<T> {
    /// Create an agreement between two parties.
    ///
    /// The agreement proof is verified, `on_start` processing runs, and the
    /// agreement is stored under a freshly allocated liability index.
    /// Emits `Event::NewLiability` on success.
    #[pallet::weight(200_000)]
    pub fn create(origin: OriginFor<T>, agreement: T::Agreement) -> DispatchResultWithPostInfo {
        let _ = ensure_signed(origin)?;
        ensure!(agreement.verify(), Error::<T>::BadAgreementProof);
        // Start agreement processing
        agreement.on_start()?;
        // Store agreement on storage under the next free index.
        let next_index = <NextIndex<T>>::get().unwrap_or_default();
        <AgreementOf<T>>::insert(next_index, agreement.clone());
        <NextIndex<T>>::put(next_index + 1u32.into());
        // Emit event
        Self::deposit_event(Event::NewLiability(
            next_index,
            agreement.technical(),
            agreement.economical(),
            agreement.promisee(),
            agreement.promisor(),
        ));
        Ok(().into())
    }

    /// Publish the technical report of completed work.
    ///
    /// The report proof is verified, the sender must be the agreement's
    /// promisor, and the liability must not already be finalized. On success
    /// the report is stored and `Event::NewReport` is emitted.
    #[pallet::weight(200_000)]
    pub fn finalize(origin: OriginFor<T>, report: ReportFor<T>) -> DispatchResultWithPostInfo {
        let _ = ensure_signed(origin)?;
        // Check report proof
        ensure!(report.verify(), Error::<T>::BadReportProof);
        let index = report.index();
        // Is liability already finalized? (`is_none` avoids the needless
        // `PartialEq` bound the previous `== None` comparison required.)
        ensure!(
            <ReportOf<T>>::get(index).is_none(),
            Error::<T>::AlreadyFinalized
        );
        // Decode agreement from storage
        if let Some(agreement) = <AgreementOf<T>>::get(index) {
            // Check report sender
            ensure!(
                report.sender() == agreement.promisor(),
                Error::<T>::BadReportSender
            );
            // Run agreement final processing; the oracle may not have an
            // answer yet, in which case finalization must be retried later.
            match report.is_confirmed() {
                None => Err(Error::<T>::OracleIsNotReady)?,
                Some(x) => agreement.on_finish(x)?,
            }
            // Store report on storage
            <ReportOf<T>>::insert(index, report.clone());
            // Emit event
            Self::deposit_event(Event::NewReport(index, report));
            Ok(().into())
        } else {
            Err(Error::<T>::AgreementNotFound.into())
        }
    }
}
}
#[cfg(test)]
mod tests {
use crate::economics::SimpleMarket;
use crate::signed::*;
use crate::technics::IPFS;
use crate::traits::*;
use crate::{self as liability, *};
use frame_support::{assert_err, assert_ok, parameter_types};
use hex_literal::hex;
use sp_core::{crypto::Pair, sr25519, H256};
use sp_keyring::AccountKeyring;
use sp_runtime::{
testing::Header,
traits::{IdentifyAccount, IdentityLookup, Verify},
AccountId32, MultiSignature,
};
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
type Balance = u128;
const XRT: Balance = 1_000_000_000;
frame_support::construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
Liability: liability::{Pallet, Call, Storage, Event<T>},
}
);
parameter_types! {
pub const BlockHashCount: u64 = 250;
}
impl frame_system::Config for Runtime {
type Origin = Origin;
type Index = u64;
type BlockNumber = u64;
type Call = Call;
type Hash = H256;
type Hashing = ::sp_runtime::traits::BlakeTwo256;
type AccountId = AccountId32;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type Version = ();
type PalletInfo = PalletInfo;
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type DbWeight = ();
type BaseCallFilter = frame_support::traits::Everything;
type SystemWeightInfo = ();
type BlockWeights = ();
type BlockLength = ();
type SS58Prefix = ();
type OnSetCode = ();
type MaxConsumers = frame_support::traits::ConstU32<16>;
}
parameter_types! {
pub const MaxLocks: u32 = 50;
pub const MaxReserves: u32 = 50;
pub const ExistentialDeposit: Balance = 10;
}
impl pallet_balances::Config for Runtime {
type MaxLocks = MaxLocks;
type MaxReserves = MaxReserves;
type ReserveIdentifier = [u8; 8];
type Balance = Balance;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
}
impl Config for Runtime {
type Event = Event;
type Agreement = SignedAgreement<
// Provide task in IPFS
IPFS,
// Liability has a price
SimpleMarket<Self::AccountId, Balances>,
// Use standard accounts
Self::AccountId,
// Use standard signatures
MultiSignature,
>;
type Report = SignedReport<
// Indexing liabilities using system index
Self::Index,
// Use standard accounts
Self::AccountId,
// Use standard signatures
MultiSignature,
// Provide report in IPFS
IPFS,
>;
}
// IPFS raw hash (sha256)
const IPFS_HASH: [u8; 32] =
hex!["30f3d649b3d140a6601e11a2cfbe3560e60dc5434f62d702ac8ceff4e1890015"];
/// Builds test externalities with Alice and Bob funded with 100 XRT each.
fn new_test_ext() -> sp_io::TestExternalities {
    let mut storage = frame_system::GenesisConfig::default()
        .build_storage::<Runtime>()
        .unwrap();
    let _ = pallet_balances::GenesisConfig::<Runtime> {
        balances: vec![
            (AccountKeyring::Alice.into(), 100 * XRT),
            (AccountKeyring::Bob.into(), 100 * XRT),
        ],
    }
    .assimilate_storage(&mut storage);
    storage.into()
}
#[test]
fn test_initial_setup() {
    // A fresh chain has no liabilities, so no index has been allocated yet.
    new_test_ext().execute_with(|| {
        assert_eq!(Liability::next_index(), None);
    });
}
fn | (
uri: &str,
technics: & | get_params_proof | identifier_name |
lib.rs |
/// How to report of agreement execution.
type Report: dispatch::Parameter + Report<Self::Index, Self::AccountId>;
/// The overarching event type.
type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
}
/// Technical parameter type of the configured agreement.
pub type TechnicsFor<T> =
    <<T as Config>::Agreement as Agreement<<T as frame_system::Config>::AccountId>>::Technical;
/// Economical parameter type of the configured agreement.
pub type EconomicsFor<T> =
    <<T as Config>::Agreement as Agreement<<T as frame_system::Config>::AccountId>>::Economical;
/// Report type of the pallet, as configured.
pub type ReportFor<T> = <T as Config>::Report;
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {
    /// Yay! New liability created.
    /// Fields: index, technics, economics, promisee, promisor.
    NewLiability(
        T::Index,
        TechnicsFor<T>,
        EconomicsFor<T>,
        T::AccountId,
        T::AccountId,
    ),
    /// Liability report published.
    NewReport(T::Index, ReportFor<T>),
}
#[pallet::error]
pub enum Error<T> {
/// Agreement proof verification failed.
BadAgreementProof,
/// Report proof verification failed.
BadReportProof,
/// Wrong report sender account.
BadReportSender,
/// Liability already finalized.
AlreadyFinalized,
/// Real world oracle is not ready for this report.
OracleIsNotReady,
/// Unable to load agreement from storage.
AgreementNotFound,
}
#[pallet::storage]
#[pallet::getter(fn latest_index)]
/// [DEPRECATED] Latest liability index.
/// TODO: remove after mainnet upgrade
pub(super) type LatestIndex<T: Config> = StorageValue<_, T::Index>;
#[pallet::storage]
#[pallet::getter(fn next_index)]
/// Next liability index.
pub(super) type NextIndex<T: Config> = StorageValue<_, T::Index>;
#[pallet::storage]
#[pallet::getter(fn agreement_of)]
/// Technical and economical parameters of liability.
pub(super) type AgreementOf<T: Config> = StorageMap<_, Twox64Concat, T::Index, T::Agreement>;
#[pallet::storage]
#[pallet::getter(fn report_of)]
/// Result of liability execution.
pub(super) type ReportOf<T: Config> = StorageMap<_, Twox64Concat, T::Index, ReportFor<T>>;
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
// TODO: remove after mainnet upgrade
fn on_runtime_upgrade() -> Weight {
if <NextIndex<T>>::get().is_none() {
if let Some(index) = <LatestIndex<T>>::take() {
<NextIndex<T>>::put(index)
}
}
1
}
}
#[pallet::pallet]
#[pallet::generate_store(pub(super) trait Store)]
#[pallet::without_storage_info]
pub struct Pallet<T>(PhantomData<T>);
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Create agreement between two parties.
#[pallet::weight(200_000)]
pub fn create(origin: OriginFor<T>, agreement: T::Agreement) -> DispatchResultWithPostInfo {
let _ = ensure_signed(origin)?;
ensure!(agreement.verify(), Error::<T>::BadAgreementProof);
// Start agreement processing
agreement.on_start()?;
// Store agreement on storage
let next_index = <NextIndex<T>>::get().unwrap_or(Default::default());
<AgreementOf<T>>::insert(next_index, agreement.clone());
<NextIndex<T>>::put(next_index + 1u32.into());
// Emit event
Self::deposit_event(Event::NewLiability(
next_index,
agreement.technical(),
agreement.economical(),
agreement.promisee(),
agreement.promisor(),
));
Ok(().into())
}
/// Publish technical report of complite works.
#[pallet::weight(200_000)]
pub fn finalize(origin: OriginFor<T>, report: ReportFor<T>) -> DispatchResultWithPostInfo {
let _ = ensure_signed(origin)?;
// Check report proof
ensure!(report.verify(), Error::<T>::BadReportProof);
let index = report.index();
// Is liability already finalized?
ensure!(
<ReportOf<T>>::get(index) == None,
Error::<T>::AlreadyFinalized
);
// Decode agreement from storage
if let Some(agreement) = <AgreementOf<T>>::get(index) {
// Check report sender
ensure!(
report.sender() == agreement.promisor(),
Error::<T>::BadReportSender
);
// Run agreement final processing
match report.is_confirmed() {
None => Err(Error::<T>::OracleIsNotReady)?,
Some(x) => agreement.on_finish(x)?,
}
// Store report on storage
<ReportOf<T>>::insert(index, report.clone());
// Emit event
Self::deposit_event(Event::NewReport(index, report));
Ok(().into())
} else {
Err(Error::<T>::AgreementNotFound.into())
}
}
}
}
#[cfg(test)]
mod tests {
use crate::economics::SimpleMarket;
use crate::signed::*;
use crate::technics::IPFS;
use crate::traits::*;
use crate::{self as liability, *};
use frame_support::{assert_err, assert_ok, parameter_types};
use hex_literal::hex;
use sp_core::{crypto::Pair, sr25519, H256};
use sp_keyring::AccountKeyring;
use sp_runtime::{
testing::Header,
traits::{IdentifyAccount, IdentityLookup, Verify},
AccountId32, MultiSignature,
};
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
type Balance = u128;
const XRT: Balance = 1_000_000_000;
frame_support::construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
Liability: liability::{Pallet, Call, Storage, Event<T>},
}
);
parameter_types! {
pub const BlockHashCount: u64 = 250;
}
impl frame_system::Config for Runtime {
type Origin = Origin;
type Index = u64;
type BlockNumber = u64;
type Call = Call;
type Hash = H256;
type Hashing = ::sp_runtime::traits::BlakeTwo256;
type AccountId = AccountId32;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type Version = ();
type PalletInfo = PalletInfo;
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type DbWeight = ();
type BaseCallFilter = frame_support::traits::Everything;
type SystemWeightInfo = ();
type BlockWeights = ();
type BlockLength = ();
type SS58Prefix = ();
type OnSetCode = ();
type MaxConsumers = frame_support::traits::ConstU32<16>;
}
parameter_types! {
pub const MaxLocks: u32 = 50;
pub const MaxReserves: u32 = 50;
pub const ExistentialDeposit: Balance = 10;
}
impl pallet_balances::Config for Runtime {
type MaxLocks = MaxLocks;
type MaxReserves = MaxReserves;
type ReserveIdentifier = [u8; 8];
type Balance = Balance;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
}
impl Config for Runtime {
type Event = Event;
type Agreement = SignedAgreement<
// Provide task in IPFS
IPFS,
// Liability has a price
SimpleMarket<Self::AccountId, Balances>,
// Use standard accounts
Self::AccountId,
// Use standard signatures
MultiSignature,
>;
type Report = SignedReport<
// Indexing liabilities using system index
Self::Index,
// Use standard accounts
Self::AccountId,
// Use standard signatures
MultiSignature,
// Provide report in IPFS
IPFS,
>;
}
// IPFS raw hash (sha256)
const IPFS_HASH: [u8; 32] =
hex!["30f3d649b3d140a6601e11a | /// How to make and process agreement between two parties.
type Agreement: dispatch::Parameter + Processing + Agreement<Self::AccountId>; | random_line_split | |
vrf.go | v *VRF) Disable() {
if v.enabled {
v.router.disable()
v.tap.disable()
if v.hostif != nil {
v.hostif.disable()
}
v.enabled = false
}
}
// Name returns the name of the VRF.
func (v *VRF) Name() string {
	return v.name
}

// String implements fmt.Stringer; it returns the VRF name.
func (v *VRF) String() string {
	return v.name
}

// Index returns a unique identifier of the VRF.
func (v *VRF) Index() VRFIndex {
	return v.index
}

// VIFIndex returns a unique VIFIndex of the VRF.
// VIFIndex is used for inter-VRF routing.
func (v *VRF) VIFIndex() VIFIndex {
	return v.vifIndex
}

// Input returns an input ring for the VRF
// which is the input ring for the underlying interface.
func (v *VRF) Input() *dpdk.Ring {
	return v.router.base.Input()
}
// SetRD sets the route distinguisher of the VRF.
//
// Setting the RD the VRF already holds is a no-op; setting an RD that is
// in use by another VRF fails. The previously held RD (if any) is
// released.
func (v *VRF) SetRD(rd uint64) error {
	vrfMgr.mutex.Lock()
	defer vrfMgr.mutex.Unlock()
	// Re-assigning the VRF's own RD is idempotent, not a conflict.
	// (Previously this incorrectly failed with "already exists".)
	if rd == v.rd {
		return nil
	}
	oldrd := v.rd
	if _, exists := vrfMgr.rds[rd]; exists {
		return fmt.Errorf("VRF RD %d already exists", rd)
	}
	v.rd = rd
	vrfMgr.rds[rd] = struct{}{}
	// Release the previously held RD; 0 means "no RD assigned".
	if oldrd != 0 {
		delete(vrfMgr.rds, oldrd)
	}
	return nil
}
// RD returns the route distinguisher of the VRF (0 when unset).
func (v *VRF) RD() uint64 {
	return v.rd
}
// AddVIF adds a VIF to the VRF and wires the packet-forwarding rules
// between the VIF, the router and the ICMP tap module.
// Adding the same VIF more than once is silently ignored.
// On any failure the rules installed so far are rolled back in reverse
// order via the error labels below.
func (v *VRF) AddVIF(vif *VIF) error {
	var err error
	if _, exists := v.devs[vif.VIFIndex()]; exists {
		return nil
	}
	if err = vif.setVRF(v); err != nil {
		return err
	}
	// router -> VIF
	if err = v.router.addVIF(vif); err != nil {
		goto error1
	}
	// ICMP -> VIF
	if err = v.tap.connect(vif.Outbound(), MatchOutVIF, vif); err != nil {
		goto error2
	}
	// VIF -> router (DST_SELF)
	if err = vif.connect(v.router.input(), MatchEthDstSelf, nil); err != nil {
		goto error3
	}
	// VIF -> router (broadcast)
	if err = vif.connect(v.router.input(), MatchEthDstBC, nil); err != nil {
		goto error4
	}
	// VIF -> router (multicast)
	if err = vif.connect(v.router.input(), MatchEthDstMC, nil); err != nil {
		goto error5
	}
	// Enable NAPT if needed
	if vif.isNAPTEnabled() {
		if err = v.enableNAPT(vif); err != nil {
			goto error6
		}
	}
	v.devs[vif.VIFIndex()] = vif
	// TUN/TAP for the VIF will be created
	noti.Notify(notifier.Add, v, vif)
	return nil

	// Rollback chain: each label undoes the step whose failure jumps past it.
error6:
	vif.disconnect(MatchEthDstMC, nil)
error5:
	vif.disconnect(MatchEthDstBC, nil)
error4:
	vif.disconnect(MatchEthDstSelf, nil)
error3:
	v.tap.disconnect(MatchOutVIF, vif)
error2:
	v.router.deleteVIF(vif)
error1:
	vif.setVRF(nil)
	return err
}
// DeleteVIF detaches a VIF from the VRF: removes routes pointing at it,
// tears down the forwarding rules installed by AddVIF, and publishes a
// Delete notification. Returns an error when the VIF is not a member of
// this VRF.
func (v *VRF) DeleteVIF(vif *VIF) error {
	if _, ok := v.devs[vif.VIFIndex()]; !ok {
		return fmt.Errorf("Can't find %v in the VRF.", vif)
	}
	// Delete routes related to the vif, because netlink does not send
	// deletion notifications for them when the vif leaves the vrf.
	for _, route := range v.ListEntries() {
		if route.Dev.VIFIndex() == vif.VIFIndex() {
			v.DeleteEntry(route)
		}
	}
	// Tear down the rules installed by AddVIF.
	v.tap.disconnect(MatchOutVIF, vif)
	vif.disconnect(MatchEthDstSelf, nil)
	vif.disconnect(MatchEthDstBC, nil)
	vif.disconnect(MatchEthDstMC, nil)
	vif.setVRF(nil)
	v.router.deleteVIF(vif)
	delete(v.devs, vif.VIFIndex())
	// TUN/TAP for the VIF will be deleted
	noti.Notify(notifier.Delete, v, vif)
	return nil
}
// vrrpEnabled is called only when VRRP is added to a VIF.
// It lazily creates the per-VRF hostif instance (shared by every
// VRRP-enabled VIF, tracked by the vrrpref counter) and installs a rule
// forwarding VRRP advertisement multicast traffic arriving on the VIF
// from the router to the hostif.
func (v *VRF) vrrpEnabled(vif *VIF) {
	v.vrrpMutex.Lock()
	defer v.vrrpMutex.Unlock()
	var ipv4dst *ScopedAddress
	var err error
	if ipv4dst, err = NewScopedAddress(VRRPMcastAddr.IP, vif); err != nil {
		return
	}
	// Create only one hostif for vrf
	if v.hostif == nil {
		hostifName := v.name + "-hostif"
		if v.hostif, err = newInstance(hostifModule, hostifName, v.name); err != nil {
			// NOTE(review): on failure v.hostif holds whatever newInstance
			// returned — confirm it is nil on error.
			return
		} else {
			// Only bring the hostif up immediately when the VRF itself is
			// enabled; otherwise Enable() starts it later.
			if v.enabled {
				if err = v.hostif.enable(); err != nil {
					goto error
				}
			}
		}
	}
	// Add packet forwarding rule
	// router -> hostif
	// VRRP advertisement multicast address.
	if err = v.router.connect(v.hostif.Input(), MatchIPv4DstInVIF, ipv4dst); err != nil {
		goto error
	}
	v.vrrpref++
	return

	// Free the hostif only when no other VIF holds a reference to it.
error:
	if v.vrrpref == 0 {
		v.hostif.free()
		v.hostif = nil
	}
}
// vrrpDisabled is called only when VRRP is deleted from a VIF.
// It removes the VRRP multicast forwarding rule for the VIF and frees
// the shared hostif when the last reference is dropped.
func (v *VRF) vrrpDisabled(vif *VIF) {
	v.vrrpMutex.Lock()
	defer v.vrrpMutex.Unlock()
	ipv4dst, err := NewScopedAddress(VRRPMcastAddr.IP, vif)
	if err != nil {
		// NOTE(review): returning here skips the vrrpref decrement below,
		// so a failed address lookup leaks a reference — confirm intended.
		return
	}
	v.router.disconnect(MatchIPv4DstInVIF, ipv4dst)
	if v.hostif != nil && v.vrrpref == 1 {
		v.hostif.free()
		v.hostif = nil
	}
	v.vrrpref--
}
// VIF returns the VIFs that belong to the VRF, skipping any other kind
// of output device (such as inter-VRF links).
func (v *VRF) VIF() []*VIF {
	var members []*VIF
	for _, dev := range v.devs {
		vif, ok := dev.(*VIF)
		if !ok {
			continue
		}
		members = append(members, vif)
	}
	return members
}
// Dump returns a human-readable description of the VRF: its name, RD,
// attached devices and, when present, the contents of the SA/SP
// databases.
func (v *VRF) Dump() string {
	out := fmt.Sprintf("%s: RD=%d. %d DEV(s):", v.name, v.rd, len(v.devs))
	for _, dev := range v.devs {
		out += fmt.Sprintf(" %v", dev)
	}
	if v.sadb == nil {
		return out
	}
	sad := v.sadb.SAD()
	out += fmt.Sprintf("\n%d SAD", len(sad))
	for _, sa := range sad {
		out += fmt.Sprintf("\n\t%v", sa)
	}
	spd := v.sadb.SPD()
	out += fmt.Sprintf("\n%d SPD", len(spd))
	for _, sp := range spd {
		out += fmt.Sprintf("\n\t%v", sp)
	}
	return out
}
// SADatabases returns the SA/SP databases associated with the VRF,
// lazily creating them on first use (guarded by sync.Once).
func (v *VRF) SADatabases() *SADatabases {
	v.sadbOnce.Do(func() {
		v.sadb = newSADatabases(v)
	})
	return v.sadb
}

// HasSADatabases returns true if the VRF has associated SADatabases.
// Returns false otherwise.
func (v *VRF) HasSADatabases() bool {
	return v.sadb != nil
}
// createFiveTuples builds one inbound five-tuple per remote address:
// source = remote, destination = local, with the given protocol and
// destination port range.
func createFiveTuples(remotes []net.IP, local net.IP, proto IPProto, dstPort PortRange) []*FiveTuple {
	tuples := make([]*FiveTuple, 0, len(remotes))
	for _, remote := range remotes {
		ft := NewFiveTuple()
		ft.SrcIP = CreateIPAddr(remote)
		ft.DstIP = CreateIPAddr(local)
		ft.DstPort = dstPort
		ft.Proto = proto
		tuples = append(tuples, ft)
	}
	return tuples
}
func (v *VRF) addL3Tunnel(vif *VIF) error {
t := vif.Tunnel()
if t == nil {
return fmt.Errorf("%v is not tunnel.", vif)
}
ra := t.RemoteAddresses()
if len(ra) == 0 {
return fmt.Errorf("No remote address(es) specified: %v.", t)
}
if err := vif.connect(v.router.input(), MatchIPv4Dst, &ra[0]); err != nil | {
return fmt.Errorf("Adding a rule to %v failed for L3 tunnel: %v", vif, err)
} | conditional_block | |
vrf.go | := range remotes {
ft := NewFiveTuple()
ft.SrcIP = CreateIPAddr(remote)
ft.DstIP = CreateIPAddr(local)
ft.DstPort = dstPort
ft.Proto = proto
fiveTuples[i] = ft
}
return fiveTuples
}
// addL3Tunnel installs the forwarding rules for an L3 tunnel VIF: an
// outbound IPv4-destination rule on the VIF, per-remote inbound
// five-tuple rules on the router and, for IPSec tunnels, NAT-traversal
// rules (UDP port 4500). On failure every rule already installed is
// rolled back.
func (v *VRF) addL3Tunnel(vif *VIF) error {
	t := vif.Tunnel()
	if t == nil {
		return fmt.Errorf("%v is not tunnel.", vif)
	}
	ra := t.RemoteAddresses()
	if len(ra) == 0 {
		return fmt.Errorf("No remote address(es) specified: %v.", t)
	}
	// Outbound: steer traffic for the first remote address into the VIF.
	if err := vif.connect(v.router.input(), MatchIPv4Dst, &ra[0]); err != nil {
		return fmt.Errorf("Adding a rule to %v failed for L3 tunnel: %v", vif, err)
	}
	// Forward inbound packets to L3 Tunnel
	fts := createFiveTuples(ra, t.local, t.IPProto(), PortRange{})
	for i, ft := range fts {
		if err := v.router.connect(vif.Inbound(), Match5Tuple, ft); err != nil {
			// Roll back the outbound rule and the five-tuples added so far.
			vif.disconnect(MatchIPv4Dst, &ra[0])
			for _, addedFt := range fts[0:i] {
				v.router.disconnect(Match5Tuple, addedFt)
			}
			return fmt.Errorf("Adding a rule to router for L3 tunnel failed: %v", err)
		}
	}
	// Add a rule for NAT Traversal, if the tunnel is IPSec.
	if t.Security() == SecurityIPSec {
		nats := createFiveTuples(ra, t.local, IPP_UDP, PortRange{Start: 4500})
		for i, nat := range nats {
			if err := v.router.connect(vif.Inbound(), Match5Tuple, nat); err != nil {
				// Roll back everything installed above.
				vif.disconnect(MatchIPv4Dst, &ra[0])
				for _, ft := range fts {
					v.router.disconnect(Match5Tuple, ft)
				}
				for _, addedNat := range nats[0:i] {
					v.router.disconnect(Match5Tuple, addedNat)
				}
				return fmt.Errorf("Adding a rule for IPSec NAT traversal failed: %v", err)
			}
		}
	}
	return nil
}
// deleteL3Tunnel removes the forwarding rules installed by addL3Tunnel:
// the inbound five-tuple matches, the IPSec NAT-traversal matches (UDP
// port 4500) when applicable, and the outbound IPv4 destination match.
func (v *VRF) deleteL3Tunnel(vif *VIF) {
	tun := vif.Tunnel()
	rules := createFiveTuples(tun.remotes, tun.local, tun.IPProto(), PortRange{})
	if tun.Security() == SecurityIPSec {
		rules = append(rules, createFiveTuples(tun.remotes, tun.local, IPP_UDP, PortRange{Start: 4500})...)
	}
	for _, ft := range rules {
		v.router.disconnect(Match5Tuple, ft)
	}
	vif.disconnect(MatchIPv4Dst, &(tun.RemoteAddresses()[0]))
}
// createVxLANs builds one inbound VxLAN match per remote address
// (source = remote, destination = local) for the given UDP destination
// port and VNI.
func createVxLANs(remotes []net.IP, local net.IP, dstPort uint16, vni uint32) []*VxLAN {
	out := make([]*VxLAN, 0, len(remotes))
	for _, remote := range remotes {
		out = append(out, &VxLAN{
			Src:     remote,
			Dst:     local,
			DstPort: dstPort,
			VNI:     vni,
		})
	}
	return out
}
// addL2Tunnel installs the forwarding rules for an L2 tunnel interface:
// an outbound IPv4-destination rule on the interface plus per-remote
// inbound rules on the router (GRE five-tuples or VxLAN matches,
// depending on the encapsulation method). On failure every rule already
// installed is rolled back.
func (v *VRF) addL2Tunnel(i *Interface) error {
	t := i.Tunnel()
	if t == nil {
		return fmt.Errorf("%v is not tunnel.", i)
	}
	ra := t.RemoteAddresses()
	if len(ra) == 0 {
		return fmt.Errorf("No remote address(es) specified: %v.", t)
	}
	if err := i.connect(v.router.input(), MatchIPv4Dst, &ra[0]); err != nil {
		return fmt.Errorf("Adding a rule to %v failed for L2 tunnel: %v", i, err)
	}
	// Forward inbound packets to L2 Tunnel
	switch e := t.EncapsMethod(); e {
	case EncapsMethodGRE:
		fts := createFiveTuples(ra, t.local, IPP_GRE, PortRange{})
		for idx, ft := range fts {
			if err := v.router.connect(i.Inbound(), Match5Tuple, ft); err != nil {
				// Roll back the outbound rule and the five-tuples added so far.
				i.disconnect(MatchIPv4Dst, &ra[0])
				for _, addedFt := range fts[0:idx] {
					v.router.disconnect(Match5Tuple, addedFt)
				}
				return fmt.Errorf("Can't connect L2 tunnel to the router: %v", err)
			}
		}
	case EncapsMethodVxLAN:
		vxlans := createVxLANs(ra, t.local, t.vxlanPort, t.vni)
		for idx, vxlan := range vxlans {
			if err := v.router.connect(i.Inbound(), MatchVxLAN, vxlan); err != nil {
				// Roll back the outbound rule and the VxLAN rules added so far.
				i.disconnect(MatchIPv4Dst, &ra[0])
				for _, addedVxLAN := range vxlans[0:idx] {
					// BUGFIX: these rules were installed with MatchVxLAN, so
					// they must be removed with MatchVxLAN (was Match5Tuple),
					// matching deleteL2Tunnel.
					v.router.disconnect(MatchVxLAN, addedVxLAN)
				}
				return fmt.Errorf("Can't connect L2 tunnel to the router: %v", err)
			}
		}
	default:
		// BUGFIX: undo the outbound rule installed above; previously it
		// leaked when the encapsulation method was unsupported.
		i.disconnect(MatchIPv4Dst, &ra[0])
		return fmt.Errorf("Unsupported L2 Tunnel encaps method: %v", e)
	}
	return nil
}
// deleteL2Tunnel removes the forwarding rules installed by addL2Tunnel
// for both supported encapsulations, then drops the outbound
// IPv4-destination rule on the interface.
func (v *VRF) deleteL2Tunnel(i *Interface) {
	tun := i.Tunnel()
	switch tun.EncapsMethod() {
	case EncapsMethodGRE:
		for _, ft := range createFiveTuples(tun.remotes, tun.local, IPP_GRE, PortRange{}) {
			v.router.disconnect(Match5Tuple, ft)
		}
	case EncapsMethodVxLAN:
		for _, vx := range createVxLANs(tun.remotes, tun.local, tun.vxlanPort, tun.vni) {
			v.router.disconnect(MatchVxLAN, vx)
		}
	}
	i.disconnect(MatchIPv4Dst, &(tun.RemoteAddresses()[0]))
}
// enableNAPT turns on NAPT processing for the VIF in this VRF's router.
func (v *VRF) enableNAPT(vif *VIF) error {
	return v.router.enableNAPT(vif)
}

// disableNAPT turns off NAPT processing for the VIF in this VRF's router.
func (v *VRF) disableNAPT(vif *VIF) error {
	return v.router.disableNAPT(vif)
}

// MarshalJSON encodes the VRF as a JSON string containing its name.
// NOTE(review): the name is concatenated without JSON escaping; this
// relies on NewVRF's regex validation rejecting quotes/backslashes —
// confirm against vrfMgr.re.
func (v *VRF) MarshalJSON() ([]byte, error) {
	return []byte(`"` + v.name + `"`), nil
}
// registerOutputDevice ensures the given output device is known to the
// router instance. VIFs must already have been added via AddVIF; only
// VRFs (used as inter-VRF routing targets) may be registered lazily here.
func (v *VRF) registerOutputDevice(dev OutputDevice) error {
	// If OutputDevice is already in devs, it means the OutputDevice
	// is either VIF, or VRF that has already been added.
	if _, exists := v.devs[dev.VIFIndex()]; exists {
		return nil
	}
	// OutputDevice to be added shall be VRF.
	// If OutputDevice is VIF, it should have been added via AddVIF already.
	if _, ok := dev.(*VRF); !ok {
		return fmt.Errorf("OutputDevice is not VRF: %v", dev)
	}
	// Add VRF to router instance
	// NOTE(review): the underlying error from addOutputDevice is
	// discarded — consider including it in the returned error.
	if err := v.router.addOutputDevice(dev); err != nil {
		return fmt.Errorf("Adding OutputDevice %v failed.", dev)
	}
	v.devs[dev.VIFIndex()] = dev
	return nil
}
// routeEntryAdded registers every output device appearing in the new
// route with the router instance, then publishes an Add notification.
// The notification is suppressed if any device fails to register.
func (v *VRF) routeEntryAdded(entry Route) {
	devs := []OutputDevice{entry.Dev}
	if len(entry.Nexthops) > 0 {
		devs = devs[:0]
		for _, nh := range entry.Nexthops {
			devs = append(devs, nh.Dev)
		}
	}
	for _, dev := range devs {
		if err := v.registerOutputDevice(dev); err != nil {
			logger.Err("%v", err)
			return
		}
	}
	noti.Notify(notifier.Add, v, entry)
}
// routeEntryDeleted publishes a Delete notification for the removed route.
// TODO: Remove unused VRF from the router instance
func (v *VRF) routeEntryDeleted(entry Route) {
	noti.Notify(notifier.Delete, v, entry)
}
// pbrEntryAdded registers the output device of every next hop in the
// PBR entry (hops without a device are skipped), then publishes an Add
// notification. The notification is suppressed on registration failure.
func (v *VRF) pbrEntryAdded(entry *PBREntry) {
	for _, nh := range entry.NextHops {
		if nh.Dev != nil {
			if err := v.registerOutputDevice(nh.Dev); err != nil {
				logger.Err("%v", err)
				return
			}
		}
	}
	noti.Notify(notifier.Add, v, entry)
}
// pbrEntryDeleted publishes a Delete notification for the removed PBR entry.
// TODO: Remove unused VRF from the router instance
func (v *VRF) pbrEntryDeleted(entry *PBREntry) {
	noti.Notify(notifier.Delete, v, entry)
}
// GetAllVRF returns a snapshot of all registered VRFs, in no particular
// order.
func GetAllVRF() []*VRF {
	vrfMgr.mutex.Lock()
	defer vrfMgr.mutex.Unlock()
	vrfs := make([]*VRF, 0, len(vrfMgr.byName))
	for _, vrf := range vrfMgr.byName {
		vrfs = append(vrfs, vrf)
	}
	return vrfs
}
// GetVRFByName returns a VRF with the given name.
func | GetVRFByName | identifier_name | |
vrf.go |
// releaseIndex returns the VRF's index slot to the manager.
// Must be called with vrfMgr.mutex held.
func (vm *vrfManager) releaseIndex(vrf *VRF) {
	vm.byIndex[int(vrf.index)] = nil
}
// NewVRF creates a VRF instance.
//
// The name must match the manager's validation pattern and be unique.
// A VRF index and a VIF index are allocated, a tap module (local/ICMP
// traffic) and a router module are created, and all IP packets destined
// to the VRF itself are wired to the tap. On failure the resources
// acquired so far are released via the error labels below.
func NewVRF(name string) (*VRF, error) {
	if !vrfMgr.re.MatchString(name) {
		return nil, fmt.Errorf("Invalid VRF name: '%v'", name)
	}
	vrfMgr.mutex.Lock()
	defer vrfMgr.mutex.Unlock()
	if _, exists := vrfMgr.byName[name]; exists {
		return nil, fmt.Errorf("VRF %s already exists", name)
	}
	vrf := &VRF{
		name:    name,
		enabled: false,
	}
	if !vrfMgr.assignIndex(vrf) {
		return nil, fmt.Errorf("No space left for new VRF")
	}
	vifIndex, err := vifIdxMgr.allocVIFIndex(vrf)
	if err != nil {
		vrfMgr.releaseIndex(vrf)
		return nil, fmt.Errorf("Can't assign VIFIndex: %v", err)
	}
	vrf.vifIndex = vifIndex
	// Create an ICMP processor
	var errMsg error
	tapName := name + "-tap"
	if tap, err := newInstance(tapModule, tapName, name); err != nil {
		errMsg = fmt.Errorf("ICMP handler instance creation failed: %v", err)
		goto error1
	} else {
		vrf.tap = tap
	}
	// Create a router
	if router, err := newRouter(vrf, name); err != nil {
		errMsg = fmt.Errorf("Router instance creation failed: %v", err)
		goto error2
	} else {
		vrf.router = router
	}
	// Forward all IP packets to the ICMP processor
	if err := vrf.router.connect(vrf.tap.Input(), MatchIPv4DstSelf, nil); err != nil {
		errMsg = errors.New("Can't connect a router and an tap modules")
		goto error3
	}
	vrf.RoutingTable = newRoutingTable(vrf)
	vrf.devs = make(map[VIFIndex]OutputDevice)
	vrf.PBR = newPBR(vrf)
	vrfMgr.byName[name] = vrf
	noti.Notify(notifier.Add, vrf, nil)
	return vrf, nil

	// Cleanup chain: each label releases the resource acquired before the
	// failing step.
	// NOTE(review): the VIF index allocated above is not freed on these
	// error paths — possible index leak; confirm.
error3:
	vrf.router.free()
error2:
	vrf.tap.free()
error1:
	vrfMgr.releaseIndex(vrf)
	return nil, errMsg
}
// Free releases the VRF: detaches all member VIFs, tears down the tap
// and router modules, releases its indices and RD, and publishes a
// Delete notification. The VRF must not be used afterwards.
func (v *VRF) Free() {
	vrfMgr.mutex.Lock()
	defer vrfMgr.mutex.Unlock()
	for _, dev := range v.devs {
		if vif, ok := dev.(*VIF); ok {
			v.DeleteVIF(vif)
		}
	}
	v.router.disconnect(MatchIPv4DstSelf, nil)
	v.tap.free()
	v.router.free()
	delete(vrfMgr.byName, v.name)
	vrfMgr.releaseIndex(v)
	if err := vifIdxMgr.freeVIFIndex(v.vifIndex); err != nil {
		logger.Err("Freeing VIFIndex for %v failed: %v", v.name, err)
	}
	// RD 0 means "no RD assigned" and is never registered in the map.
	if v.rd != 0 {
		delete(vrfMgr.rds, v.rd)
	}
	noti.Notify(notifier.Delete, v, nil)
}
// baseInstance returns the BaseInstance backing the VRF's router module.
func (v *VRF) baseInstance() *BaseInstance {
	return v.router.base
}

// IsEnabled reports whether the VRF is currently enabled.
func (v *VRF) IsEnabled() bool {
	return v.enabled
}
// Enable starts the tap, hostif (when present) and router modules of
// the VRF, in that order. On any failure the modules enabled so far are
// disabled again and the VRF stays disabled. Enabling an already
// enabled VRF is a no-op.
func (v *VRF) Enable() error {
	if v.enabled {
		return nil
	}
	if err := v.tap.enable(); err != nil {
		return err
	}
	// Even if vrf is enabled, hostif may not be enabled.
	if v.hostif != nil {
		if err := v.hostif.enable(); err != nil {
			// If activation of hostif fails,
			// disable other functions.
			v.tap.disable()
			return err
		}
	}
	if err := v.router.enable(); err != nil {
		// Roll back the modules enabled above.
		v.tap.disable()
		if v.hostif != nil {
			v.hostif.disable()
		}
		return err
	}
	v.enabled = true
	return nil
}
// Disable stops the router, tap and (when present) hostif modules of
// the VRF. Disabling an already-disabled VRF is a no-op.
func (v *VRF) Disable() {
	if !v.enabled {
		return
	}
	v.router.disable()
	v.tap.disable()
	if v.hostif != nil {
		v.hostif.disable()
	}
	v.enabled = false
}
// Name returns the name of the VRF.
func (v *VRF) Name() string {
return v.name
}
func (v *VRF) String() string {
return v.name
}
// Index returns a unique identifier of the VRF.
func (v *VRF) Index() VRFIndex {
return v.index
}
// VRFIndex returns a unique VIFIndex of the VRF.
// VIFIndex is used for inter-VRF routing.
func (v *VRF) VIFIndex() VIFIndex {
return v.vifIndex
}
// Input returns an input ring for the VRF
// which is the input ring for the underlying interface.
func (v *VRF) Input() *dpdk.Ring {
return v.router.base.Input()
}
// SetRD sets the route distinguisher of thr VRF.
func (v *VRF) SetRD(rd uint64) error {
vrfMgr.mutex.Lock()
defer vrfMgr.mutex.Unlock()
oldrd := v.rd
if _, exists := vrfMgr.rds[rd]; exists {
return fmt.Errorf("VRF RD %d already exists", rd)
}
v.rd = rd
vrfMgr.rds[rd] = struct{}{}
if oldrd != 0 {
delete(vrfMgr.rds, oldrd)
}
return nil
}
// RD returns the route distinguisher of the VRF.
func (v *VRF) RD() uint64 {
return v.rd
}
// AddVIF adds VIF to the VRF.
// If the same VIF is added more than once to the VRF,
// it sliently ignores.
func (v *VRF) AddVIF(vif *VIF) error {
var err error
if _, exists := v.devs[vif.VIFIndex()]; exists {
return nil
}
if err = vif.setVRF(v); err != nil {
return err
}
// router -> VIF
if err = v.router.addVIF(vif); err != nil {
goto error1
}
// ICMP -> VIF
if err = v.tap.connect(vif.Outbound(), MatchOutVIF, vif); err != nil {
goto error2
}
// VIF -> router (DST_SELF)
if err = vif.connect(v.router.input(), MatchEthDstSelf, nil); err != nil {
goto error3
}
// VIF -> router (broadcast)
if err = vif.connect(v.router.input(), MatchEthDstBC, nil); err != nil {
goto error4
}
// VIF -> router (multicast)
if err = vif.connect(v.router.input(), MatchEthDstMC, nil); err != nil {
goto error5
}
// Enable NAPT if needed
if vif.isNAPTEnabled() {
if err = v.enableNAPT(vif); err != nil {
goto error6
}
}
v.devs[vif.VIFIndex()] = vif
// TUN/TAP for the VIF will be created
noti.Notify(notifier.Add, v, vif)
return nil
error6:
vif.disconnect(MatchEthDstMC, nil)
error5:
vif.disconnect(MatchEthDstBC, nil)
error4:
vif.disconnect(MatchEthDstSelf, nil)
error3:
v.tap.disconnect(MatchOutVIF, vif)
error2:
v.router.deleteVIF(vif)
error1:
vif.setVRF(nil)
return err
}
func (v *VRF) DeleteVIF(vif *VIF) error {
if _, ok := v.devs[vif.VIFIndex()]; !ok {
return fmt.Errorf("Can't find %v in the VRF.", vif)
}
// Delete routes related a vif as notifications about deletion of the routes
// is not notified from netlink when the vif is deleted from a vrf.
for _, route := range v.ListEntries() {
if route.Dev.VIFIndex() == vif.VIFIndex() {
v.DeleteEntry(route)
}
}
v.tap.disconnect(MatchOutVIF, vif)
vif.disconnect(MatchEthDstSelf, nil)
vif.disconnect(MatchEthDstBC, nil)
vif.disconnect(MatchEthDstMC, nil)
vif.setVRF(nil)
v.router.deleteVIF(vif)
delete(v.devs, vif.VIFIndex())
// TUN/TAP for the VIF will be deleted
noti.Notify(notifier.Delete, v, vif)
return nil
}
// Called only when VRRP is added to VIF
func (v *VRF) vrrpEnabled(vif *VIF) {
v.vrrpMutex.Lock()
defer v.vrrpMutex.Unlock()
var ipv | {
// try from the nextIndex to the end
if vm.findSlot(vrf, vm.nextIndex, len(vm.byIndex)) {
return true
}
// try from the head to the nextIndex
return vm.findSlot(vrf, 0, vm.nextIndex)
} | identifier_body | |
vrf.go | return nil
}
if err = vif.setVRF(v); err != nil {
return err
}
// router -> VIF
if err = v.router.addVIF(vif); err != nil {
goto error1
}
// ICMP -> VIF
if err = v.tap.connect(vif.Outbound(), MatchOutVIF, vif); err != nil {
goto error2
}
// VIF -> router (DST_SELF)
if err = vif.connect(v.router.input(), MatchEthDstSelf, nil); err != nil {
goto error3
}
// VIF -> router (broadcast)
if err = vif.connect(v.router.input(), MatchEthDstBC, nil); err != nil {
goto error4
}
// VIF -> router (multicast)
if err = vif.connect(v.router.input(), MatchEthDstMC, nil); err != nil {
goto error5
}
// Enable NAPT if needed
if vif.isNAPTEnabled() {
if err = v.enableNAPT(vif); err != nil {
goto error6
}
}
v.devs[vif.VIFIndex()] = vif
// TUN/TAP for the VIF will be created
noti.Notify(notifier.Add, v, vif)
return nil
error6:
vif.disconnect(MatchEthDstMC, nil)
error5:
vif.disconnect(MatchEthDstBC, nil)
error4:
vif.disconnect(MatchEthDstSelf, nil)
error3:
v.tap.disconnect(MatchOutVIF, vif)
error2:
v.router.deleteVIF(vif)
error1:
vif.setVRF(nil)
return err
}
func (v *VRF) DeleteVIF(vif *VIF) error {
if _, ok := v.devs[vif.VIFIndex()]; !ok {
return fmt.Errorf("Can't find %v in the VRF.", vif)
}
// Delete routes related a vif as notifications about deletion of the routes
// is not notified from netlink when the vif is deleted from a vrf.
for _, route := range v.ListEntries() {
if route.Dev.VIFIndex() == vif.VIFIndex() {
v.DeleteEntry(route)
}
}
v.tap.disconnect(MatchOutVIF, vif)
vif.disconnect(MatchEthDstSelf, nil)
vif.disconnect(MatchEthDstBC, nil)
vif.disconnect(MatchEthDstMC, nil)
vif.setVRF(nil)
v.router.deleteVIF(vif)
delete(v.devs, vif.VIFIndex())
// TUN/TAP for the VIF will be deleted
noti.Notify(notifier.Delete, v, vif)
return nil
}
// Called only when VRRP is added to VIF
func (v *VRF) vrrpEnabled(vif *VIF) {
v.vrrpMutex.Lock()
defer v.vrrpMutex.Unlock()
var ipv4dst *ScopedAddress
var err error
if ipv4dst, err = NewScopedAddress(VRRPMcastAddr.IP, vif); err != nil {
return
}
// Create only one hostif for vrf
if v.hostif == nil {
hostifName := v.name + "-hostif"
if v.hostif, err = newInstance(hostifModule, hostifName, v.name); err != nil {
return
} else {
if v.enabled {
if err = v.hostif.enable(); err != nil {
goto error
}
}
}
}
// Add packet forwarding rule
// router -> hostif
// VRRP advertisement multicast address.
if err = v.router.connect(v.hostif.Input(), MatchIPv4DstInVIF, ipv4dst); err != nil {
goto error
}
v.vrrpref++
return
error:
if v.vrrpref == 0 {
v.hostif.free()
v.hostif = nil
}
}
// Called only when VRRP is deleted from VIF
func (v *VRF) vrrpDisabled(vif *VIF) {
v.vrrpMutex.Lock()
defer v.vrrpMutex.Unlock()
ipv4dst, err := NewScopedAddress(VRRPMcastAddr.IP, vif)
if err != nil {
return
}
v.router.disconnect(MatchIPv4DstInVIF, ipv4dst)
if v.hostif != nil && v.vrrpref == 1 {
v.hostif.free()
v.hostif = nil
}
v.vrrpref--
}
// VIF returns a slice of Vif Indices in the VRF.
func (v *VRF) VIF() []*VIF {
var vifs []*VIF
for _, dev := range v.devs {
if vif, ok := dev.(*VIF); ok {
vifs = append(vifs, vif)
}
}
return vifs
}
// Dump returns descriptive information about the VRF
func (v *VRF) Dump() string {
str := fmt.Sprintf("%s: RD=%d. %d DEV(s):", v.name, v.rd, len(v.devs))
for _, dev := range v.devs {
str += fmt.Sprintf(" %v", dev)
}
if v.sadb != nil {
sad := v.sadb.SAD()
str += fmt.Sprintf("\n%d SAD", len(sad))
for _, sa := range sad {
str += fmt.Sprintf("\n\t%v", sa)
}
spd := v.sadb.SPD()
str += fmt.Sprintf("\n%d SPD", len(spd))
for _, sp := range spd {
str += fmt.Sprintf("\n\t%v", sp)
}
}
return str
}
// SADatabases returns SADatabases associated with the VRF.
func (v *VRF) SADatabases() *SADatabases {
v.sadbOnce.Do(func() {
v.sadb = newSADatabases(v)
})
return v.sadb
}
// HasSADatabases returns true if the VRF has associated SADatbases.
// Returns false otherwise.
func (v *VRF) HasSADatabases() bool {
return v.sadb != nil
}
func createFiveTuples(remotes []net.IP, local net.IP, proto IPProto, dstPort PortRange) []*FiveTuple {
fiveTuples := make([]*FiveTuple, len(remotes))
for i, remote := range remotes {
ft := NewFiveTuple()
ft.SrcIP = CreateIPAddr(remote)
ft.DstIP = CreateIPAddr(local)
ft.DstPort = dstPort
ft.Proto = proto
fiveTuples[i] = ft
}
return fiveTuples
}
func (v *VRF) addL3Tunnel(vif *VIF) error {
t := vif.Tunnel()
if t == nil {
return fmt.Errorf("%v is not tunnel.", vif)
}
ra := t.RemoteAddresses()
if len(ra) == 0 {
return fmt.Errorf("No remote address(es) specified: %v.", t)
}
if err := vif.connect(v.router.input(), MatchIPv4Dst, &ra[0]); err != nil {
return fmt.Errorf("Adding a rule to %v failed for L3 tunnel: %v", vif, err)
}
// Forward inbound packets to L3 Tunnel
fts := createFiveTuples(ra, t.local, t.IPProto(), PortRange{})
for i, ft := range fts {
if err := v.router.connect(vif.Inbound(), Match5Tuple, ft); err != nil {
vif.disconnect(MatchIPv4Dst, &ra[0])
for _, addedFt := range fts[0:i] {
v.router.disconnect(Match5Tuple, addedFt)
}
return fmt.Errorf("Adding a rule to router for L3 tunnel failed: %v", err)
}
}
// Add a rule for NAT Traversal, if the tunnel is IPSec.
if t.Security() == SecurityIPSec {
nats := createFiveTuples(ra, t.local, IPP_UDP, PortRange{Start: 4500})
for i, nat := range nats {
if err := v.router.connect(vif.Inbound(), Match5Tuple, nat); err != nil {
vif.disconnect(MatchIPv4Dst, &ra[0])
for _, ft := range fts {
v.router.disconnect(Match5Tuple, ft)
}
for _, addedNat := range nats[0:i] {
v.router.disconnect(Match5Tuple, addedNat)
}
return fmt.Errorf("Adding a rule for IPSec NAT traversal failed: %v", err)
}
}
}
return nil
}
func (v *VRF) deleteL3Tunnel(vif *VIF) {
t := vif.Tunnel()
for _, ft := range createFiveTuples(t.remotes, t.local, t.IPProto(), PortRange{}) {
v.router.disconnect(Match5Tuple, ft)
}
| if t.Security() == SecurityIPSec {
for _, nat := range createFiveTuples(t.remotes, t.local, IPP_UDP, PortRange{Start: 4500}) {
v.router.disconnect(Match5Tuple, nat)
}
} | random_line_split | |
brush.rs | fn depth_stencil_state() -> Option<wgpu::DepthStencilState> {
WorldPipelineBase::depth_stencil_state()
}
// NOTE: if the vertex format is changed, this descriptor must also be changed accordingly.
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> {
vec![wgpu::VertexBufferLayout {
array_stride: size_of::<BrushVertex>() as u64,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &VERTEX_ATTRIBUTES[..],
}]
}
}
fn calculate_lightmap_texcoords(
position: Vector3<f32>,
face: &BspFace,
texinfo: &BspTexInfo,
) -> [f32; 2] {
let mut s = texinfo.s_vector.dot(position) + texinfo.s_offset;
s -= (face.texture_mins[0] as f32 / 16.0).floor() * 16.0;
s += 0.5;
s /= face.extents[0] as f32;
let mut t = texinfo.t_vector.dot(position) + texinfo.t_offset;
t -= (face.texture_mins[1] as f32 / 16.0).floor() * 16.0;
t += 0.5;
t /= face.extents[1] as f32;
[s, t]
}
type Position = [f32; 3];
type Normal = [f32; 3];
type DiffuseTexcoord = [f32; 2];
type LightmapTexcoord = [f32; 2];
type LightmapAnim = [u8; 4];
#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct BrushVertex {
position: Position,
normal: Normal,
diffuse_texcoord: DiffuseTexcoord,
lightmap_texcoord: LightmapTexcoord,
lightmap_anim: LightmapAnim,
}
#[repr(u32)]
#[derive(Clone, Copy, Debug)]
pub enum TextureKind {
Normal = 0,
Warp = 1,
Sky = 2,
}
/// A single frame of a brush texture.
pub struct BrushTextureFrame {
bind_group_id: usize,
diffuse: wgpu::Texture,
fullbright: wgpu::Texture,
diffuse_view: wgpu::TextureView,
fullbright_view: wgpu::TextureView,
kind: TextureKind,
}
/// A brush texture.
pub enum BrushTexture {
/// A brush texture with a single frame.
Static(BrushTextureFrame),
/// A brush texture with multiple frames.
///
/// Animated brush textures advance one frame every 200 milliseconds, i.e.,
/// they have a framerate of 5 fps.
Animated {
primary: Vec<BrushTextureFrame>,
alternate: Option<Vec<BrushTextureFrame>>,
},
}
impl BrushTexture {
fn kind(&self) -> TextureKind {
match self {
BrushTexture::Static(ref frame) => frame.kind,
BrushTexture::Animated { ref primary, .. } => primary[0].kind,
}
}
}
#[derive(Debug)]
struct BrushFace {
vertices: Range<u32>,
min: Vector3<f32>,
max: Vector3<f32>,
texture_id: usize,
lightmap_ids: Vec<usize>,
light_styles: [u8; 4],
/// Indicates whether the face should be drawn this frame.
///
/// This is set to false by default, and will be set to true if the model is
/// a worldmodel and the containing leaf is in the PVS. If the model is not
/// a worldmodel, this flag is ignored.
draw_flag: Cell<bool>,
}
struct BrushLeaf {
facelist_ids: Range<usize>,
}
impl<B> std::convert::From<B> for BrushLeaf
where
B: std::borrow::Borrow<BspLeaf>,
{
fn from(bsp_leaf: B) -> Self {
let bsp_leaf = bsp_leaf.borrow();
BrushLeaf {
facelist_ids: bsp_leaf.facelist_id..bsp_leaf.facelist_id + bsp_leaf.facelist_count,
}
}
}
pub struct BrushRendererBuilder {
bsp_data: Rc<BspData>,
face_range: Range<usize>,
leaves: Option<Vec<BrushLeaf>>,
per_texture_bind_groups: RefCell<Vec<wgpu::BindGroup>>,
per_face_bind_groups: Vec<wgpu::BindGroup>,
vertices: Vec<BrushVertex>,
faces: Vec<BrushFace>,
texture_chains: HashMap<usize, Vec<usize>>,
textures: Vec<BrushTexture>,
lightmaps: Vec<wgpu::Texture>,
//lightmap_views: Vec<wgpu::TextureView>,
}
impl BrushRendererBuilder {
pub fn new(bsp_model: &BspModel, worldmodel: bool) -> BrushRendererBuilder {
BrushRendererBuilder {
bsp_data: bsp_model.bsp_data().clone(),
face_range: bsp_model.face_id..bsp_model.face_id + bsp_model.face_count,
leaves: if worldmodel {
Some(
bsp_model
.iter_leaves()
.map(|leaf| BrushLeaf::from(leaf))
.collect(),
)
} else {
None
},
per_texture_bind_groups: RefCell::new(Vec::new()),
per_face_bind_groups: Vec::new(),
vertices: Vec::new(),
faces: Vec::new(),
texture_chains: HashMap::new(),
textures: Vec::new(),
lightmaps: Vec::new(),
//lightmap_views: Vec::new(),
}
}
fn create_face(&mut self, state: &GraphicsState, face_id: usize) -> BrushFace {
let face = &self.bsp_data.faces()[face_id];
let face_vert_id = self.vertices.len();
let texinfo = &self.bsp_data.texinfo()[face.texinfo_id];
let tex = &self.bsp_data.textures()[texinfo.tex_id];
let mut min = Vector3::new(f32::INFINITY, f32::INFINITY, f32::INFINITY);
let mut max = Vector3::new(f32::NEG_INFINITY, f32::NEG_INFINITY, f32::NEG_INFINITY);
let no_collinear =
math::remove_collinear(self.bsp_data.face_iter_vertices(face_id).collect());
for vert in no_collinear.iter() {
for component in 0..3 {
min[component] = min[component].min(vert[component]);
max[component] = max[component].max(vert[component]);
}
}
if tex.name().starts_with("*") {
// tessellate the surface so we can do texcoord warping
let verts = warp::subdivide(no_collinear);
let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize();
for vert in verts.into_iter() {
self.vertices.push(BrushVertex {
position: vert.into(),
normal: normal.into(),
diffuse_texcoord: [
((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32),
((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32),
],
lightmap_texcoord: calculate_lightmap_texcoords(vert.into(), face, texinfo),
lightmap_anim: face.light_styles,
})
}
} else {
// expand the vertices into a triangle list.
// the vertices are guaranteed to be in valid triangle fan order (that's
// how GLQuake renders them) so we expand from triangle fan to triangle
// list order.
//
// v1 is the base vertex, so it remains constant.
// v2 takes the previous value of v3.
// v3 is the newest vertex.
let verts = no_collinear;
let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize();
let mut vert_iter = verts.into_iter();
let v1 = vert_iter.next().unwrap();
let mut v2 = vert_iter.next().unwrap();
for v3 in vert_iter {
let tri = &[v1, v2, v3];
// skip collinear points
for vert in tri.iter() {
self.vertices.push(BrushVertex {
position: (*vert).into(),
normal: normal.into(),
diffuse_texcoord: [
((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32),
((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32),
],
lightmap_texcoord: calculate_lightmap_texcoords(
(*vert).into(),
face,
texinfo,
),
lightmap_anim: face.light_styles,
});
}
v2 = v3;
}
}
// build the lightmaps
let lightmaps = if !texinfo.special {
self.bsp_data.face_lightmaps(face_id)
} else {
Vec::new()
};
let mut lightmap_ids = Vec::new();
for lightmap in lightmaps {
let lightmap_data = TextureData::Lightmap(LightmapData { | lightmap: Cow::Borrowed(lightmap.data()),
});
| random_line_split | |
brush.rs |
pub fn pipeline(&self) -> &wgpu::RenderPipeline {
&self.pipeline
}
pub fn bind_group_layouts(&self) -> &[wgpu::BindGroupLayout] {
&self.bind_group_layouts
}
pub fn bind_group_layout(&self, id: BindGroupLayoutId) -> &wgpu::BindGroupLayout {
assert!(id as usize >= BindGroupLayoutId::PerTexture as usize);
&self.bind_group_layouts[id as usize - BindGroupLayoutId::PerTexture as usize]
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct VertexPushConstants {
pub transform: Matrix4<f32>,
pub model_view: Matrix4<f32>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct SharedPushConstants {
pub texture_kind: u32,
}
const BIND_GROUP_LAYOUT_ENTRIES: &[&[wgpu::BindGroupLayoutEntry]] = &[
&[
// diffuse texture, updated once per face
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
multisampled: false,
},
count: None,
},
// fullbright texture
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
multisampled: false,
},
count: None,
},
],
&[
// lightmap texture array
wgpu::BindGroupLayoutEntry {
count: NonZeroU32::new(4),
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
multisampled: false,
},
},
],
];
lazy_static! {
static ref VERTEX_ATTRIBUTES: [wgpu::VertexAttribute; 5] =
wgpu::vertex_attr_array![
// position
0 => Float32x3,
// normal
1 => Float32x3,
// diffuse texcoord
2 => Float32x2,
// lightmap texcoord
3 => Float32x2,
// lightmap animation ids
4 => Uint8x4,
];
}
impl Pipeline for BrushPipeline {
type VertexPushConstants = VertexPushConstants;
type SharedPushConstants = SharedPushConstants;
type FragmentPushConstants = ();
fn name() -> &'static str {
"brush"
}
fn vertex_shader() -> &'static str {
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.vert"))
}
fn fragment_shader() -> &'static str {
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.frag"))
}
// NOTE: if any of the binding indices are changed, they must also be changed in
// the corresponding shaders and the BindGroupLayout generation functions.
fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> {
vec![
// group 2: updated per-texture
wgpu::BindGroupLayoutDescriptor {
label: Some("brush per-texture bind group"),
entries: BIND_GROUP_LAYOUT_ENTRIES[0],
},
// group 3: updated per-face
wgpu::BindGroupLayoutDescriptor {
label: Some("brush per-face bind group"),
entries: BIND_GROUP_LAYOUT_ENTRIES[1],
},
]
}
fn primitive_state() -> wgpu::PrimitiveState {
WorldPipelineBase::primitive_state()
}
fn color_target_states() -> Vec<wgpu::ColorTargetState> {
WorldPipelineBase::color_target_states()
}
fn depth_stencil_state() -> Option<wgpu::DepthStencilState> {
WorldPipelineBase::depth_stencil_state()
}
// NOTE: if the vertex format is changed, this descriptor must also be changed accordingly.
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> {
vec![wgpu::VertexBufferLayout {
array_stride: size_of::<BrushVertex>() as u64,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &VERTEX_ATTRIBUTES[..],
}]
}
}
fn calculate_lightmap_texcoords(
position: Vector3<f32>,
face: &BspFace,
texinfo: &BspTexInfo,
) -> [f32; 2] {
let mut s = texinfo.s_vector.dot(position) + texinfo.s_offset;
s -= (face.texture_mins[0] as f32 / 16.0).floor() * 16.0;
s += 0.5;
s /= face.extents[0] as f32;
let mut t = texinfo.t_vector.dot(position) + texinfo.t_offset;
t -= (face.texture_mins[1] as f32 / 16.0).floor() * 16.0;
t += 0.5;
t /= face.extents[1] as f32;
[s, t]
}
type Position = [f32; 3];
type Normal = [f32; 3];
type DiffuseTexcoord = [f32; 2];
type LightmapTexcoord = [f32; 2];
type LightmapAnim = [u8; 4];
#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct BrushVertex {
position: Position,
normal: Normal,
diffuse_texcoord: DiffuseTexcoord,
lightmap_texcoord: LightmapTexcoord,
lightmap_anim: LightmapAnim,
}
#[repr(u32)]
#[derive(Clone, Copy, Debug)]
pub enum TextureKind {
Normal = 0,
Warp = 1,
Sky = 2,
}
/// A single frame of a brush texture.
pub struct BrushTextureFrame {
bind_group_id: usize,
diffuse: wgpu::Texture,
fullbright: wgpu::Texture,
diffuse_view: wgpu::TextureView,
fullbright_view: wgpu::TextureView,
kind: TextureKind,
}
/// A brush texture.
pub enum BrushTexture {
/// A brush texture with a single frame.
Static(BrushTextureFrame),
/// A brush texture with multiple frames.
///
/// Animated brush textures advance one frame every 200 milliseconds, i.e.,
/// they have a framerate of 5 fps.
Animated {
primary: Vec<BrushTextureFrame>,
alternate: Option<Vec<BrushTextureFrame>>,
},
}
impl BrushTexture {
fn kind(&self) -> TextureKind {
match self {
BrushTexture::Static(ref frame) => frame.kind,
BrushTexture::Animated { ref primary, .. } => primary[0].kind,
}
}
}
#[derive(Debug)]
struct BrushFace {
vertices: Range<u32>,
min: Vector3<f32>,
max: Vector3<f32>,
texture_id: usize,
lightmap_ids: Vec<usize>,
light_styles: [u8; 4],
/// Indicates whether the face should be drawn this frame.
///
/// This is set to false by default, and will be set to true if the model is
/// a worldmodel and the containing leaf is in the PVS. If the model is not
/// a worldmodel, this flag is ignored.
draw_flag: Cell<bool>,
}
struct BrushLeaf {
facelist_ids: Range<usize>,
}
impl<B> std::convert::From<B> for BrushLeaf
where
B: std::borrow::Borrow<BspLeaf>,
{
fn from(bsp_leaf: B) -> Self {
let bsp_leaf = bsp_leaf.borrow();
BrushLeaf {
facelist_ids: bsp_leaf.facelist_id..bsp_leaf.facelist_id + bsp_leaf.facelist_count,
}
}
}
pub struct BrushRendererBuilder {
bsp_data: Rc<BspData>,
face_range: Range<usize>,
leaves: Option<Vec<BrushLeaf>>,
per_texture_bind_groups: RefCell<Vec<wgpu::BindGroup>>,
per_face_bind_groups: Vec<wgpu::BindGroup>,
vertices: Vec<BrushVertex>,
faces: Vec<BrushFace>,
texture_chains: HashMap<usize, Vec<usize>>,
textures: Vec<BrushTexture>,
lightmaps: Vec<wgpu::Texture>,
//lightmap_views: Vec<wgpu::TextureView>,
}
impl BrushRendererBuilder {
pub fn new(bsp_model: &BspModel, worldmodel: bool) -> BrushRendererBuilder |
let layout_refs: Vec<_> = world_bind_group_layouts
.iter()
.chain(self.bind_group_layouts.iter())
.collect();
self.pipeline = BrushPipeline::recreate(device, compiler, &layout_refs, sample_count);
}
| identifier_body | |
brush.rs | BrushRendererBuilder {
BrushRendererBuilder {
bsp_data: bsp_model.bsp_data().clone(),
face_range: bsp_model.face_id..bsp_model.face_id + bsp_model.face_count,
leaves: if worldmodel {
Some(
bsp_model
.iter_leaves()
.map(|leaf| BrushLeaf::from(leaf))
.collect(),
)
} else {
None
},
per_texture_bind_groups: RefCell::new(Vec::new()),
per_face_bind_groups: Vec::new(),
vertices: Vec::new(),
faces: Vec::new(),
texture_chains: HashMap::new(),
textures: Vec::new(),
lightmaps: Vec::new(),
//lightmap_views: Vec::new(),
}
}
fn create_face(&mut self, state: &GraphicsState, face_id: usize) -> BrushFace {
let face = &self.bsp_data.faces()[face_id];
let face_vert_id = self.vertices.len();
let texinfo = &self.bsp_data.texinfo()[face.texinfo_id];
let tex = &self.bsp_data.textures()[texinfo.tex_id];
let mut min = Vector3::new(f32::INFINITY, f32::INFINITY, f32::INFINITY);
let mut max = Vector3::new(f32::NEG_INFINITY, f32::NEG_INFINITY, f32::NEG_INFINITY);
let no_collinear =
math::remove_collinear(self.bsp_data.face_iter_vertices(face_id).collect());
for vert in no_collinear.iter() {
for component in 0..3 {
min[component] = min[component].min(vert[component]);
max[component] = max[component].max(vert[component]);
}
}
if tex.name().starts_with("*") {
// tessellate the surface so we can do texcoord warping
let verts = warp::subdivide(no_collinear);
let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize();
for vert in verts.into_iter() {
self.vertices.push(BrushVertex {
position: vert.into(),
normal: normal.into(),
diffuse_texcoord: [
((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32),
((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32),
],
lightmap_texcoord: calculate_lightmap_texcoords(vert.into(), face, texinfo),
lightmap_anim: face.light_styles,
})
}
} else {
// expand the vertices into a triangle list.
// the vertices are guaranteed to be in valid triangle fan order (that's
// how GLQuake renders them) so we expand from triangle fan to triangle
// list order.
//
// v1 is the base vertex, so it remains constant.
// v2 takes the previous value of v3.
// v3 is the newest vertex.
let verts = no_collinear;
let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize();
let mut vert_iter = verts.into_iter();
let v1 = vert_iter.next().unwrap();
let mut v2 = vert_iter.next().unwrap();
for v3 in vert_iter {
let tri = &[v1, v2, v3];
// skip collinear points
for vert in tri.iter() {
self.vertices.push(BrushVertex {
position: (*vert).into(),
normal: normal.into(),
diffuse_texcoord: [
((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32),
((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32),
],
lightmap_texcoord: calculate_lightmap_texcoords(
(*vert).into(),
face,
texinfo,
),
lightmap_anim: face.light_styles,
});
}
v2 = v3;
}
}
// build the lightmaps
let lightmaps = if !texinfo.special {
self.bsp_data.face_lightmaps(face_id)
} else {
Vec::new()
};
let mut lightmap_ids = Vec::new();
for lightmap in lightmaps {
let lightmap_data = TextureData::Lightmap(LightmapData {
lightmap: Cow::Borrowed(lightmap.data()),
});
let texture =
state.create_texture(None, lightmap.width(), lightmap.height(), &lightmap_data);
let id = self.lightmaps.len();
self.lightmaps.push(texture);
//self.lightmap_views
//.push(self.lightmaps[id].create_view(&Default::default()));
lightmap_ids.push(id);
}
BrushFace {
vertices: face_vert_id as u32..self.vertices.len() as u32,
min,
max,
texture_id: texinfo.tex_id as usize,
lightmap_ids,
light_styles: face.light_styles,
draw_flag: Cell::new(true),
}
}
fn create_per_texture_bind_group(
&self,
state: &GraphicsState,
tex: &BrushTextureFrame,
) -> wgpu::BindGroup {
let layout = &state
.brush_pipeline()
.bind_group_layout(BindGroupLayoutId::PerTexture);
let desc = wgpu::BindGroupDescriptor {
label: Some("per-texture bind group"),
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&tex.diffuse_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&tex.fullbright_view),
},
],
};
state.device().create_bind_group(&desc)
}
fn create_per_face_bind_group(&self, state: &GraphicsState, face_id: usize) -> wgpu::BindGroup {
let mut lightmap_views: Vec<_> = self.faces[face_id]
.lightmap_ids
.iter()
.map(|id| self.lightmaps[*id].create_view(&Default::default()))
.collect();
lightmap_views.resize_with(4, || {
state.default_lightmap().create_view(&Default::default())
});
let lightmap_view_refs = lightmap_views.iter().collect::<Vec<_>>();
let layout = &state
.brush_pipeline()
.bind_group_layout(BindGroupLayoutId::PerFace);
let desc = wgpu::BindGroupDescriptor {
label: Some("per-face bind group"),
layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureViewArray(&lightmap_view_refs[..]),
}],
};
state.device().create_bind_group(&desc)
}
fn create_brush_texture_frame<S>(
&self,
state: &GraphicsState,
mipmap: &[u8],
width: u32,
height: u32,
name: S,
) -> BrushTextureFrame
where
S: AsRef<str>,
{
let name = name.as_ref();
let (diffuse_data, fullbright_data) = state.palette().translate(mipmap);
let diffuse =
state.create_texture(None, width, height, &TextureData::Diffuse(diffuse_data));
let fullbright = state.create_texture(
None,
width,
height,
&TextureData::Fullbright(fullbright_data),
);
let diffuse_view = diffuse.create_view(&Default::default());
let fullbright_view = fullbright.create_view(&Default::default());
let kind = if name.starts_with("sky") {
TextureKind::Sky
} else if name.starts_with("*") {
TextureKind::Warp
} else {
TextureKind::Normal
};
let mut frame = BrushTextureFrame {
bind_group_id: 0,
diffuse,
fullbright,
diffuse_view,
fullbright_view,
kind,
};
// generate texture bind group
let per_texture_bind_group = self.create_per_texture_bind_group(state, &frame);
let bind_group_id = self.per_texture_bind_groups.borrow().len();
self.per_texture_bind_groups
.borrow_mut()
.push(per_texture_bind_group);
frame.bind_group_id = bind_group_id;
frame
}
pub fn create_brush_texture(&self, state: &GraphicsState, tex: &BspTexture) -> BrushTexture {
// TODO: upload mipmaps
let (width, height) = tex.dimensions();
match tex.kind() {
// sequence animated textures
BspTextureKind::Animated { primary, alternate } => { |
let primary_frames: Vec<_> = primary
.iter()
.map(|f| {
self.create_brush_texture_frame(
state,
f.mipmap(BspTextureMipmap::Full),
width,
height,
tex.name(),
)
})
.collect();
let alternate_frames: Option<Vec<_>> = alternate.as_ref().map(|a| {
a.iter()
.map(|f| {
self.create_brush_texture_frame(
state,
f.mipmap(BspTextureMipmap::Full), | conditional_block | |
brush.rs | &self) -> &[wgpu::BindGroupLayout] {
&self.bind_group_layouts
}
pub fn bind_group_layout(&self, id: BindGroupLayoutId) -> &wgpu::BindGroupLayout {
assert!(id as usize >= BindGroupLayoutId::PerTexture as usize);
&self.bind_group_layouts[id as usize - BindGroupLayoutId::PerTexture as usize]
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct VertexPushConstants {
pub transform: Matrix4<f32>,
pub model_view: Matrix4<f32>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct SharedPushConstants {
pub texture_kind: u32,
}
const BIND_GROUP_LAYOUT_ENTRIES: &[&[wgpu::BindGroupLayoutEntry]] = &[
&[
// diffuse texture, updated once per face
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
multisampled: false,
},
count: None,
},
// fullbright texture
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
multisampled: false,
},
count: None,
},
],
&[
// lightmap texture array
wgpu::BindGroupLayoutEntry {
count: NonZeroU32::new(4),
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
multisampled: false,
},
},
],
];
lazy_static! {
static ref VERTEX_ATTRIBUTES: [wgpu::VertexAttribute; 5] =
wgpu::vertex_attr_array![
// position
0 => Float32x3,
// normal
1 => Float32x3,
// diffuse texcoord
2 => Float32x2,
// lightmap texcoord
3 => Float32x2,
// lightmap animation ids
4 => Uint8x4,
];
}
impl Pipeline for BrushPipeline {
type VertexPushConstants = VertexPushConstants;
type SharedPushConstants = SharedPushConstants;
type FragmentPushConstants = ();
fn name() -> &'static str {
"brush"
}
fn vertex_shader() -> &'static str {
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.vert"))
}
fn fragment_shader() -> &'static str {
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.frag"))
}
// NOTE: if any of the binding indices are changed, they must also be changed in
// the corresponding shaders and the BindGroupLayout generation functions.
fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> {
vec![
// group 2: updated per-texture
wgpu::BindGroupLayoutDescriptor {
label: Some("brush per-texture bind group"),
entries: BIND_GROUP_LAYOUT_ENTRIES[0],
},
// group 3: updated per-face
wgpu::BindGroupLayoutDescriptor {
label: Some("brush per-face bind group"),
entries: BIND_GROUP_LAYOUT_ENTRIES[1],
},
]
}
fn primitive_state() -> wgpu::PrimitiveState {
WorldPipelineBase::primitive_state()
}
fn color_target_states() -> Vec<wgpu::ColorTargetState> {
WorldPipelineBase::color_target_states()
}
fn depth_stencil_state() -> Option<wgpu::DepthStencilState> {
WorldPipelineBase::depth_stencil_state()
}
// NOTE: if the vertex format is changed, this descriptor must also be changed accordingly.
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> {
vec![wgpu::VertexBufferLayout {
array_stride: size_of::<BrushVertex>() as u64,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &VERTEX_ATTRIBUTES[..],
}]
}
}
fn calculate_lightmap_texcoords(
position: Vector3<f32>,
face: &BspFace,
texinfo: &BspTexInfo,
) -> [f32; 2] {
let mut s = texinfo.s_vector.dot(position) + texinfo.s_offset;
s -= (face.texture_mins[0] as f32 / 16.0).floor() * 16.0;
s += 0.5;
s /= face.extents[0] as f32;
let mut t = texinfo.t_vector.dot(position) + texinfo.t_offset;
t -= (face.texture_mins[1] as f32 / 16.0).floor() * 16.0;
t += 0.5;
t /= face.extents[1] as f32;
[s, t]
}
type Position = [f32; 3];
type Normal = [f32; 3];
type DiffuseTexcoord = [f32; 2];
type LightmapTexcoord = [f32; 2];
type LightmapAnim = [u8; 4];
#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct BrushVertex {
position: Position,
normal: Normal,
diffuse_texcoord: DiffuseTexcoord,
lightmap_texcoord: LightmapTexcoord,
lightmap_anim: LightmapAnim,
}
#[repr(u32)]
#[derive(Clone, Copy, Debug)]
pub enum TextureKind {
Normal = 0,
Warp = 1,
Sky = 2,
}
/// A single frame of a brush texture.
pub struct BrushTextureFrame {
bind_group_id: usize,
diffuse: wgpu::Texture,
fullbright: wgpu::Texture,
diffuse_view: wgpu::TextureView,
fullbright_view: wgpu::TextureView,
kind: TextureKind,
}
/// A brush texture.
pub enum BrushTexture {
/// A brush texture with a single frame.
Static(BrushTextureFrame),
/// A brush texture with multiple frames.
///
/// Animated brush textures advance one frame every 200 milliseconds, i.e.,
/// they have a framerate of 5 fps.
Animated {
primary: Vec<BrushTextureFrame>,
alternate: Option<Vec<BrushTextureFrame>>,
},
}
impl BrushTexture {
fn kind(&self) -> TextureKind {
match self {
BrushTexture::Static(ref frame) => frame.kind,
BrushTexture::Animated { ref primary, .. } => primary[0].kind,
}
}
}
#[derive(Debug)]
struct BrushFace {
vertices: Range<u32>,
min: Vector3<f32>,
max: Vector3<f32>,
texture_id: usize,
lightmap_ids: Vec<usize>,
light_styles: [u8; 4],
/// Indicates whether the face should be drawn this frame.
///
/// This is set to false by default, and will be set to true if the model is
/// a worldmodel and the containing leaf is in the PVS. If the model is not
/// a worldmodel, this flag is ignored.
draw_flag: Cell<bool>,
}
struct BrushLeaf {
facelist_ids: Range<usize>,
}
impl<B> std::convert::From<B> for BrushLeaf
where
B: std::borrow::Borrow<BspLeaf>,
{
fn from(bsp_leaf: B) -> Self {
let bsp_leaf = bsp_leaf.borrow();
BrushLeaf {
facelist_ids: bsp_leaf.facelist_id..bsp_leaf.facelist_id + bsp_leaf.facelist_count,
}
}
}
pub struct BrushRendererBuilder {
bsp_data: Rc<BspData>,
face_range: Range<usize>,
leaves: Option<Vec<BrushLeaf>>,
per_texture_bind_groups: RefCell<Vec<wgpu::BindGroup>>,
per_face_bind_groups: Vec<wgpu::BindGroup>,
vertices: Vec<BrushVertex>,
faces: Vec<BrushFace>,
texture_chains: HashMap<usize, Vec<usize>>,
textures: Vec<BrushTexture>,
lightmaps: Vec<wgpu::Texture>,
//lightmap_views: Vec<wgpu::TextureView>,
}
impl BrushRendererBuilder {
pub fn new(bsp_model: &BspModel, worldmodel: bool) -> BrushRendererBuilder {
BrushRendererBuilder {
bsp_data: bsp_model.bsp_data().clone(),
face_range: bsp_model.face_id..bsp_model.face_id + bsp_model.face_count,
leaves: if worldmodel {
Some(
bsp_model
.iter_leaves()
.map(|leaf| BrushLeaf::from(leaf))
.collect(),
)
| ind_group_layouts( | identifier_name | |
glsl3.rs | : GLuint,
batch: Batch,
}
impl Glsl3Renderer {
pub fn new() -> Result<Self, Error> {
info!("Using OpenGL 3.3 renderer");
let program = TextShaderProgram::new(ShaderVersion::Glsl3)?;
let mut vao: GLuint = 0;
let mut ebo: GLuint = 0;
let mut vbo_instance: GLuint = 0;
unsafe {
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR);
// Disable depth mask, as the renderer never uses depth tests.
gl::DepthMask(gl::FALSE);
gl::GenVertexArrays(1, &mut vao);
gl::GenBuffers(1, &mut ebo);
gl::GenBuffers(1, &mut vbo_instance);
gl::BindVertexArray(vao);
// ---------------------
// Set up element buffer
// ---------------------
let indices: [u32; 6] = [0, 1, 3, 1, 2, 3];
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
(6 * size_of::<u32>()) as isize,
indices.as_ptr() as *const _,
gl::STATIC_DRAW,
);
// ----------------------------
// Setup vertex instance buffer
// ----------------------------
gl::BindBuffer(gl::ARRAY_BUFFER, vbo_instance);
gl::BufferData(
gl::ARRAY_BUFFER,
(BATCH_MAX * size_of::<InstanceData>()) as isize,
ptr::null(),
gl::STREAM_DRAW,
);
let mut index = 0;
let mut size = 0;
macro_rules! add_attr {
($count:expr, $gl_type:expr, $type:ty) => {
gl::VertexAttribPointer(
index,
$count,
$gl_type,
gl::FALSE,
size_of::<InstanceData>() as i32,
size as *const _,
);
gl::EnableVertexAttribArray(index);
gl::VertexAttribDivisor(index, 1);
#[allow(unused_assignments)]
{
size += $count * size_of::<$type>();
index += 1;
}
};
}
// Coords.
add_attr!(2, gl::UNSIGNED_SHORT, u16);
// Glyph offset and size.
add_attr!(4, gl::SHORT, i16);
// UV offset.
add_attr!(4, gl::FLOAT, f32);
// Color and cell flags.
//
// These are packed together because of an OpenGL driver issue on macOS, which caused a
// `vec3(u8)` text color and a `u8` cell flags to increase the rendering time by a
// huge margin.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Background color.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Cleanup.
gl::BindVertexArray(0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
}
Ok(Self {
program,
vao,
ebo,
vbo_instance,
atlas: vec![Atlas::new(ATLAS_SIZE, false)],
current_atlas: 0,
active_tex: 0,
batch: Batch::new(),
})
}
}
impl<'a> TextRenderer<'a> for Glsl3Renderer {
type RenderApi = RenderApi<'a>;
type RenderBatch = Batch;
type Shader = TextShaderProgram;
fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T
where
F: FnOnce(Self::RenderApi) -> T,
{
unsafe {
gl::UseProgram(self.program.id());
self.program.set_term_uniforms(size_info);
gl::BindVertexArray(self.vao);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance);
gl::ActiveTexture(gl::TEXTURE0);
}
let res = func(RenderApi {
active_tex: &mut self.active_tex,
batch: &mut self.batch,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
program: &mut self.program,
});
unsafe {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
}
res
}
fn program(&self) -> &Self::Shader {
&self.program
}
fn loader_api(&mut self) -> LoaderApi<'_> {
LoaderApi {
active_tex: &mut self.active_tex,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
}
}
}
impl Drop for Glsl3Renderer {
fn drop(&mut self) {
unsafe {
gl::DeleteBuffers(1, &self.vbo_instance);
gl::DeleteBuffers(1, &self.ebo);
gl::DeleteVertexArrays(1, &self.vao);
}
}
}
#[derive(Debug)]
pub struct RenderApi<'a> {
active_tex: &'a mut GLuint,
batch: &'a mut Batch,
atlas: &'a mut Vec<Atlas>,
current_atlas: &'a mut usize,
program: &'a mut TextShaderProgram,
}
impl<'a> TextRenderApi<Batch> for RenderApi<'a> {
fn batch(&mut self) -> &mut Batch {
self.batch
}
fn render_batch(&mut self) {
unsafe {
gl::BufferSubData(
gl::ARRAY_BUFFER,
0,
self.batch.size() as isize,
self.batch.instances.as_ptr() as *const _,
);
}
// Bind texture if necessary.
if *self.active_tex != self.batch.tex() |
unsafe {
self.program.set_rendering_pass(RenderingPass::Background);
gl::DrawElementsInstanced(
gl::TRIANGLES,
6,
gl::UNSIGNED_INT,
ptr::null(),
self.batch.len() as GLsizei,
);
self.program.set_rendering_pass(RenderingPass::SubpixelPass1);
gl::DrawElementsInstanced(
gl::TRIANGLES,
6,
gl::UNSIGNED_INT,
ptr::null(),
self.batch.len() as GLsizei,
);
}
self.batch.clear();
}
}
impl<'a> LoadGlyph for RenderApi<'a> {
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph {
Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized)
}
fn clear(&mut self) {
Atlas::clear_atlas(self.atlas, self.current_atlas)
}
}
impl<'a> Drop for RenderApi<'a> {
fn drop(&mut self) {
if !self.batch.is_empty() {
self.render_batch();
}
}
}
#[derive(Debug)]
#[repr(C)]
struct InstanceData {
// Coords.
col: u16,
row: u16,
// Glyph offset.
left: i16,
top: i16,
// Glyph size.
width: i16,
height: i16,
// UV offset.
uv_left: f32,
uv_bot: f32,
// uv scale.
uv_width: f32,
uv_height: f32,
// Color.
r: u8,
g: u8,
b: u8,
// Cell flags like multicolor or fullwidth character.
cell_flags: RenderingGlyphFlags,
// Background color.
bg_r: u8,
bg_g: u8,
bg_b: u8,
bg_a: u8,
}
#[derive(Debug, Default)]
pub struct Batch {
tex: GLuint,
instances: Vec<InstanceData>,
}
impl TextRenderBatch for Batch {
#[inline]
fn tex(&self) -> GLuint {
self.tex
}
#[inline]
fn full(&self) -> bool {
self.capacity() == self.len()
}
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, _: &SizeInfo) {
if self.is_empty() {
self.tex = glyph.tex_id;
}
let mut cell_flags = RenderingGlyphFlags::empty();
cell_flags.set(RenderingGlyphFlags::COLORED, glyph.multicolor);
cell_flags.set(RenderingGlyphFlags::WIDE_CHAR, cell.flags.contains(Flags::WIDE_CHAR));
self.instances.push(InstanceData {
col: cell.point.column.0 as u16,
row: cell.point.line as u16,
top: glyph.top,
left: glyph.left,
width: glyph.width,
height: | {
unsafe {
gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
}
*self.active_tex = self.batch.tex();
} | conditional_block |
glsl3.rs | ,
ptr::null(),
gl::STREAM_DRAW,
);
let mut index = 0;
let mut size = 0;
macro_rules! add_attr {
($count:expr, $gl_type:expr, $type:ty) => {
gl::VertexAttribPointer(
index,
$count,
$gl_type,
gl::FALSE,
size_of::<InstanceData>() as i32,
size as *const _,
);
gl::EnableVertexAttribArray(index);
gl::VertexAttribDivisor(index, 1);
#[allow(unused_assignments)]
{
size += $count * size_of::<$type>();
index += 1;
}
};
}
// Coords.
add_attr!(2, gl::UNSIGNED_SHORT, u16);
// Glyph offset and size.
add_attr!(4, gl::SHORT, i16);
// UV offset.
add_attr!(4, gl::FLOAT, f32);
// Color and cell flags.
//
// These are packed together because of an OpenGL driver issue on macOS, which caused a
// `vec3(u8)` text color and a `u8` cell flags to increase the rendering time by a
// huge margin.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Background color.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Cleanup.
gl::BindVertexArray(0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
}
Ok(Self {
program,
vao,
ebo,
vbo_instance,
atlas: vec![Atlas::new(ATLAS_SIZE, false)],
current_atlas: 0,
active_tex: 0,
batch: Batch::new(),
})
}
}
impl<'a> TextRenderer<'a> for Glsl3Renderer {
type RenderApi = RenderApi<'a>;
type RenderBatch = Batch;
type Shader = TextShaderProgram;
fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T
where
F: FnOnce(Self::RenderApi) -> T,
{
unsafe {
gl::UseProgram(self.program.id());
self.program.set_term_uniforms(size_info);
gl::BindVertexArray(self.vao);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance);
gl::ActiveTexture(gl::TEXTURE0);
}
let res = func(RenderApi {
active_tex: &mut self.active_tex,
batch: &mut self.batch,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
program: &mut self.program,
});
unsafe {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
}
res
}
fn program(&self) -> &Self::Shader {
&self.program
}
fn loader_api(&mut self) -> LoaderApi<'_> {
LoaderApi {
active_tex: &mut self.active_tex,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
}
}
}
impl Drop for Glsl3Renderer {
fn drop(&mut self) {
unsafe {
gl::DeleteBuffers(1, &self.vbo_instance);
gl::DeleteBuffers(1, &self.ebo);
gl::DeleteVertexArrays(1, &self.vao);
}
}
}
#[derive(Debug)]
pub struct RenderApi<'a> {
active_tex: &'a mut GLuint,
batch: &'a mut Batch,
atlas: &'a mut Vec<Atlas>,
current_atlas: &'a mut usize,
program: &'a mut TextShaderProgram,
}
impl<'a> TextRenderApi<Batch> for RenderApi<'a> {
fn batch(&mut self) -> &mut Batch {
self.batch
}
fn render_batch(&mut self) {
unsafe {
gl::BufferSubData(
gl::ARRAY_BUFFER,
0,
self.batch.size() as isize,
self.batch.instances.as_ptr() as *const _,
);
}
// Bind texture if necessary.
if *self.active_tex != self.batch.tex() {
unsafe {
gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
}
*self.active_tex = self.batch.tex();
}
unsafe {
self.program.set_rendering_pass(RenderingPass::Background);
gl::DrawElementsInstanced(
gl::TRIANGLES,
6,
gl::UNSIGNED_INT,
ptr::null(),
self.batch.len() as GLsizei,
);
self.program.set_rendering_pass(RenderingPass::SubpixelPass1);
gl::DrawElementsInstanced(
gl::TRIANGLES,
6,
gl::UNSIGNED_INT,
ptr::null(),
self.batch.len() as GLsizei,
);
}
self.batch.clear();
}
}
impl<'a> LoadGlyph for RenderApi<'a> {
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph {
Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized)
}
fn clear(&mut self) {
Atlas::clear_atlas(self.atlas, self.current_atlas)
}
}
impl<'a> Drop for RenderApi<'a> {
fn drop(&mut self) {
if !self.batch.is_empty() {
self.render_batch();
}
}
}
#[derive(Debug)]
#[repr(C)]
struct InstanceData {
// Coords.
col: u16,
row: u16,
// Glyph offset.
left: i16,
top: i16,
// Glyph size.
width: i16,
height: i16,
// UV offset.
uv_left: f32,
uv_bot: f32,
// uv scale.
uv_width: f32,
uv_height: f32,
// Color.
r: u8,
g: u8,
b: u8,
// Cell flags like multicolor or fullwidth character.
cell_flags: RenderingGlyphFlags,
// Background color.
bg_r: u8,
bg_g: u8,
bg_b: u8,
bg_a: u8,
}
#[derive(Debug, Default)]
pub struct Batch {
tex: GLuint,
instances: Vec<InstanceData>,
}
impl TextRenderBatch for Batch {
#[inline]
fn tex(&self) -> GLuint {
self.tex
}
#[inline]
fn full(&self) -> bool {
self.capacity() == self.len()
}
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, _: &SizeInfo) {
if self.is_empty() {
self.tex = glyph.tex_id;
}
let mut cell_flags = RenderingGlyphFlags::empty();
cell_flags.set(RenderingGlyphFlags::COLORED, glyph.multicolor);
cell_flags.set(RenderingGlyphFlags::WIDE_CHAR, cell.flags.contains(Flags::WIDE_CHAR));
self.instances.push(InstanceData {
col: cell.point.column.0 as u16,
row: cell.point.line as u16,
top: glyph.top,
left: glyph.left,
width: glyph.width,
height: glyph.height,
uv_bot: glyph.uv_bot,
uv_left: glyph.uv_left,
uv_width: glyph.uv_width,
uv_height: glyph.uv_height,
r: cell.fg.r,
g: cell.fg.g,
b: cell.fg.b,
cell_flags,
bg_r: cell.bg.r,
bg_g: cell.bg.g,
bg_b: cell.bg.b,
bg_a: (cell.bg_alpha * 255.0) as u8,
});
}
}
impl Batch {
#[inline]
pub fn new() -> Self {
Self { tex: 0, instances: Vec::with_capacity(BATCH_MAX) }
}
#[inline]
pub fn len(&self) -> usize {
self.instances.len()
}
#[inline]
pub fn capacity(&self) -> usize {
BATCH_MAX
}
#[inline]
pub fn size(&self) -> usize {
self.len() * size_of::<InstanceData>()
}
pub fn clear(&mut self) {
self.tex = 0;
self.instances.clear();
}
}
/// Text drawing program.
///
/// Uniforms are prefixed with "u", and vertex attributes are prefixed with "a".
#[derive(Debug)]
pub struct TextShaderProgram {
/// Shader program.
program: ShaderProgram,
/// Projection scale and offset uniform.
u_projection: GLint,
/// Cell dimensions (pixels).
u_cell_dim: GLint,
/// Background pass flag.
/// | /// Rendering is split into two passes; one for backgrounds, and one for text.
u_rendering_pass: GLint,
} | random_line_split | |
glsl3.rs | : GLuint,
batch: Batch,
}
impl Glsl3Renderer {
pub fn new() -> Result<Self, Error> {
info!("Using OpenGL 3.3 renderer");
let program = TextShaderProgram::new(ShaderVersion::Glsl3)?;
let mut vao: GLuint = 0;
let mut ebo: GLuint = 0;
let mut vbo_instance: GLuint = 0;
unsafe {
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR);
// Disable depth mask, as the renderer never uses depth tests.
gl::DepthMask(gl::FALSE);
gl::GenVertexArrays(1, &mut vao);
gl::GenBuffers(1, &mut ebo);
gl::GenBuffers(1, &mut vbo_instance);
gl::BindVertexArray(vao);
// ---------------------
// Set up element buffer
// ---------------------
let indices: [u32; 6] = [0, 1, 3, 1, 2, 3];
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
(6 * size_of::<u32>()) as isize,
indices.as_ptr() as *const _,
gl::STATIC_DRAW,
);
// ----------------------------
// Setup vertex instance buffer
// ----------------------------
gl::BindBuffer(gl::ARRAY_BUFFER, vbo_instance);
gl::BufferData(
gl::ARRAY_BUFFER,
(BATCH_MAX * size_of::<InstanceData>()) as isize,
ptr::null(),
gl::STREAM_DRAW,
);
let mut index = 0;
let mut size = 0;
macro_rules! add_attr {
($count:expr, $gl_type:expr, $type:ty) => {
gl::VertexAttribPointer(
index,
$count,
$gl_type,
gl::FALSE,
size_of::<InstanceData>() as i32,
size as *const _,
);
gl::EnableVertexAttribArray(index);
gl::VertexAttribDivisor(index, 1);
#[allow(unused_assignments)]
{
size += $count * size_of::<$type>();
index += 1;
}
};
}
// Coords.
add_attr!(2, gl::UNSIGNED_SHORT, u16);
// Glyph offset and size.
add_attr!(4, gl::SHORT, i16);
// UV offset.
add_attr!(4, gl::FLOAT, f32);
// Color and cell flags.
//
// These are packed together because of an OpenGL driver issue on macOS, which caused a
// `vec3(u8)` text color and a `u8` cell flags to increase the rendering time by a
// huge margin.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Background color.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Cleanup.
gl::BindVertexArray(0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
}
Ok(Self {
program,
vao,
ebo,
vbo_instance,
atlas: vec![Atlas::new(ATLAS_SIZE, false)],
current_atlas: 0,
active_tex: 0,
batch: Batch::new(),
})
}
}
impl<'a> TextRenderer<'a> for Glsl3Renderer {
type RenderApi = RenderApi<'a>;
type RenderBatch = Batch;
type Shader = TextShaderProgram;
fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T
where
F: FnOnce(Self::RenderApi) -> T,
{
unsafe {
gl::UseProgram(self.program.id());
self.program.set_term_uniforms(size_info);
gl::BindVertexArray(self.vao);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance);
gl::ActiveTexture(gl::TEXTURE0);
}
let res = func(RenderApi {
active_tex: &mut self.active_tex,
batch: &mut self.batch,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
program: &mut self.program,
});
unsafe {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
}
res
}
fn program(&self) -> &Self::Shader {
&self.program
}
fn loader_api(&mut self) -> LoaderApi<'_> {
LoaderApi {
active_tex: &mut self.active_tex,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
}
}
}
impl Drop for Glsl3Renderer {
fn drop(&mut self) {
unsafe {
gl::DeleteBuffers(1, &self.vbo_instance);
gl::DeleteBuffers(1, &self.ebo);
gl::DeleteVertexArrays(1, &self.vao);
}
}
}
#[derive(Debug)]
pub struct | <'a> {
active_tex: &'a mut GLuint,
batch: &'a mut Batch,
atlas: &'a mut Vec<Atlas>,
current_atlas: &'a mut usize,
program: &'a mut TextShaderProgram,
}
impl<'a> TextRenderApi<Batch> for RenderApi<'a> {
fn batch(&mut self) -> &mut Batch {
self.batch
}
fn render_batch(&mut self) {
unsafe {
gl::BufferSubData(
gl::ARRAY_BUFFER,
0,
self.batch.size() as isize,
self.batch.instances.as_ptr() as *const _,
);
}
// Bind texture if necessary.
if *self.active_tex != self.batch.tex() {
unsafe {
gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
}
*self.active_tex = self.batch.tex();
}
unsafe {
self.program.set_rendering_pass(RenderingPass::Background);
gl::DrawElementsInstanced(
gl::TRIANGLES,
6,
gl::UNSIGNED_INT,
ptr::null(),
self.batch.len() as GLsizei,
);
self.program.set_rendering_pass(RenderingPass::SubpixelPass1);
gl::DrawElementsInstanced(
gl::TRIANGLES,
6,
gl::UNSIGNED_INT,
ptr::null(),
self.batch.len() as GLsizei,
);
}
self.batch.clear();
}
}
impl<'a> LoadGlyph for RenderApi<'a> {
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph {
Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized)
}
fn clear(&mut self) {
Atlas::clear_atlas(self.atlas, self.current_atlas)
}
}
impl<'a> Drop for RenderApi<'a> {
fn drop(&mut self) {
if !self.batch.is_empty() {
self.render_batch();
}
}
}
#[derive(Debug)]
#[repr(C)]
struct InstanceData {
// Coords.
col: u16,
row: u16,
// Glyph offset.
left: i16,
top: i16,
// Glyph size.
width: i16,
height: i16,
// UV offset.
uv_left: f32,
uv_bot: f32,
// uv scale.
uv_width: f32,
uv_height: f32,
// Color.
r: u8,
g: u8,
b: u8,
// Cell flags like multicolor or fullwidth character.
cell_flags: RenderingGlyphFlags,
// Background color.
bg_r: u8,
bg_g: u8,
bg_b: u8,
bg_a: u8,
}
#[derive(Debug, Default)]
pub struct Batch {
tex: GLuint,
instances: Vec<InstanceData>,
}
impl TextRenderBatch for Batch {
#[inline]
fn tex(&self) -> GLuint {
self.tex
}
#[inline]
fn full(&self) -> bool {
self.capacity() == self.len()
}
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, _: &SizeInfo) {
if self.is_empty() {
self.tex = glyph.tex_id;
}
let mut cell_flags = RenderingGlyphFlags::empty();
cell_flags.set(RenderingGlyphFlags::COLORED, glyph.multicolor);
cell_flags.set(RenderingGlyphFlags::WIDE_CHAR, cell.flags.contains(Flags::WIDE_CHAR));
self.instances.push(InstanceData {
col: cell.point.column.0 as u16,
row: cell.point.line as u16,
top: glyph.top,
left: glyph.left,
width: glyph.width,
height: glyph | RenderApi | identifier_name |
glsl3.rs | ::GenBuffers(1, &mut vbo_instance);
gl::BindVertexArray(vao);
// ---------------------
// Set up element buffer
// ---------------------
let indices: [u32; 6] = [0, 1, 3, 1, 2, 3];
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
(6 * size_of::<u32>()) as isize,
indices.as_ptr() as *const _,
gl::STATIC_DRAW,
);
// ----------------------------
// Setup vertex instance buffer
// ----------------------------
gl::BindBuffer(gl::ARRAY_BUFFER, vbo_instance);
gl::BufferData(
gl::ARRAY_BUFFER,
(BATCH_MAX * size_of::<InstanceData>()) as isize,
ptr::null(),
gl::STREAM_DRAW,
);
let mut index = 0;
let mut size = 0;
macro_rules! add_attr {
($count:expr, $gl_type:expr, $type:ty) => {
gl::VertexAttribPointer(
index,
$count,
$gl_type,
gl::FALSE,
size_of::<InstanceData>() as i32,
size as *const _,
);
gl::EnableVertexAttribArray(index);
gl::VertexAttribDivisor(index, 1);
#[allow(unused_assignments)]
{
size += $count * size_of::<$type>();
index += 1;
}
};
}
// Coords.
add_attr!(2, gl::UNSIGNED_SHORT, u16);
// Glyph offset and size.
add_attr!(4, gl::SHORT, i16);
// UV offset.
add_attr!(4, gl::FLOAT, f32);
// Color and cell flags.
//
// These are packed together because of an OpenGL driver issue on macOS, which caused a
// `vec3(u8)` text color and a `u8` cell flags to increase the rendering time by a
// huge margin.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Background color.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Cleanup.
gl::BindVertexArray(0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
}
Ok(Self {
program,
vao,
ebo,
vbo_instance,
atlas: vec![Atlas::new(ATLAS_SIZE, false)],
current_atlas: 0,
active_tex: 0,
batch: Batch::new(),
})
}
}
impl<'a> TextRenderer<'a> for Glsl3Renderer {
type RenderApi = RenderApi<'a>;
type RenderBatch = Batch;
type Shader = TextShaderProgram;
fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T
where
F: FnOnce(Self::RenderApi) -> T,
{
unsafe {
gl::UseProgram(self.program.id());
self.program.set_term_uniforms(size_info);
gl::BindVertexArray(self.vao);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance);
gl::ActiveTexture(gl::TEXTURE0);
}
let res = func(RenderApi {
active_tex: &mut self.active_tex,
batch: &mut self.batch,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
program: &mut self.program,
});
unsafe {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
}
res
}
fn program(&self) -> &Self::Shader {
&self.program
}
fn loader_api(&mut self) -> LoaderApi<'_> {
LoaderApi {
active_tex: &mut self.active_tex,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
}
}
}
impl Drop for Glsl3Renderer {
fn drop(&mut self) {
unsafe {
gl::DeleteBuffers(1, &self.vbo_instance);
gl::DeleteBuffers(1, &self.ebo);
gl::DeleteVertexArrays(1, &self.vao);
}
}
}
#[derive(Debug)]
pub struct RenderApi<'a> {
active_tex: &'a mut GLuint,
batch: &'a mut Batch,
atlas: &'a mut Vec<Atlas>,
current_atlas: &'a mut usize,
program: &'a mut TextShaderProgram,
}
impl<'a> TextRenderApi<Batch> for RenderApi<'a> {
fn batch(&mut self) -> &mut Batch {
self.batch
}
fn render_batch(&mut self) {
unsafe {
gl::BufferSubData(
gl::ARRAY_BUFFER,
0,
self.batch.size() as isize,
self.batch.instances.as_ptr() as *const _,
);
}
// Bind texture if necessary.
if *self.active_tex != self.batch.tex() {
unsafe {
gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
}
*self.active_tex = self.batch.tex();
}
unsafe {
self.program.set_rendering_pass(RenderingPass::Background);
gl::DrawElementsInstanced(
gl::TRIANGLES,
6,
gl::UNSIGNED_INT,
ptr::null(),
self.batch.len() as GLsizei,
);
self.program.set_rendering_pass(RenderingPass::SubpixelPass1);
gl::DrawElementsInstanced(
gl::TRIANGLES,
6,
gl::UNSIGNED_INT,
ptr::null(),
self.batch.len() as GLsizei,
);
}
self.batch.clear();
}
}
impl<'a> LoadGlyph for RenderApi<'a> {
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph {
Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized)
}
fn clear(&mut self) {
Atlas::clear_atlas(self.atlas, self.current_atlas)
}
}
impl<'a> Drop for RenderApi<'a> {
fn drop(&mut self) {
if !self.batch.is_empty() {
self.render_batch();
}
}
}
#[derive(Debug)]
#[repr(C)]
struct InstanceData {
// Coords.
col: u16,
row: u16,
// Glyph offset.
left: i16,
top: i16,
// Glyph size.
width: i16,
height: i16,
// UV offset.
uv_left: f32,
uv_bot: f32,
// uv scale.
uv_width: f32,
uv_height: f32,
// Color.
r: u8,
g: u8,
b: u8,
// Cell flags like multicolor or fullwidth character.
cell_flags: RenderingGlyphFlags,
// Background color.
bg_r: u8,
bg_g: u8,
bg_b: u8,
bg_a: u8,
}
#[derive(Debug, Default)]
pub struct Batch {
tex: GLuint,
instances: Vec<InstanceData>,
}
impl TextRenderBatch for Batch {
#[inline]
fn tex(&self) -> GLuint {
self.tex
}
#[inline]
fn full(&self) -> bool {
self.capacity() == self.len()
}
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, _: &SizeInfo) {
if self.is_empty() {
self.tex = glyph.tex_id;
}
let mut cell_flags = RenderingGlyphFlags::empty();
cell_flags.set(RenderingGlyphFlags::COLORED, glyph.multicolor);
cell_flags.set(RenderingGlyphFlags::WIDE_CHAR, cell.flags.contains(Flags::WIDE_CHAR));
self.instances.push(InstanceData {
col: cell.point.column.0 as u16,
row: cell.point.line as u16,
top: glyph.top,
left: glyph.left,
width: glyph.width,
height: glyph.height,
uv_bot: glyph.uv_bot,
uv_left: glyph.uv_left,
uv_width: glyph.uv_width,
uv_height: glyph.uv_height,
r: cell.fg.r,
g: cell.fg.g,
b: cell.fg.b,
cell_flags,
bg_r: cell.bg.r,
bg_g: cell.bg.g,
bg_b: cell.bg.b,
bg_a: (cell.bg_alpha * 255.0) as u8,
});
}
}
impl Batch {
#[inline]
pub fn new() -> Self {
Self { tex: 0, instances: Vec::with_capacity(BATCH_MAX) }
}
#[inline]
pub fn len(&self) -> usize | {
self.instances.len()
} | identifier_body | |
service.rs | /// A future that orchestrates the entire aggregator service.
// TODO: maybe add a HashSet or HashMap of clients who already
// uploaded their weights to prevent a client from uploading weights
// multiple times. Or we could just remove that ID from the
// `allowed_ids` map.
// TODO: maybe add a HashSet for clients that are already
// downloading/uploading, to prevent DoS attacks.
pub struct Service<A>
where
A: Aggregator,
{
/// Clients that the coordinator selected for the current
/// round. They can use their unique token to download the global
/// weights and upload their own local results once they finished
/// training.
allowed_ids: HashMap<ClientId, Token>,
/// The latest global weights as computed by the aggregator.
// NOTE: We could store this directly in the task that handles the
// HTTP requests. I initially though that having it here would
// make it easier to bypass the HTTP layer, which is convenient
// for testing because we can simulate client with just
// AggregatorHandles. But maybe that's just another layer of
// complexity that is not worth it.
global_weights: Bytes,
/// The aggregator itself, which handles the weights or performs
/// the aggregations.
aggregator: A,
/// A client for the coordinator RPC service.
rpc_client: coordinator::rpc::Client,
requests: ServiceRequests<A>,
aggregation_future: Option<AggregationFuture<A>>,
model_number: usize,
}
/// This trait defines the methods that an aggregator should
/// implement.
pub trait Aggregator {
type Error: Error + Send + 'static + Sync;
type AggregateFut: Future<Output = Result<Bytes, Self::Error>> + Unpin;
type AddWeightsFut: Future<Output = Result<(), Self::Error>> + Unpin + Send + 'static;
/// Check the validity of the given weights and if they are valid,
/// add them to the set of weights to aggregate.
fn add_weights(&mut self, weights: Bytes) -> Self::AddWeightsFut;
/// Run the aggregator and return the result.
fn aggregate(&mut self) -> Self::AggregateFut;
}
impl<A> Service<A>
where
A: Aggregator,
{
pub fn new(
aggregator: A,
rpc_client: coordinator::rpc::Client,
requests: ServiceRequests<A>,
) -> Self {
Self {
aggregator,
requests,
rpc_client,
allowed_ids: HashMap::new(),
global_weights: Bytes::new(),
aggregation_future: None,
model_number: 0,
}
}
/// Handle the incoming requests.
fn poll_requests(&mut self, cx: &mut Context) -> Poll<()> {
trace!("polling requests");
loop {
match ready!(Pin::new(&mut self.requests).poll_next(cx)) {
Some(request) => self.handle_request(request),
None => {
trace!("no more request to handle");
return Poll::Ready(());
}
}
}
}
fn handle_download_request(&mut self, request: DownloadRequest) {
debug!("handling download request");
let DownloadRequest {
credentials,
response_tx,
} = request;
if self
.allowed_ids
.get(credentials.id())
.map(|expected_token| credentials.token() == expected_token)
.unwrap_or(false)
{
let _ = response_tx.send(Ok(self.global_weights.clone()));
} else {
warn!("rejecting download request");
let _ = response_tx.send(Err(DownloadError::Unauthorized));
}
}
fn handle_upload_request(&mut self, request: UploadRequest) {
debug!("handling upload request");
let UploadRequest { credentials, data } = request;
let accept_upload = self
.allowed_ids
.get(credentials.id())
.map(|expected_token| credentials.token() == expected_token)
.unwrap_or(false);
if !accept_upload {
warn!("rejecting upload request");
return;
}
let mut rpc_client = self.rpc_client.clone();
let fut = self.aggregator.add_weights(data);
tokio::spawn(
async move {
let result = fut.await;
debug!("sending end training request to the coordinator");
rpc_client
.end_training(rpc_context(), *credentials.id(), result.is_ok())
.await
.map_err(|e| {
warn!(
"failed to send end training request to the coordinator: {}",
e
);
})
}
.instrument(trace_span!("end_training_rpc_request")),
);
}
fn handle_request(&mut self, request: Request<A>) {
match request {
Request::Download(req) => self.handle_download_request(req),
Request::Upload(req) => self.handle_upload_request(req),
Request::Select(req) => self.handle_select_request(req),
Request::Aggregate(req) => self.handle_aggregate_request(req),
}
}
fn handle_aggregate_request(&mut self, request: AggregateRequest<A>) {
info!("handling aggregate request");
let AggregateRequest { response_tx } = request;
self.allowed_ids = HashMap::new();
self.aggregation_future = Some(AggregationFuture {
future: self.aggregator.aggregate(),
response_tx,
});
}
fn handle_select_request(&mut self, request: SelectRequest<A>) {
info!("handling select request");
let SelectRequest {
credentials,
response_tx,
} = request;
let (id, token) = credentials.into_parts();
self.allowed_ids.insert(id, token);
if response_tx.send(Ok(())).is_err() {
warn!("failed to send reponse: channel closed");
}
}
#[allow(clippy::cognitive_complexity)]
fn poll_aggregation(&mut self, cx: &mut Context) {
// Check if we're waiting for an aggregation, ie whether
// there's a future to poll.
let future = if let Some(future) = self.aggregation_future.take() {
future
} else {
trace!("no aggregation future running: skipping polling");
return;
};
trace!("polling aggregation future");
let AggregationFuture {
mut future,
response_tx,
} = future;
let result = match Pin::new(&mut future).poll(cx) {
Poll::Ready(Ok(weights)) => {
info!("aggregation succeeded, settings global weights");
self.global_weights = weights;
if let Ok(path) = env::var("NEVERMINED_OUTPUTS_PATH") {
let file_name = format!("{}/model_{}.npy", path, self.model_number);
let mut file = File::create(&file_name).unwrap();
info!("Writing model {}", file_name);
file.write_all(&self.global_weights).unwrap();
self.model_number += 1;
}
Ok(())
}
Poll::Ready(Err(e)) => {
error!(error = %e, "aggregation failed");
Err(e)
}
Poll::Pending => {
debug!("aggregation future still running");
self.aggregation_future = Some(AggregationFuture {
future,
response_tx,
});
return;
}
};
if response_tx.send(result).is_err() {
error!("failed to send aggregation response to RPC task: receiver dropped");
}
if self.model_number == 10 {
thread::sleep(Duration::from_millis(10 * 1000));
signal::kill(Pid::this(), signal::Signal::SIGINT).unwrap();
}
}
}
struct AggregationFuture<A>
where
A: Aggregator,
{
future: A::AggregateFut,
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
impl<A> Future for Service<A>
where
A: Aggregator + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
trace!("polling Service");
let pin = self.get_mut();
if let Poll::Ready(_) = pin.poll_requests(cx) {
return Poll::Ready(());
}
pin.poll_aggregation(cx);
Poll::Pending
}
}
pub struct ServiceRequests<A>(Pin<Box<dyn Stream<Item = Request<A>> + Send>>)
where
A: Aggregator;
impl<A> Stream for ServiceRequests<A>
where
A: Aggregator,
{
type Item = Request<A>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
trace!("polling ServiceRequests");
self.0.as_mut().poll_next(cx)
}
}
impl<A> ServiceRequests<A>
where
A: Aggregator + 'static,
{
fn new(
upload: UnboundedReceiver<UploadRequest>,
download: UnboundedReceiver<DownloadRequest>,
aggregate: UnboundedReceiver<AggregateRequest<A>>,
select: UnboundedReceiver<SelectRequest<A>>,
) -> Self {
let stream = download
.map(Request::from)
.merge(upload.map(Request::from))
.merge(aggregate.map(Request::from))
.merge(select.map(Request::from));
Self(Box::pin(stream))
}
}
#[derive(From)]
pub struct UploadRequest {
credentials: Credentials,
data: Bytes,
}
#[derive(From)]
pub struct DownloadRequest {
credentials: Credentials,
response_tx: oneshot::Sender<Result<Bytes, DownloadError>>,
}
#[derive(From)]
| random_line_split | ||
service.rs | the aggregations.
aggregator: A,
/// A client for the coordinator RPC service.
rpc_client: coordinator::rpc::Client,
requests: ServiceRequests<A>,
aggregation_future: Option<AggregationFuture<A>>,
model_number: usize,
}
/// This trait defines the methods that an aggregator should
/// implement.
pub trait Aggregator {
type Error: Error + Send + 'static + Sync;
type AggregateFut: Future<Output = Result<Bytes, Self::Error>> + Unpin;
type AddWeightsFut: Future<Output = Result<(), Self::Error>> + Unpin + Send + 'static;
/// Check the validity of the given weights and if they are valid,
/// add them to the set of weights to aggregate.
fn add_weights(&mut self, weights: Bytes) -> Self::AddWeightsFut;
/// Run the aggregator and return the result.
fn aggregate(&mut self) -> Self::AggregateFut;
}
impl<A> Service<A>
where
A: Aggregator,
{
pub fn new(
aggregator: A,
rpc_client: coordinator::rpc::Client,
requests: ServiceRequests<A>,
) -> Self {
Self {
aggregator,
requests,
rpc_client,
allowed_ids: HashMap::new(),
global_weights: Bytes::new(),
aggregation_future: None,
model_number: 0,
}
}
/// Handle the incoming requests.
fn poll_requests(&mut self, cx: &mut Context) -> Poll<()> {
trace!("polling requests");
loop {
match ready!(Pin::new(&mut self.requests).poll_next(cx)) {
Some(request) => self.handle_request(request),
None => {
trace!("no more request to handle");
return Poll::Ready(());
}
}
}
}
fn handle_download_request(&mut self, request: DownloadRequest) {
debug!("handling download request");
let DownloadRequest {
credentials,
response_tx,
} = request;
if self
.allowed_ids
.get(credentials.id())
.map(|expected_token| credentials.token() == expected_token)
.unwrap_or(false)
{
let _ = response_tx.send(Ok(self.global_weights.clone()));
} else {
warn!("rejecting download request");
let _ = response_tx.send(Err(DownloadError::Unauthorized));
}
}
fn handle_upload_request(&mut self, request: UploadRequest) {
debug!("handling upload request");
let UploadRequest { credentials, data } = request;
let accept_upload = self
.allowed_ids
.get(credentials.id())
.map(|expected_token| credentials.token() == expected_token)
.unwrap_or(false);
if !accept_upload {
warn!("rejecting upload request");
return;
}
let mut rpc_client = self.rpc_client.clone();
let fut = self.aggregator.add_weights(data);
tokio::spawn(
async move {
let result = fut.await;
debug!("sending end training request to the coordinator");
rpc_client
.end_training(rpc_context(), *credentials.id(), result.is_ok())
.await
.map_err(|e| {
warn!(
"failed to send end training request to the coordinator: {}",
e
);
})
}
.instrument(trace_span!("end_training_rpc_request")),
);
}
fn handle_request(&mut self, request: Request<A>) {
match request {
Request::Download(req) => self.handle_download_request(req),
Request::Upload(req) => self.handle_upload_request(req),
Request::Select(req) => self.handle_select_request(req),
Request::Aggregate(req) => self.handle_aggregate_request(req),
}
}
fn handle_aggregate_request(&mut self, request: AggregateRequest<A>) {
info!("handling aggregate request");
let AggregateRequest { response_tx } = request;
self.allowed_ids = HashMap::new();
self.aggregation_future = Some(AggregationFuture {
future: self.aggregator.aggregate(),
response_tx,
});
}
fn handle_select_request(&mut self, request: SelectRequest<A>) {
info!("handling select request");
let SelectRequest {
credentials,
response_tx,
} = request;
let (id, token) = credentials.into_parts();
self.allowed_ids.insert(id, token);
if response_tx.send(Ok(())).is_err() {
warn!("failed to send reponse: channel closed");
}
}
#[allow(clippy::cognitive_complexity)]
fn poll_aggregation(&mut self, cx: &mut Context) {
// Check if we're waiting for an aggregation, ie whether
// there's a future to poll.
let future = if let Some(future) = self.aggregation_future.take() {
future
} else {
trace!("no aggregation future running: skipping polling");
return;
};
trace!("polling aggregation future");
let AggregationFuture {
mut future,
response_tx,
} = future;
let result = match Pin::new(&mut future).poll(cx) {
Poll::Ready(Ok(weights)) => {
info!("aggregation succeeded, settings global weights");
self.global_weights = weights;
if let Ok(path) = env::var("NEVERMINED_OUTPUTS_PATH") {
let file_name = format!("{}/model_{}.npy", path, self.model_number);
let mut file = File::create(&file_name).unwrap();
info!("Writing model {}", file_name);
file.write_all(&self.global_weights).unwrap();
self.model_number += 1;
}
Ok(())
}
Poll::Ready(Err(e)) => {
error!(error = %e, "aggregation failed");
Err(e)
}
Poll::Pending => {
debug!("aggregation future still running");
self.aggregation_future = Some(AggregationFuture {
future,
response_tx,
});
return;
}
};
if response_tx.send(result).is_err() {
error!("failed to send aggregation response to RPC task: receiver dropped");
}
if self.model_number == 10 {
thread::sleep(Duration::from_millis(10 * 1000));
signal::kill(Pid::this(), signal::Signal::SIGINT).unwrap();
}
}
}
struct AggregationFuture<A>
where
A: Aggregator,
{
future: A::AggregateFut,
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
impl<A> Future for Service<A>
where
A: Aggregator + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
trace!("polling Service");
let pin = self.get_mut();
if let Poll::Ready(_) = pin.poll_requests(cx) {
return Poll::Ready(());
}
pin.poll_aggregation(cx);
Poll::Pending
}
}
pub struct ServiceRequests<A>(Pin<Box<dyn Stream<Item = Request<A>> + Send>>)
where
A: Aggregator;
impl<A> Stream for ServiceRequests<A>
where
A: Aggregator,
{
type Item = Request<A>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
trace!("polling ServiceRequests");
self.0.as_mut().poll_next(cx)
}
}
impl<A> ServiceRequests<A>
where
A: Aggregator + 'static,
{
fn new(
upload: UnboundedReceiver<UploadRequest>,
download: UnboundedReceiver<DownloadRequest>,
aggregate: UnboundedReceiver<AggregateRequest<A>>,
select: UnboundedReceiver<SelectRequest<A>>,
) -> Self {
let stream = download
.map(Request::from)
.merge(upload.map(Request::from))
.merge(aggregate.map(Request::from))
.merge(select.map(Request::from));
Self(Box::pin(stream))
}
}
#[derive(From)]
pub struct UploadRequest {
credentials: Credentials,
data: Bytes,
}
#[derive(From)]
pub struct DownloadRequest {
credentials: Credentials,
response_tx: oneshot::Sender<Result<Bytes, DownloadError>>,
}
#[derive(From)]
pub struct AggregateRequest<A>
where
A: Aggregator,
{
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
#[derive(From)]
pub struct SelectRequest<A>
where
A: Aggregator,
{
credentials: Credentials,
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
#[derive(From)]
pub enum Request<A>
where
A: Aggregator,
{
Upload(UploadRequest),
Download(DownloadRequest),
Aggregate(AggregateRequest<A>),
Select(SelectRequest<A>),
}
pub struct ServiceHandle<A>
where
A: Aggregator,
{
upload: UnboundedSender<UploadRequest>,
download: UnboundedSender<DownloadRequest>,
aggregate: UnboundedSender<AggregateRequest<A>>,
select: UnboundedSender<SelectRequest<A>>,
}
// We implement Clone manually because it can only be derived if A:
// Clone, which we don't want.
impl<A> Clone for ServiceHandle<A>
where
A: Aggregator,
{
fn clone(&self) -> Self | {
Self {
upload: self.upload.clone(),
download: self.download.clone(),
aggregate: self.aggregate.clone(),
select: self.select.clone(),
}
} | identifier_body | |
service.rs | that having it here would
// make it easier to bypass the HTTP layer, which is convenient
// for testing because we can simulate client with just
// AggregatorHandles. But maybe that's just another layer of
// complexity that is not worth it.
global_weights: Bytes,
/// The aggregator itself, which handles the weights or performs
/// the aggregations.
aggregator: A,
/// A client for the coordinator RPC service.
rpc_client: coordinator::rpc::Client,
requests: ServiceRequests<A>,
aggregation_future: Option<AggregationFuture<A>>,
model_number: usize,
}
/// This trait defines the methods that an aggregator should
/// implement.
pub trait Aggregator {
type Error: Error + Send + 'static + Sync;
type AggregateFut: Future<Output = Result<Bytes, Self::Error>> + Unpin;
type AddWeightsFut: Future<Output = Result<(), Self::Error>> + Unpin + Send + 'static;
/// Check the validity of the given weights and if they are valid,
/// add them to the set of weights to aggregate.
fn add_weights(&mut self, weights: Bytes) -> Self::AddWeightsFut;
/// Run the aggregator and return the result.
fn aggregate(&mut self) -> Self::AggregateFut;
}
impl<A> Service<A>
where
A: Aggregator,
{
pub fn new(
aggregator: A,
rpc_client: coordinator::rpc::Client,
requests: ServiceRequests<A>,
) -> Self {
Self {
aggregator,
requests,
rpc_client,
allowed_ids: HashMap::new(),
global_weights: Bytes::new(),
aggregation_future: None,
model_number: 0,
}
}
/// Handle the incoming requests.
fn poll_requests(&mut self, cx: &mut Context) -> Poll<()> {
trace!("polling requests");
loop {
match ready!(Pin::new(&mut self.requests).poll_next(cx)) {
Some(request) => self.handle_request(request),
None => {
trace!("no more request to handle");
return Poll::Ready(());
}
}
}
}
fn handle_download_request(&mut self, request: DownloadRequest) {
debug!("handling download request");
let DownloadRequest {
credentials,
response_tx,
} = request;
if self
.allowed_ids
.get(credentials.id())
.map(|expected_token| credentials.token() == expected_token)
.unwrap_or(false)
{
let _ = response_tx.send(Ok(self.global_weights.clone()));
} else {
warn!("rejecting download request");
let _ = response_tx.send(Err(DownloadError::Unauthorized));
}
}
fn handle_upload_request(&mut self, request: UploadRequest) {
debug!("handling upload request");
let UploadRequest { credentials, data } = request;
let accept_upload = self
.allowed_ids
.get(credentials.id())
.map(|expected_token| credentials.token() == expected_token)
.unwrap_or(false);
if !accept_upload {
warn!("rejecting upload request");
return;
}
let mut rpc_client = self.rpc_client.clone();
let fut = self.aggregator.add_weights(data);
tokio::spawn(
async move {
let result = fut.await;
debug!("sending end training request to the coordinator");
rpc_client
.end_training(rpc_context(), *credentials.id(), result.is_ok())
.await
.map_err(|e| {
warn!(
"failed to send end training request to the coordinator: {}",
e
);
})
}
.instrument(trace_span!("end_training_rpc_request")),
);
}
fn handle_request(&mut self, request: Request<A>) {
match request {
Request::Download(req) => self.handle_download_request(req),
Request::Upload(req) => self.handle_upload_request(req),
Request::Select(req) => self.handle_select_request(req),
Request::Aggregate(req) => self.handle_aggregate_request(req),
}
}
fn handle_aggregate_request(&mut self, request: AggregateRequest<A>) {
info!("handling aggregate request");
let AggregateRequest { response_tx } = request;
self.allowed_ids = HashMap::new();
self.aggregation_future = Some(AggregationFuture {
future: self.aggregator.aggregate(),
response_tx,
});
}
fn | (&mut self, request: SelectRequest<A>) {
info!("handling select request");
let SelectRequest {
credentials,
response_tx,
} = request;
let (id, token) = credentials.into_parts();
self.allowed_ids.insert(id, token);
if response_tx.send(Ok(())).is_err() {
warn!("failed to send reponse: channel closed");
}
}
#[allow(clippy::cognitive_complexity)]
fn poll_aggregation(&mut self, cx: &mut Context) {
// Check if we're waiting for an aggregation, ie whether
// there's a future to poll.
let future = if let Some(future) = self.aggregation_future.take() {
future
} else {
trace!("no aggregation future running: skipping polling");
return;
};
trace!("polling aggregation future");
let AggregationFuture {
mut future,
response_tx,
} = future;
let result = match Pin::new(&mut future).poll(cx) {
Poll::Ready(Ok(weights)) => {
info!("aggregation succeeded, settings global weights");
self.global_weights = weights;
if let Ok(path) = env::var("NEVERMINED_OUTPUTS_PATH") {
let file_name = format!("{}/model_{}.npy", path, self.model_number);
let mut file = File::create(&file_name).unwrap();
info!("Writing model {}", file_name);
file.write_all(&self.global_weights).unwrap();
self.model_number += 1;
}
Ok(())
}
Poll::Ready(Err(e)) => {
error!(error = %e, "aggregation failed");
Err(e)
}
Poll::Pending => {
debug!("aggregation future still running");
self.aggregation_future = Some(AggregationFuture {
future,
response_tx,
});
return;
}
};
if response_tx.send(result).is_err() {
error!("failed to send aggregation response to RPC task: receiver dropped");
}
if self.model_number == 10 {
thread::sleep(Duration::from_millis(10 * 1000));
signal::kill(Pid::this(), signal::Signal::SIGINT).unwrap();
}
}
}
struct AggregationFuture<A>
where
A: Aggregator,
{
future: A::AggregateFut,
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
impl<A> Future for Service<A>
where
A: Aggregator + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
trace!("polling Service");
let pin = self.get_mut();
if let Poll::Ready(_) = pin.poll_requests(cx) {
return Poll::Ready(());
}
pin.poll_aggregation(cx);
Poll::Pending
}
}
pub struct ServiceRequests<A>(Pin<Box<dyn Stream<Item = Request<A>> + Send>>)
where
A: Aggregator;
impl<A> Stream for ServiceRequests<A>
where
A: Aggregator,
{
type Item = Request<A>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
trace!("polling ServiceRequests");
self.0.as_mut().poll_next(cx)
}
}
impl<A> ServiceRequests<A>
where
A: Aggregator + 'static,
{
fn new(
upload: UnboundedReceiver<UploadRequest>,
download: UnboundedReceiver<DownloadRequest>,
aggregate: UnboundedReceiver<AggregateRequest<A>>,
select: UnboundedReceiver<SelectRequest<A>>,
) -> Self {
let stream = download
.map(Request::from)
.merge(upload.map(Request::from))
.merge(aggregate.map(Request::from))
.merge(select.map(Request::from));
Self(Box::pin(stream))
}
}
#[derive(From)]
pub struct UploadRequest {
credentials: Credentials,
data: Bytes,
}
#[derive(From)]
pub struct DownloadRequest {
credentials: Credentials,
response_tx: oneshot::Sender<Result<Bytes, DownloadError>>,
}
#[derive(From)]
pub struct AggregateRequest<A>
where
A: Aggregator,
{
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
#[derive(From)]
pub struct SelectRequest<A>
where
A: Aggregator,
{
credentials: Credentials,
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
#[derive(From)]
pub enum Request<A>
where
A: Aggregator,
{
Upload(UploadRequest),
Download(DownloadRequest),
Aggregate(AggregateRequest<A>),
Select(SelectRequest<A>),
}
pub struct ServiceHandle<A>
where
A: Aggregator,
{
upload: UnboundedSender<UploadRequest>,
download: UnboundedSender<DownloadRequest>,
aggregate: UnboundedSender<AggregateRequest<A>>,
select: UnboundedSender<SelectRequest<A>>,
}
// We implement Clone manually because it | handle_select_request | identifier_name |
service.rs | that having it here would
// make it easier to bypass the HTTP layer, which is convenient
// for testing because we can simulate client with just
// AggregatorHandles. But maybe that's just another layer of
// complexity that is not worth it.
global_weights: Bytes,
/// The aggregator itself, which handles the weights or performs
/// the aggregations.
aggregator: A,
/// A client for the coordinator RPC service.
rpc_client: coordinator::rpc::Client,
requests: ServiceRequests<A>,
aggregation_future: Option<AggregationFuture<A>>,
model_number: usize,
}
/// This trait defines the methods that an aggregator should
/// implement.
pub trait Aggregator {
type Error: Error + Send + 'static + Sync;
type AggregateFut: Future<Output = Result<Bytes, Self::Error>> + Unpin;
type AddWeightsFut: Future<Output = Result<(), Self::Error>> + Unpin + Send + 'static;
/// Check the validity of the given weights and if they are valid,
/// add them to the set of weights to aggregate.
fn add_weights(&mut self, weights: Bytes) -> Self::AddWeightsFut;
/// Run the aggregator and return the result.
fn aggregate(&mut self) -> Self::AggregateFut;
}
impl<A> Service<A>
where
A: Aggregator,
{
pub fn new(
aggregator: A,
rpc_client: coordinator::rpc::Client,
requests: ServiceRequests<A>,
) -> Self {
Self {
aggregator,
requests,
rpc_client,
allowed_ids: HashMap::new(),
global_weights: Bytes::new(),
aggregation_future: None,
model_number: 0,
}
}
/// Handle the incoming requests.
fn poll_requests(&mut self, cx: &mut Context) -> Poll<()> {
trace!("polling requests");
loop {
match ready!(Pin::new(&mut self.requests).poll_next(cx)) {
Some(request) => self.handle_request(request),
None => {
trace!("no more request to handle");
return Poll::Ready(());
}
}
}
}
fn handle_download_request(&mut self, request: DownloadRequest) {
debug!("handling download request");
let DownloadRequest {
credentials,
response_tx,
} = request;
if self
.allowed_ids
.get(credentials.id())
.map(|expected_token| credentials.token() == expected_token)
.unwrap_or(false)
{
let _ = response_tx.send(Ok(self.global_weights.clone()));
} else {
warn!("rejecting download request");
let _ = response_tx.send(Err(DownloadError::Unauthorized));
}
}
fn handle_upload_request(&mut self, request: UploadRequest) {
debug!("handling upload request");
let UploadRequest { credentials, data } = request;
let accept_upload = self
.allowed_ids
.get(credentials.id())
.map(|expected_token| credentials.token() == expected_token)
.unwrap_or(false);
if !accept_upload {
warn!("rejecting upload request");
return;
}
let mut rpc_client = self.rpc_client.clone();
let fut = self.aggregator.add_weights(data);
tokio::spawn(
async move {
let result = fut.await;
debug!("sending end training request to the coordinator");
rpc_client
.end_training(rpc_context(), *credentials.id(), result.is_ok())
.await
.map_err(|e| {
warn!(
"failed to send end training request to the coordinator: {}",
e
);
})
}
.instrument(trace_span!("end_training_rpc_request")),
);
}
fn handle_request(&mut self, request: Request<A>) {
match request {
Request::Download(req) => self.handle_download_request(req),
Request::Upload(req) => self.handle_upload_request(req),
Request::Select(req) => self.handle_select_request(req),
Request::Aggregate(req) => self.handle_aggregate_request(req),
}
}
fn handle_aggregate_request(&mut self, request: AggregateRequest<A>) {
info!("handling aggregate request");
let AggregateRequest { response_tx } = request;
self.allowed_ids = HashMap::new();
self.aggregation_future = Some(AggregationFuture {
future: self.aggregator.aggregate(),
response_tx,
});
}
fn handle_select_request(&mut self, request: SelectRequest<A>) {
info!("handling select request");
let SelectRequest {
credentials,
response_tx,
} = request;
let (id, token) = credentials.into_parts();
self.allowed_ids.insert(id, token);
if response_tx.send(Ok(())).is_err() {
warn!("failed to send reponse: channel closed");
}
}
#[allow(clippy::cognitive_complexity)]
fn poll_aggregation(&mut self, cx: &mut Context) {
// Check if we're waiting for an aggregation, ie whether
// there's a future to poll.
let future = if let Some(future) = self.aggregation_future.take() {
future
} else {
trace!("no aggregation future running: skipping polling");
return;
};
trace!("polling aggregation future");
let AggregationFuture {
mut future,
response_tx,
} = future;
let result = match Pin::new(&mut future).poll(cx) {
Poll::Ready(Ok(weights)) => {
info!("aggregation succeeded, settings global weights");
self.global_weights = weights;
if let Ok(path) = env::var("NEVERMINED_OUTPUTS_PATH") {
let file_name = format!("{}/model_{}.npy", path, self.model_number);
let mut file = File::create(&file_name).unwrap();
info!("Writing model {}", file_name);
file.write_all(&self.global_weights).unwrap();
self.model_number += 1;
}
Ok(())
}
Poll::Ready(Err(e)) => {
error!(error = %e, "aggregation failed");
Err(e)
}
Poll::Pending => {
debug!("aggregation future still running");
self.aggregation_future = Some(AggregationFuture {
future,
response_tx,
});
return;
}
};
if response_tx.send(result).is_err() {
error!("failed to send aggregation response to RPC task: receiver dropped");
}
if self.model_number == 10 {
thread::sleep(Duration::from_millis(10 * 1000));
signal::kill(Pid::this(), signal::Signal::SIGINT).unwrap();
}
}
}
struct AggregationFuture<A>
where
A: Aggregator,
{
future: A::AggregateFut,
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
impl<A> Future for Service<A>
where
A: Aggregator + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
trace!("polling Service");
let pin = self.get_mut();
if let Poll::Ready(_) = pin.poll_requests(cx) |
pin.poll_aggregation(cx);
Poll::Pending
}
}
pub struct ServiceRequests<A>(Pin<Box<dyn Stream<Item = Request<A>> + Send>>)
where
A: Aggregator;
impl<A> Stream for ServiceRequests<A>
where
A: Aggregator,
{
type Item = Request<A>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
trace!("polling ServiceRequests");
self.0.as_mut().poll_next(cx)
}
}
impl<A> ServiceRequests<A>
where
A: Aggregator + 'static,
{
fn new(
upload: UnboundedReceiver<UploadRequest>,
download: UnboundedReceiver<DownloadRequest>,
aggregate: UnboundedReceiver<AggregateRequest<A>>,
select: UnboundedReceiver<SelectRequest<A>>,
) -> Self {
let stream = download
.map(Request::from)
.merge(upload.map(Request::from))
.merge(aggregate.map(Request::from))
.merge(select.map(Request::from));
Self(Box::pin(stream))
}
}
#[derive(From)]
pub struct UploadRequest {
credentials: Credentials,
data: Bytes,
}
#[derive(From)]
pub struct DownloadRequest {
credentials: Credentials,
response_tx: oneshot::Sender<Result<Bytes, DownloadError>>,
}
#[derive(From)]
pub struct AggregateRequest<A>
where
A: Aggregator,
{
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
#[derive(From)]
pub struct SelectRequest<A>
where
A: Aggregator,
{
credentials: Credentials,
response_tx: oneshot::Sender<Result<(), A::Error>>,
}
#[derive(From)]
pub enum Request<A>
where
A: Aggregator,
{
Upload(UploadRequest),
Download(DownloadRequest),
Aggregate(AggregateRequest<A>),
Select(SelectRequest<A>),
}
pub struct ServiceHandle<A>
where
A: Aggregator,
{
upload: UnboundedSender<UploadRequest>,
download: UnboundedSender<DownloadRequest>,
aggregate: UnboundedSender<AggregateRequest<A>>,
select: UnboundedSender<SelectRequest<A>>,
}
// We implement Clone manually because | {
return Poll::Ready(());
} | conditional_block |
utils.py | )
for args in
zip(names_list, thickness_list))
res = np.array(res)
Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]
return Rs, Ts, As
def merge_layers(categories, thicknesses):
'''
Merges consecutive layers with the same material types.
'''
thicknesses = thicknesses[1:-1]
c_output = [categories[0]]
t_output = [thicknesses[0]]
for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):
if c == c_output[-1]:
t_output[-1] += d
continue
else:
c_output.append(c)
t_output.append(d)
t_output.insert(0, np.inf)
t_output.insert(len(t_output), np.inf)
return c_output, t_output
def get_structure(categories, values, materials, ds, continuous=False,
max_value=400):
'''
Given categories and values, return the strucure in the form
(name (str), thickness (nm))
'''
def threshold(value):
'''
'''
names = [materials[item] for item in categories]
if not continuous:
thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
else:
thickness = []
for category, value in zip(categories, values):
name = materials[category]
if name == 'Ag':
thickness.append(
min(max(15, int(value * max_value//2)), max_value))
elif name in METALS:
thickness.append(
min(max(5, int(value * max_value//2)), max_value))
elif name in INSULATORS:
thickness.append(
min(max(1, int(value * max_value//2)), max_value))
else:
raise ValueError('Material not known')
# thickness = [np.inf] + [min(max(5, int(item * 2e2)), 200) for i,
# item in enumerate(values)] + [np.inf]
thickness = [np.inf] + thickness + [np.inf]
return names, thickness
class DesignTracker():
def __init__(self, epochs, **kwargs):
"""
This class tracks the best designs discovered.
"""
if epochs == -1:
self.layer_ls = []
self.thick_ls = []
self.max_ret_ls = []
self.layer_ls = [0] * epochs
self.thick_ls = [0] * epochs
self.max_ret_ls = [0] * epochs
self.kwargs = kwargs
self.current_e = 0
def store(self, layers, thicknesses, ret, e, append_mode=False):
if append_mode:
self.layer_ls.append(layers)
self.thick_ls.append(thicknesses)
self.max_ret_ls.append(ret)
else:
if ret >= self.max_ret_ls[e]:
self.layer_ls[e] = layers
self.thick_ls[e] = thicknesses
self.max_ret_ls[e] = ret
def save_state(self):
# save buffer from all processes
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))
pkl.dump(self, open(filename, 'wb'))
def print_progress(self):
progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
read_progress = []
for i in range(len(progress)):
if progress[i] == (0,0,0):
break
read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])
return read_progress
def print_progress(progress):
|
class TMM_sim():
def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):
'''
This class returns the spectrum given the designed structures.
'''
self.mats = mats
# include substrate
self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats
self.wavelength = wavelength
self.nk_dict = self.load_materials()
self.substrate = substrate
self.substrate_thick = substrate_thick
def load_materials(self):
'''
Load material nk and return corresponding interpolators.
Return:
nk_dict: dict, key -- material name, value: n, k in the
self.wavelength range
'''
nk_dict = {}
for mat in self.all_mats:
nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
nk.dropna(inplace=True)
wl = nk['wl'].to_numpy()
index = (nk['n'] + nk['k'] * 1.j).to_numpy()
mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
mat_nk_fn = interp1d(
mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')
nk_dict[mat] = mat_nk_fn(self.wavelength)
return nk_dict
def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
'''
Input:
materials: list
thickness: list
theta: degree, the incidence angle
Return:
s: array, spectrum
'''
degree = pi/180
if self.substrate != 'Air':
thickness.insert(-1, self.substrate_thick) # substrate thickness
R, T, A = [], [], []
for i, lambda_vac in enumerate(self.wavelength * 1e3):
# we assume the last layer is glass
if self.substrate == 'Glass':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]
elif self.substrate == 'Air':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]
else:
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]
# n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict['Cr'][i]]
# mport pdb; pdb.set_trace()
res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
R.append(res['R'])
T.append(res['T'])
R, T = np.array(R), np.array(T)
A = 1 - R - T
if plot:
self.plot_spectrum(R, T, A)
if title:
thick = thickness[1:-1]
title = ' | '.join(['{}nm {}'.format(d, m)
for d, m in zip(thick, materials)])
if self.substrate is not 'Air':
title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'
else:
title = 'Air | ' + title + ' | Air'
plt.title(title, **{'size': '10'})
return R, T, A
def plot_spectrum(self, R, T, A):
plt.plot(self.wavelength * 1000, R, self.wavelength *
1000, T, self.wavelength * 1000, A, linewidth=3)
plt.ylabel('R/T/A')
plt.xlabel('Wavelength (nm)')
plt.legend(['R: Average = {:.2f}%'.
format(np.mean(R)*100),
'T: Average = {:.2f}%'.
format(np.mean(T)*100),
'A: Average = {:.2f}%'.
format(np.mean(A)*100)])
plt.grid('on', linestyle='--')
plt.ylim([0, 1])
# Plotting utils
def visualize_progress(file, x, ax=None, color='b', alpha=1):
df = pd.read_csv(file, sep="\t")
width = 0.5
# x = 'Time'
if ax is None:
fig, ax = plt.subplots(2,1)
sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)
# ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])
sns.lineplot(x=x, y='AverageEpRet', data=df,
ax=ax[1], color=color, alpha=alpha)
plt.fill_between(df[x],
| for i in range(len(progress)):
print(progress[i], 0)
progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]]
return progress | identifier_body |
utils.py |
res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args)
for args in
zip(names_list, thickness_list))
res = np.array(res)
Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]
return Rs, Ts, As
def merge_layers(categories, thicknesses):
'''
Merges consecutive layers with the same material types.
'''
thicknesses = thicknesses[1:-1]
c_output = [categories[0]]
t_output = [thicknesses[0]]
for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):
if c == c_output[-1]:
t_output[-1] += d
continue
else:
c_output.append(c)
t_output.append(d)
t_output.insert(0, np.inf)
t_output.insert(len(t_output), np.inf)
return c_output, t_output
def get_structure(categories, values, materials, ds, continuous=False,
max_value=400):
'''
Given categories and values, return the strucure in the form
(name (str), thickness (nm))
'''
def threshold(value):
'''
'''
names = [materials[item] for item in categories]
if not continuous:
thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
else:
thickness = []
for category, value in zip(categories, values):
name = materials[category]
if name == 'Ag':
thickness.append(
min(max(15, int(value * max_value//2)), max_value))
elif name in METALS:
thickness.append(
min(max(5, int(value * max_value//2)), max_value))
elif name in INSULATORS:
thickness.append(
min(max(1, int(value * max_value//2)), max_value))
else:
raise ValueError('Material not known')
# thickness = [np.inf] + [min(max(5, int(item * 2e2)), 200) for i,
# item in enumerate(values)] + [np.inf]
thickness = [np.inf] + thickness + [np.inf]
return names, thickness
class DesignTracker():
def __init__(self, epochs, **kwargs):
"""
This class tracks the best designs discovered.
"""
if epochs == -1:
self.layer_ls = []
self.thick_ls = []
self.max_ret_ls = []
self.layer_ls = [0] * epochs
self.thick_ls = [0] * epochs
self.max_ret_ls = [0] * epochs
self.kwargs = kwargs
self.current_e = 0
def store(self, layers, thicknesses, ret, e, append_mode=False):
if append_mode:
self.layer_ls.append(layers)
self.thick_ls.append(thicknesses)
self.max_ret_ls.append(ret)
else:
if ret >= self.max_ret_ls[e]:
self.layer_ls[e] = layers
self.thick_ls[e] = thicknesses
self.max_ret_ls[e] = ret
def save_state(self):
# save buffer from all processes
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))
pkl.dump(self, open(filename, 'wb'))
def print_progress(self):
progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
read_progress = []
for i in range(len(progress)):
if progress[i] == (0,0,0):
break
read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])
return read_progress
def print_progress(progress):
for i in range(len(progress)):
print(progress[i], 0)
progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]]
return progress
class TMM_sim():
def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):
'''
This class returns the spectrum given the designed structures.
'''
self.mats = mats
# include substrate
self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats
self.wavelength = wavelength
self.nk_dict = self.load_materials()
self.substrate = substrate
self.substrate_thick = substrate_thick
def load_materials(self):
'''
Load material nk and return corresponding interpolators.
Return:
nk_dict: dict, key -- material name, value: n, k in the
self.wavelength range
'''
nk_dict = {}
for mat in self.all_mats:
nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
nk.dropna(inplace=True)
wl = nk['wl'].to_numpy()
index = (nk['n'] + nk['k'] * 1.j).to_numpy()
mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
mat_nk_fn = interp1d(
mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')
nk_dict[mat] = mat_nk_fn(self.wavelength)
return nk_dict
def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
'''
Input:
materials: list
thickness: list
theta: degree, the incidence angle
Return:
s: array, spectrum
'''
degree = pi/180
if self.substrate != 'Air':
thickness.insert(-1, self.substrate_thick) # substrate thickness
R, T, A = [], [], []
for i, lambda_vac in enumerate(self.wavelength * 1e3):
# we assume the last layer is glass
if self.substrate == 'Glass':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]
elif self.substrate == 'Air':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]
else:
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]
# n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict['Cr'][i]]
# mport pdb; pdb.set_trace()
res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
R.append(res['R'])
T.append(res['T'])
R, T = np.array(R), np.array(T)
A = 1 - R - T
if plot:
self.plot_spectrum(R, T, A)
if title:
thick = thickness[1:-1]
title = ' | '.join(['{}nm {}'.format(d, m)
for d, m in zip(thick, materials)])
if self.substrate is not 'Air':
title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'
else:
title = 'Air | ' + title + ' | Air'
plt.title(title, **{'size': '10'})
return R, T, A
def plot_spectrum(self, R, T, A):
plt.plot(self.wavelength * 1000, R, self.wavelength *
1000, T, self.wavelength * 1000, A, linewidth=3)
plt.ylabel('R/T/A')
plt.xlabel('Wavelength (nm)')
plt.legend(['R: Average = {:.2f}%'.
format(np.mean(R)*100),
'T: Average = {:.2f}%'.
format(np.mean(T)*100),
'A: Average = {:.2f}%'.
format(np.mean(A)*100)])
plt.grid('on', linestyle='--')
plt.ylim([0, 1])
# Plotting utils
def visualize_progress(file, x, ax=None, color='b', alpha=1):
df = pd.read_csv(file, sep="\t")
width = 0.5
# x = 'Time'
if ax is None:
fig, ax = plt.subplots(2,1)
sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)
# ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])
sns.lineplot(x=x, y='AverageEpRet', data=df,
ax=ax[ | random_line_split | ||
utils.py | )
for args in
zip(names_list, thickness_list))
res = np.array(res)
Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]
return Rs, Ts, As
def merge_layers(categories, thicknesses):
'''
Merges consecutive layers with the same material types.
'''
thicknesses = thicknesses[1:-1]
c_output = [categories[0]]
t_output = [thicknesses[0]]
for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):
if c == c_output[-1]:
t_output[-1] += d
continue
else:
c_output.append(c)
t_output.append(d)
t_output.insert(0, np.inf)
t_output.insert(len(t_output), np.inf)
return c_output, t_output
def get_structure(categories, values, materials, ds, continuous=False,
max_value=400):
'''
Given categories and values, return the strucure in the form
(name (str), thickness (nm))
'''
def threshold(value):
'''
'''
names = [materials[item] for item in categories]
if not continuous:
thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
else:
thickness = []
for category, value in zip(categories, values):
name = materials[category]
if name == 'Ag':
thickness.append(
min(max(15, int(value * max_value//2)), max_value))
elif name in METALS:
thickness.append(
min(max(5, int(value * max_value//2)), max_value))
elif name in INSULATORS:
thickness.append(
min(max(1, int(value * max_value//2)), max_value))
else:
raise ValueError('Material not known')
# thickness = [np.inf] + [min(max(5, int(item * 2e2)), 200) for i,
# item in enumerate(values)] + [np.inf]
thickness = [np.inf] + thickness + [np.inf]
return names, thickness
class DesignTracker():
def __init__(self, epochs, **kwargs):
"""
This class tracks the best designs discovered.
"""
if epochs == -1:
self.layer_ls = []
self.thick_ls = []
self.max_ret_ls = []
self.layer_ls = [0] * epochs
self.thick_ls = [0] * epochs
self.max_ret_ls = [0] * epochs
self.kwargs = kwargs
self.current_e = 0
def store(self, layers, thicknesses, ret, e, append_mode=False):
if append_mode:
self.layer_ls.append(layers)
self.thick_ls.append(thicknesses)
self.max_ret_ls.append(ret)
else:
if ret >= self.max_ret_ls[e]:
self.layer_ls[e] = layers
self.thick_ls[e] = thicknesses
self.max_ret_ls[e] = ret
def save_state(self):
# save buffer from all processes
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))
pkl.dump(self, open(filename, 'wb'))
def print_progress(self):
progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
read_progress = []
for i in range(len(progress)):
if progress[i] == (0,0,0):
break
read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])
return read_progress
def print_progress(progress):
for i in range(len(progress)):
|
return progress
class TMM_sim():
def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):
'''
This class returns the spectrum given the designed structures.
'''
self.mats = mats
# include substrate
self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats
self.wavelength = wavelength
self.nk_dict = self.load_materials()
self.substrate = substrate
self.substrate_thick = substrate_thick
def load_materials(self):
'''
Load material nk and return corresponding interpolators.
Return:
nk_dict: dict, key -- material name, value: n, k in the
self.wavelength range
'''
nk_dict = {}
for mat in self.all_mats:
nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
nk.dropna(inplace=True)
wl = nk['wl'].to_numpy()
index = (nk['n'] + nk['k'] * 1.j).to_numpy()
mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
mat_nk_fn = interp1d(
mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')
nk_dict[mat] = mat_nk_fn(self.wavelength)
return nk_dict
def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
'''
Input:
materials: list
thickness: list
theta: degree, the incidence angle
Return:
s: array, spectrum
'''
degree = pi/180
if self.substrate != 'Air':
thickness.insert(-1, self.substrate_thick) # substrate thickness
R, T, A = [], [], []
for i, lambda_vac in enumerate(self.wavelength * 1e3):
# we assume the last layer is glass
if self.substrate == 'Glass':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]
elif self.substrate == 'Air':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]
else:
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]
# n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict['Cr'][i]]
# mport pdb; pdb.set_trace()
res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
R.append(res['R'])
T.append(res['T'])
R, T = np.array(R), np.array(T)
A = 1 - R - T
if plot:
self.plot_spectrum(R, T, A)
if title:
thick = thickness[1:-1]
title = ' | '.join(['{}nm {}'.format(d, m)
for d, m in zip(thick, materials)])
if self.substrate is not 'Air':
title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'
else:
title = 'Air | ' + title + ' | Air'
plt.title(title, **{'size': '10'})
return R, T, A
def plot_spectrum(self, R, T, A):
plt.plot(self.wavelength * 1000, R, self.wavelength *
1000, T, self.wavelength * 1000, A, linewidth=3)
plt.ylabel('R/T/A')
plt.xlabel('Wavelength (nm)')
plt.legend(['R: Average = {:.2f}%'.
format(np.mean(R)*100),
'T: Average = {:.2f}%'.
format(np.mean(T)*100),
'A: Average = {:.2f}%'.
format(np.mean(A)*100)])
plt.grid('on', linestyle='--')
plt.ylim([0, 1])
# Plotting utils
def visualize_progress(file, x, ax=None, color='b', alpha=1):
df = pd.read_csv(file, sep="\t")
width = 0.5
# x = 'Time'
if ax is None:
fig, ax = plt.subplots(2,1)
sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)
# ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])
sns.lineplot(x=x, y='AverageEpRet', data=df,
ax=ax[1], color=color, alpha=alpha)
plt.fill_between(df[x | print(progress[i], 0)
progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]] | conditional_block |
utils.py | )
for args in
zip(names_list, thickness_list))
res = np.array(res)
Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]
return Rs, Ts, As
def | (categories, thicknesses):
'''
Merges consecutive layers with the same material types.
'''
thicknesses = thicknesses[1:-1]
c_output = [categories[0]]
t_output = [thicknesses[0]]
for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):
if c == c_output[-1]:
t_output[-1] += d
continue
else:
c_output.append(c)
t_output.append(d)
t_output.insert(0, np.inf)
t_output.insert(len(t_output), np.inf)
return c_output, t_output
def get_structure(categories, values, materials, ds, continuous=False,
max_value=400):
'''
Given categories and values, return the strucure in the form
(name (str), thickness (nm))
'''
def threshold(value):
'''
'''
names = [materials[item] for item in categories]
if not continuous:
thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
else:
thickness = []
for category, value in zip(categories, values):
name = materials[category]
if name == 'Ag':
thickness.append(
min(max(15, int(value * max_value//2)), max_value))
elif name in METALS:
thickness.append(
min(max(5, int(value * max_value//2)), max_value))
elif name in INSULATORS:
thickness.append(
min(max(1, int(value * max_value//2)), max_value))
else:
raise ValueError('Material not known')
# thickness = [np.inf] + [min(max(5, int(item * 2e2)), 200) for i,
# item in enumerate(values)] + [np.inf]
thickness = [np.inf] + thickness + [np.inf]
return names, thickness
class DesignTracker():
def __init__(self, epochs, **kwargs):
"""
This class tracks the best designs discovered.
"""
if epochs == -1:
self.layer_ls = []
self.thick_ls = []
self.max_ret_ls = []
self.layer_ls = [0] * epochs
self.thick_ls = [0] * epochs
self.max_ret_ls = [0] * epochs
self.kwargs = kwargs
self.current_e = 0
def store(self, layers, thicknesses, ret, e, append_mode=False):
if append_mode:
self.layer_ls.append(layers)
self.thick_ls.append(thicknesses)
self.max_ret_ls.append(ret)
else:
if ret >= self.max_ret_ls[e]:
self.layer_ls[e] = layers
self.thick_ls[e] = thicknesses
self.max_ret_ls[e] = ret
def save_state(self):
# save buffer from all processes
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))
pkl.dump(self, open(filename, 'wb'))
def print_progress(self):
progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
read_progress = []
for i in range(len(progress)):
if progress[i] == (0,0,0):
break
read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])
return read_progress
def print_progress(progress):
for i in range(len(progress)):
print(progress[i], 0)
progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]]
return progress
class TMM_sim():
def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):
'''
This class returns the spectrum given the designed structures.
'''
self.mats = mats
# include substrate
self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats
self.wavelength = wavelength
self.nk_dict = self.load_materials()
self.substrate = substrate
self.substrate_thick = substrate_thick
def load_materials(self):
'''
Load material nk and return corresponding interpolators.
Return:
nk_dict: dict, key -- material name, value: n, k in the
self.wavelength range
'''
nk_dict = {}
for mat in self.all_mats:
nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
nk.dropna(inplace=True)
wl = nk['wl'].to_numpy()
index = (nk['n'] + nk['k'] * 1.j).to_numpy()
mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
mat_nk_fn = interp1d(
mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')
nk_dict[mat] = mat_nk_fn(self.wavelength)
return nk_dict
def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
'''
Input:
materials: list
thickness: list
theta: degree, the incidence angle
Return:
s: array, spectrum
'''
degree = pi/180
if self.substrate != 'Air':
thickness.insert(-1, self.substrate_thick) # substrate thickness
R, T, A = [], [], []
for i, lambda_vac in enumerate(self.wavelength * 1e3):
# we assume the last layer is glass
if self.substrate == 'Glass':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]
elif self.substrate == 'Air':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]
else:
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]
# n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict['Cr'][i]]
# mport pdb; pdb.set_trace()
res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
R.append(res['R'])
T.append(res['T'])
R, T = np.array(R), np.array(T)
A = 1 - R - T
if plot:
self.plot_spectrum(R, T, A)
if title:
thick = thickness[1:-1]
title = ' | '.join(['{}nm {}'.format(d, m)
for d, m in zip(thick, materials)])
if self.substrate is not 'Air':
title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'
else:
title = 'Air | ' + title + ' | Air'
plt.title(title, **{'size': '10'})
return R, T, A
def plot_spectrum(self, R, T, A):
plt.plot(self.wavelength * 1000, R, self.wavelength *
1000, T, self.wavelength * 1000, A, linewidth=3)
plt.ylabel('R/T/A')
plt.xlabel('Wavelength (nm)')
plt.legend(['R: Average = {:.2f}%'.
format(np.mean(R)*100),
'T: Average = {:.2f}%'.
format(np.mean(T)*100),
'A: Average = {:.2f}%'.
format(np.mean(A)*100)])
plt.grid('on', linestyle='--')
plt.ylim([0, 1])
# Plotting utils
def visualize_progress(file, x, ax=None, color='b', alpha=1):
df = pd.read_csv(file, sep="\t")
width = 0.5
# x = 'Time'
if ax is None:
fig, ax = plt.subplots(2,1)
sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)
# ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])
sns.lineplot(x=x, y='AverageEpRet', data=df,
ax=ax[1], color=color, alpha=alpha)
plt.fill_between(df[x | merge_layers | identifier_name |
ImpConcat-Recall.py | desired confounds from the confounds_regressors.tsv file from fmriprep, trim the columns corresponding to trimmed volumes, and save as a .txt file.
starttime = time.time()
confounds=[]
confounds_all=[]
mc_all=[]
ntr=[]
ntr=np.zeros((n_runs_recall,1))
for r in range(firstrun,lastrun+1):
fname='_ses-01_task-recall_run-0%i_desc-confounds_regressors.tsv' % (r)
confounds = pd.read_csv(ses1_dir + sub + fname, sep='\t', header=(0))
confounds_selected=confounds[['trans_x','trans_y','trans_z','rot_x','rot_y','rot_z','framewise_displacement','a_comp_cor_00','a_comp_cor_01','a_comp_cor_02','a_comp_cor_03','a_comp_cor_04','a_comp_cor_05']][n_trunc:]
confounds_selected=pd.DataFrame(confounds_selected)
confounds_selected.to_csv(out_dir + 'ses-01/' + sub + '_ses-01_task-recall_run-0%i_confounds_selected.txt' % r, index=False, sep='\t', mode='w')
if 0==firstrun:
ntr[r]=confounds_selected.shape[0]
if 1==firstrun:
ntr[r-1]=confounds_selected.shape[0]
if r==firstrun:
confounds_all=confounds_selected
else:
confounds_all=np.vstack([confounds_all,confounds_selected])
print(confounds_selected.shape[0])
print(ntr)
print(sum(ntr[0]))
# In[15]:
mask_imgs=[]
for run in range(firstrun,lastrun+1):
mask_name = ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz' % run
mask_imgs.append(mask_name)
template = load_mni152_template()
i=np.eye(3)*3
template =image.resample_img(template, target_affine=i)
# intersect 3 view brain masks
avg_mask=intersect_masks(mask_imgs, threshold=0.5, connected=True)
avg_mask = resample_to_img(avg_mask, template)
thresha=avg_mask.dataobj>-10000
thresh=avg_mask.dataobj>0.5
avg_mask.dataobj[thresha] = 0
avg_mask.dataobj[thresh] = 1
if ipynby==1:
crange=1
plt.figure(figsize=(16,10))
this_img = avg_mask.dataobj[50,:,:];
plt.imshow(this_img,cmap="viridis",vmin=0,vmax=crange,origin='lower',interpolation='none',aspect="auto")
cbar = plt.colorbar()
dimsize=avg_mask.header.get_zooms()
affine_mat = avg_mask.affine
print(affine_mat)
coords = np.where(avg_mask.get_fdata())
# In[16]:
#plot average brain????
t1_file = anat_fmriprep_dir + sub + '_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
print(t1_file)
t1_img = image.load_img(t1_file)
t1_img = resample_to_img(t1_img, template)
if ipynby==1:
plot_roi(avg_mask, bg_img=t1_img)
# Save the mask
output_name_mask = mask_fold + '%s_%s_brain.nii.gz' % (sub, ses)
'''hdr = avg_mask.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2]))
nib.save(avg_mask, output_name_mask)'''
# In[17]:
def mod_smooth(in_file, mask_file, fwhm, smooth_type):
|
# In[18]:
#truncate first n_trunc TRs
#confounds_trunc=confounds_selected[3:end]
epi_trunc=[]
#https://github.com/INCF/BrainImagingPipelines/blob/master/bips/workflows/gablab/wips/scripts/modular_nodes.py
print('Number of runs to concatenate:', n_runs_recall)
for run in range(firstrun,lastrun+1):#lastrun+1
out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall9_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
if os.path.exists(out_smooth):
proceeeeed=[]
epi_data=nib.load(out_smooth)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
else:
epi_file=ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz' % run
epi_data=nib.load(epi_file)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
# Save the volume
output_name = (out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs.nii.gz' % (sub, run, n_trunc))
bold_nii = nib.Nifti1Image(epi_trunc, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2], dimsize[3]))
nib.save(bold_nii, output_name)
# smooth with susan
smoothed_file = mod_smooth(output_name,output_name_mask,fwhmval, 'susan')
#move file
in_smooth=(out_dir+'susan_smooth/smooth/mapflow/_smooth0/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs_smooth.nii.gz' % (sub, run, n_trunc))
#out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
os.rename(in_smooth,out_smooth)
# ## Load fMRI data <a id="load_fmri"></a>
# #### Get voxels from an ROI
#
# We will extract BOLD data, only for vox | import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import os
if smooth_type == 'susan':
if fwhm == 0:
return in_file
smooth = create_susan_smooth()
smooth.base_dir = out_dir#os.getcwd()
smooth.inputs.inputnode.fwhm = fwhm
smooth.inputs.inputnode.mask_file = mask_file
smooth.inputs.inputnode.in_files = in_file
#smooth.outputs.outputnode.smoothed_files='/jukebox/norman/jantony/surprisesuspense/data/bids/Norman/Antony/ss/derivatives/firstlevel/sub-02/ses-01/sub-02_ses-01_task-recall_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold_trim3TRs_smooth.nii.gz'
res = smooth.run()
smoothed_file=[] #smoothed_file = res.outputs.outputnode.smoothed_files
return smoothed_file | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.