file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
helper.go | /*
Copyright 2019 The Machine Controller Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"errors"
"fmt"
"strconv"
"sync"
"time"
"github.com/gophercloud/gophercloud"
goopenstack "github.com/gophercloud/gophercloud/openstack"
osavailabilityzones "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones"
osflavors "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
osregions "github.com/gophercloud/gophercloud/openstack/identity/v3/regions"
osimagesv2 "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images"
osfloatingips "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
ossecuritygroups "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups"
osecruritygrouprules "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules"
osnetworks "github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
osports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
ossubnets "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
"github.com/gophercloud/gophercloud/pagination"
"go.uber.org/zap"
)
var (
errNotFound = errors.New("not found")
securityGroupCreationLock = sync.Mutex{}
)
const (
errorStatus = "ERROR"
floatingReassignIPCheckPeriod = 3 * time.Second
)
func getRegion(client *gophercloud.ProviderClient, name string) (*osregions.Region, error) {
idClient, err := goopenstack.NewIdentityV3(client, gophercloud.EndpointOpts{})
if err != nil {
return nil, err
}
return osregions.Get(idClient, name).Extract()
}
func getRegions(client *gophercloud.ProviderClient) ([]osregions.Region, error) {
idClient, err := goopenstack.NewIdentityV3(client, gophercloud.EndpointOpts{})
if err != nil {
return nil, err
}
listOpts := osregions.ListOpts{
ParentRegionID: "",
}
allPages, err := osregions.List(idClient, listOpts).AllPages()
if err != nil {
return nil, err
}
regions, err := osregions.ExtractRegions(allPages)
if err != nil {
return nil, err
}
return regions, nil
}
func getNewComputeV2(client *gophercloud.ProviderClient, c *Config) (*gophercloud.ServiceClient, error) {
computeClient, err := goopenstack.NewComputeV2(client, gophercloud.EndpointOpts{Region: c.Region})
if err != nil {
return nil, err
}
if c.ComputeAPIVersion != "" {
// Validation - empty value default to microversion 2.0=2.1
version, err := strconv.ParseFloat(c.ComputeAPIVersion, 32)
if err != nil || version < 2.0 {
return nil, fmt.Errorf("invalid computeAPIVersion: %w", err)
}
// See https://github.com/gophercloud/gophercloud/blob/master/docs/MICROVERSIONS.md
computeClient.Microversion = c.ComputeAPIVersion
}
return computeClient, nil
}
func getAvailabilityZones(computeClient *gophercloud.ServiceClient) ([]osavailabilityzones.AvailabilityZone, error) {
allPages, err := osavailabilityzones.List(computeClient).AllPages()
if err != nil {
return nil, err
}
return osavailabilityzones.ExtractAvailabilityZones(allPages)
}
func getAvailabilityZone(computeClient *gophercloud.ServiceClient, c *Config) (*osavailabilityzones.AvailabilityZone, error) {
zones, err := getAvailabilityZones(computeClient)
if err != nil {
return nil, err
}
for _, z := range zones {
if z.ZoneName == c.AvailabilityZone {
return &z, nil
}
}
return nil, errNotFound
}
func getImageByName(imageClient *gophercloud.ServiceClient, c *Config) (*osimagesv2.Image, error) {
var allImages []osimagesv2.Image
pager := osimagesv2.List(imageClient, osimagesv2.ListOpts{Name: c.Image})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
images, err := osimagesv2.ExtractImages(page)
if err != nil {
return false, err
}
allImages = append(allImages, images...)
return true, nil
})
if err != nil {
return nil, err
}
if len(allImages) == 0 {
return nil, errNotFound
}
return &allImages[0], nil
}
func getFlavor(computeClient *gophercloud.ServiceClient, c *Config) (*osflavors.Flavor, error) {
var allFlavors []osflavors.Flavor
pager := osflavors.ListDetail(computeClient, osflavors.ListOpts{})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
flavors, err := osflavors.ExtractFlavors(page)
if err != nil {
return false, err | return true, nil
})
if err != nil {
return nil, err
}
for _, f := range allFlavors {
if f.Name == c.Flavor {
return &f, nil
}
}
return nil, errNotFound
}
func getSecurityGroup(client *gophercloud.ProviderClient, region, name string) (*ossecuritygroups.SecGroup, error) {
netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: region})
if err != nil {
return nil, err
}
var allGroups []ossecuritygroups.SecGroup
pager := ossecuritygroups.List(netClient, ossecuritygroups.ListOpts{})
err = pager.EachPage(func(page pagination.Page) (bool, error) {
groups, err := ossecuritygroups.ExtractGroups(page)
if err != nil {
return false, err
}
allGroups = append(allGroups, groups...)
return true, nil
})
if err != nil {
return nil, err
}
for _, g := range allGroups {
if g.Name == name {
return &g, nil
}
}
return nil, errNotFound
}
func getNetworks(netClient *gophercloud.ServiceClient) ([]osnetworks.Network, error) {
var allNetworks []osnetworks.Network
pager := osnetworks.List(netClient, osnetworks.ListOpts{})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
networks, err := osnetworks.ExtractNetworks(page)
if err != nil {
return false, err
}
allNetworks = append(allNetworks, networks...)
return true, nil
})
if err != nil {
return nil, err
}
return allNetworks, nil
}
func getNetwork(netClient *gophercloud.ServiceClient, nameOrID string) (*osnetworks.Network, error) {
allNetworks, err := getNetworks(netClient)
if err != nil {
return nil, err
}
for _, n := range allNetworks {
if n.Name == nameOrID || n.ID == nameOrID {
return &n, nil
}
}
return nil, errNotFound
}
func getSubnets(netClient *gophercloud.ServiceClient, networkID string) ([]ossubnets.Subnet, error) {
listOpts := ossubnets.ListOpts{}
if networkID != "" {
listOpts = ossubnets.ListOpts{NetworkID: networkID}
}
var allSubnets []ossubnets.Subnet
pager := ossubnets.List(netClient, listOpts)
err := pager.EachPage(func(page pagination.Page) (bool, error) {
subnets, err := ossubnets.ExtractSubnets(page)
if err != nil {
return false, err
}
allSubnets = append(allSubnets, subnets...)
return true, nil
})
if err != nil {
return nil, err
}
return allSubnets, nil
}
func getSubnet(netClient *gophercloud.ServiceClient, nameOrID string) (*ossubnets.Subnet, error) {
allSubnets, err := getSubnets(netClient, "")
if err != nil {
return nil, err
}
for _, s := range allSubnets {
if s.Name == nameOrID || s.ID == nameOrID {
return &s, nil
}
}
return nil, errNotFound
}
func ensureKubernetesSecurityGroupExist(log *zap.SugaredLogger, client *gophercloud.ProviderClient, region, name string) error {
// We need a mutex here because otherwise if more than one machine gets created at roughly the same time
// we will create two security groups and subsequently not be able anymore to identify our security group
// by name
securityGroupCreationLock.Lock()
defer securityGroupCreationLock.Unlock()
netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: region})
if err != nil {
return osErrorToTerminalError(log, err, "failed to get network client")
}
_, err = getSecurityGroup(client, region, name)
if err != nil {
if errors.Is(err, errNotFound) {
sg, err := ossecuritygroups.Create(netClient, ossecuritygroups.CreateOpts{Name: name}).Extract()
if err != nil {
return osErrorToTerminalError(log, err, fmt.Sprintf("failed to create security group %s", name))
}
rules := []osecruritygrouprules.CreateOpts{
{
// Allows ipv4 traffic within this group
Direction: osecruritygrouprules.DirIngress,
EtherType: osecruritygrouprules.EtherType4,
SecGroupID: sg.ID,
RemoteGroupID: sg.ID,
},
{
// Allows ipv6 traffic within this group
Direction: osecruritygrouprules.DirIngress,
EtherType: osecruritygrouprules.EtherType6,
SecGroupID: sg.ID,
RemoteGroupID: sg.ID,
},
}
for _, opts := range rules {
if _, err := osecruritygrouprules.Create(netClient, opts).Extract(); err != nil {
return osErrorToTerminalError(log, err, "failed to create security group rule")
}
}
}
}
return nil
}
func getFreeFloatingIPs(netClient *gophercloud.ServiceClient, floatingIPPool *osnetworks.Network) ([]osfloatingips.FloatingIP, error) {
allPages, err := osfloatingips.List(netClient, osfloatingips.ListOpts{FloatingNetworkID: floatingIPPool.ID}).AllPages()
if err != nil {
return nil, err
}
allFIPs, err := osfloatingips.ExtractFloatingIPs(allPages)
if err != nil {
return nil, err
}
var freeFIPs []osfloatingips.FloatingIP
for _, f := range allFIPs {
// See some details about this test here:
// https://github.com/kubermatic/machine-controller/pull/28#discussion_r163773619
// The check of FixedIP has been added to avoid false positives on OTC,
// where FIPs associated to Classic LoadBalandcers never get assigned a
// PortID even when they are in use.
if f.Status != errorStatus && f.PortID == "" && f.FixedIP == "" {
freeFIPs = append(freeFIPs, f)
}
}
return freeFIPs, nil
}
func createFloatingIP(netClient *gophercloud.ServiceClient, portID string, floatingIPPool *osnetworks.Network) (*osfloatingips.FloatingIP, error) {
opts := osfloatingips.CreateOpts{
FloatingNetworkID: floatingIPPool.ID,
PortID: portID,
}
return osfloatingips.Create(netClient, opts).Extract()
}
func getInstancePort(netClient *gophercloud.ServiceClient, instanceID, networkID string) (*osports.Port, error) {
allPages, err := osports.List(netClient, osports.ListOpts{
DeviceID: instanceID,
NetworkID: networkID,
}).AllPages()
if err != nil {
return nil, err
}
allPorts, err := osports.ExtractPorts(allPages)
if err != nil {
return nil, err
}
for _, p := range allPorts {
if p.NetworkID == networkID && p.DeviceID == instanceID {
return &p, nil
}
}
return nil, errNotFound
}
func getDefaultNetwork(netClient *gophercloud.ServiceClient) (*osnetworks.Network, error) {
networks, err := getNetworks(netClient)
if err != nil {
return nil, err
}
if len(networks) == 1 {
return &networks[0], nil
}
// Networks without subnets can't be used, try finding a default by excluding them
// However the network object itself still contains the subnet, the only difference
// is that the subnet can not be retrieved by itself
var candidates []osnetworks.Network
NetworkLoop:
for _, network := range networks {
for _, subnet := range network.Subnets {
_, err := getSubnet(netClient, subnet)
if errors.Is(err, errNotFound) {
continue
} else if err != nil {
return nil, err
}
candidates = append(candidates, network)
continue NetworkLoop
}
}
if len(candidates) == 1 {
return &candidates[0], nil
}
return nil, fmt.Errorf("%d candidate networks found", len(candidates))
}
func getDefaultSubnet(netClient *gophercloud.ServiceClient, network *osnetworks.Network) (*string, error) {
if len(network.Subnets) == 1 {
return &network.Subnets[0], nil
}
subnets, err := getSubnets(netClient, network.ID)
if err != nil {
return nil, err
}
if len(subnets) == 0 {
return nil, errors.New("no subnets available")
}
return &subnets[0].ID, nil
} | }
allFlavors = append(allFlavors, flavors...) | random_line_split |
helper.go | /*
Copyright 2019 The Machine Controller Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"errors"
"fmt"
"strconv"
"sync"
"time"
"github.com/gophercloud/gophercloud"
goopenstack "github.com/gophercloud/gophercloud/openstack"
osavailabilityzones "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones"
osflavors "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
osregions "github.com/gophercloud/gophercloud/openstack/identity/v3/regions"
osimagesv2 "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images"
osfloatingips "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
ossecuritygroups "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups"
osecruritygrouprules "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules"
osnetworks "github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
osports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
ossubnets "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
"github.com/gophercloud/gophercloud/pagination"
"go.uber.org/zap"
)
var (
errNotFound = errors.New("not found")
securityGroupCreationLock = sync.Mutex{}
)
const (
errorStatus = "ERROR"
floatingReassignIPCheckPeriod = 3 * time.Second
)
// getRegion looks up a single region by name via the Identity v3 API.
func getRegion(client *gophercloud.ProviderClient, name string) (*osregions.Region, error) {
	identity, err := goopenstack.NewIdentityV3(client, gophercloud.EndpointOpts{})
	if err != nil {
		return nil, err
	}
	return osregions.Get(identity, name).Extract()
}
// getRegions returns all top-level regions (no parent region filter) known
// to the Identity v3 API.
func getRegions(client *gophercloud.ProviderClient) ([]osregions.Region, error) {
	identity, err := goopenstack.NewIdentityV3(client, gophercloud.EndpointOpts{})
	if err != nil {
		return nil, err
	}
	pages, err := osregions.List(identity, osregions.ListOpts{ParentRegionID: ""}).AllPages()
	if err != nil {
		return nil, err
	}
	return osregions.ExtractRegions(pages)
}
// getNewComputeV2 builds a Compute v2 (Nova) service client for the configured
// region. If c.ComputeAPIVersion is set, it is validated and pinned as the
// client's microversion; an empty value keeps the gophercloud default.
func getNewComputeV2(client *gophercloud.ProviderClient, c *Config) (*gophercloud.ServiceClient, error) {
	computeClient, err := goopenstack.NewComputeV2(client, gophercloud.EndpointOpts{Region: c.Region})
	if err != nil {
		return nil, err
	}
	if c.ComputeAPIVersion != "" {
		// Validation - empty value default to microversion 2.0=2.1
		version, err := strconv.ParseFloat(c.ComputeAPIVersion, 32)
		if err != nil {
			return nil, fmt.Errorf("invalid computeAPIVersion %q: %w", c.ComputeAPIVersion, err)
		}
		if version < 2.0 {
			// BUG FIX: this path used to wrap a nil error with %w, which
			// rendered as "%!w(<nil>)" instead of a useful message.
			return nil, fmt.Errorf("invalid computeAPIVersion %q: must be >= 2.0", c.ComputeAPIVersion)
		}
		// See https://github.com/gophercloud/gophercloud/blob/master/docs/MICROVERSIONS.md
		computeClient.Microversion = c.ComputeAPIVersion
	}
	return computeClient, nil
}
// getAvailabilityZones lists every compute availability zone.
func getAvailabilityZones(computeClient *gophercloud.ServiceClient) ([]osavailabilityzones.AvailabilityZone, error) {
	pages, err := osavailabilityzones.List(computeClient).AllPages()
	if err != nil {
		return nil, err
	}
	return osavailabilityzones.ExtractAvailabilityZones(pages)
}
// getAvailabilityZone returns the availability zone whose name matches
// c.AvailabilityZone, or errNotFound if no zone matches.
func getAvailabilityZone(computeClient *gophercloud.ServiceClient, c *Config) (*osavailabilityzones.AvailabilityZone, error) {
	zones, err := getAvailabilityZones(computeClient)
	if err != nil {
		return nil, err
	}
	for i := range zones {
		if zones[i].ZoneName == c.AvailabilityZone {
			return &zones[i], nil
		}
	}
	return nil, errNotFound
}
// getImageByName returns the first image whose name equals c.Image,
// or errNotFound when the image service reports no match.
func getImageByName(imageClient *gophercloud.ServiceClient, c *Config) (*osimagesv2.Image, error) {
	var matches []osimagesv2.Image
	err := osimagesv2.List(imageClient, osimagesv2.ListOpts{Name: c.Image}).
		EachPage(func(page pagination.Page) (bool, error) {
			imgs, extractErr := osimagesv2.ExtractImages(page)
			if extractErr != nil {
				return false, extractErr
			}
			matches = append(matches, imgs...)
			return true, nil
		})
	if err != nil {
		return nil, err
	}
	if len(matches) == 0 {
		return nil, errNotFound
	}
	return &matches[0], nil
}
// getFlavor returns the flavor whose name equals c.Flavor, or errNotFound.
// The flavor API has no server-side name filter here, so all flavors are
// paged through and matched client-side.
func getFlavor(computeClient *gophercloud.ServiceClient, c *Config) (*osflavors.Flavor, error) {
	var collected []osflavors.Flavor
	err := osflavors.ListDetail(computeClient, osflavors.ListOpts{}).
		EachPage(func(page pagination.Page) (bool, error) {
			pageFlavors, extractErr := osflavors.ExtractFlavors(page)
			if extractErr != nil {
				return false, extractErr
			}
			collected = append(collected, pageFlavors...)
			return true, nil
		})
	if err != nil {
		return nil, err
	}
	for i := range collected {
		if collected[i].Name == c.Flavor {
			return &collected[i], nil
		}
	}
	return nil, errNotFound
}
// getSecurityGroup returns the security group with the given name in the
// given region, or errNotFound when no group matches.
func getSecurityGroup(client *gophercloud.ProviderClient, region, name string) (*ossecuritygroups.SecGroup, error) {
	netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: region})
	if err != nil {
		return nil, err
	}
	var collected []ossecuritygroups.SecGroup
	err = ossecuritygroups.List(netClient, ossecuritygroups.ListOpts{}).
		EachPage(func(page pagination.Page) (bool, error) {
			pageGroups, extractErr := ossecuritygroups.ExtractGroups(page)
			if extractErr != nil {
				return false, extractErr
			}
			collected = append(collected, pageGroups...)
			return true, nil
		})
	if err != nil {
		return nil, err
	}
	for i := range collected {
		if collected[i].Name == name {
			return &collected[i], nil
		}
	}
	return nil, errNotFound
}
// getNetworks lists every network visible to the given networking client.
func getNetworks(netClient *gophercloud.ServiceClient) ([]osnetworks.Network, error) {
	var collected []osnetworks.Network
	err := osnetworks.List(netClient, osnetworks.ListOpts{}).
		EachPage(func(page pagination.Page) (bool, error) {
			pageNetworks, extractErr := osnetworks.ExtractNetworks(page)
			if extractErr != nil {
				return false, extractErr
			}
			collected = append(collected, pageNetworks...)
			return true, nil
		})
	if err != nil {
		return nil, err
	}
	return collected, nil
}
// getNetwork resolves a network by name or ID, returning errNotFound when
// neither field matches any network.
func getNetwork(netClient *gophercloud.ServiceClient, nameOrID string) (*osnetworks.Network, error) {
	networks, err := getNetworks(netClient)
	if err != nil {
		return nil, err
	}
	for i := range networks {
		if networks[i].Name == nameOrID || networks[i].ID == nameOrID {
			return &networks[i], nil
		}
	}
	return nil, errNotFound
}
// getSubnets lists subnets, optionally restricted to a single network.
// An empty networkID lists subnets across all networks.
func getSubnets(netClient *gophercloud.ServiceClient, networkID string) ([]ossubnets.Subnet, error) {
	var opts ossubnets.ListOpts
	if networkID != "" {
		opts.NetworkID = networkID
	}
	var collected []ossubnets.Subnet
	err := ossubnets.List(netClient, opts).
		EachPage(func(page pagination.Page) (bool, error) {
			pageSubnets, extractErr := ossubnets.ExtractSubnets(page)
			if extractErr != nil {
				return false, extractErr
			}
			collected = append(collected, pageSubnets...)
			return true, nil
		})
	if err != nil {
		return nil, err
	}
	return collected, nil
}
// getSubnet resolves a subnet by name or ID across all networks,
// returning errNotFound when no subnet matches.
func getSubnet(netClient *gophercloud.ServiceClient, nameOrID string) (*ossubnets.Subnet, error) {
	subnets, err := getSubnets(netClient, "")
	if err != nil {
		return nil, err
	}
	for i := range subnets {
		if subnets[i].Name == nameOrID || subnets[i].ID == nameOrID {
			return &subnets[i], nil
		}
	}
	return nil, errNotFound
}
// ensureKubernetesSecurityGroupExist makes sure a security group with the
// given name exists in the region, creating it (plus intra-group ingress
// rules for IPv4 and IPv6) when it is missing.
func ensureKubernetesSecurityGroupExist(log *zap.SugaredLogger, client *gophercloud.ProviderClient, region, name string) error {
	// We need a mutex here because otherwise if more than one machine gets created at roughly the same time
	// we will create two security groups and subsequently not be able anymore to identify our security group
	// by name
	securityGroupCreationLock.Lock()
	defer securityGroupCreationLock.Unlock()

	netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: region})
	if err != nil {
		return osErrorToTerminalError(log, err, "failed to get network client")
	}

	_, err = getSecurityGroup(client, region, name)
	if err == nil {
		// Group already exists; nothing to do.
		return nil
	}
	if !errors.Is(err, errNotFound) {
		// BUG FIX: lookup failures other than "not found" used to be silently
		// ignored, returning success even though the group may not exist.
		return osErrorToTerminalError(log, err, fmt.Sprintf("failed to look up security group %s", name))
	}

	sg, err := ossecuritygroups.Create(netClient, ossecuritygroups.CreateOpts{Name: name}).Extract()
	if err != nil {
		return osErrorToTerminalError(log, err, fmt.Sprintf("failed to create security group %s", name))
	}

	rules := []osecruritygrouprules.CreateOpts{
		{
			// Allows ipv4 traffic within this group
			Direction:     osecruritygrouprules.DirIngress,
			EtherType:     osecruritygrouprules.EtherType4,
			SecGroupID:    sg.ID,
			RemoteGroupID: sg.ID,
		},
		{
			// Allows ipv6 traffic within this group
			Direction:     osecruritygrouprules.DirIngress,
			EtherType:     osecruritygrouprules.EtherType6,
			SecGroupID:    sg.ID,
			RemoteGroupID: sg.ID,
		},
	}
	for _, opts := range rules {
		if _, err := osecruritygrouprules.Create(netClient, opts).Extract(); err != nil {
			return osErrorToTerminalError(log, err, "failed to create security group rule")
		}
	}
	return nil
}
func getFreeFloatingIPs(netClient *gophercloud.ServiceClient, floatingIPPool *osnetworks.Network) ([]osfloatingips.FloatingIP, error) {
allPages, err := osfloatingips.List(netClient, osfloatingips.ListOpts{FloatingNetworkID: floatingIPPool.ID}).AllPages()
if err != nil |
allFIPs, err := osfloatingips.ExtractFloatingIPs(allPages)
if err != nil {
return nil, err
}
var freeFIPs []osfloatingips.FloatingIP
for _, f := range allFIPs {
// See some details about this test here:
// https://github.com/kubermatic/machine-controller/pull/28#discussion_r163773619
// The check of FixedIP has been added to avoid false positives on OTC,
// where FIPs associated to Classic LoadBalandcers never get assigned a
// PortID even when they are in use.
if f.Status != errorStatus && f.PortID == "" && f.FixedIP == "" {
freeFIPs = append(freeFIPs, f)
}
}
return freeFIPs, nil
}
// createFloatingIP allocates a new floating IP from the given pool and
// associates it with the given port.
func createFloatingIP(netClient *gophercloud.ServiceClient, portID string, floatingIPPool *osnetworks.Network) (*osfloatingips.FloatingIP, error) {
	return osfloatingips.Create(netClient, osfloatingips.CreateOpts{
		FloatingNetworkID: floatingIPPool.ID,
		PortID:            portID,
	}).Extract()
}
// getInstancePort returns the port that belongs to the given instance on the
// given network, or errNotFound. The result is re-checked client-side even
// though the list request already filters by device and network.
func getInstancePort(netClient *gophercloud.ServiceClient, instanceID, networkID string) (*osports.Port, error) {
	pages, err := osports.List(netClient, osports.ListOpts{
		DeviceID:  instanceID,
		NetworkID: networkID,
	}).AllPages()
	if err != nil {
		return nil, err
	}
	ports, err := osports.ExtractPorts(pages)
	if err != nil {
		return nil, err
	}
	for i := range ports {
		if ports[i].NetworkID == networkID && ports[i].DeviceID == instanceID {
			return &ports[i], nil
		}
	}
	return nil, errNotFound
}
// getDefaultNetwork tries to pick a single default network. If exactly one
// network exists it is used; otherwise networks without any resolvable subnet
// are excluded and a unique remaining candidate is returned. An error is
// returned when the candidate count is not exactly one.
func getDefaultNetwork(netClient *gophercloud.ServiceClient) (*osnetworks.Network, error) {
	networks, err := getNetworks(netClient)
	if err != nil {
		return nil, err
	}
	if len(networks) == 1 {
		return &networks[0], nil
	}

	// Networks without subnets can't be used, try finding a default by excluding them
	// However the network object itself still contains the subnet, the only difference
	// is that the subnet can not be retrieved by itself
	var candidates []osnetworks.Network
	for _, network := range networks {
		for _, subnetID := range network.Subnets {
			_, err := getSubnet(netClient, subnetID)
			if errors.Is(err, errNotFound) {
				continue
			}
			if err != nil {
				return nil, err
			}
			// One resolvable subnet is enough to qualify this network.
			candidates = append(candidates, network)
			break
		}
	}
	if len(candidates) == 1 {
		return &candidates[0], nil
	}
	return nil, fmt.Errorf("%d candidate networks found", len(candidates))
}
// getDefaultSubnet picks a subnet ID for the given network: the network's
// single listed subnet if there is exactly one, otherwise the first subnet
// returned by the subnet API for that network.
func getDefaultSubnet(netClient *gophercloud.ServiceClient, network *osnetworks.Network) (*string, error) {
	if len(network.Subnets) == 1 {
		return &network.Subnets[0], nil
	}
	subnets, err := getSubnets(netClient, network.ID)
	if err != nil {
		return nil, err
	}
	if len(subnets) == 0 {
		return nil, errors.New("no subnets available")
	}
	return &subnets[0].ID, nil
}
| {
return nil, err
} | conditional_block |
helper.go | /*
Copyright 2019 The Machine Controller Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"errors"
"fmt"
"strconv"
"sync"
"time"
"github.com/gophercloud/gophercloud"
goopenstack "github.com/gophercloud/gophercloud/openstack"
osavailabilityzones "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones"
osflavors "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
osregions "github.com/gophercloud/gophercloud/openstack/identity/v3/regions"
osimagesv2 "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images"
osfloatingips "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
ossecuritygroups "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups"
osecruritygrouprules "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules"
osnetworks "github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
osports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
ossubnets "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
"github.com/gophercloud/gophercloud/pagination"
"go.uber.org/zap"
)
var (
errNotFound = errors.New("not found")
securityGroupCreationLock = sync.Mutex{}
)
const (
errorStatus = "ERROR"
floatingReassignIPCheckPeriod = 3 * time.Second
)
func getRegion(client *gophercloud.ProviderClient, name string) (*osregions.Region, error) {
idClient, err := goopenstack.NewIdentityV3(client, gophercloud.EndpointOpts{})
if err != nil {
return nil, err
}
return osregions.Get(idClient, name).Extract()
}
func getRegions(client *gophercloud.ProviderClient) ([]osregions.Region, error) {
idClient, err := goopenstack.NewIdentityV3(client, gophercloud.EndpointOpts{})
if err != nil {
return nil, err
}
listOpts := osregions.ListOpts{
ParentRegionID: "",
}
allPages, err := osregions.List(idClient, listOpts).AllPages()
if err != nil {
return nil, err
}
regions, err := osregions.ExtractRegions(allPages)
if err != nil {
return nil, err
}
return regions, nil
}
func getNewComputeV2(client *gophercloud.ProviderClient, c *Config) (*gophercloud.ServiceClient, error) {
computeClient, err := goopenstack.NewComputeV2(client, gophercloud.EndpointOpts{Region: c.Region})
if err != nil {
return nil, err
}
if c.ComputeAPIVersion != "" {
// Validation - empty value default to microversion 2.0=2.1
version, err := strconv.ParseFloat(c.ComputeAPIVersion, 32)
if err != nil || version < 2.0 {
return nil, fmt.Errorf("invalid computeAPIVersion: %w", err)
}
// See https://github.com/gophercloud/gophercloud/blob/master/docs/MICROVERSIONS.md
computeClient.Microversion = c.ComputeAPIVersion
}
return computeClient, nil
}
func getAvailabilityZones(computeClient *gophercloud.ServiceClient) ([]osavailabilityzones.AvailabilityZone, error) {
allPages, err := osavailabilityzones.List(computeClient).AllPages()
if err != nil {
return nil, err
}
return osavailabilityzones.ExtractAvailabilityZones(allPages)
}
func getAvailabilityZone(computeClient *gophercloud.ServiceClient, c *Config) (*osavailabilityzones.AvailabilityZone, error) {
zones, err := getAvailabilityZones(computeClient)
if err != nil {
return nil, err
}
for _, z := range zones {
if z.ZoneName == c.AvailabilityZone {
return &z, nil
}
}
return nil, errNotFound
}
func getImageByName(imageClient *gophercloud.ServiceClient, c *Config) (*osimagesv2.Image, error) {
var allImages []osimagesv2.Image
pager := osimagesv2.List(imageClient, osimagesv2.ListOpts{Name: c.Image})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
images, err := osimagesv2.ExtractImages(page)
if err != nil {
return false, err
}
allImages = append(allImages, images...)
return true, nil
})
if err != nil {
return nil, err
}
if len(allImages) == 0 {
return nil, errNotFound
}
return &allImages[0], nil
}
func getFlavor(computeClient *gophercloud.ServiceClient, c *Config) (*osflavors.Flavor, error) {
var allFlavors []osflavors.Flavor
pager := osflavors.ListDetail(computeClient, osflavors.ListOpts{})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
flavors, err := osflavors.ExtractFlavors(page)
if err != nil {
return false, err
}
allFlavors = append(allFlavors, flavors...)
return true, nil
})
if err != nil {
return nil, err
}
for _, f := range allFlavors {
if f.Name == c.Flavor {
return &f, nil
}
}
return nil, errNotFound
}
func getSecurityGroup(client *gophercloud.ProviderClient, region, name string) (*ossecuritygroups.SecGroup, error) {
netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: region})
if err != nil {
return nil, err
}
var allGroups []ossecuritygroups.SecGroup
pager := ossecuritygroups.List(netClient, ossecuritygroups.ListOpts{})
err = pager.EachPage(func(page pagination.Page) (bool, error) {
groups, err := ossecuritygroups.ExtractGroups(page)
if err != nil {
return false, err
}
allGroups = append(allGroups, groups...)
return true, nil
})
if err != nil {
return nil, err
}
for _, g := range allGroups {
if g.Name == name {
return &g, nil
}
}
return nil, errNotFound
}
func getNetworks(netClient *gophercloud.ServiceClient) ([]osnetworks.Network, error) {
var allNetworks []osnetworks.Network
pager := osnetworks.List(netClient, osnetworks.ListOpts{})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
networks, err := osnetworks.ExtractNetworks(page)
if err != nil {
return false, err
}
allNetworks = append(allNetworks, networks...)
return true, nil
})
if err != nil {
return nil, err
}
return allNetworks, nil
}
func | (netClient *gophercloud.ServiceClient, nameOrID string) (*osnetworks.Network, error) {
allNetworks, err := getNetworks(netClient)
if err != nil {
return nil, err
}
for _, n := range allNetworks {
if n.Name == nameOrID || n.ID == nameOrID {
return &n, nil
}
}
return nil, errNotFound
}
func getSubnets(netClient *gophercloud.ServiceClient, networkID string) ([]ossubnets.Subnet, error) {
listOpts := ossubnets.ListOpts{}
if networkID != "" {
listOpts = ossubnets.ListOpts{NetworkID: networkID}
}
var allSubnets []ossubnets.Subnet
pager := ossubnets.List(netClient, listOpts)
err := pager.EachPage(func(page pagination.Page) (bool, error) {
subnets, err := ossubnets.ExtractSubnets(page)
if err != nil {
return false, err
}
allSubnets = append(allSubnets, subnets...)
return true, nil
})
if err != nil {
return nil, err
}
return allSubnets, nil
}
func getSubnet(netClient *gophercloud.ServiceClient, nameOrID string) (*ossubnets.Subnet, error) {
allSubnets, err := getSubnets(netClient, "")
if err != nil {
return nil, err
}
for _, s := range allSubnets {
if s.Name == nameOrID || s.ID == nameOrID {
return &s, nil
}
}
return nil, errNotFound
}
func ensureKubernetesSecurityGroupExist(log *zap.SugaredLogger, client *gophercloud.ProviderClient, region, name string) error {
// We need a mutex here because otherwise if more than one machine gets created at roughly the same time
// we will create two security groups and subsequently not be able anymore to identify our security group
// by name
securityGroupCreationLock.Lock()
defer securityGroupCreationLock.Unlock()
netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: region})
if err != nil {
return osErrorToTerminalError(log, err, "failed to get network client")
}
_, err = getSecurityGroup(client, region, name)
if err != nil {
if errors.Is(err, errNotFound) {
sg, err := ossecuritygroups.Create(netClient, ossecuritygroups.CreateOpts{Name: name}).Extract()
if err != nil {
return osErrorToTerminalError(log, err, fmt.Sprintf("failed to create security group %s", name))
}
rules := []osecruritygrouprules.CreateOpts{
{
// Allows ipv4 traffic within this group
Direction: osecruritygrouprules.DirIngress,
EtherType: osecruritygrouprules.EtherType4,
SecGroupID: sg.ID,
RemoteGroupID: sg.ID,
},
{
// Allows ipv6 traffic within this group
Direction: osecruritygrouprules.DirIngress,
EtherType: osecruritygrouprules.EtherType6,
SecGroupID: sg.ID,
RemoteGroupID: sg.ID,
},
}
for _, opts := range rules {
if _, err := osecruritygrouprules.Create(netClient, opts).Extract(); err != nil {
return osErrorToTerminalError(log, err, "failed to create security group rule")
}
}
}
}
return nil
}
func getFreeFloatingIPs(netClient *gophercloud.ServiceClient, floatingIPPool *osnetworks.Network) ([]osfloatingips.FloatingIP, error) {
allPages, err := osfloatingips.List(netClient, osfloatingips.ListOpts{FloatingNetworkID: floatingIPPool.ID}).AllPages()
if err != nil {
return nil, err
}
allFIPs, err := osfloatingips.ExtractFloatingIPs(allPages)
if err != nil {
return nil, err
}
var freeFIPs []osfloatingips.FloatingIP
for _, f := range allFIPs {
// See some details about this test here:
// https://github.com/kubermatic/machine-controller/pull/28#discussion_r163773619
// The check of FixedIP has been added to avoid false positives on OTC,
// where FIPs associated to Classic LoadBalandcers never get assigned a
// PortID even when they are in use.
if f.Status != errorStatus && f.PortID == "" && f.FixedIP == "" {
freeFIPs = append(freeFIPs, f)
}
}
return freeFIPs, nil
}
func createFloatingIP(netClient *gophercloud.ServiceClient, portID string, floatingIPPool *osnetworks.Network) (*osfloatingips.FloatingIP, error) {
opts := osfloatingips.CreateOpts{
FloatingNetworkID: floatingIPPool.ID,
PortID: portID,
}
return osfloatingips.Create(netClient, opts).Extract()
}
func getInstancePort(netClient *gophercloud.ServiceClient, instanceID, networkID string) (*osports.Port, error) {
allPages, err := osports.List(netClient, osports.ListOpts{
DeviceID: instanceID,
NetworkID: networkID,
}).AllPages()
if err != nil {
return nil, err
}
allPorts, err := osports.ExtractPorts(allPages)
if err != nil {
return nil, err
}
for _, p := range allPorts {
if p.NetworkID == networkID && p.DeviceID == instanceID {
return &p, nil
}
}
return nil, errNotFound
}
func getDefaultNetwork(netClient *gophercloud.ServiceClient) (*osnetworks.Network, error) {
networks, err := getNetworks(netClient)
if err != nil {
return nil, err
}
if len(networks) == 1 {
return &networks[0], nil
}
// Networks without subnets can't be used, try finding a default by excluding them
// However the network object itself still contains the subnet, the only difference
// is that the subnet can not be retrieved by itself
var candidates []osnetworks.Network
NetworkLoop:
for _, network := range networks {
for _, subnet := range network.Subnets {
_, err := getSubnet(netClient, subnet)
if errors.Is(err, errNotFound) {
continue
} else if err != nil {
return nil, err
}
candidates = append(candidates, network)
continue NetworkLoop
}
}
if len(candidates) == 1 {
return &candidates[0], nil
}
return nil, fmt.Errorf("%d candidate networks found", len(candidates))
}
func getDefaultSubnet(netClient *gophercloud.ServiceClient, network *osnetworks.Network) (*string, error) {
if len(network.Subnets) == 1 {
return &network.Subnets[0], nil
}
subnets, err := getSubnets(netClient, network.ID)
if err != nil {
return nil, err
}
if len(subnets) == 0 {
return nil, errors.New("no subnets available")
}
return &subnets[0].ID, nil
}
| getNetwork | identifier_name |
helper.go | /*
Copyright 2019 The Machine Controller Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"errors"
"fmt"
"strconv"
"sync"
"time"
"github.com/gophercloud/gophercloud"
goopenstack "github.com/gophercloud/gophercloud/openstack"
osavailabilityzones "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones"
osflavors "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
osregions "github.com/gophercloud/gophercloud/openstack/identity/v3/regions"
osimagesv2 "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images"
osfloatingips "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
ossecuritygroups "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups"
osecruritygrouprules "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules"
osnetworks "github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
osports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
ossubnets "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
"github.com/gophercloud/gophercloud/pagination"
"go.uber.org/zap"
)
var (
errNotFound = errors.New("not found")
securityGroupCreationLock = sync.Mutex{}
)
const (
errorStatus = "ERROR"
floatingReassignIPCheckPeriod = 3 * time.Second
)
func getRegion(client *gophercloud.ProviderClient, name string) (*osregions.Region, error) {
idClient, err := goopenstack.NewIdentityV3(client, gophercloud.EndpointOpts{})
if err != nil {
return nil, err
}
return osregions.Get(idClient, name).Extract()
}
func getRegions(client *gophercloud.ProviderClient) ([]osregions.Region, error) {
idClient, err := goopenstack.NewIdentityV3(client, gophercloud.EndpointOpts{})
if err != nil {
return nil, err
}
listOpts := osregions.ListOpts{
ParentRegionID: "",
}
allPages, err := osregions.List(idClient, listOpts).AllPages()
if err != nil {
return nil, err
}
regions, err := osregions.ExtractRegions(allPages)
if err != nil {
return nil, err
}
return regions, nil
}
func getNewComputeV2(client *gophercloud.ProviderClient, c *Config) (*gophercloud.ServiceClient, error) {
computeClient, err := goopenstack.NewComputeV2(client, gophercloud.EndpointOpts{Region: c.Region})
if err != nil {
return nil, err
}
if c.ComputeAPIVersion != "" {
// Validation - empty value default to microversion 2.0=2.1
version, err := strconv.ParseFloat(c.ComputeAPIVersion, 32)
if err != nil || version < 2.0 {
return nil, fmt.Errorf("invalid computeAPIVersion: %w", err)
}
// See https://github.com/gophercloud/gophercloud/blob/master/docs/MICROVERSIONS.md
computeClient.Microversion = c.ComputeAPIVersion
}
return computeClient, nil
}
func getAvailabilityZones(computeClient *gophercloud.ServiceClient) ([]osavailabilityzones.AvailabilityZone, error) |
func getAvailabilityZone(computeClient *gophercloud.ServiceClient, c *Config) (*osavailabilityzones.AvailabilityZone, error) {
zones, err := getAvailabilityZones(computeClient)
if err != nil {
return nil, err
}
for _, z := range zones {
if z.ZoneName == c.AvailabilityZone {
return &z, nil
}
}
return nil, errNotFound
}
func getImageByName(imageClient *gophercloud.ServiceClient, c *Config) (*osimagesv2.Image, error) {
var allImages []osimagesv2.Image
pager := osimagesv2.List(imageClient, osimagesv2.ListOpts{Name: c.Image})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
images, err := osimagesv2.ExtractImages(page)
if err != nil {
return false, err
}
allImages = append(allImages, images...)
return true, nil
})
if err != nil {
return nil, err
}
if len(allImages) == 0 {
return nil, errNotFound
}
return &allImages[0], nil
}
func getFlavor(computeClient *gophercloud.ServiceClient, c *Config) (*osflavors.Flavor, error) {
var allFlavors []osflavors.Flavor
pager := osflavors.ListDetail(computeClient, osflavors.ListOpts{})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
flavors, err := osflavors.ExtractFlavors(page)
if err != nil {
return false, err
}
allFlavors = append(allFlavors, flavors...)
return true, nil
})
if err != nil {
return nil, err
}
for _, f := range allFlavors {
if f.Name == c.Flavor {
return &f, nil
}
}
return nil, errNotFound
}
func getSecurityGroup(client *gophercloud.ProviderClient, region, name string) (*ossecuritygroups.SecGroup, error) {
netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: region})
if err != nil {
return nil, err
}
var allGroups []ossecuritygroups.SecGroup
pager := ossecuritygroups.List(netClient, ossecuritygroups.ListOpts{})
err = pager.EachPage(func(page pagination.Page) (bool, error) {
groups, err := ossecuritygroups.ExtractGroups(page)
if err != nil {
return false, err
}
allGroups = append(allGroups, groups...)
return true, nil
})
if err != nil {
return nil, err
}
for _, g := range allGroups {
if g.Name == name {
return &g, nil
}
}
return nil, errNotFound
}
func getNetworks(netClient *gophercloud.ServiceClient) ([]osnetworks.Network, error) {
var allNetworks []osnetworks.Network
pager := osnetworks.List(netClient, osnetworks.ListOpts{})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
networks, err := osnetworks.ExtractNetworks(page)
if err != nil {
return false, err
}
allNetworks = append(allNetworks, networks...)
return true, nil
})
if err != nil {
return nil, err
}
return allNetworks, nil
}
func getNetwork(netClient *gophercloud.ServiceClient, nameOrID string) (*osnetworks.Network, error) {
allNetworks, err := getNetworks(netClient)
if err != nil {
return nil, err
}
for _, n := range allNetworks {
if n.Name == nameOrID || n.ID == nameOrID {
return &n, nil
}
}
return nil, errNotFound
}
func getSubnets(netClient *gophercloud.ServiceClient, networkID string) ([]ossubnets.Subnet, error) {
listOpts := ossubnets.ListOpts{}
if networkID != "" {
listOpts = ossubnets.ListOpts{NetworkID: networkID}
}
var allSubnets []ossubnets.Subnet
pager := ossubnets.List(netClient, listOpts)
err := pager.EachPage(func(page pagination.Page) (bool, error) {
subnets, err := ossubnets.ExtractSubnets(page)
if err != nil {
return false, err
}
allSubnets = append(allSubnets, subnets...)
return true, nil
})
if err != nil {
return nil, err
}
return allSubnets, nil
}
func getSubnet(netClient *gophercloud.ServiceClient, nameOrID string) (*ossubnets.Subnet, error) {
allSubnets, err := getSubnets(netClient, "")
if err != nil {
return nil, err
}
for _, s := range allSubnets {
if s.Name == nameOrID || s.ID == nameOrID {
return &s, nil
}
}
return nil, errNotFound
}
func ensureKubernetesSecurityGroupExist(log *zap.SugaredLogger, client *gophercloud.ProviderClient, region, name string) error {
// We need a mutex here because otherwise if more than one machine gets created at roughly the same time
// we will create two security groups and subsequently not be able anymore to identify our security group
// by name
securityGroupCreationLock.Lock()
defer securityGroupCreationLock.Unlock()
netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: region})
if err != nil {
return osErrorToTerminalError(log, err, "failed to get network client")
}
_, err = getSecurityGroup(client, region, name)
if err != nil {
if errors.Is(err, errNotFound) {
sg, err := ossecuritygroups.Create(netClient, ossecuritygroups.CreateOpts{Name: name}).Extract()
if err != nil {
return osErrorToTerminalError(log, err, fmt.Sprintf("failed to create security group %s", name))
}
rules := []osecruritygrouprules.CreateOpts{
{
// Allows ipv4 traffic within this group
Direction: osecruritygrouprules.DirIngress,
EtherType: osecruritygrouprules.EtherType4,
SecGroupID: sg.ID,
RemoteGroupID: sg.ID,
},
{
// Allows ipv6 traffic within this group
Direction: osecruritygrouprules.DirIngress,
EtherType: osecruritygrouprules.EtherType6,
SecGroupID: sg.ID,
RemoteGroupID: sg.ID,
},
}
for _, opts := range rules {
if _, err := osecruritygrouprules.Create(netClient, opts).Extract(); err != nil {
return osErrorToTerminalError(log, err, "failed to create security group rule")
}
}
}
}
return nil
}
func getFreeFloatingIPs(netClient *gophercloud.ServiceClient, floatingIPPool *osnetworks.Network) ([]osfloatingips.FloatingIP, error) {
allPages, err := osfloatingips.List(netClient, osfloatingips.ListOpts{FloatingNetworkID: floatingIPPool.ID}).AllPages()
if err != nil {
return nil, err
}
allFIPs, err := osfloatingips.ExtractFloatingIPs(allPages)
if err != nil {
return nil, err
}
var freeFIPs []osfloatingips.FloatingIP
for _, f := range allFIPs {
// See some details about this test here:
// https://github.com/kubermatic/machine-controller/pull/28#discussion_r163773619
// The check of FixedIP has been added to avoid false positives on OTC,
// where FIPs associated to Classic LoadBalandcers never get assigned a
// PortID even when they are in use.
if f.Status != errorStatus && f.PortID == "" && f.FixedIP == "" {
freeFIPs = append(freeFIPs, f)
}
}
return freeFIPs, nil
}
func createFloatingIP(netClient *gophercloud.ServiceClient, portID string, floatingIPPool *osnetworks.Network) (*osfloatingips.FloatingIP, error) {
opts := osfloatingips.CreateOpts{
FloatingNetworkID: floatingIPPool.ID,
PortID: portID,
}
return osfloatingips.Create(netClient, opts).Extract()
}
func getInstancePort(netClient *gophercloud.ServiceClient, instanceID, networkID string) (*osports.Port, error) {
allPages, err := osports.List(netClient, osports.ListOpts{
DeviceID: instanceID,
NetworkID: networkID,
}).AllPages()
if err != nil {
return nil, err
}
allPorts, err := osports.ExtractPorts(allPages)
if err != nil {
return nil, err
}
for _, p := range allPorts {
if p.NetworkID == networkID && p.DeviceID == instanceID {
return &p, nil
}
}
return nil, errNotFound
}
func getDefaultNetwork(netClient *gophercloud.ServiceClient) (*osnetworks.Network, error) {
networks, err := getNetworks(netClient)
if err != nil {
return nil, err
}
if len(networks) == 1 {
return &networks[0], nil
}
// Networks without subnets can't be used, try finding a default by excluding them
// However the network object itself still contains the subnet, the only difference
// is that the subnet can not be retrieved by itself
var candidates []osnetworks.Network
NetworkLoop:
for _, network := range networks {
for _, subnet := range network.Subnets {
_, err := getSubnet(netClient, subnet)
if errors.Is(err, errNotFound) {
continue
} else if err != nil {
return nil, err
}
candidates = append(candidates, network)
continue NetworkLoop
}
}
if len(candidates) == 1 {
return &candidates[0], nil
}
return nil, fmt.Errorf("%d candidate networks found", len(candidates))
}
func getDefaultSubnet(netClient *gophercloud.ServiceClient, network *osnetworks.Network) (*string, error) {
if len(network.Subnets) == 1 {
return &network.Subnets[0], nil
}
subnets, err := getSubnets(netClient, network.ID)
if err != nil {
return nil, err
}
if len(subnets) == 0 {
return nil, errors.New("no subnets available")
}
return &subnets[0].ID, nil
}
| {
allPages, err := osavailabilityzones.List(computeClient).AllPages()
if err != nil {
return nil, err
}
return osavailabilityzones.ExtractAvailabilityZones(allPages)
} | identifier_body |
get_cdips_lc_stats.py | """
* get simple rms vs mag stats for CDIPS LCs
* plot them.
* assess how many all-nan LCs there are.
* move allnan light curves to a graveyard directory to collect dust
* supplement the statsfile by matching against Gaia DR2 and CDIPS catalogs.
usage:
$ (cdips) python -u get_cdips_lc_stats.py |& tee logs/s6_stats_overview_log.txt
NOTE: depends on pipe-trex (--> run in environment with aperturephot on path)
"""
import sys
sys.path.append('/nfs/phtess1/ar1/TESS/PROJ/jhartman/202106_CDIPS/cdips-pipeline')
import pandas as pd, numpy as np
import aperturephot as ap
import os, subprocess, shlex, shutil
from glob import glob
from os.path import join
from cdips.utils import collect_cdips_lightcurves as ccl
def get_cdips_lc_stats(
sector=6,
cdipssource_vnum=None,
nworkers=32,
overwrite=0,
filesystem='phtess2'
):
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
if os.path.exists(statsfile) and not overwrite:
print("found statsfile and not overwrite. skip")
return
lcglob = 'cam?_ccd?/*_llc.fits'
# a cut on OC_MG_FINAL_GaiaRp_lt_16_v0.4.csv to be genfromtxt readable
catalogfile = join(catdir,
f'sourceid_and_photrpmeanmag_v{cdipssource_vnum}.csv' )
if not os.path.exists(catalogfile):
if cdipssource_vnum < 0.6:
cfile = join(catdir,
f'OC_MG_FINAL_GaiaRp_lt_16_v{cdipssource_vnum}.csv')
cdipsdf = pd.read_csv(cfile, sep=';')
else:
cfile = join(catdir,
f'cdips_targets_v{cdipssource_vnum}_gaiasources_Rplt16_orclose.csv')
cdipsdf = pd.read_csv(cfile, sep=',')
outdf = cdipsdf[['source_id','phot_rp_mean_mag']].dropna(axis=0, how='any')
outdf.to_csv(catalogfile, sep=' ', index=False, header=False)
ap.parallel_lc_statistics(lcdirectory, lcglob,
catalogfile, tfalcrequired=True,
epdlcrequired=False,
fitslcnottxt=True,
fovcatcols=(0,1), # objectid, magcol to use
fovcatmaglabel='GRp', outfile=statsfile,
nworkers=nworkers,
workerntasks=500, rmcols=None,
epcols=None, tfcols=None,
rfcols=None, correctioncoeffs=None,
sigclip=5.0, fovcathasgaiaids=True)
ap.plot_stats_file(statsfile, statsdir,
f'sector-{sector} cdips',
binned=False, logy=True, logx=False,
correctmagsafter=None, rangex=(5.9,16),
observatory='tess', fovcathasgaiaids=True,
yaxisval='RMS')
print('Finished get_cdips_lc_stats!')
def supplement_stats_file(
cdipssource_vnum=None,
sector=6,
filesystem=None):
"""
add crossmatching info per line:
* all gaia mags. also gaia extinction and parallax. (also parallax upper
and lower bounds).
* calculated T mag from TICv8 relations
* all the gaia info (especially teff, rstar, etc if available. but also
position ra,dec and x,y, for sky-map plots. rstar to then be used when
applying rstar>5rsun cut in vetting)
* all the CDIPS catalog info (especially the name of the damn cluster)
* all the TIC info (the CROWDING metric, the TICID, and the Tmag)
"""
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
outpath = statsfile.replace('cdips_lc_statistics',
'supplemented_cdips_lc_statistics')
outdir = os.path.dirname(outpath)
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
df = pd.DataFrame(stats)
del stats
lcobjcsv = os.path.join(outdir, 'sector{}_lcobj.csv'.format(sector))
lcobjtxt = os.path.join(outdir, 'sector{}_lcobj.txt'.format(sector))
df['lcobj'].to_csv(lcobjcsv, index=False, header=False)
# run the gaia2read on this list
if not os.path.exists(lcobjtxt):
gaia2readcmd = (
"gaia2read --header --extra --idfile {} > {}".format(
lcobjcsv, lcobjtxt
)
)
returncode = os.system(gaia2readcmd)
if returncode != 0:
raise AssertionError('gaia2read cmd failed!!')
else:
print('ran {}'.format(gaia2readcmd))
# merge statsfile against (most of) gaia dr2
gdf = pd.read_csv(lcobjtxt, delim_whitespace=True)
desiredcols = ['#Gaia-ID[1]', 'RA[deg][2]', 'Dec[deg][3]',
'RAError[mas][4]', 'DecError[mas][5]',
'Parallax[mas][6]', 'Parallax_error[mas][7]',
'PM_RA[mas/yr][8]', 'PM_Dec[mas/year][9]',
'PMRA_error[mas/yr][10]', 'PMDec_error[mas/yr][11]',
'Ref_Epoch[yr][12]', 'phot_g_mean_mag[20]',
'phot_bp_mean_mag[25]', 'phot_rp_mean_mag[30]',
'radial_velocity[32]', 'radial_velocity_error[33]',
'teff_val[35]', 'teff_percentile_lower[36]',
'teff_percentile_upper[37]', 'a_g_val[38]',
'a_g_percentile_lower[39]', 'a_g_percentile_upper[40]',
'e_bp_min_rp_val[41]',
'e_bp_min_rp_percentile_lower[42]',
'e_bp_min_rp_percentile_upper[43]', 'radius_val[44]',
'radius_percentile_lower[45]',
'radius_percentile_upper[46]', 'lum_val[47]',
'lum_percentile_lower[48]', 'lum_percentile_upper[49]']
cgdf = gdf[desiredcols]
df['lcobj'] = df['lcobj'].astype(np.int64)
mdf = df.merge(cgdf, how='left', left_on='lcobj', right_on='#Gaia-ID[1]')
if np.all(pd.isnull(mdf['RA[deg][2]'])):
errmsg = (
'ERR! probably merging against bad temp files!! check gaia2read '
'call, perhaps.'
)
raise AssertionError(errmsg)
del df, cgdf, gdf
# merge against CDIPS catalog info
cdips_df = ccl.get_cdips_pub_catalog(ver=cdipssource_vnum)
if cdipssource_vnum < 0.6:
dcols = (
'cluster;ext_catalog_name;reference;source_id;unique_cluster_name;logt;logt_provenance;comment'
)
dcols = dcols.split(';')
else:
dcols = (
'source_id,ra,dec,parallax,parallax_error,pmra,pmdec,phot_g_mean_mag,phot_rp_mean_mag,phot_bp_mean_mag,cluster,age,mean_age,reference_id,reference_bibcode'
)
dcols = dcols.split(',')
ccdf = cdips_df[dcols]
ccdf['source_id'] = ccdf['source_id'].astype(np.int64)
megadf = mdf.merge(ccdf, how='left', left_on='lcobj', right_on='source_id')
# finally save
megadf.to_csv(outpath, index=False, sep=';')
print('made {}'.format(outpath))
print('Finished supplement_stats_file!')
def print_metadata_stats(sector=6, filesystem=None):
"""
how many LCs?
how many all nan LCs?
"""
assert isinstance(filesystem, str)
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
|
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
N_lcs = len(stats)
print('CDIPS LIGHTCURVES STATS FOR SECTOR {}'.format(sector))
print(42*'-')
print('total N_lcs: {}'.format(N_lcs))
for apn in [1,2,3]:
N_nan = len(stats[stats['ndet_tf{}'.format(apn)]==0])
print('for ap {}, {} ({:.1f}%) are all nan, leaving {} ok lcs'.
format(apn, N_nan, N_nan/N_lcs*100, N_lcs-N_nan))
print('\nsanity check: {} TF1 LCs have stdev > 0'.
format(len(stats[stats['stdev_tf1'] > 0])))
print('Finished print_metadata_stats!')
def move_allnan_lcs(sector=None, cdipsvnum=None, filesystem=None):
assert isinstance(filesystem, str)
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
N_lcs = len(stats)
print('CDIPS LIGHTCURVES STATS FOR SECTOR {}'.format(sector))
print(42*'-')
print('total N_lcs: {}'.format(N_lcs))
for apn in [1,2,3]:
N_nan = len(stats[stats['ndet_rm{}'.format(apn)]==0])
print('for ap {}, {} ({:.1f}%) are all nan, leaving {} ok lcs'.
format(apn, N_nan, N_nan/N_lcs*100, N_lcs-N_nan))
print(42*'-')
print('BEGINNING MOVE OF ALLNAN LIGHT CURVES')
sel = (
(stats['ndet_rm1']==0) &
(stats['ndet_rm2']==0) &
(stats['ndet_rm3']==0)
)
nanobjs = stats[sel]['lcobj']
lcdirectory = join(lcdirectory, "cam?_ccd?")
lcnames = [(
'hlsp_cdips_tess_ffi_'
'gaiatwo{zsourceid}-{zsector}-cam{cam}-ccd{ccd}_'
'tess_v{zcdipsvnum}_llc.fits'
).format(
cam='?',
ccd='?',
zsourceid=str(lcgaiaid).zfill(22),
zsector=str(sector).zfill(4),
zcdipsvnum=str(cdipsvnum).zfill(2)
)
for lcgaiaid in nanobjs
]
lcglobs = [os.path.join(lcdirectory, lcname) for lcname in lcnames]
lcpaths = []
for l in lcglobs:
try:
lcpaths.append(glob(l)[0])
except:
pass
dstpaths = [os.path.join(os.path.dirname(l),
'allnanlcs',
os.path.basename(l))
for l in lcpaths]
for src,dst in zip(lcpaths,dstpaths):
dstdir = os.path.dirname(dst)
if not os.path.exists(dstdir):
os.mkdir(dstdir)
try:
shutil.move(src,dst)
print('moved {} -> {}'.format(src,dst))
except FileNotFoundError as e:
if os.path.exists(dst):
pass
else:
print(repr(e))
raise FileNotFoundError
def main(sector, cdipssource_vnum, cdipsvnum, overwrite, get_stats=1,
make_supp_stats=0, print_metadata=1, move_allnan=1, filesystem=None):
assert isinstance(filesystem, str)
if get_stats:
get_cdips_lc_stats(
sector=sector,
cdipssource_vnum=cdipssource_vnum,
nworkers=40,
overwrite=overwrite,
filesystem=filesystem
)
if make_supp_stats:
supplement_stats_file(
cdipssource_vnum=cdipssource_vnum,
sector=sector,
filesystem=filesystem
)
if print_metadata:
print_metadata_stats(
sector=sector,
filesystem=filesystem
)
if move_allnan:
move_allnan_lcs(
sector=sector, cdipsvnum=cdipsvnum, filesystem=filesystem
)
if __name__ == "__main__":
sector=40
cdipssource_vnum=0.6
cdipsvnum=1
overwrite=0
get_stats=0
make_supp_stats=1
print_metadata=0
move_allnan=1
filesystem='wh1'
main(sector, cdipssource_vnum, cdipsvnum, overwrite, get_stats=get_stats,
make_supp_stats=make_supp_stats, print_metadata=print_metadata,
move_allnan=move_allnan, filesystem=filesystem)
| projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/' | conditional_block |
get_cdips_lc_stats.py | """
* get simple rms vs mag stats for CDIPS LCs
* plot them.
* assess how many all-nan LCs there are.
* move allnan light curves to a graveyard directory to collect dust
* supplement the statsfile by matching against Gaia DR2 and CDIPS catalogs.
usage:
$ (cdips) python -u get_cdips_lc_stats.py |& tee logs/s6_stats_overview_log.txt
NOTE: depends on pipe-trex (--> run in environment with aperturephot on path)
"""
import sys
sys.path.append('/nfs/phtess1/ar1/TESS/PROJ/jhartman/202106_CDIPS/cdips-pipeline')
import pandas as pd, numpy as np
import aperturephot as ap
import os, subprocess, shlex, shutil
from glob import glob
from os.path import join
from cdips.utils import collect_cdips_lightcurves as ccl
def get_cdips_lc_stats(
sector=6,
cdipssource_vnum=None,
nworkers=32,
overwrite=0,
filesystem='phtess2'
):
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
if os.path.exists(statsfile) and not overwrite:
print("found statsfile and not overwrite. skip")
return
lcglob = 'cam?_ccd?/*_llc.fits'
# a cut on OC_MG_FINAL_GaiaRp_lt_16_v0.4.csv to be genfromtxt readable
catalogfile = join(catdir,
f'sourceid_and_photrpmeanmag_v{cdipssource_vnum}.csv' )
if not os.path.exists(catalogfile):
if cdipssource_vnum < 0.6:
cfile = join(catdir,
f'OC_MG_FINAL_GaiaRp_lt_16_v{cdipssource_vnum}.csv')
cdipsdf = pd.read_csv(cfile, sep=';')
else:
cfile = join(catdir,
f'cdips_targets_v{cdipssource_vnum}_gaiasources_Rplt16_orclose.csv')
cdipsdf = pd.read_csv(cfile, sep=',')
outdf = cdipsdf[['source_id','phot_rp_mean_mag']].dropna(axis=0, how='any')
outdf.to_csv(catalogfile, sep=' ', index=False, header=False)
ap.parallel_lc_statistics(lcdirectory, lcglob,
catalogfile, tfalcrequired=True,
epdlcrequired=False,
fitslcnottxt=True,
fovcatcols=(0,1), # objectid, magcol to use
fovcatmaglabel='GRp', outfile=statsfile,
nworkers=nworkers,
workerntasks=500, rmcols=None,
epcols=None, tfcols=None,
rfcols=None, correctioncoeffs=None,
sigclip=5.0, fovcathasgaiaids=True)
ap.plot_stats_file(statsfile, statsdir,
f'sector-{sector} cdips',
binned=False, logy=True, logx=False,
correctmagsafter=None, rangex=(5.9,16),
observatory='tess', fovcathasgaiaids=True,
yaxisval='RMS')
print('Finished get_cdips_lc_stats!')
def supplement_stats_file(
cdipssource_vnum=None,
sector=6,
filesystem=None):
|
def print_metadata_stats(sector=6, filesystem=None):
"""
how many LCs?
how many all nan LCs?
"""
assert isinstance(filesystem, str)
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
N_lcs = len(stats)
print('CDIPS LIGHTCURVES STATS FOR SECTOR {}'.format(sector))
print(42*'-')
print('total N_lcs: {}'.format(N_lcs))
for apn in [1,2,3]:
N_nan = len(stats[stats['ndet_tf{}'.format(apn)]==0])
print('for ap {}, {} ({:.1f}%) are all nan, leaving {} ok lcs'.
format(apn, N_nan, N_nan/N_lcs*100, N_lcs-N_nan))
print('\nsanity check: {} TF1 LCs have stdev > 0'.
format(len(stats[stats['stdev_tf1'] > 0])))
print('Finished print_metadata_stats!')
def move_allnan_lcs(sector=None, cdipsvnum=None, filesystem=None):
assert isinstance(filesystem, str)
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
N_lcs = len(stats)
print('CDIPS LIGHTCURVES STATS FOR SECTOR {}'.format(sector))
print(42*'-')
print('total N_lcs: {}'.format(N_lcs))
for apn in [1,2,3]:
N_nan = len(stats[stats['ndet_rm{}'.format(apn)]==0])
print('for ap {}, {} ({:.1f}%) are all nan, leaving {} ok lcs'.
format(apn, N_nan, N_nan/N_lcs*100, N_lcs-N_nan))
print(42*'-')
print('BEGINNING MOVE OF ALLNAN LIGHT CURVES')
sel = (
(stats['ndet_rm1']==0) &
(stats['ndet_rm2']==0) &
(stats['ndet_rm3']==0)
)
nanobjs = stats[sel]['lcobj']
lcdirectory = join(lcdirectory, "cam?_ccd?")
lcnames = [(
'hlsp_cdips_tess_ffi_'
'gaiatwo{zsourceid}-{zsector}-cam{cam}-ccd{ccd}_'
'tess_v{zcdipsvnum}_llc.fits'
).format(
cam='?',
ccd='?',
zsourceid=str(lcgaiaid).zfill(22),
zsector=str(sector).zfill(4),
zcdipsvnum=str(cdipsvnum).zfill(2)
)
for lcgaiaid in nanobjs
]
lcglobs = [os.path.join(lcdirectory, lcname) for lcname in lcnames]
lcpaths = []
for l in lcglobs:
try:
lcpaths.append(glob(l)[0])
except:
pass
dstpaths = [os.path.join(os.path.dirname(l),
'allnanlcs',
os.path.basename(l))
for l in lcpaths]
for src,dst in zip(lcpaths,dstpaths):
dstdir = os.path.dirname(dst)
if not os.path.exists(dstdir):
os.mkdir(dstdir)
try:
shutil.move(src,dst)
print('moved {} -> {}'.format(src,dst))
except FileNotFoundError as e:
if os.path.exists(dst):
pass
else:
print(repr(e))
raise FileNotFoundError
def main(sector, cdipssource_vnum, cdipsvnum, overwrite, get_stats=1,
make_supp_stats=0, print_metadata=1, move_allnan=1, filesystem=None):
assert isinstance(filesystem, str)
if get_stats:
get_cdips_lc_stats(
sector=sector,
cdipssource_vnum=cdipssource_vnum,
nworkers=40,
overwrite=overwrite,
filesystem=filesystem
)
if make_supp_stats:
supplement_stats_file(
cdipssource_vnum=cdipssource_vnum,
sector=sector,
filesystem=filesystem
)
if print_metadata:
print_metadata_stats(
sector=sector,
filesystem=filesystem
)
if move_allnan:
move_allnan_lcs(
sector=sector, cdipsvnum=cdipsvnum, filesystem=filesystem
)
if __name__ == "__main__":
sector=40
cdipssource_vnum=0.6
cdipsvnum=1
overwrite=0
get_stats=0
make_supp_stats=1
print_metadata=0
move_allnan=1
filesystem='wh1'
main(sector, cdipssource_vnum, cdipsvnum, overwrite, get_stats=get_stats,
make_supp_stats=make_supp_stats, print_metadata=print_metadata,
move_allnan=move_allnan, filesystem=filesystem)
| """
add crossmatching info per line:
* all gaia mags. also gaia extinction and parallax. (also parallax upper
and lower bounds).
* calculated T mag from TICv8 relations
* all the gaia info (especially teff, rstar, etc if available. but also
position ra,dec and x,y, for sky-map plots. rstar to then be used when
applying rstar>5rsun cut in vetting)
* all the CDIPS catalog info (especially the name of the damn cluster)
* all the TIC info (the CROWDING metric, the TICID, and the Tmag)
"""
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
outpath = statsfile.replace('cdips_lc_statistics',
'supplemented_cdips_lc_statistics')
outdir = os.path.dirname(outpath)
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
df = pd.DataFrame(stats)
del stats
lcobjcsv = os.path.join(outdir, 'sector{}_lcobj.csv'.format(sector))
lcobjtxt = os.path.join(outdir, 'sector{}_lcobj.txt'.format(sector))
df['lcobj'].to_csv(lcobjcsv, index=False, header=False)
# run the gaia2read on this list
if not os.path.exists(lcobjtxt):
gaia2readcmd = (
"gaia2read --header --extra --idfile {} > {}".format(
lcobjcsv, lcobjtxt
)
)
returncode = os.system(gaia2readcmd)
if returncode != 0:
raise AssertionError('gaia2read cmd failed!!')
else:
print('ran {}'.format(gaia2readcmd))
# merge statsfile against (most of) gaia dr2
gdf = pd.read_csv(lcobjtxt, delim_whitespace=True)
desiredcols = ['#Gaia-ID[1]', 'RA[deg][2]', 'Dec[deg][3]',
'RAError[mas][4]', 'DecError[mas][5]',
'Parallax[mas][6]', 'Parallax_error[mas][7]',
'PM_RA[mas/yr][8]', 'PM_Dec[mas/year][9]',
'PMRA_error[mas/yr][10]', 'PMDec_error[mas/yr][11]',
'Ref_Epoch[yr][12]', 'phot_g_mean_mag[20]',
'phot_bp_mean_mag[25]', 'phot_rp_mean_mag[30]',
'radial_velocity[32]', 'radial_velocity_error[33]',
'teff_val[35]', 'teff_percentile_lower[36]',
'teff_percentile_upper[37]', 'a_g_val[38]',
'a_g_percentile_lower[39]', 'a_g_percentile_upper[40]',
'e_bp_min_rp_val[41]',
'e_bp_min_rp_percentile_lower[42]',
'e_bp_min_rp_percentile_upper[43]', 'radius_val[44]',
'radius_percentile_lower[45]',
'radius_percentile_upper[46]', 'lum_val[47]',
'lum_percentile_lower[48]', 'lum_percentile_upper[49]']
cgdf = gdf[desiredcols]
df['lcobj'] = df['lcobj'].astype(np.int64)
mdf = df.merge(cgdf, how='left', left_on='lcobj', right_on='#Gaia-ID[1]')
if np.all(pd.isnull(mdf['RA[deg][2]'])):
errmsg = (
'ERR! probably merging against bad temp files!! check gaia2read '
'call, perhaps.'
)
raise AssertionError(errmsg)
del df, cgdf, gdf
# merge against CDIPS catalog info
cdips_df = ccl.get_cdips_pub_catalog(ver=cdipssource_vnum)
if cdipssource_vnum < 0.6:
dcols = (
'cluster;ext_catalog_name;reference;source_id;unique_cluster_name;logt;logt_provenance;comment'
)
dcols = dcols.split(';')
else:
dcols = (
'source_id,ra,dec,parallax,parallax_error,pmra,pmdec,phot_g_mean_mag,phot_rp_mean_mag,phot_bp_mean_mag,cluster,age,mean_age,reference_id,reference_bibcode'
)
dcols = dcols.split(',')
ccdf = cdips_df[dcols]
ccdf['source_id'] = ccdf['source_id'].astype(np.int64)
megadf = mdf.merge(ccdf, how='left', left_on='lcobj', right_on='source_id')
# finally save
megadf.to_csv(outpath, index=False, sep=';')
print('made {}'.format(outpath))
print('Finished supplement_stats_file!') | identifier_body |
get_cdips_lc_stats.py | """
* get simple rms vs mag stats for CDIPS LCs
* plot them.
* assess how many all-nan LCs there are.
* move allnan light curves to a graveyard directory to collect dust
* supplement the statsfile by matching against Gaia DR2 and CDIPS catalogs.
usage:
$ (cdips) python -u get_cdips_lc_stats.py |& tee logs/s6_stats_overview_log.txt
NOTE: depends on pipe-trex (--> run in environment with aperturephot on path)
"""
import sys
sys.path.append('/nfs/phtess1/ar1/TESS/PROJ/jhartman/202106_CDIPS/cdips-pipeline')
import pandas as pd, numpy as np
import aperturephot as ap
import os, subprocess, shlex, shutil
from glob import glob
from os.path import join
from cdips.utils import collect_cdips_lightcurves as ccl
def get_cdips_lc_stats(
sector=6,
cdipssource_vnum=None,
nworkers=32,
overwrite=0,
filesystem='phtess2'
):
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
if os.path.exists(statsfile) and not overwrite:
print("found statsfile and not overwrite. skip")
return
lcglob = 'cam?_ccd?/*_llc.fits'
# a cut on OC_MG_FINAL_GaiaRp_lt_16_v0.4.csv to be genfromtxt readable
catalogfile = join(catdir,
f'sourceid_and_photrpmeanmag_v{cdipssource_vnum}.csv' )
if not os.path.exists(catalogfile):
if cdipssource_vnum < 0.6:
cfile = join(catdir,
f'OC_MG_FINAL_GaiaRp_lt_16_v{cdipssource_vnum}.csv')
cdipsdf = pd.read_csv(cfile, sep=';')
else:
cfile = join(catdir,
f'cdips_targets_v{cdipssource_vnum}_gaiasources_Rplt16_orclose.csv')
cdipsdf = pd.read_csv(cfile, sep=',')
outdf = cdipsdf[['source_id','phot_rp_mean_mag']].dropna(axis=0, how='any')
outdf.to_csv(catalogfile, sep=' ', index=False, header=False)
ap.parallel_lc_statistics(lcdirectory, lcglob,
catalogfile, tfalcrequired=True,
epdlcrequired=False,
fitslcnottxt=True,
fovcatcols=(0,1), # objectid, magcol to use
fovcatmaglabel='GRp', outfile=statsfile,
nworkers=nworkers,
workerntasks=500, rmcols=None,
epcols=None, tfcols=None,
rfcols=None, correctioncoeffs=None,
sigclip=5.0, fovcathasgaiaids=True)
ap.plot_stats_file(statsfile, statsdir,
f'sector-{sector} cdips',
binned=False, logy=True, logx=False,
correctmagsafter=None, rangex=(5.9,16),
observatory='tess', fovcathasgaiaids=True,
yaxisval='RMS')
print('Finished get_cdips_lc_stats!')
def | (
cdipssource_vnum=None,
sector=6,
filesystem=None):
"""
add crossmatching info per line:
* all gaia mags. also gaia extinction and parallax. (also parallax upper
and lower bounds).
* calculated T mag from TICv8 relations
* all the gaia info (especially teff, rstar, etc if available. but also
position ra,dec and x,y, for sky-map plots. rstar to then be used when
applying rstar>5rsun cut in vetting)
* all the CDIPS catalog info (especially the name of the damn cluster)
* all the TIC info (the CROWDING metric, the TICID, and the Tmag)
"""
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
outpath = statsfile.replace('cdips_lc_statistics',
'supplemented_cdips_lc_statistics')
outdir = os.path.dirname(outpath)
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
df = pd.DataFrame(stats)
del stats
lcobjcsv = os.path.join(outdir, 'sector{}_lcobj.csv'.format(sector))
lcobjtxt = os.path.join(outdir, 'sector{}_lcobj.txt'.format(sector))
df['lcobj'].to_csv(lcobjcsv, index=False, header=False)
# run the gaia2read on this list
if not os.path.exists(lcobjtxt):
gaia2readcmd = (
"gaia2read --header --extra --idfile {} > {}".format(
lcobjcsv, lcobjtxt
)
)
returncode = os.system(gaia2readcmd)
if returncode != 0:
raise AssertionError('gaia2read cmd failed!!')
else:
print('ran {}'.format(gaia2readcmd))
# merge statsfile against (most of) gaia dr2
gdf = pd.read_csv(lcobjtxt, delim_whitespace=True)
desiredcols = ['#Gaia-ID[1]', 'RA[deg][2]', 'Dec[deg][3]',
'RAError[mas][4]', 'DecError[mas][5]',
'Parallax[mas][6]', 'Parallax_error[mas][7]',
'PM_RA[mas/yr][8]', 'PM_Dec[mas/year][9]',
'PMRA_error[mas/yr][10]', 'PMDec_error[mas/yr][11]',
'Ref_Epoch[yr][12]', 'phot_g_mean_mag[20]',
'phot_bp_mean_mag[25]', 'phot_rp_mean_mag[30]',
'radial_velocity[32]', 'radial_velocity_error[33]',
'teff_val[35]', 'teff_percentile_lower[36]',
'teff_percentile_upper[37]', 'a_g_val[38]',
'a_g_percentile_lower[39]', 'a_g_percentile_upper[40]',
'e_bp_min_rp_val[41]',
'e_bp_min_rp_percentile_lower[42]',
'e_bp_min_rp_percentile_upper[43]', 'radius_val[44]',
'radius_percentile_lower[45]',
'radius_percentile_upper[46]', 'lum_val[47]',
'lum_percentile_lower[48]', 'lum_percentile_upper[49]']
cgdf = gdf[desiredcols]
df['lcobj'] = df['lcobj'].astype(np.int64)
mdf = df.merge(cgdf, how='left', left_on='lcobj', right_on='#Gaia-ID[1]')
if np.all(pd.isnull(mdf['RA[deg][2]'])):
errmsg = (
'ERR! probably merging against bad temp files!! check gaia2read '
'call, perhaps.'
)
raise AssertionError(errmsg)
del df, cgdf, gdf
# merge against CDIPS catalog info
cdips_df = ccl.get_cdips_pub_catalog(ver=cdipssource_vnum)
if cdipssource_vnum < 0.6:
dcols = (
'cluster;ext_catalog_name;reference;source_id;unique_cluster_name;logt;logt_provenance;comment'
)
dcols = dcols.split(';')
else:
dcols = (
'source_id,ra,dec,parallax,parallax_error,pmra,pmdec,phot_g_mean_mag,phot_rp_mean_mag,phot_bp_mean_mag,cluster,age,mean_age,reference_id,reference_bibcode'
)
dcols = dcols.split(',')
ccdf = cdips_df[dcols]
ccdf['source_id'] = ccdf['source_id'].astype(np.int64)
megadf = mdf.merge(ccdf, how='left', left_on='lcobj', right_on='source_id')
# finally save
megadf.to_csv(outpath, index=False, sep=';')
print('made {}'.format(outpath))
print('Finished supplement_stats_file!')
def print_metadata_stats(sector=6, filesystem=None):
"""
how many LCs?
how many all nan LCs?
"""
assert isinstance(filesystem, str)
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
N_lcs = len(stats)
print('CDIPS LIGHTCURVES STATS FOR SECTOR {}'.format(sector))
print(42*'-')
print('total N_lcs: {}'.format(N_lcs))
for apn in [1,2,3]:
N_nan = len(stats[stats['ndet_tf{}'.format(apn)]==0])
print('for ap {}, {} ({:.1f}%) are all nan, leaving {} ok lcs'.
format(apn, N_nan, N_nan/N_lcs*100, N_lcs-N_nan))
print('\nsanity check: {} TF1 LCs have stdev > 0'.
format(len(stats[stats['stdev_tf1'] > 0])))
print('Finished print_metadata_stats!')
def move_allnan_lcs(sector=None, cdipsvnum=None, filesystem=None):
assert isinstance(filesystem, str)
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
N_lcs = len(stats)
print('CDIPS LIGHTCURVES STATS FOR SECTOR {}'.format(sector))
print(42*'-')
print('total N_lcs: {}'.format(N_lcs))
for apn in [1,2,3]:
N_nan = len(stats[stats['ndet_rm{}'.format(apn)]==0])
print('for ap {}, {} ({:.1f}%) are all nan, leaving {} ok lcs'.
format(apn, N_nan, N_nan/N_lcs*100, N_lcs-N_nan))
print(42*'-')
print('BEGINNING MOVE OF ALLNAN LIGHT CURVES')
sel = (
(stats['ndet_rm1']==0) &
(stats['ndet_rm2']==0) &
(stats['ndet_rm3']==0)
)
nanobjs = stats[sel]['lcobj']
lcdirectory = join(lcdirectory, "cam?_ccd?")
lcnames = [(
'hlsp_cdips_tess_ffi_'
'gaiatwo{zsourceid}-{zsector}-cam{cam}-ccd{ccd}_'
'tess_v{zcdipsvnum}_llc.fits'
).format(
cam='?',
ccd='?',
zsourceid=str(lcgaiaid).zfill(22),
zsector=str(sector).zfill(4),
zcdipsvnum=str(cdipsvnum).zfill(2)
)
for lcgaiaid in nanobjs
]
lcglobs = [os.path.join(lcdirectory, lcname) for lcname in lcnames]
lcpaths = []
for l in lcglobs:
try:
lcpaths.append(glob(l)[0])
except:
pass
dstpaths = [os.path.join(os.path.dirname(l),
'allnanlcs',
os.path.basename(l))
for l in lcpaths]
for src,dst in zip(lcpaths,dstpaths):
dstdir = os.path.dirname(dst)
if not os.path.exists(dstdir):
os.mkdir(dstdir)
try:
shutil.move(src,dst)
print('moved {} -> {}'.format(src,dst))
except FileNotFoundError as e:
if os.path.exists(dst):
pass
else:
print(repr(e))
raise FileNotFoundError
def main(sector, cdipssource_vnum, cdipsvnum, overwrite, get_stats=1,
make_supp_stats=0, print_metadata=1, move_allnan=1, filesystem=None):
assert isinstance(filesystem, str)
if get_stats:
get_cdips_lc_stats(
sector=sector,
cdipssource_vnum=cdipssource_vnum,
nworkers=40,
overwrite=overwrite,
filesystem=filesystem
)
if make_supp_stats:
supplement_stats_file(
cdipssource_vnum=cdipssource_vnum,
sector=sector,
filesystem=filesystem
)
if print_metadata:
print_metadata_stats(
sector=sector,
filesystem=filesystem
)
if move_allnan:
move_allnan_lcs(
sector=sector, cdipsvnum=cdipsvnum, filesystem=filesystem
)
if __name__ == "__main__":
sector=40
cdipssource_vnum=0.6
cdipsvnum=1
overwrite=0
get_stats=0
make_supp_stats=1
print_metadata=0
move_allnan=1
filesystem='wh1'
main(sector, cdipssource_vnum, cdipsvnum, overwrite, get_stats=get_stats,
make_supp_stats=make_supp_stats, print_metadata=print_metadata,
move_allnan=move_allnan, filesystem=filesystem)
| supplement_stats_file | identifier_name |
get_cdips_lc_stats.py | """
* get simple rms vs mag stats for CDIPS LCs
* plot them.
* assess how many all-nan LCs there are.
* move allnan light curves to a graveyard directory to collect dust
* supplement the statsfile by matching against Gaia DR2 and CDIPS catalogs.
usage:
$ (cdips) python -u get_cdips_lc_stats.py |& tee logs/s6_stats_overview_log.txt
NOTE: depends on pipe-trex (--> run in environment with aperturephot on path)
"""
import sys
sys.path.append('/nfs/phtess1/ar1/TESS/PROJ/jhartman/202106_CDIPS/cdips-pipeline')
import pandas as pd, numpy as np
import aperturephot as ap
import os, subprocess, shlex, shutil
from glob import glob
from os.path import join
from cdips.utils import collect_cdips_lightcurves as ccl
def get_cdips_lc_stats(
sector=6,
cdipssource_vnum=None,
nworkers=32,
overwrite=0,
filesystem='phtess2'
):
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
if os.path.exists(statsfile) and not overwrite:
print("found statsfile and not overwrite. skip")
return
lcglob = 'cam?_ccd?/*_llc.fits'
# a cut on OC_MG_FINAL_GaiaRp_lt_16_v0.4.csv to be genfromtxt readable
catalogfile = join(catdir,
f'sourceid_and_photrpmeanmag_v{cdipssource_vnum}.csv' )
if not os.path.exists(catalogfile):
if cdipssource_vnum < 0.6:
cfile = join(catdir,
f'OC_MG_FINAL_GaiaRp_lt_16_v{cdipssource_vnum}.csv')
cdipsdf = pd.read_csv(cfile, sep=';')
else:
cfile = join(catdir,
f'cdips_targets_v{cdipssource_vnum}_gaiasources_Rplt16_orclose.csv')
cdipsdf = pd.read_csv(cfile, sep=',')
outdf = cdipsdf[['source_id','phot_rp_mean_mag']].dropna(axis=0, how='any')
outdf.to_csv(catalogfile, sep=' ', index=False, header=False)
ap.parallel_lc_statistics(lcdirectory, lcglob,
catalogfile, tfalcrequired=True,
epdlcrequired=False,
fitslcnottxt=True,
fovcatcols=(0,1), # objectid, magcol to use
fovcatmaglabel='GRp', outfile=statsfile,
nworkers=nworkers,
workerntasks=500, rmcols=None,
epcols=None, tfcols=None,
rfcols=None, correctioncoeffs=None,
sigclip=5.0, fovcathasgaiaids=True)
ap.plot_stats_file(statsfile, statsdir,
f'sector-{sector} cdips',
binned=False, logy=True, logx=False,
correctmagsafter=None, rangex=(5.9,16),
observatory='tess', fovcathasgaiaids=True,
yaxisval='RMS')
print('Finished get_cdips_lc_stats!')
def supplement_stats_file(
cdipssource_vnum=None,
sector=6,
filesystem=None):
"""
add crossmatching info per line:
* all gaia mags. also gaia extinction and parallax. (also parallax upper
and lower bounds).
* calculated T mag from TICv8 relations
* all the gaia info (especially teff, rstar, etc if available. but also
position ra,dec and x,y, for sky-map plots. rstar to then be used when
applying rstar>5rsun cut in vetting)
* all the CDIPS catalog info (especially the name of the damn cluster)
* all the TIC info (the CROWDING metric, the TICID, and the Tmag)
"""
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
outpath = statsfile.replace('cdips_lc_statistics',
'supplemented_cdips_lc_statistics')
outdir = os.path.dirname(outpath)
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
df = pd.DataFrame(stats)
del stats
lcobjcsv = os.path.join(outdir, 'sector{}_lcobj.csv'.format(sector))
lcobjtxt = os.path.join(outdir, 'sector{}_lcobj.txt'.format(sector))
df['lcobj'].to_csv(lcobjcsv, index=False, header=False)
# run the gaia2read on this list
if not os.path.exists(lcobjtxt):
gaia2readcmd = (
"gaia2read --header --extra --idfile {} > {}".format(
lcobjcsv, lcobjtxt
)
)
returncode = os.system(gaia2readcmd)
if returncode != 0:
raise AssertionError('gaia2read cmd failed!!')
else:
print('ran {}'.format(gaia2readcmd))
# merge statsfile against (most of) gaia dr2
gdf = pd.read_csv(lcobjtxt, delim_whitespace=True)
desiredcols = ['#Gaia-ID[1]', 'RA[deg][2]', 'Dec[deg][3]',
'RAError[mas][4]', 'DecError[mas][5]',
'Parallax[mas][6]', 'Parallax_error[mas][7]',
'PM_RA[mas/yr][8]', 'PM_Dec[mas/year][9]',
'PMRA_error[mas/yr][10]', 'PMDec_error[mas/yr][11]',
'Ref_Epoch[yr][12]', 'phot_g_mean_mag[20]',
'phot_bp_mean_mag[25]', 'phot_rp_mean_mag[30]',
'radial_velocity[32]', 'radial_velocity_error[33]',
'teff_val[35]', 'teff_percentile_lower[36]',
'teff_percentile_upper[37]', 'a_g_val[38]',
'a_g_percentile_lower[39]', 'a_g_percentile_upper[40]',
'e_bp_min_rp_val[41]',
'e_bp_min_rp_percentile_lower[42]',
'e_bp_min_rp_percentile_upper[43]', 'radius_val[44]',
'radius_percentile_lower[45]',
'radius_percentile_upper[46]', 'lum_val[47]',
'lum_percentile_lower[48]', 'lum_percentile_upper[49]']
cgdf = gdf[desiredcols]
df['lcobj'] = df['lcobj'].astype(np.int64)
mdf = df.merge(cgdf, how='left', left_on='lcobj', right_on='#Gaia-ID[1]')
if np.all(pd.isnull(mdf['RA[deg][2]'])):
errmsg = (
'ERR! probably merging against bad temp files!! check gaia2read '
'call, perhaps.'
)
raise AssertionError(errmsg)
del df, cgdf, gdf
# merge against CDIPS catalog info
cdips_df = ccl.get_cdips_pub_catalog(ver=cdipssource_vnum)
if cdipssource_vnum < 0.6:
dcols = (
'cluster;ext_catalog_name;reference;source_id;unique_cluster_name;logt;logt_provenance;comment'
)
dcols = dcols.split(';')
else:
dcols = (
'source_id,ra,dec,parallax,parallax_error,pmra,pmdec,phot_g_mean_mag,phot_rp_mean_mag,phot_bp_mean_mag,cluster,age,mean_age,reference_id,reference_bibcode'
)
dcols = dcols.split(',')
ccdf = cdips_df[dcols]
ccdf['source_id'] = ccdf['source_id'].astype(np.int64)
megadf = mdf.merge(ccdf, how='left', left_on='lcobj', right_on='source_id')
# finally save
megadf.to_csv(outpath, index=False, sep=';')
print('made {}'.format(outpath))
print('Finished supplement_stats_file!')
def print_metadata_stats(sector=6, filesystem=None):
"""
how many LCs?
how many all nan LCs?
"""
assert isinstance(filesystem, str)
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
N_lcs = len(stats)
print('CDIPS LIGHTCURVES STATS FOR SECTOR {}'.format(sector))
print(42*'-')
print('total N_lcs: {}'.format(N_lcs))
for apn in [1,2,3]:
N_nan = len(stats[stats['ndet_tf{}'.format(apn)]==0])
print('for ap {}, {} ({:.1f}%) are all nan, leaving {} ok lcs'.
format(apn, N_nan, N_nan/N_lcs*100, N_lcs-N_nan))
print('\nsanity check: {} TF1 LCs have stdev > 0'.
format(len(stats[stats['stdev_tf1'] > 0])))
print('Finished print_metadata_stats!')
def move_allnan_lcs(sector=None, cdipsvnum=None, filesystem=None):
assert isinstance(filesystem, str)
if filesystem in ['phtess2', 'php1']:
fs = f"/nfs/{filesystem}"
projdir = f'{fs}/ar0/TESS/PROJ/lbouma/cdips'
lcdirectory = f'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{sector}/'
catdir = '/nfs/phtess1/ar1/TESS/PROJ/lbouma/'
elif filesystem in ['wh1', 'wh2']:
projdir = "/ar1/PROJ/luke/proj/cdips"
lcdirectory = f'/ar1/PROJ/luke/proj/CDIPS_LCS/sector-{sector}/'
catdir = '/ar1/local/cdips/catalogs/'
statsdir = join(projdir, 'results', 'cdips_lc_stats', f'sector-{sector}')
if not os.path.exists(statsdir): os.mkdir(statsdir)
statsfile = os.path.join(statsdir,'cdips_lc_statistics.txt')
stats = ap.read_stats_file(statsfile, fovcathasgaiaids=True)
N_lcs = len(stats)
print('CDIPS LIGHTCURVES STATS FOR SECTOR {}'.format(sector))
print(42*'-')
print('total N_lcs: {}'.format(N_lcs))
for apn in [1,2,3]:
N_nan = len(stats[stats['ndet_rm{}'.format(apn)]==0])
print('for ap {}, {} ({:.1f}%) are all nan, leaving {} ok lcs'.
format(apn, N_nan, N_nan/N_lcs*100, N_lcs-N_nan))
print(42*'-')
print('BEGINNING MOVE OF ALLNAN LIGHT CURVES') | (stats['ndet_rm3']==0)
)
nanobjs = stats[sel]['lcobj']
lcdirectory = join(lcdirectory, "cam?_ccd?")
lcnames = [(
'hlsp_cdips_tess_ffi_'
'gaiatwo{zsourceid}-{zsector}-cam{cam}-ccd{ccd}_'
'tess_v{zcdipsvnum}_llc.fits'
).format(
cam='?',
ccd='?',
zsourceid=str(lcgaiaid).zfill(22),
zsector=str(sector).zfill(4),
zcdipsvnum=str(cdipsvnum).zfill(2)
)
for lcgaiaid in nanobjs
]
lcglobs = [os.path.join(lcdirectory, lcname) for lcname in lcnames]
lcpaths = []
for l in lcglobs:
try:
lcpaths.append(glob(l)[0])
except:
pass
dstpaths = [os.path.join(os.path.dirname(l),
'allnanlcs',
os.path.basename(l))
for l in lcpaths]
for src,dst in zip(lcpaths,dstpaths):
dstdir = os.path.dirname(dst)
if not os.path.exists(dstdir):
os.mkdir(dstdir)
try:
shutil.move(src,dst)
print('moved {} -> {}'.format(src,dst))
except FileNotFoundError as e:
if os.path.exists(dst):
pass
else:
print(repr(e))
raise FileNotFoundError
def main(sector, cdipssource_vnum, cdipsvnum, overwrite, get_stats=1,
make_supp_stats=0, print_metadata=1, move_allnan=1, filesystem=None):
assert isinstance(filesystem, str)
if get_stats:
get_cdips_lc_stats(
sector=sector,
cdipssource_vnum=cdipssource_vnum,
nworkers=40,
overwrite=overwrite,
filesystem=filesystem
)
if make_supp_stats:
supplement_stats_file(
cdipssource_vnum=cdipssource_vnum,
sector=sector,
filesystem=filesystem
)
if print_metadata:
print_metadata_stats(
sector=sector,
filesystem=filesystem
)
if move_allnan:
move_allnan_lcs(
sector=sector, cdipsvnum=cdipsvnum, filesystem=filesystem
)
if __name__ == "__main__":
sector=40
cdipssource_vnum=0.6
cdipsvnum=1
overwrite=0
get_stats=0
make_supp_stats=1
print_metadata=0
move_allnan=1
filesystem='wh1'
main(sector, cdipssource_vnum, cdipsvnum, overwrite, get_stats=get_stats,
make_supp_stats=make_supp_stats, print_metadata=print_metadata,
move_allnan=move_allnan, filesystem=filesystem) |
sel = (
(stats['ndet_rm1']==0) &
(stats['ndet_rm2']==0) & | random_line_split |
lane-finder.py | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
from moviepy.editor import VideoFileClip
output_images_dir = './output_images/'
test_images_dir = './test_images/'
output_video_file = 'output.mp4'
mtx = None
dist = None
def load_image(filename):
return mpimg.imread(filename)
def calibrate_camera(rows=6, cols=9):
mtx = None
dist = None
save_file = 'calibration.npz'
try:
data = np.load(save_file)
mtx = data['mtx']
dist = data['dist']
print('using saved calibration')
except FileNotFoundError:
print('begin calibration')
filenames = glob('camera_cal/*.jpg')
objpoints = [] # 3D points in real world space
imgpoints = [] # 2D points in image plane
#Prepare object points, like (0,0,0), (1,0,0)...
objp = np.zeros((rows*cols,3), np.float32)
objp[:,:2] = np.mgrid[0:cols,0:rows].T.reshape(-1,2) # x, y coordinates
for f in filenames:
img = load_image(f)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (cols,rows), None)
if ret:
imgpoints.append(corners)
objpoints.append(objp)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
if ret:
for f in filenames:
img = load_image(f)
undist = cv2.undistort(img, mtx, dist, None, mtx)
save_output_image(undist, 'undistorted-' + f.split('/')[-1])
print('end calibration')
np.savez(save_file, mtx=mtx, dist=dist)
return mtx, dist
def save_output_image(img, filename, cmap=None):
|
def undistort(img):
return cv2.undistort(img, mtx, dist, None, mtx)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
grad_binary = np.zeros_like(scaled_sobel)
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return grad_binary
def color_threshold(img):
#R = img[:,:,0]
#G = img[:,:,1]
#B = img[:,:,2]
#binary = np.zeros_like(R)
#binary[(R > 200) & (G > 160) & ((B < 100) | (B > 200))] = 1
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
H = hls[:,:,0]
L = hls[:,:,1]
S = hls[:,:,2]
binary = np.zeros_like(H)
binary[(((H > 15) & (H < 24) & (S > 90) & (L > 50)) | (L > 220))] = 1
return binary
def window_mask(width, height, img_ref, center,level):
output = np.zeros_like(img_ref)
output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height),max(0,int(center-width/2)):min(int(center+width/2),img_ref.shape[1])] = 1
return output
def find_lr_window_centroids(image, window_width, window_height, margin):
#window_centroids = [] # Store the (left,right) window centroid positions per level
window = np.ones(window_width) # Create our window template that we will use for convolutions
left_centroids = []
right_centroids = []
# First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
# and then np.convolve the vertical image slice with the window template
# Sum quarter bottom of image to get slice, could use a different ratio
l_sum = np.sum(image[int(3*image.shape[0]/4):,:int(image.shape[1]/2)], axis=0)
l_center = np.argmax(np.convolve(window,l_sum))-window_width/2
r_sum = np.sum(image[int(3*image.shape[0]/4):,int(image.shape[1]/2):], axis=0)
r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(image.shape[1]/2)
y_base = int(image.shape[0] - window_height/2)
# Add what we found for the first layer
y_center = y_base
left_centroids.append((l_center, y_center))
right_centroids.append((r_center, y_center))
# Go through each layer looking for max pixel locations
for level in range(1,(int)(image.shape[0]/window_height)):
y_center = int(y_base - (level * window_height))
# convolve the window into the vertical slice of the image
image_layer = np.sum(image[int(image.shape[0]-(level+1)*window_height):int(image.shape[0]-level*window_height),:], axis=0)
conv_signal = np.convolve(window, image_layer)
# Find the best left centroid by using past left center as a reference
# Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
offset = window_width/2
l_min_index = int(max(l_center+offset-margin,0))
l_max_index = int(min(l_center+offset+margin,image.shape[1]))
l_max = np.argmax(conv_signal[l_min_index:l_max_index])
if l_max > 50:
left_centroids.append((l_center, y_center))
l_center = l_max+l_min_index-offset
# Find the best right centroid by using past right center as a reference
r_min_index = int(max(r_center+offset-margin,0))
r_max_index = int(min(r_center+offset+margin,image.shape[1]))
r_max = np.argmax(conv_signal[r_min_index:r_max_index])
if r_max > 50:
right_centroids.append((r_center, y_center))
r_center = r_max+r_min_index-offset
return left_centroids, right_centroids
def draw_window_boxes(img, l_points, r_points, window_width, window_height):
if len(l_points) > 0:
for p in l_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (255,0,0), -1)
if len(r_points) > 0:
for p in r_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (0,255,0), -1)
return img
def draw_window_centroids(warped, window_centroids, window_width = 50, window_height = 80):
if len(window_centroids) > 0:
# Points used to draw all the left and right windows
l_points = np.zeros_like(warped)
r_points = np.zeros_like(warped)
# Go through each level and draw the windows
for level in range(0,len(window_centroids)):
# Window_mask is a function to draw window areas
l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)
r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)
# Add graphic points from window mask here to total pixels found
l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255
r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255
# Draw the results
#template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together
zero_channel = np.zeros_like(l_points) # create a zero color channle
template = np.array(cv2.merge((l_points,r_points,zero_channel)),np.uint8) # make window pixels green
warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels
output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0) # overlay the orignal road image with window results
# If no window centers found, just display orginal road image
else:
output = np.array(cv2.merge((warped,warped,warped)),np.uint8)
return output
def draw_text(img, text, origin):
cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), thickness=2)
def pipeline_image(img, save_images=None, save_suffix='.jpg'):
if save_images:
print('begin pipeline_image', save_suffix)
undistorted = undistort(img)
if save_images:
save_output_image(undistorted, 'undistorted' + save_suffix)
#binary = abs_sobel_thresh(undistorted, orient='x', sobel_kernel=15, thresh=(20,100))
binary = color_threshold(undistorted)
if save_images:
save_output_image(binary, 'binary' + save_suffix, cmap='gray')
img_size = binary.shape[::-1]
src = np.float32(
[[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
[((img_size[0] / 6) - 10), img_size[1]],
[(img_size[0] * 5 / 6) + 60, img_size[1]],
[(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
dst = np.float32(
[[(img_size[0] / 4), 0],
[(img_size[0] / 4), img_size[1]],
[(img_size[0] * 3 / 4), img_size[1]],
[(img_size[0] * 3 / 4), 0]])
if save_images:
cv2.polylines(img, np.int32([src]), True, (255,0,0), thickness=3)
save_output_image(img, 'polygon' + save_suffix)
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)
if save_images:
save_output_image(warped, 'warped' + save_suffix, cmap='gray')
window_width = 40
window_height = 60
#identified lane-line pixels and fit their positions with a polynomial
l_points, r_points = find_lr_window_centroids(warped, window_width, window_height, 100)
global last_l_points, last_r_points
if len(l_points) < 5 and len(last_l_points) > 0:
#print("less than 4 l_points:", len(r_points))
# use the previous points
l_points = last_l_points
else:
last_l_points = l_points
l_points = np.array(l_points, dtype=np.int32)
l_poly = np.polyfit(l_points[:,1], l_points[:,0], 2)
if len(r_points) < 5 and len(last_r_points) > 0:
#print("less than 4 r_points:", len(r_points))
r_points = last_r_points
else:
last_r_points = r_points
r_points = np.array(r_points, dtype=np.int32)
r_poly = np.polyfit(r_points[:,1], r_points[:,0], 2)
yval = np.arange(0, warped.shape[0])
l_xval = np.polyval(l_poly, yval)
r_xval = np.polyval(r_poly, yval)
if save_images:
lanes = warped*255
lanes = np.array(cv2.merge((lanes,lanes,lanes)),np.uint8) # make window pixels green
lanes = draw_window_boxes(lanes, l_points, r_points, window_width, window_height)
for p in l_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for p in r_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for x,y in zip(l_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (255,255,0), -1)
for x,y in zip(r_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (0,255,255), -1)
save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
#calculated the position of the vehicle with respect to center
lane_center_offset_m = (warped.shape[1]/2 - (l_xval[-1] + r_xval[-1])/2) * xm_per_pix
direction = 'Left'
if lane_center_offset_m > 0:
direction = 'Right'
#calculated the radius of curvature of the lane
y_eval = np.max(yval)
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(l_points[:,1]*ym_per_pix, l_points[:,0]*xm_per_pix, 2)
right_fit_cr = np.polyfit(r_points[:,1]*ym_per_pix, r_points[:,0]*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Now our radius of curvature is in meters
#Provide an example image of your result plotted back down onto the road such that the lane area is identified clearly
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([l_xval , yval]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.INTER_LINEAR)
draw_text(undistorted, "Radius: {:.1f}m {:.1f}m".format(left_curverad, right_curverad), (50, 50))
draw_text(undistorted, "{:.3f}m {} of Center".format(abs(lane_center_offset_m), direction), (50, 100))
output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)
if save_images:
save_output_image(output, 'output' + save_suffix)
return output
def process_test_images():
filenames = glob('test_images/*.jpg')
#filenames = ['test_images/test2.jpg']
for f in filenames:
img = load_image(f)
img_out = pipeline_image(img, True, '-' + f.split('/')[-1])
#show_before_after(img, img_out, 'gray')
def process_video(in_file, out_file):
clip = VideoFileClip(in_file)
video_clip = clip.fl_image(pipeline_image)
video_clip.write_videofile(out_file, audio=False)
def show_before_after(before, after, cmap=None):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
ax1.imshow(before)
ax1.set_title('Before')
ax2.imshow(after, cmap=cmap)
ax2.set_title('After')
plt.show()
def show_images(imgs, titles):
fig, axes = plt.subplots(3, 6, figsize=(12, 6))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
for ax, img, title in zip(axes.flat, imgs, titles):
ax.imshow(img)
ax.set_title(title)
plt.show()
last_l_points = []
last_r_points = []
mtx, dist = calibrate_camera()
process_test_images()
process_video('project_video.mp4', 'output.mp4')
process_video('challenge_video.mp4', 'challenge_output.mp4')
process_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')
| mpimg.imsave(output_images_dir + filename, img, cmap=cmap) | identifier_body |
lane-finder.py | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
from moviepy.editor import VideoFileClip
output_images_dir = './output_images/'
test_images_dir = './test_images/'
output_video_file = 'output.mp4'
mtx = None
dist = None
def | (filename):
return mpimg.imread(filename)
def calibrate_camera(rows=6, cols=9):
mtx = None
dist = None
save_file = 'calibration.npz'
try:
data = np.load(save_file)
mtx = data['mtx']
dist = data['dist']
print('using saved calibration')
except FileNotFoundError:
print('begin calibration')
filenames = glob('camera_cal/*.jpg')
objpoints = [] # 3D points in real world space
imgpoints = [] # 2D points in image plane
#Prepare object points, like (0,0,0), (1,0,0)...
objp = np.zeros((rows*cols,3), np.float32)
objp[:,:2] = np.mgrid[0:cols,0:rows].T.reshape(-1,2) # x, y coordinates
for f in filenames:
img = load_image(f)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (cols,rows), None)
if ret:
imgpoints.append(corners)
objpoints.append(objp)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
if ret:
for f in filenames:
img = load_image(f)
undist = cv2.undistort(img, mtx, dist, None, mtx)
save_output_image(undist, 'undistorted-' + f.split('/')[-1])
print('end calibration')
np.savez(save_file, mtx=mtx, dist=dist)
return mtx, dist
def save_output_image(img, filename, cmap=None):
mpimg.imsave(output_images_dir + filename, img, cmap=cmap)
def undistort(img):
return cv2.undistort(img, mtx, dist, None, mtx)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
grad_binary = np.zeros_like(scaled_sobel)
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return grad_binary
def color_threshold(img):
#R = img[:,:,0]
#G = img[:,:,1]
#B = img[:,:,2]
#binary = np.zeros_like(R)
#binary[(R > 200) & (G > 160) & ((B < 100) | (B > 200))] = 1
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
H = hls[:,:,0]
L = hls[:,:,1]
S = hls[:,:,2]
binary = np.zeros_like(H)
binary[(((H > 15) & (H < 24) & (S > 90) & (L > 50)) | (L > 220))] = 1
return binary
def window_mask(width, height, img_ref, center,level):
output = np.zeros_like(img_ref)
output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height),max(0,int(center-width/2)):min(int(center+width/2),img_ref.shape[1])] = 1
return output
def find_lr_window_centroids(image, window_width, window_height, margin):
#window_centroids = [] # Store the (left,right) window centroid positions per level
window = np.ones(window_width) # Create our window template that we will use for convolutions
left_centroids = []
right_centroids = []
# First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
# and then np.convolve the vertical image slice with the window template
# Sum quarter bottom of image to get slice, could use a different ratio
l_sum = np.sum(image[int(3*image.shape[0]/4):,:int(image.shape[1]/2)], axis=0)
l_center = np.argmax(np.convolve(window,l_sum))-window_width/2
r_sum = np.sum(image[int(3*image.shape[0]/4):,int(image.shape[1]/2):], axis=0)
r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(image.shape[1]/2)
y_base = int(image.shape[0] - window_height/2)
# Add what we found for the first layer
y_center = y_base
left_centroids.append((l_center, y_center))
right_centroids.append((r_center, y_center))
# Go through each layer looking for max pixel locations
for level in range(1,(int)(image.shape[0]/window_height)):
y_center = int(y_base - (level * window_height))
# convolve the window into the vertical slice of the image
image_layer = np.sum(image[int(image.shape[0]-(level+1)*window_height):int(image.shape[0]-level*window_height),:], axis=0)
conv_signal = np.convolve(window, image_layer)
# Find the best left centroid by using past left center as a reference
# Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
offset = window_width/2
l_min_index = int(max(l_center+offset-margin,0))
l_max_index = int(min(l_center+offset+margin,image.shape[1]))
l_max = np.argmax(conv_signal[l_min_index:l_max_index])
if l_max > 50:
left_centroids.append((l_center, y_center))
l_center = l_max+l_min_index-offset
# Find the best right centroid by using past right center as a reference
r_min_index = int(max(r_center+offset-margin,0))
r_max_index = int(min(r_center+offset+margin,image.shape[1]))
r_max = np.argmax(conv_signal[r_min_index:r_max_index])
if r_max > 50:
right_centroids.append((r_center, y_center))
r_center = r_max+r_min_index-offset
return left_centroids, right_centroids
def draw_window_boxes(img, l_points, r_points, window_width, window_height):
if len(l_points) > 0:
for p in l_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (255,0,0), -1)
if len(r_points) > 0:
for p in r_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (0,255,0), -1)
return img
def draw_window_centroids(warped, window_centroids, window_width = 50, window_height = 80):
if len(window_centroids) > 0:
# Points used to draw all the left and right windows
l_points = np.zeros_like(warped)
r_points = np.zeros_like(warped)
# Go through each level and draw the windows
for level in range(0,len(window_centroids)):
# Window_mask is a function to draw window areas
l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)
r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)
# Add graphic points from window mask here to total pixels found
l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255
r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255
# Draw the results
#template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together
zero_channel = np.zeros_like(l_points) # create a zero color channle
template = np.array(cv2.merge((l_points,r_points,zero_channel)),np.uint8) # make window pixels green
warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels
output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0) # overlay the orignal road image with window results
# If no window centers found, just display orginal road image
else:
output = np.array(cv2.merge((warped,warped,warped)),np.uint8)
return output
def draw_text(img, text, origin):
cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), thickness=2)
def pipeline_image(img, save_images=None, save_suffix='.jpg'):
if save_images:
print('begin pipeline_image', save_suffix)
undistorted = undistort(img)
if save_images:
save_output_image(undistorted, 'undistorted' + save_suffix)
#binary = abs_sobel_thresh(undistorted, orient='x', sobel_kernel=15, thresh=(20,100))
binary = color_threshold(undistorted)
if save_images:
save_output_image(binary, 'binary' + save_suffix, cmap='gray')
img_size = binary.shape[::-1]
src = np.float32(
[[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
[((img_size[0] / 6) - 10), img_size[1]],
[(img_size[0] * 5 / 6) + 60, img_size[1]],
[(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
dst = np.float32(
[[(img_size[0] / 4), 0],
[(img_size[0] / 4), img_size[1]],
[(img_size[0] * 3 / 4), img_size[1]],
[(img_size[0] * 3 / 4), 0]])
if save_images:
cv2.polylines(img, np.int32([src]), True, (255,0,0), thickness=3)
save_output_image(img, 'polygon' + save_suffix)
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)
if save_images:
save_output_image(warped, 'warped' + save_suffix, cmap='gray')
window_width = 40
window_height = 60
#identified lane-line pixels and fit their positions with a polynomial
l_points, r_points = find_lr_window_centroids(warped, window_width, window_height, 100)
global last_l_points, last_r_points
if len(l_points) < 5 and len(last_l_points) > 0:
#print("less than 4 l_points:", len(r_points))
# use the previous points
l_points = last_l_points
else:
last_l_points = l_points
l_points = np.array(l_points, dtype=np.int32)
l_poly = np.polyfit(l_points[:,1], l_points[:,0], 2)
if len(r_points) < 5 and len(last_r_points) > 0:
#print("less than 4 r_points:", len(r_points))
r_points = last_r_points
else:
last_r_points = r_points
r_points = np.array(r_points, dtype=np.int32)
r_poly = np.polyfit(r_points[:,1], r_points[:,0], 2)
yval = np.arange(0, warped.shape[0])
l_xval = np.polyval(l_poly, yval)
r_xval = np.polyval(r_poly, yval)
if save_images:
lanes = warped*255
lanes = np.array(cv2.merge((lanes,lanes,lanes)),np.uint8) # make window pixels green
lanes = draw_window_boxes(lanes, l_points, r_points, window_width, window_height)
for p in l_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for p in r_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for x,y in zip(l_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (255,255,0), -1)
for x,y in zip(r_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (0,255,255), -1)
save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
#calculated the position of the vehicle with respect to center
lane_center_offset_m = (warped.shape[1]/2 - (l_xval[-1] + r_xval[-1])/2) * xm_per_pix
direction = 'Left'
if lane_center_offset_m > 0:
direction = 'Right'
#calculated the radius of curvature of the lane
y_eval = np.max(yval)
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(l_points[:,1]*ym_per_pix, l_points[:,0]*xm_per_pix, 2)
right_fit_cr = np.polyfit(r_points[:,1]*ym_per_pix, r_points[:,0]*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Now our radius of curvature is in meters
#Provide an example image of your result plotted back down onto the road such that the lane area is identified clearly
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([l_xval , yval]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.INTER_LINEAR)
draw_text(undistorted, "Radius: {:.1f}m {:.1f}m".format(left_curverad, right_curverad), (50, 50))
draw_text(undistorted, "{:.3f}m {} of Center".format(abs(lane_center_offset_m), direction), (50, 100))
output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)
if save_images:
save_output_image(output, 'output' + save_suffix)
return output
def process_test_images():
filenames = glob('test_images/*.jpg')
#filenames = ['test_images/test2.jpg']
for f in filenames:
img = load_image(f)
img_out = pipeline_image(img, True, '-' + f.split('/')[-1])
#show_before_after(img, img_out, 'gray')
def process_video(in_file, out_file):
clip = VideoFileClip(in_file)
video_clip = clip.fl_image(pipeline_image)
video_clip.write_videofile(out_file, audio=False)
def show_before_after(before, after, cmap=None):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
ax1.imshow(before)
ax1.set_title('Before')
ax2.imshow(after, cmap=cmap)
ax2.set_title('After')
plt.show()
def show_images(imgs, titles):
fig, axes = plt.subplots(3, 6, figsize=(12, 6))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
for ax, img, title in zip(axes.flat, imgs, titles):
ax.imshow(img)
ax.set_title(title)
plt.show()
last_l_points = []
last_r_points = []
mtx, dist = calibrate_camera()
process_test_images()
process_video('project_video.mp4', 'output.mp4')
process_video('challenge_video.mp4', 'challenge_output.mp4')
process_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')
| load_image | identifier_name |
lane-finder.py | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
from moviepy.editor import VideoFileClip
output_images_dir = './output_images/'
test_images_dir = './test_images/'
output_video_file = 'output.mp4'
mtx = None
dist = None
def load_image(filename):
return mpimg.imread(filename)
def calibrate_camera(rows=6, cols=9):
mtx = None
dist = None
save_file = 'calibration.npz'
try:
data = np.load(save_file)
mtx = data['mtx']
dist = data['dist']
print('using saved calibration')
except FileNotFoundError:
print('begin calibration')
filenames = glob('camera_cal/*.jpg')
objpoints = [] # 3D points in real world space
imgpoints = [] # 2D points in image plane
#Prepare object points, like (0,0,0), (1,0,0)...
objp = np.zeros((rows*cols,3), np.float32)
objp[:,:2] = np.mgrid[0:cols,0:rows].T.reshape(-1,2) # x, y coordinates
for f in filenames:
img = load_image(f)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (cols,rows), None)
if ret:
|
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
if ret:
for f in filenames:
img = load_image(f)
undist = cv2.undistort(img, mtx, dist, None, mtx)
save_output_image(undist, 'undistorted-' + f.split('/')[-1])
print('end calibration')
np.savez(save_file, mtx=mtx, dist=dist)
return mtx, dist
def save_output_image(img, filename, cmap=None):
mpimg.imsave(output_images_dir + filename, img, cmap=cmap)
def undistort(img):
return cv2.undistort(img, mtx, dist, None, mtx)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
grad_binary = np.zeros_like(scaled_sobel)
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return grad_binary
def color_threshold(img):
#R = img[:,:,0]
#G = img[:,:,1]
#B = img[:,:,2]
#binary = np.zeros_like(R)
#binary[(R > 200) & (G > 160) & ((B < 100) | (B > 200))] = 1
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
H = hls[:,:,0]
L = hls[:,:,1]
S = hls[:,:,2]
binary = np.zeros_like(H)
binary[(((H > 15) & (H < 24) & (S > 90) & (L > 50)) | (L > 220))] = 1
return binary
def window_mask(width, height, img_ref, center,level):
output = np.zeros_like(img_ref)
output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height),max(0,int(center-width/2)):min(int(center+width/2),img_ref.shape[1])] = 1
return output
def find_lr_window_centroids(image, window_width, window_height, margin):
#window_centroids = [] # Store the (left,right) window centroid positions per level
window = np.ones(window_width) # Create our window template that we will use for convolutions
left_centroids = []
right_centroids = []
# First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
# and then np.convolve the vertical image slice with the window template
# Sum quarter bottom of image to get slice, could use a different ratio
l_sum = np.sum(image[int(3*image.shape[0]/4):,:int(image.shape[1]/2)], axis=0)
l_center = np.argmax(np.convolve(window,l_sum))-window_width/2
r_sum = np.sum(image[int(3*image.shape[0]/4):,int(image.shape[1]/2):], axis=0)
r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(image.shape[1]/2)
y_base = int(image.shape[0] - window_height/2)
# Add what we found for the first layer
y_center = y_base
left_centroids.append((l_center, y_center))
right_centroids.append((r_center, y_center))
# Go through each layer looking for max pixel locations
for level in range(1,(int)(image.shape[0]/window_height)):
y_center = int(y_base - (level * window_height))
# convolve the window into the vertical slice of the image
image_layer = np.sum(image[int(image.shape[0]-(level+1)*window_height):int(image.shape[0]-level*window_height),:], axis=0)
conv_signal = np.convolve(window, image_layer)
# Find the best left centroid by using past left center as a reference
# Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
offset = window_width/2
l_min_index = int(max(l_center+offset-margin,0))
l_max_index = int(min(l_center+offset+margin,image.shape[1]))
l_max = np.argmax(conv_signal[l_min_index:l_max_index])
if l_max > 50:
left_centroids.append((l_center, y_center))
l_center = l_max+l_min_index-offset
# Find the best right centroid by using past right center as a reference
r_min_index = int(max(r_center+offset-margin,0))
r_max_index = int(min(r_center+offset+margin,image.shape[1]))
r_max = np.argmax(conv_signal[r_min_index:r_max_index])
if r_max > 50:
right_centroids.append((r_center, y_center))
r_center = r_max+r_min_index-offset
return left_centroids, right_centroids
def draw_window_boxes(img, l_points, r_points, window_width, window_height):
if len(l_points) > 0:
for p in l_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (255,0,0), -1)
if len(r_points) > 0:
for p in r_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (0,255,0), -1)
return img
def draw_window_centroids(warped, window_centroids, window_width = 50, window_height = 80):
if len(window_centroids) > 0:
# Points used to draw all the left and right windows
l_points = np.zeros_like(warped)
r_points = np.zeros_like(warped)
# Go through each level and draw the windows
for level in range(0,len(window_centroids)):
# Window_mask is a function to draw window areas
l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)
r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)
# Add graphic points from window mask here to total pixels found
l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255
r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255
# Draw the results
#template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together
zero_channel = np.zeros_like(l_points) # create a zero color channle
template = np.array(cv2.merge((l_points,r_points,zero_channel)),np.uint8) # make window pixels green
warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels
output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0) # overlay the orignal road image with window results
# If no window centers found, just display orginal road image
else:
output = np.array(cv2.merge((warped,warped,warped)),np.uint8)
return output
def draw_text(img, text, origin):
cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), thickness=2)
def pipeline_image(img, save_images=None, save_suffix='.jpg'):
if save_images:
print('begin pipeline_image', save_suffix)
undistorted = undistort(img)
if save_images:
save_output_image(undistorted, 'undistorted' + save_suffix)
#binary = abs_sobel_thresh(undistorted, orient='x', sobel_kernel=15, thresh=(20,100))
binary = color_threshold(undistorted)
if save_images:
save_output_image(binary, 'binary' + save_suffix, cmap='gray')
img_size = binary.shape[::-1]
src = np.float32(
[[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
[((img_size[0] / 6) - 10), img_size[1]],
[(img_size[0] * 5 / 6) + 60, img_size[1]],
[(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
dst = np.float32(
[[(img_size[0] / 4), 0],
[(img_size[0] / 4), img_size[1]],
[(img_size[0] * 3 / 4), img_size[1]],
[(img_size[0] * 3 / 4), 0]])
if save_images:
cv2.polylines(img, np.int32([src]), True, (255,0,0), thickness=3)
save_output_image(img, 'polygon' + save_suffix)
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)
if save_images:
save_output_image(warped, 'warped' + save_suffix, cmap='gray')
window_width = 40
window_height = 60
#identified lane-line pixels and fit their positions with a polynomial
l_points, r_points = find_lr_window_centroids(warped, window_width, window_height, 100)
global last_l_points, last_r_points
if len(l_points) < 5 and len(last_l_points) > 0:
#print("less than 4 l_points:", len(r_points))
# use the previous points
l_points = last_l_points
else:
last_l_points = l_points
l_points = np.array(l_points, dtype=np.int32)
l_poly = np.polyfit(l_points[:,1], l_points[:,0], 2)
if len(r_points) < 5 and len(last_r_points) > 0:
#print("less than 4 r_points:", len(r_points))
r_points = last_r_points
else:
last_r_points = r_points
r_points = np.array(r_points, dtype=np.int32)
r_poly = np.polyfit(r_points[:,1], r_points[:,0], 2)
yval = np.arange(0, warped.shape[0])
l_xval = np.polyval(l_poly, yval)
r_xval = np.polyval(r_poly, yval)
if save_images:
lanes = warped*255
lanes = np.array(cv2.merge((lanes,lanes,lanes)),np.uint8) # make window pixels green
lanes = draw_window_boxes(lanes, l_points, r_points, window_width, window_height)
for p in l_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for p in r_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for x,y in zip(l_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (255,255,0), -1)
for x,y in zip(r_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (0,255,255), -1)
save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
#calculated the position of the vehicle with respect to center
lane_center_offset_m = (warped.shape[1]/2 - (l_xval[-1] + r_xval[-1])/2) * xm_per_pix
direction = 'Left'
if lane_center_offset_m > 0:
direction = 'Right'
#calculated the radius of curvature of the lane
y_eval = np.max(yval)
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(l_points[:,1]*ym_per_pix, l_points[:,0]*xm_per_pix, 2)
right_fit_cr = np.polyfit(r_points[:,1]*ym_per_pix, r_points[:,0]*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Now our radius of curvature is in meters
#Provide an example image of your result plotted back down onto the road such that the lane area is identified clearly
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([l_xval , yval]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.INTER_LINEAR)
draw_text(undistorted, "Radius: {:.1f}m {:.1f}m".format(left_curverad, right_curverad), (50, 50))
draw_text(undistorted, "{:.3f}m {} of Center".format(abs(lane_center_offset_m), direction), (50, 100))
output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)
if save_images:
save_output_image(output, 'output' + save_suffix)
return output
def process_test_images():
filenames = glob('test_images/*.jpg')
#filenames = ['test_images/test2.jpg']
for f in filenames:
img = load_image(f)
img_out = pipeline_image(img, True, '-' + f.split('/')[-1])
#show_before_after(img, img_out, 'gray')
def process_video(in_file, out_file):
clip = VideoFileClip(in_file)
video_clip = clip.fl_image(pipeline_image)
video_clip.write_videofile(out_file, audio=False)
def show_before_after(before, after, cmap=None):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
ax1.imshow(before)
ax1.set_title('Before')
ax2.imshow(after, cmap=cmap)
ax2.set_title('After')
plt.show()
def show_images(imgs, titles):
fig, axes = plt.subplots(3, 6, figsize=(12, 6))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
for ax, img, title in zip(axes.flat, imgs, titles):
ax.imshow(img)
ax.set_title(title)
plt.show()
last_l_points = []
last_r_points = []
mtx, dist = calibrate_camera()
process_test_images()
process_video('project_video.mp4', 'output.mp4')
process_video('challenge_video.mp4', 'challenge_output.mp4')
process_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')
| imgpoints.append(corners)
objpoints.append(objp) | conditional_block |
lane-finder.py | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
from moviepy.editor import VideoFileClip
output_images_dir = './output_images/'
test_images_dir = './test_images/'
output_video_file = 'output.mp4'
mtx = None
dist = None
def load_image(filename):
return mpimg.imread(filename)
def calibrate_camera(rows=6, cols=9):
mtx = None
dist = None
save_file = 'calibration.npz'
try:
data = np.load(save_file)
mtx = data['mtx']
dist = data['dist']
print('using saved calibration')
except FileNotFoundError:
print('begin calibration')
filenames = glob('camera_cal/*.jpg')
objpoints = [] # 3D points in real world space
imgpoints = [] # 2D points in image plane
#Prepare object points, like (0,0,0), (1,0,0)...
objp = np.zeros((rows*cols,3), np.float32)
objp[:,:2] = np.mgrid[0:cols,0:rows].T.reshape(-1,2) # x, y coordinates
for f in filenames:
img = load_image(f)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (cols,rows), None)
if ret:
imgpoints.append(corners)
objpoints.append(objp)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
if ret:
for f in filenames:
img = load_image(f)
undist = cv2.undistort(img, mtx, dist, None, mtx)
save_output_image(undist, 'undistorted-' + f.split('/')[-1])
print('end calibration')
np.savez(save_file, mtx=mtx, dist=dist)
return mtx, dist
def save_output_image(img, filename, cmap=None):
mpimg.imsave(output_images_dir + filename, img, cmap=cmap)
def undistort(img):
return cv2.undistort(img, mtx, dist, None, mtx)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
grad_binary = np.zeros_like(scaled_sobel)
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return grad_binary
def color_threshold(img):
#R = img[:,:,0]
#G = img[:,:,1]
#B = img[:,:,2]
#binary = np.zeros_like(R)
#binary[(R > 200) & (G > 160) & ((B < 100) | (B > 200))] = 1
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
H = hls[:,:,0]
L = hls[:,:,1]
S = hls[:,:,2]
binary = np.zeros_like(H)
binary[(((H > 15) & (H < 24) & (S > 90) & (L > 50)) | (L > 220))] = 1
return binary
def window_mask(width, height, img_ref, center,level):
output = np.zeros_like(img_ref)
output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height),max(0,int(center-width/2)):min(int(center+width/2),img_ref.shape[1])] = 1
return output
def find_lr_window_centroids(image, window_width, window_height, margin):
#window_centroids = [] # Store the (left,right) window centroid positions per level
window = np.ones(window_width) # Create our window template that we will use for convolutions
left_centroids = []
right_centroids = []
# First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
# and then np.convolve the vertical image slice with the window template
# Sum quarter bottom of image to get slice, could use a different ratio
l_sum = np.sum(image[int(3*image.shape[0]/4):,:int(image.shape[1]/2)], axis=0)
l_center = np.argmax(np.convolve(window,l_sum))-window_width/2 | y_base = int(image.shape[0] - window_height/2)
# Add what we found for the first layer
y_center = y_base
left_centroids.append((l_center, y_center))
right_centroids.append((r_center, y_center))
# Go through each layer looking for max pixel locations
for level in range(1,(int)(image.shape[0]/window_height)):
y_center = int(y_base - (level * window_height))
# convolve the window into the vertical slice of the image
image_layer = np.sum(image[int(image.shape[0]-(level+1)*window_height):int(image.shape[0]-level*window_height),:], axis=0)
conv_signal = np.convolve(window, image_layer)
# Find the best left centroid by using past left center as a reference
# Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
offset = window_width/2
l_min_index = int(max(l_center+offset-margin,0))
l_max_index = int(min(l_center+offset+margin,image.shape[1]))
l_max = np.argmax(conv_signal[l_min_index:l_max_index])
if l_max > 50:
left_centroids.append((l_center, y_center))
l_center = l_max+l_min_index-offset
# Find the best right centroid by using past right center as a reference
r_min_index = int(max(r_center+offset-margin,0))
r_max_index = int(min(r_center+offset+margin,image.shape[1]))
r_max = np.argmax(conv_signal[r_min_index:r_max_index])
if r_max > 50:
right_centroids.append((r_center, y_center))
r_center = r_max+r_min_index-offset
return left_centroids, right_centroids
def draw_window_boxes(img, l_points, r_points, window_width, window_height):
if len(l_points) > 0:
for p in l_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (255,0,0), -1)
if len(r_points) > 0:
for p in r_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (0,255,0), -1)
return img
def draw_window_centroids(warped, window_centroids, window_width = 50, window_height = 80):
if len(window_centroids) > 0:
# Points used to draw all the left and right windows
l_points = np.zeros_like(warped)
r_points = np.zeros_like(warped)
# Go through each level and draw the windows
for level in range(0,len(window_centroids)):
# Window_mask is a function to draw window areas
l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)
r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)
# Add graphic points from window mask here to total pixels found
l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255
r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255
# Draw the results
#template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together
zero_channel = np.zeros_like(l_points) # create a zero color channle
template = np.array(cv2.merge((l_points,r_points,zero_channel)),np.uint8) # make window pixels green
warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels
output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0) # overlay the orignal road image with window results
# If no window centers found, just display orginal road image
else:
output = np.array(cv2.merge((warped,warped,warped)),np.uint8)
return output
def draw_text(img, text, origin):
cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), thickness=2)
def pipeline_image(img, save_images=None, save_suffix='.jpg'):
if save_images:
print('begin pipeline_image', save_suffix)
undistorted = undistort(img)
if save_images:
save_output_image(undistorted, 'undistorted' + save_suffix)
#binary = abs_sobel_thresh(undistorted, orient='x', sobel_kernel=15, thresh=(20,100))
binary = color_threshold(undistorted)
if save_images:
save_output_image(binary, 'binary' + save_suffix, cmap='gray')
img_size = binary.shape[::-1]
src = np.float32(
[[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
[((img_size[0] / 6) - 10), img_size[1]],
[(img_size[0] * 5 / 6) + 60, img_size[1]],
[(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
dst = np.float32(
[[(img_size[0] / 4), 0],
[(img_size[0] / 4), img_size[1]],
[(img_size[0] * 3 / 4), img_size[1]],
[(img_size[0] * 3 / 4), 0]])
if save_images:
cv2.polylines(img, np.int32([src]), True, (255,0,0), thickness=3)
save_output_image(img, 'polygon' + save_suffix)
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)
if save_images:
save_output_image(warped, 'warped' + save_suffix, cmap='gray')
window_width = 40
window_height = 60
#identified lane-line pixels and fit their positions with a polynomial
l_points, r_points = find_lr_window_centroids(warped, window_width, window_height, 100)
global last_l_points, last_r_points
if len(l_points) < 5 and len(last_l_points) > 0:
#print("less than 4 l_points:", len(r_points))
# use the previous points
l_points = last_l_points
else:
last_l_points = l_points
l_points = np.array(l_points, dtype=np.int32)
l_poly = np.polyfit(l_points[:,1], l_points[:,0], 2)
if len(r_points) < 5 and len(last_r_points) > 0:
#print("less than 4 r_points:", len(r_points))
r_points = last_r_points
else:
last_r_points = r_points
r_points = np.array(r_points, dtype=np.int32)
r_poly = np.polyfit(r_points[:,1], r_points[:,0], 2)
yval = np.arange(0, warped.shape[0])
l_xval = np.polyval(l_poly, yval)
r_xval = np.polyval(r_poly, yval)
if save_images:
lanes = warped*255
lanes = np.array(cv2.merge((lanes,lanes,lanes)),np.uint8) # make window pixels green
lanes = draw_window_boxes(lanes, l_points, r_points, window_width, window_height)
for p in l_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for p in r_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for x,y in zip(l_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (255,255,0), -1)
for x,y in zip(r_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (0,255,255), -1)
save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
#calculated the position of the vehicle with respect to center
lane_center_offset_m = (warped.shape[1]/2 - (l_xval[-1] + r_xval[-1])/2) * xm_per_pix
direction = 'Left'
if lane_center_offset_m > 0:
direction = 'Right'
#calculated the radius of curvature of the lane
y_eval = np.max(yval)
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(l_points[:,1]*ym_per_pix, l_points[:,0]*xm_per_pix, 2)
right_fit_cr = np.polyfit(r_points[:,1]*ym_per_pix, r_points[:,0]*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Now our radius of curvature is in meters
#Provide an example image of your result plotted back down onto the road such that the lane area is identified clearly
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([l_xval , yval]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.INTER_LINEAR)
draw_text(undistorted, "Radius: {:.1f}m {:.1f}m".format(left_curverad, right_curverad), (50, 50))
draw_text(undistorted, "{:.3f}m {} of Center".format(abs(lane_center_offset_m), direction), (50, 100))
output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)
if save_images:
save_output_image(output, 'output' + save_suffix)
return output
def process_test_images():
filenames = glob('test_images/*.jpg')
#filenames = ['test_images/test2.jpg']
for f in filenames:
img = load_image(f)
img_out = pipeline_image(img, True, '-' + f.split('/')[-1])
#show_before_after(img, img_out, 'gray')
def process_video(in_file, out_file):
clip = VideoFileClip(in_file)
video_clip = clip.fl_image(pipeline_image)
video_clip.write_videofile(out_file, audio=False)
def show_before_after(before, after, cmap=None):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
ax1.imshow(before)
ax1.set_title('Before')
ax2.imshow(after, cmap=cmap)
ax2.set_title('After')
plt.show()
def show_images(imgs, titles):
fig, axes = plt.subplots(3, 6, figsize=(12, 6))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
for ax, img, title in zip(axes.flat, imgs, titles):
ax.imshow(img)
ax.set_title(title)
plt.show()
last_l_points = []
last_r_points = []
mtx, dist = calibrate_camera()
process_test_images()
process_video('project_video.mp4', 'output.mp4')
process_video('challenge_video.mp4', 'challenge_output.mp4')
process_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4') | r_sum = np.sum(image[int(3*image.shape[0]/4):,int(image.shape[1]/2):], axis=0)
r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(image.shape[1]/2)
| random_line_split |
main.rs | use std::convert::TryInto;
use std::str::FromStr;
use anyhow::{anyhow, Context, Result};
use clap::{ArgEnum, Clap};
use gloryctl::macros::Event;
use gloryctl::{rgb::Effect, ButtonAction, Color, DpiValue, GloriousDevice};
#[derive(Clap)]
pub struct Opts {
#[clap(subcommand)]
cmd: Command,
}
#[derive(Clap)]
enum Command {
/// Dump the firmware version and Config
Dump(Dump),
/// Configure the button mapping
Button(Buttons),
/// Configure DPI profiles
Dpi(Dpi),
/// Configure macros
Macro(Macro),
/// Configure the RGB effect
// This is weird due to https://github.com/clap-rs/clap/issues/2005
Rgb {
#[clap(subcommand)]
rgbcmd: Rgb,
},
}
#[derive(Clap)]
struct Dump {}
#[derive(Clap)]
#[clap(after_help = r"DISCUSSION:
The format of a mapping is button:action-type[:action-params...]
where button is a number from 1 to 6 and action-type:action-params]
is one of the following:
- disable
- mouse:button (button is one of 'left', 'right', 'middle', 'back', 'forward')
- scroll:amount (amount can also be 'up' and 'down', corresponding to 1 and -1)
- repeat:button:count[:interval=50] (button is same as 'mouse', )
- dpi:direction, direction is one of 'loop', 'up', 'down'
- dpi-lock:value
- media:key
- macro:bank
- keyboard:modifiers:key
The provided mappings are always applied over the default configuration,
not the current one. If no mappings are provided, the default configuration
is used.
The default configuration can be represented as:
1:mouse:left 2:mouse:right 3:mouse:middle 4:mouse:back 5:mouse:forward 6:dpi:loop")]
struct Buttons {
mappings: Vec<SingleButton>,
}
struct SingleButton {
which: usize,
action: ButtonAction,
}
impl FromStr for SingleButton {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (btn, act) = s
.split_once(':')
.context("Format: button:action-type[:action-params]")?;
let which = usize::from_str(btn)?;
let action = ButtonAction::from_str(act)?;
Ok(Self { which, action })
}
}
#[derive(Clap)]
#[clap(after_help = r"DISCUSSION:
The mouse has support for 8 dpi profiles, of which each has a
configured dpi value and a color (which is displayed on the LED
on the bottom of the mouse. For example, to change the color
of dpi profile number 3, you could use
gloryctl dpi -c 00ffff 3
At this point, it is not possible to enable or disable profiles.")] // TODO
struct Dpi {
#[clap(possible_values = &["1", "2", "3", "4", "5", "6", "7", "8"])]
which: usize,
#[clap(short, long)]
color: Option<Color>,
#[clap(short, long)]
dpi: Option<u16>,
// TODO independent X and Y
}
#[derive(Clap)]
#[clap(after_help = r"DISCUSSION:
This subcommand can be used to program macros. The first argument
is the bank number. Following is a list of events. Each event has
a format of state:type:key:duration.
- state is either 'up' or 'down'
- type is one of 'keyboard', 'modifier', 'mouse'
- key takes on values depending on type, similar to button mappings
- duration is in milliseconds, how long to pause before continuing")]
struct Macro {
bank: u8,
events: Vec<Event>,
}
#[derive(Clap)]
enum Rgb {
/// Lighting disabled
Off,
/// Rotating rainbow (default for new mice)
Glorious {
#[clap(arg_enum, long, short)]
direction: Option<Direction>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Single color
Single {
#[clap(long, short)]
color: Option<Color>,
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
},
/// Slowly cycles through the given list of colors
Breathing {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
//#[clap(long, short, max_values = 7)]
// we are not using max_values here, because it
// leads to confusing behaviour when more values are passed
#[clap(long, short)]
colors: Vec<Color>,
},
///
Tail {
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Cycle through colors seamlessly
SeamlessBreathing {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Constant color for each of the six LEDs
ConstantRgb {
#[clap(long, short, number_of_values = 6)]
colors: Vec<Color>,
},
/// Switching between two configured colors
Rave {
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
#[clap(long, short, number_of_values = 2)]
colors: Vec<Color>,
},
/// Randomly changing colors
Random {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
Wave {
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Single color breathing
SingleBreathing {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
#[clap(long, short)]
color: Option<Color>,
},
}
#[derive(ArgEnum)]
enum Direction {
Up,
Down,
}
#[derive(ArgEnum)]
enum Speed {
Slow,
Medium,
Fast,
}
#[derive(ArgEnum)]
enum Brightness {
_0,
_25,
_50,
_75,
_100,
}
impl From<&Direction> for u8 {
fn from(d: &Direction) -> u8 {
match d {
Direction::Up => 1,
Direction::Down => 0,
}
}
}
impl From<&Speed> for u8 {
fn from(s: &Speed) -> u8 {
match s {
Speed::Slow => 1,
Speed::Medium => 2,
Speed::Fast => 3,
}
}
}
impl From<&Brightness> for u8 {
fn from(b: &Brightness) -> u8 {
match b {
Brightness::_0 => 0,
Brightness::_25 => 1,
Brightness::_50 => 2,
Brightness::_75 => 3,
Brightness::_100 => 4,
}
}
}
impl Dump {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
dbg!(dev.read_fw_version()?);
dbg!(dev.read_config()?);
//dbg!(dev.read_buttonmap()?);
Ok(())
}
}
impl Buttons {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
let mut map = gloryctl::DEFAULT_MAP;
for b in &self.mappings {
if b.which < 1 || b.which > 6 {
return Err(anyhow!("Invalid button number {}", b.which));
}
let i = b.which - 1;
map[i] = b.action;
}
dev.send_buttonmap(&map)
}
}
impl Dpi {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
let mut conf = dev.read_config()?;
assert!(self.which >= 1 && self.which <= 8);
let i = self.which - 1;
let prof = &mut conf.dpi_profiles[i];
if let Some(color) = self.color {
prof.color = color;
}
if let Some(dpi) = self.dpi {
prof.value = DpiValue::Single(dpi)
}
conf.fixup_dpi_metadata();
dev.send_config(&conf)?;
Ok(())
}
}
impl Macro {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
if self.bank > 3 {
return Err(anyhow!(
r"Only 2 macro banks are supported for now,
TODO find out how many the hardware supports without bricking it"
));
}
dev.send_macro_bank(self.bank, &self.events)
}
}
impl Rgb {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
let mut conf = dev.read_config()?;
match self {
Rgb::Off => {
conf.rgb_current_effect = Effect::Off;
}
Rgb::Glorious { direction, speed } => {
conf.rgb_current_effect = Effect::Glorious;
if let Some(dir) = direction {
conf.rgb_effect_parameters.glorious.direction = dir.into();
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.glorious.speed = spd.into();
}
}
Rgb::Single { color, brightness } => {
conf.rgb_current_effect = Effect::SingleColor;
if let Some(clr) = color {
conf.rgb_effect_parameters.single_color.color = *clr;
}
if let Some(br) = brightness {
conf.rgb_effect_parameters.single_color.brightness = br.into();
}
}
Rgb::Breathing { speed, colors } => {
conf.rgb_current_effect = Effect::Breathing;
if let Some(spd) = speed {
conf.rgb_effect_parameters.breathing.speed = spd.into();
}
if colors.len() > 7 {
return Err(anyhow::Error::msg("At most 7 colors are supported."));
}
if colors.len() > 0 |
}
Rgb::Tail { speed, brightness } => {
conf.rgb_current_effect = Effect::Tail;
if let Some(spd) = speed {
conf.rgb_effect_parameters.tail.speed = spd.into();
}
if let Some(br) = brightness {
conf.rgb_effect_parameters.tail.brightness = br.into();
}
}
Rgb::SeamlessBreathing { speed } => {
conf.rgb_current_effect = Effect::SeamlessBreathing;
if let Some(spd) = speed {
conf.rgb_effect_parameters.seamless_breathing.speed = spd.into();
}
}
Rgb::ConstantRgb { colors } => {
conf.rgb_current_effect = Effect::ConstantRgb;
assert!(colors.len() <= 6);
for (i, c) in colors.iter().enumerate() {
conf.rgb_effect_parameters.constant_rgb.colors[i] = *c;
}
}
Rgb::Rave {
brightness,
speed,
colors,
} => {
conf.rgb_current_effect = Effect::Rave;
if let Some(br) = brightness {
conf.rgb_effect_parameters.rave.brightness = br.into();
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.rave.speed = spd.into();
}
assert!(colors.len() <= 2);
for (i, c) in colors.iter().enumerate() {
conf.rgb_effect_parameters.rave.colors[i] = *c;
}
}
Rgb::Random { speed } => {
conf.rgb_current_effect = Effect::Random;
// HACK: this effect is not available officialy, and it is not properly
// intialized, with the speed set to 0 (which is likely not a valid value,
// as it behaves the same as if 0 is set for the speed of other effects,
// that is the effect is extremely fast).
// Initialize the value if needed.
if conf.rgb_effect_parameters.random.speed == 0 {
conf.rgb_effect_parameters.random.speed = 1;
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.random.speed = spd.into();
}
}
Rgb::Wave { brightness, speed } => {
conf.rgb_current_effect = Effect::Wave;
if let Some(br) = brightness {
conf.rgb_effect_parameters.wave.brightness = br.into();
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.wave.speed = spd.into();
}
}
Rgb::SingleBreathing { speed, color } => {
conf.rgb_current_effect = Effect::SingleBreathing;
if let Some(spd) = speed {
conf.rgb_effect_parameters.single_breathing.speed = spd.into();
}
if let Some(clr) = color {
conf.rgb_effect_parameters.single_breathing.color = *clr;
}
}
};
dev.send_config(&conf)
}
}
fn main() -> Result<()> {
//Dump {}.run()?;
let opts = Opts::parse();
let hid = hidapi::HidApi::new()?;
let mut dev = GloriousDevice::open_first(&hid)?;
dev.send_msg(0x02, 1)?;
match opts.cmd {
Command::Dump(dump) => dump.run(&mut dev),
Command::Button(b) => b.run(&mut dev),
Command::Rgb { rgbcmd } => rgbcmd.run(&mut dev),
Command::Dpi(dpi) => dpi.run(&mut dev),
Command::Macro(macro_) => macro_.run(&mut dev),
}
}
| {
conf.rgb_effect_parameters.breathing.count = colors.len().try_into()?;
for (i, c) in colors.iter().enumerate() {
conf.rgb_effect_parameters.breathing.colors[i] = *c;
}
} | conditional_block |
main.rs | use std::convert::TryInto;
use std::str::FromStr;
use anyhow::{anyhow, Context, Result};
use clap::{ArgEnum, Clap};
use gloryctl::macros::Event;
use gloryctl::{rgb::Effect, ButtonAction, Color, DpiValue, GloriousDevice};
#[derive(Clap)]
pub struct Opts {
#[clap(subcommand)]
cmd: Command,
}
#[derive(Clap)]
enum Command {
/// Dump the firmware version and Config
Dump(Dump),
/// Configure the button mapping
Button(Buttons),
/// Configure DPI profiles
Dpi(Dpi),
/// Configure macros
Macro(Macro),
/// Configure the RGB effect
// This is weird due to https://github.com/clap-rs/clap/issues/2005
Rgb {
#[clap(subcommand)]
rgbcmd: Rgb,
},
}
#[derive(Clap)]
struct Dump {}
#[derive(Clap)]
#[clap(after_help = r"DISCUSSION:
The format of a mapping is button:action-type[:action-params...]
where button is a number from 1 to 6 and action-type:action-params]
is one of the following:
- disable
- mouse:button (button is one of 'left', 'right', 'middle', 'back', 'forward')
- scroll:amount (amount can also be 'up' and 'down', corresponding to 1 and -1)
- repeat:button:count[:interval=50] (button is same as 'mouse', )
- dpi:direction, direction is one of 'loop', 'up', 'down'
- dpi-lock:value
- media:key
- macro:bank
- keyboard:modifiers:key
The provided mappings are always applied over the default configuration,
not the current one. If no mappings are provided, the default configuration
is used.
The default configuration can be represented as:
1:mouse:left 2:mouse:right 3:mouse:middle 4:mouse:back 5:mouse:forward 6:dpi:loop")]
struct Buttons {
mappings: Vec<SingleButton>,
}
struct SingleButton {
which: usize,
action: ButtonAction,
}
impl FromStr for SingleButton {
type Err = anyhow::Error;
fn | (s: &str) -> Result<Self, Self::Err> {
let (btn, act) = s
.split_once(':')
.context("Format: button:action-type[:action-params]")?;
let which = usize::from_str(btn)?;
let action = ButtonAction::from_str(act)?;
Ok(Self { which, action })
}
}
#[derive(Clap)]
#[clap(after_help = r"DISCUSSION:
The mouse has support for 8 dpi profiles, of which each has a
configured dpi value and a color (which is displayed on the LED
on the bottom of the mouse. For example, to change the color
of dpi profile number 3, you could use
gloryctl dpi -c 00ffff 3
At this point, it is not possible to enable or disable profiles.")] // TODO
struct Dpi {
#[clap(possible_values = &["1", "2", "3", "4", "5", "6", "7", "8"])]
which: usize,
#[clap(short, long)]
color: Option<Color>,
#[clap(short, long)]
dpi: Option<u16>,
// TODO independent X and Y
}
#[derive(Clap)]
#[clap(after_help = r"DISCUSSION:
This subcommand can be used to program macros. The first argument
is the bank number. Following is a list of events. Each event has
a format of state:type:key:duration.
- state is either 'up' or 'down'
- type is one of 'keyboard', 'modifier', 'mouse'
- key takes on values depending on type, similar to button mappings
- duration is in milliseconds, how long to pause before continuing")]
struct Macro {
bank: u8,
events: Vec<Event>,
}
#[derive(Clap)]
enum Rgb {
/// Lighting disabled
Off,
/// Rotating rainbow (default for new mice)
Glorious {
#[clap(arg_enum, long, short)]
direction: Option<Direction>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Single color
Single {
#[clap(long, short)]
color: Option<Color>,
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
},
/// Slowly cycles through the given list of colors
Breathing {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
//#[clap(long, short, max_values = 7)]
// we are not using max_values here, because it
// leads to confusing behaviour when more values are passed
#[clap(long, short)]
colors: Vec<Color>,
},
///
Tail {
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Cycle through colors seamlessly
SeamlessBreathing {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Constant color for each of the six LEDs
ConstantRgb {
#[clap(long, short, number_of_values = 6)]
colors: Vec<Color>,
},
/// Switching between two configured colors
Rave {
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
#[clap(long, short, number_of_values = 2)]
colors: Vec<Color>,
},
/// Randomly changing colors
Random {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
Wave {
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Single color breathing
SingleBreathing {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
#[clap(long, short)]
color: Option<Color>,
},
}
#[derive(ArgEnum)]
enum Direction {
Up,
Down,
}
#[derive(ArgEnum)]
enum Speed {
Slow,
Medium,
Fast,
}
#[derive(ArgEnum)]
enum Brightness {
_0,
_25,
_50,
_75,
_100,
}
impl From<&Direction> for u8 {
fn from(d: &Direction) -> u8 {
match d {
Direction::Up => 1,
Direction::Down => 0,
}
}
}
impl From<&Speed> for u8 {
fn from(s: &Speed) -> u8 {
match s {
Speed::Slow => 1,
Speed::Medium => 2,
Speed::Fast => 3,
}
}
}
impl From<&Brightness> for u8 {
fn from(b: &Brightness) -> u8 {
match b {
Brightness::_0 => 0,
Brightness::_25 => 1,
Brightness::_50 => 2,
Brightness::_75 => 3,
Brightness::_100 => 4,
}
}
}
impl Dump {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
dbg!(dev.read_fw_version()?);
dbg!(dev.read_config()?);
//dbg!(dev.read_buttonmap()?);
Ok(())
}
}
impl Buttons {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
let mut map = gloryctl::DEFAULT_MAP;
for b in &self.mappings {
if b.which < 1 || b.which > 6 {
return Err(anyhow!("Invalid button number {}", b.which));
}
let i = b.which - 1;
map[i] = b.action;
}
dev.send_buttonmap(&map)
}
}
impl Dpi {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
let mut conf = dev.read_config()?;
assert!(self.which >= 1 && self.which <= 8);
let i = self.which - 1;
let prof = &mut conf.dpi_profiles[i];
if let Some(color) = self.color {
prof.color = color;
}
if let Some(dpi) = self.dpi {
prof.value = DpiValue::Single(dpi)
}
conf.fixup_dpi_metadata();
dev.send_config(&conf)?;
Ok(())
}
}
impl Macro {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
if self.bank > 3 {
return Err(anyhow!(
r"Only 2 macro banks are supported for now,
TODO find out how many the hardware supports without bricking it"
));
}
dev.send_macro_bank(self.bank, &self.events)
}
}
impl Rgb {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
let mut conf = dev.read_config()?;
match self {
Rgb::Off => {
conf.rgb_current_effect = Effect::Off;
}
Rgb::Glorious { direction, speed } => {
conf.rgb_current_effect = Effect::Glorious;
if let Some(dir) = direction {
conf.rgb_effect_parameters.glorious.direction = dir.into();
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.glorious.speed = spd.into();
}
}
Rgb::Single { color, brightness } => {
conf.rgb_current_effect = Effect::SingleColor;
if let Some(clr) = color {
conf.rgb_effect_parameters.single_color.color = *clr;
}
if let Some(br) = brightness {
conf.rgb_effect_parameters.single_color.brightness = br.into();
}
}
Rgb::Breathing { speed, colors } => {
conf.rgb_current_effect = Effect::Breathing;
if let Some(spd) = speed {
conf.rgb_effect_parameters.breathing.speed = spd.into();
}
if colors.len() > 7 {
return Err(anyhow::Error::msg("At most 7 colors are supported."));
}
if colors.len() > 0 {
conf.rgb_effect_parameters.breathing.count = colors.len().try_into()?;
for (i, c) in colors.iter().enumerate() {
conf.rgb_effect_parameters.breathing.colors[i] = *c;
}
}
}
Rgb::Tail { speed, brightness } => {
conf.rgb_current_effect = Effect::Tail;
if let Some(spd) = speed {
conf.rgb_effect_parameters.tail.speed = spd.into();
}
if let Some(br) = brightness {
conf.rgb_effect_parameters.tail.brightness = br.into();
}
}
Rgb::SeamlessBreathing { speed } => {
conf.rgb_current_effect = Effect::SeamlessBreathing;
if let Some(spd) = speed {
conf.rgb_effect_parameters.seamless_breathing.speed = spd.into();
}
}
Rgb::ConstantRgb { colors } => {
conf.rgb_current_effect = Effect::ConstantRgb;
assert!(colors.len() <= 6);
for (i, c) in colors.iter().enumerate() {
conf.rgb_effect_parameters.constant_rgb.colors[i] = *c;
}
}
Rgb::Rave {
brightness,
speed,
colors,
} => {
conf.rgb_current_effect = Effect::Rave;
if let Some(br) = brightness {
conf.rgb_effect_parameters.rave.brightness = br.into();
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.rave.speed = spd.into();
}
assert!(colors.len() <= 2);
for (i, c) in colors.iter().enumerate() {
conf.rgb_effect_parameters.rave.colors[i] = *c;
}
}
Rgb::Random { speed } => {
conf.rgb_current_effect = Effect::Random;
// HACK: this effect is not available officialy, and it is not properly
// intialized, with the speed set to 0 (which is likely not a valid value,
// as it behaves the same as if 0 is set for the speed of other effects,
// that is the effect is extremely fast).
// Initialize the value if needed.
if conf.rgb_effect_parameters.random.speed == 0 {
conf.rgb_effect_parameters.random.speed = 1;
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.random.speed = spd.into();
}
}
Rgb::Wave { brightness, speed } => {
conf.rgb_current_effect = Effect::Wave;
if let Some(br) = brightness {
conf.rgb_effect_parameters.wave.brightness = br.into();
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.wave.speed = spd.into();
}
}
Rgb::SingleBreathing { speed, color } => {
conf.rgb_current_effect = Effect::SingleBreathing;
if let Some(spd) = speed {
conf.rgb_effect_parameters.single_breathing.speed = spd.into();
}
if let Some(clr) = color {
conf.rgb_effect_parameters.single_breathing.color = *clr;
}
}
};
dev.send_config(&conf)
}
}
fn main() -> Result<()> {
//Dump {}.run()?;
let opts = Opts::parse();
let hid = hidapi::HidApi::new()?;
let mut dev = GloriousDevice::open_first(&hid)?;
dev.send_msg(0x02, 1)?;
match opts.cmd {
Command::Dump(dump) => dump.run(&mut dev),
Command::Button(b) => b.run(&mut dev),
Command::Rgb { rgbcmd } => rgbcmd.run(&mut dev),
Command::Dpi(dpi) => dpi.run(&mut dev),
Command::Macro(macro_) => macro_.run(&mut dev),
}
}
| from_str | identifier_name |
main.rs | use std::convert::TryInto;
use std::str::FromStr;
use anyhow::{anyhow, Context, Result};
use clap::{ArgEnum, Clap};
use gloryctl::macros::Event;
use gloryctl::{rgb::Effect, ButtonAction, Color, DpiValue, GloriousDevice};
#[derive(Clap)]
pub struct Opts {
#[clap(subcommand)]
cmd: Command,
}
#[derive(Clap)]
enum Command {
/// Dump the firmware version and Config
Dump(Dump),
/// Configure the button mapping
Button(Buttons),
/// Configure DPI profiles
Dpi(Dpi),
/// Configure macros
Macro(Macro),
/// Configure the RGB effect
// This is weird due to https://github.com/clap-rs/clap/issues/2005
Rgb {
#[clap(subcommand)]
rgbcmd: Rgb,
},
}
#[derive(Clap)]
struct Dump {}
#[derive(Clap)]
#[clap(after_help = r"DISCUSSION:
The format of a mapping is button:action-type[:action-params...]
where button is a number from 1 to 6 and action-type:action-params]
is one of the following:
- disable
- mouse:button (button is one of 'left', 'right', 'middle', 'back', 'forward')
- scroll:amount (amount can also be 'up' and 'down', corresponding to 1 and -1)
- repeat:button:count[:interval=50] (button is same as 'mouse', )
- dpi:direction, direction is one of 'loop', 'up', 'down'
- dpi-lock:value
- media:key
- macro:bank
- keyboard:modifiers:key
The provided mappings are always applied over the default configuration,
not the current one. If no mappings are provided, the default configuration
is used.
The default configuration can be represented as:
1:mouse:left 2:mouse:right 3:mouse:middle 4:mouse:back 5:mouse:forward 6:dpi:loop")]
struct Buttons {
mappings: Vec<SingleButton>,
}
struct SingleButton {
which: usize,
action: ButtonAction,
}
impl FromStr for SingleButton {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (btn, act) = s
.split_once(':')
.context("Format: button:action-type[:action-params]")?;
let which = usize::from_str(btn)?;
let action = ButtonAction::from_str(act)?;
Ok(Self { which, action })
}
}
#[derive(Clap)]
#[clap(after_help = r"DISCUSSION:
The mouse has support for 8 dpi profiles, of which each has a
configured dpi value and a color (which is displayed on the LED
on the bottom of the mouse. For example, to change the color
of dpi profile number 3, you could use
gloryctl dpi -c 00ffff 3
At this point, it is not possible to enable or disable profiles.")] // TODO
struct Dpi {
#[clap(possible_values = &["1", "2", "3", "4", "5", "6", "7", "8"])]
which: usize,
#[clap(short, long)]
color: Option<Color>,
#[clap(short, long)]
dpi: Option<u16>,
// TODO independent X and Y
}
#[derive(Clap)]
#[clap(after_help = r"DISCUSSION:
This subcommand can be used to program macros. The first argument
is the bank number. Following is a list of events. Each event has
a format of state:type:key:duration.
- state is either 'up' or 'down'
- type is one of 'keyboard', 'modifier', 'mouse'
- key takes on values depending on type, similar to button mappings
- duration is in milliseconds, how long to pause before continuing")]
struct Macro {
bank: u8,
events: Vec<Event>,
}
#[derive(Clap)]
enum Rgb {
/// Lighting disabled
Off,
/// Rotating rainbow (default for new mice)
Glorious {
#[clap(arg_enum, long, short)]
direction: Option<Direction>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Single color
Single {
#[clap(long, short)]
color: Option<Color>,
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
},
/// Slowly cycles through the given list of colors
Breathing {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
//#[clap(long, short, max_values = 7)]
// we are not using max_values here, because it
// leads to confusing behaviour when more values are passed
#[clap(long, short)]
colors: Vec<Color>,
},
///
Tail {
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Cycle through colors seamlessly
SeamlessBreathing {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Constant color for each of the six LEDs
ConstantRgb {
#[clap(long, short, number_of_values = 6)]
colors: Vec<Color>,
},
/// Switching between two configured colors
Rave {
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
#[clap(long, short, number_of_values = 2)]
colors: Vec<Color>,
},
/// Randomly changing colors
Random {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
Wave {
#[clap(arg_enum, long, short)]
brightness: Option<Brightness>,
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
},
/// Single color breathing
SingleBreathing {
#[clap(arg_enum, long, short)]
speed: Option<Speed>,
#[clap(long, short)]
color: Option<Color>,
},
}
#[derive(ArgEnum)]
enum Direction {
Up,
Down,
}
#[derive(ArgEnum)]
enum Speed {
Slow,
Medium,
Fast,
}
#[derive(ArgEnum)]
enum Brightness {
_0,
_25,
_50,
_75,
_100,
}
impl From<&Direction> for u8 {
fn from(d: &Direction) -> u8 {
match d {
Direction::Up => 1,
Direction::Down => 0,
}
}
}
impl From<&Speed> for u8 {
fn from(s: &Speed) -> u8 {
match s {
Speed::Slow => 1,
Speed::Medium => 2,
Speed::Fast => 3,
}
}
}
impl From<&Brightness> for u8 {
fn from(b: &Brightness) -> u8 {
match b {
Brightness::_0 => 0,
Brightness::_25 => 1,
Brightness::_50 => 2,
Brightness::_75 => 3,
Brightness::_100 => 4,
}
}
}
impl Dump {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
dbg!(dev.read_fw_version()?);
dbg!(dev.read_config()?);
//dbg!(dev.read_buttonmap()?);
Ok(())
}
}
impl Buttons {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
let mut map = gloryctl::DEFAULT_MAP;
for b in &self.mappings {
if b.which < 1 || b.which > 6 {
return Err(anyhow!("Invalid button number {}", b.which));
}
let i = b.which - 1;
map[i] = b.action;
}
dev.send_buttonmap(&map)
}
}
impl Dpi {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
let mut conf = dev.read_config()?;
assert!(self.which >= 1 && self.which <= 8);
let i = self.which - 1;
let prof = &mut conf.dpi_profiles[i];
if let Some(color) = self.color {
prof.color = color;
}
if let Some(dpi) = self.dpi {
prof.value = DpiValue::Single(dpi)
} | }
}
impl Macro {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
if self.bank > 3 {
return Err(anyhow!(
r"Only 2 macro banks are supported for now,
TODO find out how many the hardware supports without bricking it"
));
}
dev.send_macro_bank(self.bank, &self.events)
}
}
impl Rgb {
fn run(&self, dev: &mut GloriousDevice) -> Result<()> {
let mut conf = dev.read_config()?;
match self {
Rgb::Off => {
conf.rgb_current_effect = Effect::Off;
}
Rgb::Glorious { direction, speed } => {
conf.rgb_current_effect = Effect::Glorious;
if let Some(dir) = direction {
conf.rgb_effect_parameters.glorious.direction = dir.into();
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.glorious.speed = spd.into();
}
}
Rgb::Single { color, brightness } => {
conf.rgb_current_effect = Effect::SingleColor;
if let Some(clr) = color {
conf.rgb_effect_parameters.single_color.color = *clr;
}
if let Some(br) = brightness {
conf.rgb_effect_parameters.single_color.brightness = br.into();
}
}
Rgb::Breathing { speed, colors } => {
conf.rgb_current_effect = Effect::Breathing;
if let Some(spd) = speed {
conf.rgb_effect_parameters.breathing.speed = spd.into();
}
if colors.len() > 7 {
return Err(anyhow::Error::msg("At most 7 colors are supported."));
}
if colors.len() > 0 {
conf.rgb_effect_parameters.breathing.count = colors.len().try_into()?;
for (i, c) in colors.iter().enumerate() {
conf.rgb_effect_parameters.breathing.colors[i] = *c;
}
}
}
Rgb::Tail { speed, brightness } => {
conf.rgb_current_effect = Effect::Tail;
if let Some(spd) = speed {
conf.rgb_effect_parameters.tail.speed = spd.into();
}
if let Some(br) = brightness {
conf.rgb_effect_parameters.tail.brightness = br.into();
}
}
Rgb::SeamlessBreathing { speed } => {
conf.rgb_current_effect = Effect::SeamlessBreathing;
if let Some(spd) = speed {
conf.rgb_effect_parameters.seamless_breathing.speed = spd.into();
}
}
Rgb::ConstantRgb { colors } => {
conf.rgb_current_effect = Effect::ConstantRgb;
assert!(colors.len() <= 6);
for (i, c) in colors.iter().enumerate() {
conf.rgb_effect_parameters.constant_rgb.colors[i] = *c;
}
}
Rgb::Rave {
brightness,
speed,
colors,
} => {
conf.rgb_current_effect = Effect::Rave;
if let Some(br) = brightness {
conf.rgb_effect_parameters.rave.brightness = br.into();
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.rave.speed = spd.into();
}
assert!(colors.len() <= 2);
for (i, c) in colors.iter().enumerate() {
conf.rgb_effect_parameters.rave.colors[i] = *c;
}
}
Rgb::Random { speed } => {
conf.rgb_current_effect = Effect::Random;
// HACK: this effect is not available officialy, and it is not properly
// intialized, with the speed set to 0 (which is likely not a valid value,
// as it behaves the same as if 0 is set for the speed of other effects,
// that is the effect is extremely fast).
// Initialize the value if needed.
if conf.rgb_effect_parameters.random.speed == 0 {
conf.rgb_effect_parameters.random.speed = 1;
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.random.speed = spd.into();
}
}
Rgb::Wave { brightness, speed } => {
conf.rgb_current_effect = Effect::Wave;
if let Some(br) = brightness {
conf.rgb_effect_parameters.wave.brightness = br.into();
}
if let Some(spd) = speed {
conf.rgb_effect_parameters.wave.speed = spd.into();
}
}
Rgb::SingleBreathing { speed, color } => {
conf.rgb_current_effect = Effect::SingleBreathing;
if let Some(spd) = speed {
conf.rgb_effect_parameters.single_breathing.speed = spd.into();
}
if let Some(clr) = color {
conf.rgb_effect_parameters.single_breathing.color = *clr;
}
}
};
dev.send_config(&conf)
}
}
fn main() -> Result<()> {
//Dump {}.run()?;
let opts = Opts::parse();
let hid = hidapi::HidApi::new()?;
let mut dev = GloriousDevice::open_first(&hid)?;
dev.send_msg(0x02, 1)?;
match opts.cmd {
Command::Dump(dump) => dump.run(&mut dev),
Command::Button(b) => b.run(&mut dev),
Command::Rgb { rgbcmd } => rgbcmd.run(&mut dev),
Command::Dpi(dpi) => dpi.run(&mut dev),
Command::Macro(macro_) => macro_.run(&mut dev),
}
} |
conf.fixup_dpi_metadata();
dev.send_config(&conf)?;
Ok(()) | random_line_split |
pickup-search.component.ts | import {
Component,
Input,
Output,
ViewEncapsulation,
EventEmitter
} from '@angular/core';
import { Router, ActivatedRoute } from '@angular/router';
import { Subject } from 'rxjs/Subject';
import { map } from 'rxjs/operators/map';
import { Observable } from 'rxjs/Observable';
import { Subscription } from "rxjs";
import { Angular2Csv } from 'angular2-csv';
import { RowClickTypeEnum } from '@ukmjkim/aid-data-table';
import { AidLoggerService, AidDialogService, AidMessageIndicatorService, AidIndicatorParams, AidDateFormatPipe, AidGlobalConfig, AidAutoCompleteSearchDto } from '@ukmjkim/aid-lib-services';
import { AppMainPermissionType } from '../../models/app-main-permission-type.enum';
import { AppMainState } from '../../models/app-main-state';
import { RemoteSearchBaseComponent } from '@ukmjkim/aid-data-table';
import { PickupService } from '../../services/pickup.service';
import { PickupSearchService } from '../../services/pickup-search.service';
import { Pickup } from '../../models/pickup';
import { PickupSearchDto } from '../../models/pickup-search-dto';
import { PickupSubItem } from '../../models/pickup-sub-item';
import { PickupItemType } from '../../models/pickup-item-type.enum';
import { PickupColumnVariableName, pickupSearchableFields } from './conf/pickup-column-variable-name.conf';
import { PickupSearchFilterConfigurator } from './conf/pickup-search-filter-configurator';
import { PickupSearchViewEnum } from './conf/pickup-search-view.enum';
import { PickupSearchDefaultColumnsMap } from './conf/pickup-search-view-template';
import { pickupDownloadFields } from './conf/pickup-column-variable-name.conf';
import { PickupSearchDatasource } from './pickup-search-datasource';
import { PickupSearchQueryService } from './pickup-search-query.service';
import { MatDialog } from '@angular/material';
import {
New,
InProgress,
Completed,
Canceled
} from '../../constants/pickup-dashboard-state';
import { PickupActionType } from '../../constants/pickup-action-type';
import { PickupLinkType } from '../../constants/link-type';
@Component({
selector: 'aid-pickup-search',
templateUrl: './pickup-search.component.html',
styleUrls: ['./pickup-search.component.scss'],
encapsulation: ViewEncapsulation.None
})
export class PickupSearchComponent extends RemoteSearchBaseComponent<PickupSearchDto> {
@Input() id: string;
@Input() appMainState: AppMainState;
@Input('enableRowCheckbox') enableRowCheckbox: boolean = true;
@Output() selectedRows: EventEmitter<number[]> = new EventEmitter<number[]>();
readonly PickupItemType = PickupItemType;
readonly dateFormatPipe = new AidDateFormatPipe();
public isAdvancedShown: boolean = false;
public enableRowAction: boolean = false;
public permissionName: string;
dataLoadedCallback: Subject<any> = new Subject<any>();
private eventListInSite: { id: number, name: string }[];
private actionIndicatorSubscription: Subscription;
constructor(private router: Router,
private route: ActivatedRoute,
private aidMessageIndicatorService: AidMessageIndicatorService,
private pickupSearchService: PickupSearchService,
private pickupSearchQueryService: PickupSearchQueryService,
private dialog: MatDialog,
private logger: AidLoggerService,
private dialogService: AidDialogService,
public pickupService: PickupService) {
super(pickupSearchQueryService, true);
this.subscribeMessageIndicator();
}
// implement abstract method - checkPrivilege
checkPrivilege(): boolean {
return true;
}
// implement abstract method - watchPredefinedGlobalSearchFilter
watchPredefinedGlobalSearchFilter() {
this.predefinedGlobalSearchFilter().subscribe(keyword => {
this.search = keyword;
})
}
// implement abstract method - loadPrerequisiteData
loadPrerequisiteData() {
this.pickupSearchService.setUXOnly(this.appMainState.isUXOnly);
this.pickupService.setUXOnly(this.appMainState.isUXOnly);
this.pickupSearchQueryService.setAppMainState(this.appMainState);
this.pickupSearchQueryService.loadConfiguration();
this.pickupSearchService.getPickupSearchRange(this.appMainState.siteId, this.appMainState.eventId)
.pipe(
)
.subscribe(response => {
const filterRangeResults = response as Array<any>;
this.prerequisiteData.next(filterRangeResults);
});
}
// implement abstract method - setupFilterConfiguratorAndDataSource
setupFilterConfiguratorAndDataSource() {
this.searchGroupFieldName = 'groupId';
this.searchGroupFieldId = this.appMainState.eventId && this.appMainState.eventId > 0 ? this.appMainState.eventId : this.appMainState.siteId;
this.filterConfigurator = new PickupSearchFilterConfigurator();
this.dataSource = new PickupSearchDatasource(
this.appMainState,
this.searchGroupFieldName,
this.searchGroupFieldId,
pickupSearchableFields,
this.pickupSearchService,
this.filterConfigurator,
this.dialogService);
}
// implement abstract method - setupFiltersDependOnSearchRange
setupFiltersDependOnSearchRange(filterRangeResults) {
this.logger.info('setupFiltersDependOnSearchRange filterRangeResults', filterRangeResults);
if (filterRangeResults.length > 0) {
this.setupNumberRangeFilters(filterRangeResults[0]);
this.setupDateRangeFilters(filterRangeResults[0]);
this.setupTextRangeFilters(filterRangeResults[0]);
}
this.eventListInSite = new Array<{ id: number, name: string }>();
if (filterRangeResults && filterRangeResults.length > 1) {
filterRangeResults[1].forEach((saleEvent: AidAutoCompleteSearchDto, index) => {
this.eventListInSite.push({ id: Number(saleEvent.value), name: saleEvent.description });
});
}
}
// implement abstract method - setDefaultDisplayedColumns
setDefaultDisplayedColumns() {
if (!this.displayedColumns || this.displayedColumns.length === 0) {
let searchView = this.selectedView;
if (PickupSearchDefaultColumnsMap.has(searchView)) {
this.displayedColumns = PickupSearchDefaultColumnsMap.get(searchView).columns.slice();
} else {
console.error('Cannot find given asset search view template', searchView);
this.displayedColumns = PickupSearchDefaultColumnsMap.get(String(PickupSearchViewEnum.Default)).columns.slice();
}
}
}
// implement abstract method - setupPicklistPreSelected
setupPicklistPreSelected(column: string) {
this.setupPicklistFilterChoices(PickupColumnVariableName[column], []);
}
private setupNumberRangeFilters(data: any) {
if (data.numberOfItems) {
this.filterConfigurator
.addFilter(PickupColumnVariableName.numberOfItems, { min: data.numberOfItems[0], max: data.numberOfItems[1] });
}
}
private setupDateRangeFilters(data: any) {
if (data.createdTS) {
this.filterConfigurator
.addFilter(PickupColumnVariableName.createdTS, { min: data.createdTS[0], max: data.createdTS[1] });
}
}
private setupTextRangeFilters(data: any) {
}
// implement abstract method - watchData
watchData(): void {
this.dataSource.data().subscribe(data => {
if (!this.dataLoadedCallback.isStopped) this.dataLoadedCallback.next();
if (this.queryService.getGlobalSearchFilter.length === 0) {
this.search = '';
}
if (data.length > 0) {
this.queryService.setSearchResult(JSON.stringify(data));
}
});
}
// implement abstract method - watchSelectedRows
watchSelectedRows(): void {
this.dataSource.selection().subscribe((selectedItems) => {
if (selectedItems.length < this.numberOfItemsPerPage) {
this.allItemsSelected = false;
}
this.selectedItems = selectedItems;
this.updateNumberOfSelectedItems();
let selectedItemList: number[] = [];
selectedItems.forEach(item => {
selectedItemList.push(item.pickupId);
});
if (selectedItems.length > 0) {
this.enableRowAction = true;
} else {
this.enableRowAction = false;
}
this.selectedRows.emit(selectedItemList);
});
}
// implement abstract method - watchRowClicked
watchRowClicked() {
this.dataSource.rowClick().subscribe((row) => {
if (row) {
this.openGoToPage(row.data.pickupId, this.dataSource.getRowClickType());
}
})
}
// implement abstract method - watchRowExpanded
watchRowExpanded() {
this.dataSource.rowExpanded().subscribe((row) => {
if (row && row.data.numberOfItems > 0) {
if (!row.data.items || row.data.items.length === 0) {
this.logger.info('Lazy Loading for Pickups Lots...');
this.getSubTableData(row.data.pickupId).subscribe(subitems => {
row.data.items = subitems;
});
}
}
});
}
private getSubTableData(orderNumber: number): Observable<PickupSubItem[]> {
this.dialogService.showProgress();
return this.pickupSearchService.getPickupSubItemsByOrderNumber(this.appMainState.siteId, orderNumber)
.pipe(
map(lineItemList => {
this.dialogService.hideProgress();
return lineItemList;
}));
}
// override class method - watchTotalNumberOfItems
watchTotalNumberOfItems(): void {
this.dataSource.rowCount().subscribe(numberOfItems => this.numberOfItems = numberOfItems);
this.updateNumberOfSelectedItems();
}
applyGlobalSearch() {
if (this.search) {
this.dataSource.requestGlobalSearchFilter(this.search);
this.queryService.addGlobalSearchFilterValue(this.search);
}
this.callSearchAPI();
}
/* ====================================================================== */
/* EXPORT DATA TO EXCEL - START */
exportToExcel() {
this.dialogService.showProgress();
let searchQuery = this.dataSource.getCurrentSearchCriteria(this.searchGroupFieldName, this.searchGroupFieldId, 0).getSearchCriteriaString();
this.pickupSearchService.getPickupSearchById(this.appMainState.siteId, this.appMainState.eventId, searchQuery)
.pipe(
map(data => PickupSearchDto.constructFromSearchListJson(data)),
map(poList => poList.map(data => this.constructExportRow(data)))
)
.subscribe(data => {
//Create an array of headers that matches what the UI shows to be fed into the export
if (data.length > 0) {
let headers = [];
let headerVariableToName = {};
pickupDownloadFields.forEach((column) => {
headerVariableToName[column.field] = column.header;
});
// Populate the header row by using column variable name
for (let prop in data[0]) {
headers.push(headerVariableToName[prop]);
}
// We use a 3rd party library called Angular2Csv that takes in a JSON array of data, a file name and options
new Angular2Csv(data, this.appMainState.siteName + '-Pickup_List', { headers: headers });
}
//this.dialogService.hideProgress();
});
}
| (po: PickupSearchDto) {
let retval = {};
let array = pickupDownloadFields;
for (let col of array) {
if (po.hasOwnProperty(col.field)) {
let val = po[col.field];
if (col.field === 'createdTS') {
retval[col.field] = this.dateFormatPipe.transformDate(val);
} else {
retval[col.field] = val;
}
} else {
retval[col.field] = '';
}
}
return retval;
}
/* EXPORT DATA TO EXCEL - END */
/* ====================================================================== */
toggleAdvancedSearch() {
this.isAdvancedShown = !this.isAdvancedShown;
return this.isAdvancedShown;
}
openGoToPage(id: number, rowClickType: RowClickTypeEnum): void {
this.queryService.setViewedEquipId(id);
if (rowClickType === RowClickTypeEnum.CTRL || rowClickType === RowClickTypeEnum.RIGHT_CLICK_NEW_TAB) {
window.open(this.getGoToLink(id, true), '_blank');
} else if (rowClickType === RowClickTypeEnum.RIGHT_CLICK_NEW_WINDOW) {
window.open(this.getGoToLink(id, true), '_blank', AidGlobalConfig.newWindowOptions);
} else {
this.router.navigateByUrl(this.getGoToLink(id, false));
}
}
getGoToLink(orderNumber: number, isNewTab: boolean) {
let url: string;
if (this.appMainState.eventId && this.appMainState.eventId !== undefined && this.appMainState.eventId > 0) {
url = isNewTab ? this.appMainState.linkSet.get(PickupLinkType.eventPickupDetailOnNewTab.toString()) : this.appMainState.linkSet.get(PickupLinkType.eventPickupDetail.toString());
} else {
url = isNewTab ? this.appMainState.linkSet.get(PickupLinkType.sitePickupDetailOnNewTab.toString()) : this.appMainState.linkSet.get(PickupLinkType.sitePickupDetail.toString());
}
return url
.replace(':siteId', String(this.appMainState.siteId))
.replace(':eventId', String(this.appMainState.eventId))
.replace(':orderNumber', String(orderNumber));
}
/* ====================================================================== */
ngOnDestroy() {
}
public isReadable() {
return this.appMainState.isAllowed(AppMainPermissionType.canViewEmployee);
}
public isWritable() {
return this.appMainState.isAllowed(AppMainPermissionType.canManageEmployee);
}
private subscribeMessageIndicator() {
this.actionIndicatorSubscription = this.aidMessageIndicatorService.announced$.subscribe((param: AidIndicatorParams) => {
switch (param.type) {
case PickupActionType.purchaseOrderReload.toString():
this.logger.info("PickupSearchComponent > subscribeMessageIndicator", param);
this.search = param.data;
this.switchSearchView(PickupSearchViewEnum.Default, param.data, true);
break;
}
})
}
}
| constructExportRow | identifier_name |
pickup-search.component.ts | import {
Component,
Input,
Output,
ViewEncapsulation,
EventEmitter
} from '@angular/core';
import { Router, ActivatedRoute } from '@angular/router';
import { Subject } from 'rxjs/Subject';
import { map } from 'rxjs/operators/map';
import { Observable } from 'rxjs/Observable';
import { Subscription } from "rxjs";
import { Angular2Csv } from 'angular2-csv';
import { RowClickTypeEnum } from '@ukmjkim/aid-data-table';
import { AidLoggerService, AidDialogService, AidMessageIndicatorService, AidIndicatorParams, AidDateFormatPipe, AidGlobalConfig, AidAutoCompleteSearchDto } from '@ukmjkim/aid-lib-services';
import { AppMainPermissionType } from '../../models/app-main-permission-type.enum';
import { AppMainState } from '../../models/app-main-state';
import { RemoteSearchBaseComponent } from '@ukmjkim/aid-data-table';
import { PickupService } from '../../services/pickup.service';
import { PickupSearchService } from '../../services/pickup-search.service';
import { Pickup } from '../../models/pickup';
import { PickupSearchDto } from '../../models/pickup-search-dto';
import { PickupSubItem } from '../../models/pickup-sub-item';
import { PickupItemType } from '../../models/pickup-item-type.enum';
import { PickupColumnVariableName, pickupSearchableFields } from './conf/pickup-column-variable-name.conf';
import { PickupSearchFilterConfigurator } from './conf/pickup-search-filter-configurator';
import { PickupSearchViewEnum } from './conf/pickup-search-view.enum';
import { PickupSearchDefaultColumnsMap } from './conf/pickup-search-view-template';
import { pickupDownloadFields } from './conf/pickup-column-variable-name.conf';
import { PickupSearchDatasource } from './pickup-search-datasource';
import { PickupSearchQueryService } from './pickup-search-query.service';
import { MatDialog } from '@angular/material';
import {
New,
InProgress,
Completed,
Canceled
} from '../../constants/pickup-dashboard-state';
import { PickupActionType } from '../../constants/pickup-action-type';
import { PickupLinkType } from '../../constants/link-type';
@Component({
selector: 'aid-pickup-search',
templateUrl: './pickup-search.component.html',
styleUrls: ['./pickup-search.component.scss'],
encapsulation: ViewEncapsulation.None
})
export class PickupSearchComponent extends RemoteSearchBaseComponent<PickupSearchDto> {
@Input() id: string;
@Input() appMainState: AppMainState;
@Input('enableRowCheckbox') enableRowCheckbox: boolean = true;
@Output() selectedRows: EventEmitter<number[]> = new EventEmitter<number[]>();
readonly PickupItemType = PickupItemType;
readonly dateFormatPipe = new AidDateFormatPipe();
public isAdvancedShown: boolean = false;
public enableRowAction: boolean = false;
public permissionName: string;
dataLoadedCallback: Subject<any> = new Subject<any>();
private eventListInSite: { id: number, name: string }[];
private actionIndicatorSubscription: Subscription;
constructor(private router: Router,
private route: ActivatedRoute,
private aidMessageIndicatorService: AidMessageIndicatorService,
private pickupSearchService: PickupSearchService,
private pickupSearchQueryService: PickupSearchQueryService,
private dialog: MatDialog,
private logger: AidLoggerService,
private dialogService: AidDialogService,
public pickupService: PickupService) {
super(pickupSearchQueryService, true);
this.subscribeMessageIndicator();
}
// implement abstract method - checkPrivilege
checkPrivilege(): boolean {
return true;
}
// implement abstract method - watchPredefinedGlobalSearchFilter
watchPredefinedGlobalSearchFilter() {
this.predefinedGlobalSearchFilter().subscribe(keyword => {
this.search = keyword;
})
}
// implement abstract method - loadPrerequisiteData
loadPrerequisiteData() {
this.pickupSearchService.setUXOnly(this.appMainState.isUXOnly);
this.pickupService.setUXOnly(this.appMainState.isUXOnly);
this.pickupSearchQueryService.setAppMainState(this.appMainState);
this.pickupSearchQueryService.loadConfiguration();
this.pickupSearchService.getPickupSearchRange(this.appMainState.siteId, this.appMainState.eventId)
.pipe(
)
.subscribe(response => {
const filterRangeResults = response as Array<any>;
this.prerequisiteData.next(filterRangeResults);
});
}
// implement abstract method - setupFilterConfiguratorAndDataSource
setupFilterConfiguratorAndDataSource() {
this.searchGroupFieldName = 'groupId';
this.searchGroupFieldId = this.appMainState.eventId && this.appMainState.eventId > 0 ? this.appMainState.eventId : this.appMainState.siteId;
this.filterConfigurator = new PickupSearchFilterConfigurator();
this.dataSource = new PickupSearchDatasource(
this.appMainState,
this.searchGroupFieldName,
this.searchGroupFieldId,
pickupSearchableFields,
this.pickupSearchService,
this.filterConfigurator,
this.dialogService);
}
// implement abstract method - setupFiltersDependOnSearchRange
setupFiltersDependOnSearchRange(filterRangeResults) {
this.logger.info('setupFiltersDependOnSearchRange filterRangeResults', filterRangeResults);
if (filterRangeResults.length > 0) {
this.setupNumberRangeFilters(filterRangeResults[0]);
this.setupDateRangeFilters(filterRangeResults[0]);
this.setupTextRangeFilters(filterRangeResults[0]);
}
this.eventListInSite = new Array<{ id: number, name: string }>();
if (filterRangeResults && filterRangeResults.length > 1) |
}
// implement abstract method - setDefaultDisplayedColumns
setDefaultDisplayedColumns() {
if (!this.displayedColumns || this.displayedColumns.length === 0) {
let searchView = this.selectedView;
if (PickupSearchDefaultColumnsMap.has(searchView)) {
this.displayedColumns = PickupSearchDefaultColumnsMap.get(searchView).columns.slice();
} else {
console.error('Cannot find given asset search view template', searchView);
this.displayedColumns = PickupSearchDefaultColumnsMap.get(String(PickupSearchViewEnum.Default)).columns.slice();
}
}
}
// implement abstract method - setupPicklistPreSelected
setupPicklistPreSelected(column: string) {
this.setupPicklistFilterChoices(PickupColumnVariableName[column], []);
}
private setupNumberRangeFilters(data: any) {
if (data.numberOfItems) {
this.filterConfigurator
.addFilter(PickupColumnVariableName.numberOfItems, { min: data.numberOfItems[0], max: data.numberOfItems[1] });
}
}
private setupDateRangeFilters(data: any) {
if (data.createdTS) {
this.filterConfigurator
.addFilter(PickupColumnVariableName.createdTS, { min: data.createdTS[0], max: data.createdTS[1] });
}
}
private setupTextRangeFilters(data: any) {
}
// implement abstract method - watchData
watchData(): void {
this.dataSource.data().subscribe(data => {
if (!this.dataLoadedCallback.isStopped) this.dataLoadedCallback.next();
if (this.queryService.getGlobalSearchFilter.length === 0) {
this.search = '';
}
if (data.length > 0) {
this.queryService.setSearchResult(JSON.stringify(data));
}
});
}
// implement abstract method - watchSelectedRows
watchSelectedRows(): void {
this.dataSource.selection().subscribe((selectedItems) => {
if (selectedItems.length < this.numberOfItemsPerPage) {
this.allItemsSelected = false;
}
this.selectedItems = selectedItems;
this.updateNumberOfSelectedItems();
let selectedItemList: number[] = [];
selectedItems.forEach(item => {
selectedItemList.push(item.pickupId);
});
if (selectedItems.length > 0) {
this.enableRowAction = true;
} else {
this.enableRowAction = false;
}
this.selectedRows.emit(selectedItemList);
});
}
// implement abstract method - watchRowClicked
watchRowClicked() {
this.dataSource.rowClick().subscribe((row) => {
if (row) {
this.openGoToPage(row.data.pickupId, this.dataSource.getRowClickType());
}
})
}
// implement abstract method - watchRowExpanded
watchRowExpanded() {
this.dataSource.rowExpanded().subscribe((row) => {
if (row && row.data.numberOfItems > 0) {
if (!row.data.items || row.data.items.length === 0) {
this.logger.info('Lazy Loading for Pickups Lots...');
this.getSubTableData(row.data.pickupId).subscribe(subitems => {
row.data.items = subitems;
});
}
}
});
}
private getSubTableData(orderNumber: number): Observable<PickupSubItem[]> {
this.dialogService.showProgress();
return this.pickupSearchService.getPickupSubItemsByOrderNumber(this.appMainState.siteId, orderNumber)
.pipe(
map(lineItemList => {
this.dialogService.hideProgress();
return lineItemList;
}));
}
// override class method - watchTotalNumberOfItems
watchTotalNumberOfItems(): void {
this.dataSource.rowCount().subscribe(numberOfItems => this.numberOfItems = numberOfItems);
this.updateNumberOfSelectedItems();
}
applyGlobalSearch() {
if (this.search) {
this.dataSource.requestGlobalSearchFilter(this.search);
this.queryService.addGlobalSearchFilterValue(this.search);
}
this.callSearchAPI();
}
/* ====================================================================== */
/* EXPORT DATA TO EXCEL - START */
exportToExcel() {
this.dialogService.showProgress();
let searchQuery = this.dataSource.getCurrentSearchCriteria(this.searchGroupFieldName, this.searchGroupFieldId, 0).getSearchCriteriaString();
this.pickupSearchService.getPickupSearchById(this.appMainState.siteId, this.appMainState.eventId, searchQuery)
.pipe(
map(data => PickupSearchDto.constructFromSearchListJson(data)),
map(poList => poList.map(data => this.constructExportRow(data)))
)
.subscribe(data => {
//Create an array of headers that matches what the UI shows to be fed into the export
if (data.length > 0) {
let headers = [];
let headerVariableToName = {};
pickupDownloadFields.forEach((column) => {
headerVariableToName[column.field] = column.header;
});
// Populate the header row by using column variable name
for (let prop in data[0]) {
headers.push(headerVariableToName[prop]);
}
// We use a 3rd party library called Angular2Csv that takes in a JSON array of data, a file name and options
new Angular2Csv(data, this.appMainState.siteName + '-Pickup_List', { headers: headers });
}
//this.dialogService.hideProgress();
});
}
constructExportRow(po: PickupSearchDto) {
let retval = {};
let array = pickupDownloadFields;
for (let col of array) {
if (po.hasOwnProperty(col.field)) {
let val = po[col.field];
if (col.field === 'createdTS') {
retval[col.field] = this.dateFormatPipe.transformDate(val);
} else {
retval[col.field] = val;
}
} else {
retval[col.field] = '';
}
}
return retval;
}
/* EXPORT DATA TO EXCEL - END */
/* ====================================================================== */
toggleAdvancedSearch() {
this.isAdvancedShown = !this.isAdvancedShown;
return this.isAdvancedShown;
}
openGoToPage(id: number, rowClickType: RowClickTypeEnum): void {
this.queryService.setViewedEquipId(id);
if (rowClickType === RowClickTypeEnum.CTRL || rowClickType === RowClickTypeEnum.RIGHT_CLICK_NEW_TAB) {
window.open(this.getGoToLink(id, true), '_blank');
} else if (rowClickType === RowClickTypeEnum.RIGHT_CLICK_NEW_WINDOW) {
window.open(this.getGoToLink(id, true), '_blank', AidGlobalConfig.newWindowOptions);
} else {
this.router.navigateByUrl(this.getGoToLink(id, false));
}
}
getGoToLink(orderNumber: number, isNewTab: boolean) {
let url: string;
if (this.appMainState.eventId && this.appMainState.eventId !== undefined && this.appMainState.eventId > 0) {
url = isNewTab ? this.appMainState.linkSet.get(PickupLinkType.eventPickupDetailOnNewTab.toString()) : this.appMainState.linkSet.get(PickupLinkType.eventPickupDetail.toString());
} else {
url = isNewTab ? this.appMainState.linkSet.get(PickupLinkType.sitePickupDetailOnNewTab.toString()) : this.appMainState.linkSet.get(PickupLinkType.sitePickupDetail.toString());
}
return url
.replace(':siteId', String(this.appMainState.siteId))
.replace(':eventId', String(this.appMainState.eventId))
.replace(':orderNumber', String(orderNumber));
}
/* ====================================================================== */
ngOnDestroy() {
}
public isReadable() {
return this.appMainState.isAllowed(AppMainPermissionType.canViewEmployee);
}
public isWritable() {
return this.appMainState.isAllowed(AppMainPermissionType.canManageEmployee);
}
private subscribeMessageIndicator() {
this.actionIndicatorSubscription = this.aidMessageIndicatorService.announced$.subscribe((param: AidIndicatorParams) => {
switch (param.type) {
case PickupActionType.purchaseOrderReload.toString():
this.logger.info("PickupSearchComponent > subscribeMessageIndicator", param);
this.search = param.data;
this.switchSearchView(PickupSearchViewEnum.Default, param.data, true);
break;
}
})
}
}
| {
filterRangeResults[1].forEach((saleEvent: AidAutoCompleteSearchDto, index) => {
this.eventListInSite.push({ id: Number(saleEvent.value), name: saleEvent.description });
});
} | conditional_block |
pickup-search.component.ts | import {
Component,
Input,
Output,
ViewEncapsulation,
EventEmitter
} from '@angular/core';
import { Router, ActivatedRoute } from '@angular/router';
import { Subject } from 'rxjs/Subject';
import { map } from 'rxjs/operators/map';
import { Observable } from 'rxjs/Observable';
import { Subscription } from "rxjs";
import { Angular2Csv } from 'angular2-csv';
import { RowClickTypeEnum } from '@ukmjkim/aid-data-table';
import { AidLoggerService, AidDialogService, AidMessageIndicatorService, AidIndicatorParams, AidDateFormatPipe, AidGlobalConfig, AidAutoCompleteSearchDto } from '@ukmjkim/aid-lib-services';
import { AppMainPermissionType } from '../../models/app-main-permission-type.enum';
import { AppMainState } from '../../models/app-main-state';
import { RemoteSearchBaseComponent } from '@ukmjkim/aid-data-table';
import { PickupService } from '../../services/pickup.service';
import { PickupSearchService } from '../../services/pickup-search.service';
import { Pickup } from '../../models/pickup';
import { PickupSearchDto } from '../../models/pickup-search-dto';
import { PickupSubItem } from '../../models/pickup-sub-item';
import { PickupItemType } from '../../models/pickup-item-type.enum';
import { PickupColumnVariableName, pickupSearchableFields } from './conf/pickup-column-variable-name.conf';
import { PickupSearchFilterConfigurator } from './conf/pickup-search-filter-configurator';
import { PickupSearchViewEnum } from './conf/pickup-search-view.enum';
import { PickupSearchDefaultColumnsMap } from './conf/pickup-search-view-template';
import { pickupDownloadFields } from './conf/pickup-column-variable-name.conf';
import { PickupSearchDatasource } from './pickup-search-datasource';
import { PickupSearchQueryService } from './pickup-search-query.service';
import { MatDialog } from '@angular/material';
import {
New,
InProgress,
Completed,
Canceled
} from '../../constants/pickup-dashboard-state';
import { PickupActionType } from '../../constants/pickup-action-type';
import { PickupLinkType } from '../../constants/link-type';
@Component({
selector: 'aid-pickup-search',
templateUrl: './pickup-search.component.html',
styleUrls: ['./pickup-search.component.scss'],
encapsulation: ViewEncapsulation.None
})
export class PickupSearchComponent extends RemoteSearchBaseComponent<PickupSearchDto> {
@Input() id: string;
@Input() appMainState: AppMainState;
@Input('enableRowCheckbox') enableRowCheckbox: boolean = true;
@Output() selectedRows: EventEmitter<number[]> = new EventEmitter<number[]>();
readonly PickupItemType = PickupItemType;
readonly dateFormatPipe = new AidDateFormatPipe();
public isAdvancedShown: boolean = false;
public enableRowAction: boolean = false;
public permissionName: string;
dataLoadedCallback: Subject<any> = new Subject<any>();
private eventListInSite: { id: number, name: string }[];
private actionIndicatorSubscription: Subscription;
constructor(private router: Router,
private route: ActivatedRoute,
private aidMessageIndicatorService: AidMessageIndicatorService,
private pickupSearchService: PickupSearchService,
private pickupSearchQueryService: PickupSearchQueryService,
private dialog: MatDialog,
private logger: AidLoggerService,
private dialogService: AidDialogService,
public pickupService: PickupService) {
super(pickupSearchQueryService, true);
this.subscribeMessageIndicator();
}
// implement abstract method - checkPrivilege
checkPrivilege(): boolean {
return true;
}
// implement abstract method - watchPredefinedGlobalSearchFilter
watchPredefinedGlobalSearchFilter() {
this.predefinedGlobalSearchFilter().subscribe(keyword => {
this.search = keyword;
})
}
// implement abstract method - loadPrerequisiteData
loadPrerequisiteData() {
this.pickupSearchService.setUXOnly(this.appMainState.isUXOnly);
this.pickupService.setUXOnly(this.appMainState.isUXOnly);
this.pickupSearchQueryService.setAppMainState(this.appMainState);
this.pickupSearchQueryService.loadConfiguration();
this.pickupSearchService.getPickupSearchRange(this.appMainState.siteId, this.appMainState.eventId)
.pipe(
)
.subscribe(response => {
const filterRangeResults = response as Array<any>;
this.prerequisiteData.next(filterRangeResults);
});
}
// implement abstract method - setupFilterConfiguratorAndDataSource
setupFilterConfiguratorAndDataSource() {
this.searchGroupFieldName = 'groupId';
this.searchGroupFieldId = this.appMainState.eventId && this.appMainState.eventId > 0 ? this.appMainState.eventId : this.appMainState.siteId;
this.filterConfigurator = new PickupSearchFilterConfigurator();
this.dataSource = new PickupSearchDatasource(
this.appMainState,
this.searchGroupFieldName,
this.searchGroupFieldId,
pickupSearchableFields,
this.pickupSearchService,
this.filterConfigurator,
this.dialogService);
}
// implement abstract method - setupFiltersDependOnSearchRange
setupFiltersDependOnSearchRange(filterRangeResults) {
this.logger.info('setupFiltersDependOnSearchRange filterRangeResults', filterRangeResults);
if (filterRangeResults.length > 0) {
this.setupNumberRangeFilters(filterRangeResults[0]);
this.setupDateRangeFilters(filterRangeResults[0]);
this.setupTextRangeFilters(filterRangeResults[0]);
}
this.eventListInSite = new Array<{ id: number, name: string }>();
if (filterRangeResults && filterRangeResults.length > 1) {
filterRangeResults[1].forEach((saleEvent: AidAutoCompleteSearchDto, index) => {
this.eventListInSite.push({ id: Number(saleEvent.value), name: saleEvent.description });
});
}
}
// implement abstract method - setDefaultDisplayedColumns
setDefaultDisplayedColumns() {
if (!this.displayedColumns || this.displayedColumns.length === 0) {
let searchView = this.selectedView;
if (PickupSearchDefaultColumnsMap.has(searchView)) {
this.displayedColumns = PickupSearchDefaultColumnsMap.get(searchView).columns.slice();
} else {
console.error('Cannot find given asset search view template', searchView);
this.displayedColumns = PickupSearchDefaultColumnsMap.get(String(PickupSearchViewEnum.Default)).columns.slice();
}
}
}
// implement abstract method - setupPicklistPreSelected
setupPicklistPreSelected(column: string) {
this.setupPicklistFilterChoices(PickupColumnVariableName[column], []);
}
private setupNumberRangeFilters(data: any) {
if (data.numberOfItems) {
this.filterConfigurator
.addFilter(PickupColumnVariableName.numberOfItems, { min: data.numberOfItems[0], max: data.numberOfItems[1] });
}
}
private setupDateRangeFilters(data: any) {
if (data.createdTS) {
this.filterConfigurator
.addFilter(PickupColumnVariableName.createdTS, { min: data.createdTS[0], max: data.createdTS[1] });
}
}
private setupTextRangeFilters(data: any) {
}
// implement abstract method - watchData
watchData(): void {
this.dataSource.data().subscribe(data => {
if (!this.dataLoadedCallback.isStopped) this.dataLoadedCallback.next();
if (this.queryService.getGlobalSearchFilter.length === 0) {
this.search = '';
}
if (data.length > 0) {
this.queryService.setSearchResult(JSON.stringify(data));
}
});
}
// implement abstract method - watchSelectedRows
watchSelectedRows(): void {
this.dataSource.selection().subscribe((selectedItems) => {
if (selectedItems.length < this.numberOfItemsPerPage) {
this.allItemsSelected = false;
}
this.selectedItems = selectedItems;
this.updateNumberOfSelectedItems();
let selectedItemList: number[] = [];
selectedItems.forEach(item => {
selectedItemList.push(item.pickupId);
});
if (selectedItems.length > 0) {
this.enableRowAction = true;
} else {
this.enableRowAction = false;
}
this.selectedRows.emit(selectedItemList);
});
}
// implement abstract method - watchRowClicked
watchRowClicked() {
this.dataSource.rowClick().subscribe((row) => {
if (row) {
this.openGoToPage(row.data.pickupId, this.dataSource.getRowClickType());
}
})
}
// implement abstract method - watchRowExpanded
watchRowExpanded() {
this.dataSource.rowExpanded().subscribe((row) => {
if (row && row.data.numberOfItems > 0) {
if (!row.data.items || row.data.items.length === 0) {
this.logger.info('Lazy Loading for Pickups Lots...');
this.getSubTableData(row.data.pickupId).subscribe(subitems => {
row.data.items = subitems;
});
}
}
});
}
private getSubTableData(orderNumber: number): Observable<PickupSubItem[]> {
this.dialogService.showProgress();
return this.pickupSearchService.getPickupSubItemsByOrderNumber(this.appMainState.siteId, orderNumber)
.pipe(
map(lineItemList => {
this.dialogService.hideProgress();
return lineItemList;
}));
}
// override class method - watchTotalNumberOfItems
watchTotalNumberOfItems(): void {
this.dataSource.rowCount().subscribe(numberOfItems => this.numberOfItems = numberOfItems);
this.updateNumberOfSelectedItems();
}
applyGlobalSearch() |
/* ====================================================================== */
/* EXPORT DATA TO EXCEL - START */
exportToExcel() {
this.dialogService.showProgress();
let searchQuery = this.dataSource.getCurrentSearchCriteria(this.searchGroupFieldName, this.searchGroupFieldId, 0).getSearchCriteriaString();
this.pickupSearchService.getPickupSearchById(this.appMainState.siteId, this.appMainState.eventId, searchQuery)
.pipe(
map(data => PickupSearchDto.constructFromSearchListJson(data)),
map(poList => poList.map(data => this.constructExportRow(data)))
)
.subscribe(data => {
//Create an array of headers that matches what the UI shows to be fed into the export
if (data.length > 0) {
let headers = [];
let headerVariableToName = {};
pickupDownloadFields.forEach((column) => {
headerVariableToName[column.field] = column.header;
});
// Populate the header row by using column variable name
for (let prop in data[0]) {
headers.push(headerVariableToName[prop]);
}
// We use a 3rd party library called Angular2Csv that takes in a JSON array of data, a file name and options
new Angular2Csv(data, this.appMainState.siteName + '-Pickup_List', { headers: headers });
}
//this.dialogService.hideProgress();
});
}
constructExportRow(po: PickupSearchDto) {
let retval = {};
let array = pickupDownloadFields;
for (let col of array) {
if (po.hasOwnProperty(col.field)) {
let val = po[col.field];
if (col.field === 'createdTS') {
retval[col.field] = this.dateFormatPipe.transformDate(val);
} else {
retval[col.field] = val;
}
} else {
retval[col.field] = '';
}
}
return retval;
}
/* EXPORT DATA TO EXCEL - END */
/* ====================================================================== */
toggleAdvancedSearch() {
this.isAdvancedShown = !this.isAdvancedShown;
return this.isAdvancedShown;
}
openGoToPage(id: number, rowClickType: RowClickTypeEnum): void {
this.queryService.setViewedEquipId(id);
if (rowClickType === RowClickTypeEnum.CTRL || rowClickType === RowClickTypeEnum.RIGHT_CLICK_NEW_TAB) {
window.open(this.getGoToLink(id, true), '_blank');
} else if (rowClickType === RowClickTypeEnum.RIGHT_CLICK_NEW_WINDOW) {
window.open(this.getGoToLink(id, true), '_blank', AidGlobalConfig.newWindowOptions);
} else {
this.router.navigateByUrl(this.getGoToLink(id, false));
}
}
getGoToLink(orderNumber: number, isNewTab: boolean) {
let url: string;
if (this.appMainState.eventId && this.appMainState.eventId !== undefined && this.appMainState.eventId > 0) {
url = isNewTab ? this.appMainState.linkSet.get(PickupLinkType.eventPickupDetailOnNewTab.toString()) : this.appMainState.linkSet.get(PickupLinkType.eventPickupDetail.toString());
} else {
url = isNewTab ? this.appMainState.linkSet.get(PickupLinkType.sitePickupDetailOnNewTab.toString()) : this.appMainState.linkSet.get(PickupLinkType.sitePickupDetail.toString());
}
return url
.replace(':siteId', String(this.appMainState.siteId))
.replace(':eventId', String(this.appMainState.eventId))
.replace(':orderNumber', String(orderNumber));
}
/* ====================================================================== */
ngOnDestroy() {
}
public isReadable() {
return this.appMainState.isAllowed(AppMainPermissionType.canViewEmployee);
}
public isWritable() {
return this.appMainState.isAllowed(AppMainPermissionType.canManageEmployee);
}
private subscribeMessageIndicator() {
this.actionIndicatorSubscription = this.aidMessageIndicatorService.announced$.subscribe((param: AidIndicatorParams) => {
switch (param.type) {
case PickupActionType.purchaseOrderReload.toString():
this.logger.info("PickupSearchComponent > subscribeMessageIndicator", param);
this.search = param.data;
this.switchSearchView(PickupSearchViewEnum.Default, param.data, true);
break;
}
})
}
}
| {
if (this.search) {
this.dataSource.requestGlobalSearchFilter(this.search);
this.queryService.addGlobalSearchFilterValue(this.search);
}
this.callSearchAPI();
} | identifier_body |
pickup-search.component.ts | import {
Component,
Input,
Output,
ViewEncapsulation,
EventEmitter
} from '@angular/core';
import { Router, ActivatedRoute } from '@angular/router';
import { Subject } from 'rxjs/Subject';
import { map } from 'rxjs/operators/map';
import { Observable } from 'rxjs/Observable';
import { Subscription } from "rxjs";
import { Angular2Csv } from 'angular2-csv';
import { RowClickTypeEnum } from '@ukmjkim/aid-data-table';
import { AidLoggerService, AidDialogService, AidMessageIndicatorService, AidIndicatorParams, AidDateFormatPipe, AidGlobalConfig, AidAutoCompleteSearchDto } from '@ukmjkim/aid-lib-services';
import { AppMainPermissionType } from '../../models/app-main-permission-type.enum';
import { AppMainState } from '../../models/app-main-state';
import { RemoteSearchBaseComponent } from '@ukmjkim/aid-data-table';
import { PickupService } from '../../services/pickup.service';
import { PickupSearchService } from '../../services/pickup-search.service';
import { Pickup } from '../../models/pickup';
import { PickupSearchDto } from '../../models/pickup-search-dto';
import { PickupSubItem } from '../../models/pickup-sub-item';
import { PickupItemType } from '../../models/pickup-item-type.enum';
import { PickupColumnVariableName, pickupSearchableFields } from './conf/pickup-column-variable-name.conf';
import { PickupSearchFilterConfigurator } from './conf/pickup-search-filter-configurator';
import { PickupSearchViewEnum } from './conf/pickup-search-view.enum';
import { PickupSearchDefaultColumnsMap } from './conf/pickup-search-view-template';
import { pickupDownloadFields } from './conf/pickup-column-variable-name.conf';
import { PickupSearchDatasource } from './pickup-search-datasource';
import { PickupSearchQueryService } from './pickup-search-query.service';
import { MatDialog } from '@angular/material';
import {
New,
InProgress,
Completed,
Canceled
} from '../../constants/pickup-dashboard-state';
import { PickupActionType } from '../../constants/pickup-action-type';
import { PickupLinkType } from '../../constants/link-type';
@Component({
selector: 'aid-pickup-search',
templateUrl: './pickup-search.component.html',
styleUrls: ['./pickup-search.component.scss'],
encapsulation: ViewEncapsulation.None
})
export class PickupSearchComponent extends RemoteSearchBaseComponent<PickupSearchDto> {
@Input() id: string;
@Input() appMainState: AppMainState;
@Input('enableRowCheckbox') enableRowCheckbox: boolean = true;
@Output() selectedRows: EventEmitter<number[]> = new EventEmitter<number[]>();
readonly PickupItemType = PickupItemType;
readonly dateFormatPipe = new AidDateFormatPipe();
public isAdvancedShown: boolean = false;
public enableRowAction: boolean = false;
public permissionName: string;
dataLoadedCallback: Subject<any> = new Subject<any>();
private eventListInSite: { id: number, name: string }[];
private actionIndicatorSubscription: Subscription;
constructor(private router: Router,
private route: ActivatedRoute,
private aidMessageIndicatorService: AidMessageIndicatorService,
private pickupSearchService: PickupSearchService,
private pickupSearchQueryService: PickupSearchQueryService,
private dialog: MatDialog,
private logger: AidLoggerService,
private dialogService: AidDialogService,
public pickupService: PickupService) {
super(pickupSearchQueryService, true);
this.subscribeMessageIndicator();
}
// implement abstract method - checkPrivilege
checkPrivilege(): boolean {
return true;
}
// implement abstract method - watchPredefinedGlobalSearchFilter
watchPredefinedGlobalSearchFilter() {
this.predefinedGlobalSearchFilter().subscribe(keyword => {
this.search = keyword;
})
}
// implement abstract method - loadPrerequisiteData
loadPrerequisiteData() {
this.pickupSearchService.setUXOnly(this.appMainState.isUXOnly);
this.pickupService.setUXOnly(this.appMainState.isUXOnly);
this.pickupSearchQueryService.setAppMainState(this.appMainState);
this.pickupSearchQueryService.loadConfiguration();
this.pickupSearchService.getPickupSearchRange(this.appMainState.siteId, this.appMainState.eventId)
.pipe(
)
.subscribe(response => {
const filterRangeResults = response as Array<any>;
this.prerequisiteData.next(filterRangeResults);
});
}
// implement abstract method - setupFilterConfiguratorAndDataSource
setupFilterConfiguratorAndDataSource() {
this.searchGroupFieldName = 'groupId';
this.searchGroupFieldId = this.appMainState.eventId && this.appMainState.eventId > 0 ? this.appMainState.eventId : this.appMainState.siteId;
this.filterConfigurator = new PickupSearchFilterConfigurator();
this.dataSource = new PickupSearchDatasource(
this.appMainState,
this.searchGroupFieldName,
this.searchGroupFieldId,
pickupSearchableFields,
this.pickupSearchService,
this.filterConfigurator,
this.dialogService);
}
// implement abstract method - setupFiltersDependOnSearchRange
setupFiltersDependOnSearchRange(filterRangeResults) {
this.logger.info('setupFiltersDependOnSearchRange filterRangeResults', filterRangeResults);
if (filterRangeResults.length > 0) {
this.setupNumberRangeFilters(filterRangeResults[0]);
this.setupDateRangeFilters(filterRangeResults[0]);
this.setupTextRangeFilters(filterRangeResults[0]);
}
this.eventListInSite = new Array<{ id: number, name: string }>();
if (filterRangeResults && filterRangeResults.length > 1) {
filterRangeResults[1].forEach((saleEvent: AidAutoCompleteSearchDto, index) => {
this.eventListInSite.push({ id: Number(saleEvent.value), name: saleEvent.description });
});
}
}
// implement abstract method - setDefaultDisplayedColumns
setDefaultDisplayedColumns() {
if (!this.displayedColumns || this.displayedColumns.length === 0) {
let searchView = this.selectedView;
if (PickupSearchDefaultColumnsMap.has(searchView)) {
this.displayedColumns = PickupSearchDefaultColumnsMap.get(searchView).columns.slice();
} else {
console.error('Cannot find given asset search view template', searchView);
this.displayedColumns = PickupSearchDefaultColumnsMap.get(String(PickupSearchViewEnum.Default)).columns.slice();
}
}
}
// implement abstract method - setupPicklistPreSelected
setupPicklistPreSelected(column: string) {
this.setupPicklistFilterChoices(PickupColumnVariableName[column], []);
}
private setupNumberRangeFilters(data: any) {
if (data.numberOfItems) {
this.filterConfigurator
.addFilter(PickupColumnVariableName.numberOfItems, { min: data.numberOfItems[0], max: data.numberOfItems[1] });
}
}
private setupDateRangeFilters(data: any) {
if (data.createdTS) {
this.filterConfigurator
.addFilter(PickupColumnVariableName.createdTS, { min: data.createdTS[0], max: data.createdTS[1] });
}
}
private setupTextRangeFilters(data: any) {
}
// implement abstract method - watchData
watchData(): void {
this.dataSource.data().subscribe(data => {
if (!this.dataLoadedCallback.isStopped) this.dataLoadedCallback.next();
if (this.queryService.getGlobalSearchFilter.length === 0) {
this.search = '';
}
if (data.length > 0) {
this.queryService.setSearchResult(JSON.stringify(data));
}
});
}
// implement abstract method - watchSelectedRows
watchSelectedRows(): void {
this.dataSource.selection().subscribe((selectedItems) => {
if (selectedItems.length < this.numberOfItemsPerPage) {
this.allItemsSelected = false;
}
this.selectedItems = selectedItems;
this.updateNumberOfSelectedItems();
let selectedItemList: number[] = [];
selectedItems.forEach(item => {
selectedItemList.push(item.pickupId);
});
if (selectedItems.length > 0) {
this.enableRowAction = true;
} else {
this.enableRowAction = false;
}
this.selectedRows.emit(selectedItemList);
});
}
// implement abstract method - watchRowClicked
watchRowClicked() {
this.dataSource.rowClick().subscribe((row) => {
if (row) {
this.openGoToPage(row.data.pickupId, this.dataSource.getRowClickType());
}
})
}
// implement abstract method - watchRowExpanded
watchRowExpanded() {
this.dataSource.rowExpanded().subscribe((row) => {
if (row && row.data.numberOfItems > 0) {
if (!row.data.items || row.data.items.length === 0) {
this.logger.info('Lazy Loading for Pickups Lots...');
this.getSubTableData(row.data.pickupId).subscribe(subitems => {
row.data.items = subitems;
});
}
}
});
}
private getSubTableData(orderNumber: number): Observable<PickupSubItem[]> {
this.dialogService.showProgress();
return this.pickupSearchService.getPickupSubItemsByOrderNumber(this.appMainState.siteId, orderNumber)
.pipe(
map(lineItemList => {
this.dialogService.hideProgress();
return lineItemList;
}));
}
// override class method - watchTotalNumberOfItems
watchTotalNumberOfItems(): void {
this.dataSource.rowCount().subscribe(numberOfItems => this.numberOfItems = numberOfItems);
this.updateNumberOfSelectedItems();
}
applyGlobalSearch() {
if (this.search) {
this.dataSource.requestGlobalSearchFilter(this.search);
this.queryService.addGlobalSearchFilterValue(this.search);
}
this.callSearchAPI();
}
/* ====================================================================== */
/* EXPORT DATA TO EXCEL - START */
exportToExcel() {
this.dialogService.showProgress();
let searchQuery = this.dataSource.getCurrentSearchCriteria(this.searchGroupFieldName, this.searchGroupFieldId, 0).getSearchCriteriaString();
this.pickupSearchService.getPickupSearchById(this.appMainState.siteId, this.appMainState.eventId, searchQuery)
.pipe(
map(data => PickupSearchDto.constructFromSearchListJson(data)),
map(poList => poList.map(data => this.constructExportRow(data)))
)
.subscribe(data => {
//Create an array of headers that matches what the UI shows to be fed into the export
if (data.length > 0) {
let headers = [];
let headerVariableToName = {};
pickupDownloadFields.forEach((column) => {
headerVariableToName[column.field] = column.header;
});
// Populate the header row by using column variable name
for (let prop in data[0]) {
headers.push(headerVariableToName[prop]);
}
// We use a 3rd party library called Angular2Csv that takes in a JSON array of data, a file name and options
new Angular2Csv(data, this.appMainState.siteName + '-Pickup_List', { headers: headers });
}
//this.dialogService.hideProgress();
});
}
constructExportRow(po: PickupSearchDto) {
let retval = {};
let array = pickupDownloadFields;
for (let col of array) {
if (po.hasOwnProperty(col.field)) {
let val = po[col.field];
if (col.field === 'createdTS') {
retval[col.field] = this.dateFormatPipe.transformDate(val);
} else {
retval[col.field] = val;
}
} else {
retval[col.field] = '';
}
}
return retval;
}
/* EXPORT DATA TO EXCEL - END */
/* ====================================================================== */
toggleAdvancedSearch() {
this.isAdvancedShown = !this.isAdvancedShown;
return this.isAdvancedShown;
}
openGoToPage(id: number, rowClickType: RowClickTypeEnum): void {
this.queryService.setViewedEquipId(id);
if (rowClickType === RowClickTypeEnum.CTRL || rowClickType === RowClickTypeEnum.RIGHT_CLICK_NEW_TAB) {
window.open(this.getGoToLink(id, true), '_blank');
} else if (rowClickType === RowClickTypeEnum.RIGHT_CLICK_NEW_WINDOW) {
window.open(this.getGoToLink(id, true), '_blank', AidGlobalConfig.newWindowOptions);
} else {
this.router.navigateByUrl(this.getGoToLink(id, false));
} | url = isNewTab ? this.appMainState.linkSet.get(PickupLinkType.eventPickupDetailOnNewTab.toString()) : this.appMainState.linkSet.get(PickupLinkType.eventPickupDetail.toString());
} else {
url = isNewTab ? this.appMainState.linkSet.get(PickupLinkType.sitePickupDetailOnNewTab.toString()) : this.appMainState.linkSet.get(PickupLinkType.sitePickupDetail.toString());
}
return url
.replace(':siteId', String(this.appMainState.siteId))
.replace(':eventId', String(this.appMainState.eventId))
.replace(':orderNumber', String(orderNumber));
}
/* ====================================================================== */
ngOnDestroy() {
}
public isReadable() {
return this.appMainState.isAllowed(AppMainPermissionType.canViewEmployee);
}
public isWritable() {
return this.appMainState.isAllowed(AppMainPermissionType.canManageEmployee);
}
private subscribeMessageIndicator() {
this.actionIndicatorSubscription = this.aidMessageIndicatorService.announced$.subscribe((param: AidIndicatorParams) => {
switch (param.type) {
case PickupActionType.purchaseOrderReload.toString():
this.logger.info("PickupSearchComponent > subscribeMessageIndicator", param);
this.search = param.data;
this.switchSearchView(PickupSearchViewEnum.Default, param.data, true);
break;
}
})
}
} | }
getGoToLink(orderNumber: number, isNewTab: boolean) {
let url: string;
if (this.appMainState.eventId && this.appMainState.eventId !== undefined && this.appMainState.eventId > 0) { | random_line_split |
bootit2.py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# vim: ts=4 sts=4 sw=4 tw=100 sta et
__author__ = 'Patrick Butler'
__email__ = 'pbutler@killertux.org'
import argparse
import fcntl
import logging
import os
from pathlib import Path
import shutil
import subprocess as sp
import sys
logging.basicConfig(level=logging.INFO)
class BootItException(Exception):
pass
class BootItState:
state = []
@classmethod
def push_state(cls, bootit):
cls.state += [bootit]
@classmethod
def pop_state(cls):
cls.state.pop()
@classmethod
def cur_state(cls):
if cls.state:
return cls.state[-1]
else:
return None
class BootIt:
def __init__(self, args=None):
if args is None:
args = sys.argv
self.argparse(args)
self.selfdir = Path(__file__).absolute().parent
if (self.selfdir / ".git").exists():
logging.debug("Detected git mode")
else:
logging.debug("Detected untracked mode, forcing no update")
self.options.update = False
if self.options.update:
logging.info("Gitting")
Cmd("git", "pull", state=self)
def argparse(self, argv=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-G", "--no-git",
action="store_false", dest="update", default=True,
help="don't run git pull")
parser.add_argument("--conf_file", default="bootstrap.py")
parser.add_argument("--dry-run", action="store_true", default=False,
help="don't perform any real tasks")
parser.add_argument("--working-dir", default=Path.cwd(), type=Path)
self.options = parser.parse_args()
@property
def dry_run(self):
return self.options.dry_run
def __enter__(self):
BootItState.push_state(self)
self.orig_dir = Path.cwd()
self.curdir = self.options.working_dir
os.chdir(str(self.curdir))
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(str(self.orig_dir))
class Command(object):
def __init__(self, *args, state=None, **kwargs):
if state is None:
self.state = BootItState.cur_state()
else:
self.state = state
self.do(*args, **kwargs)
class Cmd(Command):
def do(self, *args, quiet=False, exception=True):
if len(args) == 1:
shell = True
else:
shell = False
self.stdout = ""
self.stderr = ""
logging.info("Running cmd {} shell={}".format(args, shell))
if self.state.dry_run:
return
p = sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE, shell=shell)
while p.returncode is None:
s = self.non_block_read(p.stdout)
if s:
if not quiet:
sys.stdout.write(s)
self.stdout += s
s = self.non_block_read(p.stderr)
if s:
sys.stderr.write(s)
self.stderr += s
p.poll()
logging.debug("ret={} stdout={} stderr={}".format(p.returncode,
self.stdout,
self.stderr))
self.okay = (p.returncode == 0)
if exception and not self.okay:
raise BootItException("Cmd {} failed".format(" ".join(args)))
def non_block_read(self, output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read().decode("utf8")
except Exception:
return ""
class Copy(Command):
def do(self, src, dest):
src = Path(src).expanduser()
dest = Path(dest).expanduser()
destdir = dest.parent
if not destdir.exists():
logging.info("making dir {}". format(destdir))
if not self.state.dry_run:
destdir.mkdir(parents=True)
if dest.exists():
logging.info("removing existing file {}". format(dest))
if not self.state.dry_run:
dest.unlink()
shutil.copy2(str(src), str(dest))
class Link(Command):
def do(self, src, dest):
src = Path(src).expanduser()
if not src.exists():
raise BootItException("{} does not exist for linking".format(src))
src = src.resolve()
dest = Path(dest).expanduser()
try:
logging.info("%s %s", src, dest.resolve())
rel_src = os.path.relpath(src, dest.resolve().parent)
except ValueError:
logging.info("Couldn't calculate relative path using absolute {}".format(src))
rel_src = src.resolve()
destdir = dest.parent
if not destdir.exists():
destdir.mkdir(parents=True)
if dest.exists() and not dest.is_symlink():
raise BootItException("Real file exists at %s" % dest)
elif dest.is_symlink() and not dest.exists():
logging.warn("Removing dangling link %s->%s" % (dest, rel_src))
if not self.state.dry_run:
os.unlink(str(dest))
elif dest.exists() and not src.samefile(dest):
logging.warn("Changing link from %s to %s" % (os.path.realpath(str(dest)), rel_src))
if not self.state.dry_run:
os.unlink(str(dest))
if not dest.exists():
logging.info("Linking from %s to %s" % (rel_src, dest))
dest.symlink_to(rel_src)
class Mkdir(Command):
def do(self, name, mode):
name = Path(name).expanduser()
if not name.exists():
logging.info("Making dir {}".format(name))
if not self.state.dry_run:
name.mkdir(int(mode, 8), parents=True)
Chmod(name, mode)
class Chmod(Command):
def do(self, name, mode):
name = Path(name).expanduser()
if name.exists():
logging.info("Chmodding dir {}".format(name))
if not self.state.dry_run:
name.chmod(int(mode, 8))
else:
raise BootItException("No file to chmod {}".format(name))
class Sync(Command):
"""clean is ignored at the moment"""
def do(self, src, dest, clean=True, patterns=None):
src = Path(src).expanduser()
dest = Path(dest).expanduser()
if patterns is None:
patterns = [".git"]
if not dest.exists():
logging.info("Creating dir to sync to {}".format(dest))
if not self.state.dry_run:
dest.mkdir(parents=True)
sunk = []
for syncable in src.rglob("*"):
if syncable == src:
continue
skip = False
for pat in patterns:
cur = syncable
while cur != src:
if cur.match(pat):
logging.debug("Skipping sync of {}".format(syncable))
skip = True
break
cur = cur.parent
if skip:
break
if skip:
continue
partial = syncable.relative_to(src)
sunk += [(syncable, dest / partial)]
for fsrc, fdest in sunk: # type: Path, Path
fsrc = src.resolve().parent / fsrc
if fsrc.is_dir():
logging.info("Make Sync dir {} in {}".format(fsrc, fdest))
Mkdir(fdest)
# TODO: copy chmod settings
else:
logging.info("Syncing {} to {}".format(fsrc, fdest))
Copy(src=fsrc, dest=fdest)
class Echo(Command):
"""echos the given arguments to the command line"""
def do(self, *args):
"""
:param *args: arguments to be echoed
"""
logging.info(" ".join(str(a) for a in args))
class Touch(Command):
"""Creates an empty file"""
def do(self, fname):
"""@todo: to be defined
:param fname: @todo
"""
fname = Path(fname).expanduser()
logging.info("Touching {}".format(fname))
if not self.state.dry_run:
fname.touch()
class Pip(Command):
def do(self, pkgs=[], user=True, upgrade=True):
opts = []
if user:
opts += ["--user"]
if upgrade:
opts += ["--upgrade"]
cmd = "python3 -mpip install {} {}".format(
" ".join(opts),
" ".join(pkgs))
Cmd(cmd)
class Brew(Command):
""" upgrade is currently ignored"""
def do(self, pkgs=[], cask=False, upgrade=True, tap=False):
assert sum([tap, cask]) < 2, "Choose either cask or tap or neither"
prefix = ["brew"]
if cask:
prefix += ["cask"]
cmd = "list"
elif tap:
prefix += ["tap"]
cmd = ""
else:
cmd = "list"
prefix = " " .join(prefix)
logging.info("Checking for installed {} pkgs".format(prefix))
ret = Cmd("{} {}".format(prefix, cmd), quiet=True)
installed = {p.split("@")[0] for p in ret.stdout.split("\n")}
needed = set(pkgs) - installed
if needed:
logging.info("Installing from {}: {}".format(prefix, ", ".join(needed)))
if tap:
|
else:
Cmd("{} install {}".format(prefix, " ".join(needed)))
else:
logging.info("No pkgs from {} needed".format(prefix))
# def main(args):
# commands = {}
# for name, var in globals().items():
# if name == "Command":
# continue
# if isinstance(var, type) and issubclass(var, Command):
# commands[name.lower()] = var
#
# evaluator = Evaluator(commands, options.dry_run)
# evaluator.start(options.conf_file)
#
# os.chdir(curdir)
# return 0
#
#
# if __name__ == "__main__":
# sys.exit(main(sys.argv))
| Cmd("{} {}".format(prefix, " ".join(needed))) | conditional_block |
bootit2.py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# vim: ts=4 sts=4 sw=4 tw=100 sta et
__author__ = 'Patrick Butler'
__email__ = 'pbutler@killertux.org'
import argparse
import fcntl
import logging
import os
from pathlib import Path
import shutil
import subprocess as sp
import sys
logging.basicConfig(level=logging.INFO)
class BootItException(Exception):
pass
class | :
state = []
@classmethod
def push_state(cls, bootit):
cls.state += [bootit]
@classmethod
def pop_state(cls):
cls.state.pop()
@classmethod
def cur_state(cls):
if cls.state:
return cls.state[-1]
else:
return None
class BootIt:
def __init__(self, args=None):
if args is None:
args = sys.argv
self.argparse(args)
self.selfdir = Path(__file__).absolute().parent
if (self.selfdir / ".git").exists():
logging.debug("Detected git mode")
else:
logging.debug("Detected untracked mode, forcing no update")
self.options.update = False
if self.options.update:
logging.info("Gitting")
Cmd("git", "pull", state=self)
def argparse(self, argv=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-G", "--no-git",
action="store_false", dest="update", default=True,
help="don't run git pull")
parser.add_argument("--conf_file", default="bootstrap.py")
parser.add_argument("--dry-run", action="store_true", default=False,
help="don't perform any real tasks")
parser.add_argument("--working-dir", default=Path.cwd(), type=Path)
self.options = parser.parse_args()
@property
def dry_run(self):
return self.options.dry_run
def __enter__(self):
BootItState.push_state(self)
self.orig_dir = Path.cwd()
self.curdir = self.options.working_dir
os.chdir(str(self.curdir))
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(str(self.orig_dir))
class Command(object):
def __init__(self, *args, state=None, **kwargs):
if state is None:
self.state = BootItState.cur_state()
else:
self.state = state
self.do(*args, **kwargs)
class Cmd(Command):
def do(self, *args, quiet=False, exception=True):
if len(args) == 1:
shell = True
else:
shell = False
self.stdout = ""
self.stderr = ""
logging.info("Running cmd {} shell={}".format(args, shell))
if self.state.dry_run:
return
p = sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE, shell=shell)
while p.returncode is None:
s = self.non_block_read(p.stdout)
if s:
if not quiet:
sys.stdout.write(s)
self.stdout += s
s = self.non_block_read(p.stderr)
if s:
sys.stderr.write(s)
self.stderr += s
p.poll()
logging.debug("ret={} stdout={} stderr={}".format(p.returncode,
self.stdout,
self.stderr))
self.okay = (p.returncode == 0)
if exception and not self.okay:
raise BootItException("Cmd {} failed".format(" ".join(args)))
def non_block_read(self, output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read().decode("utf8")
except Exception:
return ""
class Copy(Command):
def do(self, src, dest):
src = Path(src).expanduser()
dest = Path(dest).expanduser()
destdir = dest.parent
if not destdir.exists():
logging.info("making dir {}". format(destdir))
if not self.state.dry_run:
destdir.mkdir(parents=True)
if dest.exists():
logging.info("removing existing file {}". format(dest))
if not self.state.dry_run:
dest.unlink()
shutil.copy2(str(src), str(dest))
class Link(Command):
def do(self, src, dest):
src = Path(src).expanduser()
if not src.exists():
raise BootItException("{} does not exist for linking".format(src))
src = src.resolve()
dest = Path(dest).expanduser()
try:
logging.info("%s %s", src, dest.resolve())
rel_src = os.path.relpath(src, dest.resolve().parent)
except ValueError:
logging.info("Couldn't calculate relative path using absolute {}".format(src))
rel_src = src.resolve()
destdir = dest.parent
if not destdir.exists():
destdir.mkdir(parents=True)
if dest.exists() and not dest.is_symlink():
raise BootItException("Real file exists at %s" % dest)
elif dest.is_symlink() and not dest.exists():
logging.warn("Removing dangling link %s->%s" % (dest, rel_src))
if not self.state.dry_run:
os.unlink(str(dest))
elif dest.exists() and not src.samefile(dest):
logging.warn("Changing link from %s to %s" % (os.path.realpath(str(dest)), rel_src))
if not self.state.dry_run:
os.unlink(str(dest))
if not dest.exists():
logging.info("Linking from %s to %s" % (rel_src, dest))
dest.symlink_to(rel_src)
class Mkdir(Command):
def do(self, name, mode):
name = Path(name).expanduser()
if not name.exists():
logging.info("Making dir {}".format(name))
if not self.state.dry_run:
name.mkdir(int(mode, 8), parents=True)
Chmod(name, mode)
class Chmod(Command):
def do(self, name, mode):
name = Path(name).expanduser()
if name.exists():
logging.info("Chmodding dir {}".format(name))
if not self.state.dry_run:
name.chmod(int(mode, 8))
else:
raise BootItException("No file to chmod {}".format(name))
class Sync(Command):
"""clean is ignored at the moment"""
def do(self, src, dest, clean=True, patterns=None):
src = Path(src).expanduser()
dest = Path(dest).expanduser()
if patterns is None:
patterns = [".git"]
if not dest.exists():
logging.info("Creating dir to sync to {}".format(dest))
if not self.state.dry_run:
dest.mkdir(parents=True)
sunk = []
for syncable in src.rglob("*"):
if syncable == src:
continue
skip = False
for pat in patterns:
cur = syncable
while cur != src:
if cur.match(pat):
logging.debug("Skipping sync of {}".format(syncable))
skip = True
break
cur = cur.parent
if skip:
break
if skip:
continue
partial = syncable.relative_to(src)
sunk += [(syncable, dest / partial)]
for fsrc, fdest in sunk: # type: Path, Path
fsrc = src.resolve().parent / fsrc
if fsrc.is_dir():
logging.info("Make Sync dir {} in {}".format(fsrc, fdest))
Mkdir(fdest)
# TODO: copy chmod settings
else:
logging.info("Syncing {} to {}".format(fsrc, fdest))
Copy(src=fsrc, dest=fdest)
class Echo(Command):
"""echos the given arguments to the command line"""
def do(self, *args):
"""
:param *args: arguments to be echoed
"""
logging.info(" ".join(str(a) for a in args))
class Touch(Command):
"""Creates an empty file"""
def do(self, fname):
"""@todo: to be defined
:param fname: @todo
"""
fname = Path(fname).expanduser()
logging.info("Touching {}".format(fname))
if not self.state.dry_run:
fname.touch()
class Pip(Command):
def do(self, pkgs=[], user=True, upgrade=True):
opts = []
if user:
opts += ["--user"]
if upgrade:
opts += ["--upgrade"]
cmd = "python3 -mpip install {} {}".format(
" ".join(opts),
" ".join(pkgs))
Cmd(cmd)
class Brew(Command):
""" upgrade is currently ignored"""
def do(self, pkgs=[], cask=False, upgrade=True, tap=False):
assert sum([tap, cask]) < 2, "Choose either cask or tap or neither"
prefix = ["brew"]
if cask:
prefix += ["cask"]
cmd = "list"
elif tap:
prefix += ["tap"]
cmd = ""
else:
cmd = "list"
prefix = " " .join(prefix)
logging.info("Checking for installed {} pkgs".format(prefix))
ret = Cmd("{} {}".format(prefix, cmd), quiet=True)
installed = {p.split("@")[0] for p in ret.stdout.split("\n")}
needed = set(pkgs) - installed
if needed:
logging.info("Installing from {}: {}".format(prefix, ", ".join(needed)))
if tap:
Cmd("{} {}".format(prefix, " ".join(needed)))
else:
Cmd("{} install {}".format(prefix, " ".join(needed)))
else:
logging.info("No pkgs from {} needed".format(prefix))
# def main(args):
# commands = {}
# for name, var in globals().items():
# if name == "Command":
# continue
# if isinstance(var, type) and issubclass(var, Command):
# commands[name.lower()] = var
#
# evaluator = Evaluator(commands, options.dry_run)
# evaluator.start(options.conf_file)
#
# os.chdir(curdir)
# return 0
#
#
# if __name__ == "__main__":
# sys.exit(main(sys.argv))
| BootItState | identifier_name |
bootit2.py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# vim: ts=4 sts=4 sw=4 tw=100 sta et
__author__ = 'Patrick Butler'
__email__ = 'pbutler@killertux.org'
import argparse
import fcntl
import logging
import os
from pathlib import Path
import shutil
import subprocess as sp
import sys
logging.basicConfig(level=logging.INFO)
class BootItException(Exception):
pass
class BootItState:
state = []
@classmethod
def push_state(cls, bootit):
cls.state += [bootit]
@classmethod
def pop_state(cls):
cls.state.pop()
@classmethod
def cur_state(cls):
if cls.state:
return cls.state[-1]
else:
return None
class BootIt:
def __init__(self, args=None):
if args is None:
args = sys.argv
self.argparse(args)
self.selfdir = Path(__file__).absolute().parent
if (self.selfdir / ".git").exists():
logging.debug("Detected git mode")
else:
logging.debug("Detected untracked mode, forcing no update")
self.options.update = False
if self.options.update:
logging.info("Gitting")
Cmd("git", "pull", state=self)
def argparse(self, argv=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-G", "--no-git",
action="store_false", dest="update", default=True,
help="don't run git pull")
parser.add_argument("--conf_file", default="bootstrap.py")
parser.add_argument("--dry-run", action="store_true", default=False,
help="don't perform any real tasks")
parser.add_argument("--working-dir", default=Path.cwd(), type=Path)
self.options = parser.parse_args()
@property
def dry_run(self):
return self.options.dry_run
def __enter__(self):
BootItState.push_state(self)
self.orig_dir = Path.cwd()
self.curdir = self.options.working_dir
os.chdir(str(self.curdir))
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(str(self.orig_dir))
class Command(object):
def __init__(self, *args, state=None, **kwargs):
if state is None:
self.state = BootItState.cur_state()
else:
self.state = state
self.do(*args, **kwargs)
class Cmd(Command):
def do(self, *args, quiet=False, exception=True):
if len(args) == 1:
shell = True
else:
shell = False
self.stdout = ""
self.stderr = ""
logging.info("Running cmd {} shell={}".format(args, shell))
if self.state.dry_run:
return
p = sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE, shell=shell)
while p.returncode is None:
s = self.non_block_read(p.stdout)
if s:
if not quiet:
sys.stdout.write(s)
self.stdout += s
s = self.non_block_read(p.stderr)
if s:
sys.stderr.write(s)
self.stderr += s
p.poll()
logging.debug("ret={} stdout={} stderr={}".format(p.returncode,
self.stdout,
self.stderr))
self.okay = (p.returncode == 0)
if exception and not self.okay:
raise BootItException("Cmd {} failed".format(" ".join(args)))
def non_block_read(self, output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read().decode("utf8")
except Exception:
return ""
class Copy(Command):
def do(self, src, dest):
src = Path(src).expanduser()
dest = Path(dest).expanduser()
destdir = dest.parent
if not destdir.exists():
logging.info("making dir {}". format(destdir))
if not self.state.dry_run:
destdir.mkdir(parents=True)
if dest.exists():
logging.info("removing existing file {}". format(dest))
if not self.state.dry_run:
dest.unlink()
shutil.copy2(str(src), str(dest))
class Link(Command):
def do(self, src, dest):
src = Path(src).expanduser()
if not src.exists():
raise BootItException("{} does not exist for linking".format(src))
src = src.resolve()
dest = Path(dest).expanduser()
try:
logging.info("%s %s", src, dest.resolve())
rel_src = os.path.relpath(src, dest.resolve().parent)
except ValueError:
logging.info("Couldn't calculate relative path using absolute {}".format(src))
rel_src = src.resolve()
destdir = dest.parent
if not destdir.exists():
destdir.mkdir(parents=True)
if dest.exists() and not dest.is_symlink():
raise BootItException("Real file exists at %s" % dest)
elif dest.is_symlink() and not dest.exists():
logging.warn("Removing dangling link %s->%s" % (dest, rel_src))
if not self.state.dry_run:
os.unlink(str(dest))
elif dest.exists() and not src.samefile(dest):
logging.warn("Changing link from %s to %s" % (os.path.realpath(str(dest)), rel_src))
if not self.state.dry_run:
os.unlink(str(dest))
if not dest.exists():
logging.info("Linking from %s to %s" % (rel_src, dest))
dest.symlink_to(rel_src)
class Mkdir(Command):
def do(self, name, mode):
|
class Chmod(Command):
def do(self, name, mode):
name = Path(name).expanduser()
if name.exists():
logging.info("Chmodding dir {}".format(name))
if not self.state.dry_run:
name.chmod(int(mode, 8))
else:
raise BootItException("No file to chmod {}".format(name))
class Sync(Command):
"""clean is ignored at the moment"""
def do(self, src, dest, clean=True, patterns=None):
src = Path(src).expanduser()
dest = Path(dest).expanduser()
if patterns is None:
patterns = [".git"]
if not dest.exists():
logging.info("Creating dir to sync to {}".format(dest))
if not self.state.dry_run:
dest.mkdir(parents=True)
sunk = []
for syncable in src.rglob("*"):
if syncable == src:
continue
skip = False
for pat in patterns:
cur = syncable
while cur != src:
if cur.match(pat):
logging.debug("Skipping sync of {}".format(syncable))
skip = True
break
cur = cur.parent
if skip:
break
if skip:
continue
partial = syncable.relative_to(src)
sunk += [(syncable, dest / partial)]
for fsrc, fdest in sunk: # type: Path, Path
fsrc = src.resolve().parent / fsrc
if fsrc.is_dir():
logging.info("Make Sync dir {} in {}".format(fsrc, fdest))
Mkdir(fdest)
# TODO: copy chmod settings
else:
logging.info("Syncing {} to {}".format(fsrc, fdest))
Copy(src=fsrc, dest=fdest)
class Echo(Command):
"""echos the given arguments to the command line"""
def do(self, *args):
"""
:param *args: arguments to be echoed
"""
logging.info(" ".join(str(a) for a in args))
class Touch(Command):
"""Creates an empty file"""
def do(self, fname):
"""@todo: to be defined
:param fname: @todo
"""
fname = Path(fname).expanduser()
logging.info("Touching {}".format(fname))
if not self.state.dry_run:
fname.touch()
class Pip(Command):
def do(self, pkgs=[], user=True, upgrade=True):
opts = []
if user:
opts += ["--user"]
if upgrade:
opts += ["--upgrade"]
cmd = "python3 -mpip install {} {}".format(
" ".join(opts),
" ".join(pkgs))
Cmd(cmd)
class Brew(Command):
""" upgrade is currently ignored"""
def do(self, pkgs=[], cask=False, upgrade=True, tap=False):
assert sum([tap, cask]) < 2, "Choose either cask or tap or neither"
prefix = ["brew"]
if cask:
prefix += ["cask"]
cmd = "list"
elif tap:
prefix += ["tap"]
cmd = ""
else:
cmd = "list"
prefix = " " .join(prefix)
logging.info("Checking for installed {} pkgs".format(prefix))
ret = Cmd("{} {}".format(prefix, cmd), quiet=True)
installed = {p.split("@")[0] for p in ret.stdout.split("\n")}
needed = set(pkgs) - installed
if needed:
logging.info("Installing from {}: {}".format(prefix, ", ".join(needed)))
if tap:
Cmd("{} {}".format(prefix, " ".join(needed)))
else:
Cmd("{} install {}".format(prefix, " ".join(needed)))
else:
logging.info("No pkgs from {} needed".format(prefix))
# def main(args):
# commands = {}
# for name, var in globals().items():
# if name == "Command":
# continue
# if isinstance(var, type) and issubclass(var, Command):
# commands[name.lower()] = var
#
# evaluator = Evaluator(commands, options.dry_run)
# evaluator.start(options.conf_file)
#
# os.chdir(curdir)
# return 0
#
#
# if __name__ == "__main__":
# sys.exit(main(sys.argv))
| name = Path(name).expanduser()
if not name.exists():
logging.info("Making dir {}".format(name))
if not self.state.dry_run:
name.mkdir(int(mode, 8), parents=True)
Chmod(name, mode) | identifier_body |
bootit2.py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# vim: ts=4 sts=4 sw=4 tw=100 sta et
__author__ = 'Patrick Butler'
__email__ = 'pbutler@killertux.org'
import argparse
import fcntl
import logging
import os
from pathlib import Path
import shutil
import subprocess as sp
import sys
logging.basicConfig(level=logging.INFO)
class BootItException(Exception):
pass
class BootItState:
state = []
@classmethod
def push_state(cls, bootit):
cls.state += [bootit]
@classmethod
def pop_state(cls):
cls.state.pop()
@classmethod
def cur_state(cls):
if cls.state:
return cls.state[-1]
else:
return None
class BootIt:
def __init__(self, args=None):
if args is None:
args = sys.argv
self.argparse(args)
self.selfdir = Path(__file__).absolute().parent
if (self.selfdir / ".git").exists():
logging.debug("Detected git mode")
else:
logging.debug("Detected untracked mode, forcing no update")
self.options.update = False
if self.options.update:
logging.info("Gitting")
Cmd("git", "pull", state=self)
def argparse(self, argv=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-G", "--no-git",
action="store_false", dest="update", default=True,
help="don't run git pull")
parser.add_argument("--conf_file", default="bootstrap.py")
parser.add_argument("--dry-run", action="store_true", default=False,
help="don't perform any real tasks")
parser.add_argument("--working-dir", default=Path.cwd(), type=Path)
self.options = parser.parse_args()
@property
def dry_run(self):
return self.options.dry_run
def __enter__(self):
BootItState.push_state(self)
self.orig_dir = Path.cwd()
self.curdir = self.options.working_dir
os.chdir(str(self.curdir))
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(str(self.orig_dir))
class Command(object):
def __init__(self, *args, state=None, **kwargs):
if state is None:
self.state = BootItState.cur_state()
else:
self.state = state
self.do(*args, **kwargs)
class Cmd(Command):
def do(self, *args, quiet=False, exception=True):
if len(args) == 1:
shell = True
else:
shell = False
self.stdout = ""
self.stderr = ""
logging.info("Running cmd {} shell={}".format(args, shell))
if self.state.dry_run:
return
p = sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE, shell=shell)
while p.returncode is None:
s = self.non_block_read(p.stdout)
if s:
if not quiet:
sys.stdout.write(s)
self.stdout += s
s = self.non_block_read(p.stderr)
if s:
sys.stderr.write(s)
self.stderr += s
p.poll()
logging.debug("ret={} stdout={} stderr={}".format(p.returncode,
self.stdout,
self.stderr))
self.okay = (p.returncode == 0)
if exception and not self.okay:
raise BootItException("Cmd {} failed".format(" ".join(args)))
def non_block_read(self, output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read().decode("utf8")
except Exception:
return ""
class Copy(Command):
def do(self, src, dest):
src = Path(src).expanduser()
dest = Path(dest).expanduser()
destdir = dest.parent
if not destdir.exists():
logging.info("making dir {}". format(destdir))
if not self.state.dry_run:
destdir.mkdir(parents=True)
if dest.exists():
logging.info("removing existing file {}". format(dest))
if not self.state.dry_run:
dest.unlink()
shutil.copy2(str(src), str(dest))
class Link(Command):
def do(self, src, dest):
src = Path(src).expanduser()
if not src.exists():
raise BootItException("{} does not exist for linking".format(src))
src = src.resolve()
dest = Path(dest).expanduser()
try:
logging.info("%s %s", src, dest.resolve())
rel_src = os.path.relpath(src, dest.resolve().parent)
except ValueError:
logging.info("Couldn't calculate relative path using absolute {}".format(src))
rel_src = src.resolve()
destdir = dest.parent
if not destdir.exists():
destdir.mkdir(parents=True)
if dest.exists() and not dest.is_symlink():
raise BootItException("Real file exists at %s" % dest)
elif dest.is_symlink() and not dest.exists():
logging.warn("Removing dangling link %s->%s" % (dest, rel_src))
if not self.state.dry_run:
os.unlink(str(dest))
elif dest.exists() and not src.samefile(dest):
logging.warn("Changing link from %s to %s" % (os.path.realpath(str(dest)), rel_src))
if not self.state.dry_run:
os.unlink(str(dest))
if not dest.exists():
logging.info("Linking from %s to %s" % (rel_src, dest))
dest.symlink_to(rel_src)
class Mkdir(Command):
def do(self, name, mode):
name = Path(name).expanduser()
if not name.exists():
logging.info("Making dir {}".format(name))
if not self.state.dry_run:
name.mkdir(int(mode, 8), parents=True)
Chmod(name, mode)
class Chmod(Command):
def do(self, name, mode):
name = Path(name).expanduser()
if name.exists():
logging.info("Chmodding dir {}".format(name))
if not self.state.dry_run:
name.chmod(int(mode, 8))
else:
raise BootItException("No file to chmod {}".format(name))
class Sync(Command):
"""clean is ignored at the moment"""
def do(self, src, dest, clean=True, patterns=None):
src = Path(src).expanduser()
dest = Path(dest).expanduser()
if patterns is None:
patterns = [".git"]
if not dest.exists():
logging.info("Creating dir to sync to {}".format(dest))
if not self.state.dry_run:
dest.mkdir(parents=True)
sunk = []
for syncable in src.rglob("*"):
if syncable == src:
continue
skip = False
for pat in patterns:
cur = syncable
while cur != src:
if cur.match(pat):
logging.debug("Skipping sync of {}".format(syncable))
skip = True
break
cur = cur.parent
if skip:
break
if skip:
continue
partial = syncable.relative_to(src)
sunk += [(syncable, dest / partial)]
for fsrc, fdest in sunk: # type: Path, Path
fsrc = src.resolve().parent / fsrc
if fsrc.is_dir():
logging.info("Make Sync dir {} in {}".format(fsrc, fdest))
Mkdir(fdest)
# TODO: copy chmod settings
else:
logging.info("Syncing {} to {}".format(fsrc, fdest))
Copy(src=fsrc, dest=fdest)
class Echo(Command):
"""echos the given arguments to the command line"""
def do(self, *args):
"""
:param *args: arguments to be echoed
"""
logging.info(" ".join(str(a) for a in args))
class Touch(Command):
"""Creates an empty file"""
def do(self, fname):
"""@todo: to be defined
:param fname: @todo
"""
fname = Path(fname).expanduser()
logging.info("Touching {}".format(fname))
if not self.state.dry_run:
fname.touch()
class Pip(Command):
def do(self, pkgs=[], user=True, upgrade=True):
opts = []
if user:
opts += ["--user"]
if upgrade:
opts += ["--upgrade"]
cmd = "python3 -mpip install {} {}".format(
" ".join(opts),
" ".join(pkgs))
Cmd(cmd)
class Brew(Command):
""" upgrade is currently ignored"""
def do(self, pkgs=[], cask=False, upgrade=True, tap=False):
assert sum([tap, cask]) < 2, "Choose either cask or tap or neither"
prefix = ["brew"]
if cask:
prefix += ["cask"]
cmd = "list"
elif tap:
prefix += ["tap"]
cmd = ""
else:
cmd = "list"
prefix = " " .join(prefix)
logging.info("Checking for installed {} pkgs".format(prefix))
ret = Cmd("{} {}".format(prefix, cmd), quiet=True)
installed = {p.split("@")[0] for p in ret.stdout.split("\n")}
needed = set(pkgs) - installed
if needed:
logging.info("Installing from {}: {}".format(prefix, ", ".join(needed)))
if tap:
Cmd("{} {}".format(prefix, " ".join(needed)))
else:
Cmd("{} install {}".format(prefix, " ".join(needed)))
else:
logging.info("No pkgs from {} needed".format(prefix))
# def main(args):
# commands = {}
# for name, var in globals().items():
# if name == "Command":
# continue
# if isinstance(var, type) and issubclass(var, Command):
# commands[name.lower()] = var | # evaluator.start(options.conf_file)
#
# os.chdir(curdir)
# return 0
#
#
# if __name__ == "__main__":
# sys.exit(main(sys.argv)) | #
# evaluator = Evaluator(commands, options.dry_run) | random_line_split |
jwxt.js | const req = require('./req')
const utils = require('./utils');
const resModel = require('../config/resModel')
require('tls').DEFAULT_MIN_VERSION = 'TLSv1'; // 兼容教务系统TLS1.1
// 进行登录并在返回值携带cookies
exports.doLogin = async (username, password) => {
const url = `xk/LoginToXk`;
const datas = {
encoded: utils.encodeInp(username) + '%%%' + utils.encodeInp(password)
};
let loginRes;
try {
loginRes = await req.post(url, datas, null, true);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
} | case 302: // 登录成功,302是因为需要跳转到主界面,此时cookies有效
return {
ret: true,
data: cookies
};
case 200: // 跳转回登录页,证明出现了登录错误,捕获错误类型
const regErrMsg = /<font style="display: inline;white-space:nowrap;" color="red">([^<]*?)<\/font\>/gi;
return {
ret: false,
code: resModel.CODE.NO_AUTH,
msg: regErrMsg.exec(loginRes.data)[1].trim() || '登录教务系统错误'
};
default: // 意料意外的返回状态码
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
}
// 获取个人资料
exports.getMyInfo = async (cookies) => {
const url = `framework/xsMain.jsp`;
const customHeader = { 'Cookie': cookies };
let myInfo;
try {
myInfo = await req.get(url, null, customHeader);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
if (utils.isSessionExpired(myInfo)) {
return {
ret: false,
code: resModel.CODE.COOKIE_EXPIRED,
msg: resModel.TEXT.COOKIE_EXPIRED
};
}
const regDiv = /<div id="Top1_divLoginName" class="Nsb_top_menu_nc" style="color: #000000;">([^<]*?)<\/div\>/gi;
const nameAndNum = regDiv.exec(myInfo.data)[1].trim().split('(');
return {
ret: true,
data: {
name: nameAndNum[0],
number: nameAndNum[1].substr(0, nameAndNum[1].length - 1),
isStudent: nameAndNum[1].length > 10
}
};
}
// 获取课程
exports.getCourses = async (cookies, term, zc = null) => {
const url = `xskb/xskb_list.do`;
const datas = { xnxq01id: term };
if (zc) { datas[zc] = zc };
const customHeader = { 'cookie': cookies };
let courseRes;
try {
courseRes = await req.post(url, datas, customHeader, false);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
if (utils.isSessionExpired(courseRes)) {
return {
ret: false,
code: resModel.CODE.COOKIE_EXPIRED,
msg: resModel.TEXT.COOKIE_EXPIRED
};
}
if (!courseRes.data) {
return {
ret: false,
code: resModel.CODE.NOT_FOUND,
msg: resModel.TEXT.NOT_FOUND
};
}
// 将文本的数据解析为表格数组
const kbReg = /<table id="kbtable"[\w\W]*?>([\w\W]*?)<\/table>/;
const coursesContent = utils.tdToArray(kbReg.exec(courseRes.data)[1], true);
// 去除表头
coursesContent.shift();
// 获取备注
const remarkArr = coursesContent.pop();
// 采用Set进行课程的存储
const coursesSet = new Set();
// 进行表格数组处理
coursesContent.map(row => {
// 去除开头的 “第*节” 标识
row.shift();
// 单节课循环
row.forEach((course, day) => {
// 删首尾空
const courseCellText = course.trim();
if (courseCellText) {
// 课程内容不为空,拆分多节课程后(如有),计入Set中
const courseCellArr = courseCellText.split('{@@}');
courseCellArr.forEach(perCourse => {
coursesSet.add(`${perCourse}星期{|}${day}`);
})
}
});
});
const coursesOutArr = [];
// 定义课程Object的key
const keyName = {
'标题': 'name',
'老师': 'teacher',
'周次(节次)': 'week_text',
'教室': 'classroom',
'节次': 'session_text',
'星期': 'day'
};
coursesSet.forEach(course => {
const courseOutObj = {};
// 解析课程文本的基本数据
const courseInfoArr = course.split('{||}');
// 去除重复的课程标题
courseInfoArr.shift();
// 循环每项信息进行 key-value 的解析
courseInfoArr.forEach(infoText => {
const infoArr = infoText.split('{|}');
if (infoArr.length === 1) {
// 没有key,对应为课程标题
courseOutObj[keyName['标题']] = infoArr[0].trim();
} else {
// 有key,对应课程信息
courseOutObj[keyName[infoArr[0].trim()]] = infoArr[1].trim();
}
})
// 解析显示的周次,生成对应数组
let courseWeekText = courseOutObj.week_text;
if (courseWeekText) {
const weekArr = []; //输出周次数组
let weekDescMode; // 模式说明:0-全周,1单周,2-双周
if (courseWeekText.endsWith('单周')) {
weekDescMode = 1;
} else if (courseWeekText.endsWith('双周')) {
weekDescMode = 2;
} else {
weekDescMode = 0;
}
// 判断传入的周次是否符合单双周模式的描述(不能直接依靠开始结束循环,会有例如「1-18单周」这种东西的描述,很坑)
const isWeekLeagl = week => {
if (weekDescMode === 0 || (weekDescMode === 1 && week % 2 === 1) || weekDescMode === 2 && week % 2 === 0) {
return true;
}
return false;
}
// 进行周次循环,将上课的周次推入week数组
courseWeekText = courseWeekText.replace(/[\u4e00-\u9fa5]/g, '');
courseWeekTextArr = courseWeekText.split(',');
courseWeekTextArr.forEach(weekRangeText => {
if (weekRangeText.indexOf('-') === -1) {
// 剩下单周,如13,判断是否可以直接推入结果数组
if (isWeekLeagl(weekRangeText)) {
weekArr.push(parseInt(weekRangeText));
}
} else {
// 是周次时间段,如2-6
weekRangeTextArr = weekRangeText.split('-');
for (let i = parseInt(weekRangeTextArr[0]); i <= parseInt(weekRangeTextArr[1]); i++) {
if (isWeekLeagl(i)) {
weekArr.push(i);
}
}
}
})
courseOutObj.week = weekArr;
}
// 解析上课节次
const courseSessionText = courseOutObj.session_text;
if (courseSessionText) {
if (courseSessionText.indexOf('-') === -1) {
// 单节课程
courseOutObj.session_start = parseInt(courseSessionText);
courseOutObj.session_end = parseInt(courseSessionText);
} else {
// 范围课程
courseSessionTextArr = courseSessionText.split('-');
courseOutObj.session_start = parseInt(courseSessionTextArr[0]);
courseOutObj.session_end = parseInt(courseSessionTextArr[courseSessionTextArr.length - 1]);
}
}
// 星期day字段整型化
courseOutObj.day = parseInt(courseOutObj.day);
coursesOutArr.push(courseOutObj);
})
return {
ret: true,
courses: coursesOutArr,
remark: remarkArr[1].trim() || null // 为空则传null
};
}
// 获取成绩
exports.getGrade = async (cookies, term) => {
const url = `kscj/cjcx_list`;
const datas = { kksj: term };
const customHeader = { 'cookie': cookies };
let gradeRes;
try {
gradeRes = await req.post(url, datas, customHeader, false);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
if (utils.isSessionExpired(gradeRes)) {
return {
ret: false,
code: resModel.CODE.COOKIE_EXPIRED,
msg: resModel.TEXT.COOKIE_EXPIRED
};
}
if (!gradeRes.data) {
return {
ret: false,
code: resModel.CODE.NOT_FOUND,
msg: resModel.TEXT.NOT_FOUND
};
}
// 将文本的数据解析为表格数组
const gradeReg = /<table id="dataList"[\w\W]*?>([\w\W]*?)<\/table>/;
const gradesContent = utils.tdToArray(gradeReg.exec(gradeRes.data)[1], false, true);
// 定义表头字段
const keyName = {
'序号': 'no',
'开课学期': 'term',
'课程编号': 'number',
'课程名称': 'name',
'总评成绩': 'grade',
'学分': 'credit',
'总学时': 'class_hour',
'绩点': 'gpa',
'考核方式': 'exam_type',
'课程属性': 'properties',
'课程性质': 'class_type'
}
// 最终输出数组
const gradeOutArr = []
// 取出实际表头
const titleName = gradesContent.shift()
// 取出统计信息
const countText = gradesContent.pop()[0].trim() || ''
// 进行单个课程的数据循环
gradesContent.forEach(gradeArr => {
const perGradeObj = {}
gradeArr.forEach((gradeInfo, index) => {
perGradeObj[keyName[titleName[index]]] = gradeInfo.trim()
})
gradeOutArr.push(perGradeObj)
})
// 信息统计匹配
const countReg = /^选课学分:[\s]*([\d.]+)[\s]*获得学分:[\s]*([\d.]+)[\s]*平均学分绩点:[\s]*([\d.]+)[\s]*$/
const countRegRes = countReg.exec(countText) || {}
// console.log(countText,countRegRes)
const countObj = {
credit_expected: countRegRes[1] || null,
credit_gained: countRegRes[2] || null,
gpa: countRegRes[3] || null
}
return {
ret: true,
grade: gradeOutArr,
count: countObj
};
}
// 查询空教室
exports.getEmptyRoom = async (cookies, term, buildid, week, day, session) => {
const url = `kbxx/jsjy_query2`;
const datas = {
typewhere: 'jszq',
xnxqh: term,
xqid: 'Vn',
jxlbh: buildid,
zc: week,
zc2: week,
xq: day,
xq2: day,
jc1: session,
jc2: session
};
const customHeader = { 'cookie': cookies };
let roomRes;
try {
roomRes = await req.post(url, datas, customHeader, false);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
if (utils.isSessionExpired(roomRes)) {
return {
ret: false,
code: resModel.CODE.COOKIE_EXPIRED,
msg: resModel.TEXT.COOKIE_EXPIRED
};
}
if (!roomRes.data) {
return {
ret: false,
code: resModel.CODE.NOT_FOUND,
msg: resModel.TEXT.NOT_FOUND
};
}
// 将文本的数据解析为表格数组
const roomReg = /<table id="dataList"[\w\W]*?>([\w\W]*?)<\/table>/;
const roomsContent = utils.tdToArray(roomReg.exec(roomRes.data)[1], false, true);
// 定义教室状态模板
const roomStatus = {
'L': '临调',
'G': '调课',
'': '空闲',
'Κ': '考试',
'X': '锁定',
'J': '借用',
'◆': '上课'
}
// 最终输出数组
const roomsOutArr = []
// 删除最前面无用的星期行数
roomsContent.shift();
// 删除最后面无用的空行
roomsContent.pop();
// 取出表头
const sessionTitle = roomsContent.shift();
// 去除第一个空白单元格内容
sessionTitle.shift()
// 循环每个课室并解析数据
roomsContent.forEach(room => {
const roomObj = {
title: null,
capacities: 0,
status: []
};
const roomName = room.shift();
const titleReg = /([\w\W]*?)\(([\d]*)\/([\d]*)\)/;
const titleRegRes = titleReg.exec(roomName);
roomObj.title = titleRegRes[1];
roomObj.capacities = parseInt(titleRegRes[2]);
room.forEach(roomCell => {
roomObj.status.push(roomStatus[roomCell])
})
roomsOutArr.push(roomObj);
})
return {
ret: true,
sessionTitle: sessionTitle,
roomInfo: roomsOutArr
};
} | const cookies = loginRes.headers['set-cookie'][0];
switch (loginRes.statusCode) { | random_line_split |
jwxt.js | const req = require('./req')
const utils = require('./utils');
const resModel = require('../config/resModel')
require('tls').DEFAULT_MIN_VERSION = 'TLSv1'; // 兼容教务系统TLS1.1
// 进行登录并在返回值携带cookies
exports.doLogin = async (username, password) => {
const url = `xk/LoginToXk`;
const datas = {
encoded: utils.encodeInp(username) + '%%%' + utils.encodeInp(password)
};
let loginRes;
try {
loginRes = await req.post(url, datas, null, true);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
const cookies = loginRes.headers['set-cookie'][0];
switch (loginRes.statusCode) {
case 302: // 登录成功,302是因为需要跳转到主界面,此时cookies有效
return {
ret: true,
data: cookies
};
case 200: // 跳转回登录页,证明出现了登录错误,捕获错误类型
const regErrMsg = /<font style="display: inline;white-space:nowrap;" color="red">([^<]*?)<\/font\>/gi;
return {
ret: false,
code: resModel.CODE.NO_AUTH,
msg: regErrMsg.exec(loginRes.data)[1].trim() || '登录教务系统错误'
};
default: // 意料意外的返回状态码
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
}
// 获取个人资料
exports.getMyInfo = async (cookies) => {
const url = `framework/xsMain.jsp`;
const customHeader = { 'Cookie': cookies };
let myInfo;
try {
myInfo = await req.get(url, null, customHeader);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
if (utils.isSessionExpired(myInfo)) {
return {
ret: false,
code: resModel.CODE.COOKIE_EXPIRED,
msg: resModel.TEXT.COOKIE_EXPIRED
};
}
const regDiv = /<div id="Top1_divLoginName" class="Nsb_top_menu_nc" style="color: #000000;">([^<]*?)<\/div\>/gi;
const nameAndNum = regDiv.exec(myInfo.data)[1].trim().split('(');
return {
ret: true,
data: {
name: nameAndNum[0],
number: nameAndNum[1].substr(0, nameAndNum[1].length - 1),
isStudent: nameAndNum[1].length > 10
}
};
}
// 获取课程
exports.getCourses = async (cookies, term, zc = null) => {
const url = `xskb/xskb_list.do`;
const datas = { xnxq01id: term };
if (zc) { datas[zc] = zc };
const customHeader = { 'cookie': cookies };
let courseRes;
try {
courseRes = await req.post(url, datas, customHeader, false);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
if (utils.isSessionExpired(courseRes)) {
return {
ret: false,
code: resModel.CODE.COOKIE_EXPIRED,
msg: resModel.TEXT.COOKIE_EXPIRED
};
}
if (!courseRes.data) {
return {
ret: false,
code: resModel.CODE.NOT_FOUND,
msg: resModel.TEXT.NOT_FOUND
};
}
// 将文本的数据解析为表格数组
const kbReg = /<table id="kbtable"[\w\W]*?>([\w\W]*?)<\/table>/;
const coursesContent = utils.tdToArray(kbReg.exec(courseRes.data)[1], true);
// 去除表头
coursesContent.shift();
// 获取备注
const remarkArr = coursesContent.pop();
// 采用Set进行课程的存储
const coursesSet = new Set();
// 进行表格数组处理
coursesContent.map(row => {
// 去除开头的 “第*节” 标识
row.shift();
// 单节课循环
row.forEach((course, day) => {
// 删首尾空
const courseCellText = course.trim();
if (courseCellText) {
// 课程内容不为空,拆分多节课程后(如有),计入Set中
const courseCellArr = courseCellText.split('{@@}');
courseCellArr.forEach(perCourse => {
coursesSet.add(`${perCourse}星期{|}${day}`);
})
}
});
});
const coursesOutArr = [];
// 定义课程Object的key
const keyName = {
'标题': 'name',
'老师': 'teacher',
'周次(节次)': 'week_text',
'教室': 'classroom',
'节次': 'session_text',
'星期': 'day'
};
coursesSet.forEach(course => {
const courseOutObj = {};
// 解析课程文本的基本数据
const courseInfoArr = course.split('{||}');
// 去除重复的课程标题
courseInfoArr.shift();
// 循环每项信息进行 key-value 的解析
courseInfoArr.forEach(infoText => {
const infoArr = infoText.split('{|}');
if (infoArr.length === 1) {
// 没有key,对应为课程标题
courseOutObj[keyName['标题']] = infoArr[0].trim();
} else {
// 有key,对应课程信息
courseOutObj[keyName[infoArr[0].trim()]] = infoArr[1].trim();
}
})
// 解析显示的周次,生成对应数组
let courseWeekText = courseOutObj.week_text;
if (courseWeekText) {
const weekArr = []; //输出周次数组
let weekDescMode; // 模式说明:0-全周,1单周,2-双周
if (courseWeekText.endsWith('单周')) {
weekDescMode = 1;
} else if (courseWeekText.endsWith('双周')) {
weekDescMode = 2;
} else {
weekDescMode = 0;
}
// 判断传入的周次是否符合单双周模式的描述(不能直接依靠开始结束循环,会有例如「1-18单周」这种东西的描述,很坑)
const isWeekLeagl = week => {
if (weekDescMode === 0 || (weekDescMode === 1 && week % 2 === 1) || weekDescMode === 2 && week % 2 === 0) {
return true;
}
return false;
}
// 进行周次循环,将上课的周次推入week数组
courseWeekText = courseWeekText.replace(/[\u4e00-\u9fa5]/g, '');
courseWeekTextArr = courseWeekText.split(',');
courseWeekTextArr.forEach(weekRangeText => {
if (weekRangeText.indexOf('-') === -1) {
// 剩下单周,如13,判断是否可以直接推入结果数组
if (isWeekLeagl(weekRangeText)) {
weekArr.push(parseInt(weekRangeText));
}
} else {
// 是周次时间段,如2-6
weekRangeTextArr = weekRangeText.split('-');
for (let i = parseInt(weekRangeTextArr[0]); i <= parseInt(weekRangeTextArr[1]); i++) {
| weekArr.push(i);
}
}
}
})
courseOutObj.week = weekArr;
}
// 解析上课节次
const courseSessionText = courseOutObj.session_text;
if (courseSessionText) {
if (courseSessionText.indexOf('-') === -1) {
// 单节课程
courseOutObj.session_start = parseInt(courseSessionText);
courseOutObj.session_end = parseInt(courseSessionText);
} else {
// 范围课程
courseSessionTextArr = courseSessionText.split('-');
courseOutObj.session_start = parseInt(courseSessionTextArr[0]);
courseOutObj.session_end = parseInt(courseSessionTextArr[courseSessionTextArr.length - 1]);
}
}
// 星期day字段整型化
courseOutObj.day = parseInt(courseOutObj.day);
coursesOutArr.push(courseOutObj);
})
return {
ret: true,
courses: coursesOutArr,
remark: remarkArr[1].trim() || null // 为空则传null
};
}
// 获取成绩
exports.getGrade = async (cookies, term) => {
const url = `kscj/cjcx_list`;
const datas = { kksj: term };
const customHeader = { 'cookie': cookies };
let gradeRes;
try {
gradeRes = await req.post(url, datas, customHeader, false);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
if (utils.isSessionExpired(gradeRes)) {
return {
ret: false,
code: resModel.CODE.COOKIE_EXPIRED,
msg: resModel.TEXT.COOKIE_EXPIRED
};
}
if (!gradeRes.data) {
return {
ret: false,
code: resModel.CODE.NOT_FOUND,
msg: resModel.TEXT.NOT_FOUND
};
}
// 将文本的数据解析为表格数组
const gradeReg = /<table id="dataList"[\w\W]*?>([\w\W]*?)<\/table>/;
const gradesContent = utils.tdToArray(gradeReg.exec(gradeRes.data)[1], false, true);
// 定义表头字段
const keyName = {
'序号': 'no',
'开课学期': 'term',
'课程编号': 'number',
'课程名称': 'name',
'总评成绩': 'grade',
'学分': 'credit',
'总学时': 'class_hour',
'绩点': 'gpa',
'考核方式': 'exam_type',
'课程属性': 'properties',
'课程性质': 'class_type'
}
// 最终输出数组
const gradeOutArr = []
// 取出实际表头
const titleName = gradesContent.shift()
// 取出统计信息
const countText = gradesContent.pop()[0].trim() || ''
// 进行单个课程的数据循环
gradesContent.forEach(gradeArr => {
const perGradeObj = {}
gradeArr.forEach((gradeInfo, index) => {
perGradeObj[keyName[titleName[index]]] = gradeInfo.trim()
})
gradeOutArr.push(perGradeObj)
})
// 信息统计匹配
const countReg = /^选课学分:[\s]*([\d.]+)[\s]*获得学分:[\s]*([\d.]+)[\s]*平均学分绩点:[\s]*([\d.]+)[\s]*$/
const countRegRes = countReg.exec(countText) || {}
// console.log(countText,countRegRes)
const countObj = {
credit_expected: countRegRes[1] || null,
credit_gained: countRegRes[2] || null,
gpa: countRegRes[3] || null
}
return {
ret: true,
grade: gradeOutArr,
count: countObj
};
}
// 查询空教室
exports.getEmptyRoom = async (cookies, term, buildid, week, day, session) => {
const url = `kbxx/jsjy_query2`;
const datas = {
typewhere: 'jszq',
xnxqh: term,
xqid: 'Vn',
jxlbh: buildid,
zc: week,
zc2: week,
xq: day,
xq2: day,
jc1: session,
jc2: session
};
const customHeader = { 'cookie': cookies };
let roomRes;
try {
roomRes = await req.post(url, datas, customHeader, false);
} catch (e) {
return {
ret: false,
code: resModel.CODE.JWXT_INACCESSIBLE,
msg: resModel.TEXT.JWXT_INACCESSIBLE
};
}
if (utils.isSessionExpired(roomRes)) {
return {
ret: false,
code: resModel.CODE.COOKIE_EXPIRED,
msg: resModel.TEXT.COOKIE_EXPIRED
};
}
if (!roomRes.data) {
return {
ret: false,
code: resModel.CODE.NOT_FOUND,
msg: resModel.TEXT.NOT_FOUND
};
}
// 将文本的数据解析为表格数组
const roomReg = /<table id="dataList"[\w\W]*?>([\w\W]*?)<\/table>/;
const roomsContent = utils.tdToArray(roomReg.exec(roomRes.data)[1], false, true);
// 定义教室状态模板
const roomStatus = {
'L': '临调',
'G': '调课',
'': '空闲',
'Κ': '考试',
'X': '锁定',
'J': '借用',
'◆': '上课'
}
// 最终输出数组
const roomsOutArr = []
// 删除最前面无用的星期行数
roomsContent.shift();
// 删除最后面无用的空行
roomsContent.pop();
// 取出表头
const sessionTitle = roomsContent.shift();
// 去除第一个空白单元格内容
sessionTitle.shift()
// 循环每个课室并解析数据
roomsContent.forEach(room => {
const roomObj = {
title: null,
capacities: 0,
status: []
};
const roomName = room.shift();
const titleReg = /([\w\W]*?)\(([\d]*)\/([\d]*)\)/;
const titleRegRes = titleReg.exec(roomName);
roomObj.title = titleRegRes[1];
roomObj.capacities = parseInt(titleRegRes[2]);
room.forEach(roomCell => {
roomObj.status.push(roomStatus[roomCell])
})
roomsOutArr.push(roomObj);
})
return {
ret: true,
sessionTitle: sessionTitle,
roomInfo: roomsOutArr
};
} | if (isWeekLeagl(i)) {
| conditional_block |
settings.py | """
Django settings for magnify project.
"""
import json
import os
from django.utils.translation import gettext_lazy as _
# pylint: disable=ungrouped-imports
import sentry_sdk
from configurations import Configuration, values
from sentry_sdk.integrations.django import DjangoIntegration
from magnify.apps.core.settings.mixins import MagnifyCoreConfigurationMixin
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join("/", "data")
def get_release():
"""Get the current release of the application.
By release, we mean the release from the version.json file à la Mozilla [1]
(if any). If this file has not been found, it defaults to "NA".
[1]
https://github.com/mozilla-services/Dockerflow/blob/master/docs/version_object.md
"""
# Try to get the current release from the version.json file generated by the
# CI during the Docker image build
try:
with open(os.path.join(BASE_DIR, "version.json"), encoding="utf8") as version:
return json.load(version)["version"]
except FileNotFoundError:
return "NA" # Default: not available
class Base(MagnifyCoreConfigurationMixin, Configuration):
"""
This is the base configuration every configuration (aka environnement) should inherit from. It
is recommended to configure third-party applications by creating a configuration mixins in
./configurations and compose the Base configuration with those mixins.
It depends on an environment variable that SHOULD be defined:
* DJANGO_SECRET_KEY
You may also want to override default configuration by setting the following environment
variables:
* DJANGO_SENTRY_DSN
* magnify_ES_HOST
* DB_NAME
* DB_HOST
* DB_PASSWORD
* DB_USER
"""
DEBUG = False
SITE_ID = 1
# Security
ALLOWED_HOSTS = []
CSRF_TRUSTED_ORIGINS = values.ListValue([])
SECRET_KEY = values.Value(None)
# CORS headers
CORS_ALLOWED_ORIGINS = values.ListValue([])
# System check reference:
# https://docs.djangoproject.com/en/2.2/ref/checks/#security
SILENCED_SYSTEM_CHECKS = values.ListValue(
[
# Allow the X_FRAME_OPTIONS to be set to "SAMEORIGIN"
"security.W019"
]
)
REST_FRAMEWORK = {
"ALLOWED_VERSIONS": ("1.0",),
"DEFAULT_AUTHENTICATION_CLASSES": values.ListValue(
["rest_framework_simplejwt.authentication.JWTAuthentication"],
environ_name="DRF_DEFAULT_AUTHENTICATION_CLASSES",
environ_prefix=None,
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 100,
"DEFAULT_VERSION": "1.0",
"DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.URLPathVersioning",
"EXCEPTION_HANDLER": "magnify.apps.core.api.exception_handler",
}
# Frontend
FRONTEND_CONFIGURATION = {
"API_URL": values.Value(environ_name="MAGNIFY_API_URL", environ_prefix=None),
"SHOW_REGISTER_LINK": values.BooleanValue(
True, environ_name="MAGNIFY_SHOW_REGISTER_LINK", environ_prefix=None
),
"KEYCLOAK_URL": values.Value(environ_name="KEYCLOAK_URL", environ_prefix=None),
"KEYCLOAK_REALM": values.Value(
"magnify", environ_name="KEYCLOAK_REALM", environ_prefix=None
),
"KEYCLOAK_CLIENT_ID": values.Value(
"magnify-front", environ_name="KEYCLOAK_CLIENT_ID", environ_prefix=None
),
"KEYCLOAK_EXPIRATION_SECONDS": values.IntegerValue(
30 * 60,
environ_name="KEYCLOAK_EXPIRATION_SECONDS",
environ_prefix=None,
),
}
# Application definition
ROOT_URLCONF = "urls"
WSGI_APPLICATION = "wsgi.application"
AUTH_USER_MODEL = "core.User"
JITSI_CONFIGURATION = {
"jitsi_domain": values.Value(environ_name="JITSI_DOMAIN", environ_prefix=None),
"jitsi_app_id": values.Value(environ_name="JITSI_APP_ID", environ_prefix=None),
"jitsi_secret_key": values.Value(
environ_name="JITSI_SECRET_KEY", environ_prefix=None
),
"jitsi_xmpp_domain": values.Value(
environ_name="JITSI_XMPP_DOMAIN", environ_prefix=None
),
"jitsi_guest_avatar": values.Value(
"", environ_name="JITSI_GUEST_AVATAR", environ_prefix=None
),
"jitsi_guest_username": values.Value(
"Guest", environ_name="JITSI_GUEST_USERNAME", environ_prefix=None
),
"jitsi_token_expiration_seconds": values.Value(
300, environ_name="JITSI_TOKEN_EXPIRATION_SECONDS", environ_prefix=None
),
}
JITSI_ROOM_PREFIX = values.Value(
"", environ_name="MAGNIFY_JITSI_ROOM_PREFIX", environ_prefix=None
)
DEFAULT_ROOM_IS_PUBLIC = values.BooleanValue(
True, environ_name="MAGNIFY_DEFAULT_ROOM_IS_PUBLIC", environ_prefix=None
)
ALLOW_UNREGISTERED_ROOMS = values.BooleanValue(
True, environ_name="MAGNIFY_ALLOW_UNREGISTERED_ROOMS", environ_prefix=None
)
ALLOW_API_USER_CREATE = values.BooleanValue(
False, environ_name="MAGNIFY_ALLOW_API_USER_CREATE", environ_prefix=None
)
# Database
DATABASES = {
"default": {
"ENGINE": values.Value(
"django.db.backends.postgresql_psycopg2",
environ_name="DB_ENGINE",
environ_prefix=None,
),
"NAME": values.Value(
"magnify", environ_name="DB_NAME", environ_prefix=None
),
"USER": values.Value(
"magnify", environ_name="DB_USER", environ_prefix=None
),
"PASSWORD": values.Value(environ_name="DB_PASSWORD", environ_prefix=None),
"HOST": values.Value(
"postgresql", environ_name="DB_HOST", environ_prefix=None
),
"PORT": values.Value(5432, environ_name="DB_PORT", environ_prefix=None),
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
MIGRATION_MODULES = {}
# Static files (CSS, JavaScript, Images)
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(DATA_DIR, "media")
STATIC_ROOT = os.path.join(DATA_DIR, "static")
# Simple JWT
SIMPLE_JWT = {
"ALGORITHM": values.Value(
"RS256", environ_name="MAGNIFY_JWT_ALGORITHM", environ_prefix=None
),
"JWK_URL": values.Value(
None, environ_name="MAGNIFY_JWT_JWK_URL", environ_prefix=None
),
"SIGNING_KEY": values.Value(
None, environ_name="MAGNIFY_JWT_SIGNING_KEY", environ_prefix=None
),
"VERIFYING_KEY": values.Value(
None, environ_name="MAGNIFY_JWT_VERIFYING_KEY", environ_prefix=None
),
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
"TOKEN_TYPE_CLAIM": "typ",
"USER_ID_FIELD": "jwt_sub",
"USER_ID_CLAIM": "sub",
"AUTH_TOKEN_CLASSES": ("magnify.apps.core.tokens.BearerToken",),
}
JWT_USER_FIELDS_SYNC = values.DictValue(
{
"email": "email",
"name": "name",
"username": "preferred_username",
},
environ_name="MAGNIFY_JWT_USER_FIELDS_SYNC",
environ_prefix=None,
)
JWT_USER_DEVICE_AUDIENCES = values.ListValue(
[],
environ_name="MAGNIFY_JWT_USER_DEVICE_AUDIENCES",
environ_prefix=None,
)
USERNAME_REGEX = values.Value(
r"^[a-z0-9_.-]+$",
environ_name="MAGNIFY_USERNAME_REGEX",
environ_prefix=None,
)
# Login/registration related settings
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
AUTHENTICATION_BACKENDS = ("django.contrib.auth.backends.ModelBackend",)
# Internationalization
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Templates
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.i18n",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.media",
"django.template.context_processors.csrf",
"django.template.context_processors.tz",
"django.template.context_processors.static",
],
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
}
]
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"corsheaders.middleware.CorsMiddleware",
"dockerflow.django.middleware.DockerflowMiddleware",
)
# Swagger
SWAGGER_SETTINGS = {
"SECURITY_DEFINITIONS": {
"Bearer": {"type": "apiKey", "name": "Authorization", "in": "header"},
}
}
# Django applications from the highest priority to the lowest
INSTALLED_APPS = (
# magnify stuff
"magnify.apps.core",
"magnify",
# Third party apps
"corsheaders",
"dockerflow.django",
"parler",
"rest_framework",
"drf_yasg",
# Django
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.admin",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"django.contrib.messages",
"django.contrib.humanize",
)
# Languages
# - Django
LANGUAGE_CODE = values.Value("en")
# Careful! Languages should be ordered by priority, as this tuple is used to get
# fallback/default languages throughout the app.
# Use "en" as default as it is the language that is most likely to be spoken by any visitor
# when their preferred language, whatever it is, is unavailable
LANGUAGES = (("en", _("English")), ("fr", _("French")))
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
}
},
}
# Cache
CACHES = {
"default": {
"BACKEND": values.Value(
"django.core.cache.backends.locmem.LocMemCache",
environ_name="CACHE_DEFAULT_BACKEND",
environ_prefix=None,
),
"LOCATION": values.Value(
"", environ_name="CACHE_DEFAULT_LOCATION", environ_prefix=None
),
"OPTIONS": values.DictValue(
{}, environ_name="CACHE_DEFAULT_OPTIONS", environ_prefix=None
),
},
}
# Sentry
SENTRY_DSN = values.Value(None, environ_name="SENTRY_DSN")
@classmethod
def _get_environment(cls):
"""Environment in which the application is launched."""
return cls.__name__.lower()
# pylint: disable=invalid-name
@property
def ENVIRONMENT(self):
"""Environment in which the application is launched."""
return self._get_environment()
# pylint: disable=invalid-name
@property
def RELEASE(self):
"""
Return the release information.
Delegate to the module function to enable easier testing.
"""
return get_release()
@classmethod
def post_setup(cls):
"""Post setup configuration.
This is the place where you can configure settings that require other
settings to be loaded.
"""
super().post_setup()
# The SENTRY_DSN setting should be available to activate sentry for an environment
if cls.SENTRY_DSN is not None:
s |
class Build(Base):
"""Build environment settings"""
SECRET_KEY = "ThisIsAnExampleKeyForBuildPurposeOnly" # nosec
JWT_JITSI_SECRET_KEY = "ThisIsAnExampleKeyForBuildPurposeOnly" # nosec
STORAGES = {
"staticfiles": {
"BACKEND": str(
values.Value("whitenoise.storage.CompressedManifestStaticFilesStorage")
),
},
}
class Development(Base):
"""
Development environment settings
We set DEBUG to True and configure the server to respond from all hosts.
"""
DEBUG = True
ALLOWED_HOSTS = ["*"]
CORS_ALLOW_ALL_ORIGINS = True
CSRF_TRUSTED_ORIGINS = ["http://localhost:8071"]
@classmethod
def post_setup(cls):
"""Post setup configuration.
Activate local Keycloak as authentication backend for development.
"""
super().post_setup()
cls.REST_FRAMEWORK["DEFAULT_AUTHENTICATION_CLASSES"] = (
"magnify.apps.core.authentication.DelegatedJWTAuthentication",
"rest_framework.authentication.SessionAuthentication",
)
class Test(Base):
"""Test environment settings"""
JITSI_CONFIGURATION = {
"jitsi_domain": "meeting.education",
"jitsi_guest_avatar": "",
"jitsi_guest_default_password": "default",
"jitsi_guest_username": "guest",
"jitsi_xmpp_domain": "meet.jitsi",
"jitsi_secret_key": "ThisIsAnExampleKeyForTestPurposeOnly",
"jitsi_app_id": "app_id",
"jitsi_token_expiration_seconds": 300,
}
SIMPLE_JWT = {
"USER_ID_FIELD": "jwt_sub",
"USER_ID_CLAIM": "sub",
}
class ContinuousIntegration(Test):
"""
Continuous Integration environment settings
nota bene: it should inherit from the Test environment.
"""
ALLOWED_HOSTS = ["*"]
CORS_ALLOW_ALL_ORIGINS = True
CSRF_TRUSTED_ORIGINS = ["http://localhost:8070"]
class Production(Base):
"""Production environment settings
You must define the DJANGO_ALLOWED_HOSTS and DJANGO_SECRET_KEY environment
variables in Production configuration (and derived configurations):
DJANGO_ALLOWED_HOSTS="foo.com,foo.fr"
DJANGO_SECRET_KEY="your-secret-key"
"""
# Security
SECRET_KEY = values.SecretValue()
ALLOWED_HOSTS = values.ListValue([])
CSRF_COOKIE_SECURE = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_COOKIE_SECURE = True
# For static files in production, we want to use a backend that includes a hash in
# the filename, that is calculated from the file content, so that browsers always
# get the updated version of each file.
STORAGES = {
"staticfiles": {
"BACKEND": str(
values.Value("whitenoise.storage.CompressedManifestStaticFilesStorage")
),
},
}
class Feature(Production):
"""
Feature environment settings
nota bene: it should inherit from the Production environment.
"""
class Staging(Production):
"""
Staging environment settings
nota bene: it should inherit from the Production environment.
"""
class PreProduction(Production):
"""
Pre-production environment settings
nota bene: it should inherit from the Production environment.
"""
| entry_sdk.init( # pylint: disable=abstract-class-instantiated
dsn=cls.SENTRY_DSN,
environment=cls._get_environment(),
release=get_release(),
integrations=[DjangoIntegration()],
)
with sentry_sdk.configure_scope() as scope:
scope.set_extra("application", "backend")
| conditional_block |
settings.py | """
Django settings for magnify project.
"""
import json
import os
from django.utils.translation import gettext_lazy as _
# pylint: disable=ungrouped-imports
import sentry_sdk
from configurations import Configuration, values
from sentry_sdk.integrations.django import DjangoIntegration
from magnify.apps.core.settings.mixins import MagnifyCoreConfigurationMixin
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join("/", "data")
def get_release():
"""Get the current release of the application.
By release, we mean the release from the version.json file à la Mozilla [1]
(if any). If this file has not been found, it defaults to "NA".
[1]
https://github.com/mozilla-services/Dockerflow/blob/master/docs/version_object.md
"""
# Try to get the current release from the version.json file generated by the
# CI during the Docker image build
try:
with open(os.path.join(BASE_DIR, "version.json"), encoding="utf8") as version:
return json.load(version)["version"]
except FileNotFoundError:
return "NA" # Default: not available
class Base(MagnifyCoreConfigurationMixin, Configuration):
" |
class Build(Base):
"""Build environment settings"""
SECRET_KEY = "ThisIsAnExampleKeyForBuildPurposeOnly" # nosec
JWT_JITSI_SECRET_KEY = "ThisIsAnExampleKeyForBuildPurposeOnly" # nosec
STORAGES = {
"staticfiles": {
"BACKEND": str(
values.Value("whitenoise.storage.CompressedManifestStaticFilesStorage")
),
},
}
class Development(Base):
"""
Development environment settings
We set DEBUG to True and configure the server to respond from all hosts.
"""
DEBUG = True
ALLOWED_HOSTS = ["*"]
CORS_ALLOW_ALL_ORIGINS = True
CSRF_TRUSTED_ORIGINS = ["http://localhost:8071"]
@classmethod
def post_setup(cls):
"""Post setup configuration.
Activate local Keycloak as authentication backend for development.
"""
super().post_setup()
cls.REST_FRAMEWORK["DEFAULT_AUTHENTICATION_CLASSES"] = (
"magnify.apps.core.authentication.DelegatedJWTAuthentication",
"rest_framework.authentication.SessionAuthentication",
)
class Test(Base):
"""Test environment settings"""
JITSI_CONFIGURATION = {
"jitsi_domain": "meeting.education",
"jitsi_guest_avatar": "",
"jitsi_guest_default_password": "default",
"jitsi_guest_username": "guest",
"jitsi_xmpp_domain": "meet.jitsi",
"jitsi_secret_key": "ThisIsAnExampleKeyForTestPurposeOnly",
"jitsi_app_id": "app_id",
"jitsi_token_expiration_seconds": 300,
}
SIMPLE_JWT = {
"USER_ID_FIELD": "jwt_sub",
"USER_ID_CLAIM": "sub",
}
class ContinuousIntegration(Test):
"""
Continuous Integration environment settings
nota bene: it should inherit from the Test environment.
"""
ALLOWED_HOSTS = ["*"]
CORS_ALLOW_ALL_ORIGINS = True
CSRF_TRUSTED_ORIGINS = ["http://localhost:8070"]
class Production(Base):
"""Production environment settings
You must define the DJANGO_ALLOWED_HOSTS and DJANGO_SECRET_KEY environment
variables in Production configuration (and derived configurations):
DJANGO_ALLOWED_HOSTS="foo.com,foo.fr"
DJANGO_SECRET_KEY="your-secret-key"
"""
# Security
SECRET_KEY = values.SecretValue()
ALLOWED_HOSTS = values.ListValue([])
CSRF_COOKIE_SECURE = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_COOKIE_SECURE = True
# For static files in production, we want to use a backend that includes a hash in
# the filename, that is calculated from the file content, so that browsers always
# get the updated version of each file.
STORAGES = {
"staticfiles": {
"BACKEND": str(
values.Value("whitenoise.storage.CompressedManifestStaticFilesStorage")
),
},
}
class Feature(Production):
"""
Feature environment settings
nota bene: it should inherit from the Production environment.
"""
class Staging(Production):
"""
Staging environment settings
nota bene: it should inherit from the Production environment.
"""
class PreProduction(Production):
"""
Pre-production environment settings
nota bene: it should inherit from the Production environment.
"""
| ""
This is the base configuration every configuration (aka environnement) should inherit from. It
is recommended to configure third-party applications by creating a configuration mixins in
./configurations and compose the Base configuration with those mixins.
It depends on an environment variable that SHOULD be defined:
* DJANGO_SECRET_KEY
You may also want to override default configuration by setting the following environment
variables:
* DJANGO_SENTRY_DSN
* magnify_ES_HOST
* DB_NAME
* DB_HOST
* DB_PASSWORD
* DB_USER
"""
DEBUG = False
SITE_ID = 1
# Security
ALLOWED_HOSTS = []
CSRF_TRUSTED_ORIGINS = values.ListValue([])
SECRET_KEY = values.Value(None)
# CORS headers
CORS_ALLOWED_ORIGINS = values.ListValue([])
# System check reference:
# https://docs.djangoproject.com/en/2.2/ref/checks/#security
SILENCED_SYSTEM_CHECKS = values.ListValue(
[
# Allow the X_FRAME_OPTIONS to be set to "SAMEORIGIN"
"security.W019"
]
)
REST_FRAMEWORK = {
"ALLOWED_VERSIONS": ("1.0",),
"DEFAULT_AUTHENTICATION_CLASSES": values.ListValue(
["rest_framework_simplejwt.authentication.JWTAuthentication"],
environ_name="DRF_DEFAULT_AUTHENTICATION_CLASSES",
environ_prefix=None,
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 100,
"DEFAULT_VERSION": "1.0",
"DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.URLPathVersioning",
"EXCEPTION_HANDLER": "magnify.apps.core.api.exception_handler",
}
# Frontend
FRONTEND_CONFIGURATION = {
"API_URL": values.Value(environ_name="MAGNIFY_API_URL", environ_prefix=None),
"SHOW_REGISTER_LINK": values.BooleanValue(
True, environ_name="MAGNIFY_SHOW_REGISTER_LINK", environ_prefix=None
),
"KEYCLOAK_URL": values.Value(environ_name="KEYCLOAK_URL", environ_prefix=None),
"KEYCLOAK_REALM": values.Value(
"magnify", environ_name="KEYCLOAK_REALM", environ_prefix=None
),
"KEYCLOAK_CLIENT_ID": values.Value(
"magnify-front", environ_name="KEYCLOAK_CLIENT_ID", environ_prefix=None
),
"KEYCLOAK_EXPIRATION_SECONDS": values.IntegerValue(
30 * 60,
environ_name="KEYCLOAK_EXPIRATION_SECONDS",
environ_prefix=None,
),
}
# Application definition
ROOT_URLCONF = "urls"
WSGI_APPLICATION = "wsgi.application"
AUTH_USER_MODEL = "core.User"
JITSI_CONFIGURATION = {
"jitsi_domain": values.Value(environ_name="JITSI_DOMAIN", environ_prefix=None),
"jitsi_app_id": values.Value(environ_name="JITSI_APP_ID", environ_prefix=None),
"jitsi_secret_key": values.Value(
environ_name="JITSI_SECRET_KEY", environ_prefix=None
),
"jitsi_xmpp_domain": values.Value(
environ_name="JITSI_XMPP_DOMAIN", environ_prefix=None
),
"jitsi_guest_avatar": values.Value(
"", environ_name="JITSI_GUEST_AVATAR", environ_prefix=None
),
"jitsi_guest_username": values.Value(
"Guest", environ_name="JITSI_GUEST_USERNAME", environ_prefix=None
),
"jitsi_token_expiration_seconds": values.Value(
300, environ_name="JITSI_TOKEN_EXPIRATION_SECONDS", environ_prefix=None
),
}
JITSI_ROOM_PREFIX = values.Value(
"", environ_name="MAGNIFY_JITSI_ROOM_PREFIX", environ_prefix=None
)
DEFAULT_ROOM_IS_PUBLIC = values.BooleanValue(
True, environ_name="MAGNIFY_DEFAULT_ROOM_IS_PUBLIC", environ_prefix=None
)
ALLOW_UNREGISTERED_ROOMS = values.BooleanValue(
True, environ_name="MAGNIFY_ALLOW_UNREGISTERED_ROOMS", environ_prefix=None
)
ALLOW_API_USER_CREATE = values.BooleanValue(
False, environ_name="MAGNIFY_ALLOW_API_USER_CREATE", environ_prefix=None
)
# Database
DATABASES = {
"default": {
"ENGINE": values.Value(
"django.db.backends.postgresql_psycopg2",
environ_name="DB_ENGINE",
environ_prefix=None,
),
"NAME": values.Value(
"magnify", environ_name="DB_NAME", environ_prefix=None
),
"USER": values.Value(
"magnify", environ_name="DB_USER", environ_prefix=None
),
"PASSWORD": values.Value(environ_name="DB_PASSWORD", environ_prefix=None),
"HOST": values.Value(
"postgresql", environ_name="DB_HOST", environ_prefix=None
),
"PORT": values.Value(5432, environ_name="DB_PORT", environ_prefix=None),
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
MIGRATION_MODULES = {}
# Static files (CSS, JavaScript, Images)
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(DATA_DIR, "media")
STATIC_ROOT = os.path.join(DATA_DIR, "static")
# Simple JWT
SIMPLE_JWT = {
"ALGORITHM": values.Value(
"RS256", environ_name="MAGNIFY_JWT_ALGORITHM", environ_prefix=None
),
"JWK_URL": values.Value(
None, environ_name="MAGNIFY_JWT_JWK_URL", environ_prefix=None
),
"SIGNING_KEY": values.Value(
None, environ_name="MAGNIFY_JWT_SIGNING_KEY", environ_prefix=None
),
"VERIFYING_KEY": values.Value(
None, environ_name="MAGNIFY_JWT_VERIFYING_KEY", environ_prefix=None
),
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
"TOKEN_TYPE_CLAIM": "typ",
"USER_ID_FIELD": "jwt_sub",
"USER_ID_CLAIM": "sub",
"AUTH_TOKEN_CLASSES": ("magnify.apps.core.tokens.BearerToken",),
}
JWT_USER_FIELDS_SYNC = values.DictValue(
{
"email": "email",
"name": "name",
"username": "preferred_username",
},
environ_name="MAGNIFY_JWT_USER_FIELDS_SYNC",
environ_prefix=None,
)
JWT_USER_DEVICE_AUDIENCES = values.ListValue(
[],
environ_name="MAGNIFY_JWT_USER_DEVICE_AUDIENCES",
environ_prefix=None,
)
USERNAME_REGEX = values.Value(
r"^[a-z0-9_.-]+$",
environ_name="MAGNIFY_USERNAME_REGEX",
environ_prefix=None,
)
# Login/registration related settings
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
AUTHENTICATION_BACKENDS = ("django.contrib.auth.backends.ModelBackend",)
# Internationalization
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Templates
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.i18n",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.media",
"django.template.context_processors.csrf",
"django.template.context_processors.tz",
"django.template.context_processors.static",
],
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
}
]
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"corsheaders.middleware.CorsMiddleware",
"dockerflow.django.middleware.DockerflowMiddleware",
)
# Swagger
SWAGGER_SETTINGS = {
"SECURITY_DEFINITIONS": {
"Bearer": {"type": "apiKey", "name": "Authorization", "in": "header"},
}
}
# Django applications from the highest priority to the lowest
INSTALLED_APPS = (
# magnify stuff
"magnify.apps.core",
"magnify",
# Third party apps
"corsheaders",
"dockerflow.django",
"parler",
"rest_framework",
"drf_yasg",
# Django
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.admin",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"django.contrib.messages",
"django.contrib.humanize",
)
# Languages
# - Django
LANGUAGE_CODE = values.Value("en")
# Careful! Languages should be ordered by priority, as this tuple is used to get
# fallback/default languages throughout the app.
# Use "en" as default as it is the language that is most likely to be spoken by any visitor
# when their preferred language, whatever it is, is unavailable
LANGUAGES = (("en", _("English")), ("fr", _("French")))
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
}
},
}
# Cache
CACHES = {
"default": {
"BACKEND": values.Value(
"django.core.cache.backends.locmem.LocMemCache",
environ_name="CACHE_DEFAULT_BACKEND",
environ_prefix=None,
),
"LOCATION": values.Value(
"", environ_name="CACHE_DEFAULT_LOCATION", environ_prefix=None
),
"OPTIONS": values.DictValue(
{}, environ_name="CACHE_DEFAULT_OPTIONS", environ_prefix=None
),
},
}
# Sentry
SENTRY_DSN = values.Value(None, environ_name="SENTRY_DSN")
@classmethod
def _get_environment(cls):
"""Environment in which the application is launched."""
return cls.__name__.lower()
# pylint: disable=invalid-name
@property
def ENVIRONMENT(self):
"""Environment in which the application is launched."""
return self._get_environment()
# pylint: disable=invalid-name
@property
def RELEASE(self):
"""
Return the release information.
Delegate to the module function to enable easier testing.
"""
return get_release()
@classmethod
def post_setup(cls):
"""Post setup configuration.
This is the place where you can configure settings that require other
settings to be loaded.
"""
super().post_setup()
# The SENTRY_DSN setting should be available to activate sentry for an environment
if cls.SENTRY_DSN is not None:
sentry_sdk.init( # pylint: disable=abstract-class-instantiated
dsn=cls.SENTRY_DSN,
environment=cls._get_environment(),
release=get_release(),
integrations=[DjangoIntegration()],
)
with sentry_sdk.configure_scope() as scope:
scope.set_extra("application", "backend")
| identifier_body |
settings.py | """
Django settings for magnify project.
"""
import json
import os
from django.utils.translation import gettext_lazy as _
# pylint: disable=ungrouped-imports
import sentry_sdk
from configurations import Configuration, values
from sentry_sdk.integrations.django import DjangoIntegration
from magnify.apps.core.settings.mixins import MagnifyCoreConfigurationMixin
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join("/", "data")
def get_release():
"""Get the current release of the application.
By release, we mean the release from the version.json file à la Mozilla [1]
(if any). If this file has not been found, it defaults to "NA".
[1]
https://github.com/mozilla-services/Dockerflow/blob/master/docs/version_object.md
"""
# Try to get the current release from the version.json file generated by the
# CI during the Docker image build
try:
with open(os.path.join(BASE_DIR, "version.json"), encoding="utf8") as version:
return json.load(version)["version"]
except FileNotFoundError:
return "NA" # Default: not available
class Base(MagnifyCoreConfigurationMixin, Configuration):
"""
This is the base configuration every configuration (aka environnement) should inherit from. It
is recommended to configure third-party applications by creating a configuration mixins in
./configurations and compose the Base configuration with those mixins.
It depends on an environment variable that SHOULD be defined:
* DJANGO_SECRET_KEY
You may also want to override default configuration by setting the following environment
variables:
* DJANGO_SENTRY_DSN
* magnify_ES_HOST
* DB_NAME
* DB_HOST
* DB_PASSWORD
* DB_USER
"""
DEBUG = False
SITE_ID = 1
# Security
ALLOWED_HOSTS = []
CSRF_TRUSTED_ORIGINS = values.ListValue([])
SECRET_KEY = values.Value(None)
# CORS headers
CORS_ALLOWED_ORIGINS = values.ListValue([])
# System check reference:
# https://docs.djangoproject.com/en/2.2/ref/checks/#security
SILENCED_SYSTEM_CHECKS = values.ListValue(
[
# Allow the X_FRAME_OPTIONS to be set to "SAMEORIGIN"
"security.W019"
]
)
REST_FRAMEWORK = {
"ALLOWED_VERSIONS": ("1.0",),
"DEFAULT_AUTHENTICATION_CLASSES": values.ListValue(
["rest_framework_simplejwt.authentication.JWTAuthentication"],
environ_name="DRF_DEFAULT_AUTHENTICATION_CLASSES",
environ_prefix=None,
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 100,
"DEFAULT_VERSION": "1.0",
"DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.URLPathVersioning",
"EXCEPTION_HANDLER": "magnify.apps.core.api.exception_handler",
}
# Frontend
FRONTEND_CONFIGURATION = {
"API_URL": values.Value(environ_name="MAGNIFY_API_URL", environ_prefix=None),
"SHOW_REGISTER_LINK": values.BooleanValue(
True, environ_name="MAGNIFY_SHOW_REGISTER_LINK", environ_prefix=None
),
"KEYCLOAK_URL": values.Value(environ_name="KEYCLOAK_URL", environ_prefix=None),
"KEYCLOAK_REALM": values.Value(
"magnify", environ_name="KEYCLOAK_REALM", environ_prefix=None
),
"KEYCLOAK_CLIENT_ID": values.Value(
"magnify-front", environ_name="KEYCLOAK_CLIENT_ID", environ_prefix=None
),
"KEYCLOAK_EXPIRATION_SECONDS": values.IntegerValue(
30 * 60,
environ_name="KEYCLOAK_EXPIRATION_SECONDS",
environ_prefix=None,
),
}
# Application definition
ROOT_URLCONF = "urls"
WSGI_APPLICATION = "wsgi.application"
AUTH_USER_MODEL = "core.User"
JITSI_CONFIGURATION = {
"jitsi_domain": values.Value(environ_name="JITSI_DOMAIN", environ_prefix=None),
"jitsi_app_id": values.Value(environ_name="JITSI_APP_ID", environ_prefix=None),
"jitsi_secret_key": values.Value(
environ_name="JITSI_SECRET_KEY", environ_prefix=None
),
"jitsi_xmpp_domain": values.Value(
environ_name="JITSI_XMPP_DOMAIN", environ_prefix=None
),
"jitsi_guest_avatar": values.Value(
"", environ_name="JITSI_GUEST_AVATAR", environ_prefix=None
),
"jitsi_guest_username": values.Value(
"Guest", environ_name="JITSI_GUEST_USERNAME", environ_prefix=None
),
"jitsi_token_expiration_seconds": values.Value(
300, environ_name="JITSI_TOKEN_EXPIRATION_SECONDS", environ_prefix=None
),
}
JITSI_ROOM_PREFIX = values.Value(
"", environ_name="MAGNIFY_JITSI_ROOM_PREFIX", environ_prefix=None
)
DEFAULT_ROOM_IS_PUBLIC = values.BooleanValue(
True, environ_name="MAGNIFY_DEFAULT_ROOM_IS_PUBLIC", environ_prefix=None
)
ALLOW_UNREGISTERED_ROOMS = values.BooleanValue(
True, environ_name="MAGNIFY_ALLOW_UNREGISTERED_ROOMS", environ_prefix=None
)
ALLOW_API_USER_CREATE = values.BooleanValue(
False, environ_name="MAGNIFY_ALLOW_API_USER_CREATE", environ_prefix=None
)
# Database
DATABASES = {
"default": {
"ENGINE": values.Value(
"django.db.backends.postgresql_psycopg2",
environ_name="DB_ENGINE",
environ_prefix=None,
),
"NAME": values.Value(
"magnify", environ_name="DB_NAME", environ_prefix=None
),
"USER": values.Value(
"magnify", environ_name="DB_USER", environ_prefix=None
),
"PASSWORD": values.Value(environ_name="DB_PASSWORD", environ_prefix=None),
"HOST": values.Value(
"postgresql", environ_name="DB_HOST", environ_prefix=None
),
"PORT": values.Value(5432, environ_name="DB_PORT", environ_prefix=None),
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
MIGRATION_MODULES = {}
# Static files (CSS, JavaScript, Images)
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(DATA_DIR, "media")
STATIC_ROOT = os.path.join(DATA_DIR, "static")
# Simple JWT
SIMPLE_JWT = {
"ALGORITHM": values.Value(
"RS256", environ_name="MAGNIFY_JWT_ALGORITHM", environ_prefix=None
),
"JWK_URL": values.Value(
None, environ_name="MAGNIFY_JWT_JWK_URL", environ_prefix=None
),
"SIGNING_KEY": values.Value(
None, environ_name="MAGNIFY_JWT_SIGNING_KEY", environ_prefix=None
),
"VERIFYING_KEY": values.Value(
None, environ_name="MAGNIFY_JWT_VERIFYING_KEY", environ_prefix=None
),
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
"TOKEN_TYPE_CLAIM": "typ",
"USER_ID_FIELD": "jwt_sub",
"USER_ID_CLAIM": "sub",
"AUTH_TOKEN_CLASSES": ("magnify.apps.core.tokens.BearerToken",),
}
JWT_USER_FIELDS_SYNC = values.DictValue(
{
"email": "email",
"name": "name",
"username": "preferred_username",
},
environ_name="MAGNIFY_JWT_USER_FIELDS_SYNC",
environ_prefix=None,
)
JWT_USER_DEVICE_AUDIENCES = values.ListValue(
[],
environ_name="MAGNIFY_JWT_USER_DEVICE_AUDIENCES",
environ_prefix=None,
)
USERNAME_REGEX = values.Value(
r"^[a-z0-9_.-]+$",
environ_name="MAGNIFY_USERNAME_REGEX",
environ_prefix=None,
)
# Login/registration related settings
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
AUTHENTICATION_BACKENDS = ("django.contrib.auth.backends.ModelBackend",)
# Internationalization
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Templates
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.i18n",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.media",
"django.template.context_processors.csrf",
"django.template.context_processors.tz",
"django.template.context_processors.static",
],
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
}
]
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"corsheaders.middleware.CorsMiddleware",
"dockerflow.django.middleware.DockerflowMiddleware",
)
# Swagger
SWAGGER_SETTINGS = {
"SECURITY_DEFINITIONS": {
"Bearer": {"type": "apiKey", "name": "Authorization", "in": "header"},
}
}
# Django applications from the highest priority to the lowest
INSTALLED_APPS = (
# magnify stuff
"magnify.apps.core",
"magnify",
# Third party apps
"corsheaders",
"dockerflow.django",
"parler",
"rest_framework",
"drf_yasg",
# Django
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.admin",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"django.contrib.messages",
"django.contrib.humanize",
)
# Languages
# - Django
LANGUAGE_CODE = values.Value("en")
# Careful! Languages should be ordered by priority, as this tuple is used to get
# fallback/default languages throughout the app.
# Use "en" as default as it is the language that is most likely to be spoken by any visitor
# when their preferred language, whatever it is, is unavailable
LANGUAGES = (("en", _("English")), ("fr", _("French")))
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
}
},
}
# Cache
CACHES = {
"default": {
"BACKEND": values.Value(
"django.core.cache.backends.locmem.LocMemCache",
environ_name="CACHE_DEFAULT_BACKEND",
environ_prefix=None,
),
"LOCATION": values.Value(
"", environ_name="CACHE_DEFAULT_LOCATION", environ_prefix=None
),
"OPTIONS": values.DictValue(
{}, environ_name="CACHE_DEFAULT_OPTIONS", environ_prefix=None
),
},
}
# Sentry
SENTRY_DSN = values.Value(None, environ_name="SENTRY_DSN")
@classmethod
def _get_environment(cls):
"""Environment in which the application is launched."""
return cls.__name__.lower()
# pylint: disable=invalid-name
@property
def ENVIRONMENT(self):
"""Environment in which the application is launched."""
return self._get_environment()
# pylint: disable=invalid-name
@property
def RELEASE(self):
"""
Return the release information.
Delegate to the module function to enable easier testing.
"""
return get_release()
@classmethod
def post_setup(cls):
"""Post setup configuration.
This is the place where you can configure settings that require other
settings to be loaded.
"""
super().post_setup()
# The SENTRY_DSN setting should be available to activate sentry for an environment
if cls.SENTRY_DSN is not None:
sentry_sdk.init( # pylint: disable=abstract-class-instantiated
dsn=cls.SENTRY_DSN,
environment=cls._get_environment(),
release=get_release(),
integrations=[DjangoIntegration()],
)
with sentry_sdk.configure_scope() as scope:
scope.set_extra("application", "backend")
class Build(Base):
"""Build environment settings"""
SECRET_KEY = "ThisIsAnExampleKeyForBuildPurposeOnly" # nosec
JWT_JITSI_SECRET_KEY = "ThisIsAnExampleKeyForBuildPurposeOnly" # nosec
STORAGES = {
"staticfiles": {
"BACKEND": str(
values.Value("whitenoise.storage.CompressedManifestStaticFilesStorage")
),
},
}
class D | Base):
"""
Development environment settings
We set DEBUG to True and configure the server to respond from all hosts.
"""
DEBUG = True
ALLOWED_HOSTS = ["*"]
CORS_ALLOW_ALL_ORIGINS = True
CSRF_TRUSTED_ORIGINS = ["http://localhost:8071"]
@classmethod
def post_setup(cls):
"""Post setup configuration.
Activate local Keycloak as authentication backend for development.
"""
super().post_setup()
cls.REST_FRAMEWORK["DEFAULT_AUTHENTICATION_CLASSES"] = (
"magnify.apps.core.authentication.DelegatedJWTAuthentication",
"rest_framework.authentication.SessionAuthentication",
)
class Test(Base):
"""Test environment settings"""
JITSI_CONFIGURATION = {
"jitsi_domain": "meeting.education",
"jitsi_guest_avatar": "",
"jitsi_guest_default_password": "default",
"jitsi_guest_username": "guest",
"jitsi_xmpp_domain": "meet.jitsi",
"jitsi_secret_key": "ThisIsAnExampleKeyForTestPurposeOnly",
"jitsi_app_id": "app_id",
"jitsi_token_expiration_seconds": 300,
}
SIMPLE_JWT = {
"USER_ID_FIELD": "jwt_sub",
"USER_ID_CLAIM": "sub",
}
class ContinuousIntegration(Test):
"""
Continuous Integration environment settings
nota bene: it should inherit from the Test environment.
"""
ALLOWED_HOSTS = ["*"]
CORS_ALLOW_ALL_ORIGINS = True
CSRF_TRUSTED_ORIGINS = ["http://localhost:8070"]
class Production(Base):
"""Production environment settings
You must define the DJANGO_ALLOWED_HOSTS and DJANGO_SECRET_KEY environment
variables in Production configuration (and derived configurations):
DJANGO_ALLOWED_HOSTS="foo.com,foo.fr"
DJANGO_SECRET_KEY="your-secret-key"
"""
# Security
SECRET_KEY = values.SecretValue()
ALLOWED_HOSTS = values.ListValue([])
CSRF_COOKIE_SECURE = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_COOKIE_SECURE = True
# For static files in production, we want to use a backend that includes a hash in
# the filename, that is calculated from the file content, so that browsers always
# get the updated version of each file.
STORAGES = {
"staticfiles": {
"BACKEND": str(
values.Value("whitenoise.storage.CompressedManifestStaticFilesStorage")
),
},
}
class Feature(Production):
"""
Feature environment settings
nota bene: it should inherit from the Production environment.
"""
class Staging(Production):
"""
Staging environment settings
nota bene: it should inherit from the Production environment.
"""
class PreProduction(Production):
"""
Pre-production environment settings
nota bene: it should inherit from the Production environment.
"""
| evelopment( | identifier_name |
settings.py | """
Django settings for magnify project.
"""
import json
import os
from django.utils.translation import gettext_lazy as _
# pylint: disable=ungrouped-imports
import sentry_sdk
from configurations import Configuration, values
from sentry_sdk.integrations.django import DjangoIntegration
from magnify.apps.core.settings.mixins import MagnifyCoreConfigurationMixin
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join("/", "data")
def get_release():
"""Get the current release of the application.
By release, we mean the release from the version.json file à la Mozilla [1]
(if any). If this file has not been found, it defaults to "NA".
[1]
https://github.com/mozilla-services/Dockerflow/blob/master/docs/version_object.md
"""
# Try to get the current release from the version.json file generated by the
# CI during the Docker image build
try:
with open(os.path.join(BASE_DIR, "version.json"), encoding="utf8") as version:
return json.load(version)["version"]
except FileNotFoundError:
return "NA" # Default: not available
class Base(MagnifyCoreConfigurationMixin, Configuration):
"""
This is the base configuration every configuration (aka environnement) should inherit from. It
is recommended to configure third-party applications by creating a configuration mixins in
./configurations and compose the Base configuration with those mixins.
It depends on an environment variable that SHOULD be defined:
* DJANGO_SECRET_KEY
You may also want to override default configuration by setting the following environment
variables:
* DJANGO_SENTRY_DSN
* magnify_ES_HOST
* DB_NAME
* DB_HOST
* DB_PASSWORD
* DB_USER
"""
DEBUG = False
SITE_ID = 1
# Security
ALLOWED_HOSTS = []
CSRF_TRUSTED_ORIGINS = values.ListValue([])
SECRET_KEY = values.Value(None)
# CORS headers
CORS_ALLOWED_ORIGINS = values.ListValue([])
# System check reference:
# https://docs.djangoproject.com/en/2.2/ref/checks/#security
SILENCED_SYSTEM_CHECKS = values.ListValue(
[
# Allow the X_FRAME_OPTIONS to be set to "SAMEORIGIN"
"security.W019"
]
)
REST_FRAMEWORK = {
"ALLOWED_VERSIONS": ("1.0",),
"DEFAULT_AUTHENTICATION_CLASSES": values.ListValue(
["rest_framework_simplejwt.authentication.JWTAuthentication"],
environ_name="DRF_DEFAULT_AUTHENTICATION_CLASSES",
environ_prefix=None,
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 100,
"DEFAULT_VERSION": "1.0",
"DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.URLPathVersioning",
"EXCEPTION_HANDLER": "magnify.apps.core.api.exception_handler",
}
# Frontend
FRONTEND_CONFIGURATION = {
"API_URL": values.Value(environ_name="MAGNIFY_API_URL", environ_prefix=None),
"SHOW_REGISTER_LINK": values.BooleanValue(
True, environ_name="MAGNIFY_SHOW_REGISTER_LINK", environ_prefix=None
),
"KEYCLOAK_URL": values.Value(environ_name="KEYCLOAK_URL", environ_prefix=None),
"KEYCLOAK_REALM": values.Value(
"magnify", environ_name="KEYCLOAK_REALM", environ_prefix=None
),
"KEYCLOAK_CLIENT_ID": values.Value(
"magnify-front", environ_name="KEYCLOAK_CLIENT_ID", environ_prefix=None
),
"KEYCLOAK_EXPIRATION_SECONDS": values.IntegerValue(
30 * 60,
environ_name="KEYCLOAK_EXPIRATION_SECONDS",
environ_prefix=None,
),
}
# Application definition
ROOT_URLCONF = "urls"
WSGI_APPLICATION = "wsgi.application"
AUTH_USER_MODEL = "core.User"
JITSI_CONFIGURATION = {
"jitsi_domain": values.Value(environ_name="JITSI_DOMAIN", environ_prefix=None),
"jitsi_app_id": values.Value(environ_name="JITSI_APP_ID", environ_prefix=None),
"jitsi_secret_key": values.Value(
environ_name="JITSI_SECRET_KEY", environ_prefix=None
),
"jitsi_xmpp_domain": values.Value(
environ_name="JITSI_XMPP_DOMAIN", environ_prefix=None
),
"jitsi_guest_avatar": values.Value(
"", environ_name="JITSI_GUEST_AVATAR", environ_prefix=None
),
"jitsi_guest_username": values.Value(
"Guest", environ_name="JITSI_GUEST_USERNAME", environ_prefix=None
),
"jitsi_token_expiration_seconds": values.Value(
300, environ_name="JITSI_TOKEN_EXPIRATION_SECONDS", environ_prefix=None
),
}
JITSI_ROOM_PREFIX = values.Value(
"", environ_name="MAGNIFY_JITSI_ROOM_PREFIX", environ_prefix=None
)
DEFAULT_ROOM_IS_PUBLIC = values.BooleanValue(
True, environ_name="MAGNIFY_DEFAULT_ROOM_IS_PUBLIC", environ_prefix=None
)
ALLOW_UNREGISTERED_ROOMS = values.BooleanValue(
True, environ_name="MAGNIFY_ALLOW_UNREGISTERED_ROOMS", environ_prefix=None
)
ALLOW_API_USER_CREATE = values.BooleanValue(
False, environ_name="MAGNIFY_ALLOW_API_USER_CREATE", environ_prefix=None
)
# Database
DATABASES = {
"default": {
"ENGINE": values.Value(
"django.db.backends.postgresql_psycopg2",
environ_name="DB_ENGINE",
environ_prefix=None,
),
"NAME": values.Value(
"magnify", environ_name="DB_NAME", environ_prefix=None
),
"USER": values.Value(
"magnify", environ_name="DB_USER", environ_prefix=None
),
"PASSWORD": values.Value(environ_name="DB_PASSWORD", environ_prefix=None),
"HOST": values.Value(
"postgresql", environ_name="DB_HOST", environ_prefix=None
),
"PORT": values.Value(5432, environ_name="DB_PORT", environ_prefix=None),
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
MIGRATION_MODULES = {}
# Static files (CSS, JavaScript, Images)
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(DATA_DIR, "media")
STATIC_ROOT = os.path.join(DATA_DIR, "static")
# Simple JWT
SIMPLE_JWT = {
"ALGORITHM": values.Value(
"RS256", environ_name="MAGNIFY_JWT_ALGORITHM", environ_prefix=None
),
"JWK_URL": values.Value(
None, environ_name="MAGNIFY_JWT_JWK_URL", environ_prefix=None
),
"SIGNING_KEY": values.Value(
None, environ_name="MAGNIFY_JWT_SIGNING_KEY", environ_prefix=None
),
"VERIFYING_KEY": values.Value(
None, environ_name="MAGNIFY_JWT_VERIFYING_KEY", environ_prefix=None
),
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
"TOKEN_TYPE_CLAIM": "typ",
"USER_ID_FIELD": "jwt_sub",
"USER_ID_CLAIM": "sub",
"AUTH_TOKEN_CLASSES": ("magnify.apps.core.tokens.BearerToken",),
}
JWT_USER_FIELDS_SYNC = values.DictValue(
{
"email": "email",
"name": "name",
"username": "preferred_username",
},
environ_name="MAGNIFY_JWT_USER_FIELDS_SYNC",
environ_prefix=None,
)
JWT_USER_DEVICE_AUDIENCES = values.ListValue(
[],
environ_name="MAGNIFY_JWT_USER_DEVICE_AUDIENCES",
environ_prefix=None,
)
USERNAME_REGEX = values.Value(
r"^[a-z0-9_.-]+$",
environ_name="MAGNIFY_USERNAME_REGEX",
environ_prefix=None,
)
# Login/registration related settings
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
AUTHENTICATION_BACKENDS = ("django.contrib.auth.backends.ModelBackend",)
# Internationalization
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Templates
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.i18n",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.media",
"django.template.context_processors.csrf",
"django.template.context_processors.tz",
"django.template.context_processors.static",
],
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
}
]
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"corsheaders.middleware.CorsMiddleware",
"dockerflow.django.middleware.DockerflowMiddleware",
)
# Swagger
SWAGGER_SETTINGS = {
"SECURITY_DEFINITIONS": {
"Bearer": {"type": "apiKey", "name": "Authorization", "in": "header"},
}
}
# Django applications from the highest priority to the lowest
INSTALLED_APPS = (
# magnify stuff
"magnify.apps.core",
"magnify",
# Third party apps
"corsheaders",
"dockerflow.django",
"parler",
"rest_framework",
"drf_yasg",
# Django
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.admin",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"django.contrib.messages",
"django.contrib.humanize",
)
# Languages
# - Django
LANGUAGE_CODE = values.Value("en")
# Careful! Languages should be ordered by priority, as this tuple is used to get
# fallback/default languages throughout the app.
# Use "en" as default as it is the language that is most likely to be spoken by any visitor
# when their preferred language, whatever it is, is unavailable
LANGUAGES = (("en", _("English")), ("fr", _("French")))
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG", | "formatter": "verbose",
}
},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
}
},
}
# Cache
CACHES = {
"default": {
"BACKEND": values.Value(
"django.core.cache.backends.locmem.LocMemCache",
environ_name="CACHE_DEFAULT_BACKEND",
environ_prefix=None,
),
"LOCATION": values.Value(
"", environ_name="CACHE_DEFAULT_LOCATION", environ_prefix=None
),
"OPTIONS": values.DictValue(
{}, environ_name="CACHE_DEFAULT_OPTIONS", environ_prefix=None
),
},
}
# Sentry
SENTRY_DSN = values.Value(None, environ_name="SENTRY_DSN")
@classmethod
def _get_environment(cls):
"""Environment in which the application is launched."""
return cls.__name__.lower()
# pylint: disable=invalid-name
@property
def ENVIRONMENT(self):
"""Environment in which the application is launched."""
return self._get_environment()
# pylint: disable=invalid-name
@property
def RELEASE(self):
"""
Return the release information.
Delegate to the module function to enable easier testing.
"""
return get_release()
@classmethod
def post_setup(cls):
"""Post setup configuration.
This is the place where you can configure settings that require other
settings to be loaded.
"""
super().post_setup()
# The SENTRY_DSN setting should be available to activate sentry for an environment
if cls.SENTRY_DSN is not None:
sentry_sdk.init( # pylint: disable=abstract-class-instantiated
dsn=cls.SENTRY_DSN,
environment=cls._get_environment(),
release=get_release(),
integrations=[DjangoIntegration()],
)
with sentry_sdk.configure_scope() as scope:
scope.set_extra("application", "backend")
class Build(Base):
"""Build environment settings"""
SECRET_KEY = "ThisIsAnExampleKeyForBuildPurposeOnly" # nosec
JWT_JITSI_SECRET_KEY = "ThisIsAnExampleKeyForBuildPurposeOnly" # nosec
STORAGES = {
"staticfiles": {
"BACKEND": str(
values.Value("whitenoise.storage.CompressedManifestStaticFilesStorage")
),
},
}
class Development(Base):
"""
Development environment settings
We set DEBUG to True and configure the server to respond from all hosts.
"""
DEBUG = True
ALLOWED_HOSTS = ["*"]
CORS_ALLOW_ALL_ORIGINS = True
CSRF_TRUSTED_ORIGINS = ["http://localhost:8071"]
@classmethod
def post_setup(cls):
"""Post setup configuration.
Activate local Keycloak as authentication backend for development.
"""
super().post_setup()
cls.REST_FRAMEWORK["DEFAULT_AUTHENTICATION_CLASSES"] = (
"magnify.apps.core.authentication.DelegatedJWTAuthentication",
"rest_framework.authentication.SessionAuthentication",
)
class Test(Base):
"""Test environment settings"""
JITSI_CONFIGURATION = {
"jitsi_domain": "meeting.education",
"jitsi_guest_avatar": "",
"jitsi_guest_default_password": "default",
"jitsi_guest_username": "guest",
"jitsi_xmpp_domain": "meet.jitsi",
"jitsi_secret_key": "ThisIsAnExampleKeyForTestPurposeOnly",
"jitsi_app_id": "app_id",
"jitsi_token_expiration_seconds": 300,
}
SIMPLE_JWT = {
"USER_ID_FIELD": "jwt_sub",
"USER_ID_CLAIM": "sub",
}
class ContinuousIntegration(Test):
"""
Continuous Integration environment settings
nota bene: it should inherit from the Test environment.
"""
ALLOWED_HOSTS = ["*"]
CORS_ALLOW_ALL_ORIGINS = True
CSRF_TRUSTED_ORIGINS = ["http://localhost:8070"]
class Production(Base):
"""Production environment settings
You must define the DJANGO_ALLOWED_HOSTS and DJANGO_SECRET_KEY environment
variables in Production configuration (and derived configurations):
DJANGO_ALLOWED_HOSTS="foo.com,foo.fr"
DJANGO_SECRET_KEY="your-secret-key"
"""
# Security
SECRET_KEY = values.SecretValue()
ALLOWED_HOSTS = values.ListValue([])
CSRF_COOKIE_SECURE = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_COOKIE_SECURE = True
# For static files in production, we want to use a backend that includes a hash in
# the filename, that is calculated from the file content, so that browsers always
# get the updated version of each file.
STORAGES = {
"staticfiles": {
"BACKEND": str(
values.Value("whitenoise.storage.CompressedManifestStaticFilesStorage")
),
},
}
class Feature(Production):
"""
Feature environment settings
nota bene: it should inherit from the Production environment.
"""
class Staging(Production):
"""
Staging environment settings
nota bene: it should inherit from the Production environment.
"""
class PreProduction(Production):
"""
Pre-production environment settings
nota bene: it should inherit from the Production environment.
""" | "class": "logging.StreamHandler", | random_line_split |
intergen.ts | import * as fs from 'fs'
import * as ts from 'typescript'
import {argparse, arg} from '@rondo.dev/argparse'
import {error, info} from '../log'
function isObjectType(type: ts.Type): type is ts.ObjectType {
return !!(type.flags & ts.TypeFlags.Object)
}
function isTypeReference(type: ts.ObjectType): type is ts.TypeReference {
return !!(type.objectFlags & ts.ObjectFlags.Reference)
}
function isAnonymous(type: ts.Type): boolean {
return isObjectType(type) && !!(
type.objectFlags & ts.ObjectFlags.Anonymous)
}
function filterInvisibleProperties(type: ts.Symbol): boolean {
const flags = ts.getCombinedModifierFlags(type.valueDeclaration)
return !(flags & ts.ModifierFlags.NonPublicAccessibilityModifier)
}
interface ClassProperty {
name: string
type: ts.Type
relevantTypes: ts.Type[]
typeString: string
optional: boolean
}
interface ClassDefinition {
name: string
type: ts.Type
typeParameters: ts.TypeParameter[]
relevantTypeParameters: ts.Type[]
allRelevantTypes: ts.Type[]
properties: ClassProperty[]
}
/*
* TODO
*
* Interfaces generated from exported class delcarations will be prefixed with
* "I". A few cases need to be differentiated:
*
* a) Private (non-exported) types / interfaces / classes defined and used in
* same module. In case of non-exported classes, an error can be thrown.
* These can be copied and perhaps indexed to prevent collisions.
* b) Referenced exported classes from the same file
* c) Referenced exported classes from a neighbouring file
* d) Referenced imported classes from external modules. Real world example:
* entities in @rondo.dev/comments-server import and use entities from
* @rondo.dev/comments. These types will have to be processed by this module.
* e) Referenced interfaces should be re-imported in the output file.
*
*/
export function intergen(...argv: string[]): string {
const args = argparse({
input: arg('string', {alias: 'i', required: true}),
debug: arg('boolean'),
help: arg('boolean', {alias: 'h'}),
output: arg('string', {alias: 'o', default: '-'}),
}, intergen.help).parse(argv)
function debug(m: string, ...meta: Array<unknown>) {
if (args.debug) {
error(m, ...meta)
}
}
/** Generate interfaces for all exported classes in a set of .ts files */
function classesToInterfaces(
fileNames: string[],
options: ts.CompilerOptions,
): string[] {
// Build a program using the set of root file names in fileNames
const program = ts.createProgram(fileNames, options)
// Get the checker, we will use it to find more about classes
const checker = program.getTypeChecker()
const classDefs: ClassDefinition[] = []
function typeToString(type: ts.Type): string {
return checker.typeToString(type)
}
/**
* Can be used to filters out global types like Array or string from a list
* of types. For example: types.filter(filterGlobalTypes)
*/
function filterGlobalTypes(type: ts.Type): boolean {
debug('filterGlobalTypes: %s', typeToString(type))
if (type.aliasSymbol) {
// keep type aliases
return true
}
const symbol = type.getSymbol()
if (!symbol) |
if (symbol && symbol.flags & ts.SymbolFlags.Transient) {
debug(' is transient')
// Array is transient. not sure if this is the best way to figure this
return false
}
// if (symbol && !((symbol as any).parent)) {
// // debug(' no parent', symbol)
// // e.g. Array symbol has no parent
// return false
// }
if (type.isLiteral()) {
debug(' is literal')
return false
}
if (type.isUnionOrIntersection()) {
debug(' is u or i')
return false
}
if (isObjectType(type) && isTypeReference(type)) {
debug(' is object type')
if (isObjectType(type.target) &&
type.target.objectFlags & ts.ObjectFlags.Tuple) {
debug(' is tuple')
return false
}
}
debug(' keep!')
return true
}
/**
* Converts a generic type to the target of the type reference.
*/
function mapGenericTypes(type: ts.Type): ts.Type {
if (type.aliasSymbol) {
return checker.getDeclaredTypeOfSymbol(type.aliasSymbol)
}
if (isObjectType(type) && isTypeReference(type)) {
return type.target
}
return type
}
/**
* Removes duplicates from an array of types
*/
function filterDuplicates(type: ts.Type, i: number, arr: ts.Type[]) {
// TODO improve performance of this method
return i === arr.indexOf(type)
}
/**
* Recursively retrieves a list of all type parameters.
*/
function getAllTypeParameters(type: ts.Type): ts.Type[] {
function collectTypeParams(
type2: ts.Type, params?: readonly ts.Type[],
): ts.Type[] {
const types: ts.Type[] = [type2]
if (params) {
params.forEach(t => {
const atp = getAllTypeParameters(t)
types.push(...atp)
})
}
return types
}
if (type.aliasSymbol) {
return collectTypeParams(type, type.aliasTypeArguments)
}
if (isObjectType(type) && isTypeReference(type)) {
return collectTypeParams(type, type.typeArguments)
}
if (type.isUnionOrIntersection()) {
return collectTypeParams(type, type.types)
}
if (type.isClassOrInterface()) {
return collectTypeParams(type, type.typeParameters)
}
return [type]
}
/**
* True if this is visible outside this file, false otherwise
*/
function isNodeExported(node: ts.Node): boolean {
return (
(ts.getCombinedModifierFlags(node as ts.Declaration) &
ts.ModifierFlags.Export) !== 0
// (!!node.parent && node.parent.kind === ts.SyntaxKind.SourceFile)
)
}
function handleClassDeclaration(node: ts.ClassDeclaration) {
if (!node.name) {
return
}
// This is a top level class, get its symbol
const symbol = checker.getSymbolAtLocation(node.name)
if (!symbol) {
return
}
const type = checker.getDeclaredTypeOfSymbol(symbol)
handleType(type)
}
const typeDefinitions: Map<ts.Type, ClassDefinition> = new Map()
function handleType(type: ts.Type) {
if (typeDefinitions.has(type)) {
return
}
if (type.aliasSymbol) {
throw new Error('Type aliases are not supported')
}
const typeParameters: ts.TypeParameter[] = []
const expandedTypeParameters: ts.Type[] = []
const allRelevantTypes: ts.Type[] = []
function handleTypeParameters(typeParams: readonly ts.Type[]) {
typeParams.forEach(tp => {
const constraint = tp.getConstraint()
if (constraint) {
expandedTypeParameters.push(...getAllTypeParameters(tp))
}
const def = tp.getDefault()
if (def) {
expandedTypeParameters.push(...getAllTypeParameters(tp))
}
typeParameters.push(tp)
})
}
if (type.isClassOrInterface() && type.typeParameters) {
handleTypeParameters(type.typeParameters)
}
if (type.aliasSymbol && type.aliasTypeArguments) {
handleTypeParameters(type.aliasTypeArguments)
}
const properties = type.getApparentProperties()
const filterClassTypeParameters =
(t: ts.Type) => typeParameters.every(tp => tp !== t)
const classProperties: ClassProperty[] = properties
.filter(filterInvisibleProperties)
.map(p => {
const vd = p.valueDeclaration
const optional = ts.isPropertyDeclaration(vd) && !!vd.questionToken
const propType = checker.getTypeOfSymbolAtLocation(p, vd)
const typeParams = getAllTypeParameters(propType)
const relevantTypes = typeParams
.filter(filterGlobalTypes)
.filter(filterClassTypeParameters)
.map(mapGenericTypes)
.filter(filterDuplicates)
allRelevantTypes.push(...relevantTypes)
return {
name: p.getName(),
type: propType,
relevantTypes,
typeString: typeToString(propType),
optional,
}
})
const relevantTypeParameters = expandedTypeParameters
.filter(filterGlobalTypes)
.filter(mapGenericTypes)
.filter(filterDuplicates)
allRelevantTypes.push(...relevantTypeParameters)
const classDef: ClassDefinition = {
name: typeToString(type),
type,
// name: symbol.getName(),
typeParameters,
allRelevantTypes: allRelevantTypes
.filter(filterClassTypeParameters)
.filter(filterDuplicates),
relevantTypeParameters,
properties: classProperties,
}
if (!isAnonymous(type)) {
// Prevent defining anonymous declarations as interfaces
classDefs.push(classDef)
}
typeDefinitions.set(type, classDef)
classDef.allRelevantTypes.forEach(handleType)
}
/**
* Visit nodes finding exported classes
*/
function visit(node: ts.Node) {
// Only consider exported nodes
if (!isNodeExported(node)) {
return
}
if (ts.isClassDeclaration(node)) {
handleClassDeclaration(node)
}
}
// Visit every sourceFile in the program
for (const sourceFile of program.getSourceFiles()) {
if (!sourceFile.isDeclarationFile) {
// Walk the tree to search for classes
ts.forEachChild(sourceFile, visit)
}
}
function setTypeName(type: ts.Type, mappings: Map<ts.Type, string>) {
if (isAnonymous(type)) {
return
}
const name = typeToString(type)
// (type as any).symbol.name = 'I' + type.symbol.name
mappings.set(type, `${name}`)
}
const nameMappings = new Map<ts.Type, string>()
for (const classDef of classDefs) {
setTypeName(classDef.type, nameMappings)
for (const t of classDef.allRelevantTypes) {
setTypeName(t, nameMappings)
}
}
function createInterface(classDef: ClassDefinition): string {
const name = nameMappings.get(classDef.type)!
const start = `export interface ${name} {`
const properties = classDef.properties.map(p => {
const q = p.optional ? '?' : ''
return ` ${p.name}${q}: ${nameMappings.get(p.type) || p.typeString}`
})
.join('\n')
const end = '}'
return `${start}\n${properties}\n${end}`
}
return classDefs.map(createInterface)
}
const interfaces = classesToInterfaces([args.input], {
target: ts.ScriptTarget.ES5,
module: ts.ModuleKind.CommonJS,
})
const header = '/* This file was generated by rondo intergen script */\n' +
'/* tslint:disable */\n\n'
const value = header + interfaces.join('\n\n')
if (args.output === '-') {
info(value)
} else {
fs.writeFileSync(args.output, value)
}
return value
}
intergen.help = 'Generate TypeScript interfaces from all found classes'
| {
debug(' no symbol')
// e.g. string or number types have no symbol
return false
} | conditional_block |
intergen.ts | import * as fs from 'fs'
import * as ts from 'typescript'
import {argparse, arg} from '@rondo.dev/argparse'
import {error, info} from '../log'
function isObjectType(type: ts.Type): type is ts.ObjectType {
return !!(type.flags & ts.TypeFlags.Object)
}
function isTypeReference(type: ts.ObjectType): type is ts.TypeReference {
return !!(type.objectFlags & ts.ObjectFlags.Reference)
}
function isAnonymous(type: ts.Type): boolean {
return isObjectType(type) && !!(
type.objectFlags & ts.ObjectFlags.Anonymous)
}
function filterInvisibleProperties(type: ts.Symbol): boolean {
const flags = ts.getCombinedModifierFlags(type.valueDeclaration)
return !(flags & ts.ModifierFlags.NonPublicAccessibilityModifier)
}
interface ClassProperty {
name: string
type: ts.Type
relevantTypes: ts.Type[]
typeString: string
optional: boolean
}
interface ClassDefinition {
name: string
type: ts.Type
typeParameters: ts.TypeParameter[]
relevantTypeParameters: ts.Type[]
allRelevantTypes: ts.Type[]
properties: ClassProperty[]
}
/*
* TODO
*
* Interfaces generated from exported class delcarations will be prefixed with
* "I". A few cases need to be differentiated:
*
* a) Private (non-exported) types / interfaces / classes defined and used in
* same module. In case of non-exported classes, an error can be thrown.
* These can be copied and perhaps indexed to prevent collisions.
* b) Referenced exported classes from the same file
* c) Referenced exported classes from a neighbouring file
* d) Referenced imported classes from external modules. Real world example:
* entities in @rondo.dev/comments-server import and use entities from
* @rondo.dev/comments. These types will have to be processed by this module.
* e) Referenced interfaces should be re-imported in the output file.
*
*/
export function intergen(...argv: string[]): string {
const args = argparse({
input: arg('string', {alias: 'i', required: true}),
debug: arg('boolean'),
help: arg('boolean', {alias: 'h'}),
output: arg('string', {alias: 'o', default: '-'}),
}, intergen.help).parse(argv)
function debug(m: string, ...meta: Array<unknown>) {
if (args.debug) {
error(m, ...meta)
}
}
/** Generate interfaces for all exported classes in a set of .ts files */
function classesToInterfaces(
fileNames: string[],
options: ts.CompilerOptions,
): string[] |
const interfaces = classesToInterfaces([args.input], {
target: ts.ScriptTarget.ES5,
module: ts.ModuleKind.CommonJS,
})
const header = '/* This file was generated by rondo intergen script */\n' +
'/* tslint:disable */\n\n'
const value = header + interfaces.join('\n\n')
if (args.output === '-') {
info(value)
} else {
fs.writeFileSync(args.output, value)
}
return value
}
intergen.help = 'Generate TypeScript interfaces from all found classes'
| {
// Build a program using the set of root file names in fileNames
const program = ts.createProgram(fileNames, options)
// Get the checker, we will use it to find more about classes
const checker = program.getTypeChecker()
const classDefs: ClassDefinition[] = []
function typeToString(type: ts.Type): string {
return checker.typeToString(type)
}
/**
* Can be used to filters out global types like Array or string from a list
* of types. For example: types.filter(filterGlobalTypes)
*/
function filterGlobalTypes(type: ts.Type): boolean {
debug('filterGlobalTypes: %s', typeToString(type))
if (type.aliasSymbol) {
// keep type aliases
return true
}
const symbol = type.getSymbol()
if (!symbol) {
debug(' no symbol')
// e.g. string or number types have no symbol
return false
}
if (symbol && symbol.flags & ts.SymbolFlags.Transient) {
debug(' is transient')
// Array is transient. not sure if this is the best way to figure this
return false
}
// if (symbol && !((symbol as any).parent)) {
// // debug(' no parent', symbol)
// // e.g. Array symbol has no parent
// return false
// }
if (type.isLiteral()) {
debug(' is literal')
return false
}
if (type.isUnionOrIntersection()) {
debug(' is u or i')
return false
}
if (isObjectType(type) && isTypeReference(type)) {
debug(' is object type')
if (isObjectType(type.target) &&
type.target.objectFlags & ts.ObjectFlags.Tuple) {
debug(' is tuple')
return false
}
}
debug(' keep!')
return true
}
/**
* Converts a generic type to the target of the type reference.
*/
function mapGenericTypes(type: ts.Type): ts.Type {
if (type.aliasSymbol) {
return checker.getDeclaredTypeOfSymbol(type.aliasSymbol)
}
if (isObjectType(type) && isTypeReference(type)) {
return type.target
}
return type
}
/**
* Removes duplicates from an array of types
*/
function filterDuplicates(type: ts.Type, i: number, arr: ts.Type[]) {
// TODO improve performance of this method
return i === arr.indexOf(type)
}
/**
* Recursively retrieves a list of all type parameters.
*/
function getAllTypeParameters(type: ts.Type): ts.Type[] {
function collectTypeParams(
type2: ts.Type, params?: readonly ts.Type[],
): ts.Type[] {
const types: ts.Type[] = [type2]
if (params) {
params.forEach(t => {
const atp = getAllTypeParameters(t)
types.push(...atp)
})
}
return types
}
if (type.aliasSymbol) {
return collectTypeParams(type, type.aliasTypeArguments)
}
if (isObjectType(type) && isTypeReference(type)) {
return collectTypeParams(type, type.typeArguments)
}
if (type.isUnionOrIntersection()) {
return collectTypeParams(type, type.types)
}
if (type.isClassOrInterface()) {
return collectTypeParams(type, type.typeParameters)
}
return [type]
}
/**
* True if this is visible outside this file, false otherwise
*/
function isNodeExported(node: ts.Node): boolean {
return (
(ts.getCombinedModifierFlags(node as ts.Declaration) &
ts.ModifierFlags.Export) !== 0
// (!!node.parent && node.parent.kind === ts.SyntaxKind.SourceFile)
)
}
function handleClassDeclaration(node: ts.ClassDeclaration) {
if (!node.name) {
return
}
// This is a top level class, get its symbol
const symbol = checker.getSymbolAtLocation(node.name)
if (!symbol) {
return
}
const type = checker.getDeclaredTypeOfSymbol(symbol)
handleType(type)
}
const typeDefinitions: Map<ts.Type, ClassDefinition> = new Map()
function handleType(type: ts.Type) {
if (typeDefinitions.has(type)) {
return
}
if (type.aliasSymbol) {
throw new Error('Type aliases are not supported')
}
const typeParameters: ts.TypeParameter[] = []
const expandedTypeParameters: ts.Type[] = []
const allRelevantTypes: ts.Type[] = []
function handleTypeParameters(typeParams: readonly ts.Type[]) {
typeParams.forEach(tp => {
const constraint = tp.getConstraint()
if (constraint) {
expandedTypeParameters.push(...getAllTypeParameters(tp))
}
const def = tp.getDefault()
if (def) {
expandedTypeParameters.push(...getAllTypeParameters(tp))
}
typeParameters.push(tp)
})
}
if (type.isClassOrInterface() && type.typeParameters) {
handleTypeParameters(type.typeParameters)
}
if (type.aliasSymbol && type.aliasTypeArguments) {
handleTypeParameters(type.aliasTypeArguments)
}
const properties = type.getApparentProperties()
const filterClassTypeParameters =
(t: ts.Type) => typeParameters.every(tp => tp !== t)
const classProperties: ClassProperty[] = properties
.filter(filterInvisibleProperties)
.map(p => {
const vd = p.valueDeclaration
const optional = ts.isPropertyDeclaration(vd) && !!vd.questionToken
const propType = checker.getTypeOfSymbolAtLocation(p, vd)
const typeParams = getAllTypeParameters(propType)
const relevantTypes = typeParams
.filter(filterGlobalTypes)
.filter(filterClassTypeParameters)
.map(mapGenericTypes)
.filter(filterDuplicates)
allRelevantTypes.push(...relevantTypes)
return {
name: p.getName(),
type: propType,
relevantTypes,
typeString: typeToString(propType),
optional,
}
})
const relevantTypeParameters = expandedTypeParameters
.filter(filterGlobalTypes)
.filter(mapGenericTypes)
.filter(filterDuplicates)
allRelevantTypes.push(...relevantTypeParameters)
const classDef: ClassDefinition = {
name: typeToString(type),
type,
// name: symbol.getName(),
typeParameters,
allRelevantTypes: allRelevantTypes
.filter(filterClassTypeParameters)
.filter(filterDuplicates),
relevantTypeParameters,
properties: classProperties,
}
if (!isAnonymous(type)) {
// Prevent defining anonymous declarations as interfaces
classDefs.push(classDef)
}
typeDefinitions.set(type, classDef)
classDef.allRelevantTypes.forEach(handleType)
}
/**
* Visit nodes finding exported classes
*/
function visit(node: ts.Node) {
// Only consider exported nodes
if (!isNodeExported(node)) {
return
}
if (ts.isClassDeclaration(node)) {
handleClassDeclaration(node)
}
}
// Visit every sourceFile in the program
for (const sourceFile of program.getSourceFiles()) {
if (!sourceFile.isDeclarationFile) {
// Walk the tree to search for classes
ts.forEachChild(sourceFile, visit)
}
}
function setTypeName(type: ts.Type, mappings: Map<ts.Type, string>) {
if (isAnonymous(type)) {
return
}
const name = typeToString(type)
// (type as any).symbol.name = 'I' + type.symbol.name
mappings.set(type, `${name}`)
}
const nameMappings = new Map<ts.Type, string>()
for (const classDef of classDefs) {
setTypeName(classDef.type, nameMappings)
for (const t of classDef.allRelevantTypes) {
setTypeName(t, nameMappings)
}
}
function createInterface(classDef: ClassDefinition): string {
const name = nameMappings.get(classDef.type)!
const start = `export interface ${name} {`
const properties = classDef.properties.map(p => {
const q = p.optional ? '?' : ''
return ` ${p.name}${q}: ${nameMappings.get(p.type) || p.typeString}`
})
.join('\n')
const end = '}'
return `${start}\n${properties}\n${end}`
}
return classDefs.map(createInterface)
} | identifier_body |
intergen.ts | import * as fs from 'fs'
import * as ts from 'typescript'
import {argparse, arg} from '@rondo.dev/argparse'
import {error, info} from '../log'
function isObjectType(type: ts.Type): type is ts.ObjectType {
return !!(type.flags & ts.TypeFlags.Object)
}
function isTypeReference(type: ts.ObjectType): type is ts.TypeReference {
return !!(type.objectFlags & ts.ObjectFlags.Reference)
}
function isAnonymous(type: ts.Type): boolean {
return isObjectType(type) && !!(
type.objectFlags & ts.ObjectFlags.Anonymous)
}
function filterInvisibleProperties(type: ts.Symbol): boolean {
const flags = ts.getCombinedModifierFlags(type.valueDeclaration)
return !(flags & ts.ModifierFlags.NonPublicAccessibilityModifier)
}
interface ClassProperty {
name: string
type: ts.Type
relevantTypes: ts.Type[]
typeString: string
optional: boolean
}
interface ClassDefinition {
name: string
type: ts.Type
typeParameters: ts.TypeParameter[]
relevantTypeParameters: ts.Type[]
allRelevantTypes: ts.Type[]
properties: ClassProperty[]
}
/*
* TODO
*
* Interfaces generated from exported class delcarations will be prefixed with
* "I". A few cases need to be differentiated:
*
* a) Private (non-exported) types / interfaces / classes defined and used in
* same module. In case of non-exported classes, an error can be thrown.
* These can be copied and perhaps indexed to prevent collisions.
* b) Referenced exported classes from the same file
* c) Referenced exported classes from a neighbouring file
* d) Referenced imported classes from external modules. Real world example:
* entities in @rondo.dev/comments-server import and use entities from
* @rondo.dev/comments. These types will have to be processed by this module.
* e) Referenced interfaces should be re-imported in the output file.
*
*/
export function intergen(...argv: string[]): string {
const args = argparse({
input: arg('string', {alias: 'i', required: true}),
debug: arg('boolean'),
help: arg('boolean', {alias: 'h'}),
output: arg('string', {alias: 'o', default: '-'}),
}, intergen.help).parse(argv)
function debug(m: string, ...meta: Array<unknown>) {
if (args.debug) {
error(m, ...meta)
}
}
/** Generate interfaces for all exported classes in a set of .ts files */
function classesToInterfaces(
fileNames: string[],
options: ts.CompilerOptions,
): string[] {
// Build a program using the set of root file names in fileNames
const program = ts.createProgram(fileNames, options)
// Get the checker, we will use it to find more about classes
const checker = program.getTypeChecker()
const classDefs: ClassDefinition[] = []
function typeToString(type: ts.Type): string {
return checker.typeToString(type)
}
/**
* Can be used to filters out global types like Array or string from a list
* of types. For example: types.filter(filterGlobalTypes)
*/
function filterGlobalTypes(type: ts.Type): boolean {
debug('filterGlobalTypes: %s', typeToString(type))
if (type.aliasSymbol) {
// keep type aliases
return true
}
const symbol = type.getSymbol()
if (!symbol) {
debug(' no symbol')
// e.g. string or number types have no symbol
return false
}
if (symbol && symbol.flags & ts.SymbolFlags.Transient) {
debug(' is transient')
// Array is transient. not sure if this is the best way to figure this
return false
}
// if (symbol && !((symbol as any).parent)) {
// // debug(' no parent', symbol)
// // e.g. Array symbol has no parent
// return false
// }
if (type.isLiteral()) {
debug(' is literal')
return false
}
if (type.isUnionOrIntersection()) {
debug(' is u or i')
return false
}
if (isObjectType(type) && isTypeReference(type)) {
debug(' is object type')
if (isObjectType(type.target) &&
type.target.objectFlags & ts.ObjectFlags.Tuple) {
debug(' is tuple')
return false
}
}
debug(' keep!')
return true
}
/**
* Converts a generic type to the target of the type reference.
*/
function mapGenericTypes(type: ts.Type): ts.Type {
if (type.aliasSymbol) {
return checker.getDeclaredTypeOfSymbol(type.aliasSymbol)
}
if (isObjectType(type) && isTypeReference(type)) {
return type.target
}
return type
}
/**
* Removes duplicates from an array of types
*/
function filterDuplicates(type: ts.Type, i: number, arr: ts.Type[]) {
// TODO improve performance of this method
return i === arr.indexOf(type)
}
/**
* Recursively retrieves a list of all type parameters.
*/
function | (type: ts.Type): ts.Type[] {
function collectTypeParams(
type2: ts.Type, params?: readonly ts.Type[],
): ts.Type[] {
const types: ts.Type[] = [type2]
if (params) {
params.forEach(t => {
const atp = getAllTypeParameters(t)
types.push(...atp)
})
}
return types
}
if (type.aliasSymbol) {
return collectTypeParams(type, type.aliasTypeArguments)
}
if (isObjectType(type) && isTypeReference(type)) {
return collectTypeParams(type, type.typeArguments)
}
if (type.isUnionOrIntersection()) {
return collectTypeParams(type, type.types)
}
if (type.isClassOrInterface()) {
return collectTypeParams(type, type.typeParameters)
}
return [type]
}
/**
* True if this is visible outside this file, false otherwise
*/
function isNodeExported(node: ts.Node): boolean {
return (
(ts.getCombinedModifierFlags(node as ts.Declaration) &
ts.ModifierFlags.Export) !== 0
// (!!node.parent && node.parent.kind === ts.SyntaxKind.SourceFile)
)
}
function handleClassDeclaration(node: ts.ClassDeclaration) {
if (!node.name) {
return
}
// This is a top level class, get its symbol
const symbol = checker.getSymbolAtLocation(node.name)
if (!symbol) {
return
}
const type = checker.getDeclaredTypeOfSymbol(symbol)
handleType(type)
}
const typeDefinitions: Map<ts.Type, ClassDefinition> = new Map()
function handleType(type: ts.Type) {
if (typeDefinitions.has(type)) {
return
}
if (type.aliasSymbol) {
throw new Error('Type aliases are not supported')
}
const typeParameters: ts.TypeParameter[] = []
const expandedTypeParameters: ts.Type[] = []
const allRelevantTypes: ts.Type[] = []
function handleTypeParameters(typeParams: readonly ts.Type[]) {
typeParams.forEach(tp => {
const constraint = tp.getConstraint()
if (constraint) {
expandedTypeParameters.push(...getAllTypeParameters(tp))
}
const def = tp.getDefault()
if (def) {
expandedTypeParameters.push(...getAllTypeParameters(tp))
}
typeParameters.push(tp)
})
}
if (type.isClassOrInterface() && type.typeParameters) {
handleTypeParameters(type.typeParameters)
}
if (type.aliasSymbol && type.aliasTypeArguments) {
handleTypeParameters(type.aliasTypeArguments)
}
const properties = type.getApparentProperties()
const filterClassTypeParameters =
(t: ts.Type) => typeParameters.every(tp => tp !== t)
const classProperties: ClassProperty[] = properties
.filter(filterInvisibleProperties)
.map(p => {
const vd = p.valueDeclaration
const optional = ts.isPropertyDeclaration(vd) && !!vd.questionToken
const propType = checker.getTypeOfSymbolAtLocation(p, vd)
const typeParams = getAllTypeParameters(propType)
const relevantTypes = typeParams
.filter(filterGlobalTypes)
.filter(filterClassTypeParameters)
.map(mapGenericTypes)
.filter(filterDuplicates)
allRelevantTypes.push(...relevantTypes)
return {
name: p.getName(),
type: propType,
relevantTypes,
typeString: typeToString(propType),
optional,
}
})
const relevantTypeParameters = expandedTypeParameters
.filter(filterGlobalTypes)
.filter(mapGenericTypes)
.filter(filterDuplicates)
allRelevantTypes.push(...relevantTypeParameters)
const classDef: ClassDefinition = {
name: typeToString(type),
type,
// name: symbol.getName(),
typeParameters,
allRelevantTypes: allRelevantTypes
.filter(filterClassTypeParameters)
.filter(filterDuplicates),
relevantTypeParameters,
properties: classProperties,
}
if (!isAnonymous(type)) {
// Prevent defining anonymous declarations as interfaces
classDefs.push(classDef)
}
typeDefinitions.set(type, classDef)
classDef.allRelevantTypes.forEach(handleType)
}
/**
* Visit nodes finding exported classes
*/
function visit(node: ts.Node) {
// Only consider exported nodes
if (!isNodeExported(node)) {
return
}
if (ts.isClassDeclaration(node)) {
handleClassDeclaration(node)
}
}
// Visit every sourceFile in the program
for (const sourceFile of program.getSourceFiles()) {
if (!sourceFile.isDeclarationFile) {
// Walk the tree to search for classes
ts.forEachChild(sourceFile, visit)
}
}
function setTypeName(type: ts.Type, mappings: Map<ts.Type, string>) {
if (isAnonymous(type)) {
return
}
const name = typeToString(type)
// (type as any).symbol.name = 'I' + type.symbol.name
mappings.set(type, `${name}`)
}
const nameMappings = new Map<ts.Type, string>()
for (const classDef of classDefs) {
setTypeName(classDef.type, nameMappings)
for (const t of classDef.allRelevantTypes) {
setTypeName(t, nameMappings)
}
}
function createInterface(classDef: ClassDefinition): string {
const name = nameMappings.get(classDef.type)!
const start = `export interface ${name} {`
const properties = classDef.properties.map(p => {
const q = p.optional ? '?' : ''
return ` ${p.name}${q}: ${nameMappings.get(p.type) || p.typeString}`
})
.join('\n')
const end = '}'
return `${start}\n${properties}\n${end}`
}
return classDefs.map(createInterface)
}
const interfaces = classesToInterfaces([args.input], {
target: ts.ScriptTarget.ES5,
module: ts.ModuleKind.CommonJS,
})
const header = '/* This file was generated by rondo intergen script */\n' +
'/* tslint:disable */\n\n'
const value = header + interfaces.join('\n\n')
if (args.output === '-') {
info(value)
} else {
fs.writeFileSync(args.output, value)
}
return value
}
intergen.help = 'Generate TypeScript interfaces from all found classes'
| getAllTypeParameters | identifier_name |
intergen.ts | import * as fs from 'fs'
import * as ts from 'typescript'
import {argparse, arg} from '@rondo.dev/argparse'
import {error, info} from '../log'
function isObjectType(type: ts.Type): type is ts.ObjectType {
return !!(type.flags & ts.TypeFlags.Object)
}
function isTypeReference(type: ts.ObjectType): type is ts.TypeReference {
return !!(type.objectFlags & ts.ObjectFlags.Reference)
}
function isAnonymous(type: ts.Type): boolean {
return isObjectType(type) && !!(
type.objectFlags & ts.ObjectFlags.Anonymous)
}
function filterInvisibleProperties(type: ts.Symbol): boolean {
const flags = ts.getCombinedModifierFlags(type.valueDeclaration)
return !(flags & ts.ModifierFlags.NonPublicAccessibilityModifier)
}
interface ClassProperty {
name: string
type: ts.Type
relevantTypes: ts.Type[]
typeString: string
optional: boolean
}
interface ClassDefinition {
name: string
type: ts.Type
typeParameters: ts.TypeParameter[]
relevantTypeParameters: ts.Type[]
allRelevantTypes: ts.Type[]
properties: ClassProperty[]
}
/*
* TODO
*
* Interfaces generated from exported class delcarations will be prefixed with
* "I". A few cases need to be differentiated:
*
* a) Private (non-exported) types / interfaces / classes defined and used in
* same module. In case of non-exported classes, an error can be thrown.
* These can be copied and perhaps indexed to prevent collisions.
* b) Referenced exported classes from the same file
* c) Referenced exported classes from a neighbouring file
* d) Referenced imported classes from external modules. Real world example:
* entities in @rondo.dev/comments-server import and use entities from
* @rondo.dev/comments. These types will have to be processed by this module.
* e) Referenced interfaces should be re-imported in the output file.
*
*/
export function intergen(...argv: string[]): string {
const args = argparse({
input: arg('string', {alias: 'i', required: true}),
debug: arg('boolean'),
help: arg('boolean', {alias: 'h'}),
output: arg('string', {alias: 'o', default: '-'}),
}, intergen.help).parse(argv)
function debug(m: string, ...meta: Array<unknown>) {
if (args.debug) {
error(m, ...meta)
}
}
/** Generate interfaces for all exported classes in a set of .ts files */
function classesToInterfaces(
fileNames: string[],
options: ts.CompilerOptions,
): string[] {
// Build a program using the set of root file names in fileNames
const program = ts.createProgram(fileNames, options)
// Get the checker, we will use it to find more about classes
const checker = program.getTypeChecker()
const classDefs: ClassDefinition[] = []
function typeToString(type: ts.Type): string {
return checker.typeToString(type)
}
/**
* Can be used to filters out global types like Array or string from a list
* of types. For example: types.filter(filterGlobalTypes)
*/
function filterGlobalTypes(type: ts.Type): boolean {
debug('filterGlobalTypes: %s', typeToString(type))
if (type.aliasSymbol) {
// keep type aliases
return true
}
const symbol = type.getSymbol()
if (!symbol) {
debug(' no symbol')
// e.g. string or number types have no symbol
return false
}
if (symbol && symbol.flags & ts.SymbolFlags.Transient) {
debug(' is transient')
// Array is transient. not sure if this is the best way to figure this
return false
}
// if (symbol && !((symbol as any).parent)) {
// // debug(' no parent', symbol)
// // e.g. Array symbol has no parent
// return false
// }
if (type.isLiteral()) {
debug(' is literal')
return false
}
if (type.isUnionOrIntersection()) {
debug(' is u or i')
return false
}
if (isObjectType(type) && isTypeReference(type)) {
debug(' is object type')
if (isObjectType(type.target) &&
type.target.objectFlags & ts.ObjectFlags.Tuple) {
debug(' is tuple')
return false
}
}
debug(' keep!')
return true
}
/**
* Converts a generic type to the target of the type reference.
*/
function mapGenericTypes(type: ts.Type): ts.Type {
if (type.aliasSymbol) {
return checker.getDeclaredTypeOfSymbol(type.aliasSymbol)
}
if (isObjectType(type) && isTypeReference(type)) {
return type.target
}
return type
}
/**
* Removes duplicates from an array of types
*/
function filterDuplicates(type: ts.Type, i: number, arr: ts.Type[]) {
// TODO improve performance of this method
return i === arr.indexOf(type)
}
/**
* Recursively retrieves a list of all type parameters.
*/
function getAllTypeParameters(type: ts.Type): ts.Type[] {
function collectTypeParams(
type2: ts.Type, params?: readonly ts.Type[],
): ts.Type[] {
const types: ts.Type[] = [type2]
if (params) {
params.forEach(t => {
const atp = getAllTypeParameters(t)
types.push(...atp)
})
}
return types
}
if (type.aliasSymbol) {
return collectTypeParams(type, type.aliasTypeArguments)
}
if (isObjectType(type) && isTypeReference(type)) {
return collectTypeParams(type, type.typeArguments)
}
if (type.isUnionOrIntersection()) {
return collectTypeParams(type, type.types)
}
if (type.isClassOrInterface()) {
return collectTypeParams(type, type.typeParameters)
}
return [type]
}
/**
* True if this is visible outside this file, false otherwise
*/
function isNodeExported(node: ts.Node): boolean {
return (
(ts.getCombinedModifierFlags(node as ts.Declaration) &
ts.ModifierFlags.Export) !== 0
// (!!node.parent && node.parent.kind === ts.SyntaxKind.SourceFile)
)
}
function handleClassDeclaration(node: ts.ClassDeclaration) {
if (!node.name) {
return
}
// This is a top level class, get its symbol
const symbol = checker.getSymbolAtLocation(node.name)
if (!symbol) {
return
}
const type = checker.getDeclaredTypeOfSymbol(symbol)
handleType(type)
}
const typeDefinitions: Map<ts.Type, ClassDefinition> = new Map()
function handleType(type: ts.Type) {
if (typeDefinitions.has(type)) {
return
}
if (type.aliasSymbol) {
throw new Error('Type aliases are not supported')
}
const typeParameters: ts.TypeParameter[] = []
const expandedTypeParameters: ts.Type[] = []
const allRelevantTypes: ts.Type[] = []
function handleTypeParameters(typeParams: readonly ts.Type[]) {
typeParams.forEach(tp => {
const constraint = tp.getConstraint()
if (constraint) {
expandedTypeParameters.push(...getAllTypeParameters(tp))
}
const def = tp.getDefault()
if (def) {
expandedTypeParameters.push(...getAllTypeParameters(tp))
}
typeParameters.push(tp)
})
}
if (type.isClassOrInterface() && type.typeParameters) {
handleTypeParameters(type.typeParameters)
}
if (type.aliasSymbol && type.aliasTypeArguments) {
handleTypeParameters(type.aliasTypeArguments)
}
const properties = type.getApparentProperties()
const filterClassTypeParameters =
(t: ts.Type) => typeParameters.every(tp => tp !== t)
const classProperties: ClassProperty[] = properties
.filter(filterInvisibleProperties)
.map(p => {
const vd = p.valueDeclaration
const optional = ts.isPropertyDeclaration(vd) && !!vd.questionToken
const propType = checker.getTypeOfSymbolAtLocation(p, vd)
const typeParams = getAllTypeParameters(propType)
const relevantTypes = typeParams
.filter(filterGlobalTypes)
.filter(filterClassTypeParameters)
.map(mapGenericTypes)
.filter(filterDuplicates)
allRelevantTypes.push(...relevantTypes)
return {
name: p.getName(),
type: propType,
relevantTypes, | optional,
}
})
const relevantTypeParameters = expandedTypeParameters
.filter(filterGlobalTypes)
.filter(mapGenericTypes)
.filter(filterDuplicates)
allRelevantTypes.push(...relevantTypeParameters)
const classDef: ClassDefinition = {
name: typeToString(type),
type,
// name: symbol.getName(),
typeParameters,
allRelevantTypes: allRelevantTypes
.filter(filterClassTypeParameters)
.filter(filterDuplicates),
relevantTypeParameters,
properties: classProperties,
}
if (!isAnonymous(type)) {
// Prevent defining anonymous declarations as interfaces
classDefs.push(classDef)
}
typeDefinitions.set(type, classDef)
classDef.allRelevantTypes.forEach(handleType)
}
/**
* Visit nodes finding exported classes
*/
function visit(node: ts.Node) {
// Only consider exported nodes
if (!isNodeExported(node)) {
return
}
if (ts.isClassDeclaration(node)) {
handleClassDeclaration(node)
}
}
// Visit every sourceFile in the program
for (const sourceFile of program.getSourceFiles()) {
if (!sourceFile.isDeclarationFile) {
// Walk the tree to search for classes
ts.forEachChild(sourceFile, visit)
}
}
function setTypeName(type: ts.Type, mappings: Map<ts.Type, string>) {
if (isAnonymous(type)) {
return
}
const name = typeToString(type)
// (type as any).symbol.name = 'I' + type.symbol.name
mappings.set(type, `${name}`)
}
const nameMappings = new Map<ts.Type, string>()
for (const classDef of classDefs) {
setTypeName(classDef.type, nameMappings)
for (const t of classDef.allRelevantTypes) {
setTypeName(t, nameMappings)
}
}
function createInterface(classDef: ClassDefinition): string {
const name = nameMappings.get(classDef.type)!
const start = `export interface ${name} {`
const properties = classDef.properties.map(p => {
const q = p.optional ? '?' : ''
return ` ${p.name}${q}: ${nameMappings.get(p.type) || p.typeString}`
})
.join('\n')
const end = '}'
return `${start}\n${properties}\n${end}`
}
return classDefs.map(createInterface)
}
const interfaces = classesToInterfaces([args.input], {
target: ts.ScriptTarget.ES5,
module: ts.ModuleKind.CommonJS,
})
const header = '/* This file was generated by rondo intergen script */\n' +
'/* tslint:disable */\n\n'
const value = header + interfaces.join('\n\n')
if (args.output === '-') {
info(value)
} else {
fs.writeFileSync(args.output, value)
}
return value
}
intergen.help = 'Generate TypeScript interfaces from all found classes' | typeString: typeToString(propType), | random_line_split |
Project.ts | import * as fs from 'fs-extra';
import * as path from 'path';
import * as log from './log';
import {GraphicsApi} from './GraphicsApi';
import {Architecture} from './Architecture';
import {AudioApi} from './AudioApi';
import {VrApi} from './VrApi';
import {RayTraceApi} from './RayTraceApi';
import {Options} from './Options';
import {Platform} from './Platform';
const uuid = require('uuid');
function getDefines(platform: string, rotated: boolean) {
let defines: string[] = [];
switch (platform) {
case Platform.iOS:
if (rotated) defines.push('ROTATE90');
break;
case Platform.Android:
if (rotated) defines.push('ROTATE90');
break;
}
return defines;
}
function contains(array: any[], value: any) {
for (const element of array) {
if (element === value) return true;
}
return false;
}
function containsDefine(array: Define[], value: Define) {
for (const element of array) {
if (element.value === value.value && element.config === value.config) return true;
}
return false;
}
function isAbsolute(path: string) {
return (path.length > 0 && path[0] === '/') || (path.length > 1 && path[1] === ':');
}
let projectInProgress = 0;
process.on('exit', (code: number) => {
if (projectInProgress > 0) {
console.error('Error: korefile did not call resolve, no project created.');
}
});
let scriptdir = '.';
// let lastScriptDir = '.';
let koreDir = '.';
async function loadProject(directory: string, korefile: string = 'kincfile.js'): Promise<Project> {
return new Promise<Project>((resolve, reject) => {
projectInProgress += 1;
let resolver = async (project: Project) => {
projectInProgress -= 1;
// TODO: This accidentally finds Kha/Backends/KoreHL
/*if (fs.existsSync(path.join(scriptdir, 'Backends'))) {
var libdirs = fs.readdirSync(path.join(scriptdir, 'Backends'));
for (var ld in libdirs) {
var libdir = path.join(scriptdir, 'Backends', libdirs[ld]);
if (fs.statSync(libdir).isDirectory()) {
var korefile = path.join(libdir, korefile);
if (fs.existsSync(korefile)) {
project.addSubProject(await Project.createProject(libdir, scriptdir));
}
}
}
}*/
resolve(project);
};
try {
scriptdir = directory;
let file = fs.readFileSync(path.resolve(directory, korefile), 'utf8');
let AsyncFunction = Object.getPrototypeOf(async () => {}).constructor;
let project = new AsyncFunction(
'log',
'Project',
'Platform',
'platform',
'GraphicsApi',
'graphics',
'Architecture',
'arch',
'AudioApi',
'audio',
'VrApi',
'vr',
'RayTraceApi',
'raytrace',
'require',
'resolve',
'reject',
'__dirname',
file)
(
log,
Project,
Platform,
Project.platform,
GraphicsApi,
Options.graphicsApi,
Architecture,
Options.architecture,
AudioApi,
Options.audioApi,
VrApi,
Options.vrApi,
RayTraceApi,
Options.rayTraceApi,
require,
resolver,
reject,
directory);
}
catch (error) {
log.error(error);
throw error;
}
});
}
export interface File {
file: string;
options: any;
projectDir: string;
projectName: string;
}
export class Define {
value: string;
config: string;
}
export class Project {
static platform: string;
static koreDir: string;
static root: string;
name: string;
safeName: string;
version: string;
id: string;
debugDir: string;
basedir: string;
uuid: string;
files: File[];
javadirs: string[];
subProjects: Project[];
includeDirs: string[];
defines: Define[];
libs: string[];
systemDependendLibraries: any;
includes: {file: string, options: any}[];
excludes: string[];
customs: {file: string, command: string, output: string}[];
cpp11: boolean;
c11: boolean;
kore: boolean;
targetOptions: any;
rotated: boolean;
cmd: boolean;
cFlags: string[] = [];
cppFlags: string[] = [];
stackSize: number;
icon: string = null;
constructor(name: string) {
this.name = name;
this.safeName = name.replace(/[^A-z0-9\-\_]/g, '-');;
this.version = '1.0';
this.debugDir = '';
this.basedir = scriptdir;
this.uuid = uuid.v4();
this.files = [];
this.customs = [];
this.javadirs = [];
this.subProjects = [];
this.includeDirs = [];
this.defines = [];
this.libs = [];
this.systemDependendLibraries = {};
this.includes = [];
this.excludes = [];
this.cpp11 = false;
this.c11 = false;
this.kore = true;
this.targetOptions = {
android: {},
xboxOne: {},
playStation4: {},
switch: {}
};
this.rotated = false;
this.cmd = false;
this.stackSize = 0;
}
flatten() {
for (let sub of this.subProjects) sub.flatten();
for (let sub of this.subProjects) {
if (sub.cpp11) {
this.cpp11 = true;
}
if (sub.c11) {
this.c11 = true;
}
if (sub.cmd) {
this.cmd = true;
}
let subbasedir = sub.basedir;
for (let tkey of Object.keys(sub.targetOptions)) {
const target = sub.targetOptions[tkey];
for (let key of Object.keys(target)) {
const options = this.targetOptions[tkey];
const option = target[key];
if (options[key] == null) options[key] = option;
// push library properties to current array instead
else if (Array.isArray(options[key]) && Array.isArray(option)) {
for (let value of option) {
if (!options[key].includes(value)) options[key].push(value);
}
}
}
}
for (let d of sub.defines) if (!containsDefine(this.defines, d)) this.defines.push(d);
for (let file of sub.files) {
let absolute = file.file;
if (!path.isAbsolute(absolute)) {
absolute = path.join(subbasedir, file.file);
}
this.files.push({file: absolute.replace(/\\/g, '/'), options: file.options, projectDir: subbasedir, projectName: sub.name });
}
for (const custom of sub.customs) {
let absolute = custom.file;
if (!path.isAbsolute(absolute)) {
absolute = path.join(subbasedir, custom.file);
}
this.customs.push({file: absolute.replace(/\\/g, '/'), command: custom.command, output: custom.output });
}
for (let i of sub.includeDirs) if (!contains(this.includeDirs, path.resolve(subbasedir, i))) this.includeDirs.push(path.resolve(subbasedir, i));
for (let j of sub.javadirs) if (!contains(this.javadirs, path.resolve(subbasedir, j))) this.javadirs.push(path.resolve(subbasedir, j));
for (let lib of sub.libs) {
if (lib.indexOf('/') < 0 && lib.indexOf('\\') < 0) {
if (!contains(this.libs, lib)) this.libs.push(lib);
}
else {
if (!contains(this.libs, path.resolve(subbasedir, lib))) this.libs.push(path.resolve(subbasedir, lib));
}
}
for (let system in sub.systemDependendLibraries) {
let libs = sub.systemDependendLibraries[system];
for (let lib of libs) {
if (this.systemDependendLibraries[system] === undefined) this.systemDependendLibraries[system] = [];
if (!contains(this.systemDependendLibraries[system], this.stringify(path.resolve(subbasedir, lib)))) {
if (!contains(lib, '/') && !contains(lib, '\\')) this.systemDependendLibraries[system].push(lib);
else this.systemDependendLibraries[system].push(this.stringify(path.resolve(subbasedir, lib)));
}
}
}
for (let flag of sub.cFlags) {
if (!this.cFlags.includes(flag)) {
this.cFlags.push(flag);
}
}
for (let flag of sub.cppFlags) {
if (!this.cppFlags.includes(flag)) {
this.cppFlags.push(flag);
}
}
}
this.subProjects = [];
}
getName() {
return this.name;
}
getSafeName() {
return this.safeName;
}
getUuid() {
return this.uuid;
}
matches(text: string, pattern: string) {
const regexstring = pattern.replace(/\./g, '\\.').replace(/\*\*/g, '.?').replace(/\*/g, '[^/]*').replace(/\?/g, '*');
const regex = new RegExp('^' + regexstring + '$', 'g');
return regex.test(text);
}
matchesAllSubdirs(dir: string, pattern: string) {
if (pattern.endsWith('/**')) {
return this.matches(this.stringify(dir), pattern.substr(0, pattern.length - 3));
}
else return false;
}
stringify(path: string) {
return path.replace(/\\/g, '/');
}
addCFlag(flag: string) {
this.cFlags.push(flag);
}
addCFlags() {
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addCFlag(arguments[i]);
}
}
}
addCppFlag(flag: string) {
this.cppFlags.push(flag);
}
addCppFlags() {
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addCppFlag(arguments[i]);
}
}
}
addFileForReal(file: string, options: any) {
for (let index in this.files) {
if (this.files[index].file === file) {
this.files[index] = {file: file, options: options, projectDir: this.basedir, projectName: this.name};
return;
}
}
this.files.push({file: file, options: options, projectDir: this.basedir, projectName: this.name});
}
searchFiles(current: any) {
if (current === undefined) {
for (let sub of this.subProjects) sub.searchFiles(undefined);
this.searchFiles(this.basedir);
for (let includeobject of this.includes) {
if (includeobject.file.startsWith('../')) {
let start = '../';
while (includeobject.file.startsWith(start)) {
start += '../';
}
this.searchFiles(path.resolve(this.basedir, start));
}
}
// std::set<std::string> starts;
// for (std::string include : includes) {
// if (!isAbsolute(include)) continue;
// std::string start = include.substr(0, firstIndexOf(include, '*'));
// if (starts.count(start) > 0) continue;
// starts.insert(start);
// searchFiles(Paths::get(start));
// }
return;
}
let files = fs.readdirSync(current);
nextfile: for (let f in files) {
let file = path.join(current, files[f]);
if (fs.statSync(file).isDirectory()) continue;
// if (!current.isAbsolute())
file = path.relative(this.basedir, file);
for (let exclude of this.excludes) {
if (this.matches(this.stringify(file), exclude)) continue nextfile;
}
for (let includeobject of this.includes) {
let include = includeobject.file;
if (isAbsolute(include)) {
let inc = include;
inc = path.relative(this.basedir, inc);
include = inc;
}
if (this.matches(this.stringify(file), include)) {
this.addFileForReal(this.stringify(file), includeobject.options);
}
}
}
let dirs = fs.readdirSync(current);
nextdir: for (let d of dirs) {
let dir = path.join(current, d);
if (d.startsWith('.')) continue;
if (!fs.statSync(dir).isDirectory()) continue;
for (let exclude of this.excludes) {
if (this.matchesAllSubdirs(path.relative(this.basedir, dir), exclude)) {
continue nextdir;
}
}
this.searchFiles(dir);
}
}
addFile(file: string, options: any) {
this.includes.push({file: file, options: options});
}
addCustomFile(file: string, command: string, output: string) {
this.customs.push({file, command, output});
}
addFiles() {
let options: any = undefined;
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] !== 'string') {
options = arguments[i];
}
}
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addFile(arguments[i], options);
}
}
}
addJavaDir(dir: string) {
this.javadirs.push(dir);
}
addJavaDirs() {
for (let i = 0; i < arguments.length; ++i) {
this.addJavaDir(arguments[i]);
}
}
addExclude(exclude: string) {
this.excludes.push(exclude);
}
addExcludes() {
for (let i = 0; i < arguments.length; ++i) |
}
addDefine(value: string, config: string = null) {
const define = {value, config};
if (containsDefine(this.defines, define)) return;
this.defines.push(define);
}
addDefines() {
for (let i = 0; i < arguments.length; ++i) {
this.addDefine(arguments[i]);
}
}
addIncludeDir(include: string) {
if (contains(this.includeDirs, include)) return;
this.includeDirs.push(include);
}
addIncludeDirs() {
for (let i = 0; i < arguments.length; ++i) {
this.addIncludeDir(arguments[i]);
}
}
addLib(lib: string) {
this.libs.push(lib);
}
addLibs() {
for (let i = 0; i < arguments.length; ++i) {
this.addLib(arguments[i]);
}
}
addLibFor(system: string, lib: string) {
if (this.systemDependendLibraries[system] === undefined) this.systemDependendLibraries[system] = [];
this.systemDependendLibraries[system].push(lib);
}
addLibsFor() {
if (this.systemDependendLibraries[arguments[0]] === undefined) this.systemDependendLibraries[arguments[0]] = [];
for (let i = 1; i < arguments.length; ++i) {
this.systemDependendLibraries[arguments[0]].push(arguments[i]);
}
}
getFiles() {
return this.files;
}
getJavaDirs() {
return this.javadirs;
}
getBasedir() {
return this.basedir;
}
getSubProjects() {
return this.subProjects;
}
getIncludeDirs() {
return this.includeDirs;
}
getDefines() {
return this.defines;
}
getLibs() {
return this.libs;
}
getLibsFor(system: string) {
if (this.systemDependendLibraries[system] === undefined) return [];
return this.systemDependendLibraries[system];
}
getDebugDir() {
return this.debugDir;
}
setDebugDir(debugDir: string) {
this.debugDir = path.resolve(this.basedir, debugDir);
}
async addProject(directory: string) {
this.subProjects.push(await loadProject(path.isAbsolute(directory) ? directory : path.join(this.basedir, directory)));
}
static async create(directory: string, platform: string, korefile: string) {
Project.koreDir = path.join(__dirname, '../../..');
Project.platform = platform;
let project = await loadProject(path.resolve(directory), korefile);
if (project.kore) {
await project.addProject(Project.koreDir);
}
let defines = getDefines(platform, project.isRotated());
for (let define of defines) {
project.addDefine(define);
}
return project;
}
isRotated() {
return this.rotated;
}
isCmd() {
return this.cmd;
}
setRotated() {
this.rotated = true;
}
setCmd() {
this.cmd = true;
}
// deprecated
static createProject(): Promise<void> {
log.info('Warning: createProject was removed, see updates.md for instructions.');
return new Promise<void>((resolve, reject) => {
resolve();
});
}
// deprecated
addSubProject() {
}
}
| {
this.addExclude(arguments[i]);
} | conditional_block |
Project.ts | import * as fs from 'fs-extra';
import * as path from 'path';
import * as log from './log';
import {GraphicsApi} from './GraphicsApi';
import {Architecture} from './Architecture';
import {AudioApi} from './AudioApi';
import {VrApi} from './VrApi';
import {RayTraceApi} from './RayTraceApi';
import {Options} from './Options';
import {Platform} from './Platform';
const uuid = require('uuid');
function getDefines(platform: string, rotated: boolean) {
let defines: string[] = [];
switch (platform) {
case Platform.iOS:
if (rotated) defines.push('ROTATE90');
break;
case Platform.Android:
if (rotated) defines.push('ROTATE90');
break;
}
return defines;
}
function contains(array: any[], value: any) {
for (const element of array) {
if (element === value) return true;
}
return false;
}
function containsDefine(array: Define[], value: Define) {
for (const element of array) {
if (element.value === value.value && element.config === value.config) return true;
}
return false;
}
function isAbsolute(path: string) {
return (path.length > 0 && path[0] === '/') || (path.length > 1 && path[1] === ':');
}
let projectInProgress = 0;
process.on('exit', (code: number) => {
if (projectInProgress > 0) {
console.error('Error: korefile did not call resolve, no project created.');
}
});
let scriptdir = '.';
// let lastScriptDir = '.';
let koreDir = '.';
async function loadProject(directory: string, korefile: string = 'kincfile.js'): Promise<Project> {
return new Promise<Project>((resolve, reject) => {
projectInProgress += 1;
let resolver = async (project: Project) => {
projectInProgress -= 1;
// TODO: This accidentally finds Kha/Backends/KoreHL
/*if (fs.existsSync(path.join(scriptdir, 'Backends'))) {
var libdirs = fs.readdirSync(path.join(scriptdir, 'Backends'));
for (var ld in libdirs) {
var libdir = path.join(scriptdir, 'Backends', libdirs[ld]);
if (fs.statSync(libdir).isDirectory()) {
var korefile = path.join(libdir, korefile);
if (fs.existsSync(korefile)) {
project.addSubProject(await Project.createProject(libdir, scriptdir));
}
}
}
}*/
resolve(project);
};
try {
scriptdir = directory;
let file = fs.readFileSync(path.resolve(directory, korefile), 'utf8');
let AsyncFunction = Object.getPrototypeOf(async () => {}).constructor;
let project = new AsyncFunction(
'log',
'Project',
'Platform',
'platform',
'GraphicsApi',
'graphics',
'Architecture',
'arch',
'AudioApi',
'audio',
'VrApi',
'vr',
'RayTraceApi',
'raytrace',
'require',
'resolve',
'reject',
'__dirname',
file)
(
log,
Project,
Platform,
Project.platform,
GraphicsApi,
Options.graphicsApi,
Architecture,
Options.architecture,
AudioApi,
Options.audioApi,
VrApi,
Options.vrApi,
RayTraceApi,
Options.rayTraceApi,
require,
resolver,
reject,
directory);
}
catch (error) {
log.error(error);
throw error;
}
});
}
export interface File {
file: string;
options: any;
projectDir: string;
projectName: string;
}
export class Define {
value: string;
config: string;
}
export class Project {
static platform: string;
static koreDir: string;
static root: string;
name: string;
safeName: string;
version: string;
id: string;
debugDir: string;
basedir: string;
uuid: string;
files: File[];
javadirs: string[];
subProjects: Project[];
includeDirs: string[];
defines: Define[];
libs: string[];
systemDependendLibraries: any;
includes: {file: string, options: any}[];
excludes: string[];
customs: {file: string, command: string, output: string}[];
cpp11: boolean;
c11: boolean;
kore: boolean;
targetOptions: any;
rotated: boolean;
cmd: boolean;
cFlags: string[] = [];
cppFlags: string[] = [];
stackSize: number;
icon: string = null;
constructor(name: string) {
this.name = name;
this.safeName = name.replace(/[^A-z0-9\-\_]/g, '-');;
this.version = '1.0';
this.debugDir = '';
this.basedir = scriptdir;
this.uuid = uuid.v4();
this.files = [];
this.customs = [];
this.javadirs = [];
this.subProjects = [];
this.includeDirs = [];
this.defines = [];
this.libs = [];
this.systemDependendLibraries = {};
this.includes = [];
this.excludes = [];
this.cpp11 = false;
this.c11 = false;
this.kore = true;
this.targetOptions = {
android: {},
xboxOne: {},
playStation4: {},
switch: {}
};
this.rotated = false;
this.cmd = false;
this.stackSize = 0;
}
flatten() {
for (let sub of this.subProjects) sub.flatten();
for (let sub of this.subProjects) {
if (sub.cpp11) {
this.cpp11 = true;
}
if (sub.c11) {
this.c11 = true;
}
if (sub.cmd) {
this.cmd = true;
}
let subbasedir = sub.basedir;
for (let tkey of Object.keys(sub.targetOptions)) {
const target = sub.targetOptions[tkey];
for (let key of Object.keys(target)) {
const options = this.targetOptions[tkey];
const option = target[key];
if (options[key] == null) options[key] = option;
// push library properties to current array instead
else if (Array.isArray(options[key]) && Array.isArray(option)) {
for (let value of option) {
if (!options[key].includes(value)) options[key].push(value);
}
}
}
}
for (let d of sub.defines) if (!containsDefine(this.defines, d)) this.defines.push(d);
for (let file of sub.files) {
let absolute = file.file;
if (!path.isAbsolute(absolute)) {
absolute = path.join(subbasedir, file.file);
}
this.files.push({file: absolute.replace(/\\/g, '/'), options: file.options, projectDir: subbasedir, projectName: sub.name });
}
for (const custom of sub.customs) {
let absolute = custom.file;
if (!path.isAbsolute(absolute)) {
absolute = path.join(subbasedir, custom.file);
}
this.customs.push({file: absolute.replace(/\\/g, '/'), command: custom.command, output: custom.output });
}
for (let i of sub.includeDirs) if (!contains(this.includeDirs, path.resolve(subbasedir, i))) this.includeDirs.push(path.resolve(subbasedir, i));
for (let j of sub.javadirs) if (!contains(this.javadirs, path.resolve(subbasedir, j))) this.javadirs.push(path.resolve(subbasedir, j));
for (let lib of sub.libs) {
if (lib.indexOf('/') < 0 && lib.indexOf('\\') < 0) {
if (!contains(this.libs, lib)) this.libs.push(lib);
}
else {
if (!contains(this.libs, path.resolve(subbasedir, lib))) this.libs.push(path.resolve(subbasedir, lib));
}
}
for (let system in sub.systemDependendLibraries) {
let libs = sub.systemDependendLibraries[system];
for (let lib of libs) {
if (this.systemDependendLibraries[system] === undefined) this.systemDependendLibraries[system] = [];
if (!contains(this.systemDependendLibraries[system], this.stringify(path.resolve(subbasedir, lib)))) {
if (!contains(lib, '/') && !contains(lib, '\\')) this.systemDependendLibraries[system].push(lib);
else this.systemDependendLibraries[system].push(this.stringify(path.resolve(subbasedir, lib)));
}
}
}
for (let flag of sub.cFlags) {
if (!this.cFlags.includes(flag)) {
this.cFlags.push(flag);
}
}
for (let flag of sub.cppFlags) {
if (!this.cppFlags.includes(flag)) {
this.cppFlags.push(flag);
}
}
}
this.subProjects = [];
}
getName() {
return this.name;
}
getSafeName() {
return this.safeName;
}
getUuid() {
return this.uuid;
}
matches(text: string, pattern: string) {
const regexstring = pattern.replace(/\./g, '\\.').replace(/\*\*/g, '.?').replace(/\*/g, '[^/]*').replace(/\?/g, '*');
const regex = new RegExp('^' + regexstring + '$', 'g');
return regex.test(text);
}
matchesAllSubdirs(dir: string, pattern: string) {
if (pattern.endsWith('/**')) {
return this.matches(this.stringify(dir), pattern.substr(0, pattern.length - 3));
}
else return false;
}
stringify(path: string) {
return path.replace(/\\/g, '/');
}
addCFlag(flag: string) {
this.cFlags.push(flag);
}
addCFlags() {
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addCFlag(arguments[i]);
}
}
}
addCppFlag(flag: string) {
this.cppFlags.push(flag);
}
addCppFlags() {
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addCppFlag(arguments[i]);
}
}
}
addFileForReal(file: string, options: any) {
for (let index in this.files) {
if (this.files[index].file === file) {
this.files[index] = {file: file, options: options, projectDir: this.basedir, projectName: this.name};
return;
}
}
this.files.push({file: file, options: options, projectDir: this.basedir, projectName: this.name});
}
searchFiles(current: any) {
if (current === undefined) {
for (let sub of this.subProjects) sub.searchFiles(undefined);
this.searchFiles(this.basedir);
for (let includeobject of this.includes) {
if (includeobject.file.startsWith('../')) {
let start = '../';
while (includeobject.file.startsWith(start)) {
start += '../';
}
this.searchFiles(path.resolve(this.basedir, start));
}
}
// std::set<std::string> starts;
// for (std::string include : includes) {
// if (!isAbsolute(include)) continue;
// std::string start = include.substr(0, firstIndexOf(include, '*'));
// if (starts.count(start) > 0) continue;
// starts.insert(start);
// searchFiles(Paths::get(start));
// }
return;
}
let files = fs.readdirSync(current);
nextfile: for (let f in files) {
let file = path.join(current, files[f]);
if (fs.statSync(file).isDirectory()) continue;
// if (!current.isAbsolute())
file = path.relative(this.basedir, file);
for (let exclude of this.excludes) {
if (this.matches(this.stringify(file), exclude)) continue nextfile;
}
for (let includeobject of this.includes) {
let include = includeobject.file;
if (isAbsolute(include)) {
let inc = include;
inc = path.relative(this.basedir, inc);
include = inc;
}
if (this.matches(this.stringify(file), include)) {
this.addFileForReal(this.stringify(file), includeobject.options);
}
}
}
let dirs = fs.readdirSync(current);
nextdir: for (let d of dirs) {
let dir = path.join(current, d);
if (d.startsWith('.')) continue;
if (!fs.statSync(dir).isDirectory()) continue;
for (let exclude of this.excludes) {
if (this.matchesAllSubdirs(path.relative(this.basedir, dir), exclude)) {
continue nextdir;
}
}
this.searchFiles(dir);
}
}
addFile(file: string, options: any) {
this.includes.push({file: file, options: options});
}
addCustomFile(file: string, command: string, output: string) {
this.customs.push({file, command, output});
}
addFiles() {
let options: any = undefined;
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] !== 'string') {
options = arguments[i];
}
}
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addFile(arguments[i], options);
}
}
}
addJavaDir(dir: string) {
this.javadirs.push(dir);
}
addJavaDirs() {
for (let i = 0; i < arguments.length; ++i) {
this.addJavaDir(arguments[i]);
}
}
addExclude(exclude: string) {
this.excludes.push(exclude);
}
addExcludes() {
for (let i = 0; i < arguments.length; ++i) {
this.addExclude(arguments[i]);
}
}
addDefine(value: string, config: string = null) {
const define = {value, config};
if (containsDefine(this.defines, define)) return;
this.defines.push(define);
}
addDefines() {
for (let i = 0; i < arguments.length; ++i) {
this.addDefine(arguments[i]);
}
}
addIncludeDir(include: string) {
if (contains(this.includeDirs, include)) return;
this.includeDirs.push(include);
}
addIncludeDirs() |
addLib(lib: string) {
this.libs.push(lib);
}
addLibs() {
for (let i = 0; i < arguments.length; ++i) {
this.addLib(arguments[i]);
}
}
addLibFor(system: string, lib: string) {
if (this.systemDependendLibraries[system] === undefined) this.systemDependendLibraries[system] = [];
this.systemDependendLibraries[system].push(lib);
}
addLibsFor() {
if (this.systemDependendLibraries[arguments[0]] === undefined) this.systemDependendLibraries[arguments[0]] = [];
for (let i = 1; i < arguments.length; ++i) {
this.systemDependendLibraries[arguments[0]].push(arguments[i]);
}
}
getFiles() {
return this.files;
}
getJavaDirs() {
return this.javadirs;
}
getBasedir() {
return this.basedir;
}
getSubProjects() {
return this.subProjects;
}
getIncludeDirs() {
return this.includeDirs;
}
getDefines() {
return this.defines;
}
getLibs() {
return this.libs;
}
getLibsFor(system: string) {
if (this.systemDependendLibraries[system] === undefined) return [];
return this.systemDependendLibraries[system];
}
getDebugDir() {
return this.debugDir;
}
setDebugDir(debugDir: string) {
this.debugDir = path.resolve(this.basedir, debugDir);
}
async addProject(directory: string) {
this.subProjects.push(await loadProject(path.isAbsolute(directory) ? directory : path.join(this.basedir, directory)));
}
static async create(directory: string, platform: string, korefile: string) {
Project.koreDir = path.join(__dirname, '../../..');
Project.platform = platform;
let project = await loadProject(path.resolve(directory), korefile);
if (project.kore) {
await project.addProject(Project.koreDir);
}
let defines = getDefines(platform, project.isRotated());
for (let define of defines) {
project.addDefine(define);
}
return project;
}
isRotated() {
return this.rotated;
}
isCmd() {
return this.cmd;
}
setRotated() {
this.rotated = true;
}
setCmd() {
this.cmd = true;
}
// deprecated
static createProject(): Promise<void> {
log.info('Warning: createProject was removed, see updates.md for instructions.');
return new Promise<void>((resolve, reject) => {
resolve();
});
}
// deprecated
addSubProject() {
}
}
| {
for (let i = 0; i < arguments.length; ++i) {
this.addIncludeDir(arguments[i]);
}
} | identifier_body |
Project.ts | import * as fs from 'fs-extra';
import * as path from 'path';
import * as log from './log';
import {GraphicsApi} from './GraphicsApi';
import {Architecture} from './Architecture';
import {AudioApi} from './AudioApi';
import {VrApi} from './VrApi';
import {RayTraceApi} from './RayTraceApi';
import {Options} from './Options';
import {Platform} from './Platform';
const uuid = require('uuid');
function getDefines(platform: string, rotated: boolean) {
let defines: string[] = [];
switch (platform) {
case Platform.iOS:
if (rotated) defines.push('ROTATE90');
break;
case Platform.Android:
if (rotated) defines.push('ROTATE90');
break;
}
return defines;
}
function contains(array: any[], value: any) {
for (const element of array) {
if (element === value) return true;
}
return false;
}
function containsDefine(array: Define[], value: Define) {
for (const element of array) {
if (element.value === value.value && element.config === value.config) return true;
}
return false;
}
function isAbsolute(path: string) {
return (path.length > 0 && path[0] === '/') || (path.length > 1 && path[1] === ':');
}
let projectInProgress = 0;
process.on('exit', (code: number) => {
if (projectInProgress > 0) {
console.error('Error: korefile did not call resolve, no project created.');
}
});
let scriptdir = '.';
// let lastScriptDir = '.';
let koreDir = '.';
async function loadProject(directory: string, korefile: string = 'kincfile.js'): Promise<Project> {
return new Promise<Project>((resolve, reject) => {
projectInProgress += 1;
let resolver = async (project: Project) => {
projectInProgress -= 1;
// TODO: This accidentally finds Kha/Backends/KoreHL
/*if (fs.existsSync(path.join(scriptdir, 'Backends'))) {
var libdirs = fs.readdirSync(path.join(scriptdir, 'Backends'));
for (var ld in libdirs) {
var libdir = path.join(scriptdir, 'Backends', libdirs[ld]);
if (fs.statSync(libdir).isDirectory()) {
var korefile = path.join(libdir, korefile);
if (fs.existsSync(korefile)) {
project.addSubProject(await Project.createProject(libdir, scriptdir));
}
}
}
}*/
resolve(project);
};
try {
scriptdir = directory;
let file = fs.readFileSync(path.resolve(directory, korefile), 'utf8');
let AsyncFunction = Object.getPrototypeOf(async () => {}).constructor;
let project = new AsyncFunction(
'log',
'Project',
'Platform',
'platform',
'GraphicsApi',
'graphics',
'Architecture',
'arch',
'AudioApi',
'audio',
'VrApi',
'vr',
'RayTraceApi',
'raytrace',
'require',
'resolve',
'reject',
'__dirname',
file)
(
log,
Project,
Platform,
Project.platform,
GraphicsApi,
Options.graphicsApi,
Architecture,
Options.architecture,
AudioApi,
Options.audioApi,
VrApi,
Options.vrApi,
RayTraceApi,
Options.rayTraceApi,
require,
resolver,
reject,
directory);
}
catch (error) {
log.error(error);
throw error;
}
});
}
export interface File {
file: string;
options: any;
projectDir: string;
projectName: string;
}
export class | {
value: string;
config: string;
}
export class Project {
static platform: string;
static koreDir: string;
static root: string;
name: string;
safeName: string;
version: string;
id: string;
debugDir: string;
basedir: string;
uuid: string;
files: File[];
javadirs: string[];
subProjects: Project[];
includeDirs: string[];
defines: Define[];
libs: string[];
systemDependendLibraries: any;
includes: {file: string, options: any}[];
excludes: string[];
customs: {file: string, command: string, output: string}[];
cpp11: boolean;
c11: boolean;
kore: boolean;
targetOptions: any;
rotated: boolean;
cmd: boolean;
cFlags: string[] = [];
cppFlags: string[] = [];
stackSize: number;
icon: string = null;
constructor(name: string) {
this.name = name;
this.safeName = name.replace(/[^A-z0-9\-\_]/g, '-');;
this.version = '1.0';
this.debugDir = '';
this.basedir = scriptdir;
this.uuid = uuid.v4();
this.files = [];
this.customs = [];
this.javadirs = [];
this.subProjects = [];
this.includeDirs = [];
this.defines = [];
this.libs = [];
this.systemDependendLibraries = {};
this.includes = [];
this.excludes = [];
this.cpp11 = false;
this.c11 = false;
this.kore = true;
this.targetOptions = {
android: {},
xboxOne: {},
playStation4: {},
switch: {}
};
this.rotated = false;
this.cmd = false;
this.stackSize = 0;
}
flatten() {
for (let sub of this.subProjects) sub.flatten();
for (let sub of this.subProjects) {
if (sub.cpp11) {
this.cpp11 = true;
}
if (sub.c11) {
this.c11 = true;
}
if (sub.cmd) {
this.cmd = true;
}
let subbasedir = sub.basedir;
for (let tkey of Object.keys(sub.targetOptions)) {
const target = sub.targetOptions[tkey];
for (let key of Object.keys(target)) {
const options = this.targetOptions[tkey];
const option = target[key];
if (options[key] == null) options[key] = option;
// push library properties to current array instead
else if (Array.isArray(options[key]) && Array.isArray(option)) {
for (let value of option) {
if (!options[key].includes(value)) options[key].push(value);
}
}
}
}
for (let d of sub.defines) if (!containsDefine(this.defines, d)) this.defines.push(d);
for (let file of sub.files) {
let absolute = file.file;
if (!path.isAbsolute(absolute)) {
absolute = path.join(subbasedir, file.file);
}
this.files.push({file: absolute.replace(/\\/g, '/'), options: file.options, projectDir: subbasedir, projectName: sub.name });
}
for (const custom of sub.customs) {
let absolute = custom.file;
if (!path.isAbsolute(absolute)) {
absolute = path.join(subbasedir, custom.file);
}
this.customs.push({file: absolute.replace(/\\/g, '/'), command: custom.command, output: custom.output });
}
for (let i of sub.includeDirs) if (!contains(this.includeDirs, path.resolve(subbasedir, i))) this.includeDirs.push(path.resolve(subbasedir, i));
for (let j of sub.javadirs) if (!contains(this.javadirs, path.resolve(subbasedir, j))) this.javadirs.push(path.resolve(subbasedir, j));
for (let lib of sub.libs) {
if (lib.indexOf('/') < 0 && lib.indexOf('\\') < 0) {
if (!contains(this.libs, lib)) this.libs.push(lib);
}
else {
if (!contains(this.libs, path.resolve(subbasedir, lib))) this.libs.push(path.resolve(subbasedir, lib));
}
}
for (let system in sub.systemDependendLibraries) {
let libs = sub.systemDependendLibraries[system];
for (let lib of libs) {
if (this.systemDependendLibraries[system] === undefined) this.systemDependendLibraries[system] = [];
if (!contains(this.systemDependendLibraries[system], this.stringify(path.resolve(subbasedir, lib)))) {
if (!contains(lib, '/') && !contains(lib, '\\')) this.systemDependendLibraries[system].push(lib);
else this.systemDependendLibraries[system].push(this.stringify(path.resolve(subbasedir, lib)));
}
}
}
for (let flag of sub.cFlags) {
if (!this.cFlags.includes(flag)) {
this.cFlags.push(flag);
}
}
for (let flag of sub.cppFlags) {
if (!this.cppFlags.includes(flag)) {
this.cppFlags.push(flag);
}
}
}
this.subProjects = [];
}
getName() {
return this.name;
}
getSafeName() {
return this.safeName;
}
getUuid() {
return this.uuid;
}
matches(text: string, pattern: string) {
const regexstring = pattern.replace(/\./g, '\\.').replace(/\*\*/g, '.?').replace(/\*/g, '[^/]*').replace(/\?/g, '*');
const regex = new RegExp('^' + regexstring + '$', 'g');
return regex.test(text);
}
matchesAllSubdirs(dir: string, pattern: string) {
if (pattern.endsWith('/**')) {
return this.matches(this.stringify(dir), pattern.substr(0, pattern.length - 3));
}
else return false;
}
stringify(path: string) {
return path.replace(/\\/g, '/');
}
addCFlag(flag: string) {
this.cFlags.push(flag);
}
addCFlags() {
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addCFlag(arguments[i]);
}
}
}
addCppFlag(flag: string) {
this.cppFlags.push(flag);
}
addCppFlags() {
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addCppFlag(arguments[i]);
}
}
}
addFileForReal(file: string, options: any) {
for (let index in this.files) {
if (this.files[index].file === file) {
this.files[index] = {file: file, options: options, projectDir: this.basedir, projectName: this.name};
return;
}
}
this.files.push({file: file, options: options, projectDir: this.basedir, projectName: this.name});
}
searchFiles(current: any) {
if (current === undefined) {
for (let sub of this.subProjects) sub.searchFiles(undefined);
this.searchFiles(this.basedir);
for (let includeobject of this.includes) {
if (includeobject.file.startsWith('../')) {
let start = '../';
while (includeobject.file.startsWith(start)) {
start += '../';
}
this.searchFiles(path.resolve(this.basedir, start));
}
}
// std::set<std::string> starts;
// for (std::string include : includes) {
// if (!isAbsolute(include)) continue;
// std::string start = include.substr(0, firstIndexOf(include, '*'));
// if (starts.count(start) > 0) continue;
// starts.insert(start);
// searchFiles(Paths::get(start));
// }
return;
}
let files = fs.readdirSync(current);
nextfile: for (let f in files) {
let file = path.join(current, files[f]);
if (fs.statSync(file).isDirectory()) continue;
// if (!current.isAbsolute())
file = path.relative(this.basedir, file);
for (let exclude of this.excludes) {
if (this.matches(this.stringify(file), exclude)) continue nextfile;
}
for (let includeobject of this.includes) {
let include = includeobject.file;
if (isAbsolute(include)) {
let inc = include;
inc = path.relative(this.basedir, inc);
include = inc;
}
if (this.matches(this.stringify(file), include)) {
this.addFileForReal(this.stringify(file), includeobject.options);
}
}
}
let dirs = fs.readdirSync(current);
nextdir: for (let d of dirs) {
let dir = path.join(current, d);
if (d.startsWith('.')) continue;
if (!fs.statSync(dir).isDirectory()) continue;
for (let exclude of this.excludes) {
if (this.matchesAllSubdirs(path.relative(this.basedir, dir), exclude)) {
continue nextdir;
}
}
this.searchFiles(dir);
}
}
addFile(file: string, options: any) {
this.includes.push({file: file, options: options});
}
addCustomFile(file: string, command: string, output: string) {
this.customs.push({file, command, output});
}
addFiles() {
let options: any = undefined;
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] !== 'string') {
options = arguments[i];
}
}
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addFile(arguments[i], options);
}
}
}
addJavaDir(dir: string) {
this.javadirs.push(dir);
}
addJavaDirs() {
for (let i = 0; i < arguments.length; ++i) {
this.addJavaDir(arguments[i]);
}
}
addExclude(exclude: string) {
this.excludes.push(exclude);
}
addExcludes() {
for (let i = 0; i < arguments.length; ++i) {
this.addExclude(arguments[i]);
}
}
addDefine(value: string, config: string = null) {
const define = {value, config};
if (containsDefine(this.defines, define)) return;
this.defines.push(define);
}
addDefines() {
for (let i = 0; i < arguments.length; ++i) {
this.addDefine(arguments[i]);
}
}
addIncludeDir(include: string) {
if (contains(this.includeDirs, include)) return;
this.includeDirs.push(include);
}
addIncludeDirs() {
for (let i = 0; i < arguments.length; ++i) {
this.addIncludeDir(arguments[i]);
}
}
addLib(lib: string) {
this.libs.push(lib);
}
addLibs() {
for (let i = 0; i < arguments.length; ++i) {
this.addLib(arguments[i]);
}
}
addLibFor(system: string, lib: string) {
if (this.systemDependendLibraries[system] === undefined) this.systemDependendLibraries[system] = [];
this.systemDependendLibraries[system].push(lib);
}
addLibsFor() {
if (this.systemDependendLibraries[arguments[0]] === undefined) this.systemDependendLibraries[arguments[0]] = [];
for (let i = 1; i < arguments.length; ++i) {
this.systemDependendLibraries[arguments[0]].push(arguments[i]);
}
}
getFiles() {
return this.files;
}
getJavaDirs() {
return this.javadirs;
}
getBasedir() {
return this.basedir;
}
getSubProjects() {
return this.subProjects;
}
getIncludeDirs() {
return this.includeDirs;
}
getDefines() {
return this.defines;
}
getLibs() {
return this.libs;
}
getLibsFor(system: string) {
if (this.systemDependendLibraries[system] === undefined) return [];
return this.systemDependendLibraries[system];
}
getDebugDir() {
return this.debugDir;
}
setDebugDir(debugDir: string) {
this.debugDir = path.resolve(this.basedir, debugDir);
}
async addProject(directory: string) {
this.subProjects.push(await loadProject(path.isAbsolute(directory) ? directory : path.join(this.basedir, directory)));
}
static async create(directory: string, platform: string, korefile: string) {
Project.koreDir = path.join(__dirname, '../../..');
Project.platform = platform;
let project = await loadProject(path.resolve(directory), korefile);
if (project.kore) {
await project.addProject(Project.koreDir);
}
let defines = getDefines(platform, project.isRotated());
for (let define of defines) {
project.addDefine(define);
}
return project;
}
isRotated() {
return this.rotated;
}
isCmd() {
return this.cmd;
}
setRotated() {
this.rotated = true;
}
setCmd() {
this.cmd = true;
}
// deprecated
static createProject(): Promise<void> {
log.info('Warning: createProject was removed, see updates.md for instructions.');
return new Promise<void>((resolve, reject) => {
resolve();
});
}
// deprecated
addSubProject() {
}
}
| Define | identifier_name |
Project.ts | import * as fs from 'fs-extra';
import * as path from 'path';
import * as log from './log';
import {GraphicsApi} from './GraphicsApi';
import {Architecture} from './Architecture';
import {AudioApi} from './AudioApi';
import {VrApi} from './VrApi';
import {RayTraceApi} from './RayTraceApi';
import {Options} from './Options';
import {Platform} from './Platform';
const uuid = require('uuid');
function getDefines(platform: string, rotated: boolean) {
let defines: string[] = [];
switch (platform) {
case Platform.iOS:
if (rotated) defines.push('ROTATE90');
break;
case Platform.Android:
if (rotated) defines.push('ROTATE90');
break;
}
return defines;
}
function contains(array: any[], value: any) {
for (const element of array) {
if (element === value) return true;
}
return false;
}
function containsDefine(array: Define[], value: Define) {
for (const element of array) {
if (element.value === value.value && element.config === value.config) return true;
}
return false;
}
function isAbsolute(path: string) {
return (path.length > 0 && path[0] === '/') || (path.length > 1 && path[1] === ':');
}
let projectInProgress = 0;
process.on('exit', (code: number) => {
if (projectInProgress > 0) {
console.error('Error: korefile did not call resolve, no project created.');
}
});
let scriptdir = '.';
// let lastScriptDir = '.';
let koreDir = '.';
async function loadProject(directory: string, korefile: string = 'kincfile.js'): Promise<Project> {
return new Promise<Project>((resolve, reject) => {
projectInProgress += 1;
let resolver = async (project: Project) => {
projectInProgress -= 1;
// TODO: This accidentally finds Kha/Backends/KoreHL
/*if (fs.existsSync(path.join(scriptdir, 'Backends'))) {
var libdirs = fs.readdirSync(path.join(scriptdir, 'Backends'));
for (var ld in libdirs) {
var libdir = path.join(scriptdir, 'Backends', libdirs[ld]);
if (fs.statSync(libdir).isDirectory()) {
var korefile = path.join(libdir, korefile);
if (fs.existsSync(korefile)) {
project.addSubProject(await Project.createProject(libdir, scriptdir));
}
}
}
}*/
resolve(project);
};
try {
scriptdir = directory;
let file = fs.readFileSync(path.resolve(directory, korefile), 'utf8');
let AsyncFunction = Object.getPrototypeOf(async () => {}).constructor;
let project = new AsyncFunction(
'log',
'Project',
'Platform',
'platform',
'GraphicsApi',
'graphics',
'Architecture',
'arch',
'AudioApi',
'audio',
'VrApi',
'vr',
'RayTraceApi',
'raytrace',
'require',
'resolve',
'reject',
'__dirname',
file)
(
log,
Project,
Platform,
Project.platform,
GraphicsApi,
Options.graphicsApi,
Architecture,
Options.architecture,
AudioApi,
Options.audioApi,
VrApi,
Options.vrApi,
RayTraceApi,
Options.rayTraceApi,
require,
resolver,
reject,
directory);
}
catch (error) {
log.error(error);
throw error;
}
});
}
export interface File {
file: string;
options: any;
projectDir: string;
projectName: string;
}
export class Define {
value: string;
config: string;
}
export class Project {
static platform: string;
static koreDir: string;
static root: string;
name: string;
safeName: string;
version: string;
id: string;
debugDir: string;
basedir: string;
uuid: string;
files: File[];
javadirs: string[];
subProjects: Project[];
includeDirs: string[];
defines: Define[];
libs: string[];
systemDependendLibraries: any;
includes: {file: string, options: any}[];
excludes: string[];
customs: {file: string, command: string, output: string}[];
cpp11: boolean;
c11: boolean;
kore: boolean;
targetOptions: any;
rotated: boolean;
cmd: boolean;
cFlags: string[] = [];
cppFlags: string[] = [];
stackSize: number;
icon: string = null;
constructor(name: string) {
this.name = name;
this.safeName = name.replace(/[^A-z0-9\-\_]/g, '-');;
this.version = '1.0';
this.debugDir = '';
this.basedir = scriptdir;
this.uuid = uuid.v4();
this.files = [];
this.customs = [];
this.javadirs = [];
this.subProjects = [];
this.includeDirs = [];
this.defines = [];
this.libs = [];
this.systemDependendLibraries = {};
this.includes = [];
this.excludes = [];
this.cpp11 = false;
this.c11 = false;
this.kore = true;
this.targetOptions = {
android: {},
xboxOne: {},
playStation4: {},
switch: {}
};
this.rotated = false;
this.cmd = false;
this.stackSize = 0;
}
flatten() {
for (let sub of this.subProjects) sub.flatten();
for (let sub of this.subProjects) {
if (sub.cpp11) {
this.cpp11 = true;
}
if (sub.c11) {
this.c11 = true;
}
if (sub.cmd) {
this.cmd = true;
}
let subbasedir = sub.basedir;
for (let tkey of Object.keys(sub.targetOptions)) {
| // push library properties to current array instead
else if (Array.isArray(options[key]) && Array.isArray(option)) {
for (let value of option) {
if (!options[key].includes(value)) options[key].push(value);
}
}
}
}
for (let d of sub.defines) if (!containsDefine(this.defines, d)) this.defines.push(d);
for (let file of sub.files) {
let absolute = file.file;
if (!path.isAbsolute(absolute)) {
absolute = path.join(subbasedir, file.file);
}
this.files.push({file: absolute.replace(/\\/g, '/'), options: file.options, projectDir: subbasedir, projectName: sub.name });
}
for (const custom of sub.customs) {
let absolute = custom.file;
if (!path.isAbsolute(absolute)) {
absolute = path.join(subbasedir, custom.file);
}
this.customs.push({file: absolute.replace(/\\/g, '/'), command: custom.command, output: custom.output });
}
for (let i of sub.includeDirs) if (!contains(this.includeDirs, path.resolve(subbasedir, i))) this.includeDirs.push(path.resolve(subbasedir, i));
for (let j of sub.javadirs) if (!contains(this.javadirs, path.resolve(subbasedir, j))) this.javadirs.push(path.resolve(subbasedir, j));
for (let lib of sub.libs) {
if (lib.indexOf('/') < 0 && lib.indexOf('\\') < 0) {
if (!contains(this.libs, lib)) this.libs.push(lib);
}
else {
if (!contains(this.libs, path.resolve(subbasedir, lib))) this.libs.push(path.resolve(subbasedir, lib));
}
}
for (let system in sub.systemDependendLibraries) {
let libs = sub.systemDependendLibraries[system];
for (let lib of libs) {
if (this.systemDependendLibraries[system] === undefined) this.systemDependendLibraries[system] = [];
if (!contains(this.systemDependendLibraries[system], this.stringify(path.resolve(subbasedir, lib)))) {
if (!contains(lib, '/') && !contains(lib, '\\')) this.systemDependendLibraries[system].push(lib);
else this.systemDependendLibraries[system].push(this.stringify(path.resolve(subbasedir, lib)));
}
}
}
for (let flag of sub.cFlags) {
if (!this.cFlags.includes(flag)) {
this.cFlags.push(flag);
}
}
for (let flag of sub.cppFlags) {
if (!this.cppFlags.includes(flag)) {
this.cppFlags.push(flag);
}
}
}
this.subProjects = [];
}
getName() {
return this.name;
}
getSafeName() {
return this.safeName;
}
getUuid() {
return this.uuid;
}
matches(text: string, pattern: string) {
const regexstring = pattern.replace(/\./g, '\\.').replace(/\*\*/g, '.?').replace(/\*/g, '[^/]*').replace(/\?/g, '*');
const regex = new RegExp('^' + regexstring + '$', 'g');
return regex.test(text);
}
matchesAllSubdirs(dir: string, pattern: string) {
if (pattern.endsWith('/**')) {
return this.matches(this.stringify(dir), pattern.substr(0, pattern.length - 3));
}
else return false;
}
stringify(path: string) {
return path.replace(/\\/g, '/');
}
addCFlag(flag: string) {
this.cFlags.push(flag);
}
addCFlags() {
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addCFlag(arguments[i]);
}
}
}
addCppFlag(flag: string) {
this.cppFlags.push(flag);
}
addCppFlags() {
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addCppFlag(arguments[i]);
}
}
}
addFileForReal(file: string, options: any) {
for (let index in this.files) {
if (this.files[index].file === file) {
this.files[index] = {file: file, options: options, projectDir: this.basedir, projectName: this.name};
return;
}
}
this.files.push({file: file, options: options, projectDir: this.basedir, projectName: this.name});
}
searchFiles(current: any) {
if (current === undefined) {
for (let sub of this.subProjects) sub.searchFiles(undefined);
this.searchFiles(this.basedir);
for (let includeobject of this.includes) {
if (includeobject.file.startsWith('../')) {
let start = '../';
while (includeobject.file.startsWith(start)) {
start += '../';
}
this.searchFiles(path.resolve(this.basedir, start));
}
}
// std::set<std::string> starts;
// for (std::string include : includes) {
// if (!isAbsolute(include)) continue;
// std::string start = include.substr(0, firstIndexOf(include, '*'));
// if (starts.count(start) > 0) continue;
// starts.insert(start);
// searchFiles(Paths::get(start));
// }
return;
}
let files = fs.readdirSync(current);
nextfile: for (let f in files) {
let file = path.join(current, files[f]);
if (fs.statSync(file).isDirectory()) continue;
// if (!current.isAbsolute())
file = path.relative(this.basedir, file);
for (let exclude of this.excludes) {
if (this.matches(this.stringify(file), exclude)) continue nextfile;
}
for (let includeobject of this.includes) {
let include = includeobject.file;
if (isAbsolute(include)) {
let inc = include;
inc = path.relative(this.basedir, inc);
include = inc;
}
if (this.matches(this.stringify(file), include)) {
this.addFileForReal(this.stringify(file), includeobject.options);
}
}
}
let dirs = fs.readdirSync(current);
nextdir: for (let d of dirs) {
let dir = path.join(current, d);
if (d.startsWith('.')) continue;
if (!fs.statSync(dir).isDirectory()) continue;
for (let exclude of this.excludes) {
if (this.matchesAllSubdirs(path.relative(this.basedir, dir), exclude)) {
continue nextdir;
}
}
this.searchFiles(dir);
}
}
addFile(file: string, options: any) {
this.includes.push({file: file, options: options});
}
addCustomFile(file: string, command: string, output: string) {
this.customs.push({file, command, output});
}
addFiles() {
let options: any = undefined;
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] !== 'string') {
options = arguments[i];
}
}
for (let i = 0; i < arguments.length; ++i) {
if (typeof arguments[i] === 'string') {
this.addFile(arguments[i], options);
}
}
}
addJavaDir(dir: string) {
this.javadirs.push(dir);
}
addJavaDirs() {
for (let i = 0; i < arguments.length; ++i) {
this.addJavaDir(arguments[i]);
}
}
addExclude(exclude: string) {
this.excludes.push(exclude);
}
addExcludes() {
for (let i = 0; i < arguments.length; ++i) {
this.addExclude(arguments[i]);
}
}
addDefine(value: string, config: string = null) {
const define = {value, config};
if (containsDefine(this.defines, define)) return;
this.defines.push(define);
}
addDefines() {
for (let i = 0; i < arguments.length; ++i) {
this.addDefine(arguments[i]);
}
}
addIncludeDir(include: string) {
if (contains(this.includeDirs, include)) return;
this.includeDirs.push(include);
}
addIncludeDirs() {
for (let i = 0; i < arguments.length; ++i) {
this.addIncludeDir(arguments[i]);
}
}
addLib(lib: string) {
this.libs.push(lib);
}
addLibs() {
for (let i = 0; i < arguments.length; ++i) {
this.addLib(arguments[i]);
}
}
addLibFor(system: string, lib: string) {
if (this.systemDependendLibraries[system] === undefined) this.systemDependendLibraries[system] = [];
this.systemDependendLibraries[system].push(lib);
}
addLibsFor() {
if (this.systemDependendLibraries[arguments[0]] === undefined) this.systemDependendLibraries[arguments[0]] = [];
for (let i = 1; i < arguments.length; ++i) {
this.systemDependendLibraries[arguments[0]].push(arguments[i]);
}
}
getFiles() {
return this.files;
}
getJavaDirs() {
return this.javadirs;
}
getBasedir() {
return this.basedir;
}
getSubProjects() {
return this.subProjects;
}
getIncludeDirs() {
return this.includeDirs;
}
getDefines() {
return this.defines;
}
getLibs() {
return this.libs;
}
getLibsFor(system: string) {
if (this.systemDependendLibraries[system] === undefined) return [];
return this.systemDependendLibraries[system];
}
getDebugDir() {
return this.debugDir;
}
setDebugDir(debugDir: string) {
this.debugDir = path.resolve(this.basedir, debugDir);
}
async addProject(directory: string) {
this.subProjects.push(await loadProject(path.isAbsolute(directory) ? directory : path.join(this.basedir, directory)));
}
static async create(directory: string, platform: string, korefile: string) {
Project.koreDir = path.join(__dirname, '../../..');
Project.platform = platform;
let project = await loadProject(path.resolve(directory), korefile);
if (project.kore) {
await project.addProject(Project.koreDir);
}
let defines = getDefines(platform, project.isRotated());
for (let define of defines) {
project.addDefine(define);
}
return project;
}
isRotated() {
return this.rotated;
}
isCmd() {
return this.cmd;
}
setRotated() {
this.rotated = true;
}
setCmd() {
this.cmd = true;
}
// deprecated
static createProject(): Promise<void> {
log.info('Warning: createProject was removed, see updates.md for instructions.');
return new Promise<void>((resolve, reject) => {
resolve();
});
}
// deprecated
addSubProject() {
}
} | const target = sub.targetOptions[tkey];
for (let key of Object.keys(target)) {
const options = this.targetOptions[tkey];
const option = target[key];
if (options[key] == null) options[key] = option;
| random_line_split |
raft.go | package raft
//
// this is an outline of the API that raft must expose to
// the service (or tester). see comments below for
// each of these functions for more details.
//
// rf = Make(...)
// create a new Raft server.
// rf.Start(command interface{}) (index, term, isleader)
// start agreement on a new log entry
// rf.GetState() (term, isLeader)
// ask a Raft for its current term, and whether it thinks it is leader
// ApplyMsg
// each time a new entry is committed to the log, each Raft peer
// should send an ApplyMsg to the service (or tester)
// in the same server.
//
import (
"bytes"
"labgob"
"math/rand"
"sort"
"sync"
"sync/atomic"
"time"
)
import "labrpc"
// import "bytes"
// import "labgob"
//
// as each Raft peer becomes aware that successive log entries are
// committed, the peer should send an ApplyMsg to the service (or
// tester) on the same server, via the applyCh passed to Make(). set
// CommandValid to true to indicate that the ApplyMsg contains a newly
// committed log entry.
//
// in Lab 3 you'll want to send other kinds of messages (e.g.,
// snapshots) on the applyCh; at that point you can add fields to
// ApplyMsg, but set CommandValid to false for these other uses.
//
type ApplyMsg struct {
CommandValid bool
Command interface{}
CommandIndex int
}
const NULL = -1
// Server State
type State int
const (
Follower State = iota
Candidate
Leader
)
type Log struct {
Term int
Command interface{}
}
//
// A Go object implementing a single Raft peer.
//
type Raft struct {
mu sync.Mutex // Lock to protect shared access to this peer's state
peers []*labrpc.ClientEnd // RPC end points of all peers
persister *Persister // Object to hold this peer's persisted state
me int // this peer's index into peers[]
// Your data here (2A, 2B, 2C).
// Look at the paper's Figure 2 for a description of what
// state a Raft server must maintain.
state State
// All Server
currentTerm int
votedFor int
log []Log
commitIndex int
lastApplied int
nextIndex []int
matchIndex []int
// Channel
applyCh chan ApplyMsg
voteCh chan bool
appendLogCh chan bool
killCh chan bool
}
// return currentTerm and whether this server
// believes it is the leader.
func (rf *Raft) GetState() (int, bool) {
var term int
// Your code here (2A).
rf.mu.Lock()
defer rf.mu.Unlock()
term = rf.currentTerm
return term, rf.state == Leader
}
func (rf *Raft) GetId() int {
return rf.me
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
w := new(bytes.Buffer)
e := labgob.NewEncoder(w)
e.Encode(rf.currentTerm)
e.Encode(rf.votedFor)
e.Encode(rf.log)
data := w.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
r := bytes.NewBuffer(data)
d := labgob.NewDecoder(r)
var currentTerm int
var votedFor int
var log []Log
if d.Decode(¤tTerm) != nil || d.Decode(&votedFor) != nil || d.Decode(&log) != nil {
} else {
rf.mu.Lock()
rf.currentTerm = currentTerm
rf.votedFor = votedFor
rf.log = log
rf.mu.Unlock()
}
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
type AppendEntriesArgs struct {
Term int
LeaderId int
PrevLogIndex int
PrevLogTerm int
Entries []Log
LeaderCommit int
}
type AppendEntriesReply struct {
Term int
Success bool
// 优化点,带上冲突的log索引,leader的nextIndex可以一次回退到位
ConflictIndex int
ConflictTerm int
}
//
// example RequestVote RPC handler.
//
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
// Your code here (2A, 2B).
//All Server rule
rf.mu.Lock()
defer rf.mu.Unlock()
if args.Term > rf.currentTerm {
rf.beFollower(args.Term)
// TODO check
// send(rf.voteCh)
}
reply.Term = rf.currentTerm
reply.VoteGranted = false
if (args.Term < rf.currentTerm) || (rf.votedFor != NULL && rf.votedFor != args.CandidateId) {
// Reply false if term < currentTerm (§5.1) If votedFor is not null and not candidateId,
} else if args.LastLogTerm < rf.getLastLogTerm() || (args.LastLogTerm == rf.getLastLogTerm() &&
args.LastLogIndex < rf.getLastLogIndex()) {
//If the logs have last entries with different terms, then the log with the later term is more up-to-date.
// If the logs end with the same term, then whichever log is longer is more up-to-date.
// Reply false if candidate’s log is at least as up-to-date as receiver’s log
} else {
//grant vote
rf.votedFor = args.CandidateId
reply.VoteGranted = true
rf.state = Follower
rf.persist()
send(rf.voteCh) //because If election timeout elapses without receiving granting vote to candidate, so wake up
}
}
// AppendEntries RPC Handler
func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {
rf.mu.Lock()
defer rf.mu.Unlock()
defer send(rf.appendLogCh)
if args.Term > rf.currentTerm {
rf.beFollower(args.Term)
}
reply.Success = false
reply.ConflictIndex = 0
reply.ConflictTerm = NULL
reply.Term = rf.currentTerm
prevLogIndexTerm := -1
logSize := len(rf.log)
// 2.check log index
if args.PrevLogIndex >= 0 && args.PrevLogIndex < logSize {
prevLogIndexTerm = rf.log[args.PrevLogIndex].Term
}
// 1.check term
if prevLogIndexTerm != args.PrevLogTerm {
reply.ConflictIndex = logSize
if prevLogIndexTerm == -1 {
} else {
reply.ConflictTerm = prevLogIndexTerm
i := 0
for ; i < logSize; i++ {
if rf.log[i].Term == reply.ConflictTerm {
reply.ConflictIndex = i
break
}
}
}
return
}
if args.Term < rf.currentTerm {
return
}
index := args.PrevLogIndex
for i := 0; i < len(args.Entries); i++ {
index++
if index < logSize {
if rf.log[index].Term == args.Entries[i].Term {
continue
} else { //3. If an existing entry conflicts with a new one (same index but different terms),
rf.log = rf.log[:index] //delete the existing entry and all that follow it (§5.3)
}
}
rf.log = append(rf.log, args.Entries[i:]...) //4. Append any new entries not already in the log
rf.persist()
break
}
if args.LeaderCommit > rf.commitIndex {
rf.commitIndex = Min(args.LeaderCommit, rf.getLastLogIndex())
rf.updateLastApplied()
}
reply.Success = true
}
func Min(x int, y int) int {
if x < y {
return x
}
return y
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
rf.mu.Lock()
defer rf.mu.Unlock()
index := -1
term := rf.currentTerm
isLeader := rf.state == Leader
// Your code here (2B).
if isLeader {
DPrintf("i am leader %v, and i send command %v", rf.me, command)
index = rf.getLastLogIndex() + 1
newLog := Log{
Term: rf.currentTerm,
Command: command,
}
rf.log = append(rf.log, newLog)
rf.persist()
//fmt.Println("i am leader,", rf.me)
}
return index, term, isLeader
}
//
// the tester calls Kill() when a Raft instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (rf *Raft) Kill() {
// Your code here, if desired.
send(rf.killCh)
}
func (rf *Raft) beCandidate() {
rf.state = Candidate
rf.currentTerm++
rf.votedFor = rf.me
rf.persist()
go rf.startElection()
}
func (rf *Raft) startElection() {
rf.mu.Lock()
args := RequestVoteArgs{
Term: rf.currentTerm,
CandidateId: rf.me,
LastLogIndex: rf.getLastLogIndex(),
LastLogTerm: rf.getLastLogTerm(),
}
rf.mu.Unlock()
var votes int32 = 1 // vote myself
for i := 0; i < len(rf.peers); i++ {
if i == rf.me {
continue
}
go func(idx int) {
reply := &RequestVoteReply{}
ret := rf.sendRequestVote(idx, &args, reply)
if ret {
rf.mu.Lock()
defer rf.mu.Unlock()
if reply.Term > rf.currentTerm {
rf.beFollower(reply.Term)
// TODO send(rf.voteCh)
return
}
if rf.state != Candidate || rf.currentTerm != args.Term {
return
}
if reply.VoteGranted {
atomic.AddInt32(&votes, 1)
}
if atomic.LoadInt32(&votes) > int32(len(rf.peers)/2) {
rf.beLeader()
send(rf.voteCh)
}
}
}(i)
}
}
func (rf *Raft) getLastLogIndex() int {
return len(rf.log) - 1
}
func (rf *Raft) getLastLogTerm() int {
idx := rf.getLastLogIndex()
if idx < 0 {
return -1
}
return rf.log[idx].Term
}
func (rf *Raft) beFollower(term int) {
rf.state = Follower
rf.currentTerm = term
rf.votedFor = NULL
rf.persist()
}
func (rf *Raft) beLeader() {
if rf.state != Candidate {
return
}
rf.state = Leader
rf.nextIndex = make([]int, len(rf.peers))
rf.matchIndex = make([]int, len(rf.peers))
for i := 0; i < len(rf.nextIndex); i++ {
rf.nextIndex[i] = rf.getLastLogIndex() + 1
}
}
func (rf *Raft) startAppendLog() {
for i := 0; i < len(rf.peers); i++ {
if i == rf.me {
continue
}
go func(idx int) {
rf.mu.Lock()
if rf.state != Leader {
rf.mu.Unlock()
return
}
args := AppendEntriesArgs{
Term: rf.currentTerm,
LeaderId: rf.me,
PrevLogIndex: rf.getPrevLogIdx(idx),
PrevLogTerm: rf.getPrevLogTerm(idx),
Entries: append([]Log{}, rf.log[rf.nextIndex[idx]:]...),
LeaderCommit: rf.commitIndex,
}
rf.mu.Unlock()
reply := &AppendEntriesReply{}
ret := rf.sendAppendEntries(idx, &args, reply)
rf.mu.Lock()
defer rf.mu.Unlock()
if !ret || rf.state != Leader || rf.currentTerm != args.Term {
return
}
if reply.Term > rf.currentTerm {
rf.beFollower(reply.Term)
return
}
if reply.Success {
rf.matchIndex[idx] = args.PrevLogIndex + len(args.Entries)
rf.nextIndex[idx] = rf.matchIndex[idx] + 1
rf.updateCommitIndex()
//rf.mu.Unlock()
return
} else {
// 优化点,nextIndex批量减
tarIndex := reply.ConflictIndex
if reply.ConflictTerm != NULL {
logSize := len(rf.log)
for i := 0; i < logSize; i++ {
if rf.log[i].Term != reply.ConflictTerm {
continue
}
for i < logSize && rf.log[i].Term == reply.ConflictTerm {
i++
}
tarIndex = i
}
}
rf.nextIndex[idx] = tarIndex
//rf.mu.Unlock()
}
}(i)
}
}
func (rf *Raft) getPrevLogIdx(i int) int {
return rf.nextIndex[i] - 1
}
func (rf *Raft) getPrevLogTerm(i int) int {
prevLogIdx := rf.getPrevLogIdx(i)
if prevLogIdx < 0 {
return -1
}
return rf.log[prevLogIdx].Term
}
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply * | y) bool {
ok := rf.peers[server].Call("Raft.AppendEntries", args, reply)
return ok
}
func (rf *Raft) updateCommitIndex() {
rf.matchIndex[rf.me] = len(rf.log) - 1
copyMatchIndex := make([]int, len(rf.matchIndex))
copy(copyMatchIndex, rf.matchIndex)
sort.Sort(sort.Reverse(sort.IntSlice(copyMatchIndex)))
//sort.Ints(copyMatchIndex)
//取半数节点的日志log
N := copyMatchIndex[len(copyMatchIndex)/2]
if N > rf.commitIndex && rf.log[N].Term == rf.currentTerm {
rf.commitIndex = N
rf.updateLastApplied()
}
}
func (rf *Raft) updateLastApplied() {
for rf.lastApplied < rf.commitIndex {
rf.lastApplied++
curLog := rf.log[rf.lastApplied]
applyMsg := ApplyMsg{
CommandValid: true,
Command: curLog.Command,
CommandIndex: rf.lastApplied,
}
//fmt.Printf("i am %v, commit log %v", rf.me, rf.lastApplied)
//fmt.Println()
rf.applyCh <- applyMsg
}
}
func send(ch chan bool) {
select {
case <-ch:
default:
}
ch <- true
}
//
// the service or tester wants to create a Raft server. the ports
// of all the Raft servers (including this one) are in peers[]. this
// server's port is peers[me]. all the servers' peers[] arrays
// have the same order. persister is a place for this server to
// save its persistent state, and also initially holds the most
// recent saved state, if any. applyCh is a channel on which the
// tester or service expects Raft to send ApplyMsg messages.
// Make() must return quickly, so it should start goroutines
// for any long-running work.
//
func Make(peers []*labrpc.ClientEnd, me int,
persister *Persister, applyCh chan ApplyMsg) *Raft {
rf := &Raft{}
rf.peers = peers
rf.persister = persister
rf.me = me
// Your initialization code here (2A, 2B, 2C).
rf.state = Follower
rf.currentTerm = 0
rf.votedFor = NULL
rf.log = make([]Log, 1)
rf.commitIndex = 0
rf.lastApplied = 0
rf.nextIndex = make([]int, len(peers))
rf.matchIndex = make([]int, len(peers))
rf.applyCh = applyCh
rf.voteCh = make(chan bool, 1)
rf.appendLogCh = make(chan bool, 1)
rf.killCh = make(chan bool, 1)
// initialize from state persisted before a crash
rf.readPersist(persister.ReadRaftState())
heartBeatTime := time.Duration(100) * time.Millisecond
go func() {
for {
select {
case <-rf.killCh:
return
default:
}
electionTime := time.Duration(rand.Intn(200)+300) * time.Millisecond
rf.mu.Lock()
state := rf.state
rf.mu.Unlock()
switch state {
case Follower, Candidate:
select {
case <-rf.voteCh:
case <-rf.appendLogCh:
case <-time.After(electionTime):
rf.mu.Lock()
rf.beCandidate()
rf.mu.Unlock()
}
case Leader:
rf.startAppendLog()
time.Sleep(heartBeatTime)
}
}
}()
return rf
}
| AppendEntriesRepl | identifier_name |
raft.go | package raft
//
// this is an outline of the API that raft must expose to
// the service (or tester). see comments below for
// each of these functions for more details.
//
// rf = Make(...)
// create a new Raft server.
// rf.Start(command interface{}) (index, term, isleader)
// start agreement on a new log entry
// rf.GetState() (term, isLeader)
// ask a Raft for its current term, and whether it thinks it is leader
// ApplyMsg
// each time a new entry is committed to the log, each Raft peer
// should send an ApplyMsg to the service (or tester)
// in the same server.
//
import (
"bytes"
"labgob"
"math/rand"
"sort"
"sync"
"sync/atomic"
"time"
)
import "labrpc"
// import "bytes"
// import "labgob"
//
// as each Raft peer becomes aware that successive log entries are
// committed, the peer should send an ApplyMsg to the service (or
// tester) on the same server, via the applyCh passed to Make(). set
// CommandValid to true to indicate that the ApplyMsg contains a newly
// committed log entry.
//
// in Lab 3 you'll want to send other kinds of messages (e.g.,
// snapshots) on the applyCh; at that point you can add fields to
// ApplyMsg, but set CommandValid to false for these other uses.
//
type ApplyMsg struct {
CommandValid bool
Command interface{}
CommandIndex int
}
const NULL = -1
// Server State
type State int
const (
Follower State = iota
Candidate
Leader
)
type Log struct {
Term int
Command interface{}
}
//
// A Go object implementing a single Raft peer.
//
type Raft struct {
mu sync.Mutex // Lock to protect shared access to this peer's state
peers []*labrpc.ClientEnd // RPC end points of all peers
persister *Persister // Object to hold this peer's persisted state
me int // this peer's index into peers[]
// Your data here (2A, 2B, 2C).
// Look at the paper's Figure 2 for a description of what
// state a Raft server must maintain.
state State
// All Server
currentTerm int
votedFor int
log []Log
commitIndex int
lastApplied int
nextIndex []int
matchIndex []int
// Channel
applyCh chan ApplyMsg
voteCh chan bool
appendLogCh chan bool
killCh chan bool
}
// return currentTerm and whether this server
// believes it is the leader.
func (rf *Raft) GetState() (int, bool) {
var term int
// Your code here (2A).
rf.mu.Lock()
defer rf.mu.Unlock()
term = rf.currentTerm
return term, rf.state == Leader
}
func (rf *Raft) GetId() int {
return rf.me
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
w := new(bytes.Buffer)
e := labgob.NewEncoder(w)
e.Encode(rf.currentTerm)
e.Encode(rf.votedFor)
e.Encode(rf.log)
data := w.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
r := bytes.NewBuffer(data)
d := labgob.NewDecoder(r)
var currentTerm int
var votedFor int
var log []Log
if d.Decode(¤tTerm) != nil || d.Decode(&votedFor) != nil || d.Decode(&log) != nil {
} else {
rf.mu.Lock()
rf.currentTerm = currentTerm
rf.votedFor = votedFor
rf.log = log
rf.mu.Unlock()
}
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
type AppendEntriesArgs struct {
Term int
LeaderId int
PrevLogIndex int
PrevLogTerm int
Entries []Log
LeaderCommit int
}
type AppendEntriesReply struct {
Term int
Success bool
// 优化点,带上冲突的log索引,leader的nextIndex可以一次回退到位
ConflictIndex int
ConflictTerm int
}
//
// example RequestVote RPC handler.
//
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
// Your code here (2A, 2B).
//All Server rule
rf.mu.Lock()
defer rf.mu.Unlock()
if args.Term > rf.currentTerm {
rf.beFollower(args.Term)
// TODO check
// send(rf.voteCh)
}
reply.Term = rf.currentTerm
reply.VoteGranted = false
if (args.Term < rf.currentTerm) || (rf.votedFor != NULL && rf.votedFor != args.CandidateId) {
// Reply false if term < currentTerm (§5.1) If votedFor is not null and not candidateId,
} else if args.LastLogTerm < rf.getLastLogTerm() || (args.LastLogTerm == rf.getLastLogTerm() &&
args.LastLogIndex < rf.getLastLogIndex()) {
//If the logs have last entries with different terms, then the log with the later term is more up-to-date.
// If the logs end with the same term, then whichever log is longer is more up-to-date.
// Reply false if candidate’s log is at least as up-to-date as receiver’s log
} else {
//grant vote
rf.votedFor = args.CandidateId
reply.VoteGranted = true
rf.state = Follower
rf.persist()
send(rf.voteCh) //because If election timeout elapses without receiving granting vote to candidate, so wake up
}
}
// AppendEntries RPC Handler
func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {
rf.mu.Lock()
defer rf.mu.Unlock()
defer send(rf.appendLogCh)
if args.Term > rf.currentTerm {
rf.beFollower(args.Term)
}
reply.Success = false
reply.ConflictIndex = 0
reply.ConflictTerm = NULL
reply.Term = rf.currentTerm
prevLogIndexTerm := -1
logSize := len(rf.log)
// 2.check log index
if args.PrevLogIndex >= 0 && args.PrevLogIndex < logSize {
prevLogIndexTerm = rf.log[args.PrevLogIndex].Term
}
// 1.check term
if prevLogIndexTerm != args.PrevLogTerm {
reply.ConflictIndex = logSize
if prevLogIndexTerm == -1 {
} else {
reply.ConflictTerm = prevLogIndexTerm
i := 0
for ; i < logSize; i++ {
if rf.log[i].Term == reply.ConflictTerm {
reply.ConflictIndex = i
break
} | }
if args.Term < rf.currentTerm {
return
}
index := args.PrevLogIndex
for i := 0; i < len(args.Entries); i++ {
index++
if index < logSize {
if rf.log[index].Term == args.Entries[i].Term {
continue
} else { //3. If an existing entry conflicts with a new one (same index but different terms),
rf.log = rf.log[:index] //delete the existing entry and all that follow it (§5.3)
}
}
rf.log = append(rf.log, args.Entries[i:]...) //4. Append any new entries not already in the log
rf.persist()
break
}
if args.LeaderCommit > rf.commitIndex {
rf.commitIndex = Min(args.LeaderCommit, rf.getLastLogIndex())
rf.updateLastApplied()
}
reply.Success = true
}
func Min(x int, y int) int {
if x < y {
return x
}
return y
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
rf.mu.Lock()
defer rf.mu.Unlock()
index := -1
term := rf.currentTerm
isLeader := rf.state == Leader
// Your code here (2B).
if isLeader {
DPrintf("i am leader %v, and i send command %v", rf.me, command)
index = rf.getLastLogIndex() + 1
newLog := Log{
Term: rf.currentTerm,
Command: command,
}
rf.log = append(rf.log, newLog)
rf.persist()
//fmt.Println("i am leader,", rf.me)
}
return index, term, isLeader
}
//
// the tester calls Kill() when a Raft instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (rf *Raft) Kill() {
// Your code here, if desired.
send(rf.killCh)
}
func (rf *Raft) beCandidate() {
rf.state = Candidate
rf.currentTerm++
rf.votedFor = rf.me
rf.persist()
go rf.startElection()
}
func (rf *Raft) startElection() {
rf.mu.Lock()
args := RequestVoteArgs{
Term: rf.currentTerm,
CandidateId: rf.me,
LastLogIndex: rf.getLastLogIndex(),
LastLogTerm: rf.getLastLogTerm(),
}
rf.mu.Unlock()
var votes int32 = 1 // vote myself
for i := 0; i < len(rf.peers); i++ {
if i == rf.me {
continue
}
go func(idx int) {
reply := &RequestVoteReply{}
ret := rf.sendRequestVote(idx, &args, reply)
if ret {
rf.mu.Lock()
defer rf.mu.Unlock()
if reply.Term > rf.currentTerm {
rf.beFollower(reply.Term)
// TODO send(rf.voteCh)
return
}
if rf.state != Candidate || rf.currentTerm != args.Term {
return
}
if reply.VoteGranted {
atomic.AddInt32(&votes, 1)
}
if atomic.LoadInt32(&votes) > int32(len(rf.peers)/2) {
rf.beLeader()
send(rf.voteCh)
}
}
}(i)
}
}
func (rf *Raft) getLastLogIndex() int {
return len(rf.log) - 1
}
func (rf *Raft) getLastLogTerm() int {
idx := rf.getLastLogIndex()
if idx < 0 {
return -1
}
return rf.log[idx].Term
}
func (rf *Raft) beFollower(term int) {
rf.state = Follower
rf.currentTerm = term
rf.votedFor = NULL
rf.persist()
}
func (rf *Raft) beLeader() {
if rf.state != Candidate {
return
}
rf.state = Leader
rf.nextIndex = make([]int, len(rf.peers))
rf.matchIndex = make([]int, len(rf.peers))
for i := 0; i < len(rf.nextIndex); i++ {
rf.nextIndex[i] = rf.getLastLogIndex() + 1
}
}
func (rf *Raft) startAppendLog() {
for i := 0; i < len(rf.peers); i++ {
if i == rf.me {
continue
}
go func(idx int) {
rf.mu.Lock()
if rf.state != Leader {
rf.mu.Unlock()
return
}
args := AppendEntriesArgs{
Term: rf.currentTerm,
LeaderId: rf.me,
PrevLogIndex: rf.getPrevLogIdx(idx),
PrevLogTerm: rf.getPrevLogTerm(idx),
Entries: append([]Log{}, rf.log[rf.nextIndex[idx]:]...),
LeaderCommit: rf.commitIndex,
}
rf.mu.Unlock()
reply := &AppendEntriesReply{}
ret := rf.sendAppendEntries(idx, &args, reply)
rf.mu.Lock()
defer rf.mu.Unlock()
if !ret || rf.state != Leader || rf.currentTerm != args.Term {
return
}
if reply.Term > rf.currentTerm {
rf.beFollower(reply.Term)
return
}
if reply.Success {
rf.matchIndex[idx] = args.PrevLogIndex + len(args.Entries)
rf.nextIndex[idx] = rf.matchIndex[idx] + 1
rf.updateCommitIndex()
//rf.mu.Unlock()
return
} else {
// 优化点,nextIndex批量减
tarIndex := reply.ConflictIndex
if reply.ConflictTerm != NULL {
logSize := len(rf.log)
for i := 0; i < logSize; i++ {
if rf.log[i].Term != reply.ConflictTerm {
continue
}
for i < logSize && rf.log[i].Term == reply.ConflictTerm {
i++
}
tarIndex = i
}
}
rf.nextIndex[idx] = tarIndex
//rf.mu.Unlock()
}
}(i)
}
}
func (rf *Raft) getPrevLogIdx(i int) int {
return rf.nextIndex[i] - 1
}
func (rf *Raft) getPrevLogTerm(i int) int {
prevLogIdx := rf.getPrevLogIdx(i)
if prevLogIdx < 0 {
return -1
}
return rf.log[prevLogIdx].Term
}
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {
ok := rf.peers[server].Call("Raft.AppendEntries", args, reply)
return ok
}
func (rf *Raft) updateCommitIndex() {
rf.matchIndex[rf.me] = len(rf.log) - 1
copyMatchIndex := make([]int, len(rf.matchIndex))
copy(copyMatchIndex, rf.matchIndex)
sort.Sort(sort.Reverse(sort.IntSlice(copyMatchIndex)))
//sort.Ints(copyMatchIndex)
//取半数节点的日志log
N := copyMatchIndex[len(copyMatchIndex)/2]
if N > rf.commitIndex && rf.log[N].Term == rf.currentTerm {
rf.commitIndex = N
rf.updateLastApplied()
}
}
func (rf *Raft) updateLastApplied() {
for rf.lastApplied < rf.commitIndex {
rf.lastApplied++
curLog := rf.log[rf.lastApplied]
applyMsg := ApplyMsg{
CommandValid: true,
Command: curLog.Command,
CommandIndex: rf.lastApplied,
}
//fmt.Printf("i am %v, commit log %v", rf.me, rf.lastApplied)
//fmt.Println()
rf.applyCh <- applyMsg
}
}
func send(ch chan bool) {
select {
case <-ch:
default:
}
ch <- true
}
//
// the service or tester wants to create a Raft server. the ports
// of all the Raft servers (including this one) are in peers[]. this
// server's port is peers[me]. all the servers' peers[] arrays
// have the same order. persister is a place for this server to
// save its persistent state, and also initially holds the most
// recent saved state, if any. applyCh is a channel on which the
// tester or service expects Raft to send ApplyMsg messages.
// Make() must return quickly, so it should start goroutines
// for any long-running work.
//
func Make(peers []*labrpc.ClientEnd, me int,
persister *Persister, applyCh chan ApplyMsg) *Raft {
rf := &Raft{}
rf.peers = peers
rf.persister = persister
rf.me = me
// Your initialization code here (2A, 2B, 2C).
rf.state = Follower
rf.currentTerm = 0
rf.votedFor = NULL
rf.log = make([]Log, 1)
rf.commitIndex = 0
rf.lastApplied = 0
rf.nextIndex = make([]int, len(peers))
rf.matchIndex = make([]int, len(peers))
rf.applyCh = applyCh
rf.voteCh = make(chan bool, 1)
rf.appendLogCh = make(chan bool, 1)
rf.killCh = make(chan bool, 1)
// initialize from state persisted before a crash
rf.readPersist(persister.ReadRaftState())
heartBeatTime := time.Duration(100) * time.Millisecond
go func() {
for {
select {
case <-rf.killCh:
return
default:
}
electionTime := time.Duration(rand.Intn(200)+300) * time.Millisecond
rf.mu.Lock()
state := rf.state
rf.mu.Unlock()
switch state {
case Follower, Candidate:
select {
case <-rf.voteCh:
case <-rf.appendLogCh:
case <-time.After(electionTime):
rf.mu.Lock()
rf.beCandidate()
rf.mu.Unlock()
}
case Leader:
rf.startAppendLog()
time.Sleep(heartBeatTime)
}
}
}()
return rf
} | }
}
return | random_line_split |
raft.go | package raft
//
// this is an outline of the API that raft must expose to
// the service (or tester). see comments below for
// each of these functions for more details.
//
// rf = Make(...)
// create a new Raft server.
// rf.Start(command interface{}) (index, term, isleader)
// start agreement on a new log entry
// rf.GetState() (term, isLeader)
// ask a Raft for its current term, and whether it thinks it is leader
// ApplyMsg
// each time a new entry is committed to the log, each Raft peer
// should send an ApplyMsg to the service (or tester)
// in the same server.
//
import (
"bytes"
"labgob"
"math/rand"
"sort"
"sync"
"sync/atomic"
"time"
)
import "labrpc"
// import "bytes"
// import "labgob"
//
// as each Raft peer becomes aware that successive log entries are
// committed, the peer should send an ApplyMsg to the service (or
// tester) on the same server, via the applyCh passed to Make(). set
// CommandValid to true to indicate that the ApplyMsg contains a newly
// committed log entry.
//
// in Lab 3 you'll want to send other kinds of messages (e.g.,
// snapshots) on the applyCh; at that point you can add fields to
// ApplyMsg, but set CommandValid to false for these other uses.
//
type ApplyMsg struct {
CommandValid bool
Command interface{}
CommandIndex int
}
const NULL = -1
// Server State
type State int
const (
Follower State = iota
Candidate
Leader
)
type Log struct {
Term int
Command interface{}
}
//
// A Go object implementing a single Raft peer.
//
type Raft struct {
mu sync.Mutex // Lock to protect shared access to this peer's state
peers []*labrpc.ClientEnd // RPC end points of all peers
persister *Persister // Object to hold this peer's persisted state
me int // this peer's index into peers[]
// Your data here (2A, 2B, 2C).
// Look at the paper's Figure 2 for a description of what
// state a Raft server must maintain.
state State
// All Server
currentTerm int
votedFor int
log []Log
commitIndex int
lastApplied int
nextIndex []int
matchIndex []int
// Channel
applyCh chan ApplyMsg
voteCh chan bool
appendLogCh chan bool
killCh chan bool
}
// return currentTerm and whether this server
// believes it is the leader.
func (rf *Raft) GetState() (int, bool) {
var term int
// Your code here (2A).
rf.mu.Lock()
defer rf.mu.Unlock()
term = rf.currentTerm
return term, rf.state == Leader
}
func (rf *Raft) GetId() int {
return rf.me
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
w := new(bytes.Buffer)
e := labgob.NewEncoder(w)
e.Encode(rf.currentTerm)
e.Encode(rf.votedFor)
e.Encode(rf.log)
data := w.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
r := bytes.NewBuffer(data)
d := labgob.NewDecoder(r)
var currentTerm int
var votedFor int
var log []Log
if d.Decode(¤tTerm) != nil || d.Decode(&votedFor) != nil || d.Decode(&log) != nil {
} else {
rf.mu.Lock()
rf.currentTerm = currentTerm
rf.votedFor = votedFor
rf.log = log
rf.mu.Unlock()
}
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
// RequestVoteArgs carries a candidate's vote request (Figure 2).
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int // candidate's term
CandidateId int // candidate requesting the vote
LastLogIndex int // index of candidate's last log entry
LastLogTerm int // term of candidate's last log entry
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int // receiver's currentTerm, for the candidate to update itself
VoteGranted bool // true means the candidate received this vote
}
// AppendEntriesArgs carries a leader's heartbeat/replication request.
type AppendEntriesArgs struct {
Term int // leader's term
LeaderId int // so followers can learn the leader
PrevLogIndex int // index of the entry immediately preceding Entries
PrevLogTerm int // term of the PrevLogIndex entry
Entries []Log // entries to store (empty for heartbeat)
LeaderCommit int // leader's commitIndex
}
// AppendEntriesReply is the follower's response.
type AppendEntriesReply struct {
Term int
Success bool
// Optimization: return the conflicting log index/term so the leader's
// nextIndex can back up in a single step instead of one entry at a time.
ConflictIndex int
ConflictTerm int
}
//
// example RequestVote RPC handler.
//
// RequestVote RPC handler: grants a vote iff the candidate's term is
// current, we have not already voted for someone else this term, and
// the candidate's log is at least as up-to-date as ours (§5.2, §5.4).
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
// Your code here (2A, 2B).
//All Server rule
rf.mu.Lock()
defer rf.mu.Unlock()
// "All Servers" rule: a higher term always demotes us to follower
// (and clears votedFor, so the vote check below can succeed).
if args.Term > rf.currentTerm {
rf.beFollower(args.Term)
// TODO check
// send(rf.voteCh)
}
reply.Term = rf.currentTerm
reply.VoteGranted = false
if (args.Term < rf.currentTerm) || (rf.votedFor != NULL && rf.votedFor != args.CandidateId) {
// Reply false if term < currentTerm (§5.1), or if votedFor is set and is not candidateId.
} else if args.LastLogTerm < rf.getLastLogTerm() || (args.LastLogTerm == rf.getLastLogTerm() &&
args.LastLogIndex < rf.getLastLogIndex()) {
//If the logs have last entries with different terms, then the log with the later term is more up-to-date.
// If the logs end with the same term, then whichever log is longer is more up-to-date.
// Reply false if candidate's log is NOT at least as up-to-date as receiver's log.
} else {
//grant vote
rf.votedFor = args.CandidateId
reply.VoteGranted = true
rf.state = Follower
rf.persist()
send(rf.voteCh) // granting a vote resets the election timer in the main loop
}
}
// AppendEntries RPC Handler
// AppendEntries RPC Handler: heartbeat + log replication (Figure 2).
// On a PrevLog mismatch it fills ConflictIndex/ConflictTerm so the
// leader can back nextIndex up in one round trip.
// NOTE(review): the stale-term rejection (args.Term < rf.currentTerm)
// happens after the log-consistency check, so a stale leader can still
// receive conflict hints — confirm this ordering is intended.
func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {
rf.mu.Lock()
defer rf.mu.Unlock()
defer send(rf.appendLogCh) // any append (even a rejected one) resets the election timer
if args.Term > rf.currentTerm {
rf.beFollower(args.Term)
}
reply.Success = false
reply.ConflictIndex = 0
reply.ConflictTerm = NULL
reply.Term = rf.currentTerm
prevLogIndexTerm := -1
logSize := len(rf.log)
// 2.check log index
if args.PrevLogIndex >= 0 && args.PrevLogIndex < logSize {
prevLogIndexTerm = rf.log[args.PrevLogIndex].Term
}
// 1.check term
if prevLogIndexTerm != args.PrevLogTerm {
reply.ConflictIndex = logSize
if prevLogIndexTerm == -1 {
// PrevLogIndex is past our log: ConflictIndex = logSize tells the
// leader how long our log actually is.
} else {
// Term mismatch: report the conflicting term and the first index
// holding that term, so the leader can skip the whole term.
reply.ConflictTerm = prevLogIndexTerm
i := 0
for ; i < logSize; i++ {
if rf.log[i].Term == reply.ConflictTerm {
reply.ConflictIndex = i
break
}
}
}
return
}
if args.Term < rf.currentTerm {
return
}
index := args.PrevLogIndex
for i := 0; i < len(args.Entries); i++ {
index++
if index < logSize {
if rf.log[index].Term == args.Entries[i].Term {
continue
} else { //3. If an existing entry conflicts with a new one (same index but different terms),
rf.log = rf.log[:index] //delete the existing entry and all that follow it (§5.3)
}
}
rf.log = append(rf.log, args.Entries[i:]...) //4. Append any new entries not already in the log
rf.persist()
break
}
if args.LeaderCommit > rf.commitIndex {
// 5. Advance commitIndex and apply newly committed entries.
rf.commitIndex = Min(args.LeaderCommit, rf.getLastLogIndex())
rf.updateLastApplied()
}
reply.Success = true
}
// Min returns the smaller of two ints.
func Min(x int, y int) int {
	if y < x {
		return y
	}
	return x
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
// sendRequestVote issues a Raft.RequestVote RPC to the given peer and
// reports whether a reply was received (see labrpc.Call semantics: a
// false return may mean a dead server, a lost request, or a lost reply).
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
	return rf.peers[server].Call("Raft.RequestVote", args, reply)
}
//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
// Start submits a command for agreement. If this peer is the leader,
// the command is appended to its log and the returned index is where it
// will appear if it ever commits; otherwise isLeader is false and the
// command is dropped. Returns immediately — commitment is asynchronous.
func (rf *Raft) Start(command interface{}) (int, int, bool) {
rf.mu.Lock()
defer rf.mu.Unlock()
index := -1
term := rf.currentTerm
isLeader := rf.state == Leader
// Your code here (2B).
if isLeader {
DPrintf("i am leader %v, and i send command %v", rf.me, command)
index = rf.getLastLogIndex() + 1
newLog := Log{
Term: rf.currentTerm,
Command: command,
}
rf.log = append(rf.log, newLog)
rf.persist()
//fmt.Println("i am leader,", rf.me)
}
return index, term, isLeader
}
//
// the tester calls Kill() when a Raft instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
// Kill signals the long-running loop started in Make to exit via killCh.
func (rf *Raft) Kill() {
// Your code here, if desired.
send(rf.killCh)
}
// beCandidate starts a new election: bump the term, vote for ourselves,
// persist, and run the election in the background so the caller's lock
// is not held across RPCs. Caller must hold rf.mu (the main loop does).
func (rf *Raft) beCandidate() {
rf.state = Candidate
rf.currentTerm++
rf.votedFor = rf.me
rf.persist()
go rf.startElection()
}
// startElection snapshots the vote request under the lock, then asks
// every other peer for a vote concurrently. A majority of grants (self
// included) promotes us to leader; a higher reply term demotes us.
func (rf *Raft) startElection() {
rf.mu.Lock()
args := RequestVoteArgs{
Term: rf.currentTerm,
CandidateId: rf.me,
LastLogIndex: rf.getLastLogIndex(),
LastLogTerm: rf.getLastLogTerm(),
}
rf.mu.Unlock()
var votes int32 = 1 // vote myself
for i := 0; i < len(rf.peers); i++ {
if i == rf.me {
continue
}
go func(idx int) {
reply := &RequestVoteReply{}
ret := rf.sendRequestVote(idx, &args, reply)
if ret {
rf.mu.Lock()
defer rf.mu.Unlock()
if reply.Term > rf.currentTerm {
rf.beFollower(reply.Term)
// TODO send(rf.voteCh)
return
}
// Drop stale replies: we may have changed role or term since
// this RPC was launched.
if rf.state != Candidate || rf.currentTerm != args.Term {
return
}
if reply.VoteGranted {
atomic.AddInt32(&votes, 1)
}
if atomic.LoadInt32(&votes) > int32(len(rf.peers)/2) {
rf.beLeader()
send(rf.voteCh) // wake the main loop so heartbeats start promptly
}
}
}(i)
}
}
// getLastLogIndex returns the index of the newest log entry. Make seeds
// the log with a sentinel at index 0, so this is normally >= 0.
// Callers are expected to hold rf.mu.
func (rf *Raft) getLastLogIndex() int {
	size := len(rf.log)
	return size - 1
}
// getLastLogTerm returns the term of the last log entry, or -1 when the
// log is empty. Callers are expected to hold rf.mu.
func (rf *Raft) getLastLogTerm() int {
	last := rf.getLastLogIndex()
	if last >= 0 {
		return rf.log[last].Term
	}
	return -1
}
// beFollower demotes this peer to follower at the given (higher) term,
// clearing its vote and persisting the new term. Caller holds rf.mu.
func (rf *Raft) beFollower(term int) {
rf.state = Follower
rf.currentTerm = term
rf.votedFor = NULL
rf.persist()
}
// beLeader promotes a winning Candidate to Leader and reinitializes the
// per-peer replication state: nextIndex just past our last entry,
// matchIndex zeroed (Figure 2). No-op unless currently Candidate, which
// guards against stale election goroutines. Caller holds rf.mu.
func (rf *Raft) beLeader() {
if rf.state != Candidate {
return
}
rf.state = Leader
rf.nextIndex = make([]int, len(rf.peers))
rf.matchIndex = make([]int, len(rf.peers))
for i := 0; i < len(rf.nextIndex); i++ {
rf.nextIndex[i] = rf.getLastLogIndex() + 1
}
}
// startAppendLog sends one round of AppendEntries (heartbeat and/or
// replication) to every other peer, each in its own goroutine. Success
// advances matchIndex/nextIndex and may commit; failure backs nextIndex
// up using the follower's conflict hints.
func (rf *Raft) startAppendLog() {
for i := 0; i < len(rf.peers); i++ {
if i == rf.me {
continue
}
go func(idx int) {
rf.mu.Lock()
if rf.state != Leader {
rf.mu.Unlock()
return
}
// Copy the entries slice so the RPC never aliases rf.log.
args := AppendEntriesArgs{
Term: rf.currentTerm,
LeaderId: rf.me,
PrevLogIndex: rf.getPrevLogIdx(idx),
PrevLogTerm: rf.getPrevLogTerm(idx),
Entries: append([]Log{}, rf.log[rf.nextIndex[idx]:]...),
LeaderCommit: rf.commitIndex,
}
rf.mu.Unlock()
reply := &AppendEntriesReply{}
ret := rf.sendAppendEntries(idx, &args, reply)
rf.mu.Lock()
defer rf.mu.Unlock()
// Drop lost replies and replies that arrive after we changed role/term.
if !ret || rf.state != Leader || rf.currentTerm != args.Term {
return
}
if reply.Term > rf.currentTerm {
rf.beFollower(reply.Term)
return
}
if reply.Success {
rf.matchIndex[idx] = args.PrevLogIndex + len(args.Entries)
rf.nextIndex[idx] = rf.matchIndex[idx] + 1
rf.updateCommitIndex()
//rf.mu.Unlock()
return
} else {
// Optimization: back nextIndex up in bulk using the conflict hints.
tarIndex := reply.ConflictIndex
if reply.ConflictTerm != NULL {
// If we also have entries of ConflictTerm, retry from just past
// our last entry of that term; otherwise use ConflictIndex.
logSize := len(rf.log)
for i := 0; i < logSize; i++ {
if rf.log[i].Term != reply.ConflictTerm {
continue
}
for i < logSize && rf.log[i].Term == reply.ConflictTerm {
i++
}
tarIndex = i
}
// NOTE(review): the scan keeps going after advancing past the
// run; terms are non-decreasing in a Raft log so the result is
// still the index past the last ConflictTerm entry — confirm.
}
rf.nextIndex[idx] = tarIndex
//rf.mu.Unlock()
}
}(i)
}
}
// getPrevLogIdx returns the index immediately before the next entry we
// would send to peer i. Callers are expected to hold rf.mu.
func (rf *Raft) getPrevLogIdx(i int) int {
return rf.nextIndex[i] - 1
}
func (rf *Raft) getPrevLogTer | f.getPrevLogIdx(i)
if prevLogIdx < 0 {
return -1
}
return rf.log[prevLogIdx].Term
}
// sendAppendEntries issues a Raft.AppendEntries RPC to the given peer
// and reports whether a reply was received (labrpc.Call semantics).
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {
	return rf.peers[server].Call("Raft.AppendEntries", args, reply)
}
// updateCommitIndex advances commitIndex to the highest index N that a
// majority of peers have replicated, but only if log[N] is from the
// current term (§5.4.2), then applies the newly committed entries.
// Caller holds rf.mu.
func (rf *Raft) updateCommitIndex() {
rf.matchIndex[rf.me] = len(rf.log) - 1
copyMatchIndex := make([]int, len(rf.matchIndex))
copy(copyMatchIndex, rf.matchIndex)
sort.Sort(sort.Reverse(sort.IntSlice(copyMatchIndex)))
//sort.Ints(copyMatchIndex)
// The middle element of the descending sort is the largest index
// known to be replicated on a majority of peers.
N := copyMatchIndex[len(copyMatchIndex)/2]
if N > rf.commitIndex && rf.log[N].Term == rf.currentTerm {
rf.commitIndex = N
rf.updateLastApplied()
}
}
// updateLastApplied delivers every entry in (lastApplied, commitIndex]
// on applyCh, in order. Caller holds rf.mu.
// NOTE(review): the channel send happens while rf.mu is held; if the
// applyCh consumer ever blocks this can deadlock — confirm the tester
// always drains applyCh promptly.
func (rf *Raft) updateLastApplied() {
for rf.lastApplied < rf.commitIndex {
rf.lastApplied++
curLog := rf.log[rf.lastApplied]
applyMsg := ApplyMsg{
CommandValid: true,
Command: curLog.Command,
CommandIndex: rf.lastApplied,
}
//fmt.Printf("i am %v, commit log %v", rf.me, rf.lastApplied)
//fmt.Println()
rf.applyCh <- applyMsg
}
}
// send posts a coalescing signal on a 1-buffered channel: any stale
// pending signal is drained first so the following send does not block
// in the common case.
// NOTE(review): two concurrent senders can both pass the drain, making
// one block until the receiver reads — harmless for the timer channels
// used here, but verify before reusing elsewhere.
func send(ch chan bool) {
select {
case <-ch:
default:
}
ch <- true
}
//
// the service or tester wants to create a Raft server. the ports
// of all the Raft servers (including this one) are in peers[]. this
// server's port is peers[me]. all the servers' peers[] arrays
// have the same order. persister is a place for this server to
// save its persistent state, and also initially holds the most
// recent saved state, if any. applyCh is a channel on which the
// tester or service expects Raft to send ApplyMsg messages.
// Make() must return quickly, so it should start goroutines
// for any long-running work.
//
func Make(peers []*labrpc.ClientEnd, me int,
persister *Persister, applyCh chan ApplyMsg) *Raft {
rf := &Raft{}
rf.peers = peers
rf.persister = persister
rf.me = me
// Your initialization code here (2A, 2B, 2C).
rf.state = Follower
rf.currentTerm = 0
rf.votedFor = NULL
rf.log = make([]Log, 1) // index 0 holds a sentinel entry
rf.commitIndex = 0
rf.lastApplied = 0
rf.nextIndex = make([]int, len(peers))
rf.matchIndex = make([]int, len(peers))
rf.applyCh = applyCh
// Signal channels are 1-buffered so send() can coalesce signals.
rf.voteCh = make(chan bool, 1)
rf.appendLogCh = make(chan bool, 1)
rf.killCh = make(chan bool, 1)
// initialize from state persisted before a crash
rf.readPersist(persister.ReadRaftState())
heartBeatTime := time.Duration(100) * time.Millisecond
// Main loop: as Follower/Candidate run the randomized election timer;
// as Leader send heartbeats every heartBeatTime; exit on killCh.
go func() {
for {
select {
case <-rf.killCh:
return
default:
}
// Randomized 300-500ms election timeout (§5.2).
electionTime := time.Duration(rand.Intn(200)+300) * time.Millisecond
rf.mu.Lock()
state := rf.state
rf.mu.Unlock()
switch state {
case Follower, Candidate:
select {
case <-rf.voteCh: // vote activity resets the timer
case <-rf.appendLogCh: // heartbeat/append resets the timer
case <-time.After(electionTime):
rf.mu.Lock()
rf.beCandidate()
rf.mu.Unlock()
}
case Leader:
rf.startAppendLog()
time.Sleep(heartBeatTime)
}
}
}()
return rf
}
| m(i int) int {
prevLogIdx := r | identifier_body |
raft.go | package raft
//
// this is an outline of the API that raft must expose to
// the service (or tester). see comments below for
// each of these functions for more details.
//
// rf = Make(...)
// create a new Raft server.
// rf.Start(command interface{}) (index, term, isleader)
// start agreement on a new log entry
// rf.GetState() (term, isLeader)
// ask a Raft for its current term, and whether it thinks it is leader
// ApplyMsg
// each time a new entry is committed to the log, each Raft peer
// should send an ApplyMsg to the service (or tester)
// in the same server.
//
import (
"bytes"
"labgob"
"math/rand"
"sort"
"sync"
"sync/atomic"
"time"
)
import "labrpc"
// import "bytes"
// import "labgob"
//
// as each Raft peer becomes aware that successive log entries are
// committed, the peer should send an ApplyMsg to the service (or
// tester) on the same server, via the applyCh passed to Make(). set
// CommandValid to true to indicate that the ApplyMsg contains a newly
// committed log entry.
//
// in Lab 3 you'll want to send other kinds of messages (e.g.,
// snapshots) on the applyCh; at that point you can add fields to
// ApplyMsg, but set CommandValid to false for these other uses.
//
type ApplyMsg struct {
CommandValid bool
Command interface{}
CommandIndex int
}
const NULL = -1
// Server State
type State int
const (
Follower State = iota
Candidate
Leader
)
type Log struct {
Term int
Command interface{}
}
//
// A Go object implementing a single Raft peer.
//
type Raft struct {
mu sync.Mutex // Lock to protect shared access to this peer's state
peers []*labrpc.ClientEnd // RPC end points of all peers
persister *Persister // Object to hold this peer's persisted state
me int // this peer's index into peers[]
// Your data here (2A, 2B, 2C).
// Look at the paper's Figure 2 for a description of what
// state a Raft server must maintain.
state State
// All Server
currentTerm int
votedFor int
log []Log
commitIndex int
lastApplied int
nextIndex []int
matchIndex []int
// Channel
applyCh chan ApplyMsg
voteCh chan bool
appendLogCh chan bool
killCh chan bool
}
// return currentTerm and whether this server
// believes it is the leader.
func (rf *Raft) GetState() (int, bool) {
var term int
// Your code here (2A).
rf.mu.Lock()
defer rf.mu.Unlock()
term = rf.currentTerm
return term, rf.state == Leader
}
func (rf *Raft) GetId() int {
return rf.me
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
w := new(bytes.Buffer)
e := labgob.NewEncoder(w)
e.Encode(rf.currentTerm)
e.Encode(rf.votedFor)
e.Encode(rf.log)
data := w.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
r := bytes.NewBuffer(data)
d := labgob.NewDecoder(r)
var currentTerm int
var votedFor int
var log []Log
if d.Decode(¤tTerm) != nil || d.Decode(&votedFor) != nil || d.Decode(&log) != nil {
} else {
rf.mu.Lock()
rf.currentTerm = currentTerm
rf.votedFor = votedFor
rf.log = log
rf.mu.Unlock()
}
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
type AppendEntriesArgs struct {
Term int
LeaderId int
PrevLogIndex int
PrevLogTerm int
Entries []Log
LeaderCommit int
}
type AppendEntriesReply struct {
Term int
Success bool
// 优化点,带上冲突的log索引,leader的nextIndex可以一次回退到位
ConflictIndex int
ConflictTerm int
}
//
// example RequestVote RPC handler.
//
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
// Your code here (2A, 2B).
//All Server rule
rf.mu.Lock()
defer rf.mu.Unlock()
if args.Term > rf.currentTerm {
rf.beFollower(args.Term)
// TODO check
// send(rf.voteCh)
}
reply.Term = rf.currentTerm
reply.VoteGranted = false
if (args.Term < rf.currentTerm) || (rf.votedFor != NULL && rf.votedFor != args.CandidateId) {
// Reply false if term < currentTerm (§5.1) If votedFor is not null and not candidateId,
} else if args.LastLogTerm < rf.getLastLogTerm() || (args.LastLogTerm == rf.getLastLogTerm() &&
args.LastLogIndex < rf.getLastLogIndex()) {
//If the logs have last entries with different terms, then the log with the later term is more up-to-date.
// If the logs end with the same term, then whichever log is longer is more up-to-date.
// Reply false if candidate’s log is at least as up-to-date as receiver’s log
} else {
//grant vote
rf.votedFor = args.CandidateId
reply.VoteGranted = true
rf.state = Follower
rf.persist()
send(rf.voteCh) //because If election timeout elapses without receiving granting vote to candidate, so wake up
}
}
// AppendEntries RPC Handler
func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {
rf.mu.Lock()
defer rf.mu.Unlock()
defer send(rf.appendLogCh)
if args.Term > rf.currentTerm {
rf.beFollower(args.Term)
}
reply.Success = false
reply.ConflictIndex = 0
reply.ConflictTerm = NULL
reply.Term = rf.currentTerm
prevLogIndexTerm := -1
logSize := len(rf.log)
// 2.check log index
if args.PrevLogIndex >= 0 && args.PrevLogIndex < logSize {
prevLogIndexTerm = rf.log[args.PrevLogIndex].Term
}
// 1.check term
if prevLogIndexTerm != args.PrevLogTerm {
reply.ConflictIndex = logSize
if prevLogIndexTerm == -1 {
} else {
reply.ConflictTerm = prevLogIndexTerm
i := 0
for ; i < logSize; i++ {
if rf.log[i].Term == reply.ConflictTerm {
reply.ConflictIndex = i
break
}
}
}
return
}
if args.Term < rf.currentTerm {
return
}
index := args.PrevLogIndex
for i := 0; i < len(args.Entries); i++ {
index++
if index < logSize {
if rf.log[index].Term == args.Entries[i].Term {
continue
} else { //3. If an existing entry conflicts with a new one (same index but different terms),
rf.log = rf.log[:index] //delete the existing entry and all that follow it (§5.3)
}
}
rf.log = append(rf.log, args.Entries[i:]...) //4. Append any new entries not already in the log
rf.persist()
break
}
if args.LeaderCommit > rf.commitIndex {
rf.commitIndex = Min(args.LeaderCommit, rf.getLastLogIndex())
rf.updateLastApplied()
}
reply.Success = true
}
// Min returns the smaller of two ints.
func Min(x int, y int) int {
	if y < x {
		return y
	}
	return x
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
rf.mu.Lock()
defer rf.mu.Unlock()
index := -1
term := rf.currentTerm
isLeader := rf.state == Leader
// Your code here (2B).
if isLeader {
DPrintf("i am leader %v, and i send command %v", rf.me, command)
index = rf.getLastLogIndex() + 1
newLog := Log{
Term: rf.currentTerm,
Command: command,
}
rf.log = append(rf.log, newLog)
rf.persist()
//fmt.Println("i am leader,", rf.me)
}
return index, term, isLeader
}
//
// the tester calls Kill() when a Raft instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (rf *Raft) Kill() {
// Your code here, if desired.
send(rf.killCh)
}
func (rf *Raft) beCandidate() {
rf.state = Candidate
rf.currentTerm++
rf.votedFor = rf.me
rf.persist()
go rf.startElection()
}
func (rf *Raft) startElection() {
rf.mu.Lock()
args := RequestVoteArgs{
Term: rf.currentTerm,
CandidateId: rf.me,
LastLogIndex: rf.getLastLogIndex(),
LastLogTerm: rf.getLastLogTerm(),
}
rf.mu.Unlock()
var votes int32 = 1 // vote myself
for i := 0; i < len(rf.peers); i++ {
if i == rf.me {
continue
}
go func(idx int) {
reply := &RequestVoteReply{}
ret := rf.sendRequestVote(idx, &args, reply)
if ret {
rf.mu.Lock()
defer rf.mu.Unlock()
if reply.Term > rf.currentTerm {
rf.beFollower(reply.Term)
// TODO send(rf.voteCh)
return
}
if rf.state != Candidate || rf.currentTerm != args.Term {
return
}
if reply.VoteGranted {
atomic.AddInt32(&votes, 1)
}
if atomic.LoadInt32(&votes) > int32(len(rf.peers)/2) {
rf.beLeader()
send(rf.voteCh)
}
}
}(i)
}
}
func (rf *Raft) getLastLogIndex() int {
return len(rf.log) - 1
}
func (rf *Raft) getLastLogTerm() int {
idx := rf.getLastLogIndex()
if idx < 0 {
return -1
}
return rf.log[idx].Term
}
func (rf *Raft) beFollower(term int) {
rf.state = Follower
rf.currentTerm = term
rf.votedFor = NULL
rf.persist()
}
func (rf *Raft) beLeader() {
if rf.state != Candidate {
return
}
rf.state = Leader
rf.nextIndex = make([]int, len(rf.peers))
rf.matchIndex = make([]int, len(rf.peers))
for i := 0; i < len(rf.nextIndex); i++ {
rf.nextIndex[i] = rf.getLastLogIndex() + 1
| 0; i < len(rf.peers); i++ {
if i == rf.me {
continue
}
go func(idx int) {
rf.mu.Lock()
if rf.state != Leader {
rf.mu.Unlock()
return
}
args := AppendEntriesArgs{
Term: rf.currentTerm,
LeaderId: rf.me,
PrevLogIndex: rf.getPrevLogIdx(idx),
PrevLogTerm: rf.getPrevLogTerm(idx),
Entries: append([]Log{}, rf.log[rf.nextIndex[idx]:]...),
LeaderCommit: rf.commitIndex,
}
rf.mu.Unlock()
reply := &AppendEntriesReply{}
ret := rf.sendAppendEntries(idx, &args, reply)
rf.mu.Lock()
defer rf.mu.Unlock()
if !ret || rf.state != Leader || rf.currentTerm != args.Term {
return
}
if reply.Term > rf.currentTerm {
rf.beFollower(reply.Term)
return
}
if reply.Success {
rf.matchIndex[idx] = args.PrevLogIndex + len(args.Entries)
rf.nextIndex[idx] = rf.matchIndex[idx] + 1
rf.updateCommitIndex()
//rf.mu.Unlock()
return
} else {
// 优化点,nextIndex批量减
tarIndex := reply.ConflictIndex
if reply.ConflictTerm != NULL {
logSize := len(rf.log)
for i := 0; i < logSize; i++ {
if rf.log[i].Term != reply.ConflictTerm {
continue
}
for i < logSize && rf.log[i].Term == reply.ConflictTerm {
i++
}
tarIndex = i
}
}
rf.nextIndex[idx] = tarIndex
//rf.mu.Unlock()
}
}(i)
}
}
func (rf *Raft) getPrevLogIdx(i int) int {
return rf.nextIndex[i] - 1
}
func (rf *Raft) getPrevLogTerm(i int) int {
prevLogIdx := rf.getPrevLogIdx(i)
if prevLogIdx < 0 {
return -1
}
return rf.log[prevLogIdx].Term
}
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {
ok := rf.peers[server].Call("Raft.AppendEntries", args, reply)
return ok
}
func (rf *Raft) updateCommitIndex() {
rf.matchIndex[rf.me] = len(rf.log) - 1
copyMatchIndex := make([]int, len(rf.matchIndex))
copy(copyMatchIndex, rf.matchIndex)
sort.Sort(sort.Reverse(sort.IntSlice(copyMatchIndex)))
//sort.Ints(copyMatchIndex)
//取半数节点的日志log
N := copyMatchIndex[len(copyMatchIndex)/2]
if N > rf.commitIndex && rf.log[N].Term == rf.currentTerm {
rf.commitIndex = N
rf.updateLastApplied()
}
}
func (rf *Raft) updateLastApplied() {
for rf.lastApplied < rf.commitIndex {
rf.lastApplied++
curLog := rf.log[rf.lastApplied]
applyMsg := ApplyMsg{
CommandValid: true,
Command: curLog.Command,
CommandIndex: rf.lastApplied,
}
//fmt.Printf("i am %v, commit log %v", rf.me, rf.lastApplied)
//fmt.Println()
rf.applyCh <- applyMsg
}
}
func send(ch chan bool) {
select {
case <-ch:
default:
}
ch <- true
}
//
// the service or tester wants to create a Raft server. the ports
// of all the Raft servers (including this one) are in peers[]. this
// server's port is peers[me]. all the servers' peers[] arrays
// have the same order. persister is a place for this server to
// save its persistent state, and also initially holds the most
// recent saved state, if any. applyCh is a channel on which the
// tester or service expects Raft to send ApplyMsg messages.
// Make() must return quickly, so it should start goroutines
// for any long-running work.
//
func Make(peers []*labrpc.ClientEnd, me int,
persister *Persister, applyCh chan ApplyMsg) *Raft {
rf := &Raft{}
rf.peers = peers
rf.persister = persister
rf.me = me
// Your initialization code here (2A, 2B, 2C).
rf.state = Follower
rf.currentTerm = 0
rf.votedFor = NULL
rf.log = make([]Log, 1)
rf.commitIndex = 0
rf.lastApplied = 0
rf.nextIndex = make([]int, len(peers))
rf.matchIndex = make([]int, len(peers))
rf.applyCh = applyCh
rf.voteCh = make(chan bool, 1)
rf.appendLogCh = make(chan bool, 1)
rf.killCh = make(chan bool, 1)
// initialize from state persisted before a crash
rf.readPersist(persister.ReadRaftState())
heartBeatTime := time.Duration(100) * time.Millisecond
go func() {
for {
select {
case <-rf.killCh:
return
default:
}
electionTime := time.Duration(rand.Intn(200)+300) * time.Millisecond
rf.mu.Lock()
state := rf.state
rf.mu.Unlock()
switch state {
case Follower, Candidate:
select {
case <-rf.voteCh:
case <-rf.appendLogCh:
case <-time.After(electionTime):
rf.mu.Lock()
rf.beCandidate()
rf.mu.Unlock()
}
case Leader:
rf.startAppendLog()
time.Sleep(heartBeatTime)
}
}
}()
return rf
}
| }
}
func (rf *Raft) startAppendLog() {
for i := | conditional_block |
proto2.rs | /*!
proto2.rs
A newer, simpler, more easily-extensible `grel` protocol. As of 2020-12-29,
this supersedes the `grel::protocol` lib.
2020-01-23
*/
use serde::{Serialize, Deserialize};
/** The `Op` enum represents one of the `Room` operator subcommands. It is
used in the `Msg::Op(...)` variant of the `Msg` enum.
*/
// Serialized to/from JSON via serde as the payload of `Msg::Op(...)`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Op {
/** Open the current Room, allowing in the general public. */
Open,
/** Close the current Room to anyone who hasn't been specifically
`Invite`d. */
Close,
/** Ban the user with the supplied user name from the Room (even if
it's `Open`), removing him if he's currently in it. */
Kick(String),
/** Allow the user to enter the current room, even if it's `Close`d.
Also sends an invitation message to the user. */
Invite(String),
/** Transfer operatorship to another user. (The user must be in the
current room to receive the mantle of operatorship.) */
Give(String),
}
/** The `Msg` enum is the structure that gets serialized to JSON and passed
along the TCP connections between the server and the various clients.
The first four variants, `Text`, `Ping`, `Priv` and `Logout` are
bi-directional, being used to send similar information both from client
to server and server to client.
The next six, `Name`, `Join`, `Query`, `Block`, `Unblock`, and `Op` are
for sending commands or requests from the client to the server.
The final three, `Info`, `Err`, and `Misc` are used to send information
from the server back to the client.
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Msg {
// Bi-directional messages
/// Typical chunk of text to be exchanged while chatting.
Text {
// `who` may be omitted in incoming JSON; serde fills in "".
#[serde(default)]
who: String,
lines: Vec<String>,
},
/** Request for or acknowledgement of proof of connection.
If the server hasn't received any data from the client in a while, it will
send one of these. The client can then respond with one to indicate it's
still connected.
*/
Ping,
/** A private message delivered only to the recipient.
When sent from the client to the server, `who` should be an identifier
for the _recipient_; when sent server to client, `who` is the name of
the source.
*/
Priv {
who: String,
text: String,
},
/** A message from the client indicating it would like to disconnect
cleanly from the server; in response, the server will send back one of
these with a message and close the connection.
*/
Logout(String),
// Client-to-server messages.
/// Name-change request from client to server.
Name(String),
/// Client request to create/join a room.
Join(String),
/** A request from the client to the server for some type of information,
like a list of users matching a pattern.
*/
Query {
what: String,
arg: String,
},
/** A request from the client to block messages (including private
messages) from the user with the matching name. */
Block(String),
/** A request to unblock the given user. */
Unblock(String),
/** One of the Room operator subcommands. See the `proto2::Op` enum. */
Op(Op),
// Server-to-client messages.
/** A non-error, miscellaneously-informative message sent from the server
to the client.
*/
Info(String),
/** A message from the server to the client indicating the client has
done something wrong, like sent an invalid message.
*/
Err(String),
/**
The `Misc` variant represents information that the client may want to
display in a structured manner (and not just as an unadorned line of
text). For any given "type" of `Misc` message, the client is free to
either implement its own form of displaying the information, or to
just use the contents of the provided `.alt` field.
Current Misc variants (with example field values):
``` ignore
// in response to a Query { what: "roster". ... }
Misc {
what: "roster".to_string(),
data: vec!["user1".to_string(), "user2".to_string()], # ...
alt: "[ comma-delimited list of Users in room ]".to_string(),
};
// when a user joins a channel
Misc {
what: "join".to_string(),
data: vec!["grel user".to_string(),
"room name".to_string()],
alt: "grel user joins [room name]".to_string(),
};
// when a user logs out or leaves a channel
Misc {
what: "leave".to_string(),
data: vec!["grel user".to_string(),
"moved to another room".to_string()],
alt: "grel user moved to another room".to_string(),
};
// when a user is kicked from the current channel
Misc {
what: "kick_other".to_string(),
data: vec!["Bad User".to_string(),
"This Room".to_string()],
alt: "Bad User has been kicked from This Room.".to_string(),
};
// When YOU are kicked from the current channel.
Misc {
what: "kick_you".to_string(),
data: vec!["This Room".to_string()],
alt: "You have been kicked from This Room.".to_string(),
};
// when a user changes his or her name
Misc {
what: "name".to_string(),
data: vec!["old name".to_string(),
"new name".to_string()],
alt: "\"old name\" is now known as \"new name\".".to_string(),
};
// when the Room operator changes
Misc {
what: "new_op".to_string(),
data: ["New Operator".to_string(),
"This Room".to_string()],
alt: "New Operator is now the operator of This Room".to_string(),
};
// in response to a Query { what: "addr", ... }
Misc {
what: "addr".to_string(),
data: vec!["127.0.0.1:33333".to_string()]
alt: "Your public address is 127.0.0.1:33333".to_string(),
};
// in response to a Query { what: "who", ... }
Misc {
what: "who".to_string(),
data: vec!["user1".to_string(), "user2".to_string(), ... ],
alt: "Matching names: \"user1\", \"user2\", ...".to_string(),
};
// echoes a private message back to the sender
Misc {
what: "priv_echo".to_string(),
data: vec!["recipient".to_string(), "text of message".to_string()],
alt: "$ You @ Recipient: text of message".to_string()
};
```
*/
Misc {
what: String,
data: Vec<String>,
alt: String,
},
}
/** Some of these are convenience functions for instantiating certain
variants.
*/
impl Msg {
pub fn logout(msg: &str) -> Msg { Msg::Logout(String::from(msg)) }
pub fn info(msg: &str) -> Msg { Msg::Info(String::from(msg)) }
pub fn err(msg: &str) -> Msg { Msg::Err(String::from(msg)) }
/// Return a JSON-encoded version of a `Msg`.
pub fn bytes(&self) -> Vec<u8> {
serde_json::to_vec_pretty(&self).unwrap()
}
/** Return whether a Msg should count against a user's "byte quota"
(for rate limiting). Generally, this is anything that causes "noise".
*/
pub fn counts(&self) -> bool {
match self {
Msg::Text { who: _, lines: _ } => true,
Msg::Priv { who: _, text: _ } => true,
Msg::Name(_) => true,
Msg::Join(_) => true,
_ => false,
}
}
}
/** The `Endpoint` enum specifies sources and destinations in an `Env`.
`User`s and `Room`s are stored in respective `HashMap`s with unique `u64`
IDs as keys.
*/
#[derive(Copy, Clone, Debug)]
pub enum Endpoint {
User(u64),
Room(u64),
Server,
All,
}
/** An `Env` (-elope) wraps the bytes of a JSON-encoded `Msg`, along with
unambiguous source and destination information. This metadata is necessary
because the encoded JSON is opaque to the server without decoding it.
*/
#[derive(Clone, Debug)]
pub struct Env {
pub source: Endpoint,
pub dest: Endpoint,
data: Vec<u8>,
}
impl Env {
/** Wrap a `Msg`. */
pub fn new(from: Endpoint, to: Endpoint, msg: &Msg) -> Env {
Env {
source: from,
dest: to,
data: msg.bytes(),
}
}
/** Get a reference to the encoded bytes. */
pub fn bytes(&self) -> &[u8] { &self.data }
/** Consume the `Env`, returning the owned vector of bytes. */
pub fn into_bytes(self) -> Vec<u8> { self.data }
}
#[cfg(test)]
mod test {
use super::*;
fn test_serde(m: &Msg) {
let stringd = serde_json::to_string_pretty(m).unwrap();
println!("{}\n", &stringd);
let newm: Msg = serde_json::from_str(&stringd).unwrap();
assert_eq!(*m, newm);
}
#[test]
fn visual_serde() |
}
| {
println!("Msg::Text variant");
let m = Msg::Text {
who: String::from("gre luser"),
lines: vec!["This is a first line of text.".to_string(),
"Following the first is a second line of text.".to_string()],
};
test_serde(&m);
println!("Msg::Ping variant");
let m = Msg::Ping;
test_serde(&m);
println!("Msg::Priv variant");
let m = Msg::Priv {
who: String::from("naggum"),
text: String::from("XML is bascially the Hitler of protocols."),
};
test_serde(&m);
println!("Msg::Logout variant");
let m = Msg::logout("You have been logged out because you touch yourself at night.");
test_serde(&m);
println!("Msg::Name variant");
let m = Msg::Name(String::from("New Lewser"));
test_serde(&m);
println!("Msg::Join variant");
let m = Msg::Join(String::from("Gay Space Communism"));
test_serde(&m);
println!("Msg::Query variant");
let m = Msg::Query {
what: String::from("who"),
arg: String::from("fink"),
};
test_serde(&m);
println!("Msg::Block variant");
let m = Msg::Block(String::from("Dickweed User"));
test_serde(&m);
println!("Msg::Unblock variant");
let m = Msg::Unblock(String::from("Misunderstood User"));
test_serde(&m);
println!("A couple of Msg::Op variants");
let m = Msg::Op(Op::Close);
test_serde(&m);
let m = Msg::Op(Op::Kick("FpS DoUgG".to_string()));
test_serde(&m);
println!("Msg::Info variant");
let m = Msg::info("Santa isn't real.");
test_serde(&m);
println!("Msg::Err variant");
let m = Msg::err("Unrecognized Query \"meaning of life\".");
test_serde(&m);
println!("Msg::Misc variant");
let m = Msg::Misc {
what: String::from("roster"),
data: vec!["you".to_string(), "me".to_string(),
"a dog named foo".to_string()],
alt: String::from("you, me, and a dog named foo"),
};
test_serde(&m);
} | identifier_body |
proto2.rs | /*!
proto2.rs
A newer, simpler, more easily-extensible `grel` protocol. As of 2020-12-29,
this supersedes the `grel::protocol` lib.
2020-01-23
*/
use serde::{Serialize, Deserialize};
/** The `Op` enum represents one of the `Room` operator subcommands. It is
used in the `Msg::Op(...)` variant of the `Msg` enum.
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Op {
/** Open the current Room, allowing in the general public. */
Open,
/** Close the current Room to anyone who hasn't been specifically
`Invite`d. */
Close,
/** Ban the user with the supplied user name from the Room (even if
it's `Open`), removing him if he's currently in it. */
Kick(String),
/** Allow the user to enter the current room, even if it's `Close`d.
Also sends an invitation message to the user. */
Invite(String),
/** Transfer operatorship to another user. (The user must be in the
current room to receive the mantle of operatorship.) */
Give(String),
}
/** The `Msg` enum is the structure that gets serialized to JSON and passed
along the TCP connections between the server and the various clients.
The first four variants, `Text`, `Ping`, `Priv` and `Logout` are
bi-directional, being used to send similar information both from client
to server and server to client.
The next six, `Name`, `Join`, `Query`, `Block`, `Unblock`, and `Op` are
for sending commands or requests from the client to the server.
The final three, `Info`, `Err`, and `Misc` are used to send information
from the server back to the client.
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Msg {
// Bi-directional messages
/// Typical chunk of text to be exchanged while chatting.
Text {
#[serde(default)]
who: String,
lines: Vec<String>,
},
/** Request for or acknowledgement of proof of connection.
If the server hasn't received any data from the client in a while, it will
send one of these. The client can then respond with one to indicate it's
still connected.
*/
Ping,
/** A private message delivered only to the recipient.
When sent from the client to the server, `who` should be an identifier
for the _recipient_; when sent server to client, `who` is the name of
the source.
*/
Priv {
who: String,
text: String,
},
/** A message from the client indicating it would like to disconnect
cleanly from the server; in response, the server will send back one of
these with a message and close the connection.
*/
Logout(String),
// Client-to-server messages.
/// Name-change request from client to server.
Name(String),
/// Client request to create/join a room.
Join(String),
/** A request from the client to the server for some type of information,
like a list of users matching a pattern.
*/
Query {
what: String,
arg: String,
},
/** A request from the client to block messages (including private
messages) from the user with the matching name. */
Block(String),
/** A request to unblock the given user. */
Unblock(String),
/** One of the Room operator subcommands. See the `proto2::Op` enum. */
Op(Op),
// Server-to-client messages.
/** A non-error, miscellaneously-informative message sent form the server
to the client.
*/
Info(String),
/** A message from the server to the client indicating the client has
done something wrong, like sent an invalid message.
*/
Err(String),
/**
The `Misc` variant represents information that the client may want to
display in a structured manner (and not just as an unadorned line of
text). For any given "type" of `Misc` message, the client is free to
either implement its own form of displaying the information, or to
just use the contents of the provided `.alt` field.
Current Misc variants (with example field values):
``` ignore
// in response to a Query { what: "roster". ... }
Misc {
what: "roster".to_string(),
data: vec!["user1".to_string(), "user2".to_string()], # ...
alt: "[ comma-delimited list of Users in room ]".to_string(),
};
// when a user joins a channel
Misc {
what: "join".to_string(),
data: vec!["grel user".to_string(),
"room name".to_string()],
alt: "grel user joins [room name]".to_string(),
};
// when a user logs out or leaves a channel
Misc {
what: "leave".to_string(),
data: vec!["grel user".to_string(),
"moved to another room".to_string()],
alt: "grel user moved to another room".to_string(),
};
// when a user is kicked from the current channel
Misc {
what: "kick_other".to_string(),
data: vec!["Bad User".to_string(),
"This Room".to_string()],
alt: "Bad User has been kicked from This Room.".to_string(),
};
// When YOU are kicked from the current channel.
Misc {
what: "kick_you".to_string(),
data: vec!["This Room".to_string()],
alt: "You have been kicked from This Room.".to_string(),
};
| Misc {
what: "name".to_string(),
data: vec!["old name".to_string(),
"new name".to_string()],
alt: "\"old name\" is now known as \"new name\".".to_string(),
};
// when the Room operator changes
Misc {
what: "new_op".to_string(),
data: ["New Operator".to_string(),
"This Room".to_string()],
alt: "New Operator is now the operator of This Room".to_string(),
};
// in response to a Query { what: "addr", ... }
Misc {
what: "addr".to_string(),
data: vec!["127.0.0.1:33333".to_string()]
alt: "Your public address is 127.0.0.1:33333".to_string(),
};
// in response to a Query { what: "who", ... }
Misc {
what: "who".to_string(),
data: vec!["user1".to_string(), "user2".to_string(), ... ],
alt: "Matching names: \"user1\", \"user2\", ...".to_string(),
};
// echoes a private message back to the sender
Misc {
what: "priv_echo".to_string(),
data: vec!["recipient".to_string(), "text of message".to_string()],
alt: "$ You @ Recipient: text of message".to_string()
};
```
*/
Misc {
what: String,
data: Vec<String>,
alt: String,
},
}
/** Some of these are convenience functions for instantiating certain
variants.
*/
impl Msg {
pub fn logout(msg: &str) -> Msg { Msg::Logout(String::from(msg)) }
pub fn info(msg: &str) -> Msg { Msg::Info(String::from(msg)) }
pub fn err(msg: &str) -> Msg { Msg::Err(String::from(msg)) }
/// Return a JSON-encoded version of a `Msg`.
pub fn bytes(&self) -> Vec<u8> {
serde_json::to_vec_pretty(&self).unwrap()
}
/** Return whether a Msg should count against a user's "byte quota"
(for rate limiting). Generally, this is anything that causes "noise".
*/
pub fn counts(&self) -> bool {
match self {
Msg::Text { who: _, lines: _ } => true,
Msg::Priv { who: _, text: _ } => true,
Msg::Name(_) => true,
Msg::Join(_) => true,
_ => false,
}
}
}
/** The `Endpoint` enum specifies sources and destinations in an `Env`.
`User`s and `Room`s are stored in respective `HashMap`s with unique `u64`
IDs as keys.
*/
#[derive(Copy, Clone, Debug)]
pub enum Endpoint {
User(u64),
Room(u64),
Server,
All,
}
/** An `Env` (-elope) wraps the bytes of a JSON-encoded `Msg`, along with
unambiguous source and destination information. This metadata is necessary
because the encoded JSON is opaque to the server without decoding it.
*/
#[derive(Clone, Debug)]
pub struct Env {
pub source: Endpoint,
pub dest: Endpoint,
data: Vec<u8>,
}
impl Env {
/** Wrap a `Msg`. */
pub fn new(from: Endpoint, to: Endpoint, msg: &Msg) -> Env {
Env {
source: from,
dest: to,
data: msg.bytes(),
}
}
/** Get a reference to the encoded bytes. */
pub fn bytes(&self) -> &[u8] { &self.data }
/** Consume the `Env`, returning the owned vector of bytes. */
pub fn into_bytes(self) -> Vec<u8> { self.data }
}
#[cfg(test)]
mod test {
use super::*;
fn test_serde(m: &Msg) {
let stringd = serde_json::to_string_pretty(m).unwrap();
println!("{}\n", &stringd);
let newm: Msg = serde_json::from_str(&stringd).unwrap();
assert_eq!(*m, newm);
}
#[test]
fn visual_serde() {
println!("Msg::Text variant");
let m = Msg::Text {
who: String::from("gre luser"),
lines: vec!["This is a first line of text.".to_string(),
"Following the first is a second line of text.".to_string()],
};
test_serde(&m);
println!("Msg::Ping variant");
let m = Msg::Ping;
test_serde(&m);
println!("Msg::Priv variant");
let m = Msg::Priv {
who: String::from("naggum"),
text: String::from("XML is bascially the Hitler of protocols."),
};
test_serde(&m);
println!("Msg::Logout variant");
let m = Msg::logout("You have been logged out because you touch yourself at night.");
test_serde(&m);
println!("Msg::Name variant");
let m = Msg::Name(String::from("New Lewser"));
test_serde(&m);
println!("Msg::Join variant");
let m = Msg::Join(String::from("Gay Space Communism"));
test_serde(&m);
println!("Msg::Query variant");
let m = Msg::Query {
what: String::from("who"),
arg: String::from("fink"),
};
test_serde(&m);
println!("Msg::Block variant");
let m = Msg::Block(String::from("Dickweed User"));
test_serde(&m);
println!("Msg::Unblock variant");
let m = Msg::Unblock(String::from("Misunderstood User"));
test_serde(&m);
println!("A couple of Msg::Op variants");
let m = Msg::Op(Op::Close);
test_serde(&m);
let m = Msg::Op(Op::Kick("FpS DoUgG".to_string()));
test_serde(&m);
println!("Msg::Info variant");
let m = Msg::info("Santa isn't real.");
test_serde(&m);
println!("Msg::Err variant");
let m = Msg::err("Unrecognized Query \"meaning of life\".");
test_serde(&m);
println!("Msg::Misc variant");
let m = Msg::Misc {
what: String::from("roster"),
data: vec!["you".to_string(), "me".to_string(),
"a dog named foo".to_string()],
alt: String::from("you, me, and a dog named foo"),
};
test_serde(&m);
}
} | // when a user changes his or her name | random_line_split |
proto2.rs | /*!
proto2.rs
A newer, simpler, more easily-extensible `grel` protocol. As of 2020-12-29,
this supersedes the `grel::protocol` lib.
2020-01-23
*/
use serde::{Serialize, Deserialize};
/** The `Op` enum represents one of the `Room` operator subcommands. It is
used in the `Msg::Op(...)` variant of the `Msg` enum.
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Op {
/** Open the current Room, allowing in the general public. */
Open,
/** Close the current Room to anyone who hasn't been specifically
`Invite`d. */
Close,
/** Ban the user with the supplied user name from the Room (even if
it's `Open`), removing him if he's currently in it. */
Kick(String),
/** Allow the user to enter the current room, even if it's `Close`d.
Also sends an invitation message to the user. */
Invite(String),
/** Transfer operatorship to another user. (The user must be in the
current room to receive the mantle of operatorship.) */
Give(String),
}
/** The `Msg` enum is the structure that gets serialized to JSON and passed
along the TCP connections between the server and the various clients.
The first four variants, `Text`, `Ping`, `Priv` and `Logout` are
bi-directional, being used to send similar information both from client
to server and server to client.
The next six, `Name`, `Join`, `Query`, `Block`, `Unblock`, and `Op` are
for sending commands or requests from the client to the server.
The final three, `Info`, `Err`, and `Misc` are used to send information
from the server back to the client.
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Msg {
// Bi-directional messages
/// Typical chunk of text to be exchanged while chatting.
Text {
#[serde(default)]
who: String,
lines: Vec<String>,
},
/** Request for or acknowledgement of proof of connection.
If the server hasn't received any data from the client in a while, it will
send one of these. The client can then respond with one to indicate it's
still connected.
*/
Ping,
/** A private message delivered only to the recipient.
When sent from the client to the server, `who` should be an identifier
for the _recipient_; when sent server to client, `who` is the name of
the source.
*/
Priv {
who: String,
text: String,
},
/** A message from the client indicating it would like to disconnect
cleanly from the server; in response, the server will send back one of
these with a message and close the connection.
*/
Logout(String),
// Client-to-server messages.
/// Name-change request from client to server.
Name(String),
/// Client request to create/join a room.
Join(String),
/** A request from the client to the server for some type of information,
like a list of users matching a pattern.
*/
Query {
what: String,
arg: String,
},
/** A request from the client to block messages (including private
messages) from the user with the matching name. */
Block(String),
/** A request to unblock the given user. */
Unblock(String),
/** One of the Room operator subcommands. See the `proto2::Op` enum. */
Op(Op),
// Server-to-client messages.
/** A non-error, miscellaneously-informative message sent form the server
to the client.
*/
Info(String),
/** A message from the server to the client indicating the client has
done something wrong, like sent an invalid message.
*/
Err(String),
/**
The `Misc` variant represents information that the client may want to
display in a structured manner (and not just as an unadorned line of
text). For any given "type" of `Misc` message, the client is free to
either implement its own form of displaying the information, or to
just use the contents of the provided `.alt` field.
Current Misc variants (with example field values):
``` ignore
// in response to a Query { what: "roster". ... }
Misc {
what: "roster".to_string(),
data: vec!["user1".to_string(), "user2".to_string()], # ...
alt: "[ comma-delimited list of Users in room ]".to_string(),
};
// when a user joins a channel
Misc {
what: "join".to_string(),
data: vec!["grel user".to_string(),
"room name".to_string()],
alt: "grel user joins [room name]".to_string(),
};
// when a user logs out or leaves a channel
Misc {
what: "leave".to_string(),
data: vec!["grel user".to_string(),
"moved to another room".to_string()],
alt: "grel user moved to another room".to_string(),
};
// when a user is kicked from the current channel
Misc {
what: "kick_other".to_string(),
data: vec!["Bad User".to_string(),
"This Room".to_string()],
alt: "Bad User has been kicked from This Room.".to_string(),
};
// When YOU are kicked from the current channel.
Misc {
what: "kick_you".to_string(),
data: vec!["This Room".to_string()],
alt: "You have been kicked from This Room.".to_string(),
};
// when a user changes his or her name
Misc {
what: "name".to_string(),
data: vec!["old name".to_string(),
"new name".to_string()],
alt: "\"old name\" is now known as \"new name\".".to_string(),
};
// when the Room operator changes
Misc {
what: "new_op".to_string(),
data: ["New Operator".to_string(),
"This Room".to_string()],
alt: "New Operator is now the operator of This Room".to_string(),
};
// in response to a Query { what: "addr", ... }
Misc {
what: "addr".to_string(),
data: vec!["127.0.0.1:33333".to_string()]
alt: "Your public address is 127.0.0.1:33333".to_string(),
};
// in response to a Query { what: "who", ... }
Misc {
what: "who".to_string(),
data: vec!["user1".to_string(), "user2".to_string(), ... ],
alt: "Matching names: \"user1\", \"user2\", ...".to_string(),
};
// echoes a private message back to the sender
Misc {
what: "priv_echo".to_string(),
data: vec!["recipient".to_string(), "text of message".to_string()],
alt: "$ You @ Recipient: text of message".to_string()
};
```
*/
Misc {
what: String,
data: Vec<String>,
alt: String,
},
}
/** Some of these are convenience functions for instantiating certain
variants.
*/
impl Msg {
pub fn logout(msg: &str) -> Msg { Msg::Logout(String::from(msg)) }
pub fn info(msg: &str) -> Msg { Msg::Info(String::from(msg)) }
pub fn err(msg: &str) -> Msg { Msg::Err(String::from(msg)) }
/// Return a JSON-encoded version of a `Msg`.
pub fn bytes(&self) -> Vec<u8> {
serde_json::to_vec_pretty(&self).unwrap()
}
/** Return whether a Msg should count against a user's "byte quota"
(for rate limiting). Generally, this is anything that causes "noise".
*/
pub fn counts(&self) -> bool {
match self {
Msg::Text { who: _, lines: _ } => true,
Msg::Priv { who: _, text: _ } => true,
Msg::Name(_) => true,
Msg::Join(_) => true,
_ => false,
}
}
}
/** The `Endpoint` enum specifies sources and destinations in an `Env`.
`User`s and `Room`s are stored in respective `HashMap`s with unique `u64`
IDs as keys.
*/
#[derive(Copy, Clone, Debug)]
pub enum Endpoint {
User(u64),
Room(u64),
Server,
All,
}
/** An `Env` (-elope) wraps the bytes of a JSON-encoded `Msg`, along with
unambiguous source and destination information. This metadata is necessary
because the encoded JSON is opaque to the server without decoding it.
*/
#[derive(Clone, Debug)]
pub struct Env {
pub source: Endpoint,
pub dest: Endpoint,
data: Vec<u8>,
}
impl Env {
/** Wrap a `Msg`. */
pub fn new(from: Endpoint, to: Endpoint, msg: &Msg) -> Env {
Env {
source: from,
dest: to,
data: msg.bytes(),
}
}
/** Get a reference to the encoded bytes. */
pub fn bytes(&self) -> &[u8] { &self.data }
/** Consume the `Env`, returning the owned vector of bytes. */
pub fn into_bytes(self) -> Vec<u8> { self.data }
}
#[cfg(test)]
mod test {
use super::*;
fn | (m: &Msg) {
let stringd = serde_json::to_string_pretty(m).unwrap();
println!("{}\n", &stringd);
let newm: Msg = serde_json::from_str(&stringd).unwrap();
assert_eq!(*m, newm);
}
#[test]
fn visual_serde() {
println!("Msg::Text variant");
let m = Msg::Text {
who: String::from("gre luser"),
lines: vec!["This is a first line of text.".to_string(),
"Following the first is a second line of text.".to_string()],
};
test_serde(&m);
println!("Msg::Ping variant");
let m = Msg::Ping;
test_serde(&m);
println!("Msg::Priv variant");
let m = Msg::Priv {
who: String::from("naggum"),
text: String::from("XML is bascially the Hitler of protocols."),
};
test_serde(&m);
println!("Msg::Logout variant");
let m = Msg::logout("You have been logged out because you touch yourself at night.");
test_serde(&m);
println!("Msg::Name variant");
let m = Msg::Name(String::from("New Lewser"));
test_serde(&m);
println!("Msg::Join variant");
let m = Msg::Join(String::from("Gay Space Communism"));
test_serde(&m);
println!("Msg::Query variant");
let m = Msg::Query {
what: String::from("who"),
arg: String::from("fink"),
};
test_serde(&m);
println!("Msg::Block variant");
let m = Msg::Block(String::from("Dickweed User"));
test_serde(&m);
println!("Msg::Unblock variant");
let m = Msg::Unblock(String::from("Misunderstood User"));
test_serde(&m);
println!("A couple of Msg::Op variants");
let m = Msg::Op(Op::Close);
test_serde(&m);
let m = Msg::Op(Op::Kick("FpS DoUgG".to_string()));
test_serde(&m);
println!("Msg::Info variant");
let m = Msg::info("Santa isn't real.");
test_serde(&m);
println!("Msg::Err variant");
let m = Msg::err("Unrecognized Query \"meaning of life\".");
test_serde(&m);
println!("Msg::Misc variant");
let m = Msg::Misc {
what: String::from("roster"),
data: vec!["you".to_string(), "me".to_string(),
"a dog named foo".to_string()],
alt: String::from("you, me, and a dog named foo"),
};
test_serde(&m);
}
}
| test_serde | identifier_name |
train_lstm.py | import sys, os
import time
import shutil
import argparse
import functools
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.optim as optim
from nlgeval import NLGEval
sys.path.append(os.pardir)
from utils.utils import sec2str, count_parameters, weight_init, get_pretrained_from_txt
from langmodels.lstm import LSTMCaptioning
from langmodels.vocab import build_vocab, return_idx, return_sentences
from dataset.activitynet_train import ActivityNetCaptions_Train
from dataset.activitynet_valtest import ActivityNetCaptions_Val
import transforms.spatial_transforms as spt
import transforms.temporal_transforms as tpt
from options import parse_args
from utils.makemodel import generate_3dcnn, generate_rnn
def train_lstm(args):
# gpus
device = torch.device('cuda' if args.cuda and torch.cuda.is_available() else 'cpu')
# load vocabulary
annfiles = [os.path.join(args.root_path, pth) for pth in args.annpaths]
text_proc = build_vocab(annfiles, args.min_freq, args.max_seqlen)
vocab_size = len(text_proc.vocab)
# transforms
sp = spt.Compose([spt.CornerCrop(size=args.imsize), spt.ToTensor()])
tp = tpt.Compose([tpt.TemporalRandomCrop(args.clip_len), tpt.LoopPadding(args.clip_len)])
# dataloading
train_dset = ActivityNetCaptions_Train(args.root_path, ann_path='train_fps.json', sample_duration=args.clip_len, spatial_transform=sp, temporal_transform=tp)
trainloader = DataLoader(train_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_cpu, drop_last=True, timeout=100)
max_train_it = int(len(train_dset) / args.batch_size)
val_dset = ActivityNetCaptions_Val(args.root_path, ann_path=['val_1_fps.json', 'val_2_fps.json'], sample_duration=args.clip_len, spatial_transform=sp, temporal_transform=tp)
valloader = DataLoader(val_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_cpu, drop_last=True, timeout=100)
#max_val_it = int(len(val_dset) / args.batch_size)
max_val_it = 10
# models
video_encoder = generate_3dcnn(args)
caption_gen = generate_rnn(vocab_size, args)
models = [video_encoder, caption_gen]
# initialize pretrained embeddings
if args.emb_init is not None:
begin = time.time()
print("initializing embeddings from {}...".format(args.emb_init))
lookup = get_pretrained_from_txt(args.emb_init)
first = next(iter(lookup.values()))
try:
assert len(first) == args.embedding_size
except AssertionError:
print("embedding size not compatible with pretrained embeddings.")
print("specified size {}, pretrained model includes size {}".format(args.embedding_size, len(first)))
sys.exit(1)
matrix = torch.randn_like(caption_gen.emb.weight)
for char, vec in lookup.items():
if char in text_proc.vocab.stoi.keys():
id = text_proc.vocab.stoi[char]
matrix[id, :] = torch.tensor(vec)
caption_gen.init_embedding(matrix)
print("{} | successfully initialized".format(sec2str(time.time() - begin), args.emb_init))
# move models to device
n_gpu = torch.cuda.device_count()
if n_gpu > 1 and args.dataparallel:
video_encoder = nn.DataParallel(video_encoder)
caption_gen = nn.DataParallel(caption_gen)
else:
n_gpu = 1
print("using {} gpus...".format(n_gpu))
# loss function
criterion = nn.CrossEntropyLoss(ignore_index=text_proc.vocab.stoi['<pad>'])
# optimizer, scheduler
params = list(video_encoder.parameters()) + list(caption_gen.parameters())
optimizer = optim.SGD([
{"params" : video_encoder.parameters(), "lr" : args.lr_cnn, "momentum" : args.momentum_cnn},
{"params" : caption_gen.parameters(), "lr" : args.lr_rnn, "momentum" : args.momentum_rnn}],
weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.9, patience=args.patience, verbose=True)
# count parameters
num_params = sum(count_parameters(model) for model in models)
print("# of params in model : {}".format(num_params))
# joint training loop
print("start training")
begin = time.time()
for ep in range(args.max_epochs):
# train for epoch
video_encoder, caption_gen, optimizer = train_epoch(trainloader, video_encoder, caption_gen, optimizer, criterion, device, text_proc, max_it=max_train_it, opt=args)
# save models
enc_save_dir = os.path.join(args.model_save_path, "encoder")
enc_filename = "ep{:04d}.pth".format(ep+1)
if not os.path.exists(enc_save_dir):
os.makedirs(enc_save_dir)
enc_save_path = os.path.join(enc_save_dir, enc_filename)
dec_save_dir = os.path.join(args.model_save_path, "decoder")
dec_filename = "ep{:04d}.pth".format(ep+1)
dec_save_path = os.path.join(dec_save_dir, dec_filename)
if not os.path.exists(dec_save_dir):
os.makedirs(dec_save_dir)
if n_gpu > 1 and args.dataparallel:
torch.save(video_encoder.module.state_dict(), enc_save_path)
torch.save(caption_gen.module.state_dict(), dec_save_path)
else:
torch.save(video_encoder.state_dict(), enc_save_path)
torch.save(caption_gen.state_dict(), dec_save_path)
print("saved encoder model to {}".format(enc_save_path))
print("saved decoder model to {}".format(dec_save_path))
# evaluate
print("begin evaluation for epoch {} ...".format(ep+1))
nll, ppl, metrics = validate(valloader, video_encoder, caption_gen, criterion, device, text_proc, max_it=max_val_it, opt=args)
if metrics is not None:
scheduler.step(metrics["METEOR"])
print("training time {}, epoch {:04d}/{:04d} done, validation loss: {:.06f}, perplexity: {:.03f}".format(sec2str(time.time()-begin), ep+1, args.max_epochs, nll, ppl))
print("end training")
def | (trainloader, encoder, decoder, optimizer, criterion, device, text_proc, max_it, opt):
if not opt.freeze:
encoder.train()
decoder.train()
ep_begin = time.time()
before = time.time()
for it, data in enumerate(trainloader):
# TODO: currently supports only one timestamp, enable more in the future
ids = data['id']
durations = data['duration']
sentences = data['sentences']
timestamps = data['timestamps']
fps = data['fps']
clip = data['clip']
captions = return_idx(sentences, text_proc)
optimizer.zero_grad()
# move to device
clip = clip.to(device)
captions = captions.to(device)
target = captions.clone().detach()
# flow through model
if opt.freeze:
with torch.no_grad():
feature = encoder(clip)
else:
feature = encoder(clip)
# feature : (bs x C')
output = decoder(feature, captions)
# caption : (batch_size, vocab_size, seq_len)
# backpropagate loss and store negative log likelihood
nll = criterion(output, target)
nll.backward()
# gradient norm clipping
# torch.nn.utils.clip_grad_norm_(caption_gen.parameters(), max_norm=1.0)
optimizer.step()
# log losses
if it % opt.log_every == (opt.log_every-1):
print("epoch {} | iter {:06d}/{:06d} | nll loss: {:.04f} | {:02.04f}s per loop".format(sec2str(time.time()-ep_begin), it+1, max_it, nll.cpu().item(), (time.time()-before)/opt.log_every), flush=True)
before = time.time()
return encoder, decoder, optimizer
def validate(valloader, encoder, decoder, criterion, device, text_proc, max_it, opt):
encoder.eval()
decoder.eval()
nll_list = []
ppl_list = []
begin = time.time()
before = time.time()
gt_list = []
ans_list = []
evaluator = NLGEval()
with torch.no_grad():
for it, data in enumerate(valloader):
# TODO: currently supports only batch size of 1, enable more in the future
ids = data['id']
durations = data['duration']
sentences = data['sentences']
timestamps = data['timestamps']
fps = data['fps']
clip = data['clip']
captions = return_idx(sentences, text_proc)
# move to device
clip = clip.to(device)
captions = captions.to(device)
target = captions.clone().detach()
# flow through model
# feature : (bs x C')
feature = encoder(clip)
# output : (batch_size, vocab_size, seq_len)
try:
output = decoder.sample(feature, captions)
# workaround for dataparallel
except AttributeError:
output = decoder.module.sample(feature, captions)
# sample : (seq_len)
sample = output.argmax(1)
# backpropagate loss and store negative log likelihood
nll = criterion(output, target).cpu().item()
nll_list.append(nll)
ppl = 2 ** nll
ppl_list.append(ppl)
gt_list.extend(sentences)
ans_list.extend(return_sentences(sample, text_proc))
if it % opt.log_every == (opt.log_every-1):
print("validation {} | iter {:06d}/{:06d} | perplexity: {:.04f} | {:02.04f}s per loop".format(sec2str(time.time()-begin), it+1, max_it, sum(ppl_list)/len(ppl_list), (time.time()-before)/opt.log_every), flush=True)
before = time.time()
samplesentence = return_sentences(sample, text_proc)
# evaluate for only 100 iterations
if it == max_it-1:
print("sample sentences:")
for s in ans_list[-10:]:
print(s)
print("---METRICS---", flush=True)
try:
metrics_dict = evaluator.compute_metrics(ref_list=[gt_list], hyp_list=ans_list)
for k, v in metrics_dict.items():
print("{}:\t\t{}".format(k, v))
except:
metrics_dict = None
print("could not evaluate, some sort of error in NLGEval", flush=True)
print("---METRICS---", flush=True)
break
meannll = sum(nll_list) / len(nll_list)
meanppl = sum(ppl_list) / len(ppl_list)
return meannll, meanppl, metrics_dict
if __name__ == '__main__':
args = parse_args()
print(args)
train_lstm(args)
| train_epoch | identifier_name |
train_lstm.py | import sys, os
import time
import shutil
import argparse
import functools
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.optim as optim
from nlgeval import NLGEval
sys.path.append(os.pardir)
from utils.utils import sec2str, count_parameters, weight_init, get_pretrained_from_txt
from langmodels.lstm import LSTMCaptioning
from langmodels.vocab import build_vocab, return_idx, return_sentences
from dataset.activitynet_train import ActivityNetCaptions_Train
from dataset.activitynet_valtest import ActivityNetCaptions_Val
import transforms.spatial_transforms as spt
import transforms.temporal_transforms as tpt
from options import parse_args
from utils.makemodel import generate_3dcnn, generate_rnn
def train_lstm(args):
# gpus
device = torch.device('cuda' if args.cuda and torch.cuda.is_available() else 'cpu')
# load vocabulary
annfiles = [os.path.join(args.root_path, pth) for pth in args.annpaths]
text_proc = build_vocab(annfiles, args.min_freq, args.max_seqlen)
vocab_size = len(text_proc.vocab)
# transforms
sp = spt.Compose([spt.CornerCrop(size=args.imsize), spt.ToTensor()])
tp = tpt.Compose([tpt.TemporalRandomCrop(args.clip_len), tpt.LoopPadding(args.clip_len)])
# dataloading
train_dset = ActivityNetCaptions_Train(args.root_path, ann_path='train_fps.json', sample_duration=args.clip_len, spatial_transform=sp, temporal_transform=tp)
trainloader = DataLoader(train_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_cpu, drop_last=True, timeout=100)
max_train_it = int(len(train_dset) / args.batch_size)
val_dset = ActivityNetCaptions_Val(args.root_path, ann_path=['val_1_fps.json', 'val_2_fps.json'], sample_duration=args.clip_len, spatial_transform=sp, temporal_transform=tp)
valloader = DataLoader(val_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_cpu, drop_last=True, timeout=100)
#max_val_it = int(len(val_dset) / args.batch_size)
max_val_it = 10
# models
video_encoder = generate_3dcnn(args)
caption_gen = generate_rnn(vocab_size, args)
models = [video_encoder, caption_gen]
# initialize pretrained embeddings
if args.emb_init is not None:
begin = time.time()
print("initializing embeddings from {}...".format(args.emb_init))
lookup = get_pretrained_from_txt(args.emb_init)
first = next(iter(lookup.values()))
try:
assert len(first) == args.embedding_size
except AssertionError:
print("embedding size not compatible with pretrained embeddings.")
print("specified size {}, pretrained model includes size {}".format(args.embedding_size, len(first)))
sys.exit(1)
matrix = torch.randn_like(caption_gen.emb.weight)
for char, vec in lookup.items():
if char in text_proc.vocab.stoi.keys():
id = text_proc.vocab.stoi[char]
matrix[id, :] = torch.tensor(vec)
caption_gen.init_embedding(matrix)
print("{} | successfully initialized".format(sec2str(time.time() - begin), args.emb_init))
# move models to device
n_gpu = torch.cuda.device_count()
if n_gpu > 1 and args.dataparallel:
video_encoder = nn.DataParallel(video_encoder)
caption_gen = nn.DataParallel(caption_gen)
else:
n_gpu = 1
print("using {} gpus...".format(n_gpu))
# loss function
criterion = nn.CrossEntropyLoss(ignore_index=text_proc.vocab.stoi['<pad>'])
# optimizer, scheduler
params = list(video_encoder.parameters()) + list(caption_gen.parameters())
optimizer = optim.SGD([
{"params" : video_encoder.parameters(), "lr" : args.lr_cnn, "momentum" : args.momentum_cnn},
{"params" : caption_gen.parameters(), "lr" : args.lr_rnn, "momentum" : args.momentum_rnn}],
weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.9, patience=args.patience, verbose=True)
# count parameters
num_params = sum(count_parameters(model) for model in models)
print("# of params in model : {}".format(num_params))
# joint training loop
print("start training")
begin = time.time()
for ep in range(args.max_epochs):
# train for epoch
video_encoder, caption_gen, optimizer = train_epoch(trainloader, video_encoder, caption_gen, optimizer, criterion, device, text_proc, max_it=max_train_it, opt=args)
# save models
enc_save_dir = os.path.join(args.model_save_path, "encoder")
enc_filename = "ep{:04d}.pth".format(ep+1)
if not os.path.exists(enc_save_dir):
os.makedirs(enc_save_dir)
enc_save_path = os.path.join(enc_save_dir, enc_filename)
dec_save_dir = os.path.join(args.model_save_path, "decoder")
dec_filename = "ep{:04d}.pth".format(ep+1)
dec_save_path = os.path.join(dec_save_dir, dec_filename)
if not os.path.exists(dec_save_dir):
os.makedirs(dec_save_dir)
if n_gpu > 1 and args.dataparallel:
torch.save(video_encoder.module.state_dict(), enc_save_path)
torch.save(caption_gen.module.state_dict(), dec_save_path)
else:
torch.save(video_encoder.state_dict(), enc_save_path)
torch.save(caption_gen.state_dict(), dec_save_path)
print("saved encoder model to {}".format(enc_save_path))
print("saved decoder model to {}".format(dec_save_path))
# evaluate
print("begin evaluation for epoch {} ...".format(ep+1))
nll, ppl, metrics = validate(valloader, video_encoder, caption_gen, criterion, device, text_proc, max_it=max_val_it, opt=args)
if metrics is not None:
scheduler.step(metrics["METEOR"])
print("training time {}, epoch {:04d}/{:04d} done, validation loss: {:.06f}, perplexity: {:.03f}".format(sec2str(time.time()-begin), ep+1, args.max_epochs, nll, ppl))
print("end training")
def train_epoch(trainloader, encoder, decoder, optimizer, criterion, device, text_proc, max_it, opt):
|
def validate(valloader, encoder, decoder, criterion, device, text_proc, max_it, opt):
encoder.eval()
decoder.eval()
nll_list = []
ppl_list = []
begin = time.time()
before = time.time()
gt_list = []
ans_list = []
evaluator = NLGEval()
with torch.no_grad():
for it, data in enumerate(valloader):
# TODO: currently supports only batch size of 1, enable more in the future
ids = data['id']
durations = data['duration']
sentences = data['sentences']
timestamps = data['timestamps']
fps = data['fps']
clip = data['clip']
captions = return_idx(sentences, text_proc)
# move to device
clip = clip.to(device)
captions = captions.to(device)
target = captions.clone().detach()
# flow through model
# feature : (bs x C')
feature = encoder(clip)
# output : (batch_size, vocab_size, seq_len)
try:
output = decoder.sample(feature, captions)
# workaround for dataparallel
except AttributeError:
output = decoder.module.sample(feature, captions)
# sample : (seq_len)
sample = output.argmax(1)
# backpropagate loss and store negative log likelihood
nll = criterion(output, target).cpu().item()
nll_list.append(nll)
ppl = 2 ** nll
ppl_list.append(ppl)
gt_list.extend(sentences)
ans_list.extend(return_sentences(sample, text_proc))
if it % opt.log_every == (opt.log_every-1):
print("validation {} | iter {:06d}/{:06d} | perplexity: {:.04f} | {:02.04f}s per loop".format(sec2str(time.time()-begin), it+1, max_it, sum(ppl_list)/len(ppl_list), (time.time()-before)/opt.log_every), flush=True)
before = time.time()
samplesentence = return_sentences(sample, text_proc)
# evaluate for only 100 iterations
if it == max_it-1:
print("sample sentences:")
for s in ans_list[-10:]:
print(s)
print("---METRICS---", flush=True)
try:
metrics_dict = evaluator.compute_metrics(ref_list=[gt_list], hyp_list=ans_list)
for k, v in metrics_dict.items():
print("{}:\t\t{}".format(k, v))
except:
metrics_dict = None
print("could not evaluate, some sort of error in NLGEval", flush=True)
print("---METRICS---", flush=True)
break
meannll = sum(nll_list) / len(nll_list)
meanppl = sum(ppl_list) / len(ppl_list)
return meannll, meanppl, metrics_dict
if __name__ == '__main__':
args = parse_args()
print(args)
train_lstm(args)
| if not opt.freeze:
encoder.train()
decoder.train()
ep_begin = time.time()
before = time.time()
for it, data in enumerate(trainloader):
# TODO: currently supports only one timestamp, enable more in the future
ids = data['id']
durations = data['duration']
sentences = data['sentences']
timestamps = data['timestamps']
fps = data['fps']
clip = data['clip']
captions = return_idx(sentences, text_proc)
optimizer.zero_grad()
# move to device
clip = clip.to(device)
captions = captions.to(device)
target = captions.clone().detach()
# flow through model
if opt.freeze:
with torch.no_grad():
feature = encoder(clip)
else:
feature = encoder(clip)
# feature : (bs x C')
output = decoder(feature, captions)
# caption : (batch_size, vocab_size, seq_len)
# backpropagate loss and store negative log likelihood
nll = criterion(output, target)
nll.backward()
# gradient norm clipping
# torch.nn.utils.clip_grad_norm_(caption_gen.parameters(), max_norm=1.0)
optimizer.step()
# log losses
if it % opt.log_every == (opt.log_every-1):
print("epoch {} | iter {:06d}/{:06d} | nll loss: {:.04f} | {:02.04f}s per loop".format(sec2str(time.time()-ep_begin), it+1, max_it, nll.cpu().item(), (time.time()-before)/opt.log_every), flush=True)
before = time.time()
return encoder, decoder, optimizer | identifier_body |
train_lstm.py | import sys, os
import time
import shutil
import argparse
import functools
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.optim as optim
from nlgeval import NLGEval
sys.path.append(os.pardir)
from utils.utils import sec2str, count_parameters, weight_init, get_pretrained_from_txt
from langmodels.lstm import LSTMCaptioning
from langmodels.vocab import build_vocab, return_idx, return_sentences
from dataset.activitynet_train import ActivityNetCaptions_Train
from dataset.activitynet_valtest import ActivityNetCaptions_Val
import transforms.spatial_transforms as spt
import transforms.temporal_transforms as tpt
from options import parse_args
from utils.makemodel import generate_3dcnn, generate_rnn
def train_lstm(args):
# gpus
device = torch.device('cuda' if args.cuda and torch.cuda.is_available() else 'cpu')
# load vocabulary
annfiles = [os.path.join(args.root_path, pth) for pth in args.annpaths]
text_proc = build_vocab(annfiles, args.min_freq, args.max_seqlen)
vocab_size = len(text_proc.vocab)
# transforms
sp = spt.Compose([spt.CornerCrop(size=args.imsize), spt.ToTensor()])
tp = tpt.Compose([tpt.TemporalRandomCrop(args.clip_len), tpt.LoopPadding(args.clip_len)])
# dataloading
train_dset = ActivityNetCaptions_Train(args.root_path, ann_path='train_fps.json', sample_duration=args.clip_len, spatial_transform=sp, temporal_transform=tp)
trainloader = DataLoader(train_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_cpu, drop_last=True, timeout=100)
max_train_it = int(len(train_dset) / args.batch_size)
val_dset = ActivityNetCaptions_Val(args.root_path, ann_path=['val_1_fps.json', 'val_2_fps.json'], sample_duration=args.clip_len, spatial_transform=sp, temporal_transform=tp)
valloader = DataLoader(val_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_cpu, drop_last=True, timeout=100)
#max_val_it = int(len(val_dset) / args.batch_size)
max_val_it = 10
# models
video_encoder = generate_3dcnn(args)
caption_gen = generate_rnn(vocab_size, args)
models = [video_encoder, caption_gen]
# initialize pretrained embeddings
if args.emb_init is not None:
begin = time.time()
print("initializing embeddings from {}...".format(args.emb_init))
lookup = get_pretrained_from_txt(args.emb_init)
first = next(iter(lookup.values()))
try:
assert len(first) == args.embedding_size
except AssertionError:
print("embedding size not compatible with pretrained embeddings.")
print("specified size {}, pretrained model includes size {}".format(args.embedding_size, len(first)))
sys.exit(1)
matrix = torch.randn_like(caption_gen.emb.weight)
for char, vec in lookup.items():
if char in text_proc.vocab.stoi.keys():
id = text_proc.vocab.stoi[char]
matrix[id, :] = torch.tensor(vec)
caption_gen.init_embedding(matrix)
print("{} | successfully initialized".format(sec2str(time.time() - begin), args.emb_init))
# move models to device
n_gpu = torch.cuda.device_count()
if n_gpu > 1 and args.dataparallel:
video_encoder = nn.DataParallel(video_encoder)
caption_gen = nn.DataParallel(caption_gen)
else:
n_gpu = 1
print("using {} gpus...".format(n_gpu))
# loss function
criterion = nn.CrossEntropyLoss(ignore_index=text_proc.vocab.stoi['<pad>'])
# optimizer, scheduler
params = list(video_encoder.parameters()) + list(caption_gen.parameters())
optimizer = optim.SGD([
{"params" : video_encoder.parameters(), "lr" : args.lr_cnn, "momentum" : args.momentum_cnn},
{"params" : caption_gen.parameters(), "lr" : args.lr_rnn, "momentum" : args.momentum_rnn}],
weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.9, patience=args.patience, verbose=True)
# count parameters
num_params = sum(count_parameters(model) for model in models)
print("# of params in model : {}".format(num_params))
# joint training loop
print("start training")
begin = time.time()
for ep in range(args.max_epochs):
# train for epoch
video_encoder, caption_gen, optimizer = train_epoch(trainloader, video_encoder, caption_gen, optimizer, criterion, device, text_proc, max_it=max_train_it, opt=args)
# save models
enc_save_dir = os.path.join(args.model_save_path, "encoder")
enc_filename = "ep{:04d}.pth".format(ep+1)
if not os.path.exists(enc_save_dir):
os.makedirs(enc_save_dir)
enc_save_path = os.path.join(enc_save_dir, enc_filename)
dec_save_dir = os.path.join(args.model_save_path, "decoder")
dec_filename = "ep{:04d}.pth".format(ep+1)
dec_save_path = os.path.join(dec_save_dir, dec_filename)
if not os.path.exists(dec_save_dir):
os.makedirs(dec_save_dir)
if n_gpu > 1 and args.dataparallel:
torch.save(video_encoder.module.state_dict(), enc_save_path)
torch.save(caption_gen.module.state_dict(), dec_save_path)
else:
torch.save(video_encoder.state_dict(), enc_save_path)
torch.save(caption_gen.state_dict(), dec_save_path)
print("saved encoder model to {}".format(enc_save_path))
print("saved decoder model to {}".format(dec_save_path))
# evaluate
print("begin evaluation for epoch {} ...".format(ep+1))
nll, ppl, metrics = validate(valloader, video_encoder, caption_gen, criterion, device, text_proc, max_it=max_val_it, opt=args)
if metrics is not None:
scheduler.step(metrics["METEOR"])
print("training time {}, epoch {:04d}/{:04d} done, validation loss: {:.06f}, perplexity: {:.03f}".format(sec2str(time.time()-begin), ep+1, args.max_epochs, nll, ppl))
print("end training")
def train_epoch(trainloader, encoder, decoder, optimizer, criterion, device, text_proc, max_it, opt):
if not opt.freeze:
encoder.train()
decoder.train()
ep_begin = time.time()
before = time.time()
for it, data in enumerate(trainloader):
# TODO: currently supports only one timestamp, enable more in the future
ids = data['id']
durations = data['duration']
sentences = data['sentences']
timestamps = data['timestamps']
fps = data['fps']
clip = data['clip']
captions = return_idx(sentences, text_proc)
optimizer.zero_grad()
# move to device
clip = clip.to(device)
captions = captions.to(device)
target = captions.clone().detach()
# flow through model
if opt.freeze:
with torch.no_grad():
feature = encoder(clip)
else:
feature = encoder(clip)
# feature : (bs x C')
output = decoder(feature, captions)
# caption : (batch_size, vocab_size, seq_len)
# backpropagate loss and store negative log likelihood
nll = criterion(output, target)
nll.backward()
# gradient norm clipping
# torch.nn.utils.clip_grad_norm_(caption_gen.parameters(), max_norm=1.0)
optimizer.step()
# log losses
if it % opt.log_every == (opt.log_every-1):
print("epoch {} | iter {:06d}/{:06d} | nll loss: {:.04f} | {:02.04f}s per loop".format(sec2str(time.time()-ep_begin), it+1, max_it, nll.cpu().item(), (time.time()-before)/opt.log_every), flush=True)
before = time.time()
return encoder, decoder, optimizer
def validate(valloader, encoder, decoder, criterion, device, text_proc, max_it, opt):
encoder.eval()
decoder.eval()
nll_list = []
ppl_list = []
begin = time.time()
before = time.time()
gt_list = []
ans_list = []
evaluator = NLGEval()
with torch.no_grad():
for it, data in enumerate(valloader):
# TODO: currently supports only batch size of 1, enable more in the future
ids = data['id']
durations = data['duration']
sentences = data['sentences']
timestamps = data['timestamps']
fps = data['fps']
clip = data['clip']
captions = return_idx(sentences, text_proc)
# move to device
clip = clip.to(device)
captions = captions.to(device)
target = captions.clone().detach()
# flow through model
# feature : (bs x C')
feature = encoder(clip)
# output : (batch_size, vocab_size, seq_len)
try:
output = decoder.sample(feature, captions)
# workaround for dataparallel
except AttributeError:
output = decoder.module.sample(feature, captions)
# sample : (seq_len)
sample = output.argmax(1)
# backpropagate loss and store negative log likelihood
nll = criterion(output, target).cpu().item()
nll_list.append(nll)
ppl = 2 ** nll
ppl_list.append(ppl)
gt_list.extend(sentences)
ans_list.extend(return_sentences(sample, text_proc))
if it % opt.log_every == (opt.log_every-1):
print("validation {} | iter {:06d}/{:06d} | perplexity: {:.04f} | {:02.04f}s per loop".format(sec2str(time.time()-begin), it+1, max_it, sum(ppl_list)/len(ppl_list), (time.time()-before)/opt.log_every), flush=True) | before = time.time()
samplesentence = return_sentences(sample, text_proc)
# evaluate for only 100 iterations
if it == max_it-1:
print("sample sentences:")
for s in ans_list[-10:]:
print(s)
print("---METRICS---", flush=True)
try:
metrics_dict = evaluator.compute_metrics(ref_list=[gt_list], hyp_list=ans_list)
for k, v in metrics_dict.items():
print("{}:\t\t{}".format(k, v))
except:
metrics_dict = None
print("could not evaluate, some sort of error in NLGEval", flush=True)
print("---METRICS---", flush=True)
break
meannll = sum(nll_list) / len(nll_list)
meanppl = sum(ppl_list) / len(ppl_list)
return meannll, meanppl, metrics_dict
if __name__ == '__main__':
args = parse_args()
print(args)
train_lstm(args) | random_line_split | |
train_lstm.py | import sys, os
import time
import shutil
import argparse
import functools
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.optim as optim
from nlgeval import NLGEval
sys.path.append(os.pardir)
from utils.utils import sec2str, count_parameters, weight_init, get_pretrained_from_txt
from langmodels.lstm import LSTMCaptioning
from langmodels.vocab import build_vocab, return_idx, return_sentences
from dataset.activitynet_train import ActivityNetCaptions_Train
from dataset.activitynet_valtest import ActivityNetCaptions_Val
import transforms.spatial_transforms as spt
import transforms.temporal_transforms as tpt
from options import parse_args
from utils.makemodel import generate_3dcnn, generate_rnn
def train_lstm(args):
# gpus
device = torch.device('cuda' if args.cuda and torch.cuda.is_available() else 'cpu')
# load vocabulary
annfiles = [os.path.join(args.root_path, pth) for pth in args.annpaths]
text_proc = build_vocab(annfiles, args.min_freq, args.max_seqlen)
vocab_size = len(text_proc.vocab)
# transforms
sp = spt.Compose([spt.CornerCrop(size=args.imsize), spt.ToTensor()])
tp = tpt.Compose([tpt.TemporalRandomCrop(args.clip_len), tpt.LoopPadding(args.clip_len)])
# dataloading
train_dset = ActivityNetCaptions_Train(args.root_path, ann_path='train_fps.json', sample_duration=args.clip_len, spatial_transform=sp, temporal_transform=tp)
trainloader = DataLoader(train_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_cpu, drop_last=True, timeout=100)
max_train_it = int(len(train_dset) / args.batch_size)
val_dset = ActivityNetCaptions_Val(args.root_path, ann_path=['val_1_fps.json', 'val_2_fps.json'], sample_duration=args.clip_len, spatial_transform=sp, temporal_transform=tp)
valloader = DataLoader(val_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_cpu, drop_last=True, timeout=100)
#max_val_it = int(len(val_dset) / args.batch_size)
max_val_it = 10
# models
video_encoder = generate_3dcnn(args)
caption_gen = generate_rnn(vocab_size, args)
models = [video_encoder, caption_gen]
# initialize pretrained embeddings
if args.emb_init is not None:
begin = time.time()
print("initializing embeddings from {}...".format(args.emb_init))
lookup = get_pretrained_from_txt(args.emb_init)
first = next(iter(lookup.values()))
try:
assert len(first) == args.embedding_size
except AssertionError:
print("embedding size not compatible with pretrained embeddings.")
print("specified size {}, pretrained model includes size {}".format(args.embedding_size, len(first)))
sys.exit(1)
matrix = torch.randn_like(caption_gen.emb.weight)
for char, vec in lookup.items():
if char in text_proc.vocab.stoi.keys():
id = text_proc.vocab.stoi[char]
matrix[id, :] = torch.tensor(vec)
caption_gen.init_embedding(matrix)
print("{} | successfully initialized".format(sec2str(time.time() - begin), args.emb_init))
# move models to device
n_gpu = torch.cuda.device_count()
if n_gpu > 1 and args.dataparallel:
video_encoder = nn.DataParallel(video_encoder)
caption_gen = nn.DataParallel(caption_gen)
else:
n_gpu = 1
print("using {} gpus...".format(n_gpu))
# loss function
criterion = nn.CrossEntropyLoss(ignore_index=text_proc.vocab.stoi['<pad>'])
# optimizer, scheduler
params = list(video_encoder.parameters()) + list(caption_gen.parameters())
optimizer = optim.SGD([
{"params" : video_encoder.parameters(), "lr" : args.lr_cnn, "momentum" : args.momentum_cnn},
{"params" : caption_gen.parameters(), "lr" : args.lr_rnn, "momentum" : args.momentum_rnn}],
weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.9, patience=args.patience, verbose=True)
# count parameters
num_params = sum(count_parameters(model) for model in models)
print("# of params in model : {}".format(num_params))
# joint training loop
print("start training")
begin = time.time()
for ep in range(args.max_epochs):
# train for epoch
video_encoder, caption_gen, optimizer = train_epoch(trainloader, video_encoder, caption_gen, optimizer, criterion, device, text_proc, max_it=max_train_it, opt=args)
# save models
enc_save_dir = os.path.join(args.model_save_path, "encoder")
enc_filename = "ep{:04d}.pth".format(ep+1)
if not os.path.exists(enc_save_dir):
os.makedirs(enc_save_dir)
enc_save_path = os.path.join(enc_save_dir, enc_filename)
dec_save_dir = os.path.join(args.model_save_path, "decoder")
dec_filename = "ep{:04d}.pth".format(ep+1)
dec_save_path = os.path.join(dec_save_dir, dec_filename)
if not os.path.exists(dec_save_dir):
os.makedirs(dec_save_dir)
if n_gpu > 1 and args.dataparallel:
torch.save(video_encoder.module.state_dict(), enc_save_path)
torch.save(caption_gen.module.state_dict(), dec_save_path)
else:
torch.save(video_encoder.state_dict(), enc_save_path)
torch.save(caption_gen.state_dict(), dec_save_path)
print("saved encoder model to {}".format(enc_save_path))
print("saved decoder model to {}".format(dec_save_path))
# evaluate
print("begin evaluation for epoch {} ...".format(ep+1))
nll, ppl, metrics = validate(valloader, video_encoder, caption_gen, criterion, device, text_proc, max_it=max_val_it, opt=args)
if metrics is not None:
scheduler.step(metrics["METEOR"])
print("training time {}, epoch {:04d}/{:04d} done, validation loss: {:.06f}, perplexity: {:.03f}".format(sec2str(time.time()-begin), ep+1, args.max_epochs, nll, ppl))
print("end training")
def train_epoch(trainloader, encoder, decoder, optimizer, criterion, device, text_proc, max_it, opt):
if not opt.freeze:
encoder.train()
decoder.train()
ep_begin = time.time()
before = time.time()
for it, data in enumerate(trainloader):
# TODO: currently supports only one timestamp, enable more in the future
ids = data['id']
durations = data['duration']
sentences = data['sentences']
timestamps = data['timestamps']
fps = data['fps']
clip = data['clip']
captions = return_idx(sentences, text_proc)
optimizer.zero_grad()
# move to device
clip = clip.to(device)
captions = captions.to(device)
target = captions.clone().detach()
# flow through model
if opt.freeze:
with torch.no_grad():
feature = encoder(clip)
else:
feature = encoder(clip)
# feature : (bs x C')
output = decoder(feature, captions)
# caption : (batch_size, vocab_size, seq_len)
# backpropagate loss and store negative log likelihood
nll = criterion(output, target)
nll.backward()
# gradient norm clipping
# torch.nn.utils.clip_grad_norm_(caption_gen.parameters(), max_norm=1.0)
optimizer.step()
# log losses
if it % opt.log_every == (opt.log_every-1):
print("epoch {} | iter {:06d}/{:06d} | nll loss: {:.04f} | {:02.04f}s per loop".format(sec2str(time.time()-ep_begin), it+1, max_it, nll.cpu().item(), (time.time()-before)/opt.log_every), flush=True)
before = time.time()
return encoder, decoder, optimizer
def validate(valloader, encoder, decoder, criterion, device, text_proc, max_it, opt):
encoder.eval()
decoder.eval()
nll_list = []
ppl_list = []
begin = time.time()
before = time.time()
gt_list = []
ans_list = []
evaluator = NLGEval()
with torch.no_grad():
for it, data in enumerate(valloader):
# TODO: currently supports only batch size of 1, enable more in the future
ids = data['id']
durations = data['duration']
sentences = data['sentences']
timestamps = data['timestamps']
fps = data['fps']
clip = data['clip']
captions = return_idx(sentences, text_proc)
# move to device
clip = clip.to(device)
captions = captions.to(device)
target = captions.clone().detach()
# flow through model
# feature : (bs x C')
feature = encoder(clip)
# output : (batch_size, vocab_size, seq_len)
try:
output = decoder.sample(feature, captions)
# workaround for dataparallel
except AttributeError:
output = decoder.module.sample(feature, captions)
# sample : (seq_len)
sample = output.argmax(1)
# backpropagate loss and store negative log likelihood
nll = criterion(output, target).cpu().item()
nll_list.append(nll)
ppl = 2 ** nll
ppl_list.append(ppl)
gt_list.extend(sentences)
ans_list.extend(return_sentences(sample, text_proc))
if it % opt.log_every == (opt.log_every-1):
print("validation {} | iter {:06d}/{:06d} | perplexity: {:.04f} | {:02.04f}s per loop".format(sec2str(time.time()-begin), it+1, max_it, sum(ppl_list)/len(ppl_list), (time.time()-before)/opt.log_every), flush=True)
before = time.time()
samplesentence = return_sentences(sample, text_proc)
# evaluate for only 100 iterations
if it == max_it-1:
print("sample sentences:")
for s in ans_list[-10:]:
print(s)
print("---METRICS---", flush=True)
try:
metrics_dict = evaluator.compute_metrics(ref_list=[gt_list], hyp_list=ans_list)
for k, v in metrics_dict.items():
|
except:
metrics_dict = None
print("could not evaluate, some sort of error in NLGEval", flush=True)
print("---METRICS---", flush=True)
break
meannll = sum(nll_list) / len(nll_list)
meanppl = sum(ppl_list) / len(ppl_list)
return meannll, meanppl, metrics_dict
if __name__ == '__main__':
args = parse_args()
print(args)
train_lstm(args)
| print("{}:\t\t{}".format(k, v)) | conditional_block |
promise_future_glue.rs | //! Gluing Rust's `Future` and JavaScript's `Promise` together.
//!
//! JavaScript's `Promise` and Rust's `Future` are two abstractions with the
//! same goal: asynchronous programming with eventual values. Both `Promise`s
//! and `Future`s represent a value that may or may not have been computed
//! yet. However, the way that you interact with each of them is very different.
//!
//! JavaScript's `Promise` follows a completion-based model. You register
//! callbacks on a promise, and the runtime invokes each of the registered
//! callbacks (there may be many!) when the promise is resolved with a
//! value. You build larger asynchronous computations by chaining promises;
//! registering a callback with a promise returns a new promise of that
//! callback's result. Dependencies between promises are managed by the runtime.
//!
//! Rust's `Future` follows a readiness-based model. You define a `poll` method
//! that either returns the future's value if it is ready, or a sentinel that
//! says the future's value is not ready yet. You build larger asynchronous
//! computations by defining (or using existing) combinators that wrap some
//! inner future (or futures). These combinators delegate polling to the inner
//! future, and once the inner future's value is ready, perform their
//! computation on the value, and return the transformed result. Dependencies
//! between futures are managed by the futures themselves, while scheduling
//! polling is the runtime's responsibility.
//!
//! To translate between futures and promises we take two different approaches
//! for each direction, by necessity.
//!
//! To treat a Rust `Future` as a JavaScript `Promise`, we define a `Future`
//! combinator called `Future2Promise`. It takes a fresh, un-resolved `Promise`
//! object, and an inner future upon construction. It's `poll` method delegates
//! to the inner future's `poll`, and once the inner future's value is ready, it
//! resolves the promise with the ready value, and the JavaScript runtime
//! ensures that the promise's registered callbacks are invoked appropriately.
//!
//! To treat a JavaScript `Promise` as a Rust `Future`, we register a callback
//! to the promise that sends the resolution value over a one-shot
//! channel. One-shot channels are split into their two halves: sender and
//! receiver. The sender half moves into the callback, but the receiver half is
//! a future, and it represents the future resolution value of the original
//! promise.
//!
//! The final concern to address is that the runtime scheduling the polling of
//! Rust `Future`s (`tokio`) still knows which futures to poll despite it only
//! seeing half the picture now. I say "half the picture" because dependencies
//! that would otherwise live fully within the futures ecosystem are now hidden
//! in promises inside the JavaScript runtime.
//!
//! First, we must understand how `tokio` schedules polling. It is not busy
//! spinning and calling `poll` continuously in a loop. `tokio` maintains a set
//! of "root" futures. These are the futures passed to `Core::run` and
//! `Handle::spawn` directly. When `tokio` polls a "root" future, that `poll`
//! call will transitively reach down and call `poll` on "leaf" futures that
//! wrap file descriptors and sockets and such things. It is these "leaf"
//! futures' responsibilty to use OS APIs to trigger wake ups when new data is
//! available on a socket or what have you, and then it is `tokio`'s
//! responsibilty to map that wake up back to which "root" future it should poll
//! again. If the "leaf" futures do not properly register to be woken up again,
//! `tokio` will never poll that "root" future again, effectively dead locking
//! it.
//!
//! So we must ensure that our `Promise`-backed futures will always be polled
//! again by making sure that they have proper "leaf" futures. Luckily, the
//! receiver half of a one-shot channel is such a "leaf" future that properly
//! registers future wake ups. If instead, for example, we tried directly
//! checking the promise's state in `poll` with JSAPI methods, we *wouldn't*
//! register any wake ups, `tokio` would never `poll` the future again, and the
//! future would dead lock.
use super::{Error, ErrorKind};
use futures::{self, Async, Future, Poll, Select};
use futures::sync::oneshot;
use future_ext::{ready, FutureExt};
use gc_roots::GcRoot;
use js::conversions::{ConversionResult, FromJSValConvertible, ToJSValConvertible};
use js::glue::ReportError;
use js::jsapi;
use js::jsval;
use js::rust::Runtime as JsRuntime;
use state_machine_future::RentToOwn;
use std::marker::PhantomData;
use std::os::raw;
use std::ptr;
use task;
use void::Void;
type GenericVoid<T> = (Void, PhantomData<T>);
/// A future that resolves a promise with its inner future's value when ready.
#[derive(StateMachineFuture)]
#[allow(dead_code)]
enum Future2Promise<F>
where
F: Future,
<F as Future>::Item: ToJSValConvertible,
<F as Future>::Error: ToJSValConvertible,
{
/// Initially, we are waiting on the inner future to be ready or error.
#[state_machine_future(start, transitions(Finished, NotifyingOfError))]
WaitingOnInner {
future: F,
promise: GcRoot<*mut jsapi::JSObject>,
},
/// If we encountered an error that needs to propagate, we must send it to
/// the task.
#[state_machine_future(transitions(Finished))]
NotifyingOfError {
notify: futures::sink::Send<futures::sync::mpsc::Sender<task::TaskMessage>>,
phantom: PhantomData<F>,
},
/// All done.
#[state_machine_future(ready)]
Finished(PhantomData<F>),
/// We explicitly handle all errors, so make `Future::Error` impossible to
/// construct.
#[state_machine_future(error)]
Impossible(GenericVoid<F>),
}
impl<F> PollFuture2Promise<F> for Future2Promise<F>
where
F: Future,
<F as Future>::Item: ToJSValConvertible,
<F as Future>::Error: ToJSValConvertible,
{
fn | <'a>(
waiting: &'a mut RentToOwn<'a, WaitingOnInner<F>>,
) -> Poll<AfterWaitingOnInner<F>, GenericVoid<F>> {
let error = match waiting.future.poll() {
Ok(Async::NotReady) => {
return Ok(Async::NotReady);
}
Ok(Async::Ready(t)) => {
let cx = JsRuntime::get();
unsafe {
rooted!(in(cx) let mut val = jsval::UndefinedValue());
t.to_jsval(cx, val.handle_mut());
rooted!(in(cx) let promise = waiting.promise.raw());
assert!(jsapi::JS::ResolvePromise(
cx,
promise.handle(),
val.handle()
));
if let Err(e) = task::drain_micro_task_queue() {
e
} else {
return ready(Finished(PhantomData));
}
}
}
Err(error) => {
let cx = JsRuntime::get();
unsafe {
rooted!(in(cx) let mut val = jsval::UndefinedValue());
error.to_jsval(cx, val.handle_mut());
rooted!(in(cx) let promise = waiting.promise.raw());
assert!(jsapi::JS::RejectPromise(cx, promise.handle(), val.handle()));
if let Err(e) = task::drain_micro_task_queue() {
e
} else {
return ready(Finished(PhantomData));
}
}
}
};
let msg = task::TaskMessage::UnhandledRejectedPromise { error };
ready(NotifyingOfError {
notify: task::this_task().send(msg),
phantom: PhantomData,
})
}
fn poll_notifying_of_error<'a>(
notification: &'a mut RentToOwn<'a, NotifyingOfError<F>>,
) -> Poll<AfterNotifyingOfError<F>, GenericVoid<F>> {
match notification.notify.poll() {
Ok(Async::NotReady) => {
return Ok(Async::NotReady);
}
// The only way we can get an error here is if we lost a
// race between notifying the task of an error and the task
// finishing.
Err(_) | Ok(Async::Ready(_)) => ready(Finished(PhantomData)),
}
}
}
/// Convert a Rust `Future` into a JavaScript `Promise`.
///
/// The `Future` is spawned onto the current thread's `tokio` event loop.
pub fn future_to_promise<F, T, E>(future: F) -> GcRoot<*mut jsapi::JSObject>
where
F: 'static + Future<Item = T, Error = E>,
T: 'static + ToJSValConvertible,
E: 'static + ToJSValConvertible,
{
let cx = JsRuntime::get();
rooted!(in(cx) let executor = ptr::null_mut());
rooted!(in(cx) let proto = ptr::null_mut());
rooted!(in(cx) let promise = unsafe {
jsapi::JS::NewPromiseObject(
cx,
executor.handle(),
proto.handle()
)
});
assert!(!promise.get().is_null());
let promise = GcRoot::new(promise.get());
let future = Future2Promise::start(future, promise.clone()).ignore_results();
task::event_loop().spawn(future);
promise
}
const CLOSURE_SLOT: usize = 0;
// JSNative that forwards the call `f`.
unsafe extern "C" fn trampoline<F>(
cx: *mut jsapi::JSContext,
argc: raw::c_uint,
vp: *mut jsapi::JS::Value,
) -> bool
where
F: 'static + FnOnce(*mut jsapi::JSContext, &jsapi::JS::CallArgs) -> bool,
{
let args = jsapi::JS::CallArgs::from_vp(vp, argc);
rooted!(in(cx) let callee = args.callee());
let private = jsapi::js::GetFunctionNativeReserved(callee.get(), CLOSURE_SLOT);
let f = (*private).to_private() as *mut F;
if f.is_null() {
ReportError(cx, b"May only be called once\0".as_ptr() as *const _);
return false;
}
let private = jsval::PrivateValue(ptr::null());
jsapi::js::SetFunctionNativeReserved(callee.get(), CLOSURE_SLOT, &private);
let f = Box::from_raw(f);
f(cx, &args)
}
/// This is unsafe because the resulting function object will _not_ trace `f`'s
/// closed over values. Don't close over GC things!
unsafe fn make_js_fn<F>(f: F) -> GcRoot<*mut jsapi::JSObject>
where
F: 'static + FnOnce(*mut jsapi::JSContext, &jsapi::JS::CallArgs) -> bool,
{
let cx = JsRuntime::get();
rooted!(in(cx) let func = jsapi::js::NewFunctionWithReserved(
cx,
Some(trampoline::<F>),
0, // nargs
0, // flags
ptr::null_mut() // name
));
assert!(!func.get().is_null());
let private = Box::new(f);
let private = jsval::PrivateValue(Box::into_raw(private) as *const _);
jsapi::js::SetFunctionNativeReserved(func.get() as *mut _, CLOSURE_SLOT, &private);
GcRoot::new(func.get() as *mut jsapi::JSObject)
}
type ResultReceiver<T, E> = oneshot::Receiver<super::Result<Result<T, E>>>;
/// A future of either a JavaScript promise's resolution `T` or rejection `E`.
pub struct Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
inner: Select<ResultReceiver<T, E>, ResultReceiver<T, E>>,
}
impl<T, E> ::std::fmt::Debug for Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Promise2Future {{ .. }}")
}
}
impl<T, E> Future for Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
type Item = Result<T, E>;
type Error = super::Error;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
match self.inner.poll() {
Err((oneshot::Canceled, _)) => {
Err(ErrorKind::JavaScriptPromiseCollectedWithoutSettling.into())
}
Ok(Async::NotReady) => Ok(Async::NotReady),
// One of the handlers was called, but then we encountered an error
// converting the value from JS into Rust or something like that.
Ok(Async::Ready((Err(e), _))) => Err(e),
Ok(Async::Ready((Ok(result), _))) => Ok(Async::Ready(result)),
}
}
}
/// Convert the given JavaScript `Promise` object into a future.
///
/// The resulting future is of either an `Ok(T)` if the promise gets resolved,
/// or an `Err(E)` if the promise is rejected.
///
/// Failure to convert the resolution or rejection JavaScript value into a `T`
/// or `E` will cause the resulting future's `poll` to return an error.
///
/// If the promise object is reclaimed by the garbage collector without being
/// resolved or rejected, then the resulting future's `poll` will return an
/// error of kind `ErrorKind::JavaScriptPromiseCollectedWithoutSettling`.
pub fn promise_to_future<T, E>(promise: &GcRoot<*mut jsapi::JSObject>) -> Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
unsafe {
let cx = JsRuntime::get();
let (resolve_sender, resolve_receiver) = oneshot::channel();
let on_resolve = make_js_fn(move |cx, args| {
match T::from_jsval(cx, args.get(0), ()) {
Err(()) => {
let err = Error::from_cx(cx);
let _ = resolve_sender.send(Err(err));
}
Ok(ConversionResult::Failure(s)) => {
let err = Err(ErrorKind::Msg(s.to_string()).into());
let _ = resolve_sender.send(err);
}
Ok(ConversionResult::Success(t)) => {
let _ = resolve_sender.send(Ok(Ok(t)));
}
}
true
});
let (reject_sender, reject_receiver) = oneshot::channel();
let on_reject = make_js_fn(move |cx, args| {
match E::from_jsval(cx, args.get(0), ()) {
Err(()) => {
let err = Error::from_cx(cx);
let _ = reject_sender.send(Err(err));
}
Ok(ConversionResult::Failure(s)) => {
let err = Err(ErrorKind::Msg(s.to_string()).into());
let _ = reject_sender.send(err);
}
Ok(ConversionResult::Success(t)) => {
let _ = reject_sender.send(Ok(Err(t)));
}
}
true
});
rooted!(in(cx) let promise = promise.raw());
rooted!(in(cx) let on_resolve = on_resolve.raw());
rooted!(in(cx) let on_reject = on_reject.raw());
assert!(jsapi::JS::AddPromiseReactions(
cx,
promise.handle(),
on_resolve.handle(),
on_reject.handle()
));
Promise2Future {
inner: resolve_receiver.select(reject_receiver),
}
}
}
| poll_waiting_on_inner | identifier_name |
promise_future_glue.rs | //! Gluing Rust's `Future` and JavaScript's `Promise` together.
//!
//! JavaScript's `Promise` and Rust's `Future` are two abstractions with the
//! same goal: asynchronous programming with eventual values. Both `Promise`s
//! and `Future`s represent a value that may or may not have been computed
//! yet. However, the way that you interact with each of them is very different.
//!
//! JavaScript's `Promise` follows a completion-based model. You register
//! callbacks on a promise, and the runtime invokes each of the registered
//! callbacks (there may be many!) when the promise is resolved with a
//! value. You build larger asynchronous computations by chaining promises;
//! registering a callback with a promise returns a new promise of that
//! callback's result. Dependencies between promises are managed by the runtime.
//!
//! Rust's `Future` follows a readiness-based model. You define a `poll` method
//! that either returns the future's value if it is ready, or a sentinel that
//! says the future's value is not ready yet. You build larger asynchronous
//! computations by defining (or using existing) combinators that wrap some
//! inner future (or futures). These combinators delegate polling to the inner
//! future, and once the inner future's value is ready, perform their
//! computation on the value, and return the transformed result. Dependencies
//! between futures are managed by the futures themselves, while scheduling
//! polling is the runtime's responsibility.
//!
//! To translate between futures and promises we take two different approaches
//! for each direction, by necessity.
//!
//! To treat a Rust `Future` as a JavaScript `Promise`, we define a `Future`
//! combinator called `Future2Promise`. It takes a fresh, un-resolved `Promise`
//! object, and an inner future upon construction. It's `poll` method delegates
//! to the inner future's `poll`, and once the inner future's value is ready, it
//! resolves the promise with the ready value, and the JavaScript runtime
//! ensures that the promise's registered callbacks are invoked appropriately.
//!
//! To treat a JavaScript `Promise` as a Rust `Future`, we register a callback
//! to the promise that sends the resolution value over a one-shot
//! channel. One-shot channels are split into their two halves: sender and
//! receiver. The sender half moves into the callback, but the receiver half is
//! a future, and it represents the future resolution value of the original
//! promise.
//!
//! The final concern to address is that the runtime scheduling the polling of
//! Rust `Future`s (`tokio`) still knows which futures to poll despite it only
//! seeing half the picture now. I say "half the picture" because dependencies
//! that would otherwise live fully within the futures ecosystem are now hidden
//! in promises inside the JavaScript runtime.
//!
//! First, we must understand how `tokio` schedules polling. It is not busy
//! spinning and calling `poll` continuously in a loop. `tokio` maintains a set
//! of "root" futures. These are the futures passed to `Core::run` and
//! `Handle::spawn` directly. When `tokio` polls a "root" future, that `poll`
//! call will transitively reach down and call `poll` on "leaf" futures that
//! wrap file descriptors and sockets and such things. It is these "leaf"
//! futures' responsibilty to use OS APIs to trigger wake ups when new data is
//! available on a socket or what have you, and then it is `tokio`'s
//! responsibilty to map that wake up back to which "root" future it should poll
//! again. If the "leaf" futures do not properly register to be woken up again,
//! `tokio` will never poll that "root" future again, effectively dead locking
//! it.
//!
//! So we must ensure that our `Promise`-backed futures will always be polled
//! again by making sure that they have proper "leaf" futures. Luckily, the
//! receiver half of a one-shot channel is such a "leaf" future that properly
//! registers future wake ups. If instead, for example, we tried directly
//! checking the promise's state in `poll` with JSAPI methods, we *wouldn't*
//! register any wake ups, `tokio` would never `poll` the future again, and the
//! future would dead lock.
use super::{Error, ErrorKind};
use futures::{self, Async, Future, Poll, Select};
use futures::sync::oneshot;
use future_ext::{ready, FutureExt};
use gc_roots::GcRoot;
use js::conversions::{ConversionResult, FromJSValConvertible, ToJSValConvertible};
use js::glue::ReportError;
use js::jsapi;
use js::jsval;
use js::rust::Runtime as JsRuntime;
use state_machine_future::RentToOwn;
use std::marker::PhantomData;
use std::os::raw;
use std::ptr;
use task;
use void::Void;
type GenericVoid<T> = (Void, PhantomData<T>);
/// A future that resolves a promise with its inner future's value when ready.
#[derive(StateMachineFuture)]
#[allow(dead_code)] | enum Future2Promise<F>
where
F: Future,
<F as Future>::Item: ToJSValConvertible,
<F as Future>::Error: ToJSValConvertible,
{
/// Initially, we are waiting on the inner future to be ready or error.
#[state_machine_future(start, transitions(Finished, NotifyingOfError))]
WaitingOnInner {
future: F,
promise: GcRoot<*mut jsapi::JSObject>,
},
/// If we encountered an error that needs to propagate, we must send it to
/// the task.
#[state_machine_future(transitions(Finished))]
NotifyingOfError {
notify: futures::sink::Send<futures::sync::mpsc::Sender<task::TaskMessage>>,
phantom: PhantomData<F>,
},
/// All done.
#[state_machine_future(ready)]
Finished(PhantomData<F>),
/// We explicitly handle all errors, so make `Future::Error` impossible to
/// construct.
#[state_machine_future(error)]
Impossible(GenericVoid<F>),
}
impl<F> PollFuture2Promise<F> for Future2Promise<F>
where
F: Future,
<F as Future>::Item: ToJSValConvertible,
<F as Future>::Error: ToJSValConvertible,
{
fn poll_waiting_on_inner<'a>(
waiting: &'a mut RentToOwn<'a, WaitingOnInner<F>>,
) -> Poll<AfterWaitingOnInner<F>, GenericVoid<F>> {
let error = match waiting.future.poll() {
Ok(Async::NotReady) => {
return Ok(Async::NotReady);
}
Ok(Async::Ready(t)) => {
let cx = JsRuntime::get();
unsafe {
rooted!(in(cx) let mut val = jsval::UndefinedValue());
t.to_jsval(cx, val.handle_mut());
rooted!(in(cx) let promise = waiting.promise.raw());
assert!(jsapi::JS::ResolvePromise(
cx,
promise.handle(),
val.handle()
));
if let Err(e) = task::drain_micro_task_queue() {
e
} else {
return ready(Finished(PhantomData));
}
}
}
Err(error) => {
let cx = JsRuntime::get();
unsafe {
rooted!(in(cx) let mut val = jsval::UndefinedValue());
error.to_jsval(cx, val.handle_mut());
rooted!(in(cx) let promise = waiting.promise.raw());
assert!(jsapi::JS::RejectPromise(cx, promise.handle(), val.handle()));
if let Err(e) = task::drain_micro_task_queue() {
e
} else {
return ready(Finished(PhantomData));
}
}
}
};
let msg = task::TaskMessage::UnhandledRejectedPromise { error };
ready(NotifyingOfError {
notify: task::this_task().send(msg),
phantom: PhantomData,
})
}
fn poll_notifying_of_error<'a>(
notification: &'a mut RentToOwn<'a, NotifyingOfError<F>>,
) -> Poll<AfterNotifyingOfError<F>, GenericVoid<F>> {
match notification.notify.poll() {
Ok(Async::NotReady) => {
return Ok(Async::NotReady);
}
// The only way we can get an error here is if we lost a
// race between notifying the task of an error and the task
// finishing.
Err(_) | Ok(Async::Ready(_)) => ready(Finished(PhantomData)),
}
}
}
/// Convert a Rust `Future` into a JavaScript `Promise`.
///
/// The `Future` is spawned onto the current thread's `tokio` event loop.
pub fn future_to_promise<F, T, E>(future: F) -> GcRoot<*mut jsapi::JSObject>
where
F: 'static + Future<Item = T, Error = E>,
T: 'static + ToJSValConvertible,
E: 'static + ToJSValConvertible,
{
let cx = JsRuntime::get();
rooted!(in(cx) let executor = ptr::null_mut());
rooted!(in(cx) let proto = ptr::null_mut());
rooted!(in(cx) let promise = unsafe {
jsapi::JS::NewPromiseObject(
cx,
executor.handle(),
proto.handle()
)
});
assert!(!promise.get().is_null());
let promise = GcRoot::new(promise.get());
let future = Future2Promise::start(future, promise.clone()).ignore_results();
task::event_loop().spawn(future);
promise
}
const CLOSURE_SLOT: usize = 0;
// JSNative that forwards the call `f`.
unsafe extern "C" fn trampoline<F>(
cx: *mut jsapi::JSContext,
argc: raw::c_uint,
vp: *mut jsapi::JS::Value,
) -> bool
where
F: 'static + FnOnce(*mut jsapi::JSContext, &jsapi::JS::CallArgs) -> bool,
{
let args = jsapi::JS::CallArgs::from_vp(vp, argc);
rooted!(in(cx) let callee = args.callee());
let private = jsapi::js::GetFunctionNativeReserved(callee.get(), CLOSURE_SLOT);
let f = (*private).to_private() as *mut F;
if f.is_null() {
ReportError(cx, b"May only be called once\0".as_ptr() as *const _);
return false;
}
let private = jsval::PrivateValue(ptr::null());
jsapi::js::SetFunctionNativeReserved(callee.get(), CLOSURE_SLOT, &private);
let f = Box::from_raw(f);
f(cx, &args)
}
/// This is unsafe because the resulting function object will _not_ trace `f`'s
/// closed over values. Don't close over GC things!
unsafe fn make_js_fn<F>(f: F) -> GcRoot<*mut jsapi::JSObject>
where
F: 'static + FnOnce(*mut jsapi::JSContext, &jsapi::JS::CallArgs) -> bool,
{
let cx = JsRuntime::get();
rooted!(in(cx) let func = jsapi::js::NewFunctionWithReserved(
cx,
Some(trampoline::<F>),
0, // nargs
0, // flags
ptr::null_mut() // name
));
assert!(!func.get().is_null());
let private = Box::new(f);
let private = jsval::PrivateValue(Box::into_raw(private) as *const _);
jsapi::js::SetFunctionNativeReserved(func.get() as *mut _, CLOSURE_SLOT, &private);
GcRoot::new(func.get() as *mut jsapi::JSObject)
}
type ResultReceiver<T, E> = oneshot::Receiver<super::Result<Result<T, E>>>;
/// A future of either a JavaScript promise's resolution `T` or rejection `E`.
pub struct Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
inner: Select<ResultReceiver<T, E>, ResultReceiver<T, E>>,
}
impl<T, E> ::std::fmt::Debug for Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Promise2Future {{ .. }}")
}
}
impl<T, E> Future for Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
type Item = Result<T, E>;
type Error = super::Error;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
match self.inner.poll() {
Err((oneshot::Canceled, _)) => {
Err(ErrorKind::JavaScriptPromiseCollectedWithoutSettling.into())
}
Ok(Async::NotReady) => Ok(Async::NotReady),
// One of the handlers was called, but then we encountered an error
// converting the value from JS into Rust or something like that.
Ok(Async::Ready((Err(e), _))) => Err(e),
Ok(Async::Ready((Ok(result), _))) => Ok(Async::Ready(result)),
}
}
}
/// Convert the given JavaScript `Promise` object into a future.
///
/// The resulting future is of either an `Ok(T)` if the promise gets resolved,
/// or an `Err(E)` if the promise is rejected.
///
/// Failure to convert the resolution or rejection JavaScript value into a `T`
/// or `E` will cause the resulting future's `poll` to return an error.
///
/// If the promise object is reclaimed by the garbage collector without being
/// resolved or rejected, then the resulting future's `poll` will return an
/// error of kind `ErrorKind::JavaScriptPromiseCollectedWithoutSettling`.
pub fn promise_to_future<T, E>(promise: &GcRoot<*mut jsapi::JSObject>) -> Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
unsafe {
let cx = JsRuntime::get();
let (resolve_sender, resolve_receiver) = oneshot::channel();
let on_resolve = make_js_fn(move |cx, args| {
match T::from_jsval(cx, args.get(0), ()) {
Err(()) => {
let err = Error::from_cx(cx);
let _ = resolve_sender.send(Err(err));
}
Ok(ConversionResult::Failure(s)) => {
let err = Err(ErrorKind::Msg(s.to_string()).into());
let _ = resolve_sender.send(err);
}
Ok(ConversionResult::Success(t)) => {
let _ = resolve_sender.send(Ok(Ok(t)));
}
}
true
});
let (reject_sender, reject_receiver) = oneshot::channel();
let on_reject = make_js_fn(move |cx, args| {
match E::from_jsval(cx, args.get(0), ()) {
Err(()) => {
let err = Error::from_cx(cx);
let _ = reject_sender.send(Err(err));
}
Ok(ConversionResult::Failure(s)) => {
let err = Err(ErrorKind::Msg(s.to_string()).into());
let _ = reject_sender.send(err);
}
Ok(ConversionResult::Success(t)) => {
let _ = reject_sender.send(Ok(Err(t)));
}
}
true
});
rooted!(in(cx) let promise = promise.raw());
rooted!(in(cx) let on_resolve = on_resolve.raw());
rooted!(in(cx) let on_reject = on_reject.raw());
assert!(jsapi::JS::AddPromiseReactions(
cx,
promise.handle(),
on_resolve.handle(),
on_reject.handle()
));
Promise2Future {
inner: resolve_receiver.select(reject_receiver),
}
}
} | random_line_split | |
promise_future_glue.rs | //! Gluing Rust's `Future` and JavaScript's `Promise` together.
//!
//! JavaScript's `Promise` and Rust's `Future` are two abstractions with the
//! same goal: asynchronous programming with eventual values. Both `Promise`s
//! and `Future`s represent a value that may or may not have been computed
//! yet. However, the way that you interact with each of them is very different.
//!
//! JavaScript's `Promise` follows a completion-based model. You register
//! callbacks on a promise, and the runtime invokes each of the registered
//! callbacks (there may be many!) when the promise is resolved with a
//! value. You build larger asynchronous computations by chaining promises;
//! registering a callback with a promise returns a new promise of that
//! callback's result. Dependencies between promises are managed by the runtime.
//!
//! Rust's `Future` follows a readiness-based model. You define a `poll` method
//! that either returns the future's value if it is ready, or a sentinel that
//! says the future's value is not ready yet. You build larger asynchronous
//! computations by defining (or using existing) combinators that wrap some
//! inner future (or futures). These combinators delegate polling to the inner
//! future, and once the inner future's value is ready, perform their
//! computation on the value, and return the transformed result. Dependencies
//! between futures are managed by the futures themselves, while scheduling
//! polling is the runtime's responsibility.
//!
//! To translate between futures and promises we take two different approaches
//! for each direction, by necessity.
//!
//! To treat a Rust `Future` as a JavaScript `Promise`, we define a `Future`
//! combinator called `Future2Promise`. It takes a fresh, un-resolved `Promise`
//! object, and an inner future upon construction. It's `poll` method delegates
//! to the inner future's `poll`, and once the inner future's value is ready, it
//! resolves the promise with the ready value, and the JavaScript runtime
//! ensures that the promise's registered callbacks are invoked appropriately.
//!
//! To treat a JavaScript `Promise` as a Rust `Future`, we register a callback
//! to the promise that sends the resolution value over a one-shot
//! channel. One-shot channels are split into their two halves: sender and
//! receiver. The sender half moves into the callback, but the receiver half is
//! a future, and it represents the future resolution value of the original
//! promise.
//!
//! The final concern to address is that the runtime scheduling the polling of
//! Rust `Future`s (`tokio`) still knows which futures to poll despite it only
//! seeing half the picture now. I say "half the picture" because dependencies
//! that would otherwise live fully within the futures ecosystem are now hidden
//! in promises inside the JavaScript runtime.
//!
//! First, we must understand how `tokio` schedules polling. It is not busy
//! spinning and calling `poll` continuously in a loop. `tokio` maintains a set
//! of "root" futures. These are the futures passed to `Core::run` and
//! `Handle::spawn` directly. When `tokio` polls a "root" future, that `poll`
//! call will transitively reach down and call `poll` on "leaf" futures that
//! wrap file descriptors and sockets and such things. It is these "leaf"
//! futures' responsibilty to use OS APIs to trigger wake ups when new data is
//! available on a socket or what have you, and then it is `tokio`'s
//! responsibilty to map that wake up back to which "root" future it should poll
//! again. If the "leaf" futures do not properly register to be woken up again,
//! `tokio` will never poll that "root" future again, effectively dead locking
//! it.
//!
//! So we must ensure that our `Promise`-backed futures will always be polled
//! again by making sure that they have proper "leaf" futures. Luckily, the
//! receiver half of a one-shot channel is such a "leaf" future that properly
//! registers future wake ups. If instead, for example, we tried directly
//! checking the promise's state in `poll` with JSAPI methods, we *wouldn't*
//! register any wake ups, `tokio` would never `poll` the future again, and the
//! future would dead lock.
use super::{Error, ErrorKind};
use futures::{self, Async, Future, Poll, Select};
use futures::sync::oneshot;
use future_ext::{ready, FutureExt};
use gc_roots::GcRoot;
use js::conversions::{ConversionResult, FromJSValConvertible, ToJSValConvertible};
use js::glue::ReportError;
use js::jsapi;
use js::jsval;
use js::rust::Runtime as JsRuntime;
use state_machine_future::RentToOwn;
use std::marker::PhantomData;
use std::os::raw;
use std::ptr;
use task;
use void::Void;
type GenericVoid<T> = (Void, PhantomData<T>);
/// A future that resolves a promise with its inner future's value when ready.
#[derive(StateMachineFuture)]
#[allow(dead_code)]
enum Future2Promise<F>
where
F: Future,
<F as Future>::Item: ToJSValConvertible,
<F as Future>::Error: ToJSValConvertible,
{
/// Initially, we are waiting on the inner future to be ready or error.
#[state_machine_future(start, transitions(Finished, NotifyingOfError))]
WaitingOnInner {
future: F,
promise: GcRoot<*mut jsapi::JSObject>,
},
/// If we encountered an error that needs to propagate, we must send it to
/// the task.
#[state_machine_future(transitions(Finished))]
NotifyingOfError {
notify: futures::sink::Send<futures::sync::mpsc::Sender<task::TaskMessage>>,
phantom: PhantomData<F>,
},
/// All done.
#[state_machine_future(ready)]
Finished(PhantomData<F>),
/// We explicitly handle all errors, so make `Future::Error` impossible to
/// construct.
#[state_machine_future(error)]
Impossible(GenericVoid<F>),
}
impl<F> PollFuture2Promise<F> for Future2Promise<F>
where
F: Future,
<F as Future>::Item: ToJSValConvertible,
<F as Future>::Error: ToJSValConvertible,
{
fn poll_waiting_on_inner<'a>(
waiting: &'a mut RentToOwn<'a, WaitingOnInner<F>>,
) -> Poll<AfterWaitingOnInner<F>, GenericVoid<F>> |
fn poll_notifying_of_error<'a>(
notification: &'a mut RentToOwn<'a, NotifyingOfError<F>>,
) -> Poll<AfterNotifyingOfError<F>, GenericVoid<F>> {
match notification.notify.poll() {
Ok(Async::NotReady) => {
return Ok(Async::NotReady);
}
// The only way we can get an error here is if we lost a
// race between notifying the task of an error and the task
// finishing.
Err(_) | Ok(Async::Ready(_)) => ready(Finished(PhantomData)),
}
}
}
/// Convert a Rust `Future` into a JavaScript `Promise`.
///
/// The `Future` is spawned onto the current thread's `tokio` event loop.
pub fn future_to_promise<F, T, E>(future: F) -> GcRoot<*mut jsapi::JSObject>
where
F: 'static + Future<Item = T, Error = E>,
T: 'static + ToJSValConvertible,
E: 'static + ToJSValConvertible,
{
let cx = JsRuntime::get();
rooted!(in(cx) let executor = ptr::null_mut());
rooted!(in(cx) let proto = ptr::null_mut());
rooted!(in(cx) let promise = unsafe {
jsapi::JS::NewPromiseObject(
cx,
executor.handle(),
proto.handle()
)
});
assert!(!promise.get().is_null());
let promise = GcRoot::new(promise.get());
let future = Future2Promise::start(future, promise.clone()).ignore_results();
task::event_loop().spawn(future);
promise
}
const CLOSURE_SLOT: usize = 0;
// JSNative that forwards the call `f`.
unsafe extern "C" fn trampoline<F>(
cx: *mut jsapi::JSContext,
argc: raw::c_uint,
vp: *mut jsapi::JS::Value,
) -> bool
where
F: 'static + FnOnce(*mut jsapi::JSContext, &jsapi::JS::CallArgs) -> bool,
{
let args = jsapi::JS::CallArgs::from_vp(vp, argc);
rooted!(in(cx) let callee = args.callee());
let private = jsapi::js::GetFunctionNativeReserved(callee.get(), CLOSURE_SLOT);
let f = (*private).to_private() as *mut F;
if f.is_null() {
ReportError(cx, b"May only be called once\0".as_ptr() as *const _);
return false;
}
let private = jsval::PrivateValue(ptr::null());
jsapi::js::SetFunctionNativeReserved(callee.get(), CLOSURE_SLOT, &private);
let f = Box::from_raw(f);
f(cx, &args)
}
/// This is unsafe because the resulting function object will _not_ trace `f`'s
/// closed over values. Don't close over GC things!
unsafe fn make_js_fn<F>(f: F) -> GcRoot<*mut jsapi::JSObject>
where
F: 'static + FnOnce(*mut jsapi::JSContext, &jsapi::JS::CallArgs) -> bool,
{
let cx = JsRuntime::get();
rooted!(in(cx) let func = jsapi::js::NewFunctionWithReserved(
cx,
Some(trampoline::<F>),
0, // nargs
0, // flags
ptr::null_mut() // name
));
assert!(!func.get().is_null());
let private = Box::new(f);
let private = jsval::PrivateValue(Box::into_raw(private) as *const _);
jsapi::js::SetFunctionNativeReserved(func.get() as *mut _, CLOSURE_SLOT, &private);
GcRoot::new(func.get() as *mut jsapi::JSObject)
}
type ResultReceiver<T, E> = oneshot::Receiver<super::Result<Result<T, E>>>;
/// A future of either a JavaScript promise's resolution `T` or rejection `E`.
pub struct Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
inner: Select<ResultReceiver<T, E>, ResultReceiver<T, E>>,
}
impl<T, E> ::std::fmt::Debug for Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Promise2Future {{ .. }}")
}
}
impl<T, E> Future for Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
type Item = Result<T, E>;
type Error = super::Error;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
match self.inner.poll() {
Err((oneshot::Canceled, _)) => {
Err(ErrorKind::JavaScriptPromiseCollectedWithoutSettling.into())
}
Ok(Async::NotReady) => Ok(Async::NotReady),
// One of the handlers was called, but then we encountered an error
// converting the value from JS into Rust or something like that.
Ok(Async::Ready((Err(e), _))) => Err(e),
Ok(Async::Ready((Ok(result), _))) => Ok(Async::Ready(result)),
}
}
}
/// Convert the given JavaScript `Promise` object into a future.
///
/// The resulting future is of either an `Ok(T)` if the promise gets resolved,
/// or an `Err(E)` if the promise is rejected.
///
/// Failure to convert the resolution or rejection JavaScript value into a `T`
/// or `E` will cause the resulting future's `poll` to return an error.
///
/// If the promise object is reclaimed by the garbage collector without being
/// resolved or rejected, then the resulting future's `poll` will return an
/// error of kind `ErrorKind::JavaScriptPromiseCollectedWithoutSettling`.
pub fn promise_to_future<T, E>(promise: &GcRoot<*mut jsapi::JSObject>) -> Promise2Future<T, E>
where
T: 'static + FromJSValConvertible<Config = ()>,
E: 'static + FromJSValConvertible<Config = ()>,
{
unsafe {
let cx = JsRuntime::get();
let (resolve_sender, resolve_receiver) = oneshot::channel();
let on_resolve = make_js_fn(move |cx, args| {
match T::from_jsval(cx, args.get(0), ()) {
Err(()) => {
let err = Error::from_cx(cx);
let _ = resolve_sender.send(Err(err));
}
Ok(ConversionResult::Failure(s)) => {
let err = Err(ErrorKind::Msg(s.to_string()).into());
let _ = resolve_sender.send(err);
}
Ok(ConversionResult::Success(t)) => {
let _ = resolve_sender.send(Ok(Ok(t)));
}
}
true
});
let (reject_sender, reject_receiver) = oneshot::channel();
let on_reject = make_js_fn(move |cx, args| {
match E::from_jsval(cx, args.get(0), ()) {
Err(()) => {
let err = Error::from_cx(cx);
let _ = reject_sender.send(Err(err));
}
Ok(ConversionResult::Failure(s)) => {
let err = Err(ErrorKind::Msg(s.to_string()).into());
let _ = reject_sender.send(err);
}
Ok(ConversionResult::Success(t)) => {
let _ = reject_sender.send(Ok(Err(t)));
}
}
true
});
rooted!(in(cx) let promise = promise.raw());
rooted!(in(cx) let on_resolve = on_resolve.raw());
rooted!(in(cx) let on_reject = on_reject.raw());
assert!(jsapi::JS::AddPromiseReactions(
cx,
promise.handle(),
on_resolve.handle(),
on_reject.handle()
));
Promise2Future {
inner: resolve_receiver.select(reject_receiver),
}
}
}
| {
let error = match waiting.future.poll() {
Ok(Async::NotReady) => {
return Ok(Async::NotReady);
}
Ok(Async::Ready(t)) => {
let cx = JsRuntime::get();
unsafe {
rooted!(in(cx) let mut val = jsval::UndefinedValue());
t.to_jsval(cx, val.handle_mut());
rooted!(in(cx) let promise = waiting.promise.raw());
assert!(jsapi::JS::ResolvePromise(
cx,
promise.handle(),
val.handle()
));
if let Err(e) = task::drain_micro_task_queue() {
e
} else {
return ready(Finished(PhantomData));
}
}
}
Err(error) => {
let cx = JsRuntime::get();
unsafe {
rooted!(in(cx) let mut val = jsval::UndefinedValue());
error.to_jsval(cx, val.handle_mut());
rooted!(in(cx) let promise = waiting.promise.raw());
assert!(jsapi::JS::RejectPromise(cx, promise.handle(), val.handle()));
if let Err(e) = task::drain_micro_task_queue() {
e
} else {
return ready(Finished(PhantomData));
}
}
}
};
let msg = task::TaskMessage::UnhandledRejectedPromise { error };
ready(NotifyingOfError {
notify: task::this_task().send(msg),
phantom: PhantomData,
})
} | identifier_body |
rainwater.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
tCopyright (c) 2020 Octavio Gonzalez-Lugo
o use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@author: Octavio Gonzalez-Lugo
"""
###############################################################################
# Loading packages
###############################################################################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###############################################################################
# Plotting functions
###############################################################################
MonthsNames=['January','February','March','April','May','June','July','August','September','October','November','December']
def PlotStyle(Axes):
"""
Parameters
----------
Axes : Matplotlib axes object
Applies a general style to the matplotlib object
Returns
-------
None.
"""
Axes.spines['top'].set_visible(False)
Axes.spines['bottom'].set_visible(True)
Axes.spines['left'].set_visible(True)
Axes.spines['right'].set_visible(False)
Axes.xaxis.set_tick_params(labelsize=13)
Axes.yaxis.set_tick_params(labelsize=13)
def GetGridShape(TotalNumberOfElements):
"""
Parameters
----------
TotalNumberOfElements : int
Total number of elements in the plot.
Returns
-------
nrows : int
number of rows in the plot.
ncolumns : int
number of columns in the plot.
"""
numberOfUnique=TotalNumberOfElements
squaredUnique=int(np.sqrt(numberOfUnique))
if squaredUnique*squaredUnique==numberOfUnique:
nrows,ncolumns=squaredUnique,squaredUnique
elif squaredUnique*(squaredUnique+1)<numberOfUnique:
nrows,ncolumns=squaredUnique+1,squaredUnique+1
else:
|
return nrows,ncolumns
def MakeCorrelationPanel(Data,Headers,PlotSize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
Headers : list
list of strings with the data headers inside Data.
PlotSize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
nrows,ncolumns=GetGridShape(len(Headers)*len(Headers))
subPlotIndexs=[(j,k) for j in range(nrows) for k in range(ncolumns)]
fig,axes=plt.subplots(nrows,ncolumns,figsize=PlotSize)
counter=0
for val in Headers:
for sal in Headers:
axes[subPlotIndexs[counter]].plot(Data[val],Data[sal],'bo')
axes[subPlotIndexs[counter]].set_xlabel(val)
axes[subPlotIndexs[counter]].set_ylabel(sal)
counter=counter+1
plt.tight_layout()
[PlotStyle(axes[val]) for val in subPlotIndexs]
def MakeMeanPlot(Data,figsize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
figsize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
fig,axes=plt.subplots(1,3,figsize=figsize,subplot_kw=dict(polar=True))
dataHeaders=['DayOfWeek','Month','Year']
for ax,name in zip(axes,dataHeaders):
values=Data.groupby(name).mean()["PRECIP"]
xticks=values.keys()
data=values.tolist()
data+=data[:1]
angles=np.linspace(0,2*np.pi,len(data))
ax.plot(angles,data)
ax.fill(angles,data,'b',alpha=0.1)
ax.set_xticks(angles)
if name=='Month':
ax.set_xticklabels(MonthsNames)
else:
ax.set_xticklabels(xticks)
plt.tight_layout()
def MakeMonthlyPlot(Data,figsize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
figsize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
fig,axes=plt.subplots(4,3,figsize=figsize,subplot_kw=dict(polar=True))
dataValues=[1,2,3,4,5,6,7,8,9,10,11,12]
flattenAxes=axes.ravel()
for ax,name in zip(flattenAxes,dataValues):
values=Data[Data["Month"]==name].groupby('DayOfWeek').mean()["PRECIP"]
data=values.tolist()
xticks=values.keys()
data+=data[:1]
angles=np.linspace(0,2*np.pi,len(data))
ax.plot(angles,data)
ax.fill(angles,data,'b',alpha=0.1)
ax.set_xticks(angles)
ax.set_xticklabels(xticks)
ax.set_title(MonthsNames[name-1],loc='right')
plt.tight_layout()
###############################################################################
# Loading the data
###############################################################################
GlobalDirectory=r"/home/tavoglc/LocalData/"
DataDir=GlobalDirectory + "Climate.csv"
Data=pd.read_csv(DataDir)
###############################################################################
# Time series custom features
###############################################################################
#Wrapper function for the days of the week
def ToDayOfWeek(DayNumber):
Days=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
return Days[DayNumber]
Data['Date']=pd.to_datetime(Data[' FECHA'],format='%d/%m/%Y')
Data['DayOfWeek']=Data['Date'].dt.dayofweek.apply(ToDayOfWeek)
Data['Month']=Data['Date'].dt.month.apply(int)
Data['Year']=Data['Date'].dt.year.apply(int)
climateHeaders=['PRECIP',' TMAX',' TMIN']
###############################################################################
# Data Visualization
###############################################################################
MakeCorrelationPanel(Data,climateHeaders,(10,10))
MakeMeanPlot(Data,(10,5))
MakeMonthlyPlot(Data,(12,12))
###############################################################################
# Approximate harvesting
###############################################################################
AvaliableArea=300
MeanHarvestedWater=AvaliableArea*Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]
lowEstimation=Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]-Data.groupby(['Year','Month']).sum().groupby('Month').std()["PRECIP"]
LowEstimationHarvestedWater=[]
for val in lowEstimation:
if val>0:
LowEstimationHarvestedWater.append(val*AvaliableArea)
else:
LowEstimationHarvestedWater.append(0)
HighEstimation=AvaliableArea*(Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]+Data.groupby(['Year','Month']).sum().groupby('Month').std()["PRECIP"])
plt.figure()
plt.plot(MeanHarvestedWater,label="Mean Forecast")
plt.plot(LowEstimationHarvestedWater,label="Low Forecast")
plt.plot(HighEstimation,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Months")
ax.set_ylabel("Harvested Water (liters)")
ax.set_xticks([0,1,2,3,4,5,6,7,8,9,10,11])
ax.set_xticklabels(MonthsNames,rotation=45)
ax.legend()
PlotStyle(ax)
###############################################################################
# Water reserve sizing
###############################################################################
MonthlyWaterConsumption=20000
def ReserveWaterState(Reserve,WaterWeights):
'''
Parameters
----------
Reserve : int
size of the water reserve.
WaterWeights : list
list with the approximate harvested water.
Returns
-------
container : list
contains the remaining water during each month of a water reserve of size
Reserve.
'''
remaning=Reserve
container=[]
for val in WaterWeights:
consumption=remaning-MonthlyWaterConsumption
if consumption+val>=Reserve:
remaning=Reserve
else:
remaning=consumption+val
container.append(remaning)
return container
#Wrapper function to adjust negative values
def ObjetiveFunction(Reserve,Harvested):
Months=12-np.sum(np.sign(ReserveWaterState(Reserve,Harvested)))
if Months>=12:
return 12
else:
return Months
###############################################################################
# Water reserve sizing fixed use
###############################################################################
ReserveSize=[k for k in range(1,100000,1000)]
meanEstimation=[ObjetiveFunction(val,MeanHarvestedWater) for val in ReserveSize]
lowEstimation=[ObjetiveFunction(val,LowEstimationHarvestedWater) for val in ReserveSize]
highEstimation=[ObjetiveFunction(val,HighEstimation) for val in ReserveSize]
plt.figure()
plt.plot(meanEstimation,label="Mean Forecast")
plt.plot(lowEstimation,label="Low Forecast")
plt.plot(highEstimation,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Reserve Size (m3)")
ax.set_ylabel("Months without water")
ax.legend()
PlotStyle(ax)
###############################################################################
# Water reserve sizing variable use
###############################################################################
MonthlyUse=[3000,3000,6000,9000,12000,15000,20000,20000,20000,20000,20000,20000]
def VariableReserveWaterState(Reserve,WaterWeights,UsageWeights):
'''
Parameters
----------
Reserve : int
size of the water reserve.
WaterWeights : list
list with the approximate harvested water.
UsageWeights : list
list with the approximate monthly water usage.
Returns
-------
container : list
contains the remaining water during each month of a water reserve of size
Reserve.
'''
remaning=Reserve
container=[]
for val,sal in zip(WaterWeights,UsageWeights):
consumption=remaning-sal
if consumption+val>=Reserve:
remaning=Reserve
else:
remaning=consumption+val
container.append(remaning)
return container
#Wrapper function to adjust for negative values
def ObjetiveFunctionVariable(Reserve,Harvested,Usage):
Months=12-np.sum(np.sign(VariableReserveWaterState(Reserve,Harvested,Usage)))
if Months>=12:
return 12
else:
return Months
meanEstimationV=[ObjetiveFunctionVariable(val,MeanHarvestedWater,MonthlyUse) for val in ReserveSize]
lowEstimationV=[ObjetiveFunctionVariable(val,LowEstimationHarvestedWater,MonthlyUse) for val in ReserveSize]
highEstimationV=[ObjetiveFunctionVariable(val,HighEstimation,MonthlyUse) for val in ReserveSize]
plt.figure()
plt.plot(meanEstimationV,label="Mean Forecast")
plt.plot(lowEstimationV,label="Low Forecast")
plt.plot(highEstimationV,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Reserve Size (m3)")
ax.set_ylabel("Months without water")
ax.legend()
PlotStyle(ax)
| nrows,ncolumns=squaredUnique,squaredUnique+1 | conditional_block |
rainwater.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
tCopyright (c) 2020 Octavio Gonzalez-Lugo
o use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@author: Octavio Gonzalez-Lugo
"""
###############################################################################
# Loading packages
###############################################################################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###############################################################################
# Plotting functions
###############################################################################
MonthsNames=['January','February','March','April','May','June','July','August','September','October','November','December']
def PlotStyle(Axes):
"""
Parameters
----------
Axes : Matplotlib axes object
Applies a general style to the matplotlib object
Returns
-------
None.
"""
Axes.spines['top'].set_visible(False)
Axes.spines['bottom'].set_visible(True)
Axes.spines['left'].set_visible(True)
Axes.spines['right'].set_visible(False)
Axes.xaxis.set_tick_params(labelsize=13)
Axes.yaxis.set_tick_params(labelsize=13)
def GetGridShape(TotalNumberOfElements):
|
def MakeCorrelationPanel(Data,Headers,PlotSize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
Headers : list
list of strings with the data headers inside Data.
PlotSize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
nrows,ncolumns=GetGridShape(len(Headers)*len(Headers))
subPlotIndexs=[(j,k) for j in range(nrows) for k in range(ncolumns)]
fig,axes=plt.subplots(nrows,ncolumns,figsize=PlotSize)
counter=0
for val in Headers:
for sal in Headers:
axes[subPlotIndexs[counter]].plot(Data[val],Data[sal],'bo')
axes[subPlotIndexs[counter]].set_xlabel(val)
axes[subPlotIndexs[counter]].set_ylabel(sal)
counter=counter+1
plt.tight_layout()
[PlotStyle(axes[val]) for val in subPlotIndexs]
def MakeMeanPlot(Data,figsize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
figsize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
fig,axes=plt.subplots(1,3,figsize=figsize,subplot_kw=dict(polar=True))
dataHeaders=['DayOfWeek','Month','Year']
for ax,name in zip(axes,dataHeaders):
values=Data.groupby(name).mean()["PRECIP"]
xticks=values.keys()
data=values.tolist()
data+=data[:1]
angles=np.linspace(0,2*np.pi,len(data))
ax.plot(angles,data)
ax.fill(angles,data,'b',alpha=0.1)
ax.set_xticks(angles)
if name=='Month':
ax.set_xticklabels(MonthsNames)
else:
ax.set_xticklabels(xticks)
plt.tight_layout()
def MakeMonthlyPlot(Data,figsize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
figsize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
fig,axes=plt.subplots(4,3,figsize=figsize,subplot_kw=dict(polar=True))
dataValues=[1,2,3,4,5,6,7,8,9,10,11,12]
flattenAxes=axes.ravel()
for ax,name in zip(flattenAxes,dataValues):
values=Data[Data["Month"]==name].groupby('DayOfWeek').mean()["PRECIP"]
data=values.tolist()
xticks=values.keys()
data+=data[:1]
angles=np.linspace(0,2*np.pi,len(data))
ax.plot(angles,data)
ax.fill(angles,data,'b',alpha=0.1)
ax.set_xticks(angles)
ax.set_xticklabels(xticks)
ax.set_title(MonthsNames[name-1],loc='right')
plt.tight_layout()
###############################################################################
# Loading the data
###############################################################################
GlobalDirectory=r"/home/tavoglc/LocalData/"
DataDir=GlobalDirectory + "Climate.csv"
Data=pd.read_csv(DataDir)
###############################################################################
# Time series custom features
###############################################################################
#Wrapper function for the days of the week
def ToDayOfWeek(DayNumber):
Days=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
return Days[DayNumber]
Data['Date']=pd.to_datetime(Data[' FECHA'],format='%d/%m/%Y')
Data['DayOfWeek']=Data['Date'].dt.dayofweek.apply(ToDayOfWeek)
Data['Month']=Data['Date'].dt.month.apply(int)
Data['Year']=Data['Date'].dt.year.apply(int)
climateHeaders=['PRECIP',' TMAX',' TMIN']
###############################################################################
# Data Visualization
###############################################################################
MakeCorrelationPanel(Data,climateHeaders,(10,10))
MakeMeanPlot(Data,(10,5))
MakeMonthlyPlot(Data,(12,12))
###############################################################################
# Approximate harvesting
###############################################################################
AvaliableArea=300
MeanHarvestedWater=AvaliableArea*Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]
lowEstimation=Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]-Data.groupby(['Year','Month']).sum().groupby('Month').std()["PRECIP"]
LowEstimationHarvestedWater=[]
for val in lowEstimation:
if val>0:
LowEstimationHarvestedWater.append(val*AvaliableArea)
else:
LowEstimationHarvestedWater.append(0)
HighEstimation=AvaliableArea*(Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]+Data.groupby(['Year','Month']).sum().groupby('Month').std()["PRECIP"])
plt.figure()
plt.plot(MeanHarvestedWater,label="Mean Forecast")
plt.plot(LowEstimationHarvestedWater,label="Low Forecast")
plt.plot(HighEstimation,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Months")
ax.set_ylabel("Harvested Water (liters)")
ax.set_xticks([0,1,2,3,4,5,6,7,8,9,10,11])
ax.set_xticklabels(MonthsNames,rotation=45)
ax.legend()
PlotStyle(ax)
###############################################################################
# Water reserve sizing
###############################################################################
MonthlyWaterConsumption=20000
def ReserveWaterState(Reserve,WaterWeights):
'''
Parameters
----------
Reserve : int
size of the water reserve.
WaterWeights : list
list with the approximate harvested water.
Returns
-------
container : list
contains the remaining water during each month of a water reserve of size
Reserve.
'''
remaning=Reserve
container=[]
for val in WaterWeights:
consumption=remaning-MonthlyWaterConsumption
if consumption+val>=Reserve:
remaning=Reserve
else:
remaning=consumption+val
container.append(remaning)
return container
#Wrapper function to adjust negative values
def ObjetiveFunction(Reserve,Harvested):
Months=12-np.sum(np.sign(ReserveWaterState(Reserve,Harvested)))
if Months>=12:
return 12
else:
return Months
###############################################################################
# Water reserve sizing fixed use
###############################################################################
ReserveSize=[k for k in range(1,100000,1000)]
meanEstimation=[ObjetiveFunction(val,MeanHarvestedWater) for val in ReserveSize]
lowEstimation=[ObjetiveFunction(val,LowEstimationHarvestedWater) for val in ReserveSize]
highEstimation=[ObjetiveFunction(val,HighEstimation) for val in ReserveSize]
plt.figure()
plt.plot(meanEstimation,label="Mean Forecast")
plt.plot(lowEstimation,label="Low Forecast")
plt.plot(highEstimation,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Reserve Size (m3)")
ax.set_ylabel("Months without water")
ax.legend()
PlotStyle(ax)
###############################################################################
# Water reserve sizing variable use
###############################################################################
MonthlyUse=[3000,3000,6000,9000,12000,15000,20000,20000,20000,20000,20000,20000]
def VariableReserveWaterState(Reserve,WaterWeights,UsageWeights):
'''
Parameters
----------
Reserve : int
size of the water reserve.
WaterWeights : list
list with the approximate harvested water.
UsageWeights : list
list with the approximate monthly water usage.
Returns
-------
container : list
contains the remaining water during each month of a water reserve of size
Reserve.
'''
remaning=Reserve
container=[]
for val,sal in zip(WaterWeights,UsageWeights):
consumption=remaning-sal
if consumption+val>=Reserve:
remaning=Reserve
else:
remaning=consumption+val
container.append(remaning)
return container
#Wrapper function to adjust for negative values
def ObjetiveFunctionVariable(Reserve,Harvested,Usage):
Months=12-np.sum(np.sign(VariableReserveWaterState(Reserve,Harvested,Usage)))
if Months>=12:
return 12
else:
return Months
meanEstimationV=[ObjetiveFunctionVariable(val,MeanHarvestedWater,MonthlyUse) for val in ReserveSize]
lowEstimationV=[ObjetiveFunctionVariable(val,LowEstimationHarvestedWater,MonthlyUse) for val in ReserveSize]
highEstimationV=[ObjetiveFunctionVariable(val,HighEstimation,MonthlyUse) for val in ReserveSize]
plt.figure()
plt.plot(meanEstimationV,label="Mean Forecast")
plt.plot(lowEstimationV,label="Low Forecast")
plt.plot(highEstimationV,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Reserve Size (m3)")
ax.set_ylabel("Months without water")
ax.legend()
PlotStyle(ax)
| """
Parameters
----------
TotalNumberOfElements : int
Total number of elements in the plot.
Returns
-------
nrows : int
number of rows in the plot.
ncolumns : int
number of columns in the plot.
"""
numberOfUnique=TotalNumberOfElements
squaredUnique=int(np.sqrt(numberOfUnique))
if squaredUnique*squaredUnique==numberOfUnique:
nrows,ncolumns=squaredUnique,squaredUnique
elif squaredUnique*(squaredUnique+1)<numberOfUnique:
nrows,ncolumns=squaredUnique+1,squaredUnique+1
else:
nrows,ncolumns=squaredUnique,squaredUnique+1
return nrows,ncolumns | identifier_body |
rainwater.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
tCopyright (c) 2020 Octavio Gonzalez-Lugo
o use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@author: Octavio Gonzalez-Lugo
"""
###############################################################################
# Loading packages
###############################################################################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###############################################################################
# Plotting functions
###############################################################################
MonthsNames=['January','February','March','April','May','June','July','August','September','October','November','December']
def | (Axes):
"""
Parameters
----------
Axes : Matplotlib axes object
Applies a general style to the matplotlib object
Returns
-------
None.
"""
Axes.spines['top'].set_visible(False)
Axes.spines['bottom'].set_visible(True)
Axes.spines['left'].set_visible(True)
Axes.spines['right'].set_visible(False)
Axes.xaxis.set_tick_params(labelsize=13)
Axes.yaxis.set_tick_params(labelsize=13)
def GetGridShape(TotalNumberOfElements):
"""
Parameters
----------
TotalNumberOfElements : int
Total number of elements in the plot.
Returns
-------
nrows : int
number of rows in the plot.
ncolumns : int
number of columns in the plot.
"""
numberOfUnique=TotalNumberOfElements
squaredUnique=int(np.sqrt(numberOfUnique))
if squaredUnique*squaredUnique==numberOfUnique:
nrows,ncolumns=squaredUnique,squaredUnique
elif squaredUnique*(squaredUnique+1)<numberOfUnique:
nrows,ncolumns=squaredUnique+1,squaredUnique+1
else:
nrows,ncolumns=squaredUnique,squaredUnique+1
return nrows,ncolumns
def MakeCorrelationPanel(Data,Headers,PlotSize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
Headers : list
list of strings with the data headers inside Data.
PlotSize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
nrows,ncolumns=GetGridShape(len(Headers)*len(Headers))
subPlotIndexs=[(j,k) for j in range(nrows) for k in range(ncolumns)]
fig,axes=plt.subplots(nrows,ncolumns,figsize=PlotSize)
counter=0
for val in Headers:
for sal in Headers:
axes[subPlotIndexs[counter]].plot(Data[val],Data[sal],'bo')
axes[subPlotIndexs[counter]].set_xlabel(val)
axes[subPlotIndexs[counter]].set_ylabel(sal)
counter=counter+1
plt.tight_layout()
[PlotStyle(axes[val]) for val in subPlotIndexs]
def MakeMeanPlot(Data,figsize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
figsize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
fig,axes=plt.subplots(1,3,figsize=figsize,subplot_kw=dict(polar=True))
dataHeaders=['DayOfWeek','Month','Year']
for ax,name in zip(axes,dataHeaders):
values=Data.groupby(name).mean()["PRECIP"]
xticks=values.keys()
data=values.tolist()
data+=data[:1]
angles=np.linspace(0,2*np.pi,len(data))
ax.plot(angles,data)
ax.fill(angles,data,'b',alpha=0.1)
ax.set_xticks(angles)
if name=='Month':
ax.set_xticklabels(MonthsNames)
else:
ax.set_xticklabels(xticks)
plt.tight_layout()
def MakeMonthlyPlot(Data,figsize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
figsize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
fig,axes=plt.subplots(4,3,figsize=figsize,subplot_kw=dict(polar=True))
dataValues=[1,2,3,4,5,6,7,8,9,10,11,12]
flattenAxes=axes.ravel()
for ax,name in zip(flattenAxes,dataValues):
values=Data[Data["Month"]==name].groupby('DayOfWeek').mean()["PRECIP"]
data=values.tolist()
xticks=values.keys()
data+=data[:1]
angles=np.linspace(0,2*np.pi,len(data))
ax.plot(angles,data)
ax.fill(angles,data,'b',alpha=0.1)
ax.set_xticks(angles)
ax.set_xticklabels(xticks)
ax.set_title(MonthsNames[name-1],loc='right')
plt.tight_layout()
###############################################################################
# Loading the data
###############################################################################
GlobalDirectory=r"/home/tavoglc/LocalData/"
DataDir=GlobalDirectory + "Climate.csv"
Data=pd.read_csv(DataDir)
###############################################################################
# Time series custom features
###############################################################################
#Wrapper function for the days of the week
def ToDayOfWeek(DayNumber):
Days=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
return Days[DayNumber]
Data['Date']=pd.to_datetime(Data[' FECHA'],format='%d/%m/%Y')
Data['DayOfWeek']=Data['Date'].dt.dayofweek.apply(ToDayOfWeek)
Data['Month']=Data['Date'].dt.month.apply(int)
Data['Year']=Data['Date'].dt.year.apply(int)
climateHeaders=['PRECIP',' TMAX',' TMIN']
###############################################################################
# Data Visualization
###############################################################################
MakeCorrelationPanel(Data,climateHeaders,(10,10))
MakeMeanPlot(Data,(10,5))
MakeMonthlyPlot(Data,(12,12))
###############################################################################
# Approximate harvesting
###############################################################################
AvaliableArea=300
MeanHarvestedWater=AvaliableArea*Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]
lowEstimation=Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]-Data.groupby(['Year','Month']).sum().groupby('Month').std()["PRECIP"]
LowEstimationHarvestedWater=[]
for val in lowEstimation:
if val>0:
LowEstimationHarvestedWater.append(val*AvaliableArea)
else:
LowEstimationHarvestedWater.append(0)
HighEstimation=AvaliableArea*(Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]+Data.groupby(['Year','Month']).sum().groupby('Month').std()["PRECIP"])
plt.figure()
plt.plot(MeanHarvestedWater,label="Mean Forecast")
plt.plot(LowEstimationHarvestedWater,label="Low Forecast")
plt.plot(HighEstimation,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Months")
ax.set_ylabel("Harvested Water (liters)")
ax.set_xticks([0,1,2,3,4,5,6,7,8,9,10,11])
ax.set_xticklabels(MonthsNames,rotation=45)
ax.legend()
PlotStyle(ax)
###############################################################################
# Water reserve sizing
###############################################################################
MonthlyWaterConsumption=20000
def ReserveWaterState(Reserve,WaterWeights):
'''
Parameters
----------
Reserve : int
size of the water reserve.
WaterWeights : list
list with the approximate harvested water.
Returns
-------
container : list
contains the remaining water during each month of a water reserve of size
Reserve.
'''
remaning=Reserve
container=[]
for val in WaterWeights:
consumption=remaning-MonthlyWaterConsumption
if consumption+val>=Reserve:
remaning=Reserve
else:
remaning=consumption+val
container.append(remaning)
return container
#Wrapper function to adjust negative values
def ObjetiveFunction(Reserve,Harvested):
Months=12-np.sum(np.sign(ReserveWaterState(Reserve,Harvested)))
if Months>=12:
return 12
else:
return Months
###############################################################################
# Water reserve sizing fixed use
###############################################################################
ReserveSize=[k for k in range(1,100000,1000)]
meanEstimation=[ObjetiveFunction(val,MeanHarvestedWater) for val in ReserveSize]
lowEstimation=[ObjetiveFunction(val,LowEstimationHarvestedWater) for val in ReserveSize]
highEstimation=[ObjetiveFunction(val,HighEstimation) for val in ReserveSize]
plt.figure()
plt.plot(meanEstimation,label="Mean Forecast")
plt.plot(lowEstimation,label="Low Forecast")
plt.plot(highEstimation,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Reserve Size (m3)")
ax.set_ylabel("Months without water")
ax.legend()
PlotStyle(ax)
###############################################################################
# Water reserve sizing variable use
###############################################################################
MonthlyUse=[3000,3000,6000,9000,12000,15000,20000,20000,20000,20000,20000,20000]
def VariableReserveWaterState(Reserve,WaterWeights,UsageWeights):
'''
Parameters
----------
Reserve : int
size of the water reserve.
WaterWeights : list
list with the approximate harvested water.
UsageWeights : list
list with the approximate monthly water usage.
Returns
-------
container : list
contains the remaining water during each month of a water reserve of size
Reserve.
'''
remaning=Reserve
container=[]
for val,sal in zip(WaterWeights,UsageWeights):
consumption=remaning-sal
if consumption+val>=Reserve:
remaning=Reserve
else:
remaning=consumption+val
container.append(remaning)
return container
#Wrapper function to adjust for negative values
def ObjetiveFunctionVariable(Reserve,Harvested,Usage):
Months=12-np.sum(np.sign(VariableReserveWaterState(Reserve,Harvested,Usage)))
if Months>=12:
return 12
else:
return Months
meanEstimationV=[ObjetiveFunctionVariable(val,MeanHarvestedWater,MonthlyUse) for val in ReserveSize]
lowEstimationV=[ObjetiveFunctionVariable(val,LowEstimationHarvestedWater,MonthlyUse) for val in ReserveSize]
highEstimationV=[ObjetiveFunctionVariable(val,HighEstimation,MonthlyUse) for val in ReserveSize]
plt.figure()
plt.plot(meanEstimationV,label="Mean Forecast")
plt.plot(lowEstimationV,label="Low Forecast")
plt.plot(highEstimationV,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Reserve Size (m3)")
ax.set_ylabel("Months without water")
ax.legend()
PlotStyle(ax)
| PlotStyle | identifier_name |
rainwater.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
tCopyright (c) 2020 Octavio Gonzalez-Lugo
o use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@author: Octavio Gonzalez-Lugo
"""
###############################################################################
# Loading packages
###############################################################################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###############################################################################
# Plotting functions
###############################################################################
MonthsNames=['January','February','March','April','May','June','July','August','September','October','November','December']
def PlotStyle(Axes):
"""
Parameters
----------
Axes : Matplotlib axes object
Applies a general style to the matplotlib object
Returns
-------
None.
"""
Axes.spines['top'].set_visible(False)
Axes.spines['bottom'].set_visible(True)
Axes.spines['left'].set_visible(True)
Axes.spines['right'].set_visible(False)
Axes.xaxis.set_tick_params(labelsize=13)
Axes.yaxis.set_tick_params(labelsize=13)
def GetGridShape(TotalNumberOfElements):
"""
Parameters
----------
TotalNumberOfElements : int
Total number of elements in the plot.
Returns
-------
nrows : int
number of rows in the plot.
ncolumns : int
number of columns in the plot.
"""
numberOfUnique=TotalNumberOfElements
squaredUnique=int(np.sqrt(numberOfUnique))
if squaredUnique*squaredUnique==numberOfUnique:
nrows,ncolumns=squaredUnique,squaredUnique
elif squaredUnique*(squaredUnique+1)<numberOfUnique:
nrows,ncolumns=squaredUnique+1,squaredUnique+1
else:
nrows,ncolumns=squaredUnique,squaredUnique+1
return nrows,ncolumns
def MakeCorrelationPanel(Data,Headers,PlotSize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
Headers : list
list of strings with the data headers inside Data.
PlotSize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
nrows,ncolumns=GetGridShape(len(Headers)*len(Headers))
subPlotIndexs=[(j,k) for j in range(nrows) for k in range(ncolumns)]
fig,axes=plt.subplots(nrows,ncolumns,figsize=PlotSize)
counter=0
for val in Headers:
for sal in Headers:
axes[subPlotIndexs[counter]].plot(Data[val],Data[sal],'bo')
axes[subPlotIndexs[counter]].set_xlabel(val)
axes[subPlotIndexs[counter]].set_ylabel(sal)
counter=counter+1
plt.tight_layout()
[PlotStyle(axes[val]) for val in subPlotIndexs]
def MakeMeanPlot(Data,figsize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
figsize : tuple
contains the size of the generated plot.
Returns
-------
None.
| values=Data.groupby(name).mean()["PRECIP"]
xticks=values.keys()
data=values.tolist()
data+=data[:1]
angles=np.linspace(0,2*np.pi,len(data))
ax.plot(angles,data)
ax.fill(angles,data,'b',alpha=0.1)
ax.set_xticks(angles)
if name=='Month':
ax.set_xticklabels(MonthsNames)
else:
ax.set_xticklabels(xticks)
plt.tight_layout()
def MakeMonthlyPlot(Data,figsize):
'''
Parameters
----------
Data : pandas dataframe
Contains the data set to be analyzed.
figsize : tuple
contains the size of the generated plot.
Returns
-------
None.
'''
fig,axes=plt.subplots(4,3,figsize=figsize,subplot_kw=dict(polar=True))
dataValues=[1,2,3,4,5,6,7,8,9,10,11,12]
flattenAxes=axes.ravel()
for ax,name in zip(flattenAxes,dataValues):
values=Data[Data["Month"]==name].groupby('DayOfWeek').mean()["PRECIP"]
data=values.tolist()
xticks=values.keys()
data+=data[:1]
angles=np.linspace(0,2*np.pi,len(data))
ax.plot(angles,data)
ax.fill(angles,data,'b',alpha=0.1)
ax.set_xticks(angles)
ax.set_xticklabels(xticks)
ax.set_title(MonthsNames[name-1],loc='right')
plt.tight_layout()
###############################################################################
# Loading the data
###############################################################################
GlobalDirectory=r"/home/tavoglc/LocalData/"
DataDir=GlobalDirectory + "Climate.csv"
Data=pd.read_csv(DataDir)
###############################################################################
# Time series custom features
###############################################################################
#Wrapper function for the days of the week
def ToDayOfWeek(DayNumber):
Days=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
return Days[DayNumber]
Data['Date']=pd.to_datetime(Data[' FECHA'],format='%d/%m/%Y')
Data['DayOfWeek']=Data['Date'].dt.dayofweek.apply(ToDayOfWeek)
Data['Month']=Data['Date'].dt.month.apply(int)
Data['Year']=Data['Date'].dt.year.apply(int)
climateHeaders=['PRECIP',' TMAX',' TMIN']
###############################################################################
# Data Visualization
###############################################################################
MakeCorrelationPanel(Data,climateHeaders,(10,10))
MakeMeanPlot(Data,(10,5))
MakeMonthlyPlot(Data,(12,12))
###############################################################################
# Approximate harvesting
###############################################################################
AvaliableArea=300
MeanHarvestedWater=AvaliableArea*Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]
lowEstimation=Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]-Data.groupby(['Year','Month']).sum().groupby('Month').std()["PRECIP"]
LowEstimationHarvestedWater=[]
for val in lowEstimation:
if val>0:
LowEstimationHarvestedWater.append(val*AvaliableArea)
else:
LowEstimationHarvestedWater.append(0)
HighEstimation=AvaliableArea*(Data.groupby(['Year','Month']).sum().groupby('Month').mean()["PRECIP"]+Data.groupby(['Year','Month']).sum().groupby('Month').std()["PRECIP"])
plt.figure()
plt.plot(MeanHarvestedWater,label="Mean Forecast")
plt.plot(LowEstimationHarvestedWater,label="Low Forecast")
plt.plot(HighEstimation,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Months")
ax.set_ylabel("Harvested Water (liters)")
ax.set_xticks([0,1,2,3,4,5,6,7,8,9,10,11])
ax.set_xticklabels(MonthsNames,rotation=45)
ax.legend()
PlotStyle(ax)
###############################################################################
# Water reserve sizing
###############################################################################
MonthlyWaterConsumption=20000
def ReserveWaterState(Reserve,WaterWeights):
'''
Parameters
----------
Reserve : int
size of the water reserve.
WaterWeights : list
list with the approximate harvested water.
Returns
-------
container : list
contains the remaining water during each month of a water reserve of size
Reserve.
'''
remaning=Reserve
container=[]
for val in WaterWeights:
consumption=remaning-MonthlyWaterConsumption
if consumption+val>=Reserve:
remaning=Reserve
else:
remaning=consumption+val
container.append(remaning)
return container
#Wrapper function to adjust negative values
def ObjetiveFunction(Reserve,Harvested):
Months=12-np.sum(np.sign(ReserveWaterState(Reserve,Harvested)))
if Months>=12:
return 12
else:
return Months
###############################################################################
# Water reserve sizing fixed use
###############################################################################
ReserveSize=[k for k in range(1,100000,1000)]
meanEstimation=[ObjetiveFunction(val,MeanHarvestedWater) for val in ReserveSize]
lowEstimation=[ObjetiveFunction(val,LowEstimationHarvestedWater) for val in ReserveSize]
highEstimation=[ObjetiveFunction(val,HighEstimation) for val in ReserveSize]
plt.figure()
plt.plot(meanEstimation,label="Mean Forecast")
plt.plot(lowEstimation,label="Low Forecast")
plt.plot(highEstimation,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Reserve Size (m3)")
ax.set_ylabel("Months without water")
ax.legend()
PlotStyle(ax)
###############################################################################
# Water reserve sizing variable use
###############################################################################
MonthlyUse=[3000,3000,6000,9000,12000,15000,20000,20000,20000,20000,20000,20000]
def VariableReserveWaterState(Reserve,WaterWeights,UsageWeights):
'''
Parameters
----------
Reserve : int
size of the water reserve.
WaterWeights : list
list with the approximate harvested water.
UsageWeights : list
list with the approximate monthly water usage.
Returns
-------
container : list
contains the remaining water during each month of a water reserve of size
Reserve.
'''
remaning=Reserve
container=[]
for val,sal in zip(WaterWeights,UsageWeights):
consumption=remaning-sal
if consumption+val>=Reserve:
remaning=Reserve
else:
remaning=consumption+val
container.append(remaning)
return container
#Wrapper function to adjust for negative values
def ObjetiveFunctionVariable(Reserve,Harvested,Usage):
Months=12-np.sum(np.sign(VariableReserveWaterState(Reserve,Harvested,Usage)))
if Months>=12:
return 12
else:
return Months
meanEstimationV=[ObjetiveFunctionVariable(val,MeanHarvestedWater,MonthlyUse) for val in ReserveSize]
lowEstimationV=[ObjetiveFunctionVariable(val,LowEstimationHarvestedWater,MonthlyUse) for val in ReserveSize]
highEstimationV=[ObjetiveFunctionVariable(val,HighEstimation,MonthlyUse) for val in ReserveSize]
plt.figure()
plt.plot(meanEstimationV,label="Mean Forecast")
plt.plot(lowEstimationV,label="Low Forecast")
plt.plot(highEstimationV,label="High Forecast")
ax=plt.gca()
ax.set_xlabel("Reserve Size (m3)")
ax.set_ylabel("Months without water")
ax.legend()
PlotStyle(ax) | '''
fig,axes=plt.subplots(1,3,figsize=figsize,subplot_kw=dict(polar=True))
dataHeaders=['DayOfWeek','Month','Year']
for ax,name in zip(axes,dataHeaders): | random_line_split |
lib.rs | //! An implementation of the [MD6 hash function](http://groups.csail.mit.edu/cis/md6), via FFI to reference implementation.
//!
//! For more information about MD6 visit its [official homepage](http://groups.csail.mit.edu/cis/md6).
//!
//! There are two APIs provided: one for single-chunk hashing and one for hashing of multiple data segments.
//!
//! # Examples
//!
//! Hashing a single chunk of data with a 256-bit MD6 hash function, then verifying the result.
//!
//! ```
//! # use md6::Md6;
//! # use std::iter::FromIterator;
//! let mut result = [0; 32];
//! md6::hash(256, b"The lazy fox jumps over the lazy dog", &mut result).unwrap();
//!
//! assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
//! vec![0xE4, 0x55, 0x51, 0xAA, 0xE2, 0x66, 0xE1, 0x48,
//! 0x2A, 0xC9, 0x8E, 0x24, 0x22, 0x9B, 0x3E, 0x90,
//! 0xDC, 0x06, 0x61, 0x77, 0xF8, 0xFB, 0x1A, 0x52,
//! 0x6E, 0x9D, 0xA2, 0xCC, 0x95, 0x71, 0x97, 0xAA]);
//! ```
//!
//! Hashing multiple chunks of data with a 512-bit MD6 hash function, then verifying the result.
//!
//! ```
//! # use md6::Md6;
//! # use std::iter::FromIterator;
//! let mut result = [0; 64];
//! let mut state = Md6::new(512).unwrap();
//!
//! state.update("Zażółć ".as_bytes());
//! state.update("gęślą ".as_bytes());
//! state.update("jaźń".as_bytes());
//!
//! state.finalise(&mut result);
//! assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
//! vec![0x92, 0x4E, 0x91, 0x6A, 0x01, 0x2C, 0x1A, 0x8D,
//! 0x0F, 0xB7, 0x9A, 0x4A, 0xD4, 0x9C, 0x55, 0x5E,
//! 0xBD, 0xCA, 0x59, 0xB8, 0x1B, 0x4C, 0x13, 0x41,
//! 0x2E, 0x32, 0xA5, 0xC9, 0x3B, 0x61, 0xAD, 0xB8,
//! 0x4D, 0xB3, 0xF9, 0x0C, 0x03, 0x51, 0xB2, 0x9E,
//! 0x7B, 0xAE, 0x46, 0x9F, 0x8D, 0x60, 0x5D, 0xED,
//! 0xFF, 0x51, 0x72, 0xDE, 0xA1, 0x6F, 0x00, 0xF7,
//! 0xB4, 0x82, 0xEF, 0x87, 0xED, 0x77, 0xD9, 0x1A]);
//! ```
//!
//! Comparing result of single- and multi-chunk hash methods hashing the same effective message with a 64-bit MD6 hash
//! function.
//!
//! ```
//! # use md6::Md6;
//! # use std::iter::FromIterator;
//! let mut result_multi = [0; 8];
//! let mut result_single = [0; 8];
//!
//! let mut state = Md6::new(64).unwrap();
//! state.update("Zażółć ".as_bytes());
//! state.update("gęślą ".as_bytes());
//! state.update("jaźń".as_bytes());
//! state.finalise(&mut result_multi);
//!
//! md6::hash(64, "Zażółć gęślą jaźń".as_bytes(), &mut result_single).unwrap();
//!
//! assert_eq!(Vec::from_iter(result_multi .iter().map(|&i| i)),
//! Vec::from_iter(result_single.iter().map(|&i| i)));
//! ```
//!
//! # Special thanks
//!
//! To all who support further development on [Patreon](https://patreon.com/nabijaczleweli), in particular:
//!
//! * ThePhD
//! * Embark Studios
extern crate libc;
mod native;
use std::error::Error;
use std::fmt;
use std::io;
/// Helper result type containing `Md6Error`.
pub type Result<T> = std::result::Result<T, Md6Error>;
/// Hash all data in one fell swoop.
///
/// Refer to individual functions for extended documentation.
///
/// # Example
///
/// ```
/// # use md6::Md6;
/// # use std::iter::FromIterator;
/// let mut result_256 = [0; 32];
/// let mut result_512 = [0; 64];
///
/// md6::hash(256, &[], &mut result_256).unwrap();
/// md6::hash(512, &[], &mut result_512).unwrap();
///
/// assert_eq!(Vec::from_iter(result_256.iter().map(|&i| i)),
/// vec![0xBC, 0xA3, 0x8B, 0x24, 0xA8, 0x04, 0xAA, 0x37,
/// 0xD8, 0x21, 0xD3, 0x1A, 0xF0, 0x0F, 0x55, 0x98,
/// 0x23, 0x01, 0x22, 0xC5, 0xBB, 0xFC, 0x4C, 0x4A,
/// 0xD5, 0xED, 0x40, 0xE4, 0x25, 0x8F, 0x04, 0xCA]);
/// assert_eq!(Vec::from_iter(result_512.iter().map(|&i| i)),
/// vec![0x6B, 0x7F, 0x33, 0x82, 0x1A, 0x2C, 0x06, 0x0E,
/// 0xCD, 0xD8, 0x1A, 0xEF, 0xDD, 0xEA, 0x2F, 0xD3,
/// 0xC4, 0x72, 0x02, 0x70, 0xE1, 0x86, 0x54, 0xF4,
/// 0xCB, 0x08, 0xEC, 0xE4, 0x9C, 0xCB, 0x46, 0x9F,
/// 0x8B, 0xEE, 0xEE, 0x7C, 0x83, 0x12, 0x06, 0xBD,
/// 0x57, 0x7F, 0x9F, 0x26, 0x30, 0xD9, 0x17, 0x79,
/// 0x79, 0x20, 0x3A, 0x94, 0x89, 0xE4, 0x7E, 0x04,
/// 0xDF, 0x4E, 0x6D, 0xEA, 0xA0, 0xF8, 0xE0, 0xC0]);
/// ```
pub fn hash(hashbitlen: i32, data: &[u8], hashval: &mut [u8]) -> Result<()> {
match unsafe { native::MD6_Hash_Hash(hashbitlen, data.as_ptr(), data.len() as u64 * 8, hashval.as_mut_ptr()) } {
0 => Ok(()),
e => Err(Md6Error::from(e)),
}
}
/// Hashing state for multiple data sets.
///
/// # Example
///
/// Hashing a string split into multiple chunks.
///
/// ```
/// # use md6::Md6;
/// # use std::iter::FromIterator;
/// let mut state = Md6::new(256).unwrap();
///
/// state.update(b"Abolish ");
/// state.update(b"the ");
/// state.update(b"bourgeoisie");
/// state.update(b"!");
///
/// let mut result = [0; 32];
/// state.finalise(&mut result);
/// assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
/// vec![0x49, 0x23, 0xE7, 0xB0, 0x53, 0x32, 0x05, 0xB0,
/// 0x25, 0xC5, 0xD4, 0xDB, 0x37, 0xB8, 0x99, 0x12,
/// 0x16, 0x2E, 0xFD, 0xF4, 0xDA, 0xC2, 0x2C, 0xFF,
/// 0xE6, 0x27, 0xF1, 0x11, 0xEC, 0x05, 0x2F, 0xB5]);
/// ```
///
/// A `Write` implementation is also provided:
///
/// ```
/// # use std::iter::FromIterator;
/// # use md6::Md6;
/// # use std::io;
/// let mut state = Md6::new(256).unwrap();
/// io::copy(&mut &b"The lazy fox jumps over the lazy dog."[..], &mut state).unwrap();
///
/// let mut result = [0; 32];
/// state.finalise(&mut result);
/// assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
/// vec![0x06, 0x60, 0xBB, 0x89, 0x85, 0x06, 0xE4, 0xD9,
/// 0x29, 0x8C, 0xD1, 0xB0, 0x40, 0x73, 0x49, 0x60,
/// 0x47, 0x3E, 0x25, 0xA4, 0x9D, 0x52, 0x34, 0xBB,
/// 0x2A, 0xCA, 0x31, 0x57, 0xD1, 0xAF, 0x27, 0xAA]);
/// ```
pub struct Md6 {
raw_state: native::FFIHashState,
}
/// Some functions in the library can fail, this enum represents all the possible ways they can.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum Md6Error {
/// Generic failure state
Fail,
/// `hashbitlen` passed to `Md6::new()` or `hash()` incorrect
BadHashbitlen,
}
impl Md6 {
/// Create a new hash state and initialise it with the given bit length.
///
/// `hashbitlen` is the hash output length. Must be between `1` and `512`.
///
/// Returns:
///
/// * `Err(Md6Error::BadHashbitlen)` if `hashbitlen` is not any of the mentioned above, or
/// * `Ok(Md6)` if initialisation succeeds.
///
/// # Examples
///
/// Incorrect `hashbitlen`
///
/// ```
/// # use md6::Md6;
/// assert_eq!(Md6::new(0).map(|_| ()), Err(md6::Md6Error::BadHashbitlen));
/// assert_eq!(Md6::new(1024).map(|_| ()), Err(md6::Md6Error::BadHashbitlen));
/// ```
///
/// Creating a 512-long state
///
/// ```
/// # use md6::Md6;
/// Md6::new(512).unwrap();
/// ```
pub fn new(hashbitlen: i32) -> Result<Md6> {
let mut raw_state = native::malloc_hash_state();
match unsafe { native::MD6_Hash_Init(raw_state, hashbitlen) } {
0 => Ok(Md6 { raw_state: raw_state }),
e => {
native::free_hash_state(&mut raw_state);
Err(Md6Error::from(e))
}
}
}
/// Append the provided data to the hash function.
///
/// # Examples
///
/// Hashing a part of [a short story](http://nabijaczleweli.xyz/capitalism/writing/Świat_to_kilka_takich_pokoi/)
///
/// ```
/// # use md6::Md6;
/// # use std::iter::FromIterator;
/// let mut result = [0; 64];
///
/// let mut state = Md6::new(512).unwrap();
/// state.update(" Serbiańcy znowu się pochlali, ale w sumie".as_bytes());
/// state.update("czegoż się po wschodnich słowianach spodziewać, swoją".as_bytes());
/// state.update("drogą. I, jak to wszystkim homo sapiensom się dzieje".as_bytes());
/// state.update("filozofować poczęli.".as_bytes());
/// state.finalise(&mut result);
///
/// assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
/// vec![0xD4, 0xAC, 0x5B, 0xDA, 0x95, 0x44, 0xCC, 0x3F,
/// 0xFB, 0x59, 0x4B, 0x62, 0x84, 0xEF, 0x07, 0xDD,
/// 0x59, 0xE7, 0x94, 0x2D, 0xCA, 0xCA, 0x07, 0x52,
/// 0x14, 0x13, 0xE8, 0x06, 0xBD, 0x84, 0xB8, 0xC7,
/// 0x8F, 0xB8, 0x03, 0x24, 0x39, 0xC8, 0x2E, 0xEC,
/// 0x9F, 0x7F, 0x4F, 0xDA, 0xF8, 0x8A, 0x4B, 0x5F,
/// 0x9D, 0xF8, 0xFD, 0x47, 0x0C, 0x4F, 0x2F, 0x4B,
/// 0xCD, 0xDF, 0xAF, 0x13, 0xE1, 0xE1, 0x4D, 0x9D]);
/// ```
pub fn update(&mut self, data: &[u8]) {
unsafe {
native::MD6_Hash_Update(self.raw_state, data.as_ptr(), data.len() as u64 * 8);
}
}
/// Finish hashing and store the output result in the provided space.
///
/// The provided space must not be smaller than the hash function's size,
/// if the provided space is smaller than the hash function's size, the behaviour is undefined. | ///
/// Storing and verifying results of all possible sizes.
///
/// ```
/// # use md6::Md6;
/// # use std::iter::FromIterator;
/// let mut result_64 = [0; 8];
/// let mut result_128 = [0; 16];
/// let mut result_256 = [0; 32];
/// let mut result_512 = [0; 64];
///
/// let mut state_64 = Md6::new(64) .unwrap();
/// let mut state_128 = Md6::new(128).unwrap();
/// let mut state_256 = Md6::new(256).unwrap();
/// let mut state_512 = Md6::new(512).unwrap();
///
/// state_64 .update(b"The lazy fox jumps over the lazy dog.");
/// state_128.update(b"The lazy fox jumps over the lazy dog.");
/// state_256.update(b"The lazy fox jumps over the lazy dog.");
/// state_512.update(b"The lazy fox jumps over the lazy dog.");
///
/// state_64 .finalise(&mut result_64);
/// state_128.finalise(&mut result_128);
/// state_256.finalise(&mut result_256);
/// state_512.finalise(&mut result_512);
///
/// assert_eq!(Vec::from_iter(result_64.iter().map(|&i| i)),
/// vec![0xF3, 0x50, 0x60, 0xAE, 0xD7, 0xF0, 0xB0, 0x96]);
/// assert_eq!(Vec::from_iter(result_128.iter().map(|&i| i)),
/// vec![0x08, 0x5E, 0xA5, 0xF6, 0x6D, 0x2A, 0xC1, 0xF3,
/// 0xCF, 0xC5, 0x6F, 0xA3, 0x7D, 0x1B, 0xEC, 0x9C]);
/// assert_eq!(Vec::from_iter(result_256.iter().map(|&i| i)),
/// vec![0x06, 0x60, 0xBB, 0x89, 0x85, 0x06, 0xE4, 0xD9,
/// 0x29, 0x8C, 0xD1, 0xB0, 0x40, 0x73, 0x49, 0x60,
/// 0x47, 0x3E, 0x25, 0xA4, 0x9D, 0x52, 0x34, 0xBB,
/// 0x2A, 0xCA, 0x31, 0x57, 0xD1, 0xAF, 0x27, 0xAA]);
/// assert_eq!(Vec::from_iter(result_512.iter().map(|&i| i)),
/// vec![0xA5, 0xFE, 0xC7, 0x36, 0x81, 0xFA, 0x64, 0xBE,
/// 0xE7, 0x2D, 0xB6, 0x05, 0x35, 0x26, 0x6C, 0x00,
/// 0x6B, 0x2A, 0x49, 0x54, 0x04, 0x7E, 0x39, 0x05,
/// 0xD1, 0xFE, 0xB3, 0x25, 0x21, 0x01, 0x81, 0x2D,
/// 0xF2, 0x20, 0xC9, 0x09, 0xD4, 0xD7, 0xB7, 0x94,
/// 0x53, 0xB4, 0x2D, 0xAD, 0x6D, 0x75, 0x52, 0xC7,
/// 0x82, 0xE8, 0x4E, 0xFC, 0x3C, 0x34, 0x5B, 0x0C,
/// 0xFF, 0x72, 0x1B, 0x56, 0x73, 0x05, 0x6B, 0x75]);
/// ```
pub fn finalise(&mut self, hashval: &mut [u8]) {
unsafe {
native::MD6_Hash_Final(self.raw_state, hashval.as_mut_ptr());
}
}
}
/// The `Write` implementation updates the state with the provided data.
///
/// For example, to hash a file:
///
/// ```
/// # use std::iter::FromIterator;
/// # use std::fs::File;
/// # use md6::Md6;
/// # use std::io;
/// let mut state = Md6::new(256).unwrap();
/// io::copy(&mut File::open("LICENSE").unwrap(), &mut state).unwrap();
///
/// let mut result = [0; 32];
/// state.finalise(&mut result);
/// assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
/// vec![0xB7, 0x82, 0xA1, 0xEA, 0xDE, 0xC5, 0x46, 0x3E,
/// 0x1D, 0xCF, 0x56, 0xA2, 0xD7, 0x52, 0x23, 0x82,
/// 0xA3, 0x02, 0xE6, 0xB6, 0x1D, 0x45, 0xA8, 0xBF,
/// 0x95, 0x12, 0x92, 0x1E, 0xAD, 0x21, 0x3E, 0x47]);
/// ```
impl io::Write for Md6 {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.update(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl Drop for Md6 {
fn drop(&mut self) {
native::free_hash_state(&mut self.raw_state);
}
}
impl Error for Md6Error {
fn description(&self) -> &str {
match self {
&Md6Error::Fail => "Generic MD6 fail",
&Md6Error::BadHashbitlen => "Incorrect hashbitlen",
}
}
}
impl From<i32> for Md6Error {
/// Passing incorrect error values yields unspecified behaviour.
fn from(i: i32) -> Self {
match i {
0 => panic!("Not an error"),
1 => Md6Error::Fail,
2 => Md6Error::BadHashbitlen,
_ => panic!("Incorrect error number"),
}
}
}
impl fmt::Display for Md6Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
} | ///
/// # Examples | random_line_split |
lib.rs | //! An implementation of the [MD6 hash function](http://groups.csail.mit.edu/cis/md6), via FFI to reference implementation.
//!
//! For more information about MD6 visit its [official homepage](http://groups.csail.mit.edu/cis/md6).
//!
//! There are two APIs provided: one for single-chunk hashing and one for hashing of multiple data segments.
//!
//! # Examples
//!
//! Hashing a single chunk of data with a 256-bit MD6 hash function, then verifying the result.
//!
//! ```
//! # use md6::Md6;
//! # use std::iter::FromIterator;
//! let mut result = [0; 32];
//! md6::hash(256, b"The lazy fox jumps over the lazy dog", &mut result).unwrap();
//!
//! assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
//! vec![0xE4, 0x55, 0x51, 0xAA, 0xE2, 0x66, 0xE1, 0x48,
//! 0x2A, 0xC9, 0x8E, 0x24, 0x22, 0x9B, 0x3E, 0x90,
//! 0xDC, 0x06, 0x61, 0x77, 0xF8, 0xFB, 0x1A, 0x52,
//! 0x6E, 0x9D, 0xA2, 0xCC, 0x95, 0x71, 0x97, 0xAA]);
//! ```
//!
//! Hashing multiple chunks of data with a 512-bit MD6 hash function, then verifying the result.
//!
//! ```
//! # use md6::Md6;
//! # use std::iter::FromIterator;
//! let mut result = [0; 64];
//! let mut state = Md6::new(512).unwrap();
//!
//! state.update("Zażółć ".as_bytes());
//! state.update("gęślą ".as_bytes());
//! state.update("jaźń".as_bytes());
//!
//! state.finalise(&mut result);
//! assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
//! vec![0x92, 0x4E, 0x91, 0x6A, 0x01, 0x2C, 0x1A, 0x8D,
//! 0x0F, 0xB7, 0x9A, 0x4A, 0xD4, 0x9C, 0x55, 0x5E,
//! 0xBD, 0xCA, 0x59, 0xB8, 0x1B, 0x4C, 0x13, 0x41,
//! 0x2E, 0x32, 0xA5, 0xC9, 0x3B, 0x61, 0xAD, 0xB8,
//! 0x4D, 0xB3, 0xF9, 0x0C, 0x03, 0x51, 0xB2, 0x9E,
//! 0x7B, 0xAE, 0x46, 0x9F, 0x8D, 0x60, 0x5D, 0xED,
//! 0xFF, 0x51, 0x72, 0xDE, 0xA1, 0x6F, 0x00, 0xF7,
//! 0xB4, 0x82, 0xEF, 0x87, 0xED, 0x77, 0xD9, 0x1A]);
//! ```
//!
//! Comparing result of single- and multi-chunk hash methods hashing the same effective message with a 64-bit MD6 hash
//! function.
//!
//! ```
//! # use md6::Md6;
//! # use std::iter::FromIterator;
//! let mut result_multi = [0; 8];
//! let mut result_single = [0; 8];
//!
//! let mut state = Md6::new(64).unwrap();
//! state.update("Zażółć ".as_bytes());
//! state.update("gęślą ".as_bytes());
//! state.update("jaźń".as_bytes());
//! state.finalise(&mut result_multi);
//!
//! md6::hash(64, "Zażółć gęślą jaźń".as_bytes(), &mut result_single).unwrap();
//!
//! assert_eq!(Vec::from_iter(result_multi .iter().map(|&i| i)),
//! Vec::from_iter(result_single.iter().map(|&i| i)));
//! ```
//!
//! # Special thanks
//!
//! To all who support further development on [Patreon](https://patreon.com/nabijaczleweli), in particular:
//!
//! * ThePhD
//! * Embark Studios
extern crate libc;
mod native;
use std::error::Error;
use std::fmt;
use std::io;
/// Helper result type containing `Md6Error`.
pub type Result<T> = std::result::Result<T, Md6Error>;
/// Hash all data in one fell swoop.
///
/// Refer to individual functions for extended documentation.
///
/// # Example
///
/// ```
/// # use md6::Md6;
/// # use std::iter::FromIterator;
/// let mut result_256 = [0; 32];
/// let mut result_512 = [0; 64];
///
/// md6::hash(256, &[], &mut result_256).unwrap();
/// md6::hash(512, &[], &mut result_512).unwrap();
///
/// assert_eq!(Vec::from_iter(result_256.iter().map(|&i| i)),
/// vec![0xBC, 0xA3, 0x8B, 0x24, 0xA8, 0x04, 0xAA, 0x37,
/// 0xD8, 0x21, 0xD3, 0x1A, 0xF0, 0x0F, 0x55, 0x98,
/// 0x23, 0x01, 0x22, 0xC5, 0xBB, 0xFC, 0x4C, 0x4A,
/// 0xD5, 0xED, 0x40, 0xE4, 0x25, 0x8F, 0x04, 0xCA]);
/// assert_eq!(Vec::from_iter(result_512.iter().map(|&i| i)),
/// vec![0x6B, 0x7F, 0x33, 0x82, 0x1A, 0x2C, 0x06, 0x0E,
/// 0xCD, 0xD8, 0x1A, 0xEF, 0xDD, 0xEA, 0x2F, 0xD3,
/// 0xC4, 0x72, 0x02, 0x70, 0xE1, 0x86, 0x54, 0xF4,
/// 0xCB, 0x08, 0xEC, 0xE4, 0x9C, 0xCB, 0x46, 0x9F,
/// 0x8B, 0xEE, 0xEE, 0x7C, 0x83, 0x12, 0x06, 0xBD,
/// 0x57, 0x7F, 0x9F, 0x26, 0x30, 0xD9, 0x17, 0x79,
/// 0x79, 0x20, 0x3A, 0x94, 0x89, 0xE4, 0x7E, 0x04,
/// 0xDF, 0x4E, 0x6D, 0xEA, 0xA0, 0xF8, 0xE0, 0xC0]);
/// ```
pub fn hash(hashbitlen: i32, data: &[u8], hashval: &mut [u8]) -> Result<()> {
match unsafe { native::MD6_Hash_Hash(hashbitlen, data.as_ptr(), data.len() as u64 * 8, hashval.as_mut_ptr()) } {
0 => Ok(()),
e => Err(Md6Error::from(e)),
}
}
/// Hashing state for multiple data sets.
///
/// # Example
///
/// Hashing a string split into multiple chunks.
///
/// ```
/// # use md6::Md6;
/// # use std::iter::FromIterator;
/// let mut state = Md6::new(256).unwrap();
///
/// state.update(b"Abolish ");
/// state.update(b"the ");
/// state.update(b"bourgeoisie");
/// state.update(b"!");
///
/// let mut result = [0; 32];
/// state.finalise(&mut result);
/// assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
/// vec![0x49, 0x23, 0xE7, 0xB0, 0x53, 0x32, 0x05, 0xB0,
/// 0x25, 0xC5, 0xD4, 0xDB, 0x37, 0xB8, 0x99, 0x12,
/// 0x16, 0x2E, 0xFD, 0xF4, 0xDA, 0xC2, 0x2C, 0xFF,
/// 0xE6, 0x27, 0xF1, 0x11, 0xEC, 0x05, 0x2F, 0xB5]);
/// ```
///
/// A `Write` implementation is also provided:
///
/// ```
/// # use std::iter::FromIterator;
/// # use md6::Md6;
/// # use std::io;
/// let mut state = Md6::new(256).unwrap();
/// io::copy(&mut &b"The lazy fox jumps over the lazy dog."[..], &mut state).unwrap();
///
/// let mut result = [0; 32];
/// state.finalise(&mut result);
/// assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
/// vec![0x06, 0x60, 0xBB, 0x89, 0x85, 0x06, 0xE4, 0xD9,
/// 0x29, 0x8C, 0xD1, 0xB0, 0x40, 0x73, 0x49, 0x60,
/// 0x47, 0x3E, 0x25, 0xA4, 0x9D, 0x52, 0x34, 0xBB,
/// 0x2A, 0xCA, 0x31, 0x57, 0xD1, 0xAF, 0x27, 0xAA]);
/// ```
pub struct Md6 {
raw_state: native::FFIHashState,
}
/// Some functions in the library can fail, this enum represents all the possible ways they can.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum Md6Error {
/// Generic failure state
Fail,
/// `hashbitlen` passed to `Md6::new()` or `hash()` incorrect
BadHashbitlen,
}
impl Md6 {
/// Create a new hash state and initialise it with the given bit length.
///
/// `hashbitlen` is the hash output length. Must be between `1` and `512`.
///
/// Returns:
///
/// * `Err(Md6Error::BadHashbitlen)` if `hashbitlen` is not any of the mentioned above, or
/// * `Ok(Md6)` if initialisation succeeds.
///
/// # Examples
///
/// Incorrect `hashbitlen`
///
/// ```
/// # use md6::Md6;
/// assert_eq!(Md6::new(0).map(|_| ()), Err(md6::Md6Error::BadHashbitlen));
/// assert_eq!(Md6::new(1024).map(|_| ()), Err(md6::Md6Error::BadHashbitlen));
/// ```
///
/// Creating a 512-long state
///
/// ```
/// # use md6::Md6;
/// Md6::new(512).unwrap();
/// ```
pub fn new(hashbitlen: i32) -> Result<Md6> {
let mut raw_state = native::malloc_hash_state();
match unsafe { native::MD6_Hash_Init(raw_state, hashbitlen) } {
0 => Ok(Md6 { raw_state: raw_state }),
e => {
native::free_hash_state(&mut raw_state);
Err(Md6Error::from(e))
}
}
}
/// Append the provided data to the hash function.
///
/// # Examples
///
/// Hashing a part of [a short story](http://nabijaczleweli.xyz/capitalism/writing/Świat_to_kilka_takich_pokoi/)
///
/// ```
/// # use md6::Md6;
/// # use std::iter::FromIterator;
/// let mut result = [0; 64];
///
/// let mut state = Md6::new(512).unwrap();
/// state.update(" Serbiańcy znowu się pochlali, ale w sumie".as_bytes());
/// state.update("czegoż się po wschodnich słowianach spodziewać, swoją".as_bytes());
/// state.update("drogą. I, jak to wszystkim homo sapiensom się dzieje".as_bytes());
/// state.update("filozofować poczęli.".as_bytes());
/// state.finalise(&mut result);
///
/// assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
/// vec![0xD4, 0xAC, 0x5B, 0xDA, 0x95, 0x44, 0xCC, 0x3F,
/// 0xFB, 0x59, 0x4B, 0x62, 0x84, 0xEF, 0x07, 0xDD,
/// 0x59, 0xE7, 0x94, 0x2D, 0xCA, 0xCA, 0x07, 0x52,
/// 0x14, 0x13, 0xE8, 0x06, 0xBD, 0x84, 0xB8, 0xC7,
/// 0x8F, 0xB8, 0x03, 0x24, 0x39, 0xC8, 0x2E, 0xEC,
/// 0x9F, 0x7F, 0x4F, 0xDA, 0xF8, 0x8A, 0x4B, 0x5F,
/// 0x9D, 0xF8, 0xFD, 0x47, 0x0C, 0x4F, 0x2F, 0x4B,
/// 0xCD, 0xDF, 0xAF, 0x13, 0xE1, 0xE1, 0x4D, 0x9D]);
/// ```
pub fn update(&mut self, data: &[u8]) {
unsafe {
native::MD6_Hash_Update(self.raw_state, data.as_ptr(), data.len() as u64 * 8);
}
}
/// Finish hashing and store the output result in the provided space.
///
/// The provided space must not be smaller than the hash function's size,
/// if the provided space is smaller than the hash function's size, the behaviour is undefined.
///
/// # Examples
///
/// Storing and verifying results of all possible sizes.
///
/// ```
/// # use md6::Md6;
/// # use std::iter::FromIterator;
/// let mut result_64 = [0; 8];
/// let mut result_128 = [0; 16];
/// let mut result_256 = [0; 32];
/// let mut result_512 = [0; 64];
///
/// let mut state_64 = Md6::new(64) .unwrap();
/// let mut state_128 = Md6::new(128).unwrap();
/// let mut state_256 = Md6::new(256).unwrap();
/// let mut state_512 = Md6::new(512).unwrap();
///
/// state_64 .update(b"The lazy fox jumps over the lazy dog.");
/// state_128.update(b"The lazy fox jumps over the lazy dog.");
/// state_256.update(b"The lazy fox jumps over the lazy dog.");
/// state_512.update(b"The lazy fox jumps over the lazy dog.");
///
/// state_64 .finalise(&mut result_64);
/// state_128.finalise(&mut result_128);
/// state_256.finalise(&mut result_256);
/// state_512.finalise(&mut result_512);
///
/// assert_eq!(Vec::from_iter(result_64.iter().map(|&i| i)),
/// vec![0xF3, 0x50, 0x60, 0xAE, 0xD7, 0xF0, 0xB0, 0x96]);
/// assert_eq!(Vec::from_iter(result_128.iter().map(|&i| i)),
/// vec![0x08, 0x5E, 0xA5, 0xF6, 0x6D, 0x2A, 0xC1, 0xF3,
/// 0xCF, 0xC5, 0x6F, 0xA3, 0x7D, 0x1B, 0xEC, 0x9C]);
/// assert_eq!(Vec::from_iter(result_256.iter().map(|&i| i)),
/// vec![0x06, 0x60, 0xBB, 0x89, 0x85, 0x06, 0xE4, 0xD9,
/// 0x29, 0x8C, 0xD1, 0xB0, 0x40, 0x73, 0x49, 0x60,
/// 0x47, 0x3E, 0x25, 0xA4, 0x9D, 0x52, 0x34, 0xBB,
/// 0x2A, 0xCA, 0x31, 0x57, 0xD1, 0xAF, 0x27, 0xAA]);
/// assert_eq!(Vec::from_iter(result_512.iter().map(|&i| i)),
/// vec![0xA5, 0xFE, 0xC7, 0x36, 0x81, 0xFA, 0x64, 0xBE,
/// 0xE7, 0x2D, 0xB6, 0x05, 0x35, 0x26, 0x6C, 0x00,
/// 0x6B, 0x2A, 0x49, 0x54, 0x04, 0x7E, 0x39, 0x05,
/// 0xD1, 0xFE, 0xB3, 0x25, 0x21, 0x01, 0x81, 0x2D,
/// 0xF2, 0x20, 0xC9, 0x09, 0xD4, 0xD7, 0xB7, 0x94,
/// 0x53, 0xB4, 0x2D, 0xAD, 0x6D, 0x75, 0x52, 0xC7,
/// 0x82, 0xE8, 0x4E, 0xFC, 0x3C, 0x34, 0x5B, 0x0C,
/// 0xFF, 0x72, 0x1B, 0x56, 0x73, 0x05, 0x6B, 0x75]);
/// ```
pub fn finalise(&mut self, hashval: &mut [u8]) {
unsafe {
native::MD6_Hash_Final(self.raw_state, hashval.as_mut_ptr());
}
}
}
/// The `Write` implementation updates the state with the provided data.
///
/// For example, to hash a file:
///
/// ```
/// # use std::iter::FromIterator;
/// # use std::fs::File;
/// # use md6::Md6;
/// # use std::io;
/// let mut state = Md6::new(256).unwrap();
/// io::copy(&mut File::open("LICENSE").unwrap(), &mut state).unwrap();
///
/// let mut result = [0; 32];
/// state.finalise(&mut result);
/// assert_eq!(Vec::from_iter(result.iter().map(|&i| i)),
/// vec![0xB7, 0x82, 0xA1, 0xEA, 0xDE, 0xC5, 0x46, 0x3E,
/// 0x1D, 0xCF, 0x56, 0xA2, 0xD7, 0x52, 0x23, 0x82,
/// 0xA3, 0x02, 0xE6, 0xB6, 0x1D, 0x45, 0xA8, 0xBF,
/// 0x95, 0x12, 0x92, 0x1E, 0xAD, 0x21, 0x3E, 0x47]);
/// ```
impl io::Write for Md6 {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.update(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl Drop for Md6 {
fn drop(&mut self) {
native::free_ | _state(&mut self.raw_state);
}
}
impl Error for Md6Error {
fn description(&self) -> &str {
match self {
&Md6Error::Fail => "Generic MD6 fail",
&Md6Error::BadHashbitlen => "Incorrect hashbitlen",
}
}
}
impl From<i32> for Md6Error {
/// Passing incorrect error values yields unspecified behaviour.
fn from(i: i32) -> Self {
match i {
0 => panic!("Not an error"),
1 => Md6Error::Fail,
2 => Md6Error::BadHashbitlen,
_ => panic!("Incorrect error number"),
}
}
}
impl fmt::Display for Md6Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
| hash | identifier_name |
classes.ts |
import {
Iuser, Ijugada, Iplayer,
GamePhase, Igame, Iroom,
Ifactory, Itile, IrowLeft, IobjectiveTile, IgameMode
} from "../../common/interfaces"
export class Room implements Iroom{
users: Iuser[]=[];
name: string="";
constructor(name:string) {
this.name=name
}
addUser(user:Iuser):void{
this.users.push(user)
}
removeUser(user: Iuser):void{
this.users = this.users.filter((x)=>{x.conn!=user.conn})
}
nameUserExist(name:string):boolean{
let tmp:Iuser[] = this.users.filter((x) => { x.name == name })
return tmp.length>0?true:false
}
getUserByName(name:string):(Iuser|null){
let tmp:Iuser[] = this.users.filter((x)=>{x.name ==name})
return tmp.length>0?tmp[0]:null
}
getUserByConn(conn: string): (Iuser | null){
let tmp: Iuser[] = this.users.filter((x) => { x.conn == conn })
return tmp.length > 0 ? tmp[0] : null
}
updateUser(user:Iuser):void{
this.users = this.users.map((x) => {
x.conn= (x.name == user.name )? user.conn:x.conn;
return x
})
}
}
export class Player implements Iplayer{
rowsLefts: IrowLeft[];
rowsRight: IobjectiveTile[][];
user: Iuser;
points: number;
hazard: string[];
firstPlayerToken: boolean;
constructor(user:Iuser, gameMode:IgameMode){
this.points = 0
this.hazard = []
this.firstPlayerToken = false
this.user = user
this.rowsLefts = gameMode.rowsLefts
this.rowsRight = gameMode.rowsRight
}
}
export class GameMode implements IgameMode{
colors: string[]= []
privateBoardRows: number =0
rowsLefts: IrowLeft[] =[];
rowsRight: IobjectiveTile[][] =[];
hazard:string []
constructor(modeName:String){
switch (modeName){
case "normal":
let tempRowLeft:IrowLeft;
let tempTile:string
this.colors = ["red", "black", "yellow", "blue", "pink"]
this.privateBoardRows = 5
let objTile: IobjectiveTile
let objetiveRow: IobjectiveTile[]
for (let i = 0; i < this.privateBoardRows; i++) {
tempRowLeft={color:"",used:0,max:i+1}
this.rowsLefts.push(tempRowLeft)
objetiveRow = this.colors.map((color) => {
objTile = { active: false, color: color }
return objTile
})
this.rowsRight.push(objetiveRow)
let tempColor:string
tempColor = this.colors.pop()
this.colors.reverse()
this.colors.push(tempColor)
this.colors.reverse()
}
break
}
}
}
export class Factory implements Ifactory{
tiles: Itile[]=[]
constructor(colors:string[]){
colors.forEach(element => {
this.tiles.push({ color: element, amount: 0 })
});
}
add(color: string, amount: number):void{
this.tiles.map((x)=>{
if(x.color==color){
x.amount +=amount
}
return x
})
}
remove(color: string, amount: number=0): number{
let result:number =amount
this.tiles.map((x) => {
if (x.color == color) {
if(amount ==0){
result =x.amount
x.amount = 0
}else{
x.amount -= amount
}
}
return x
})
return result
}
addRandomColor(): void{
let allColors:string[] = this.tiles.map((x)=>{return x.color})
this.add(
allColors[
Math.floor(Math.random()*allColors.length)
],
1)
}
}
export class Game implements Igame{
mode: IgameMode;
roomName: string;
players: Iplayer[]=[];
phase: GamePhase;
factories: Ifactory[];
bag: Ifactory
trash: Ifactory
turn: number;
constructor(mode:string,room:Iroom){
this.mode =new GameMode(mode)
this.roomName =room.name
this.bag = new Factory(this.mode.colors)
this.trash = new Factory(this.mode.colors)
this.factories = []
let newPlayer:Iplayer;
for (let index = 0; index < room.users.length; index++) {
newPlayer = new Player(room.users[index], this.mode)
this.players.push(newPlayer)
}
// create bag and trash
this.mode.colors.forEach(color => {
this.bag.add(color,20)
});
//create public board
let fabricQuant: number
fabricQuant = room.users.length == 2 ? 6 : 6 + ((room.users.length - 2) * 2)
for (let index = 0; index < fabricQuant; index++) {
this.factories.push(new Factory(this.mode.colors))
}
//assign to fabrics
this.bagToFabrics(4)
this.turn = Math.floor(Math.random() * (this.players.length - 1) + 1)
}
bagToFabrics(amountPerFabric:number):void{
let tempBag: Ifactory = new Factory(this.mode.colors)
let RND: number
let RNDcolor: string
let cantidadFactories:number = this.factories.length
for (let fabricI = 1; fabricI < cantidadFactories; fabricI++) {
for (let index = 0; index < amountPerFabric; index++) {
tempBag.tiles = this.bag.tiles.filter((x) => { return x.amount > 0 })
if (tempBag.tiles.length ==0) |
RND = Math.floor(Math.random() * (tempBag.tiles.length - 1) + 1)
RNDcolor = tempBag.tiles[RND].color
this.bag.tiles = this.bag.tiles.map((x)=>{
x.amount = x.color == RNDcolor ? x.amount - 1 : x.amount
return x
})
this.factories[fabricI].tiles = this.factories[fabricI].tiles.map((x) => {
x.amount = x.color == RNDcolor ? x.amount + 1 : x.amount
return x
})
}
}
}
moveTrashToBag():void{
this.trash.tiles.map((x)=>{
this.bag.add(x.color,x.amount)
this.trash.remove(x.color, x.amount)
})
}
pickTile(jugada:Ijugada):object{
let rowId: number = jugada.row
let row: IrowLeft = jugada.player.rowsLefts[rowId]
row.color = row.color == "" ? jugada.color : row.color
if (row.color != jugada.color) {
return {event:"error",reason:"jugada invalida"}
} else {
let amount: number = this.factories[0].remove(jugada.color)
//public board
if (jugada.fabricIndex> 0) {
let tempFabric:Ifactory = this.factories[jugada.fabricIndex]
tempFabric.tiles.forEach(tile => {
this.factories[0].add(tile.color,tile.amount)
});
this.factories[jugada.fabricIndex] = new Factory(this.mode.colors)
}
//private board
let total: number = row.used + amount
if (total > row.max) {
jugada.player.hazard.push(jugada.color)
} else {
row.used += amount
}
jugada.player.rowsLefts[rowId] = row
return{
event:"ok"
}
}
}
partialScore():void{
let ejeX:number
let ejeY:number
let checkNext=(x:number,y:number,player:Iplayer,position:string):number=>{
//position["+x","-x","-y","+x"]
let score:number=0;
if(x<0 ||y <0 || y > player.rowsRight.length ||
x>player.rowsRight[0].length
){
return 0
}
if (!player.rowsRight[y][x]){
return 0
}
score +=1
switch (position){
case "+x":
score +=checkNext(x+1,y,player,position)
break
case "-x":
score +=checkNext(x-1, y, player, position)
break
case "+y":
score +=checkNext(x, y+1, player, position)
break
case "-y":
score +=checkNext(x, y-1, player, position)
break
}
return score
}
this.players.forEach(player => {
for (ejeY = 0; ejeY < this.mode.privateBoardRows; ejeY++) {
if (player.rowsLefts[ejeY].max == player.rowsLefts[ejeY].used){
ejeX = player.rowsRight[ejeY].indexOf({color: player.rowsLefts[ejeY].color,active:false})
player.rowsRight[ejeY][ejeX].active= true
let tempX1:number =0
let tempX2: number = 0
let tempY1: number = 0
let tempY2: number = 0
let subtotal:number =0
tempX1 = checkNext(ejeX,ejeY,player,"+x")
tempX2 = checkNext(ejeX, ejeY, player, "-x") -1
tempY1 = checkNext(ejeX, ejeY, player, "+y")
tempY2 = checkNext(ejeX, ejeY, player, "-x") -1
subtotal+= tempX1 + tempX2 - 2 == 0 ? 0:tempX1 + tempX2 - 1
subtotal += tempY1 + tempY2 - 2 == 0 ? 0 : tempY1 + tempY2 - 1
player.rowsLefts[ejeY].used = 0
player.rowsLefts[ejeY].color = ""
}
}
});
}
finalScore():void{
}
updateUser(user:Iuser):void{
this.players.map((x)=>{
x.user.conn =x.user.name == user.name? user.conn:x.user.conn
return x
})
}
} | {
this.moveTrashToBag()
tempBag.tiles = this.bag.tiles
} | conditional_block |
classes.ts |
import {
Iuser, Ijugada, Iplayer,
GamePhase, Igame, Iroom,
Ifactory, Itile, IrowLeft, IobjectiveTile, IgameMode
} from "../../common/interfaces"
export class Room implements Iroom{
users: Iuser[]=[];
name: string="";
constructor(name:string) {
this.name=name
}
addUser(user:Iuser):void{
this.users.push(user)
}
removeUser(user: Iuser):void{
this.users = this.users.filter((x)=>{x.conn!=user.conn})
}
nameUserExist(name:string):boolean{
let tmp:Iuser[] = this.users.filter((x) => { x.name == name })
return tmp.length>0?true:false
}
getUserByName(name:string):(Iuser|null){
let tmp:Iuser[] = this.users.filter((x)=>{x.name ==name})
return tmp.length>0?tmp[0]:null
}
getUserByConn(conn: string): (Iuser | null){
let tmp: Iuser[] = this.users.filter((x) => { x.conn == conn })
return tmp.length > 0 ? tmp[0] : null
}
updateUser(user:Iuser):void |
}
export class Player implements Iplayer{
rowsLefts: IrowLeft[];
rowsRight: IobjectiveTile[][];
user: Iuser;
points: number;
hazard: string[];
firstPlayerToken: boolean;
constructor(user:Iuser, gameMode:IgameMode){
this.points = 0
this.hazard = []
this.firstPlayerToken = false
this.user = user
this.rowsLefts = gameMode.rowsLefts
this.rowsRight = gameMode.rowsRight
}
}
export class GameMode implements IgameMode{
colors: string[]= []
privateBoardRows: number =0
rowsLefts: IrowLeft[] =[];
rowsRight: IobjectiveTile[][] =[];
hazard:string []
constructor(modeName:String){
switch (modeName){
case "normal":
let tempRowLeft:IrowLeft;
let tempTile:string
this.colors = ["red", "black", "yellow", "blue", "pink"]
this.privateBoardRows = 5
let objTile: IobjectiveTile
let objetiveRow: IobjectiveTile[]
for (let i = 0; i < this.privateBoardRows; i++) {
tempRowLeft={color:"",used:0,max:i+1}
this.rowsLefts.push(tempRowLeft)
objetiveRow = this.colors.map((color) => {
objTile = { active: false, color: color }
return objTile
})
this.rowsRight.push(objetiveRow)
let tempColor:string
tempColor = this.colors.pop()
this.colors.reverse()
this.colors.push(tempColor)
this.colors.reverse()
}
break
}
}
}
export class Factory implements Ifactory{
tiles: Itile[]=[]
constructor(colors:string[]){
colors.forEach(element => {
this.tiles.push({ color: element, amount: 0 })
});
}
add(color: string, amount: number):void{
this.tiles.map((x)=>{
if(x.color==color){
x.amount +=amount
}
return x
})
}
remove(color: string, amount: number=0): number{
let result:number =amount
this.tiles.map((x) => {
if (x.color == color) {
if(amount ==0){
result =x.amount
x.amount = 0
}else{
x.amount -= amount
}
}
return x
})
return result
}
addRandomColor(): void{
let allColors:string[] = this.tiles.map((x)=>{return x.color})
this.add(
allColors[
Math.floor(Math.random()*allColors.length)
],
1)
}
}
export class Game implements Igame{
mode: IgameMode;
roomName: string;
players: Iplayer[]=[];
phase: GamePhase;
factories: Ifactory[];
bag: Ifactory
trash: Ifactory
turn: number;
constructor(mode:string,room:Iroom){
this.mode =new GameMode(mode)
this.roomName =room.name
this.bag = new Factory(this.mode.colors)
this.trash = new Factory(this.mode.colors)
this.factories = []
let newPlayer:Iplayer;
for (let index = 0; index < room.users.length; index++) {
newPlayer = new Player(room.users[index], this.mode)
this.players.push(newPlayer)
}
// create bag and trash
this.mode.colors.forEach(color => {
this.bag.add(color,20)
});
//create public board
let fabricQuant: number
fabricQuant = room.users.length == 2 ? 6 : 6 + ((room.users.length - 2) * 2)
for (let index = 0; index < fabricQuant; index++) {
this.factories.push(new Factory(this.mode.colors))
}
//assign to fabrics
this.bagToFabrics(4)
this.turn = Math.floor(Math.random() * (this.players.length - 1) + 1)
}
bagToFabrics(amountPerFabric:number):void{
let tempBag: Ifactory = new Factory(this.mode.colors)
let RND: number
let RNDcolor: string
let cantidadFactories:number = this.factories.length
for (let fabricI = 1; fabricI < cantidadFactories; fabricI++) {
for (let index = 0; index < amountPerFabric; index++) {
tempBag.tiles = this.bag.tiles.filter((x) => { return x.amount > 0 })
if (tempBag.tiles.length ==0){
this.moveTrashToBag()
tempBag.tiles = this.bag.tiles
}
RND = Math.floor(Math.random() * (tempBag.tiles.length - 1) + 1)
RNDcolor = tempBag.tiles[RND].color
this.bag.tiles = this.bag.tiles.map((x)=>{
x.amount = x.color == RNDcolor ? x.amount - 1 : x.amount
return x
})
this.factories[fabricI].tiles = this.factories[fabricI].tiles.map((x) => {
x.amount = x.color == RNDcolor ? x.amount + 1 : x.amount
return x
})
}
}
}
moveTrashToBag():void{
this.trash.tiles.map((x)=>{
this.bag.add(x.color,x.amount)
this.trash.remove(x.color, x.amount)
})
}
pickTile(jugada:Ijugada):object{
let rowId: number = jugada.row
let row: IrowLeft = jugada.player.rowsLefts[rowId]
row.color = row.color == "" ? jugada.color : row.color
if (row.color != jugada.color) {
return {event:"error",reason:"jugada invalida"}
} else {
let amount: number = this.factories[0].remove(jugada.color)
//public board
if (jugada.fabricIndex> 0) {
let tempFabric:Ifactory = this.factories[jugada.fabricIndex]
tempFabric.tiles.forEach(tile => {
this.factories[0].add(tile.color,tile.amount)
});
this.factories[jugada.fabricIndex] = new Factory(this.mode.colors)
}
//private board
let total: number = row.used + amount
if (total > row.max) {
jugada.player.hazard.push(jugada.color)
} else {
row.used += amount
}
jugada.player.rowsLefts[rowId] = row
return{
event:"ok"
}
}
}
partialScore():void{
let ejeX:number
let ejeY:number
let checkNext=(x:number,y:number,player:Iplayer,position:string):number=>{
//position["+x","-x","-y","+x"]
let score:number=0;
if(x<0 ||y <0 || y > player.rowsRight.length ||
x>player.rowsRight[0].length
){
return 0
}
if (!player.rowsRight[y][x]){
return 0
}
score +=1
switch (position){
case "+x":
score +=checkNext(x+1,y,player,position)
break
case "-x":
score +=checkNext(x-1, y, player, position)
break
case "+y":
score +=checkNext(x, y+1, player, position)
break
case "-y":
score +=checkNext(x, y-1, player, position)
break
}
return score
}
this.players.forEach(player => {
for (ejeY = 0; ejeY < this.mode.privateBoardRows; ejeY++) {
if (player.rowsLefts[ejeY].max == player.rowsLefts[ejeY].used){
ejeX = player.rowsRight[ejeY].indexOf({color: player.rowsLefts[ejeY].color,active:false})
player.rowsRight[ejeY][ejeX].active= true
let tempX1:number =0
let tempX2: number = 0
let tempY1: number = 0
let tempY2: number = 0
let subtotal:number =0
tempX1 = checkNext(ejeX,ejeY,player,"+x")
tempX2 = checkNext(ejeX, ejeY, player, "-x") -1
tempY1 = checkNext(ejeX, ejeY, player, "+y")
tempY2 = checkNext(ejeX, ejeY, player, "-x") -1
subtotal+= tempX1 + tempX2 - 2 == 0 ? 0:tempX1 + tempX2 - 1
subtotal += tempY1 + tempY2 - 2 == 0 ? 0 : tempY1 + tempY2 - 1
player.rowsLefts[ejeY].used = 0
player.rowsLefts[ejeY].color = ""
}
}
});
}
finalScore():void{
}
updateUser(user:Iuser):void{
this.players.map((x)=>{
x.user.conn =x.user.name == user.name? user.conn:x.user.conn
return x
})
}
} | {
this.users = this.users.map((x) => {
x.conn= (x.name == user.name )? user.conn:x.conn;
return x
})
} | identifier_body |
classes.ts | import {
Iuser, Ijugada, Iplayer,
GamePhase, Igame, Iroom,
Ifactory, Itile, IrowLeft, IobjectiveTile, IgameMode
} from "../../common/interfaces"
export class Room implements Iroom{
users: Iuser[]=[];
name: string="";
constructor(name:string) {
this.name=name
}
addUser(user:Iuser):void{
this.users.push(user)
}
removeUser(user: Iuser):void{
this.users = this.users.filter((x)=>{x.conn!=user.conn})
}
nameUserExist(name:string):boolean{
let tmp:Iuser[] = this.users.filter((x) => { x.name == name })
return tmp.length>0?true:false
}
getUserByName(name:string):(Iuser|null){
let tmp:Iuser[] = this.users.filter((x)=>{x.name ==name})
return tmp.length>0?tmp[0]:null
}
getUserByConn(conn: string): (Iuser | null){
let tmp: Iuser[] = this.users.filter((x) => { x.conn == conn })
return tmp.length > 0 ? tmp[0] : null
}
updateUser(user:Iuser):void{
this.users = this.users.map((x) => {
x.conn= (x.name == user.name )? user.conn:x.conn;
return x
})
}
}
export class Player implements Iplayer{
rowsLefts: IrowLeft[];
rowsRight: IobjectiveTile[][];
user: Iuser;
points: number;
hazard: string[];
firstPlayerToken: boolean;
constructor(user:Iuser, gameMode:IgameMode){
this.points = 0
this.hazard = []
this.firstPlayerToken = false
this.user = user
this.rowsLefts = gameMode.rowsLefts
this.rowsRight = gameMode.rowsRight
}
}
export class GameMode implements IgameMode{
colors: string[]= []
privateBoardRows: number =0
rowsLefts: IrowLeft[] =[];
rowsRight: IobjectiveTile[][] =[];
hazard:string []
constructor(modeName:String){
switch (modeName){
case "normal":
let tempRowLeft:IrowLeft;
let tempTile:string
this.colors = ["red", "black", "yellow", "blue", "pink"]
this.privateBoardRows = 5
let objTile: IobjectiveTile
let objetiveRow: IobjectiveTile[]
for (let i = 0; i < this.privateBoardRows; i++) {
tempRowLeft={color:"",used:0,max:i+1}
this.rowsLefts.push(tempRowLeft)
objetiveRow = this.colors.map((color) => {
objTile = { active: false, color: color }
return objTile
})
this.rowsRight.push(objetiveRow)
let tempColor:string
tempColor = this.colors.pop()
this.colors.reverse()
this.colors.push(tempColor)
this.colors.reverse()
}
break
}
}
}
export class Factory implements Ifactory{
tiles: Itile[]=[]
constructor(colors:string[]){
colors.forEach(element => {
this.tiles.push({ color: element, amount: 0 })
});
}
add(color: string, amount: number):void{
this.tiles.map((x)=>{
if(x.color==color){
x.amount +=amount
}
return x
})
}
remove(color: string, amount: number=0): number{
let result:number =amount
this.tiles.map((x) => {
if (x.color == color) {
if(amount ==0){
result =x.amount
x.amount = 0
}else{
x.amount -= amount
}
}
return x
})
return result
}
addRandomColor(): void{
let allColors:string[] = this.tiles.map((x)=>{return x.color})
this.add(
allColors[
Math.floor(Math.random()*allColors.length)
],
1)
}
}
export class Game implements Igame{
mode: IgameMode;
roomName: string;
players: Iplayer[]=[];
phase: GamePhase;
factories: Ifactory[];
bag: Ifactory
trash: Ifactory
turn: number;
constructor(mode:string,room:Iroom){
this.mode =new GameMode(mode)
this.roomName =room.name
this.bag = new Factory(this.mode.colors)
this.trash = new Factory(this.mode.colors)
this.factories = []
let newPlayer:Iplayer;
for (let index = 0; index < room.users.length; index++) {
newPlayer = new Player(room.users[index], this.mode)
this.players.push(newPlayer)
}
// create bag and trash
this.mode.colors.forEach(color => { | let fabricQuant: number
fabricQuant = room.users.length == 2 ? 6 : 6 + ((room.users.length - 2) * 2)
for (let index = 0; index < fabricQuant; index++) {
this.factories.push(new Factory(this.mode.colors))
}
//assign to fabrics
this.bagToFabrics(4)
this.turn = Math.floor(Math.random() * (this.players.length - 1) + 1)
}
bagToFabrics(amountPerFabric:number):void{
let tempBag: Ifactory = new Factory(this.mode.colors)
let RND: number
let RNDcolor: string
let cantidadFactories:number = this.factories.length
for (let fabricI = 1; fabricI < cantidadFactories; fabricI++) {
for (let index = 0; index < amountPerFabric; index++) {
tempBag.tiles = this.bag.tiles.filter((x) => { return x.amount > 0 })
if (tempBag.tiles.length ==0){
this.moveTrashToBag()
tempBag.tiles = this.bag.tiles
}
RND = Math.floor(Math.random() * (tempBag.tiles.length - 1) + 1)
RNDcolor = tempBag.tiles[RND].color
this.bag.tiles = this.bag.tiles.map((x)=>{
x.amount = x.color == RNDcolor ? x.amount - 1 : x.amount
return x
})
this.factories[fabricI].tiles = this.factories[fabricI].tiles.map((x) => {
x.amount = x.color == RNDcolor ? x.amount + 1 : x.amount
return x
})
}
}
}
moveTrashToBag():void{
this.trash.tiles.map((x)=>{
this.bag.add(x.color,x.amount)
this.trash.remove(x.color, x.amount)
})
}
pickTile(jugada:Ijugada):object{
let rowId: number = jugada.row
let row: IrowLeft = jugada.player.rowsLefts[rowId]
row.color = row.color == "" ? jugada.color : row.color
if (row.color != jugada.color) {
return {event:"error",reason:"jugada invalida"}
} else {
let amount: number = this.factories[0].remove(jugada.color)
//public board
if (jugada.fabricIndex> 0) {
let tempFabric:Ifactory = this.factories[jugada.fabricIndex]
tempFabric.tiles.forEach(tile => {
this.factories[0].add(tile.color,tile.amount)
});
this.factories[jugada.fabricIndex] = new Factory(this.mode.colors)
}
//private board
let total: number = row.used + amount
if (total > row.max) {
jugada.player.hazard.push(jugada.color)
} else {
row.used += amount
}
jugada.player.rowsLefts[rowId] = row
return{
event:"ok"
}
}
}
partialScore():void{
let ejeX:number
let ejeY:number
let checkNext=(x:number,y:number,player:Iplayer,position:string):number=>{
//position["+x","-x","-y","+x"]
let score:number=0;
if(x<0 ||y <0 || y > player.rowsRight.length ||
x>player.rowsRight[0].length
){
return 0
}
if (!player.rowsRight[y][x]){
return 0
}
score +=1
switch (position){
case "+x":
score +=checkNext(x+1,y,player,position)
break
case "-x":
score +=checkNext(x-1, y, player, position)
break
case "+y":
score +=checkNext(x, y+1, player, position)
break
case "-y":
score +=checkNext(x, y-1, player, position)
break
}
return score
}
this.players.forEach(player => {
for (ejeY = 0; ejeY < this.mode.privateBoardRows; ejeY++) {
if (player.rowsLefts[ejeY].max == player.rowsLefts[ejeY].used){
ejeX = player.rowsRight[ejeY].indexOf({color: player.rowsLefts[ejeY].color,active:false})
player.rowsRight[ejeY][ejeX].active= true
let tempX1:number =0
let tempX2: number = 0
let tempY1: number = 0
let tempY2: number = 0
let subtotal:number =0
tempX1 = checkNext(ejeX,ejeY,player,"+x")
tempX2 = checkNext(ejeX, ejeY, player, "-x") -1
tempY1 = checkNext(ejeX, ejeY, player, "+y")
tempY2 = checkNext(ejeX, ejeY, player, "-x") -1
subtotal+= tempX1 + tempX2 - 2 == 0 ? 0:tempX1 + tempX2 - 1
subtotal += tempY1 + tempY2 - 2 == 0 ? 0 : tempY1 + tempY2 - 1
player.rowsLefts[ejeY].used = 0
player.rowsLefts[ejeY].color = ""
}
}
});
}
finalScore():void{
}
updateUser(user:Iuser):void{
this.players.map((x)=>{
x.user.conn =x.user.name == user.name? user.conn:x.user.conn
return x
})
}
} | this.bag.add(color,20)
});
//create public board | random_line_split |
classes.ts |
import {
Iuser, Ijugada, Iplayer,
GamePhase, Igame, Iroom,
Ifactory, Itile, IrowLeft, IobjectiveTile, IgameMode
} from "../../common/interfaces"
export class Room implements Iroom{
users: Iuser[]=[];
name: string="";
constructor(name:string) {
this.name=name
}
addUser(user:Iuser):void{
this.users.push(user)
}
removeUser(user: Iuser):void{
this.users = this.users.filter((x)=>{x.conn!=user.conn})
}
| (name:string):boolean{
let tmp:Iuser[] = this.users.filter((x) => { x.name == name })
return tmp.length>0?true:false
}
getUserByName(name:string):(Iuser|null){
let tmp:Iuser[] = this.users.filter((x)=>{x.name ==name})
return tmp.length>0?tmp[0]:null
}
getUserByConn(conn: string): (Iuser | null){
let tmp: Iuser[] = this.users.filter((x) => { x.conn == conn })
return tmp.length > 0 ? tmp[0] : null
}
updateUser(user:Iuser):void{
this.users = this.users.map((x) => {
x.conn= (x.name == user.name )? user.conn:x.conn;
return x
})
}
}
export class Player implements Iplayer{
rowsLefts: IrowLeft[];
rowsRight: IobjectiveTile[][];
user: Iuser;
points: number;
hazard: string[];
firstPlayerToken: boolean;
constructor(user:Iuser, gameMode:IgameMode){
this.points = 0
this.hazard = []
this.firstPlayerToken = false
this.user = user
this.rowsLefts = gameMode.rowsLefts
this.rowsRight = gameMode.rowsRight
}
}
export class GameMode implements IgameMode{
colors: string[]= []
privateBoardRows: number =0
rowsLefts: IrowLeft[] =[];
rowsRight: IobjectiveTile[][] =[];
hazard:string []
constructor(modeName:String){
switch (modeName){
case "normal":
let tempRowLeft:IrowLeft;
let tempTile:string
this.colors = ["red", "black", "yellow", "blue", "pink"]
this.privateBoardRows = 5
let objTile: IobjectiveTile
let objetiveRow: IobjectiveTile[]
for (let i = 0; i < this.privateBoardRows; i++) {
tempRowLeft={color:"",used:0,max:i+1}
this.rowsLefts.push(tempRowLeft)
objetiveRow = this.colors.map((color) => {
objTile = { active: false, color: color }
return objTile
})
this.rowsRight.push(objetiveRow)
let tempColor:string
tempColor = this.colors.pop()
this.colors.reverse()
this.colors.push(tempColor)
this.colors.reverse()
}
break
}
}
}
export class Factory implements Ifactory{
tiles: Itile[]=[]
constructor(colors:string[]){
colors.forEach(element => {
this.tiles.push({ color: element, amount: 0 })
});
}
add(color: string, amount: number):void{
this.tiles.map((x)=>{
if(x.color==color){
x.amount +=amount
}
return x
})
}
remove(color: string, amount: number=0): number{
let result:number =amount
this.tiles.map((x) => {
if (x.color == color) {
if(amount ==0){
result =x.amount
x.amount = 0
}else{
x.amount -= amount
}
}
return x
})
return result
}
addRandomColor(): void{
let allColors:string[] = this.tiles.map((x)=>{return x.color})
this.add(
allColors[
Math.floor(Math.random()*allColors.length)
],
1)
}
}
export class Game implements Igame{
mode: IgameMode;
roomName: string;
players: Iplayer[]=[];
phase: GamePhase;
factories: Ifactory[];
bag: Ifactory
trash: Ifactory
turn: number;
constructor(mode:string,room:Iroom){
this.mode =new GameMode(mode)
this.roomName =room.name
this.bag = new Factory(this.mode.colors)
this.trash = new Factory(this.mode.colors)
this.factories = []
let newPlayer:Iplayer;
for (let index = 0; index < room.users.length; index++) {
newPlayer = new Player(room.users[index], this.mode)
this.players.push(newPlayer)
}
// create bag and trash
this.mode.colors.forEach(color => {
this.bag.add(color,20)
});
//create public board
let fabricQuant: number
fabricQuant = room.users.length == 2 ? 6 : 6 + ((room.users.length - 2) * 2)
for (let index = 0; index < fabricQuant; index++) {
this.factories.push(new Factory(this.mode.colors))
}
//assign to fabrics
this.bagToFabrics(4)
this.turn = Math.floor(Math.random() * (this.players.length - 1) + 1)
}
bagToFabrics(amountPerFabric:number):void{
let tempBag: Ifactory = new Factory(this.mode.colors)
let RND: number
let RNDcolor: string
let cantidadFactories:number = this.factories.length
for (let fabricI = 1; fabricI < cantidadFactories; fabricI++) {
for (let index = 0; index < amountPerFabric; index++) {
tempBag.tiles = this.bag.tiles.filter((x) => { return x.amount > 0 })
if (tempBag.tiles.length ==0){
this.moveTrashToBag()
tempBag.tiles = this.bag.tiles
}
RND = Math.floor(Math.random() * (tempBag.tiles.length - 1) + 1)
RNDcolor = tempBag.tiles[RND].color
this.bag.tiles = this.bag.tiles.map((x)=>{
x.amount = x.color == RNDcolor ? x.amount - 1 : x.amount
return x
})
this.factories[fabricI].tiles = this.factories[fabricI].tiles.map((x) => {
x.amount = x.color == RNDcolor ? x.amount + 1 : x.amount
return x
})
}
}
}
moveTrashToBag():void{
this.trash.tiles.map((x)=>{
this.bag.add(x.color,x.amount)
this.trash.remove(x.color, x.amount)
})
}
pickTile(jugada:Ijugada):object{
let rowId: number = jugada.row
let row: IrowLeft = jugada.player.rowsLefts[rowId]
row.color = row.color == "" ? jugada.color : row.color
if (row.color != jugada.color) {
return {event:"error",reason:"jugada invalida"}
} else {
let amount: number = this.factories[0].remove(jugada.color)
//public board
if (jugada.fabricIndex> 0) {
let tempFabric:Ifactory = this.factories[jugada.fabricIndex]
tempFabric.tiles.forEach(tile => {
this.factories[0].add(tile.color,tile.amount)
});
this.factories[jugada.fabricIndex] = new Factory(this.mode.colors)
}
//private board
let total: number = row.used + amount
if (total > row.max) {
jugada.player.hazard.push(jugada.color)
} else {
row.used += amount
}
jugada.player.rowsLefts[rowId] = row
return{
event:"ok"
}
}
}
partialScore():void{
let ejeX:number
let ejeY:number
let checkNext=(x:number,y:number,player:Iplayer,position:string):number=>{
//position["+x","-x","-y","+x"]
let score:number=0;
if(x<0 ||y <0 || y > player.rowsRight.length ||
x>player.rowsRight[0].length
){
return 0
}
if (!player.rowsRight[y][x]){
return 0
}
score +=1
switch (position){
case "+x":
score +=checkNext(x+1,y,player,position)
break
case "-x":
score +=checkNext(x-1, y, player, position)
break
case "+y":
score +=checkNext(x, y+1, player, position)
break
case "-y":
score +=checkNext(x, y-1, player, position)
break
}
return score
}
this.players.forEach(player => {
for (ejeY = 0; ejeY < this.mode.privateBoardRows; ejeY++) {
if (player.rowsLefts[ejeY].max == player.rowsLefts[ejeY].used){
ejeX = player.rowsRight[ejeY].indexOf({color: player.rowsLefts[ejeY].color,active:false})
player.rowsRight[ejeY][ejeX].active= true
let tempX1:number =0
let tempX2: number = 0
let tempY1: number = 0
let tempY2: number = 0
let subtotal:number =0
tempX1 = checkNext(ejeX,ejeY,player,"+x")
tempX2 = checkNext(ejeX, ejeY, player, "-x") -1
tempY1 = checkNext(ejeX, ejeY, player, "+y")
tempY2 = checkNext(ejeX, ejeY, player, "-x") -1
subtotal+= tempX1 + tempX2 - 2 == 0 ? 0:tempX1 + tempX2 - 1
subtotal += tempY1 + tempY2 - 2 == 0 ? 0 : tempY1 + tempY2 - 1
player.rowsLefts[ejeY].used = 0
player.rowsLefts[ejeY].color = ""
}
}
});
}
finalScore():void{
}
updateUser(user:Iuser):void{
this.players.map((x)=>{
x.user.conn =x.user.name == user.name? user.conn:x.user.conn
return x
})
}
} | nameUserExist | identifier_name |
graphbuilder.go | /*
Copyright 2021 The KubeDiag Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package graphbuilder
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"gonum.org/v1/gonum/graph/simple"
"gonum.org/v1/gonum/graph/topo"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/metrics"
diagnosisv1 "github.com/kubediag/kubediag/api/v1"
"github.com/kubediag/kubediag/pkg/util"
)
var (
graphbuilderSyncSuccessCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_success_count",
Help: "Counter of successful operationset syncs by graphbuilder",
},
)
graphbuilderSyncSkipCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_skip_count",
Help: "Counter of skipped operationset syncs by graphbuilder",
},
)
graphbuilderSyncErrorCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_error_count",
Help: "Counter of erroneous operationset syncs by graphbuilder",
},
)
)
// GraphBuilder analyzes directed acyclic graph defined in operation set.
type GraphBuilder interface {
// Context carries values across API boundaries.
context.Context
// Logger represents the ability to log messages.
logr.Logger
// Run runs the GraphBuilder.
Run(<-chan struct{})
}
// graphBuilder validates directed acyclic graph defined in the operation set and generates paths
// according to the directed acyclic graph.
type graphBuilder struct {
// Context carries values across API boundaries.
context.Context
// Logger represents the ability to log messages.
logr.Logger
// client knows how to perform CRUD operations on Kubernetes objects.
client client.Client
// eventRecorder knows how to record events on behalf of an EventSource.
eventRecorder record.EventRecorder
// scheme defines methods for serializing and deserializing API objects.
scheme *runtime.Scheme
// cache knows how to load Kubernetes objects.
cache cache.Cache
// graphBuilderCh is a channel for queuing OperationSets to be processed by graph builder.
graphBuilderCh chan diagnosisv1.OperationSet
}
// NewGraphBuilder creates a new graph builder.
func NewGraphBuilder(
ctx context.Context,
logger logr.Logger,
cli client.Client,
eventRecorder record.EventRecorder,
scheme *runtime.Scheme,
cache cache.Cache,
graphBuilderCh chan diagnosisv1.OperationSet,
) GraphBuilder {
metrics.Registry.MustRegister(
graphbuilderSyncSuccessCount,
graphbuilderSyncSkipCount,
graphbuilderSyncErrorCount,
)
return &graphBuilder{
Context: ctx,
Logger: logger,
client: cli,
eventRecorder: eventRecorder,
scheme: scheme,
cache: cache,
graphBuilderCh: graphBuilderCh,
}
}
// Run runs the graph builder.
// TODO: Prometheus metrics.
func (gb *graphBuilder) Run(stopCh <-chan struct{}) {
// Wait for all caches to sync before processing.
if !gb.cache.WaitForCacheSync(stopCh) {
return
}
for {
select {
// Process operation sets queuing in graph builder channel.
case operationSet := <-gb.graphBuilderCh:
err := gb.client.Get(gb, client.ObjectKey{
Name: operationSet.Name,
}, &operationSet)
if err != nil {
if apierrors.IsNotFound(err) {
continue
}
gb.addDiagnosisToGraphBuilderQueue(operationSet)
continue
}
// Only process unready operation set.
if operationSet.Status.Ready {
graphbuilderSyncSkipCount.Inc()
continue
}
operationSet, err = gb.syncOperationSet(operationSet)
if err != nil {
gb.Error(err, "failed to sync OperationSet", "operationSet", operationSet)
gb.addDiagnosisToGraphBuilderQueue(operationSet)
continue
}
graphbuilderSyncSuccessCount.Inc()
// Stop graph builder on stop signal.
case <-stopCh:
return
}
}
}
// syncOperationSet syncs operation sets.
// TODO: Update conditions on error.
func (gb *graphBuilder) | (operationSet diagnosisv1.OperationSet) (diagnosisv1.OperationSet, error) {
// Build directed graph from adjacency list.
graph, err := newGraphFromAdjacencyList(operationSet.Spec.AdjacencyList)
if err != nil {
return operationSet, err
}
nodes := graph.Nodes()
for nodes.Next() {
node := nodes.Node()
toNodes := graph.To(node.ID())
source := true
for toNodes.Next() {
source = false
}
if node.ID() == 0 {
if !source {
// Return error if start node is the destination of other node in the graph.
return operationSet, fmt.Errorf("indegree of start node is not 0")
}
} else {
if source {
// Return error if some node is unreachable from start node in the graph.
return operationSet, fmt.Errorf("node %d is unreachable from start node", node.ID())
}
}
}
// Validate the graph does not have any cycles.
_, err = topo.Sort(graph)
if err != nil {
return operationSet, fmt.Errorf("invalid directed acyclic graph: %s", err)
}
// Search all paths from start node to any node with outdegree of 0.
diagnosisPaths, err := searchDiagnosisPaths(graph, len(operationSet.Spec.AdjacencyList))
if err != nil {
return operationSet, fmt.Errorf("unable to search diagnosis path: %s", err)
}
// Set operation set status with diagnosis paths.
paths := make([]diagnosisv1.Path, 0)
for _, diagnosisPath := range diagnosisPaths {
path := make(diagnosisv1.Path, 0)
for _, id := range diagnosisPath {
if operationSet.Spec.AdjacencyList[int(id)].ID != 0 {
path = append(path, operationSet.Spec.AdjacencyList[int(id)])
}
}
paths = append(paths, path)
}
operationSet.Status.Paths = paths
operationSet.Status.Ready = true
if err := gb.client.Status().Update(gb, &operationSet); err != nil {
return operationSet, fmt.Errorf("unable to update OperationSet: %s", err)
}
return operationSet, nil
}
// addDiagnosisToGraphBuilderQueue adds OperationSets to the queue processed by graph builder.
func (gb *graphBuilder) addDiagnosisToGraphBuilderQueue(operationSet diagnosisv1.OperationSet) {
graphbuilderSyncErrorCount.Inc()
err := util.QueueOperationSet(gb, gb.graphBuilderCh, operationSet)
if err != nil {
gb.Error(err, "failed to send operation set to graph builder queue", "operationset", client.ObjectKey{
Name: operationSet.Name,
})
}
}
// newGraphFromAdjacencyList builds a directed graph from a adjacency list.
// TODO: Panic recovery.
func newGraphFromAdjacencyList(adjacencyList []diagnosisv1.Node) (*simple.DirectedGraph, error) {
graph := simple.NewDirectedGraph()
for id, node := range adjacencyList {
if graph.Node(int64(id)) == nil {
graph.AddNode(simple.Node(id))
}
for _, to := range node.To {
graph.SetEdge(graph.NewEdge(simple.Node(id), simple.Node(to)))
}
}
return graph, nil
}
// searchDiagnosisPaths traverses all nodes in the directed acyclic graph from start node with id of 0.
// It returns all paths from start node to any node with outdegree of 0 and an error.
func searchDiagnosisPaths(graph *simple.DirectedGraph, nodeCount int) ([][]int64, error) {
var queue NodeQueue
visited := make([]bool, nodeCount)
nodePathCache := make([][][]int64, nodeCount)
sinkNodes := make([]int64, 0)
// Validate the graph contains start node with id of 0.
start := graph.Node(0)
if start == nil {
return nil, fmt.Errorf("start node not found in graph")
}
// Set start node as visited and enqueue all nodes that can reach directly from it.
visited[start.ID()] = true
fromNodes := graph.From(start.ID())
for fromNodes.Next() {
fromNode := fromNodes.Node()
queue.Enqueue(fromNode)
}
// Initialize node path cache with start node.
nodePaths := make([][]int64, 0)
nodePaths = append(nodePaths, []int64{start.ID()})
nodePathCache[start.ID()] = nodePaths
for queue.Len() != 0 {
// Dequeue a node from queue and retrieve all nodes that can reach directly to or from current node.
current := queue.Dequeue()
toNodes := graph.To(current.ID())
fromNodes := graph.From(current.ID())
// Skip current node if it has already been visited.
if visited[current.ID()] {
continue
}
// Set current node as visited if all nodes that can reach directly to current node are visited.
// Otherwise, enqueue current node.
visited[current.ID()] = true
for toNodes.Next() {
toNode := toNodes.Node()
if !visited[toNode.ID()] {
visited[current.ID()] = false
queue.Enqueue(current)
break
}
}
if visited[current.ID()] {
// Update node path of current node with visited node that can reach directly to current node.
toNodes.Reset()
for toNodes.Next() {
toNode := toNodes.Node()
nodePaths := nodePathCache[current.ID()]
if nodePaths == nil {
nodePaths = make([][]int64, 0)
}
toNodePaths := nodePathCache[toNode.ID()]
for _, toNodePath := range toNodePaths {
nodePath := make([]int64, len(toNodePath))
copy(nodePath, toNodePath)
nodePath = append(nodePath, current.ID())
nodePaths = append(nodePaths, nodePath)
}
// Node path appended by current node is updated as node path of current node.
nodePathCache[current.ID()] = nodePaths
}
// Enqueue all nodes that can reach directly from current node if current node is visited.
sink := true
for fromNodes.Next() {
sink = false
fromNode := fromNodes.Node()
queue.Enqueue(fromNode)
}
// Set current node as sink if its outdegree is 0.
if sink {
sinkNodes = append(sinkNodes, current.ID())
}
}
}
// Set diagnosis paths with all node paths of nodes which has outdegree of 0.
diagnosisPaths := make([][]int64, 0)
for _, id := range sinkNodes {
paths := nodePathCache[id]
diagnosisPaths = append(diagnosisPaths, paths...)
}
return diagnosisPaths, nil
}
| syncOperationSet | identifier_name |
graphbuilder.go | /*
Copyright 2021 The KubeDiag Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package graphbuilder
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"gonum.org/v1/gonum/graph/simple"
"gonum.org/v1/gonum/graph/topo"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/metrics"
diagnosisv1 "github.com/kubediag/kubediag/api/v1"
"github.com/kubediag/kubediag/pkg/util"
)
var (
graphbuilderSyncSuccessCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_success_count",
Help: "Counter of successful operationset syncs by graphbuilder",
},
)
graphbuilderSyncSkipCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_skip_count",
Help: "Counter of skipped operationset syncs by graphbuilder",
},
)
graphbuilderSyncErrorCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_error_count",
Help: "Counter of erroneous operationset syncs by graphbuilder",
},
)
)
// GraphBuilder analyzes directed acyclic graph defined in operation set.
type GraphBuilder interface {
// Context carries values across API boundaries.
context.Context
// Logger represents the ability to log messages.
logr.Logger
// Run runs the GraphBuilder.
Run(<-chan struct{})
}
// graphBuilder validates directed acyclic graph defined in the operation set and generates paths
// according to the directed acyclic graph.
type graphBuilder struct {
// Context carries values across API boundaries.
context.Context
// Logger represents the ability to log messages.
logr.Logger
// client knows how to perform CRUD operations on Kubernetes objects.
client client.Client
// eventRecorder knows how to record events on behalf of an EventSource.
eventRecorder record.EventRecorder
// scheme defines methods for serializing and deserializing API objects.
scheme *runtime.Scheme
// cache knows how to load Kubernetes objects.
cache cache.Cache
// graphBuilderCh is a channel for queuing OperationSets to be processed by graph builder.
graphBuilderCh chan diagnosisv1.OperationSet
}
// NewGraphBuilder creates a new graph builder.
func NewGraphBuilder(
ctx context.Context,
logger logr.Logger,
cli client.Client,
eventRecorder record.EventRecorder,
scheme *runtime.Scheme,
cache cache.Cache,
graphBuilderCh chan diagnosisv1.OperationSet,
) GraphBuilder {
metrics.Registry.MustRegister(
graphbuilderSyncSuccessCount,
graphbuilderSyncSkipCount,
graphbuilderSyncErrorCount,
)
return &graphBuilder{
Context: ctx,
Logger: logger,
client: cli,
eventRecorder: eventRecorder,
scheme: scheme,
cache: cache,
graphBuilderCh: graphBuilderCh,
}
}
// Run runs the graph builder.
// TODO: Prometheus metrics.
func (gb *graphBuilder) Run(stopCh <-chan struct{}) {
// Wait for all caches to sync before processing.
if !gb.cache.WaitForCacheSync(stopCh) {
return
}
for {
select {
// Process operation sets queuing in graph builder channel.
case operationSet := <-gb.graphBuilderCh:
err := gb.client.Get(gb, client.ObjectKey{
Name: operationSet.Name,
}, &operationSet)
if err != nil {
if apierrors.IsNotFound(err) {
continue
}
gb.addDiagnosisToGraphBuilderQueue(operationSet)
continue
}
// Only process unready operation set.
if operationSet.Status.Ready {
graphbuilderSyncSkipCount.Inc()
continue
}
operationSet, err = gb.syncOperationSet(operationSet)
if err != nil {
gb.Error(err, "failed to sync OperationSet", "operationSet", operationSet)
gb.addDiagnosisToGraphBuilderQueue(operationSet)
continue
}
graphbuilderSyncSuccessCount.Inc()
// Stop graph builder on stop signal.
case <-stopCh:
return
}
}
}
// syncOperationSet syncs operation sets.
// TODO: Update conditions on error.
func (gb *graphBuilder) syncOperationSet(operationSet diagnosisv1.OperationSet) (diagnosisv1.OperationSet, error) |
// addDiagnosisToGraphBuilderQueue adds OperationSets to the queue processed by graph builder.
func (gb *graphBuilder) addDiagnosisToGraphBuilderQueue(operationSet diagnosisv1.OperationSet) {
graphbuilderSyncErrorCount.Inc()
err := util.QueueOperationSet(gb, gb.graphBuilderCh, operationSet)
if err != nil {
gb.Error(err, "failed to send operation set to graph builder queue", "operationset", client.ObjectKey{
Name: operationSet.Name,
})
}
}
// newGraphFromAdjacencyList builds a directed graph from a adjacency list.
// TODO: Panic recovery.
func newGraphFromAdjacencyList(adjacencyList []diagnosisv1.Node) (*simple.DirectedGraph, error) {
graph := simple.NewDirectedGraph()
for id, node := range adjacencyList {
if graph.Node(int64(id)) == nil {
graph.AddNode(simple.Node(id))
}
for _, to := range node.To {
graph.SetEdge(graph.NewEdge(simple.Node(id), simple.Node(to)))
}
}
return graph, nil
}
// searchDiagnosisPaths traverses all nodes in the directed acyclic graph from start node with id of 0.
// It returns all paths from start node to any node with outdegree of 0 and an error.
func searchDiagnosisPaths(graph *simple.DirectedGraph, nodeCount int) ([][]int64, error) {
var queue NodeQueue
visited := make([]bool, nodeCount)
nodePathCache := make([][][]int64, nodeCount)
sinkNodes := make([]int64, 0)
// Validate the graph contains start node with id of 0.
start := graph.Node(0)
if start == nil {
return nil, fmt.Errorf("start node not found in graph")
}
// Set start node as visited and enqueue all nodes that can reach directly from it.
visited[start.ID()] = true
fromNodes := graph.From(start.ID())
for fromNodes.Next() {
fromNode := fromNodes.Node()
queue.Enqueue(fromNode)
}
// Initialize node path cache with start node.
nodePaths := make([][]int64, 0)
nodePaths = append(nodePaths, []int64{start.ID()})
nodePathCache[start.ID()] = nodePaths
for queue.Len() != 0 {
// Dequeue a node from queue and retrieve all nodes that can reach directly to or from current node.
current := queue.Dequeue()
toNodes := graph.To(current.ID())
fromNodes := graph.From(current.ID())
// Skip current node if it has already been visited.
if visited[current.ID()] {
continue
}
// Set current node as visited if all nodes that can reach directly to current node are visited.
// Otherwise, enqueue current node.
visited[current.ID()] = true
for toNodes.Next() {
toNode := toNodes.Node()
if !visited[toNode.ID()] {
visited[current.ID()] = false
queue.Enqueue(current)
break
}
}
if visited[current.ID()] {
// Update node path of current node with visited node that can reach directly to current node.
toNodes.Reset()
for toNodes.Next() {
toNode := toNodes.Node()
nodePaths := nodePathCache[current.ID()]
if nodePaths == nil {
nodePaths = make([][]int64, 0)
}
toNodePaths := nodePathCache[toNode.ID()]
for _, toNodePath := range toNodePaths {
nodePath := make([]int64, len(toNodePath))
copy(nodePath, toNodePath)
nodePath = append(nodePath, current.ID())
nodePaths = append(nodePaths, nodePath)
}
// Node path appended by current node is updated as node path of current node.
nodePathCache[current.ID()] = nodePaths
}
// Enqueue all nodes that can reach directly from current node if current node is visited.
sink := true
for fromNodes.Next() {
sink = false
fromNode := fromNodes.Node()
queue.Enqueue(fromNode)
}
// Set current node as sink if its outdegree is 0.
if sink {
sinkNodes = append(sinkNodes, current.ID())
}
}
}
// Set diagnosis paths with all node paths of nodes which has outdegree of 0.
diagnosisPaths := make([][]int64, 0)
for _, id := range sinkNodes {
paths := nodePathCache[id]
diagnosisPaths = append(diagnosisPaths, paths...)
}
return diagnosisPaths, nil
}
| {
// Build directed graph from adjacency list.
graph, err := newGraphFromAdjacencyList(operationSet.Spec.AdjacencyList)
if err != nil {
return operationSet, err
}
nodes := graph.Nodes()
for nodes.Next() {
node := nodes.Node()
toNodes := graph.To(node.ID())
source := true
for toNodes.Next() {
source = false
}
if node.ID() == 0 {
if !source {
// Return error if start node is the destination of other node in the graph.
return operationSet, fmt.Errorf("indegree of start node is not 0")
}
} else {
if source {
// Return error if some node is unreachable from start node in the graph.
return operationSet, fmt.Errorf("node %d is unreachable from start node", node.ID())
}
}
}
// Validate the graph does not have any cycles.
_, err = topo.Sort(graph)
if err != nil {
return operationSet, fmt.Errorf("invalid directed acyclic graph: %s", err)
}
// Search all paths from start node to any node with outdegree of 0.
diagnosisPaths, err := searchDiagnosisPaths(graph, len(operationSet.Spec.AdjacencyList))
if err != nil {
return operationSet, fmt.Errorf("unable to search diagnosis path: %s", err)
}
// Set operation set status with diagnosis paths.
paths := make([]diagnosisv1.Path, 0)
for _, diagnosisPath := range diagnosisPaths {
path := make(diagnosisv1.Path, 0)
for _, id := range diagnosisPath {
if operationSet.Spec.AdjacencyList[int(id)].ID != 0 {
path = append(path, operationSet.Spec.AdjacencyList[int(id)])
}
}
paths = append(paths, path)
}
operationSet.Status.Paths = paths
operationSet.Status.Ready = true
if err := gb.client.Status().Update(gb, &operationSet); err != nil {
return operationSet, fmt.Errorf("unable to update OperationSet: %s", err)
}
return operationSet, nil
} | identifier_body |
graphbuilder.go | /*
Copyright 2021 The KubeDiag Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package graphbuilder
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"gonum.org/v1/gonum/graph/simple"
"gonum.org/v1/gonum/graph/topo"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/metrics"
diagnosisv1 "github.com/kubediag/kubediag/api/v1"
"github.com/kubediag/kubediag/pkg/util"
)
var (
graphbuilderSyncSuccessCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_success_count",
Help: "Counter of successful operationset syncs by graphbuilder",
},
)
graphbuilderSyncSkipCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_skip_count",
Help: "Counter of skipped operationset syncs by graphbuilder",
},
)
graphbuilderSyncErrorCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_error_count",
Help: "Counter of erroneous operationset syncs by graphbuilder",
},
)
)
// GraphBuilder analyzes directed acyclic graph defined in operation set.
type GraphBuilder interface {
// Context carries values across API boundaries.
context.Context
// Logger represents the ability to log messages.
logr.Logger
// Run runs the GraphBuilder.
Run(<-chan struct{})
}
// graphBuilder validates directed acyclic graph defined in the operation set and generates paths
// according to the directed acyclic graph.
type graphBuilder struct {
// Context carries values across API boundaries.
context.Context
// Logger represents the ability to log messages.
logr.Logger
// client knows how to perform CRUD operations on Kubernetes objects.
client client.Client
// eventRecorder knows how to record events on behalf of an EventSource.
eventRecorder record.EventRecorder
// scheme defines methods for serializing and deserializing API objects.
scheme *runtime.Scheme
// cache knows how to load Kubernetes objects.
cache cache.Cache
// graphBuilderCh is a channel for queuing OperationSets to be processed by graph builder.
graphBuilderCh chan diagnosisv1.OperationSet
}
// NewGraphBuilder creates a new graph builder.
func NewGraphBuilder(
ctx context.Context,
logger logr.Logger,
cli client.Client,
eventRecorder record.EventRecorder,
scheme *runtime.Scheme,
cache cache.Cache,
graphBuilderCh chan diagnosisv1.OperationSet,
) GraphBuilder {
metrics.Registry.MustRegister(
graphbuilderSyncSuccessCount,
graphbuilderSyncSkipCount,
graphbuilderSyncErrorCount,
)
return &graphBuilder{
Context: ctx,
Logger: logger,
client: cli,
eventRecorder: eventRecorder,
scheme: scheme,
cache: cache,
graphBuilderCh: graphBuilderCh,
}
}
// Run runs the graph builder.
// TODO: Prometheus metrics.
func (gb *graphBuilder) Run(stopCh <-chan struct{}) {
// Wait for all caches to sync before processing.
if !gb.cache.WaitForCacheSync(stopCh) {
return
}
for {
select {
// Process operation sets queuing in graph builder channel.
case operationSet := <-gb.graphBuilderCh:
err := gb.client.Get(gb, client.ObjectKey{
Name: operationSet.Name,
}, &operationSet)
if err != nil {
if apierrors.IsNotFound(err) {
continue
}
gb.addDiagnosisToGraphBuilderQueue(operationSet)
continue
}
// Only process unready operation set.
if operationSet.Status.Ready {
graphbuilderSyncSkipCount.Inc()
continue
}
operationSet, err = gb.syncOperationSet(operationSet)
if err != nil {
gb.Error(err, "failed to sync OperationSet", "operationSet", operationSet)
gb.addDiagnosisToGraphBuilderQueue(operationSet)
continue
}
graphbuilderSyncSuccessCount.Inc()
// Stop graph builder on stop signal.
case <-stopCh:
return
}
}
}
// syncOperationSet syncs operation sets.
// TODO: Update conditions on error.
func (gb *graphBuilder) syncOperationSet(operationSet diagnosisv1.OperationSet) (diagnosisv1.OperationSet, error) {
// Build directed graph from adjacency list.
graph, err := newGraphFromAdjacencyList(operationSet.Spec.AdjacencyList)
if err != nil {
return operationSet, err
}
nodes := graph.Nodes()
for nodes.Next() {
node := nodes.Node()
toNodes := graph.To(node.ID())
source := true
for toNodes.Next() {
source = false
}
if node.ID() == 0 {
if !source {
// Return error if start node is the destination of other node in the graph.
return operationSet, fmt.Errorf("indegree of start node is not 0")
}
} else {
if source {
// Return error if some node is unreachable from start node in the graph.
return operationSet, fmt.Errorf("node %d is unreachable from start node", node.ID())
}
}
}
// Validate the graph does not have any cycles.
_, err = topo.Sort(graph)
if err != nil {
return operationSet, fmt.Errorf("invalid directed acyclic graph: %s", err)
}
// Search all paths from start node to any node with outdegree of 0.
diagnosisPaths, err := searchDiagnosisPaths(graph, len(operationSet.Spec.AdjacencyList))
if err != nil |
// Set operation set status with diagnosis paths.
paths := make([]diagnosisv1.Path, 0)
for _, diagnosisPath := range diagnosisPaths {
path := make(diagnosisv1.Path, 0)
for _, id := range diagnosisPath {
if operationSet.Spec.AdjacencyList[int(id)].ID != 0 {
path = append(path, operationSet.Spec.AdjacencyList[int(id)])
}
}
paths = append(paths, path)
}
operationSet.Status.Paths = paths
operationSet.Status.Ready = true
if err := gb.client.Status().Update(gb, &operationSet); err != nil {
return operationSet, fmt.Errorf("unable to update OperationSet: %s", err)
}
return operationSet, nil
}
// addDiagnosisToGraphBuilderQueue adds OperationSets to the queue processed by graph builder.
func (gb *graphBuilder) addDiagnosisToGraphBuilderQueue(operationSet diagnosisv1.OperationSet) {
graphbuilderSyncErrorCount.Inc()
err := util.QueueOperationSet(gb, gb.graphBuilderCh, operationSet)
if err != nil {
gb.Error(err, "failed to send operation set to graph builder queue", "operationset", client.ObjectKey{
Name: operationSet.Name,
})
}
}
// newGraphFromAdjacencyList builds a directed graph from a adjacency list.
// TODO: Panic recovery.
func newGraphFromAdjacencyList(adjacencyList []diagnosisv1.Node) (*simple.DirectedGraph, error) {
graph := simple.NewDirectedGraph()
for id, node := range adjacencyList {
if graph.Node(int64(id)) == nil {
graph.AddNode(simple.Node(id))
}
for _, to := range node.To {
graph.SetEdge(graph.NewEdge(simple.Node(id), simple.Node(to)))
}
}
return graph, nil
}
// searchDiagnosisPaths traverses all nodes in the directed acyclic graph from start node with id of 0.
// It returns all paths from start node to any node with outdegree of 0 and an error.
func searchDiagnosisPaths(graph *simple.DirectedGraph, nodeCount int) ([][]int64, error) {
var queue NodeQueue
visited := make([]bool, nodeCount)
nodePathCache := make([][][]int64, nodeCount)
sinkNodes := make([]int64, 0)
// Validate the graph contains start node with id of 0.
start := graph.Node(0)
if start == nil {
return nil, fmt.Errorf("start node not found in graph")
}
// Set start node as visited and enqueue all nodes that can reach directly from it.
visited[start.ID()] = true
fromNodes := graph.From(start.ID())
for fromNodes.Next() {
fromNode := fromNodes.Node()
queue.Enqueue(fromNode)
}
// Initialize node path cache with start node.
nodePaths := make([][]int64, 0)
nodePaths = append(nodePaths, []int64{start.ID()})
nodePathCache[start.ID()] = nodePaths
for queue.Len() != 0 {
// Dequeue a node from queue and retrieve all nodes that can reach directly to or from current node.
current := queue.Dequeue()
toNodes := graph.To(current.ID())
fromNodes := graph.From(current.ID())
// Skip current node if it has already been visited.
if visited[current.ID()] {
continue
}
// Set current node as visited if all nodes that can reach directly to current node are visited.
// Otherwise, enqueue current node.
visited[current.ID()] = true
for toNodes.Next() {
toNode := toNodes.Node()
if !visited[toNode.ID()] {
visited[current.ID()] = false
queue.Enqueue(current)
break
}
}
if visited[current.ID()] {
// Update node path of current node with visited node that can reach directly to current node.
toNodes.Reset()
for toNodes.Next() {
toNode := toNodes.Node()
nodePaths := nodePathCache[current.ID()]
if nodePaths == nil {
nodePaths = make([][]int64, 0)
}
toNodePaths := nodePathCache[toNode.ID()]
for _, toNodePath := range toNodePaths {
nodePath := make([]int64, len(toNodePath))
copy(nodePath, toNodePath)
nodePath = append(nodePath, current.ID())
nodePaths = append(nodePaths, nodePath)
}
// Node path appended by current node is updated as node path of current node.
nodePathCache[current.ID()] = nodePaths
}
// Enqueue all nodes that can reach directly from current node if current node is visited.
sink := true
for fromNodes.Next() {
sink = false
fromNode := fromNodes.Node()
queue.Enqueue(fromNode)
}
// Set current node as sink if its outdegree is 0.
if sink {
sinkNodes = append(sinkNodes, current.ID())
}
}
}
// Set diagnosis paths with all node paths of nodes which has outdegree of 0.
diagnosisPaths := make([][]int64, 0)
for _, id := range sinkNodes {
paths := nodePathCache[id]
diagnosisPaths = append(diagnosisPaths, paths...)
}
return diagnosisPaths, nil
}
| {
return operationSet, fmt.Errorf("unable to search diagnosis path: %s", err)
} | conditional_block |
graphbuilder.go | /*
Copyright 2021 The KubeDiag Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package graphbuilder
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"gonum.org/v1/gonum/graph/simple"
"gonum.org/v1/gonum/graph/topo"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/metrics"
diagnosisv1 "github.com/kubediag/kubediag/api/v1"
"github.com/kubediag/kubediag/pkg/util"
)
var (
graphbuilderSyncSuccessCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_success_count",
Help: "Counter of successful operationset syncs by graphbuilder",
},
)
graphbuilderSyncSkipCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_skip_count",
Help: "Counter of skipped operationset syncs by graphbuilder",
},
)
graphbuilderSyncErrorCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "graphbuilder_sync_error_count",
Help: "Counter of erroneous operationset syncs by graphbuilder",
},
)
)
// GraphBuilder analyzes directed acyclic graph defined in operation set.
type GraphBuilder interface {
// Context carries values across API boundaries.
context.Context
// Logger represents the ability to log messages.
logr.Logger
// Run runs the GraphBuilder.
Run(<-chan struct{})
}
// graphBuilder validates directed acyclic graph defined in the operation set and generates paths
// according to the directed acyclic graph.
type graphBuilder struct {
// Context carries values across API boundaries.
context.Context
// Logger represents the ability to log messages.
logr.Logger
// client knows how to perform CRUD operations on Kubernetes objects.
client client.Client
// eventRecorder knows how to record events on behalf of an EventSource.
eventRecorder record.EventRecorder
// scheme defines methods for serializing and deserializing API objects.
scheme *runtime.Scheme
// cache knows how to load Kubernetes objects.
cache cache.Cache
// graphBuilderCh is a channel for queuing OperationSets to be processed by graph builder.
graphBuilderCh chan diagnosisv1.OperationSet
}
// NewGraphBuilder creates a new graph builder.
func NewGraphBuilder(
ctx context.Context,
logger logr.Logger,
cli client.Client,
eventRecorder record.EventRecorder,
scheme *runtime.Scheme,
cache cache.Cache,
graphBuilderCh chan diagnosisv1.OperationSet,
) GraphBuilder {
metrics.Registry.MustRegister(
graphbuilderSyncSuccessCount,
graphbuilderSyncSkipCount,
graphbuilderSyncErrorCount,
)
return &graphBuilder{
Context: ctx,
Logger: logger,
client: cli,
eventRecorder: eventRecorder,
scheme: scheme,
cache: cache,
graphBuilderCh: graphBuilderCh,
}
}
// Run runs the graph builder.
// TODO: Prometheus metrics.
func (gb *graphBuilder) Run(stopCh <-chan struct{}) {
// Wait for all caches to sync before processing.
if !gb.cache.WaitForCacheSync(stopCh) {
return
}
for {
select { | }, &operationSet)
if err != nil {
if apierrors.IsNotFound(err) {
continue
}
gb.addDiagnosisToGraphBuilderQueue(operationSet)
continue
}
// Only process unready operation set.
if operationSet.Status.Ready {
graphbuilderSyncSkipCount.Inc()
continue
}
operationSet, err = gb.syncOperationSet(operationSet)
if err != nil {
gb.Error(err, "failed to sync OperationSet", "operationSet", operationSet)
gb.addDiagnosisToGraphBuilderQueue(operationSet)
continue
}
graphbuilderSyncSuccessCount.Inc()
// Stop graph builder on stop signal.
case <-stopCh:
return
}
}
}
// syncOperationSet syncs operation sets.
// TODO: Update conditions on error.
func (gb *graphBuilder) syncOperationSet(operationSet diagnosisv1.OperationSet) (diagnosisv1.OperationSet, error) {
// Build directed graph from adjacency list.
graph, err := newGraphFromAdjacencyList(operationSet.Spec.AdjacencyList)
if err != nil {
return operationSet, err
}
nodes := graph.Nodes()
for nodes.Next() {
node := nodes.Node()
toNodes := graph.To(node.ID())
source := true
for toNodes.Next() {
source = false
}
if node.ID() == 0 {
if !source {
// Return error if start node is the destination of other node in the graph.
return operationSet, fmt.Errorf("indegree of start node is not 0")
}
} else {
if source {
// Return error if some node is unreachable from start node in the graph.
return operationSet, fmt.Errorf("node %d is unreachable from start node", node.ID())
}
}
}
// Validate the graph does not have any cycles.
_, err = topo.Sort(graph)
if err != nil {
return operationSet, fmt.Errorf("invalid directed acyclic graph: %s", err)
}
// Search all paths from start node to any node with outdegree of 0.
diagnosisPaths, err := searchDiagnosisPaths(graph, len(operationSet.Spec.AdjacencyList))
if err != nil {
return operationSet, fmt.Errorf("unable to search diagnosis path: %s", err)
}
// Set operation set status with diagnosis paths.
paths := make([]diagnosisv1.Path, 0)
for _, diagnosisPath := range diagnosisPaths {
path := make(diagnosisv1.Path, 0)
for _, id := range diagnosisPath {
if operationSet.Spec.AdjacencyList[int(id)].ID != 0 {
path = append(path, operationSet.Spec.AdjacencyList[int(id)])
}
}
paths = append(paths, path)
}
operationSet.Status.Paths = paths
operationSet.Status.Ready = true
if err := gb.client.Status().Update(gb, &operationSet); err != nil {
return operationSet, fmt.Errorf("unable to update OperationSet: %s", err)
}
return operationSet, nil
}
// addDiagnosisToGraphBuilderQueue adds OperationSets to the queue processed by graph builder.
func (gb *graphBuilder) addDiagnosisToGraphBuilderQueue(operationSet diagnosisv1.OperationSet) {
graphbuilderSyncErrorCount.Inc()
err := util.QueueOperationSet(gb, gb.graphBuilderCh, operationSet)
if err != nil {
gb.Error(err, "failed to send operation set to graph builder queue", "operationset", client.ObjectKey{
Name: operationSet.Name,
})
}
}
// newGraphFromAdjacencyList builds a directed graph from a adjacency list.
// TODO: Panic recovery.
func newGraphFromAdjacencyList(adjacencyList []diagnosisv1.Node) (*simple.DirectedGraph, error) {
graph := simple.NewDirectedGraph()
for id, node := range adjacencyList {
if graph.Node(int64(id)) == nil {
graph.AddNode(simple.Node(id))
}
for _, to := range node.To {
graph.SetEdge(graph.NewEdge(simple.Node(id), simple.Node(to)))
}
}
return graph, nil
}
// searchDiagnosisPaths traverses all nodes in the directed acyclic graph from start node with id of 0.
// It returns all paths from start node to any node with outdegree of 0 and an error.
func searchDiagnosisPaths(graph *simple.DirectedGraph, nodeCount int) ([][]int64, error) {
var queue NodeQueue
visited := make([]bool, nodeCount)
nodePathCache := make([][][]int64, nodeCount)
sinkNodes := make([]int64, 0)
// Validate the graph contains start node with id of 0.
start := graph.Node(0)
if start == nil {
return nil, fmt.Errorf("start node not found in graph")
}
// Set start node as visited and enqueue all nodes that can reach directly from it.
visited[start.ID()] = true
fromNodes := graph.From(start.ID())
for fromNodes.Next() {
fromNode := fromNodes.Node()
queue.Enqueue(fromNode)
}
// Initialize node path cache with start node.
nodePaths := make([][]int64, 0)
nodePaths = append(nodePaths, []int64{start.ID()})
nodePathCache[start.ID()] = nodePaths
for queue.Len() != 0 {
// Dequeue a node from queue and retrieve all nodes that can reach directly to or from current node.
current := queue.Dequeue()
toNodes := graph.To(current.ID())
fromNodes := graph.From(current.ID())
// Skip current node if it has already been visited.
if visited[current.ID()] {
continue
}
// Set current node as visited if all nodes that can reach directly to current node are visited.
// Otherwise, enqueue current node.
visited[current.ID()] = true
for toNodes.Next() {
toNode := toNodes.Node()
if !visited[toNode.ID()] {
visited[current.ID()] = false
queue.Enqueue(current)
break
}
}
if visited[current.ID()] {
// Update node path of current node with visited node that can reach directly to current node.
toNodes.Reset()
for toNodes.Next() {
toNode := toNodes.Node()
nodePaths := nodePathCache[current.ID()]
if nodePaths == nil {
nodePaths = make([][]int64, 0)
}
toNodePaths := nodePathCache[toNode.ID()]
for _, toNodePath := range toNodePaths {
nodePath := make([]int64, len(toNodePath))
copy(nodePath, toNodePath)
nodePath = append(nodePath, current.ID())
nodePaths = append(nodePaths, nodePath)
}
// Node path appended by current node is updated as node path of current node.
nodePathCache[current.ID()] = nodePaths
}
// Enqueue all nodes that can reach directly from current node if current node is visited.
sink := true
for fromNodes.Next() {
sink = false
fromNode := fromNodes.Node()
queue.Enqueue(fromNode)
}
// Set current node as sink if its outdegree is 0.
if sink {
sinkNodes = append(sinkNodes, current.ID())
}
}
}
// Set diagnosis paths with all node paths of nodes which has outdegree of 0.
diagnosisPaths := make([][]int64, 0)
for _, id := range sinkNodes {
paths := nodePathCache[id]
diagnosisPaths = append(diagnosisPaths, paths...)
}
return diagnosisPaths, nil
} | // Process operation sets queuing in graph builder channel.
case operationSet := <-gb.graphBuilderCh:
err := gb.client.Get(gb, client.ObjectKey{
Name: operationSet.Name, | random_line_split |
lib.rs | //!
//! Basic account support with an authenticator. Primarly used for development/testing.
//! Uses a 'Trust Anchor' approach to bootstrapping users: Genesis accounts can create other accounts.
//!
use anyhow::{bail, ensure};
use borsh::{BorshDeserialize, BorshSerialize};
use exonum_crypto::{hash, PublicKey, PUBLIC_KEY_LENGTH};
use rapido_core::{
verify_tx_signature, AccountId, AppModule, Authenticator, Context, SignedTransaction, Store,
StoreView,
};
#[macro_use]
extern crate rapido_core;
const ACCOUNT_APP_NAME: &str = "rapido.account";
const ACCOUNT_STORE_NAME: &str = "rapido.account.store";
pub type PublicKeyBytes = [u8; PUBLIC_KEY_LENGTH];
// Format of the account id: base58(hash(pubkey))
fn generate_account_id(pk: &PublicKey) -> Vec<u8> {
let hash = hash(&pk.as_bytes());
bs58::encode(&hash.as_bytes()).into_vec()
}
/// Account Model
#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq, Clone)]
pub struct Account {
pub id: AccountId,
pub nonce: u64,
pub pubkey: PublicKeyBytes,
// flag: can this entity create accounts
trustanchor: bool,
}
impl Account {
/// Create a new account given a public key
pub fn create(pk: &PublicKey, is_ta: bool) -> Self {
Self {
id: generate_account_id(pk),
nonce: 0u64,
pubkey: pk.as_bytes(),
trustanchor: is_ta,
}
}
pub fn id(&self) -> Vec<u8> {
self.id.clone()
}
/// Return the base58 account id
pub fn id_to_str(&self) -> anyhow::Result<String, anyhow::Error> {
let i = String::from_utf8(self.id.clone());
ensure!(i.is_ok(), "problem decoding account id to string");
Ok(i.unwrap())
}
/// Is the account a trust anchor?
pub fn is_trust_anchor(&self) -> bool {
self.trustanchor
}
pub fn update_pubkey(&self, pk: PublicKeyBytes) -> Self {
Self {
id: self.id.clone(),
nonce: self.nonce,
pubkey: pk,
trustanchor: self.trustanchor,
}
}
/// Increment the nonce for the account
pub fn increment_nonce(&self) -> Self {
Self {
id: self.id.clone(),
nonce: self.nonce + 1,
pubkey: self.pubkey,
trustanchor: self.trustanchor,
}
}
}
impl_store_values!(Account);
/// Account Store
pub(crate) struct AccountStore;
impl Store for AccountStore {
type Key = AccountId;
type Value = Account;
fn name(&self) -> String {
ACCOUNT_STORE_NAME.into()
}
}
impl AccountStore {
pub fn new() -> Self {
AccountStore {}
}
}
/// Message used in Transactions
#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq, Clone)]
pub enum Msgs {
Create(PublicKeyBytes),
ChangePubKey(PublicKeyBytes),
}
pub struct AccountModule {
// PublicKeys of genesis accounts
genesis: Vec<[u8; 32]>,
}
impl AccountModule {
pub fn new(genesis: Vec<[u8; 32]>) -> Self {
Self { genesis }
}
}
impl AppModule for AccountModule {
fn name(&self) -> String {
ACCOUNT_APP_NAME.into()
}
// Load genesis accounts. These entries become the trust anchors
fn initialize(&self, view: &mut StoreView) -> Result<(), anyhow::Error> {
let store = AccountStore::new();
for pk in &self.genesis {
let pubkey = PublicKey::from_slice(&pk[..]).expect("genesis: decode public key");
let account = Account::create(&pubkey, true); // <= make them a trust anchor
store.put(account.id(), account, view)
}
Ok(())
}
fn handle_tx(&self, ctx: &Context, view: &mut StoreView) -> Result<(), anyhow::Error> {
let msg: Msgs = ctx.decode_msg()?;
match msg {
// Create an account. The origin of this call, must be a trust anchor
Msgs::Create(pubkey) => {
let store = AccountStore::new();
// Ensure the caller's account exists and they are a trust anchor
let caller_acct = store.get(ctx.sender(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
ensure!(
acct.is_trust_anchor(),
"only a trust anchor can create an account"
);
let pk = PublicKey::from_slice(&pubkey[..]);
ensure!(pk.is_some(), "problem decoding the public key");
// Create the new account
let new_account = Account::create(&pk.unwrap(), false);
store.put(new_account.id(), new_account, view);
Ok(())
}
// Change an existing publickey. The origin of this call is the owner
// of the publickey
Msgs::ChangePubKey(pubkey) => |
}
}
fn handle_query(
&self,
path: &str,
key: Vec<u8>,
view: &StoreView,
) -> Result<Vec<u8>, anyhow::Error> {
ensure!(key.len() > 0, "bad account key");
// return a serialized account for the given id.
match path {
"/" => {
let account = key;
let store = AccountStore::new();
let req_acct = store.get(account, &view);
ensure!(req_acct.is_some(), "account not found");
let acct: Account = req_acct.unwrap();
let bits = acct.try_to_vec()?;
Ok(bits)
}
_ => bail!("{:} not found", path),
}
}
}
// Authenticator
pub struct AccountAuthenticator;
impl Authenticator for AccountAuthenticator {
fn validate(
&self,
tx: &SignedTransaction,
view: &StoreView,
) -> anyhow::Result<(), anyhow::Error> {
let caller = tx.sender();
let txnonce = tx.nonce();
let store = AccountStore::new();
let caller_acct = store.get(caller, &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let caller_pubkey = PublicKey::from_slice(&acct.pubkey[..]);
ensure!(
caller_pubkey.is_some(),
"problem decoding the user's public key"
);
// Validate signature
ensure!(
verify_tx_signature(&tx, &caller_pubkey.unwrap()),
"bad signature"
);
// Check nonce
ensure!(acct.nonce == txnonce, "nonce don't match");
Ok(())
}
fn increment_nonce(
&self,
tx: &SignedTransaction,
view: &mut StoreView,
) -> anyhow::Result<(), anyhow::Error> {
let caller = tx.sender();
let store = AccountStore::new();
let caller_acct = store.get(caller.clone(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let unonce = acct.increment_nonce();
store.put(caller.clone(), unonce, view);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use exonum_crypto::{gen_keypair, SecretKey};
use rapido_core::{testing_keypair, AppBuilder, TestKit};
fn create_account(name: &str) -> (Vec<u8>, PublicKeyBytes, SecretKey) {
let (pk, sk) = testing_keypair(name);
let acct = Account::create(&pk, true);
(acct.id(), acct.pubkey, sk)
}
fn get_genesis_accounts() -> Vec<[u8; 32]> {
vec![
create_account("bob").1,
create_account("alice").1,
create_account("tom").1,
]
}
fn gen_tx(user: Vec<u8>, secret_key: &SecretKey, nonce: u64) -> SignedTransaction {
let mut tx = SignedTransaction::create(
user,
ACCOUNT_APP_NAME,
Msgs::Create([1u8; 32]), // fake data
nonce,
);
tx.sign(&secret_key);
tx
}
#[test]
fn test_account_authenticator() {
// Check signature verification and nonce rules are enforced
let app = AppBuilder::new()
.set_authenticator(AccountAuthenticator {})
.with_app(AccountModule::new(get_genesis_accounts()));
let mut tester = TestKit::create(app);
tester.start();
let (bob, _bpk, bsk) = create_account("bob");
// Check signatures and correct nonce
let txs = &[
&gen_tx(bob.clone(), &bsk, 0u64),
&gen_tx(bob.clone(), &bsk, 1u64),
&gen_tx(bob.clone(), &bsk, 2u64),
&gen_tx(bob.clone(), &bsk, 3u64),
];
assert!(tester.check_tx(txs).is_ok());
// Wrong nonce
assert!(tester
.check_tx(&[&gen_tx(bob.clone(), &bsk, 5u64)])
.is_err());
// Bad signature: bob's ID but signed with wrong key
let (_rpk, rsk) = gen_keypair();
assert!(tester
.check_tx(&[&gen_tx(bob.clone(), &rsk, 0u64)])
.is_err());
}
#[test]
fn test_ta_account_create() {
// Bob will create an account for Carol
// Carol will try to create an account for Andy...but it'll fail
let app = AppBuilder::new()
.set_authenticator(AccountAuthenticator {})
.with_app(AccountModule::new(get_genesis_accounts()));
let mut tester = TestKit::create(app);
tester.start();
let (bob, _bpk, bsk) = create_account("bob");
let (carol, cpk, csk) = create_account("carol");
let (_andy, apk, _) = create_account("andy");
let mut tx =
SignedTransaction::create(bob.clone(), ACCOUNT_APP_NAME, Msgs::Create(cpk), 0u64);
tx.sign(&bsk);
assert!(tester.check_tx(&[&tx]).is_ok());
assert!(tester.commit_tx(&[&tx]).is_ok());
assert!(tester.query("rapido.account", carol.clone()).is_ok());
let mut tx1 =
SignedTransaction::create(carol.clone(), ACCOUNT_APP_NAME, Msgs::Create(apk), 0u64);
tx1.sign(&csk);
// Check passes...but
assert!(tester.check_tx(&[&tx1]).is_ok());
// deliver fails...carol is not a TA
assert!(tester.commit_tx(&[&tx1]).is_err());
}
#[test]
fn test_account_chng_pubkey() {
// Bob will change is pubkey. Make sure he can authenticate with it
assert!(true)
}
}
| {
let store = AccountStore::new();
let caller_acct = store.get(ctx.sender(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let updated = acct.update_pubkey(pubkey);
store.put(updated.id(), updated, view);
Ok(())
} | conditional_block |
lib.rs | //!
//! Basic account support with an authenticator. Primarly used for development/testing.
//! Uses a 'Trust Anchor' approach to bootstrapping users: Genesis accounts can create other accounts.
//!
use anyhow::{bail, ensure};
use borsh::{BorshDeserialize, BorshSerialize};
use exonum_crypto::{hash, PublicKey, PUBLIC_KEY_LENGTH};
use rapido_core::{
verify_tx_signature, AccountId, AppModule, Authenticator, Context, SignedTransaction, Store,
StoreView,
};
#[macro_use]
extern crate rapido_core;
const ACCOUNT_APP_NAME: &str = "rapido.account";
const ACCOUNT_STORE_NAME: &str = "rapido.account.store";
pub type PublicKeyBytes = [u8; PUBLIC_KEY_LENGTH];
// Format of the account id: base58(hash(pubkey))
fn generate_account_id(pk: &PublicKey) -> Vec<u8> {
let hash = hash(&pk.as_bytes());
bs58::encode(&hash.as_bytes()).into_vec()
}
/// Account Model
#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq, Clone)]
pub struct Account {
pub id: AccountId,
pub nonce: u64,
pub pubkey: PublicKeyBytes,
// flag: can this entity create accounts
trustanchor: bool,
}
impl Account {
/// Create a new account given a public key
pub fn create(pk: &PublicKey, is_ta: bool) -> Self {
Self {
id: generate_account_id(pk),
nonce: 0u64,
pubkey: pk.as_bytes(),
trustanchor: is_ta,
}
}
pub fn id(&self) -> Vec<u8> {
self.id.clone()
}
/// Return the base58 account id
pub fn id_to_str(&self) -> anyhow::Result<String, anyhow::Error> {
let i = String::from_utf8(self.id.clone());
ensure!(i.is_ok(), "problem decoding account id to string");
Ok(i.unwrap())
}
/// Is the account a trust anchor?
pub fn is_trust_anchor(&self) -> bool {
self.trustanchor
}
pub fn update_pubkey(&self, pk: PublicKeyBytes) -> Self {
Self {
id: self.id.clone(),
nonce: self.nonce,
pubkey: pk,
trustanchor: self.trustanchor,
}
}
/// Increment the nonce for the account
pub fn increment_nonce(&self) -> Self {
Self {
id: self.id.clone(),
nonce: self.nonce + 1,
pubkey: self.pubkey,
trustanchor: self.trustanchor,
}
}
}
impl_store_values!(Account);
/// Account Store
pub(crate) struct AccountStore;
impl Store for AccountStore {
type Key = AccountId;
type Value = Account;
fn name(&self) -> String {
ACCOUNT_STORE_NAME.into()
}
}
impl AccountStore {
pub fn new() -> Self {
AccountStore {}
}
}
/// Message used in Transactions |
pub struct AccountModule {
// PublicKeys of genesis accounts
genesis: Vec<[u8; 32]>,
}
impl AccountModule {
pub fn new(genesis: Vec<[u8; 32]>) -> Self {
Self { genesis }
}
}
impl AppModule for AccountModule {
fn name(&self) -> String {
ACCOUNT_APP_NAME.into()
}
// Load genesis accounts. These entries become the trust anchors
fn initialize(&self, view: &mut StoreView) -> Result<(), anyhow::Error> {
let store = AccountStore::new();
for pk in &self.genesis {
let pubkey = PublicKey::from_slice(&pk[..]).expect("genesis: decode public key");
let account = Account::create(&pubkey, true); // <= make them a trust anchor
store.put(account.id(), account, view)
}
Ok(())
}
fn handle_tx(&self, ctx: &Context, view: &mut StoreView) -> Result<(), anyhow::Error> {
let msg: Msgs = ctx.decode_msg()?;
match msg {
// Create an account. The origin of this call, must be a trust anchor
Msgs::Create(pubkey) => {
let store = AccountStore::new();
// Ensure the caller's account exists and they are a trust anchor
let caller_acct = store.get(ctx.sender(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
ensure!(
acct.is_trust_anchor(),
"only a trust anchor can create an account"
);
let pk = PublicKey::from_slice(&pubkey[..]);
ensure!(pk.is_some(), "problem decoding the public key");
// Create the new account
let new_account = Account::create(&pk.unwrap(), false);
store.put(new_account.id(), new_account, view);
Ok(())
}
// Change an existing publickey. The origin of this call is the owner
// of the publickey
Msgs::ChangePubKey(pubkey) => {
let store = AccountStore::new();
let caller_acct = store.get(ctx.sender(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let updated = acct.update_pubkey(pubkey);
store.put(updated.id(), updated, view);
Ok(())
}
}
}
fn handle_query(
&self,
path: &str,
key: Vec<u8>,
view: &StoreView,
) -> Result<Vec<u8>, anyhow::Error> {
ensure!(key.len() > 0, "bad account key");
// return a serialized account for the given id.
match path {
"/" => {
let account = key;
let store = AccountStore::new();
let req_acct = store.get(account, &view);
ensure!(req_acct.is_some(), "account not found");
let acct: Account = req_acct.unwrap();
let bits = acct.try_to_vec()?;
Ok(bits)
}
_ => bail!("{:} not found", path),
}
}
}
// Authenticator
pub struct AccountAuthenticator;
impl Authenticator for AccountAuthenticator {
fn validate(
&self,
tx: &SignedTransaction,
view: &StoreView,
) -> anyhow::Result<(), anyhow::Error> {
let caller = tx.sender();
let txnonce = tx.nonce();
let store = AccountStore::new();
let caller_acct = store.get(caller, &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let caller_pubkey = PublicKey::from_slice(&acct.pubkey[..]);
ensure!(
caller_pubkey.is_some(),
"problem decoding the user's public key"
);
// Validate signature
ensure!(
verify_tx_signature(&tx, &caller_pubkey.unwrap()),
"bad signature"
);
// Check nonce
ensure!(acct.nonce == txnonce, "nonce don't match");
Ok(())
}
fn increment_nonce(
&self,
tx: &SignedTransaction,
view: &mut StoreView,
) -> anyhow::Result<(), anyhow::Error> {
let caller = tx.sender();
let store = AccountStore::new();
let caller_acct = store.get(caller.clone(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let unonce = acct.increment_nonce();
store.put(caller.clone(), unonce, view);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use exonum_crypto::{gen_keypair, SecretKey};
use rapido_core::{testing_keypair, AppBuilder, TestKit};
fn create_account(name: &str) -> (Vec<u8>, PublicKeyBytes, SecretKey) {
let (pk, sk) = testing_keypair(name);
let acct = Account::create(&pk, true);
(acct.id(), acct.pubkey, sk)
}
fn get_genesis_accounts() -> Vec<[u8; 32]> {
vec![
create_account("bob").1,
create_account("alice").1,
create_account("tom").1,
]
}
fn gen_tx(user: Vec<u8>, secret_key: &SecretKey, nonce: u64) -> SignedTransaction {
let mut tx = SignedTransaction::create(
user,
ACCOUNT_APP_NAME,
Msgs::Create([1u8; 32]), // fake data
nonce,
);
tx.sign(&secret_key);
tx
}
#[test]
fn test_account_authenticator() {
// Check signature verification and nonce rules are enforced
let app = AppBuilder::new()
.set_authenticator(AccountAuthenticator {})
.with_app(AccountModule::new(get_genesis_accounts()));
let mut tester = TestKit::create(app);
tester.start();
let (bob, _bpk, bsk) = create_account("bob");
// Check signatures and correct nonce
let txs = &[
&gen_tx(bob.clone(), &bsk, 0u64),
&gen_tx(bob.clone(), &bsk, 1u64),
&gen_tx(bob.clone(), &bsk, 2u64),
&gen_tx(bob.clone(), &bsk, 3u64),
];
assert!(tester.check_tx(txs).is_ok());
// Wrong nonce
assert!(tester
.check_tx(&[&gen_tx(bob.clone(), &bsk, 5u64)])
.is_err());
// Bad signature: bob's ID but signed with wrong key
let (_rpk, rsk) = gen_keypair();
assert!(tester
.check_tx(&[&gen_tx(bob.clone(), &rsk, 0u64)])
.is_err());
}
#[test]
fn test_ta_account_create() {
// Bob will create an account for Carol
// Carol will try to create an account for Andy...but it'll fail
let app = AppBuilder::new()
.set_authenticator(AccountAuthenticator {})
.with_app(AccountModule::new(get_genesis_accounts()));
let mut tester = TestKit::create(app);
tester.start();
let (bob, _bpk, bsk) = create_account("bob");
let (carol, cpk, csk) = create_account("carol");
let (_andy, apk, _) = create_account("andy");
let mut tx =
SignedTransaction::create(bob.clone(), ACCOUNT_APP_NAME, Msgs::Create(cpk), 0u64);
tx.sign(&bsk);
assert!(tester.check_tx(&[&tx]).is_ok());
assert!(tester.commit_tx(&[&tx]).is_ok());
assert!(tester.query("rapido.account", carol.clone()).is_ok());
let mut tx1 =
SignedTransaction::create(carol.clone(), ACCOUNT_APP_NAME, Msgs::Create(apk), 0u64);
tx1.sign(&csk);
// Check passes...but
assert!(tester.check_tx(&[&tx1]).is_ok());
// deliver fails...carol is not a TA
assert!(tester.commit_tx(&[&tx1]).is_err());
}
#[test]
fn test_account_chng_pubkey() {
// Bob will change is pubkey. Make sure he can authenticate with it
assert!(true)
}
} | #[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq, Clone)]
pub enum Msgs {
Create(PublicKeyBytes),
ChangePubKey(PublicKeyBytes),
} | random_line_split |
lib.rs | //!
//! Basic account support with an authenticator. Primarly used for development/testing.
//! Uses a 'Trust Anchor' approach to bootstrapping users: Genesis accounts can create other accounts.
//!
use anyhow::{bail, ensure};
use borsh::{BorshDeserialize, BorshSerialize};
use exonum_crypto::{hash, PublicKey, PUBLIC_KEY_LENGTH};
use rapido_core::{
verify_tx_signature, AccountId, AppModule, Authenticator, Context, SignedTransaction, Store,
StoreView,
};
#[macro_use]
extern crate rapido_core;
const ACCOUNT_APP_NAME: &str = "rapido.account";
const ACCOUNT_STORE_NAME: &str = "rapido.account.store";
pub type PublicKeyBytes = [u8; PUBLIC_KEY_LENGTH];
// Format of the account id: base58(hash(pubkey))
fn generate_account_id(pk: &PublicKey) -> Vec<u8> {
let hash = hash(&pk.as_bytes());
bs58::encode(&hash.as_bytes()).into_vec()
}
/// Account Model
#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq, Clone)]
pub struct Account {
pub id: AccountId,
pub nonce: u64,
pub pubkey: PublicKeyBytes,
// flag: can this entity create accounts
trustanchor: bool,
}
impl Account {
/// Create a new account given a public key
pub fn create(pk: &PublicKey, is_ta: bool) -> Self {
Self {
id: generate_account_id(pk),
nonce: 0u64,
pubkey: pk.as_bytes(),
trustanchor: is_ta,
}
}
pub fn id(&self) -> Vec<u8> {
self.id.clone()
}
/// Return the base58 account id
pub fn id_to_str(&self) -> anyhow::Result<String, anyhow::Error> {
let i = String::from_utf8(self.id.clone());
ensure!(i.is_ok(), "problem decoding account id to string");
Ok(i.unwrap())
}
/// Is the account a trust anchor?
pub fn is_trust_anchor(&self) -> bool {
self.trustanchor
}
pub fn update_pubkey(&self, pk: PublicKeyBytes) -> Self {
Self {
id: self.id.clone(),
nonce: self.nonce,
pubkey: pk,
trustanchor: self.trustanchor,
}
}
/// Increment the nonce for the account
pub fn increment_nonce(&self) -> Self {
Self {
id: self.id.clone(),
nonce: self.nonce + 1,
pubkey: self.pubkey,
trustanchor: self.trustanchor,
}
}
}
impl_store_values!(Account);
/// Account Store
pub(crate) struct AccountStore;
impl Store for AccountStore {
type Key = AccountId;
type Value = Account;
fn name(&self) -> String {
ACCOUNT_STORE_NAME.into()
}
}
impl AccountStore {
pub fn new() -> Self {
AccountStore {}
}
}
/// Message used in Transactions
#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq, Clone)]
pub enum | {
Create(PublicKeyBytes),
ChangePubKey(PublicKeyBytes),
}
pub struct AccountModule {
// PublicKeys of genesis accounts
genesis: Vec<[u8; 32]>,
}
impl AccountModule {
pub fn new(genesis: Vec<[u8; 32]>) -> Self {
Self { genesis }
}
}
impl AppModule for AccountModule {
fn name(&self) -> String {
ACCOUNT_APP_NAME.into()
}
// Load genesis accounts. These entries become the trust anchors
fn initialize(&self, view: &mut StoreView) -> Result<(), anyhow::Error> {
let store = AccountStore::new();
for pk in &self.genesis {
let pubkey = PublicKey::from_slice(&pk[..]).expect("genesis: decode public key");
let account = Account::create(&pubkey, true); // <= make them a trust anchor
store.put(account.id(), account, view)
}
Ok(())
}
fn handle_tx(&self, ctx: &Context, view: &mut StoreView) -> Result<(), anyhow::Error> {
let msg: Msgs = ctx.decode_msg()?;
match msg {
// Create an account. The origin of this call, must be a trust anchor
Msgs::Create(pubkey) => {
let store = AccountStore::new();
// Ensure the caller's account exists and they are a trust anchor
let caller_acct = store.get(ctx.sender(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
ensure!(
acct.is_trust_anchor(),
"only a trust anchor can create an account"
);
let pk = PublicKey::from_slice(&pubkey[..]);
ensure!(pk.is_some(), "problem decoding the public key");
// Create the new account
let new_account = Account::create(&pk.unwrap(), false);
store.put(new_account.id(), new_account, view);
Ok(())
}
// Change an existing publickey. The origin of this call is the owner
// of the publickey
Msgs::ChangePubKey(pubkey) => {
let store = AccountStore::new();
let caller_acct = store.get(ctx.sender(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let updated = acct.update_pubkey(pubkey);
store.put(updated.id(), updated, view);
Ok(())
}
}
}
fn handle_query(
&self,
path: &str,
key: Vec<u8>,
view: &StoreView,
) -> Result<Vec<u8>, anyhow::Error> {
ensure!(key.len() > 0, "bad account key");
// return a serialized account for the given id.
match path {
"/" => {
let account = key;
let store = AccountStore::new();
let req_acct = store.get(account, &view);
ensure!(req_acct.is_some(), "account not found");
let acct: Account = req_acct.unwrap();
let bits = acct.try_to_vec()?;
Ok(bits)
}
_ => bail!("{:} not found", path),
}
}
}
// Authenticator
pub struct AccountAuthenticator;
impl Authenticator for AccountAuthenticator {
fn validate(
&self,
tx: &SignedTransaction,
view: &StoreView,
) -> anyhow::Result<(), anyhow::Error> {
let caller = tx.sender();
let txnonce = tx.nonce();
let store = AccountStore::new();
let caller_acct = store.get(caller, &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let caller_pubkey = PublicKey::from_slice(&acct.pubkey[..]);
ensure!(
caller_pubkey.is_some(),
"problem decoding the user's public key"
);
// Validate signature
ensure!(
verify_tx_signature(&tx, &caller_pubkey.unwrap()),
"bad signature"
);
// Check nonce
ensure!(acct.nonce == txnonce, "nonce don't match");
Ok(())
}
fn increment_nonce(
&self,
tx: &SignedTransaction,
view: &mut StoreView,
) -> anyhow::Result<(), anyhow::Error> {
let caller = tx.sender();
let store = AccountStore::new();
let caller_acct = store.get(caller.clone(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let unonce = acct.increment_nonce();
store.put(caller.clone(), unonce, view);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use exonum_crypto::{gen_keypair, SecretKey};
use rapido_core::{testing_keypair, AppBuilder, TestKit};
fn create_account(name: &str) -> (Vec<u8>, PublicKeyBytes, SecretKey) {
let (pk, sk) = testing_keypair(name);
let acct = Account::create(&pk, true);
(acct.id(), acct.pubkey, sk)
}
fn get_genesis_accounts() -> Vec<[u8; 32]> {
vec![
create_account("bob").1,
create_account("alice").1,
create_account("tom").1,
]
}
fn gen_tx(user: Vec<u8>, secret_key: &SecretKey, nonce: u64) -> SignedTransaction {
let mut tx = SignedTransaction::create(
user,
ACCOUNT_APP_NAME,
Msgs::Create([1u8; 32]), // fake data
nonce,
);
tx.sign(&secret_key);
tx
}
#[test]
fn test_account_authenticator() {
// Check signature verification and nonce rules are enforced
let app = AppBuilder::new()
.set_authenticator(AccountAuthenticator {})
.with_app(AccountModule::new(get_genesis_accounts()));
let mut tester = TestKit::create(app);
tester.start();
let (bob, _bpk, bsk) = create_account("bob");
// Check signatures and correct nonce
let txs = &[
&gen_tx(bob.clone(), &bsk, 0u64),
&gen_tx(bob.clone(), &bsk, 1u64),
&gen_tx(bob.clone(), &bsk, 2u64),
&gen_tx(bob.clone(), &bsk, 3u64),
];
assert!(tester.check_tx(txs).is_ok());
// Wrong nonce
assert!(tester
.check_tx(&[&gen_tx(bob.clone(), &bsk, 5u64)])
.is_err());
// Bad signature: bob's ID but signed with wrong key
let (_rpk, rsk) = gen_keypair();
assert!(tester
.check_tx(&[&gen_tx(bob.clone(), &rsk, 0u64)])
.is_err());
}
#[test]
fn test_ta_account_create() {
// Bob will create an account for Carol
// Carol will try to create an account for Andy...but it'll fail
let app = AppBuilder::new()
.set_authenticator(AccountAuthenticator {})
.with_app(AccountModule::new(get_genesis_accounts()));
let mut tester = TestKit::create(app);
tester.start();
let (bob, _bpk, bsk) = create_account("bob");
let (carol, cpk, csk) = create_account("carol");
let (_andy, apk, _) = create_account("andy");
let mut tx =
SignedTransaction::create(bob.clone(), ACCOUNT_APP_NAME, Msgs::Create(cpk), 0u64);
tx.sign(&bsk);
assert!(tester.check_tx(&[&tx]).is_ok());
assert!(tester.commit_tx(&[&tx]).is_ok());
assert!(tester.query("rapido.account", carol.clone()).is_ok());
let mut tx1 =
SignedTransaction::create(carol.clone(), ACCOUNT_APP_NAME, Msgs::Create(apk), 0u64);
tx1.sign(&csk);
// Check passes...but
assert!(tester.check_tx(&[&tx1]).is_ok());
// deliver fails...carol is not a TA
assert!(tester.commit_tx(&[&tx1]).is_err());
}
#[test]
fn test_account_chng_pubkey() {
// Bob will change is pubkey. Make sure he can authenticate with it
assert!(true)
}
}
| Msgs | identifier_name |
lib.rs | //!
//! Basic account support with an authenticator. Primarly used for development/testing.
//! Uses a 'Trust Anchor' approach to bootstrapping users: Genesis accounts can create other accounts.
//!
use anyhow::{bail, ensure};
use borsh::{BorshDeserialize, BorshSerialize};
use exonum_crypto::{hash, PublicKey, PUBLIC_KEY_LENGTH};
use rapido_core::{
verify_tx_signature, AccountId, AppModule, Authenticator, Context, SignedTransaction, Store,
StoreView,
};
#[macro_use]
extern crate rapido_core;
const ACCOUNT_APP_NAME: &str = "rapido.account";
const ACCOUNT_STORE_NAME: &str = "rapido.account.store";
pub type PublicKeyBytes = [u8; PUBLIC_KEY_LENGTH];
// Format of the account id: base58(hash(pubkey))
fn generate_account_id(pk: &PublicKey) -> Vec<u8> {
let hash = hash(&pk.as_bytes());
bs58::encode(&hash.as_bytes()).into_vec()
}
/// Account Model
#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq, Clone)]
pub struct Account {
pub id: AccountId,
pub nonce: u64,
pub pubkey: PublicKeyBytes,
// flag: can this entity create accounts
trustanchor: bool,
}
impl Account {
/// Create a new account given a public key
pub fn create(pk: &PublicKey, is_ta: bool) -> Self {
Self {
id: generate_account_id(pk),
nonce: 0u64,
pubkey: pk.as_bytes(),
trustanchor: is_ta,
}
}
pub fn id(&self) -> Vec<u8> {
self.id.clone()
}
/// Return the base58 account id
pub fn id_to_str(&self) -> anyhow::Result<String, anyhow::Error> {
let i = String::from_utf8(self.id.clone());
ensure!(i.is_ok(), "problem decoding account id to string");
Ok(i.unwrap())
}
/// Is the account a trust anchor?
pub fn is_trust_anchor(&self) -> bool {
self.trustanchor
}
pub fn update_pubkey(&self, pk: PublicKeyBytes) -> Self {
Self {
id: self.id.clone(),
nonce: self.nonce,
pubkey: pk,
trustanchor: self.trustanchor,
}
}
/// Increment the nonce for the account
pub fn increment_nonce(&self) -> Self {
Self {
id: self.id.clone(),
nonce: self.nonce + 1,
pubkey: self.pubkey,
trustanchor: self.trustanchor,
}
}
}
impl_store_values!(Account);
/// Account Store
pub(crate) struct AccountStore;
impl Store for AccountStore {
type Key = AccountId;
type Value = Account;
fn name(&self) -> String {
ACCOUNT_STORE_NAME.into()
}
}
impl AccountStore {
pub fn new() -> Self {
AccountStore {}
}
}
/// Message used in Transactions
#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq, Clone)]
pub enum Msgs {
Create(PublicKeyBytes),
ChangePubKey(PublicKeyBytes),
}
pub struct AccountModule {
// PublicKeys of genesis accounts
genesis: Vec<[u8; 32]>,
}
impl AccountModule {
pub fn new(genesis: Vec<[u8; 32]>) -> Self {
Self { genesis }
}
}
impl AppModule for AccountModule {
fn name(&self) -> String {
ACCOUNT_APP_NAME.into()
}
// Load genesis accounts. These entries become the trust anchors
fn initialize(&self, view: &mut StoreView) -> Result<(), anyhow::Error> {
let store = AccountStore::new();
for pk in &self.genesis {
let pubkey = PublicKey::from_slice(&pk[..]).expect("genesis: decode public key");
let account = Account::create(&pubkey, true); // <= make them a trust anchor
store.put(account.id(), account, view)
}
Ok(())
}
fn handle_tx(&self, ctx: &Context, view: &mut StoreView) -> Result<(), anyhow::Error> {
let msg: Msgs = ctx.decode_msg()?;
match msg {
// Create an account. The origin of this call, must be a trust anchor
Msgs::Create(pubkey) => {
let store = AccountStore::new();
// Ensure the caller's account exists and they are a trust anchor
let caller_acct = store.get(ctx.sender(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
ensure!(
acct.is_trust_anchor(),
"only a trust anchor can create an account"
);
let pk = PublicKey::from_slice(&pubkey[..]);
ensure!(pk.is_some(), "problem decoding the public key");
// Create the new account
let new_account = Account::create(&pk.unwrap(), false);
store.put(new_account.id(), new_account, view);
Ok(())
}
// Change an existing publickey. The origin of this call is the owner
// of the publickey
Msgs::ChangePubKey(pubkey) => {
let store = AccountStore::new();
let caller_acct = store.get(ctx.sender(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let updated = acct.update_pubkey(pubkey);
store.put(updated.id(), updated, view);
Ok(())
}
}
}
fn handle_query(
&self,
path: &str,
key: Vec<u8>,
view: &StoreView,
) -> Result<Vec<u8>, anyhow::Error> {
ensure!(key.len() > 0, "bad account key");
// return a serialized account for the given id.
match path {
"/" => {
let account = key;
let store = AccountStore::new();
let req_acct = store.get(account, &view);
ensure!(req_acct.is_some(), "account not found");
let acct: Account = req_acct.unwrap();
let bits = acct.try_to_vec()?;
Ok(bits)
}
_ => bail!("{:} not found", path),
}
}
}
// Authenticator
pub struct AccountAuthenticator;
impl Authenticator for AccountAuthenticator {
fn validate(
&self,
tx: &SignedTransaction,
view: &StoreView,
) -> anyhow::Result<(), anyhow::Error> {
let caller = tx.sender();
let txnonce = tx.nonce();
let store = AccountStore::new();
let caller_acct = store.get(caller, &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let caller_pubkey = PublicKey::from_slice(&acct.pubkey[..]);
ensure!(
caller_pubkey.is_some(),
"problem decoding the user's public key"
);
// Validate signature
ensure!(
verify_tx_signature(&tx, &caller_pubkey.unwrap()),
"bad signature"
);
// Check nonce
ensure!(acct.nonce == txnonce, "nonce don't match");
Ok(())
}
fn increment_nonce(
&self,
tx: &SignedTransaction,
view: &mut StoreView,
) -> anyhow::Result<(), anyhow::Error> {
let caller = tx.sender();
let store = AccountStore::new();
let caller_acct = store.get(caller.clone(), &view);
ensure!(caller_acct.is_some(), "user not found");
let acct = caller_acct.unwrap();
let unonce = acct.increment_nonce();
store.put(caller.clone(), unonce, view);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use exonum_crypto::{gen_keypair, SecretKey};
use rapido_core::{testing_keypair, AppBuilder, TestKit};
fn create_account(name: &str) -> (Vec<u8>, PublicKeyBytes, SecretKey) {
let (pk, sk) = testing_keypair(name);
let acct = Account::create(&pk, true);
(acct.id(), acct.pubkey, sk)
}
fn get_genesis_accounts() -> Vec<[u8; 32]> {
vec![
create_account("bob").1,
create_account("alice").1,
create_account("tom").1,
]
}
fn gen_tx(user: Vec<u8>, secret_key: &SecretKey, nonce: u64) -> SignedTransaction |
#[test]
fn test_account_authenticator() {
// Check signature verification and nonce rules are enforced
let app = AppBuilder::new()
.set_authenticator(AccountAuthenticator {})
.with_app(AccountModule::new(get_genesis_accounts()));
let mut tester = TestKit::create(app);
tester.start();
let (bob, _bpk, bsk) = create_account("bob");
// Check signatures and correct nonce
let txs = &[
&gen_tx(bob.clone(), &bsk, 0u64),
&gen_tx(bob.clone(), &bsk, 1u64),
&gen_tx(bob.clone(), &bsk, 2u64),
&gen_tx(bob.clone(), &bsk, 3u64),
];
assert!(tester.check_tx(txs).is_ok());
// Wrong nonce
assert!(tester
.check_tx(&[&gen_tx(bob.clone(), &bsk, 5u64)])
.is_err());
// Bad signature: bob's ID but signed with wrong key
let (_rpk, rsk) = gen_keypair();
assert!(tester
.check_tx(&[&gen_tx(bob.clone(), &rsk, 0u64)])
.is_err());
}
#[test]
fn test_ta_account_create() {
// Bob will create an account for Carol
// Carol will try to create an account for Andy...but it'll fail
let app = AppBuilder::new()
.set_authenticator(AccountAuthenticator {})
.with_app(AccountModule::new(get_genesis_accounts()));
let mut tester = TestKit::create(app);
tester.start();
let (bob, _bpk, bsk) = create_account("bob");
let (carol, cpk, csk) = create_account("carol");
let (_andy, apk, _) = create_account("andy");
let mut tx =
SignedTransaction::create(bob.clone(), ACCOUNT_APP_NAME, Msgs::Create(cpk), 0u64);
tx.sign(&bsk);
assert!(tester.check_tx(&[&tx]).is_ok());
assert!(tester.commit_tx(&[&tx]).is_ok());
assert!(tester.query("rapido.account", carol.clone()).is_ok());
let mut tx1 =
SignedTransaction::create(carol.clone(), ACCOUNT_APP_NAME, Msgs::Create(apk), 0u64);
tx1.sign(&csk);
// Check passes...but
assert!(tester.check_tx(&[&tx1]).is_ok());
// deliver fails...carol is not a TA
assert!(tester.commit_tx(&[&tx1]).is_err());
}
#[test]
fn test_account_chng_pubkey() {
// Bob will change is pubkey. Make sure he can authenticate with it
assert!(true)
}
}
| {
let mut tx = SignedTransaction::create(
user,
ACCOUNT_APP_NAME,
Msgs::Create([1u8; 32]), // fake data
nonce,
);
tx.sign(&secret_key);
tx
} | identifier_body |
processor.go | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package system_updater
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"syscall/zx"
"syscall/zx/fdio"
fuchsiaio "fidl/fuchsia/io"
"fidl/fuchsia/mem"
"fidl/fuchsia/paver"
"fidl/fuchsia/pkg"
"fuchsia.googlesource.com/component"
"fuchsia.googlesource.com/syslog"
)
// When this suffix is found in the "images" file, it indicates a typed image
// that looks for all matches within the update package.
const ImageTypeSuffix = "[_type]"
func ConnectToPackageResolver(ctx *component.Context) (*pkg.PackageResolverWithCtxInterface, error) {
req, pxy, err := pkg.NewPackageResolverWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("control interface could not be acquired: %s", err)
return nil, err
}
ctx.ConnectToEnvService(req)
return pxy, nil
}
func ConnectToPaver(ctx *component.Context) (*paver.DataSinkWithCtxInterface, *paver.BootManagerWithCtxInterface, error) {
req, pxy, err := paver.NewPaverWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("control interface could not be acquired: %s", err)
return nil, nil, err
}
defer pxy.Close()
ctx.ConnectToEnvService(req)
dataSinkReq, dataSinkPxy, err := paver.NewDataSinkWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("data sink interface could not be acquired: %s", err)
return nil, nil, err
}
err = pxy.FindDataSink(context.Background(), dataSinkReq)
if err != nil {
syslog.Errorf("could not find data sink: %s", err)
return nil, nil, err
}
bootManagerReq, bootManagerPxy, err := paver.NewBootManagerWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("boot manager interface could not be acquired: %s", err)
return nil, nil, err
}
err = pxy.FindBootManager(context.Background(), bootManagerReq)
if err != nil {
syslog.Errorf("could not find boot manager: %s", err)
return nil, nil, err
}
return dataSinkPxy, bootManagerPxy, nil
}
// CacheUpdatePackage caches the requested, possibly merkle-pinned, update
// package URL and returns the pkgfs path to the package.
func CacheUpdatePackage(updateURL string, resolver *pkg.PackageResolverWithCtxInterface) (*UpdatePackage, error) {
dirPxy, err := resolvePackage(updateURL, resolver)
if err != nil {
return nil, err
}
pkg, err := NewUpdatePackage(dirPxy)
if err != nil {
return nil, err
}
merkle, err := pkg.Merkleroot()
if err != nil {
pkg.Close()
return nil, err
}
syslog.Infof("resolved %s as %s", updateURL, merkle)
return pkg, nil
}
// An image name and type string.
type Image struct {
// The base name of the image.
Name string
// A type string, default "".
Type string
}
// Returns an Image's filename in an update package.
//
// If a type is given, the filename in the package will be <name>_<type>, e.g.:
// name="foo", type="" -> "foo"
// name="foo", type="bar" -> "foo_bar"
func (i *Image) Filename() string {
if i.Type == "" {
return i.Name
}
return fmt.Sprintf("%s_%s", i.Name, i.Type)
}
func ParseRequirements(updatePkg *UpdatePackage) ([]string, []Image, error) {
// First, figure out which packages files we should parse
parseJson := true
pkgSrc, err := updatePkg.Open("packages.json")
// Fall back to line formatted packages file if packages.json not present
// Ideally, we'd fall back if specifically given the "file not found" error,
// though it's unclear which error that is (syscall.ENOENT did not work)
if err != nil {
syslog.Infof("parse_requirements: could not open packages.json, falling back to packages.")
parseJson = false
pkgSrc, err = updatePkg.Open("packages")
}
if err != nil {
return nil, nil, fmt.Errorf("error opening packages data file! %s", err)
}
defer pkgSrc.Close()
// Now that we know which packages file to parse, we can parse it.
pkgs := []string{}
if parseJson {
pkgs, err = ParsePackagesJson(pkgSrc)
} else {
pkgs, err = ParsePackagesLineFormatted(pkgSrc)
}
if err != nil {
return nil, nil, fmt.Errorf("failed to parse packages: %v", err)
}
// Finally, we parse images
imgSrc, err := os.Open(filepath.Join("/pkg", "data", "images"))
if err != nil {
return nil, nil, fmt.Errorf("error opening images data file! %s", err)
}
defer imgSrc.Close()
filenames, err := updatePkg.ListFiles()
if err != nil {
return nil, nil, fmt.Errorf("failed to list package files: %v", err)
}
imgs, err := ParseImages(imgSrc, filenames)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse images: %v", err)
}
return pkgs, imgs, nil
}
// Packages deserializes the packages.json file in the system update package.
// NOTE: Fields must be exported for json decoding.
type packages struct {
Version intOrStr `json:"version"`
// A list of fully qualified URIs.
URIs []string `json:"content"`
}
type intOrStr int
// Enables us to support version as either a string or int.
func (i *intOrStr) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err == nil {
b = []byte(s)
}
return json.Unmarshal(b, (*int)(i))
}
func ParsePackagesJson(pkgSrc io.ReadCloser) ([]string, error) {
bytes, err := ioutil.ReadAll(pkgSrc)
if err != nil {
return nil, fmt.Errorf("failed to read packages.json with error: %v", err)
}
var packages packages
if err := json.Unmarshal(bytes, &packages); err != nil {
return nil, fmt.Errorf("failed to unmarshal packages.json: %v", err)
}
if packages.Version != 1 {
return nil, fmt.Errorf("unsupported version of packages.json: %v", packages.Version)
}
return packages.URIs, nil
}
func ParsePackagesLineFormatted(pkgSrc io.ReadCloser) ([]string, error) {
pkgs := []string{}
rdr := bufio.NewReader(pkgSrc)
for {
l, err := rdr.ReadString('\n')
s := strings.TrimSpace(l)
if (err == nil || err == io.EOF) && len(s) > 0 {
entry := strings.Split(s, "=")
if len(entry) != 2 {
return nil, fmt.Errorf("parser: entry format %q", s)
} else {
pkgURI := fmt.Sprintf("fuchsia-pkg://fuchsia.com/%s?hash=%s", entry[0], entry[1])
pkgs = append(pkgs, pkgURI)
}
}
if err != nil {
if err != io.EOF {
return nil, fmt.Errorf("parser: got error reading packages file %s", err)
}
break
}
}
return pkgs, nil
}
// Finds all images that match |basename| in |filenames|.
//
// A match is one of:
// <basename>
// <basename>_<type>
func FindTypedImages(basename string, filenames []string) []Image {
var images []Image
for _, name := range filenames {
if strings.HasPrefix(name, basename) {
suffix := name[len(basename):]
if len(suffix) == 0 {
// The base name alone indicates default type (empty string).
images = append(images, Image{Name: basename, Type: ""})
} else if suffix[0] == '_' {
images = append(images, Image{Name: basename, Type: suffix[1:]})
}
}
}
return images
}
// Returns a list of images derived from the "images" file.
//
// Untyped images (those without the [_type] suffix) are included in the return
// slice no matter what.
//
// Typed images, on the other hand, will only include matches that exist in
// |filenames|.
func ParseImages(imgSrc io.ReadCloser, filenames []string) ([]Image, error) {
rdr := bufio.NewReader(imgSrc)
imgs := []Image{}
for {
l, err := rdr.ReadString('\n')
s := strings.TrimSpace(l)
if (err == nil || err == io.EOF) && len(s) > 0 {
if strings.HasSuffix(s, ImageTypeSuffix) {
// Typed image: look for all matching images in the package.
basename := strings.TrimSuffix(s, ImageTypeSuffix)
imgs = append(imgs, FindTypedImages(basename, filenames)...)
} else {
imgs = append(imgs, Image{Name: s, Type: ""})
}
}
if err != nil {
if err != io.EOF {
return nil, fmt.Errorf("parser: got error reading images file %s", err)
}
break
}
}
return imgs, nil
}
// Types to deserialize the update-mode file. NOTE: Fields must be exported for json decoding.
// Expected form for update-mode file is:
// {
// "version": "1",
// "content": {
// "mode": "normal" / "force-recovery",
// }
// }
type updateModeFileContent struct {
Mode string `json:"mode"`
}
type updateModeFile struct {
Version string `json:"version"`
Content updateModeFileContent `json:"content"`
}
// Type to describe the supported update modes.
// Note: exporting since this will be used in main (to be consistent with the rest of the code).
type UpdateMode string
const (
UpdateModeNormal UpdateMode = "normal"
UpdateModeForceRecovery = "force-recovery"
)
// We define custom error wrappers so we can test the proper error is being returned.
type updateModeNotSupportedError UpdateMode
func (e updateModeNotSupportedError) Error() string {
return fmt.Sprintf("unsupported update mode: %s", string(e))
}
type jsonUnmarshalError struct {
err error
}
func (e jsonUnmarshalError) Error() string {
return fmt.Sprintf("failed to unmarshal update-mode: %v", e.err)
}
// Note: exporting since this will be used in main (to be consistent with the rest of the code).
func ParseUpdateMode(updatePkg *UpdatePackage) (UpdateMode, error) {
// Fall back to normal if the update-mode file does not exist.
// Ideally, we'd fall back if specifically given the "file not found" error,
// though it's unclear which error that is (syscall.ENOENT did not work).
modeSrc, err := updatePkg.Open("update-mode")
if err != nil {
syslog.Infof("parse_update_mode: could not open update-mode file, assuming normal system update flow.")
return UpdateModeNormal, nil
}
defer modeSrc.Close()
// Read the raw bytes.
b, err := ioutil.ReadAll(modeSrc)
if err != nil {
return "", fmt.Errorf("failed to read mode file: %w", err)
}
// Convert to json.
var updateModeFile updateModeFile
if err := json.Unmarshal(b, &updateModeFile); err != nil {
return "", jsonUnmarshalError{err}
}
// Confirm we support this mode.
mode := UpdateMode(updateModeFile.Content.Mode)
if mode != UpdateModeNormal && mode != UpdateModeForceRecovery {
return "", updateModeNotSupportedError(mode)
}
return mode, nil
}
func FetchPackages(pkgs []string, resolver *pkg.PackageResolverWithCtxInterface) error {
var errCount int
var firstErr error
for _, pkgURI := range pkgs {
if err := fetchPackage(pkgURI, resolver); err != nil {
syslog.Errorf("fetch error: %s", err)
errCount++
if firstErr == nil {
firstErr = err
}
}
}
if errCount > 0 {
syslog.Errorf("system update failed, %d packages had errors", errCount)
return firstErr
}
return nil
}
func fetchPackage(pkgURI string, resolver *pkg.PackageResolverWithCtxInterface) error {
dirPxy, err := resolvePackage(pkgURI, resolver)
if dirPxy != nil {
dirPxy.Close(context.Background())
}
return err
}
func resolvePackage(pkgURI string, resolver *pkg.PackageResolverWithCtxInterface) (*fuchsiaio.DirectoryWithCtxInterface, error) {
selectors := []string{}
updatePolicy := pkg.UpdatePolicy{}
dirReq, dirPxy, err := fuchsiaio.NewDirectoryWithCtxInterfaceRequest()
if err != nil {
return nil, err
}
syslog.Infof("requesting %s from update system", pkgURI)
status, err := resolver.Resolve(context.Background(), pkgURI, selectors, updatePolicy, dirReq)
if err != nil {
dirPxy.Close(context.Background())
return nil, fmt.Errorf("fetch: Resolve error: %s", err)
}
statusErr := zx.Status(status)
if statusErr != zx.ErrOk {
dirPxy.Close(context.Background())
return nil, fmt.Errorf("fetch: Resolve status: %s", statusErr)
}
return dirPxy, nil
}
func ValidateUpdatePackage(updatePkg *UpdatePackage) error {
actual, err := updatePkg.ReadFile("board")
if err == nil {
expected, err := ioutil.ReadFile("/config/build-info/board")
if err != nil {
return err
}
if !bytes.Equal(actual, expected) {
return fmt.Errorf("parser: expected board name %s found %s", expected, actual)
}
} else if !os.IsNotExist(err) {
return err
}
return nil
}
func ValidateImgs(imgs []Image, updatePkg *UpdatePackage, updateMode UpdateMode) error {
found := false
for _, img := range []string{"zbi", "zbi.signed"} {
if _, err := updatePkg.Stat(img); err == nil {
found = true
break
}
}
// Update package with normal mode should have a `zbi` or `zbi.signed`.
if updateMode == UpdateModeNormal && !found {
return fmt.Errorf("parser: missing 'zbi' or 'zbi.signed', this is required in normal update mode")
}
// Update package with force-recovery mode should NOT have a `zbi` nor `zbi.signed`.
if updateMode == UpdateModeForceRecovery && found {
return fmt.Errorf("parser: contains 'zbi' or 'zbi.signed', this is not allowed in force-recovery update mode")
}
return nil
}
func WriteImgs(dataSink *paver.DataSinkWithCtxInterface, bootManager *paver.BootManagerWithCtxInterface, imgs []Image, updatePkg *UpdatePackage, updateMode UpdateMode, skipRecovery bool) error {
if updateMode == UpdateModeForceRecovery && skipRecovery == true {
return fmt.Errorf("can't force recovery when skipping recovery image installation")
}
syslog.Infof("Writing images %+v from update package", imgs)
activeConfig, err := queryActiveConfig(bootManager)
if err != nil {
return fmt.Errorf("querying target config: %v", err)
}
// If we have an active config (and thus support ABR), compute the
// target config. Otherwise set the target config to nil so we fall
// back to the legacy behavior where we write to the A partition, and
// attempt to write to the B partition.
var targetConfig *paver.Configuration
if activeConfig == nil {
targetConfig = nil
} else {
targetConfig, err = calculateTargetConfig(*activeConfig)
if err != nil {
return err
}
}
for _, img := range imgs {
if err := writeImg(dataSink, img, updatePkg, targetConfig, skipRecovery); err != nil {
return err
}
}
if updateMode == UpdateModeNormal && targetConfig != nil {
if err := setConfigurationActive(bootManager, *targetConfig); err != nil {
return err
}
} else if updateMode == UpdateModeForceRecovery {
for _, config := range []paver.Configuration{paver.ConfigurationA, paver.ConfigurationB} {
if err := setConfigurationUnbootable(bootManager, config); err != nil {
return fmt.Errorf("failed to set configuration unbootable: %v", err)
}
}
}
if err = flushDataSink(dataSink); err != nil {
return fmt.Errorf("img_writer: failed to flush data sink. %v", err)
}
if targetConfig != nil {
if err = flushBootManager(bootManager); err != nil {
return fmt.Errorf("img_writer: failed to flush boot manager. %v", err)
}
}
return nil
}
func flushDataSink(dataSink *paver.DataSinkWithCtxInterface) error {
status, err := dataSink.Flush(context.Background())
if err != nil {
return err
}
if statusErr := zx.Status(status); statusErr != zx.ErrOk {
return &zx.Error{Status: statusErr}
}
return nil
}
func flushBootManager(bootManager *paver.BootManagerWithCtxInterface) error {
status, err := bootManager.Flush(context.Background())
if err != nil {
return err
}
if statusErr := zx.Status(status); statusErr != zx.ErrOk {
return &zx.Error{Status: statusErr}
}
return nil
}
// queryActiveConfig asks the boot manager what partition the device booted
// from. If the device does not support ABR, it returns nil as the
// configuration.
func queryActiveConfig(bootManager *paver.BootManagerWithCtxInterface) (*paver.Configuration, error) {
activeConfig, err := bootManager.QueryActiveConfiguration(context.Background())
if err != nil {
// FIXME(fxb/43577): If the paver service runs into a problem
// creating a boot manager, it will close the channel with an
// epitaph. The error we are particularly interested in is
// whether or not the current device supports ABR.
// Unfortunately the go fidl bindings do not support epitaphs,
// so we can't actually check for this error condition. All we
// can observe is that the channel has been closed, so treat
// this condition as the device does not support ABR.
if err, ok := err.(*zx.Error); ok && err.Status == zx.ErrPeerClosed {
syslog.Warnf("img_writer: boot manager channel closed, assuming device does not support ABR")
return nil, nil
}
return nil, fmt.Errorf("querying active config: %v", err)
}
if activeConfig.Which() == paver.BootManagerQueryActiveConfigurationResultResponse {
syslog.Infof("img_writer: device supports ABR")
return &activeConfig.Response.Configuration, nil
}
statusErr := zx.Status(activeConfig.Err)
if statusErr == zx.ErrNotSupported {
// this device doesn't support ABR, so fall back to the
// legacy workflow.
syslog.Infof("img_writer: device does not support ABR")
return nil, nil
}
return nil, &zx.Error{Status: statusErr}
}
func calculateTargetConfig(activeConfig paver.Configuration) (*paver.Configuration, error) {
var config paver.Configuration
switch activeConfig {
case paver.ConfigurationA:
config = paver.ConfigurationB
case paver.ConfigurationB:
config = paver.ConfigurationA
case paver.ConfigurationRecovery:
syslog.Warnf("img_writer: configured for recovery, using partition A instead")
config = paver.ConfigurationA
default:
return nil, fmt.Errorf("img_writer: unknown config: %s", activeConfig)
}
syslog.Infof("img_writer: writing to configuration %s", config)
return &config, nil
}
func setConfigurationActive(bootManager *paver.BootManagerWithCtxInterface, targetConfig paver.Configuration) error {
syslog.Infof("img_writer: setting configuration %s active", targetConfig)
status, err := bootManager.SetConfigurationActive(context.Background(), targetConfig)
if err != nil {
return err
}
statusErr := zx.Status(status)
if statusErr != zx.ErrOk {
return &zx.Error{Status: statusErr}
}
return nil
}
func setConfigurationUnbootable(bootManager *paver.BootManagerWithCtxInterface, targetConfig paver.Configuration) error {
syslog.Infof("img_writer: setting configuration %s unbootable", targetConfig)
status, err := bootManager.SetConfigurationUnbootable(context.Background(), targetConfig)
if err != nil {
return err
}
statusErr := zx.Status(status)
if statusErr != zx.ErrOk {
return &zx.Error{Status: statusErr}
}
return nil
}
func writeAsset(svc *paver.DataSinkWithCtxInterface, configuration paver.Configuration, asset paver.Asset, payload *mem.Buffer) error {
syslog.Infof("img_writer: writing asset %q to %q", asset, configuration)
status, err := svc.WriteAsset(context.Background(), configuration, asset, *payload)
if err != nil {
syslog.Errorf("img_writer: failed to write asset %q: %s", asset, err)
return err
}
statusErr := zx.Status(status)
if statusErr != zx.ErrOk {
return &zx.Error{Status: statusErr}
}
return nil
}
func writeImg(svc *paver.DataSinkWithCtxInterface, img Image, updatePkg *UpdatePackage, targetConfig *paver.Configuration, skipRecovery bool) error {
f, err := updatePkg.Open(img.Filename())
if err != nil {
syslog.Warnf("img_writer: %q image not found, skipping", img.Filename())
return nil
}
if fi, err := f.Stat(); err != nil || fi.Size() == 0 {
syslog.Warnf("img_writer: %q zero length, skipping", img.Filename())
return nil
}
defer f.Close()
buffer, err := bufferForFile(f)
if err != nil {
return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
}
defer buffer.Vmo.Close()
var writeImg func() error
switch img.Name {
case "zbi", "zbi.signed":
childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
if err != nil {
return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
}
buffer2 := &mem.Buffer{
Vmo: childVmo,
Size: buffer.Size,
}
defer buffer2.Vmo.Close()
if targetConfig == nil {
// device does not support ABR, so write the ZBI to the
// A partition. We also try to write to the B partition
// in order to be forwards compatible with devices that
// will eventually support ABR, but we ignore errors
// because some devices won't have a B partition.
writeImg = func() error {
if err := writeAsset(svc, paver.ConfigurationA, paver.AssetKernel, buffer); err != nil {
return err
}
if err := writeAsset(svc, paver.ConfigurationB, paver.AssetKernel, buffer2); err != nil {
asZxErr, ok := err.(*zx.Error)
if ok && asZxErr.Status == zx.ErrNotSupported {
syslog.Warnf("img_writer: skipping writing %q to B: %v", img.Filename(), err)
} else {
return err
}
}
return nil
}
} else {
// device supports ABR, so only write the ZB to the
// target partition.
writeImg = func() error {
return writeAsset(svc, *targetConfig, paver.AssetKernel, buffer)
}
}
case "fuchsia.vbmeta":
childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
if err != nil {
return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
}
buffer2 := &mem.Buffer{
Vmo: childVmo,
Size: buffer.Size,
}
defer buffer2.Vmo.Close()
if targetConfig == nil {
// device does not support ABR, so write vbmeta to the
// A partition, and try to write to the B partiton. See
// the comment in the zbi case for more details.
if err := writeAsset(svc, paver.ConfigurationA,
paver.AssetVerifiedBootMetadata, buffer); err != nil {
return err
}
return writeAsset(svc, paver.ConfigurationB, paver.AssetVerifiedBootMetadata, buffer2)
} else {
// device supports ABR, so write the vbmeta to the
// target partition.
writeImg = func() error {
return writeAsset(svc, *targetConfig, paver.AssetVerifiedBootMetadata, buffer2)
}
}
case "zedboot", "zedboot.signed":
if skipRecovery {
return nil
} else {
writeImg = func() error {
return writeAsset(svc, paver.ConfigurationRecovery, paver.AssetKernel, buffer)
}
}
case "recovery.vbmeta":
if skipRecovery {
return nil
} else {
writeImg = func() error {
return writeAsset(svc, paver.ConfigurationRecovery, paver.AssetVerifiedBootMetadata, buffer)
}
}
case "bootloader":
// Keep support for update packages still using the older "bootloader"
// file, which is handled identically to "firmware" but without type
// support so img.Type will always be "".
fallthrough
case "firmware":
writeImg = func() error {
result, err := svc.WriteFirmware(context.Background(), img.Type, *buffer)
if err != nil {
return err
}
if result.Which() == paver.WriteFirmwareResultUnsupportedType {
syslog.Infof("img_writer: skipping unsupported firmware type %q", img.Type)
// Return nil here to skip unsupported types rather than failing.
// This lets us add new types in the future without breaking
// the update flow from older devices.
return nil
}
statusErr := zx.Status(result.Status)
if statusErr != zx.ErrOk {
return fmt.Errorf("%s", statusErr)
}
return nil
}
case "board":
return nil
default:
return fmt.Errorf("unrecognized image %q", img.Filename())
}
syslog.Infof("img_writer: writing %q from update package", img.Filename())
if err := writeImg(); err != nil {
return fmt.Errorf("img_writer: error writing %q: %q", img.Filename(), err)
}
syslog.Infof("img_writer: wrote %q successfully", img.Filename())
return nil
}
// bufferForFile returns the contents of f as a fuchsia.mem.Buffer backed by
// a resizable copy-on-write child of the file's VMO.
func bufferForFile(f *os.File) (*mem.Buffer, error) {
	// NOTE(review): a failed single-value type assertion panics rather than
	// yielding nil, so the nil check below only guards FDIOForFD itself
	// returning a nil *fdio.File — confirm FDIOForFD's contract.
	fio := syscall.FDIOForFD(int(f.Fd())).(*fdio.File)
	if fio == nil {
		return nil, fmt.Errorf("not fdio file")
	}
	status, buffer, err := fio.GetBuffer(fuchsiaio.VmoFlagRead)
	if err != nil {
		return nil, fmt.Errorf("GetBuffer fidl error: %q", err)
	}
	statusErr := zx.Status(status)
	if statusErr != zx.ErrOk {
		return nil, fmt.Errorf("GetBuffer error: %q", statusErr)
	}
	// The parent VMO is only needed long enough to create the child.
	defer buffer.Vmo.Close()
	// VMOs acquired over FIDL are not guaranteed to be resizable, so create a child VMO that is.
	childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
	if err != nil {
		return nil, err
	}
	return &mem.Buffer{
		Vmo:  childVmo,
		Size: buffer.Size,
	}, nil
}
// UpdateCurrentChannel persists the update channel info for a successful update
func UpdateCurrentChannel() error {
targetPath := "/misc/ota/target_channel.json"
contents, err := ioutil.ReadFile(targetPath)
if err != nil {
return fmt.Errorf("no target channel recorded in %v: %w", targetPath, err)
}
currentPath := "/misc/ota/current_channel.json"
partPath := currentPath + ".part"
f, err := os.Create(partPath)
if err != nil {
return fmt.Errorf("unable to write current channel to %v: %w", partPath, err)
}
defer f.Close()
buf := bytes.NewBuffer(contents)
_, err = buf.WriteTo(f)
if err != nil |
f.Sync()
f.Close()
if err := os.Rename(partPath, currentPath); err != nil {
return fmt.Errorf("error moving %v to %v: %w", partPath, currentPath, err)
}
return nil
}
| {
return fmt.Errorf("unable to write current channel to %v: %w", currentPath, err)
} | conditional_block |
processor.go | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package system_updater
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"syscall/zx"
"syscall/zx/fdio"
fuchsiaio "fidl/fuchsia/io"
"fidl/fuchsia/mem"
"fidl/fuchsia/paver"
"fidl/fuchsia/pkg"
"fuchsia.googlesource.com/component"
"fuchsia.googlesource.com/syslog"
)
// When this suffix is found in the "images" file, it indicates a typed image
// that looks for all matches within the update package.
const ImageTypeSuffix = "[_type]"
// ConnectToPackageResolver opens a channel to the environment's
// fuchsia.pkg.PackageResolver service and returns the client proxy.
func ConnectToPackageResolver(ctx *component.Context) (*pkg.PackageResolverWithCtxInterface, error) {
	request, proxy, err := pkg.NewPackageResolverWithCtxInterfaceRequest()
	if err != nil {
		syslog.Errorf("control interface could not be acquired: %s", err)
		return nil, err
	}
	ctx.ConnectToEnvService(request)
	return proxy, nil
}
func ConnectToPaver(ctx *component.Context) (*paver.DataSinkWithCtxInterface, *paver.BootManagerWithCtxInterface, error) {
req, pxy, err := paver.NewPaverWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("control interface could not be acquired: %s", err)
return nil, nil, err
}
defer pxy.Close()
ctx.ConnectToEnvService(req)
dataSinkReq, dataSinkPxy, err := paver.NewDataSinkWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("data sink interface could not be acquired: %s", err)
return nil, nil, err
}
err = pxy.FindDataSink(context.Background(), dataSinkReq)
if err != nil {
syslog.Errorf("could not find data sink: %s", err)
return nil, nil, err
}
| bootManagerReq, bootManagerPxy, err := paver.NewBootManagerWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("boot manager interface could not be acquired: %s", err)
return nil, nil, err
}
err = pxy.FindBootManager(context.Background(), bootManagerReq)
if err != nil {
syslog.Errorf("could not find boot manager: %s", err)
return nil, nil, err
}
return dataSinkPxy, bootManagerPxy, nil
}
// CacheUpdatePackage caches the requested, possibly merkle-pinned, update
// package URL and returns a handle to the cached package.
//
// On failure after the package is opened, the handle is closed here; on
// success the open handle is returned to the caller.
func CacheUpdatePackage(updateURL string, resolver *pkg.PackageResolverWithCtxInterface) (*UpdatePackage, error) {
	dirPxy, err := resolvePackage(updateURL, resolver)
	if err != nil {
		return nil, err
	}
	// Renamed from "pkg": the old local shadowed the imported
	// fidl/fuchsia/pkg package within this function.
	updatePkg, err := NewUpdatePackage(dirPxy)
	if err != nil {
		return nil, err
	}
	merkle, err := updatePkg.Merkleroot()
	if err != nil {
		updatePkg.Close()
		return nil, err
	}
	syslog.Infof("resolved %s as %s", updateURL, merkle)
	return updatePkg, nil
}
// An image name and type string, identifying one payload file inside an
// update package (see Filename for how the two combine).
type Image struct {
	// The base name of the image, e.g. "zbi" or "firmware".
	Name string
	// A type string, default "". Non-empty only for typed (e.g. firmware)
	// images expanded from the "[_type]" suffix in the images manifest.
	Type string
}
// Filename returns the name this image has inside an update package:
// "<Name>" when Type is empty, otherwise "<Name>_<Type>", e.g.:
//   name="foo", type=""    -> "foo"
//   name="foo", type="bar" -> "foo_bar"
func (i *Image) Filename() string {
	name := i.Name
	if i.Type != "" {
		name += "_" + i.Type
	}
	return name
}
// ParseRequirements reads the update package's requirements: the list of
// package URIs to fetch and the list of images to pave.
//
// Package URIs come from packages.json when present, otherwise from the
// legacy line-formatted "packages" file. Image names come from this
// component's own /pkg/data/images manifest, expanded against the files
// actually present in the update package.
func ParseRequirements(updatePkg *UpdatePackage) ([]string, []Image, error) {
	// First, figure out which packages files we should parse
	parseJson := true
	pkgSrc, err := updatePkg.Open("packages.json")
	// Fall back to line formatted packages file if packages.json not present
	// Ideally, we'd fall back if specifically given the "file not found" error,
	// though it's unclear which error that is (syscall.ENOENT did not work)
	if err != nil {
		syslog.Infof("parse_requirements: could not open packages.json, falling back to packages.")
		parseJson = false
		pkgSrc, err = updatePkg.Open("packages")
	}
	if err != nil {
		return nil, nil, fmt.Errorf("error opening packages data file! %s", err)
	}
	defer pkgSrc.Close()
	// Now that we know which packages file to parse, we can parse it.
	pkgs := []string{}
	if parseJson {
		pkgs, err = ParsePackagesJson(pkgSrc)
	} else {
		pkgs, err = ParsePackagesLineFormatted(pkgSrc)
	}
	if err != nil {
		return nil, nil, fmt.Errorf("failed to parse packages: %v", err)
	}
	// Finally, we parse images
	imgSrc, err := os.Open(filepath.Join("/pkg", "data", "images"))
	if err != nil {
		return nil, nil, fmt.Errorf("error opening images data file! %s", err)
	}
	defer imgSrc.Close()
	// The package's file list is needed to expand "[_type]" image entries.
	filenames, err := updatePkg.ListFiles()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to list package files: %v", err)
	}
	imgs, err := ParseImages(imgSrc, filenames)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to parse images: %v", err)
	}
	return pkgs, imgs, nil
}
// Packages deserializes the packages.json file in the system update package.
// NOTE: Fields must be exported for json decoding.
type packages struct {
	// Version is the packages.json format version; only 1 is accepted.
	Version intOrStr `json:"version"`
	// A list of fully qualified URIs.
	URIs []string `json:"content"`
}
// intOrStr is an int that additionally accepts its JSON value written as a
// quoted string, so both 1 and "1" decode to 1.
type intOrStr int

// UnmarshalJSON implements json.Unmarshaler. A quoted payload is first
// unwrapped to its raw text, then parsed as an int either way.
func (i *intOrStr) UnmarshalJSON(b []byte) error {
	var quoted string
	if json.Unmarshal(b, &quoted) == nil {
		b = []byte(quoted)
	}
	return json.Unmarshal(b, (*int)(i))
}
// ParsePackagesJson reads a packages.json payload and returns the list of
// fully qualified package URIs it declares. Only format version 1 is
// accepted.
func ParsePackagesJson(pkgSrc io.ReadCloser) ([]string, error) {
	raw, err := ioutil.ReadAll(pkgSrc)
	if err != nil {
		return nil, fmt.Errorf("failed to read packages.json with error: %v", err)
	}
	var parsed packages
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return nil, fmt.Errorf("failed to unmarshal packages.json: %v", err)
	}
	if parsed.Version != 1 {
		return nil, fmt.Errorf("unsupported version of packages.json: %v", parsed.Version)
	}
	return parsed.URIs, nil
}
// ParsePackagesLineFormatted parses the legacy line-oriented "packages"
// file, where each non-blank line is "<name>=<merkle>", and returns the
// corresponding fuchsia-pkg URIs.
func ParsePackagesLineFormatted(pkgSrc io.ReadCloser) ([]string, error) {
	pkgs := []string{}
	reader := bufio.NewReader(pkgSrc)
	for {
		line, readErr := reader.ReadString('\n')
		entry := strings.TrimSpace(line)
		if (readErr == nil || readErr == io.EOF) && len(entry) > 0 {
			parts := strings.Split(entry, "=")
			if len(parts) != 2 {
				return nil, fmt.Errorf("parser: entry format %q", entry)
			}
			pkgs = append(pkgs, fmt.Sprintf("fuchsia-pkg://fuchsia.com/%s?hash=%s", parts[0], parts[1]))
		}
		if readErr == io.EOF {
			return pkgs, nil
		}
		if readErr != nil {
			return nil, fmt.Errorf("parser: got error reading packages file %s", readErr)
		}
	}
}
// FindTypedImages returns every entry of |filenames| that matches
// |basename| exactly (the default, empty type) or has the form
// "<basename>_<type>".
func FindTypedImages(basename string, filenames []string) []Image {
	var matches []Image
	for _, candidate := range filenames {
		if !strings.HasPrefix(candidate, basename) {
			continue
		}
		rest := candidate[len(basename):]
		switch {
		case rest == "":
			// Bare base name: the default (empty) type.
			matches = append(matches, Image{Name: basename, Type: ""})
		case rest[0] == '_':
			matches = append(matches, Image{Name: basename, Type: rest[1:]})
		}
	}
	return matches
}
// ParseImages returns the list of images named by the "images" manifest.
//
// Plain entries are returned as-is with the default (empty) type. Entries
// ending in ImageTypeSuffix expand to every matching typed image actually
// present in |filenames|.
func ParseImages(imgSrc io.ReadCloser, filenames []string) ([]Image, error) {
	imgs := []Image{}
	reader := bufio.NewReader(imgSrc)
	for {
		line, readErr := reader.ReadString('\n')
		entry := strings.TrimSpace(line)
		if (readErr == nil || readErr == io.EOF) && len(entry) > 0 {
			if strings.HasSuffix(entry, ImageTypeSuffix) {
				// Typed image: expand against the package's contents.
				base := strings.TrimSuffix(entry, ImageTypeSuffix)
				imgs = append(imgs, FindTypedImages(base, filenames)...)
			} else {
				imgs = append(imgs, Image{Name: entry, Type: ""})
			}
		}
		if readErr == io.EOF {
			return imgs, nil
		}
		if readErr != nil {
			return nil, fmt.Errorf("parser: got error reading images file %s", readErr)
		}
	}
}
// Types to deserialize the update-mode file. NOTE: Fields must be exported for json decoding.
// Expected form for update-mode file is:
// {
//   "version": "1",
//   "content": {
//     "mode": "normal" / "force-recovery",
//   }
// }
type updateModeFileContent struct {
	// Mode holds the requested update flow; see UpdateMode for valid values.
	Mode string `json:"mode"`
}

type updateModeFile struct {
	Version string                `json:"version"`
	Content updateModeFileContent `json:"content"`
}
// UpdateMode describes the supported update modes.
// Note: exporting since this will be used in main (to be consistent with the rest of the code).
type UpdateMode string

const (
	// UpdateModeNormal is the regular A/B system update flow.
	UpdateModeNormal UpdateMode = "normal"
	// UpdateModeForceRecovery writes only recovery images and marks the A/B
	// configurations unbootable. The explicit UpdateMode type is spelled out
	// here: a const spec with its own value does not inherit the previous
	// spec's type, so this constant was silently untyped before.
	UpdateModeForceRecovery UpdateMode = "force-recovery"
)
// We define custom error wrappers so we can test the proper error is being returned.

// updateModeNotSupportedError reports an update-mode value other than
// "normal" or "force-recovery".
type updateModeNotSupportedError UpdateMode

// Error implements the error interface.
func (e updateModeNotSupportedError) Error() string {
	return fmt.Sprintf("unsupported update mode: %s", string(e))
}

// jsonUnmarshalError wraps a JSON decoding failure of the update-mode file.
type jsonUnmarshalError struct {
	err error
}

// Error implements the error interface.
func (e jsonUnmarshalError) Error() string {
	return fmt.Sprintf("failed to unmarshal update-mode: %v", e.err)
}
// ParseUpdateMode reads the optional "update-mode" file from the update
// package and returns the requested UpdateMode. A missing file selects the
// normal flow; a malformed file yields jsonUnmarshalError and an unknown
// mode yields updateModeNotSupportedError.
// Note: exporting since this will be used in main (to be consistent with the rest of the code).
func ParseUpdateMode(updatePkg *UpdatePackage) (UpdateMode, error) {
	// Fall back to normal if the update-mode file does not exist.
	// Ideally, we'd fall back if specifically given the "file not found" error,
	// though it's unclear which error that is (syscall.ENOENT did not work).
	modeSrc, err := updatePkg.Open("update-mode")
	if err != nil {
		syslog.Infof("parse_update_mode: could not open update-mode file, assuming normal system update flow.")
		return UpdateModeNormal, nil
	}
	defer modeSrc.Close()
	// Read the raw bytes.
	b, err := ioutil.ReadAll(modeSrc)
	if err != nil {
		return "", fmt.Errorf("failed to read mode file: %w", err)
	}
	// Convert to json.
	var updateModeFile updateModeFile
	if err := json.Unmarshal(b, &updateModeFile); err != nil {
		return "", jsonUnmarshalError{err}
	}
	// Confirm we support this mode.
	mode := UpdateMode(updateModeFile.Content.Mode)
	if mode != UpdateModeNormal && mode != UpdateModeForceRecovery {
		return "", updateModeNotSupportedError(mode)
	}
	return mode, nil
}
// FetchPackages resolves every URI in pkgs, logging each failure. It keeps
// going after an error so that all packages are attempted; if any failed,
// the first error encountered is returned.
func FetchPackages(pkgs []string, resolver *pkg.PackageResolverWithCtxInterface) error {
	failures := 0
	var firstErr error
	for _, uri := range pkgs {
		err := fetchPackage(uri, resolver)
		if err == nil {
			continue
		}
		syslog.Errorf("fetch error: %s", err)
		failures++
		if firstErr == nil {
			firstErr = err
		}
	}
	if failures == 0 {
		return nil
	}
	syslog.Errorf("system update failed, %d packages had errors", failures)
	return firstErr
}
// fetchPackage resolves pkgURI for its side effect (populating the package
// cache) and immediately drops the directory handle it receives.
func fetchPackage(pkgURI string, resolver *pkg.PackageResolverWithCtxInterface) error {
	dirPxy, err := resolvePackage(pkgURI, resolver)
	// Close even on partial success so the channel is never leaked.
	if dirPxy != nil {
		dirPxy.Close(context.Background())
	}
	return err
}
// resolvePackage asks the package resolver to resolve pkgURI and returns a
// proxy to the resolved package's directory. The caller owns the returned
// proxy and must Close it; on error the proxy is closed here.
func resolvePackage(pkgURI string, resolver *pkg.PackageResolverWithCtxInterface) (*fuchsiaio.DirectoryWithCtxInterface, error) {
	selectors := []string{}
	updatePolicy := pkg.UpdatePolicy{}
	dirReq, dirPxy, err := fuchsiaio.NewDirectoryWithCtxInterfaceRequest()
	if err != nil {
		return nil, err
	}
	syslog.Infof("requesting %s from update system", pkgURI)
	status, err := resolver.Resolve(context.Background(), pkgURI, selectors, updatePolicy, dirReq)
	if err != nil {
		// FIDL transport failure.
		dirPxy.Close(context.Background())
		return nil, fmt.Errorf("fetch: Resolve error: %s", err)
	}
	// The call itself succeeded; check the application-level zx status.
	statusErr := zx.Status(status)
	if statusErr != zx.ErrOk {
		dirPxy.Close(context.Background())
		return nil, fmt.Errorf("fetch: Resolve status: %s", statusErr)
	}
	return dirPxy, nil
}
// ValidateUpdatePackage checks that the update package targets this device:
// when the package contains a "board" file, its contents must equal the
// board name recorded in /config/build-info/board. A package without a
// board file is accepted.
func ValidateUpdatePackage(updatePkg *UpdatePackage) error {
	actual, err := updatePkg.ReadFile("board")
	if err == nil {
		expected, err := ioutil.ReadFile("/config/build-info/board")
		if err != nil {
			return err
		}
		if !bytes.Equal(actual, expected) {
			return fmt.Errorf("parser: expected board name %s found %s", expected, actual)
		}
	} else if !os.IsNotExist(err) {
		// Any failure other than "board file absent" is fatal.
		return err
	}
	return nil
}
// ValidateImgs checks mode/image consistency: a normal-mode update package
// must contain a 'zbi' or 'zbi.signed' image, while a force-recovery
// package must contain neither.
func ValidateImgs(imgs []Image, updatePkg *UpdatePackage, updateMode UpdateMode) error {
	hasZbi := false
	for _, name := range []string{"zbi", "zbi.signed"} {
		if _, err := updatePkg.Stat(name); err == nil {
			hasZbi = true
			break
		}
	}
	switch {
	case updateMode == UpdateModeNormal && !hasZbi:
		return fmt.Errorf("parser: missing 'zbi' or 'zbi.signed', this is required in normal update mode")
	case updateMode == UpdateModeForceRecovery && hasZbi:
		return fmt.Errorf("parser: contains 'zbi' or 'zbi.signed', this is not allowed in force-recovery update mode")
	}
	return nil
}
// WriteImgs paves every image in imgs and then commits the boot state: in
// normal mode the freshly written target configuration is marked active; in
// force-recovery mode both A and B are marked unbootable. Finally the data
// sink (and, on ABR devices, the boot manager) is flushed so the writes take
// effect.
func WriteImgs(dataSink *paver.DataSinkWithCtxInterface, bootManager *paver.BootManagerWithCtxInterface, imgs []Image, updatePkg *UpdatePackage, updateMode UpdateMode, skipRecovery bool) error {
	// Force-recovery only writes recovery images, so skipping them is
	// contradictory.
	if updateMode == UpdateModeForceRecovery && skipRecovery == true {
		return fmt.Errorf("can't force recovery when skipping recovery image installation")
	}
	syslog.Infof("Writing images %+v from update package", imgs)
	activeConfig, err := queryActiveConfig(bootManager)
	if err != nil {
		return fmt.Errorf("querying target config: %v", err)
	}
	// If we have an active config (and thus support ABR), compute the
	// target config. Otherwise set the target config to nil so we fall
	// back to the legacy behavior where we write to the A partition, and
	// attempt to write to the B partition.
	var targetConfig *paver.Configuration
	if activeConfig == nil {
		targetConfig = nil
	} else {
		targetConfig, err = calculateTargetConfig(*activeConfig)
		if err != nil {
			return err
		}
	}
	for _, img := range imgs {
		if err := writeImg(dataSink, img, updatePkg, targetConfig, skipRecovery); err != nil {
			return err
		}
	}
	if updateMode == UpdateModeNormal && targetConfig != nil {
		if err := setConfigurationActive(bootManager, *targetConfig); err != nil {
			return err
		}
	} else if updateMode == UpdateModeForceRecovery {
		// Disable both A/B slots so the bootloader falls back to recovery.
		for _, config := range []paver.Configuration{paver.ConfigurationA, paver.ConfigurationB} {
			if err := setConfigurationUnbootable(bootManager, config); err != nil {
				return fmt.Errorf("failed to set configuration unbootable: %v", err)
			}
		}
	}
	if err = flushDataSink(dataSink); err != nil {
		return fmt.Errorf("img_writer: failed to flush data sink. %v", err)
	}
	if targetConfig != nil {
		if err = flushBootManager(bootManager); err != nil {
			return fmt.Errorf("img_writer: failed to flush boot manager. %v", err)
		}
	}
	return nil
}
// flushDataSink commits all buffered paver data-sink writes, converting a
// non-OK zx status into a *zx.Error.
func flushDataSink(dataSink *paver.DataSinkWithCtxInterface) error {
	status, err := dataSink.Flush(context.Background())
	if err != nil {
		return err
	}
	st := zx.Status(status)
	if st == zx.ErrOk {
		return nil
	}
	return &zx.Error{Status: st}
}
// flushBootManager commits all pending boot-manager state changes,
// converting a non-OK zx status into a *zx.Error.
func flushBootManager(bootManager *paver.BootManagerWithCtxInterface) error {
	status, err := bootManager.Flush(context.Background())
	if err != nil {
		return err
	}
	st := zx.Status(status)
	if st == zx.ErrOk {
		return nil
	}
	return &zx.Error{Status: st}
}
// queryActiveConfig asks the boot manager what partition the device booted
// from. If the device does not support ABR, it returns nil as the
// configuration.
func queryActiveConfig(bootManager *paver.BootManagerWithCtxInterface) (*paver.Configuration, error) {
	activeConfig, err := bootManager.QueryActiveConfiguration(context.Background())
	if err != nil {
		// FIXME(fxb/43577): If the paver service runs into a problem
		// creating a boot manager, it will close the channel with an
		// epitaph. The error we are particularly interested in is
		// whether or not the current device supports ABR.
		// Unfortunately the go fidl bindings do not support epitaphs,
		// so we can't actually check for this error condition. All we
		// can observe is that the channel has been closed, so treat
		// this condition as the device does not support ABR.
		if err, ok := err.(*zx.Error); ok && err.Status == zx.ErrPeerClosed {
			syslog.Warnf("img_writer: boot manager channel closed, assuming device does not support ABR")
			return nil, nil
		}
		return nil, fmt.Errorf("querying active config: %v", err)
	}
	if activeConfig.Which() == paver.BootManagerQueryActiveConfigurationResultResponse {
		syslog.Infof("img_writer: device supports ABR")
		return &activeConfig.Response.Configuration, nil
	}
	// The call succeeded but carried an error status; NOT_SUPPORTED selects
	// the legacy (non-ABR) flow, anything else is a real failure.
	statusErr := zx.Status(activeConfig.Err)
	if statusErr == zx.ErrNotSupported {
		// this device doesn't support ABR, so fall back to the
		// legacy workflow.
		syslog.Infof("img_writer: device does not support ABR")
		return nil, nil
	}
	return nil, &zx.Error{Status: statusErr}
}
// calculateTargetConfig picks the A/B slot to write this update into: the
// slot the device is NOT currently running from. A device booted from
// recovery is sent to slot A.
func calculateTargetConfig(activeConfig paver.Configuration) (*paver.Configuration, error) {
	var target paver.Configuration
	switch activeConfig {
	case paver.ConfigurationA:
		target = paver.ConfigurationB
	case paver.ConfigurationB:
		target = paver.ConfigurationA
	case paver.ConfigurationRecovery:
		syslog.Warnf("img_writer: configured for recovery, using partition A instead")
		target = paver.ConfigurationA
	default:
		return nil, fmt.Errorf("img_writer: unknown config: %s", activeConfig)
	}
	syslog.Infof("img_writer: writing to configuration %s", target)
	return &target, nil
}
// setConfigurationActive tells the boot manager to boot targetConfig on the
// next reboot, converting a non-OK zx status into a *zx.Error.
func setConfigurationActive(bootManager *paver.BootManagerWithCtxInterface, targetConfig paver.Configuration) error {
	syslog.Infof("img_writer: setting configuration %s active", targetConfig)
	status, err := bootManager.SetConfigurationActive(context.Background(), targetConfig)
	if err != nil {
		return err
	}
	statusErr := zx.Status(status)
	if statusErr != zx.ErrOk {
		return &zx.Error{Status: statusErr}
	}
	return nil
}
// setConfigurationUnbootable marks targetConfig as not bootable (used by
// force-recovery to disable both A/B slots), converting a non-OK zx status
// into a *zx.Error.
func setConfigurationUnbootable(bootManager *paver.BootManagerWithCtxInterface, targetConfig paver.Configuration) error {
	syslog.Infof("img_writer: setting configuration %s unbootable", targetConfig)
	status, err := bootManager.SetConfigurationUnbootable(context.Background(), targetConfig)
	if err != nil {
		return err
	}
	statusErr := zx.Status(status)
	if statusErr != zx.ErrOk {
		return &zx.Error{Status: statusErr}
	}
	return nil
}
// writeAsset writes payload as the given asset (kernel or verified boot
// metadata) into the given configuration via the paver, converting a non-OK
// zx status into a *zx.Error.
func writeAsset(svc *paver.DataSinkWithCtxInterface, configuration paver.Configuration, asset paver.Asset, payload *mem.Buffer) error {
	syslog.Infof("img_writer: writing asset %q to %q", asset, configuration)
	status, err := svc.WriteAsset(context.Background(), configuration, asset, *payload)
	if err != nil {
		syslog.Errorf("img_writer: failed to write asset %q: %s", asset, err)
		return err
	}
	statusErr := zx.Status(status)
	if statusErr != zx.ErrOk {
		return &zx.Error{Status: statusErr}
	}
	return nil
}
// writeImg paves a single image from the update package.
//
// Missing or zero-length images are skipped with a warning. The destination
// depends on the image name: zbi/vbmeta go to the target A/B configuration
// (or to A with a best-effort B write on non-ABR devices), zedboot/recovery
// images go to the recovery configuration unless skipRecovery is set, and
// firmware/bootloader payloads go through WriteFirmware, which tolerates
// unsupported types. The "board" file is validation-only and never paved.
//
// Fix: the f.Close defer is now registered before the Stat check; previously
// the zero-length/stat-error early return leaked the file handle.
func writeImg(svc *paver.DataSinkWithCtxInterface, img Image, updatePkg *UpdatePackage, targetConfig *paver.Configuration, skipRecovery bool) error {
	f, err := updatePkg.Open(img.Filename())
	if err != nil {
		syslog.Warnf("img_writer: %q image not found, skipping", img.Filename())
		return nil
	}
	defer f.Close()
	if fi, err := f.Stat(); err != nil || fi.Size() == 0 {
		syslog.Warnf("img_writer: %q zero length, skipping", img.Filename())
		return nil
	}
	buffer, err := bufferForFile(f)
	if err != nil {
		return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
	}
	defer buffer.Vmo.Close()
	var writeImg func() error
	switch img.Name {
	case "zbi", "zbi.signed":
		// A second VMO is cloned for the B-partition write — presumably
		// because each WriteAsset call takes its own handle; confirm against
		// the paver FIDL contract.
		childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
		if err != nil {
			return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
		}
		buffer2 := &mem.Buffer{
			Vmo:  childVmo,
			Size: buffer.Size,
		}
		defer buffer2.Vmo.Close()
		if targetConfig == nil {
			// device does not support ABR, so write the ZBI to the
			// A partition. We also try to write to the B partition
			// in order to be forwards compatible with devices that
			// will eventually support ABR, but we ignore errors
			// because some devices won't have a B partition.
			writeImg = func() error {
				if err := writeAsset(svc, paver.ConfigurationA, paver.AssetKernel, buffer); err != nil {
					return err
				}
				if err := writeAsset(svc, paver.ConfigurationB, paver.AssetKernel, buffer2); err != nil {
					asZxErr, ok := err.(*zx.Error)
					if ok && asZxErr.Status == zx.ErrNotSupported {
						syslog.Warnf("img_writer: skipping writing %q to B: %v", img.Filename(), err)
					} else {
						return err
					}
				}
				return nil
			}
		} else {
			// device supports ABR, so only write the ZBI to the
			// target partition.
			writeImg = func() error {
				return writeAsset(svc, *targetConfig, paver.AssetKernel, buffer)
			}
		}
	case "fuchsia.vbmeta":
		childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
		if err != nil {
			return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
		}
		buffer2 := &mem.Buffer{
			Vmo:  childVmo,
			Size: buffer.Size,
		}
		defer buffer2.Vmo.Close()
		if targetConfig == nil {
			// device does not support ABR, so write vbmeta to the
			// A partition, and try to write to the B partiton. See
			// the comment in the zbi case for more details.
			// NOTE(review): unlike the zbi case, a NOT_SUPPORTED failure
			// writing to B is fatal here, and the early return skips the
			// success log below — confirm whether that asymmetry is
			// intentional.
			if err := writeAsset(svc, paver.ConfigurationA,
				paver.AssetVerifiedBootMetadata, buffer); err != nil {
				return err
			}
			return writeAsset(svc, paver.ConfigurationB, paver.AssetVerifiedBootMetadata, buffer2)
		} else {
			// device supports ABR, so write the vbmeta to the
			// target partition.
			writeImg = func() error {
				return writeAsset(svc, *targetConfig, paver.AssetVerifiedBootMetadata, buffer2)
			}
		}
	case "zedboot", "zedboot.signed":
		if skipRecovery {
			return nil
		} else {
			writeImg = func() error {
				return writeAsset(svc, paver.ConfigurationRecovery, paver.AssetKernel, buffer)
			}
		}
	case "recovery.vbmeta":
		if skipRecovery {
			return nil
		} else {
			writeImg = func() error {
				return writeAsset(svc, paver.ConfigurationRecovery, paver.AssetVerifiedBootMetadata, buffer)
			}
		}
	case "bootloader":
		// Keep support for update packages still using the older "bootloader"
		// file, which is handled identically to "firmware" but without type
		// support so img.Type will always be "".
		fallthrough
	case "firmware":
		writeImg = func() error {
			result, err := svc.WriteFirmware(context.Background(), img.Type, *buffer)
			if err != nil {
				return err
			}
			if result.Which() == paver.WriteFirmwareResultUnsupportedType {
				syslog.Infof("img_writer: skipping unsupported firmware type %q", img.Type)
				// Return nil here to skip unsupported types rather than failing.
				// This lets us add new types in the future without breaking
				// the update flow from older devices.
				return nil
			}
			statusErr := zx.Status(result.Status)
			if statusErr != zx.ErrOk {
				return fmt.Errorf("%s", statusErr)
			}
			return nil
		}
	case "board":
		return nil
	default:
		return fmt.Errorf("unrecognized image %q", img.Filename())
	}
	syslog.Infof("img_writer: writing %q from update package", img.Filename())
	if err := writeImg(); err != nil {
		return fmt.Errorf("img_writer: error writing %q: %q", img.Filename(), err)
	}
	syslog.Infof("img_writer: wrote %q successfully", img.Filename())
	return nil
}
// bufferForFile returns the contents of f as a fuchsia.mem.Buffer backed by
// a resizable copy-on-write child of the file's VMO.
func bufferForFile(f *os.File) (*mem.Buffer, error) {
	// NOTE(review): a failed single-value type assertion panics rather than
	// yielding nil, so the nil check below only guards FDIOForFD itself
	// returning a nil *fdio.File — confirm FDIOForFD's contract.
	fio := syscall.FDIOForFD(int(f.Fd())).(*fdio.File)
	if fio == nil {
		return nil, fmt.Errorf("not fdio file")
	}
	status, buffer, err := fio.GetBuffer(fuchsiaio.VmoFlagRead)
	if err != nil {
		return nil, fmt.Errorf("GetBuffer fidl error: %q", err)
	}
	statusErr := zx.Status(status)
	if statusErr != zx.ErrOk {
		return nil, fmt.Errorf("GetBuffer error: %q", statusErr)
	}
	// The parent VMO is only needed long enough to create the child.
	defer buffer.Vmo.Close()
	// VMOs acquired over FIDL are not guaranteed to be resizable, so create a child VMO that is.
	childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
	if err != nil {
		return nil, err
	}
	return &mem.Buffer{
		Vmo:  childVmo,
		Size: buffer.Size,
	}, nil
}
// UpdateCurrentChannel persists the update channel info for a successful
// update by copying /misc/ota/target_channel.json over
// current_channel.json via a temporary ".part" file plus rename, so readers
// never observe a partial write.
//
// Fixes: the write-failure message now names partPath (the file actually
// being written) instead of currentPath, and Sync/Close failures are
// surfaced instead of ignored — the rename is only durable if the data
// reached disk first.
func UpdateCurrentChannel() error {
	targetPath := "/misc/ota/target_channel.json"
	contents, err := ioutil.ReadFile(targetPath)
	if err != nil {
		return fmt.Errorf("no target channel recorded in %v: %w", targetPath, err)
	}
	currentPath := "/misc/ota/current_channel.json"
	partPath := currentPath + ".part"
	f, err := os.Create(partPath)
	if err != nil {
		return fmt.Errorf("unable to write current channel to %v: %w", partPath, err)
	}
	// Backstop close for the error paths; the success path closes
	// explicitly below (the redundant second Close is harmless).
	defer f.Close()
	if _, err := f.Write(contents); err != nil {
		return fmt.Errorf("unable to write current channel to %v: %w", partPath, err)
	}
	if err := f.Sync(); err != nil {
		return fmt.Errorf("unable to sync %v: %w", partPath, err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("unable to close %v: %w", partPath, err)
	}
	if err := os.Rename(partPath, currentPath); err != nil {
		return fmt.Errorf("error moving %v to %v: %w", partPath, currentPath, err)
	}
	return nil
}
processor.go | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package system_updater
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"syscall/zx"
"syscall/zx/fdio"
fuchsiaio "fidl/fuchsia/io"
"fidl/fuchsia/mem"
"fidl/fuchsia/paver"
"fidl/fuchsia/pkg"
"fuchsia.googlesource.com/component"
"fuchsia.googlesource.com/syslog"
)
// When this suffix is found in the "images" file, it indicates a typed image
// that looks for all matches within the update package.
const ImageTypeSuffix = "[_type]"
func ConnectToPackageResolver(ctx *component.Context) (*pkg.PackageResolverWithCtxInterface, error) {
req, pxy, err := pkg.NewPackageResolverWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("control interface could not be acquired: %s", err)
return nil, err
}
ctx.ConnectToEnvService(req)
return pxy, nil
}
func ConnectToPaver(ctx *component.Context) (*paver.DataSinkWithCtxInterface, *paver.BootManagerWithCtxInterface, error) {
req, pxy, err := paver.NewPaverWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("control interface could not be acquired: %s", err)
return nil, nil, err
}
defer pxy.Close()
ctx.ConnectToEnvService(req)
dataSinkReq, dataSinkPxy, err := paver.NewDataSinkWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("data sink interface could not be acquired: %s", err)
return nil, nil, err
}
err = pxy.FindDataSink(context.Background(), dataSinkReq)
if err != nil {
syslog.Errorf("could not find data sink: %s", err)
return nil, nil, err
}
bootManagerReq, bootManagerPxy, err := paver.NewBootManagerWithCtxInterfaceRequest()
if err != nil {
syslog.Errorf("boot manager interface could not be acquired: %s", err)
return nil, nil, err
}
err = pxy.FindBootManager(context.Background(), bootManagerReq)
if err != nil {
syslog.Errorf("could not find boot manager: %s", err)
return nil, nil, err
}
return dataSinkPxy, bootManagerPxy, nil
}
// CacheUpdatePackage caches the requested, possibly merkle-pinned, update
// package URL and returns the pkgfs path to the package.
func CacheUpdatePackage(updateURL string, resolver *pkg.PackageResolverWithCtxInterface) (*UpdatePackage, error) {
dirPxy, err := resolvePackage(updateURL, resolver)
if err != nil {
return nil, err
}
pkg, err := NewUpdatePackage(dirPxy)
if err != nil {
return nil, err
}
merkle, err := pkg.Merkleroot()
if err != nil {
pkg.Close()
return nil, err
}
syslog.Infof("resolved %s as %s", updateURL, merkle)
return pkg, nil
}
// An image name and type string.
type Image struct {
// The base name of the image.
Name string
// A type string, default "".
Type string
}
// Filename reports the update-package file name for this image: the bare
// Name for the default (empty) type, or Name and Type joined by an
// underscore, e.g.:
//   name="foo", type=""    -> "foo"
//   name="foo", type="bar" -> "foo_bar"
func (i *Image) Filename() string {
	switch i.Type {
	case "":
		return i.Name
	default:
		return i.Name + "_" + i.Type
	}
}
func ParseRequirements(updatePkg *UpdatePackage) ([]string, []Image, error) {
// First, figure out which packages files we should parse
parseJson := true
pkgSrc, err := updatePkg.Open("packages.json")
// Fall back to line formatted packages file if packages.json not present
// Ideally, we'd fall back if specifically given the "file not found" error,
// though it's unclear which error that is (syscall.ENOENT did not work)
if err != nil {
syslog.Infof("parse_requirements: could not open packages.json, falling back to packages.")
parseJson = false
pkgSrc, err = updatePkg.Open("packages")
}
if err != nil {
return nil, nil, fmt.Errorf("error opening packages data file! %s", err)
}
defer pkgSrc.Close()
// Now that we know which packages file to parse, we can parse it.
pkgs := []string{}
if parseJson {
pkgs, err = ParsePackagesJson(pkgSrc)
} else {
pkgs, err = ParsePackagesLineFormatted(pkgSrc)
}
if err != nil {
return nil, nil, fmt.Errorf("failed to parse packages: %v", err)
}
// Finally, we parse images
imgSrc, err := os.Open(filepath.Join("/pkg", "data", "images"))
if err != nil {
return nil, nil, fmt.Errorf("error opening images data file! %s", err)
}
defer imgSrc.Close()
filenames, err := updatePkg.ListFiles()
if err != nil {
return nil, nil, fmt.Errorf("failed to list package files: %v", err)
}
imgs, err := ParseImages(imgSrc, filenames)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse images: %v", err)
}
return pkgs, imgs, nil
}
// Packages deserializes the packages.json file in the system update package.
// NOTE: Fields must be exported for json decoding.
type packages struct {
Version intOrStr `json:"version"`
// A list of fully qualified URIs.
URIs []string `json:"content"`
}
// intOrStr is an int whose JSON form may also be a quoted string, so both
// 1 and "1" decode to 1.
type intOrStr int

// UnmarshalJSON implements json.Unmarshaler; a string payload is unquoted
// before being parsed as an int.
func (i *intOrStr) UnmarshalJSON(b []byte) error {
	var asString string
	if err := json.Unmarshal(b, &asString); err == nil {
		b = []byte(asString)
	}
	return json.Unmarshal(b, (*int)(i))
}
func ParsePackagesJson(pkgSrc io.ReadCloser) ([]string, error) {
bytes, err := ioutil.ReadAll(pkgSrc)
if err != nil {
return nil, fmt.Errorf("failed to read packages.json with error: %v", err)
}
var packages packages
if err := json.Unmarshal(bytes, &packages); err != nil {
return nil, fmt.Errorf("failed to unmarshal packages.json: %v", err)
}
if packages.Version != 1 {
return nil, fmt.Errorf("unsupported version of packages.json: %v", packages.Version)
}
return packages.URIs, nil
}
func ParsePackagesLineFormatted(pkgSrc io.ReadCloser) ([]string, error) {
pkgs := []string{}
rdr := bufio.NewReader(pkgSrc)
for {
l, err := rdr.ReadString('\n')
s := strings.TrimSpace(l)
if (err == nil || err == io.EOF) && len(s) > 0 {
entry := strings.Split(s, "=")
if len(entry) != 2 {
return nil, fmt.Errorf("parser: entry format %q", s)
} else {
pkgURI := fmt.Sprintf("fuchsia-pkg://fuchsia.com/%s?hash=%s", entry[0], entry[1])
pkgs = append(pkgs, pkgURI)
}
}
if err != nil {
if err != io.EOF {
return nil, fmt.Errorf("parser: got error reading packages file %s", err)
}
break
}
}
return pkgs, nil
}
// FindTypedImages returns every entry of |filenames| that matches
// |basename|.
//
// A match is one of:
//   <basename>
//   <basename>_<type>
func FindTypedImages(basename string, filenames []string) []Image {
	var matches []Image
	for _, candidate := range filenames {
		if !strings.HasPrefix(candidate, basename) {
			continue
		}
		rest := candidate[len(basename):]
		switch {
		case rest == "":
			// The bare base name denotes the default (empty) type.
			matches = append(matches, Image{Name: basename, Type: ""})
		case rest[0] == '_':
			matches = append(matches, Image{Name: basename, Type: rest[1:]})
		}
	}
	return matches
}
// ParseImages returns a list of images derived from the "images" file.
//
// Untyped images (those without the [_type] suffix) are included in the return
// slice no matter what.
//
// Typed images, on the other hand, will only include matches that exist in
// |filenames|.
func ParseImages(imgSrc io.ReadCloser, filenames []string) ([]Image, error) {
	rdr := bufio.NewReader(imgSrc)
	imgs := []Image{}
	for {
		// ReadString may return data together with an error (notably
		// io.EOF for a final line with no trailing newline), so the
		// line is processed before the error is examined.
		l, err := rdr.ReadString('\n')
		s := strings.TrimSpace(l)
		if (err == nil || err == io.EOF) && len(s) > 0 {
			if strings.HasSuffix(s, ImageTypeSuffix) {
				// Typed image: look for all matching images in the package.
				basename := strings.TrimSuffix(s, ImageTypeSuffix)
				imgs = append(imgs, FindTypedImages(basename, filenames)...)
			} else {
				// Untyped image: always included, with the default "" type.
				imgs = append(imgs, Image{Name: s, Type: ""})
			}
		}
		if err != nil {
			if err != io.EOF {
				return nil, fmt.Errorf("parser: got error reading images file %s", err)
			}
			// io.EOF simply marks the end of the file.
			break
		}
	}
	return imgs, nil
}
// Types to deserialize the update-mode file. NOTE: Fields must be exported for json decoding.
// Expected form for update-mode file is:
// {
// "version": "1",
// "content": {
// "mode": "normal" / "force-recovery",
// }
// }

// updateModeFileContent is the "content" object of the update-mode file.
type updateModeFileContent struct {
	// Mode is the requested update flow; see UpdateMode for recognized values.
	Mode string `json:"mode"`
}

// updateModeFile is the top-level object of the update-mode file.
type updateModeFile struct {
	// Version of the update-mode file format, carried as a string.
	Version string `json:"version"`
	Content updateModeFileContent `json:"content"`
}
// UpdateMode describes the supported update modes.
// Note: exporting since this will be used in main (to be consistent with the rest of the code).
type UpdateMode string

const (
	// UpdateModeNormal is the regular system update flow.
	UpdateModeNormal UpdateMode = "normal"
	// UpdateModeForceRecovery forces the device into recovery.
	// Previously declared without an explicit type, which made it an
	// untyped string constant rather than an UpdateMode; the explicit
	// type keeps it consistent with UpdateModeNormal. (Backward
	// compatible: all visible uses compare it against UpdateMode values.)
	UpdateModeForceRecovery UpdateMode = "force-recovery"
)
// We define custom error wrappers so we can test the proper error is being
// returned.

// updateModeNotSupportedError reports an update-mode value that this
// updater does not recognize.
type updateModeNotSupportedError UpdateMode

// Error implements the error interface; the message embeds the rejected mode.
func (e updateModeNotSupportedError) Error() string {
	return "unsupported update mode: " + string(e)
}
// jsonUnmarshalError wraps the underlying json decoding failure from the
// update-mode file so callers and tests can detect this failure class.
type jsonUnmarshalError struct {
	// err is the original decoding error.
	err error
}

// Error implements the error interface.
func (e jsonUnmarshalError) Error() string {
	return fmt.Sprintf("failed to unmarshal update-mode: %v", e.err)
}
// ParseUpdateMode reads the update package's optional "update-mode" file
// and returns the requested update mode.
// Note: exporting since this will be used in main (to be consistent with the rest of the code).
func ParseUpdateMode(updatePkg *UpdatePackage) (UpdateMode, error) {
	// Fall back to normal if the update-mode file does not exist.
	// Ideally, we'd fall back if specifically given the "file not found"
	// error, though it's unclear which error that is (syscall.ENOENT did
	// not work).
	modeSrc, err := updatePkg.Open("update-mode")
	if err != nil {
		syslog.Infof("parse_update_mode: could not open update-mode file, assuming normal system update flow.")
		return UpdateModeNormal, nil
	}
	defer modeSrc.Close()

	// Read the raw bytes of the file.
	raw, err := ioutil.ReadAll(modeSrc)
	if err != nil {
		return "", fmt.Errorf("failed to read mode file: %w", err)
	}

	// Decode the json document.
	var parsed updateModeFile
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return "", jsonUnmarshalError{err}
	}

	// Only the known modes are accepted.
	switch mode := UpdateMode(parsed.Content.Mode); mode {
	case UpdateModeNormal, UpdateModeForceRecovery:
		return mode, nil
	default:
		return "", updateModeNotSupportedError(mode)
	}
}
// FetchPackages resolves every package URI in pkgs, continuing past
// individual failures. If any package failed, the count is logged and the
// first error encountered is returned.
func FetchPackages(pkgs []string, resolver *pkg.PackageResolverWithCtxInterface) error {
	var (
		failures int
		firstErr error
	)
	for _, uri := range pkgs {
		err := fetchPackage(uri, resolver)
		if err == nil {
			continue
		}
		syslog.Errorf("fetch error: %s", err)
		failures++
		if firstErr == nil {
			firstErr = err
		}
	}
	if failures == 0 {
		return nil
	}
	syslog.Errorf("system update failed, %d packages had errors", failures)
	return firstErr
}
// fetchPackage resolves pkgURI purely for its side effect of pulling the
// package into the local cache; the resolved directory proxy is closed
// immediately. The error from resolution, if any, is returned.
func fetchPackage(pkgURI string, resolver *pkg.PackageResolverWithCtxInterface) error {
	dirPxy, err := resolvePackage(pkgURI, resolver)
	// Only the caching side effect is wanted, so release the directory
	// channel as soon as it exists.
	if dirPxy != nil {
		dirPxy.Close(context.Background())
	}
	return err
}
// resolvePackage asks the package resolver to resolve pkgURI and returns a
// proxy to the resolved package's directory. On any failure the directory
// proxy is closed and nil is returned along with the error.
func resolvePackage(pkgURI string, resolver *pkg.PackageResolverWithCtxInterface) (*fuchsiaio.DirectoryWithCtxInterface, error) {
	// No selectors and a zero-value update policy: resolve the URI as-is.
	selectors := []string{}
	updatePolicy := pkg.UpdatePolicy{}
	dirReq, dirPxy, err := fuchsiaio.NewDirectoryWithCtxInterfaceRequest()
	if err != nil {
		return nil, err
	}
	syslog.Infof("requesting %s from update system", pkgURI)
	status, err := resolver.Resolve(context.Background(), pkgURI, selectors, updatePolicy, dirReq)
	if err != nil {
		// Transport-level failure: release the directory channel.
		dirPxy.Close(context.Background())
		return nil, fmt.Errorf("fetch: Resolve error: %s", err)
	}
	// The resolver also reports application-level failure via a zx status.
	statusErr := zx.Status(status)
	if statusErr != zx.ErrOk {
		dirPxy.Close(context.Background())
		return nil, fmt.Errorf("fetch: Resolve status: %s", statusErr)
	}
	return dirPxy, nil
}
// ValidateUpdatePackage verifies that the update package targets this
// device's board when the package carries a "board" file; a package without
// one passes the check unconditionally.
func ValidateUpdatePackage(updatePkg *UpdatePackage) error {
	actual, err := updatePkg.ReadFile("board")
	if err != nil {
		// A missing board file means no board check is required.
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	expected, err := ioutil.ReadFile("/config/build-info/board")
	if err != nil {
		return err
	}
	if !bytes.Equal(actual, expected) {
		return fmt.Errorf("parser: expected board name %s found %s", expected, actual)
	}
	return nil
}
// ValidateImgs checks that the update package's kernel images are consistent
// with the requested update mode: a normal-mode package must contain a 'zbi'
// or 'zbi.signed' image, while a force-recovery package must contain neither.
//
// NOTE(review): the imgs parameter is not consulted here — the check is made
// directly against the package contents via Stat; presumably kept for
// signature stability with callers. Confirm before removing.
func ValidateImgs(imgs []Image, updatePkg *UpdatePackage, updateMode UpdateMode) error {
	found := false
	for _, img := range []string{"zbi", "zbi.signed"} {
		if _, err := updatePkg.Stat(img); err == nil {
			found = true
			break
		}
	}
	// Update package with normal mode should have a `zbi` or `zbi.signed`.
	if updateMode == UpdateModeNormal && !found {
		return fmt.Errorf("parser: missing 'zbi' or 'zbi.signed', this is required in normal update mode")
	}
	// Update package with force-recovery mode should NOT have a `zbi` nor `zbi.signed`.
	if updateMode == UpdateModeForceRecovery && found {
		return fmt.Errorf("parser: contains 'zbi' or 'zbi.signed', this is not allowed in force-recovery update mode")
	}
	return nil
}
// WriteImgs writes every image in imgs from the update package via the paver
// data sink, then adjusts the boot configuration to match updateMode: in
// normal mode on an ABR-capable device the freshly written target
// configuration is marked active; in force-recovery mode configurations A
// and B are both marked unbootable. The data sink is always flushed at the
// end, and the boot manager is flushed too when the device supports ABR.
// Combining force-recovery with skipRecovery is contradictory and rejected.
func WriteImgs(dataSink *paver.DataSinkWithCtxInterface, bootManager *paver.BootManagerWithCtxInterface, imgs []Image, updatePkg *UpdatePackage, updateMode UpdateMode, skipRecovery bool) error {
	if updateMode == UpdateModeForceRecovery && skipRecovery == true {
		return fmt.Errorf("can't force recovery when skipping recovery image installation")
	}
	syslog.Infof("Writing images %+v from update package", imgs)
	activeConfig, err := queryActiveConfig(bootManager)
	if err != nil {
		return fmt.Errorf("querying target config: %v", err)
	}
	// If we have an active config (and thus support ABR), compute the
	// target config. Otherwise set the target config to nil so we fall
	// back to the legacy behavior where we write to the A partition, and
	// attempt to write to the B partition.
	var targetConfig *paver.Configuration
	if activeConfig == nil {
		targetConfig = nil
	} else {
		targetConfig, err = calculateTargetConfig(*activeConfig)
		if err != nil {
			return err
		}
	}
	for _, img := range imgs {
		if err := writeImg(dataSink, img, updatePkg, targetConfig, skipRecovery); err != nil {
			return err
		}
	}
	if updateMode == UpdateModeNormal && targetConfig != nil {
		if err := setConfigurationActive(bootManager, *targetConfig); err != nil {
			return err
		}
	} else if updateMode == UpdateModeForceRecovery {
		// Make both main configurations unbootable so the device falls
		// back to the recovery image on next boot.
		for _, config := range []paver.Configuration{paver.ConfigurationA, paver.ConfigurationB} {
			if err := setConfigurationUnbootable(bootManager, config); err != nil {
				return fmt.Errorf("failed to set configuration unbootable: %v", err)
			}
		}
	}
	if err = flushDataSink(dataSink); err != nil {
		return fmt.Errorf("img_writer: failed to flush data sink. %v", err)
	}
	// targetConfig is non-nil only on ABR devices, the only case where a
	// boot manager flush applies.
	if targetConfig != nil {
		if err = flushBootManager(bootManager); err != nil {
			return fmt.Errorf("img_writer: failed to flush boot manager. %v", err)
		}
	}
	return nil
}
// flushDataSink flushes any buffered paver data-sink writes, converting a
// non-OK zx status into a *zx.Error.
func flushDataSink(dataSink *paver.DataSinkWithCtxInterface) error {
	status, err := dataSink.Flush(context.Background())
	if err != nil {
		return err
	}
	st := zx.Status(status)
	if st == zx.ErrOk {
		return nil
	}
	return &zx.Error{Status: st}
}
// flushBootManager flushes any pending boot-manager state, converting a
// non-OK zx status into a *zx.Error.
func flushBootManager(bootManager *paver.BootManagerWithCtxInterface) error {
	status, err := bootManager.Flush(context.Background())
	if err != nil {
		return err
	}
	st := zx.Status(status)
	if st == zx.ErrOk {
		return nil
	}
	return &zx.Error{Status: st}
}
// queryActiveConfig asks the boot manager what partition the device booted
// from. If the device does not support ABR, it returns nil as the
// configuration (with a nil error).
func queryActiveConfig(bootManager *paver.BootManagerWithCtxInterface) (*paver.Configuration, error) {
	activeConfig, err := bootManager.QueryActiveConfiguration(context.Background())
	if err != nil {
		// FIXME(fxb/43577): If the paver service runs into a problem
		// creating a boot manager, it will close the channel with an
		// epitaph. The error we are particularly interested in is
		// whether or not the current device supports ABR.
		// Unfortunately the go fidl bindings do not support epitaphs,
		// so we can't actually check for this error condition. All we
		// can observe is that the channel has been closed, so treat
		// this condition as the device does not support ABR.
		if err, ok := err.(*zx.Error); ok && err.Status == zx.ErrPeerClosed {
			syslog.Warnf("img_writer: boot manager channel closed, assuming device does not support ABR")
			return nil, nil
		}
		return nil, fmt.Errorf("querying active config: %v", err)
	}
	// A successful result union carries the active configuration.
	if activeConfig.Which() == paver.BootManagerQueryActiveConfigurationResultResponse {
		syslog.Infof("img_writer: device supports ABR")
		return &activeConfig.Response.Configuration, nil
	}
	statusErr := zx.Status(activeConfig.Err)
	if statusErr == zx.ErrNotSupported {
		// this device doesn't support ABR, so fall back to the
		// legacy workflow.
		syslog.Infof("img_writer: device does not support ABR")
		return nil, nil
	}
	// Any other status is a genuine failure.
	return nil, &zx.Error{Status: statusErr}
}
func calculateTargetConfig(activeConfig paver.Configuration) (*paver.Configuration, error) |
func setConfigurationActive(bootManager *paver.BootManagerWithCtxInterface, targetConfig paver.Configuration) error {
syslog.Infof("img_writer: setting configuration %s active", targetConfig)
status, err := bootManager.SetConfigurationActive(context.Background(), targetConfig)
if err != nil {
return err
}
statusErr := zx.Status(status)
if statusErr != zx.ErrOk {
return &zx.Error{Status: statusErr}
}
return nil
}
func setConfigurationUnbootable(bootManager *paver.BootManagerWithCtxInterface, targetConfig paver.Configuration) error {
syslog.Infof("img_writer: setting configuration %s unbootable", targetConfig)
status, err := bootManager.SetConfigurationUnbootable(context.Background(), targetConfig)
if err != nil {
return err
}
statusErr := zx.Status(status)
if statusErr != zx.ErrOk {
return &zx.Error{Status: statusErr}
}
return nil
}
func writeAsset(svc *paver.DataSinkWithCtxInterface, configuration paver.Configuration, asset paver.Asset, payload *mem.Buffer) error {
syslog.Infof("img_writer: writing asset %q to %q", asset, configuration)
status, err := svc.WriteAsset(context.Background(), configuration, asset, *payload)
if err != nil {
syslog.Errorf("img_writer: failed to write asset %q: %s", asset, err)
return err
}
statusErr := zx.Status(status)
if statusErr != zx.ErrOk {
return &zx.Error{Status: statusErr}
}
return nil
}
func writeImg(svc *paver.DataSinkWithCtxInterface, img Image, updatePkg *UpdatePackage, targetConfig *paver.Configuration, skipRecovery bool) error {
f, err := updatePkg.Open(img.Filename())
if err != nil {
syslog.Warnf("img_writer: %q image not found, skipping", img.Filename())
return nil
}
if fi, err := f.Stat(); err != nil || fi.Size() == 0 {
syslog.Warnf("img_writer: %q zero length, skipping", img.Filename())
return nil
}
defer f.Close()
buffer, err := bufferForFile(f)
if err != nil {
return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
}
defer buffer.Vmo.Close()
var writeImg func() error
switch img.Name {
case "zbi", "zbi.signed":
childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
if err != nil {
return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
}
buffer2 := &mem.Buffer{
Vmo: childVmo,
Size: buffer.Size,
}
defer buffer2.Vmo.Close()
if targetConfig == nil {
// device does not support ABR, so write the ZBI to the
// A partition. We also try to write to the B partition
// in order to be forwards compatible with devices that
// will eventually support ABR, but we ignore errors
// because some devices won't have a B partition.
writeImg = func() error {
if err := writeAsset(svc, paver.ConfigurationA, paver.AssetKernel, buffer); err != nil {
return err
}
if err := writeAsset(svc, paver.ConfigurationB, paver.AssetKernel, buffer2); err != nil {
asZxErr, ok := err.(*zx.Error)
if ok && asZxErr.Status == zx.ErrNotSupported {
syslog.Warnf("img_writer: skipping writing %q to B: %v", img.Filename(), err)
} else {
return err
}
}
return nil
}
} else {
// device supports ABR, so only write the ZB to the
// target partition.
writeImg = func() error {
return writeAsset(svc, *targetConfig, paver.AssetKernel, buffer)
}
}
case "fuchsia.vbmeta":
childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
if err != nil {
return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
}
buffer2 := &mem.Buffer{
Vmo: childVmo,
Size: buffer.Size,
}
defer buffer2.Vmo.Close()
if targetConfig == nil {
// device does not support ABR, so write vbmeta to the
// A partition, and try to write to the B partiton. See
// the comment in the zbi case for more details.
if err := writeAsset(svc, paver.ConfigurationA,
paver.AssetVerifiedBootMetadata, buffer); err != nil {
return err
}
return writeAsset(svc, paver.ConfigurationB, paver.AssetVerifiedBootMetadata, buffer2)
} else {
// device supports ABR, so write the vbmeta to the
// target partition.
writeImg = func() error {
return writeAsset(svc, *targetConfig, paver.AssetVerifiedBootMetadata, buffer2)
}
}
case "zedboot", "zedboot.signed":
if skipRecovery {
return nil
} else {
writeImg = func() error {
return writeAsset(svc, paver.ConfigurationRecovery, paver.AssetKernel, buffer)
}
}
case "recovery.vbmeta":
if skipRecovery {
return nil
} else {
writeImg = func() error {
return writeAsset(svc, paver.ConfigurationRecovery, paver.AssetVerifiedBootMetadata, buffer)
}
}
case "bootloader":
// Keep support for update packages still using the older "bootloader"
// file, which is handled identically to "firmware" but without type
// support so img.Type will always be "".
fallthrough
case "firmware":
writeImg = func() error {
result, err := svc.WriteFirmware(context.Background(), img.Type, *buffer)
if err != nil {
return err
}
if result.Which() == paver.WriteFirmwareResultUnsupportedType {
syslog.Infof("img_writer: skipping unsupported firmware type %q", img.Type)
// Return nil here to skip unsupported types rather than failing.
// This lets us add new types in the future without breaking
// the update flow from older devices.
return nil
}
statusErr := zx.Status(result.Status)
if statusErr != zx.ErrOk {
return fmt.Errorf("%s", statusErr)
}
return nil
}
case "board":
return nil
default:
return fmt.Errorf("unrecognized image %q", img.Filename())
}
syslog.Infof("img_writer: writing %q from update package", img.Filename())
if err := writeImg(); err != nil {
return fmt.Errorf("img_writer: error writing %q: %q", img.Filename(), err)
}
syslog.Infof("img_writer: wrote %q successfully", img.Filename())
return nil
}
func bufferForFile(f *os.File) (*mem.Buffer, error) {
fio := syscall.FDIOForFD(int(f.Fd())).(*fdio.File)
if fio == nil {
return nil, fmt.Errorf("not fdio file")
}
status, buffer, err := fio.GetBuffer(fuchsiaio.VmoFlagRead)
if err != nil {
return nil, fmt.Errorf("GetBuffer fidl error: %q", err)
}
statusErr := zx.Status(status)
if statusErr != zx.ErrOk {
return nil, fmt.Errorf("GetBuffer error: %q", statusErr)
}
defer buffer.Vmo.Close()
// VMOs acquired over FIDL are not guaranteed to be resizable, so create a child VMO that is.
childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
if err != nil {
return nil, err
}
return &mem.Buffer{
Vmo: childVmo,
Size: buffer.Size,
}, nil
}
// UpdateCurrentChannel persists the update channel info for a successful update
func UpdateCurrentChannel() error {
targetPath := "/misc/ota/target_channel.json"
contents, err := ioutil.ReadFile(targetPath)
if err != nil {
return fmt.Errorf("no target channel recorded in %v: %w", targetPath, err)
}
currentPath := "/misc/ota/current_channel.json"
partPath := currentPath + ".part"
f, err := os.Create(partPath)
if err != nil {
return fmt.Errorf("unable to write current channel to %v: %w", partPath, err)
}
defer f.Close()
buf := bytes.NewBuffer(contents)
_, err = buf.WriteTo(f)
if err != nil {
return fmt.Errorf("unable to write current channel to %v: %w", currentPath, err)
}
f.Sync()
f.Close()
if err := os.Rename(partPath, currentPath); err != nil {
return fmt.Errorf("error moving %v to %v: %w", partPath, currentPath, err)
}
return nil
}
| {
var config paver.Configuration
switch activeConfig {
case paver.ConfigurationA:
config = paver.ConfigurationB
case paver.ConfigurationB:
config = paver.ConfigurationA
case paver.ConfigurationRecovery:
syslog.Warnf("img_writer: configured for recovery, using partition A instead")
config = paver.ConfigurationA
default:
return nil, fmt.Errorf("img_writer: unknown config: %s", activeConfig)
}
syslog.Infof("img_writer: writing to configuration %s", config)
return &config, nil
} | identifier_body |
processor.go | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package system_updater
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"syscall/zx"
"syscall/zx/fdio"
fuchsiaio "fidl/fuchsia/io"
"fidl/fuchsia/mem"
"fidl/fuchsia/paver"
"fidl/fuchsia/pkg"
"fuchsia.googlesource.com/component"
"fuchsia.googlesource.com/syslog"
)
// ImageTypeSuffix, when found at the end of a line in the "images" file,
// indicates a typed image entry that expands to every matching image found
// within the update package (see FindTypedImages).
const ImageTypeSuffix = "[_type]"
// ConnectToPackageResolver connects to the fuchsia.pkg.PackageResolver
// service in this component's environment and returns the client proxy.
func ConnectToPackageResolver(ctx *component.Context) (*pkg.PackageResolverWithCtxInterface, error) {
	req, pxy, err := pkg.NewPackageResolverWithCtxInterfaceRequest()
	if err != nil {
		syslog.Errorf("control interface could not be acquired: %s", err)
		return nil, err
	}
	// Hand the server end of the channel to the environment; the proxy
	// end is returned to the caller.
	ctx.ConnectToEnvService(req)
	return pxy, nil
}
// ConnectToPaver connects to the fuchsia.paver.Paver service and obtains
// both a DataSink proxy (for writing images) and a BootManager proxy (for
// A/B/R configuration control). The transient Paver proxy itself is closed
// before returning. On failure, any proxy acquired so far is closed so the
// underlying channel handles are not leaked (previously the data sink — and
// boot manager — proxies were dropped unclosed on later error paths).
func ConnectToPaver(ctx *component.Context) (*paver.DataSinkWithCtxInterface, *paver.BootManagerWithCtxInterface, error) {
	req, pxy, err := paver.NewPaverWithCtxInterfaceRequest()
	if err != nil {
		syslog.Errorf("control interface could not be acquired: %s", err)
		return nil, nil, err
	}
	defer pxy.Close()
	ctx.ConnectToEnvService(req)
	dataSinkReq, dataSinkPxy, err := paver.NewDataSinkWithCtxInterfaceRequest()
	if err != nil {
		syslog.Errorf("data sink interface could not be acquired: %s", err)
		return nil, nil, err
	}
	err = pxy.FindDataSink(context.Background(), dataSinkReq)
	if err != nil {
		syslog.Errorf("could not find data sink: %s", err)
		dataSinkPxy.Close()
		return nil, nil, err
	}
	bootManagerReq, bootManagerPxy, err := paver.NewBootManagerWithCtxInterfaceRequest()
	if err != nil {
		syslog.Errorf("boot manager interface could not be acquired: %s", err)
		dataSinkPxy.Close()
		return nil, nil, err
	}
	err = pxy.FindBootManager(context.Background(), bootManagerReq)
	if err != nil {
		syslog.Errorf("could not find boot manager: %s", err)
		dataSinkPxy.Close()
		bootManagerPxy.Close()
		return nil, nil, err
	}
	return dataSinkPxy, bootManagerPxy, nil
}
// CacheUpdatePackage caches the requested, possibly merkle-pinned, update
// package URL and returns the pkgfs path to the package.
func CacheUpdatePackage(updateURL string, resolver *pkg.PackageResolverWithCtxInterface) (*UpdatePackage, error) {
	dirPxy, err := resolvePackage(updateURL, resolver)
	if err != nil {
		return nil, err
	}
	// Local renamed from "pkg": the original shadowed the imported
	// fidl/fuchsia/pkg package for the rest of the function.
	updatePkg, err := NewUpdatePackage(dirPxy)
	if err != nil {
		return nil, err
	}
	merkle, err := updatePkg.Merkleroot()
	if err != nil {
		// Don't leak the package handle when the merkle read fails.
		updatePkg.Close()
		return nil, err
	}
	syslog.Infof("resolved %s as %s", updateURL, merkle)
	return updatePkg, nil
}
// Image identifies a single image within an update package by its base
// name and an optional type string.
type Image struct {
	// Name is the base name of the image.
	Name string
	// Type is a type string, default "".
	Type string
}

// Filename returns an Image's filename in an update package.
//
// If a type is given, the filename in the package will be <name>_<type>, e.g.:
//   name="foo", type=""    -> "foo"
//   name="foo", type="bar" -> "foo_bar"
func (i *Image) Filename() string {
	if i.Type == "" {
		return i.Name
	}
	return i.Name + "_" + i.Type
}
// ParseRequirements returns the package URIs and images required by the
// update package: package URIs come from its packages.json (falling back to
// the legacy line-formatted "packages" file) and the image list comes from
// the updater's own /pkg/data/images file cross-referenced against the
// update package's contents.
func ParseRequirements(updatePkg *UpdatePackage) ([]string, []Image, error) {
	// First, figure out which packages files we should parse
	parseJson := true
	pkgSrc, err := updatePkg.Open("packages.json")
	// Fall back to line formatted packages file if packages.json not present
	// Ideally, we'd fall back if specifically given the "file not found" error,
	// though it's unclear which error that is (syscall.ENOENT did not work)
	if err != nil {
		syslog.Infof("parse_requirements: could not open packages.json, falling back to packages.")
		parseJson = false
		pkgSrc, err = updatePkg.Open("packages")
	}
	if err != nil {
		return nil, nil, fmt.Errorf("error opening packages data file! %s", err)
	}
	defer pkgSrc.Close()
	// Now that we know which packages file to parse, we can parse it.
	pkgs := []string{}
	if parseJson {
		pkgs, err = ParsePackagesJson(pkgSrc)
	} else {
		pkgs, err = ParsePackagesLineFormatted(pkgSrc)
	}
	if err != nil {
		return nil, nil, fmt.Errorf("failed to parse packages: %v", err)
	}
	// Finally, we parse images
	imgSrc, err := os.Open(filepath.Join("/pkg", "data", "images"))
	if err != nil {
		return nil, nil, fmt.Errorf("error opening images data file! %s", err)
	}
	defer imgSrc.Close()
	// The package's file list is needed to expand typed image entries.
	filenames, err := updatePkg.ListFiles()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to list package files: %v", err)
	}
	imgs, err := ParseImages(imgSrc, filenames)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to parse images: %v", err)
	}
	return pkgs, imgs, nil
}
// Packages deserializes the packages.json file in the system update package.
// NOTE: Fields must be exported for json decoding.
type packages struct {
Version intOrStr `json:"version"`
// A list of fully qualified URIs.
URIs []string `json:"content"`
}
type intOrStr int
// Enables us to support version as either a string or int.
func (i *intOrStr) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err == nil {
b = []byte(s)
}
return json.Unmarshal(b, (*int)(i))
}
func ParsePackagesJson(pkgSrc io.ReadCloser) ([]string, error) {
bytes, err := ioutil.ReadAll(pkgSrc)
if err != nil {
return nil, fmt.Errorf("failed to read packages.json with error: %v", err)
}
var packages packages
if err := json.Unmarshal(bytes, &packages); err != nil {
return nil, fmt.Errorf("failed to unmarshal packages.json: %v", err)
}
if packages.Version != 1 {
return nil, fmt.Errorf("unsupported version of packages.json: %v", packages.Version)
}
return packages.URIs, nil
}
func ParsePackagesLineFormatted(pkgSrc io.ReadCloser) ([]string, error) {
pkgs := []string{}
rdr := bufio.NewReader(pkgSrc)
for {
l, err := rdr.ReadString('\n')
s := strings.TrimSpace(l)
if (err == nil || err == io.EOF) && len(s) > 0 {
entry := strings.Split(s, "=")
if len(entry) != 2 {
return nil, fmt.Errorf("parser: entry format %q", s)
} else {
pkgURI := fmt.Sprintf("fuchsia-pkg://fuchsia.com/%s?hash=%s", entry[0], entry[1])
pkgs = append(pkgs, pkgURI)
}
}
if err != nil {
if err != io.EOF {
return nil, fmt.Errorf("parser: got error reading packages file %s", err)
}
break
}
}
return pkgs, nil
}
// Finds all images that match |basename| in |filenames|.
//
// A match is one of:
// <basename>
// <basename>_<type>
func | (basename string, filenames []string) []Image {
var images []Image
for _, name := range filenames {
if strings.HasPrefix(name, basename) {
suffix := name[len(basename):]
if len(suffix) == 0 {
// The base name alone indicates default type (empty string).
images = append(images, Image{Name: basename, Type: ""})
} else if suffix[0] == '_' {
images = append(images, Image{Name: basename, Type: suffix[1:]})
}
}
}
return images
}
// Returns a list of images derived from the "images" file.
//
// Untyped images (those without the [_type] suffix) are included in the return
// slice no matter what.
//
// Typed images, on the other hand, will only include matches that exist in
// |filenames|.
func ParseImages(imgSrc io.ReadCloser, filenames []string) ([]Image, error) {
rdr := bufio.NewReader(imgSrc)
imgs := []Image{}
for {
l, err := rdr.ReadString('\n')
s := strings.TrimSpace(l)
if (err == nil || err == io.EOF) && len(s) > 0 {
if strings.HasSuffix(s, ImageTypeSuffix) {
// Typed image: look for all matching images in the package.
basename := strings.TrimSuffix(s, ImageTypeSuffix)
imgs = append(imgs, FindTypedImages(basename, filenames)...)
} else {
imgs = append(imgs, Image{Name: s, Type: ""})
}
}
if err != nil {
if err != io.EOF {
return nil, fmt.Errorf("parser: got error reading images file %s", err)
}
break
}
}
return imgs, nil
}
// Types to deserialize the update-mode file. NOTE: Fields must be exported for json decoding.
// Expected form for update-mode file is:
// {
// "version": "1",
// "content": {
// "mode": "normal" / "force-recovery",
// }
// }
type updateModeFileContent struct {
Mode string `json:"mode"`
}
type updateModeFile struct {
Version string `json:"version"`
Content updateModeFileContent `json:"content"`
}
// Type to describe the supported update modes.
// Note: exporting since this will be used in main (to be consistent with the rest of the code).
type UpdateMode string
const (
UpdateModeNormal UpdateMode = "normal"
UpdateModeForceRecovery = "force-recovery"
)
// We define custom error wrappers so we can test the proper error is being returned.
type updateModeNotSupportedError UpdateMode
func (e updateModeNotSupportedError) Error() string {
return fmt.Sprintf("unsupported update mode: %s", string(e))
}
type jsonUnmarshalError struct {
err error
}
func (e jsonUnmarshalError) Error() string {
return fmt.Sprintf("failed to unmarshal update-mode: %v", e.err)
}
// Note: exporting since this will be used in main (to be consistent with the rest of the code).
func ParseUpdateMode(updatePkg *UpdatePackage) (UpdateMode, error) {
// Fall back to normal if the update-mode file does not exist.
// Ideally, we'd fall back if specifically given the "file not found" error,
// though it's unclear which error that is (syscall.ENOENT did not work).
modeSrc, err := updatePkg.Open("update-mode")
if err != nil {
syslog.Infof("parse_update_mode: could not open update-mode file, assuming normal system update flow.")
return UpdateModeNormal, nil
}
defer modeSrc.Close()
// Read the raw bytes.
b, err := ioutil.ReadAll(modeSrc)
if err != nil {
return "", fmt.Errorf("failed to read mode file: %w", err)
}
// Convert to json.
var updateModeFile updateModeFile
if err := json.Unmarshal(b, &updateModeFile); err != nil {
return "", jsonUnmarshalError{err}
}
// Confirm we support this mode.
mode := UpdateMode(updateModeFile.Content.Mode)
if mode != UpdateModeNormal && mode != UpdateModeForceRecovery {
return "", updateModeNotSupportedError(mode)
}
return mode, nil
}
func FetchPackages(pkgs []string, resolver *pkg.PackageResolverWithCtxInterface) error {
var errCount int
var firstErr error
for _, pkgURI := range pkgs {
if err := fetchPackage(pkgURI, resolver); err != nil {
syslog.Errorf("fetch error: %s", err)
errCount++
if firstErr == nil {
firstErr = err
}
}
}
if errCount > 0 {
syslog.Errorf("system update failed, %d packages had errors", errCount)
return firstErr
}
return nil
}
func fetchPackage(pkgURI string, resolver *pkg.PackageResolverWithCtxInterface) error {
dirPxy, err := resolvePackage(pkgURI, resolver)
if dirPxy != nil {
dirPxy.Close(context.Background())
}
return err
}
func resolvePackage(pkgURI string, resolver *pkg.PackageResolverWithCtxInterface) (*fuchsiaio.DirectoryWithCtxInterface, error) {
selectors := []string{}
updatePolicy := pkg.UpdatePolicy{}
dirReq, dirPxy, err := fuchsiaio.NewDirectoryWithCtxInterfaceRequest()
if err != nil {
return nil, err
}
syslog.Infof("requesting %s from update system", pkgURI)
status, err := resolver.Resolve(context.Background(), pkgURI, selectors, updatePolicy, dirReq)
if err != nil {
dirPxy.Close(context.Background())
return nil, fmt.Errorf("fetch: Resolve error: %s", err)
}
statusErr := zx.Status(status)
if statusErr != zx.ErrOk {
dirPxy.Close(context.Background())
return nil, fmt.Errorf("fetch: Resolve status: %s", statusErr)
}
return dirPxy, nil
}
func ValidateUpdatePackage(updatePkg *UpdatePackage) error {
actual, err := updatePkg.ReadFile("board")
if err == nil {
expected, err := ioutil.ReadFile("/config/build-info/board")
if err != nil {
return err
}
if !bytes.Equal(actual, expected) {
return fmt.Errorf("parser: expected board name %s found %s", expected, actual)
}
} else if !os.IsNotExist(err) {
return err
}
return nil
}
func ValidateImgs(imgs []Image, updatePkg *UpdatePackage, updateMode UpdateMode) error {
found := false
for _, img := range []string{"zbi", "zbi.signed"} {
if _, err := updatePkg.Stat(img); err == nil {
found = true
break
}
}
// Update package with normal mode should have a `zbi` or `zbi.signed`.
if updateMode == UpdateModeNormal && !found {
return fmt.Errorf("parser: missing 'zbi' or 'zbi.signed', this is required in normal update mode")
}
// Update package with force-recovery mode should NOT have a `zbi` nor `zbi.signed`.
if updateMode == UpdateModeForceRecovery && found {
return fmt.Errorf("parser: contains 'zbi' or 'zbi.signed', this is not allowed in force-recovery update mode")
}
return nil
}
func WriteImgs(dataSink *paver.DataSinkWithCtxInterface, bootManager *paver.BootManagerWithCtxInterface, imgs []Image, updatePkg *UpdatePackage, updateMode UpdateMode, skipRecovery bool) error {
if updateMode == UpdateModeForceRecovery && skipRecovery == true {
return fmt.Errorf("can't force recovery when skipping recovery image installation")
}
syslog.Infof("Writing images %+v from update package", imgs)
activeConfig, err := queryActiveConfig(bootManager)
if err != nil {
return fmt.Errorf("querying target config: %v", err)
}
// If we have an active config (and thus support ABR), compute the
// target config. Otherwise set the target config to nil so we fall
// back to the legacy behavior where we write to the A partition, and
// attempt to write to the B partition.
var targetConfig *paver.Configuration
if activeConfig == nil {
targetConfig = nil
} else {
targetConfig, err = calculateTargetConfig(*activeConfig)
if err != nil {
return err
}
}
for _, img := range imgs {
if err := writeImg(dataSink, img, updatePkg, targetConfig, skipRecovery); err != nil {
return err
}
}
if updateMode == UpdateModeNormal && targetConfig != nil {
if err := setConfigurationActive(bootManager, *targetConfig); err != nil {
return err
}
} else if updateMode == UpdateModeForceRecovery {
for _, config := range []paver.Configuration{paver.ConfigurationA, paver.ConfigurationB} {
if err := setConfigurationUnbootable(bootManager, config); err != nil {
return fmt.Errorf("failed to set configuration unbootable: %v", err)
}
}
}
if err = flushDataSink(dataSink); err != nil {
return fmt.Errorf("img_writer: failed to flush data sink. %v", err)
}
if targetConfig != nil {
if err = flushBootManager(bootManager); err != nil {
return fmt.Errorf("img_writer: failed to flush boot manager. %v", err)
}
}
return nil
}
// flushDataSink flushes buffered writes in the paver data sink, converting a
// non-OK zx status into a *zx.Error.
func flushDataSink(dataSink *paver.DataSinkWithCtxInterface) error {
	rawStatus, err := dataSink.Flush(context.Background())
	if err != nil {
		return err
	}
	st := zx.Status(rawStatus)
	if st == zx.ErrOk {
		return nil
	}
	return &zx.Error{Status: st}
}
// flushBootManager flushes pending boot-configuration changes, converting a
// non-OK zx status into a *zx.Error.
func flushBootManager(bootManager *paver.BootManagerWithCtxInterface) error {
	rawStatus, err := bootManager.Flush(context.Background())
	if err != nil {
		return err
	}
	st := zx.Status(rawStatus)
	if st == zx.ErrOk {
		return nil
	}
	return &zx.Error{Status: st}
}
// queryActiveConfig asks the boot manager what partition the device booted
// from. If the device does not support ABR, it returns nil as the
// configuration.
//
// Two distinct signals mean "no ABR support": a peer-closed channel (see the
// FIXME below) and an explicit ZX_ERR_NOT_SUPPORTED status. Both yield
// (nil, nil); any other failure is returned as an error.
func queryActiveConfig(bootManager *paver.BootManagerWithCtxInterface) (*paver.Configuration, error) {
	activeConfig, err := bootManager.QueryActiveConfiguration(context.Background())
	if err != nil {
		// FIXME(fxb/43577): If the paver service runs into a problem
		// creating a boot manager, it will close the channel with an
		// epitaph. The error we are particularly interested in is
		// whether or not the current device supports ABR.
		// Unfortunately the go fidl bindings do not support epitaphs,
		// so we can't actually check for this error condition. All we
		// can observe is that the channel has been closed, so treat
		// this condition as the device does not support ABR.
		// Note: err is deliberately shadowed here with its *zx.Error form.
		if err, ok := err.(*zx.Error); ok && err.Status == zx.ErrPeerClosed {
			syslog.Warnf("img_writer: boot manager channel closed, assuming device does not support ABR")
			return nil, nil
		}
		return nil, fmt.Errorf("querying active config: %v", err)
	}
	// The result is a FIDL union: a response variant means the device
	// supports ABR and reported its active configuration.
	if activeConfig.Which() == paver.BootManagerQueryActiveConfigurationResultResponse {
		syslog.Infof("img_writer: device supports ABR")
		return &activeConfig.Response.Configuration, nil
	}
	// Otherwise the union carries a raw zx status code in Err.
	statusErr := zx.Status(activeConfig.Err)
	if statusErr == zx.ErrNotSupported {
		// this device doesn't support ABR, so fall back to the
		// legacy workflow.
		syslog.Infof("img_writer: device does not support ABR")
		return nil, nil
	}
	return nil, &zx.Error{Status: statusErr}
}
// calculateTargetConfig returns the configuration to write the update to,
// given the currently active one: A and B alternate, and a device currently
// booted from recovery falls back to writing A.
func calculateTargetConfig(activeConfig paver.Configuration) (*paver.Configuration, error) {
	var target paver.Configuration
	switch activeConfig {
	case paver.ConfigurationA:
		target = paver.ConfigurationB
	case paver.ConfigurationB:
		target = paver.ConfigurationA
	case paver.ConfigurationRecovery:
		syslog.Warnf("img_writer: configured for recovery, using partition A instead")
		target = paver.ConfigurationA
	default:
		return nil, fmt.Errorf("img_writer: unknown config: %s", activeConfig)
	}
	syslog.Infof("img_writer: writing to configuration %s", target)
	return &target, nil
}
// setConfigurationActive marks targetConfig as the configuration to boot on
// the next restart, converting a non-OK zx status into a *zx.Error.
func setConfigurationActive(bootManager *paver.BootManagerWithCtxInterface, targetConfig paver.Configuration) error {
	syslog.Infof("img_writer: setting configuration %s active", targetConfig)
	rawStatus, err := bootManager.SetConfigurationActive(context.Background(), targetConfig)
	if err != nil {
		return err
	}
	if st := zx.Status(rawStatus); st != zx.ErrOk {
		return &zx.Error{Status: st}
	}
	return nil
}
// setConfigurationUnbootable marks targetConfig as not bootable, converting a
// non-OK zx status into a *zx.Error.
func setConfigurationUnbootable(bootManager *paver.BootManagerWithCtxInterface, targetConfig paver.Configuration) error {
	syslog.Infof("img_writer: setting configuration %s unbootable", targetConfig)
	rawStatus, err := bootManager.SetConfigurationUnbootable(context.Background(), targetConfig)
	if err != nil {
		return err
	}
	if st := zx.Status(rawStatus); st != zx.ErrOk {
		return &zx.Error{Status: st}
	}
	return nil
}
// writeAsset writes a single asset payload to the given configuration via the
// paver data sink, converting a non-OK zx status into a *zx.Error.
func writeAsset(svc *paver.DataSinkWithCtxInterface, configuration paver.Configuration, asset paver.Asset, payload *mem.Buffer) error {
	syslog.Infof("img_writer: writing asset %q to %q", asset, configuration)
	rawStatus, err := svc.WriteAsset(context.Background(), configuration, asset, *payload)
	if err != nil {
		syslog.Errorf("img_writer: failed to write asset %q: %s", asset, err)
		return err
	}
	if st := zx.Status(rawStatus); st != zx.ErrOk {
		return &zx.Error{Status: st}
	}
	return nil
}
// writeImg writes a single image from the update package to the paver.
//
// Missing or zero-length image files are skipped with a warning. targetConfig
// is nil on devices without ABR support; in that case kernel and vbmeta
// images are written to the A partition and, best-effort, to B (tolerating
// devices that lack a B partition). skipRecovery suppresses the
// zedboot/recovery images.
func writeImg(svc *paver.DataSinkWithCtxInterface, img Image, updatePkg *UpdatePackage, targetConfig *paver.Configuration, skipRecovery bool) error {
	f, err := updatePkg.Open(img.Filename())
	if err != nil {
		syslog.Warnf("img_writer: %q image not found, skipping", img.Filename())
		return nil
	}
	// Close f on every return path. The original deferred the Close only
	// after the zero-length check below, leaking the handle on that path.
	defer f.Close()
	if fi, err := f.Stat(); err != nil || fi.Size() == 0 {
		syslog.Warnf("img_writer: %q zero length, skipping", img.Filename())
		return nil
	}
	buffer, err := bufferForFile(f)
	if err != nil {
		return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
	}
	defer buffer.Vmo.Close()
	// write performs the actual paver call(s) for this image; it is invoked
	// once below so logging and error wrapping are uniform for all cases.
	var write func() error
	switch img.Name {
	case "zbi", "zbi.signed":
		childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
		if err != nil {
			return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
		}
		buffer2 := &mem.Buffer{
			Vmo:  childVmo,
			Size: buffer.Size,
		}
		defer buffer2.Vmo.Close()
		if targetConfig == nil {
			// device does not support ABR, so write the ZBI to the
			// A partition. We also try to write to the B partition
			// in order to be forwards compatible with devices that
			// will eventually support ABR, but we ignore errors
			// because some devices won't have a B partition.
			write = func() error {
				if err := writeAsset(svc, paver.ConfigurationA, paver.AssetKernel, buffer); err != nil {
					return err
				}
				if err := writeAsset(svc, paver.ConfigurationB, paver.AssetKernel, buffer2); err != nil {
					if asZxErr, ok := err.(*zx.Error); ok && asZxErr.Status == zx.ErrNotSupported {
						syslog.Warnf("img_writer: skipping writing %q to B: %v", img.Filename(), err)
					} else {
						return err
					}
				}
				return nil
			}
		} else {
			// device supports ABR, so only write the ZBI to the
			// target partition.
			write = func() error {
				return writeAsset(svc, *targetConfig, paver.AssetKernel, buffer)
			}
		}
	case "fuchsia.vbmeta":
		childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
		if err != nil {
			return fmt.Errorf("img_writer: while getting vmo for %q: %q", img.Filename(), err)
		}
		buffer2 := &mem.Buffer{
			Vmo:  childVmo,
			Size: buffer.Size,
		}
		defer buffer2.Vmo.Close()
		if targetConfig == nil {
			// device does not support ABR, so write vbmeta to the
			// A partition, and try to write to the B partition,
			// mirroring the zbi case above. The original returned
			// early here, bypassing the common logging below and
			// failing hard when the B partition was absent, unlike
			// the zbi case its comment referred to.
			write = func() error {
				if err := writeAsset(svc, paver.ConfigurationA,
					paver.AssetVerifiedBootMetadata, buffer); err != nil {
					return err
				}
				if err := writeAsset(svc, paver.ConfigurationB, paver.AssetVerifiedBootMetadata, buffer2); err != nil {
					if asZxErr, ok := err.(*zx.Error); ok && asZxErr.Status == zx.ErrNotSupported {
						syslog.Warnf("img_writer: skipping writing %q to B: %v", img.Filename(), err)
					} else {
						return err
					}
				}
				return nil
			}
		} else {
			// device supports ABR, so write the vbmeta to the
			// target partition.
			write = func() error {
				return writeAsset(svc, *targetConfig, paver.AssetVerifiedBootMetadata, buffer2)
			}
		}
	case "zedboot", "zedboot.signed":
		if skipRecovery {
			return nil
		}
		write = func() error {
			return writeAsset(svc, paver.ConfigurationRecovery, paver.AssetKernel, buffer)
		}
	case "recovery.vbmeta":
		if skipRecovery {
			return nil
		}
		write = func() error {
			return writeAsset(svc, paver.ConfigurationRecovery, paver.AssetVerifiedBootMetadata, buffer)
		}
	case "bootloader":
		// Keep support for update packages still using the older "bootloader"
		// file, which is handled identically to "firmware" but without type
		// support so img.Type will always be "".
		fallthrough
	case "firmware":
		write = func() error {
			result, err := svc.WriteFirmware(context.Background(), img.Type, *buffer)
			if err != nil {
				return err
			}
			if result.Which() == paver.WriteFirmwareResultUnsupportedType {
				syslog.Infof("img_writer: skipping unsupported firmware type %q", img.Type)
				// Return nil here to skip unsupported types rather than failing.
				// This lets us add new types in the future without breaking
				// the update flow from older devices.
				return nil
			}
			statusErr := zx.Status(result.Status)
			if statusErr != zx.ErrOk {
				return fmt.Errorf("%s", statusErr)
			}
			return nil
		}
	case "board":
		// Informational only; nothing to pave.
		return nil
	default:
		return fmt.Errorf("unrecognized image %q", img.Filename())
	}
	syslog.Infof("img_writer: writing %q from update package", img.Filename())
	if err := write(); err != nil {
		return fmt.Errorf("img_writer: error writing %q: %q", img.Filename(), err)
	}
	syslog.Infof("img_writer: wrote %q successfully", img.Filename())
	return nil
}
// bufferForFile returns a mem.Buffer backed by a resizable copy-on-write
// child of the VMO underlying f. The parent VMO handle obtained over FIDL is
// closed before returning; the caller owns (and must Close) the child.
func bufferForFile(f *os.File) (*mem.Buffer, error) {
	// Recover the fdio object backing the file descriptor.
	// NOTE(review): a failed single-value type assertion here panics rather
	// than yielding nil; the nil check below only catches an interface
	// holding a typed nil — confirm FDIOForFD's contract.
	fio := syscall.FDIOForFD(int(f.Fd())).(*fdio.File)
	if fio == nil {
		return nil, fmt.Errorf("not fdio file")
	}
	// Ask the remote for a read-only VMO representing the file contents.
	status, buffer, err := fio.GetBuffer(fuchsiaio.VmoFlagRead)
	if err != nil {
		return nil, fmt.Errorf("GetBuffer fidl error: %q", err)
	}
	statusErr := zx.Status(status)
	if statusErr != zx.ErrOk {
		return nil, fmt.Errorf("GetBuffer error: %q", statusErr)
	}
	// The parent handle is no longer needed once the child exists.
	defer buffer.Vmo.Close()
	// VMOs acquired over FIDL are not guaranteed to be resizable, so create a child VMO that is.
	childVmo, err := buffer.Vmo.CreateChild(zx.VMOChildOptionCopyOnWrite|zx.VMOChildOptionResizable, 0, buffer.Size)
	if err != nil {
		return nil, err
	}
	return &mem.Buffer{
		Vmo:  childVmo,
		Size: buffer.Size,
	}, nil
}
// UpdateCurrentChannel persists the update channel info for a successful
// update by copying the recorded target channel over the current channel
// file. The copy is written to a ".part" temp file, synced, closed, and then
// renamed into place so readers never observe a partial file.
//
// Returns an error if no target channel was recorded or any step of the
// write/sync/close/rename sequence fails.
func UpdateCurrentChannel() error {
	targetPath := "/misc/ota/target_channel.json"
	contents, err := ioutil.ReadFile(targetPath)
	if err != nil {
		return fmt.Errorf("no target channel recorded in %v: %w", targetPath, err)
	}
	currentPath := "/misc/ota/current_channel.json"
	partPath := currentPath + ".part"
	f, err := os.Create(partPath)
	if err != nil {
		return fmt.Errorf("unable to write current channel to %v: %w", partPath, err)
	}
	// Check the write, sync, and close errors explicitly so a failure
	// surfaces before the rename; the original ignored Sync/Close results,
	// which could publish a truncated current_channel.json.
	buf := bytes.NewBuffer(contents)
	if _, err := buf.WriteTo(f); err != nil {
		f.Close()
		return fmt.Errorf("unable to write current channel to %v: %w", currentPath, err)
	}
	if err := f.Sync(); err != nil {
		f.Close()
		return fmt.Errorf("unable to sync %v: %w", partPath, err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("unable to close %v: %w", partPath, err)
	}
	if err := os.Rename(partPath, currentPath); err != nil {
		return fmt.Errorf("error moving %v to %v: %w", partPath, currentPath, err)
	}
	return nil
}
| FindTypedImages | identifier_name |
services.js | var myapp = angular.module('starter.services', []);
myapp.factory('FavoritesService', function () {
    // Fake paged data source: 100 generated news items, served 10 at a time.
    var items = [];
    for (var i = 0; i < 100; i++) {
        items.push({
            imgURL: './img/ionic.png',
            title: 'news' + (i + 1),
            content: 'news content' + (i + 1)
        });
    }
    var pageNumber = 0;
    var hasMoreItems = true;
    return {
        // Returns the next page of 10 items; [] once all 10 pages are served.
        getMoreItems: function () {
            console.log('[Service Favorites getMoreItems] Start');
            if (pageNumber === 10) {
                hasMoreItems = false;
                return [];
            }
            pageNumber += 1;
            console.log("[Service Favorites getMoreItems] pageNumber:" + pageNumber);
            var start = (pageNumber - 1) * 10;
            return items.slice(start, start + 10);
        },
        // True until getMoreItems has exhausted every page.
        hasMoreItems: function () {
            return hasMoreItems;
        }
    };
});
myapp.factory('Chats', function () {
    // Static fake chat data backing the chats tab.
    var chats = [{
        id: 0,
        name: 'Ben Sparrow',
        lastText: 'You on your way?',
        face: 'https://pbs.twimg.com/profile_images/514549811765211136/9SgAuHeY.png'
    }, {
        id: 1,
        name: 'Max Lynx',
        lastText: 'Hey, it\'s me',
        face: 'https://avatars3.githubusercontent.com/u/11214?v=3&s=460'
    }, {
        id: 2,
        name: 'Adam Bradleyson',
        lastText: 'I should buy a boat',
        face: 'https://pbs.twimg.com/profile_images/479090794058379264/84TKj_qa.jpeg'
    }, {
        id: 3,
        name: 'Perry Governor',
        lastText: 'Look at my mukluks!',
        face: 'https://pbs.twimg.com/profile_images/598205061232103424/3j5HUXMY.png'
    }, {
        id: 4,
        name: 'Mike Harrington',
        lastText: 'This is wicked good ice cream.',
        face: 'https://pbs.twimg.com/profile_images/578237281384841216/R3ae1n61.png'
    }];
    // Linear scan for a chat by its numeric id; null when absent.
    function lookup(chatId) {
        var wanted = parseInt(chatId);
        for (var idx = 0; idx < chats.length; idx++) {
            if (chats[idx].id === wanted) {
                return chats[idx];
            }
        }
        return null;
    }
    return {
        all: function () {
            return chats;
        },
        remove: function (chat) {
            chats.splice(chats.indexOf(chat), 1);
        },
        get: lookup
    };
});
// AuthService: PouchDB-backed user store plus an in-memory login flag.
myapp.factory('AuthService', function ($q) {
    var _db;
    // We'll need this later.
    var _users;
    var _loginFlg = false;
    return {
        initDB: initDB,
        // We'll add these later.
        getAllUsers: getAllUsers,
        addUser: addUser,
        validUser: validUser,
        updateUser: updateUser,
        deleteUser: deleteUser,
        getLoginFlg: getLoginFlg,
        setLoginFlg: setLoginFlg
    };
    // Opens (or creates) the 'users' database using the websql adapter.
    // NOTE(review): this posts a fresh admin/admin user on every call, so
    // repeated initDB calls accumulate duplicate admin docs — confirm intended.
    function initDB() {
        // Creates the database or opens if it already exists
        _db = new PouchDB('users', {adapter: 'websql'});
        addUser({username: 'admin', password: 'admin'});
    }
    // Inserts a new user doc; returns a $q-wrapped promise of the result.
    function addUser(user) {
        return $q.when(_db.post(user));
    }
    // Checks the given credentials against the cached user list and records
    // the outcome in _loginFlg.
    // NOTE(review): relies on _users being populated by a prior getAllUsers
    // call; before that, angular.forEach over undefined is a no-op and login
    // always fails — verify callers invoke getAllUsers first.
    function validUser(user) {
        _loginFlg = false;
        angular.forEach(_users, function (tmpUser) {
            if (user.username === tmpUser.username && user.password === tmpUser.password) {
                _loginFlg = true;
            }
        });
        return getLoginFlg();
    }
    // Returns the last login result.
    function getLoginFlg() {
        return _loginFlg;
    }
    // Overrides the login flag (e.g. for logout) and returns the new value.
    function setLoginFlg(loginFlg) {
        _loginFlg = loginFlg;
        return _loginFlg
    }
    // Updates an existing user doc (must carry _id/_rev).
    function updateUser(user) {
        return $q.when(_db.put(user));
    }
    // Removes a user doc (must carry _id/_rev).
    function deleteUser(user) {
        return $q.when(_db.remove(user));
    }
    // Returns all users, loading them from the database once and then serving
    // a live-updated in-memory cache.
    function getAllUsers() {
        if (!_users) {
            return $q.when(_db.allDocs({include_docs: true}))
                .then(function (docs) {
                    // Each row has a .doc object and we just want to send an
                    // array of user objects back to the calling controller,
                    // so let's map the array to contain just the .doc objects.
                    _users = docs.rows.map(function (row) {
                        // Dates are not automatically converted from a string.
                        row.doc.Date = new Date(row.doc.Date);
                        return row.doc;
                    });
                    // Listen for changes on the database.
                    _db.changes({live: true, since: 'now', include_docs: true})
                        .on('change', onDatabaseChange);
                    return _users;
                });
        } else {
            // Return cached data as a promise
            return $q.when(_users);
        }
    }
    // This function allows you to update the _birthdays array whenever there is a change on your database.
    // Applies a single change-feed event (insert/update/delete) to _users.
    function onDatabaseChange(change) {
        var index = findIndex(_users, change.id);
        var user = _users[index];
        if (change.deleted) {
            if (user) {
                _users.splice(index, 1); // delete
            }
        } else {
            if (user && user._id === change.id) {
                _users[index] = change.doc; // update
            } else {
                _users.splice(index, 0, change.doc); // insert
            }
        }
    }
    // Binary search, the array is by default sorted by _id.
    // Returns the insertion point for id (index of the first element >= id).
    function findIndex(array, id) {
        var low = 0, high = array.length, mid;
        while (low < high) {
            mid = (low + high) >>> 1;
            array[mid]._id < id ? low = mid + 1 : high = mid
        }
        return low;
    }
});
myapp.factory('CategoryService', function ($q) {
var local_db_name = 'FamilyMoneyTracker';
var remote_db_name = 'http://localhost:5984/FamilyMoneyTracker';
//PouchDB.debug.enable('*');
var local_db = new PouchDB(local_db_name);
var remote_db = new PouchDB(remote_db_name);
//local_db.sync(remote_db, {live: true});
return {
initialCategory: initialCategory,
getExpenseLargeWithSmallCategoryList: getExpenseLargeWithSmallCategoryList,
getExpenseLargeCategoryList: getExpenseLargeCategoryList,
getIncomeLargeCategoryList: getIncomeLargeCategoryList,
setLocalDefaultValue: setLocalDefaultValue,
getLocalDefaultValueByKey: getLocalDefaultValueByKey
};
function setLocalDefaultValue(key, default_value) {
var local_default_db = new PouchDB('local_default_value_db');
var doc = {};
var getPromise = local_default_db.get(key).then(function (result) {
doc = result;
doc.default_value = default_value;
}).catch(function (err) {
doc._id = key;
doc.default_value = default_value;
$q.when(doc);
});
return getPromise.then(function () {
return local_default_db.put(doc);
}).then(function (result) {
console.log('[Service setLocalDefaultValue]put default db value Success.');
console.log(result);
}).catch(function (err) {
console.log('[Service setLocalDefaultValue]put default db value Error.');
console.log(err);
throw err;
});
}
function getLocalDefaultValueByKey(key) {
var local_default_db = new PouchDB('local_default_value_db');
var defaultValue = '';
var doc = {};
return local_default_db.get(key).then(function (result) {
return $q.when(result.default_value);
}).catch(function (err) {
$q.when('');
});
}
// get expense large and small category list
function getExpenseLargeWithSmallCategoryList() {
console.log('[Service CategoryService getLargeAndSmallCategoryList] start');
return local_db.query('categoryDoc/expense_category_large_small').then(function (response) {
var largeCategoryAndSmallCategoryDocs = response.rows.map(function (response) {
return response.value;
});
var largeAndSmallCategoryList = largeCategoryAndSmallCategoryDocs;
var largeCategoryWithSmallCategoriesList = [];
var largeCategoryWithSmallCategories = {};
angular.forEach(largeAndSmallCategoryList, function (largeOrSmallCategory, i) {
// when large category
if (!largeOrSmallCategory.large_category_id) {
if (i !== 0) {
largeCategoryWithSmallCategoriesList.push(largeCategoryWithSmallCategories);
}
largeCategoryWithSmallCategories = largeOrSmallCategory;
largeCategoryWithSmallCategories.small_categories = [];
} else {
largeCategoryWithSmallCategories.small_categories.push(largeOrSmallCategory);
}
if (i === largeAndSmallCategoryList.length - 1) {
largeCategoryWithSmallCategoriesList.push(largeCategoryWithSmallCategories);
}
});
return $q.when(largeCategoryWithSmallCategoriesList);
}).then(function (results) {
console.log('[Service CategoryService getLargeAndSmallCategoryList]get large and small category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getLargeAndSmallCategoryList]get large and small category list Error.');
console.log(err);
return $q.when('');
});
}
// get expense large category list
function getExpenseLargeCategoryList() {
console.log('[Service CategoryService getExpenseLargeCategoryList] start');
return local_db.query('categoryDoc/expense_category_large').then(function (response) {
var largeCategory = response.rows.map(function (response) {
return response.value;
});
return $q.when(largeCategory);
}).then(function (results) {
console.log('[Service CategoryService getExpenseLargeCategoryList]get large category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getExpenseLargeCategoryList]get large category list Error.');
console.log(err);
return $q.when('');
});
}
// get income large category list
function getIncomeLargeCategoryList() {
console.log('[Service CategoryService getIncomeLargeCategoryList] start');
return local_db.query('categoryDoc/income_category_large').then(function (response) {
var largeCategory = response.rows.map(function (response) {
return response.value;
});
return $q.when(largeCategory);
}).then(function (results) {
console.log('[Service CategoryService getIncomeLargeCategoryList]get large category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getIncomeLargeCategoryList]get large category list Error.');
console.log(err);
return $q.when('');
});
}
function initialCategory() {
// delete large category data
return deleteAllRecords().then(function () {
// delete category design doc
return deleteCategoryDesignDoc();
}).then(function () {
// initialize category data
return initializeCategoryData();
}).then(function () {
// add category design doc
return createCategoryDesignDoc();
}).catch(function (err) {
console.log(err);
throw err;
});
}
/**
* Expense Large Category:
* id: 100, 101, 102
* expense_or_income: Expense
*
* Expense Small Category:
* id:
* large_category_id: 100
*
* Income Large Category:
* id: 200, 201, 202
* expense_or_income: Income
*
* Income Small Category:
* id:
* large_category_id: 100
*
*
* @returns {promise}
*/
function initializeCategoryData() {
console.log('[Service CategoryService initializeCategoryData] start');
var initialExpenseLargeCategoryNameArray = ['餐饮', '零食烟酒', '交通', '汽车', '住房', '购物', '娱乐', '通讯', '孩子', '居家', '医疗', '教育', '投资', '人情'];
var initialExpenseSmallCategoryNameArrayObject = {
'餐饮': ['三餐', '买菜原料', '夜宵', '油盐酱醋'],
'零食烟酒': ['饮料', '水果', '零食', '烟酒'],
'交通': ['打车', '公交', '地铁', '火车', '长途汽车', '飞机', '船舶'],
'汽车': ['加油', '停车费', '过路过桥', '保养维修', '车款车贷', '罚款赔偿', '车险', '驾照费用'],
'住房': ['家具家纺', '物业', '水电燃气', '房租', '房贷', '装修'],
'购物': ['服饰鞋包', '家居百货', '化妆护肤', '电子数码', '报刊书籍', '电器', '珠宝首饰', '保健用品', '摄影文印'],
'娱乐': ['旅游度假', '网游电玩', '电影', '洗浴足浴', '运动健身', '卡拉OK', '茶酒咖啡', '歌舞演出', '电视', '娱乐其他', '花鸟宠物', '麻将棋牌', '聚会玩乐'],
'通讯': ['手机电话', '电脑宽带'],
'孩子': ['学费', '教育', '文具', '玩具', '用品', '家教补习', '学杂教材'],
'居家': ['美发美容', '材料建材', '快递邮政', '家政服务', '生活费', '婚庆摄影', '漏记款', '保险费', '消费贷款', '税费手续费'],
'医疗': ['医疗药品', '挂号门诊', '养生保健', '住院费'],
'教育': ['培训考试'],
'人情': ['礼金红包', '物品', '请客', '代付款', '孝敬', '给予', '慈善捐款'],
'投资': ['股票', '基金', '理财产品', '余额宝', '银行存款', '保险', 'P2P', '证券期货', '出资', '贵金属', '投资贷款', '外汇', '收藏品', '利息支出']
};
var expenseCategoryDocArray = [];
angular.forEach(initialExpenseLargeCategoryNameArray, function (value, i) {
var expenseLargeCategoryDoc = getExpenseLargeCategorySchema();
expenseLargeCategoryDoc.id = (100 + i) + '';
expenseLargeCategoryDoc.name = value;
expenseLargeCategoryDoc.order = expenseLargeCategoryDoc.id;
// sort by type, large category order, large category id
expenseLargeCategoryDoc._id = pouchCollate.toIndexableString([expenseLargeCategoryDoc.type, expenseLargeCategoryDoc.id]);
expenseCategoryDocArray.push(expenseLargeCategoryDoc);
var initialExpenseSmallCategoryNameArray = initialExpenseSmallCategoryNameArrayObject[value];
if (initialExpenseSmallCategoryNameArray) {
angular.forEach(initialExpenseSmallCategoryNameArray, function (initialExpenseSmallCategoryName, j) {
var expenseSmallCategoryDoc = getExpenseSmallCategorySchema();
expenseSmallCategoryDoc.large_category_id = expenseLargeCategoryDoc._id;
expenseSmallCategoryDoc.id = ('00' + j).slice(-2);
expenseSmallCategoryDoc.name = initialExpenseSmallCategoryName;
expenseSmallCategoryDoc.order = expenseSmallCategoryDoc.id;
// sort by type, large category order, large category id
expenseSmallCategoryDoc._id = pouchCollate.toIndexableString([expenseSmallCategoryDoc.type, expenseSmallCategoryDoc.large_category_id, expenseSmallCategoryDoc.id]);
expenseCategoryDocArray.push(expenseSmallCategoryDoc);
});
}
});
var initialIncomeLargeCategoryNameArray = ['工资薪水', ' 奖金', ' 兼职外快', ' 福利补贴', ' 生活费', ' 公积金', ' 退款返款', ' 礼金', ' 红包', ' 赔付款', ' 漏记款', ' 报销款', ' 利息', ' 余额宝', ' 基金', ' 分红', ' 租金', ' 股票', ' 销售款', ' 应收款', ' 营业收入', ' 工程款'];
var incomeCategoryDocArray = [];
angular.forEach(initialIncomeLargeCategoryNameArray, function (value, i) {
var incomeLargeCategoryDoc = getIncomeLargeCategorySchema();
incomeLargeCategoryDoc.id = (200 + i) + '';
incomeLargeCategoryDoc.name = value;
incomeLargeCategoryDoc.order = incomeLargeCategoryDoc.id;
// sort by type, large category order, large category id
incomeLargeCategoryDoc._id = pouchCollate.toIndexableString([incomeLargeCategoryDoc.type, incomeLargeCategoryDoc.id]);
incomeCategoryDocArray.push(incomeLargeCategoryDoc);
});
return local_db.bulkDocs(expenseCategoryDocArray).then(function (results) {
console.log('[Service CategoryService initializeCategoryData] expense category bulkDocs finished');
console.log(results.length);
return local_db.bulkDocs(incomeCategoryDocArray);
}).then(function(results){
console.log('[Service CategoryService initializeCategoryData] income category bulkDocs finished');
console.log(results.length);
}).catch(function () {
console.log(err);
throw err;
});
}
function getExpenseLargeCategorySchema() {
var expenseLargeCategorySchema = {
'type': 'expense_category_large',
'_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return expenseLargeCategorySchema;
}
function getExpenseSmallCategorySchema() {
var expenseSmallCategorySchema = {
'type': 'expense_category_small',
'_id': '',
'large_category_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return expenseSmallCategorySchema;
}
function getIncomeLargeCategorySchema() {
var incomeLargeCategorySchema = {
'type': 'income_category_large',
'_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return incomeLargeCategorySchema;
}
function createCategoryDesignDoc() {
console.log('[Service CategoryService createCategoryDesignDoc] start');
// create a design doc
var designDocCategory = {
_id: '_design/categoryDoc',
views: {
'expense_category_large': {
map: function (doc) {
if (doc.type === 'expense_category_large') {
emit([doc._id], doc);
}
}.toString()
},
'expense_category_small': {
map: function (doc) {
if (doc.type === 'expense_category_small') {
emit([doc._id], doc);
}
}.toString()
},
'expense_category_large_small': {
map: function (doc) {
if (doc.type === 'expense_category_large') {
emit([doc._id, 0, doc.order], doc);
} else if (doc.type === 'expense_category_small') {
emit([doc.large_category_id, 1, doc.order], doc);
}
}.toString()
},
'income_category_large': {
map: function (doc) {
if (doc.type === 'income_category_large') {
emit([doc._id], doc);
}
}.toString( | (designDocCategory).then(function () {
console.log('[Service CategoryService createCategoryDesignDoc]createCategoryDesignDoc finished.');
}).catch(function (err) {
console.log(err);
throw err;
});
}
function deleteCategoryDesignDoc() {
console.log('[Service CategoryService deleteCategoryDesignDoc] start');
return local_db.get('_design/categoryDoc').then(function (doc) {
return local_db.remove(doc);
}).then(function (result) {
console.log('[Service CategoryService deleteCategoryDesignDoc]remove design doc finished.');
console.log(result);
}).catch(function (err) {
console.log(err);
return $q.when('');
});
}
function deleteAllRecords() {
console.log('[Service CategoryService deleteLargeCategoryRecords] start');
return local_db.allDocs().then(function (response) {
return Promise.all(response.rows.map(function (row) {
return local_db.remove(row.id, row.value.rev);
}));
}).then(function (results) {
console.log('[Service CategoryService deleteLargeCategoryRecords]remove all category_large view recoreds finished');
console.log(results);
console.log(results.length);
}).catch(function (err) {
console.log(err);
return $q.when('');
});
}
});
| )
}
}
};
return local_db.put | conditional_block |
services.js | var myapp = angular.module('starter.services', []);
myapp.factory('FavoritesService', function () {
    // Fake paged data source: 100 generated news items, served 10 at a time.
    var items = [];
    for (var i = 0; i < 100; i++) {
        items.push({
            imgURL: './img/ionic.png',
            title: 'news' + (i + 1),
            content: 'news content' + (i + 1)
        });
    }
    var pageNumber = 0;
    var hasMoreItems = true;
    return {
        // Returns the next page of 10 items; [] once all 10 pages are served.
        getMoreItems: function () {
            console.log('[Service Favorites getMoreItems] Start');
            if (pageNumber === 10) {
                hasMoreItems = false;
                return [];
            }
            pageNumber += 1;
            console.log("[Service Favorites getMoreItems] pageNumber:" + pageNumber);
            var start = (pageNumber - 1) * 10;
            return items.slice(start, start + 10);
        },
        // True until getMoreItems has exhausted every page.
        hasMoreItems: function () {
            return hasMoreItems;
        }
    };
});
myapp.factory('Chats', function () {
    // Static fake chat data backing the chats tab.
    var chats = [{
        id: 0,
        name: 'Ben Sparrow',
        lastText: 'You on your way?',
        face: 'https://pbs.twimg.com/profile_images/514549811765211136/9SgAuHeY.png'
    }, {
        id: 1,
        name: 'Max Lynx',
        lastText: 'Hey, it\'s me',
        face: 'https://avatars3.githubusercontent.com/u/11214?v=3&s=460'
    }, {
        id: 2,
        name: 'Adam Bradleyson',
        lastText: 'I should buy a boat',
        face: 'https://pbs.twimg.com/profile_images/479090794058379264/84TKj_qa.jpeg'
    }, {
        id: 3,
        name: 'Perry Governor',
        lastText: 'Look at my mukluks!',
        face: 'https://pbs.twimg.com/profile_images/598205061232103424/3j5HUXMY.png'
    }, {
        id: 4,
        name: 'Mike Harrington',
        lastText: 'This is wicked good ice cream.',
        face: 'https://pbs.twimg.com/profile_images/578237281384841216/R3ae1n61.png'
    }];
    // Linear scan for a chat by its numeric id; null when absent.
    function lookup(chatId) {
        var wanted = parseInt(chatId);
        for (var idx = 0; idx < chats.length; idx++) {
            if (chats[idx].id === wanted) {
                return chats[idx];
            }
        }
        return null;
    }
    return {
        all: function () {
            return chats;
        },
        remove: function (chat) {
            chats.splice(chats.indexOf(chat), 1);
        },
        get: lookup
    };
});
// AuthService: PouchDB-backed user store plus an in-memory login flag.
myapp.factory('AuthService', function ($q) {
    var _db;
    // We'll need this later.
    var _users;
    var _loginFlg = false;
    return {
        initDB: initDB,
        // We'll add these later.
        getAllUsers: getAllUsers,
        addUser: addUser,
        validUser: validUser,
        updateUser: updateUser,
        deleteUser: deleteUser,
        getLoginFlg: getLoginFlg,
        setLoginFlg: setLoginFlg
    };
    // Opens (or creates) the 'users' database using the websql adapter.
    // NOTE(review): this posts a fresh admin/admin user on every call, so
    // repeated initDB calls accumulate duplicate admin docs — confirm intended.
    function initDB() {
        // Creates the database or opens if it already exists
        _db = new PouchDB('users', {adapter: 'websql'});
        addUser({username: 'admin', password: 'admin'});
    }
    // Inserts a new user doc; returns a $q-wrapped promise of the result.
    function addUser(user) {
        return $q.when(_db.post(user));
    }
    // Checks the given credentials against the cached user list and records
    // the outcome in _loginFlg.
    // NOTE(review): relies on _users being populated by a prior getAllUsers
    // call; before that, angular.forEach over undefined is a no-op and login
    // always fails — verify callers invoke getAllUsers first.
    function validUser(user) {
        _loginFlg = false;
        angular.forEach(_users, function (tmpUser) {
            if (user.username === tmpUser.username && user.password === tmpUser.password) {
                _loginFlg = true;
            }
        });
        return getLoginFlg();
    }
    // Returns the last login result.
    function getLoginFlg() {
        return _loginFlg;
    }
    // Overrides the login flag (e.g. for logout) and returns the new value.
    function setLoginFlg(loginFlg) {
        _loginFlg = loginFlg;
        return _loginFlg
    }
    // Updates an existing user doc (must carry _id/_rev).
    function updateUser(user) {
        return $q.when(_db.put(user));
    }
    // Removes a user doc (must carry _id/_rev).
    function deleteUser(user) {
        return $q.when(_db.remove(user));
    }
    // Returns all users, loading them from the database once and then serving
    // a live-updated in-memory cache.
    function getAllUsers() {
        if (!_users) {
            return $q.when(_db.allDocs({include_docs: true}))
                .then(function (docs) {
                    // Each row has a .doc object and we just want to send an
                    // array of user objects back to the calling controller,
                    // so let's map the array to contain just the .doc objects.
                    _users = docs.rows.map(function (row) {
                        // Dates are not automatically converted from a string.
                        row.doc.Date = new Date(row.doc.Date);
                        return row.doc;
                    });
                    // Listen for changes on the database.
                    _db.changes({live: true, since: 'now', include_docs: true})
                        .on('change', onDatabaseChange);
                    return _users;
                });
        } else {
            // Return cached data as a promise
            return $q.when(_users);
        }
    }
    // This function allows you to update the _birthdays array whenever there is a change on your database.
    // Applies a single change-feed event (insert/update/delete) to _users.
    function onDatabaseChange(change) {
        var index = findIndex(_users, change.id);
        var user = _users[index];
        if (change.deleted) {
            if (user) {
                _users.splice(index, 1); // delete
            }
        } else {
            if (user && user._id === change.id) {
                _users[index] = change.doc; // update
            } else {
                _users.splice(index, 0, change.doc); // insert
            }
        }
    }
    // Binary search, the array is by default sorted by _id.
    // Returns the insertion point for id (index of the first element >= id).
    function findIndex(array, id) {
        var low = 0, high = array.length, mid;
        while (low < high) {
            mid = (low + high) >>> 1;
            array[mid]._id < id ? low = mid + 1 : high = mid
        }
        return low;
    }
});
myapp.factory('CategoryService', function ($q) {
var local_db_name = 'FamilyMoneyTracker';
var remote_db_name = 'http://localhost:5984/FamilyMoneyTracker';
//PouchDB.debug.enable('*');
var local_db = new PouchDB(local_db_name);
var remote_db = new PouchDB(remote_db_name);
//local_db.sync(remote_db, {live: true});
return {
initialCategory: initialCategory,
getExpenseLargeWithSmallCategoryList: getExpenseLargeWithSmallCategoryList,
getExpenseLargeCategoryList: getExpenseLargeCategoryList,
getIncomeLargeCategoryList: getIncomeLargeCategoryList,
setLocalDefaultValue: setLocalDefaultValue,
getLocalDefaultValueByKey: getLocalDefaultValueByKey
};
function setLocalDefaultValue(key, default_value) {
var local_default_db = new PouchDB('local_default_value_db');
var doc = {};
var getPromise = local_default_db.get(key).then(function (result) {
doc = result;
doc.default_value = default_value;
}).catch(function (err) {
doc._id = key;
doc.default_value = default_value;
$q.when(doc);
});
return getPromise.then(function () {
return local_default_db.put(doc);
}).then(function (result) {
console.log('[Service setLocalDefaultValue]put default db value Success.');
console.log(result);
}).catch(function (err) {
console.log('[Service setLocalDefaultValue]put default db value Error.');
console.log(err);
throw err;
});
}
function getLocalDefaultValueByKey(key) {
var local_default_db = new PouchDB('local_default_value_db');
var defaultValue = '';
var doc = {};
return local_default_db.get(key).then(function (result) {
return $q.when(result.default_value);
}).catch(function (err) {
$q.when('');
});
}
// get expense large and small category list
function getExpenseLargeWithSmallCategoryList() {
console.log('[Service CategoryService getLargeAndSmallCategoryList] start');
return local_db.query('categoryDoc/expense_category_large_small').then(function (response) {
var largeCategoryAndSmallCategoryDocs = response.rows.map(function (response) {
return response.value;
});
var largeAndSmallCategoryList = largeCategoryAndSmallCategoryDocs;
var largeCategoryWithSmallCategoriesList = [];
var largeCategoryWithSmallCategories = {};
angular.forEach(largeAndSmallCategoryList, function (largeOrSmallCategory, i) {
// when large category
if (!largeOrSmallCategory.large_category_id) {
if (i !== 0) {
largeCategoryWithSmallCategoriesList.push(largeCategoryWithSmallCategories);
}
largeCategoryWithSmallCategories = largeOrSmallCategory;
largeCategoryWithSmallCategories.small_categories = [];
} else {
largeCategoryWithSmallCategories.small_categories.push(largeOrSmallCategory);
}
if (i === largeAndSmallCategoryList.length - 1) {
largeCategoryWithSmallCategoriesList.push(largeCategoryWithSmallCategories);
}
});
return $q.when(largeCategoryWithSmallCategoriesList);
}).then(function (results) {
console.log('[Service CategoryService getLargeAndSmallCategoryList]get large and small category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getLargeAndSmallCategoryList]get large and small category list Error.');
console.log(err);
return $q.when('');
});
}
// get expense large category list
function getExpenseLargeCategoryList() {
console.log('[Service CategoryService getExpenseLargeCategoryList] start');
return local_db.query('categoryDoc/expense_category_large').then(function (response) {
var largeCategory = response.rows.map(function (response) {
return response.value;
});
return $q.when(largeCategory);
}).then(function (results) {
console.log('[Service CategoryService getExpenseLargeCategoryList]get large category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getExpenseLargeCategoryList]get large category list Error.');
console.log(err);
return $q.when('');
});
}
// get income large category list
function getIncomeLargeCategoryList() {
console.log('[Service CategoryService getIncomeLargeCategoryList] start');
return local_db.query('categoryDoc/income_category_large').then(function (response) {
var largeCategory = response.rows.map(function (response) {
return response.value;
});
return $q.when(largeCategory);
}).then(function (results) {
console.log('[Service CategoryService getIncomeLargeCategoryList]get large category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getIncomeLargeCategoryList]get large category list Error.');
console.log(err);
return $q.when('');
});
}
function initialCategory() {
// delete large category data
return deleteAllRecords().then(function () {
// delete category design doc
return deleteCategoryDesignDoc();
}).then(function () {
// initialize category data
return initializeCategoryData();
}).then(function () {
// add category design doc
return createCategoryDesignDoc();
}).catch(function (err) {
console.log(err);
throw err;
});
}
/**
* Expense Large Category:
* id: 100, 101, 102
* expense_or_income: Expense
*
* Expense Small Category:
* id:
* large_category_id: 100
*
* Income Large Category:
* id: 200, 201, 202
* expense_or_income: Income
*
* Income Small Category:
* id:
* large_category_id: 100
*
*
* @returns {promise}
*/
function initializeCategoryData() {
console.log('[Service CategoryService initializeCategoryData] start');
var initialExpenseLargeCategoryNameArray = ['餐饮', '零食烟酒', '交通', '汽车', '住房', '购物', '娱乐', '通讯', '孩子', '居家', '医疗', '教育', '投资', '人情'];
var initialExpenseSmallCategoryNameArrayObject = {
'餐饮': ['三餐', '买菜原料', '夜宵', '油盐酱醋'],
'零食烟酒': ['饮料', '水果', '零食', '烟酒'],
'交通': ['打车', '公交', '地铁', '火车', '长途汽车', '飞机', '船舶'],
'汽车': ['加油', '停车费', '过路过桥', '保养维修', '车款车贷', '罚款赔偿', '车险', '驾照费用'],
'住房': ['家具家纺', '物业', '水电燃气', '房租', '房贷', '装修'],
'购物': ['服饰鞋包', '家居百货', '化妆护肤', '电子数码', '报刊书籍', '电器', '珠宝首饰', '保健用品', '摄影文印'],
'娱乐': ['旅游度假', '网游电玩', '电影', '洗浴足浴', '运动健身', '卡拉OK', '茶酒咖啡', '歌舞演出', '电视', '娱乐其他', '花鸟宠物', '麻将棋牌', '聚会玩乐'],
'通讯': ['手机电话', '电脑宽带'],
'孩子': ['学费', '教育', '文具', '玩具', '用品', '家教补习', '学杂教材'],
'居家': ['美发美容', '材料建材', '快递邮政', '家政服务', '生活费', '婚庆摄影', '漏记款', '保险费', '消费贷款', '税费手续费'],
'医疗': ['医疗药品', '挂号门诊', '养生保健', '住院费'],
'教育': ['培训考试'],
'人情': ['礼金红包', '物品', '请客', '代付款', '孝敬', '给予', '慈善捐款'],
'投资': ['股票', '基金', '理财产品', '余额宝', '银行存款', '保险', 'P2P', '证券期货', '出资', '贵金属', '投资贷款', '外汇', '收藏品', '利息支出']
};
var expenseCategoryDocArray = [];
angular.forEach(initialExpenseLargeCategoryNameArray, function (value, i) {
var expenseLargeCategoryDoc = getExpenseLargeCategorySchema();
expenseLargeCategoryDoc.id = (100 + i) + '';
expenseLargeCategoryDoc.name = value;
expenseLargeCategoryDoc.order = expenseLargeCategoryDoc.id;
// sort by type, large category order, large category id
expenseLargeCategoryDoc._id = pouchCollate.toIndexableString([expenseLargeCategoryDoc.type, expenseLargeCategoryDoc.id]);
expenseCategoryDocArray.push(expenseLargeCategoryDoc);
var initialExpenseSmallCategoryNameArray = initialExpenseSmallCategoryNameArrayObject[value];
if (initialExpenseSmallCategoryNameArray) {
angular.forEach(initialExpenseSmallCategoryNameArray, function (initialExpenseSmallCategoryName, j) {
var expenseSmallCategoryDoc = getExpenseSmallCategorySchema();
expenseSmallCategoryDoc.large_category_id = expenseLargeCategoryDoc._id;
expenseSmallCategoryDoc.id = ('00' + j).slice(-2);
expenseSmallCategoryDoc.name = initialExpenseSmallCategoryName;
expenseSmallCategoryDoc.order = expenseSmallCategoryDoc.id;
// sort by type, large category order, large category id
expenseSmallCategoryDoc._id = pouchCollate.toIndexableString([expenseSmallCategoryDoc.type, expenseSmallCategoryDoc.large_category_id, expenseSmallCategoryDoc.id]);
expenseCategoryDocArray.push(expenseSmallCategoryDoc);
});
}
});
var initialIncomeLargeCategoryNameArray = ['工资薪水', ' 奖金', ' 兼职外快', ' 福利补贴', ' 生活费', ' 公积金', ' 退款返款', ' 礼金', ' 红包', ' 赔付款', ' 漏记款', ' 报销款', ' 利息', ' 余额宝', ' 基金', ' 分红', ' 租金', ' 股票', ' 销售款', ' 应收款', ' 营业收入', ' 工程款'];
var incomeCategoryDocArray = [];
angular.forEach(initialIncomeLargeCategoryNameArray, function (value, i) {
var incomeLargeCategoryDoc = getIncomeLargeCategorySchema();
incomeLargeCategoryDoc.id = (200 + i) + '';
incomeLargeCategoryDoc.name = value;
incomeLargeCategoryDoc.order = incomeLargeCategoryDoc.id;
// sort by type, large category order, large category id
incomeLargeCategoryDoc._id = pouchCollate.toIndexableString([incomeLargeCategoryDoc.type, incomeLargeCategoryDoc.id]);
incomeCategoryDocArray.push(incomeLargeCategoryDoc);
});
return local_db.bulkDocs(expenseCategoryDocArray).then(function (results) {
console.log('[Service CategoryService initializeCategoryData] expense category bulkDocs finished');
console.log(results.length);
return local_db.bulkDocs(incomeCategoryDocArray);
}).then(function(results){
console.log('[Service CategoryService initializeCategoryData] income category bulkDocs finished');
console.log(results.length);
}).catch(function () {
console.log(err);
throw err;
});
}
function getExpenseLargeCategorySchema() {
var expenseLargeCategorySchema = {
'type': 'expense_category_large',
'_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return expenseLargeCategorySchema;
}
function getExpenseSmallCategorySchema() {
var expenseSmallCategorySchema = {
'type': 'expense_category_small',
'_id': '',
'large_category_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return expenseSmallCategorySchema;
}
function getIncomeLargeCategorySchema() {
var incomeLargeCategorySchema = {
'type': 'income_category_large',
'_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return incomeLargeCategorySchema;
}
function createCategoryDesignDoc() {
console.log('[Service CategoryService createCategoryDesignDoc] start');
// create a design doc
var designDocCategory = {
_id: '_design/categoryDoc',
views: {
'expense_category_large': {
map: function (doc) {
if (doc.type === 'expense_category_large') {
emit([doc._id], doc);
}
}.toString()
},
'expense_category_small': {
map: function (doc) {
| 'expense_category_small') {
emit([doc._id], doc);
}
}.toString()
},
'expense_category_large_small': {
map: function (doc) {
if (doc.type === 'expense_category_large') {
emit([doc._id, 0, doc.order], doc);
} else if (doc.type === 'expense_category_small') {
emit([doc.large_category_id, 1, doc.order], doc);
}
}.toString()
},
'income_category_large': {
map: function (doc) {
if (doc.type === 'income_category_large') {
emit([doc._id], doc);
}
}.toString()
}
}
};
return local_db.put(designDocCategory).then(function () {
console.log('[Service CategoryService createCategoryDesignDoc]createCategoryDesignDoc finished.');
}).catch(function (err) {
console.log(err);
throw err;
});
}
function deleteCategoryDesignDoc() {
console.log('[Service CategoryService deleteCategoryDesignDoc] start');
return local_db.get('_design/categoryDoc').then(function (doc) {
return local_db.remove(doc);
}).then(function (result) {
console.log('[Service CategoryService deleteCategoryDesignDoc]remove design doc finished.');
console.log(result);
}).catch(function (err) {
console.log(err);
return $q.when('');
});
}
function deleteAllRecords() {
console.log('[Service CategoryService deleteLargeCategoryRecords] start');
return local_db.allDocs().then(function (response) {
return Promise.all(response.rows.map(function (row) {
return local_db.remove(row.id, row.value.rev);
}));
}).then(function (results) {
console.log('[Service CategoryService deleteLargeCategoryRecords]remove all category_large view recoreds finished');
console.log(results);
console.log(results.length);
}).catch(function (err) {
console.log(err);
return $q.when('');
});
}
});
| if (doc.type === | identifier_name |
services.js | var myapp = angular.module('starter.services', []);
myapp.factory('FavoritesService', function () {
// Might use a resource here that returns a JSON array
// Some testing data
var items = [];
for (var i = 0; i < 100; i++) {
var item = {};
item.imgURL = './img/ionic.png';
item.title = 'news' + (i + 1);
item.content = 'news content' + (i + 1);
items.push(item);
}
var pageNumber = 0;
var hasMoreItems = true;
return {
getMoreItems: function () {
console.log('[Service Favorites getMoreItems] Start');
if (pageNumber === 10) {
hasMoreItems = false;
return [];
}
pageNumber = pageNumber + 1;
console.log("[Service Favorites getMoreItems] pageNumber:" + pageNumber);
return items.slice((pageNumber - 1) * 10, pageNumber * 10);
},
hasMoreItems: function () {
return hasMoreItems;
}
};
});
myapp.factory('Chats', function () {
// Might use a resource here that returns a JSON array
// Some fake testing data
var chats = [{
id: 0,
name: 'Ben Sparrow',
lastText: 'You on your way?',
face: 'https://pbs.twimg.com/profile_images/514549811765211136/9SgAuHeY.png'
}, {
id: 1,
name: 'Max Lynx',
lastText: 'Hey, it\'s me',
face: 'https://avatars3.githubusercontent.com/u/11214?v=3&s=460'
}, {
id: 2,
name: 'Adam Bradleyson',
lastText: 'I should buy a boat',
face: 'https://pbs.twimg.com/profile_images/479090794058379264/84TKj_qa.jpeg'
}, {
id: 3,
name: 'Perry Governor',
lastText: 'Look at my mukluks!',
face: 'https://pbs.twimg.com/profile_images/598205061232103424/3j5HUXMY.png'
}, {
id: 4,
name: 'Mike Harrington',
lastText: 'This is wicked good ice cream.',
face: 'https://pbs.twimg.com/profile_images/578237281384841216/R3ae1n61.png'
}];
return {
all: function () {
return chats;
},
remove: function (chat) {
chats.splice(chats.indexOf(chat), 1);
},
get: function (chatId) {
for (var i = 0; i < chats.length; i++) {
if (chats[i].id === parseInt(chatId)) {
return chats[i];
}
}
return null;
}
};
});
myapp.factory('AuthService', function ($q) {
var _db;
// We'll need this later.
var _users;
var _loginFlg = false;
return {
initDB: initDB,
// We'll add these later.
getAllUsers: getAllUsers,
addUser: addUser,
validUser: validUser,
updateUser: updateUser,
deleteUser: deleteUser,
getLoginFlg: getLoginFlg,
setLoginFlg: setLoginFlg
};
function initDB() {
// Creates the database or opens if it already exists
_db = new PouchDB('users', {adapter: 'websql'});
addUser({username: 'admin', password: 'admin'});
}
function addUser(user) {
return $q.when(_db.post(user));
}
function validUser(user) {
_loginFlg = false;
angular.forEach(_users, function (tmpUser) {
if (user.username === tmpUser.username && user.password === tmpUser.password) {
_loginFlg = true;
}
});
return getLoginFlg();
}
function getLoginFlg() {
return _loginFlg;
}
function setLoginFlg(loginFlg) {
_loginFlg = loginFlg;
return _loginFlg
}
function updateUser(user) {
return $q.when(_db.put(user));
}
function deleteUser(user) {
return $q.when(_db.remove(user));
}
function getAllUsers() {
if (!_users) {
return $q.when(_db.allDocs({include_docs: true}))
.then(function (docs) {
// Each row has a .doc object and we just want to send an
// array of user objects back to the calling controller,
// so let's map the array to contain just the .doc objects.
_users = docs.rows.map(function (row) {
// Dates are not automatically converted from a string.
row.doc.Date = new Date(row.doc.Date);
return row.doc;
});
// Listen for changes on the database.
_db.changes({live: true, since: 'now', include_docs: true})
.on('change', onDatabaseChange);
return _users;
});
} else {
// Return cached data as a promise
return $q.when(_users);
}
}
// This function allows you to update the _birthdays array whenever there is a change on your database.
function onDatabaseChange(change) {
var index = findIndex(_users, change.id);
var user = _users[index];
if (change.deleted) {
if (user) {
_users.splice(index, 1); // delete
}
} else {
if (user && user._id === change.id) {
_users[index] = change.doc; // update
} else {
_users.splice(index, 0, change.doc); // insert
}
}
}
// Binary search, the array is by default sorted by _id.
function findIndex(array, id) {
var low = 0, high = array.length, mid;
while (low < high) {
mid = (low + high) >>> 1;
array[mid]._id < id ? low = mid + 1 : high = mid
}
return low;
}
});
myapp.factory('CategoryService', function ($q) {
var local_db_name = 'FamilyMoneyTracker';
var remote_db_name = 'http://localhost:5984/FamilyMoneyTracker';
//PouchDB.debug.enable('*');
var local_db = new PouchDB(local_db_name);
var remote_db = new PouchDB(remote_db_name);
//local_db.sync(remote_db, {live: true});
return {
initialCategory: initialCategory,
getExpenseLargeWithSmallCategoryList: getExpenseLargeWithSmallCategoryList,
getExpenseLargeCategoryList: getExpenseLargeCategoryList,
getIncomeLargeCategoryList: getIncomeLargeCategoryList,
setLocalDefaultValue: setLocalDefaultValue,
getLocalDefaultValueByKey: getLocalDefaultValueByKey
};
function setLocalDefaultValue(key, default_value) {
var local_default_db = new PouchDB('local_default_value_db');
var doc = {};
var getPromise = local_default_db.get(key).then(function (result) {
doc = result;
doc.default_value = default_value;
}).catch(function (err) {
doc._id = key;
doc.default_value = default_value;
$q.when(doc);
});
return getPromise.then(function () {
return local_default_db.put(doc);
}).then(function (result) {
console.log('[Service setLocalDefaultValue]put default db value Success.');
console.log(result);
}).catch(function (err) {
console.log('[Service setLocalDefaultValue]put default db value Error.');
console.log(err);
throw err;
});
}
function getLocalDefaultValueByKey(key) {
var local_default_db = new PouchDB('local_default_value_db');
var defaultValue = '';
var doc = {};
return local_default_db.get(key).then(function (result) {
return $q.when(result.default_value);
}).catch(function (err) {
$q.when('');
});
}
// get expense large and small category list
function getExpenseLargeWithSmallCategoryList() {
console.log('[Service CategoryService getLargeAndSmallCategoryList] start');
return local_db.query('categoryDoc/expense_category_large_small').then(function (response) {
var largeCategoryAndSmallCategoryDocs = response.rows.map(function (response) {
return response.value;
});
var largeAndSmallCategoryList = largeCategoryAndSmallCategoryDocs;
var largeCategoryWithSmallCategoriesList = [];
var largeCategoryWithSmallCategories = {};
angular.forEach(largeAndSmallCategoryList, function (largeOrSmallCategory, i) {
// when large category
if (!largeOrSmallCategory.large_category_id) {
if (i !== 0) {
largeCategoryWithSmallCategoriesList.push(largeCategoryWithSmallCategories);
}
largeCategoryWithSmallCategories = largeOrSmallCategory;
largeCategoryWithSmallCategories.small_categories = [];
} else {
largeCategoryWithSmallCategories.small_categories.push(largeOrSmallCategory);
}
if (i === largeAndSmallCategoryList.length - 1) {
largeCategoryWithSmallCategoriesList.push(largeCategoryWithSmallCategories);
}
});
return $q.when(largeCategoryWithSmallCategoriesList);
}).then(function (results) {
console.log('[Service CategoryService getLargeAndSmallCategoryList]get large and small category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getLargeAndSmallCategoryList]get large and small category list Error.');
console.log(err);
return $q.when('');
});
}
// get expense large category list
function getExpenseLargeCategoryList() {
console.log('[Service CategoryService getExpenseLargeCategoryList] start');
return local_db.query('categoryDoc/expense_category_large').then(function (response) {
var largeCategory = response.rows.map(function (response) {
return response.value;
});
return $q.when(largeCategory);
}).then(function (results) {
console.log('[Service CategoryService getExpenseLargeCategoryList]get large category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getExpenseLargeCategoryList]get large category list Error.');
console.log(err);
return $q.when('');
});
}
// get income large category list
function getIncomeLargeCategoryList() {
console.log('[Service CategoryService getIncomeLargeCategoryList] start');
return local_db.query('categoryDoc/income_category_large').then(function (response) {
var largeCategory = response.rows.map(function (response) {
return response.value;
});
return $q.when(largeCategory);
}).then(function (results) {
console.log('[Service CategoryService getIncomeLargeCategoryList]get large category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getIncomeLargeCategoryList]get large category list Error.');
console.log(err);
return $q.when('');
});
}
function initialCategory() {
// delete large category data
return deleteAllRecords().then(function () {
// delete category design doc
return deleteCategoryDesignDoc();
}).then(function () {
// initialize category data
return initializeCategoryData();
}).then(function () {
// add category design doc
return createCategoryDesignDoc();
}).catch(function (err) {
console.log(err);
throw err;
});
}
/**
* Expense Large Category:
* id: 100, 101, 102
* expense_or_income: Expense
*
* Expense Small Category:
* id:
* large_category_id: 100
*
* Income Large Category:
* id: 200, 201, 202
* expense_or_income: Income
*
* Income Small Category:
* id:
* large_category_id: 100
*
*
* @returns {promise}
*/
function initializeCategoryData() {
console.log('[Service CategoryService initializeCategoryData] start');
var initialExpenseLargeCategoryNameArray = ['餐饮', '零食烟酒', '交通', '汽车', '住房', '购物', '娱乐', '通讯', '孩子', '居家', '医疗', '教育', '投资', '人情'];
var initialExpenseSmallCategoryNameArrayObject = {
'餐饮': ['三餐', '买菜原料', '夜宵', '油盐酱醋'],
'零食烟酒': ['饮料', '水果', '零食', '烟酒'],
'交通': ['打车', '公交', '地铁', '火车', '长途汽车', '飞机', '船舶'],
'汽车': ['加油', '停车费', '过路过桥', '保养维修', '车款车贷', '罚款赔偿', '车险', '驾照费用'],
'住房': ['家具家纺', '物业', '水电燃气', '房租', '房贷', '装修'],
'购物': ['服饰鞋包', '家居百货', '化妆护肤', '电子数码', '报刊书籍', '电器', '珠宝首饰', '保健用品', '摄影文印'],
'娱乐': ['旅游度假', '网游电玩', '电影', '洗浴足浴', '运动健身', '卡拉OK', '茶酒咖啡', '歌舞演出', '电视', '娱乐其他', '花鸟宠物', '麻将棋牌', '聚会玩乐'],
'通讯': ['手机电话', '电脑宽带'],
'孩子': ['学费', '教育', '文具', '玩具', '用品', '家教补习', '学杂教材'],
'居家': ['美发美容', '材料建材', '快递邮政', '家政服务', '生活费', '婚庆摄影', '漏记款', '保险费', '消费贷款', '税费手续费'],
'医疗': ['医疗药品', '挂号门诊', '养生保健', '住院费'],
'教育': ['培训考试'],
'人情': ['礼金红包', '物品', '请客', '代付款', '孝敬', '给予', '慈善捐款'],
'投资': ['股票', '基金', '理财产品', '余额宝', '银行存款', '保险', 'P2P', '证券期货', '出资', '贵金属', '投资贷款', '外汇', '收藏品', '利息支出']
};
var expenseCategoryDocArray = [];
angular.forEach(initialExpenseLargeCategoryNameArray, function (value, i) {
var expenseLargeCategoryDoc = getExpenseLargeCategorySchema();
expenseLargeCategoryDoc.id = (100 + i) + '';
expenseLargeCategoryDoc.name = value;
expenseLargeCategoryDoc.order = expenseLargeCategoryDoc.id;
// sort by type, large category order, large category id
expenseLargeCategoryDoc._id = pouchCollate.toIndexableString([expenseLargeCategoryDoc.type, expenseLargeCategoryDoc.id]);
expenseCategoryDocArray.push(expenseLargeCategoryDoc);
var initialExpenseSmallCategoryNameArray = initialExpenseSmallCategoryNameArrayObject[value];
if (initialExpenseSmallCategoryNameArray) {
angular.forEach(initialExpenseSmallCategoryNameArray, function (initialExpenseSmallCategoryName, j) {
var expenseSmallCategoryDoc = getExpenseSmallCategorySchema();
expenseSmallCategoryDoc.large_category_id = expenseLargeCategoryDoc._id;
expenseSmallCategoryDoc.id = ('00' + j).slice(-2);
expenseSmallCategoryDoc.name = initialExpenseSmallCategoryName;
expenseSmallCategoryDoc.order = expenseSmallCategoryDoc.id;
// sort by type, large category order, large category id
expenseSmallCategoryDoc._id = pouchCollate.toIndexableString([expenseSmallCategoryDoc.type, expenseSmallCategoryDoc.large_category_id, expenseSmallCategoryDoc.id]);
expenseCategoryDocArray.push(expenseSmallCategoryDoc);
});
}
});
var initialIncomeLargeCategoryNameArray = ['工资薪水', ' 奖金', ' 兼职外快', ' 福利补贴', ' 生活费', ' 公积金', ' 退款返款', ' 礼金', ' 红包', ' 赔付款', ' 漏记款', ' 报销款', ' 利息', ' 余额宝', ' 基金', ' 分红', ' 租金', ' 股票', ' 销售款', ' 应收款', ' 营业收入', ' 工程款'];
var incomeCategoryDocArray = [];
angular.forEach(initialIncomeLargeCategoryNameArray, function (value, i) {
var incomeLargeCategoryDoc = getIncomeLargeCategorySchema();
incomeLargeCategoryDoc.id = (200 + i) + '';
incomeLargeCategoryDoc.name = value;
incomeLargeCategoryDoc.order = incomeLargeCategoryDoc.id;
// sort by type, large category order, large category id
incomeLargeCategoryDoc._id = pouchCollate.toIndexableString([incomeLargeCategoryDoc.type, incomeLargeCategoryDoc.id]);
incomeCategoryDocArray.push(incomeLargeCategoryDoc);
});
return local_db.bulkDocs(expenseCategoryDocArray).then(function (results) {
console.log('[Service CategoryService initializeCategoryData] expense category bulkDocs finished');
console.log(results.length);
return local_db.bulkDocs(incomeCategoryDocArray);
}).then(function(results){
console.log('[Service CategoryService initializeCategoryData] income category bulkDocs finished');
console.log(results.length);
}).catch(function () {
console.log(err);
throw err;
});
}
function getExpenseLargeCategorySchema() {
var expenseLargeCategorySchema = {
'type': 'expense_category_large',
'_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return expenseLargeCategorySchema;
}
function getExpenseSmallCategorySchema() {
var expenseSmallCategorySchema = {
'type': 'expense_category_small',
'_id': '',
'large_category_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return expenseSmallCategorySchema;
}
function getIncomeLargeCategorySchema() {
var incomeLargeCategorySchema = {
'type': 'income_category_large',
'_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return incomeLargeCategorySchema;
}
function createCategoryDesignDoc() {
console.log('[Service CategoryService createCategoryDesignDoc] start');
// create a design doc
var designDocCategory = {
_id: '_design/categoryDoc',
views: {
'expense_category_large': {
map: function (doc) {
if (doc.type === 'expense_category_large') {
emit([doc._id], doc);
}
}.toString()
},
'expense_category_small': {
map: function (doc) {
if (doc.type === 'expense_category_small') {
emit([doc._id], doc);
}
}.toString()
},
'expense_category_large_small': {
map: function (doc) {
if (doc.type === 'expense_category_large') {
emit([doc._id, 0, doc.order], doc);
} else if (doc.type === 'expense_category_small') {
emit([doc.large_category_id, 1, doc.order], doc);
}
}.toString()
},
'income_category_large': {
map: function (doc) {
if (doc.type === 'income_category_large') { | }
}.toString()
}
}
};
return local_db.put(designDocCategory).then(function () {
console.log('[Service CategoryService createCategoryDesignDoc]createCategoryDesignDoc finished.');
}).catch(function (err) {
console.log(err);
throw err;
});
}
function deleteCategoryDesignDoc() {
console.log('[Service CategoryService deleteCategoryDesignDoc] start');
return local_db.get('_design/categoryDoc').then(function (doc) {
return local_db.remove(doc);
}).then(function (result) {
console.log('[Service CategoryService deleteCategoryDesignDoc]remove design doc finished.');
console.log(result);
}).catch(function (err) {
console.log(err);
return $q.when('');
});
}
function deleteAllRecords() {
console.log('[Service CategoryService deleteLargeCategoryRecords] start');
return local_db.allDocs().then(function (response) {
return Promise.all(response.rows.map(function (row) {
return local_db.remove(row.id, row.value.rev);
}));
}).then(function (results) {
console.log('[Service CategoryService deleteLargeCategoryRecords]remove all category_large view recoreds finished');
console.log(results);
console.log(results.length);
}).catch(function (err) {
console.log(err);
return $q.when('');
});
}
}); | emit([doc._id], doc); | random_line_split |
services.js | var myapp = angular.module('starter.services', []);
myapp.factory('FavoritesService', function () {
// Might use a resource here that returns a JSON array
// Some testing data
var items = [];
for (var i = 0; i < 100; i++) {
var item = {};
item.imgURL = './img/ionic.png';
item.title = 'news' + (i + 1);
item.content = 'news content' + (i + 1);
items.push(item);
}
var pageNumber = 0;
var hasMoreItems = true;
return {
getMoreItems: function () {
console.log('[Service Favorites getMoreItems] Start');
if (pageNumber === 10) {
hasMoreItems = false;
return [];
}
pageNumber = pageNumber + 1;
console.log("[Service Favorites getMoreItems] pageNumber:" + pageNumber);
return items.slice((pageNumber - 1) * 10, pageNumber * 10);
},
hasMoreItems: function () {
return hasMoreItems;
}
};
});
myapp.factory('Chats', function () {
// Might use a resource here that returns a JSON array
// Some fake testing data
var chats = [{
id: 0,
name: 'Ben Sparrow',
lastText: 'You on your way?',
face: 'https://pbs.twimg.com/profile_images/514549811765211136/9SgAuHeY.png'
}, {
id: 1,
name: 'Max Lynx',
lastText: 'Hey, it\'s me',
face: 'https://avatars3.githubusercontent.com/u/11214?v=3&s=460'
}, {
id: 2,
name: 'Adam Bradleyson',
lastText: 'I should buy a boat',
face: 'https://pbs.twimg.com/profile_images/479090794058379264/84TKj_qa.jpeg'
}, {
id: 3,
name: 'Perry Governor',
lastText: 'Look at my mukluks!',
face: 'https://pbs.twimg.com/profile_images/598205061232103424/3j5HUXMY.png'
}, {
id: 4,
name: 'Mike Harrington',
lastText: 'This is wicked good ice cream.',
face: 'https://pbs.twimg.com/profile_images/578237281384841216/R3ae1n61.png'
}];
return {
all: function () {
return chats;
},
remove: function (chat) {
chats.splice(chats.indexOf(chat), 1);
},
get: function (chatId) {
for (var i = 0; i < chats.length; i++) {
if (chats[i].id === parseInt(chatId)) {
return chats[i];
}
}
return null;
}
};
});
myapp.factory('AuthService', function ($q) {
var _db;
// We'll need this later.
var _users;
var _loginFlg = false;
return {
initDB: initDB,
// We'll add these later.
getAllUsers: getAllUsers,
addUser: addUser,
validUser: validUser,
updateUser: updateUser,
deleteUser: deleteUser,
getLoginFlg: getLoginFlg,
setLoginFlg: setLoginFlg
};
function initDB() {
// Creates the database or opens if it already exists
_db = new PouchDB('users', {adapter: 'websql'});
addUser({username: 'admin', password: 'admin'});
}
function addUser(user) {
return $q.when(_db.post(user));
}
function validUser(user) {
_loginFlg = false;
angular.forEach(_users, function (tmpUser) {
if (user.username === tmpUser.username && user.password === tmpUser.password) {
_loginFlg = true;
}
});
return getLoginFlg();
}
function getLoginFlg() {
return _loginFlg;
}
function setLoginFlg(loginFlg) {
_loginFlg = loginFlg;
return _loginFlg
}
function updateUser(user) {
return $q.when(_db.put(user));
}
function deleteUser(user) {
return $q.when(_db.remove(user));
}
function getAllUsers() {
if (!_users) {
return $q.when(_db.allDocs({include_docs: true}))
.then(function (docs) {
// Each row has a .doc object and we just want to send an
// array of user objects back to the calling controller,
// so let's map the array to contain just the .doc objects.
_users = docs.rows.map(function (row) {
// Dates are not automatically converted from a string.
row.doc.Date = new Date(row.doc.Date);
return row.doc;
});
// Listen for changes on the database.
_db.changes({live: true, since: 'now', include_docs: true})
.on('change', onDatabaseChange);
return _users;
});
} else {
// Return cached data as a promise
return $q.when(_users);
}
}
// This function allows you to update the _birthdays array whenever there is a change on your database.
function onDatabaseChange(change) {
var index = findIndex(_users, change.id);
var user = _users[index];
if (change.deleted) {
if (user) {
_users.splice(index, 1); // delete
}
} else {
if (user && user._id === change.id) {
_users[index] = change.doc; // update
} else {
_users.splice(index, 0, change.doc); // insert
}
}
}
// Binary search, the array is by default sorted by _id.
function findIndex(array, id) {
var low = 0, high = array.length, mid;
while (low < high) {
mid = (low + high) >>> 1;
array[mid]._id < id ? low = mid + 1 : high = mid
}
return low;
}
});
myapp.factory('CategoryService', function ($q) {
var local_db_name = 'FamilyMoneyTracker';
var remote_db_name = 'http://localhost:5984/FamilyMoneyTracker';
//PouchDB.debug.enable('*');
var local_db = new PouchDB(local_db_name);
var remote_db = new PouchDB(remote_db_name);
//local_db.sync(remote_db, {live: true});
return {
initialCategory: initialCategory,
getExpenseLargeWithSmallCategoryList: getExpenseLargeWithSmallCategoryList,
getExpenseLargeCategoryList: getExpenseLargeCategoryList,
getIncomeLargeCategoryList: getIncomeLargeCategoryList,
setLocalDefaultValue: setLocalDefaultValue,
getLocalDefaultValueByKey: getLocalDefaultValueByKey
};
function setLocalDefaultValue(key, default_value) {
var local_default_db = new PouchDB('local_default_value_db');
var doc = {};
var getPromise = local_default_db.get(key).then(function (result) {
doc = result;
doc.default_value = default_value;
}).catch(function (err) {
doc._id = key;
doc.default_value = default_value;
$q.when(doc);
});
return getPromise.then(function () {
return local_default_db.put(doc);
}).then(function (result) {
console.log('[Service setLocalDefaultValue]put default db value Success.');
console.log(result);
}).catch(function (err) {
console.log('[Service setLocalDefaultValue]put default db value Error.');
console.log(err);
throw err;
});
}
function getLocalDefaultValueByKey(key) |
// get expense large and small category list
function getExpenseLargeWithSmallCategoryList() {
console.log('[Service CategoryService getLargeAndSmallCategoryList] start');
return local_db.query('categoryDoc/expense_category_large_small').then(function (response) {
var largeCategoryAndSmallCategoryDocs = response.rows.map(function (response) {
return response.value;
});
var largeAndSmallCategoryList = largeCategoryAndSmallCategoryDocs;
var largeCategoryWithSmallCategoriesList = [];
var largeCategoryWithSmallCategories = {};
angular.forEach(largeAndSmallCategoryList, function (largeOrSmallCategory, i) {
// when large category
if (!largeOrSmallCategory.large_category_id) {
if (i !== 0) {
largeCategoryWithSmallCategoriesList.push(largeCategoryWithSmallCategories);
}
largeCategoryWithSmallCategories = largeOrSmallCategory;
largeCategoryWithSmallCategories.small_categories = [];
} else {
largeCategoryWithSmallCategories.small_categories.push(largeOrSmallCategory);
}
if (i === largeAndSmallCategoryList.length - 1) {
largeCategoryWithSmallCategoriesList.push(largeCategoryWithSmallCategories);
}
});
return $q.when(largeCategoryWithSmallCategoriesList);
}).then(function (results) {
console.log('[Service CategoryService getLargeAndSmallCategoryList]get large and small category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getLargeAndSmallCategoryList]get large and small category list Error.');
console.log(err);
return $q.when('');
});
}
// get expense large category list
function getExpenseLargeCategoryList() {
console.log('[Service CategoryService getExpenseLargeCategoryList] start');
return local_db.query('categoryDoc/expense_category_large').then(function (response) {
var largeCategory = response.rows.map(function (response) {
return response.value;
});
return $q.when(largeCategory);
}).then(function (results) {
console.log('[Service CategoryService getExpenseLargeCategoryList]get large category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getExpenseLargeCategoryList]get large category list Error.');
console.log(err);
return $q.when('');
});
}
// get income large category list
function getIncomeLargeCategoryList() {
console.log('[Service CategoryService getIncomeLargeCategoryList] start');
return local_db.query('categoryDoc/income_category_large').then(function (response) {
var largeCategory = response.rows.map(function (response) {
return response.value;
});
return $q.when(largeCategory);
}).then(function (results) {
console.log('[Service CategoryService getIncomeLargeCategoryList]get large category list Success.');
return $q.when(results);
}).catch(function (err) {
console.log('[Service CategoryService getIncomeLargeCategoryList]get large category list Error.');
console.log(err);
return $q.when('');
});
}
function initialCategory() {
// delete large category data
return deleteAllRecords().then(function () {
// delete category design doc
return deleteCategoryDesignDoc();
}).then(function () {
// initialize category data
return initializeCategoryData();
}).then(function () {
// add category design doc
return createCategoryDesignDoc();
}).catch(function (err) {
console.log(err);
throw err;
});
}
/**
* Expense Large Category:
* id: 100, 101, 102
* expense_or_income: Expense
*
* Expense Small Category:
* id:
* large_category_id: 100
*
* Income Large Category:
* id: 200, 201, 202
* expense_or_income: Income
*
* Income Small Category:
* id:
* large_category_id: 100
*
*
* @returns {promise}
*/
function initializeCategoryData() {
console.log('[Service CategoryService initializeCategoryData] start');
var initialExpenseLargeCategoryNameArray = ['餐饮', '零食烟酒', '交通', '汽车', '住房', '购物', '娱乐', '通讯', '孩子', '居家', '医疗', '教育', '投资', '人情'];
var initialExpenseSmallCategoryNameArrayObject = {
'餐饮': ['三餐', '买菜原料', '夜宵', '油盐酱醋'],
'零食烟酒': ['饮料', '水果', '零食', '烟酒'],
'交通': ['打车', '公交', '地铁', '火车', '长途汽车', '飞机', '船舶'],
'汽车': ['加油', '停车费', '过路过桥', '保养维修', '车款车贷', '罚款赔偿', '车险', '驾照费用'],
'住房': ['家具家纺', '物业', '水电燃气', '房租', '房贷', '装修'],
'购物': ['服饰鞋包', '家居百货', '化妆护肤', '电子数码', '报刊书籍', '电器', '珠宝首饰', '保健用品', '摄影文印'],
'娱乐': ['旅游度假', '网游电玩', '电影', '洗浴足浴', '运动健身', '卡拉OK', '茶酒咖啡', '歌舞演出', '电视', '娱乐其他', '花鸟宠物', '麻将棋牌', '聚会玩乐'],
'通讯': ['手机电话', '电脑宽带'],
'孩子': ['学费', '教育', '文具', '玩具', '用品', '家教补习', '学杂教材'],
'居家': ['美发美容', '材料建材', '快递邮政', '家政服务', '生活费', '婚庆摄影', '漏记款', '保险费', '消费贷款', '税费手续费'],
'医疗': ['医疗药品', '挂号门诊', '养生保健', '住院费'],
'教育': ['培训考试'],
'人情': ['礼金红包', '物品', '请客', '代付款', '孝敬', '给予', '慈善捐款'],
'投资': ['股票', '基金', '理财产品', '余额宝', '银行存款', '保险', 'P2P', '证券期货', '出资', '贵金属', '投资贷款', '外汇', '收藏品', '利息支出']
};
var expenseCategoryDocArray = [];
angular.forEach(initialExpenseLargeCategoryNameArray, function (value, i) {
var expenseLargeCategoryDoc = getExpenseLargeCategorySchema();
expenseLargeCategoryDoc.id = (100 + i) + '';
expenseLargeCategoryDoc.name = value;
expenseLargeCategoryDoc.order = expenseLargeCategoryDoc.id;
// sort by type, large category order, large category id
expenseLargeCategoryDoc._id = pouchCollate.toIndexableString([expenseLargeCategoryDoc.type, expenseLargeCategoryDoc.id]);
expenseCategoryDocArray.push(expenseLargeCategoryDoc);
var initialExpenseSmallCategoryNameArray = initialExpenseSmallCategoryNameArrayObject[value];
if (initialExpenseSmallCategoryNameArray) {
angular.forEach(initialExpenseSmallCategoryNameArray, function (initialExpenseSmallCategoryName, j) {
var expenseSmallCategoryDoc = getExpenseSmallCategorySchema();
expenseSmallCategoryDoc.large_category_id = expenseLargeCategoryDoc._id;
expenseSmallCategoryDoc.id = ('00' + j).slice(-2);
expenseSmallCategoryDoc.name = initialExpenseSmallCategoryName;
expenseSmallCategoryDoc.order = expenseSmallCategoryDoc.id;
// sort by type, large category order, large category id
expenseSmallCategoryDoc._id = pouchCollate.toIndexableString([expenseSmallCategoryDoc.type, expenseSmallCategoryDoc.large_category_id, expenseSmallCategoryDoc.id]);
expenseCategoryDocArray.push(expenseSmallCategoryDoc);
});
}
});
var initialIncomeLargeCategoryNameArray = ['工资薪水', ' 奖金', ' 兼职外快', ' 福利补贴', ' 生活费', ' 公积金', ' 退款返款', ' 礼金', ' 红包', ' 赔付款', ' 漏记款', ' 报销款', ' 利息', ' 余额宝', ' 基金', ' 分红', ' 租金', ' 股票', ' 销售款', ' 应收款', ' 营业收入', ' 工程款'];
var incomeCategoryDocArray = [];
angular.forEach(initialIncomeLargeCategoryNameArray, function (value, i) {
var incomeLargeCategoryDoc = getIncomeLargeCategorySchema();
incomeLargeCategoryDoc.id = (200 + i) + '';
incomeLargeCategoryDoc.name = value;
incomeLargeCategoryDoc.order = incomeLargeCategoryDoc.id;
// sort by type, large category order, large category id
incomeLargeCategoryDoc._id = pouchCollate.toIndexableString([incomeLargeCategoryDoc.type, incomeLargeCategoryDoc.id]);
incomeCategoryDocArray.push(incomeLargeCategoryDoc);
});
return local_db.bulkDocs(expenseCategoryDocArray).then(function (results) {
console.log('[Service CategoryService initializeCategoryData] expense category bulkDocs finished');
console.log(results.length);
return local_db.bulkDocs(incomeCategoryDocArray);
}).then(function(results){
console.log('[Service CategoryService initializeCategoryData] income category bulkDocs finished');
console.log(results.length);
}).catch(function () {
console.log(err);
throw err;
});
}
function getExpenseLargeCategorySchema() {
var expenseLargeCategorySchema = {
'type': 'expense_category_large',
'_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return expenseLargeCategorySchema;
}
function getExpenseSmallCategorySchema() {
var expenseSmallCategorySchema = {
'type': 'expense_category_small',
'_id': '',
'large_category_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return expenseSmallCategorySchema;
}
function getIncomeLargeCategorySchema() {
var incomeLargeCategorySchema = {
'type': 'income_category_large',
'_id': '',
'id': '',
'name': '',
'order': '',
'create_user': '',
'create_date': new Date().toJSON(),
'update_user': '',
'update_date': ''
};
return incomeLargeCategorySchema;
}
function createCategoryDesignDoc() {
console.log('[Service CategoryService createCategoryDesignDoc] start');
// create a design doc
var designDocCategory = {
_id: '_design/categoryDoc',
views: {
'expense_category_large': {
map: function (doc) {
if (doc.type === 'expense_category_large') {
emit([doc._id], doc);
}
}.toString()
},
'expense_category_small': {
map: function (doc) {
if (doc.type === 'expense_category_small') {
emit([doc._id], doc);
}
}.toString()
},
'expense_category_large_small': {
map: function (doc) {
if (doc.type === 'expense_category_large') {
emit([doc._id, 0, doc.order], doc);
} else if (doc.type === 'expense_category_small') {
emit([doc.large_category_id, 1, doc.order], doc);
}
}.toString()
},
'income_category_large': {
map: function (doc) {
if (doc.type === 'income_category_large') {
emit([doc._id], doc);
}
}.toString()
}
}
};
return local_db.put(designDocCategory).then(function () {
console.log('[Service CategoryService createCategoryDesignDoc]createCategoryDesignDoc finished.');
}).catch(function (err) {
console.log(err);
throw err;
});
}
function deleteCategoryDesignDoc() {
console.log('[Service CategoryService deleteCategoryDesignDoc] start');
return local_db.get('_design/categoryDoc').then(function (doc) {
return local_db.remove(doc);
}).then(function (result) {
console.log('[Service CategoryService deleteCategoryDesignDoc]remove design doc finished.');
console.log(result);
}).catch(function (err) {
console.log(err);
return $q.when('');
});
}
function deleteAllRecords() {
console.log('[Service CategoryService deleteLargeCategoryRecords] start');
return local_db.allDocs().then(function (response) {
return Promise.all(response.rows.map(function (row) {
return local_db.remove(row.id, row.value.rev);
}));
}).then(function (results) {
console.log('[Service CategoryService deleteLargeCategoryRecords]remove all category_large view recoreds finished');
console.log(results);
console.log(results.length);
}).catch(function (err) {
console.log(err);
return $q.when('');
});
}
});
| {
var local_default_db = new PouchDB('local_default_value_db');
var defaultValue = '';
var doc = {};
return local_default_db.get(key).then(function (result) {
return $q.when(result.default_value);
}).catch(function (err) {
$q.when('');
});
} | identifier_body |
packet_handler.rs | // Copyright 2020-2022 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use bee_gossip::Multiaddr;
use futures::{
channel::oneshot,
future::{self, FutureExt},
stream::StreamExt,
};
use log::trace;
use tokio::select;
use tokio_stream::wrappers::UnboundedReceiverStream;
use crate::packets::{HeaderPacket, HEADER_SIZE};
type EventRecv = UnboundedReceiverStream<Vec<u8>>;
type ShutdownRecv = future::Fuse<oneshot::Receiver<()>>;
/// The read state of the packet handler.
///
/// This type is used by `PacketHandler` to decide what should be read next when handling an
/// event.
enum ReadState {
/// `PacketHandler` should read a header.
Header,
/// `PacketHandler` should read a payload based on a header.
Payload(HeaderPacket),
}
/// A packet handler.
///
/// It takes care of processing events into packets that can be processed by the workers.
pub(super) struct PacketHandler {
events: EventHandler,
// FIXME: see if we can implement `Stream` for the `PacketHandler` and use the
// `ShutdownStream` type instead.
shutdown: ShutdownRecv,
state: ReadState,
/// The address of the peer. This field is only here for logging purposes.
address: Multiaddr,
}
impl PacketHandler {
/// Create a new packet handler from an event receiver, a shutdown receiver and the peer's
/// address.
pub(super) fn new(receiver: EventRecv, shutdown: ShutdownRecv, address: Multiaddr) -> Self {
Self {
events: EventHandler::new(receiver),
shutdown,
// The handler should read a header first.
state: ReadState::Header,
address,
}
}
/// Fetch the header and payload of a packet.
///
/// This method only returns `None` if a shutdown signal is received.
pub(super) async fn fetch_packet(&mut self) -> Option<(HeaderPacket, &[u8])> {
// loop until we can return the header and payload
loop {
match &self.state {
// Read a header.
ReadState::Header => {
// We need `HEADER_SIZE` bytes to read a header.
let bytes = self
.events
.fetch_bytes_or_shutdown(&mut self.shutdown, HEADER_SIZE)
.await?;
trace!("[{}] Reading Header...", self.address);
// This never panics because we fetch exactly `HEADER_SIZE` bytes.
let header = HeaderPacket::from_bytes(bytes.try_into().unwrap());
// Now we are ready to read a payload.
self.state = ReadState::Payload(header);
}
// Read a payload.
ReadState::Payload(header) => {
// We read the quantity of bytes stated by the header.
let bytes = self
.events
.fetch_bytes_or_shutdown(&mut self.shutdown, header.packet_length.into())
.await?;
// FIXME: Avoid this clone
let header = header.clone();
// Now we are ready to read the next packet's header.
self.state = ReadState::Header;
// We return the current packet's header and payload.
return Some((header, bytes));
}
}
}
}
}
// An event handler.
//
// This type takes care of actually receiving the events and appending them to an inner buffer so
// they can be used seamlessly by the `PacketHandler`.
struct EventHandler {
receiver: EventRecv,
buffer: Vec<u8>,
offset: usize,
}
impl EventHandler {
/// Create a new event handler from an event receiver.
fn new(receiver: EventRecv) -> Self {
Self {
receiver,
buffer: vec![],
offset: 0,
}
}
/// Push a new event into the buffer.
///
/// This method also removes the `..self.offset` range from the buffer and sets the offset back
/// to zero. Which means that this should only be called when the buffer is empty or when there
/// are not enough bytes to read a new header or payload.
fn push_event(&mut self, mut bytes: Vec<u8>) {
// Remove the already read bytes from the buffer.
self.buffer = self.buffer.split_off(self.offset);
// Reset the offset.
self.offset = 0;
// Append the bytes of the new event
self.buffer.append(&mut bytes);
}
/// Fetch a slice of bytes of a determined length.
///
/// The future returned by this method will be ready until there are enough bytes to fulfill
/// the request.
async fn fetch_bytes(&mut self, len: usize) -> &[u8] {
// We need to be sure that we have enough bytes in the buffer.
while self.offset + len > self.buffer.len() {
// If there are not enough bytes in the buffer, we must receive new events
if let Some(event) = self.receiver.next().await |
}
// Get the requested bytes. This will not panic because the loop above only exists if we
// have enough bytes to do this step.
let bytes = &self.buffer[self.offset..][..len];
// Increase the offset by the length of the byte slice.
self.offset += len;
bytes
}
/// Helper method to be able to shutdown when fetching bytes for a packet.
///
/// This method returns `None` if a shutdown signal is received, otherwise it returns the
/// requested bytes.
async fn fetch_bytes_or_shutdown(&mut self, shutdown: &mut ShutdownRecv, len: usize) -> Option<&'_ [u8]> {
select! {
// Always select `shutdown` first, otherwise you can end with an infinite loop.
_ = shutdown => None,
bytes = self.fetch_bytes(len).fuse() => Some(bytes),
}
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use futures::{channel::oneshot, future::FutureExt};
use tokio::{spawn, sync::mpsc, time::sleep};
use tokio_stream::wrappers::UnboundedReceiverStream;
use super::*;
/// Generate a vector of events filled with packets of a desired length.
fn gen_events(event_len: usize, msg_size: usize, n_msg: usize) -> Vec<Vec<u8>> {
// Bytes of all the packets.
let mut msgs = vec![0u8; msg_size * n_msg];
// We need 3 bytes for the header. Thus the packet length stored in the header should be 3
// bytes shorter.
let msg_len = ((msg_size - 3) as u16).to_le_bytes();
// We write the bytes that correspond to the packet length in the header.
for i in (0..n_msg).map(|i| i * msg_size + 1) {
msgs[i] = msg_len[0];
msgs[i + 1] = msg_len[1];
}
// Finally, we split all the bytes into events.
msgs.chunks(event_len).map(Vec::from).collect()
}
/// Test if the `PacketHandler` can produce an exact number of packets of a desired length,
/// divided in events of an specified length. This test checks that:
/// - The header and payload of all the packets have the right content.
/// - The number of produced packets is the desired one.
async fn test(event_size: usize, msg_size: usize, msg_count: usize) {
let msg_len = msg_size - 3;
// Produce the events
let events = gen_events(event_size, msg_size, msg_count);
// Create a new packet handler
let (sender_shutdown, receiver_shutdown) = oneshot::channel::<()>();
let (sender, receiver) = mpsc::unbounded_channel::<Vec<u8>>();
let mut msg_handler = PacketHandler::new(
UnboundedReceiverStream::new(receiver),
receiver_shutdown.fuse(),
"/ip4/0.0.0.0/tcp/8080".parse().unwrap(),
);
// Create the task that does the checks of the test.
let handle = spawn(async move {
// The packets are expected to be filled with zeroes except for the packet length
// field of the header.
let expected_bytes = vec![0u8; msg_len];
let expected_msg = (
HeaderPacket {
packet_type: 0,
packet_length: msg_len as u16,
},
expected_bytes.as_slice(),
);
// Count how many packets can be fetched.
let mut counter = 0;
while let Some(msg) = msg_handler.fetch_packet().await {
// Assert that the packets' content is correct.
assert_eq!(msg, expected_msg);
counter += 1;
}
// Assert that the number of packets is correct.
assert_eq!(msg_count, counter);
// Return back the packet handler to avoid dropping the channels.
msg_handler
});
// Send all the events to the packet handler.
for event in events {
sender.send(event).unwrap();
sleep(Duration::from_millis(1)).await;
}
// Sleep to be sure the handler had time to produce all the packets.
sleep(Duration::from_millis(1)).await;
// Send a shutdown signal.
sender_shutdown.send(()).unwrap();
// Await for the task with the checks to be completed.
assert!(handle.await.is_ok());
}
/// Test that packets are produced correctly when they are divided into one byte events.
#[tokio::test]
async fn one_byte_events() {
test(1, 5, 10).await;
}
/// Test that packets are produced correctly when each mes// let peer_id: PeerId =
/// Url::from_url_str("tcp://[::1]:16000").await.unwrap().into();sage fits exactly into an event.
#[tokio::test]
async fn one_packet_per_event() {
test(5, 5, 10).await;
}
/// Test that packets are produced correctly when two packets fit exactly into an event.
#[tokio::test]
async fn two_packets_per_event() {
test(10, 5, 10).await;
}
/// Test that packets are produced correctly when a packet fits exactly into two events.
#[tokio::test]
async fn two_events_per_packet() {
test(5, 10, 10).await;
}
/// Test that packets are produced correctly when a packet does not fit in a single event and
/// it is not aligned either.
#[tokio::test]
async fn misaligned_packets() {
test(3, 5, 10).await;
}
/// Test that the handler stops producing packets after receiving the shutdown signal.
///
/// This test is basically the same as the `one_packet_per_event` test. But the last event is
/// sent after the shutdown signal. As a consequence, the last packet is not produced by the
/// packet handler.
#[tokio::test]
async fn shutdown() {
let event_size = 5;
let msg_size = event_size;
let msg_count = 10;
let msg_len = msg_size - 3;
let mut events = gen_events(event_size, msg_size, msg_count);
// Put the last event into its own variable.
let last_event = events.pop().unwrap();
let (sender_shutdown, receiver_shutdown) = oneshot::channel::<()>();
let (sender, receiver) = mpsc::unbounded_channel::<Vec<u8>>();
let mut msg_handler = PacketHandler::new(
UnboundedReceiverStream::new(receiver),
receiver_shutdown.fuse(),
"/ip4/0.0.0.0/tcp/8080".parse().unwrap(),
);
let handle = spawn(async move {
let expected_bytes = vec![0u8; msg_len];
let expected_msg = (
HeaderPacket {
packet_type: 0,
packet_length: msg_len as u16,
},
expected_bytes.as_slice(),
);
let mut counter = 0;
while let Some(msg) = msg_handler.fetch_packet().await {
assert_eq!(msg, expected_msg);
counter += 1;
}
// Assert that we are missing one packet.
assert_eq!(msg_count - 1, counter);
msg_handler
});
for event in events {
sender.send(event).unwrap();
sleep(Duration::from_millis(1)).await;
}
sender_shutdown.send(()).unwrap();
sleep(Duration::from_millis(1)).await;
// Send the last event after the shutdown signal
sender.send(last_event).unwrap();
assert!(handle.await.is_ok());
}
}
| {
// If we received an event, we push it to the buffer.
self.push_event(event);
} | conditional_block |
packet_handler.rs | // Copyright 2020-2022 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use bee_gossip::Multiaddr;
use futures::{
channel::oneshot,
future::{self, FutureExt},
stream::StreamExt,
};
use log::trace;
use tokio::select;
use tokio_stream::wrappers::UnboundedReceiverStream;
use crate::packets::{HeaderPacket, HEADER_SIZE};
type EventRecv = UnboundedReceiverStream<Vec<u8>>;
type ShutdownRecv = future::Fuse<oneshot::Receiver<()>>;
/// The read state of the packet handler.
///
/// This type is used by `PacketHandler` to decide what should be read next when handling an
/// event.
enum ReadState {
/// `PacketHandler` should read a header.
Header,
/// `PacketHandler` should read a payload based on a header.
Payload(HeaderPacket),
}
/// A packet handler.
///
/// It takes care of processing events into packets that can be processed by the workers.
pub(super) struct PacketHandler {
events: EventHandler,
// FIXME: see if we can implement `Stream` for the `PacketHandler` and use the
// `ShutdownStream` type instead.
shutdown: ShutdownRecv,
state: ReadState,
/// The address of the peer. This field is only here for logging purposes.
address: Multiaddr,
}
impl PacketHandler {
/// Create a new packet handler from an event receiver, a shutdown receiver and the peer's
/// address.
pub(super) fn | (receiver: EventRecv, shutdown: ShutdownRecv, address: Multiaddr) -> Self {
Self {
events: EventHandler::new(receiver),
shutdown,
// The handler should read a header first.
state: ReadState::Header,
address,
}
}
/// Fetch the header and payload of a packet.
///
/// This method only returns `None` if a shutdown signal is received.
pub(super) async fn fetch_packet(&mut self) -> Option<(HeaderPacket, &[u8])> {
// loop until we can return the header and payload
loop {
match &self.state {
// Read a header.
ReadState::Header => {
// We need `HEADER_SIZE` bytes to read a header.
let bytes = self
.events
.fetch_bytes_or_shutdown(&mut self.shutdown, HEADER_SIZE)
.await?;
trace!("[{}] Reading Header...", self.address);
// This never panics because we fetch exactly `HEADER_SIZE` bytes.
let header = HeaderPacket::from_bytes(bytes.try_into().unwrap());
// Now we are ready to read a payload.
self.state = ReadState::Payload(header);
}
// Read a payload.
ReadState::Payload(header) => {
// We read the quantity of bytes stated by the header.
let bytes = self
.events
.fetch_bytes_or_shutdown(&mut self.shutdown, header.packet_length.into())
.await?;
// FIXME: Avoid this clone
let header = header.clone();
// Now we are ready to read the next packet's header.
self.state = ReadState::Header;
// We return the current packet's header and payload.
return Some((header, bytes));
}
}
}
}
}
// An event handler.
//
// This type takes care of actually receiving the events and appending them to an inner buffer so
// they can be used seamlessly by the `PacketHandler`.
struct EventHandler {
receiver: EventRecv,
buffer: Vec<u8>,
offset: usize,
}
impl EventHandler {
/// Create a new event handler from an event receiver.
fn new(receiver: EventRecv) -> Self {
Self {
receiver,
buffer: vec![],
offset: 0,
}
}
/// Push a new event into the buffer.
///
/// This method also removes the `..self.offset` range from the buffer and sets the offset back
/// to zero. Which means that this should only be called when the buffer is empty or when there
/// are not enough bytes to read a new header or payload.
fn push_event(&mut self, mut bytes: Vec<u8>) {
// Remove the already read bytes from the buffer.
self.buffer = self.buffer.split_off(self.offset);
// Reset the offset.
self.offset = 0;
// Append the bytes of the new event
self.buffer.append(&mut bytes);
}
/// Fetch a slice of bytes of a determined length.
///
/// The future returned by this method will be ready until there are enough bytes to fulfill
/// the request.
async fn fetch_bytes(&mut self, len: usize) -> &[u8] {
// We need to be sure that we have enough bytes in the buffer.
while self.offset + len > self.buffer.len() {
// If there are not enough bytes in the buffer, we must receive new events
if let Some(event) = self.receiver.next().await {
// If we received an event, we push it to the buffer.
self.push_event(event);
}
}
// Get the requested bytes. This will not panic because the loop above only exists if we
// have enough bytes to do this step.
let bytes = &self.buffer[self.offset..][..len];
// Increase the offset by the length of the byte slice.
self.offset += len;
bytes
}
/// Helper method to be able to shutdown when fetching bytes for a packet.
///
/// This method returns `None` if a shutdown signal is received, otherwise it returns the
/// requested bytes.
async fn fetch_bytes_or_shutdown(&mut self, shutdown: &mut ShutdownRecv, len: usize) -> Option<&'_ [u8]> {
select! {
// Always select `shutdown` first, otherwise you can end with an infinite loop.
_ = shutdown => None,
bytes = self.fetch_bytes(len).fuse() => Some(bytes),
}
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use futures::{channel::oneshot, future::FutureExt};
use tokio::{spawn, sync::mpsc, time::sleep};
use tokio_stream::wrappers::UnboundedReceiverStream;
use super::*;
/// Generate a vector of events filled with packets of a desired length.
fn gen_events(event_len: usize, msg_size: usize, n_msg: usize) -> Vec<Vec<u8>> {
// Bytes of all the packets.
let mut msgs = vec![0u8; msg_size * n_msg];
// We need 3 bytes for the header. Thus the packet length stored in the header should be 3
// bytes shorter.
let msg_len = ((msg_size - 3) as u16).to_le_bytes();
// We write the bytes that correspond to the packet length in the header.
for i in (0..n_msg).map(|i| i * msg_size + 1) {
msgs[i] = msg_len[0];
msgs[i + 1] = msg_len[1];
}
// Finally, we split all the bytes into events.
msgs.chunks(event_len).map(Vec::from).collect()
}
/// Test if the `PacketHandler` can produce an exact number of packets of a desired length,
/// divided in events of an specified length. This test checks that:
/// - The header and payload of all the packets have the right content.
/// - The number of produced packets is the desired one.
async fn test(event_size: usize, msg_size: usize, msg_count: usize) {
let msg_len = msg_size - 3;
// Produce the events
let events = gen_events(event_size, msg_size, msg_count);
// Create a new packet handler
let (sender_shutdown, receiver_shutdown) = oneshot::channel::<()>();
let (sender, receiver) = mpsc::unbounded_channel::<Vec<u8>>();
let mut msg_handler = PacketHandler::new(
UnboundedReceiverStream::new(receiver),
receiver_shutdown.fuse(),
"/ip4/0.0.0.0/tcp/8080".parse().unwrap(),
);
// Create the task that does the checks of the test.
let handle = spawn(async move {
// The packets are expected to be filled with zeroes except for the packet length
// field of the header.
let expected_bytes = vec![0u8; msg_len];
let expected_msg = (
HeaderPacket {
packet_type: 0,
packet_length: msg_len as u16,
},
expected_bytes.as_slice(),
);
// Count how many packets can be fetched.
let mut counter = 0;
while let Some(msg) = msg_handler.fetch_packet().await {
// Assert that the packets' content is correct.
assert_eq!(msg, expected_msg);
counter += 1;
}
// Assert that the number of packets is correct.
assert_eq!(msg_count, counter);
// Return back the packet handler to avoid dropping the channels.
msg_handler
});
// Send all the events to the packet handler.
for event in events {
sender.send(event).unwrap();
sleep(Duration::from_millis(1)).await;
}
// Sleep to be sure the handler had time to produce all the packets.
sleep(Duration::from_millis(1)).await;
// Send a shutdown signal.
sender_shutdown.send(()).unwrap();
// Await for the task with the checks to be completed.
assert!(handle.await.is_ok());
}
/// Test that packets are produced correctly when they are divided into one byte events.
#[tokio::test]
async fn one_byte_events() {
test(1, 5, 10).await;
}
/// Test that packets are produced correctly when each mes// let peer_id: PeerId =
/// Url::from_url_str("tcp://[::1]:16000").await.unwrap().into();sage fits exactly into an event.
#[tokio::test]
async fn one_packet_per_event() {
test(5, 5, 10).await;
}
/// Test that packets are produced correctly when two packets fit exactly into an event.
#[tokio::test]
async fn two_packets_per_event() {
test(10, 5, 10).await;
}
/// Test that packets are produced correctly when a packet fits exactly into two events.
#[tokio::test]
async fn two_events_per_packet() {
test(5, 10, 10).await;
}
/// Test that packets are produced correctly when a packet does not fit in a single event and
/// it is not aligned either.
#[tokio::test]
async fn misaligned_packets() {
test(3, 5, 10).await;
}
/// Test that the handler stops producing packets after receiving the shutdown signal.
///
/// This test is basically the same as the `one_packet_per_event` test. But the last event is
/// sent after the shutdown signal. As a consequence, the last packet is not produced by the
/// packet handler.
#[tokio::test]
async fn shutdown() {
let event_size = 5;
let msg_size = event_size;
let msg_count = 10;
let msg_len = msg_size - 3;
let mut events = gen_events(event_size, msg_size, msg_count);
// Put the last event into its own variable.
let last_event = events.pop().unwrap();
let (sender_shutdown, receiver_shutdown) = oneshot::channel::<()>();
let (sender, receiver) = mpsc::unbounded_channel::<Vec<u8>>();
let mut msg_handler = PacketHandler::new(
UnboundedReceiverStream::new(receiver),
receiver_shutdown.fuse(),
"/ip4/0.0.0.0/tcp/8080".parse().unwrap(),
);
let handle = spawn(async move {
let expected_bytes = vec![0u8; msg_len];
let expected_msg = (
HeaderPacket {
packet_type: 0,
packet_length: msg_len as u16,
},
expected_bytes.as_slice(),
);
let mut counter = 0;
while let Some(msg) = msg_handler.fetch_packet().await {
assert_eq!(msg, expected_msg);
counter += 1;
}
// Assert that we are missing one packet.
assert_eq!(msg_count - 1, counter);
msg_handler
});
for event in events {
sender.send(event).unwrap();
sleep(Duration::from_millis(1)).await;
}
sender_shutdown.send(()).unwrap();
sleep(Duration::from_millis(1)).await;
// Send the last event after the shutdown signal
sender.send(last_event).unwrap();
assert!(handle.await.is_ok());
}
}
| new | identifier_name |
packet_handler.rs | // Copyright 2020-2022 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use bee_gossip::Multiaddr;
use futures::{
channel::oneshot,
future::{self, FutureExt},
stream::StreamExt,
};
use log::trace;
use tokio::select;
use tokio_stream::wrappers::UnboundedReceiverStream;
use crate::packets::{HeaderPacket, HEADER_SIZE};
type EventRecv = UnboundedReceiverStream<Vec<u8>>;
type ShutdownRecv = future::Fuse<oneshot::Receiver<()>>;
/// The read state of the packet handler.
///
/// This type is used by `PacketHandler` to decide what should be read next when handling an
/// event.
enum ReadState {
/// `PacketHandler` should read a header.
Header,
/// `PacketHandler` should read a payload based on a header.
Payload(HeaderPacket),
}
/// A packet handler.
///
/// It takes care of processing events into packets that can be processed by the workers.
pub(super) struct PacketHandler {
events: EventHandler,
// FIXME: see if we can implement `Stream` for the `PacketHandler` and use the
// `ShutdownStream` type instead.
shutdown: ShutdownRecv,
state: ReadState,
/// The address of the peer. This field is only here for logging purposes.
address: Multiaddr,
}
impl PacketHandler {
/// Create a new packet handler from an event receiver, a shutdown receiver and the peer's
/// address.
pub(super) fn new(receiver: EventRecv, shutdown: ShutdownRecv, address: Multiaddr) -> Self {
Self {
events: EventHandler::new(receiver),
shutdown,
// The handler should read a header first.
state: ReadState::Header,
address,
}
}
/// Fetch the header and payload of a packet.
///
/// This method only returns `None` if a shutdown signal is received.
pub(super) async fn fetch_packet(&mut self) -> Option<(HeaderPacket, &[u8])> {
// loop until we can return the header and payload
loop {
match &self.state {
// Read a header.
ReadState::Header => {
// We need `HEADER_SIZE` bytes to read a header.
let bytes = self
.events
.fetch_bytes_or_shutdown(&mut self.shutdown, HEADER_SIZE)
.await?;
trace!("[{}] Reading Header...", self.address);
// This never panics because we fetch exactly `HEADER_SIZE` bytes.
let header = HeaderPacket::from_bytes(bytes.try_into().unwrap());
// Now we are ready to read a payload.
self.state = ReadState::Payload(header);
}
// Read a payload.
ReadState::Payload(header) => {
// We read the quantity of bytes stated by the header.
let bytes = self
.events
.fetch_bytes_or_shutdown(&mut self.shutdown, header.packet_length.into())
.await?;
// FIXME: Avoid this clone
let header = header.clone();
// Now we are ready to read the next packet's header.
self.state = ReadState::Header;
// We return the current packet's header and payload.
return Some((header, bytes));
}
}
}
}
}
// An event handler.
//
// This type takes care of actually receiving the events and appending them to an inner buffer so
// they can be used seamlessly by the `PacketHandler`.
struct EventHandler {
receiver: EventRecv,
buffer: Vec<u8>,
offset: usize,
}
impl EventHandler {
/// Create a new event handler from an event receiver.
fn new(receiver: EventRecv) -> Self {
Self {
receiver,
buffer: vec![],
offset: 0,
}
}
/// Push a new event into the buffer.
///
/// This method also removes the `..self.offset` range from the buffer and sets the offset back
/// to zero. Which means that this should only be called when the buffer is empty or when there
/// are not enough bytes to read a new header or payload.
fn push_event(&mut self, mut bytes: Vec<u8>) {
// Remove the already read bytes from the buffer.
self.buffer = self.buffer.split_off(self.offset);
// Reset the offset.
self.offset = 0;
// Append the bytes of the new event
self.buffer.append(&mut bytes);
}
/// Fetch a slice of bytes of a determined length.
///
/// The future returned by this method will be ready until there are enough bytes to fulfill
/// the request.
async fn fetch_bytes(&mut self, len: usize) -> &[u8] {
// We need to be sure that we have enough bytes in the buffer.
while self.offset + len > self.buffer.len() {
// If there are not enough bytes in the buffer, we must receive new events
if let Some(event) = self.receiver.next().await {
// If we received an event, we push it to the buffer. | self.push_event(event);
}
}
// Get the requested bytes. This will not panic because the loop above only exists if we
// have enough bytes to do this step.
let bytes = &self.buffer[self.offset..][..len];
// Increase the offset by the length of the byte slice.
self.offset += len;
bytes
}
/// Helper method to be able to shutdown when fetching bytes for a packet.
///
/// This method returns `None` if a shutdown signal is received, otherwise it returns the
/// requested bytes.
async fn fetch_bytes_or_shutdown(&mut self, shutdown: &mut ShutdownRecv, len: usize) -> Option<&'_ [u8]> {
select! {
// Always select `shutdown` first, otherwise you can end with an infinite loop.
_ = shutdown => None,
bytes = self.fetch_bytes(len).fuse() => Some(bytes),
}
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use futures::{channel::oneshot, future::FutureExt};
use tokio::{spawn, sync::mpsc, time::sleep};
use tokio_stream::wrappers::UnboundedReceiverStream;
use super::*;
/// Generate a vector of events filled with packets of a desired length.
fn gen_events(event_len: usize, msg_size: usize, n_msg: usize) -> Vec<Vec<u8>> {
// Bytes of all the packets.
let mut msgs = vec![0u8; msg_size * n_msg];
// We need 3 bytes for the header. Thus the packet length stored in the header should be 3
// bytes shorter.
let msg_len = ((msg_size - 3) as u16).to_le_bytes();
// We write the bytes that correspond to the packet length in the header.
for i in (0..n_msg).map(|i| i * msg_size + 1) {
msgs[i] = msg_len[0];
msgs[i + 1] = msg_len[1];
}
// Finally, we split all the bytes into events.
msgs.chunks(event_len).map(Vec::from).collect()
}
/// Test if the `PacketHandler` can produce an exact number of packets of a desired length,
/// divided in events of an specified length. This test checks that:
/// - The header and payload of all the packets have the right content.
/// - The number of produced packets is the desired one.
async fn test(event_size: usize, msg_size: usize, msg_count: usize) {
let msg_len = msg_size - 3;
// Produce the events
let events = gen_events(event_size, msg_size, msg_count);
// Create a new packet handler
let (sender_shutdown, receiver_shutdown) = oneshot::channel::<()>();
let (sender, receiver) = mpsc::unbounded_channel::<Vec<u8>>();
let mut msg_handler = PacketHandler::new(
UnboundedReceiverStream::new(receiver),
receiver_shutdown.fuse(),
"/ip4/0.0.0.0/tcp/8080".parse().unwrap(),
);
// Create the task that does the checks of the test.
let handle = spawn(async move {
// The packets are expected to be filled with zeroes except for the packet length
// field of the header.
let expected_bytes = vec![0u8; msg_len];
let expected_msg = (
HeaderPacket {
packet_type: 0,
packet_length: msg_len as u16,
},
expected_bytes.as_slice(),
);
// Count how many packets can be fetched.
let mut counter = 0;
while let Some(msg) = msg_handler.fetch_packet().await {
// Assert that the packets' content is correct.
assert_eq!(msg, expected_msg);
counter += 1;
}
// Assert that the number of packets is correct.
assert_eq!(msg_count, counter);
// Return back the packet handler to avoid dropping the channels.
msg_handler
});
// Send all the events to the packet handler.
for event in events {
sender.send(event).unwrap();
sleep(Duration::from_millis(1)).await;
}
// Sleep to be sure the handler had time to produce all the packets.
sleep(Duration::from_millis(1)).await;
// Send a shutdown signal.
sender_shutdown.send(()).unwrap();
// Await for the task with the checks to be completed.
assert!(handle.await.is_ok());
}
/// Test that packets are produced correctly when they are divided into one byte events.
#[tokio::test]
async fn one_byte_events() {
test(1, 5, 10).await;
}
/// Test that packets are produced correctly when each mes// let peer_id: PeerId =
/// Url::from_url_str("tcp://[::1]:16000").await.unwrap().into();sage fits exactly into an event.
#[tokio::test]
async fn one_packet_per_event() {
test(5, 5, 10).await;
}
/// Test that packets are produced correctly when two packets fit exactly into an event.
#[tokio::test]
async fn two_packets_per_event() {
test(10, 5, 10).await;
}
/// Test that packets are produced correctly when a packet fits exactly into two events.
#[tokio::test]
async fn two_events_per_packet() {
test(5, 10, 10).await;
}
/// Test that packets are produced correctly when a packet does not fit in a single event and
/// it is not aligned either.
#[tokio::test]
async fn misaligned_packets() {
test(3, 5, 10).await;
}
/// Test that the handler stops producing packets after receiving the shutdown signal.
///
/// This test is basically the same as the `one_packet_per_event` test. But the last event is
/// sent after the shutdown signal. As a consequence, the last packet is not produced by the
/// packet handler.
#[tokio::test]
async fn shutdown() {
let event_size = 5;
let msg_size = event_size;
let msg_count = 10;
let msg_len = msg_size - 3;
let mut events = gen_events(event_size, msg_size, msg_count);
// Put the last event into its own variable.
let last_event = events.pop().unwrap();
let (sender_shutdown, receiver_shutdown) = oneshot::channel::<()>();
let (sender, receiver) = mpsc::unbounded_channel::<Vec<u8>>();
let mut msg_handler = PacketHandler::new(
UnboundedReceiverStream::new(receiver),
receiver_shutdown.fuse(),
"/ip4/0.0.0.0/tcp/8080".parse().unwrap(),
);
let handle = spawn(async move {
let expected_bytes = vec![0u8; msg_len];
let expected_msg = (
HeaderPacket {
packet_type: 0,
packet_length: msg_len as u16,
},
expected_bytes.as_slice(),
);
let mut counter = 0;
while let Some(msg) = msg_handler.fetch_packet().await {
assert_eq!(msg, expected_msg);
counter += 1;
}
// Assert that we are missing one packet.
assert_eq!(msg_count - 1, counter);
msg_handler
});
for event in events {
sender.send(event).unwrap();
sleep(Duration::from_millis(1)).await;
}
sender_shutdown.send(()).unwrap();
sleep(Duration::from_millis(1)).await;
// Send the last event after the shutdown signal
sender.send(last_event).unwrap();
assert!(handle.await.is_ok());
}
} | random_line_split | |
MNIST_minimal.py | """
MNIST cINN: minimal
depends on: {FrEIA}
based on: https://github.com/VLL-HD/conditional_INNs
"""
__all__ = ["CONFIG", "MNISTcINN_minimal", "train", "evaluate"]
# imports
import numpy as np
from collections import OrderedDict
import torch
import torch.optim
import torch.nn as nn
from tqdm import tqdm
from time import time
import FrEIA.framework as Ff
import FrEIA.modules as Fm
from common import one_hot, MNISTData, MNISTDataPreprocessed, baseCONFIG, \
Visualizer, LiveVisualizer, sample_outputs, \
style_transfer, interpolation, val_loss, show_samples
########################################################################
# configuration
class CONFIG(baseCONFIG):
"""
Namspace for configuration
"""
# Data
data_mean = 0.128
data_std = 0.305
add_image_noise = 0.08
maxpool = False
img_size = (28, 28)
device = "cuda"
n_workers = 4
# Training
lr = 5e-4
batch_size = 256
#decay_by = 0.01
weight_decay = 1e-5
gamma = 0.1
milestones = [20, 40]
betas = (0.9, 0.999)
n_epochs = 20
init_scale = 0.03
pre_low_lr = 0
clip_grad_norm = 10.0
# Architecture
n_blocks = 20
internal_width = 64
clamping = 1.0
fc_dropout = 0.0
# Logging/preview
loss_names = ['L']
preview_upscale = 3 # Scale up the images for preview
sampling_temperature = 0.8 # Sample at a reduced temperature for the preview
progress_bar = True # Show a progress bar of each epoch
eval_steps_interploation = 12
eval_seeds_interpolation = (51, 89)
# Validation
pca_weights = [
[(0,0.55)],
[(1,0.1), (3, 0.4), (4, 0.5)],
[(2,0.33), (3, 0.33), (1, -0.33)]]
pca_gridsize = 10
pca_extent = 8.
# Paths
mnist_data = "../../../mnist_data"
save_dir = "../../../out/MNIST_minimal"
load_file = "../../../out/MNIST_minimal/mnist_minimal_checkpoint_" + \
f"width{internal_width}_epochs{n_epochs}.pt"
filename = "../../../out/MNIST_minimal/mnist_minimal_cinn_" + \
f"width{internal_width}_epochs{n_epochs}.pt"
loss_means_filename = save_dir + f"/val_losses_means_{n_epochs}e_{internal_width}w.txt"
loss_filename = save_dir + f"/val_losses_{n_epochs}e_{internal_width}w.txt"
checkpoint_save_interval = 20
checkpoint_save_overwrite = True
checkpoint_on_error = True
########################################################################
# model definition
class MNISTcINN_minimal(nn.Module):
"""
Conditional INN model for MNIST
"""
def __init__(self, config: object=CONFIG):
super().__init__()
self.c = config
self.cinn = self.build_inn()
self.trainable_parameters = [p for p in self.cinn.parameters() if p.requires_grad]
for p in self.trainable_parameters:
p.data = self.c.init_scale * torch.randn_like(p)
self.cinn.to(self.c.device)
#gamma = (self.c.decay_by)**(1. / self.c.n_epochs)
self.optimizer = torch.optim.Adam(self.trainable_parameters,
lr=self.c.lr,
weight_decay=self.c.weight_decay)
self.weight_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer,
#step_size=1,
milestones=self.c.milestones,
gamma=self.c.gamma)
def build_inn(self):
|
def forward(self, x, l, jac=True):
return self.cinn(x, c=one_hot(l), jac=jac)
def reverse_sample(self, z, l, jac=True):
return self.cinn(z, c=one_hot(l), rev=True, jac=jac)
def save(self, name):
save_dict = {"opt": self.optimizer.state_dict(),
"net": self.cinn.state_dict(),
"lr": self.weight_scheduler.state_dict()}
torch.save(save_dict, name)
def load(self, name):
state_dicts = torch.load(name)
self.cinn.load_state_dict(state_dicts["net"])
try:
self.optimizer.load_state_dict(state_dicts["opt"])
except ValueError:
print("Cannot load optimizer for some reason or other")
try:
self.weight_scheduler.load_state_dict(state_dicts["lr"])
except ValueError:
print("Cannot load optimizer for some reason or other")
########################################################################
# helper
def train(config):
model = MNISTcINN_minimal(config)
if config.maxpool:
data = MNISTDataPreprocessed(config)
else:
data = MNISTData(config)
t_start = time()
nll_mean = []
# memorize evolution of losses
val_losses_means = np.array([])
val_losses = np.array([])
try:
for i_epoch in range(-config.pre_low_lr, config.n_epochs):
if i_epoch < 0:
for param_group in model.optimizer.param_groups:
param_group['lr'] = config.lr * 2e-2
for i_batch, (x, l) in tqdm(enumerate(data.train_loader),
total=len(data.train_loader),
leave=False,
mininterval=1.,
disable=(not config.progress_bar),
ncols=83):
x, l = x.cuda(), l.cuda()
z, log_j = model(x, l)
nll = torch.mean(z**2) / 2 - torch.mean(log_j) / np.prod(config.img_size)
nll.backward()
torch.nn.utils.clip_grad_norm_(model.trainable_parameters,
config.clip_grad_norm)
nll_mean.append(nll.item())
model.optimizer.step()
model.optimizer.zero_grad()
with torch.no_grad():
model.eval()
z, log_j = model(data.val_x, data.val_l)
nll_val = torch.mean(z**2) / 2 - torch.mean(log_j) / np.prod(config.img_size)
model.train()
print('%.3i \t%.5i/%.5i \t%.2f \t%.6f\t%.6f\t%.2e' % (i_epoch,
i_batch, len(data.train_loader),
(time() - t_start)/60.,
np.mean(nll_mean),
nll_val.item(),
model.optimizer.param_groups[0]['lr'],
), flush=True)
val_losses_means = np.append(val_losses_means, np.mean(nll_mean))
val_losses = np.append(val_losses, nll_val.item())
nll_mean = []
model.weight_scheduler.step()
if (i_epoch % config.checkpoint_save_interval) == 0:
model.save(config.filename + '_checkpoint_%.4i' % (i_epoch * (1-config.checkpoint_save_overwrite)))
# save model and losses
model.save(config.filename)
np.savetxt(config.loss_means_filename, val_losses_means)
np.savetxt(config.loss_filename, val_losses)
except BaseException as b:
if config.checkpoint_on_error:
model.save(config.filename + "_ABORT")
raise b
def evaluate(config):
model = MNISTcINN_minimal(config)
model.load(config.filename)
model.eval()
if config.maxpool:
data = MNISTDataPreprocessed(config)
else:
data = MNISTData(config)
#for s in tqdm(range(0, 256)):
# torch.manual_seed(s)
# temperature(0.88, columns=1, save_as='./images/samples/T_%.4i.png' % (s))
# plt.title(str(s))
train_config = f"width{config.internal_width}_epochs{config.n_epochs}"
index_ins = [284, 394, 422, 759, 639, 599, 471, 449, 448, 426]
style_transfer(model, data, index_ins, config, train_config)
interpolation(model, config, train_config)
#for j in range(3):
# plt.figure()
# for i in range(10):
# plt.subplot(10, 1, i+1)
# val_set_pca(I=j, C=i)
# plt.title(str(j))
val_loss(model, data, config)
for i in range(10):
show_samples(model, data, config, i, train_config)
########################################################################
# execution
if __name__ == "__main__":
config = CONFIG()
import argparse
parser = argparse.ArgumentParser(description=config.str())
parser.add_argument("-t", "--train", action="store_true")
parser.add_argument("-e", "--eval", action="store_true")
parser.add_argument("-m", "--maxpool", action="store_true")
parser.add_argument("-d", "--downloadMNIST", action="store_true")
args = parser.parse_args()
if args.downloadMNIST:
data = MNISTData(config)
if args.maxpool:
config.maxpool = True
config.img_size = (14, 14)
config.data_mean = None
config.data_std = None
config.save_dir = "../../../out/MNIST_minimal_maxpool"
config.load_file = config.save_dir + "/mnist_minimal_maxpool_checkpoint_" + \
f"width{config.internal_width}_epochs{config.n_epochs}.pt"
config.filename = config.save_dir + "/mnist_minimal_maxpool_cinn_" + \
f"width{config.internal_width}_epochs{config.n_epochs}.pt"
config.loss_means_filename = config.save_dir + f"/val_losses_means_{config.n_epochs}e_{config.internal_width}w.txt"
config.loss_filename = config.save_dir + f"/val_losses_{config.n_epochs}e_{config.internal_width}w.txt"
if args.train:
# model training
print(config.str())
train(config)
if args.eval:
# model evaluation
print(config.str())
evaluate(config)
print("Done! Exit normaly.")
| def fc_subnet(ch_in, ch_out):
return nn.Sequential(nn.Linear(ch_in, self.c.internal_width),
nn.ReLU(),
nn.Linear(self.c.internal_width, ch_out))
cond = Ff.ConditionNode(10)
nodes = [Ff.InputNode(1, *self.c.img_size)]
nodes.append(Ff.Node(nodes[-1], Fm.Flatten, {}))
for k in range(self.c.n_blocks):
nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom,
{"seed": k}))
nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
{"subnet_constructor": fc_subnet,
"clamp": self.c.clamping},
conditions=cond))
nodes += [cond, Ff.OutputNode(nodes[-1])]
return Ff.ReversibleGraphNet(nodes, verbose=False) | identifier_body |
MNIST_minimal.py | """
MNIST cINN: minimal
depends on: {FrEIA}
based on: https://github.com/VLL-HD/conditional_INNs
"""
__all__ = ["CONFIG", "MNISTcINN_minimal", "train", "evaluate"]
# imports
import numpy as np
from collections import OrderedDict
import torch
import torch.optim
import torch.nn as nn
from tqdm import tqdm
from time import time
import FrEIA.framework as Ff
import FrEIA.modules as Fm
from common import one_hot, MNISTData, MNISTDataPreprocessed, baseCONFIG, \
Visualizer, LiveVisualizer, sample_outputs, \
style_transfer, interpolation, val_loss, show_samples
########################################################################
# configuration
class CONFIG(baseCONFIG):
"""
Namspace for configuration
"""
# Data
data_mean = 0.128
data_std = 0.305
add_image_noise = 0.08
maxpool = False
img_size = (28, 28)
device = "cuda"
n_workers = 4
# Training
lr = 5e-4
batch_size = 256
#decay_by = 0.01
weight_decay = 1e-5
gamma = 0.1
milestones = [20, 40]
betas = (0.9, 0.999)
n_epochs = 20
init_scale = 0.03
pre_low_lr = 0
clip_grad_norm = 10.0
# Architecture
n_blocks = 20
internal_width = 64
clamping = 1.0
fc_dropout = 0.0
# Logging/preview
loss_names = ['L']
preview_upscale = 3 # Scale up the images for preview
sampling_temperature = 0.8 # Sample at a reduced temperature for the preview
progress_bar = True # Show a progress bar of each epoch
eval_steps_interploation = 12
eval_seeds_interpolation = (51, 89)
# Validation
pca_weights = [
[(0,0.55)],
[(1,0.1), (3, 0.4), (4, 0.5)],
[(2,0.33), (3, 0.33), (1, -0.33)]]
pca_gridsize = 10
pca_extent = 8.
# Paths
mnist_data = "../../../mnist_data"
save_dir = "../../../out/MNIST_minimal"
load_file = "../../../out/MNIST_minimal/mnist_minimal_checkpoint_" + \
f"width{internal_width}_epochs{n_epochs}.pt"
filename = "../../../out/MNIST_minimal/mnist_minimal_cinn_" + \
f"width{internal_width}_epochs{n_epochs}.pt"
loss_means_filename = save_dir + f"/val_losses_means_{n_epochs}e_{internal_width}w.txt"
loss_filename = save_dir + f"/val_losses_{n_epochs}e_{internal_width}w.txt"
checkpoint_save_interval = 20
checkpoint_save_overwrite = True
checkpoint_on_error = True
########################################################################
# model definition
class MNISTcINN_minimal(nn.Module):
"""
Conditional INN model for MNIST
"""
def __init__(self, config: object=CONFIG):
super().__init__()
self.c = config
self.cinn = self.build_inn()
self.trainable_parameters = [p for p in self.cinn.parameters() if p.requires_grad]
for p in self.trainable_parameters:
p.data = self.c.init_scale * torch.randn_like(p)
self.cinn.to(self.c.device)
#gamma = (self.c.decay_by)**(1. / self.c.n_epochs)
self.optimizer = torch.optim.Adam(self.trainable_parameters,
lr=self.c.lr,
weight_decay=self.c.weight_decay)
self.weight_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer,
#step_size=1,
milestones=self.c.milestones,
gamma=self.c.gamma)
def build_inn(self):
def fc_subnet(ch_in, ch_out):
return nn.Sequential(nn.Linear(ch_in, self.c.internal_width),
nn.ReLU(),
nn.Linear(self.c.internal_width, ch_out))
cond = Ff.ConditionNode(10)
nodes = [Ff.InputNode(1, *self.c.img_size)]
nodes.append(Ff.Node(nodes[-1], Fm.Flatten, {}))
for k in range(self.c.n_blocks):
nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom,
{"seed": k}))
nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
{"subnet_constructor": fc_subnet,
"clamp": self.c.clamping},
conditions=cond))
nodes += [cond, Ff.OutputNode(nodes[-1])]
return Ff.ReversibleGraphNet(nodes, verbose=False)
def forward(self, x, l, jac=True):
return self.cinn(x, c=one_hot(l), jac=jac)
def reverse_sample(self, z, l, jac=True):
return self.cinn(z, c=one_hot(l), rev=True, jac=jac)
def save(self, name):
save_dict = {"opt": self.optimizer.state_dict(),
"net": self.cinn.state_dict(),
"lr": self.weight_scheduler.state_dict()}
torch.save(save_dict, name)
def load(self, name):
state_dicts = torch.load(name)
self.cinn.load_state_dict(state_dicts["net"])
try:
self.optimizer.load_state_dict(state_dicts["opt"])
except ValueError:
print("Cannot load optimizer for some reason or other")
try:
self.weight_scheduler.load_state_dict(state_dicts["lr"])
except ValueError:
print("Cannot load optimizer for some reason or other")
########################################################################
# helper
def train(config):
model = MNISTcINN_minimal(config)
if config.maxpool:
data = MNISTDataPreprocessed(config)
else:
data = MNISTData(config)
t_start = time()
nll_mean = []
# memorize evolution of losses
val_losses_means = np.array([])
val_losses = np.array([])
try:
for i_epoch in range(-config.pre_low_lr, config.n_epochs):
if i_epoch < 0:
for param_group in model.optimizer.param_groups:
param_group['lr'] = config.lr * 2e-2
for i_batch, (x, l) in tqdm(enumerate(data.train_loader),
total=len(data.train_loader),
leave=False,
mininterval=1.,
disable=(not config.progress_bar),
ncols=83):
x, l = x.cuda(), l.cuda()
z, log_j = model(x, l) | nll = torch.mean(z**2) / 2 - torch.mean(log_j) / np.prod(config.img_size)
nll.backward()
torch.nn.utils.clip_grad_norm_(model.trainable_parameters,
config.clip_grad_norm)
nll_mean.append(nll.item())
model.optimizer.step()
model.optimizer.zero_grad()
with torch.no_grad():
model.eval()
z, log_j = model(data.val_x, data.val_l)
nll_val = torch.mean(z**2) / 2 - torch.mean(log_j) / np.prod(config.img_size)
model.train()
print('%.3i \t%.5i/%.5i \t%.2f \t%.6f\t%.6f\t%.2e' % (i_epoch,
i_batch, len(data.train_loader),
(time() - t_start)/60.,
np.mean(nll_mean),
nll_val.item(),
model.optimizer.param_groups[0]['lr'],
), flush=True)
val_losses_means = np.append(val_losses_means, np.mean(nll_mean))
val_losses = np.append(val_losses, nll_val.item())
nll_mean = []
model.weight_scheduler.step()
if (i_epoch % config.checkpoint_save_interval) == 0:
model.save(config.filename + '_checkpoint_%.4i' % (i_epoch * (1-config.checkpoint_save_overwrite)))
# save model and losses
model.save(config.filename)
np.savetxt(config.loss_means_filename, val_losses_means)
np.savetxt(config.loss_filename, val_losses)
except BaseException as b:
if config.checkpoint_on_error:
model.save(config.filename + "_ABORT")
raise b
def evaluate(config):
model = MNISTcINN_minimal(config)
model.load(config.filename)
model.eval()
if config.maxpool:
data = MNISTDataPreprocessed(config)
else:
data = MNISTData(config)
#for s in tqdm(range(0, 256)):
# torch.manual_seed(s)
# temperature(0.88, columns=1, save_as='./images/samples/T_%.4i.png' % (s))
# plt.title(str(s))
train_config = f"width{config.internal_width}_epochs{config.n_epochs}"
index_ins = [284, 394, 422, 759, 639, 599, 471, 449, 448, 426]
style_transfer(model, data, index_ins, config, train_config)
interpolation(model, config, train_config)
#for j in range(3):
# plt.figure()
# for i in range(10):
# plt.subplot(10, 1, i+1)
# val_set_pca(I=j, C=i)
# plt.title(str(j))
val_loss(model, data, config)
for i in range(10):
show_samples(model, data, config, i, train_config)
########################################################################
# execution
if __name__ == "__main__":
config = CONFIG()
import argparse
parser = argparse.ArgumentParser(description=config.str())
parser.add_argument("-t", "--train", action="store_true")
parser.add_argument("-e", "--eval", action="store_true")
parser.add_argument("-m", "--maxpool", action="store_true")
parser.add_argument("-d", "--downloadMNIST", action="store_true")
args = parser.parse_args()
if args.downloadMNIST:
data = MNISTData(config)
if args.maxpool:
config.maxpool = True
config.img_size = (14, 14)
config.data_mean = None
config.data_std = None
config.save_dir = "../../../out/MNIST_minimal_maxpool"
config.load_file = config.save_dir + "/mnist_minimal_maxpool_checkpoint_" + \
f"width{config.internal_width}_epochs{config.n_epochs}.pt"
config.filename = config.save_dir + "/mnist_minimal_maxpool_cinn_" + \
f"width{config.internal_width}_epochs{config.n_epochs}.pt"
config.loss_means_filename = config.save_dir + f"/val_losses_means_{config.n_epochs}e_{config.internal_width}w.txt"
config.loss_filename = config.save_dir + f"/val_losses_{config.n_epochs}e_{config.internal_width}w.txt"
if args.train:
# model training
print(config.str())
train(config)
if args.eval:
# model evaluation
print(config.str())
evaluate(config)
print("Done! Exit normaly.") | random_line_split | |
MNIST_minimal.py | """
MNIST cINN: minimal
depends on: {FrEIA}
based on: https://github.com/VLL-HD/conditional_INNs
"""
__all__ = ["CONFIG", "MNISTcINN_minimal", "train", "evaluate"]
# imports
import numpy as np
from collections import OrderedDict
import torch
import torch.optim
import torch.nn as nn
from tqdm import tqdm
from time import time
import FrEIA.framework as Ff
import FrEIA.modules as Fm
from common import one_hot, MNISTData, MNISTDataPreprocessed, baseCONFIG, \
Visualizer, LiveVisualizer, sample_outputs, \
style_transfer, interpolation, val_loss, show_samples
########################################################################
# configuration
class CONFIG(baseCONFIG):
"""
Namspace for configuration
"""
# Data
data_mean = 0.128
data_std = 0.305
add_image_noise = 0.08
maxpool = False
img_size = (28, 28)
device = "cuda"
n_workers = 4
# Training
lr = 5e-4
batch_size = 256
#decay_by = 0.01
weight_decay = 1e-5
gamma = 0.1
milestones = [20, 40]
betas = (0.9, 0.999)
n_epochs = 20
init_scale = 0.03
pre_low_lr = 0
clip_grad_norm = 10.0
# Architecture
n_blocks = 20
internal_width = 64
clamping = 1.0
fc_dropout = 0.0
# Logging/preview
loss_names = ['L']
preview_upscale = 3 # Scale up the images for preview
sampling_temperature = 0.8 # Sample at a reduced temperature for the preview
progress_bar = True # Show a progress bar of each epoch
eval_steps_interploation = 12
eval_seeds_interpolation = (51, 89)
# Validation
pca_weights = [
[(0,0.55)],
[(1,0.1), (3, 0.4), (4, 0.5)],
[(2,0.33), (3, 0.33), (1, -0.33)]]
pca_gridsize = 10
pca_extent = 8.
# Paths
mnist_data = "../../../mnist_data"
save_dir = "../../../out/MNIST_minimal"
load_file = "../../../out/MNIST_minimal/mnist_minimal_checkpoint_" + \
f"width{internal_width}_epochs{n_epochs}.pt"
filename = "../../../out/MNIST_minimal/mnist_minimal_cinn_" + \
f"width{internal_width}_epochs{n_epochs}.pt"
loss_means_filename = save_dir + f"/val_losses_means_{n_epochs}e_{internal_width}w.txt"
loss_filename = save_dir + f"/val_losses_{n_epochs}e_{internal_width}w.txt"
checkpoint_save_interval = 20
checkpoint_save_overwrite = True
checkpoint_on_error = True
########################################################################
# model definition
class MNISTcINN_minimal(nn.Module):
"""
Conditional INN model for MNIST
"""
def __init__(self, config: object=CONFIG):
super().__init__()
self.c = config
self.cinn = self.build_inn()
self.trainable_parameters = [p for p in self.cinn.parameters() if p.requires_grad]
for p in self.trainable_parameters:
p.data = self.c.init_scale * torch.randn_like(p)
self.cinn.to(self.c.device)
#gamma = (self.c.decay_by)**(1. / self.c.n_epochs)
self.optimizer = torch.optim.Adam(self.trainable_parameters,
lr=self.c.lr,
weight_decay=self.c.weight_decay)
self.weight_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer,
#step_size=1,
milestones=self.c.milestones,
gamma=self.c.gamma)
def build_inn(self):
def fc_subnet(ch_in, ch_out):
return nn.Sequential(nn.Linear(ch_in, self.c.internal_width),
nn.ReLU(),
nn.Linear(self.c.internal_width, ch_out))
cond = Ff.ConditionNode(10)
nodes = [Ff.InputNode(1, *self.c.img_size)]
nodes.append(Ff.Node(nodes[-1], Fm.Flatten, {}))
for k in range(self.c.n_blocks):
nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom,
{"seed": k}))
nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
{"subnet_constructor": fc_subnet,
"clamp": self.c.clamping},
conditions=cond))
nodes += [cond, Ff.OutputNode(nodes[-1])]
return Ff.ReversibleGraphNet(nodes, verbose=False)
def forward(self, x, l, jac=True):
return self.cinn(x, c=one_hot(l), jac=jac)
def reverse_sample(self, z, l, jac=True):
return self.cinn(z, c=one_hot(l), rev=True, jac=jac)
def save(self, name):
save_dict = {"opt": self.optimizer.state_dict(),
"net": self.cinn.state_dict(),
"lr": self.weight_scheduler.state_dict()}
torch.save(save_dict, name)
def load(self, name):
state_dicts = torch.load(name)
self.cinn.load_state_dict(state_dicts["net"])
try:
self.optimizer.load_state_dict(state_dicts["opt"])
except ValueError:
print("Cannot load optimizer for some reason or other")
try:
self.weight_scheduler.load_state_dict(state_dicts["lr"])
except ValueError:
print("Cannot load optimizer for some reason or other")
########################################################################
# helper
def | (config):
model = MNISTcINN_minimal(config)
if config.maxpool:
data = MNISTDataPreprocessed(config)
else:
data = MNISTData(config)
t_start = time()
nll_mean = []
# memorize evolution of losses
val_losses_means = np.array([])
val_losses = np.array([])
try:
for i_epoch in range(-config.pre_low_lr, config.n_epochs):
if i_epoch < 0:
for param_group in model.optimizer.param_groups:
param_group['lr'] = config.lr * 2e-2
for i_batch, (x, l) in tqdm(enumerate(data.train_loader),
total=len(data.train_loader),
leave=False,
mininterval=1.,
disable=(not config.progress_bar),
ncols=83):
x, l = x.cuda(), l.cuda()
z, log_j = model(x, l)
nll = torch.mean(z**2) / 2 - torch.mean(log_j) / np.prod(config.img_size)
nll.backward()
torch.nn.utils.clip_grad_norm_(model.trainable_parameters,
config.clip_grad_norm)
nll_mean.append(nll.item())
model.optimizer.step()
model.optimizer.zero_grad()
with torch.no_grad():
model.eval()
z, log_j = model(data.val_x, data.val_l)
nll_val = torch.mean(z**2) / 2 - torch.mean(log_j) / np.prod(config.img_size)
model.train()
print('%.3i \t%.5i/%.5i \t%.2f \t%.6f\t%.6f\t%.2e' % (i_epoch,
i_batch, len(data.train_loader),
(time() - t_start)/60.,
np.mean(nll_mean),
nll_val.item(),
model.optimizer.param_groups[0]['lr'],
), flush=True)
val_losses_means = np.append(val_losses_means, np.mean(nll_mean))
val_losses = np.append(val_losses, nll_val.item())
nll_mean = []
model.weight_scheduler.step()
if (i_epoch % config.checkpoint_save_interval) == 0:
model.save(config.filename + '_checkpoint_%.4i' % (i_epoch * (1-config.checkpoint_save_overwrite)))
# save model and losses
model.save(config.filename)
np.savetxt(config.loss_means_filename, val_losses_means)
np.savetxt(config.loss_filename, val_losses)
except BaseException as b:
if config.checkpoint_on_error:
model.save(config.filename + "_ABORT")
raise b
def evaluate(config):
model = MNISTcINN_minimal(config)
model.load(config.filename)
model.eval()
if config.maxpool:
data = MNISTDataPreprocessed(config)
else:
data = MNISTData(config)
#for s in tqdm(range(0, 256)):
# torch.manual_seed(s)
# temperature(0.88, columns=1, save_as='./images/samples/T_%.4i.png' % (s))
# plt.title(str(s))
train_config = f"width{config.internal_width}_epochs{config.n_epochs}"
index_ins = [284, 394, 422, 759, 639, 599, 471, 449, 448, 426]
style_transfer(model, data, index_ins, config, train_config)
interpolation(model, config, train_config)
#for j in range(3):
# plt.figure()
# for i in range(10):
# plt.subplot(10, 1, i+1)
# val_set_pca(I=j, C=i)
# plt.title(str(j))
val_loss(model, data, config)
for i in range(10):
show_samples(model, data, config, i, train_config)
########################################################################
# execution
if __name__ == "__main__":
config = CONFIG()
import argparse
parser = argparse.ArgumentParser(description=config.str())
parser.add_argument("-t", "--train", action="store_true")
parser.add_argument("-e", "--eval", action="store_true")
parser.add_argument("-m", "--maxpool", action="store_true")
parser.add_argument("-d", "--downloadMNIST", action="store_true")
args = parser.parse_args()
if args.downloadMNIST:
data = MNISTData(config)
if args.maxpool:
config.maxpool = True
config.img_size = (14, 14)
config.data_mean = None
config.data_std = None
config.save_dir = "../../../out/MNIST_minimal_maxpool"
config.load_file = config.save_dir + "/mnist_minimal_maxpool_checkpoint_" + \
f"width{config.internal_width}_epochs{config.n_epochs}.pt"
config.filename = config.save_dir + "/mnist_minimal_maxpool_cinn_" + \
f"width{config.internal_width}_epochs{config.n_epochs}.pt"
config.loss_means_filename = config.save_dir + f"/val_losses_means_{config.n_epochs}e_{config.internal_width}w.txt"
config.loss_filename = config.save_dir + f"/val_losses_{config.n_epochs}e_{config.internal_width}w.txt"
if args.train:
# model training
print(config.str())
train(config)
if args.eval:
# model evaluation
print(config.str())
evaluate(config)
print("Done! Exit normaly.")
| train | identifier_name |
MNIST_minimal.py | """
MNIST cINN: minimal
depends on: {FrEIA}
based on: https://github.com/VLL-HD/conditional_INNs
"""
__all__ = ["CONFIG", "MNISTcINN_minimal", "train", "evaluate"]
# imports
import numpy as np
from collections import OrderedDict
import torch
import torch.optim
import torch.nn as nn
from tqdm import tqdm
from time import time
import FrEIA.framework as Ff
import FrEIA.modules as Fm
from common import one_hot, MNISTData, MNISTDataPreprocessed, baseCONFIG, \
Visualizer, LiveVisualizer, sample_outputs, \
style_transfer, interpolation, val_loss, show_samples
########################################################################
# configuration
class CONFIG(baseCONFIG):
"""
Namspace for configuration
"""
# Data
data_mean = 0.128
data_std = 0.305
add_image_noise = 0.08
maxpool = False
img_size = (28, 28)
device = "cuda"
n_workers = 4
# Training
lr = 5e-4
batch_size = 256
#decay_by = 0.01
weight_decay = 1e-5
gamma = 0.1
milestones = [20, 40]
betas = (0.9, 0.999)
n_epochs = 20
init_scale = 0.03
pre_low_lr = 0
clip_grad_norm = 10.0
# Architecture
n_blocks = 20
internal_width = 64
clamping = 1.0
fc_dropout = 0.0
# Logging/preview
loss_names = ['L']
preview_upscale = 3 # Scale up the images for preview
sampling_temperature = 0.8 # Sample at a reduced temperature for the preview
progress_bar = True # Show a progress bar of each epoch
eval_steps_interploation = 12
eval_seeds_interpolation = (51, 89)
# Validation
pca_weights = [
[(0,0.55)],
[(1,0.1), (3, 0.4), (4, 0.5)],
[(2,0.33), (3, 0.33), (1, -0.33)]]
pca_gridsize = 10
pca_extent = 8.
# Paths
mnist_data = "../../../mnist_data"
save_dir = "../../../out/MNIST_minimal"
load_file = "../../../out/MNIST_minimal/mnist_minimal_checkpoint_" + \
f"width{internal_width}_epochs{n_epochs}.pt"
filename = "../../../out/MNIST_minimal/mnist_minimal_cinn_" + \
f"width{internal_width}_epochs{n_epochs}.pt"
loss_means_filename = save_dir + f"/val_losses_means_{n_epochs}e_{internal_width}w.txt"
loss_filename = save_dir + f"/val_losses_{n_epochs}e_{internal_width}w.txt"
checkpoint_save_interval = 20
checkpoint_save_overwrite = True
checkpoint_on_error = True
########################################################################
# model definition
class MNISTcINN_minimal(nn.Module):
"""
Conditional INN model for MNIST
"""
def __init__(self, config: object=CONFIG):
super().__init__()
self.c = config
self.cinn = self.build_inn()
self.trainable_parameters = [p for p in self.cinn.parameters() if p.requires_grad]
for p in self.trainable_parameters:
p.data = self.c.init_scale * torch.randn_like(p)
self.cinn.to(self.c.device)
#gamma = (self.c.decay_by)**(1. / self.c.n_epochs)
self.optimizer = torch.optim.Adam(self.trainable_parameters,
lr=self.c.lr,
weight_decay=self.c.weight_decay)
self.weight_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer,
#step_size=1,
milestones=self.c.milestones,
gamma=self.c.gamma)
def build_inn(self):
def fc_subnet(ch_in, ch_out):
return nn.Sequential(nn.Linear(ch_in, self.c.internal_width),
nn.ReLU(),
nn.Linear(self.c.internal_width, ch_out))
cond = Ff.ConditionNode(10)
nodes = [Ff.InputNode(1, *self.c.img_size)]
nodes.append(Ff.Node(nodes[-1], Fm.Flatten, {}))
for k in range(self.c.n_blocks):
|
nodes += [cond, Ff.OutputNode(nodes[-1])]
return Ff.ReversibleGraphNet(nodes, verbose=False)
def forward(self, x, l, jac=True):
return self.cinn(x, c=one_hot(l), jac=jac)
def reverse_sample(self, z, l, jac=True):
return self.cinn(z, c=one_hot(l), rev=True, jac=jac)
def save(self, name):
save_dict = {"opt": self.optimizer.state_dict(),
"net": self.cinn.state_dict(),
"lr": self.weight_scheduler.state_dict()}
torch.save(save_dict, name)
def load(self, name):
state_dicts = torch.load(name)
self.cinn.load_state_dict(state_dicts["net"])
try:
self.optimizer.load_state_dict(state_dicts["opt"])
except ValueError:
print("Cannot load optimizer for some reason or other")
try:
self.weight_scheduler.load_state_dict(state_dicts["lr"])
except ValueError:
print("Cannot load optimizer for some reason or other")
########################################################################
# helper
def train(config):
model = MNISTcINN_minimal(config)
if config.maxpool:
data = MNISTDataPreprocessed(config)
else:
data = MNISTData(config)
t_start = time()
nll_mean = []
# memorize evolution of losses
val_losses_means = np.array([])
val_losses = np.array([])
try:
for i_epoch in range(-config.pre_low_lr, config.n_epochs):
if i_epoch < 0:
for param_group in model.optimizer.param_groups:
param_group['lr'] = config.lr * 2e-2
for i_batch, (x, l) in tqdm(enumerate(data.train_loader),
total=len(data.train_loader),
leave=False,
mininterval=1.,
disable=(not config.progress_bar),
ncols=83):
x, l = x.cuda(), l.cuda()
z, log_j = model(x, l)
nll = torch.mean(z**2) / 2 - torch.mean(log_j) / np.prod(config.img_size)
nll.backward()
torch.nn.utils.clip_grad_norm_(model.trainable_parameters,
config.clip_grad_norm)
nll_mean.append(nll.item())
model.optimizer.step()
model.optimizer.zero_grad()
with torch.no_grad():
model.eval()
z, log_j = model(data.val_x, data.val_l)
nll_val = torch.mean(z**2) / 2 - torch.mean(log_j) / np.prod(config.img_size)
model.train()
print('%.3i \t%.5i/%.5i \t%.2f \t%.6f\t%.6f\t%.2e' % (i_epoch,
i_batch, len(data.train_loader),
(time() - t_start)/60.,
np.mean(nll_mean),
nll_val.item(),
model.optimizer.param_groups[0]['lr'],
), flush=True)
val_losses_means = np.append(val_losses_means, np.mean(nll_mean))
val_losses = np.append(val_losses, nll_val.item())
nll_mean = []
model.weight_scheduler.step()
if (i_epoch % config.checkpoint_save_interval) == 0:
model.save(config.filename + '_checkpoint_%.4i' % (i_epoch * (1-config.checkpoint_save_overwrite)))
# save model and losses
model.save(config.filename)
np.savetxt(config.loss_means_filename, val_losses_means)
np.savetxt(config.loss_filename, val_losses)
except BaseException as b:
if config.checkpoint_on_error:
model.save(config.filename + "_ABORT")
raise b
def evaluate(config):
model = MNISTcINN_minimal(config)
model.load(config.filename)
model.eval()
if config.maxpool:
data = MNISTDataPreprocessed(config)
else:
data = MNISTData(config)
#for s in tqdm(range(0, 256)):
# torch.manual_seed(s)
# temperature(0.88, columns=1, save_as='./images/samples/T_%.4i.png' % (s))
# plt.title(str(s))
train_config = f"width{config.internal_width}_epochs{config.n_epochs}"
index_ins = [284, 394, 422, 759, 639, 599, 471, 449, 448, 426]
style_transfer(model, data, index_ins, config, train_config)
interpolation(model, config, train_config)
#for j in range(3):
# plt.figure()
# for i in range(10):
# plt.subplot(10, 1, i+1)
# val_set_pca(I=j, C=i)
# plt.title(str(j))
val_loss(model, data, config)
for i in range(10):
show_samples(model, data, config, i, train_config)
########################################################################
# execution
if __name__ == "__main__":
config = CONFIG()
import argparse
parser = argparse.ArgumentParser(description=config.str())
parser.add_argument("-t", "--train", action="store_true")
parser.add_argument("-e", "--eval", action="store_true")
parser.add_argument("-m", "--maxpool", action="store_true")
parser.add_argument("-d", "--downloadMNIST", action="store_true")
args = parser.parse_args()
if args.downloadMNIST:
data = MNISTData(config)
if args.maxpool:
config.maxpool = True
config.img_size = (14, 14)
config.data_mean = None
config.data_std = None
config.save_dir = "../../../out/MNIST_minimal_maxpool"
config.load_file = config.save_dir + "/mnist_minimal_maxpool_checkpoint_" + \
f"width{config.internal_width}_epochs{config.n_epochs}.pt"
config.filename = config.save_dir + "/mnist_minimal_maxpool_cinn_" + \
f"width{config.internal_width}_epochs{config.n_epochs}.pt"
config.loss_means_filename = config.save_dir + f"/val_losses_means_{config.n_epochs}e_{config.internal_width}w.txt"
config.loss_filename = config.save_dir + f"/val_losses_{config.n_epochs}e_{config.internal_width}w.txt"
if args.train:
# model training
print(config.str())
train(config)
if args.eval:
# model evaluation
print(config.str())
evaluate(config)
print("Done! Exit normaly.")
| nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom,
{"seed": k}))
nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
{"subnet_constructor": fc_subnet,
"clamp": self.c.clamping},
conditions=cond)) | conditional_block |
session.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/bio-routing/bio-rd/protocols/bgp/api/session.proto
package api
import (
fmt "fmt"
api "github.com/bio-routing/bio-rd/net/api"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Session_State int32
const (
Session_Disabled Session_State = 0
Session_Idle Session_State = 1
Session_Connect Session_State = 2
Session_Active Session_State = 3
Session_OpenSent Session_State = 4
Session_OpenConfirmed Session_State = 5
Session_Established Session_State = 6
Session_Ceased Session_State = 7
)
var Session_State_name = map[int32]string{
0: "Disabled",
1: "Idle",
2: "Connect",
3: "Active",
4: "OpenSent",
5: "OpenConfirmed",
6: "Established",
7: "Ceased",
}
var Session_State_value = map[string]int32{
"Disabled": 0,
"Idle": 1,
"Connect": 2,
"Active": 3,
"OpenSent": 4,
"OpenConfirmed": 5,
"Established": 6,
"Ceased": 7,
}
func (x Session_State) String() string {
return proto.EnumName(Session_State_name, int32(x))
}
func (Session_State) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{0, 0}
}
type Session struct {
LocalAddress *api.IP `protobuf:"bytes,1,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"`
NeighborAddress *api.IP `protobuf:"bytes,2,opt,name=neighbor_address,json=neighborAddress,proto3" json:"neighbor_address,omitempty"`
LocalAsn uint32 `protobuf:"varint,3,opt,name=local_asn,json=localAsn,proto3" json:"local_asn,omitempty"`
PeerAsn uint32 `protobuf:"varint,4,opt,name=peer_asn,json=peerAsn,proto3" json:"peer_asn,omitempty"`
Status Session_State `protobuf:"varint,5,opt,name=status,proto3,enum=bio.bgp.Session_State" json:"status,omitempty"`
Stats *SessionStats `protobuf:"bytes,6,opt,name=stats,proto3" json:"stats,omitempty"`
EstablishedSince uint64 `protobuf:"varint,7,opt,name=established_since,json=establishedSince,proto3" json:"established_since,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Session) Reset() { *m = Session{} }
func (m *Session) String() string { return proto.CompactTextString(m) }
func (*Session) ProtoMessage() {}
func (*Session) Descriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{0} | func (m *Session) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Session.Unmarshal(m, b)
}
func (m *Session) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Session.Marshal(b, m, deterministic)
}
func (m *Session) XXX_Merge(src proto.Message) {
xxx_messageInfo_Session.Merge(m, src)
}
func (m *Session) XXX_Size() int {
return xxx_messageInfo_Session.Size(m)
}
func (m *Session) XXX_DiscardUnknown() {
xxx_messageInfo_Session.DiscardUnknown(m)
}
var xxx_messageInfo_Session proto.InternalMessageInfo
func (m *Session) GetLocalAddress() *api.IP {
if m != nil {
return m.LocalAddress
}
return nil
}
func (m *Session) GetNeighborAddress() *api.IP {
if m != nil {
return m.NeighborAddress
}
return nil
}
func (m *Session) GetLocalAsn() uint32 {
if m != nil {
return m.LocalAsn
}
return 0
}
func (m *Session) GetPeerAsn() uint32 {
if m != nil {
return m.PeerAsn
}
return 0
}
func (m *Session) GetStatus() Session_State {
if m != nil {
return m.Status
}
return Session_Disabled
}
func (m *Session) GetStats() *SessionStats {
if m != nil {
return m.Stats
}
return nil
}
func (m *Session) GetEstablishedSince() uint64 {
if m != nil {
return m.EstablishedSince
}
return 0
}
type SessionStats struct {
MessagesIn uint64 `protobuf:"varint,1,opt,name=messages_in,json=messagesIn,proto3" json:"messages_in,omitempty"`
MessagesOut uint64 `protobuf:"varint,2,opt,name=messages_out,json=messagesOut,proto3" json:"messages_out,omitempty"`
Flaps uint64 `protobuf:"varint,3,opt,name=flaps,proto3" json:"flaps,omitempty"`
RoutesReceived uint64 `protobuf:"varint,4,opt,name=routes_received,json=routesReceived,proto3" json:"routes_received,omitempty"`
RoutesImported uint64 `protobuf:"varint,5,opt,name=routes_imported,json=routesImported,proto3" json:"routes_imported,omitempty"`
RoutesExported uint64 `protobuf:"varint,6,opt,name=routes_exported,json=routesExported,proto3" json:"routes_exported,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SessionStats) Reset() { *m = SessionStats{} }
func (m *SessionStats) String() string { return proto.CompactTextString(m) }
func (*SessionStats) ProtoMessage() {}
func (*SessionStats) Descriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{1}
}
func (m *SessionStats) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SessionStats.Unmarshal(m, b)
}
func (m *SessionStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SessionStats.Marshal(b, m, deterministic)
}
func (m *SessionStats) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionStats.Merge(m, src)
}
func (m *SessionStats) XXX_Size() int {
return xxx_messageInfo_SessionStats.Size(m)
}
func (m *SessionStats) XXX_DiscardUnknown() {
xxx_messageInfo_SessionStats.DiscardUnknown(m)
}
var xxx_messageInfo_SessionStats proto.InternalMessageInfo
func (m *SessionStats) GetMessagesIn() uint64 {
if m != nil {
return m.MessagesIn
}
return 0
}
func (m *SessionStats) GetMessagesOut() uint64 {
if m != nil {
return m.MessagesOut
}
return 0
}
func (m *SessionStats) GetFlaps() uint64 {
if m != nil {
return m.Flaps
}
return 0
}
func (m *SessionStats) GetRoutesReceived() uint64 {
if m != nil {
return m.RoutesReceived
}
return 0
}
func (m *SessionStats) GetRoutesImported() uint64 {
if m != nil {
return m.RoutesImported
}
return 0
}
func (m *SessionStats) GetRoutesExported() uint64 {
if m != nil {
return m.RoutesExported
}
return 0
}
func init() {
proto.RegisterEnum("bio.bgp.Session_State", Session_State_name, Session_State_value)
proto.RegisterType((*Session)(nil), "bio.bgp.Session")
proto.RegisterType((*SessionStats)(nil), "bio.bgp.SessionStats")
}
func init() {
proto.RegisterFile("github.com/bio-routing/bio-rd/protocols/bgp/api/session.proto", fileDescriptor_5b53032c0bb76d75)
}
var fileDescriptor_5b53032c0bb76d75 = []byte{
// 473 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xc1, 0x8e, 0xd3, 0x30,
0x10, 0x86, 0xc9, 0x36, 0x4d, 0xca, 0xb4, 0xdd, 0x66, 0x2d, 0x40, 0x05, 0x0e, 0x94, 0x5e, 0xa8,
0xb4, 0x22, 0x81, 0x45, 0xe2, 0xc6, 0x61, 0x29, 0x7b, 0xe8, 0x69, 0x91, 0x7b, 0xe3, 0x52, 0x39,
0xf1, 0x6c, 0x6a, 0x29, 0xb5, 0xa3, 0x8c, 0x5b, 0xf1, 0x52, 0x3c, 0x15, 0x2f, 0x82, 0xec, 0xa4,
0xa5, 0x02, 0x09, 0x89, 0x9b, 0xe7, 0x9f, 0xef, 0x9f, 0xb1, 0xf5, 0x27, 0xf0, 0xa9, 0x54, 0x76,
0xbb, 0xcf, 0xd3, 0xc2, 0xec, 0xb2, 0x5c, 0x99, 0xb7, 0x8d, 0xd9, 0x5b, 0xa5, 0xcb, 0xf6, 0x2c,
0xb3, 0xba, 0x31, 0xd6, 0x14, 0xa6, 0xa2, 0x2c, 0x2f, 0xeb, 0x4c, 0xd4, 0x2a, 0x23, 0x24, 0x52,
0x46, 0xa7, 0xbe, 0xc3, 0xe2, 0x5c, 0x99, 0x34, 0x2f, 0xeb, 0x17, 0xd9, 0xbf, 0xe7, 0x68, 0xb4,
0xde, 0xad, 0xd1, 0xb6, 0xce, 0xf9, 0x8f, 0x1e, 0xc4, 0xeb, 0x76, 0x16, 0x7b, 0x07, 0xe3, 0xca,
0x14, 0xa2, 0xda, 0x08, 0x29, 0x1b, 0x24, 0x9a, 0x06, 0xb3, 0x60, 0x31, 0xbc, 0x19, 0xa6, 0x6e,
0xba, 0xb3, 0xac, 0xbe, 0xf2, 0x91, 0x27, 0x6e, 0x5b, 0x80, 0x7d, 0x84, 0x44, 0xa3, 0x2a, 0xb7,
0xb9, 0x69, 0x4e, 0xa6, 0x8b, 0xbf, 0x4d, 0x93, 0x23, 0x74, 0xf4, 0xbd, 0x84, 0xc7, 0xdd, 0x26,
0xd2, 0xd3, 0xde, 0x2c, 0x58, 0x8c, 0xf9, 0xa0, 0x1d, 0x4c, 0x9a, 0x3d, 0x87, 0x41, 0x8d, 0xd8,
0xf8, 0x5e, 0xe8, 0x7b, 0xb1, 0xab, 0x5d, 0x2b, 0x85, 0x88, 0xac, 0xb0, 0x7b, 0x9a, 0xf6, 0x67,
0xc1, 0xe2, 0xf2, 0xe6, 0x59, 0xda, 0x3d, 0x3c, 0xed, 0xde, 0x90, 0xae, 0xad, 0xb0, 0xc8, 0x3b,
0x8a, 0x5d, 0x43, 0xdf, 0x9d, 0x68, 0x1a, 0xf9, 0x4b, 0x3d, 0xfd, 0x13, 0x77, 0x34, 0xf1, 0x96,
0x61, 0xd7, 0x70, 0x85, 0x64, 0x45, 0x5e, 0x29, 0xda, 0xa2, 0xdc, 0x90, 0xd2, 0x05, 0x4e, 0xe3,
0x59, 0xb0, 0x08, 0x79, 0x72, 0xd6, 0x58, 0x3b, 0x7d, 0x7e, 0x80, 0xbe, 0x5f, 0xc5, 0x46, 0x30,
0xf8, 0xa2, 0x48, 0xe4, 0x15, 0xca, 0xe4, 0x11, 0x1b, 0x40, 0xb8, 0x92, 0x15, 0x26, 0x01, 0x1b,
0x42, 0xbc, 0x34, 0x5a, 0x63, 0x61, 0x93, 0x0b, 0x06, 0x10, 0xdd, 0x16, 0x56, 0x1d, 0x30, 0xe9,
0x39, 0xc3, 0x7d, 0x8d, 0x7a, 0x8d, 0xda, 0x26, 0x21, 0xbb, 0x82, 0xb1, 0xab, 0x96, 0x46, 0x3f,
0xa8, 0x66, 0x87, 0x32, 0xe9, 0xb3, 0x09, 0x0c, 0xef, 0x7e, 0xaf, 0x4b, 0x22, 0xe7, 0x5e, 0xa2,
0x20, 0x94, 0x49, 0x3c, 0xff, 0x19, 0xc0, 0xe8, 0xfc, 0xf2, 0xec, 0x15, 0x0c, 0x77, 0x48, 0x24,
0x4a, 0xa4, 0x8d, 0xd2, 0x3e, 0xb2, 0x90, 0xc3, 0x51, 0x5a, 0x69, 0xf6, 0x1a, 0x46, 0x27, 0xc0,
0xec, 0xad, 0xcf, 0x27, 0xe4, 0x27, 0xd3, 0xfd, 0xde, 0xb2, 0x27, 0xd0, 0x7f, 0xa8, 0x44, 0x4d,
0x3e, 0x8a, 0x90, 0xb7, 0x05, 0x7b, 0x03, 0x13, 0xf7, 0xf9, 0x20, 0x6d, 0x1a, 0x2c, 0x50, 0x1d,
0x50, 0xfa, 0x38, 0x42, 0x7e, 0xd9, 0xca, 0xbc, 0x53, 0xcf, 0x40, 0xb5, 0xab, 0x4d, 0x63, 0x51,
0xfa, 0x78, 0x4e, 0xe0, 0xaa, 0x53, 0xcf, 0x40, 0xfc, 0xde, 0x81, 0xd1, 0x39, 0x78, 0xd7, 0xa9,
0x9f, 0xdf, 0x7f, 0xcb, 0xfe, 0xf3, 0x87, 0xc8, 0x23, 0x2f, 0x7d, 0xf8, 0x15, 0x00, 0x00, 0xff,
0xff, 0xd4, 0x6f, 0x19, 0xed, 0x4a, 0x03, 0x00, 0x00,
} | }
| random_line_split |
session.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/bio-routing/bio-rd/protocols/bgp/api/session.proto
package api
import (
fmt "fmt"
api "github.com/bio-routing/bio-rd/net/api"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Session_State int32
const (
Session_Disabled Session_State = 0
Session_Idle Session_State = 1
Session_Connect Session_State = 2
Session_Active Session_State = 3
Session_OpenSent Session_State = 4
Session_OpenConfirmed Session_State = 5
Session_Established Session_State = 6
Session_Ceased Session_State = 7
)
var Session_State_name = map[int32]string{
0: "Disabled",
1: "Idle",
2: "Connect",
3: "Active",
4: "OpenSent",
5: "OpenConfirmed",
6: "Established",
7: "Ceased",
}
var Session_State_value = map[string]int32{
"Disabled": 0,
"Idle": 1,
"Connect": 2,
"Active": 3,
"OpenSent": 4,
"OpenConfirmed": 5,
"Established": 6,
"Ceased": 7,
}
func (x Session_State) String() string {
return proto.EnumName(Session_State_name, int32(x))
}
func (Session_State) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{0, 0}
}
type Session struct {
LocalAddress *api.IP `protobuf:"bytes,1,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"`
NeighborAddress *api.IP `protobuf:"bytes,2,opt,name=neighbor_address,json=neighborAddress,proto3" json:"neighbor_address,omitempty"`
LocalAsn uint32 `protobuf:"varint,3,opt,name=local_asn,json=localAsn,proto3" json:"local_asn,omitempty"`
PeerAsn uint32 `protobuf:"varint,4,opt,name=peer_asn,json=peerAsn,proto3" json:"peer_asn,omitempty"`
Status Session_State `protobuf:"varint,5,opt,name=status,proto3,enum=bio.bgp.Session_State" json:"status,omitempty"`
Stats *SessionStats `protobuf:"bytes,6,opt,name=stats,proto3" json:"stats,omitempty"`
EstablishedSince uint64 `protobuf:"varint,7,opt,name=established_since,json=establishedSince,proto3" json:"established_since,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Session) Reset() { *m = Session{} }
func (m *Session) String() string { return proto.CompactTextString(m) }
func (*Session) ProtoMessage() {}
func (*Session) Descriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{0}
}
func (m *Session) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Session.Unmarshal(m, b)
}
func (m *Session) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Session.Marshal(b, m, deterministic)
}
func (m *Session) XXX_Merge(src proto.Message) {
xxx_messageInfo_Session.Merge(m, src)
}
func (m *Session) | () int {
return xxx_messageInfo_Session.Size(m)
}
func (m *Session) XXX_DiscardUnknown() {
xxx_messageInfo_Session.DiscardUnknown(m)
}
var xxx_messageInfo_Session proto.InternalMessageInfo
func (m *Session) GetLocalAddress() *api.IP {
if m != nil {
return m.LocalAddress
}
return nil
}
func (m *Session) GetNeighborAddress() *api.IP {
if m != nil {
return m.NeighborAddress
}
return nil
}
func (m *Session) GetLocalAsn() uint32 {
if m != nil {
return m.LocalAsn
}
return 0
}
func (m *Session) GetPeerAsn() uint32 {
if m != nil {
return m.PeerAsn
}
return 0
}
func (m *Session) GetStatus() Session_State {
if m != nil {
return m.Status
}
return Session_Disabled
}
func (m *Session) GetStats() *SessionStats {
if m != nil {
return m.Stats
}
return nil
}
func (m *Session) GetEstablishedSince() uint64 {
if m != nil {
return m.EstablishedSince
}
return 0
}
type SessionStats struct {
MessagesIn uint64 `protobuf:"varint,1,opt,name=messages_in,json=messagesIn,proto3" json:"messages_in,omitempty"`
MessagesOut uint64 `protobuf:"varint,2,opt,name=messages_out,json=messagesOut,proto3" json:"messages_out,omitempty"`
Flaps uint64 `protobuf:"varint,3,opt,name=flaps,proto3" json:"flaps,omitempty"`
RoutesReceived uint64 `protobuf:"varint,4,opt,name=routes_received,json=routesReceived,proto3" json:"routes_received,omitempty"`
RoutesImported uint64 `protobuf:"varint,5,opt,name=routes_imported,json=routesImported,proto3" json:"routes_imported,omitempty"`
RoutesExported uint64 `protobuf:"varint,6,opt,name=routes_exported,json=routesExported,proto3" json:"routes_exported,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SessionStats) Reset() { *m = SessionStats{} }
func (m *SessionStats) String() string { return proto.CompactTextString(m) }
func (*SessionStats) ProtoMessage() {}
func (*SessionStats) Descriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{1}
}
func (m *SessionStats) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SessionStats.Unmarshal(m, b)
}
func (m *SessionStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SessionStats.Marshal(b, m, deterministic)
}
func (m *SessionStats) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionStats.Merge(m, src)
}
func (m *SessionStats) XXX_Size() int {
return xxx_messageInfo_SessionStats.Size(m)
}
func (m *SessionStats) XXX_DiscardUnknown() {
xxx_messageInfo_SessionStats.DiscardUnknown(m)
}
var xxx_messageInfo_SessionStats proto.InternalMessageInfo
func (m *SessionStats) GetMessagesIn() uint64 {
if m != nil {
return m.MessagesIn
}
return 0
}
func (m *SessionStats) GetMessagesOut() uint64 {
if m != nil {
return m.MessagesOut
}
return 0
}
func (m *SessionStats) GetFlaps() uint64 {
if m != nil {
return m.Flaps
}
return 0
}
func (m *SessionStats) GetRoutesReceived() uint64 {
if m != nil {
return m.RoutesReceived
}
return 0
}
func (m *SessionStats) GetRoutesImported() uint64 {
if m != nil {
return m.RoutesImported
}
return 0
}
func (m *SessionStats) GetRoutesExported() uint64 {
if m != nil {
return m.RoutesExported
}
return 0
}
func init() {
proto.RegisterEnum("bio.bgp.Session_State", Session_State_name, Session_State_value)
proto.RegisterType((*Session)(nil), "bio.bgp.Session")
proto.RegisterType((*SessionStats)(nil), "bio.bgp.SessionStats")
}
func init() {
proto.RegisterFile("github.com/bio-routing/bio-rd/protocols/bgp/api/session.proto", fileDescriptor_5b53032c0bb76d75)
}
var fileDescriptor_5b53032c0bb76d75 = []byte{
// 473 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xc1, 0x8e, 0xd3, 0x30,
0x10, 0x86, 0xc9, 0x36, 0x4d, 0xca, 0xb4, 0xdd, 0x66, 0x2d, 0x40, 0x05, 0x0e, 0x94, 0x5e, 0xa8,
0xb4, 0x22, 0x81, 0x45, 0xe2, 0xc6, 0x61, 0x29, 0x7b, 0xe8, 0x69, 0x91, 0x7b, 0xe3, 0x52, 0x39,
0xf1, 0x6c, 0x6a, 0x29, 0xb5, 0xa3, 0x8c, 0x5b, 0xf1, 0x52, 0x3c, 0x15, 0x2f, 0x82, 0xec, 0xa4,
0xa5, 0x02, 0x09, 0x89, 0x9b, 0xe7, 0x9f, 0xef, 0x9f, 0xb1, 0xf5, 0x27, 0xf0, 0xa9, 0x54, 0x76,
0xbb, 0xcf, 0xd3, 0xc2, 0xec, 0xb2, 0x5c, 0x99, 0xb7, 0x8d, 0xd9, 0x5b, 0xa5, 0xcb, 0xf6, 0x2c,
0xb3, 0xba, 0x31, 0xd6, 0x14, 0xa6, 0xa2, 0x2c, 0x2f, 0xeb, 0x4c, 0xd4, 0x2a, 0x23, 0x24, 0x52,
0x46, 0xa7, 0xbe, 0xc3, 0xe2, 0x5c, 0x99, 0x34, 0x2f, 0xeb, 0x17, 0xd9, 0xbf, 0xe7, 0x68, 0xb4,
0xde, 0xad, 0xd1, 0xb6, 0xce, 0xf9, 0x8f, 0x1e, 0xc4, 0xeb, 0x76, 0x16, 0x7b, 0x07, 0xe3, 0xca,
0x14, 0xa2, 0xda, 0x08, 0x29, 0x1b, 0x24, 0x9a, 0x06, 0xb3, 0x60, 0x31, 0xbc, 0x19, 0xa6, 0x6e,
0xba, 0xb3, 0xac, 0xbe, 0xf2, 0x91, 0x27, 0x6e, 0x5b, 0x80, 0x7d, 0x84, 0x44, 0xa3, 0x2a, 0xb7,
0xb9, 0x69, 0x4e, 0xa6, 0x8b, 0xbf, 0x4d, 0x93, 0x23, 0x74, 0xf4, 0xbd, 0x84, 0xc7, 0xdd, 0x26,
0xd2, 0xd3, 0xde, 0x2c, 0x58, 0x8c, 0xf9, 0xa0, 0x1d, 0x4c, 0x9a, 0x3d, 0x87, 0x41, 0x8d, 0xd8,
0xf8, 0x5e, 0xe8, 0x7b, 0xb1, 0xab, 0x5d, 0x2b, 0x85, 0x88, 0xac, 0xb0, 0x7b, 0x9a, 0xf6, 0x67,
0xc1, 0xe2, 0xf2, 0xe6, 0x59, 0xda, 0x3d, 0x3c, 0xed, 0xde, 0x90, 0xae, 0xad, 0xb0, 0xc8, 0x3b,
0x8a, 0x5d, 0x43, 0xdf, 0x9d, 0x68, 0x1a, 0xf9, 0x4b, 0x3d, 0xfd, 0x13, 0x77, 0x34, 0xf1, 0x96,
0x61, 0xd7, 0x70, 0x85, 0x64, 0x45, 0x5e, 0x29, 0xda, 0xa2, 0xdc, 0x90, 0xd2, 0x05, 0x4e, 0xe3,
0x59, 0xb0, 0x08, 0x79, 0x72, 0xd6, 0x58, 0x3b, 0x7d, 0x7e, 0x80, 0xbe, 0x5f, 0xc5, 0x46, 0x30,
0xf8, 0xa2, 0x48, 0xe4, 0x15, 0xca, 0xe4, 0x11, 0x1b, 0x40, 0xb8, 0x92, 0x15, 0x26, 0x01, 0x1b,
0x42, 0xbc, 0x34, 0x5a, 0x63, 0x61, 0x93, 0x0b, 0x06, 0x10, 0xdd, 0x16, 0x56, 0x1d, 0x30, 0xe9,
0x39, 0xc3, 0x7d, 0x8d, 0x7a, 0x8d, 0xda, 0x26, 0x21, 0xbb, 0x82, 0xb1, 0xab, 0x96, 0x46, 0x3f,
0xa8, 0x66, 0x87, 0x32, 0xe9, 0xb3, 0x09, 0x0c, 0xef, 0x7e, 0xaf, 0x4b, 0x22, 0xe7, 0x5e, 0xa2,
0x20, 0x94, 0x49, 0x3c, 0xff, 0x19, 0xc0, 0xe8, 0xfc, 0xf2, 0xec, 0x15, 0x0c, 0x77, 0x48, 0x24,
0x4a, 0xa4, 0x8d, 0xd2, 0x3e, 0xb2, 0x90, 0xc3, 0x51, 0x5a, 0x69, 0xf6, 0x1a, 0x46, 0x27, 0xc0,
0xec, 0xad, 0xcf, 0x27, 0xe4, 0x27, 0xd3, 0xfd, 0xde, 0xb2, 0x27, 0xd0, 0x7f, 0xa8, 0x44, 0x4d,
0x3e, 0x8a, 0x90, 0xb7, 0x05, 0x7b, 0x03, 0x13, 0xf7, 0xf9, 0x20, 0x6d, 0x1a, 0x2c, 0x50, 0x1d,
0x50, 0xfa, 0x38, 0x42, 0x7e, 0xd9, 0xca, 0xbc, 0x53, 0xcf, 0x40, 0xb5, 0xab, 0x4d, 0x63, 0x51,
0xfa, 0x78, 0x4e, 0xe0, 0xaa, 0x53, 0xcf, 0x40, 0xfc, 0xde, 0x81, 0xd1, 0x39, 0x78, 0xd7, 0xa9,
0x9f, 0xdf, 0x7f, 0xcb, 0xfe, 0xf3, 0x87, 0xc8, 0x23, 0x2f, 0x7d, 0xf8, 0x15, 0x00, 0x00, 0xff,
0xff, 0xd4, 0x6f, 0x19, 0xed, 0x4a, 0x03, 0x00, 0x00,
}
| XXX_Size | identifier_name |
session.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/bio-routing/bio-rd/protocols/bgp/api/session.proto
package api
import (
fmt "fmt"
api "github.com/bio-routing/bio-rd/net/api"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Session_State int32
const (
Session_Disabled Session_State = 0
Session_Idle Session_State = 1
Session_Connect Session_State = 2
Session_Active Session_State = 3
Session_OpenSent Session_State = 4
Session_OpenConfirmed Session_State = 5
Session_Established Session_State = 6
Session_Ceased Session_State = 7
)
var Session_State_name = map[int32]string{
0: "Disabled",
1: "Idle",
2: "Connect",
3: "Active",
4: "OpenSent",
5: "OpenConfirmed",
6: "Established",
7: "Ceased",
}
var Session_State_value = map[string]int32{
"Disabled": 0,
"Idle": 1,
"Connect": 2,
"Active": 3,
"OpenSent": 4,
"OpenConfirmed": 5,
"Established": 6,
"Ceased": 7,
}
func (x Session_State) String() string {
return proto.EnumName(Session_State_name, int32(x))
}
func (Session_State) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{0, 0}
}
type Session struct {
LocalAddress *api.IP `protobuf:"bytes,1,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"`
NeighborAddress *api.IP `protobuf:"bytes,2,opt,name=neighbor_address,json=neighborAddress,proto3" json:"neighbor_address,omitempty"`
LocalAsn uint32 `protobuf:"varint,3,opt,name=local_asn,json=localAsn,proto3" json:"local_asn,omitempty"`
PeerAsn uint32 `protobuf:"varint,4,opt,name=peer_asn,json=peerAsn,proto3" json:"peer_asn,omitempty"`
Status Session_State `protobuf:"varint,5,opt,name=status,proto3,enum=bio.bgp.Session_State" json:"status,omitempty"`
Stats *SessionStats `protobuf:"bytes,6,opt,name=stats,proto3" json:"stats,omitempty"`
EstablishedSince uint64 `protobuf:"varint,7,opt,name=established_since,json=establishedSince,proto3" json:"established_since,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Session) Reset() { *m = Session{} }
func (m *Session) String() string { return proto.CompactTextString(m) }
func (*Session) ProtoMessage() {}
func (*Session) Descriptor() ([]byte, []int) |
func (m *Session) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Session.Unmarshal(m, b)
}
func (m *Session) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Session.Marshal(b, m, deterministic)
}
func (m *Session) XXX_Merge(src proto.Message) {
xxx_messageInfo_Session.Merge(m, src)
}
func (m *Session) XXX_Size() int {
return xxx_messageInfo_Session.Size(m)
}
func (m *Session) XXX_DiscardUnknown() {
xxx_messageInfo_Session.DiscardUnknown(m)
}
var xxx_messageInfo_Session proto.InternalMessageInfo
func (m *Session) GetLocalAddress() *api.IP {
if m != nil {
return m.LocalAddress
}
return nil
}
func (m *Session) GetNeighborAddress() *api.IP {
if m != nil {
return m.NeighborAddress
}
return nil
}
func (m *Session) GetLocalAsn() uint32 {
if m != nil {
return m.LocalAsn
}
return 0
}
func (m *Session) GetPeerAsn() uint32 {
if m != nil {
return m.PeerAsn
}
return 0
}
func (m *Session) GetStatus() Session_State {
if m != nil {
return m.Status
}
return Session_Disabled
}
func (m *Session) GetStats() *SessionStats {
if m != nil {
return m.Stats
}
return nil
}
func (m *Session) GetEstablishedSince() uint64 {
if m != nil {
return m.EstablishedSince
}
return 0
}
type SessionStats struct {
MessagesIn uint64 `protobuf:"varint,1,opt,name=messages_in,json=messagesIn,proto3" json:"messages_in,omitempty"`
MessagesOut uint64 `protobuf:"varint,2,opt,name=messages_out,json=messagesOut,proto3" json:"messages_out,omitempty"`
Flaps uint64 `protobuf:"varint,3,opt,name=flaps,proto3" json:"flaps,omitempty"`
RoutesReceived uint64 `protobuf:"varint,4,opt,name=routes_received,json=routesReceived,proto3" json:"routes_received,omitempty"`
RoutesImported uint64 `protobuf:"varint,5,opt,name=routes_imported,json=routesImported,proto3" json:"routes_imported,omitempty"`
RoutesExported uint64 `protobuf:"varint,6,opt,name=routes_exported,json=routesExported,proto3" json:"routes_exported,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SessionStats) Reset() { *m = SessionStats{} }
func (m *SessionStats) String() string { return proto.CompactTextString(m) }
func (*SessionStats) ProtoMessage() {}
func (*SessionStats) Descriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{1}
}
func (m *SessionStats) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SessionStats.Unmarshal(m, b)
}
func (m *SessionStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SessionStats.Marshal(b, m, deterministic)
}
func (m *SessionStats) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionStats.Merge(m, src)
}
func (m *SessionStats) XXX_Size() int {
return xxx_messageInfo_SessionStats.Size(m)
}
func (m *SessionStats) XXX_DiscardUnknown() {
xxx_messageInfo_SessionStats.DiscardUnknown(m)
}
var xxx_messageInfo_SessionStats proto.InternalMessageInfo
func (m *SessionStats) GetMessagesIn() uint64 {
if m != nil {
return m.MessagesIn
}
return 0
}
func (m *SessionStats) GetMessagesOut() uint64 {
if m != nil {
return m.MessagesOut
}
return 0
}
func (m *SessionStats) GetFlaps() uint64 {
if m != nil {
return m.Flaps
}
return 0
}
func (m *SessionStats) GetRoutesReceived() uint64 {
if m != nil {
return m.RoutesReceived
}
return 0
}
func (m *SessionStats) GetRoutesImported() uint64 {
if m != nil {
return m.RoutesImported
}
return 0
}
func (m *SessionStats) GetRoutesExported() uint64 {
if m != nil {
return m.RoutesExported
}
return 0
}
func init() {
proto.RegisterEnum("bio.bgp.Session_State", Session_State_name, Session_State_value)
proto.RegisterType((*Session)(nil), "bio.bgp.Session")
proto.RegisterType((*SessionStats)(nil), "bio.bgp.SessionStats")
}
func init() {
proto.RegisterFile("github.com/bio-routing/bio-rd/protocols/bgp/api/session.proto", fileDescriptor_5b53032c0bb76d75)
}
var fileDescriptor_5b53032c0bb76d75 = []byte{
// 473 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xc1, 0x8e, 0xd3, 0x30,
0x10, 0x86, 0xc9, 0x36, 0x4d, 0xca, 0xb4, 0xdd, 0x66, 0x2d, 0x40, 0x05, 0x0e, 0x94, 0x5e, 0xa8,
0xb4, 0x22, 0x81, 0x45, 0xe2, 0xc6, 0x61, 0x29, 0x7b, 0xe8, 0x69, 0x91, 0x7b, 0xe3, 0x52, 0x39,
0xf1, 0x6c, 0x6a, 0x29, 0xb5, 0xa3, 0x8c, 0x5b, 0xf1, 0x52, 0x3c, 0x15, 0x2f, 0x82, 0xec, 0xa4,
0xa5, 0x02, 0x09, 0x89, 0x9b, 0xe7, 0x9f, 0xef, 0x9f, 0xb1, 0xf5, 0x27, 0xf0, 0xa9, 0x54, 0x76,
0xbb, 0xcf, 0xd3, 0xc2, 0xec, 0xb2, 0x5c, 0x99, 0xb7, 0x8d, 0xd9, 0x5b, 0xa5, 0xcb, 0xf6, 0x2c,
0xb3, 0xba, 0x31, 0xd6, 0x14, 0xa6, 0xa2, 0x2c, 0x2f, 0xeb, 0x4c, 0xd4, 0x2a, 0x23, 0x24, 0x52,
0x46, 0xa7, 0xbe, 0xc3, 0xe2, 0x5c, 0x99, 0x34, 0x2f, 0xeb, 0x17, 0xd9, 0xbf, 0xe7, 0x68, 0xb4,
0xde, 0xad, 0xd1, 0xb6, 0xce, 0xf9, 0x8f, 0x1e, 0xc4, 0xeb, 0x76, 0x16, 0x7b, 0x07, 0xe3, 0xca,
0x14, 0xa2, 0xda, 0x08, 0x29, 0x1b, 0x24, 0x9a, 0x06, 0xb3, 0x60, 0x31, 0xbc, 0x19, 0xa6, 0x6e,
0xba, 0xb3, 0xac, 0xbe, 0xf2, 0x91, 0x27, 0x6e, 0x5b, 0x80, 0x7d, 0x84, 0x44, 0xa3, 0x2a, 0xb7,
0xb9, 0x69, 0x4e, 0xa6, 0x8b, 0xbf, 0x4d, 0x93, 0x23, 0x74, 0xf4, 0xbd, 0x84, 0xc7, 0xdd, 0x26,
0xd2, 0xd3, 0xde, 0x2c, 0x58, 0x8c, 0xf9, 0xa0, 0x1d, 0x4c, 0x9a, 0x3d, 0x87, 0x41, 0x8d, 0xd8,
0xf8, 0x5e, 0xe8, 0x7b, 0xb1, 0xab, 0x5d, 0x2b, 0x85, 0x88, 0xac, 0xb0, 0x7b, 0x9a, 0xf6, 0x67,
0xc1, 0xe2, 0xf2, 0xe6, 0x59, 0xda, 0x3d, 0x3c, 0xed, 0xde, 0x90, 0xae, 0xad, 0xb0, 0xc8, 0x3b,
0x8a, 0x5d, 0x43, 0xdf, 0x9d, 0x68, 0x1a, 0xf9, 0x4b, 0x3d, 0xfd, 0x13, 0x77, 0x34, 0xf1, 0x96,
0x61, 0xd7, 0x70, 0x85, 0x64, 0x45, 0x5e, 0x29, 0xda, 0xa2, 0xdc, 0x90, 0xd2, 0x05, 0x4e, 0xe3,
0x59, 0xb0, 0x08, 0x79, 0x72, 0xd6, 0x58, 0x3b, 0x7d, 0x7e, 0x80, 0xbe, 0x5f, 0xc5, 0x46, 0x30,
0xf8, 0xa2, 0x48, 0xe4, 0x15, 0xca, 0xe4, 0x11, 0x1b, 0x40, 0xb8, 0x92, 0x15, 0x26, 0x01, 0x1b,
0x42, 0xbc, 0x34, 0x5a, 0x63, 0x61, 0x93, 0x0b, 0x06, 0x10, 0xdd, 0x16, 0x56, 0x1d, 0x30, 0xe9,
0x39, 0xc3, 0x7d, 0x8d, 0x7a, 0x8d, 0xda, 0x26, 0x21, 0xbb, 0x82, 0xb1, 0xab, 0x96, 0x46, 0x3f,
0xa8, 0x66, 0x87, 0x32, 0xe9, 0xb3, 0x09, 0x0c, 0xef, 0x7e, 0xaf, 0x4b, 0x22, 0xe7, 0x5e, 0xa2,
0x20, 0x94, 0x49, 0x3c, 0xff, 0x19, 0xc0, 0xe8, 0xfc, 0xf2, 0xec, 0x15, 0x0c, 0x77, 0x48, 0x24,
0x4a, 0xa4, 0x8d, 0xd2, 0x3e, 0xb2, 0x90, 0xc3, 0x51, 0x5a, 0x69, 0xf6, 0x1a, 0x46, 0x27, 0xc0,
0xec, 0xad, 0xcf, 0x27, 0xe4, 0x27, 0xd3, 0xfd, 0xde, 0xb2, 0x27, 0xd0, 0x7f, 0xa8, 0x44, 0x4d,
0x3e, 0x8a, 0x90, 0xb7, 0x05, 0x7b, 0x03, 0x13, 0xf7, 0xf9, 0x20, 0x6d, 0x1a, 0x2c, 0x50, 0x1d,
0x50, 0xfa, 0x38, 0x42, 0x7e, 0xd9, 0xca, 0xbc, 0x53, 0xcf, 0x40, 0xb5, 0xab, 0x4d, 0x63, 0x51,
0xfa, 0x78, 0x4e, 0xe0, 0xaa, 0x53, 0xcf, 0x40, 0xfc, 0xde, 0x81, 0xd1, 0x39, 0x78, 0xd7, 0xa9,
0x9f, 0xdf, 0x7f, 0xcb, 0xfe, 0xf3, 0x87, 0xc8, 0x23, 0x2f, 0x7d, 0xf8, 0x15, 0x00, 0x00, 0xff,
0xff, 0xd4, 0x6f, 0x19, 0xed, 0x4a, 0x03, 0x00, 0x00,
}
| {
return fileDescriptor_5b53032c0bb76d75, []int{0}
} | identifier_body |
session.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/bio-routing/bio-rd/protocols/bgp/api/session.proto
package api
import (
fmt "fmt"
api "github.com/bio-routing/bio-rd/net/api"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Session_State int32
const (
Session_Disabled Session_State = 0
Session_Idle Session_State = 1
Session_Connect Session_State = 2
Session_Active Session_State = 3
Session_OpenSent Session_State = 4
Session_OpenConfirmed Session_State = 5
Session_Established Session_State = 6
Session_Ceased Session_State = 7
)
var Session_State_name = map[int32]string{
0: "Disabled",
1: "Idle",
2: "Connect",
3: "Active",
4: "OpenSent",
5: "OpenConfirmed",
6: "Established",
7: "Ceased",
}
var Session_State_value = map[string]int32{
"Disabled": 0,
"Idle": 1,
"Connect": 2,
"Active": 3,
"OpenSent": 4,
"OpenConfirmed": 5,
"Established": 6,
"Ceased": 7,
}
func (x Session_State) String() string {
return proto.EnumName(Session_State_name, int32(x))
}
func (Session_State) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{0, 0}
}
type Session struct {
LocalAddress *api.IP `protobuf:"bytes,1,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"`
NeighborAddress *api.IP `protobuf:"bytes,2,opt,name=neighbor_address,json=neighborAddress,proto3" json:"neighbor_address,omitempty"`
LocalAsn uint32 `protobuf:"varint,3,opt,name=local_asn,json=localAsn,proto3" json:"local_asn,omitempty"`
PeerAsn uint32 `protobuf:"varint,4,opt,name=peer_asn,json=peerAsn,proto3" json:"peer_asn,omitempty"`
Status Session_State `protobuf:"varint,5,opt,name=status,proto3,enum=bio.bgp.Session_State" json:"status,omitempty"`
Stats *SessionStats `protobuf:"bytes,6,opt,name=stats,proto3" json:"stats,omitempty"`
EstablishedSince uint64 `protobuf:"varint,7,opt,name=established_since,json=establishedSince,proto3" json:"established_since,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Session) Reset() { *m = Session{} }
func (m *Session) String() string { return proto.CompactTextString(m) }
func (*Session) ProtoMessage() {}
func (*Session) Descriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{0}
}
func (m *Session) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Session.Unmarshal(m, b)
}
func (m *Session) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Session.Marshal(b, m, deterministic)
}
func (m *Session) XXX_Merge(src proto.Message) {
xxx_messageInfo_Session.Merge(m, src)
}
func (m *Session) XXX_Size() int {
return xxx_messageInfo_Session.Size(m)
}
func (m *Session) XXX_DiscardUnknown() {
xxx_messageInfo_Session.DiscardUnknown(m)
}
var xxx_messageInfo_Session proto.InternalMessageInfo
func (m *Session) GetLocalAddress() *api.IP {
if m != nil {
return m.LocalAddress
}
return nil
}
func (m *Session) GetNeighborAddress() *api.IP {
if m != nil {
return m.NeighborAddress
}
return nil
}
func (m *Session) GetLocalAsn() uint32 {
if m != nil {
return m.LocalAsn
}
return 0
}
func (m *Session) GetPeerAsn() uint32 {
if m != nil {
return m.PeerAsn
}
return 0
}
func (m *Session) GetStatus() Session_State {
if m != nil {
return m.Status
}
return Session_Disabled
}
func (m *Session) GetStats() *SessionStats {
if m != nil {
return m.Stats
}
return nil
}
func (m *Session) GetEstablishedSince() uint64 {
if m != nil {
return m.EstablishedSince
}
return 0
}
type SessionStats struct {
MessagesIn uint64 `protobuf:"varint,1,opt,name=messages_in,json=messagesIn,proto3" json:"messages_in,omitempty"`
MessagesOut uint64 `protobuf:"varint,2,opt,name=messages_out,json=messagesOut,proto3" json:"messages_out,omitempty"`
Flaps uint64 `protobuf:"varint,3,opt,name=flaps,proto3" json:"flaps,omitempty"`
RoutesReceived uint64 `protobuf:"varint,4,opt,name=routes_received,json=routesReceived,proto3" json:"routes_received,omitempty"`
RoutesImported uint64 `protobuf:"varint,5,opt,name=routes_imported,json=routesImported,proto3" json:"routes_imported,omitempty"`
RoutesExported uint64 `protobuf:"varint,6,opt,name=routes_exported,json=routesExported,proto3" json:"routes_exported,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SessionStats) Reset() { *m = SessionStats{} }
func (m *SessionStats) String() string { return proto.CompactTextString(m) }
func (*SessionStats) ProtoMessage() {}
func (*SessionStats) Descriptor() ([]byte, []int) {
return fileDescriptor_5b53032c0bb76d75, []int{1}
}
func (m *SessionStats) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SessionStats.Unmarshal(m, b)
}
func (m *SessionStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SessionStats.Marshal(b, m, deterministic)
}
func (m *SessionStats) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionStats.Merge(m, src)
}
func (m *SessionStats) XXX_Size() int {
return xxx_messageInfo_SessionStats.Size(m)
}
func (m *SessionStats) XXX_DiscardUnknown() {
xxx_messageInfo_SessionStats.DiscardUnknown(m)
}
var xxx_messageInfo_SessionStats proto.InternalMessageInfo
func (m *SessionStats) GetMessagesIn() uint64 {
if m != nil {
return m.MessagesIn
}
return 0
}
func (m *SessionStats) GetMessagesOut() uint64 {
if m != nil {
return m.MessagesOut
}
return 0
}
func (m *SessionStats) GetFlaps() uint64 {
if m != nil {
return m.Flaps
}
return 0
}
func (m *SessionStats) GetRoutesReceived() uint64 {
if m != nil {
return m.RoutesReceived
}
return 0
}
func (m *SessionStats) GetRoutesImported() uint64 {
if m != nil |
return 0
}
func (m *SessionStats) GetRoutesExported() uint64 {
if m != nil {
return m.RoutesExported
}
return 0
}
func init() {
proto.RegisterEnum("bio.bgp.Session_State", Session_State_name, Session_State_value)
proto.RegisterType((*Session)(nil), "bio.bgp.Session")
proto.RegisterType((*SessionStats)(nil), "bio.bgp.SessionStats")
}
func init() {
proto.RegisterFile("github.com/bio-routing/bio-rd/protocols/bgp/api/session.proto", fileDescriptor_5b53032c0bb76d75)
}
var fileDescriptor_5b53032c0bb76d75 = []byte{
// 473 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xc1, 0x8e, 0xd3, 0x30,
0x10, 0x86, 0xc9, 0x36, 0x4d, 0xca, 0xb4, 0xdd, 0x66, 0x2d, 0x40, 0x05, 0x0e, 0x94, 0x5e, 0xa8,
0xb4, 0x22, 0x81, 0x45, 0xe2, 0xc6, 0x61, 0x29, 0x7b, 0xe8, 0x69, 0x91, 0x7b, 0xe3, 0x52, 0x39,
0xf1, 0x6c, 0x6a, 0x29, 0xb5, 0xa3, 0x8c, 0x5b, 0xf1, 0x52, 0x3c, 0x15, 0x2f, 0x82, 0xec, 0xa4,
0xa5, 0x02, 0x09, 0x89, 0x9b, 0xe7, 0x9f, 0xef, 0x9f, 0xb1, 0xf5, 0x27, 0xf0, 0xa9, 0x54, 0x76,
0xbb, 0xcf, 0xd3, 0xc2, 0xec, 0xb2, 0x5c, 0x99, 0xb7, 0x8d, 0xd9, 0x5b, 0xa5, 0xcb, 0xf6, 0x2c,
0xb3, 0xba, 0x31, 0xd6, 0x14, 0xa6, 0xa2, 0x2c, 0x2f, 0xeb, 0x4c, 0xd4, 0x2a, 0x23, 0x24, 0x52,
0x46, 0xa7, 0xbe, 0xc3, 0xe2, 0x5c, 0x99, 0x34, 0x2f, 0xeb, 0x17, 0xd9, 0xbf, 0xe7, 0x68, 0xb4,
0xde, 0xad, 0xd1, 0xb6, 0xce, 0xf9, 0x8f, 0x1e, 0xc4, 0xeb, 0x76, 0x16, 0x7b, 0x07, 0xe3, 0xca,
0x14, 0xa2, 0xda, 0x08, 0x29, 0x1b, 0x24, 0x9a, 0x06, 0xb3, 0x60, 0x31, 0xbc, 0x19, 0xa6, 0x6e,
0xba, 0xb3, 0xac, 0xbe, 0xf2, 0x91, 0x27, 0x6e, 0x5b, 0x80, 0x7d, 0x84, 0x44, 0xa3, 0x2a, 0xb7,
0xb9, 0x69, 0x4e, 0xa6, 0x8b, 0xbf, 0x4d, 0x93, 0x23, 0x74, 0xf4, 0xbd, 0x84, 0xc7, 0xdd, 0x26,
0xd2, 0xd3, 0xde, 0x2c, 0x58, 0x8c, 0xf9, 0xa0, 0x1d, 0x4c, 0x9a, 0x3d, 0x87, 0x41, 0x8d, 0xd8,
0xf8, 0x5e, 0xe8, 0x7b, 0xb1, 0xab, 0x5d, 0x2b, 0x85, 0x88, 0xac, 0xb0, 0x7b, 0x9a, 0xf6, 0x67,
0xc1, 0xe2, 0xf2, 0xe6, 0x59, 0xda, 0x3d, 0x3c, 0xed, 0xde, 0x90, 0xae, 0xad, 0xb0, 0xc8, 0x3b,
0x8a, 0x5d, 0x43, 0xdf, 0x9d, 0x68, 0x1a, 0xf9, 0x4b, 0x3d, 0xfd, 0x13, 0x77, 0x34, 0xf1, 0x96,
0x61, 0xd7, 0x70, 0x85, 0x64, 0x45, 0x5e, 0x29, 0xda, 0xa2, 0xdc, 0x90, 0xd2, 0x05, 0x4e, 0xe3,
0x59, 0xb0, 0x08, 0x79, 0x72, 0xd6, 0x58, 0x3b, 0x7d, 0x7e, 0x80, 0xbe, 0x5f, 0xc5, 0x46, 0x30,
0xf8, 0xa2, 0x48, 0xe4, 0x15, 0xca, 0xe4, 0x11, 0x1b, 0x40, 0xb8, 0x92, 0x15, 0x26, 0x01, 0x1b,
0x42, 0xbc, 0x34, 0x5a, 0x63, 0x61, 0x93, 0x0b, 0x06, 0x10, 0xdd, 0x16, 0x56, 0x1d, 0x30, 0xe9,
0x39, 0xc3, 0x7d, 0x8d, 0x7a, 0x8d, 0xda, 0x26, 0x21, 0xbb, 0x82, 0xb1, 0xab, 0x96, 0x46, 0x3f,
0xa8, 0x66, 0x87, 0x32, 0xe9, 0xb3, 0x09, 0x0c, 0xef, 0x7e, 0xaf, 0x4b, 0x22, 0xe7, 0x5e, 0xa2,
0x20, 0x94, 0x49, 0x3c, 0xff, 0x19, 0xc0, 0xe8, 0xfc, 0xf2, 0xec, 0x15, 0x0c, 0x77, 0x48, 0x24,
0x4a, 0xa4, 0x8d, 0xd2, 0x3e, 0xb2, 0x90, 0xc3, 0x51, 0x5a, 0x69, 0xf6, 0x1a, 0x46, 0x27, 0xc0,
0xec, 0xad, 0xcf, 0x27, 0xe4, 0x27, 0xd3, 0xfd, 0xde, 0xb2, 0x27, 0xd0, 0x7f, 0xa8, 0x44, 0x4d,
0x3e, 0x8a, 0x90, 0xb7, 0x05, 0x7b, 0x03, 0x13, 0xf7, 0xf9, 0x20, 0x6d, 0x1a, 0x2c, 0x50, 0x1d,
0x50, 0xfa, 0x38, 0x42, 0x7e, 0xd9, 0xca, 0xbc, 0x53, 0xcf, 0x40, 0xb5, 0xab, 0x4d, 0x63, 0x51,
0xfa, 0x78, 0x4e, 0xe0, 0xaa, 0x53, 0xcf, 0x40, 0xfc, 0xde, 0x81, 0xd1, 0x39, 0x78, 0xd7, 0xa9,
0x9f, 0xdf, 0x7f, 0xcb, 0xfe, 0xf3, 0x87, 0xc8, 0x23, 0x2f, 0x7d, 0xf8, 0x15, 0x00, 0x00, 0xff,
0xff, 0xd4, 0x6f, 0x19, 0xed, 0x4a, 0x03, 0x00, 0x00,
}
| {
return m.RoutesImported
} | conditional_block |
core_test.go | package core
import (
"context"
"errors"
"fmt"
"github.com/getlantern/bytemap"
"github.com/getlantern/goexpr"
"github.com/getlantern/zenodb/encoding"
. "github.com/getlantern/zenodb/expr"
"github.com/stretchr/testify/assert"
"sync/atomic"
"testing"
"time"
)
var (
epoch = time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
resolution = 1 * time.Second
asOf = epoch.Add(-10 * resolution)
until = epoch
cond, _ = goexpr.Boolean(">", goexpr.Param("d"), goexpr.Constant(0))
eA = IF(cond, SUM("a"))
eB = SUM("b")
totalField = NewField("total", ADD(eA, eB))
errTest = errors.New("test error")
)
func TestRowFilter(t *testing.T) {
f := RowFilter(&goodSource{}, "test", func(ctx context.Context, key bytemap.ByteMap, fields Fields, vals Vals) (bytemap.ByteMap, Vals, error) {
x := key.Get("x")
if x != nil && x.(int)%2 == 0 {
return key, vals, nil
}
return nil, nil, nil
})
totalA := int64(0)
totalB := int64(0)
_, err := f.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
a, _ := vals[0].ValueAt(0, eA)
b, _ := vals[1].ValueAt(0, eB)
atomic.AddInt64(&totalA, int64(a))
atomic.AddInt64(&totalB, int64(b))
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 260, atomic.LoadInt64(&totalB))
assert.EqualValues(t, 0, atomic.LoadInt64(&totalA), "Filter should have excluded anything with a value for A")
}
func TestFlatRowFilter(t *testing.T) {
f := FlatRowFilter(Flatten(&goodSource{}), "test", func(ctx context.Context, row *FlatRow, fields Fields) (*FlatRow, error) {
x := row.Key.Get("x")
if x != nil && x.(int)%2 == 0 {
return row, nil
}
return nil, nil
})
totalA := int64(0)
totalB := int64(0)
_, err := f.Iterate(context.Background(), FieldsIgnored, func(row *FlatRow) (bool, error) {
a := row.Values[0]
b := row.Values[1]
atomic.AddInt64(&totalA, int64(a))
atomic.AddInt64(&totalB, int64(b))
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 260, atomic.LoadInt64(&totalB))
assert.EqualValues(t, 0, atomic.LoadInt64(&totalA), "Filter should have excluded anything with a value for A")
}
func TestDeadlineFilter(t *testing.T) {
f := RowFilter(&goodSource{}, "deadline", func(ctx context.Context, key bytemap.ByteMap, fields Fields, vals Vals) (bytemap.ByteMap, Vals, error) {
// Slow things down by sleeping for a bit
time.Sleep(100 * time.Millisecond)
return key, vals, nil
})
rowsSeen := int64(0)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(50*time.Millisecond))
defer cancel()
_, err := f.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
atomic.AddInt64(&rowsSeen, 1)
return true, nil
})
assert.Equal(t, ErrDeadlineExceeded, err, "Should have gotten deadline exceeded error")
assert.EqualValues(t, 1, atomic.LoadInt64(&rowsSeen), "Should have gotten only 1 row before deadline exceeded")
}
func TestDeadlineGroup(t *testing.T) {
eTotal := ADD(eA, eB)
g := Group(&infiniteSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
rowsSeen := int64(0)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(25*time.Millisecond))
defer cancel()
_, err := g.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
atomic.AddInt64(&rowsSeen, 1)
return true, nil
})
assert.Equal(t, ErrDeadlineExceeded, err, "Should have gotten deadline exceeded error")
assert.EqualValues(t, 0, atomic.LoadInt64(&rowsSeen), "Should have gotten 0 rows before deadline exceeded")
}
func TestGroupSingle(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
totalByX := make(map[int]float64, 0)
var fields Fields
_, err := gx.Iterate(context.Background(), func(inFields Fields) error {
fields = inFields
return nil
}, func(key bytemap.ByteMap, vals Vals) (bool, error) {
t.Log(key.AsMap())
for i, field := range fields {
t.Log(vals[i].String(field.Expr, resolution*2))
}
total := float64(0)
v := vals[0]
for p := 0; p < v.NumPeriods(eTotal.EncodedWidth()); p++ {
val, _ := v.ValueAt(p, eTotal)
total += val
}
totalByX[key.Get("x").(int)] = total
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 120, totalByX[1])
assert.EqualValues(t, 140, totalByX[2])
}
func TestGroupCrosstabSingle(t *testing.T) {
eAdd := ADD(eA, eB)
addField := Field{
Name: "add",
Expr: eAdd,
}
expectedFields := Fields{}
for _, i := range []string{"1", "2", "3", "5"} {
cond, err := goexpr.Binary("=", goexpr.Concat(goexpr.Constant("_"), goexpr.Param("y")), goexpr.Constant(i))
if !assert.NoError(t, err) {
return
}
ifex := IF(cond, eAdd)
expectedFields = append(expectedFields, NewField(i+"_add", ifex))
}
expectedFields = append(expectedFields, NewField("total_add", eAdd))
expectedKeys := []bytemap.ByteMap{
bytemap.FromSortedKeysAndValues([]string{"x"}, []interface{}{1}),
bytemap.FromSortedKeysAndValues([]string{"x"}, []interface{}{2}),
}
expectedRows := [][][]float64{
[][]float64{
[]float64{70, 0},
[]float64{0, 0},
[]float64{0, 50},
[]float64{0, 0},
[]float64{70, 50},
},
[][]float64{
[]float64{0, 0},
[]float64{0, 0},
[]float64{80, 0},
[]float64{0, 60},
[]float64{80, 60},
},
}
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Crosstab: goexpr.Concat(goexpr.Constant("_"), goexpr.Param("y")),
CrosstabIncludesTotal: true,
Fields: StaticFieldSource{addField},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
var fields Fields
_, err := gx.Iterate(context.Background(), func(inFields Fields) error {
fields = inFields
if assert.Equal(t, len(expectedFields), len(fields)) {
for i, expected := range expectedFields {
assert.Equal(t, expected.String(), fields[i].String())
}
}
return nil
}, func(key bytemap.ByteMap, vals Vals) (bool, error) {
expectedKey := expectedKeys[0]
expectedKeys = expectedKeys[1:]
expectedRow := expectedRows[0]
expectedRows = expectedRows[1:]
assert.EqualValues(t, expectedKey, key)
if assert.Equal(t, len(expectedRow), len(vals)) {
for i, expected := range expectedRow {
val := vals[i]
field := fields[i]
if assert.Equal(t, len(expected), val.NumPeriods(field.Expr.EncodedWidth())) {
for j, f := range expected {
actual, _ := val.ValueAt(j, field.Expr)
assert.Equal(t, f, actual)
}
}
}
}
return true, nil
})
if !assert.NoError(t, err) {
t.Log(FormatSource(gx))
}
}
func TestGroupResolutionOnly(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("_", goexpr.Constant("_"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 20,
})
total := float64(0)
_, err := gx.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
v := vals[0]
for p := 0; p < v.NumPeriods(eTotal.EncodedWidth()); p++ {
val, _ := v.ValueAt(p, eTotal)
t.Log(val)
total += val
}
// We expect only one row
return false, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 480, total)
}
func TestGroupNone(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 10,
})
expectedValues := map[string]float64{
"1.1": 10 + 70,
"1.3": 50,
"1.5": 90,
"2.2": 100,
"2.3": 20 + 80,
"2.5": 60,
}
ctx := context.Background()
_, err := gx.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
dims := fmt.Sprintf("%d.%d", key.Get("x"), key.Get("y"))
val, _ := vals[0].ValueAt(0, eTotal)
expectedVal := expectedValues[dims]
delete(expectedValues, dims)
assert.Equal(t, expectedVal, val, dims)
return true, nil
})
assert.NoError(t, err)
assert.Empty(t, expectedValues, "All combinations should have been seen")
}
func TestFlattenSortOffsetAndLimit(t *testing.T) {
// TODO: add test that tests flattening of rows that contain multiple periods
// worth of values
g := Group(&goodSource{}, GroupOpts{
Fields: StaticFieldSource{NewField("a", eA), NewField("b", eB), NewField("c", CONST(10))},
})
f := Flatten(g)
s := Sort(f, NewOrderBy("b", true), NewOrderBy("a", false))
o := Offset(s, 1)
l := Limit(o, 6)
// This contains the data, sorted, but missing the first and last entries
expectedTSs := []time.Time{
epoch.Add(-2 * resolution), epoch.Add(-4 * resolution), epoch.Add(-8 * resolution),
epoch.Add(-9 * resolution), epoch.Add(-5 * resolution), epoch.Add(-3 * resolution),
}
expectedAs := []float64{0, 0, 0, 10, 50, 70}
expectedBs := []float64{80, 60, 20, 0, 0, 0}
var expectedTS time.Time
var expectedA float64
var expectedB float64
_, err := l.Iterate(context.Background(), FieldsIgnored, func(row *FlatRow) (bool, error) {
expectedTS, expectedTSs = expectedTSs[0], expectedTSs[1:]
expectedA, expectedAs = expectedAs[0], expectedAs[1:]
expectedB, expectedBs = expectedBs[0], expectedBs[1:]
assert.Equal(t, expectedTS.UnixNano(), row.TS)
assert.EqualValues(t, expectedA, row.Values[0])
assert.EqualValues(t, expectedB, row.Values[1])
return true, nil
})
if !assert.NoError(t, err) {
t.Log(FormatSource(l))
}
}
func | (t *testing.T) {
avgTotal := ADD(AVG("a"), AVG("b"))
f := Flatten(&goodSource{})
u := Unflatten(f, StaticFieldSource{NewField("total", avgTotal)})
doTestUnflattened(t, u, avgTotal)
}
func TestUnflattenOptimized(t *testing.T) {
total := ADD(eA, eB)
s := &totalingSource{}
f := Flatten(s)
u := UnflattenOptimized(f)
_, isTotalingSource := u.(*totalingSource)
if !assert.True(t, isTotalingSource, "Unflattened should point directly at totaling source, put points at: %v", u.String()) {
return
}
doTestUnflattened(t, u, total)
}
func doTestUnflattened(t *testing.T, u RowSource, ex Expr) {
expectedRows := make([]*testRow, 0, len(testRows))
for _, row := range testRows {
var ts time.Time
if row.vals[0] != nil {
ts = row.vals[0].Until()
} else {
ts = row.vals[1].Until()
}
a, _ := row.vals[0].ValueAt(0, eA)
b, _ := row.vals[1].ValueAt(0, eB)
params := Map(map[string]float64{
"a": a,
"b": b,
})
expectedRow := &testRow{
key: row.key,
vals: []encoding.Sequence{encoding.NewValue(ex, ts, params, row.key)},
}
expectedRows = append(expectedRows, expectedRow)
}
_, err := u.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
row := &testRow{key, vals}
for i, expected := range expectedRows {
if row.equals(expected) {
expectedRows = append(expectedRows[:i], expectedRows[i+1:]...)
break
}
}
return true, nil
})
assert.NoError(t, err)
assert.Empty(t, expectedRows, "All rows should have been seen")
}
type testSource struct{}
func (s *testSource) getFields() Fields {
return Fields{NewField("a", eA), NewField("b", eB)}
}
func (s *testSource) GetGroupBy() []GroupBy {
x, _ := goexpr.Binary("+", goexpr.Param("x"), goexpr.Constant(1))
return []GroupBy{NewGroupBy("x", x), NewGroupBy("y", goexpr.Param("y"))}
}
func (s *testSource) GetResolution() time.Duration {
return resolution
}
func (s *testSource) GetAsOf() time.Time {
return asOf
}
func (s *testSource) GetUntil() time.Time {
return until
}
type testRow struct {
key bytemap.ByteMap
vals Vals
}
func (r *testRow) equals(other *testRow) bool {
if r == nil || other == nil {
return false
}
if string(r.key) != string(other.key) {
return false
}
if len(r.vals) != len(other.vals) {
return false
}
for i, val := range r.vals {
ex := eA
if i > 0 {
ex = eB
}
otherVal := other.vals[i]
v, _ := val.ValueAt(0, ex)
ov, _ := otherVal.ValueAt(0, ex)
if v != ov {
return false
}
}
return true
}
var testRows = []*testRow{
makeRow(epoch.Add(-9*resolution), 1, 1, 10, 0),
makeRow(epoch.Add(-8*resolution), 2, 3, 0, 20),
// Intentional gap
makeRow(epoch.Add(-5*resolution), 1, 3, 50, 0),
makeRow(epoch.Add(-4*resolution), 2, 5, 0, 60),
makeRow(epoch.Add(-3*resolution), 1, 1, 70, 0),
makeRow(epoch.Add(-2*resolution), 2, 3, 0, 80),
makeRow(epoch.Add(-1*resolution), 1, 5, 90, 0),
makeRow(epoch, 2, 2, 0, 100),
}
func makeRow(ts time.Time, x int, y int, a float64, b float64) *testRow {
key := bytemap.New(map[string]interface{}{"x": x, "y": y})
vals := make([]encoding.Sequence, 2)
if a != 0 {
vals[0] = encoding.NewFloatValue(eA, ts, a)
}
if b != 0 {
vals[1] = encoding.NewFloatValue(eB, ts, b)
}
return &testRow{key, vals}
}
type goodSource struct {
testSource
}
func (s *goodSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
onFields(s.getFields())
guard := Guard(ctx)
for _, row := range testRows {
if guard.TimedOut() {
return nil, ErrDeadlineExceeded
}
more, err := onRow(row.key, row.vals)
if !more || err != nil {
return nil, err
}
}
return nil, nil
}
func (s *goodSource) String() string {
return "test.good"
}
type infiniteSource struct {
testSource
}
func (s *infiniteSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
onFields(s.getFields())
for {
for _, row := range testRows {
more, err := onRow(row.key, row.vals)
if !more || err != nil {
return nil, err
}
}
}
}
func (s *infiniteSource) String() string {
return "test.infinite"
}
type errorSource struct {
testSource
}
func (s *errorSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
return nil, errTest
}
func (s *errorSource) String() string {
return "test.error"
}
type totalingSource struct {
goodSource
}
func (s *totalingSource) getFields() Fields {
return Fields{totalField}
}
func (s *totalingSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
return s.goodSource.Iterate(ctx, onFields, func(key bytemap.ByteMap, vals Vals) (bool, error) {
a, _ := vals[0].ValueAt(0, eA)
b, _ := vals[0].ValueAt(0, eB)
val := encoding.NewValue(totalField.Expr, vals[0].Until(), Map{"a": a, "b": b}, key)
return onRow(key, Vals{val})
})
}
| TestUnflattenTransform | identifier_name |
core_test.go | package core
import (
"context"
"errors"
"fmt"
"github.com/getlantern/bytemap"
"github.com/getlantern/goexpr"
"github.com/getlantern/zenodb/encoding"
. "github.com/getlantern/zenodb/expr"
"github.com/stretchr/testify/assert"
"sync/atomic"
"testing"
"time"
)
var (
epoch = time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
resolution = 1 * time.Second
asOf = epoch.Add(-10 * resolution)
until = epoch
cond, _ = goexpr.Boolean(">", goexpr.Param("d"), goexpr.Constant(0))
eA = IF(cond, SUM("a"))
eB = SUM("b")
totalField = NewField("total", ADD(eA, eB))
errTest = errors.New("test error")
)
func TestRowFilter(t *testing.T) {
f := RowFilter(&goodSource{}, "test", func(ctx context.Context, key bytemap.ByteMap, fields Fields, vals Vals) (bytemap.ByteMap, Vals, error) {
x := key.Get("x")
if x != nil && x.(int)%2 == 0 {
return key, vals, nil
}
return nil, nil, nil
})
totalA := int64(0)
totalB := int64(0)
_, err := f.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
a, _ := vals[0].ValueAt(0, eA)
b, _ := vals[1].ValueAt(0, eB)
atomic.AddInt64(&totalA, int64(a))
atomic.AddInt64(&totalB, int64(b))
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 260, atomic.LoadInt64(&totalB))
assert.EqualValues(t, 0, atomic.LoadInt64(&totalA), "Filter should have excluded anything with a value for A")
}
func TestFlatRowFilter(t *testing.T) {
f := FlatRowFilter(Flatten(&goodSource{}), "test", func(ctx context.Context, row *FlatRow, fields Fields) (*FlatRow, error) {
x := row.Key.Get("x")
if x != nil && x.(int)%2 == 0 {
return row, nil
}
return nil, nil
})
totalA := int64(0)
totalB := int64(0)
_, err := f.Iterate(context.Background(), FieldsIgnored, func(row *FlatRow) (bool, error) {
a := row.Values[0]
b := row.Values[1]
atomic.AddInt64(&totalA, int64(a))
atomic.AddInt64(&totalB, int64(b))
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 260, atomic.LoadInt64(&totalB))
assert.EqualValues(t, 0, atomic.LoadInt64(&totalA), "Filter should have excluded anything with a value for A")
}
func TestDeadlineFilter(t *testing.T) {
f := RowFilter(&goodSource{}, "deadline", func(ctx context.Context, key bytemap.ByteMap, fields Fields, vals Vals) (bytemap.ByteMap, Vals, error) {
// Slow things down by sleeping for a bit
time.Sleep(100 * time.Millisecond)
return key, vals, nil
})
rowsSeen := int64(0)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(50*time.Millisecond))
defer cancel()
_, err := f.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
atomic.AddInt64(&rowsSeen, 1)
return true, nil
})
assert.Equal(t, ErrDeadlineExceeded, err, "Should have gotten deadline exceeded error")
assert.EqualValues(t, 1, atomic.LoadInt64(&rowsSeen), "Should have gotten only 1 row before deadline exceeded")
}
func TestDeadlineGroup(t *testing.T) {
eTotal := ADD(eA, eB)
g := Group(&infiniteSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
rowsSeen := int64(0)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(25*time.Millisecond))
defer cancel()
_, err := g.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
atomic.AddInt64(&rowsSeen, 1)
return true, nil
})
assert.Equal(t, ErrDeadlineExceeded, err, "Should have gotten deadline exceeded error")
assert.EqualValues(t, 0, atomic.LoadInt64(&rowsSeen), "Should have gotten 0 rows before deadline exceeded")
}
func TestGroupSingle(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
totalByX := make(map[int]float64, 0)
var fields Fields
_, err := gx.Iterate(context.Background(), func(inFields Fields) error {
fields = inFields
return nil
}, func(key bytemap.ByteMap, vals Vals) (bool, error) {
t.Log(key.AsMap())
for i, field := range fields {
t.Log(vals[i].String(field.Expr, resolution*2))
}
total := float64(0)
v := vals[0]
for p := 0; p < v.NumPeriods(eTotal.EncodedWidth()); p++ {
val, _ := v.ValueAt(p, eTotal)
total += val
}
totalByX[key.Get("x").(int)] = total
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 120, totalByX[1])
assert.EqualValues(t, 140, totalByX[2])
}
func TestGroupCrosstabSingle(t *testing.T) {
eAdd := ADD(eA, eB)
addField := Field{
Name: "add",
Expr: eAdd,
}
expectedFields := Fields{}
for _, i := range []string{"1", "2", "3", "5"} {
cond, err := goexpr.Binary("=", goexpr.Concat(goexpr.Constant("_"), goexpr.Param("y")), goexpr.Constant(i))
if !assert.NoError(t, err) {
return
}
ifex := IF(cond, eAdd)
expectedFields = append(expectedFields, NewField(i+"_add", ifex))
}
expectedFields = append(expectedFields, NewField("total_add", eAdd))
expectedKeys := []bytemap.ByteMap{
bytemap.FromSortedKeysAndValues([]string{"x"}, []interface{}{1}),
bytemap.FromSortedKeysAndValues([]string{"x"}, []interface{}{2}),
}
expectedRows := [][][]float64{
[][]float64{
[]float64{70, 0},
[]float64{0, 0},
[]float64{0, 50},
[]float64{0, 0},
[]float64{70, 50},
},
[][]float64{
[]float64{0, 0},
[]float64{0, 0},
[]float64{80, 0},
[]float64{0, 60},
[]float64{80, 60},
},
}
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Crosstab: goexpr.Concat(goexpr.Constant("_"), goexpr.Param("y")),
CrosstabIncludesTotal: true,
Fields: StaticFieldSource{addField},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
var fields Fields
_, err := gx.Iterate(context.Background(), func(inFields Fields) error {
fields = inFields
if assert.Equal(t, len(expectedFields), len(fields)) {
for i, expected := range expectedFields {
assert.Equal(t, expected.String(), fields[i].String())
}
}
return nil
}, func(key bytemap.ByteMap, vals Vals) (bool, error) {
expectedKey := expectedKeys[0]
expectedKeys = expectedKeys[1:]
expectedRow := expectedRows[0]
expectedRows = expectedRows[1:]
assert.EqualValues(t, expectedKey, key)
if assert.Equal(t, len(expectedRow), len(vals)) {
for i, expected := range expectedRow {
val := vals[i]
field := fields[i]
if assert.Equal(t, len(expected), val.NumPeriods(field.Expr.EncodedWidth())) {
for j, f := range expected {
actual, _ := val.ValueAt(j, field.Expr)
assert.Equal(t, f, actual)
}
}
}
}
return true, nil
})
if !assert.NoError(t, err) {
t.Log(FormatSource(gx))
}
}
func TestGroupResolutionOnly(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("_", goexpr.Constant("_"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 20,
})
total := float64(0)
_, err := gx.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
v := vals[0]
for p := 0; p < v.NumPeriods(eTotal.EncodedWidth()); p++ {
val, _ := v.ValueAt(p, eTotal)
t.Log(val)
total += val
}
// We expect only one row
return false, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 480, total)
}
func TestGroupNone(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 10,
})
expectedValues := map[string]float64{
"1.1": 10 + 70,
"1.3": 50,
"1.5": 90,
"2.2": 100,
"2.3": 20 + 80,
"2.5": 60,
}
ctx := context.Background()
_, err := gx.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
dims := fmt.Sprintf("%d.%d", key.Get("x"), key.Get("y"))
val, _ := vals[0].ValueAt(0, eTotal)
expectedVal := expectedValues[dims]
delete(expectedValues, dims)
assert.Equal(t, expectedVal, val, dims)
return true, nil
})
assert.NoError(t, err)
assert.Empty(t, expectedValues, "All combinations should have been seen")
}
func TestFlattenSortOffsetAndLimit(t *testing.T) {
// TODO: add test that tests flattening of rows that contain multiple periods
// worth of values
g := Group(&goodSource{}, GroupOpts{
Fields: StaticFieldSource{NewField("a", eA), NewField("b", eB), NewField("c", CONST(10))},
})
f := Flatten(g)
s := Sort(f, NewOrderBy("b", true), NewOrderBy("a", false))
o := Offset(s, 1)
l := Limit(o, 6)
// This contains the data, sorted, but missing the first and last entries
expectedTSs := []time.Time{
epoch.Add(-2 * resolution), epoch.Add(-4 * resolution), epoch.Add(-8 * resolution),
epoch.Add(-9 * resolution), epoch.Add(-5 * resolution), epoch.Add(-3 * resolution),
}
expectedAs := []float64{0, 0, 0, 10, 50, 70}
expectedBs := []float64{80, 60, 20, 0, 0, 0}
var expectedTS time.Time
var expectedA float64
var expectedB float64
_, err := l.Iterate(context.Background(), FieldsIgnored, func(row *FlatRow) (bool, error) {
expectedTS, expectedTSs = expectedTSs[0], expectedTSs[1:]
expectedA, expectedAs = expectedAs[0], expectedAs[1:]
expectedB, expectedBs = expectedBs[0], expectedBs[1:]
assert.Equal(t, expectedTS.UnixNano(), row.TS)
assert.EqualValues(t, expectedA, row.Values[0])
assert.EqualValues(t, expectedB, row.Values[1])
return true, nil
})
if !assert.NoError(t, err) {
t.Log(FormatSource(l))
}
}
func TestUnflattenTransform(t *testing.T) {
avgTotal := ADD(AVG("a"), AVG("b"))
f := Flatten(&goodSource{})
u := Unflatten(f, StaticFieldSource{NewField("total", avgTotal)})
doTestUnflattened(t, u, avgTotal)
}
func TestUnflattenOptimized(t *testing.T) {
total := ADD(eA, eB)
s := &totalingSource{}
f := Flatten(s)
u := UnflattenOptimized(f)
_, isTotalingSource := u.(*totalingSource)
if !assert.True(t, isTotalingSource, "Unflattened should point directly at totaling source, put points at: %v", u.String()) {
return
}
doTestUnflattened(t, u, total)
}
func doTestUnflattened(t *testing.T, u RowSource, ex Expr) {
expectedRows := make([]*testRow, 0, len(testRows))
for _, row := range testRows {
var ts time.Time
if row.vals[0] != nil {
ts = row.vals[0].Until()
} else {
ts = row.vals[1].Until()
}
a, _ := row.vals[0].ValueAt(0, eA)
b, _ := row.vals[1].ValueAt(0, eB)
params := Map(map[string]float64{
"a": a,
"b": b,
})
expectedRow := &testRow{
key: row.key,
vals: []encoding.Sequence{encoding.NewValue(ex, ts, params, row.key)},
}
expectedRows = append(expectedRows, expectedRow)
}
_, err := u.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
row := &testRow{key, vals}
for i, expected := range expectedRows {
if row.equals(expected) {
expectedRows = append(expectedRows[:i], expectedRows[i+1:]...)
break
}
}
return true, nil
})
assert.NoError(t, err)
assert.Empty(t, expectedRows, "All rows should have been seen")
}
type testSource struct{}
func (s *testSource) getFields() Fields {
return Fields{NewField("a", eA), NewField("b", eB)}
}
func (s *testSource) GetGroupBy() []GroupBy {
x, _ := goexpr.Binary("+", goexpr.Param("x"), goexpr.Constant(1))
return []GroupBy{NewGroupBy("x", x), NewGroupBy("y", goexpr.Param("y"))}
}
func (s *testSource) GetResolution() time.Duration {
return resolution
}
func (s *testSource) GetAsOf() time.Time {
return asOf
}
func (s *testSource) GetUntil() time.Time {
return until
}
type testRow struct {
key bytemap.ByteMap
vals Vals
}
func (r *testRow) equals(other *testRow) bool {
if r == nil || other == nil {
return false
}
if string(r.key) != string(other.key) {
return false
}
if len(r.vals) != len(other.vals) {
return false
}
for i, val := range r.vals {
ex := eA
if i > 0 {
ex = eB
}
otherVal := other.vals[i]
v, _ := val.ValueAt(0, ex)
ov, _ := otherVal.ValueAt(0, ex)
if v != ov {
return false
}
}
return true
}
var testRows = []*testRow{
makeRow(epoch.Add(-9*resolution), 1, 1, 10, 0),
makeRow(epoch.Add(-8*resolution), 2, 3, 0, 20),
// Intentional gap
makeRow(epoch.Add(-5*resolution), 1, 3, 50, 0),
makeRow(epoch.Add(-4*resolution), 2, 5, 0, 60),
makeRow(epoch.Add(-3*resolution), 1, 1, 70, 0),
makeRow(epoch.Add(-2*resolution), 2, 3, 0, 80),
makeRow(epoch.Add(-1*resolution), 1, 5, 90, 0),
makeRow(epoch, 2, 2, 0, 100),
}
func makeRow(ts time.Time, x int, y int, a float64, b float64) *testRow {
key := bytemap.New(map[string]interface{}{"x": x, "y": y})
vals := make([]encoding.Sequence, 2)
if a != 0 {
vals[0] = encoding.NewFloatValue(eA, ts, a)
}
if b != 0 {
vals[1] = encoding.NewFloatValue(eB, ts, b)
}
return &testRow{key, vals}
}
type goodSource struct {
testSource
}
func (s *goodSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
onFields(s.getFields())
guard := Guard(ctx)
for _, row := range testRows {
if guard.TimedOut() {
return nil, ErrDeadlineExceeded
}
more, err := onRow(row.key, row.vals)
if !more || err != nil {
return nil, err
}
}
return nil, nil
}
func (s *goodSource) String() string {
return "test.good"
}
type infiniteSource struct {
testSource
}
func (s *infiniteSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
onFields(s.getFields())
for {
for _, row := range testRows |
}
}
func (s *infiniteSource) String() string {
return "test.infinite"
}
type errorSource struct {
testSource
}
func (s *errorSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
return nil, errTest
}
func (s *errorSource) String() string {
return "test.error"
}
type totalingSource struct {
goodSource
}
func (s *totalingSource) getFields() Fields {
return Fields{totalField}
}
func (s *totalingSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
return s.goodSource.Iterate(ctx, onFields, func(key bytemap.ByteMap, vals Vals) (bool, error) {
a, _ := vals[0].ValueAt(0, eA)
b, _ := vals[0].ValueAt(0, eB)
val := encoding.NewValue(totalField.Expr, vals[0].Until(), Map{"a": a, "b": b}, key)
return onRow(key, Vals{val})
})
}
| {
more, err := onRow(row.key, row.vals)
if !more || err != nil {
return nil, err
}
} | conditional_block |
core_test.go | package core
import (
"context"
"errors"
"fmt"
"github.com/getlantern/bytemap"
"github.com/getlantern/goexpr"
"github.com/getlantern/zenodb/encoding"
. "github.com/getlantern/zenodb/expr"
"github.com/stretchr/testify/assert"
"sync/atomic"
"testing"
"time"
)
var (
epoch = time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
resolution = 1 * time.Second
asOf = epoch.Add(-10 * resolution)
until = epoch
cond, _ = goexpr.Boolean(">", goexpr.Param("d"), goexpr.Constant(0))
eA = IF(cond, SUM("a"))
eB = SUM("b")
totalField = NewField("total", ADD(eA, eB))
errTest = errors.New("test error")
)
func TestRowFilter(t *testing.T) {
f := RowFilter(&goodSource{}, "test", func(ctx context.Context, key bytemap.ByteMap, fields Fields, vals Vals) (bytemap.ByteMap, Vals, error) {
x := key.Get("x")
if x != nil && x.(int)%2 == 0 {
return key, vals, nil
}
return nil, nil, nil
})
totalA := int64(0)
totalB := int64(0)
_, err := f.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
a, _ := vals[0].ValueAt(0, eA)
b, _ := vals[1].ValueAt(0, eB)
atomic.AddInt64(&totalA, int64(a))
atomic.AddInt64(&totalB, int64(b))
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 260, atomic.LoadInt64(&totalB))
assert.EqualValues(t, 0, atomic.LoadInt64(&totalA), "Filter should have excluded anything with a value for A")
}
func TestFlatRowFilter(t *testing.T) {
f := FlatRowFilter(Flatten(&goodSource{}), "test", func(ctx context.Context, row *FlatRow, fields Fields) (*FlatRow, error) {
x := row.Key.Get("x")
if x != nil && x.(int)%2 == 0 {
return row, nil
}
return nil, nil
})
totalA := int64(0)
totalB := int64(0)
_, err := f.Iterate(context.Background(), FieldsIgnored, func(row *FlatRow) (bool, error) {
a := row.Values[0]
b := row.Values[1]
atomic.AddInt64(&totalA, int64(a))
atomic.AddInt64(&totalB, int64(b))
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 260, atomic.LoadInt64(&totalB))
assert.EqualValues(t, 0, atomic.LoadInt64(&totalA), "Filter should have excluded anything with a value for A")
}
func TestDeadlineFilter(t *testing.T) {
f := RowFilter(&goodSource{}, "deadline", func(ctx context.Context, key bytemap.ByteMap, fields Fields, vals Vals) (bytemap.ByteMap, Vals, error) {
// Slow things down by sleeping for a bit
time.Sleep(100 * time.Millisecond)
return key, vals, nil
})
rowsSeen := int64(0)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(50*time.Millisecond))
defer cancel()
_, err := f.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
atomic.AddInt64(&rowsSeen, 1)
return true, nil
})
assert.Equal(t, ErrDeadlineExceeded, err, "Should have gotten deadline exceeded error")
assert.EqualValues(t, 1, atomic.LoadInt64(&rowsSeen), "Should have gotten only 1 row before deadline exceeded")
}
func TestDeadlineGroup(t *testing.T) {
eTotal := ADD(eA, eB)
g := Group(&infiniteSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
rowsSeen := int64(0)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(25*time.Millisecond))
defer cancel()
_, err := g.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
atomic.AddInt64(&rowsSeen, 1)
return true, nil
})
assert.Equal(t, ErrDeadlineExceeded, err, "Should have gotten deadline exceeded error")
assert.EqualValues(t, 0, atomic.LoadInt64(&rowsSeen), "Should have gotten 0 rows before deadline exceeded")
}
func TestGroupSingle(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
totalByX := make(map[int]float64, 0)
var fields Fields
_, err := gx.Iterate(context.Background(), func(inFields Fields) error {
fields = inFields
return nil
}, func(key bytemap.ByteMap, vals Vals) (bool, error) {
t.Log(key.AsMap())
for i, field := range fields {
t.Log(vals[i].String(field.Expr, resolution*2))
}
total := float64(0)
v := vals[0]
for p := 0; p < v.NumPeriods(eTotal.EncodedWidth()); p++ {
val, _ := v.ValueAt(p, eTotal)
total += val
}
totalByX[key.Get("x").(int)] = total
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 120, totalByX[1])
assert.EqualValues(t, 140, totalByX[2])
}
func TestGroupCrosstabSingle(t *testing.T) {
eAdd := ADD(eA, eB)
addField := Field{
Name: "add",
Expr: eAdd,
}
expectedFields := Fields{}
for _, i := range []string{"1", "2", "3", "5"} {
cond, err := goexpr.Binary("=", goexpr.Concat(goexpr.Constant("_"), goexpr.Param("y")), goexpr.Constant(i))
if !assert.NoError(t, err) {
return
}
ifex := IF(cond, eAdd)
expectedFields = append(expectedFields, NewField(i+"_add", ifex))
}
expectedFields = append(expectedFields, NewField("total_add", eAdd))
expectedKeys := []bytemap.ByteMap{
bytemap.FromSortedKeysAndValues([]string{"x"}, []interface{}{1}),
bytemap.FromSortedKeysAndValues([]string{"x"}, []interface{}{2}),
}
expectedRows := [][][]float64{
[][]float64{
[]float64{70, 0},
[]float64{0, 0},
[]float64{0, 50},
[]float64{0, 0},
[]float64{70, 50},
},
[][]float64{
[]float64{0, 0},
[]float64{0, 0},
[]float64{80, 0},
[]float64{0, 60},
[]float64{80, 60},
},
}
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Crosstab: goexpr.Concat(goexpr.Constant("_"), goexpr.Param("y")),
CrosstabIncludesTotal: true,
Fields: StaticFieldSource{addField},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
var fields Fields
_, err := gx.Iterate(context.Background(), func(inFields Fields) error {
fields = inFields
if assert.Equal(t, len(expectedFields), len(fields)) {
for i, expected := range expectedFields {
assert.Equal(t, expected.String(), fields[i].String())
}
}
return nil
}, func(key bytemap.ByteMap, vals Vals) (bool, error) {
expectedKey := expectedKeys[0]
expectedKeys = expectedKeys[1:]
expectedRow := expectedRows[0]
expectedRows = expectedRows[1:]
assert.EqualValues(t, expectedKey, key)
if assert.Equal(t, len(expectedRow), len(vals)) {
for i, expected := range expectedRow {
val := vals[i]
field := fields[i]
if assert.Equal(t, len(expected), val.NumPeriods(field.Expr.EncodedWidth())) {
for j, f := range expected {
actual, _ := val.ValueAt(j, field.Expr)
assert.Equal(t, f, actual)
}
}
}
}
return true, nil
})
if !assert.NoError(t, err) {
t.Log(FormatSource(gx))
}
}
func TestGroupResolutionOnly(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("_", goexpr.Constant("_"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 20,
})
total := float64(0)
_, err := gx.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
v := vals[0]
for p := 0; p < v.NumPeriods(eTotal.EncodedWidth()); p++ {
val, _ := v.ValueAt(p, eTotal)
t.Log(val)
total += val
}
// We expect only one row
return false, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 480, total)
}
func TestGroupNone(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 10,
})
expectedValues := map[string]float64{
"1.1": 10 + 70,
"1.3": 50,
"1.5": 90,
"2.2": 100,
"2.3": 20 + 80,
"2.5": 60,
}
ctx := context.Background()
_, err := gx.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
dims := fmt.Sprintf("%d.%d", key.Get("x"), key.Get("y"))
val, _ := vals[0].ValueAt(0, eTotal)
expectedVal := expectedValues[dims]
delete(expectedValues, dims)
assert.Equal(t, expectedVal, val, dims)
return true, nil
})
assert.NoError(t, err)
assert.Empty(t, expectedValues, "All combinations should have been seen")
}
func TestFlattenSortOffsetAndLimit(t *testing.T) {
// TODO: add test that tests flattening of rows that contain multiple periods
// worth of values
g := Group(&goodSource{}, GroupOpts{
Fields: StaticFieldSource{NewField("a", eA), NewField("b", eB), NewField("c", CONST(10))},
})
f := Flatten(g)
s := Sort(f, NewOrderBy("b", true), NewOrderBy("a", false))
o := Offset(s, 1)
l := Limit(o, 6)
// This contains the data, sorted, but missing the first and last entries
expectedTSs := []time.Time{
epoch.Add(-2 * resolution), epoch.Add(-4 * resolution), epoch.Add(-8 * resolution),
epoch.Add(-9 * resolution), epoch.Add(-5 * resolution), epoch.Add(-3 * resolution),
}
expectedAs := []float64{0, 0, 0, 10, 50, 70}
expectedBs := []float64{80, 60, 20, 0, 0, 0}
var expectedTS time.Time
var expectedA float64
var expectedB float64
_, err := l.Iterate(context.Background(), FieldsIgnored, func(row *FlatRow) (bool, error) {
expectedTS, expectedTSs = expectedTSs[0], expectedTSs[1:]
expectedA, expectedAs = expectedAs[0], expectedAs[1:]
expectedB, expectedBs = expectedBs[0], expectedBs[1:]
assert.Equal(t, expectedTS.UnixNano(), row.TS)
assert.EqualValues(t, expectedA, row.Values[0])
assert.EqualValues(t, expectedB, row.Values[1])
return true, nil
})
if !assert.NoError(t, err) {
t.Log(FormatSource(l))
}
}
func TestUnflattenTransform(t *testing.T) {
avgTotal := ADD(AVG("a"), AVG("b"))
f := Flatten(&goodSource{})
u := Unflatten(f, StaticFieldSource{NewField("total", avgTotal)})
doTestUnflattened(t, u, avgTotal)
}
func TestUnflattenOptimized(t *testing.T) {
total := ADD(eA, eB)
s := &totalingSource{}
f := Flatten(s)
u := UnflattenOptimized(f)
_, isTotalingSource := u.(*totalingSource)
if !assert.True(t, isTotalingSource, "Unflattened should point directly at totaling source, put points at: %v", u.String()) {
return
}
doTestUnflattened(t, u, total)
}
func doTestUnflattened(t *testing.T, u RowSource, ex Expr) {
expectedRows := make([]*testRow, 0, len(testRows))
for _, row := range testRows {
var ts time.Time
if row.vals[0] != nil {
ts = row.vals[0].Until()
} else {
ts = row.vals[1].Until()
}
a, _ := row.vals[0].ValueAt(0, eA)
b, _ := row.vals[1].ValueAt(0, eB)
params := Map(map[string]float64{
"a": a,
"b": b,
})
expectedRow := &testRow{
key: row.key,
vals: []encoding.Sequence{encoding.NewValue(ex, ts, params, row.key)},
}
expectedRows = append(expectedRows, expectedRow)
}
_, err := u.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
row := &testRow{key, vals}
for i, expected := range expectedRows {
if row.equals(expected) {
expectedRows = append(expectedRows[:i], expectedRows[i+1:]...)
break
}
}
return true, nil
})
assert.NoError(t, err)
assert.Empty(t, expectedRows, "All rows should have been seen")
}
type testSource struct{}
func (s *testSource) getFields() Fields {
return Fields{NewField("a", eA), NewField("b", eB)}
}
func (s *testSource) GetGroupBy() []GroupBy {
x, _ := goexpr.Binary("+", goexpr.Param("x"), goexpr.Constant(1))
return []GroupBy{NewGroupBy("x", x), NewGroupBy("y", goexpr.Param("y"))}
}
func (s *testSource) GetResolution() time.Duration { | }
func (s *testSource) GetAsOf() time.Time {
return asOf
}
func (s *testSource) GetUntil() time.Time {
return until
}
type testRow struct {
key bytemap.ByteMap
vals Vals
}
func (r *testRow) equals(other *testRow) bool {
if r == nil || other == nil {
return false
}
if string(r.key) != string(other.key) {
return false
}
if len(r.vals) != len(other.vals) {
return false
}
for i, val := range r.vals {
ex := eA
if i > 0 {
ex = eB
}
otherVal := other.vals[i]
v, _ := val.ValueAt(0, ex)
ov, _ := otherVal.ValueAt(0, ex)
if v != ov {
return false
}
}
return true
}
var testRows = []*testRow{
makeRow(epoch.Add(-9*resolution), 1, 1, 10, 0),
makeRow(epoch.Add(-8*resolution), 2, 3, 0, 20),
// Intentional gap
makeRow(epoch.Add(-5*resolution), 1, 3, 50, 0),
makeRow(epoch.Add(-4*resolution), 2, 5, 0, 60),
makeRow(epoch.Add(-3*resolution), 1, 1, 70, 0),
makeRow(epoch.Add(-2*resolution), 2, 3, 0, 80),
makeRow(epoch.Add(-1*resolution), 1, 5, 90, 0),
makeRow(epoch, 2, 2, 0, 100),
}
func makeRow(ts time.Time, x int, y int, a float64, b float64) *testRow {
key := bytemap.New(map[string]interface{}{"x": x, "y": y})
vals := make([]encoding.Sequence, 2)
if a != 0 {
vals[0] = encoding.NewFloatValue(eA, ts, a)
}
if b != 0 {
vals[1] = encoding.NewFloatValue(eB, ts, b)
}
return &testRow{key, vals}
}
type goodSource struct {
testSource
}
func (s *goodSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
onFields(s.getFields())
guard := Guard(ctx)
for _, row := range testRows {
if guard.TimedOut() {
return nil, ErrDeadlineExceeded
}
more, err := onRow(row.key, row.vals)
if !more || err != nil {
return nil, err
}
}
return nil, nil
}
func (s *goodSource) String() string {
return "test.good"
}
type infiniteSource struct {
testSource
}
func (s *infiniteSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
onFields(s.getFields())
for {
for _, row := range testRows {
more, err := onRow(row.key, row.vals)
if !more || err != nil {
return nil, err
}
}
}
}
func (s *infiniteSource) String() string {
return "test.infinite"
}
type errorSource struct {
testSource
}
func (s *errorSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
return nil, errTest
}
func (s *errorSource) String() string {
return "test.error"
}
type totalingSource struct {
goodSource
}
func (s *totalingSource) getFields() Fields {
return Fields{totalField}
}
func (s *totalingSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
return s.goodSource.Iterate(ctx, onFields, func(key bytemap.ByteMap, vals Vals) (bool, error) {
a, _ := vals[0].ValueAt(0, eA)
b, _ := vals[0].ValueAt(0, eB)
val := encoding.NewValue(totalField.Expr, vals[0].Until(), Map{"a": a, "b": b}, key)
return onRow(key, Vals{val})
})
} | return resolution | random_line_split |
core_test.go | package core
import (
"context"
"errors"
"fmt"
"github.com/getlantern/bytemap"
"github.com/getlantern/goexpr"
"github.com/getlantern/zenodb/encoding"
. "github.com/getlantern/zenodb/expr"
"github.com/stretchr/testify/assert"
"sync/atomic"
"testing"
"time"
)
var (
epoch = time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
resolution = 1 * time.Second
asOf = epoch.Add(-10 * resolution)
until = epoch
cond, _ = goexpr.Boolean(">", goexpr.Param("d"), goexpr.Constant(0))
eA = IF(cond, SUM("a"))
eB = SUM("b")
totalField = NewField("total", ADD(eA, eB))
errTest = errors.New("test error")
)
func TestRowFilter(t *testing.T) {
f := RowFilter(&goodSource{}, "test", func(ctx context.Context, key bytemap.ByteMap, fields Fields, vals Vals) (bytemap.ByteMap, Vals, error) {
x := key.Get("x")
if x != nil && x.(int)%2 == 0 {
return key, vals, nil
}
return nil, nil, nil
})
totalA := int64(0)
totalB := int64(0)
_, err := f.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
a, _ := vals[0].ValueAt(0, eA)
b, _ := vals[1].ValueAt(0, eB)
atomic.AddInt64(&totalA, int64(a))
atomic.AddInt64(&totalB, int64(b))
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 260, atomic.LoadInt64(&totalB))
assert.EqualValues(t, 0, atomic.LoadInt64(&totalA), "Filter should have excluded anything with a value for A")
}
func TestFlatRowFilter(t *testing.T) {
f := FlatRowFilter(Flatten(&goodSource{}), "test", func(ctx context.Context, row *FlatRow, fields Fields) (*FlatRow, error) {
x := row.Key.Get("x")
if x != nil && x.(int)%2 == 0 {
return row, nil
}
return nil, nil
})
totalA := int64(0)
totalB := int64(0)
_, err := f.Iterate(context.Background(), FieldsIgnored, func(row *FlatRow) (bool, error) {
a := row.Values[0]
b := row.Values[1]
atomic.AddInt64(&totalA, int64(a))
atomic.AddInt64(&totalB, int64(b))
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 260, atomic.LoadInt64(&totalB))
assert.EqualValues(t, 0, atomic.LoadInt64(&totalA), "Filter should have excluded anything with a value for A")
}
func TestDeadlineFilter(t *testing.T) {
f := RowFilter(&goodSource{}, "deadline", func(ctx context.Context, key bytemap.ByteMap, fields Fields, vals Vals) (bytemap.ByteMap, Vals, error) {
// Slow things down by sleeping for a bit
time.Sleep(100 * time.Millisecond)
return key, vals, nil
})
rowsSeen := int64(0)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(50*time.Millisecond))
defer cancel()
_, err := f.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
atomic.AddInt64(&rowsSeen, 1)
return true, nil
})
assert.Equal(t, ErrDeadlineExceeded, err, "Should have gotten deadline exceeded error")
assert.EqualValues(t, 1, atomic.LoadInt64(&rowsSeen), "Should have gotten only 1 row before deadline exceeded")
}
func TestDeadlineGroup(t *testing.T) {
eTotal := ADD(eA, eB)
g := Group(&infiniteSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
rowsSeen := int64(0)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(25*time.Millisecond))
defer cancel()
_, err := g.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
atomic.AddInt64(&rowsSeen, 1)
return true, nil
})
assert.Equal(t, ErrDeadlineExceeded, err, "Should have gotten deadline exceeded error")
assert.EqualValues(t, 0, atomic.LoadInt64(&rowsSeen), "Should have gotten 0 rows before deadline exceeded")
}
func TestGroupSingle(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
totalByX := make(map[int]float64, 0)
var fields Fields
_, err := gx.Iterate(context.Background(), func(inFields Fields) error {
fields = inFields
return nil
}, func(key bytemap.ByteMap, vals Vals) (bool, error) {
t.Log(key.AsMap())
for i, field := range fields {
t.Log(vals[i].String(field.Expr, resolution*2))
}
total := float64(0)
v := vals[0]
for p := 0; p < v.NumPeriods(eTotal.EncodedWidth()); p++ {
val, _ := v.ValueAt(p, eTotal)
total += val
}
totalByX[key.Get("x").(int)] = total
return true, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 120, totalByX[1])
assert.EqualValues(t, 140, totalByX[2])
}
func TestGroupCrosstabSingle(t *testing.T) {
eAdd := ADD(eA, eB)
addField := Field{
Name: "add",
Expr: eAdd,
}
expectedFields := Fields{}
for _, i := range []string{"1", "2", "3", "5"} {
cond, err := goexpr.Binary("=", goexpr.Concat(goexpr.Constant("_"), goexpr.Param("y")), goexpr.Constant(i))
if !assert.NoError(t, err) {
return
}
ifex := IF(cond, eAdd)
expectedFields = append(expectedFields, NewField(i+"_add", ifex))
}
expectedFields = append(expectedFields, NewField("total_add", eAdd))
expectedKeys := []bytemap.ByteMap{
bytemap.FromSortedKeysAndValues([]string{"x"}, []interface{}{1}),
bytemap.FromSortedKeysAndValues([]string{"x"}, []interface{}{2}),
}
expectedRows := [][][]float64{
[][]float64{
[]float64{70, 0},
[]float64{0, 0},
[]float64{0, 50},
[]float64{0, 0},
[]float64{70, 50},
},
[][]float64{
[]float64{0, 0},
[]float64{0, 0},
[]float64{80, 0},
[]float64{0, 60},
[]float64{80, 60},
},
}
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("x", goexpr.Param("x"))},
Crosstab: goexpr.Concat(goexpr.Constant("_"), goexpr.Param("y")),
CrosstabIncludesTotal: true,
Fields: StaticFieldSource{addField},
Resolution: resolution * 2,
AsOf: asOf.Add(2 * resolution),
Until: until.Add(-2 * resolution),
})
var fields Fields
_, err := gx.Iterate(context.Background(), func(inFields Fields) error {
fields = inFields
if assert.Equal(t, len(expectedFields), len(fields)) {
for i, expected := range expectedFields {
assert.Equal(t, expected.String(), fields[i].String())
}
}
return nil
}, func(key bytemap.ByteMap, vals Vals) (bool, error) {
expectedKey := expectedKeys[0]
expectedKeys = expectedKeys[1:]
expectedRow := expectedRows[0]
expectedRows = expectedRows[1:]
assert.EqualValues(t, expectedKey, key)
if assert.Equal(t, len(expectedRow), len(vals)) {
for i, expected := range expectedRow {
val := vals[i]
field := fields[i]
if assert.Equal(t, len(expected), val.NumPeriods(field.Expr.EncodedWidth())) {
for j, f := range expected {
actual, _ := val.ValueAt(j, field.Expr)
assert.Equal(t, f, actual)
}
}
}
}
return true, nil
})
if !assert.NoError(t, err) {
t.Log(FormatSource(gx))
}
}
func TestGroupResolutionOnly(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
By: []GroupBy{NewGroupBy("_", goexpr.Constant("_"))},
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 20,
})
total := float64(0)
_, err := gx.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
v := vals[0]
for p := 0; p < v.NumPeriods(eTotal.EncodedWidth()); p++ {
val, _ := v.ValueAt(p, eTotal)
t.Log(val)
total += val
}
// We expect only one row
return false, nil
})
assert.NoError(t, err)
assert.EqualValues(t, 480, total)
}
func TestGroupNone(t *testing.T) {
eTotal := ADD(eA, eB)
gx := Group(&goodSource{}, GroupOpts{
Fields: StaticFieldSource{
Field{
Name: "total",
Expr: eTotal,
},
},
Resolution: resolution * 10,
})
expectedValues := map[string]float64{
"1.1": 10 + 70,
"1.3": 50,
"1.5": 90,
"2.2": 100,
"2.3": 20 + 80,
"2.5": 60,
}
ctx := context.Background()
_, err := gx.Iterate(ctx, FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
dims := fmt.Sprintf("%d.%d", key.Get("x"), key.Get("y"))
val, _ := vals[0].ValueAt(0, eTotal)
expectedVal := expectedValues[dims]
delete(expectedValues, dims)
assert.Equal(t, expectedVal, val, dims)
return true, nil
})
assert.NoError(t, err)
assert.Empty(t, expectedValues, "All combinations should have been seen")
}
func TestFlattenSortOffsetAndLimit(t *testing.T) {
// TODO: add test that tests flattening of rows that contain multiple periods
// worth of values
g := Group(&goodSource{}, GroupOpts{
Fields: StaticFieldSource{NewField("a", eA), NewField("b", eB), NewField("c", CONST(10))},
})
f := Flatten(g)
s := Sort(f, NewOrderBy("b", true), NewOrderBy("a", false))
o := Offset(s, 1)
l := Limit(o, 6)
// This contains the data, sorted, but missing the first and last entries
expectedTSs := []time.Time{
epoch.Add(-2 * resolution), epoch.Add(-4 * resolution), epoch.Add(-8 * resolution),
epoch.Add(-9 * resolution), epoch.Add(-5 * resolution), epoch.Add(-3 * resolution),
}
expectedAs := []float64{0, 0, 0, 10, 50, 70}
expectedBs := []float64{80, 60, 20, 0, 0, 0}
var expectedTS time.Time
var expectedA float64
var expectedB float64
_, err := l.Iterate(context.Background(), FieldsIgnored, func(row *FlatRow) (bool, error) {
expectedTS, expectedTSs = expectedTSs[0], expectedTSs[1:]
expectedA, expectedAs = expectedAs[0], expectedAs[1:]
expectedB, expectedBs = expectedBs[0], expectedBs[1:]
assert.Equal(t, expectedTS.UnixNano(), row.TS)
assert.EqualValues(t, expectedA, row.Values[0])
assert.EqualValues(t, expectedB, row.Values[1])
return true, nil
})
if !assert.NoError(t, err) {
t.Log(FormatSource(l))
}
}
func TestUnflattenTransform(t *testing.T) {
avgTotal := ADD(AVG("a"), AVG("b"))
f := Flatten(&goodSource{})
u := Unflatten(f, StaticFieldSource{NewField("total", avgTotal)})
doTestUnflattened(t, u, avgTotal)
}
func TestUnflattenOptimized(t *testing.T) {
total := ADD(eA, eB)
s := &totalingSource{}
f := Flatten(s)
u := UnflattenOptimized(f)
_, isTotalingSource := u.(*totalingSource)
if !assert.True(t, isTotalingSource, "Unflattened should point directly at totaling source, put points at: %v", u.String()) {
return
}
doTestUnflattened(t, u, total)
}
func doTestUnflattened(t *testing.T, u RowSource, ex Expr) {
expectedRows := make([]*testRow, 0, len(testRows))
for _, row := range testRows {
var ts time.Time
if row.vals[0] != nil {
ts = row.vals[0].Until()
} else {
ts = row.vals[1].Until()
}
a, _ := row.vals[0].ValueAt(0, eA)
b, _ := row.vals[1].ValueAt(0, eB)
params := Map(map[string]float64{
"a": a,
"b": b,
})
expectedRow := &testRow{
key: row.key,
vals: []encoding.Sequence{encoding.NewValue(ex, ts, params, row.key)},
}
expectedRows = append(expectedRows, expectedRow)
}
_, err := u.Iterate(context.Background(), FieldsIgnored, func(key bytemap.ByteMap, vals Vals) (bool, error) {
row := &testRow{key, vals}
for i, expected := range expectedRows {
if row.equals(expected) {
expectedRows = append(expectedRows[:i], expectedRows[i+1:]...)
break
}
}
return true, nil
})
assert.NoError(t, err)
assert.Empty(t, expectedRows, "All rows should have been seen")
}
type testSource struct{}
func (s *testSource) getFields() Fields {
return Fields{NewField("a", eA), NewField("b", eB)}
}
func (s *testSource) GetGroupBy() []GroupBy {
x, _ := goexpr.Binary("+", goexpr.Param("x"), goexpr.Constant(1))
return []GroupBy{NewGroupBy("x", x), NewGroupBy("y", goexpr.Param("y"))}
}
func (s *testSource) GetResolution() time.Duration {
return resolution
}
func (s *testSource) GetAsOf() time.Time {
return asOf
}
func (s *testSource) GetUntil() time.Time {
return until
}
type testRow struct {
key bytemap.ByteMap
vals Vals
}
func (r *testRow) equals(other *testRow) bool {
if r == nil || other == nil {
return false
}
if string(r.key) != string(other.key) {
return false
}
if len(r.vals) != len(other.vals) {
return false
}
for i, val := range r.vals {
ex := eA
if i > 0 {
ex = eB
}
otherVal := other.vals[i]
v, _ := val.ValueAt(0, ex)
ov, _ := otherVal.ValueAt(0, ex)
if v != ov {
return false
}
}
return true
}
var testRows = []*testRow{
makeRow(epoch.Add(-9*resolution), 1, 1, 10, 0),
makeRow(epoch.Add(-8*resolution), 2, 3, 0, 20),
// Intentional gap
makeRow(epoch.Add(-5*resolution), 1, 3, 50, 0),
makeRow(epoch.Add(-4*resolution), 2, 5, 0, 60),
makeRow(epoch.Add(-3*resolution), 1, 1, 70, 0),
makeRow(epoch.Add(-2*resolution), 2, 3, 0, 80),
makeRow(epoch.Add(-1*resolution), 1, 5, 90, 0),
makeRow(epoch, 2, 2, 0, 100),
}
func makeRow(ts time.Time, x int, y int, a float64, b float64) *testRow {
key := bytemap.New(map[string]interface{}{"x": x, "y": y})
vals := make([]encoding.Sequence, 2)
if a != 0 {
vals[0] = encoding.NewFloatValue(eA, ts, a)
}
if b != 0 {
vals[1] = encoding.NewFloatValue(eB, ts, b)
}
return &testRow{key, vals}
}
type goodSource struct {
testSource
}
func (s *goodSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
onFields(s.getFields())
guard := Guard(ctx)
for _, row := range testRows {
if guard.TimedOut() {
return nil, ErrDeadlineExceeded
}
more, err := onRow(row.key, row.vals)
if !more || err != nil {
return nil, err
}
}
return nil, nil
}
func (s *goodSource) String() string {
return "test.good"
}
type infiniteSource struct {
testSource
}
func (s *infiniteSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
onFields(s.getFields())
for {
for _, row := range testRows {
more, err := onRow(row.key, row.vals)
if !more || err != nil {
return nil, err
}
}
}
}
func (s *infiniteSource) String() string {
return "test.infinite"
}
type errorSource struct {
testSource
}
func (s *errorSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
return nil, errTest
}
func (s *errorSource) String() string {
return "test.error"
}
type totalingSource struct {
goodSource
}
func (s *totalingSource) getFields() Fields |
func (s *totalingSource) Iterate(ctx context.Context, onFields OnFields, onRow OnRow) (interface{}, error) {
return s.goodSource.Iterate(ctx, onFields, func(key bytemap.ByteMap, vals Vals) (bool, error) {
a, _ := vals[0].ValueAt(0, eA)
b, _ := vals[0].ValueAt(0, eB)
val := encoding.NewValue(totalField.Expr, vals[0].Until(), Map{"a": a, "b": b}, key)
return onRow(key, Vals{val})
})
}
| {
return Fields{totalField}
} | identifier_body |
util.ts |
// #ifdef H5
// var clipboardJS = require( from ''./clipboardJS'');
// import clipboardJS from './clipboardJS'
// #endif
import {ComponentInternalInstance} from 'vue'
/**
* 预览图片。
@param {Object} url 必填 当前预览的图片链接。
@param {Object} list 可以是url数组,也可以是对象,数据比如:["http:url"] or [{url:"https:url",...}]
@param {Object} rangKey 如果list是对象数组,需要提供url字段。
*/
import {preview} from "./preview.js"
export default preview;
/**
* 数据分组
* @param {Array} oArr - 原数组列表
* @param {Number} length - 单个数组长度
* @return {Array} arr - 分组后的新数组
*/
export function splitData(oArr:Array<any> = [], length = 1) {
let arr:Array<any> = [];
let minArr:Array<any> = [];
oArr.forEach(c => {
if (minArr.length === length) {
minArr = [];
}
if (minArr.length === 0) {
arr.push(minArr);
}
minArr.push(c);
});
return arr;
}
/**
* 剩余时间格式化
* @param {Number} t - 剩余多少秒
* @return {Object} format - 格式后的天时分秒对象
*/
export function timeMuch(t:number) {
let format:any = {
d: '00',
h: '00',
m: '00',
s: '00'
};
if (t > 0) {
let d = Math.floor(t / 86400);
let h = Math.floor((t / 3600) % 24);
let m = Math.floor((t / 60) % 60);
let s = Math.floor(t % 60);
format.d = d < 10 ? '0' + d : d;
format.h = h < 10 ? '0' + h : h;
format.m = m < 10 ? '0' + m : m;
format.s = s < 10 ? '0' + s : s;
}
return format;
}
//获取时间距离当前时间
export function getDateToNewData(timestamp:number|string|Date = new Date().getTime()){
if(typeof timestamp == 'string'){
timestamp = new Date(timestamp).getTime();
}
// 补全为13位
var arrTimestamp:Array<string> = (timestamp + '').split('');
for (var start = 0; start < 13; start++) {
if (!arrTimestamp[start]) {
arrTimestamp[start] = '0';
}
}
timestamp = Number(arrTimestamp.join(''))* 1;
var minute = 1000 * 60;
var hour = minute * 60;
var day = hour * 24;
var halfamonth = day * 15;
var month = day * 30;
var now = new Date().getTime();
var diffValue = now - timestamp;
// 如果本地时间反而小于变量时间
if (diffValue < 0) {
return '不久前';
}
// 计算差异时间的量级
var monthC = diffValue / month;
var weekC = diffValue / (7 * day);
var dayC = diffValue / day;
var hourC = diffValue / hour;
var minC = diffValue / minute;
// 数值补0方法
var zero = function (value:number) {
if (value < 10) {
return '0' + value;
}
return value;
};
// 使用
if (monthC > 12) {
// 超过1年,直接显示年月日
return (function () {
var date = new Date(timestamp);
return date.getFullYear() + '年' + zero(date.getMonth() + 1) + '月' + zero(date.getDate()) + '日';
})();
} else if (monthC >= 1) {
return parseInt(monthC+'') + "月前";
} else if (weekC >= 1) {
return parseInt(weekC+'') + "周前";
} else if (dayC >= 1) {
return parseInt(dayC+'') + "天前";
} else if (hourC >= 1) {
return parseInt(hourC+'') + "小时前";
} else if (minC >= 1) {
return parseInt(minC+'') + "分钟前";
}
return '刚刚';
}
/**
* 打电话
* @param {String<Number>} phoneNumber - 数字字符串
* @return {Promise}
*/
export function callPhone(phoneNumber = '') {
let num = phoneNumber.toString()
return new Promise((rs,rj)=>{
uni.makePhoneCall({
phoneNumber: num,
success:()=> rs(true),
fail:(err)=> rj(err)
});
})
}
/**
* 调起客户端相机扫码。
* @param {Boolean} onlyFromCamera true 是否只允许相机扫码识别
* @param {Array<string>} scanType ['barCode', 'qrCode', 'datamatrix','datamatrix']
* @returns Promise 成功返回相关数据结构
*/
export function scanCode(onlyFromCamera = true, scanType = ['barCode', 'qrCode', 'datamatrix','datamatrix']):Promise<string|UniApp.ScanCodeSuccessRes>{
return new Promise((rs,rj)=>{
// #ifdef H5
rj('不支持H5');
// #endif
// #ifndef H5
uni.scanCode({
onlyFromCamera: onlyFromCamera,
scanType: scanType,
success: (res) => rs(res),
fail:(error)=>rj(error)
});
// #endif
})
}
/**
* 设置剪切板内容。
* @param {String} data
* @returns Promise true/false
*/
export function setClipboardData(data:string):Promise<string|boolean>{
return new Promise((rs,rj)=>{
// #ifndef H5
uni.setClipboardData({
data: data,
success:()=>rs(true),
fail:(error)=>rj(error)
});
// #endif
// #ifdef H5
if (navigator.clipboard && window.isSecureContext) {
return navigator.clipboard.writeText(data)
}
else {
const textArea = document.createElement('textarea')
textArea.style.opacity = "0"
textArea.style.position = "fixed"
textArea.style.top = "0px"
textArea.value = data
document.body.appendChild(textArea)
textArea.focus()
textArea.select()
document.execCommand('copy') ? rs(true) : rj("错误")
textArea.remove()
}
// #endif
})
}
/**
* 获取剪切板内容
* @returns Promise 剪切板内容
*/
export function getClipboardData():Promise<boolean|string>{
return new Promise((rs, rj) => {
// #ifndef H5
uni.getClipboardData({
success: (res) => rs(res.data),
fail: (error) => rj(error)
});
// #endif
// #ifdef H5
console.error('H5无法获取剪切板内容')
rj('H5无法获取剪切板内容')
// #endif
})
}
/**
* 设置cookie数据
* @param {String} key 键值
* @param {String} data 值
* @returns Boolean
*/
export function setCookie(key:string, data:any) {
try {
uni.setStorageSync(key, data);
return true;
} catch (e) {
return false;
}
}
/**
* 删除一个本地cookie
* @param {String} key 键值
* @returns Boolean
*/
export function delCookie(key:string) {
try {
uni.removeStorageSync(key);
return true;
} catch (e) {
return false;
}
}
/**
* 获取一个cookie数据
* 如果存入的是对象,返回的也是对象。如果是string返回的也是字符串。
* @param {String} key 键
* @returns json/string
*/
export function getCookie(key:string) {
try {
const value = uni.getStorageSync(key);
try {
let val = JSON.parse(value)
return val;
} catch (e) {
return value;
}
} catch (e) {
return undefined;
}
}
/**
* 向地址连接追加参数。
* @param {string} uri 网址
* @param {string} key 字段
* @param {string} value 字段值
* @returns
*/
export function httpUrlAddKey(uri:string, key:string, value:string) {
if (!value) {
return uri;
}
var re = new RegExp("([?&])" + key + "=.*?(&|$)", "i");
var separator = uri.indexOf("?") !== -1 ? "&" : "?";
if (uri.match(re)) {
return uri.replace(re, "$1" + key + "=" + value + "$2");
} else {
return uri + separator + key + "=" + value;
}
}
/**
* 取url参数
* @param {string} uri 网址
* @param {string} key 字段
* @returns string
*/
export function getQueryString(url:string,key:string):string {
var query_string = url.substring(url.indexOf("?"));
if (!query_string) return "";
var re = /[?&]?([^=]+)=([^&]*)/g;
var tokens:any;
while (tokens = re.exec(query_string)) {
if (decodeURIComponent(tokens[1]) === key) {
return decodeURIComponent(tokens[2]);
break;
}
}
return "";
}
/**
* rdix 随机因子,
* length 取的长度.
*/
export function getUid (rdix=1,length=12,isAddStr=false){
return Math.floor(Math.random() * rdix * Math.floor(Math.random() * Date.now())).toString(isAddStr?16:10).substring(0,length);
}
/*
防抖
防抖原理:在一定时间内,只有最后一次操作,再过wait毫秒后才执行函数
@param {Function} func 要执行的回调函数
@param {Number} wait 延迟的时间
@param{Boolean} immediate 是否要立即执行
*/
var timeout= getUid(1)
export function debounce(func:Function, wait = 500, immediate = false) {
// 清除定时器
if (timeout !== null) clearTimeout(timeout);
// 立即执行,此类情况一般用不到
if (immediate) {
var callNow = !timeout;
timeout = setTimeout(() => {
timeout = null;
}, wait);
if (callNow) typeof func === "function" && func();
} else {
// 设置定时器,当最后一次操作后,timeout不会再被清除,所以在延时wait毫秒后执行func回调方法
timeout = getUid(1);
timeout = setTimeout(() => {
typeof func === "function" && func();
}, wait);
}
}
/**
* 节流
节流原理:在一定时间内,只能触发一次
* @param {Function} func 要执行的回调函数
* @param {Number} wait 延时的时间
* @param {Boolean} immediate 是否立即执行
* @return null
*/
export function throttle(func:Function, wait = 500, immediate = true,timer=85688,flag=false) {
if (immediate) {
if (!flag) {
flag = true;
// 如果是立即执行,则在wait毫秒内开始时执行
typeof func === 'function' && func();
timer = setTimeout(() => {
flag = false;
}, wait);
}
} else {
if (!flag) {
flag = true
// 如果是非立即执行,则在wait毫秒内的结束处执行
timer = setTimeout(() => {
flag = false
typeof func === 'function' && func();
}, wait);
}
}
};
// 深度克隆
export function deepClone (obj:any) {
// 对常见的“非”值,直接返回原来值
if([null, undefined, NaN, false].includes(obj)) return obj;
if(typeof obj !== "object" && typeof obj !== 'function') {
//原始类型直接返回
return obj;
}
var o:any = Array.isArray(obj) ? [] : {};
for(let i in obj) {
if(obj.hasOwnProperty(i)){
o[i] = typeof obj[i] === "object" ? deepClone(obj[i]) : obj[i];
}
}
return o;
}
export function quereyDom(t:ComponentInternalInstance,node:string){
// #ifdef APP-NVUE
const dom:any = uni.requireNativePlugin('dom')
return new Promise((res,rej)=>{
setTimeout(function(){
node = node.replace(/#\./g,'')
dom.getComponentRect(t.refs[node], function(el:any) {
res(el.size);
})
},60)
})
// #endif
// #ifndef APP-NVUE
return new Promise((res,rej)=>{
const query = uni.createSelectorQuery().in(t);
query.select(node).boundingClientRect(el => {
res(el);
}).exec();
})
// #endif
}
/**
* 是否是手机号码
* @param phone 号码
* @returns Boolean
*/
export function isPhone(phone:string|number){
let val = String(phone);
let reg = /^(13[0-9]|14[01456879]|15[0-35-9]|16[2567]|17[0-8]|18[0-9]|19[0-35-9])\d{8}$/
return !!val.match(reg);
}
/**
* 是否含有中文
* @param s 字符串
* @returns Boolean
*/
export function isChina(s:string){
var patrn=/[\u4E00-\u9FA5]|[\uFE30-\uFFA0]/gi;
return !!patrn.exec(s);
}
/**
* 是否为空
* @description 判断是否是null,对象是否为空,数组是否为空。是否为 undefaind,是否为 “”空字符串。
* @param s 任意
*/
export function isEmpty(s:any){
if(typeof s === 'string'){
s = s.trim();
}
if(s=="") return true
if(s==null) return true;
if(typeof s === 'undefined') return true;
if(Array.isArray(s)){
if(s.length==0) return true;
}
if(typeof s ==='object'){
if(Object.keys(s).length==0) return true;
}
return false;
}
/**
* 是否邮箱
* @param s 字符串
* @returns Boolean
*/
export function isEmail(s:string){
let reg = /^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$/;
return !!s.match(reg);
}
/**
* 是否身份证号
* @param val 字符号或者数字
* @returns Boolean
* @author https://cloud.tencent.com/developer/article/1114323
*/
export function isIdCard (val:string|number) {
val = String(val)
var p = /^[1-9]\d{5}(18|19|20)\d{2}((0[1-9])|(1[0-2]))(([0-2][1-9])|10|20|30|31)\d{3}[0-9Xx]$/;
var factor = [ 7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2 ];
var parity = [ 1, 0, 'X', 9, 8, 7, 6, 5, 4, 3, 2 ];
var code = val.substring(17);
if(p.test(val)) {
var sum:number = 0;
for(var i=0;i<17;i++) {
let id:number|string|any = val[i]
sum += id*factor[i];
}
if(parity[sum % 11] == code.toUpperCase()) {
return true;
}
}
return false;
| * 是否车牌
* @description 蓝牌5位,绿牌6位。
* @param s 字符串
* @returns Boolean
*/
export function isIdCar(s:string){
let reg = /^[京|沪|津|渝|鲁|冀|晋|蒙|辽|吉|黑|苏|浙|皖|闽|赣|豫|湘|鄂|粤|桂|琼|川|贵|云|藏|陕|甘|青|宁|新|港|澳|台|新|使]{1}[A-Z]{1}[A-Z_0-9]{5,6}$/
return !!s.match(reg);
}
/**
* 纯数字密码验证
* @param s 字符串或者数字
* @param len 最小长度,默认6
* @param maxLen 最大长度,默认20
* @returns Boolean
*/
export function isPasswordOfNumber(s:number|string,len=6,maxLen=20){
s = String(s);
let reg = new RegExp(`^[0-9]{${len},${maxLen}}$`)
return !!s.match(reg)
}
/**
* 密码验证
* @param s 字符串或者数字
* @param len 最小长度,默认6
* @param maxLen 最大长度,默认20
* @param model 0数字和英文,1数字,英文必须包含,不允许有特殊字符,2数字和字母必须包含,可以有特殊字符。
* @returns Boolean
*/
export function isPasswordOfOther(s:string|number,len=6,maxLen=20,model=0){
s = String(s);
//密码至少包含 数字和英文,长度6-20
let reg = /^(?![0-9]+$)(?![a-zA-Z]+$)[0-9A-Za-z]{6,20}$/
//密码包含 数字,英文,字符中的两种以上,长度6-20
if(model===1){
reg = /^(?![0-9]+$)(?![a-z]+$)(?![A-Z]+$)(?!([^(0-9a-zA-Z)])+$).{6,20}$/
}
//至少包含数字跟字母,可以有字符
if(model===2){
reg = /(?=.*([a-zA-Z].*))(?=.*[0-9].*)[a-zA-Z0-9-*/+.~!@#$%^&*()]{6,20}$/
}
return !!s.match(reg)
}
/**
* 是否是一个有效的日期
* @param s 字符串,数字,日期对象
* @returns Boolean
*/
export function isDate(s:string|number|Date){
if(s==null||typeof s === 'undefined' || !s) return false;
if(typeof s ==='string'){
//兼容ios,mac
s = s.replace('-','/');
}
let d = new Date(s);
if(d.toString() == 'Invalid Date') return false;
return true;
}
/**
* 显示信息
* @param word 标题
* @param mask 不允许穿透
* @param icon 图标
*/
export function toast(word:string,mask:boolean=true,icon:any='none'){
// #ifndef MP-ALIPAY
uni.showToast({
mask:mask,
title:word,
icon:icon
})
// #endif
// #ifdef MP-ALIPAY
uni.showToast({
title:word,
icon:icon
})
// #endif
}
/**
* 获取屏幕窗口安全高度和宽度
* 注意是针对种屏幕的统一计算,统一高度,不再让uni获取有效高度而烦恼。
* 请一定要在onMounted或者onLoad中调用,否则不准确在h5端。
* @return {height,width,top,isCustomHeader,statusBarHeight,sysinfo}
*/
export function getWindow():{width:number,height:number,top:number,bottom:number,statusBarHeight:number,isCustomHeader:Boolean,sysinfo:UniApp.GetSystemInfoResult}{
// let getsysinfoSync = getCookie("tmui_sysinfo")
// if(getsysinfoSync){
// return getsysinfoSync
// }
const sysinfo = uni.getSystemInfoSync();
let top =0;
let height = sysinfo.windowHeight;
let nowPage = getCurrentPages().pop()
let isCustomHeader = false;
let pages = uni.$tm?.pages??[]
let bottom = sysinfo.safeAreaInsets?.bottom??0;
for(let i=0;i<uni.$tm.pages.length;i++){
if(nowPage?.route==uni.$tm.pages[i].path&&uni.$tm.pages[i].custom=='custom'){
isCustomHeader = true;
break;
}
}
// #ifdef H5
// 兼容说明:h5端第一次获取的高度和第二次获取的高度是有差异 的。
if (isCustomHeader) {
height = sysinfo.windowHeight+sysinfo.windowTop
}else{
top = 44
if(sysinfo.windowTop>0){
height = sysinfo.windowHeight;
}else{
height = sysinfo.windowHeight+sysinfo.windowTop
}
}
// #endif
let reulst = {bottom:bottom,height:height,width:sysinfo.windowWidth,top:top,isCustomHeader:isCustomHeader,statusBarHeight:sysinfo.statusBarHeight,sysinfo:sysinfo};
return reulst;
}
type openUrlType = "navigate"|"redirect"|"reLaunch"|"switchTab"|"navigateBack"
/**
*
* @param url string 打开的页面路径
* @param type openUrlType "navigate"|"redirect"|"reLaunch"|"switchTab"|"navigateBack"
*/
export function routerTo(url:string,type:openUrlType='navigate'){
type openUrlTypeFun = "navigateTo"|"redirectTo"|"reLaunch"|"switchTab"|"navigateBack"
let funType = {
navigate:"navigateTo",
redirect:"redirectTo",
switchTab:"switchTab",
reLaunch:"reLaunch",
navigateBack:"navigateBack",
}
let fun= funType[type];
if(fun=='navigateBack'){
uni.navigateBack({fail(error) {
console.error(error)
}})
}else if(fun=='reLaunch'){
uni.reLaunch({
url:url,
fail(error) {
console.error(error)
}
})
}else if(fun=='switchTab'){
uni.switchTab({
url:url,
fail(error) {
console.error(error)
}
})
}else if(fun=='redirectTo'){
uni.redirectTo({
url:url,
fail(error) {
console.error(error)
}
})
}else if(fun=='navigateTo'){
uni.navigateTo({
url:url,
fail(error) {
console.error(error)
}
})
}
}
/**
* 将rpx转换为px
* @param v 待转换的数字
* @param screenWidth 屏幕的宽度,如果不提供默认自动获取
* @return number
*/
export function torpx(v:number,screenWidth:number=0){
if(typeof screenWidth === 'undefined'||!screenWidth){
screenWidth = uni.getSystemInfoSync().screenWidth;
}
let pixelRatio = 750 / screenWidth;
return Math.ceil(v * pixelRatio)
}
/**
* 将rpx转换为px
* @param v 待转换的数字
* @return number
*/
export function topx(v:number){
return Math.ceil(uni.upx2px(Number(v)))
} | }
/**
| identifier_name |
util.ts | // #ifdef H5
// var clipboardJS = require( from ''./clipboardJS'');
// import clipboardJS from './clipboardJS'
// #endif
import {ComponentInternalInstance} from 'vue'
/**
* 预览图片。
@param {Object} url 必填 当前预览的图片链接。
@param {Object} list 可以是url数组,也可以是对象,数据比如:["http:url"] or [{url:"https:url",...}]
@param {Object} rangKey 如果list是对象数组,需要提供url字段。
*/
import {preview} from "./preview.js"
export default preview;
/**
* 数据分组
* @param {Array} oArr - 原数组列表
* @param {Number} length - 单个数组长度
* @return {Array} arr - 分组后的新数组
*/
export function splitData(oArr:Array<any> = [], length = 1) {
let arr:Array<any> = [];
let minArr:Array<any> = [];
oArr.forEach(c => {
if (minArr.length === length) {
minArr = [];
}
if (minArr.length === 0) {
arr.push(minArr);
}
minArr.push(c);
});
return arr;
}
/**
* 剩余时间格式化
* @param {Number} t - 剩余多少秒
* @return {Object} format - 格式后的天时分秒对象
*/
export function timeMuch(t:number) {
let format:any = {
d: '00',
h: '00',
m: '00',
s: '00'
};
if (t > 0) {
let d = Math.floor(t / 86400);
let h = Math.floor((t / 3600) % 24);
let m = Math.floor((t / 60) % 60);
let s = Math.floor(t % 60);
format.d = d < 10 ? '0' + d : d;
format.h = h < 10 ? '0' + h : h; | format.m = m < 10 ? '0' + m : m;
format.s = s < 10 ? '0' + s : s;
}
return format;
}
//获取时间距离当前时间
export function getDateToNewData(timestamp:number|string|Date = new Date().getTime()){
if(typeof timestamp == 'string'){
timestamp = new Date(timestamp).getTime();
}
// 补全为13位
var arrTimestamp:Array<string> = (timestamp + '').split('');
for (var start = 0; start < 13; start++) {
if (!arrTimestamp[start]) {
arrTimestamp[start] = '0';
}
}
timestamp = Number(arrTimestamp.join(''))* 1;
var minute = 1000 * 60;
var hour = minute * 60;
var day = hour * 24;
var halfamonth = day * 15;
var month = day * 30;
var now = new Date().getTime();
var diffValue = now - timestamp;
// 如果本地时间反而小于变量时间
if (diffValue < 0) {
return '不久前';
}
// 计算差异时间的量级
var monthC = diffValue / month;
var weekC = diffValue / (7 * day);
var dayC = diffValue / day;
var hourC = diffValue / hour;
var minC = diffValue / minute;
// 数值补0方法
var zero = function (value:number) {
if (value < 10) {
return '0' + value;
}
return value;
};
// 使用
if (monthC > 12) {
// 超过1年,直接显示年月日
return (function () {
var date = new Date(timestamp);
return date.getFullYear() + '年' + zero(date.getMonth() + 1) + '月' + zero(date.getDate()) + '日';
})();
} else if (monthC >= 1) {
return parseInt(monthC+'') + "月前";
} else if (weekC >= 1) {
return parseInt(weekC+'') + "周前";
} else if (dayC >= 1) {
return parseInt(dayC+'') + "天前";
} else if (hourC >= 1) {
return parseInt(hourC+'') + "小时前";
} else if (minC >= 1) {
return parseInt(minC+'') + "分钟前";
}
return '刚刚';
}
/**
* 打电话
* @param {String<Number>} phoneNumber - 数字字符串
* @return {Promise}
*/
export function callPhone(phoneNumber = '') {
let num = phoneNumber.toString()
return new Promise((rs,rj)=>{
uni.makePhoneCall({
phoneNumber: num,
success:()=> rs(true),
fail:(err)=> rj(err)
});
})
}
/**
* 调起客户端相机扫码。
* @param {Boolean} onlyFromCamera true 是否只允许相机扫码识别
* @param {Array<string>} scanType ['barCode', 'qrCode', 'datamatrix','datamatrix']
* @returns Promise 成功返回相关数据结构
*/
export function scanCode(onlyFromCamera = true, scanType = ['barCode', 'qrCode', 'datamatrix','datamatrix']):Promise<string|UniApp.ScanCodeSuccessRes>{
return new Promise((rs,rj)=>{
// #ifdef H5
rj('不支持H5');
// #endif
// #ifndef H5
uni.scanCode({
onlyFromCamera: onlyFromCamera,
scanType: scanType,
success: (res) => rs(res),
fail:(error)=>rj(error)
});
// #endif
})
}
/**
* 设置剪切板内容。
* @param {String} data
* @returns Promise true/false
*/
export function setClipboardData(data:string):Promise<string|boolean>{
return new Promise((rs,rj)=>{
// #ifndef H5
uni.setClipboardData({
data: data,
success:()=>rs(true),
fail:(error)=>rj(error)
});
// #endif
// #ifdef H5
if (navigator.clipboard && window.isSecureContext) {
return navigator.clipboard.writeText(data)
}
else {
const textArea = document.createElement('textarea')
textArea.style.opacity = "0"
textArea.style.position = "fixed"
textArea.style.top = "0px"
textArea.value = data
document.body.appendChild(textArea)
textArea.focus()
textArea.select()
document.execCommand('copy') ? rs(true) : rj("错误")
textArea.remove()
}
// #endif
})
}
/**
* 获取剪切板内容
* @returns Promise 剪切板内容
*/
export function getClipboardData():Promise<boolean|string>{
return new Promise((rs, rj) => {
// #ifndef H5
uni.getClipboardData({
success: (res) => rs(res.data),
fail: (error) => rj(error)
});
// #endif
// #ifdef H5
console.error('H5无法获取剪切板内容')
rj('H5无法获取剪切板内容')
// #endif
})
}
/**
* 设置cookie数据
* @param {String} key 键值
* @param {String} data 值
* @returns Boolean
*/
export function setCookie(key:string, data:any) {
try {
uni.setStorageSync(key, data);
return true;
} catch (e) {
return false;
}
}
/**
* 删除一个本地cookie
* @param {String} key 键值
* @returns Boolean
*/
export function delCookie(key:string) {
try {
uni.removeStorageSync(key);
return true;
} catch (e) {
return false;
}
}
/**
* 获取一个cookie数据
* 如果存入的是对象,返回的也是对象。如果是string返回的也是字符串。
* @param {String} key 键
* @returns json/string
*/
export function getCookie(key:string) {
try {
const value = uni.getStorageSync(key);
try {
let val = JSON.parse(value)
return val;
} catch (e) {
return value;
}
} catch (e) {
return undefined;
}
}
/**
* 向地址连接追加参数。
* @param {string} uri 网址
* @param {string} key 字段
* @param {string} value 字段值
* @returns
*/
export function httpUrlAddKey(uri:string, key:string, value:string) {
if (!value) {
return uri;
}
var re = new RegExp("([?&])" + key + "=.*?(&|$)", "i");
var separator = uri.indexOf("?") !== -1 ? "&" : "?";
if (uri.match(re)) {
return uri.replace(re, "$1" + key + "=" + value + "$2");
} else {
return uri + separator + key + "=" + value;
}
}
/**
* 取url参数
* @param {string} uri 网址
* @param {string} key 字段
* @returns string
*/
export function getQueryString(url:string,key:string):string {
var query_string = url.substring(url.indexOf("?"));
if (!query_string) return "";
var re = /[?&]?([^=]+)=([^&]*)/g;
var tokens:any;
while (tokens = re.exec(query_string)) {
if (decodeURIComponent(tokens[1]) === key) {
return decodeURIComponent(tokens[2]);
break;
}
}
return "";
}
/**
* rdix 随机因子,
* length 取的长度.
*/
export function getUid (rdix=1,length=12,isAddStr=false){
return Math.floor(Math.random() * rdix * Math.floor(Math.random() * Date.now())).toString(isAddStr?16:10).substring(0,length);
}
/*
防抖
防抖原理:在一定时间内,只有最后一次操作,再过wait毫秒后才执行函数
@param {Function} func 要执行的回调函数
@param {Number} wait 延迟的时间
@param{Boolean} immediate 是否要立即执行
*/
var timeout= getUid(1)
export function debounce(func:Function, wait = 500, immediate = false) {
// 清除定时器
if (timeout !== null) clearTimeout(timeout);
// 立即执行,此类情况一般用不到
if (immediate) {
var callNow = !timeout;
timeout = setTimeout(() => {
timeout = null;
}, wait);
if (callNow) typeof func === "function" && func();
} else {
// 设置定时器,当最后一次操作后,timeout不会再被清除,所以在延时wait毫秒后执行func回调方法
timeout = getUid(1);
timeout = setTimeout(() => {
typeof func === "function" && func();
}, wait);
}
}
/**
* 节流
节流原理:在一定时间内,只能触发一次
* @param {Function} func 要执行的回调函数
* @param {Number} wait 延时的时间
* @param {Boolean} immediate 是否立即执行
* @return null
*/
export function throttle(func:Function, wait = 500, immediate = true,timer=85688,flag=false) {
if (immediate) {
if (!flag) {
flag = true;
// 如果是立即执行,则在wait毫秒内开始时执行
typeof func === 'function' && func();
timer = setTimeout(() => {
flag = false;
}, wait);
}
} else {
if (!flag) {
flag = true
// 如果是非立即执行,则在wait毫秒内的结束处执行
timer = setTimeout(() => {
flag = false
typeof func === 'function' && func();
}, wait);
}
}
};
// 深度克隆
export function deepClone (obj:any) {
// 对常见的“非”值,直接返回原来值
if([null, undefined, NaN, false].includes(obj)) return obj;
if(typeof obj !== "object" && typeof obj !== 'function') {
//原始类型直接返回
return obj;
}
var o:any = Array.isArray(obj) ? [] : {};
for(let i in obj) {
if(obj.hasOwnProperty(i)){
o[i] = typeof obj[i] === "object" ? deepClone(obj[i]) : obj[i];
}
}
return o;
}
export function quereyDom(t:ComponentInternalInstance,node:string){
// #ifdef APP-NVUE
const dom:any = uni.requireNativePlugin('dom')
return new Promise((res,rej)=>{
setTimeout(function(){
node = node.replace(/#\./g,'')
dom.getComponentRect(t.refs[node], function(el:any) {
res(el.size);
})
},60)
})
// #endif
// #ifndef APP-NVUE
return new Promise((res,rej)=>{
const query = uni.createSelectorQuery().in(t);
query.select(node).boundingClientRect(el => {
res(el);
}).exec();
})
// #endif
}
/**
* 是否是手机号码
* @param phone 号码
* @returns Boolean
*/
export function isPhone(phone:string|number){
let val = String(phone);
let reg = /^(13[0-9]|14[01456879]|15[0-35-9]|16[2567]|17[0-8]|18[0-9]|19[0-35-9])\d{8}$/
return !!val.match(reg);
}
/**
* 是否含有中文
* @param s 字符串
* @returns Boolean
*/
export function isChina(s:string){
var patrn=/[\u4E00-\u9FA5]|[\uFE30-\uFFA0]/gi;
return !!patrn.exec(s);
}
/**
* 是否为空
* @description 判断是否是null,对象是否为空,数组是否为空。是否为 undefaind,是否为 “”空字符串。
* @param s 任意
*/
export function isEmpty(s:any){
if(typeof s === 'string'){
s = s.trim();
}
if(s=="") return true
if(s==null) return true;
if(typeof s === 'undefined') return true;
if(Array.isArray(s)){
if(s.length==0) return true;
}
if(typeof s ==='object'){
if(Object.keys(s).length==0) return true;
}
return false;
}
/**
 * Check whether a string looks like an e-mail address.
 * @param s string
 * @returns Boolean
 */
export function isEmail(s:string){
	// local@domain with one or more dot-separated domain labels.
	const pattern = /^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$/;
	return pattern.test(s);
}
/**
 * Validate a mainland-China 18-digit ID-card number (format + checksum).
 * @param val string or number
 * @returns Boolean
 * @author https://cloud.tencent.com/developer/article/1114323
 */
export function isIdCard (val:string|number) {
	const id = String(val)
	// Region(6) + year(18/19/20xx) + month + day + sequence(3) + check char.
	const pattern = /^[1-9]\d{5}(18|19|20)\d{2}((0[1-9])|(1[0-2]))(([0-2][1-9])|10|20|30|31)\d{3}[0-9Xx]$/;
	if (!pattern.test(id)) return false;
	// ISO 7064 MOD 11-2: per-digit weights and the check character for each remainder.
	const weights = [ 7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2 ];
	const checkChars = [ 1, 0, 'X', 9, 8, 7, 6, 5, 4, 3, 2 ];
	let total = 0;
	for (let idx = 0; idx < 17; idx++) {
		total += Number(id[idx]) * weights[idx];
	}
	// Loose == on purpose: checkChars holds numbers and 'X', the slice is a string.
	return checkChars[total % 11] == id.substring(17).toUpperCase();
}
/**
 * Validate a mainland-China license-plate number.
 * @description 5 trailing characters for blue plates, 6 for green (new-energy) plates.
 * @param s string
 * @returns Boolean
 */
export function isIdCar(s:string){
	// Province abbreviation + issuing-authority letter + 5-6 alphanumerics.
	// Fix: the old pattern wrote the provinces as [京|沪|...], which made "|" a literal
	// member of the character class and accepted plates such as "|A12345".
	let reg = /^[京沪津渝鲁冀晋蒙辽吉黑苏浙皖闽赣豫湘鄂粤桂琼川贵云藏陕甘青宁新港澳台使]{1}[A-Z]{1}[A-Z_0-9]{5,6}$/
	return !!s.match(reg);
}
/**
 * Digits-only password check.
 * @param s string or number
 * @param len minimum length, default 6
 * @param maxLen maximum length, default 20
 * @returns Boolean
 */
export function isPasswordOfNumber(s:number|string,len=6,maxLen=20){
	const text = String(s);
	const digitsOnly = new RegExp(`^[0-9]{${len},${maxLen}}$`)
	return digitsOnly.test(text)
}
/**
 * Password check.
 * @param s string or number
 * @param len minimum length, default 6
 * @param maxLen maximum length, default 20
 * @param model 0: must mix digits and letters, no special chars allowed;
 *              1: must mix at least two of digits/lowercase/uppercase/specials;
 *              2: must contain a digit and a letter, listed specials allowed.
 * @returns Boolean
 * Fix: the length bounds were hard-coded to 6-20 and silently ignored the
 * len/maxLen parameters; they are now interpolated into each pattern
 * (the defaults keep the old 6-20 behavior).
 */
export function isPasswordOfOther(s:string|number,len=6,maxLen=20,model=0){
	s = String(s);
	// Must contain digits and letters; alphanumerics only.
	let reg = new RegExp(`^(?![0-9]+$)(?![a-zA-Z]+$)[0-9A-Za-z]{${len},${maxLen}}$`)
	// Must mix at least two of: digits, lowercase, uppercase, specials.
	if(model===1){
		reg = new RegExp(`^(?![0-9]+$)(?![a-z]+$)(?![A-Z]+$)(?!([^(0-9a-zA-Z)])+$).{${len},${maxLen}}$`)
	}
	// Must contain a digit and a letter; listed special characters allowed.
	if(model===2){
		reg = new RegExp(`(?=.*([a-zA-Z].*))(?=.*[0-9].*)[a-zA-Z0-9-*/+.~!@#$%^&*()]{${len},${maxLen}}$`)
	}
	return !!s.match(reg)
}
/**
 * Check whether a value is (or parses to) a valid date.
 * @param s string, number (timestamp) or Date
 * @returns Boolean
 * Note: falsy inputs (0, "", null, undefined) are rejected.
 */
export function isDate(s:string|number|Date){
	if(s==null||typeof s === 'undefined' || !s) return false;
	if(typeof s ==='string'){
		// iOS/macOS Safari cannot parse "yyyy-mm-dd"; convert dashes to slashes.
		// Fix: replace(/-/g,'/') — the old replace('-','/') only swapped the FIRST dash,
		// leaving strings like "2020/01-02" that still fail to parse on those platforms.
		s = s.replace(/-/g,'/');
	}
	let d = new Date(s);
	if(d.toString() == 'Invalid Date') return false;
	return true;
}
/**
 * Show a toast message.
 * @param word message text
 * @param mask block touch events while showing (the Alipay MP branch omits it — that platform lacks mask support)
 * @param icon icon name, defaults to 'none'
 */
export function toast(word:string,mask:boolean=true,icon:any='none'){
	// #ifndef MP-ALIPAY
	uni.showToast({
		mask:mask,
		title:word,
		icon:icon
	})
	// #endif
	// #ifdef MP-ALIPAY
	uni.showToast({
		title:word,
		icon:icon
	})
	// #endif
}
/**
 * Get the safe window height and width of the screen.
 * Normalizes the usable height across platforms so callers need not fight
 * uni's per-platform height quirks themselves.
 * Must be called inside onMounted or onLoad, otherwise the H5 numbers are inaccurate.
 * @return {height,width,top,bottom,statusBarHeight,isCustomHeader,sysinfo}
 */
export function getWindow():{width:number,height:number,top:number,bottom:number,statusBarHeight:number,isCustomHeader:Boolean,sysinfo:UniApp.GetSystemInfoResult}{
	// let getsysinfoSync = getCookie("tmui_sysinfo")
	// if(getsysinfoSync){
	// return getsysinfoSync
	// }
	const sysinfo = uni.getSystemInfoSync();
	let top =0;
	let height = sysinfo.windowHeight;
	let nowPage = getCurrentPages().pop()
	let isCustomHeader = false;
	let pages = uni.$tm?.pages??[]
	let bottom = sysinfo.safeAreaInsets?.bottom??0;
	// A page registered with custom=='custom' draws its own navigation header.
	for(let i=0;i<uni.$tm.pages.length;i++){
		if(nowPage?.route==uni.$tm.pages[i].path&&uni.$tm.pages[i].custom=='custom'){
			isCustomHeader = true;
			break;
		}
	}
	// #ifdef H5
	// Compatibility note: on H5 the first and second height reads can differ.
	if (isCustomHeader) {
		height = sysinfo.windowHeight+sysinfo.windowTop
	}else{
		// 44px is the default H5 navigation-bar height.
		top = 44
		if(sysinfo.windowTop>0){
			height = sysinfo.windowHeight;
		}else{
			height = sysinfo.windowHeight+sysinfo.windowTop
		}
	}
	// #endif
	let reulst = {bottom:bottom,height:height,width:sysinfo.windowWidth,top:top,isCustomHeader:isCustomHeader,statusBarHeight:sysinfo.statusBarHeight,sysinfo:sysinfo};
	return reulst;
}
type openUrlType = "navigate"|"redirect"|"reLaunch"|"switchTab"|"navigateBack"
/**
 * Navigate to a page.
 * @param url string target page path (ignored for navigateBack)
 * @param type openUrlType "navigate"|"redirect"|"reLaunch"|"switchTab"|"navigateBack"
 */
export function routerTo(url:string,type:openUrlType='navigate'){
	// Shared failure logger for every navigation API.
	const logFail = (error:any) => console.error(error)
	switch (type) {
		case 'navigateBack':
			uni.navigateBack({fail:logFail})
			break;
		case 'reLaunch':
			uni.reLaunch({url:url,fail:logFail})
			break;
		case 'switchTab':
			uni.switchTab({url:url,fail:logFail})
			break;
		case 'redirect':
			uni.redirectTo({url:url,fail:logFail})
			break;
		case 'navigate':
			uni.navigateTo({url:url,fail:logFail})
			break;
	}
}
/**
 * Convert px to rpx.
 * NOTE(review): the original doc said "rpx to px", but v * (750 / screenWidth)
 * maps device pixels onto the 750-wide rpx grid, i.e. px -> rpx — confirm with callers.
 * @param v value to convert, in px
 * @param screenWidth screen width in px; auto-detected when omitted or 0
 * @return number rounded up to the nearest integer
 */
export function torpx(v:number,screenWidth:number=0){
	if(typeof screenWidth === 'undefined'||!screenWidth){
		screenWidth = uni.getSystemInfoSync().screenWidth;
	}
	let pixelRatio = 750 / screenWidth;
	return Math.ceil(v * pixelRatio)
}
/**
 * Convert rpx to px.
 * @param v value in rpx
 * @return number px, rounded up to the nearest integer
 */
export function topx(v:number){
	return Math.ceil(uni.upx2px(Number(v)))
}
util.ts |
// #ifdef H5
// var clipboardJS = require( from ''./clipboardJS'');
// import clipboardJS from './clipboardJS'
// #endif
import {ComponentInternalInstance} from 'vue'
/**
* 预览图片。
@param {Object} url 必填 当前预览的图片链接。
@param {Object} list 可以是url数组,也可以是对象,数据比如:["http:url"] or [{url:"https:url",...}]
@param {Object} rangKey 如果list是对象数组,需要提供url字段。
*/
import {preview} from "./preview.js"
export default preview;
/**
 * Chunk an array into fixed-size groups.
 * @param {Array} oArr - source list
 * @param {Number} length - size of each group (last group may be shorter)
 * @return {Array} the grouped (nested) array
 */
export function splitData(oArr:Array<any> = [], length = 1) {
	const groups:Array<any> = [];
	let current:Array<any> = [];
	for (const item of oArr) {
		// Start a fresh group once the current one is full.
		if (current.length === length) current = [];
		if (current.length === 0) groups.push(current);
		current.push(item);
	}
	return groups;
}
/**
 * Format a remaining-time value.
 * @param {Number} t - remaining seconds
 * @return {Object} {d,h,m,s}; values below 10 are zero-padded strings,
 * larger values stay numbers (matches the original contract). All '00' when t <= 0.
 */
export function timeMuch(t:number) {
	const pad = (n:number) => (n < 10 ? '0' + n : n);
	const format:any = { d: '00', h: '00', m: '00', s: '00' };
	if (t > 0) {
		format.d = pad(Math.floor(t / 86400));
		format.h = pad(Math.floor((t / 3600) % 24));
		format.m = pad(Math.floor((t / 60) % 60));
		format.s = pad(Math.floor(t % 60));
	}
	return format;
}
// Human-friendly "time ago" label for a timestamp relative to now.
export function getDateToNewData(timestamp:number|string|Date = new Date().getTime()){
	if(typeof timestamp == 'string'){
		timestamp = new Date(timestamp).getTime();
	}
	// Pad to 13 digits (millisecond precision).
	// NOTE(review): this appends zeros to the END, which happens to multiply a
	// 10-digit seconds timestamp by 1000 but would corrupt other lengths — confirm inputs.
	var arrTimestamp:Array<string> = (timestamp + '').split('');
	for (var start = 0; start < 13; start++) {
		if (!arrTimestamp[start]) {
			arrTimestamp[start] = '0';
		}
	}
	timestamp = Number(arrTimestamp.join(''))* 1;
	var minute = 1000 * 60;
	var hour = minute * 60;
	var day = hour * 24;
	var halfamonth = day * 15;
	var month = day * 30;
	var now = new Date().getTime();
	var diffValue = now - timestamp;
	// Local clock is earlier than the given time.
	if (diffValue < 0) {
		return '不久前';
	}
	// Magnitude of the difference in each unit.
	var monthC = diffValue / month;
	var weekC = diffValue / (7 * day);
	var dayC = diffValue / day;
	var hourC = diffValue / hour;
	var minC = diffValue / minute;
	// Zero-pad helper for month/day rendering.
	var zero = function (value:number) {
		if (value < 10) {
			return '0' + value;
		}
		return value;
	};
	// Pick the coarsest applicable unit.
	if (monthC > 12) {
		// More than a year ago: show the full date instead of a relative label.
		return (function () {
			var date = new Date(timestamp);
			return date.getFullYear() + '年' + zero(date.getMonth() + 1) + '月' + zero(date.getDate()) + '日';
		})();
	} else if (monthC >= 1) {
		return parseInt(monthC+'') + "月前";
	} else if (weekC >= 1) {
		return parseInt(weekC+'') + "周前";
	} else if (dayC >= 1) {
		return parseInt(dayC+'') + "天前";
	} else if (hourC >= 1) {
		return parseInt(hourC+'') + "小时前";
	} else if (minC >= 1) {
		return parseInt(minC+'') + "分钟前";
	}
	return '刚刚';
}
/**
 * Place a phone call via the platform dialer.
 * @param {String<Number>} phoneNumber - digit string
 * @return {Promise} resolves true on success, rejects with the platform error otherwise
 */
export function callPhone(phoneNumber = '') {
	let num = phoneNumber.toString()
	return new Promise((rs,rj)=>{
		uni.makePhoneCall({
			phoneNumber: num,
			success:()=> rs(true),
			fail:(err)=> rj(err)
		});
	})
}
/**
 * Open the device camera to scan a code.
 * Not supported on H5: the promise rejects immediately there.
 * NOTE(review): 'datamatrix' appears twice in the default list; 'pdf417' may have been intended — confirm.
 * @param {Boolean} onlyFromCamera true to allow camera scanning only (no album picks)
 * @param {Array<string>} scanType code types to recognize
 * @returns Promise resolving with the scan result
 */
export function scanCode(onlyFromCamera = true, scanType = ['barCode', 'qrCode', 'datamatrix','datamatrix']):Promise<string|UniApp.ScanCodeSuccessRes>{
	return new Promise((rs,rj)=>{
		// #ifdef H5
		rj('不支持H5');
		// #endif
		// #ifndef H5
		uni.scanCode({
			onlyFromCamera: onlyFromCamera,
			scanType: scanType,
			success: (res) => rs(res),
			fail:(error)=>rj(error)
		});
		// #endif
	})
}
/**
* 设置剪切板内容。
* @param {String} data
* @returns Promise true/false
*/
export function setClipboardData(data:string):Promise<string|boolean>{
return new Promise( | t(data)
}
else {
const textArea = document.createElement('textarea')
textArea.style.opacity = "0"
textArea.style.position = "fixed"
textArea.style.top = "0px"
textArea.value = data
document.body.appendChild(textArea)
textArea.focus()
textArea.select()
document.execCommand('copy') ? rs(true) : rj("错误")
textArea.remove()
}
// #endif
})
}
/**
 * Read the clipboard contents.
 * Not available on H5: the promise rejects there.
 * @returns Promise resolving with the clipboard text
 */
export function getClipboardData():Promise<boolean|string>{
	return new Promise((rs, rj) => {
		// #ifndef H5
		uni.getClipboardData({
			success: (res) => rs(res.data),
			fail: (error) => rj(error)
		});
		// #endif
		// #ifdef H5
		console.error('H5无法获取剪切板内容')
		rj('H5无法获取剪切板内容')
		// #endif
	})
}
/**
 * Persist a value in local storage (cookie-style helper).
 * @param {String} key storage key
 * @param {String} data value (any storable type)
 * @returns Boolean true on success, false when storage throws (e.g. quota exceeded)
 */
export function setCookie(key:string, data:any) {
	try {
		uni.setStorageSync(key, data);
		return true;
	} catch (e) {
		return false;
	}
}
/**
 * Remove a value from local storage.
 * @param {String} key storage key
 * @returns Boolean true on success, false when removal throws
 */
export function delCookie(key:string) {
	try {
		uni.removeStorageSync(key);
		return true;
	} catch (e) {
		return false;
	}
}
/**
 * Read a value from local storage.
 * If the stored value is JSON text it is parsed back into an object/array;
 * otherwise the raw string is returned. Returns undefined on storage errors.
 * @param {String} key storage key
 * @returns json/string
 */
export function getCookie(key:string) {
	try {
		const value = uni.getStorageSync(key);
		try {
			let val = JSON.parse(value)
			return val;
		} catch (e) {
			// Not JSON: hand back the raw stored value.
			return value;
		}
	} catch (e) {
		return undefined;
	}
}
/**
 * Append (or overwrite) a query parameter on a URL.
 * @param {string} uri URL
 * @param {string} key parameter name
 * @param {string} value parameter value; falsy values leave the URL untouched
 * @returns the updated URL string
 */
export function httpUrlAddKey(uri:string, key:string, value:string) {
	if (!value) return uri;
	const existing = new RegExp("([?&])" + key + "=.*?(&|$)", "i");
	// Overwrite in place when the key is already present.
	if (existing.test(uri)) {
		return uri.replace(existing, "$1" + key + "=" + value + "$2");
	}
	const joiner = uri.indexOf("?") !== -1 ? "&" : "?";
	return uri + joiner + key + "=" + value;
}
/**
 * Read a query-string parameter from a URL.
 * @param {string} url URL
 * @param {string} key parameter name
 * @returns string the decoded value, or "" when absent
 */
export function getQueryString(url:string,key:string):string {
	// Fix: indexOf returns -1 when there is no "?", and substring(-1) used to
	// hand the WHOLE url to the parser (so "no query string" was never detected);
	// bail out explicitly instead. Also removed the unreachable `break` after return.
	var qIndex = url.indexOf("?");
	if (qIndex === -1) return "";
	var query_string = url.substring(qIndex);
	var re = /[?&]?([^=]+)=([^&]*)/g;
	var tokens:any;
	while (tokens = re.exec(query_string)) {
		if (decodeURIComponent(tokens[1]) === key) {
			return decodeURIComponent(tokens[2]);
		}
	}
	return "";
}
/**
 * Generate a pseudo-random id string.
 * @param rdix random multiplier factor
 * @param length maximum length of the returned id
 * @param isAddStr when true, render in base-16 (letters+digits) instead of base-10
 */
export function getUid (rdix=1,length=12,isAddStr=false){
	const raw = Math.floor(Math.random() * rdix * Math.floor(Math.random() * Date.now()));
	return raw.toString(isAddStr ? 16 : 10).substring(0, length);
}
/*
Debounce.
Only the last call within `wait` ms wins: the callback fires `wait` ms after calls stop.
@param {Function} func callback to run
@param {Number} wait delay in ms
@param {Boolean} immediate run on the leading edge instead of the trailing edge
*/
// Module-level timer handle shared by EVERY debounce() caller.
// NOTE(review): it is seeded with a random id string; clearTimeout(string) is a
// harmless no-op, but sharing one handle means independent call sites debounce
// each other — confirm this is intended.
var timeout= getUid(1)
export function debounce(func:Function, wait = 500, immediate = false) {
	// Cancel the pending trailing-edge run, if any.
	if (timeout !== null) clearTimeout(timeout);
	// Leading-edge mode (rarely used).
	if (immediate) {
		var callNow = !timeout;
		timeout = setTimeout(() => {
			timeout = null;
		}, wait);
		if (callNow) typeof func === "function" && func();
	} else {
		// Trailing edge: only the final call's timer survives, so func runs wait ms after the last call.
		timeout = getUid(1);
		timeout = setTimeout(() => {
			typeof func === "function" && func();
		}, wait);
	}
}
/**
 * Throttle.
 * Intended contract: at most one invocation per `wait` ms window.
 * @param {Function} func callback to run
 * @param {Number} wait window length in ms
 * @param {Boolean} immediate run at the start of the window instead of its end
 * @return null
 * NOTE(review): `timer` and `flag` are PARAMETERS with defaults, so `flag`
 * resets to false on every call and nothing is actually throttled unless the
 * caller threads the same flag/timer values through — likely a bug; confirm.
 */
export function throttle(func:Function, wait = 500, immediate = true,timer=85688,flag=false) {
	if (immediate) {
		if (!flag) {
			flag = true;
			// Leading edge: run at the start of the window.
			typeof func === 'function' && func();
			timer = setTimeout(() => {
				flag = false;
			}, wait);
		}
	} else {
		if (!flag) {
			flag = true
			// Trailing edge: run at the end of the window.
			timer = setTimeout(() => {
				flag = false
				typeof func === 'function' && func();
			}, wait);
		}
	}
};
// 深度克隆
export function deepClone (obj:any) {
// 对常见的“非”值,直接返回原来值
if([null, undefined, NaN, false].includes(obj)) return obj;
if(typeof obj !== "object" && typeof obj !== 'function') {
//原始类型直接返回
return obj;
}
var o:any = Array.isArray(obj) ? [] : {};
for(let i in obj) {
if(obj.hasOwnProperty(i)){
o[i] = typeof obj[i] === "object" ? deepClone(obj[i]) : obj[i];
}
}
return o;
}
export function quereyDom(t:ComponentInternalInstance,node:string){
// #ifdef APP-NVUE
const dom:any = uni.requireNativePlugin('dom')
return new Promise((res,rej)=>{
setTimeout(function(){
node = node.replace(/#\./g,'')
dom.getComponentRect(t.refs[node], function(el:any) {
res(el.size);
})
},60)
})
// #endif
// #ifndef APP-NVUE
return new Promise((res,rej)=>{
const query = uni.createSelectorQuery().in(t);
query.select(node).boundingClientRect(el => {
res(el);
}).exec();
})
// #endif
}
/**
* 是否是手机号码
* @param phone 号码
* @returns Boolean
*/
export function isPhone(phone:string|number){
let val = String(phone);
let reg = /^(13[0-9]|14[01456879]|15[0-35-9]|16[2567]|17[0-8]|18[0-9]|19[0-35-9])\d{8}$/
return !!val.match(reg);
}
/**
* 是否含有中文
* @param s 字符串
* @returns Boolean
*/
export function isChina(s:string){
var patrn=/[\u4E00-\u9FA5]|[\uFE30-\uFFA0]/gi;
return !!patrn.exec(s);
}
/**
* 是否为空
* @description 判断是否是null,对象是否为空,数组是否为空。是否为 undefaind,是否为 “”空字符串。
* @param s 任意
*/
export function isEmpty(s:any){
if(typeof s === 'string'){
s = s.trim();
}
if(s=="") return true
if(s==null) return true;
if(typeof s === 'undefined') return true;
if(Array.isArray(s)){
if(s.length==0) return true;
}
if(typeof s ==='object'){
if(Object.keys(s).length==0) return true;
}
return false;
}
/**
* 是否邮箱
* @param s 字符串
* @returns Boolean
*/
export function isEmail(s:string){
let reg = /^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$/;
return !!s.match(reg);
}
/**
* 是否身份证号
* @param val 字符号或者数字
* @returns Boolean
* @author https://cloud.tencent.com/developer/article/1114323
*/
export function isIdCard (val:string|number) {
val = String(val)
var p = /^[1-9]\d{5}(18|19|20)\d{2}((0[1-9])|(1[0-2]))(([0-2][1-9])|10|20|30|31)\d{3}[0-9Xx]$/;
var factor = [ 7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2 ];
var parity = [ 1, 0, 'X', 9, 8, 7, 6, 5, 4, 3, 2 ];
var code = val.substring(17);
if(p.test(val)) {
var sum:number = 0;
for(var i=0;i<17;i++) {
let id:number|string|any = val[i]
sum += id*factor[i];
}
if(parity[sum % 11] == code.toUpperCase()) {
return true;
}
}
return false;
}
/**
 * Validate a mainland-China license-plate number.
 * @description 5 trailing characters for blue plates, 6 for green (new-energy) plates.
 * @param s string
 * @returns Boolean
 */
export function isIdCar(s:string){
	// Province abbreviation + issuing-authority letter + 5-6 alphanumerics.
	// Fix: the old pattern wrote the provinces as [京|沪|...], which made "|" a literal
	// member of the character class and accepted plates such as "|A12345".
	let reg = /^[京沪津渝鲁冀晋蒙辽吉黑苏浙皖闽赣豫湘鄂粤桂琼川贵云藏陕甘青宁新港澳台使]{1}[A-Z]{1}[A-Z_0-9]{5,6}$/
	return !!s.match(reg);
}
/**
* 纯数字密码验证
* @param s 字符串或者数字
* @param len 最小长度,默认6
* @param maxLen 最大长度,默认20
* @returns Boolean
*/
export function isPasswordOfNumber(s:number|string,len=6,maxLen=20){
s = String(s);
let reg = new RegExp(`^[0-9]{${len},${maxLen}}$`)
return !!s.match(reg)
}
/**
 * Password check.
 * @param s string or number
 * @param len minimum length, default 6
 * @param maxLen maximum length, default 20
 * @param model 0: must mix digits and letters, no special chars allowed;
 *              1: must mix at least two of digits/lowercase/uppercase/specials;
 *              2: must contain a digit and a letter, listed specials allowed.
 * @returns Boolean
 * Fix: the length bounds were hard-coded to 6-20 and silently ignored the
 * len/maxLen parameters; they are now interpolated into each pattern
 * (the defaults keep the old 6-20 behavior).
 */
export function isPasswordOfOther(s:string|number,len=6,maxLen=20,model=0){
	s = String(s);
	// Must contain digits and letters; alphanumerics only.
	let reg = new RegExp(`^(?![0-9]+$)(?![a-zA-Z]+$)[0-9A-Za-z]{${len},${maxLen}}$`)
	// Must mix at least two of: digits, lowercase, uppercase, specials.
	if(model===1){
		reg = new RegExp(`^(?![0-9]+$)(?![a-z]+$)(?![A-Z]+$)(?!([^(0-9a-zA-Z)])+$).{${len},${maxLen}}$`)
	}
	// Must contain a digit and a letter; listed special characters allowed.
	if(model===2){
		reg = new RegExp(`(?=.*([a-zA-Z].*))(?=.*[0-9].*)[a-zA-Z0-9-*/+.~!@#$%^&*()]{${len},${maxLen}}$`)
	}
	return !!s.match(reg)
}
/**
 * Check whether a value is (or parses to) a valid date.
 * @param s string, number (timestamp) or Date
 * @returns Boolean
 * Note: falsy inputs (0, "", null, undefined) are rejected.
 */
export function isDate(s:string|number|Date){
	if(s==null||typeof s === 'undefined' || !s) return false;
	if(typeof s ==='string'){
		// iOS/macOS Safari cannot parse "yyyy-mm-dd"; convert dashes to slashes.
		// Fix: replace(/-/g,'/') — the old replace('-','/') only swapped the FIRST dash,
		// leaving strings like "2020/01-02" that still fail to parse on those platforms.
		s = s.replace(/-/g,'/');
	}
	let d = new Date(s);
	if(d.toString() == 'Invalid Date') return false;
	return true;
}
/**
* 显示信息
* @param word 标题
* @param mask 不允许穿透
* @param icon 图标
*/
export function toast(word:string,mask:boolean=true,icon:any='none'){
// #ifndef MP-ALIPAY
uni.showToast({
mask:mask,
title:word,
icon:icon
})
// #endif
// #ifdef MP-ALIPAY
uni.showToast({
title:word,
icon:icon
})
// #endif
}
/**
* 获取屏幕窗口安全高度和宽度
* 注意是针对种屏幕的统一计算,统一高度,不再让uni获取有效高度而烦恼。
* 请一定要在onMounted或者onLoad中调用,否则不准确在h5端。
* @return {height,width,top,isCustomHeader,statusBarHeight,sysinfo}
*/
export function getWindow():{width:number,height:number,top:number,bottom:number,statusBarHeight:number,isCustomHeader:Boolean,sysinfo:UniApp.GetSystemInfoResult}{
// let getsysinfoSync = getCookie("tmui_sysinfo")
// if(getsysinfoSync){
// return getsysinfoSync
// }
const sysinfo = uni.getSystemInfoSync();
let top =0;
let height = sysinfo.windowHeight;
let nowPage = getCurrentPages().pop()
let isCustomHeader = false;
let pages = uni.$tm?.pages??[]
let bottom = sysinfo.safeAreaInsets?.bottom??0;
for(let i=0;i<uni.$tm.pages.length;i++){
if(nowPage?.route==uni.$tm.pages[i].path&&uni.$tm.pages[i].custom=='custom'){
isCustomHeader = true;
break;
}
}
// #ifdef H5
// 兼容说明:h5端第一次获取的高度和第二次获取的高度是有差异 的。
if (isCustomHeader) {
height = sysinfo.windowHeight+sysinfo.windowTop
}else{
top = 44
if(sysinfo.windowTop>0){
height = sysinfo.windowHeight;
}else{
height = sysinfo.windowHeight+sysinfo.windowTop
}
}
// #endif
let reulst = {bottom:bottom,height:height,width:sysinfo.windowWidth,top:top,isCustomHeader:isCustomHeader,statusBarHeight:sysinfo.statusBarHeight,sysinfo:sysinfo};
return reulst;
}
type openUrlType = "navigate"|"redirect"|"reLaunch"|"switchTab"|"navigateBack"
/**
*
* @param url string 打开的页面路径
* @param type openUrlType "navigate"|"redirect"|"reLaunch"|"switchTab"|"navigateBack"
*/
export function routerTo(url:string,type:openUrlType='navigate'){
type openUrlTypeFun = "navigateTo"|"redirectTo"|"reLaunch"|"switchTab"|"navigateBack"
let funType = {
navigate:"navigateTo",
redirect:"redirectTo",
switchTab:"switchTab",
reLaunch:"reLaunch",
navigateBack:"navigateBack",
}
let fun= funType[type];
if(fun=='navigateBack'){
uni.navigateBack({fail(error) {
console.error(error)
}})
}else if(fun=='reLaunch'){
uni.reLaunch({
url:url,
fail(error) {
console.error(error)
}
})
}else if(fun=='switchTab'){
uni.switchTab({
url:url,
fail(error) {
console.error(error)
}
})
}else if(fun=='redirectTo'){
uni.redirectTo({
url:url,
fail(error) {
console.error(error)
}
})
}else if(fun=='navigateTo'){
uni.navigateTo({
url:url,
fail(error) {
console.error(error)
}
})
}
}
/**
* 将rpx转换为px
* @param v 待转换的数字
* @param screenWidth 屏幕的宽度,如果不提供默认自动获取
* @return number
*/
export function torpx(v:number,screenWidth:number=0){
if(typeof screenWidth === 'undefined'||!screenWidth){
screenWidth = uni.getSystemInfoSync().screenWidth;
}
let pixelRatio = 750 / screenWidth;
return Math.ceil(v * pixelRatio)
}
/**
* 将rpx转换为px
* @param v 待转换的数字
* @return number
*/
export function topx(v:number){
return Math.ceil(uni.upx2px(Number(v)))
} | (rs,rj)=>{
// #ifndef H5
uni.setClipboardData({
data: data,
success:()=>rs(true),
fail:(error)=>rj(error)
});
// #endif
// #ifdef H5
if (navigator.clipboard && window.isSecureContext) {
return navigator.clipboard.writeTex | identifier_body |
app-form.js |
define('app/form',["app/common","moment","jquery/validate","jquery/form"],function(APP) {
var FORM = {
initDatePicker : function(ct){
APP.queryContainer(ct).find('[form-role="date"]').each(function(){
$(this).datePicker();
});
}
};
/**
* 将form格式化为json
* @param {Object} form form对象
* @return {Object} json对象
*/
FORM.formToJson = function(form){
var serializeObj={};
var array=form.serializeArray();
$(array).each(function(){
if(serializeObj[this.name]){
if($.isArray(serializeObj[this.name])){
serializeObj[this.name].push(this.value);
}else{
serializeObj[this.name]=[serializeObj[this.name],this.value];
}
}else{
serializeObj[this.name]=this.value;
}
});
return serializeObj;
};
//--------------------------------------datePicker------------------------------
/**
* 日期 bootstrap datePicker
* @param {Object} opts 设置参数
* @param {Function} callback 日期变化时调用的函数
*/
$.fn.datePicker = function(opts,callback){
var _target = $(this);
require(['bootstrap/datepicker'],function(){
var default_opt = $.extend(true,{
language:'zh-CN',autoclose: true,todayHighlight:true,format:'yyyy-mm-dd'
},opts);
var _event_type = "changeDate";
if(default_opt.viewType == "year"){
default_opt.startView = 2;
default_opt.minViewMode = 2;
_event_type="changeYear";
}else if(default_opt.viewType == "month"){
default_opt.startView = 1;
default_opt.minViewMode = 1;
_event_type="changeMonth";
}
_target.datepicker(default_opt);
var _default_date = default_opt.defaultDate ? default_opt.defaultDate : APP.formatDate('YYYY-MM-DD');
_target.datepicker('update',APP.formatDate(default_opt.format.toUpperCase(),_default_date));
_target.data('date-value',APP.formatDate('YYYY-MM-DD',_default_date));
_target.datepicker().on(_event_type,function(e){
if(_target.data('date-value') != APP.formatDate('YYYY-MM-DD',e.date)){
_target.data('date-value',APP.formatDate('YYYY-MM-DD',e.date));
if(typeof callback === 'function') callback(APP.formatDate('YYYY-MM-DD',e.date));
}
})
});
};
/**
* 日期区间 bootstrap dateRangePicker
* @param {Object} opts 设置参数
* @param {Function} callback 设置后调用的函数
*/
$.fn.dateRangePicker = function(opts,callback){
var _target = $(this);
require(['bootstrap/daterangepicker'],function(){
var default_opt = $.extend(true,{
opens: (APP.isRTL ? 'left' : 'right'),
startDate: moment().subtract('days', 29).format('YYYY-MM-DD'),
endDate: moment().format('YYYY-MM-DD'),
minDate: '2012-01-01',
maxDate: moment().format('YYYY-MM-DD'),
dateLimit: {days: 365},
showDropdowns: true,
showWeekNumbers: true,
timePicker: false,
timePickerIncrement: 1,
timePicker12Hour: true,
/*ranges: {
'今天': [moment().format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'昨天': [moment().subtract('days', 1).format('YYYY-MM-DD'), moment().subtract('days', 1).format('YYYY-MM-DD')],
'近7天': [moment().subtract('days', 6).format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'近30天': [moment().subtract('days', 29).format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'本月': [moment().startOf('month').format('YYYY-MM-DD'), moment().endOf('month').format('YYYY-MM-DD')],
'上月': [moment().subtract('month', 1).startOf('month').format('YYYY-MM-DD'), moment().subtract('month', 1).endOf('month').format('YYYY-MM-DD')]
},*/
buttonClasses: ['btn'],
applyClass: 'green',
cancelClass: 'default',
format: 'YYYY-MM-DD',
separator: ' 到 ',
locale: {
"applyLabel": '确定',
"cancelLabel": '取消',
"fromLabel": '从',
"toLabel": '到',
"customRangeLabel": '日期区间选择',
"daysOfWeek": ["日","一","二","三","四","五","六"],
"monthNames": ["1月","2月","3月","4月","5月","6月","7月","8月","9月","10月","11月","12月"],
"firstDay": 1
}
},opts);
_target.daterangepicker(default_opt,function(start, end, label){
if(typeof callback === 'function'){
callback(start, end, label);
}else{
_target.children('span').html(start.format('YYYY年MM月DD日') + ' - ' + end.format('YYYY年MM月DD日'));
//$(target+' span')
}
});
_target.children('span').html(moment().subtract('days', 29).format('YYYY年MM月DD日') + ' - ' + moment().format('YYYY年MM月DD日'));
//$(target+' span').html(moment().subtract('days', 29).format('YYYY年MM月DD日') + ' - ' + moment().format('YYYY年MM月DD日'));
})
};
//--------------------------form validate------------------------------
//jquery.validate默认设置
var validate_default_settings = {
errorElement: 'span',
errorClass: 'help-block help-block-error',
focusInvalid: true,
onkeyup: false,
errorPlacement: function (error, element) {
/*if(element.siblings("span.input-group-addon").size() > 0){//treeselect控件验证时隐藏错误span
error.addClass('hide');
}*/
if (element.parent(".input-group").size() > 0) {//带图标的输入框
error.insertAfter(element.parent(".input-group"));
} else if (element.attr("data-error-container")) { //指定container存放错误
error.appendTo(element.attr("data-error-container"));
} else if (element.parents('.radio-list').size() > 0) { //radio
error.appendTo(element.parents('.radio-list').attr("data-error-container"));
} else if (element.parents('.radio-inline').size() > 0) {
error.appendTo(element.parents('.radio-inline').attr("data-error-container"));
} else if (element.parents('.checkbox-list').size() > 0) {
error.appendTo(element.parents('.checkbox-list').attr("data-error-container"));
} else if (element.parents('.checkbox-inline').size() > 0) {
error.appendTo(element.parents('.checkbox-inline').attr("data-error-container"));
} else if(element.siblings("i.validate-icon").size() > 0){//图标方式提示错误
var icon = element.siblings("i.validate-icon");
icon.removeClass('fa-check').addClass("fa-warning");
icon.attr("data-original-title", error.text()).tooltip();
}else {
error.insertAfter(element);
}
},
invalidHandler: function (event, validator) {
},
highlight: function (element) {
$(element).closest('.form-group').removeClass("has-success").addClass('has-error');
},
success: function (label,element) {
if($(element).siblings("i.validate-icon").size() > 0){//图标方式提示错误
var icon = $(element).siblings("i.validate-icon");
$(element).closest('.form-group').removeClass('has-error').addClass('has-success');
icon.removeClass("fa-warning");
if($(element).tagName == 'INPUT') icon.addClass("fa-check");
icon.removeAttr("data-original-title");
}else {
label.closest('.form-group').removeClass('has-error');
}
}
};
//jquery.validate增加select2验证方法
$.validator.addMethod("selectOpt", function(value, element) {
return this.optional(element) || (value != "-1");
}, "请选择");
$.validator.addMethod("checkExists", function(value, element,p) {
if(APP.isEmpty(value)) return true;
if(APP.isEmpty(p)){
alert('请设置字段校验参数');
return false;
}
if(APP.isEmpty(p.url) && APP.isEmpty(p.stmID || p.stmid || p.stmId)){
alert('请设置字段校验参数中的url或者stmID');
return false;
}
var paramData = {param : (p.data || {})};
paramData.param[element.name] = value;
if(p.original) paramData.param["o_"+element.name] = p.original;//修改form中的初始值
if(p.joinField){//参与验证字段值
if($.isArray(p.joinField)){
for(var i=0;i<p.joinField.length;i++){
var joinField = $(p.joinField[i]);
paramData.param[joinField.attr("name")] = joinField.val();
if(joinField.data("original") && joinField.data("original") != joinField.val()) {//当参与验证字段值发生变化的时候,则取消当前字段的初始值验证
paramData.param["o_"+element.name] = "";
}
}
}else{
var joinField = $(p.joinField);
paramData.param[joinField.attr("name")] = joinField.val();
if(joinField.data("original") && joinField.data("original") != joinField.val()) {//当参与验证字段值发生变化的时候,则取消当前字段的初始值验证
paramData.param["o_"+element.name] = "";
}
}
}
if(!APP.isEmpty(p.url)){
return APP.postJson(p.url,paramData,false);
}else{
paramData.stmID = p.stmID || p.stmid || p.stmId;
return APP.isEmpty(APP.postJson('/app/common/selectMapByStmID',paramData,false));
}
}, "已存在");
/**
* 初始化form
* @param {Object} opts 初始化参数
* @param {Function} callback 成功回调函数
* @param {Function} errorback 失败回调函数
*/
$.fn.initForm = function (opts,callback,errorback) {
var _this = $(this);
if(opts.autoClear)_this.clearForm(true); //静态modal中的form 先清空再初始化
if(APP.isEmpty(opts)) opts = {};
if(APP.isEmpty(opts.fieldOpts)) opts.fieldOpts = {};//fieldOpts表单元素的初始化参数
var validate_settings = $.extend(true,validate_default_settings,opts.validate);
var _validate = _this.validate(validate_settings);
//_validate.resetForm();
var isInitValue = !APP.isEmpty(opts.formData);
var formField;
_this.find(opts.fieldSelector ? opts.fieldSelector : '*[name]').each(function(){
formField = $(this);
var _fieldName = formField.attr('name');
var _fieldRole = formField.attr('form-role');
if(formField.data("init")) formField.val(formField.data("init"));
if(isInitValue){
var _fieldValue = opts.formData[_fieldName];
if(_fieldName.indexOf(".") > 0){
var _fieldNameSp = _fieldName.split(".");
_fieldValue = opts.formData[_fieldNameSp[0]];
for(var i=1;_fieldValue && i<_fieldNameSp.length;i++){
_fieldValue = _fieldValue[_fieldNameSp[i]]
}
}
if(_fieldValue != undefined){
if(this.type == 'checkbox'){
var _checked = (_fieldValue == ((formField.data('on-value') !== undefined) ? formField.data('on-value')+'' : '1'));
formField.attr('checked',_checked);
if(formField.hasClass('bs-switch')){
formField.bootstrapSwitch('state', _checked);
formField.trigger("switch:change", [_checked]);//强制触发change方法赋值
}
}else{
formField.val(_fieldValue);
if(formField.data("init")) formField.data("init",_fieldValue);
}
formField.data("original",_fieldValue);//记录该字段的初始值,验证唯一性使用
}
}else{
formField.removeData("original");
}
//初始化js定义的验证规则,如有checkExists规则需要将original初始值作为入参
if(opts.rules && opts.rules[_fieldName]){
formField.rules( "remove");
if(opts.rules[_fieldName].checkExists){
opts.rules[_fieldName].checkExists.original = formField.val();
}
formField.rules( "add", opts.rules[_fieldName]);
}
if(_fieldRole == 'select'){
var _selectOpt = opts.fieldOpts[_fieldName] || {};
try{
if(formField.attr('placeholder') && !isInitValue) _selectOpt.placeholder = JSON.parse(formField.attr('placeholder'));
}catch(e){alert("placeholder属性值必须为json字符串");}
if(formField.data('json')) _selectOpt.jsonData = formField.data('json');
else if(formField.data('stmid')) _selectOpt.stmID = formField.data('stmid');
else if(formField.data('dict-type')){
_selectOpt.data = APP.getDictByType(formField.data('dict-type'));
if($.isArray(_selectOpt.data)){
for(var i=0;i<_selectOpt.data.length;i++){//select2使用text显示
_selectOpt.data[i].id = _selectOpt.data[i].value;
_selectOpt.data[i].text = _selectOpt.data[i].name;
}
}
}
formField.select(_selectOpt);
}
if(_fieldRole == 'treeSelect'){
var _treeSelectOpt = opts.fieldOpts[_fieldName] || {};
if(formField.data('stmid')) _treeSelectOpt.stmID = formField.data('stmid');
if(!formField.attr('id')){
alert("请指定treeSelect表单元素的id属性");
return;
}
formField.treeSelect(_treeSelectOpt);
}
});
var _in_modal = (_this.parents('.modal-dialog').size() > 0) ? '.modal-dialog' : '';
//提交是初始化bean的提交类型 add save delete 对应BaseBean 的form_action属性
if(opts.formAction){
if(_this.children(":hidden[name='form_action']").size()>0){
_this.children(":hidden[name='form_action']").val(opts.formAction);
}else{
_this.append("<input type='hidden' name='form_action' value='"+opts.formAction+"'>");
}
}
opts.url = APP.ctx + opts.url;
var form_opt = $.extend(true,{
ajax:true,
beforeSubmit : function(formData, jqForm, options){
APP.blockUI({target:_in_modal ? '.modal-dialog' : 'body',message:'提交中',gif : 'form-submit'});
return true;
},
type : 'post',
dataType : 'json',
includeHidden : true,
error:function(error){
if(APP.debug)console.log(error);
APP.unblockUI(_in_modal ? '.modal-dialog' : 'body');
APP.notice('',"系统错误 错误代码:"+error.status+" 错误名称:"+error.statusText,'error',_in_modal);
if(typeof errorback === 'function')errorback(error);
else if(opts.onError) opts.onError(error);
},
success:function(response, status){
if(APP.debug)console.log(response);
APP.unblockUI(_in_modal ? '.modal-dialog' : 'body');
if(response.OK){
APP.notice('',response[APP.MSG],'success',_in_modal);
//动态更新规格,否则会造成重复提交验证不通过
_this.find('.checkExists').each(function(){
var _c_form_field = $(this);
var _c_field_name = formField.attr('name');
if(opts.rules && opts.rules[_c_field_name] && opts.rules[_c_field_name].checkExists){
_c_form_field.rules( "remove","checkExists");
opts.rules[_c_field_name].checkExists.original = _c_form_field.val();
_c_form_field.rules( "add", opts.rules[_c_field_name]);
}
});
if(typeof callback === 'function')callback(response[APP.DATA]);
else if(opts.onSuccess) opts.onSuccess(response[APP.DATA]);
}else{
APP.notice('',response[APP.MSG],'warning',_in_modal);
if(typeof errorback === 'function')errorback(response,status);
else if(opts.onError) opts.onError(response,status);
}
}
},opts);
if(form_opt.ajax) _this.ajaxForm(form_opt);
}
/**
* form表单提交
* @param {String} url form提交url
* @param {Function} callback 回调函数
*/
$.fn.postForm = function(url,callback){
var _form = $(this);
if(_form.is('form')){
$.ajax({
type:"POST",
url:url,
dataType:"json",
contentType:"application/json",
data:JSON.stringify(FORM.formToJson(_form)),
success:function(ret,status){
callback(result,status);
},
error:function(xhr){
APP.notice('系统错误','错误代码['+xhr.status+'] 错误名称['+xhr.statusText+']','error');
}
});
}else
alert("对象不是表单");
};
//------------------------下拉列表----------------------
//初始化下拉列表语言
var select2_language = {
errorLoading: function () {return '无法载入结果。';},
inputTooLong: function (args) {
var overChars = args.input.length - args.maximum;
var message = '请删除' + overChars + '个字符';
return message;
},
inputTooShort: function (args) {
var remainingChars = args.minimum - args.input.length;
var message = '请再输入至少' + remainingChars + '个字符';
return message;
},
loadingMore: function () {return '载入更多结果…';},
maximumSelected: function (args) {
var message = '最多只能选择' + args.maximum + '个项目';
return message;
},
noResults: function () {return '未找到结果';},
searching: function () {return '搜索中…'; }
};
//select2下拉列表默认设置
var select2_default_opts = {
language: select2_language,
placeholder: {id:"-1",text:"请选择..."},
maximumSelectionLength: 50, //多选最多选择个数
allowClear:true,//自动显示清除按钮
width:"100%"
};
/**
* select2下拉列表
* @param {Object} opts select2参数,自定义参数如下
* jsonData[服务器或静态json文件(static/src/jsons/下)的url]
* stmID[sqlMap语句ID]
* url[服务器url实时获取数据(搜索框实时发送请求)]
*
* @return {Object} select控件
*/
function _fill_options(_select,opt_data){
_select.empty();
if($.isArray(opt_data)){
for(var i=0;i<opt_data.length;i++){
_select.append("<option value='"+opt_data[i].id+"'>"+opt_data[i].text+"</option>");
}
}
_select.change();
}
$.fn.select = function ( opts ) {
var _select = $(this);
require(['jquery/select2'],function(){
select2_default_opts.data = null;
select2_default_opts.ajax = null;
if(opts){
if((opts.jsonData||opts.stmID) && opts.data === undefined){//增加jsonData选项获取静态.json文件或者直接通过sqlMapper的sqlID获取数组数据
if(APP.isEmpty(opts.param)) opts.param = {};
if(_select.data("parent-for")){
var _parent_sel = $(_select.data("parent-for"));
opts.param[_parent_sel.attr("name").replace(".","_")] = _parent_sel.val();//替换参数中的. 否则mapper文件会无法识别
}
var url = opts.url || APP.stmidListUrl;
var type = "POST";
if(opts.jsonData && opts.jsonData != ""){
url = opts.jsonData;
type = "GET";
}
var paramData = {};
if(opts.stmID) paramData.stmID=opts.stmID;
if(opts.param) paramData.param=opts.param;
//同步方式防止数据量大是无法加载
APP.ajax(url,paramData,type,false,function(ret){
opts.data = ret;
});
}else if(opts.url && opts.ajax === undefined){//默认ajax方法
opts.ajax = {
delay: 250,
url : opts.url,
data: function (params) {
var queryParameters = {
q: params.term
}
return queryParameters;
}
};
}
}
//允许增加选项
if(opts.allowAdd || _select.data("allow-add")){
if(_select.parent('.input-group').length > 0){
_select.nextAll(".input-group-btn").remove();
_select.unwrap();
}
var _add_btn_id = "select-add-btn-"+new Date().getTime();
var _add_btn = $("<span class='input-group-btn' id='"+_add_btn_id+"'><a class='btn blue'><i class='fa fa-plus'></i></a></span>");
_select.wrap("<div class='input-group'></div>");
_add_btn.insertAfter(_select);
_add_btn.click(function(){
var _this = $(this);
var _adddiv = $("<div>");
var _addform = $("<div class='row'><div class='col-md-12'><div class='form-group'><label class='control-label col-md-3'>代码</label><div class='col-md-9'><input type='text' name='_select_type_code' class='form-control input-small'></div></div></div></div>"+
"<div class='row'><div class='col-md-12'><div class='form-group'><label class='control-label col-md-3'>名称</label><div class='col-md-9'><input type='text' name='_select_type_name' class='form-control input-small'></div></div></div></div>"+
"<a class='btn blue btn-block'> <i class='fa fa-plus'></i> 增加 </a>");
_adddiv.append(_addform);
_adddiv.children(".btn").click(function(){
var _code = _adddiv.find("input[name='_select_type_code']").val();
var _name = _adddiv.find("input[name='_select_type_name']").val();
if($.trim(_code) == "" || $.trim(_name) == ""){
_adddiv.closest(".popover").removeClass("info").addClass("error");
_adddiv.closest(".popover-content").prev().html("<i class='fa fa-plus'/> 代码或名称不能为空");
return;
}
if(_select.children("option[value='"+_code+"']").length > 0){
_adddiv.closest(".popover").removeClass("info").addClass("error");
_adddiv.closest(".popover-content").prev().html("<i class='fa fa-plus'/> 代码已存在")
return;
}
_adddiv.closest(".popover").removeClass("error");
_select.append("<option value='"+_code+"'>"+_name+"</option>");
_select.val(_code).trigger("change");
_this.popover('destroy');
})
APP.popover(_this,_adddiv.get(),"info","fa-plus","增加选择","auto right",235);
});
}
var default_opt = $.extend(true,select2_default_opts,opts);
_select.select2(default_opt);
if(_select.data("original") || _select.data("init")) _select.val((_select.data("original") || _select.data("init"))).trigger("change");
else _select.val(_select.val()).trigger("change");
_select.on("select2:select", function (e) {
if(_select.val() != '-1' && _select.val() != ''){
_select.closest('.form-group').removeClass('has-error');
_select.siblings("span#"+_select.attr("id")+"-error").remove();
_select.siblings("i.validate-icon").removeClass("fa-check fa-warning").removeAttr("data-original-title");
}
});
//级联下拉框
if(_select.data("parent-for")){
$(_select.data("parent-for")).on("change",function(){
opts.param[$(this).attr("name").replace(".","_")] = $(this).val(); //替换参数中的. 否则mapper文件会无法识别
var url = opts.url || APP.stmidListUrl;
var type = "POST";
var paramData = {};
if(opts.stmID) paramData.stmID=opts.stmID;
if(opts.param) paramData.param=opts.param;
//同步方式防止数据量大是无法加载
APP.ajax(url,paramData,type,false,function(ret){
_fill_options(_select,ret);
});
});
}
});
return _select;
};
FORM.getSelectedVal = function(sel){
require(['jquery/select2'],function(){
return $(sel).val();
})
}
FORM.getSelectedText = function(sel){
require(['jquery/select2'],function(){
return $(sel).find("option:selected").text();
})
}
/**
* 基于ztree的treeSelect
* 定义了默认的onClick方法
* @param {Object} settings ztree参数
* @param {String} treeId ztree控件ID
*/
$.fn.treeSelect = function(settings){
var _this = $(this);
var treeId = _this.attr('id');
var _parent = _this.parent();
var _sel_name = _this.attr("name");
//保存ID的隐藏控件
var _id_filed = _this.prevAll("input[data-id-for='"+_sel_name+"']");
if(_id_filed.length == 0){
alert("请在treeSelect元素之前添加id值控件");
return _this;
}
//保存treeSort的隐藏控件,用于树形排序(祖先节点sort-id)
var _tree_filed = _this.prevAll("input[data-tree-for='"+_sel_name+"']");
var _key_id = "id";
var _key_name = "name";
var _key_parent = "pId";
var _key_sort = "sort";
//自定义id、pid、name属性名称
if(!APP.isEmpty(_this.attr('tree-key-id')))_key_id = _this.attr('tree-key-id');
if(!APP.isEmpty(_this.attr('tree-key-name')))_key_name = _this.attr('tree-key-name');
if(!APP.isEmpty(_this.attr('tree-key-pid')))_key_parent = _this.attr('tree-key-pid');
if(!APP.isEmpty(_this.attr('tree-key-sort')))_key_sort = _this.attr('tree-key-sort');
if(settings && settings.data ){
if(settings.data.key && settings.data.key.name) _key_name = settings.data.key.name;
if(settings.data.simpleData){
if(settings.data.simpleData.idKey) _key_id = settings.data.simpleData.idKey;
if(settings.data.simpleData.pIdKey) _key_parent = settings.data.simpleData.pIdKey;
}
}
require(['app/tree'],function(){
//为当前控件增加必要的显示控件和树形下拉菜单
var inputGroup = $("<div class='input-group'></div>");//为当前控件增加图标
var inputIconDiv = $("<div class='input-icon'>");
var inputIcon = $("<i class='fa fa-times fa-fw'></i>");
inputIconDiv.append(inputIcon);
var selBtn = $("<span class='input-group-btn' style='cursor: pointer;'><button class='btn btn-success' type='button'><i class='fa fa-list'></i></span>");//图标-点击显示下拉菜单
inputIconDiv.append(_this);
_this.css("cursor","pointer");
//_this.appendTo(inputIconDiv);//将当前控件放入input-group
inputGroup.append(inputIconDiv);
inputGroup.append(selBtn);//增加图标
_parent.append(inputGroup);//将input-group放入当前控件原父节点
var menuContent = $("<div id='"+treeId+"_MenuContent' style='display:none;height: 150px;overflow-y: auto; background-color: #F5F5F5;'></div>");//下拉菜单显示层
var treeSel = $("<ul id='"+treeId+"' class='ztree' style='margin-top:0; width:100%;'></ul>");//ztree控件
menuContent.append(treeSel);//将树形放入下拉菜单显示层
_parent.append(menuContent);//将下拉菜单显示层放入当前节点原父节点
var treesel_settings = $.extend(true,{
data : {
key : {name : _key_name},
simpleData: {
enable: true,
idKey: _key_id,
pIdKey: _key_parent
}
},
callback: {
onClick: function(e, tree_id, treeNode){//点击时将数据传入显示控件
var zTree = $.fn.zTree.getZTreeObj(tree_id),
nodes = zTree.getSelectedNodes(),
_name = "",
_id = "";
nodes.sort(function compare(a,b){return a[_key_id]-b[_key_id];});
for (var i=0, l=nodes.length; i<l; i++) {
_name += nodes[i][_key_name] + ",";
_id += nodes[i][_key_id] + ",";
}
if(_tree_filed.length == 1 ){ //如果为单选且页面定义了parentTree隐藏域,则为parentTree赋值
var _tree_sort = "";
if(!APP.isEmpty(treeNode[_key_sort])) _tree_sort = treeNode[_key_sort] + "-" + treeNode[_key_id];
else _tree_sort = "0-" + treeNode[_key_id];
if(!APP.isEmpty(treeNode['parentTree'])) _tree_sort = treeNode['parentTree'] + "," + _tree_sort;
else if(!APP.isEmpty(treeNode['parent_tree'])) _tree_sort = treeNode['parent_tree'] + "," + _tree_sort;
_tree_filed.val(_tree_sort);
}
if (_name.length > 0 ) _name = _name.substring(0, _name.length-1);
if (_id.length > 0 ) _id = _id.substring(0, _id.length-1);
_this.val(_name);
//validate字段去除
_this.closest('.form-group').removeClass('has-error');
_this.parent().siblings("span#"+_this.attr("id")+"-error").remove();
_this.parent().siblings("i.validate-icon").removeClass("fa-check fa-warning").removeAttr("data-original-title");
_id_filed.val(_id);
inputIcon.css('color','red');
if (settings.onClick) {
settings.onClick.toFunc().call(this, e, tree_id, treeNode);
}
},
onAsyncSuccess : function(e, tree_id, treeNode, msg){//数据同步成功后显示默认值
if(treeNode === undefined){//根节点同步时显示默认值
var zTree = $.fn.zTree.getZTreeObj(tree_id);
if(_id_filed.attr('value')){
var _selectedNode = zTree.getNodeByParam(_key_id,_id_filed.attr('value'),null);
zTree.selectNode(_selectedNode);
if(_selectedNode) {
_this.attr('value',_selectedNode[_key_name]);
inputIcon.css('color','red');
}
}
}
if (settings.onAsyncSuccess) {
settings.onAsyncSuccess.toFunc().call(this, e, tree_id, treeNode,msg);
}
}
}
},settings);
/**
* 树形下拉列表隐藏-for-treeSelect
* @param {String} content 下拉列表显示DIV的ID
*/
function _treeSelect_hideMenu(content) {
$("#"+content).fadeOut("fast");
$("body").unbind("mousedown", _treeSelect_onBodyDown);
}
/**
* 树形下拉列表触发隐藏点击事件-for-treeSelect
* @param {Object} event 事件对象-传入了menuContentID(下拉列表显示DIV的ID)数据
*/
function _treeSelect_onBodyDown(event) {
if (!(event.target.id == event.data.menuContentID || $(event.target).parents("#"+event.data.menuContentID).length>0)) {
_treeSelect_hideMenu(event.data.menuContentID);
}
}
//显示树形下拉菜单
function _treeSelect_showMenu(){
if(menuContent.css("display") == "none"){
var offset = _this.offset();
menuContent.css({width: + offset.width + "px",left:offset.left + "px", top:offset.top + _this.outerHeight() + "px"}).slideDown("fast");
$("body").bind("mousedown",{menuContentID:treeId+"_MenuContent"}, _treeSelect_onBodyDown);
}
}
//点击显示树形下拉菜单
selBtn.click(function() {
_treeSelect_showMenu();
});
//回车显示
_this.keypress(function(e){
if(e.keyCode == 13) _treeSelect_showMenu();
});
_this.click(function() {
_treeSelect_showMenu();
});
//删除数据
inputIcon.click(function() {
_this.val('');
_id_filed.val('');
if(_tree_filed.length == 1 ){
_tree_filed.val('');
}
$(this).css('color','#ccc');
});
var _treeObj = treeSel.tree(treesel_settings);
_this.treeObj = _treeObj;
});
return _this;
};
return FORM;
});
| identifier_body | ||
app-form.js |
define('app/form',["app/common","moment","jquery/validate","jquery/form"],function(APP) {
var FORM = {
initDatePicker : function(ct){
APP.queryContainer(ct).find('[form-role="date"]').each(function(){
$(this).datePicker();
});
}
};
/**
* 将form格式化为json
* @param {Object} form form对象
* @return {Object} json对象
*/
FORM.formToJson = function(form){
var serializeObj={};
var array=form.serializeArray();
$(array).each(function(){
if(serializeObj[this.name]){
if($.isArray(serializeObj[this.name])){
serializeObj[this.name].push(this.value);
}else{
serializeObj[this.name]=[serializeObj[this.name],this.value];
}
}else{
serializeObj[this.name]=this.value;
}
});
return serializeObj;
};
//--------------------------------------datePicker------------------------------
/**
* 日期 bootstrap datePicker
* @param {Object} opts 设置参数
* @param {Function} callback 日期变化时调用的函数
*/
$.fn.datePicker = function(opts,callback){
var _target = $(this);
require(['bootstrap/datepicker'],function(){
var default_opt = $.extend(true,{
language:'zh-CN',autoclose: true,todayHighlight:true,format:'yyyy-mm-dd'
},opts);
var _event_type = "changeDate";
if(default_opt.viewType == "year"){
default_opt.startView = 2;
default_opt.minViewMode = 2;
_event_type="changeYear";
}else if(default_opt.viewType == "month"){
default_opt.startView = 1;
default_opt.minViewMode = 1;
_event_type="changeMonth";
}
_target.datepicker(default_opt);
var _default_date = default_opt.defaultDate ? default_opt.defaultDate : APP.formatDate('YYYY-MM-DD');
_target.datepicker('update',APP.formatDate(default_opt.format.toUpperCase(),_default_date));
_target.data('date-value',APP.formatDate('YYYY-MM-DD',_default_date));
_target.datepicker().on(_event_type,function(e){
if(_target.data('date-value') != APP.formatDate('YYYY-MM-DD',e.date)){
_target.data('date-value',APP.formatDate('YYYY-MM-DD',e.date));
if(typeof callback === 'function') callback(APP.formatDate('YYYY-MM-DD',e.date));
}
})
});
};
/**
* 日期区间 bootstrap dateRangePicker
* @param {Object} opts 设置参数
* @param {Function} callback 设置后调用的函数
*/
$.fn.dateRangePicker = function(opts,callback){
var _target = $(this);
require(['bootstrap/daterangepicker'],function(){
var default_opt = $.extend(true,{
opens: (APP.isRTL ? 'left' : 'right'),
startDate: moment().subtract('days', 29).format('YYYY-MM-DD'),
endDate: moment().format('YYYY-MM-DD'),
minDate: '2012-01-01',
maxDate: moment().format('YYYY-MM-DD'),
dateLimit: {days: 365},
showDropdowns: true,
showWeekNumbers: true,
timePicker: false,
timePickerIncrement: 1,
timePicker12Hour: true,
/*ranges: {
'今天': [moment().format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'昨天': [moment().subtract('days', 1).format('YYYY-MM-DD'), moment().subtract('days', 1).format('YYYY-MM-DD')],
'近7天': [moment().subtract('days', 6).format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'近30天': [moment().subtract('days', 29).format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'本月': [moment().startOf('month').format('YYYY-MM-DD'), moment().endOf('month').format('YYYY-MM-DD')],
'上月': [moment().subtract('month', 1).startOf('month').format('YYYY-MM-DD'), moment().subtract('month', 1).endOf('month').format('YYYY-MM-DD')]
},*/
buttonClasses: ['btn'],
applyClass: 'green',
cancelClass: 'default',
format: 'YYYY-MM-DD',
separator: ' 到 ',
locale: {
"applyLabel": '确定',
"cancelLabel": '取消',
"fromLabel": '从',
"toLabel": '到',
"customRangeLabel": '日期区间选择',
"daysOfWeek": ["日","一","二","三","四","五","六"],
"monthNames": ["1月","2月","3月","4月","5月","6月","7月","8月","9月","10月","11月","12月"],
"firstDay": 1
}
},opts);
_target.daterangepicker(default_opt,function(start, end, label){
if(typeof callback === 'function'){
callback(start, end, label);
}else{
_target.children('span').html(start.format('YYYY年MM月DD日') + ' - ' + end.format('YYYY年MM月DD日'));
//$(target+' span')
}
});
_target.children('span').html(moment().subtract('days', 29).format('YYYY年MM月DD日') + ' - ' + moment().format('YYYY年MM月DD日'));
//$(target+' span').html(moment().subtract('days', 29).format('YYYY年MM月DD日') + ' - ' + moment().format('YYYY年MM月DD日'));
})
};
//--------------------------form validate------------------------------
//jquery.validate默认设置
var validate_default_settings = {
errorElement: 'span',
errorClass: 'help-block help-block-error',
focusInvalid: true,
onkeyup: false,
errorPlacement: function (error, element) {
/*if(element.siblings("span.input-group-addon").size() > 0){//treeselect控件验证时隐藏错误span
error.addClass('hide');
}*/
if (element.parent(".input-group").size() > 0) {//带图标的输入框
error.insertAfter(element.parent(".input-group"));
} else if (element.attr("data-error-container")) { //指定container存放错误
error.appendTo(element.attr("data-error-container"));
} else if (element.parents('.radio-list').size() > 0) { //radio
error.appendTo(element.parents('.radio-list').attr("data-error-container"));
} else if (element.parents('.radio-inline').size() > 0) {
error.appendTo(element.parents('.radio-inline').attr("data-error-container"));
} else if (element.parents('.checkbox-list').size() > 0) {
error.appendTo(element.parents('.checkbox-list').attr("data-error-container"));
} else if (element.parents('.checkbox-inline').size() > 0) {
error.appendTo(element.parents('.checkbox-inline').attr("data-error-container"));
} else if(element.siblings("i.validate-icon").size() > 0){//图标方式提示错误
var icon = element.siblings("i.validate-icon");
icon.removeClass('fa-check').addClass("fa-warning");
icon.attr("data-original-title", error.text()).tooltip();
}else {
error.insertAfter(element);
}
},
invalidHandler: function (event, validator) {
},
highlight: function (element) {
$(element).closest('.form-group').removeClass("has-success").addClass('has-error');
},
success: function (label,element) {
if($(element).siblings("i.validate-icon").size() > 0){//图标方式提示错误
var icon = $(element).siblings("i.validate-icon");
$(element).closest('.form-group').removeClass('has-error').addClass('has-success');
icon.removeClass("fa-warning");
if($(element).tagName == 'INPUT') icon.addClass("fa-check");
icon.removeAttr("data-original-title");
}else {
label.closest('.form-group').removeClass('has-error');
}
}
};
//jquery.validate增加select2验证方法
$.validator.addMethod("selectOpt", function(value, element) {
return this.optional(element) || (value != "-1");
}, "请选择");
$.validator.addMethod("checkExists", function(value, element,p) {
if(APP.isEmpty(value)) return true;
if(APP.isEmpty(p)){
alert('请设置字段校验参数');
return false;
}
if(APP.isEmpty(p.url) && APP.isEmpty(p.stmID || p.stmid || p.stmId)){
alert('请设置字段校验参数中的url或者stmID');
return false;
}
var paramData = {param : (p.data || {})};
paramData.param[element.name] = value;
if(p.original) paramData.param["o_"+element.name] = p.original;//修改form中的初始值
if(p.joinField){//参与验证字段值
if($.isArray(p.joinField)){
for(var i=0;i<p.joinField.length;i++){
var joinField = $(p.joinField[i]);
paramData.param[joinField.attr("name")] = joinField.val();
if(joinField.data("original") && joinField.data("original") != joinField.val()) {//当参与验证字段值发生变化的时候,则取消当前字段的初始值验证
paramData.param["o_"+element.name] = "";
}
}
}else{
var joinField = $(p.joinField);
paramData.param[joinField.attr("name")] = joinField.val();
if(joinField.data("original") && joinField.data("original") != joinField.val()) {//当参与验证字段值发生变化的时候,则取消当前字段的初始值验证
paramData.param["o_"+element.name] = "";
}
}
}
if(!APP.isEmpty(p.url)){
return APP.postJson(p.url,paramData,false);
}else{
paramData.stmID = p.stmID || p.stmid || p.stmId;
return APP.isEmpty(APP.postJson('/app/common/selectMapByStmID',paramData,false));
}
}, "已存在");
/**
* 初始化form
* @param {Object} opts 初始化参数
* @param {Function} callback 成功回调函数
* @param {Function} errorback 失败回调函数
*/
$.fn.initForm = function (opts,callback,errorback) {
var _this = $(this);
if(opts.autoClear)_this.clearForm(true); //静态modal中的form 先清空再初始化
if(APP.isEmpty(opts)) opts = {};
if(APP.isEmpty(opts.fieldOpts)) opts.fieldOpts = {};//fieldOpts表单元素的初始化参数
var validate_settings = $.extend(true,validate_default_settings,opts.validate);
var _validate = _this.validate(validate_settings);
//_validate.resetForm();
var isInitValue = !APP.isEmpty(opts.formData);
var formField;
_this.find(opts.fieldSelector ? opts.fieldSelector : '*[name]').each(function(){
formField = $(this);
var _fieldName = formField.attr('name');
var _fieldRole = formField.attr('form-role');
if(formField.data("init")) formField.val(formField.data("init"));
if(isInitValue){
var _fieldValue = opts.formData[_fieldName];
if(_fieldName.indexOf(".") > 0){
var _fieldNameSp = _fieldName.split(".");
_fieldValue = opts.formData[_fieldNameSp[0]];
for(var i=1;_fieldValue && i<_fieldNameSp.length;i++){
_fieldValue = _fieldValue[_fieldNameSp[i]]
}
}
if(_fieldValue != undefined){
if(this.type == 'checkbox'){
var _checked = (_fieldValue == ((formField.data('on-value') !== undefined) ? formField.data('on-value')+'' : '1'));
formField.attr('checked',_checked);
if(formField.hasClass('bs-switch')){
formField.bootstrapSwitch('state', _checked);
formField.trigger("switch:change", [_checked]);//强制触发change方法赋值
}
}else{
formField.val(_fieldValue);
if(formField.data("init")) formField.data("init",_fieldValue);
}
formField.data("original",_fieldValue);//记录该字段的初始值,验证唯一性使用
}
}else{
formField.removeData("original");
}
//初始化js定义的验证规则,如有checkExists规则需要将original初始值作为入参
if(opts.rules && opts.rules[_fieldName]){
formField.rules( "remove");
if(opts.rules[_fieldName].checkExists){
opts.rules[_fieldName].checkExists.original = formField.val();
}
formField.rules( "add", opts.rules[_fieldName]);
}
if(_fieldRole == 'select'){
var _selectOpt = opts.fieldOpts[_fieldName] || {};
try{
if(formField.attr('placeholder') && !isInitValue) _selectOpt.placeholder = JSON.parse(formField.attr('placeholder'));
}catch(e){alert("placeholder属性值必须为json字符串");}
if(formField.data('json')) _selectOpt.jsonData = formField.data('json');
else if(formField.data('stmid')) _selectOpt.stmID = formField.data('stmid');
else if(formField.data('dict-type')){
_selectOpt.data = APP.getDictByType(formField.data('dict-type'));
if($.isArray(_selectOpt.data)){
for(var i=0;i<_selectOpt.data.length;i++){//select2使用text显示
_selectOpt.data[i].id = _selectOpt.data[i].value;
_selectOpt.data[i].text = _selectOpt.data[i].name;
}
}
}
formField.select(_selectOpt);
}
if(_fieldRole == 'treeSelect'){
var _treeSelectOpt = opts.fieldOpts[_fieldName] || {};
if(formField.data('stmid')) _treeSelectOpt.stmID = formField.data('stmid');
if(!formField.attr('id')){
alert("请指定treeSelect表单元素的id属性");
return;
}
formField.treeSelect(_treeSelectOpt);
}
});
var _in_modal = (_this.parents('.modal-dialog').size() > 0) ? '.modal-dialog' : '';
//提交是初始化bean的提交类型 add save delete 对应BaseBean 的form_action属性
if(opts.formAction){
if(_this.children(":hidden[name='form_action']").size()>0){
_this.children(":hidden[name='form_action']").val(opts.formAction);
}else{
_this.append("<input type='hidden' name='form_action' value='"+opts.formAction+"'>");
}
}
opts.url = APP.ctx + opts.url;
var form_opt = $.extend(true,{
ajax:true,
beforeSubmit : function(formData, jqForm, options){
APP.blockUI({target:_in_modal ? '.modal-dialog' : 'body',message:'提交中',gif : 'form-submit'});
return true;
},
type : 'post',
dataType : 'json',
includeHidden : true,
error:function(error){
if(APP.debug)console.log(error);
APP.unblockUI(_in_modal ? '.modal-dialog' : 'body');
APP.notice('',"系统错误 错误代码:"+error.status+" 错误名称:"+error.statusText,'error',_in_modal);
if(typeof errorback === 'function')errorback(error);
else if(opts.onError) opts.onError(error);
},
success:function(response, status){
if(APP.debug)console.log(response);
APP.unblockUI(_in_modal ? '.modal-dialog' : 'body');
if(response.OK){
APP.notice('',response[APP.MSG],'success',_in_modal);
//动态更新规格,否则会造成重复提交验证不通过
_this.find('.checkExists').each(function(){
var _c_form_field = $(this);
var _c_field_name = formField.attr('name');
if(opts.rules && opts.rules[_c_field_name] && opts.rules[_c_field_name].checkExists){
_c_form_field.rules( "remove","checkExists");
opts.rules[_c_field_name].checkExists.original = _c_form_field.val();
_c_form_field.rules( "add", opts.rules[_c_field_name]);
}
});
if(typeof callback === 'function')callback(response[APP.DATA]);
else if(opts.onSuccess) opts.onSuccess(response[APP.DATA]);
}else{
APP.notice('',response[APP.MSG],'warning',_in_modal);
if(typeof errorback === 'function')errorback(response,status);
else if(opts.onError) opts.onError(response,status);
}
}
},opts);
if(form_opt.ajax) _this.ajaxForm(form_opt);
}
/**
* form表单提交
* @param {String} url form提交url
* @param {Function} callback 回调函数
*/
$.fn.postForm = function(url,callback){
var _form = $(this);
if(_form.is('form')){
$.ajax({
type:"POST",
url:url,
dataType:"json",
contentType:"application/json",
data:JSON.stringify(FORM.formToJson(_form)),
success:function(ret,status){
callback(result,status);
},
error:function(xhr){
APP.notice('系统错误','错误代码['+xhr.status+'] 错误名称['+xhr.statusText+']','error');
}
});
}else
alert("对象不是表单");
};
//------------------------下拉列表----------------------
//初始化下拉列表语言
var select2_language = {
errorLoading: function () {return '无法载入结果。';},
inputTooLong: function (args) {
var overChars = args.input.length - args.maximum;
var message = '请删除' + overChars + '个字符';
return message;
},
inputTooShort: function (args) {
var remainingChars = args.minimum - args.input.length;
var message = '请再输入至少' + remainingChars + '个字符';
return message;
},
loadingMore: function () {return '载入更多结果…';},
maximumSelected: function (args) {
var message = '最多只能选择' + args.maximum + '个项目';
return message;
},
noResults: function () {return '未找到结果';},
searching: function () {return '搜索中…'; }
};
//select2下拉列表默认设置
var select2_default_opts = {
language: select2_language,
placeholder: {id:"-1",text:"请选择..."},
maximumSelectionLength: 50, //多选最多选择个数
allowClear:true,//自动显示清除按钮
width:"100%"
};
/**
* select2下拉列表
* @param {Object} opts select2参数,自定义参数如下
* jsonData[服务器或静态json文件(static/src/jsons/下)的url]
* stmID[sqlMap语句ID]
* url[服务器url实时获取数据(搜索框实时发送请求)]
*
* @return {Object} select控件
*/
function _fill_options(_select,opt_data){
_select.empty();
if($.isArray(opt_data)){
for(var i=0;i<opt_data.length;i++){
_select.append("<option value='"+opt_data[i].id+"'>"+opt_data[i].text+"</option>");
}
}
_select.change();
}
$.fn.select = function ( opts ) {
var _select = $(this);
require(['jquery/select2'],function(){
select2_default_opts.data = null;
select2_default_opts.ajax = null;
if(opts){
if((opts.jsonData||opts.stmID) && opts.data === undefined){//增加jsonData选项获取静态.json文件或者直接通过sqlMapper的sqlID获取数组数据
if(APP.isEmpty(opts.param)) opts.param = {};
if(_select.data("parent-for")){
var _parent_sel = $(_select.data("parent-for"));
opts.param[_parent_sel.attr("name").replace(".","_")] = _parent_sel.val();//替换参数中的. 否则mapper文件会无法识别
}
var url = opts.url || APP.stmidListUrl;
var type = "POST";
if(opts.jsonData && opts.jsonData != ""){
url = opts.jsonData;
type = "GET";
}
var paramData = {};
if(opts.stmID) paramData.stmID=opts.stmID;
if(opts.param) paramData.param=opts.param;
//同步方式防止数据量大是无法加载
APP.ajax(url,paramData,type,false,function(ret){
opts.data = ret;
});
}else if(opts.url && opts.ajax === undefined){//默认ajax方法
opts.ajax = {
delay: 250,
url : opts.url,
data: function (params) {
var queryParameters = {
q: params.term
}
return queryParameters;
}
};
}
}
//允许增加选项
if(opts.allowAdd || _select.data("allow-add")){
if(_select.parent('.input-group').length > 0){
_select.nextAll(".input-group-btn").remove();
_select.unwrap();
}
var _add_btn_id = "select-add-btn-"+new Date().getTime();
var _add_btn = $("<span class='input-group-btn' id='"+_add_btn_id+"'><a class='b | var _adddiv = $("<div>");
var _addform = $("<div class='row'><div class='col-md-12'><div class='form-group'><label class='control-label col-md-3'>代码</label><div class='col-md-9'><input type='text' name='_select_type_code' class='form-control input-small'></div></div></div></div>"+
"<div class='row'><div class='col-md-12'><div class='form-group'><label class='control-label col-md-3'>名称</label><div class='col-md-9'><input type='text' name='_select_type_name' class='form-control input-small'></div></div></div></div>"+
"<a class='btn blue btn-block'> <i class='fa fa-plus'></i> 增加 </a>");
_adddiv.append(_addform);
_adddiv.children(".btn").click(function(){
var _code = _adddiv.find("input[name='_select_type_code']").val();
var _name = _adddiv.find("input[name='_select_type_name']").val();
if($.trim(_code) == "" || $.trim(_name) == ""){
_adddiv.closest(".popover").removeClass("info").addClass("error");
_adddiv.closest(".popover-content").prev().html("<i class='fa fa-plus'/> 代码或名称不能为空");
return;
}
if(_select.children("option[value='"+_code+"']").length > 0){
_adddiv.closest(".popover").removeClass("info").addClass("error");
_adddiv.closest(".popover-content").prev().html("<i class='fa fa-plus'/> 代码已存在")
return;
}
_adddiv.closest(".popover").removeClass("error");
_select.append("<option value='"+_code+"'>"+_name+"</option>");
_select.val(_code).trigger("change");
_this.popover('destroy');
})
APP.popover(_this,_adddiv.get(),"info","fa-plus","增加选择","auto right",235);
});
}
var default_opt = $.extend(true,select2_default_opts,opts);
_select.select2(default_opt);
if(_select.data("original") || _select.data("init")) _select.val((_select.data("original") || _select.data("init"))).trigger("change");
else _select.val(_select.val()).trigger("change");
_select.on("select2:select", function (e) {
if(_select.val() != '-1' && _select.val() != ''){
_select.closest('.form-group').removeClass('has-error');
_select.siblings("span#"+_select.attr("id")+"-error").remove();
_select.siblings("i.validate-icon").removeClass("fa-check fa-warning").removeAttr("data-original-title");
}
});
//级联下拉框
if(_select.data("parent-for")){
$(_select.data("parent-for")).on("change",function(){
opts.param[$(this).attr("name").replace(".","_")] = $(this).val(); //替换参数中的. 否则mapper文件会无法识别
var url = opts.url || APP.stmidListUrl;
var type = "POST";
var paramData = {};
if(opts.stmID) paramData.stmID=opts.stmID;
if(opts.param) paramData.param=opts.param;
//同步方式防止数据量大是无法加载
APP.ajax(url,paramData,type,false,function(ret){
_fill_options(_select,ret);
});
});
}
});
return _select;
};
FORM.getSelectedVal = function(sel){
require(['jquery/select2'],function(){
return $(sel).val();
})
}
FORM.getSelectedText = function(sel){
require(['jquery/select2'],function(){
return $(sel).find("option:selected").text();
})
}
/**
* 基于ztree的treeSelect
* 定义了默认的onClick方法
* @param {Object} settings ztree参数
* @param {String} treeId ztree控件ID
*/
$.fn.treeSelect = function(settings){
var _this = $(this);
var treeId = _this.attr('id');
var _parent = _this.parent();
var _sel_name = _this.attr("name");
//保存ID的隐藏控件
var _id_filed = _this.prevAll("input[data-id-for='"+_sel_name+"']");
if(_id_filed.length == 0){
alert("请在treeSelect元素之前添加id值控件");
return _this;
}
//保存treeSort的隐藏控件,用于树形排序(祖先节点sort-id)
var _tree_filed = _this.prevAll("input[data-tree-for='"+_sel_name+"']");
var _key_id = "id";
var _key_name = "name";
var _key_parent = "pId";
var _key_sort = "sort";
//自定义id、pid、name属性名称
if(!APP.isEmpty(_this.attr('tree-key-id')))_key_id = _this.attr('tree-key-id');
if(!APP.isEmpty(_this.attr('tree-key-name')))_key_name = _this.attr('tree-key-name');
if(!APP.isEmpty(_this.attr('tree-key-pid')))_key_parent = _this.attr('tree-key-pid');
if(!APP.isEmpty(_this.attr('tree-key-sort')))_key_sort = _this.attr('tree-key-sort');
if(settings && settings.data ){
if(settings.data.key && settings.data.key.name) _key_name = settings.data.key.name;
if(settings.data.simpleData){
if(settings.data.simpleData.idKey) _key_id = settings.data.simpleData.idKey;
if(settings.data.simpleData.pIdKey) _key_parent = settings.data.simpleData.pIdKey;
}
}
require(['app/tree'],function(){
//为当前控件增加必要的显示控件和树形下拉菜单
var inputGroup = $("<div class='input-group'></div>");//为当前控件增加图标
var inputIconDiv = $("<div class='input-icon'>");
var inputIcon = $("<i class='fa fa-times fa-fw'></i>");
inputIconDiv.append(inputIcon);
var selBtn = $("<span class='input-group-btn' style='cursor: pointer;'><button class='btn btn-success' type='button'><i class='fa fa-list'></i></span>");//图标-点击显示下拉菜单
inputIconDiv.append(_this);
_this.css("cursor","pointer");
//_this.appendTo(inputIconDiv);//将当前控件放入input-group
inputGroup.append(inputIconDiv);
inputGroup.append(selBtn);//增加图标
_parent.append(inputGroup);//将input-group放入当前控件原父节点
var menuContent = $("<div id='"+treeId+"_MenuContent' style='display:none;height: 150px;overflow-y: auto; background-color: #F5F5F5;'></div>");//下拉菜单显示层
var treeSel = $("<ul id='"+treeId+"' class='ztree' style='margin-top:0; width:100%;'></ul>");//ztree控件
menuContent.append(treeSel);//将树形放入下拉菜单显示层
_parent.append(menuContent);//将下拉菜单显示层放入当前节点原父节点
var treesel_settings = $.extend(true,{
data : {
key : {name : _key_name},
simpleData: {
enable: true,
idKey: _key_id,
pIdKey: _key_parent
}
},
callback: {
onClick: function(e, tree_id, treeNode){//点击时将数据传入显示控件
var zTree = $.fn.zTree.getZTreeObj(tree_id),
nodes = zTree.getSelectedNodes(),
_name = "",
_id = "";
nodes.sort(function compare(a,b){return a[_key_id]-b[_key_id];});
for (var i=0, l=nodes.length; i<l; i++) {
_name += nodes[i][_key_name] + ",";
_id += nodes[i][_key_id] + ",";
}
if(_tree_filed.length == 1 ){ //如果为单选且页面定义了parentTree隐藏域,则为parentTree赋值
var _tree_sort = "";
if(!APP.isEmpty(treeNode[_key_sort])) _tree_sort = treeNode[_key_sort] + "-" + treeNode[_key_id];
else _tree_sort = "0-" + treeNode[_key_id];
if(!APP.isEmpty(treeNode['parentTree'])) _tree_sort = treeNode['parentTree'] + "," + _tree_sort;
else if(!APP.isEmpty(treeNode['parent_tree'])) _tree_sort = treeNode['parent_tree'] + "," + _tree_sort;
_tree_filed.val(_tree_sort);
}
if (_name.length > 0 ) _name = _name.substring(0, _name.length-1);
if (_id.length > 0 ) _id = _id.substring(0, _id.length-1);
_this.val(_name);
//validate字段去除
_this.closest('.form-group').removeClass('has-error');
_this.parent().siblings("span#"+_this.attr("id")+"-error").remove();
_this.parent().siblings("i.validate-icon").removeClass("fa-check fa-warning").removeAttr("data-original-title");
_id_filed.val(_id);
inputIcon.css('color','red');
if (settings.onClick) {
settings.onClick.toFunc().call(this, e, tree_id, treeNode);
}
},
onAsyncSuccess : function(e, tree_id, treeNode, msg){//数据同步成功后显示默认值
if(treeNode === undefined){//根节点同步时显示默认值
var zTree = $.fn.zTree.getZTreeObj(tree_id);
if(_id_filed.attr('value')){
var _selectedNode = zTree.getNodeByParam(_key_id,_id_filed.attr('value'),null);
zTree.selectNode(_selectedNode);
if(_selectedNode) {
_this.attr('value',_selectedNode[_key_name]);
inputIcon.css('color','red');
}
}
}
if (settings.onAsyncSuccess) {
settings.onAsyncSuccess.toFunc().call(this, e, tree_id, treeNode,msg);
}
}
}
},settings);
/**
* 树形下拉列表隐藏-for-treeSelect
* @param {String} content 下拉列表显示DIV的ID
*/
function _treeSelect_hideMenu(content) {
$("#"+content).fadeOut("fast");
$("body").unbind("mousedown", _treeSelect_onBodyDown);
}
/**
* 树形下拉列表触发隐藏点击事件-for-treeSelect
* @param {Object} event 事件对象-传入了menuContentID(下拉列表显示DIV的ID)数据
*/
function _treeSelect_onBodyDown(event) {
if (!(event.target.id == event.data.menuContentID || $(event.target).parents("#"+event.data.menuContentID).length>0)) {
_treeSelect_hideMenu(event.data.menuContentID);
}
}
//显示树形下拉菜单
function _treeSelect_showMenu(){
if(menuContent.css("display") == "none"){
var offset = _this.offset();
menuContent.css({width: + offset.width + "px",left:offset.left + "px", top:offset.top + _this.outerHeight() + "px"}).slideDown("fast");
$("body").bind("mousedown",{menuContentID:treeId+"_MenuContent"}, _treeSelect_onBodyDown);
}
}
//点击显示树形下拉菜单
selBtn.click(function() {
_treeSelect_showMenu();
});
//回车显示
_this.keypress(function(e){
if(e.keyCode == 13) _treeSelect_showMenu();
});
_this.click(function() {
_treeSelect_showMenu();
});
//删除数据
inputIcon.click(function() {
_this.val('');
_id_filed.val('');
if(_tree_filed.length == 1 ){
_tree_filed.val('');
}
$(this).css('color','#ccc');
});
var _treeObj = treeSel.tree(treesel_settings);
_this.treeObj = _treeObj;
});
return _this;
};
return FORM;
});
| tn blue'><i class='fa fa-plus'></i></a></span>");
_select.wrap("<div class='input-group'></div>");
_add_btn.insertAfter(_select);
_add_btn.click(function(){
var _this = $(this);
| conditional_block |
app-form.js | define('app/form',["app/common","moment","jquery/validate","jquery/form"],function(APP) {
var FORM = {
initDatePicker : function(ct){
APP.queryContainer(ct).find('[form-role="date"]').each(function(){
$(this).datePicker();
});
}
};
/**
* 将form格式化为json
* @param {Object} form form对象
* @return {Object} json对象
*/
FORM.formToJson = function(form){
var serializeObj={};
var array=form.serializeArray();
$(array).each(function(){
if(serializeObj[this.name]){
if($.isArray(serializeObj[this.name])){
serializeObj[this.name].push(this.value);
}else{
serializeObj[this.name]=[serializeObj[this.name],this.value];
}
}else{
serializeObj[this.name]=this.value;
}
});
return serializeObj;
};
//--------------------------------------datePicker------------------------------
/**
* 日期 bootstrap datePicker
* @param {Object} opts 设置参数
* @param {Function} callback 日期变化时调用的函数
*/
$.fn.datePicker = function(opts,callback){
var _target = $(this);
require(['bootstrap/datepicker'],function(){
var default_opt = $.extend(true,{
language:'zh-CN',autoclose: true,todayHighlight:true,format:'yyyy-mm-dd'
},opts);
var _event_type = "changeDate";
if(default_opt.viewType == "year"){
default_opt.startView = 2;
default_opt.minViewMode = 2;
_event_type="changeYear";
}else if(default_opt.viewType == "month"){
default_opt.startView = 1;
default_opt.minViewMode = 1;
_event_type="changeMonth";
}
_target.datepicker(default_opt);
var _default_date = default_opt.defaultDate ? default_opt.defaultDate : APP.formatDate('YYYY-MM-DD');
_target.datepicker('update',APP.formatDate(default_opt.format.toUpperCase(),_default_date));
_target.data('date-value',APP.formatDate('YYYY-MM-DD',_default_date));
_target.datepicker().on(_event_type,function(e){
if(_target.data('date-value') != APP.formatDate('YYYY-MM-DD',e.date)){
_target.data('date-value',APP.formatDate('YYYY-MM-DD',e.date));
if(typeof callback === 'function') callback(APP.formatDate('YYYY-MM-DD',e.date));
}
})
});
};
/**
* 日期区间 bootstrap dateRangePicker
* @param {Object} opts 设置参数
* @param {Function} callback 设置后调用的函数
*/
$.fn.dateRangePicker = function(opts,callback){
var _target = $(this);
require(['bootstrap/daterangepicker'],function(){
var default_opt = $.extend(true,{
opens: (APP.isRTL ? 'left' : 'right'),
startDate: moment().subtract('days', 29).format('YYYY-MM-DD'),
endDate: moment().format('YYYY-MM-DD'),
minDate: '2012-01-01',
maxDate: moment().format('YYYY-MM-DD'),
dateLimit: {days: 365},
showDropdowns: true,
showWeekNumbers: true,
timePicker: false,
timePickerIncrement: 1,
timePicker12Hour: true,
/*ranges: {
'今天': [moment().format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'昨天': [moment().subtract('days', 1).format('YYYY-MM-DD'), moment().subtract('days', 1).format('YYYY-MM-DD')],
'近7天': [moment().subtract('days', 6).format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'近30天': [moment().subtract('days', 29).format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'本月': [moment().startOf('month').format('YYYY-MM-DD'), moment().endOf('month').format('YYYY-MM-DD')],
'上月': [moment().subtract('month', 1).startOf('month').format('YYYY-MM-DD'), moment().subtract('month', 1).endOf('month').format('YYYY-MM-DD')]
},*/
buttonClasses: ['btn'],
applyClass: 'green',
cancelClass: 'default',
format: 'YYYY-MM-DD',
separator: ' 到 ',
locale: {
"applyLabel": '确定',
"cancelLabel": '取消',
"fromLabel": '从',
"toLabel": '到',
"customRangeLabel": '日期区间选择',
"daysOfWeek": ["日","一","二","三","四","五","六"],
"monthNames": ["1月","2月","3月","4月","5月","6月","7月","8月","9月","10月","11月","12月"],
"firstDay": 1
}
},opts);
_target.daterangepicker(default_opt,function(start, end, label){
if(typeof callback === 'function'){
callback(start, end, label);
}else{
_target.children('span').html(start.format('YYYY年MM月DD日') + ' - ' + end.format('YYYY年MM月DD日'));
//$(target+' span')
}
});
_target.children('span').html(moment().subtract('days', 29).format('YYYY年MM月DD日') + ' - ' + moment().format('YYYY年MM月DD日'));
//$(target+' span').html(moment().subtract('days', 29).format('YYYY年MM月DD日') + ' - ' + moment().format('YYYY年MM月DD日'));
})
};
//--------------------------form validate------------------------------
//jquery.validate默认设置
var validate_default_settings = {
errorElement: 'span',
errorClass: 'help-block help-block-error',
focusInvalid: true,
onkeyup: false,
errorPlacement: function (error, element) {
/*if(element.siblings("span.input-group-addon").size() > 0){//treeselect控件验证时隐藏错误span
error.addClass('hide');
}*/
if (element.parent(".input-group").size() > 0) {//带图标的输入框
error.insertAfter(element.parent(".input-group"));
} else if (element.attr("data-error-container")) { //指定container存放错误
error.appendTo(element.attr("data-error-container"));
} else if (element.parents('.radio-list').size() > 0) { //radio
error.appendTo(element.parents('.radio-list').attr("data-error-container"));
} else if (element.parents('.radio-inline').size() > 0) {
error.appendTo(element.parents('.radio-inline').attr("data-error-container"));
} else if (element.parents('.checkbox-list').size() > 0) {
error.appendTo(element.parents('.checkbox-list').attr("data-error-container"));
} else if (element.parents('.checkbox-inline').size() > 0) {
error.appendTo(element.parents('.checkbox-inline').attr("data-error-container"));
} else if(element.siblings("i.validate-icon").size() > 0){//图标方式提示错误
var icon = element.siblings("i.validate-icon");
icon.removeClass('fa-check').addClass("fa-warning");
icon.attr("data-original-title", error.text()).tooltip();
}else {
error.insertAfter(element);
}
},
invalidHandler: function (event, validator) {
},
highlight: function (element) {
$(element).closest('.form-group').removeClass("has-success").addClass('has-error');
},
success: function (label,element) {
if($(element).siblings("i.validate-icon").size() > 0){//图标方式提示错误
var icon = $(element).siblings("i.validate-icon");
$(element).closest('.form-group').removeClass('has-error').addClass('has-success');
icon.removeClass("fa-warning");
if($(element).tagName == 'INPUT') icon.addClass("fa-check");
icon.removeAttr("data-original-title");
}else {
label.closest('.form-group').removeClass('has-error');
}
}
};
//jquery.validate增加select2验证方法
$.validator.addMethod("selectOpt", function(value, element) {
return this.optional(element) || (value != "-1");
}, "请选择");
$.validator.addMethod("checkExists", function(value, element,p) {
if(APP.isEmpty(value)) return true;
if(APP.isEmpty(p)){
alert('请设置字段校验参数');
return false;
}
if(APP.isEmpty(p.url) && APP.isEmpty(p.stmID || p.stmid || p.stmId)){
alert('请设置字段校验参数中的url或者stmID');
return false;
}
var paramData = {param : (p.data || {})};
paramData.param[element.name] = value;
if(p.original) paramData.param["o_"+element.name] = p.original;//修改form中的初始值
if(p.joinField){//参与验证字段值
if($.isArray(p.joinField)){
for(var i=0;i<p.joinField.length;i++){
var joinField = $(p.joinField[i]);
paramData.param[joinField.attr("name")] = joinField.val();
if(joinField.data("original") && joinField.data("original") != joinField.val()) {//当参与验证字段值发生变化的时候,则取消当前字段的初始值验证
paramData.param["o_"+element.name] = "";
}
}
}else{
var joinField = $(p.joinField);
paramData.param[joinField.attr("name")] = joinField.val();
if(joinField.data("original") && joinField.data("original") != joinField.val()) {//当参与验证字段值发生变化的时候,则取消当前字段的初始值验证
paramData.param["o_"+element.name] = "";
}
}
}
if(!APP.isEmpty(p.url)){
return APP.postJson(p.url,paramData,false);
}else{
paramData.stmID = p.stmID || p.stmid || p.stmId;
return APP.isEmpty(APP.postJson('/app/common/selectMapByStmID',paramData,false));
}
}, "已存在");
/**
* 初始化form
* @param {Object} opts 初始化参数
* @param {Function} callback 成功回调函数
* @param {Function} errorback 失败回调函数
*/
$.fn.initForm = function (opts,callback,errorback) {
var _this = $(this);
if(opts.autoClear)_this.clearForm(true); //静态modal中的form 先清空再初始化
if(APP.isEmpty(opts)) opts = {};
if(APP.isEmpty(opts.fieldOpts)) opts.fieldOpts = {};//fieldOpts表单元素的初始化参数
var validate_settings = $.extend(true,validate_default_settings,opts.validate);
var _validate = _this.validate(validate_settings);
//_validate.resetForm();
var isInitValue = !APP.isEmpty(opts.formData);
var formField;
_this.find(opts.fieldSelector ? opts.fieldSelector : '*[name]').each(function(){
formField = $(this);
var _fieldName = formField.attr('name');
var _fieldRole = formField.attr('form-role');
if(formField.data("init")) formField.val(formField.data("init"));
if(isInitValue){
var _fieldValue = opts.formData[_fieldName];
if(_fieldName.indexOf(".") > 0){
var _fieldNameSp = _fieldName.split(".");
_fieldValue = opts.formData[_fieldNameSp[0]];
for(var i=1;_fieldValue && i<_fieldNameSp.length;i++){
_fieldValue = _fieldValue[_fieldNameSp[i]]
}
}
if(_fieldValue != undefined){
if(this.type == 'checkbox'){
var _checked = (_fieldValue == ((formField.data('on-value') !== undefined) ? formField.data('on-value')+'' : '1'));
formField.attr('checked',_checked);
if(formField.hasClass('bs-switch')){
formField.bootstrapSwitch('state', _checked);
formField.trigger("switch:change", [_checked]);//强制触发change方法赋值
}
}else{
formField.val(_fieldValue);
if(formField.data("init")) formField.data("init",_fieldValue);
}
formField.data("original",_fieldValue);//记录该字段的初始值,验证唯一性使用
}
}else{
formField.removeData("original");
}
//初始化js定义的验证规则,如有checkExists规则需要将original初始值作为入参
if(opts.rules && opts.rules[_fieldName]){
formField.rules( "remove");
if(opts.rules[_fieldName].checkExists){
opts.rules[_fieldName].checkExists.original = formField.val();
}
formField.rules( "add", opts.rules[_fieldName]);
}
if(_fieldRole == 'select'){
var _selectOpt = opts.fieldOpts[_fieldName] || {};
try{
if(formField.attr('placeholder') && !isInitValue) _selectOpt.placeholder = JSON.parse(formField.attr('placeholder'));
}catch(e){alert("placeholder属性值必须为json字符串");}
if(formField.data('json')) _selectOpt.jsonData = formField.data('json');
else if(formField.data('stmid')) _selectOpt.stmID = formField.data('stmid');
else if(formField.data('dict-type')){
_selectOpt.data = APP.getDictByType(formField.data('dict-type'));
if($.isArray(_selectOpt.data)){
for(var i=0;i<_selectOpt.data.length;i++){//select2使用text显示
_selectOpt.data[i].id = _selectOpt.data[i].value;
_selectOpt.data[i].text = _selectOpt.data[i].name;
}
}
}
formField.select(_selectOpt);
}
if(_fieldRole == 'treeSelect'){
var _treeSelectOpt = opts.fieldOpts[_fieldName] || {};
if(formField.data('stmid')) _treeSelectOpt.stmID = formField.data('stmid');
if(!formField.attr('id')){
alert("请指定treeSelect表单元素的id属性");
return;
}
formField.treeSelect(_treeSelectOpt);
}
});
var _in_modal = (_this.parents('.modal-dialog').size() > 0) ? '.modal-dialog' : '';
//提交是初始化bean的提交类型 add save delete 对应BaseBean 的form_action属性
if(opts.formAction){
if(_this.children(":hidden[name='form_action']").size()>0){
_this.children(":hidden[name='form_action']").val(opts.formAction);
}else{
_this.append("<input type='hidden' name='form_action' value='"+opts.formAction+"'>");
}
}
opts.url = APP.ctx + opts.url;
var form_opt = $.extend(true,{
ajax:true,
beforeSubmit : function(formData, jqForm, options){
APP.blockUI({target:_in_modal ? '.modal-dialog' : 'body',message:'提交中',gif : 'form-submit'});
return true;
},
type : 'post',
dataType : 'json',
includeHidden : true,
error:function(error){
if(APP.debug)console.log(error);
APP.unblockUI(_in_modal ? '.modal-dialog' : 'body');
APP.notice('',"系统错误 错误代码:"+error.status+" 错误名称:"+error.statusText,'error',_in_modal);
if(typeof errorback === 'function')errorback(error);
else if(opts.onError) opts.onError(error);
},
success:function(response, status){
if(APP.debug)console.log(response);
APP.unblockUI(_in_modal ? '.modal-dialog' : 'body');
if(response.OK){
APP.notice('',response[APP.MSG],'success',_in_modal);
//动态更新规格,否则会造成重复提交验证不通过
_this.find('.checkExists').each(function(){
var _c_form_field = $(this);
var _c_field_name = formField.attr('name');
if(opts.rules && opts.rules[_c_field_name] && opts.rules[_c_field_name].checkExists){
_c_form_field.rules( "remove","checkExists");
opts.rules[_c_field_name].checkExists.original = _c_form_field.val();
_c_form_field.rules( "add", opts.rules[_c_field_name]);
}
});
if(typeof callback === 'function')callback(response[APP.DATA]);
else if(opts.onSuccess) opts.onSuccess(response[APP.DATA]);
}else{
APP.notice('',response[APP.MSG],'warning',_in_modal);
if(typeof errorback === 'function')errorback(response,status);
else if(opts.onError) opts.onError(response,status);
}
}
},opts);
if(form_opt.ajax) _this.ajaxForm(form_opt);
}
/**
* form表单提交
* @param {String} url form提交url
* @param {Function} callback 回调函数
*/
$.fn.postForm = function(url,callback){
var _form = $(this);
if(_form.is('form')){
$.ajax({
type:"POST",
url:url,
dataType:"json",
contentType:"application/json",
data:JSON.stringify(FORM.formToJson(_form)),
success:function(ret,status){
callback(result,status);
},
error:function(xhr){
APP.notice('系统错误','错误代码['+xhr.status+'] 错误名称['+xhr.statusText+']','error');
}
});
}else
alert("对象不是表单");
};
//------------------------下拉列表----------------------
//初始化下拉列表语言
var select2_language = {
errorLoading: function () {return '无法载入结果。';},
inputTooLong: function (args) {
var overChars = args.input.length - args.maximum;
var message = '请删除' + overChars + '个字符';
return message;
},
inputTooShort: function (args) {
var remainingChars = args.minimum - args.input.length;
var message = '请再输入至少' + remainingChars + '个字符';
return message;
},
loadingMore: function () {return '载入更多结果…';},
maximumSelected: function (args) {
var message = '最多只能选择' + args.maximum + '个项目';
return message;
},
noResults: function () {return '未找到结果';},
searching: function () {return '搜索中…'; }
};
//select2下拉列表默认设置
var select2_default_opts = {
language: select2_language,
placeholder: {id:"-1",text:"请选择..."},
maximumSelectionLength: 50, //多选最多选择个数
allowClear:true,//自动显示清除按钮
width:"100%"
};
/**
* select2下拉列表
* @param {Object} opts select2参数,自定义参数如下
* jsonData[服务器或静态json文件(static/src/jsons/下)的url]
* stmID[sqlMap语句ID]
* url[服务器url实时获取数据(搜索框实时发送请求)]
*
* @return {Object} select控件
*/
function _fill_options(_select,opt_data){
_select.empty();
if($.isArray(opt_data)){
for(var i=0;i<opt_data.length;i++){
_select.append("<option value='"+opt_data[i].id+"'>"+opt_data[i].text+"</option>");
}
}
_select.change();
}
$.fn.select = function ( opts ) {
var _select = $(this);
require(['jquery/select2'],function(){
select2_default_opts.data = null;
select2_default_opts.ajax = null;
if(opts){
if((opts.jsonData||opts.stmID) && opts.data === undefined){//增加jsonData选项获取静态.json文件或者直接通过sqlMapper的sqlID获取数组数据
if(APP.isEmpty(opts.param)) opts.param = {};
if(_select.data("parent-for")){
var _parent_sel = $(_select.data("parent-for"));
opts.param[_parent_sel.attr("name").replace(".","_")] = _parent_sel.val();//替换参数中的. 否则mapper文件会无法识别
}
var url = opts.url || APP.stmidListUrl;
var type = "POST";
if(opts.jsonData && opts.jsonData != ""){
url = opts.jsonData;
type = "GET";
}
var paramData = {};
if(opts.stmID) paramData.stmID=opts.stmID;
if(opts.param) paramData.param=opts.param;
//同步方式防止数据量大是无法加载
APP.ajax(url,paramData,type,false,function(ret){
opts.data = ret;
});
}else if(opts.url && opts.ajax === undefined){//默认ajax方法
opts.ajax = {
delay: 250,
url : opts.url,
data: function (params) {
var queryParameters = {
q: params.term
}
return queryParameters;
}
};
}
}
//允许增加选项
if(opts.allowAdd || _select.data("allow-add")){
if(_select.parent('.input-group').length > 0){
_select.nextAll(".input-group-btn").remove();
_select.unwrap();
}
var _add_btn_id = "select-add-btn-"+new Date().getTime();
var _add_btn = $("<span class='input-group-btn' id='"+_add_btn_id+"'><a class='btn blue'><i class='fa fa-plus'></i></a></span>");
_select.wrap("<div class='input-group'></div>");
_add_btn.insertAfter(_select);
_add_btn.click(function(){
var _this = $(this);
var _adddiv = $("<div>");
var _addform = $("<div class='row'><div class='col-md-12'><div class='form-group'><label class='control-label col-md-3'>代码</label><div class='col-md-9'><input type='text' name='_select_type_code' class='form-control input-small'></div></div></div></div>"+
"<div class='row'><div class='col-md-12'><div class='form-group'><label class='control-label col-md-3'>名称</label><div class='col-md-9'><input type='text' name='_select_type_name' class='form-control input-small'></div></div></div></div>"+
"<a class='btn blue btn-block'> <i class='fa fa-plus'></i> 增加 </a>");
_adddiv.append(_addform);
_adddiv.children(".btn").click(function(){
var _code = _adddiv.find("input[name='_select_type_code']").val();
var _name = _adddiv.find("input[name='_select_type_name']").val();
if($.trim(_code) == "" || $.trim(_name) == ""){
_adddiv.closest(".popover").removeClass("info").addClass("error");
_adddiv.closest(".popover-content").prev().html("<i class='fa fa-plus'/> 代码或名称不能为空");
return;
}
if(_select.children("option[value='"+_code+"']").length > 0){
_adddiv.closest(".popover").removeClass("info").addClass("error");
_adddiv.closest(".popover-content").prev().html("<i class='fa fa-plus'/> 代码已存在")
return;
}
_adddiv.closest(".popover").removeClass("error");
_select.append("<option value='"+_code+"'>"+_name+"</option>");
_select.val(_code).trigger("change");
_this.popover('destroy');
})
APP.popover(_this,_adddiv.get(),"info","fa-plus","增加选择","auto right",235);
});
}
var default_opt = $.extend(true,select2_default_opts,opts);
_select.select2(default_opt);
if(_select.data("original") || _select.data("init")) _select.val((_select.data("original") || _select.data("init"))).trigger("change");
else _select.val(_select.val()).trigger("change");
_select.on("select2:select", function (e) {
if(_select.val() != '-1' && _select.val() != ''){
_select.closest('.form-group').removeClass('has-error');
_select.siblings("span#"+_select.attr("id")+"-error").remove();
_select.siblings("i.validate-icon").removeClass("fa-check fa-warning").removeAttr("data-original-title");
}
});
//级联下拉框
if(_select.data("parent-for")){
$(_select.data("parent-for")).on("change",function(){
opts.param[$(this).attr("name").replace(".","_")] = $(this).val(); //替换参数中的. 否则mapper文件会无法识别
var url = opts.url || APP.stmidListUrl;
var type = "POST";
var paramData = {};
if(opts.stmID) paramData.stmID=opts.stmID;
if(opts.param) paramData.param=opts.param;
//同步方式防止数据量大是无法加载
APP.ajax(url,paramData,type,false,function(ret){
_fill_options(_select,ret);
});
});
}
});
return _select;
};
FORM.getSelectedVal = function(sel){
require(['jquery/select2'],function(){
return $(sel).val();
})
}
FORM.getSelectedText = function(sel){
require(['jquery/select2'],function(){
return $(sel).find("option:selected").text();
})
}
/**
* 基于ztree的treeSelect
* 定义了默认的onClick方法
* @param {Object} settings ztree参数
* @param {String} treeId ztree控件ID
*/
$.fn.treeSelect = function(settings){
var _this = $(this);
var treeId = _this.attr('id');
var _parent = _this.parent();
var _sel_name = _this.attr("name");
//保存ID的隐藏控件
var _id_filed = _this.prevAll("input[data-id-for='"+_sel_name+"']");
if(_id_filed.length == 0){
alert("请在treeSelect元素之前添加id值控件");
return _this;
}
//保存treeSort的隐藏控件,用于树形排序(祖先节点sort-id)
var _tree_filed = _this.prevAll("input[data-tree-for='"+_sel_name+"']");
var _key_id = "id";
var _key_name = "name";
var _key_parent = "pId";
var _key_sort = "sort";
//自定义id、pid、name属性名称
if(!APP.isEmpty(_this.attr('tree-key-id')))_key_id = _this.attr('tree-key-id');
if(!APP.isEmpty(_this.attr('tree-key-name')))_key_name = _this.attr('tree-key-name');
if(!APP.isEmpty(_this.attr('tree-key-pid')))_key_parent = _this.attr('tree-key-pid');
if(!APP.isEmpty(_this.attr('tree-key-sort')))_key_sort = _this.attr('tree-key-sort');
if(settings && settings.data ){
if(settings.data.key && settings.data.key.name) _key_name = settings.data.key.name;
if(settings.data.simpleData){
if(settings.data.simpleData.idKey) _key_id = settings.data.simpleData.idKey;
if(settings.data.simpleData.pIdKey) _key_parent = settings.data.simpleData.pIdKey;
}
}
require(['app/tree'],function(){
//为当前控件增加必要的显示控件和树形下拉菜单
var inputGroup = $("<div class='input-group'></div>");//为当前控件增加图标
var inputIconDiv = $("<div class='input-icon'>");
var inputIcon = $("<i class='fa fa-times fa-fw'></i>");
inputIconDiv.append(inputIcon);
var selBtn = $("<span class='input-group-btn' style='cursor: pointer;'><button class='btn btn-success' type='button'><i class='fa fa-list'></i></span>");//图标-点击显示下拉菜单
inputIconDiv.append(_this);
_this.css("cursor","pointer");
//_this.appendTo(inputIconDiv);//将当前控件放入input-group
inputGroup.append(inputIconDiv);
inputGroup.append(selBtn);//增加图标
_parent.append(inputGroup);//将input-group放入当前控件原父节点
var menuContent = $("<div id='"+treeId+"_MenuContent' style='display:none;height: 150px;overflow-y: auto; background-color: #F5F5F5;'></div>");//下拉菜单显示层
var treeSel = $("<ul id='"+treeId+"' class='ztree' style='margin-top:0; width:100%;'></ul>");//ztree控件
menuContent.append(treeSel);//将树形放入下拉菜单显示层
_parent.append(menuContent);//将下拉菜单显示层放入当前节点原父节点
var treesel_settings = $.extend(true,{
data : {
key : {name : _key_name},
simpleData: {
enable: true,
idKey: _key_id,
pIdKey: _key_parent | onClick: function(e, tree_id, treeNode){//点击时将数据传入显示控件
var zTree = $.fn.zTree.getZTreeObj(tree_id),
nodes = zTree.getSelectedNodes(),
_name = "",
_id = "";
nodes.sort(function compare(a,b){return a[_key_id]-b[_key_id];});
for (var i=0, l=nodes.length; i<l; i++) {
_name += nodes[i][_key_name] + ",";
_id += nodes[i][_key_id] + ",";
}
if(_tree_filed.length == 1 ){ //如果为单选且页面定义了parentTree隐藏域,则为parentTree赋值
var _tree_sort = "";
if(!APP.isEmpty(treeNode[_key_sort])) _tree_sort = treeNode[_key_sort] + "-" + treeNode[_key_id];
else _tree_sort = "0-" + treeNode[_key_id];
if(!APP.isEmpty(treeNode['parentTree'])) _tree_sort = treeNode['parentTree'] + "," + _tree_sort;
else if(!APP.isEmpty(treeNode['parent_tree'])) _tree_sort = treeNode['parent_tree'] + "," + _tree_sort;
_tree_filed.val(_tree_sort);
}
if (_name.length > 0 ) _name = _name.substring(0, _name.length-1);
if (_id.length > 0 ) _id = _id.substring(0, _id.length-1);
_this.val(_name);
//validate字段去除
_this.closest('.form-group').removeClass('has-error');
_this.parent().siblings("span#"+_this.attr("id")+"-error").remove();
_this.parent().siblings("i.validate-icon").removeClass("fa-check fa-warning").removeAttr("data-original-title");
_id_filed.val(_id);
inputIcon.css('color','red');
if (settings.onClick) {
settings.onClick.toFunc().call(this, e, tree_id, treeNode);
}
},
onAsyncSuccess : function(e, tree_id, treeNode, msg){//数据同步成功后显示默认值
if(treeNode === undefined){//根节点同步时显示默认值
var zTree = $.fn.zTree.getZTreeObj(tree_id);
if(_id_filed.attr('value')){
var _selectedNode = zTree.getNodeByParam(_key_id,_id_filed.attr('value'),null);
zTree.selectNode(_selectedNode);
if(_selectedNode) {
_this.attr('value',_selectedNode[_key_name]);
inputIcon.css('color','red');
}
}
}
if (settings.onAsyncSuccess) {
settings.onAsyncSuccess.toFunc().call(this, e, tree_id, treeNode,msg);
}
}
}
},settings);
/**
* 树形下拉列表隐藏-for-treeSelect
* @param {String} content 下拉列表显示DIV的ID
*/
function _treeSelect_hideMenu(content) {
$("#"+content).fadeOut("fast");
$("body").unbind("mousedown", _treeSelect_onBodyDown);
}
/**
* 树形下拉列表触发隐藏点击事件-for-treeSelect
* @param {Object} event 事件对象-传入了menuContentID(下拉列表显示DIV的ID)数据
*/
function _treeSelect_onBodyDown(event) {
if (!(event.target.id == event.data.menuContentID || $(event.target).parents("#"+event.data.menuContentID).length>0)) {
_treeSelect_hideMenu(event.data.menuContentID);
}
}
//显示树形下拉菜单
function _treeSelect_showMenu(){
if(menuContent.css("display") == "none"){
var offset = _this.offset();
menuContent.css({width: + offset.width + "px",left:offset.left + "px", top:offset.top + _this.outerHeight() + "px"}).slideDown("fast");
$("body").bind("mousedown",{menuContentID:treeId+"_MenuContent"}, _treeSelect_onBodyDown);
}
}
//点击显示树形下拉菜单
selBtn.click(function() {
_treeSelect_showMenu();
});
//回车显示
_this.keypress(function(e){
if(e.keyCode == 13) _treeSelect_showMenu();
});
_this.click(function() {
_treeSelect_showMenu();
});
//删除数据
inputIcon.click(function() {
_this.val('');
_id_filed.val('');
if(_tree_filed.length == 1 ){
_tree_filed.val('');
}
$(this).css('color','#ccc');
});
var _treeObj = treeSel.tree(treesel_settings);
_this.treeObj = _treeObj;
});
return _this;
};
return FORM;
}); | }
},
callback: { | random_line_split |
app-form.js |
define('app/form',["app/common","moment","jquery/validate","jquery/form"],function(APP) {
var FORM = {
initDatePicker : function(ct){
APP.queryContainer(ct).find('[form-role="date"]').each(function(){
$(this).datePicker();
});
}
};
/**
* 将form格式化为json
* @param {Object} form form对象
* @return {Object} json对象
*/
FORM.formToJson = function(form){
var serializeObj={};
var array=form.serializeArray();
$(array).each(function(){
if(serializeObj[this.name]){
if($.isArray(serializeObj[this.name])){
serializeObj[this.name].push(this.value);
}else{
serializeObj[this.name]=[serializeObj[this.name],this.value];
}
}else{
serializeObj[this.name]=this.value;
}
});
return serializeObj;
};
//--------------------------------------datePicker------------------------------
/**
* 日期 bootstrap datePicker
* @param {Object} opts 设置参数
* @param {Function} callback 日期变化时调用的函数
*/
$.fn.datePicker = function(opts,callback){
var _target = $(this);
require(['bootstrap/datepicker'],function(){
var default_opt = $.extend(true,{
language:'zh-CN',autoclose: true,todayHighlight:true,format:'yyyy-mm-dd'
},opts);
var _event_type = "changeDate";
if(default_opt.viewType == "year"){
default_opt.startView = 2;
default_opt.minViewMode = 2;
_event_type="changeYear";
}else if(default_opt.viewType == "month"){
default_opt.startView = 1;
default_opt.minViewMode = 1;
_event_type="changeMonth";
}
_target.datepicker(default_opt);
var _default_date = default_opt.defaultDate ? default_opt.defaultDate : APP.formatDate('YYYY-MM-DD');
_target.datepicker('update',APP.formatDate(default_opt.format.toUpperCase(),_default_date));
_target.data('date-value',APP.formatDate('YYYY-MM-DD',_default_date));
_target.datepicker().on(_event_type,function(e){
if(_target.data('date-value') != APP.formatDate('YYYY-MM-DD',e.date)){
_target.data('date-value',APP.formatDate('YYYY-MM-DD',e.date));
if(typeof callback === 'function') callback(APP.formatDate('YYYY-MM-DD',e.date));
}
})
});
};
/**
* 日期区间 bootstrap dateRangePicker
* @param {Object} opts 设置参数
* @param {Function} callback 设置后调用的函数
*/
$.fn.dateRangePicker = function(opts,callback){
var _target = $(this);
require(['bootstrap/daterangepicker'],function(){
var default_opt = $.extend(true,{
opens: (APP.isRTL ? 'left' : 'right'),
startDate: moment().subtract('days', 29).format('YYYY-MM-DD'),
endDate: moment().format('YYYY-MM-DD'),
minDate: '2012-01-01',
maxDate: moment().format('YYYY-MM-DD'),
dateLimit: {days: 365},
showDropdowns: true,
showWeekNumbers: true,
timePicker: false,
timePickerIncrement: 1,
timePicker12Hour: true,
/*ranges: {
'今天': [moment().format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'昨天': [moment().subtract('days', 1).format('YYYY-MM-DD'), moment().subtract('days', 1).format('YYYY-MM-DD')],
'近7天': [moment().subtract('days', 6).format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'近30天': [moment().subtract('days', 29).format('YYYY-MM-DD'), moment().format('YYYY-MM-DD')],
'本月': [moment().startOf('month').format('YYYY-MM-DD'), moment().endOf('month').format('YYYY-MM-DD')],
'上月': [moment().subtract('month', 1).startOf('month').format('YYYY-MM-DD'), moment().subtract('month', 1).endOf('month').format('YYYY-MM-DD')]
},*/
buttonClasses: ['btn'],
applyClass: 'green',
cancelClass: 'default',
format: 'YYYY-MM-DD',
separator: ' 到 ',
locale: {
"applyLabel": '确定',
"cancelLabel": '取消',
"fromLabel": '从',
"toLabel": '到',
"customRangeLabel": '日期区间选择',
"daysOfWeek": ["日","一","二","三","四","五","六"],
"monthNames": ["1月","2月","3月","4月","5月","6月","7月","8月","9月","10月","11月","12月"],
"firstDay": 1
}
},opts);
_target.daterangepicker(default_opt,function(start, end, label){
if(typeof callback === 'function'){
callback(start, end, label);
}else{
_target.children('span').html(start.format('YYYY年MM月DD日') + ' - ' + end.format('YYYY年MM月DD日'));
//$(target+' span')
}
});
_target.children('span').html(moment().subtract('days', 29).format('YYYY年MM月DD日') + ' - ' + moment().format('YYYY年MM月DD日'));
//$(target+' span').html(moment().subtract('days', 29).format('YYYY年MM月DD日') + ' - ' + moment().format('YYYY年MM月DD日'));
})
};
//--------------------------form validate------------------------------
//jquery.validate默认设置
var validate_default_settings = {
errorElement: 'span',
errorClass: 'help-block help-block-error',
focusInvalid: true,
onkeyup: false,
errorPlacement: function (error, element) {
/*if(element.siblings("span.input-group-addon").size() > 0){//treeselect控件验证时隐藏错误span
error.addClass('hide');
}*/
if (element.parent(".input-group").size() > 0) {//带图标的输入框
error.insertAfter(element.parent(".input-group"));
} else if (element.attr("data-error-container")) { //指定container存放错误
error.appendTo(element.attr("data-error-container"));
} else if (element.parents('.radio-list').size() > 0) { //radio
error.appendTo(element.parents('.radio-list').attr("data-error-container"));
} else if (element.parents('.radio-inline').size() > 0) {
error.appendTo(element.parents('.radio-inline').attr("data-error-container"));
} else if (element.parents('.checkbox-list').size() > 0) {
error.appendTo(element.parents('.checkbox-list').attr("data-error-container"));
} else if (element.parents('.checkbox-inline').size() > 0) {
error.appendTo(element.parents('.checkbox-inline').attr("data-error-container"));
} else if(element.siblings("i.validate-icon").size() > 0){//图标方式提示错误
var icon = element.siblings("i.validate-icon");
icon.removeClass('fa-check').addClass("fa-warning");
icon.attr("data-original-title", error.text()).tooltip();
}else {
error.insertAfter(element);
}
},
invalidHandler: function (event, validator) {
},
highlight: function (element) {
$(element).closest('.form-group').removeClass("has-success").addClass('has-error');
},
success: function (label,element) {
if($(element).siblings("i.validate-icon").size() > 0){//图标方式提示错误
var icon = $(element).siblings("i.validate-icon");
$(element).closest('.form-group').removeClass('has-error').addClass('has-success');
icon.removeClass("fa-warning");
if($(element).tagName == 'INPUT') icon.addClass("fa-check");
icon.removeAttr("data-original-title");
}else {
label.closest('.form-group').removeClass('has-error');
}
}
};
//jquery.validate增加select2验证方法
$.validator.addMethod("selectOpt", function(value, element) {
return this.optional(element) || (value != "-1");
}, "请选择");
$.validator.addMethod("checkExists", function(value, element,p) {
if(APP.isEmpty(value)) return true;
if(APP.isEmpty(p)){
alert('请设置字段校验参数');
return false;
}
if(APP.isEmpty(p.url) && APP.isEmpty(p.stmID || p.stmid || p.stmId)){
alert('请设置字段校验参数中的url或者stmID');
return false;
}
var paramData = {param : (p.data || {})};
paramData.param[element.name] = value;
if(p.original) paramData.param["o_"+element.name] = p.original;//修改form中的初始值
if(p.joinField){//参与验证字段值
if($.isArray(p.joinField)){
for(var i=0;i<p.joinField.length;i++){
var joinField = $(p.joinField[i]);
paramData.param[joinField.attr("name")] = joinField.val();
if(joinField.data("original") && joinField.data("original") != joinField.val()) {//当参与验证字段值发生变化的时候,则取消当前字段的初始值验证
paramData.param["o_"+element.name] = "";
}
}
}else{
var joinField = $(p.joinField);
paramData.param[joinField.attr("name")] = joinField.val();
if(joinField.data("original") && joinField.data("original") != joinField.val()) {//当参与验证字段值发生变化的时候,则取消当前字段的初始值验证
paramData.param["o_"+element.name] = "";
}
}
}
if(!APP.isEmpty(p.url)){
return APP.postJson(p.url,paramData,false);
}else{
paramData.stmID = p.stmID || p.stmid || p.stmId;
return APP.isEmpty(APP.postJson('/app/common/selectMapByStmID',paramData,false));
}
}, "已存在");
/**
* 初始化form
* @param {Object} opts 初始化参数
* @param {Function} callback 成功回调函数
* @param {Function} errorback 失败回调函数
*/
$.fn.initForm = function (opts,callback,errorback) {
var _this = $(this);
if(opts.autoClear)_this.clearForm(true); //静态modal中的form 先清空再初始化
if(APP.isEmpty(opts)) opts = {};
if(APP.isEmpty(opts.fieldOpts)) opts.fieldOpts = {};//fieldOpts表单元素的初始化参数
var validate_settings = $.extend(true,validate_default_settings,opts.validate);
var _validate = _this.validate(validate_settings);
//_validate.resetForm();
var isInitValue = !APP.isEmpty(opts.formData);
var formField;
_this.find(opts.fieldSelector ? opts.fieldSelector : '*[name]').each(function(){
formField = $(this);
var _fieldName = formField.attr('name');
var _fieldRole = formField.attr('form-role');
if(formField.data("init")) formField.val(formField.data("init"));
if(isInitValue){
var _fieldValue = opts.formData[_fieldName];
if(_fieldName.indexOf(".") > 0){
var _fieldNameSp = _fieldName.split(".");
_fieldValue = opts.formData[_fieldNameSp[0]];
for(var i=1;_fieldValue && i<_fieldNameSp.length;i++){
_fieldValue = _fieldValue[_fieldNameSp[i]]
}
}
if(_fieldValue != undefined){
if(this.type == 'checkbox'){
var _checked = (_fieldValue == ((formField.data('on-value') !== undefined) ? formField.data('on-value')+'' : '1'));
formField.attr('checked',_checked);
if(formField.hasClass('bs-switch')){
formField.bootstrapSwitch('state', _checked);
formField.trigger("switch:change", [_checked]);//强制触发change方法赋值
}
}else{
formField.val(_fieldValue);
if(formField.data("init")) formField.data("init",_fieldValue);
}
formField.data("original",_fieldValue);//记录该字段的初始值,验证唯一性使用
}
}else{
formField.removeData("original");
}
//初始化js定义的验证规则,如有checkExists规则需要将original初始值作为入参
if(opts.rules && opts.rules[_fieldName]){
formField.rules( "remove");
if(opts.rules[_fieldName].checkExists){
opts.rules[_fieldName].checkExists.original = formField.val();
}
formField.rules( "add", opts.rules[_fieldName]);
}
if(_fieldRole == 'select'){
var _selectOpt = opts.fieldOpts[_fieldName] || {};
try{
if(formField.attr('placeholder') && !isInitValue) _selectOpt.placeholder = JSON.parse(formField.attr('placeholder'));
}catch(e){alert("placeholder属性值必须为json字符串");}
if(formField.data('json')) _selectOpt.jsonData = formField.data('json');
else if(formField.data('stmid')) _selectOpt.stmID = formField.data('stmid');
else if(formField.data('dict-type')){
_selectOpt.data = APP.getDictByType(formField.data('dict-type'));
if($.isArray(_selectOpt.data)){
for(var i=0;i<_selectOpt.data.length;i++){//select2使用text显示
_selectOpt.data[i].id = _selectOpt.data[i].value;
_selectOpt.data[i].text = _selectOpt.data[i].name;
}
}
}
formField.select(_selectOpt);
}
if(_fieldRole == 'treeSelect'){
var _treeSelectOpt = opts.fieldOpts[_fieldName] || {};
if(formField.data('stmid')) _treeSelectOpt.stmID = formField.data('stmid');
if(!formField.attr('id')){
alert("请指定treeSelect表单元素的id属性");
return;
}
formField.treeSelect(_treeSelectOpt);
}
});
var _in_modal = (_this.parents('.modal-dialog').size() > 0) ? '.modal-dialog' : '';
//提交是初始化bean的提交类型 add save delete 对应BaseBean 的form_action属性
if(opts.formAction){
if(_this.children(":hidden[name='form_action']").size()>0){
_this.children(":hidden[name='form_action']").val(opts.formAction);
}else{
_this.append("<input type='hidden' name='form_action' value='"+opts.formAction+"'>");
}
}
opts.url = APP.ctx + opts.url;
var form_opt = $.extend(true,{
ajax:true,
beforeSubmit : function(formData, jqForm, options){
APP.blockUI({target:_in_modal ? '.modal-dialog' : 'body',message:'提交中',gif : 'form-submit'});
return true;
},
type : 'post',
dataType : 'json',
includeHidden : true,
error:function(error){
if(APP.debug)console.log(error);
APP.unblockUI(_in_modal ? '.modal-dialog' : 'body');
APP.notice('',"系统错误 错误代码:"+error.status+" 错误名称:"+error.statusText,'error',_in_modal);
if(typeof errorback === 'function')errorback(error);
else if(opts.onError) opts.onError(error);
},
success:function(response, status){
if(APP.debug)console.log(response);
APP.unblockUI(_in_modal ? '.modal-dialog' : 'body');
if(response.OK){
APP.notice('',response[APP.MSG],'success',_in_modal);
//动态更新规格,否则会造成重复提交验证不通过
_this.find('.checkExists').each(function(){
var _c_form_field = $(this);
var _c_field_name = formField.attr('name');
if(opts.rules && opts.rules[_c_field_name] && opts.rules[_c_field_name].checkExists){
_c_form_field.rules( "remove","checkExists");
opts.rules[_c_field_name].checkExists.original = _c_form_field.val();
_c_form_field.rules( "add", opts.rules[_c_field_name]);
}
});
if(typeof callback === 'function')callback(response[APP.DATA]);
else if(opts.onSuccess) opts.onSuccess(response[APP.DATA]);
}else{
APP.notice('',response[APP.MSG],'warning',_in_modal);
if(typeof errorback === 'function')errorback(response,status);
else if(opts.onError) opts.onError(response,status);
}
}
},opts);
if(form_opt.ajax) _this.ajaxForm(form_opt);
}
/**
* form表单提交
* @param {String} url form提交url
* @param {Function} callback 回调函数
*/
$.fn.postForm = function(url,callback){
var _form = $(this);
if(_form.is('form')){
$.ajax({
type:"POST",
url:url,
dataType:"json",
contentType:"application/json",
data:JSON.stringify(FORM.formToJson(_form)),
success:function(ret,status){
callback(result,status);
},
error:function(xhr){
APP.notice('系统错误','错误代码['+xhr.status+'] 错误名称['+xhr.statusText+']','error');
}
});
}else
alert("对象不是表单");
};
//------------------------下拉列表----------------------
//初始化下拉列表语言
var select2_language = {
errorLoading: function () {return '无法载入结果。';},
inputTooLong: function (args) {
var overChars = args.input.length - args.maximum;
var message = '请删除' + overChars + '个字符';
return message;
},
inputTooShort: function (args) {
var remainingChars = args.minimum - args.input.length;
var message = '请再输入至少' + remainingChars + '个字符';
return message;
},
loadingMore: function () {return '载入更多结果…';},
maximumSelected: function (args) {
var message = '最多只能选择' + args.maximum + '个项目';
return message;
},
noResults: function () {return '未找到结果';},
searching: function () {return '搜索中…'; }
};
//select2下拉列表默认设置
var select2_default_opts = {
language: select2_language,
placeholder: {id:"-1",text:"请选择..."},
maximumSelectionLength: 50, //多选最多选择个数
allowClear:true,//自动显示清除按钮
width:"100%"
};
/**
* select2下拉列表
* @param {Object} opts select2参数,自定义参数如下
* jsonData[服务器或静态json文件(static/src/jsons/下)的url]
* stmID[sqlMap语句ID]
* url[服务器url实时获取数据(搜索框实时发送请求)]
*
* @return {Object} select控件
*/
function _fill_options(_select,opt_data){
_select.empty();
if($.isArray(opt_data)){
for(var i=0;i<opt_data.length;i++){
_select.append("<option value='"+opt_data[i].id+"'>"+opt_data[i].text+"</option>");
}
}
_select.change();
}
$.fn.select = function ( opts ) {
var _select = $(this);
require(['jquery/select2'],function(){
select2_default_opts.data = null;
select2_default_opts.ajax = null;
if(opts){
if((opts.jsonData||opts.stmID) && opts.data === undefined){//增加jsonData选项获取静态.json文件或者直接通过sqlMapper的sqlID获取数组数据
if(APP.isEmpty(opts.param)) opts.param = {};
if(_select.data("parent-for")){
var _parent_sel = $(_select.data("parent-for"));
opts.param[_parent_sel.attr("name").replace(".","_")] = _parent_sel.val();//替换参数中的. 否则mapper文件会无法识别
}
var url = opts.url || APP.stmidListUrl;
var type = "POST";
if(opts.jsonData && opts.jsonData != ""){
url = opts.jsonData;
type = "GET";
}
var paramData = {};
if(opts.stmID) paramData.stmID=opts.stmID;
if(opts.param) paramData.param=opts.param;
//同步方式防止数据量大是无法加载
APP.ajax(url,paramData,type,false,function(ret){
opts.data = ret;
});
}else if(opts.url && opts.ajax === undefined){//默认ajax方法
opts.ajax = {
delay: 250,
url : opts.url,
data: function (params) {
var queryParameters = {
q: params.term
}
return queryParameters;
}
};
}
}
//允许增加选项
if(opts.allowAdd || _select.data("allow-add")){
if(_select.parent('.input-group').length > 0){
_select.nextAll(".input-group-btn").remove();
_select.unwrap();
}
var _add_btn_id = "select-add-btn-"+new Date().getTime();
var _add_btn = $("<span class='input-group-btn' id='"+_add_btn_id+"'><a class='btn blue'><i class='fa fa-plus'></i></a></span>");
_select.wrap("<div class='input-group'></div>");
_add_btn.insertAfter(_select);
_add_btn.click(function(){
var _this = $(this);
var _adddiv = $("<div>");
var _addform = $("<div class='row'><div class='col-md-12'><div class='form-group'><label class='control-label col-md-3'>代码</label><div class='col-md-9'><input type='text' name='_select_type_code' class='form-control input-small'></div></div></div></div>"+
"<div class='row'><div class='col-md-12'><div class='form-group'><label class='control-label col-md-3'>名称</label><div class='col-md-9'><input type='text' name='_select_type_name' class='form-control input-small'></div></div></div></div>"+
"<a class='btn blue btn-block'> <i class='fa fa-plus'></i> 增加 </a>");
_adddiv.append(_addform);
_adddiv.children(".btn").click(function(){
var _code = _adddiv.find("input[name='_select_type_code']").val();
var _name = _adddiv.find("input[name='_select_type_name']").val();
if($.trim(_code) == "" || $.trim(_name) == ""){
_adddiv.closest(".popover").removeClass("info").addClass("error");
_adddiv.closest(".popover-content").prev().html("<i class='fa fa-plus'/> 代码或名称不能为空");
return;
}
if(_select.children("option[value='"+_code+"']").length > 0){
_adddiv.closest(".popover").removeClass("info").addClass("error");
_adddiv.closest(".popover-content").prev().html("<i class='fa fa-plus'/> 代码已存在")
return;
}
_adddiv.closest(".popover").removeClass("error");
_select.append("<option value='"+_code+"'>"+_name+"</option>");
_select.val(_code).trigger("change");
_this.popover('destroy');
})
APP.popover(_this,_adddiv.get(),"info","fa-plus","增加选择","auto right",235);
});
}
var default_opt = $.extend(true,select2_default_opts,opts);
_select.select2(default_opt);
if(_select.data("original") || _select.data("init")) _select.val((_select.data("original") || _select.data("init"))).trigger("change");
else _select.val(_select.val()).trigger("change");
_select.on("select2:select", function (e) {
if(_select.val() != '-1' && _select.val() != ''){
_select.closest('.form-group').removeClass('has-error');
_select.siblings("span#"+_select.attr("id")+"-error").remove();
_select.siblings("i.validate-icon").removeClass("fa-check fa-warning").removeAttr("data-original-title");
}
});
//级联下拉框
if(_select.data("parent-for")){
$(_select.data("parent-for")).on("change",function(){
opts.param[$(this).attr("name").replace(".","_")] = $(this).val(); //替换参数中的. 否则mapper文件会无法识别
var url = opts.url || APP.stmidListUrl;
var type = "POST";
var paramData = {};
if(opts.stmID) paramData.stmID=opts.stmID;
if(opts.param) paramData.param=opts.param;
//同步方式防止数据量大是无法加载
APP.ajax(url,paramData,type,false,function(ret){
_fill_options(_select,ret);
});
});
}
});
return _select;
};
FORM.getSelectedVal = function(sel){
require(['jquery/select2'],function(){
return $(sel).val();
})
}
FORM.getSelectedText = function(sel){
require(['jquery/select2'],function(){
return $(sel).find("option:selected").text();
})
}
/**
* 基于ztree的treeSelect
* 定义了默认的onClick方法
* @param {Object} settings ztree参数
* @param {String} treeId ztree控件ID
*/
$.fn.treeSelect = function(settings){
var _this = $(this);
var treeId = _this.attr('id');
var _parent = _this.parent();
var _sel_name = _this.attr("name");
//保存ID的隐藏控件
var _id_filed = _this.prevAll("input[data-id-for='"+_sel_name+"']");
if(_id_filed.length == 0){
alert("请在treeSelect元素之前添加id值控件");
return _this;
}
//保存treeSort的隐藏控件,用于树形排序(祖先节点sort-id)
var _tree_filed = _this.prevAll("input[data-tree-for='"+_sel_name+"']");
var _key_id = "id";
var _key_name = "name";
var _key_parent = "pId";
var _key_sort = "sort";
//自定义id、pid、name属性名称
if(!APP.isEmpty(_this.attr('tree-key-id')))_key_id = _this.attr('tree-key-id');
if(!APP.isEmpty(_this.attr('tree-key-name')))_key_name = _this.attr('tree-key-name');
if(!APP.isEmpty(_this.attr('tree-key-pid')))_key_parent = _this.attr('tree-key-pid');
if(!APP.isEmpty(_this.attr('tree-key-sort')))_key_sort = _this.attr('tree-key-sort');
if(settings && settings.data ){
if(settings.data.key && settings.data.key.name) _key_name = settings.data.key.name;
if(settings.data.simpleData){
if(settings.data.simpleData.idKey) _key_id = settings.data.simpleData.idKey;
if(settings.data.simpleData.pIdKey) _key_parent = settings.data.simpleData.pIdKey;
}
}
require(['app/tree'],function(){
//为当前控件增加必要的显示控件和树形下拉菜单
var inputGroup = $("<div class='input-group'></div>");//为当前控件增加图标
var inputIconDiv = $("<div class='input-icon'>");
var inputIcon = $("<i class='fa fa-times fa-fw'></i>");
inputIconDiv.append(inputIcon);
var selBtn = $("<span class='input-group-btn' style='cursor: pointer;'><button class='btn btn-success' type='button'><i class='fa fa-list'></i></span>");//图标-点击显示下拉菜单
inputIconDiv.append(_this);
_this.css("cursor","pointer");
//_this.appendTo(inputIconDiv);//将当前控件放入input-group
inputGroup.append(inputIconDiv);
inputGroup.append(selBtn);//增加图标
_parent.append(inputGroup);//将input-group放入当前控件原父节点
var menuContent = $("<div id='"+treeId+"_MenuContent' style='display:none;height: 150px;overflow-y: auto; background-color: #F5F5F5;'></div>");//下拉菜单显示层
var treeSel = $("<ul id='"+treeId+"' class='ztree' style='margin-top:0; width:100%;'></ul>");//ztree控件
menuContent.append(treeSel);//将树形放入下拉菜单显示层
_parent.append(menuContent);//将下拉菜单显示层放入当前节点原父节点
var treesel_settings = $.extend(true,{
data : {
key : {name : _key_name},
simpleData: {
enable: true,
idKey: _key_id,
pIdKey: _key_parent
}
},
callback: {
onClick: function(e, tree_id, treeNode){//点击时将数据传入显示控件
var zTree = $.fn.zTree.getZTreeObj(tree_id),
nodes = zTree.getSelectedNodes(),
_name = "",
_id = "";
nodes.sort(function compare(a,b){return a[_key_id]-b[_key_id];});
for (var i=0, l=nodes.length; i<l; i++) {
_name += nodes[i][_key_name] + ",";
_id += nodes[i][_key_id] + ",";
}
if(_tree_filed.length == 1 ){ //如果为单选且页面定义了parentTree隐藏域,则为parentTree赋值
var _tree_sort = "";
if(!APP.isEmpty(treeNode[_key_sort])) _tree_sort = treeNode[_key_sort] + "-" + treeNode[_key_id];
else _tree_sort = "0-" + treeNode[_key_id];
if(!APP.isEmpty(treeNode['parentTree'])) _tree_sort = treeNode['parentTree'] + "," + _tree_sort;
else if(!APP.isEmpty(treeNode['parent_tree'])) _tree_sort = treeNode['parent_tree'] + "," + _tree_sort;
_tree_filed.val(_tree_sort);
}
if (_name.length > 0 ) _name = _name.substring(0, _name.length-1);
if (_id.length > 0 ) _id = _id.substring(0, _id.length-1);
_this.val(_name);
//validate字段去除
_this.closest('.form-group').removeClass('has-error');
_this.parent().siblings("span#"+_this.attr("id")+"-error").remove();
_this.parent().siblings("i.validate-icon").removeClass("fa-check fa-warning").removeAttr("data-original-title");
_id_filed.val(_id);
inputIcon.css('color','red');
if (settings.onClick) {
settings.onClick.toFunc().call(this, e, tree_id, treeNode);
}
},
onAsyncSuccess : function(e, tree_id, treeNode, msg){//数据同步成功后显示默认值
if(treeNode === undefined){//根节点同步时显示默认值
var zTree = $.fn.zTree.getZTreeObj(tree_id);
if(_id_filed.attr('value')){
var _selectedNode = zTree.getNodeByParam(_key_id,_id_filed.attr('value'),null);
zTree.selectNode(_selectedNode);
if(_selectedNode) {
_this.attr('value',_selectedNode[_key_name]);
inputIcon.css('color','red');
}
}
}
if (settings.onAsyncSuccess) {
settings.onAsyncSuccess.toFunc().call(this, e, tree_id, treeNode,msg);
}
}
}
},settings);
/**
* 树形下拉列表隐藏-for-treeSelect
* @param {String} content 下拉列表显示DIV的ID
*/
function _treeSelect_hideMenu(content) {
$("#"+content).fadeOut("fast");
$("body").unbind("mousedown", _treeSelect_onBodyDown);
}
/**
* 树形下拉列表触发隐藏点击事件-for-treeSelect
* @param {Object} event 事件对象-传入了menuContentID(下拉列表显示DIV的ID)数据
*/
function _treeSelect_onBodyDown(event) {
if (!(event.target.id == event.data.menuContentID || $(event.target).parents("#"+event.data.menuContentID).length>0)) {
_treeSelect_hideMenu(event.data.menuContentID);
}
}
//显示树形下拉菜单
function _treeSelect_showMenu(){
if(menuContent.css("display") == "none"){
var offset = _this.offset();
menuContent.css({width: + offset.width + "px",left:offset.left + "px", top:offset.top + _this.outerHeight() + "px"}).slideDown("fast");
$("body").bind("mousedown",{menuContentID:treeId+"_MenuContent"}, _treeSelect_onBodyDown);
}
}
//点击显示树形下拉菜单
selBtn.click(function() {
_treeSelect_showMenu();
});
//回车显示
_this.keypress(function(e){
if(e.keyCode == 13) _treeSelect_showMenu();
});
_this.click(function() {
_treeSelect_showMenu();
});
//删除数据
inputIcon.click(function() {
_this.val('');
_id_filed.val('');
if(_tree_filed.length == 1 ){
_tree_filed.val('');
}
$(this).css('color','#ccc');
});
var _treeObj = treeSel.tree(treesel_settings);
_this.treeObj = _treeObj;
});
return _this;
};
return FORM;
});
| identifier_name | ||
instance.rs | use crate::{
create_idx_struct,
data_structures::{cont_idx_vec::ContiguousIdxVec, skipvec::SkipVec},
small_indices::SmallIdx,
};
use anyhow::{anyhow, ensure, Error, Result};
use log::{info, trace};
use serde::Deserialize;
use std::{
fmt::{self, Display, Write as _},
io::{BufRead, Write},
mem,
time::Instant,
};
create_idx_struct!(pub NodeIdx);
create_idx_struct!(pub EdgeIdx);
create_idx_struct!(pub EntryIdx);
#[derive(Debug)]
struct CompressedIlpName<T>(T);
impl<T: SmallIdx> Display for CompressedIlpName<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
const CHARS: &[u8] = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
let mut val = self.0.idx();
while val != 0 {
f.write_char(char::from(CHARS[val % CHARS.len()]))?;
val /= CHARS.len();
}
Ok(())
}
}
#[derive(Debug)]
struct ParsedEdgeHandler {
edge_incidences: Vec<SkipVec<(NodeIdx, EntryIdx)>>,
node_degrees: Vec<usize>,
}
impl ParsedEdgeHandler {
fn handle_edge(&mut self, node_indices: impl IntoIterator<Item = Result<usize>>) -> Result<()> {
let incidences = SkipVec::try_sorted_from(node_indices.into_iter().map(|idx_result| {
idx_result.and_then(|node_idx| {
ensure!(
node_idx < self.node_degrees.len(),
"invalid node idx in edge: {}",
node_idx
);
Ok((NodeIdx::from(node_idx), EntryIdx::INVALID))
})
}))?;
ensure!(incidences.len() > 0, "edges may not be empty");
for (_, (node, _)) in &incidences {
self.node_degrees[node.idx()] += 1;
}
self.edge_incidences.push(incidences);
Ok(())
}
}
#[derive(Debug, Deserialize)]
struct JsonInstance {
num_nodes: usize,
edges: Vec<Vec<usize>>,
}
#[derive(Clone, Debug)]
pub struct Instance {
nodes: ContiguousIdxVec<NodeIdx>,
edges: ContiguousIdxVec<EdgeIdx>,
node_incidences: Vec<SkipVec<(EdgeIdx, EntryIdx)>>,
edge_incidences: Vec<SkipVec<(NodeIdx, EntryIdx)>>,
}
impl Instance {
fn load(
num_nodes: usize,
num_edges: usize,
read_edges: impl FnOnce(&mut ParsedEdgeHandler) -> Result<()>,
) -> Result<Self> {
let mut handler = ParsedEdgeHandler {
edge_incidences: Vec::with_capacity(num_edges),
node_degrees: vec![0; num_nodes],
};
read_edges(&mut handler)?;
let ParsedEdgeHandler {
mut edge_incidences,
node_degrees,
} = handler;
let mut node_incidences: Vec<_> = node_degrees
.iter()
.map(|&len| SkipVec::with_len(len))
.collect();
let mut rem_node_degrees = node_degrees;
for (edge, incidences) in edge_incidences.iter_mut().enumerate() {
let edge = EdgeIdx::from(edge);
for (edge_entry_idx, edge_entry) in incidences.iter_mut() {
let node = edge_entry.0.idx();
let node_entry_idx = node_incidences[node].len() - rem_node_degrees[node];
rem_node_degrees[node] -= 1;
edge_entry.1 = EntryIdx::from(node_entry_idx);
node_incidences[node][node_entry_idx] = (edge, EntryIdx::from(edge_entry_idx));
}
}
Ok(Self {
nodes: (0..num_nodes).map(NodeIdx::from).collect(),
edges: (0..num_edges).map(EdgeIdx::from).collect(),
node_incidences,
edge_incidences,
})
}
pub fn load_from_text(mut reader: impl BufRead) -> Result<Self> {
let time_before = Instant::now();
let mut line = String::new();
reader.read_line(&mut line)?;
let mut numbers = line.split_ascii_whitespace().map(str::parse);
let num_nodes = numbers
.next()
.ok_or_else(|| anyhow!("Missing node count"))??;
let num_edges = numbers
.next()
.ok_or_else(|| anyhow!("Missing edge count"))??;
ensure!(
numbers.next().is_none(),
"Too many numbers in first input line"
);
let instance = Self::load(num_nodes, num_edges, |handler| {
for _ in 0..num_edges {
line.clear();
reader.read_line(&mut line)?;
let mut numbers = line
.split_ascii_whitespace()
.map(|s| s.parse::<usize>().map_err(Error::from));
// Skip degree
numbers
.next()
.ok_or_else(|| anyhow!("empty edge line in input, expected degree"))??;
handler.handle_edge(numbers)?;
}
Ok(())
})?;
info!(
"Loaded text instance with {} nodes, {} edges in {:.2?}",
num_nodes,
num_edges,
time_before.elapsed(),
);
Ok(instance)
}
pub fn load_from_json(mut reader: impl BufRead) -> Result<Self> {
let time_before = Instant::now();
// Usually faster for large inputs, see https://github.com/serde-rs/json/issues/160
let mut text = String::new();
reader.read_to_string(&mut text)?;
let JsonInstance { num_nodes, edges } = serde_json::from_str(&text)?;
let num_edges = edges.len();
let instance = Self::load(num_nodes, num_edges, |handler| {
for edge in edges {
handler.handle_edge(edge.into_iter().map(Ok))?;
}
Ok(())
})?;
info!(
"Loaded json instance with {} nodes, {} edges in {:.2?}",
num_nodes,
num_edges,
time_before.elapsed(),
);
Ok(instance)
}
pub fn num_edges(&self) -> usize {
self.edges.len()
}
pub fn num_nodes_total(&self) -> usize {
self.node_incidences.len()
}
pub fn num_edges_total(&self) -> usize { | }
/// Edges incident to a node, sorted by increasing indices.
pub fn node(
&self,
node: NodeIdx,
) -> impl Iterator<Item = EdgeIdx> + ExactSizeIterator + Clone + '_ {
self.node_incidences[node.idx()]
.iter()
.map(|(_, (edge, _))| *edge)
}
/// Nodes incident to an edge, sorted by increasing indices.
pub fn edge(
&self,
edge: EdgeIdx,
) -> impl Iterator<Item = NodeIdx> + ExactSizeIterator + Clone + '_ {
self.edge_incidences[edge.idx()]
.iter()
.map(|(_, (node, _))| *node)
}
/// Alive nodes in the instance, in arbitrary order.
pub fn nodes(&self) -> &[NodeIdx] {
&self.nodes
}
/// Alive edges in the instance, in arbitrary order.
pub fn edges(&self) -> &[EdgeIdx] {
&self.edges
}
pub fn node_degree(&self, node: NodeIdx) -> usize {
self.node_incidences[node.idx()].len()
}
pub fn edge_size(&self, edge: EdgeIdx) -> usize {
self.edge_incidences[edge.idx()].len()
}
/// Deletes a node from the instance.
pub fn delete_node(&mut self, node: NodeIdx) {
trace!("Deleting node {}", node);
for (_idx, (edge, entry_idx)) in &self.node_incidences[node.idx()] {
self.edge_incidences[edge.idx()].delete(entry_idx.idx());
}
self.nodes.delete(node.idx());
}
/// Deletes an edge from the instance.
pub fn delete_edge(&mut self, edge: EdgeIdx) {
trace!("Deleting edge {}", edge);
for (_idx, (node, entry_idx)) in &self.edge_incidences[edge.idx()] {
self.node_incidences[node.idx()].delete(entry_idx.idx());
}
self.edges.delete(edge.idx());
}
/// Restores a previously deleted node.
///
/// All restore operations (node or edge) must be done in reverse order of
/// the corresponding deletions to produce sensible results.
pub fn restore_node(&mut self, node: NodeIdx) {
trace!("Restoring node {}", node);
for (_idx, (edge, entry_idx)) in self.node_incidences[node.idx()].iter().rev() {
self.edge_incidences[edge.idx()].restore(entry_idx.idx());
}
self.nodes.restore(node.idx());
}
/// Restores a previously deleted edge.
///
/// All restore operations (node or edge) must be done in reverse order of
/// the corresponding deletions to produce sensible results.
pub fn restore_edge(&mut self, edge: EdgeIdx) {
trace!("Restoring edge {}", edge);
for (_idx, (node, entry_idx)) in self.edge_incidences[edge.idx()].iter().rev() {
self.node_incidences[node.idx()].restore(entry_idx.idx());
}
self.edges.restore(edge.idx());
}
/// Deletes all edges incident to a node.
///
/// The node itself must have already been deleted.
pub fn delete_incident_edges(&mut self, node: NodeIdx) {
// We want to iterate over the incidence of `node` while deleting
// edges, which in turn changes node incidences. This is safe, since
// `node` itself was already deleted. To make the borrow checker
// accept this, we temporarily move `node` incidence to a local
// variable, replacing it with an empty list. This should not be much
// slower than unsafe alternatives, since an incidence list is only
// 28 bytes large.
trace!("Deleting all edges incident to {}", node);
debug_assert!(
self.nodes.is_deleted(node.idx()),
"Node passed to delete_incident_edges must be deleted"
);
let incidence = mem::take(&mut self.node_incidences[node.idx()]);
for (_, (edge, _)) in &incidence {
self.delete_edge(*edge);
}
self.node_incidences[node.idx()] = incidence;
}
/// Restores all incident edges to a node.
///
/// This reverses the effect of `delete_incident_edges`. As with all other
/// `restore_*` methods, this must be done in reverse order of deletions.
/// In particular, the node itself must still be deleted.
pub fn restore_incident_edges(&mut self, node: NodeIdx) {
trace!("Restoring all edges incident to {}", node);
debug_assert!(
self.nodes.is_deleted(node.idx()),
"Node passed to restore_incident_edges must be deleted"
);
// See `delete_incident_edges` for an explanation of this swapping around
let incidence = mem::take(&mut self.node_incidences[node.idx()]);
// It is important that we restore the edges in reverse order
for (_, (edge, _)) in incidence.iter().rev() {
self.restore_edge(*edge);
}
self.node_incidences[node.idx()] = incidence;
}
pub fn export_as_ilp(&self, mut writer: impl Write) -> Result<()> {
writeln!(writer, "Minimize")?;
write!(writer, " v{}", CompressedIlpName(self.nodes()[0]))?;
for &node in &self.nodes()[1..] {
write!(writer, " + v{}", CompressedIlpName(node))?;
}
writeln!(writer)?;
writeln!(writer, "Subject To")?;
for &edge in self.edges() {
write!(writer, " e{}: ", CompressedIlpName(edge))?;
for (idx, node) in self.edge(edge).enumerate() {
if idx > 0 {
write!(writer, " + ")?;
}
write!(writer, "v{}", CompressedIlpName(node))?;
}
writeln!(writer, " >= 1")?;
}
writeln!(writer, "Binaries")?;
write!(writer, " v{}", CompressedIlpName(self.nodes()[0]))?;
for &node in &self.nodes()[1..] {
write!(writer, " v{}", CompressedIlpName(node))?;
}
writeln!(writer)?;
writeln!(writer, "End")?;
Ok(())
}
} | self.edge_incidences.len() | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.