tls.go
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1alpha3

import (
    "reflect"
    "sort"
    "strings"

    "istio.io/api/networking/v1alpha3"

    "istio.io/istio/pilot/pkg/model"
    "istio.io/istio/pilot/pkg/networking/util"
    "istio.io/istio/pkg/config/host"
    "istio.io/istio/pkg/config/labels"
    "istio.io/pkg/log"
)

// Match by source labels, the listener port where traffic comes in, the gateway on which the rule is being
// bound, etc. All these can be checked statically, since we are generating the configuration for a proxy
// with predefined labels, on a specific port.
func matchTLS(match *v1alpha3.TLSMatchAttributes, proxyLabels labels.Collection, gateways map[string]bool, port int, proxyNamespace string) bool {
    if match == nil {
        return true
    }

    gatewayMatch := len(match.Gateways) == 0
    for _, gateway := range match.Gateways {
        gatewayMatch = gatewayMatch || gateways[gateway]
    }

    labelMatch := proxyLabels.IsSupersetOf(match.SourceLabels)

    portMatch := match.Port == 0 || match.Port == uint32(port)

    nsMatch := match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace

    return gatewayMatch && labelMatch && portMatch && nsMatch
}

// Match by source labels, the listener port where traffic comes in, the gateway on which the rule is being
// bound, etc. All these can be checked statically, since we are generating the configuration for a proxy
// with predefined labels, on a specific port.
func matchTCP(match *v1alpha3.L4MatchAttributes, proxyLabels labels.Collection, gateways map[string]bool, port int, proxyNamespace string) bool {
    if match == nil {
        return true
    }

    gatewayMatch := len(match.Gateways) == 0
    for _, gateway := range match.Gateways {
        gatewayMatch = gatewayMatch || gateways[gateway]
    }

    labelMatch := proxyLabels.IsSupersetOf(match.SourceLabels)

    portMatch := match.Port == 0 || match.Port == uint32(port)

    nsMatch := match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace

    return gatewayMatch && labelMatch && portMatch && nsMatch
}

// Select the config pertaining to the service being processed.
func getConfigsForHost(hostname host.Name, configs []model.Config) []model.Config {
    svcConfigs := make([]model.Config, 0)
    for index := range configs {
        virtualService := configs[index].Spec.(*v1alpha3.VirtualService)
        for _, vsHost := range virtualService.Hosts {
            if host.Name(vsHost).Matches(hostname) {
                svcConfigs = append(svcConfigs, configs[index])
                break
            }
        }
    }
    return svcConfigs
}

// hashRuntimeTLSMatchPredicates hashes runtime predicates of a TLS match
func hashRuntimeTLSMatchPredicates(match *v1alpha3.TLSMatchAttributes) string {
    return strings.Join(match.SniHosts, ",") + "|" + strings.Join(match.DestinationSubnets, ",")
}

func buildSidecarOutboundTLSFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
    service *model.Service, listenPort *model.Port,
    gateways map[string]bool, configs []model.Config) []*filterChainOpts {

    if !listenPort.Protocol.IsTLS() {
        return nil
    }

    actualWildcard, _ := getActualWildcardAndLocalHost(node)

    // TLS matches are composed of runtime and static predicates.
    // Static predicates can be evaluated during the generation of the config. Examples: gateway, source labels, etc.
    // Runtime predicates cannot be evaluated during config generation. Instead the proxy must be configured to
    // evaluate them. Examples: SNI hosts, source/destination subnets, etc.
    //
    // A list of matches may contain duplicate runtime matches, but different static matches. For example:
    //
    // {sni_hosts: A, sourceLabels: X} => destination M
    // {sni_hosts: A, sourceLabels: *} => destination N
    //
    // For a proxy with labels X, we can evaluate the static predicates to get:
    // {sni_hosts: A} => destination M
    // {sni_hosts: A} => destination N
    //
    // The matches have the same runtime predicates. Since the second match can never be reached, we only
    // want to generate config for the first match.
    //
    // To achieve this, this function keeps track of which runtime matches it has already generated config for
    // and only adds config if it has not already generated config for that set of runtime predicates.
    matchHasBeenHandled := make(map[string]bool) // Runtime predicate set -> have we generated config for this set?

    // Is there a virtual service with a TLS block that matches us?
    hasTLSMatch := false

    out := make([]*filterChainOpts, 0)
    for _, cfg := range configs {
        virtualService := cfg.Spec.(*v1alpha3.VirtualService)
        for _, tls := range virtualService.Tls {
            for _, match := range tls.Match {
                if matchTLS(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
                    // Use the service's CIDRs.
                    // But if a virtual service overrides it with its own destination subnet match,
                    // give preference to the user provided one.
                    // destinationCIDR will be empty for services with VIPs.
                    destinationCIDRs := []string{destinationCIDR}
                    // Only set CIDR match if the listener is bound to an IP.
                    // If it's bound to a unix domain socket, then ignore the CIDR matches.
                    // Unix domain socket bound ports have Port value set to 0.
                    if len(match.DestinationSubnets) > 0 && listenPort.Port > 0 {
                        destinationCIDRs = match.DestinationSubnets
                    }

                    matchHash := hashRuntimeTLSMatchPredicates(match)
                    if !matchHasBeenHandled[matchHash] {
                        out = append(out, &filterChainOpts{
                            metadata:         util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
                            sniHosts:         match.SniHosts,
                            destinationCIDRs: destinationCIDRs,
                            networkFilters:   buildOutboundNetworkFilters(node, tls.Route, push, listenPort, cfg.ConfigMeta),
                        })
                        hasTLSMatch = true
                    }
                    matchHasBeenHandled[matchHash] = true
                }
            }
        }
    }

    // HTTPS or TLS ports without associated virtual service
    if !hasTLSMatch {
        var sniHosts []string

        // In case of a sidecar config with user defined port, if the user specified port is not the same as the
        // service's port, then pick the service port if and only if the service has only one port. If service
        // has multiple ports, then route to a cluster with the listener port (i.e. sidecar defined port) - the
        // traffic will most likely blackhole.
        port := listenPort.Port
        if len(service.Ports) == 1 {
            port = service.Ports[0].Port
        }

        clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
        statPrefix := clusterName
        // If stat name is configured, use it to build the stat prefix.
        if len(push.Mesh.OutboundClusterStatName) != 0 {
            statPrefix = util.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, service.Attributes)
        }
        // Use the hostname as the SNI value if and only if we do not have a destination VIP or if the destination is a CIDR.
        // In both cases, the listener will be bound to 0.0.0.0. So SNI match is the only way to distinguish different
        // target services. If we have a VIP, then we know the destination. There is no need to do a SNI match. It saves us from
        // having to generate expensive permutations of the host name just like RDS does.
        // NOTE that we cannot have two services with the same VIP as our listener build logic will treat it as a collision and
        // ignore one of the services.
        svcListenAddress := service.GetServiceAddressForProxy(node)
        if strings.Contains(svcListenAddress, "/") {
            // Address is a CIDR, already captured by destinationCIDR parameter.
            svcListenAddress = ""
        }

        if len(destinationCIDR) > 0 || len(svcListenAddress) == 0 || svcListenAddress == actualWildcard {
            sniHosts = []string{string(service.Hostname)}
        }
        out = append(out, &filterChainOpts{
            sniHosts:         sniHosts,
            destinationCIDRs: []string{destinationCIDR},
            networkFilters:   buildOutboundNetworkFiltersWithSingleDestination(push, node, statPrefix, clusterName, listenPort),
        })
    }

    return out
}

func buildSidecarOutboundTCPFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
    service *model.Service, listenPort *model.Port,
    gateways map[string]bool, configs []model.Config) []*filterChainOpts {

    if listenPort.Protocol.IsTLS() {
        return nil
    }

    out := make([]*filterChainOpts, 0)

    // very basic TCP
    // break as soon as we add one network filter with no destination addresses to match
    // This is the terminating condition in the filter chain match list
    defaultRouteAdded := false
TcpLoop:
    for _, cfg := range configs {
        virtualService := cfg.Spec.(*v1alpha3.VirtualService)
        for _, tcp := range virtualService.Tcp {
            destinationCIDRs := []string{destinationCIDR}
            if len(tcp.Match) == 0 {
                // implicit match
                out = append(out, &filterChainOpts{
                    metadata:         util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
                    destinationCIDRs: destinationCIDRs,
                    networkFilters:   buildOutboundNetworkFilters(node, tcp.Route, push, listenPort, cfg.ConfigMeta),
                })
                defaultRouteAdded = true
                break TcpLoop
            }

            // Use the service's virtual address first.
            // But if a virtual service overrides it with its own destination subnet match,
            // give preference to the user provided one.
            virtualServiceDestinationSubnets := make([]string, 0)

            for _, match := range tcp.Match {
                if matchTCP(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
                    // Scan all the match blocks:
                    // if we find any match block without a runtime destination subnet match,
                    // i.e. match any destination address, then we treat it as the terminal match/catch all match
                    // and break out of the loop. We also treat it as a terminal match if the listener is bound
                    // to a unix domain socket.
                    // But if we find only runtime destination subnet matches in all match blocks, collect them
                    // (this is similar to virtual hosts in http) and create the filter chain match accordingly.
                    if len(match.DestinationSubnets) == 0 || listenPort.Port == 0 {
                        out = append(out, &filterChainOpts{
                            metadata:         util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
                            destinationCIDRs: destinationCIDRs,
                            networkFilters:   buildOutboundNetworkFilters(node, tcp.Route, push, listenPort, cfg.ConfigMeta),
                        })
                        defaultRouteAdded = true
                        break TcpLoop
                    } else {
                        virtualServiceDestinationSubnets = append(virtualServiceDestinationSubnets, match.DestinationSubnets...)
                    }
                }
            }

            if len(virtualServiceDestinationSubnets) > 0 {
                out = append(out, &filterChainOpts{
                    destinationCIDRs: virtualServiceDestinationSubnets,
                    networkFilters:   buildOutboundNetworkFilters(node, tcp.Route, push, listenPort, cfg.ConfigMeta),
                })

                // If at this point there is a filter chain generated with the same CIDR match as the
                // one that may be generated for the service as the default route, do not generate it.
                // Otherwise, Envoy will complain about having filter chains with identical matches
                // and will reject the config.
                sort.Strings(virtualServiceDestinationSubnets)
                sort.Strings(destinationCIDRs)
                if reflect.DeepEqual(virtualServiceDestinationSubnets, destinationCIDRs) {
                    log.Warnf("Existing filter chain with same matching CIDR: %v.", destinationCIDRs)
                    defaultRouteAdded = true
                }
            }
        }
    }

    if !defaultRouteAdded {
        // In case of a sidecar config with user defined port, if the user specified port is not the same as the
        // service's port, then pick the service port if and only if the service has only one port. If service
        // has multiple ports, then route to a cluster with the listener port (i.e. sidecar defined port) - the
        // traffic will most likely blackhole.
        port := listenPort.Port
        if len(service.Ports) == 1 {
            port = service.Ports[0].Port
        }

        clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
        statPrefix := clusterName
        // If stat name is configured, use it to build the stat prefix.
        if len(push.Mesh.OutboundClusterStatName) != 0 {
            statPrefix = util.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, service.Attributes)
        }
        out = append(out, &filterChainOpts{
            destinationCIDRs: []string{destinationCIDR},
            networkFilters:   buildOutboundNetworkFiltersWithSingleDestination(push, node, statPrefix, clusterName, listenPort),
        })
    }

    return out
}

// This function can be called for namespaces with the auto generated sidecar, i.e. once per service and per port.
// OR, it could be called in the context of an egress listener with a specific TCP port on a sidecar config.
// In the latter case, there is no service associated with this listen port. So we have to account for this
// missing service throughout this file.
func buildSidecarOutboundTCPTLSFilterChainOpts(node *model.Proxy, push *model.PushContext,
    configs []model.Config, destinationCIDR string, service *model.Service, listenPort *model.Port,
    gateways map[string]bool) []*filterChainOpts {

    out := make([]*filterChainOpts, 0)
    var svcConfigs []model.Config
    if service != nil {
        svcConfigs = getConfigsForHost(service.Hostname, configs)
    } else {
        svcConfigs = configs
    }

    out = append(out, buildSidecarOutboundTLSFilterChainOpts(node, push, destinationCIDR, service, listenPort, gateways, svcConfigs)...)
    out = append(out, buildSidecarOutboundTCPFilterChainOpts(node, push, destinationCIDR, service, listenPort, gateways, svcConfigs)...)

    return out
}
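The deduplication described in the comment block of buildSidecarOutboundTLSFilterChainOpts can be made concrete with a small, self-contained sketch. This is hypothetical illustration code, not part of tls.go: it mirrors the hashRuntimeTLSMatchPredicates logic with a stand-in struct to show how two match blocks that differ only in static predicates (source labels, gateways) collapse to one runtime predicate set, so only the first yields a filter chain.

package main

import (
    "fmt"
    "strings"
)

// tlsMatch is a stand-in for v1alpha3.TLSMatchAttributes, keeping only the
// runtime-predicate fields that the hash in tls.go actually uses.
type tlsMatch struct {
    SniHosts           []string
    DestinationSubnets []string
}

// hashRuntimePredicates mirrors hashRuntimeTLSMatchPredicates from tls.go.
func hashRuntimePredicates(m tlsMatch) string {
    return strings.Join(m.SniHosts, ",") + "|" + strings.Join(m.DestinationSubnets, ",")
}

func main() {
    // {sni_hosts: A, sourceLabels: X} and {sni_hosts: A, sourceLabels: *}:
    // for a proxy with labels X, static evaluation reduces both to {sni_hosts: A}.
    matches := []tlsMatch{
        {SniHosts: []string{"a.example.com"}},
        {SniHosts: []string{"a.example.com"}},
    }

    handled := make(map[string]bool) // runtime predicate set -> already configured?
    for i, m := range matches {
        h := hashRuntimePredicates(m)
        if !handled[h] {
            fmt.Printf("match %d: generate filter chain for runtime predicates %q\n", i, h)
        } else {
            fmt.Printf("match %d: skip, runtime predicates %q already handled\n", i, h)
        }
        handled[h] = true
    }
}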
theme.rs
use cssparser::{self, BasicParseError, CompactCowStr, DeclarationListParser, Parser, ParseError, ParserInput, Token};
use orbclient::Color;

use std::collections::HashSet;
use std::sync::Arc;
use std::mem;
use std::ops::Add;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::Read;

static DEFAULT_THEME_CSS: &'static str = include_str!("theme.css");

lazy_static! {
    static ref DEFAULT_THEME: Arc<Theme> = {
        Arc::new(Theme {
            parent: None,
            rules: parse(DEFAULT_THEME_CSS),
        })
    };
}

pub struct Theme {
    parent: Option<Arc<Theme>>,
    rules: Vec<Rule>,
}

impl Theme {
    pub fn new() -> Self {
        Theme::parse("")
    }

    pub fn parse(s: &str) -> Self {
        Theme {
            parent: Some(DEFAULT_THEME.clone()),
            rules: parse(s),
        }
    }

    pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Theme, String> {
        let file = try!(File::open(path).map_err(|err| format!("failed to open css: {}", err)));
        let mut reader = BufReader::new(file);
        let mut css = String::new();
        let res = reader.read_to_string(&mut css).map_err(|err| format!("failed to read css: {}", err));
        match res {
            Ok(_) => Ok(Theme::parse(&css)),
            Err(err) => Err(err),
        }
    }

    fn all_rules(&self) -> Vec<Rule> {
        if let Some(ref parent) = self.parent {
            self.rules.iter().chain(parent.rules.iter()).cloned().collect()
        } else {
            self.rules.clone()
        }
    }

    pub fn get(&self, property: &str, query: &Selector) -> Option<Value> {
        let mut matches: Vec<(bool, Specificity, Value)> = Vec::new();

        for rule in self.all_rules().iter().rev() {
            let matching_selectors = rule.selectors.iter().filter(|x| x.matches(query)).collect::<Vec<_>>();

            if matching_selectors.len() > 0 {
                if let Some(decl) = rule.declarations.iter().find(|decl| decl.property == property) {
                    let highest_specificity = matching_selectors.iter().map(|sel| sel.specificity()).max().unwrap();
                    matches.push((decl.important, highest_specificity, decl.value.clone()));
                }
            }
        }

        matches.sort_by_key(|x| (x.0, x.1));
        matches.last().map(|x| x.2.clone())
    }

    pub fn color(&self, property: &str, query: &Selector) -> Color {
        let default = Color { data: 0 };
        self.get(property, query).map(|v| v.color().unwrap_or(default)).unwrap_or(default)
    }

    pub fn uint(&self, property: &str, query: &Selector) -> u32 {
        self.get(property, query).map(|v| v.uint().unwrap_or(0)).unwrap_or(0)
    }
}

#[derive(Clone, Debug)]
pub struct Rule {
    pub selectors: Vec<Selector>,
    pub declarations: Vec<Declaration>,
}

#[derive(Clone, Debug)]
pub enum SelectorRelation {
    Ancestor(Selector),
    Parent(Selector),
}

impl<T: Into<String>> From<T> for Selector {
    fn from(t: T) -> Self {
        Selector::new(Some(t.into()))
    }
}

/// Describes the specificity of a selector.
///
/// The indexes are as follows:
/// 0 - number of IDs (most important)
/// 1 - number of classes and pseudo-classes
/// 2 - number of elements (least important)
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Specificity([u8; 3]);

impl Add<Self> for Specificity {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        Specificity([
            self.0[0] + rhs.0[0],
            self.0[1] + rhs.0[1],
            self.0[2] + rhs.0[2],
        ])
    }
}

#[derive(Clone, Debug, Default)]
pub struct Selector {
    pub element: Option<String>,
    pub classes: HashSet<String>,
    pub pseudo_classes: HashSet<String>,
    pub relation: Option<Box<SelectorRelation>>,
}

impl Selector {
    pub fn new<S: Into<String>>(element: Option<S>) -> Self {
        Selector {
            element: element.map(|s| s.into()),
            classes: HashSet::new(),
            pseudo_classes: HashSet::new(),
            relation: None,
        }
    }

    fn specificity(&self) -> Specificity {
        let s = Specificity([
            0,
            (self.classes.len() + self.pseudo_classes.len()) as u8,
            if self.element.is_some() { 1 } else { 0 },
        ]);

        if let Some(ref relation) = self.relation {
            match **relation {
                SelectorRelation::Ancestor(ref x) |
                SelectorRelation::Parent(ref x) => return x.specificity() + s,
            }
        }

        s
    }

    pub fn matches(&self, other: &Selector) -> bool {
        if self.element.is_some() && self.element != other.element {
            return false;
        }

        if !other.classes.is_superset(&self.classes) {
            return false;
        }

        if !other.pseudo_classes.is_superset(&self.pseudo_classes) {
            return false;
        }

        true
    }

    pub fn with_class<S: Into<String>>(mut self, class: S) -> Self {
        self.classes.insert(class.into());
        self
    }

    pub fn without_class<S: Into<String>>(mut self, class: S) -> Self {
        self.classes.remove(&class.into());
        self
    }

    pub fn with_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
        self.pseudo_classes.insert(pseudo_class.into());
        self
    }

    pub fn without_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
        self.pseudo_classes.remove(&pseudo_class.into());
        self
    }
}

impl Selector {
    pub fn is_empty(&self) -> bool {
        self.element.is_none() && self.classes.is_empty() && self.pseudo_classes.is_empty()
    }
}

#[derive(Clone, Debug)]
pub struct Declaration {
    pub property: String,
    pub value: Value,
    pub important: bool,
}

#[derive(Clone, Debug)]
pub enum Value {
    UInt(u32),
    Color(Color),
}

impl Value {
    pub fn uint(&self) -> Option<u32> {
        match *self {
            Value::UInt(x) => Some(x),
            _ => None,
        }
    }

    pub fn color(&self) -> Option<Color> {
        match *self {
            Value::Color(x) => Some(x),
            _ => None,
        }
    }
}

#[derive(Clone, Debug)]
pub enum CustomParseError {
    InvalidColorName(String),
    InvalidColorHex(String),
}

impl<'t> From<CustomParseError> for ParseError<'t, CustomParseError> {
    fn from(e: CustomParseError) -> Self {
        ParseError::Custom(e)
    }
}

struct RuleParser;

impl RuleParser {
    fn new() -> Self {
        RuleParser {}
    }
}

impl<'i> cssparser::QualifiedRuleParser<'i> for RuleParser {
    type Prelude = Vec<Selector>;
    type QualifiedRule = Rule;
    type Error = CustomParseError;

    fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>) -> Result<Self::Prelude, ParseError<'i, Self::Error>> {
        let res = parse_selectors(input)?;
        Ok(res)
    }

    fn parse_block<'t>(&mut self, selectors: Self::Prelude, input: &mut Parser<'i, 't>) -> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
        let decl_parser = DeclarationParser {};
        let decls = DeclarationListParser::new(input, decl_parser).collect::<Vec<_>>();

        for decl in &decls {
            match *decl {
                Ok(_) => {}
                Err(ref e) => {
                    match e.error {
                        ParseError::Basic(ref e) => eprintln!("{:?}", e),
                        ParseError::Custom(ref e) => eprintln!("{:?}", e),
                    }
                    println!("Error occurred in `{}`", input.slice(e.span.clone()));
                }
            }
        }

        let decls = decls.into_iter().filter_map(|decl| decl.ok()).collect();

        Ok(Rule {
            selectors: selectors,
            declarations: decls,
        })
    }
}

impl<'i> cssparser::AtRuleParser<'i> for RuleParser {
    type Prelude = ();
    type AtRule = Rule;
    type Error = CustomParseError;
}

fn parse_selectors<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Vec<Selector>, ParseError<'i, CustomParseError>> {
    let mut selectors = Vec::new();
    let mut selector = Selector::default();
    let mut first_token_in_selector = true;

    while let Ok(t) = input.next() {
        match t {
            // Element
            Token::Ident(ref element_name) => {
                if first_token_in_selector {
                    selector.element = Some(element_name.to_string())
                } else {
                    let mut old_selector = Selector::new(Some(element_name.to_string()));
                    mem::swap(&mut old_selector, &mut selector);
                    selector.relation = Some(Box::new(SelectorRelation::Ancestor(old_selector)));
                }
            }

            Token::Delim('>') => {
                let mut old_selector = Selector::new(Some(input.expect_ident()?.to_string()));
                mem::swap(&mut old_selector, &mut selector);
                selector.relation = Some(Box::new(SelectorRelation::Parent(old_selector)));
            }

            // Any element
            Token::Delim('*') => {}

            // Class
            Token::Delim('.') => {
                selector.classes.insert(input.expect_ident()?.into_owned());
            }

            // Pseudo-class
            Token::Colon => {
                selector.pseudo_classes.insert(input.expect_ident()?.into_owned());
            }

            // This selector is done, on to the next one
            Token::Comma => {
                selectors.push(selector);
                selector = Selector::default();
                first_token_in_selector = true;
                continue; // need to continue to avoid `first_token_in_selector` being set to false
            }

            t => {
                let basic_error = BasicParseError::UnexpectedToken(t);
                return Err(basic_error.into());
            }
        }

        first_token_in_selector = false;
    }

    selectors.push(selector);

    if selectors.iter().any(|sel| sel.relation.is_some()) {
        eprintln!("WARNING: Complex selector relations not implemented");
    }

    Ok(selectors)
}

struct DeclarationParser;

impl<'i> cssparser::DeclarationParser<'i> for DeclarationParser {
    type Declaration = Declaration;
    type Error = CustomParseError;

    fn parse_value<'t>(&mut self, name: CompactCowStr<'i>, input: &mut Parser<'i, 't>) -> Result<Self::Declaration, ParseError<'i, Self::Error>> {
        let value = match &*name {
            "color" | "border-color" => Value::Color(parse_basic_color(input)?),
            "background" | "foreground" => Value::Color(parse_basic_color(input)?),
            "border-radius" | "border-width" => {
                match input.next()? {
                    Token::Number { int_value: Some(x), has_sign, .. } if !has_sign && x >= 0 => Value::UInt(x as u32),
                    t => return Err(BasicParseError::UnexpectedToken(t).into()),
                }
            }
            _ => return Err(BasicParseError::UnexpectedToken(input.next()?).into()),
        };

        Ok(Declaration {
            property: name.into_owned(),
            value: value,
            important: input.try(cssparser::parse_important).is_ok(),
        })
    }
}

impl<'i> cssparser::AtRuleParser<'i> for DeclarationParser {
    type Prelude = ();
    type AtRule = Declaration;
    type Error = CustomParseError;
}

fn css_color(name: &str) -> Option<Color> {
    Some(hex(match name {
        "transparent" => return Some(Color { data: 0 }),

        "black" => 0x000000,
        "silver" => 0xc0c0c0,
        "gray" | "grey" => 0x808080,
        "white" => 0xffffff,
        "maroon" => 0x800000,
        "red" => 0xff0000,
        "purple" => 0x800080,
        "fuchsia" => 0xff00ff,
        "green" => 0x008000,
        "lime" => 0x00ff00,
        "olive" => 0x808000,
        "yellow" => 0xffff00,
        "navy" => 0x000080,
        "blue" => 0x0000ff,
        "teal" => 0x008080,
        "aqua" => 0x00ffff,

        _ => return None,
    }))
}

fn parse_basic_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, ParseError<'i, CustomParseError>> {
    Ok(match input.next()? {
        Token::Ident(s) => match css_color(&s) {
            Some(color) => color,
            None => return Err(CustomParseError::InvalidColorName(s.into_owned()).into()),
        },

        Token::IDHash(hash) | Token::Hash(hash) => {
            match hash.len() {
                6 | 8 => {
                    let mut x = match u32::from_str_radix(&hash, 16) {
                        Ok(x) => x,
                        Err(_) => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
                    };

                    if hash.len() == 6 {
                        x |= 0xFF000000;
                    }

                    Color { data: x }
                }
                _ => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
            }
        }

        t => {
            let basic_error = BasicParseError::UnexpectedToken(t);
            return Err(basic_error.into());
        }
    })
}

fn parse(s: &str) -> Vec<Rule> {
    let mut input = ParserInput::new(s);
    let mut parser = Parser::new(&mut input);
    let rule_parser = RuleParser::new();

    let rules = {
        let rule_list_parser = cssparser::RuleListParser::new_for_stylesheet(&mut parser, rule_parser);
        rule_list_parser.collect::<Vec<_>>()
    };

    for rule in &rules {
        match *rule {
            Ok(_) => {}
            Err(ref e) => {
                match e.error {
                    ParseError::Basic(ref e) => eprintln!("{:?}", e),
                    ParseError::Custom(ref e) => eprintln!("{:?}", e),
                }
                println!("Error occurred in `{}`", parser.slice(e.span.clone()));
            }
        }
    }

    rules.into_iter().filter_map(|rule| rule.ok()).collect()
}

const fn hex(data: u32) -> Color {
    Color { data: 0xFF000000 | data }
}
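To make the API above concrete, here is a minimal usage sketch, assuming it sits inside this module (the CSS string, element name, and pseudo-class are illustrative assumptions, not values taken from theme.css):

fn usage_example() {
    // Parse a user stylesheet. Rules that fail to parse are reported and skipped;
    // DEFAULT_THEME remains the fallback parent for properties not set here.
    let theme = Theme::parse("button { background: #808080; border-radius: 2; }");

    // Widgets query by element name, classes, and pseudo-classes; a rule selector
    // matches when the query's class/pseudo-class sets are supersets of its own.
    let selector = Selector::new(Some("button")).with_pseudo_class("hover");

    // `color` and `uint` fall back to zero values when nothing matches.
    let background: Color = theme.color("background", &selector);
    let radius: u32 = theme.uint("border-radius", &selector);
    let _ = (background, radius);
}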
{ Token::Ident(s) => match css_color(&s) { Some(color) => color, None => return Err(CustomParseError::InvalidColorName(s.into_owned()).into()), }, Token::IDHash(hash) | Token::Hash(hash) => { match hash.len() { 6 | 8 =>
, _ => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
            }
        }
        t => {
            let basic_error = BasicParseError::UnexpectedToken(t);
            return Err(basic_error.into());
        }
    })
}

fn parse(s: &str) -> Vec<Rule> {
    let mut input = ParserInput::new(s);
    let mut parser = Parser::new(&mut input);

    let rule_parser = RuleParser::new();
    let rules = {
        let rule_list_parser = cssparser::RuleListParser::new_for_stylesheet(&mut parser, rule_parser);
        rule_list_parser.collect::<Vec<_>>()
    };

    for rule in &rules {
        match *rule {
            Ok(_) => {}
            Err(ref e) => {
                match e.error {
                    ParseError::Basic(ref e) => eprintln!("{:?}", e),
                    ParseError::Custom(ref e) => eprintln!("{:?}", e),
                }
                println!("Error occurred in `{}`", parser.slice(e.span.clone()));
            }
        }
    }

    rules.into_iter().filter_map(|rule| rule.ok()).collect()
}

const fn hex(data: u32) -> Color {
    Color { data: 0xFF000000 | data }
}
{ let mut x = match u32::from_str_radix(&hash, 16) { Ok(x) => x, Err(_) => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()), }; if hash.len() == 6 { x |= 0xFF000000; } Color { data: x } }
conditional_block
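A note on the precedence logic in `Theme::get` above: Rust tuples sort lexicographically, so sorting matches by `(important, specificity)` and then taking the last element lets `!important` win first, with specificity as the tie-breaker. A minimal, self-contained sketch of that rule (the sample selectors and names here are illustrative, not part of theme.rs):

// Sketch: why `matches.sort_by_key(|x| (x.0, x.1))` picks the right winner.
fn main() {
    // (important, [ids, classes + pseudo-classes, elements], provenance)
    let mut matches = vec![
        (false, [0u8, 1, 1], "from `.warn button`"),
        (true,  [0u8, 0, 1], "from `button` marked !important"),
        (false, [0u8, 0, 1], "from `button`"),
    ];
    matches.sort_by_key(|m| (m.0, m.1));
    // After sorting, the last entry is the declaration that applies:
    // `!important` beats higher specificity.
    assert_eq!(matches.last().unwrap().2, "from `button` marked !important");
}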
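The hash arm of `parse_basic_color` accepts only 6-digit (RGB) and 8-digit (ARGB) hex values, and forces full alpha when no alpha digits are given. Here is the same decoding rule as a standalone sketch, assuming the 0xAARRGGBB layout that `hex()` implies for `orbclient::Color` (the function name is mine, not from theme.rs):

// Standalone sketch of the 6/8-digit hex decoding used by `parse_basic_color`.
fn decode_hex_color(hash: &str) -> Result<u32, String> {
    match hash.len() {
        6 | 8 => {
            let mut x = u32::from_str_radix(hash, 16)
                .map_err(|_| format!("invalid hex color: {}", hash))?;
            if hash.len() == 6 {
                x |= 0xFF000000; // no alpha digits given: make it fully opaque
            }
            Ok(x)
        }
        _ => Err(format!("invalid hex color length: {}", hash)),
    }
}

fn main() {
    assert_eq!(decode_hex_color("ff0000").unwrap(), 0xFFFF0000);   // opaque red
    assert_eq!(decode_hex_color("80ff0000").unwrap(), 0x80FF0000); // half-transparent red
    assert!(decode_hex_color("fff").is_err()); // 3-digit shorthand is not supported
}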
lib.rs
#[cfg(target_os = "linux")] pub mod dynamic_lib_loading{ #![allow(unused_imports)] #![allow(non_camel_case_types)] #![allow(dead_code)] use std::ffi::CString; use std::ptr; use std::os::raw::{c_int, c_char, c_void}; // //This is a lib of dlopen and dlclose using rust //Comments copied from the following source files // /usr/include/dlfcn.h // https://www.unvanquished.net/~modi/code/include/x86_64-linux-gnu/bits/dlfcn.h.html /* These are the possible values for the REQUEST argument to `dlinfo'. */ enum DL_INFO{ /* Treat ARG as `lmid_t *'; store namespace ID for HANDLE there. */ RTLD_DI_LMID = 1, /* Treat ARG as `struct link_map **'; store the `struct link_map *' for HANDLE there. */ RTLD_DI_LINKMAP = 2, RTLD_DI_CONFIGADDR = 3, /* Unsupported, defined by Solaris. */ /* Treat ARG as `Dl_serinfo *' (see below), and fill in to describe the directories that will be searched for dependencies of this object. RTLD_DI_SERINFOSIZE fills in just the `dls_cnt' and `dls_size' entries to indicate the size of the buffer that must be passed to RTLD_DI_SERINFO to fill in the full information. */ RTLD_DI_SERINFO = 4, RTLD_DI_SERINFOSIZE = 5, /* Treat ARG as `char *', and store there the directory name used to expand $ORIGIN in this shared object's dependency file names. */ RTLD_DI_ORIGIN = 6, RTLD_DI_PROFILENAME = 7, /* Unsupported, defined by Solaris. */ RTLD_DI_PROFILEOUT = 8, /* Unsupported, defined by Solaris. */ /* Treat ARG as `size_t *', and store there the TLS module ID of this object's PT_TLS segment, as used in TLS relocations; store zero if this object does not define a PT_TLS segment. */ RTLD_DI_TLS_MODID = 9, /* Treat ARG as `void **', and store there a pointer to the calling thread's TLS block corresponding to this object's PT_TLS segment. Store a null pointer if this object does not define a PT_TLS segment, or if the calling thread has not allocated a block for it. */ RTLD_DI_TLS_DATA = 10, //RTLD_DI_MAX = 10 } /* The MODE argument to `dlopen' contains one of the following: */ pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. */ pub const RTLD_NOW : i32 = 0x00002; /* Immediate function call binding. */ pub const RTLD_BINDING_MASK : i32 = 0x3 ; /* Mask of binding time value. */ pub const RTLD_NOLOAD : i32 = 0x00004; /* Do not load the object. */ pub const RTLD_DEEPBIND : i32 = 0x00008; /* Use deep binding. */ /* If the following bit is set in the MODE argument to `dlopen', * the symbols of the loaded object and its dependencies are made * visible as if the object were linked directly into the program. */ pub const RTLD_GLOBAL : i32 = 0x00100; /* Unix98 demands the following flag which is the inverse to RTLD_GLOBAL. * The implementation does this by default and so we can define the * value to zero. */ pub const RTLD_LOCAL : i32 = 0; /* Do not delete object when closed. */ pub const RTLD_NODELETE : i32 = 0x01000; struct Dl_info{ dli_fname: *mut c_char, /* File name of defining object. */ dli_fbase: *mut c_void, /* Load address of that object. */ dli_sname: *mut c_char, /* Name of nearest symbol. */ dli_saddr: *mut c_void, /* Exact value of nearest symbol. */ //dlerror } /* This is the type of elements in `Dl_serinfo', below. The `dls_name' member points to space in the buffer passed to `dlinfo'. */ struct Dl_serpath { dls_name: *mut c_char, /* Name of library search path directory. */ dls_flags: u32, /* Indicates where this directory came from. */ } /* This is the structure that must be passed (by reference) to `dlinfo' for the RTLD_DI_SERINFO and RTLD_DI_SERINFOSIZE requests. 
*/ struct Dl_serinfo { dls_size: usize, /* Size in bytes of the whole buffer. */ dls_cnt: u32, /* Number of elements in `dls_serpath'. */ dls_serpath: [Dl_serpath;1], /* Actually longer, dls_cnt elements. */ } //TODO //Think about changing from c_int to i32 or something extern "C" { pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void; pub fn dlsym(lib_handle: *mut c_void, name: *const c_char) -> *mut c_void; pub fn dlclose(lib_handle: *mut c_void) -> c_int; pub fn dlinfo(lib_handle: *mut c_void, request: c_int, info: *mut c_void) -> c_int; pub fn dlerror() -> *mut c_char; } pub struct DyLib(*mut c_void); pub fn open_lib( lib_path: &str, flag: i32 )->Result<DyLib, String>{unsafe{ //TODO //Get enums dlopen uses let shared_lib_handle = dlopen(CString::new(lib_path).unwrap().as_ptr(), flag as c_int); if shared_lib_handle.is_null()
else{
        Ok( DyLib(shared_lib_handle) )
    }
    }}

    //Example
    //let function : fn()->i32 = transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
    pub fn get_fn( shared_lib_handle: &DyLib, name: &str)->Result<*mut (), String>{unsafe{
        let _fn = dlsym(shared_lib_handle.0, CString::new(name).unwrap().as_ptr());
        if _fn.is_null() {
            Err("Function name could not be found.".to_string())
        }
        else{
            Ok(_fn as *mut ())
        }
    }}

    pub fn get_error()->String{unsafe{
        let error = dlerror();
        if error.is_null(){
            return "No Error".to_string();
        }
        //NOTE: dlerror() returns a buffer owned by libc, so copy out of it;
        //CString::from_raw would take ownership and free memory we do not own.
        std::ffi::CStr::from_ptr(error).to_string_lossy().into_owned()
    }}

    pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
        if dlclose(shared_lib_handle.0) != 0{
            println!("Could not properly close shared library.");
        }
    }}
}

#[cfg(target_os = "windows")]
pub mod dynamic_lib_loading{
    use std::os::raw::{c_int, c_void};

    extern "C" {
        fn LoadLibraryA( path: *const i8 ) -> *mut c_void;
        fn GetProcAddress( lib: *mut c_void, name: *const i8 ) -> *mut c_void;
        fn FreeLibrary( lib: *mut c_void ) -> c_int;
        fn GetLastError() -> u32;
    }

    //TODO
    //This is temporary and should be replaced by Windows enums
    pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. */

    pub struct DyLib(*mut c_void);

    pub fn open_lib( lib_path: &str, _flag: i32 )->Result<DyLib, String>{unsafe{
        let _path = lib_path.to_string() + "\0";
        let lib = LoadLibraryA( _path.as_ptr() as *const i8);
        if lib.is_null(){
            let s = format!("Could not open lib \n{:?}\n\n For more info => https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes", GetLastError());
            return Err(s);
        }
        Ok(DyLib(lib as *mut c_void))
    }}

    //Example
    //let function : fn()->i32 = transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
    pub fn get_fn( shared_lib_handle: &DyLib, name: &str)->Result<*mut (), String>{unsafe{
        let fn_name = name.to_string() + "\0";
        let function = GetProcAddress(shared_lib_handle.0 as _, fn_name.as_ptr() as *const i8) as *mut ();
        if function.is_null(){
            let s = format!("Could not get function \n{:?}", GetLastError());
            return Err(s);
        }
        Ok(function)
    }}

    pub fn get_error()->String{
        "Windows version has not been implemented".to_string()
    }

    pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
        if FreeLibrary(shared_lib_handle.0 as _) == 0{
            println!("Could not properly close shared library.");
            println!("{}", format!("{:?}", GetLastError()));
        }
    }}
}

pub mod render_tools{
    #[derive(PartialEq, Clone, Debug)]
    pub enum RenderType{
        Image,
        Rectangle,
        String,
        PrintString,
        Empty,
    }

    impl Default for RenderType{
        fn default()->Self{ RenderType::Empty }
    }

    #[derive(Default)]
    pub struct RenderStruct{
        pub rendertype : RenderType,
        pub x: f32,
        pub y: f32,
        pub width: f32,
        pub height: f32,
        pub alpha : f32,

        //rect related things
        pub filled: bool,
        pub color: [f32;3],

        //image related things
        pub color_buffer: Vec<u8>,
        pub rgba_type: RGBA,
        pub new_width: Option<f32>,  // NOTE Testing out using a fractional new width
        pub new_height: Option<f32>, // NOTE Testing out using a fractional new height

        //Strings
        pub char_buffer: String,
        pub font_size: u32
    }

    #[derive(Default)]
    pub struct RenderInstructions{
        pub buffer: Vec<RenderStruct>,
    }

    //TODO
    //This is a BAD name.... do better
    #[derive(Clone, Copy, PartialEq)]
    pub enum RGBA{
        U8rgba,
        U8argb,
        U8rgb,
        Empty,
        //More maybe ... maybe not
    }

    impl RenderInstructions{
        pub fn clear(&mut self){
            self.buffer.clear();
        }

        pub fn draw_rect(&mut self, rect: [f32; 4], color: [f32; 4], filled: bool){
            let _color = [color[0], color[1], color[2]];
            self.buffer.push( RenderStruct{rendertype: RenderType::Rectangle, x: rect[0], y: rect[1], width: rect[2], height: rect[3],
                                           alpha: color[3], filled: filled, color: _color, .. Default::default()});
        }

        pub fn draw_string(&mut self, s: &str, color: [f32; 4], size: u32, x: f32, y: f32 ){
            //TODO
            //should size be optional
            //shouldn't a good text size be chosen automatically
            //TODO
            //should color be optional
            //shouldn't a good text color be chosen automatically
            let _color = [color[0], color[1], color[2]];
            self.buffer.push( RenderStruct{ rendertype: RenderType::String, x: x, y: y, alpha: color[3], color: _color,
                                            char_buffer: s.to_string(), font_size: size, .. Default::default()} );
        }

        pub fn draw_bmp(&mut self, bmp: &Bitmap, x: f32, y: f32, alpha: f32, w: Option<f32>, h: Option<f32>){
            //TODO
            //should x and y be options. Often I want to just draw the image wherever and have it
            //automagically look good with corresponding text
            self.buffer.push( RenderStruct{rendertype: RenderType::Image, alpha: alpha, x: x, y: y,
                                           width: bmp.width as f32, height: bmp.height as f32,
                                           new_width: w, new_height: h,
                                           rgba_type: bmp.rgba_type, color_buffer: bmp.buffer.clone(), .. Default::default()} );
        }

        pub fn println(&mut self, string: &str){
            let buffer = "> ".to_string() + string;
            self.buffer.push( RenderStruct{ rendertype: RenderType::PrintString, alpha: 1.0, color: [1.0, 1.0, 1.0],
                                            char_buffer: buffer, font_size: 19, .. Default::default()} );
        }
    }

    impl Default for RGBA{
        fn default()->Self{ RGBA::Empty }
    }

    #[derive(Clone)]
    pub struct Bitmap{
        //NOTE BMP should be 400 x 400 to start off.
        pub width: i32,
        pub height: i32,
        pub rgba_type: RGBA,
        pub buffer: Vec<u8>,
    }

    impl Bitmap{
        pub fn new(w: i32, h: i32, rgba_type: RGBA)->Bitmap{
            let _w = w as usize;
            let _h = h as usize;
            let v = match rgba_type{
                RGBA::U8rgba=>{ vec![0u8; _w*_h*4] },
                RGBA::U8argb=>{ vec![0u8; _w*_h*4] },
                RGBA::U8rgb =>{ vec![0u8; _w*_h*3] },
                _=>{ panic!("Not supported"); } //TODO Clean up
            };
            Bitmap{
                width: w,
                height: h,
                rgba_type: rgba_type,
                buffer: v,
            }
        }
    }

    pub struct BitmapContainer{
        pub initialized : bool,
        pub bmp: Option<Bitmap>,
    }
}

pub mod memory_tools{
    //TODO play around with this maybe
    //use std::alloc;
    use std::any::TypeId;

    const _FIXED_CHAR_BUFFER_SIZE : usize = 128;

    #[derive(Copy, Clone)]
    pub struct TinyString{
        //NOTE
        //currently struct vars are public for debugging purposes. They should not be public.
        //NOTE
        //This should prob be a general tool
        pub buffer: [char; _FIXED_CHAR_BUFFER_SIZE],
        pub cursor: usize,
    }

    impl TinyString{
        pub fn new()->TinyString{
            TinyString{
                buffer: ['\0'; _FIXED_CHAR_BUFFER_SIZE],
                cursor: 0,
            }
        }

        pub fn get(&self, index: usize)->&char{
            &self.buffer[index]
        }

        pub fn get_mut(&mut self, index: usize)->&mut char{
            &mut self.buffer[index]
        }

        pub fn len(&self)->usize{
            self.cursor
        }

        pub fn push(&mut self, c: char)->Result< () , String>{
            if self.len() >= _FIXED_CHAR_BUFFER_SIZE {
                return Err("Oversized TinyString".to_string());
            }
            let _i = self.cursor;
            self[_i] = c;
            self.cursor += 1;
            return Ok( () );
        }

        //TODO
        //check meaning of clone and copy in rust
        pub fn copystr(&mut self, s: &str){
            let mut chars = s.chars();
            for _ in 0.._FIXED_CHAR_BUFFER_SIZE {
                match chars.next(){
                    Some(c)=> {
                        //NOTE guard against running past the fixed buffer when the cursor did not start at zero
                        if self.cursor >= _FIXED_CHAR_BUFFER_SIZE { break; }
                        let _i = self.cursor;
                        self[_i] = c;
                        self.cursor += 1;
                    }
                    _=> break
                }
            }
        }

        //TODO
        //check meaning of clone and copy in rust
        pub fn copy(&mut self, s: &TinyString){
            for i in 0..s.len(){
                self[i] = s[i];
            }
        }

        pub fn is_samestr(&self, s: &str)->bool{
            let mut chars = s.chars();
            if self.len() != s.len(){
                return false;
            }
            for i in 0..self.len(){
                if self[i] != chars.next().unwrap(){
                    return false;
                }
            }
            return true;
        }

        pub fn is_same(&self, s: &TinyString)->bool{
            if self.len() != s.len(){
                return false;
            }
            for i in 0..self.len(){
                if self[i] != s[i]{
                    return false;
                }
            }
            return true;
        }
    }

    impl std::ops::Index<usize> for TinyString{
        type Output = char;
        fn index(&self, index: usize)->&Self::Output{
            self.get(index)
        }
    }

    impl std::ops::IndexMut<usize> for TinyString{
        fn index_mut(&mut self, index: usize)->&mut Self::Output{
            self.get_mut(index)
        }
    }

    pub trait Storage{
        fn get_storage(&mut self)->&mut [u8];
    }

    impl Storage for GlobalStorage{
        fn get_storage(&mut self)->&mut [u8]{
            &mut self.storage
        }
    }

    pub struct Ptr<S: Storage>{
        //TODO make backend_storage a Generic function that requires trait
        ptr: usize,
        type_hash: TypeId,
        backend_storage: *mut S
    }

    impl<S: Storage> Ptr<S>{
        pub fn deref_mut<T: 'static>(&self)->&mut T{unsafe{
            if self.type_hash != TypeId::of::<T>() { panic!("Could not dereference custom pointer due to failed type hash comparison."); }
            let gs = self.backend_storage.as_mut().unwrap();
            ((&mut gs.get_storage()[self.ptr] as *mut u8 ) as *mut T).as_mut().unwrap()
        }}
    }

    impl<S: Storage> std::clone::Clone for Ptr<S>{
        fn clone(&self)->Self{
            Ptr{
                ptr: self.ptr,
                type_hash: self.type_hash,
                backend_storage: self.backend_storage,
            }
        }
    }

    pub struct DyArray<T>{
        ptr: Ptr<GlobalStorage>,
        length: usize,
        capacity: usize,
        phantom: std::marker::PhantomData<T>,
    }

    impl<T: 'static> DyArray<T>{
        pub fn push(&mut self, v: T) {unsafe{
            let gs = self.ptr.backend_storage.as_mut().unwrap();
            if self.length >= self.capacity{
                //NOTE grow by doubling, then fall through and write `v` below.
                //(Previously this arm returned early, which dropped `v` without
                //storing it and never updated the capacity.)
                let byte_len = self.length * std::mem::size_of::<T>();
                let old_ptr = self.ptr.clone();
                let index_back = old_ptr.ptr + byte_len;
                let new_ptr = gs.realloc::<T>(old_ptr, index_back, self.capacity * std::mem::size_of::<T>());
                self.ptr = new_ptr;
                self.capacity *= 2;
            }
            let cursor = self.ptr.ptr + self.length * std::mem::size_of::<T>();
            gs.write_to(v, cursor).expect("Global storage could not write to index.");
            self.length += 1;
        }}

        pub fn new(gs: &mut GlobalStorage)->DyArray<T>{
            DyArray::<T>::with_capacity(gs, 5)
        }

        pub fn with_capacity(gs: &mut GlobalStorage, size: usize)->DyArray<T>{
            let ptr = gs.alloc_multi_empty::<T>( size );
            DyArray{
                ptr: ptr,
                length: 0,
                capacity: size,
                phantom: std::marker::PhantomData
            }
        }

        pub fn get(&self, index: usize)->&T{unsafe{
            if index >= self.length { panic!("Index bounds error."); }
            let base = self.ptr.ptr;
            let address = base + index * std::mem::size_of::<T>();
            let gs = self.ptr.backend_storage.as_mut().unwrap();
            ((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
        }}

        pub fn get_mut(&mut self, index: usize)->&mut T{unsafe{
            if index >= self.length { panic!("Index bounds error."); }
            let base = self.ptr.ptr;
            let address = base + index * std::mem::size_of::<T>();
            let gs = self.ptr.backend_storage.as_mut().unwrap();
            ((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
        }}
    }

    //TODO
    //Convert Global and Local storage into a general storage abstraction where storage can be
    //either a fixed or dynamic array.
    pub struct GlobalStorage{
        pub storage: Vec<u8>,
        pub storage_filled: Vec<bool>, //TODO space footprint improvement: use bits in u8
        reference: [TinyString;100],   //This is fixed size because I really want to stop myself from over populating the global space
        stored_ptr: Vec<Ptr<GlobalStorage>>,
    }

    impl GlobalStorage{
        pub fn new()->GlobalStorage{
            GlobalStorage{
                storage: Vec::with_capacity(1028*1028*4), //This is still prob too small
                storage_filled: Vec::with_capacity(1028*1028*4),
                //TODO
                //reference needs to store the ptr index TODO
                reference: [TinyString::new(); 100],
                stored_ptr: Vec::new(),
            }
        }

        pub fn alloc<T: 'static>(&mut self, v: T)->Ptr<GlobalStorage>{unsafe{
            let size = std::mem::size_of::<T>();
            let src = (&v as *const T) as *const u8;
            let cursor = self.storage.len();

            for i in 0..size{
                //TODO
                //SLOW SPEED ME UP
                //I don't think I want to be pushing every item like this
                //TODO
                //byte alignments
                self.storage.push(*src.offset(i as isize));
                self.storage_filled.push(true);
            }

            return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _};
        }}

        pub fn alloc_multi_empty<T: 'static>(&mut self, multiples: usize)->Ptr<GlobalStorage>{
            let size = std::mem::size_of::<T>() * multiples;
            let cursor = self.storage.len();

            for _i in 0..size{
                //TODO
                //SLOW SPEED ME UP
                //I don't think I want to be pushing every item like this
                //TODO
                //byte alignments
                self.storage.push(0);
                self.storage_filled.push(true);
            }

            return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _};
        }

        pub fn realloc<T>(&mut self, ptr: Ptr<GlobalStorage>, index_back: usize, additional_space: usize)->Ptr<GlobalStorage>{
            //TODO
            //SLOW SPEED UP
            let cursor = self.storage.len();
            let index_front = ptr.ptr;

            for i in index_front..index_back{
                let temp = self.storage[i];
                self.storage_filled[i] = false;
                self.storage.push(temp);
                self.storage_filled.push(true);
            }

            for _i in 0..additional_space{
                self.storage.push(0);
                self.storage_filled.push(true);
            }

            return Ptr{ ptr: cursor, type_hash: ptr.type_hash, backend_storage: self as *mut _};
        }

        pub unsafe fn write_to<T: 'static>(&mut self, v: T, at_index: usize)->Result<(),String>{
            let size = std::mem::size_of::<T>();
            let src = (&v as *const T) as *const u8;

            if at_index >= self.storage.len() {
                return Err("Writing outside the bounds of memory allocated to global storage".to_string());
            }

            let cursor = at_index;
            for i in 0..size{
                //TODO
                //SLOW SPEED ME UP
                //I don't think I want to be writing every item like this
                //TODO
                //byte alignments
                if !self.storage_filled[cursor+i] { panic!("Storage has not allocated this memory.") }
                self.storage[cursor+i] = *src.offset(i as isize);
            }

            return Ok(());
        }

        pub fn store<T: 'static>(&mut self, v: T, name: &str)->Result<(), String>{
            if name.len() > _FIXED_CHAR_BUFFER_SIZE {
                return Err(format!("storage name is too damn long. Name should be {} chars long.", _FIXED_CHAR_BUFFER_SIZE));
            }

            let cursor = self.stored_ptr.len();

            for it in self.reference.iter() {
                if it.is_samestr(name){
                    return Err(format!("Global Storage name collision: {}", name));
                }
            }

            self.reference[cursor].copystr( name );
            let ptr = self.alloc(v);
            self.stored_ptr.push(ptr);

            return Ok(());
        }

        pub fn get<T: 'static>(&mut self, name: &str)->Result<&mut T, String>{
            let mut isgood = false;
            let mut ptr_index = 0;

            for (i, it) in self.reference.iter().enumerate() {
                if it.is_samestr(name){
                    ptr_index = i;
                    isgood = true;
                }
            }

            if isgood == false {
                return Err(format!("Name not found in Global Storage: {}", name));
            }

            let ptr = &self.stored_ptr[ptr_index];
            return Ok(ptr.deref_mut::<T>());
        }
    }

    pub struct LocalStorage{
        //NOTE
        //This seems to be a good idea when it comes to interactive panels.
        //However I'm not sure about the usefulness elsewhere....
        //
        //Why should the local buffer be fixed sized? This doesn't really make sense.
        pub interactive: bool,
        pub storage: GlobalStorage,
    }

    impl LocalStorage{
        pub fn new()->LocalStorage{
            LocalStorage{
                interactive: false,
                storage: GlobalStorage::new(),
            }
        }
    }
}

pub mod interaction_tools{
    pub enum KeyboardEnum{
        Rightarrow,
        Leftarrow,
        Uparrow,
        Downarrow,
        Enter,
        Default
    }

    impl Default for KeyboardEnum{
        fn default()->Self{ KeyboardEnum::Default }
    }

    pub enum ButtonStatus{
        Up,
        Down,
        Default
    }

    impl Default for ButtonStatus{
        fn default()->Self{ ButtonStatus::Default }
    }

    #[derive(Default)]
    pub struct InteractiveInfo{
        //TODO add some frame info
        pub infocus: bool,
        pub mouse_x: f32,
        pub mouse_y: f32,
        pub text_key_pressed: char,
        pub keyboard_key: Vec<KeyboardEnum>,
        pub keyboard_key_status: Vec<ButtonStatus>,
        pub frames: u64,
    }
}

#[test]
fn globalstorage_alloc_and_store(){ //TODO rename
    use memory_tools::GlobalStorage;

    let mut gls = GlobalStorage::new();
    {
        let a = [10u8; 4];
        gls.store(a, "a").unwrap();
    }
    let b = [10u8; 4];
    assert_eq!(b, *gls.get::<[u8;4]>("a").unwrap());
}

#[test]
fn globalstorage_vec(){ //TODO rename
    use memory_tools::{GlobalStorage, DyArray};

    let mut gls = GlobalStorage::new();
    let mut dy = DyArray::<u32>::new(&mut gls);

    dy.push(12);
    dy.push(123);
    dy.push(1231);

    //let a = dy.get(0);
    //assert_eq!(12, *a);
    println!("print test");
    let a = dy.get(1);
    assert_eq!(123, *a);

    let a = dy.get(2);
    assert_eq!(1231, *a);

    /* These did not compile because of a typo (`dy,get(1)` instead of `dy.get(1)`):
    assert_eq!(12, *(dy.get(0)));
    assert_eq!(123, *(dy.get(1)));
    assert_eq!(1231, *(dy.get(2)));
    */
}

#[test]
fn global_storage_vec2(){ //TODO rename
    use memory_tools::{GlobalStorage, DyArray};

    let mut gls = GlobalStorage::new();

    let _a = gls.alloc(123.09f32);
    let mut dy = DyArray::<u32>::new(&mut gls);
    let _b = gls.alloc(123i32);

    dy.push(12);
    dy.push(123);
    dy.push(1231);

    //let a = dy.get(0);
    //assert_eq!(12, *a);
    let ab = dy.get(1);
    assert_eq!(123, *ab);

    let ab = dy.get(2);
    assert_eq!(1231, *ab);
}

#[test]
fn global_storage_vec3(){ //TODO rename
    use memory_tools::{GlobalStorage, DyArray};

    let mut gls = GlobalStorage::new();
    {
        let mut dy = DyArray::<u32>::new(&mut gls);
        dy.push(12);
        dy.push(123);
        dy.push(1231);
        gls.store(dy, "dy").unwrap();
    }

    let a = gls.get::<DyArray<u32>>("dy").unwrap();
    let ab = a.get(0);
    assert_eq!(12, *ab);
}
{ println!("{:?}", get_error()); Err(format!("Shared lib is null! {} Check file path/name.", lib_path)) }
conditional_block
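Since `get_fn` only hands back a raw `*mut ()`, the caller is expected to transmute it to a concrete function type, as the `//Example` comments in the module hint. A hedged usage sketch: the library path `./libgame.so` and the `update` symbol are made-up placeholders, and this assumes the module above is in scope as `dynamic_lib_loading` and that the cdylib exports `#[no_mangle] pub extern "C" fn update() -> i32`.

use dynamic_lib_loading::{open_lib, get_fn, close_lib, RTLD_LAZY};

fn main() -> Result<(), String> {
    // Load the shared object lazily; open_lib wraps dlopen/LoadLibraryA.
    let lib = open_lib("./libgame.so", RTLD_LAZY)?;

    // get_fn returns a raw *mut (), so transmute it to the real signature.
    let update: extern "C" fn() -> i32 = unsafe { std::mem::transmute(get_fn(&lib, "update")?) };

    println!("update() returned {}", update());
    close_lib(&lib);
    Ok(())
}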
lib.rs
#[cfg(target_os = "linux")] pub mod dynamic_lib_loading{ #![allow(unused_imports)] #![allow(non_camel_case_types)] #![allow(dead_code)] use std::ffi::CString; use std::ptr; use std::os::raw::{c_int, c_char, c_void}; // //This is a lib of dlopen and dlclose using rust //Comments copied from the following source files // /usr/include/dlfcn.h // https://www.unvanquished.net/~modi/code/include/x86_64-linux-gnu/bits/dlfcn.h.html /* These are the possible values for the REQUEST argument to `dlinfo'. */ enum DL_INFO{ /* Treat ARG as `lmid_t *'; store namespace ID for HANDLE there. */ RTLD_DI_LMID = 1, /* Treat ARG as `struct link_map **'; store the `struct link_map *' for HANDLE there. */ RTLD_DI_LINKMAP = 2, RTLD_DI_CONFIGADDR = 3, /* Unsupported, defined by Solaris. */ /* Treat ARG as `Dl_serinfo *' (see below), and fill in to describe the directories that will be searched for dependencies of this object. RTLD_DI_SERINFOSIZE fills in just the `dls_cnt' and `dls_size' entries to indicate the size of the buffer that must be passed to RTLD_DI_SERINFO to fill in the full information. */ RTLD_DI_SERINFO = 4, RTLD_DI_SERINFOSIZE = 5, /* Treat ARG as `char *', and store there the directory name used to expand $ORIGIN in this shared object's dependency file names. */ RTLD_DI_ORIGIN = 6, RTLD_DI_PROFILENAME = 7, /* Unsupported, defined by Solaris. */ RTLD_DI_PROFILEOUT = 8, /* Unsupported, defined by Solaris. */ /* Treat ARG as `size_t *', and store there the TLS module ID of this object's PT_TLS segment, as used in TLS relocations; store zero if this object does not define a PT_TLS segment. */ RTLD_DI_TLS_MODID = 9, /* Treat ARG as `void **', and store there a pointer to the calling thread's TLS block corresponding to this object's PT_TLS segment. Store a null pointer if this object does not define a PT_TLS segment, or if the calling thread has not allocated a block for it. */ RTLD_DI_TLS_DATA = 10, //RTLD_DI_MAX = 10 } /* The MODE argument to `dlopen' contains one of the following: */ pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. */ pub const RTLD_NOW : i32 = 0x00002; /* Immediate function call binding. */ pub const RTLD_BINDING_MASK : i32 = 0x3 ; /* Mask of binding time value. */ pub const RTLD_NOLOAD : i32 = 0x00004; /* Do not load the object. */ pub const RTLD_DEEPBIND : i32 = 0x00008; /* Use deep binding. */ /* If the following bit is set in the MODE argument to `dlopen', * the symbols of the loaded object and its dependencies are made * visible as if the object were linked directly into the program. */ pub const RTLD_GLOBAL : i32 = 0x00100; /* Unix98 demands the following flag which is the inverse to RTLD_GLOBAL. * The implementation does this by default and so we can define the * value to zero. */ pub const RTLD_LOCAL : i32 = 0; /* Do not delete object when closed. */ pub const RTLD_NODELETE : i32 = 0x01000; struct Dl_info{ dli_fname: *mut c_char, /* File name of defining object. */ dli_fbase: *mut c_void, /* Load address of that object. */ dli_sname: *mut c_char, /* Name of nearest symbol. */ dli_saddr: *mut c_void, /* Exact value of nearest symbol. */ //dlerror } /* This is the type of elements in `Dl_serinfo', below. The `dls_name' member points to space in the buffer passed to `dlinfo'. */ struct Dl_serpath { dls_name: *mut c_char, /* Name of library search path directory. */ dls_flags: u32, /* Indicates where this directory came from. */ } /* This is the structure that must be passed (by reference) to `dlinfo' for the RTLD_DI_SERINFO and RTLD_DI_SERINFOSIZE requests. 
*/ struct Dl_serinfo { dls_size: usize, /* Size in bytes of the whole buffer. */ dls_cnt: u32, /* Number of elements in `dls_serpath'. */ dls_serpath: [Dl_serpath;1], /* Actually longer, dls_cnt elements. */ } //TODO //Think about changing from c_int to i32 or something extern "C" { pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void; pub fn dlsym(lib_handle: *mut c_void, name: *const c_char) -> *mut c_void; pub fn dlclose(lib_handle: *mut c_void) -> c_int; pub fn dlinfo(lib_handle: *mut c_void, request: c_int, info: *mut c_void) -> c_int; pub fn dlerror() -> *mut c_char; } pub struct DyLib(*mut c_void); pub fn open_lib( lib_path: &str, flag: i32 )->Result<DyLib, String>{unsafe{ //TODO //Get enums dlopen uses let shared_lib_handle = dlopen(CString::new(lib_path).unwrap().as_ptr(), flag as c_int); if shared_lib_handle.is_null(){ println!("{:?}", get_error()); Err(format!("Shared lib is null! {} Check file path/name.", lib_path)) } else{ Ok( DyLib(shared_lib_handle) ) } }} //Example //let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut()); pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{ let _fn = dlsym(shared_lib_handle.0, CString::new(name).unwrap().as_ptr()); if _fn.is_null() { Err("Function name could not be found.".to_string()) } else{ Ok(_fn as *mut () ) } }} pub fn get_error()->String{unsafe{ let error = dlerror(); if error.is_null(){ return "No Error".to_string(); } else{ CString::from_raw(error).into_string().unwrap() } }} pub fn close_lib(shared_lib_handle: &DyLib){unsafe{ if dlclose(shared_lib_handle.0) != 0{ println!("Could not properly close shared library."); } }} } #[cfg(target_os = "windows")] pub mod dynamic_lib_loading{ use std::os::raw::{c_int, c_void}; extern "C" { fn LoadLibraryA( path: *const i8 ) -> *mut c_void; fn GetProcAddress( lib: *mut c_void, name: *const i8 ) -> *mut c_void; fn FreeLibrary( lib: *mut c_void ) -> c_int; fn GetLastError() -> u32; } //TODO //This is temporary should be replaced by windows enums pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. 
*/ pub struct DyLib(*mut c_void); pub fn open_lib( lib_path: &str, _flag: i32 )->Result<DyLib, String>{unsafe{ let _path = lib_path.to_string() + "\0"; let lib = LoadLibraryA( _path.as_ptr() as *const i8); if lib.is_null(){ let s = format!("Could not open lib \n{:?}\n\n For more info => https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes", GetLastError()); return Err(s); } Ok(DyLib(lib as *mut c_void)) }} //Example //let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut()); pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{ let fn_name = name.to_string() + "\0"; let function = GetProcAddress(shared_lib_handle.0 as _, fn_name.as_ptr() as *const i8) as *mut (); if function.is_null(){ let s = format!("Could not get function \n{:?}", GetLastError()); return Err(s); } Ok(function) }} pub fn get_error()->String{ "Windows version has not been implemented".to_string() } pub fn close_lib(shared_lib_handle: &DyLib){unsafe{ if FreeLibrary(shared_lib_handle.0 as _) == 0{ println!("Could not properly close shared library."); println!("{}", format!("{:?}", GetLastError())); } }} } pub mod render_tools{ #[derive(PartialEq, Clone, Debug)] pub enum RenderType{ Image, Rectangle, String, PrintString, Empty, } impl Default for RenderType{ fn default()->Self{ RenderType::Empty } } #[derive(Default)] pub struct RenderStruct{ pub rendertype : RenderType, pub x: f32, pub y: f32, pub width: f32, pub height: f32, pub alpha : f32, //rect related things pub filled: bool, pub color: [f32;3], //image related things pub color_buffer: Vec<u8>, pub rgba_type: RGBA, pub new_width: Option<f32>,// NOTE Testing out using a factional new width pub new_height: Option<f32>,// NOTE Testing out using a factional new height //Stings pub char_buffer: String, pub font_size: u32 } #[derive(Default)] pub struct RenderInstructions{ pub buffer: Vec<RenderStruct>, } //TODO //This is a BAD name.... do better #[derive(Clone, Copy, PartialEq)] pub enum RGBA{ U8rgba, U8argb, U8rgb, Empty, //More maybe ... maybe not } impl RenderInstructions{ pub fn clear(&mut self){ self.buffer.clear(); } pub fn draw_rect(&mut self, rect: [f32; 4], color: [f32; 4], filled: bool){ let _color = [color[0], color[1], color[2]]; self.buffer.push( RenderStruct{rendertype: RenderType::Rectangle, x: rect[0], y:rect[1], width: rect[2], height: rect[3], alpha: color[3], filled: filled, color: _color, .. Default::default()}); } pub fn draw_string(&mut self, s: &str, color: [f32; 4], size: u32, x: f32, y: f32 ){ //TODO //should size be optional //shouldn't a good text size be choosen automatically //TODO //should color be optional //shouldn't a good text color be choosen automatically let _color = [color[0], color[1], color[2]]; self.buffer.push( RenderStruct{ rendertype: RenderType::String, x: x, y: y, alpha: color[3], color: _color, char_buffer: s.to_string(), font_size: size, .. Default::default()} ); } pub fn draw_bmp(&mut self, bmp: &Bitmap, x: f32, y: f32, alpha: f32, w: Option<f32>, h: Option<f32>){ //TODO //should x and y be options, Often I want to just draw the image where ever and have it //automagicly look good with corresponding text self.buffer.push( RenderStruct{rendertype: RenderType::Image, alpha: alpha, x: x, y: y, width: bmp.width as f32, height: bmp.height as f32, new_width: w, new_height: h, rgba_type: bmp.rgba_type, color_buffer: bmp.buffer.clone(), .. 
Default::default()} ); } pub fn println(&mut self, string: &str){ let buffer = "> ".to_string() + string; self.buffer.push( RenderStruct{ rendertype: RenderType::PrintString, alpha: 1.0, color: [1.0, 1.0, 1.0], char_buffer: buffer, font_size: 19, .. Default::default()} ); } } impl Default for RGBA{ fn default()->Self{ RGBA::Empty } } #[derive(Clone)] pub struct Bitmap{ //NOTE BMP should be 400 x 400 to start off. pub width: i32, pub height: i32, pub rgba_type: RGBA, pub buffer: Vec<u8>, } impl Bitmap{ pub fn new(w: i32, h: i32, rgba_type: RGBA)->Bitmap{ let _w = w as usize; let _h = h as usize; let v = match rgba_type{ RGBA::U8rgba=>{ vec![0u8; _w*_h*4] }, RGBA::U8argb=>{ vec![0u8; _w*_h*4] }, RGBA::U8rgb=>{ vec![0u8; _w*_h*3] }, _=>{ panic!("Not supported"); } //TODO Clean up }; Bitmap{ width: w, height: h, rgba_type: rgba_type, buffer: v, } } } pub struct BitmapContainer{ pub initialized : bool, pub bmp: Option<Bitmap>, } } pub mod memory_tools{ //TODO play around with this maybe //use std::alloc; use std::any::TypeId; const _FIXED_CHAR_BUFFER_SIZE : usize = 128; #[derive(Copy, Clone)] pub struct TinyString{ //NOTE //currently struct vars are public for debugging purposed. They should not be public. //NOTE //This should prob be a general tool pub buffer: [char; _FIXED_CHAR_BUFFER_SIZE], pub cursor: usize, } impl TinyString{ pub fn new()->TinyString{ TinyString{ buffer: ['\0'; _FIXED_CHAR_BUFFER_SIZE], cursor: 0, } } pub fn get(&self, index: usize)->&char{ &self.buffer[index] } pub fn get_mut(&mut self, index: usize)->&mut char{ &mut self.buffer[index] } pub fn len(&self)->usize{ self.cursor } pub fn push(&mut self, c: char)->Result< () , String>{ if self.len() >= _FIXED_CHAR_BUFFER_SIZE { return Err("Out sized tinystring".to_string()); } let _i = self.cursor; self[_i] = c; self.cursor += 1; return Ok( () ); } //TODO //check meaning of clone and copy in rust pub fn copystr(&mut self, s: &str){ let mut chars = s.chars(); for _ in 0.. 
_FIXED_CHAR_BUFFER_SIZE { match chars.next(){ Some(c)=> { let _i = self.cursor; self[_i] = c; self.cursor += 1; } _=> break } } } //TODO //check meaning of clone and copy in rust pub fn copy(&mut self, s: &TinyString){ for i in 0..s.len(){ self[i] = s[i]; } } pub fn is_samestr(&self, s: &str)->bool{ let mut chars = s.chars(); if self.len() != s.len(){ return false; } for i in 0..self.len(){ if self[i] != chars.next().unwrap(){ return false; } } return true; } pub fn is_same(&self, s: &TinyString)->bool{ if self.len() != s.len(){ return false; } for i in 0..self.len(){ if self[i] != s[i]{ return false; } } return true; } } impl std::ops::Index<usize> for TinyString{ type Output = char; fn index(&self, index: usize)->&Self::Output{ self.get(index) } } impl std::ops::IndexMut<usize> for TinyString{ fn index_mut(&mut self, index: usize)->&mut Self::Output{ self.get_mut(index) } } pub trait Storage{ fn get_storage(&mut self)->&mut [u8]; } impl Storage for GlobalStorage{ fn get_storage(&mut self)->&mut [u8]{ &mut self.storage } } pub struct Ptr<S: Storage>{ //TODO make backend_storage a Generic function that requires trait ptr: usize, type_hash: TypeId, backend_storage: *mut S } impl<S: Storage> Ptr<S>{ pub fn deref_mut<T: 'static>(&self)->&mut T{unsafe{ if self.type_hash != TypeId::of::<T>() { panic!("Could not dereference custom pointer do to failed type hash comparison.");} let gs = self.backend_storage.as_mut().unwrap(); ((&mut gs.get_storage()[self.ptr] as *mut u8 ) as *mut T).as_mut().unwrap() }} } impl<S: Storage> std::clone::Clone for Ptr<S>{ fn clone(&self)->Self{ Ptr{ ptr: self.ptr, type_hash: self.type_hash, backend_storage: self.backend_storage, } } } pub struct DyArray<T>{ ptr: Ptr<GlobalStorage>, length: usize, capacity: usize, phantom: std::marker::PhantomData<T>, } impl<T: 'static> DyArray<T>{ pub fn push(&mut self, v: T) {unsafe{ let gs = self.ptr.backend_storage.as_mut().unwrap(); if self.length >= self.capacity{ let length = self.length * std::mem::size_of::<T>(); let old_ptr = self.ptr.clone(); let new_ptr = gs.realloc::<T>(old_ptr, length, self.capacity); self.ptr = new_ptr; return; } { let cursor = self.ptr.ptr + self.length * std::mem::size_of::<T>(); gs.write_to(v, cursor).expect("Global storage could not write to index."); self.length +=1; } }} pub fn new(gs: &mut GlobalStorage)->DyArray<T>{ DyArray::<T>::with_capacity(gs, 5) } pub fn with_capacity(gs: &mut GlobalStorage, size: usize)->DyArray<T>{ let ptr = gs.alloc_multi_empty::<T>( size ); DyArray{ ptr: ptr, length: 0, capacity: size, phantom: std::marker::PhantomData } } pub fn get(&self, index: usize)->&T{unsafe{ if index > self.length { panic!("Index bounds error."); } let base = self.ptr.ptr; let address = base + index * std::mem::size_of::<T>(); let gs = self.ptr.backend_storage.as_mut().unwrap(); ((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap() }} pub fn get_mut(&mut self, index: usize)->&mut T{unsafe{ if index > self.length { panic!("Index bounds error."); } let base = self.ptr.ptr; let address = base + index * std::mem::size_of::<T>(); let gs = self.ptr.backend_storage.as_mut().unwrap(); ((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap() }} } //TODO //Convert Global and Local storage into a general storage storage thing where storage can be //either a fixed or dynamic array. 
pub struct GlobalStorage{ pub storage: Vec<u8>, pub storage_filled: Vec<bool>, //TODO space footprint improvement use bits in u8 reference: [TinyString;100], //This is fixed size because I really want to stop myself from over populating the global space stored_ptr: Vec<Ptr<GlobalStorage>>, } impl GlobalStorage{ pub fn new()->GlobalStorage{ GlobalStorage{ storage: Vec::with_capacity(1028*1028*4), //This is still prob too small storage_filled: Vec::with_capacity(1028*1028*4), //TODO //reference needs to store the ptr index TODO reference: [TinyString::new(); 100], stored_ptr: Vec::new(), } } pub fn alloc<T: 'static>(&mut self, v: T)->Ptr<GlobalStorage>{unsafe{ let size = std::mem::size_of::<T>(); let src = (&v as *const T) as *const u8; let cursor = self.storage.len(); for i in 0..size{ //TODO //SLOW SPEED ME UP //I don't think I want to be pushing every item like this //TODO //byte alignments self.storage.push(*src.offset(i as isize)); self.storage_filled.push(true); } return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _}; }} pub fn alloc_multi_empty<T: 'static>(&mut self, multiples: usize)->Ptr<GlobalStorage>{ let size = std::mem::size_of::<T>() * multiples; let cursor = self.storage.len(); for _i in 0..size{ //TODO //SLOW SPEED ME UP //I don't think I want to be pushing every item like this //TODO //byte alignments self.storage.push(0); self.storage_filled.push(true); } return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _}; } pub fn realloc<T>(&mut self, ptr: Ptr<GlobalStorage>, index_back: usize, additional_space: usize)->Ptr<GlobalStorage>{ //TODO //SLOW SPEED UP let cursor = self.storage.len(); let index_front = ptr.ptr; for i in index_front..index_back{ let temp = self.storage[i]; self.storage_filled[i] = false; self.storage.push(temp); self.storage_filled.push(true); } for _i in 0..additional_space{ self.storage.push(0); self.storage_filled.push(true); } return Ptr{ ptr: cursor, type_hash: ptr.type_hash, backend_storage: self as *mut _}; } pub unsafe fn write_to<T: 'static>(&mut self, v: T, at_index: usize)->Result<(),String>{ let size = std::mem::size_of::<T>(); let src = (&v as *const T) as *const u8; if at_index >= self.storage.len() { return Err("Writing outside the bounds of memory allocated to global storage".to_string()); } let cursor = at_index; for i in 0..size{ //TODO //SLOW SPEED ME UP //I don't think I want to be writing every item like this //TODO //byte alignments if !self.storage_filled[cursor+i] { panic!("Storage has not allocated this memory.") } self.storage[cursor+i] = *src.offset(i as isize); } return Ok(()); } pub fn store<T: 'static>(&mut self, v: T, name: &str)->Result<(), String>{ if name.len() > _FIXED_CHAR_BUFFER_SIZE { return Err(format!("storage name is too damn long.
Name should be {} chars long.", _FIXED_CHAR_BUFFER_SIZE)); } let cursor = self.stored_ptr.len(); for it in self.reference.iter() { if it.is_samestr(name){ return Err(format!("Global Storage name collision: {}", name)); } } self.reference[cursor].copystr( name ); let ptr = self.alloc(v); self.stored_ptr.push(ptr); return Ok(()); } pub fn get<T: 'static>(&mut self, name: &str)->Result<&mut T, String>{ let mut isgood = false; let mut ptr_index = 0; for (i, it) in self.reference.iter().enumerate() { if it.is_samestr(name){ ptr_index = i; isgood = true; } } if isgood == false { return Err(format!("Name not found in Global Storage: {}", name)); } let ptr = &self.stored_ptr[ptr_index]; return Ok(ptr.deref_mut::<T>()); } // } pub struct LocalStorage{ //NOTE //This seems to be a good idea when it comes to interactive panels //However I'm not sure about the usefulness else where.... // // //Why should the local buffer be fixed sized. This doesn't really make sense. pub interactive: bool, pub storage: GlobalStorage, } impl LocalStorage{ pub fn new()->LocalStorage{ LocalStorage{ interactive: false, storage: GlobalStorage::new(), } } } } pub mod interaction_tools{ pub enum KeyboardEnum{ Rightarrow, Leftarrow, Uparrow, Downarrow, Enter, Default } impl Default for KeyboardEnum{ fn default()->Self{ KeyboardEnum::Default } } pub enum ButtonStatus{ Up, Down, Default } impl Default for ButtonStatus{ fn default()->Self{ ButtonStatus::Default } } #[derive(Default)] pub struct InteractiveInfo{ //TODO add some frame info pub infocus: bool, pub mouse_x: f32, pub mouse_y: f32, pub text_key_pressed: char, pub keyboard_key: Vec<KeyboardEnum>, pub keyboard_key_status: Vec<ButtonStatus>, pub frames: u64, } } #[test] fn globalstorage_alloc_and_store(){ //TODO rename use memory_tools::GlobalStorage; let mut gls = GlobalStorage::new(); { let mut a = [10u8; 4]; gls.store(a, "a"); } let mut b = [10u8; 4]; assert_eq!(b, *gls.get::<[u8;4]>("a").unwrap()); } #[test] fn globalstorage_vec(){ //TODO rename use memory_tools::{GlobalStorage, DyArray}; let mut gls = GlobalStorage::new(); let mut dy = DyArray::<u32>::new(&mut gls); dy.push(12); dy.push(123); dy.push(1231); //let a = dy.get(0); //assert_eq!(12, *a); println!("print test"); let a = dy.get(1); assert_eq!(123, *a); let a = dy.get(2); assert_eq!(1231, *a); /* Does not compile for some reason * I need to inform the rust folks assert_eq!(12, *(dy.get(0))); assert_eq!(123, *(dy,get(1))); assert_eq!(1231, *(dy.get(2))); */ } #[test] fn
(){ //TODO rename use memory_tools::{GlobalStorage, DyArray}; let mut gls = GlobalStorage::new(); let mut a = gls.alloc(123.09f32); let mut dy = DyArray::<u32>::new(&mut gls); let mut b = gls.alloc(123i32); dy.push(12); dy.push(123); dy.push(1231); //let a = dy.get(0); //assert_eq!(12, *a); let ab = dy.get(1); assert_eq!(123, *ab); let ab = dy.get(2); assert_eq!(1231, *ab); } #[test] fn global_storage_vec3(){ //TODO rename use memory_tools::{GlobalStorage, DyArray}; let mut gls = GlobalStorage::new(); { let mut dy = DyArray::<u32>::new(&mut gls); dy.push(12); dy.push(123); dy.push(1231); gls.store(dy, "dy"); } let a = gls.get::<DyArray<u32>>("dy").unwrap(); let ab = a.get(0); assert_eq!(12, *ab); }
global_storage_vec2
identifier_name
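// --- Added example (not part of the original dataset row) -------------------
// A minimal consumer sketch for the dynamic_lib_loading module above. The
// library path ./libanswer.so and the exported symbol `answer` are both
// hypothetical; get_fn returns a raw `*mut ()`, so the caller must transmute
// it to the real signature, which is only sound if the symbol actually has
// that signature in the loaded cdylib.
use dynamic_lib_loading::{open_lib, get_fn, close_lib, RTLD_LAZY};

fn call_answer() -> Result<i32, String> {
    let lib = open_lib("./libanswer.so", RTLD_LAZY)?;
    let raw = get_fn(&lib, "answer")?;
    // Safety: `answer` must really be `extern "C" fn() -> i32` in the cdylib.
    let answer: extern "C" fn() -> i32 = unsafe { std::mem::transmute(raw) };
    let result = answer();
    close_lib(&lib);
    Ok(result)
}
// -----------------------------------------------------------------------------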
lib.rs
#[cfg(target_os = "linux")] pub mod dynamic_lib_loading{ #![allow(unused_imports)] #![allow(non_camel_case_types)] #![allow(dead_code)] use std::ffi::CString; use std::ptr; use std::os::raw::{c_int, c_char, c_void}; // //This is a lib of dlopen and dlclose using rust //Comments copied from the following source files // /usr/include/dlfcn.h // https://www.unvanquished.net/~modi/code/include/x86_64-linux-gnu/bits/dlfcn.h.html /* These are the possible values for the REQUEST argument to `dlinfo'. */ enum DL_INFO{ /* Treat ARG as `lmid_t *'; store namespace ID for HANDLE there. */ RTLD_DI_LMID = 1, /* Treat ARG as `struct link_map **'; store the `struct link_map *' for HANDLE there. */ RTLD_DI_LINKMAP = 2, RTLD_DI_CONFIGADDR = 3, /* Unsupported, defined by Solaris. */ /* Treat ARG as `Dl_serinfo *' (see below), and fill in to describe the directories that will be searched for dependencies of this object. RTLD_DI_SERINFOSIZE fills in just the `dls_cnt' and `dls_size' entries to indicate the size of the buffer that must be passed to RTLD_DI_SERINFO to fill in the full information. */ RTLD_DI_SERINFO = 4, RTLD_DI_SERINFOSIZE = 5, /* Treat ARG as `char *', and store there the directory name used to expand $ORIGIN in this shared object's dependency file names. */ RTLD_DI_ORIGIN = 6, RTLD_DI_PROFILENAME = 7, /* Unsupported, defined by Solaris. */ RTLD_DI_PROFILEOUT = 8, /* Unsupported, defined by Solaris. */ /* Treat ARG as `size_t *', and store there the TLS module ID of this object's PT_TLS segment, as used in TLS relocations; store zero if this object does not define a PT_TLS segment. */ RTLD_DI_TLS_MODID = 9, /* Treat ARG as `void **', and store there a pointer to the calling thread's TLS block corresponding to this object's PT_TLS segment. Store a null pointer if this object does not define a PT_TLS segment, or if the calling thread has not allocated a block for it. */ RTLD_DI_TLS_DATA = 10, //RTLD_DI_MAX = 10 } /* The MODE argument to `dlopen' contains one of the following: */ pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. */ pub const RTLD_NOW : i32 = 0x00002; /* Immediate function call binding. */ pub const RTLD_BINDING_MASK : i32 = 0x3 ; /* Mask of binding time value. */ pub const RTLD_NOLOAD : i32 = 0x00004; /* Do not load the object. */ pub const RTLD_DEEPBIND : i32 = 0x00008; /* Use deep binding. */ /* If the following bit is set in the MODE argument to `dlopen', * the symbols of the loaded object and its dependencies are made * visible as if the object were linked directly into the program. */ pub const RTLD_GLOBAL : i32 = 0x00100; /* Unix98 demands the following flag which is the inverse to RTLD_GLOBAL. * The implementation does this by default and so we can define the * value to zero. */ pub const RTLD_LOCAL : i32 = 0; /* Do not delete object when closed. */ pub const RTLD_NODELETE : i32 = 0x01000; struct Dl_info{ dli_fname: *mut c_char, /* File name of defining object. */ dli_fbase: *mut c_void, /* Load address of that object. */ dli_sname: *mut c_char, /* Name of nearest symbol. */ dli_saddr: *mut c_void, /* Exact value of nearest symbol. */ //dlerror } /* This is the type of elements in `Dl_serinfo', below. The `dls_name' member points to space in the buffer passed to `dlinfo'. */ struct Dl_serpath { dls_name: *mut c_char, /* Name of library search path directory. */ dls_flags: u32, /* Indicates where this directory came from. */ } /* This is the structure that must be passed (by reference) to `dlinfo' for the RTLD_DI_SERINFO and RTLD_DI_SERINFOSIZE requests. 
*/ struct Dl_serinfo { dls_size: usize, /* Size in bytes of the whole buffer. */ dls_cnt: u32, /* Number of elements in `dls_serpath'. */ dls_serpath: [Dl_serpath;1], /* Actually longer, dls_cnt elements. */ } //TODO //Think about changing from c_int to i32 or something extern "C" { pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void; pub fn dlsym(lib_handle: *mut c_void, name: *const c_char) -> *mut c_void; pub fn dlclose(lib_handle: *mut c_void) -> c_int; pub fn dlinfo(lib_handle: *mut c_void, request: c_int, info: *mut c_void) -> c_int; pub fn dlerror() -> *mut c_char; } pub struct DyLib(*mut c_void); pub fn open_lib( lib_path: &str, flag: i32 )->Result<DyLib, String>{unsafe{ //TODO //Get enums dlopen uses let shared_lib_handle = dlopen(CString::new(lib_path).unwrap().as_ptr(), flag as c_int); if shared_lib_handle.is_null(){ println!("{:?}", get_error()); Err(format!("Shared lib is null! {} Check file path/name.", lib_path)) } else{ Ok( DyLib(shared_lib_handle) ) } }} //Example //let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut()); pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{ let _fn = dlsym(shared_lib_handle.0, CString::new(name).unwrap().as_ptr()); if _fn.is_null() { Err("Function name could not be found.".to_string()) } else{ Ok(_fn as *mut () ) } }} pub fn get_error()->String{unsafe{ let error = dlerror(); if error.is_null(){ return "No Error".to_string(); } else{ CString::from_raw(error).into_string().unwrap() } }} pub fn close_lib(shared_lib_handle: &DyLib){unsafe{ if dlclose(shared_lib_handle.0) != 0{ println!("Could not properly close shared library."); } }} } #[cfg(target_os = "windows")] pub mod dynamic_lib_loading{ use std::os::raw::{c_int, c_void}; extern "C" { fn LoadLibraryA( path: *const i8 ) -> *mut c_void; fn GetProcAddress( lib: *mut c_void, name: *const i8 ) -> *mut c_void; fn FreeLibrary( lib: *mut c_void ) -> c_int; fn GetLastError() -> u32; } //TODO //This is temporary should be replaced by windows enums pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. 
*/ pub struct DyLib(*mut c_void); pub fn open_lib( lib_path: &str, _flag: i32 )->Result<DyLib, String>{unsafe{ let _path = lib_path.to_string() + "\0"; let lib = LoadLibraryA( _path.as_ptr() as *const i8); if lib.is_null(){ let s = format!("Could not open lib \n{:?}\n\n For more info => https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes", GetLastError()); return Err(s); } Ok(DyLib(lib as *mut c_void)) }} //Example //let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut()); pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{ let fn_name = name.to_string() + "\0"; let function = GetProcAddress(shared_lib_handle.0 as _, fn_name.as_ptr() as *const i8) as *mut (); if function.is_null(){ let s = format!("Could not get function \n{:?}", GetLastError()); return Err(s); } Ok(function) }} pub fn get_error()->String{ "Windows version has not been implemented".to_string() } pub fn close_lib(shared_lib_handle: &DyLib){unsafe{ if FreeLibrary(shared_lib_handle.0 as _) == 0{ println!("Could not properly close shared library."); println!("{}", format!("{:?}", GetLastError())); } }} } pub mod render_tools{ #[derive(PartialEq, Clone, Debug)] pub enum RenderType{ Image, Rectangle, String, PrintString, Empty, } impl Default for RenderType{ fn default()->Self{ RenderType::Empty } } #[derive(Default)] pub struct RenderStruct{ pub rendertype : RenderType, pub x: f32, pub y: f32, pub width: f32, pub height: f32, pub alpha : f32, //rect related things pub filled: bool, pub color: [f32;3],
pub new_width: Option<f32>,// NOTE Testing out using a factional new width pub new_height: Option<f32>,// NOTE Testing out using a factional new height //Stings pub char_buffer: String, pub font_size: u32 } #[derive(Default)] pub struct RenderInstructions{ pub buffer: Vec<RenderStruct>, } //TODO //This is a BAD name.... do better #[derive(Clone, Copy, PartialEq)] pub enum RGBA{ U8rgba, U8argb, U8rgb, Empty, //More maybe ... maybe not } impl RenderInstructions{ pub fn clear(&mut self){ self.buffer.clear(); } pub fn draw_rect(&mut self, rect: [f32; 4], color: [f32; 4], filled: bool){ let _color = [color[0], color[1], color[2]]; self.buffer.push( RenderStruct{rendertype: RenderType::Rectangle, x: rect[0], y:rect[1], width: rect[2], height: rect[3], alpha: color[3], filled: filled, color: _color, .. Default::default()}); } pub fn draw_string(&mut self, s: &str, color: [f32; 4], size: u32, x: f32, y: f32 ){ //TODO //should size be optional //shouldn't a good text size be choosen automatically //TODO //should color be optional //shouldn't a good text color be choosen automatically let _color = [color[0], color[1], color[2]]; self.buffer.push( RenderStruct{ rendertype: RenderType::String, x: x, y: y, alpha: color[3], color: _color, char_buffer: s.to_string(), font_size: size, .. Default::default()} ); } pub fn draw_bmp(&mut self, bmp: &Bitmap, x: f32, y: f32, alpha: f32, w: Option<f32>, h: Option<f32>){ //TODO //should x and y be options, Often I want to just draw the image where ever and have it //automagicly look good with corresponding text self.buffer.push( RenderStruct{rendertype: RenderType::Image, alpha: alpha, x: x, y: y, width: bmp.width as f32, height: bmp.height as f32, new_width: w, new_height: h, rgba_type: bmp.rgba_type, color_buffer: bmp.buffer.clone(), .. Default::default()} ); } pub fn println(&mut self, string: &str){ let buffer = "> ".to_string() + string; self.buffer.push( RenderStruct{ rendertype: RenderType::PrintString, alpha: 1.0, color: [1.0, 1.0, 1.0], char_buffer: buffer, font_size: 19, .. Default::default()} ); } } impl Default for RGBA{ fn default()->Self{ RGBA::Empty } } #[derive(Clone)] pub struct Bitmap{ //NOTE BMP should be 400 x 400 to start off. pub width: i32, pub height: i32, pub rgba_type: RGBA, pub buffer: Vec<u8>, } impl Bitmap{ pub fn new(w: i32, h: i32, rgba_type: RGBA)->Bitmap{ let _w = w as usize; let _h = h as usize; let v = match rgba_type{ RGBA::U8rgba=>{ vec![0u8; _w*_h*4] }, RGBA::U8argb=>{ vec![0u8; _w*_h*4] }, RGBA::U8rgb=>{ vec![0u8; _w*_h*3] }, _=>{ panic!("Not supported"); } //TODO Clean up }; Bitmap{ width: w, height: h, rgba_type: rgba_type, buffer: v, } } } pub struct BitmapContainer{ pub initialized : bool, pub bmp: Option<Bitmap>, } } pub mod memory_tools{ //TODO play around with this maybe //use std::alloc; use std::any::TypeId; const _FIXED_CHAR_BUFFER_SIZE : usize = 128; #[derive(Copy, Clone)] pub struct TinyString{ //NOTE //currently struct vars are public for debugging purposed. They should not be public. 
//NOTE //This should prob be a general tool pub buffer: [char; _FIXED_CHAR_BUFFER_SIZE], pub cursor: usize, } impl TinyString{ pub fn new()->TinyString{ TinyString{ buffer: ['\0'; _FIXED_CHAR_BUFFER_SIZE], cursor: 0, } } pub fn get(&self, index: usize)->&char{ &self.buffer[index] } pub fn get_mut(&mut self, index: usize)->&mut char{ &mut self.buffer[index] } pub fn len(&self)->usize{ self.cursor } pub fn push(&mut self, c: char)->Result< () , String>{ if self.len() >= _FIXED_CHAR_BUFFER_SIZE { return Err("Out sized tinystring".to_string()); } let _i = self.cursor; self[_i] = c; self.cursor += 1; return Ok( () ); } //TODO //check meaning of clone and copy in rust pub fn copystr(&mut self, s: &str){ let mut chars = s.chars(); for _ in 0.. _FIXED_CHAR_BUFFER_SIZE { match chars.next(){ Some(c)=> { let _i = self.cursor; self[_i] = c; self.cursor += 1; } _=> break } } } //TODO //check meaning of clone and copy in rust pub fn copy(&mut self, s: &TinyString){ for i in 0..s.len(){ self[i] = s[i]; } } pub fn is_samestr(&self, s: &str)->bool{ let mut chars = s.chars(); if self.len() != s.len(){ return false; } for i in 0..self.len(){ if self[i] != chars.next().unwrap(){ return false; } } return true; } pub fn is_same(&self, s: &TinyString)->bool{ if self.len() != s.len(){ return false; } for i in 0..self.len(){ if self[i] != s[i]{ return false; } } return true; } } impl std::ops::Index<usize> for TinyString{ type Output = char; fn index(&self, index: usize)->&Self::Output{ self.get(index) } } impl std::ops::IndexMut<usize> for TinyString{ fn index_mut(&mut self, index: usize)->&mut Self::Output{ self.get_mut(index) } } pub trait Storage{ fn get_storage(&mut self)->&mut [u8]; } impl Storage for GlobalStorage{ fn get_storage(&mut self)->&mut [u8]{ &mut self.storage } } pub struct Ptr<S: Storage>{ //TODO make backend_storage a Generic function that requires trait ptr: usize, type_hash: TypeId, backend_storage: *mut S } impl<S: Storage> Ptr<S>{ pub fn deref_mut<T: 'static>(&self)->&mut T{unsafe{ if self.type_hash != TypeId::of::<T>() { panic!("Could not dereference custom pointer do to failed type hash comparison.");} let gs = self.backend_storage.as_mut().unwrap(); ((&mut gs.get_storage()[self.ptr] as *mut u8 ) as *mut T).as_mut().unwrap() }} } impl<S: Storage> std::clone::Clone for Ptr<S>{ fn clone(&self)->Self{ Ptr{ ptr: self.ptr, type_hash: self.type_hash, backend_storage: self.backend_storage, } } } pub struct DyArray<T>{ ptr: Ptr<GlobalStorage>, length: usize, capacity: usize, phantom: std::marker::PhantomData<T>, } impl<T: 'static> DyArray<T>{ pub fn push(&mut self, v: T) {unsafe{ let gs = self.ptr.backend_storage.as_mut().unwrap(); if self.length >= self.capacity{ let length = self.length * std::mem::size_of::<T>(); let old_ptr = self.ptr.clone(); let new_ptr = gs.realloc::<T>(old_ptr, length, self.capacity); self.ptr = new_ptr; return; } { let cursor = self.ptr.ptr + self.length * std::mem::size_of::<T>(); gs.write_to(v, cursor).expect("Global storage could not write to index."); self.length +=1; } }} pub fn new(gs: &mut GlobalStorage)->DyArray<T>{ DyArray::<T>::with_capacity(gs, 5) } pub fn with_capacity(gs: &mut GlobalStorage, size: usize)->DyArray<T>{ let ptr = gs.alloc_multi_empty::<T>( size ); DyArray{ ptr: ptr, length: 0, capacity: size, phantom: std::marker::PhantomData } } pub fn get(&self, index: usize)->&T{unsafe{ if index > self.length { panic!("Index bounds error."); } let base = self.ptr.ptr; let address = base + index * std::mem::size_of::<T>(); let gs = 
self.ptr.backend_storage.as_mut().unwrap(); ((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap() }} pub fn get_mut(&mut self, index: usize)->&mut T{unsafe{ if index > self.length { panic!("Index bounds error."); } let base = self.ptr.ptr; let address = base + index * std::mem::size_of::<T>(); let gs = self.ptr.backend_storage.as_mut().unwrap(); ((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap() }} } //TODO //Convert Global and Local storage into a general storage abstraction where storage can be //either a fixed or dynamic array. pub struct GlobalStorage{ pub storage: Vec<u8>, pub storage_filled: Vec<bool>, //TODO space footprint improvement use bits in u8 reference: [TinyString;100], //This is fixed size because I really want to stop myself from over populating the global space stored_ptr: Vec<Ptr<GlobalStorage>>, } impl GlobalStorage{ pub fn new()->GlobalStorage{ GlobalStorage{ storage: Vec::with_capacity(1028*1028*4), //This is still prob too small storage_filled: Vec::with_capacity(1028*1028*4), //TODO //reference needs to store the ptr index TODO reference: [TinyString::new(); 100], stored_ptr: Vec::new(), } } pub fn alloc<T: 'static>(&mut self, v: T)->Ptr<GlobalStorage>{unsafe{ let size = std::mem::size_of::<T>(); let src = (&v as *const T) as *const u8; let cursor = self.storage.len(); for i in 0..size{ //TODO //SLOW SPEED ME UP //I don't think I want to be pushing every item like this //TODO //byte alignments self.storage.push(*src.offset(i as isize)); self.storage_filled.push(true); } return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _}; }} pub fn alloc_multi_empty<T: 'static>(&mut self, multiples: usize)->Ptr<GlobalStorage>{ let size = std::mem::size_of::<T>() * multiples; let cursor = self.storage.len(); for _i in 0..size{ //TODO //SLOW SPEED ME UP //I don't think I want to be pushing every item like this //TODO //byte alignments self.storage.push(0); self.storage_filled.push(true); } return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _}; } pub fn realloc<T>(&mut self, ptr: Ptr<GlobalStorage>, index_back: usize, additional_space: usize)->Ptr<GlobalStorage>{ //TODO //SLOW SPEED UP let cursor = self.storage.len(); let index_front = ptr.ptr; for i in index_front..index_back{ let temp = self.storage[i]; self.storage_filled[i] = false; self.storage.push(temp); self.storage_filled.push(true); } for _i in 0..additional_space{ self.storage.push(0); self.storage_filled.push(true); } return Ptr{ ptr: cursor, type_hash: ptr.type_hash, backend_storage: self as *mut _}; } pub unsafe fn write_to<T: 'static>(&mut self, v: T, at_index: usize)->Result<(),String>{ let size = std::mem::size_of::<T>(); let src = (&v as *const T) as *const u8; if at_index >= self.storage.len() { return Err("Writing outside the bounds of memory allocated to global storage".to_string()); } let cursor = at_index; for i in 0..size{ //TODO //SLOW SPEED ME UP //I don't think I want to be writing every item like this //TODO //byte alignments if !self.storage_filled[cursor+i] { panic!("Storage has not allocated this memory.") } self.storage[cursor+i] = *src.offset(i as isize); } return Ok(()); } pub fn store<T: 'static>(&mut self, v: T, name: &str)->Result<(), String>{ if name.len() > _FIXED_CHAR_BUFFER_SIZE { return Err(format!("storage name is too damn long.
Name should be {} chars long.", _FIXED_CHAR_BUFFER_SIZE)); } let cursor = self.stored_ptr.len(); for it in self.reference.iter() { if it.is_samestr(name){ return Err(format!("Global Storage name collision: {}", name)); } } self.reference[cursor].copystr( name ); let ptr = self.alloc(v); self.stored_ptr.push(ptr); return Ok(()); } pub fn get<T: 'static>(&mut self, name: &str)->Result<&mut T, String>{ let mut isgood = false; let mut ptr_index = 0; for (i, it) in self.reference.iter().enumerate() { if it.is_samestr(name){ ptr_index = i; isgood = true; } } if isgood == false { return Err(format!("Name not found in Global Storage: {}", name)); } let ptr = &self.stored_ptr[ptr_index]; return Ok(ptr.deref_mut::<T>()); } // } pub struct LocalStorage{ //NOTE //This seems to be a good idea when it comes to interactive panels //However I'm not sure about the usefulness else where.... // // //Why should the local buffer be fixed sized. This doesn't really make sense. pub interactive: bool, pub storage: GlobalStorage, } impl LocalStorage{ pub fn new()->LocalStorage{ LocalStorage{ interactive: false, storage: GlobalStorage::new(), } } } } pub mod interaction_tools{ pub enum KeyboardEnum{ Rightarrow, Leftarrow, Uparrow, Downarrow, Enter, Default } impl Default for KeyboardEnum{ fn default()->Self{ KeyboardEnum::Default } } pub enum ButtonStatus{ Up, Down, Default } impl Default for ButtonStatus{ fn default()->Self{ ButtonStatus::Default } } #[derive(Default)] pub struct InteractiveInfo{ //TODO add some frame info pub infocus: bool, pub mouse_x: f32, pub mouse_y: f32, pub text_key_pressed: char, pub keyboard_key: Vec<KeyboardEnum>, pub keyboard_key_status: Vec<ButtonStatus>, pub frames: u64, } } #[test] fn globalstorage_alloc_and_store(){ //TODO rename use memory_tools::GlobalStorage; let mut gls = GlobalStorage::new(); { let mut a = [10u8; 4]; gls.store(a, "a"); } let mut b = [10u8; 4]; assert_eq!(b, *gls.get::<[u8;4]>("a").unwrap()); } #[test] fn globalstorage_vec(){ //TODO rename use memory_tools::{GlobalStorage, DyArray}; let mut gls = GlobalStorage::new(); let mut dy = DyArray::<u32>::new(&mut gls); dy.push(12); dy.push(123); dy.push(1231); //let a = dy.get(0); //assert_eq!(12, *a); println!("print test"); let a = dy.get(1); assert_eq!(123, *a); let a = dy.get(2); assert_eq!(1231, *a); /* Does not compile for some reason * I need to inform the rust folks assert_eq!(12, *(dy.get(0))); assert_eq!(123, *(dy,get(1))); assert_eq!(1231, *(dy.get(2))); */ } #[test] fn global_storage_vec2(){ //TODO rename use memory_tools::{GlobalStorage, DyArray}; let mut gls = GlobalStorage::new(); let mut a = gls.alloc(123.09f32); let mut dy = DyArray::<u32>::new(&mut gls); let mut b = gls.alloc(123i32); dy.push(12); dy.push(123); dy.push(1231); //let a = dy.get(0); //assert_eq!(12, *a); let ab = dy.get(1); assert_eq!(123, *ab); let ab = dy.get(2); assert_eq!(1231, *ab); } #[test] fn global_storage_vec3(){ //TODO rename use memory_tools::{GlobalStorage, DyArray}; let mut gls = GlobalStorage::new(); { let mut dy = DyArray::<u32>::new(&mut gls); dy.push(12); dy.push(123); dy.push(1231); gls.store(dy, "dy"); } let a = gls.get::<DyArray<u32>>("dy").unwrap(); let ab = a.get(0); assert_eq!(12, *ab); }
//image related things pub color_buffer: Vec<u8>, pub rgba_type: RGBA,
random_line_split
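// --- Added note and sketch (not part of the original dataset row) -----------
// DyArray::push above has a bug: when length == capacity it reallocates and
// then returns early, so the pushed value is silently dropped and `length`
// and `capacity` are never updated. It also hands realloc a relative byte
// length where the `index_front..index_back` loop reads an absolute end
// index. A corrected sketch of the method body, as it would appear inside
// impl<T: 'static> DyArray<T>, assuming `index_back` is meant to be the
// absolute byte index one past the old block:
pub fn push(&mut self, v: T) { unsafe {
    let gs = self.ptr.backend_storage.as_mut().unwrap();
    if self.length >= self.capacity {
        let end = self.ptr.ptr + self.length * std::mem::size_of::<T>();
        let extra_bytes = self.capacity * std::mem::size_of::<T>(); // double the buffer
        self.ptr = gs.realloc::<T>(self.ptr.clone(), end, extra_bytes);
        self.capacity *= 2;
    }
    let cursor = self.ptr.ptr + self.length * std::mem::size_of::<T>();
    gs.write_to(v, cursor).expect("Global storage could not write to index.");
    self.length += 1;
}}
// Relatedly, get/get_mut guard with `index > self.length`; valid indices are
// 0..length, so `index >= self.length` would be the safer bounds check.
// -----------------------------------------------------------------------------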
map_loader.rs
use serde_json::Value; use specs::prelude::*; use std::collections::HashMap; use std::path::Path; use super::super::super::prelude::{ hex_color, Attribute, Attributes, BackgroundColor, CanBeEmpty, Color, GlobalTileIndex, InventoryLayer, ItemRecord, Layer, LayerData, Object, Position, Screen, Sprite, Tiledmap, ToonRecord, ZLevel, V2, }; /// The result of loading one or more Tiled layers into the ECS. pub struct LoadedLayers { /// All the top-level entities loaded pub top_level_entities: Vec<Entity>, /// A HashMap of all entities loaded within a layer group, keyed by the group /// layer's name. pub groups: HashMap<String, Vec<Entity>>, } impl LoadedLayers { pub fn new() -> LoadedLayers { LoadedLayers { top_level_entities: vec![], groups: HashMap::new(), } } pub fn all_entities(&self) -> Vec<Entity> { let mut tops = self.top_level_entities.clone(); let mut groups: Vec<Entity> = self.groups.values().flat_map(|es| es.clone()).collect(); tops.append(&mut groups); tops } pub fn append_entities(&mut self, other: LoadedLayers) { let other_tops = other.top_level_entities.into_iter(); let other_groups = other.groups.into_iter(); self.top_level_entities.extend(other_tops); self.groups.extend(other_groups); } } pub struct MapLoader<'a> { loaded_maps: HashMap<String, Tiledmap>, pub z_level: ZLevel, pub world: &'a mut World, pub origin: V2, pub layer_group: Option<String>, pub sprite: Option<Entity>, } impl<'a> MapLoader<'a> { pub fn load_it(file: String, lazy: &LazyUpdate) { lazy.exec_mut(|world| { let mut loader = MapLoader::new(world); let file = file; let map: &Tiledmap = loader.load_map(&file); // Get the background color based on the loaded map let bg: Color = map .backgroundcolor .as_ref() .map(|s: &String| { hex_color(s.as_str()) .map_err(|e| format!("{:?}", e)) .map(|(_, c)| c) }) .unwrap_or(Ok(Color::rgb(0, 0, 0))) .unwrap() .clone(); let width: u32 = map .get_property_by_name("viewport_width") .map(|value: &Value| { value .as_i64() .expect("map's 'viewport_width' property type must be unsigned int") as u32 }) .unwrap_or(map.width as u32 * map.tilewidth as u32); // Get the screen size based on the loaded map let height: u32 = map .get_property_by_name("viewport_height") .map(|value: &Value| { value.as_i64().expect( "map's 'viewport_height' property type must be unsigned int", ) as u32 }) .unwrap_or(map.height as u32 * map.tileheight as u32); let res = loader.load(&file, None, None); match res { Ok(_) => {} Err(msg) => panic!(msg), } let mut screen = world.write_resource::<Screen>(); screen.set_size((width, height)); let mut background_color = world.write_resource::<BackgroundColor>(); background_color.0 = bg; }); } /// Create a new MapLoader pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> { MapLoader { loaded_maps: HashMap::new(), z_level: ZLevel(0.0), world, origin: V2::new(0.0, 0.0), layer_group: None, sprite: None, } } fn load_map(&mut self, file: &String) -> &Tiledmap { if !self.loaded_maps.contains_key(file) { let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone())); self.loaded_maps.insert(file.clone(), map.clone()); } self.loaded_maps.get(file).expect("Impossible!") } /// Sort the layers of a Tiledmap (in place) so that the layers /// process correctly. Really we just want the inventories /// layer to be loaded first. pub fn sort_layers(&self, layers: &mut Vec<Layer>) { let mut mndx = None; 'find_ndx: for (layer, i) in layers.iter().zip(0..) {
} } if let Some(ndx) = mndx { let inv_layer = layers.remove(ndx); layers.insert(0, inv_layer); } } pub fn insert_map( &mut self, map: &mut Tiledmap, layer_group: Option<String>, sprite: Option<Entity>, ) -> Result<LoadedLayers, String> { self.sort_layers(&mut map.layers); let prev_group = self.layer_group.take(); self.layer_group = layer_group; self.sprite = sprite; let res = self.load_layers(&map.layers, &map)?; self.layer_group = prev_group; Ok(res) } /// Load an entire top-level map into the ECS. /// Takes the file to load and optionally a layer group to load. If a layer /// group is provided only layers within the group will be loaded. If no layer /// group is provided all layers will be loaded. /// Returns an error or a tuple consisting of pub fn load( &mut self, file: &String, layer_group: Option<String>, sprite: Option<Entity>, ) -> Result<LoadedLayers, String> { self.load_map(&file); let mut map = self .loaded_maps .get(file) .expect("Could not retreive map.") .clone(); self.insert_map(&mut map, layer_group, sprite) } /// Possibly Increments the ZLevel based on layer properties fn increment_z_by_layer(&mut self, layer: &Layer) { let z_inc = layer.get_z_inc().unwrap_or(0); if z_inc != 0 { self.z_level.0 += z_inc as f32; println!( "incrementing ZLevel to {:?} - layer {:?}", self.z_level, layer.name ); } } /// Load one layer of LayerData. fn load_layer_data( &mut self, layer_name: &String, data: &LayerData, map: &Tiledmap, ) -> Result<Vec<Entity>, String> { println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level); match data { LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?), LayerData::Objects(objects) => { if layer_name == "inventories" { let inv_layer: InventoryLayer = InventoryLayer::read(map, &objects.objects)?; let top_level_entities = inv_layer.into_ecs(self.world, self.z_level)?; Ok(top_level_entities) } else { let top_level_entities = objects.objects.iter().fold( Ok(vec![]), |result: Result<Vec<Entity>, String>, obj: &Object| { let ent = self.load_top_level_object(obj, map)?; let mut ents = result?; ents.push(ent); Ok(ents) }, )?; Ok(top_level_entities) } } LayerData::Layers(layers) => { layers.layers.iter().fold(Ok(vec![]), |res, layer| { let mut res = res?; self.increment_z_by_layer(&layer); let mut ents = self.load_layer_data(&layer.name, &layer.layer_data, map)?; res.append(&mut ents); Ok(res) }) } } } /// Load a vec of layers into the ECS fn load_layers( &mut self, layers: &Vec<Layer>, map: &Tiledmap, ) -> Result<LoadedLayers, String> { let variant = self.layer_group.take(); // First figure out which layers we need to load let layers_to_load: Vec<&Layer> = if variant.is_some() { let variant_name = variant.as_ref().unwrap(); // Only get the variant layers layers .iter() .filter_map(|layer| { if layer.name == *variant_name { match &layer.layer_data { LayerData::Layers(variant_layers) => { let variant_layers: Vec<&Layer> = variant_layers.layers.iter().collect(); Some(variant_layers) } _ => None, } } else { None } }) .flatten() .collect() } else { // Return the layers as normal layers.iter().collect() }; let mut layers = LoadedLayers::new(); for layer in layers_to_load.iter() { self.increment_z_by_layer(&layer); let mut ents = self.load_layer_data(&layer.name, &layer.layer_data, map)?; // If this layer is part of a group, add it as a keyframe if layer.is_group() { layers.groups.insert(layer.name.clone(), ents); } else { layers.top_level_entities.append(&mut ents); } } Ok(layers) } /// ## Loading tiles /// Load a vector of tiles keyed by their 
GlobalId. fn load_tile_layer( &mut self, tiles: &Vec<GlobalTileIndex>, map: &Tiledmap, ) -> Result<Vec<Entity>, String> { let (width, height) = (map.width as u32, map.height as u32); let tw = map.tilewidth as u32; let th = map.tileheight as u32; println!(" layer width {:?} and height {:?}", width, height); tiles .iter() .zip(0..) .fold(Ok(vec![]), |result, (gid, ndx)| { let mut ents = result?; let yndx = ndx / width; let xndx = ndx % width; println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx); let tile_origin = self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32); let mut attribs = Attributes::read_gid(map, gid, None)?; attribs.push(Attribute::Position(Position(tile_origin))); let attributes = Attributes { attribs }; let ent = attributes.into_ecs(self.world, self.z_level); ents.push(ent); Ok(ents) }) } /// ## Loading objects /// Load a top level object. fn load_top_level_object( &mut self, object: &Object, map: &Tiledmap, ) -> Result<Entity, String> { let msg_name = object .name .non_empty() .map(|s| s.clone()) .unwrap_or("unnamed".to_string()); println!("Encountered top object '{}'", msg_name); match object.get_deep_type(map).as_str() { "character" => { // Load a character into the game world let mut rec = ToonRecord::read(map, object)?; rec.attributes.attribs = rec .attributes .attribs .into_iter() .map(|a| match a { Attribute::Position(p) => { Attribute::Position(Position(p.0 + self.origin)) } a => a, }) .collect(); Ok(rec.into_ecs(self.world, self.z_level)) } "item" => { // Load an item into the game world (not an inventory) let mut rec = ItemRecord::read(map, object)?; // Update the position to be offset by the origin passed in. rec .attributes .position_mut() .map(|pos| pos.0 += self.origin); Ok(rec.into_ecs(self.world, self.z_level)) } "action" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|pos| pos.0 += self.origin); let _action = attributes.action().ok_or(format!( "Could not read action {:?}\nDid read:\n{:?}", object, attributes ))?; println!("Creating action:\n{:?}", attributes); Ok(attributes.into_ecs(self.world, self.z_level)) } "sprite" => Sprite::read(self, map, object), "zone" | "fence" | "step_fence" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|p| { p.0 += self.origin + V2::new(0.0, object.height); }); Ok(attributes.into_ecs(self.world, self.z_level)) } "point" | "sound" | "music" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|p| { p.0 += self.origin; }); Ok(attributes.into_ecs(self.world, self.z_level)) } "barrier" => { let mut attributes = Attributes::read(map, object)?; let position = attributes .position_mut() .expect("Barrier object has no position"); position.0 = self.origin; Ok(attributes.into_ecs(self.world, self.z_level)) } ty => { let gid = object.gid.clone(); if let Some(gid) = gid { // Tiled tiles' origin is at the bottom of the tile, not the top let y = object.y - object.height; let p = self.origin + V2::new(object.x, y); let size = (object.width as u32, object.height as u32); let mut attribs = Attributes::read_gid(map, &gid, Some(size))?; attribs.push(Attribute::Position(Position(p))); let props = object .properties .iter() .map(|p| (&p.name, p)) .collect::<HashMap<_, _>>(); let mut prop_attribs = Attributes::read_properties(&props)?; attribs.append(&mut prop_attribs); let attributes = Attributes { attribs }; println!(" {:?} with attributes:{:?}", ty, attributes); Ok(attributes.into_ecs(self.world, self.z_level)) } else { if 
object.text.len() > 0 { // This is a text object let mut attribs = Attributes::read(map, object)?; let p = attribs.position_mut().expect("Text must have a Position"); p.0 += self.origin; println!( " {:?} with attributes:{:?} and z_level:{:?}", ty, attribs, self.z_level ); Ok(attribs.into_ecs(self.world, self.z_level)) } else { Err(format!("Unsupported object\n{:?}", object)) } } } } } }
if let LayerData::Objects(_) = layer.layer_data { if layer.name == "inventories" { mndx = Some(i); break 'find_ndx; }
random_line_split
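// --- Added example (not part of the original dataset row) -------------------
// A hypothetical call site for the MapLoader above. The map path is made up;
// the specs::World is assumed to already carry the resources the loader
// touches (Screen, BackgroundColor) and the components its attribute records
// insert. Passing None for both the layer group and the parent sprite loads
// every layer in the file.
fn load_level(world: &mut specs::World) -> Result<(), String> {
    let mut loader = MapLoader::new(world);
    let layers = loader.load(&"maps/level_one.json".to_string(), None, None)?;
    println!("loaded {} top-level entities", layers.all_entities().len());
    Ok(())
}
// -----------------------------------------------------------------------------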
map_loader.rs
use serde_json::Value; use specs::prelude::*; use std::collections::HashMap; use std::path::Path; use super::super::super::prelude::{ hex_color, Attribute, Attributes, BackgroundColor, CanBeEmpty, Color, GlobalTileIndex, InventoryLayer, ItemRecord, Layer, LayerData, Object, Position, Screen, Sprite, Tiledmap, ToonRecord, ZLevel, V2, }; /// The result of loading one or more Tiled layers into the ECS. pub struct LoadedLayers { /// All the top-level entities loaded pub top_level_entities: Vec<Entity>, /// A HashMap of all entities loaded within a layer group, keyed by the group /// layer's name. pub groups: HashMap<String, Vec<Entity>>, } impl LoadedLayers { pub fn new() -> LoadedLayers { LoadedLayers { top_level_entities: vec![], groups: HashMap::new(), } } pub fn all_entities(&self) -> Vec<Entity> { let mut tops = self.top_level_entities.clone(); let mut groups: Vec<Entity> = self.groups.values().flat_map(|es| es.clone()).collect(); tops.append(&mut groups); tops } pub fn append_entities(&mut self, other: LoadedLayers) { let other_tops = other.top_level_entities.into_iter(); let other_groups = other.groups.into_iter(); self.top_level_entities.extend(other_tops); self.groups.extend(other_groups); } } pub struct MapLoader<'a> { loaded_maps: HashMap<String, Tiledmap>, pub z_level: ZLevel, pub world: &'a mut World, pub origin: V2, pub layer_group: Option<String>, pub sprite: Option<Entity>, } impl<'a> MapLoader<'a> { pub fn
(file: String, lazy: &LazyUpdate) { lazy.exec_mut(|world| { let mut loader = MapLoader::new(world); let file = file; let map: &Tiledmap = loader.load_map(&file); // Get the background color based on the loaded map let bg: Color = map .backgroundcolor .as_ref() .map(|s: &String| { hex_color(s.as_str()) .map_err(|e| format!("{:?}", e)) .map(|(_, c)| c) }) .unwrap_or(Ok(Color::rgb(0, 0, 0))) .unwrap() .clone(); let width: u32 = map .get_property_by_name("viewport_width") .map(|value: &Value| { value .as_i64() .expect("map's 'viewport_width' property type must be unsigned int") as u32 }) .unwrap_or(map.width as u32 * map.tilewidth as u32); // Get the screen size based on the loaded map let height: u32 = map .get_property_by_name("viewport_height") .map(|value: &Value| { value.as_i64().expect( "map's 'viewport_height' property type must be unsigned int", ) as u32 }) .unwrap_or(map.height as u32 * map.tileheight as u32); let res = loader.load(&file, None, None); match res { Ok(_) => {} Err(msg) => panic!(msg), } let mut screen = world.write_resource::<Screen>(); screen.set_size((width, height)); let mut background_color = world.write_resource::<BackgroundColor>(); background_color.0 = bg; }); } /// Create a new MapLoader pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> { MapLoader { loaded_maps: HashMap::new(), z_level: ZLevel(0.0), world, origin: V2::new(0.0, 0.0), layer_group: None, sprite: None, } } fn load_map(&mut self, file: &String) -> &Tiledmap { if !self.loaded_maps.contains_key(file) { let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone())); self.loaded_maps.insert(file.clone(), map.clone()); } self.loaded_maps.get(file).expect("Impossible!") } /// Sort the layers of a Tiledmap (in place) so that the layers /// process correctly. Really we just want the inventories /// layer to be loaded first. pub fn sort_layers(&self, layers: &mut Vec<Layer>) { let mut mndx = None; 'find_ndx: for (layer, i) in layers.iter().zip(0..) { if let LayerData::Objects(_) = layer.layer_data { if layer.name == "inventories" { mndx = Some(i); break 'find_ndx; } } } if let Some(ndx) = mndx { let inv_layer = layers.remove(ndx); layers.insert(0, inv_layer); } } pub fn insert_map( &mut self, map: &mut Tiledmap, layer_group: Option<String>, sprite: Option<Entity>, ) -> Result<LoadedLayers, String> { self.sort_layers(&mut map.layers); let prev_group = self.layer_group.take(); self.layer_group = layer_group; self.sprite = sprite; let res = self.load_layers(&map.layers, &map)?; self.layer_group = prev_group; Ok(res) } /// Load an entire top-level map into the ECS. /// Takes the file to load and optionally a layer group to load. If a layer /// group is provided only layers within the group will be loaded. If no layer /// group is provided all layers will be loaded. /// Returns an error or a tuple consisting of pub fn load( &mut self, file: &String, layer_group: Option<String>, sprite: Option<Entity>, ) -> Result<LoadedLayers, String> { self.load_map(&file); let mut map = self .loaded_maps .get(file) .expect("Could not retreive map.") .clone(); self.insert_map(&mut map, layer_group, sprite) } /// Possibly Increments the ZLevel based on layer properties fn increment_z_by_layer(&mut self, layer: &Layer) { let z_inc = layer.get_z_inc().unwrap_or(0); if z_inc != 0 { self.z_level.0 += z_inc as f32; println!( "incrementing ZLevel to {:?} - layer {:?}", self.z_level, layer.name ); } } /// Load one layer of LayerData. 
fn load_layer_data( &mut self, layer_name: &String, data: &LayerData, map: &Tiledmap, ) -> Result<Vec<Entity>, String> { println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level); match data { LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?), LayerData::Objects(objects) => { if layer_name == "inventories" { let inv_layer: InventoryLayer = InventoryLayer::read(map, &objects.objects)?; let top_level_entities = inv_layer.into_ecs(self.world, self.z_level)?; Ok(top_level_entities) } else { let top_level_entities = objects.objects.iter().fold( Ok(vec![]), |result: Result<Vec<Entity>, String>, obj: &Object| { let ent = self.load_top_level_object(obj, map)?; let mut ents = result?; ents.push(ent); Ok(ents) }, )?; Ok(top_level_entities) } } LayerData::Layers(layers) => { layers.layers.iter().fold(Ok(vec![]), |res, layer| { let mut res = res?; self.increment_z_by_layer(&layer); let mut ents = self.load_layer_data(&layer.name, &layer.layer_data, map)?; res.append(&mut ents); Ok(res) }) } } } /// Load a vec of layers into the ECS fn load_layers( &mut self, layers: &Vec<Layer>, map: &Tiledmap, ) -> Result<LoadedLayers, String> { let variant = self.layer_group.take(); // First figure out which layers we need to load let layers_to_load: Vec<&Layer> = if variant.is_some() { let variant_name = variant.as_ref().unwrap(); // Only get the variant layers layers .iter() .filter_map(|layer| { if layer.name == *variant_name { match &layer.layer_data { LayerData::Layers(variant_layers) => { let variant_layers: Vec<&Layer> = variant_layers.layers.iter().collect(); Some(variant_layers) } _ => None, } } else { None } }) .flatten() .collect() } else { // Return the layers as normal layers.iter().collect() }; let mut layers = LoadedLayers::new(); for layer in layers_to_load.iter() { self.increment_z_by_layer(&layer); let mut ents = self.load_layer_data(&layer.name, &layer.layer_data, map)?; // If this layer is part of a group, add it as a keyframe if layer.is_group() { layers.groups.insert(layer.name.clone(), ents); } else { layers.top_level_entities.append(&mut ents); } } Ok(layers) } /// ## Loading tiles /// Load a vector of tiles keyed by their GlobalId. fn load_tile_layer( &mut self, tiles: &Vec<GlobalTileIndex>, map: &Tiledmap, ) -> Result<Vec<Entity>, String> { let (width, height) = (map.width as u32, map.height as u32); let tw = map.tilewidth as u32; let th = map.tileheight as u32; println!(" layer width {:?} and height {:?}", width, height); tiles .iter() .zip(0..) .fold(Ok(vec![]), |result, (gid, ndx)| { let mut ents = result?; let yndx = ndx / width; let xndx = ndx % width; println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx); let tile_origin = self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32); let mut attribs = Attributes::read_gid(map, gid, None)?; attribs.push(Attribute::Position(Position(tile_origin))); let attributes = Attributes { attribs }; let ent = attributes.into_ecs(self.world, self.z_level); ents.push(ent); Ok(ents) }) } /// ## Loading objects /// Load a top level object. 
fn load_top_level_object( &mut self, object: &Object, map: &Tiledmap, ) -> Result<Entity, String> { let msg_name = object .name .non_empty() .map(|s| s.clone()) .unwrap_or("unnamed".to_string()); println!("Encountered top object '{}'", msg_name); match object.get_deep_type(map).as_str() { "character" => { // Load a character into the game world let mut rec = ToonRecord::read(map, object)?; rec.attributes.attribs = rec .attributes .attribs .into_iter() .map(|a| match a { Attribute::Position(p) => { Attribute::Position(Position(p.0 + self.origin)) } a => a, }) .collect(); Ok(rec.into_ecs(self.world, self.z_level)) } "item" => { // Load an item into the game world (not an inventory) let mut rec = ItemRecord::read(map, object)?; // Update the position to be offset by the origin passed in. rec .attributes .position_mut() .map(|pos| pos.0 += self.origin); Ok(rec.into_ecs(self.world, self.z_level)) } "action" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|pos| pos.0 += self.origin); let _action = attributes.action().ok_or(format!( "Could not read action {:?}\nDid read:\n{:?}", object, attributes ))?; println!("Creating action:\n{:?}", attributes); Ok(attributes.into_ecs(self.world, self.z_level)) } "sprite" => Sprite::read(self, map, object), "zone" | "fence" | "step_fence" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|p| { p.0 += self.origin + V2::new(0.0, object.height); }); Ok(attributes.into_ecs(self.world, self.z_level)) } "point" | "sound" | "music" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|p| { p.0 += self.origin; }); Ok(attributes.into_ecs(self.world, self.z_level)) } "barrier" => { let mut attributes = Attributes::read(map, object)?; let position = attributes .position_mut() .expect("Barrier object has no position"); position.0 = self.origin; Ok(attributes.into_ecs(self.world, self.z_level)) } ty => { let gid = object.gid.clone(); if let Some(gid) = gid { // Tiled tiles' origin is at the bottom of the tile, not the top let y = object.y - object.height; let p = self.origin + V2::new(object.x, y); let size = (object.width as u32, object.height as u32); let mut attribs = Attributes::read_gid(map, &gid, Some(size))?; attribs.push(Attribute::Position(Position(p))); let props = object .properties .iter() .map(|p| (&p.name, p)) .collect::<HashMap<_, _>>(); let mut prop_attribs = Attributes::read_properties(&props)?; attribs.append(&mut prop_attribs); let attributes = Attributes { attribs }; println!(" {:?} with attributes:{:?}", ty, attributes); Ok(attributes.into_ecs(self.world, self.z_level)) } else { if object.text.len() > 0 { // This is a text object let mut attribs = Attributes::read(map, object)?; let p = attribs.position_mut().expect("Text must have a Position"); p.0 += self.origin; println!( " {:?} with attributes:{:?} and z_level:{:?}", ty, attribs, self.z_level ); Ok(attribs.into_ecs(self.world, self.z_level)) } else { Err(format!("Unsupported object\n{:?}", object)) } } } } } }
load_it
identifier_name
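// --- Added example (not part of the original dataset row) -------------------
// Sketch of loading a base map plus one of its variant layer groups, merging
// the two result sets with LoadedLayers::append_entities. The file name and
// the "winter" group are hypothetical; when a group name is passed, load only
// walks the layers nested under that group.
fn load_with_variant(loader: &mut MapLoader) -> Result<LoadedLayers, String> {
    let file = "maps/town.json".to_string();
    let mut base = loader.load(&file, None, None)?;
    let winter = loader.load(&file, Some("winter".to_string()), None)?;
    base.append_entities(winter);
    Ok(base)
}
// -----------------------------------------------------------------------------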
map_loader.rs
use serde_json::Value; use specs::prelude::*; use std::collections::HashMap; use std::path::Path; use super::super::super::prelude::{ hex_color, Attribute, Attributes, BackgroundColor, CanBeEmpty, Color, GlobalTileIndex, InventoryLayer, ItemRecord, Layer, LayerData, Object, Position, Screen, Sprite, Tiledmap, ToonRecord, ZLevel, V2, }; /// The result of loading one or more Tiled layers into the ECS. pub struct LoadedLayers { /// All the top-level entities loaded pub top_level_entities: Vec<Entity>, /// A HashMap of all entities loaded within a layer group, keyed by the group /// layer's name. pub groups: HashMap<String, Vec<Entity>>, } impl LoadedLayers { pub fn new() -> LoadedLayers { LoadedLayers { top_level_entities: vec![], groups: HashMap::new(), } } pub fn all_entities(&self) -> Vec<Entity> { let mut tops = self.top_level_entities.clone(); let mut groups: Vec<Entity> = self.groups.values().flat_map(|es| es.clone()).collect(); tops.append(&mut groups); tops } pub fn append_entities(&mut self, other: LoadedLayers) { let other_tops = other.top_level_entities.into_iter(); let other_groups = other.groups.into_iter(); self.top_level_entities.extend(other_tops); self.groups.extend(other_groups); } } pub struct MapLoader<'a> { loaded_maps: HashMap<String, Tiledmap>, pub z_level: ZLevel, pub world: &'a mut World, pub origin: V2, pub layer_group: Option<String>, pub sprite: Option<Entity>, } impl<'a> MapLoader<'a> { pub fn load_it(file: String, lazy: &LazyUpdate) { lazy.exec_mut(|world| { let mut loader = MapLoader::new(world); let file = file; let map: &Tiledmap = loader.load_map(&file); // Get the background color based on the loaded map let bg: Color = map .backgroundcolor .as_ref() .map(|s: &String| { hex_color(s.as_str()) .map_err(|e| format!("{:?}", e)) .map(|(_, c)| c) }) .unwrap_or(Ok(Color::rgb(0, 0, 0))) .unwrap() .clone(); let width: u32 = map .get_property_by_name("viewport_width") .map(|value: &Value| { value .as_i64() .expect("map's 'viewport_width' property type must be unsigned int") as u32 }) .unwrap_or(map.width as u32 * map.tilewidth as u32); // Get the screen size based on the loaded map let height: u32 = map .get_property_by_name("viewport_height") .map(|value: &Value| { value.as_i64().expect( "map's 'viewport_height' property type must be unsigned int", ) as u32 }) .unwrap_or(map.height as u32 * map.tileheight as u32); let res = loader.load(&file, None, None); match res { Ok(_) => {} Err(msg) => panic!(msg), } let mut screen = world.write_resource::<Screen>(); screen.set_size((width, height)); let mut background_color = world.write_resource::<BackgroundColor>(); background_color.0 = bg; }); } /// Create a new MapLoader pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> { MapLoader { loaded_maps: HashMap::new(), z_level: ZLevel(0.0), world, origin: V2::new(0.0, 0.0), layer_group: None, sprite: None, } } fn load_map(&mut self, file: &String) -> &Tiledmap { if !self.loaded_maps.contains_key(file) { let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone())); self.loaded_maps.insert(file.clone(), map.clone()); } self.loaded_maps.get(file).expect("Impossible!") } /// Sort the layers of a Tiledmap (in place) so that the layers /// process correctly. Really we just want the inventories /// layer to be loaded first. pub fn sort_layers(&self, layers: &mut Vec<Layer>) { let mut mndx = None; 'find_ndx: for (layer, i) in layers.iter().zip(0..) 
{ if let LayerData::Objects(_) = layer.layer_data { if layer.name == "inventories" { mndx = Some(i); break 'find_ndx; } } } if let Some(ndx) = mndx { let inv_layer = layers.remove(ndx); layers.insert(0, inv_layer); } } pub fn insert_map( &mut self, map: &mut Tiledmap, layer_group: Option<String>, sprite: Option<Entity>, ) -> Result<LoadedLayers, String> { self.sort_layers(&mut map.layers); let prev_group = self.layer_group.take(); self.layer_group = layer_group; self.sprite = sprite; let res = self.load_layers(&map.layers, &map)?; self.layer_group = prev_group; Ok(res) } /// Load an entire top-level map into the ECS. /// Takes the file to load and optionally a layer group to load. If a layer /// group is provided only layers within the group will be loaded. If no layer /// group is provided all layers will be loaded. /// Returns an error or a tuple consisting of pub fn load( &mut self, file: &String, layer_group: Option<String>, sprite: Option<Entity>, ) -> Result<LoadedLayers, String> { self.load_map(&file); let mut map = self .loaded_maps .get(file) .expect("Could not retreive map.") .clone(); self.insert_map(&mut map, layer_group, sprite) } /// Possibly Increments the ZLevel based on layer properties fn increment_z_by_layer(&mut self, layer: &Layer) { let z_inc = layer.get_z_inc().unwrap_or(0); if z_inc != 0 { self.z_level.0 += z_inc as f32; println!( "incrementing ZLevel to {:?} - layer {:?}", self.z_level, layer.name ); } } /// Load one layer of LayerData. fn load_layer_data( &mut self, layer_name: &String, data: &LayerData, map: &Tiledmap, ) -> Result<Vec<Entity>, String> { println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level); match data { LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?), LayerData::Objects(objects) => { if layer_name == "inventories" { let inv_layer: InventoryLayer = InventoryLayer::read(map, &objects.objects)?; let top_level_entities = inv_layer.into_ecs(self.world, self.z_level)?; Ok(top_level_entities) } else { let top_level_entities = objects.objects.iter().fold( Ok(vec![]), |result: Result<Vec<Entity>, String>, obj: &Object| { let ent = self.load_top_level_object(obj, map)?; let mut ents = result?; ents.push(ent); Ok(ents) }, )?; Ok(top_level_entities) } } LayerData::Layers(layers) => { layers.layers.iter().fold(Ok(vec![]), |res, layer| { let mut res = res?; self.increment_z_by_layer(&layer); let mut ents = self.load_layer_data(&layer.name, &layer.layer_data, map)?; res.append(&mut ents); Ok(res) }) } } } /// Load a vec of layers into the ECS fn load_layers( &mut self, layers: &Vec<Layer>, map: &Tiledmap, ) -> Result<LoadedLayers, String> { let variant = self.layer_group.take(); // First figure out which layers we need to load let layers_to_load: Vec<&Layer> = if variant.is_some() { let variant_name = variant.as_ref().unwrap(); // Only get the variant layers layers .iter() .filter_map(|layer| { if layer.name == *variant_name { match &layer.layer_data { LayerData::Layers(variant_layers) =>
_ => None, } } else { None } }) .flatten() .collect() } else { // Return the layers as normal layers.iter().collect() }; let mut layers = LoadedLayers::new(); for layer in layers_to_load.iter() { self.increment_z_by_layer(&layer); let mut ents = self.load_layer_data(&layer.name, &layer.layer_data, map)?; // If this layer is part of a group, add it as a keyframe if layer.is_group() { layers.groups.insert(layer.name.clone(), ents); } else { layers.top_level_entities.append(&mut ents); } } Ok(layers) } /// ## Loading tiles /// Load a vector of tiles keyed by their GlobalId. fn load_tile_layer( &mut self, tiles: &Vec<GlobalTileIndex>, map: &Tiledmap, ) -> Result<Vec<Entity>, String> { let (width, height) = (map.width as u32, map.height as u32); let tw = map.tilewidth as u32; let th = map.tileheight as u32; println!(" layer width {:?} and height {:?}", width, height); tiles .iter() .zip(0..) .fold(Ok(vec![]), |result, (gid, ndx)| { let mut ents = result?; let yndx = ndx / width; let xndx = ndx % width; println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx); let tile_origin = self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32); let mut attribs = Attributes::read_gid(map, gid, None)?; attribs.push(Attribute::Position(Position(tile_origin))); let attributes = Attributes { attribs }; let ent = attributes.into_ecs(self.world, self.z_level); ents.push(ent); Ok(ents) }) } /// ## Loading objects /// Load a top level object. fn load_top_level_object( &mut self, object: &Object, map: &Tiledmap, ) -> Result<Entity, String> { let msg_name = object .name .non_empty() .map(|s| s.clone()) .unwrap_or("unnamed".to_string()); println!("Encountered top object '{}'", msg_name); match object.get_deep_type(map).as_str() { "character" => { // Load a character into the game world let mut rec = ToonRecord::read(map, object)?; rec.attributes.attribs = rec .attributes .attribs .into_iter() .map(|a| match a { Attribute::Position(p) => { Attribute::Position(Position(p.0 + self.origin)) } a => a, }) .collect(); Ok(rec.into_ecs(self.world, self.z_level)) } "item" => { // Load an item into the game world (not an inventory) let mut rec = ItemRecord::read(map, object)?; // Update the position to be offset by the origin passed in. 
rec .attributes .position_mut() .map(|pos| pos.0 += self.origin); Ok(rec.into_ecs(self.world, self.z_level)) } "action" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|pos| pos.0 += self.origin); let _action = attributes.action().ok_or(format!( "Could not read action {:?}\nDid read:\n{:?}", object, attributes ))?; println!("Creating action:\n{:?}", attributes); Ok(attributes.into_ecs(self.world, self.z_level)) } "sprite" => Sprite::read(self, map, object), "zone" | "fence" | "step_fence" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|p| { p.0 += self.origin + V2::new(0.0, object.height); }); Ok(attributes.into_ecs(self.world, self.z_level)) } "point" | "sound" | "music" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|p| { p.0 += self.origin; }); Ok(attributes.into_ecs(self.world, self.z_level)) } "barrier" => { let mut attributes = Attributes::read(map, object)?; let position = attributes .position_mut() .expect("Barrier object has no position"); position.0 = self.origin; Ok(attributes.into_ecs(self.world, self.z_level)) } ty => { let gid = object.gid.clone(); if let Some(gid) = gid { // Tiled tiles' origin is at the bottom of the tile, not the top let y = object.y - object.height; let p = self.origin + V2::new(object.x, y); let size = (object.width as u32, object.height as u32); let mut attribs = Attributes::read_gid(map, &gid, Some(size))?; attribs.push(Attribute::Position(Position(p))); let props = object .properties .iter() .map(|p| (&p.name, p)) .collect::<HashMap<_, _>>(); let mut prop_attribs = Attributes::read_properties(&props)?; attribs.append(&mut prop_attribs); let attributes = Attributes { attribs }; println!(" {:?} with attributes:{:?}", ty, attributes); Ok(attributes.into_ecs(self.world, self.z_level)) } else { if object.text.len() > 0 { // This is a text object let mut attribs = Attributes::read(map, object)?; let p = attribs.position_mut().expect("Text must have a Position"); p.0 += self.origin; println!( " {:?} with attributes:{:?} and z_level:{:?}", ty, attribs, self.z_level ); Ok(attribs.into_ecs(self.world, self.z_level)) } else { Err(format!("Unsupported object\n{:?}", object)) } } } } } }
{ let variant_layers: Vec<&Layer> = variant_layers.layers.iter().collect(); Some(variant_layers) }
conditional_block
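The filled conditional_block above collects the child layers of the group whose name matches the requested variant, and the surrounding filter_map/flatten chain splices them into the load list. The selection rule itself (load only the named group's children when a group is requested, otherwise load every layer) is independent of the Rust iterator machinery. Here is a minimal Go sketch of the same rule, Go being the language of the later files in this set; the Layer type is a hypothetical stand-in, not the crate's type:

package main

import "fmt"

// Layer is a hypothetical stand-in for a Tiled layer: group layers
// carry child layers, leaf layers have none.
type Layer struct {
	Name     string
	Children []Layer
}

// layersToLoad mirrors load_layers' selection: with a group name,
// return only that group's children; with none, return all layers.
func layersToLoad(layers []Layer, group string) []Layer {
	if group == "" {
		return layers
	}
	var out []Layer
	for _, l := range layers {
		if l.Name == group {
			out = append(out, l.Children...)
		}
	}
	return out
}

func main() {
	layers := []Layer{
		{Name: "ground"},
		{Name: "variant_a", Children: []Layer{{Name: "props"}}},
	}
	fmt.Println(layersToLoad(layers, "variant_a")) // only the group's children
	fmt.Println(len(layersToLoad(layers, "")))     // 2: all layers
}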
map_loader.rs
use serde_json::Value; use specs::prelude::*; use std::collections::HashMap; use std::path::Path; use super::super::super::prelude::{ hex_color, Attribute, Attributes, BackgroundColor, CanBeEmpty, Color, GlobalTileIndex, InventoryLayer, ItemRecord, Layer, LayerData, Object, Position, Screen, Sprite, Tiledmap, ToonRecord, ZLevel, V2, }; /// The result of loading one or more Tiled layers into the ECS. pub struct LoadedLayers { /// All the top-level entities loaded pub top_level_entities: Vec<Entity>, /// A HashMap of all entities loaded within a layer group, keyed by the group /// layer's name. pub groups: HashMap<String, Vec<Entity>>, } impl LoadedLayers { pub fn new() -> LoadedLayers { LoadedLayers { top_level_entities: vec![], groups: HashMap::new(), } } pub fn all_entities(&self) -> Vec<Entity> { let mut tops = self.top_level_entities.clone(); let mut groups: Vec<Entity> = self.groups.values().flat_map(|es| es.clone()).collect(); tops.append(&mut groups); tops } pub fn append_entities(&mut self, other: LoadedLayers)
} pub struct MapLoader<'a> { loaded_maps: HashMap<String, Tiledmap>, pub z_level: ZLevel, pub world: &'a mut World, pub origin: V2, pub layer_group: Option<String>, pub sprite: Option<Entity>, } impl<'a> MapLoader<'a> { pub fn load_it(file: String, lazy: &LazyUpdate) { lazy.exec_mut(|world| { let mut loader = MapLoader::new(world); let file = file; let map: &Tiledmap = loader.load_map(&file); // Get the background color based on the loaded map let bg: Color = map .backgroundcolor .as_ref() .map(|s: &String| { hex_color(s.as_str()) .map_err(|e| format!("{:?}", e)) .map(|(_, c)| c) }) .unwrap_or(Ok(Color::rgb(0, 0, 0))) .unwrap() .clone(); let width: u32 = map .get_property_by_name("viewport_width") .map(|value: &Value| { value .as_i64() .expect("map's 'viewport_width' property type must be unsigned int") as u32 }) .unwrap_or(map.width as u32 * map.tilewidth as u32); // Get the screen size based on the loaded map let height: u32 = map .get_property_by_name("viewport_height") .map(|value: &Value| { value.as_i64().expect( "map's 'viewport_height' property type must be unsigned int", ) as u32 }) .unwrap_or(map.height as u32 * map.tileheight as u32); let res = loader.load(&file, None, None); match res { Ok(_) => {} Err(msg) => panic!(msg), } let mut screen = world.write_resource::<Screen>(); screen.set_size((width, height)); let mut background_color = world.write_resource::<BackgroundColor>(); background_color.0 = bg; }); } /// Create a new MapLoader pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> { MapLoader { loaded_maps: HashMap::new(), z_level: ZLevel(0.0), world, origin: V2::new(0.0, 0.0), layer_group: None, sprite: None, } } fn load_map(&mut self, file: &String) -> &Tiledmap { if !self.loaded_maps.contains_key(file) { let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone())); self.loaded_maps.insert(file.clone(), map.clone()); } self.loaded_maps.get(file).expect("Impossible!") } /// Sort the layers of a Tiledmap (in place) so that the layers /// process correctly. Really we just want the inventories /// layer to be loaded first. pub fn sort_layers(&self, layers: &mut Vec<Layer>) { let mut mndx = None; 'find_ndx: for (layer, i) in layers.iter().zip(0..) { if let LayerData::Objects(_) = layer.layer_data { if layer.name == "inventories" { mndx = Some(i); break 'find_ndx; } } } if let Some(ndx) = mndx { let inv_layer = layers.remove(ndx); layers.insert(0, inv_layer); } } pub fn insert_map( &mut self, map: &mut Tiledmap, layer_group: Option<String>, sprite: Option<Entity>, ) -> Result<LoadedLayers, String> { self.sort_layers(&mut map.layers); let prev_group = self.layer_group.take(); self.layer_group = layer_group; self.sprite = sprite; let res = self.load_layers(&map.layers, &map)?; self.layer_group = prev_group; Ok(res) } /// Load an entire top-level map into the ECS. /// Takes the file to load and optionally a layer group to load. If a layer /// group is provided only layers within the group will be loaded. If no layer /// group is provided all layers will be loaded. 
/// Returns an error or a tuple consisting of pub fn load( &mut self, file: &String, layer_group: Option<String>, sprite: Option<Entity>, ) -> Result<LoadedLayers, String> { self.load_map(&file); let mut map = self .loaded_maps .get(file) .expect("Could not retreive map.") .clone(); self.insert_map(&mut map, layer_group, sprite) } /// Possibly Increments the ZLevel based on layer properties fn increment_z_by_layer(&mut self, layer: &Layer) { let z_inc = layer.get_z_inc().unwrap_or(0); if z_inc != 0 { self.z_level.0 += z_inc as f32; println!( "incrementing ZLevel to {:?} - layer {:?}", self.z_level, layer.name ); } } /// Load one layer of LayerData. fn load_layer_data( &mut self, layer_name: &String, data: &LayerData, map: &Tiledmap, ) -> Result<Vec<Entity>, String> { println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level); match data { LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?), LayerData::Objects(objects) => { if layer_name == "inventories" { let inv_layer: InventoryLayer = InventoryLayer::read(map, &objects.objects)?; let top_level_entities = inv_layer.into_ecs(self.world, self.z_level)?; Ok(top_level_entities) } else { let top_level_entities = objects.objects.iter().fold( Ok(vec![]), |result: Result<Vec<Entity>, String>, obj: &Object| { let ent = self.load_top_level_object(obj, map)?; let mut ents = result?; ents.push(ent); Ok(ents) }, )?; Ok(top_level_entities) } } LayerData::Layers(layers) => { layers.layers.iter().fold(Ok(vec![]), |res, layer| { let mut res = res?; self.increment_z_by_layer(&layer); let mut ents = self.load_layer_data(&layer.name, &layer.layer_data, map)?; res.append(&mut ents); Ok(res) }) } } } /// Load a vec of layers into the ECS fn load_layers( &mut self, layers: &Vec<Layer>, map: &Tiledmap, ) -> Result<LoadedLayers, String> { let variant = self.layer_group.take(); // First figure out which layers we need to load let layers_to_load: Vec<&Layer> = if variant.is_some() { let variant_name = variant.as_ref().unwrap(); // Only get the variant layers layers .iter() .filter_map(|layer| { if layer.name == *variant_name { match &layer.layer_data { LayerData::Layers(variant_layers) => { let variant_layers: Vec<&Layer> = variant_layers.layers.iter().collect(); Some(variant_layers) } _ => None, } } else { None } }) .flatten() .collect() } else { // Return the layers as normal layers.iter().collect() }; let mut layers = LoadedLayers::new(); for layer in layers_to_load.iter() { self.increment_z_by_layer(&layer); let mut ents = self.load_layer_data(&layer.name, &layer.layer_data, map)?; // If this layer is part of a group, add it as a keyframe if layer.is_group() { layers.groups.insert(layer.name.clone(), ents); } else { layers.top_level_entities.append(&mut ents); } } Ok(layers) } /// ## Loading tiles /// Load a vector of tiles keyed by their GlobalId. fn load_tile_layer( &mut self, tiles: &Vec<GlobalTileIndex>, map: &Tiledmap, ) -> Result<Vec<Entity>, String> { let (width, height) = (map.width as u32, map.height as u32); let tw = map.tilewidth as u32; let th = map.tileheight as u32; println!(" layer width {:?} and height {:?}", width, height); tiles .iter() .zip(0..) 
.fold(Ok(vec![]), |result, (gid, ndx)| { let mut ents = result?; let yndx = ndx / width; let xndx = ndx % width; println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx); let tile_origin = self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32); let mut attribs = Attributes::read_gid(map, gid, None)?; attribs.push(Attribute::Position(Position(tile_origin))); let attributes = Attributes { attribs }; let ent = attributes.into_ecs(self.world, self.z_level); ents.push(ent); Ok(ents) }) } /// ## Loading objects /// Load a top level object. fn load_top_level_object( &mut self, object: &Object, map: &Tiledmap, ) -> Result<Entity, String> { let msg_name = object .name .non_empty() .map(|s| s.clone()) .unwrap_or("unnamed".to_string()); println!("Encountered top object '{}'", msg_name); match object.get_deep_type(map).as_str() { "character" => { // Load a character into the game world let mut rec = ToonRecord::read(map, object)?; rec.attributes.attribs = rec .attributes .attribs .into_iter() .map(|a| match a { Attribute::Position(p) => { Attribute::Position(Position(p.0 + self.origin)) } a => a, }) .collect(); Ok(rec.into_ecs(self.world, self.z_level)) } "item" => { // Load an item into the game world (not an inventory) let mut rec = ItemRecord::read(map, object)?; // Update the position to be offset by the origin passed in. rec .attributes .position_mut() .map(|pos| pos.0 += self.origin); Ok(rec.into_ecs(self.world, self.z_level)) } "action" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|pos| pos.0 += self.origin); let _action = attributes.action().ok_or(format!( "Could not read action {:?}\nDid read:\n{:?}", object, attributes ))?; println!("Creating action:\n{:?}", attributes); Ok(attributes.into_ecs(self.world, self.z_level)) } "sprite" => Sprite::read(self, map, object), "zone" | "fence" | "step_fence" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|p| { p.0 += self.origin + V2::new(0.0, object.height); }); Ok(attributes.into_ecs(self.world, self.z_level)) } "point" | "sound" | "music" => { let mut attributes = Attributes::read(map, object)?; attributes.position_mut().map(|p| { p.0 += self.origin; }); Ok(attributes.into_ecs(self.world, self.z_level)) } "barrier" => { let mut attributes = Attributes::read(map, object)?; let position = attributes .position_mut() .expect("Barrier object has no position"); position.0 = self.origin; Ok(attributes.into_ecs(self.world, self.z_level)) } ty => { let gid = object.gid.clone(); if let Some(gid) = gid { // Tiled tiles' origin is at the bottom of the tile, not the top let y = object.y - object.height; let p = self.origin + V2::new(object.x, y); let size = (object.width as u32, object.height as u32); let mut attribs = Attributes::read_gid(map, &gid, Some(size))?; attribs.push(Attribute::Position(Position(p))); let props = object .properties .iter() .map(|p| (&p.name, p)) .collect::<HashMap<_, _>>(); let mut prop_attribs = Attributes::read_properties(&props)?; attribs.append(&mut prop_attribs); let attributes = Attributes { attribs }; println!(" {:?} with attributes:{:?}", ty, attributes); Ok(attributes.into_ecs(self.world, self.z_level)) } else { if object.text.len() > 0 { // This is a text object let mut attribs = Attributes::read(map, object)?; let p = attribs.position_mut().expect("Text must have a Position"); p.0 += self.origin; println!( " {:?} with attributes:{:?} and z_level:{:?}", ty, attribs, self.z_level ); Ok(attribs.into_ecs(self.world, self.z_level)) } else { 
Err(format!("Unsupported object\n{:?}", object)) } } } } } }
{ let other_tops = other.top_level_entities.into_iter(); let other_groups = other.groups.into_iter(); self.top_level_entities.extend(other_tops); self.groups.extend(other_groups); }
identifier_body
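The identifier_body hole above is the merge inside append_entities: the flat entity lists are concatenated and the group map is extended, with colliding group names overwritten by the incoming entries (HashMap::extend semantics). A Go analogue of that merge, for readers following the Go files below; Entity and LoadedLayers here are hypothetical stand-ins, not the crate's types:

package main

import "fmt"

// Entity stands in for an ECS entity id.
type Entity uint32

// LoadedLayers mirrors the Rust struct: flat top-level entities
// plus per-group entity lists keyed by the group layer's name.
type LoadedLayers struct {
	TopLevel []Entity
	Groups   map[string][]Entity
}

// Append merges other into l the way append_entities does:
// concatenate top-level entities, and let incoming group
// entries replace same-named ones.
func (l *LoadedLayers) Append(other LoadedLayers) {
	l.TopLevel = append(l.TopLevel, other.TopLevel...)
	for name, ents := range other.Groups {
		l.Groups[name] = ents
	}
}

func main() {
	a := LoadedLayers{TopLevel: []Entity{1}, Groups: map[string][]Entity{"doors": {2}}}
	b := LoadedLayers{TopLevel: []Entity{3}, Groups: map[string][]Entity{"npcs": {4}}}
	a.Append(b)
	fmt.Println(a.TopLevel, a.Groups) // [1 3] map[doors:[2] npcs:[4]]
}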
main.go
package main import ( "context" "encoding/json" "fmt" "strings" "time" "github.com/opsgenie/opsgenie-go-sdk-v2/alert" "github.com/opsgenie/opsgenie-go-sdk-v2/client" "github.com/sensu-community/sensu-plugin-sdk/sensu" "github.com/sensu-community/sensu-plugin-sdk/templates" "github.com/sensu/sensu-go/types" ) const ( notFound = "NOT FOUND" source = "sensuGo" ) // Config represents the handler plugin config. type Config struct { sensu.PluginConfig AuthToken string APIRegion string Team string Priority string SensuDashboard string MessageTemplate string MessageLimit int DescriptionTemplate string DescriptionLimit int IncludeEventInNote bool WithAnnotations bool WithLabels bool FullDetails bool } var ( plugin = Config{ PluginConfig: sensu.PluginConfig{ Name: "sensu-opsgenie-handler", Short: "The Sensu Go OpsGenie handler for incident management", Keyspace: "sensu.io/plugins/sensu-opsgenie-handler/config", }, } options = []*sensu.PluginConfigOption{ { Path: "region", Env: "OPSGENIE_REGION", Argument: "region", Shorthand: "r", Default: "us", Usage: "The OpsGenie API Region (us or eu), use default from OPSGENIE_REGION env var", Value: &plugin.APIRegion, }, { Path: "auth", Env: "OPSGENIE_AUTHTOKEN", Argument: "auth", Shorthand: "a", Default: "", Secret: true, Usage: "The OpsGenie API authentication token, use default from OPSGENIE_AUTHTOKEN env var", Value: &plugin.AuthToken, }, { Path: "team", Env: "OPSGENIE_TEAM", Argument: "team", Shorthand: "t", Default: "", Usage: "The OpsGenie Team, use default from OPSGENIE_TEAM env var", Value: &plugin.Team, }, { Path: "priority", Env: "OPSGENIE_PRIORITY", Argument: "priority", Shorthand: "p", Default: "P3", Usage: "The OpsGenie Alert Priority, use default from OPSGENIE_PRIORITY env var", Value: &plugin.Priority, }, { Path: "sensuDashboard", Env: "OPSGENIE_SENSU_DASHBOARD", Argument: "sensuDashboard", Shorthand: "s", Default: "disabled", Usage: "The OpsGenie Handler will use it to create a source Sensu Dashboard URL. Use OPSGENIE_SENSU_DASHBOARD. 
Example: http://sensu-dashboard.example.local/c/~/n", Value: &plugin.SensuDashboard, }, { Path: "messageTemplate", Env: "OPSGENIE_MESSAGE_TEMPLATE", Argument: "messageTemplate", Shorthand: "m", Default: "{{.Entity.Name}}/{{.Check.Name}}", Usage: "The template for the message to be sent", Value: &plugin.MessageTemplate, }, { Path: "messageLimit", Env: "OPSGENIE_MESSAGE_LIMIT", Argument: "messageLimit", Shorthand: "l", Default: 130, Usage: "The maximum length of the message field", Value: &plugin.MessageLimit, }, { Path: "descriptionTemplate", Env: "OPSGENIE_DESCRIPTION_TEMPLATE", Argument: "descriptionTemplate", Shorthand: "d", Default: "{{.Check.Output}}", Usage: "The template for the description to be sent", Value: &plugin.DescriptionTemplate, }, { Path: "descriptionLimit", Env: "OPSGENIE_DESCRIPTION_LIMIT", Argument: "descriptionLimit", Shorthand: "L", Default: 15000, Usage: "The maximum length of the description field", Value: &plugin.DescriptionLimit, }, { Path: "includeEventInNote", Env: "", Argument: "includeEventInNote", Shorthand: "i", Default: false, Usage: "Include the event JSON in the payload sent to OpsGenie", Value: &plugin.IncludeEventInNote, }, { Path: "withAnnotations", Env: "", Argument: "withAnnotations", Shorthand: "w", Default: false, Usage: "Include the event.metadata.Annotations in details to send to OpsGenie", Value: &plugin.WithAnnotations, }, { Path: "withLabels", Env: "", Argument: "withLabels", Shorthand: "W", Default: false, Usage: "Include the event.metadata.Labels in details to send to OpsGenie", Value: &plugin.WithLabels, }, { Path: "fullDetails", Env: "", Argument: "fullDetails", Shorthand: "F", Default: false, Usage: "Include the more details to send to OpsGenie like proxy_entity_name, occurrences and agent details arch and os", Value: &plugin.FullDetails, }, } ) func main() { handler := sensu.NewGoHandler(&plugin.PluginConfig, options, checkArgs, executeHandler) handler.Execute() } func checkArgs(_ *types.Event) error { if len(plugin.AuthToken) == 0 { return fmt.Errorf("authentication token is empty") } if len(plugin.Team) == 0 { return fmt.Errorf("team is empty") } return nil } // eventPriority func read priority in the event and return alerts.PX // check.Annotations override Entity.Annotations func eventPriority() alert.Priority { switch plugin.Priority { case "P5": return alert.P5 case "P4": return alert.P4 case "P3": return alert.P3 case "P2": return alert.P2 case "P1": return alert.P1 default: return alert.P3 } } func parseActions(event *types.Event) (output []string) { if event.Check.Annotations != nil && event.Check.Annotations["opsgenie_actions"] != "" { output = strings.Split(event.Check.Annotations["opsgenie_actions"], ",") return output } return output } // parseEventKeyTags func returns string, string, and []string with event data // fist string contains custom templte string to use in message // second string contains Entity.Name/Check.Name to use in alias // []string contains Entity.Name Check.Name Entity.Namespace, event.Entity.EntityClass to use as tags in Opsgenie func parseEventKeyTags(event *types.Event) (title string, alias string, tags []string) { alias = fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name) title, err := templates.EvalTemplate("title", plugin.MessageTemplate, event) if err != nil { return "", "", []string{} } tags = append(tags, event.Entity.Name, event.Check.Name, event.Entity.Namespace, event.Entity.EntityClass) return trim(title, plugin.MessageLimit), alias, tags } // parseDescription func returns string with 
custom template string to use in description func parseDescription(event *types.Event) (description string) { description, err := templates.EvalTemplate("description", plugin.DescriptionTemplate, event) if err != nil { return "" } // allow newlines to get expanded description = strings.Replace(description, `\n`, "\n", -1) return trim(description, plugin.DescriptionLimit) } // parseDetails func returns a map of string string with check information for the details field func parseDetails(event *types.Event) map[string]string { details := make(map[string]string) details["subscriptions"] = fmt.Sprintf("%v", event.Check.Subscriptions) details["status"] = fmt.Sprintf("%d", event.Check.Status) details["interval"] = fmt.Sprintf("%d", event.Check.Interval) // only if true if plugin.FullDetails { details["output"] = event.Check.Output details["command"] = event.Check.Command details["proxy_entity_name"] = event.Check.ProxyEntityName details["state"] = event.Check.State details["ttl"] = fmt.Sprintf("%d", event.Check.Ttl) details["occurrences"] = fmt.Sprintf("%d", event.Check.Occurrences) details["occurrences_watermark"] = fmt.Sprintf("%d", event.Check.OccurrencesWatermark) details["handlers"] = fmt.Sprintf("%v", event.Check.Handlers) if event.Entity.EntityClass == "agent" { details["arch"] = event.Entity.System.GetArch() details["os"] = event.Entity.System.GetOS() details["platform"] = event.Entity.System.GetPlatform() details["platform_family"] = event.Entity.System.GetPlatformFamily() details["platform_version"] = event.Entity.System.GetPlatformVersion() } } // only if true if plugin.WithAnnotations { if event.Check.Annotations != nil { for key, value := range event.Check.Annotations { if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") { checkKey := fmt.Sprintf("%s_annotation_%s", "check", key) details[checkKey] = value } } } if event.Entity.Annotations != nil { for key, value := range event.Entity.Annotations { if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") { entityKey := fmt.Sprintf("%s_annotation_%s", "entity", key) details[entityKey] = value } } } } // only if true if plugin.WithLabels { if event.Check.Labels != nil { for key, value := range event.Check.Labels { checkKey := fmt.Sprintf("%s_label_%s", "check", key) details[checkKey] = value } } if event.Entity.Labels != nil { for key, value := range event.Entity.Labels { entityKey := fmt.Sprintf("%s_label_%s", "entity", key) details[entityKey] = value } } } if plugin.SensuDashboard != "disabled" { details["sensuDashboard"] = fmt.Sprintf("source: %s/%s/events/%s/%s \n", plugin.SensuDashboard, event.Entity.Namespace, event.Entity.Name, event.Check.Name) } return details } // switchOpsgenieRegion func func switchOpsgenieRegion() client.ApiUrl { var region client.ApiUrl apiRegionLowCase := strings.ToLower(plugin.APIRegion) switch apiRegionLowCase { case "eu": region = client.API_URL_EU case "us": region = client.API_URL default: region = client.API_URL } return region } func executeHandler(event *types.Event) error { alertClient, err := alert.NewClient(&client.Config{ ApiKey: plugin.AuthToken, OpsGenieAPIURL: switchOpsgenieRegion(), }) if err != nil { return fmt.Errorf("failed to create opsgenie client: %s", err) } if event.Check.Status != 0 { return createIncident(alertClient, event) } // check if event has a alert hasAlert, _ := getAlert(alertClient, event) // close incident if status == 0 if hasAlert != notFound && event.Check.Status == 0 { return closeAlert(alertClient, event, hasAlert) } 
return nil } // createIncident func creates an alert in OpsGenie func createIncident(alertClient *alert.Client, event *types.Event) error { var ( note string err error ) if plugin.IncludeEventInNote { note, err = getNote(event) if err != nil { return err } } teams := []alert.Responder{ {Type: alert.EscalationResponder, Name: plugin.Team}, {Type: alert.ScheduleResponder, Name: plugin.Team}, } title, alias, tags := parseEventKeyTags(event) actions := parseActions(event) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() createResult, err := alertClient.Create(ctx, &alert.CreateAlertRequest{ Message: title, Alias: alias, Description: parseDescription(event), Responders: teams, Actions: actions, Tags: tags, Details: parseDetails(event), Entity: event.Entity.Name, Source: source, Priority: eventPriority(), Note: note, }) if err != nil { fmt.Println(err.Error()) } else { fmt.Println("Create request ID: " + createResult.RequestId) } return nil } // getAlert func gets an alert using an alias. func getAlert(alertClient *alert.Client, event *types.Event) (string, error) { _, alias, _ := parseEventKeyTags(event) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() fmt.Printf("Checking for alert %s \n", alias) getResult, err := alertClient.Get(ctx, &alert.GetAlertRequest{ IdentifierType: alert.ALIAS, IdentifierValue: alias, }) if err != nil { return notFound, nil } fmt.Printf("ID: %s, Message: %s, Count: %d \n", getResult.Id, getResult.Message, getResult.Count) return getResult.Id, nil } // closeAlert func closes an alert if status == 0 func closeAlert(alertClient *alert.Client, event *types.Event, alertid string) error { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() closeResult, err := alertClient.Close(ctx, &alert.CloseAlertRequest{ IdentifierType: alert.ALERTID, IdentifierValue: alertid, Source: source, Note: "Closed Automatically", }) if err != nil { // closeResult is nil when Close fails, so return before dereferencing it fmt.Printf("[ERROR] Not Closed: %s \n", err) return err } fmt.Printf("RequestID %s to Close %s \n", closeResult.RequestId, alertid) return nil } // getNote func creates a note with the whole event in JSON format func getNote(event *types.Event) (string, error)
// trim func returns only the first n bytes of a string func trim(s string, n int) string { if len(s) > n { return s[:n] } return s }
{ eventJSON, err := json.Marshal(event) if err != nil { return "", err } return fmt.Sprintf("Event data update:\n\n%s", eventJSON), nil }
identifier_body
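The trim helper in this record's suffix slices by bytes, so a limit that lands inside a multi-byte UTF-8 sequence (an entity name with accented characters, say) yields an invalid string. If that ever matters, a rune-boundary-aware variant is a small change; this is a sketch alongside the handler, not part of it:

package main

import (
	"fmt"
	"unicode/utf8"
)

// trimRunes returns at most n bytes of s without ever splitting
// a UTF-8 sequence: it backs up to the start of the rune that
// straddles the cut point.
func trimRunes(s string, n int) string {
	if len(s) <= n {
		return s
	}
	// Walk back from the cut until we sit on a rune boundary.
	for n > 0 && !utf8.RuneStart(s[n]) {
		n--
	}
	return s[:n]
}

func main() {
	fmt.Println(trimRunes("héllo", 2)) // "h": the 2-byte é does not fit whole
	fmt.Println(trimRunes("hello", 3)) // "hel"
}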
main.go
package main import ( "context" "encoding/json" "fmt" "strings" "time" "github.com/opsgenie/opsgenie-go-sdk-v2/alert" "github.com/opsgenie/opsgenie-go-sdk-v2/client" "github.com/sensu-community/sensu-plugin-sdk/sensu" "github.com/sensu-community/sensu-plugin-sdk/templates" "github.com/sensu/sensu-go/types" ) const ( notFound = "NOT FOUND" source = "sensuGo" ) // Config represents the handler plugin config. type Config struct { sensu.PluginConfig AuthToken string APIRegion string Team string Priority string SensuDashboard string MessageTemplate string MessageLimit int DescriptionTemplate string DescriptionLimit int IncludeEventInNote bool WithAnnotations bool WithLabels bool FullDetails bool } var ( plugin = Config{ PluginConfig: sensu.PluginConfig{ Name: "sensu-opsgenie-handler", Short: "The Sensu Go OpsGenie handler for incident management", Keyspace: "sensu.io/plugins/sensu-opsgenie-handler/config", }, } options = []*sensu.PluginConfigOption{ { Path: "region", Env: "OPSGENIE_REGION", Argument: "region", Shorthand: "r", Default: "us", Usage: "The OpsGenie API Region (us or eu), use default from OPSGENIE_REGION env var", Value: &plugin.APIRegion, }, { Path: "auth", Env: "OPSGENIE_AUTHTOKEN", Argument: "auth", Shorthand: "a", Default: "", Secret: true, Usage: "The OpsGenie API authentication token, use default from OPSGENIE_AUTHTOKEN env var", Value: &plugin.AuthToken, }, { Path: "team", Env: "OPSGENIE_TEAM", Argument: "team", Shorthand: "t", Default: "", Usage: "The OpsGenie Team, use default from OPSGENIE_TEAM env var", Value: &plugin.Team, }, { Path: "priority", Env: "OPSGENIE_PRIORITY", Argument: "priority", Shorthand: "p", Default: "P3", Usage: "The OpsGenie Alert Priority, use default from OPSGENIE_PRIORITY env var",
}, { Path: "sensuDashboard", Env: "OPSGENIE_SENSU_DASHBOARD", Argument: "sensuDashboard", Shorthand: "s", Default: "disabled", Usage: "The OpsGenie Handler will use it to create a source Sensu Dashboard URL. Use OPSGENIE_SENSU_DASHBOARD. Example: http://sensu-dashboard.example.local/c/~/n", Value: &plugin.SensuDashboard, }, { Path: "messageTemplate", Env: "OPSGENIE_MESSAGE_TEMPLATE", Argument: "messageTemplate", Shorthand: "m", Default: "{{.Entity.Name}}/{{.Check.Name}}", Usage: "The template for the message to be sent", Value: &plugin.MessageTemplate, }, { Path: "messageLimit", Env: "OPSGENIE_MESSAGE_LIMIT", Argument: "messageLimit", Shorthand: "l", Default: 130, Usage: "The maximum length of the message field", Value: &plugin.MessageLimit, }, { Path: "descriptionTemplate", Env: "OPSGENIE_DESCRIPTION_TEMPLATE", Argument: "descriptionTemplate", Shorthand: "d", Default: "{{.Check.Output}}", Usage: "The template for the description to be sent", Value: &plugin.DescriptionTemplate, }, { Path: "descriptionLimit", Env: "OPSGENIE_DESCRIPTION_LIMIT", Argument: "descriptionLimit", Shorthand: "L", Default: 15000, Usage: "The maximum length of the description field", Value: &plugin.DescriptionLimit, }, { Path: "includeEventInNote", Env: "", Argument: "includeEventInNote", Shorthand: "i", Default: false, Usage: "Include the event JSON in the payload sent to OpsGenie", Value: &plugin.IncludeEventInNote, }, { Path: "withAnnotations", Env: "", Argument: "withAnnotations", Shorthand: "w", Default: false, Usage: "Include the event.metadata.Annotations in details to send to OpsGenie", Value: &plugin.WithAnnotations, }, { Path: "withLabels", Env: "", Argument: "withLabels", Shorthand: "W", Default: false, Usage: "Include the event.metadata.Labels in details to send to OpsGenie", Value: &plugin.WithLabels, }, { Path: "fullDetails", Env: "", Argument: "fullDetails", Shorthand: "F", Default: false, Usage: "Include the more details to send to OpsGenie like proxy_entity_name, occurrences and agent details arch and os", Value: &plugin.FullDetails, }, } ) func main() { handler := sensu.NewGoHandler(&plugin.PluginConfig, options, checkArgs, executeHandler) handler.Execute() } func checkArgs(_ *types.Event) error { if len(plugin.AuthToken) == 0 { return fmt.Errorf("authentication token is empty") } if len(plugin.Team) == 0 { return fmt.Errorf("team is empty") } return nil } // eventPriority func read priority in the event and return alerts.PX // check.Annotations override Entity.Annotations func eventPriority() alert.Priority { switch plugin.Priority { case "P5": return alert.P5 case "P4": return alert.P4 case "P3": return alert.P3 case "P2": return alert.P2 case "P1": return alert.P1 default: return alert.P3 } } func parseActions(event *types.Event) (output []string) { if event.Check.Annotations != nil && event.Check.Annotations["opsgenie_actions"] != "" { output = strings.Split(event.Check.Annotations["opsgenie_actions"], ",") return output } return output } // parseEventKeyTags func returns string, string, and []string with event data // fist string contains custom templte string to use in message // second string contains Entity.Name/Check.Name to use in alias // []string contains Entity.Name Check.Name Entity.Namespace, event.Entity.EntityClass to use as tags in Opsgenie func parseEventKeyTags(event *types.Event) (title string, alias string, tags []string) { alias = fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name) title, err := templates.EvalTemplate("title", plugin.MessageTemplate, event) if err != nil 
{ return "", "", []string{} } tags = append(tags, event.Entity.Name, event.Check.Name, event.Entity.Namespace, event.Entity.EntityClass) return trim(title, plugin.MessageLimit), alias, tags } // parseDescription func returns string with custom template string to use in description func parseDescription(event *types.Event) (description string) { description, err := templates.EvalTemplate("description", plugin.DescriptionTemplate, event) if err != nil { return "" } // allow newlines to get expanded description = strings.Replace(description, `\n`, "\n", -1) return trim(description, plugin.DescriptionLimit) } // parseDetails func returns a map of string string with check information for the details field func parseDetails(event *types.Event) map[string]string { details := make(map[string]string) details["subscriptions"] = fmt.Sprintf("%v", event.Check.Subscriptions) details["status"] = fmt.Sprintf("%d", event.Check.Status) details["interval"] = fmt.Sprintf("%d", event.Check.Interval) // only if true if plugin.FullDetails { details["output"] = event.Check.Output details["command"] = event.Check.Command details["proxy_entity_name"] = event.Check.ProxyEntityName details["state"] = event.Check.State details["ttl"] = fmt.Sprintf("%d", event.Check.Ttl) details["occurrences"] = fmt.Sprintf("%d", event.Check.Occurrences) details["occurrences_watermark"] = fmt.Sprintf("%d", event.Check.OccurrencesWatermark) details["handlers"] = fmt.Sprintf("%v", event.Check.Handlers) if event.Entity.EntityClass == "agent" { details["arch"] = event.Entity.System.GetArch() details["os"] = event.Entity.System.GetOS() details["platform"] = event.Entity.System.GetPlatform() details["platform_family"] = event.Entity.System.GetPlatformFamily() details["platform_version"] = event.Entity.System.GetPlatformVersion() } } // only if true if plugin.WithAnnotations { if event.Check.Annotations != nil { for key, value := range event.Check.Annotations { if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") { checkKey := fmt.Sprintf("%s_annotation_%s", "check", key) details[checkKey] = value } } } if event.Entity.Annotations != nil { for key, value := range event.Entity.Annotations { if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") { entityKey := fmt.Sprintf("%s_annotation_%s", "entity", key) details[entityKey] = value } } } } // only if true if plugin.WithLabels { if event.Check.Labels != nil { for key, value := range event.Check.Labels { checkKey := fmt.Sprintf("%s_label_%s", "check", key) details[checkKey] = value } } if event.Entity.Labels != nil { for key, value := range event.Entity.Labels { entityKey := fmt.Sprintf("%s_label_%s", "entity", key) details[entityKey] = value } } } if plugin.SensuDashboard != "disabled" { details["sensuDashboard"] = fmt.Sprintf("source: %s/%s/events/%s/%s \n", plugin.SensuDashboard, event.Entity.Namespace, event.Entity.Name, event.Check.Name) } return details } // switchOpsgenieRegion func func switchOpsgenieRegion() client.ApiUrl { var region client.ApiUrl apiRegionLowCase := strings.ToLower(plugin.APIRegion) switch apiRegionLowCase { case "eu": region = client.API_URL_EU case "us": region = client.API_URL default: region = client.API_URL } return region } func executeHandler(event *types.Event) error { alertClient, err := alert.NewClient(&client.Config{ ApiKey: plugin.AuthToken, OpsGenieAPIURL: switchOpsgenieRegion(), }) if err != nil { return fmt.Errorf("failed to create opsgenie client: %s", err) } if event.Check.Status != 0 { return 
createIncident(alertClient, event) } // check if event has an alert hasAlert, _ := getAlert(alertClient, event) // close incident if status == 0 if hasAlert != notFound && event.Check.Status == 0 { return closeAlert(alertClient, event, hasAlert) } return nil } // createIncident func creates an alert in OpsGenie func createIncident(alertClient *alert.Client, event *types.Event) error { var ( note string err error ) if plugin.IncludeEventInNote { note, err = getNote(event) if err != nil { return err } } teams := []alert.Responder{ {Type: alert.EscalationResponder, Name: plugin.Team}, {Type: alert.ScheduleResponder, Name: plugin.Team}, } title, alias, tags := parseEventKeyTags(event) actions := parseActions(event) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() createResult, err := alertClient.Create(ctx, &alert.CreateAlertRequest{ Message: title, Alias: alias, Description: parseDescription(event), Responders: teams, Actions: actions, Tags: tags, Details: parseDetails(event), Entity: event.Entity.Name, Source: source, Priority: eventPriority(), Note: note, }) if err != nil { fmt.Println(err.Error()) } else { fmt.Println("Create request ID: " + createResult.RequestId) } return nil } // getAlert func gets an alert using an alias. func getAlert(alertClient *alert.Client, event *types.Event) (string, error) { _, alias, _ := parseEventKeyTags(event) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() fmt.Printf("Checking for alert %s \n", alias) getResult, err := alertClient.Get(ctx, &alert.GetAlertRequest{ IdentifierType: alert.ALIAS, IdentifierValue: alias, }) if err != nil { return notFound, nil } fmt.Printf("ID: %s, Message: %s, Count: %d \n", getResult.Id, getResult.Message, getResult.Count) return getResult.Id, nil } // closeAlert func closes an alert if status == 0 func closeAlert(alertClient *alert.Client, event *types.Event, alertid string) error { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() closeResult, err := alertClient.Close(ctx, &alert.CloseAlertRequest{ IdentifierType: alert.ALERTID, IdentifierValue: alertid, Source: source, Note: "Closed Automatically", }) if err != nil { // closeResult is nil when Close fails, so return before dereferencing it fmt.Printf("[ERROR] Not Closed: %s \n", err) return err } fmt.Printf("RequestID %s to Close %s \n", closeResult.RequestId, alertid) return nil } // getNote func creates a note with the whole event in JSON format func getNote(event *types.Event) (string, error) { eventJSON, err := json.Marshal(event) if err != nil { return "", err } return fmt.Sprintf("Event data update:\n\n%s", eventJSON), nil } // trim func returns only the first n bytes of a string func trim(s string, n int) string { if len(s) > n { return s[:n] } return s }
Value: &plugin.Priority,
random_line_split
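The split line above is the Value binding that wires the priority flag into plugin.Priority, which eventPriority then maps onto the SDK's alert.P1 through alert.P5 constants, with alert.P3 as the fallback. A table-driven test of that mapping could sit next to the handler; this sketch assumes it compiles in the same main package so it can reach the unexported plugin and eventPriority:

package main

import (
	"testing"

	"github.com/opsgenie/opsgenie-go-sdk-v2/alert"
)

// TestEventPriority exercises the string -> alert.Priority mapping,
// including the P3 fallback for unknown or empty values.
func TestEventPriority(t *testing.T) {
	cases := map[string]alert.Priority{
		"P1":    alert.P1,
		"P2":    alert.P2,
		"P3":    alert.P3,
		"P4":    alert.P4,
		"P5":    alert.P5,
		"bogus": alert.P3, // unrecognized values fall back to P3
		"":      alert.P3, // so does an unset priority
	}
	for in, want := range cases {
		plugin.Priority = in
		if got := eventPriority(); got != want {
			t.Errorf("eventPriority() with priority %q = %v, want %v", in, got, want)
		}
	}
}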
main.go
package main import ( "context" "encoding/json" "fmt" "strings" "time" "github.com/opsgenie/opsgenie-go-sdk-v2/alert" "github.com/opsgenie/opsgenie-go-sdk-v2/client" "github.com/sensu-community/sensu-plugin-sdk/sensu" "github.com/sensu-community/sensu-plugin-sdk/templates" "github.com/sensu/sensu-go/types" ) const ( notFound = "NOT FOUND" source = "sensuGo" ) // Config represents the handler plugin config. type Config struct { sensu.PluginConfig AuthToken string APIRegion string Team string Priority string SensuDashboard string MessageTemplate string MessageLimit int DescriptionTemplate string DescriptionLimit int IncludeEventInNote bool WithAnnotations bool WithLabels bool FullDetails bool } var ( plugin = Config{ PluginConfig: sensu.PluginConfig{ Name: "sensu-opsgenie-handler", Short: "The Sensu Go OpsGenie handler for incident management", Keyspace: "sensu.io/plugins/sensu-opsgenie-handler/config", }, } options = []*sensu.PluginConfigOption{ { Path: "region", Env: "OPSGENIE_REGION", Argument: "region", Shorthand: "r", Default: "us", Usage: "The OpsGenie API Region (us or eu), use default from OPSGENIE_REGION env var", Value: &plugin.APIRegion, }, { Path: "auth", Env: "OPSGENIE_AUTHTOKEN", Argument: "auth", Shorthand: "a", Default: "", Secret: true, Usage: "The OpsGenie API authentication token, use default from OPSGENIE_AUTHTOKEN env var", Value: &plugin.AuthToken, }, { Path: "team", Env: "OPSGENIE_TEAM", Argument: "team", Shorthand: "t", Default: "", Usage: "The OpsGenie Team, use default from OPSGENIE_TEAM env var", Value: &plugin.Team, }, { Path: "priority", Env: "OPSGENIE_PRIORITY", Argument: "priority", Shorthand: "p", Default: "P3", Usage: "The OpsGenie Alert Priority, use default from OPSGENIE_PRIORITY env var", Value: &plugin.Priority, }, { Path: "sensuDashboard", Env: "OPSGENIE_SENSU_DASHBOARD", Argument: "sensuDashboard", Shorthand: "s", Default: "disabled", Usage: "The OpsGenie Handler will use it to create a source Sensu Dashboard URL. Use OPSGENIE_SENSU_DASHBOARD. 
Example: http://sensu-dashboard.example.local/c/~/n", Value: &plugin.SensuDashboard, }, { Path: "messageTemplate", Env: "OPSGENIE_MESSAGE_TEMPLATE", Argument: "messageTemplate", Shorthand: "m", Default: "{{.Entity.Name}}/{{.Check.Name}}", Usage: "The template for the message to be sent", Value: &plugin.MessageTemplate, }, { Path: "messageLimit", Env: "OPSGENIE_MESSAGE_LIMIT", Argument: "messageLimit", Shorthand: "l", Default: 130, Usage: "The maximum length of the message field", Value: &plugin.MessageLimit, }, { Path: "descriptionTemplate", Env: "OPSGENIE_DESCRIPTION_TEMPLATE", Argument: "descriptionTemplate", Shorthand: "d", Default: "{{.Check.Output}}", Usage: "The template for the description to be sent", Value: &plugin.DescriptionTemplate, }, { Path: "descriptionLimit", Env: "OPSGENIE_DESCRIPTION_LIMIT", Argument: "descriptionLimit", Shorthand: "L", Default: 15000, Usage: "The maximum length of the description field", Value: &plugin.DescriptionLimit, }, { Path: "includeEventInNote", Env: "", Argument: "includeEventInNote", Shorthand: "i", Default: false, Usage: "Include the event JSON in the payload sent to OpsGenie", Value: &plugin.IncludeEventInNote, }, { Path: "withAnnotations", Env: "", Argument: "withAnnotations", Shorthand: "w", Default: false, Usage: "Include the event.metadata.Annotations in details to send to OpsGenie", Value: &plugin.WithAnnotations, }, { Path: "withLabels", Env: "", Argument: "withLabels", Shorthand: "W", Default: false, Usage: "Include the event.metadata.Labels in details to send to OpsGenie", Value: &plugin.WithLabels, }, { Path: "fullDetails", Env: "", Argument: "fullDetails", Shorthand: "F", Default: false, Usage: "Include the more details to send to OpsGenie like proxy_entity_name, occurrences and agent details arch and os", Value: &plugin.FullDetails, }, } ) func main() { handler := sensu.NewGoHandler(&plugin.PluginConfig, options, checkArgs, executeHandler) handler.Execute() } func checkArgs(_ *types.Event) error { if len(plugin.AuthToken) == 0 { return fmt.Errorf("authentication token is empty") } if len(plugin.Team) == 0 { return fmt.Errorf("team is empty") } return nil } // eventPriority func read priority in the event and return alerts.PX // check.Annotations override Entity.Annotations func eventPriority() alert.Priority { switch plugin.Priority { case "P5": return alert.P5 case "P4": return alert.P4 case "P3": return alert.P3 case "P2": return alert.P2 case "P1": return alert.P1 default: return alert.P3 } } func parseActions(event *types.Event) (output []string) { if event.Check.Annotations != nil && event.Check.Annotations["opsgenie_actions"] != "" { output = strings.Split(event.Check.Annotations["opsgenie_actions"], ",") return output } return output } // parseEventKeyTags func returns string, string, and []string with event data // fist string contains custom templte string to use in message // second string contains Entity.Name/Check.Name to use in alias // []string contains Entity.Name Check.Name Entity.Namespace, event.Entity.EntityClass to use as tags in Opsgenie func parseEventKeyTags(event *types.Event) (title string, alias string, tags []string) { alias = fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name) title, err := templates.EvalTemplate("title", plugin.MessageTemplate, event) if err != nil { return "", "", []string{} } tags = append(tags, event.Entity.Name, event.Check.Name, event.Entity.Namespace, event.Entity.EntityClass) return trim(title, plugin.MessageLimit), alias, tags } // parseDescription func returns string with 
custom template string to use in description func parseDescription(event *types.Event) (description string) { description, err := templates.EvalTemplate("description", plugin.DescriptionTemplate, event) if err != nil { return "" } // allow newlines to get expanded description = strings.Replace(description, `\n`, "\n", -1) return trim(description, plugin.DescriptionLimit) } // parseDetails func returns a map of string string with check information for the details field func parseDetails(event *types.Event) map[string]string { details := make(map[string]string) details["subscriptions"] = fmt.Sprintf("%v", event.Check.Subscriptions) details["status"] = fmt.Sprintf("%d", event.Check.Status) details["interval"] = fmt.Sprintf("%d", event.Check.Interval) // only if true if plugin.FullDetails
// only if true if plugin.WithAnnotations { if event.Check.Annotations != nil { for key, value := range event.Check.Annotations { if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") { checkKey := fmt.Sprintf("%s_annotation_%s", "check", key) details[checkKey] = value } } } if event.Entity.Annotations != nil { for key, value := range event.Entity.Annotations { if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") { entityKey := fmt.Sprintf("%s_annotation_%s", "entity", key) details[entityKey] = value } } } } // only if true if plugin.WithLabels { if event.Check.Labels != nil { for key, value := range event.Check.Labels { checkKey := fmt.Sprintf("%s_label_%s", "check", key) details[checkKey] = value } } if event.Entity.Labels != nil { for key, value := range event.Entity.Labels { entityKey := fmt.Sprintf("%s_label_%s", "entity", key) details[entityKey] = value } } } if plugin.SensuDashboard != "disabled" { details["sensuDashboard"] = fmt.Sprintf("source: %s/%s/events/%s/%s \n", plugin.SensuDashboard, event.Entity.Namespace, event.Entity.Name, event.Check.Name) } return details } // switchOpsgenieRegion func func switchOpsgenieRegion() client.ApiUrl { var region client.ApiUrl apiRegionLowCase := strings.ToLower(plugin.APIRegion) switch apiRegionLowCase { case "eu": region = client.API_URL_EU case "us": region = client.API_URL default: region = client.API_URL } return region } func executeHandler(event *types.Event) error { alertClient, err := alert.NewClient(&client.Config{ ApiKey: plugin.AuthToken, OpsGenieAPIURL: switchOpsgenieRegion(), }) if err != nil { return fmt.Errorf("failed to create opsgenie client: %s", err) } if event.Check.Status != 0 { return createIncident(alertClient, event) } // check if event has a alert hasAlert, _ := getAlert(alertClient, event) // close incident if status == 0 if hasAlert != notFound && event.Check.Status == 0 { return closeAlert(alertClient, event, hasAlert) } return nil } // createIncident func create an alert in OpsGenie func createIncident(alertClient *alert.Client, event *types.Event) error { var ( note string err error ) if plugin.IncludeEventInNote { note, err = getNote(event) if err != nil { return err } } teams := []alert.Responder{ {Type: alert.EscalationResponder, Name: plugin.Team}, {Type: alert.ScheduleResponder, Name: plugin.Team}, } title, alias, tags := parseEventKeyTags(event) actions := parseActions(event) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() createResult, err := alertClient.Create(ctx, &alert.CreateAlertRequest{ Message: title, Alias: alias, Description: parseDescription(event), Responders: teams, Actions: actions, Tags: tags, Details: parseDetails(event), Entity: event.Entity.Name, Source: source, Priority: eventPriority(), Note: note, }) if err != nil { fmt.Println(err.Error()) } else { fmt.Println("Create request ID: " + createResult.RequestId) } return nil } // getAlert func get a alert using an alias. 
func getAlert(alertClient *alert.Client, event *types.Event) (string, error) { _, alias, _ := parseEventKeyTags(event) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() fmt.Printf("Checking for alert %s \n", alias) getResult, err := alertClient.Get(ctx, &alert.GetAlertRequest{ IdentifierType: alert.ALIAS, IdentifierValue: alias, }) if err != nil { return notFound, nil } fmt.Printf("ID: %s, Message: %s, Count: %d \n", getResult.Id, getResult.Message, getResult.Count) return getResult.Id, nil } // closeAlert func closes an alert if status == 0 func closeAlert(alertClient *alert.Client, event *types.Event, alertid string) error { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() closeResult, err := alertClient.Close(ctx, &alert.CloseAlertRequest{ IdentifierType: alert.ALERTID, IdentifierValue: alertid, Source: source, Note: "Closed Automatically", }) if err != nil { // closeResult is nil when Close fails, so return before dereferencing it fmt.Printf("[ERROR] Not Closed: %s \n", err) return err } fmt.Printf("RequestID %s to Close %s \n", closeResult.RequestId, alertid) return nil } // getNote func creates a note with the whole event in JSON format func getNote(event *types.Event) (string, error) { eventJSON, err := json.Marshal(event) if err != nil { return "", err } return fmt.Sprintf("Event data update:\n\n%s", eventJSON), nil } // trim func returns only the first n bytes of a string func trim(s string, n int) string { if len(s) > n { return s[:n] } return s }
{ details["output"] = event.Check.Output details["command"] = event.Check.Command details["proxy_entity_name"] = event.Check.ProxyEntityName details["state"] = event.Check.State details["ttl"] = fmt.Sprintf("%d", event.Check.Ttl) details["occurrences"] = fmt.Sprintf("%d", event.Check.Occurrences) details["occurrences_watermark"] = fmt.Sprintf("%d", event.Check.OccurrencesWatermark) details["handlers"] = fmt.Sprintf("%v", event.Check.Handlers) if event.Entity.EntityClass == "agent" { details["arch"] = event.Entity.System.GetArch() details["os"] = event.Entity.System.GetOS() details["platform"] = event.Entity.System.GetPlatform() details["platform_family"] = event.Entity.System.GetPlatformFamily() details["platform_version"] = event.Entity.System.GetPlatformVersion() } }
conditional_block
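The conditional_block above, together with the annotation and label sections around it, flattens structured event data into OpsGenie's flat details map by prefixing each key with its origin (check_label_..., entity_annotation_...) and skipping the handler's own config keyspace. The prefixing scheme is easy to show in isolation; this standalone sketch works over plain maps rather than a Sensu event:

package main

import (
	"fmt"
	"strings"
)

const keyspace = "sensu.io/plugins/sensu-opsgenie-handler/config"

// flattenInto copies src into dst, namespacing every key as
// "<scope>_<kind>_<key>" the way parseDetails does, and dropping
// keys that belong to the handler's own config keyspace.
func flattenInto(dst map[string]string, scope, kind string, src map[string]string) {
	for k, v := range src {
		if strings.Contains(k, keyspace) {
			continue
		}
		dst[fmt.Sprintf("%s_%s_%s", scope, kind, k)] = v
	}
}

func main() {
	details := map[string]string{"status": "2"}
	flattenInto(details, "check", "label", map[string]string{"team": "ops"})
	flattenInto(details, "entity", "annotation", map[string]string{
		"rack":             "r12",
		keyspace + "/team": "ignored", // handler config, dropped
	})
	fmt.Println(details)
	// map[check_label_team:ops entity_annotation_rack:r12 status:2]
}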
main.go
package main import ( "context" "encoding/json" "fmt" "strings" "time" "github.com/opsgenie/opsgenie-go-sdk-v2/alert" "github.com/opsgenie/opsgenie-go-sdk-v2/client" "github.com/sensu-community/sensu-plugin-sdk/sensu" "github.com/sensu-community/sensu-plugin-sdk/templates" "github.com/sensu/sensu-go/types" ) const ( notFound = "NOT FOUND" source = "sensuGo" ) // Config represents the handler plugin config. type Config struct { sensu.PluginConfig AuthToken string APIRegion string Team string Priority string SensuDashboard string MessageTemplate string MessageLimit int DescriptionTemplate string DescriptionLimit int IncludeEventInNote bool WithAnnotations bool WithLabels bool FullDetails bool } var ( plugin = Config{ PluginConfig: sensu.PluginConfig{ Name: "sensu-opsgenie-handler", Short: "The Sensu Go OpsGenie handler for incident management", Keyspace: "sensu.io/plugins/sensu-opsgenie-handler/config", }, } options = []*sensu.PluginConfigOption{ { Path: "region", Env: "OPSGENIE_REGION", Argument: "region", Shorthand: "r", Default: "us", Usage: "The OpsGenie API Region (us or eu), use default from OPSGENIE_REGION env var", Value: &plugin.APIRegion, }, { Path: "auth", Env: "OPSGENIE_AUTHTOKEN", Argument: "auth", Shorthand: "a", Default: "", Secret: true, Usage: "The OpsGenie API authentication token, use default from OPSGENIE_AUTHTOKEN env var", Value: &plugin.AuthToken, }, { Path: "team", Env: "OPSGENIE_TEAM", Argument: "team", Shorthand: "t", Default: "", Usage: "The OpsGenie Team, use default from OPSGENIE_TEAM env var", Value: &plugin.Team, }, { Path: "priority", Env: "OPSGENIE_PRIORITY", Argument: "priority", Shorthand: "p", Default: "P3", Usage: "The OpsGenie Alert Priority, use default from OPSGENIE_PRIORITY env var", Value: &plugin.Priority, }, { Path: "sensuDashboard", Env: "OPSGENIE_SENSU_DASHBOARD", Argument: "sensuDashboard", Shorthand: "s", Default: "disabled", Usage: "The OpsGenie Handler will use it to create a source Sensu Dashboard URL. Use OPSGENIE_SENSU_DASHBOARD. 
Example: http://sensu-dashboard.example.local/c/~/n", Value: &plugin.SensuDashboard, }, { Path: "messageTemplate", Env: "OPSGENIE_MESSAGE_TEMPLATE", Argument: "messageTemplate", Shorthand: "m", Default: "{{.Entity.Name}}/{{.Check.Name}}", Usage: "The template for the message to be sent", Value: &plugin.MessageTemplate, }, { Path: "messageLimit", Env: "OPSGENIE_MESSAGE_LIMIT", Argument: "messageLimit", Shorthand: "l", Default: 130, Usage: "The maximum length of the message field", Value: &plugin.MessageLimit, }, { Path: "descriptionTemplate", Env: "OPSGENIE_DESCRIPTION_TEMPLATE", Argument: "descriptionTemplate", Shorthand: "d", Default: "{{.Check.Output}}", Usage: "The template for the description to be sent", Value: &plugin.DescriptionTemplate, }, { Path: "descriptionLimit", Env: "OPSGENIE_DESCRIPTION_LIMIT", Argument: "descriptionLimit", Shorthand: "L", Default: 15000, Usage: "The maximum length of the description field", Value: &plugin.DescriptionLimit, }, { Path: "includeEventInNote", Env: "", Argument: "includeEventInNote", Shorthand: "i", Default: false, Usage: "Include the event JSON in the payload sent to OpsGenie", Value: &plugin.IncludeEventInNote, }, { Path: "withAnnotations", Env: "", Argument: "withAnnotations", Shorthand: "w", Default: false, Usage: "Include the event.metadata.Annotations in details to send to OpsGenie", Value: &plugin.WithAnnotations, }, { Path: "withLabels", Env: "", Argument: "withLabels", Shorthand: "W", Default: false, Usage: "Include the event.metadata.Labels in details to send to OpsGenie", Value: &plugin.WithLabels, }, { Path: "fullDetails", Env: "", Argument: "fullDetails", Shorthand: "F", Default: false, Usage: "Include the more details to send to OpsGenie like proxy_entity_name, occurrences and agent details arch and os", Value: &plugin.FullDetails, }, } ) func main() { handler := sensu.NewGoHandler(&plugin.PluginConfig, options, checkArgs, executeHandler) handler.Execute() } func checkArgs(_ *types.Event) error { if len(plugin.AuthToken) == 0 { return fmt.Errorf("authentication token is empty") } if len(plugin.Team) == 0 { return fmt.Errorf("team is empty") } return nil } // eventPriority func read priority in the event and return alerts.PX // check.Annotations override Entity.Annotations func eventPriority() alert.Priority { switch plugin.Priority { case "P5": return alert.P5 case "P4": return alert.P4 case "P3": return alert.P3 case "P2": return alert.P2 case "P1": return alert.P1 default: return alert.P3 } } func parseActions(event *types.Event) (output []string) { if event.Check.Annotations != nil && event.Check.Annotations["opsgenie_actions"] != "" { output = strings.Split(event.Check.Annotations["opsgenie_actions"], ",") return output } return output } // parseEventKeyTags func returns string, string, and []string with event data // fist string contains custom templte string to use in message // second string contains Entity.Name/Check.Name to use in alias // []string contains Entity.Name Check.Name Entity.Namespace, event.Entity.EntityClass to use as tags in Opsgenie func parseEventKeyTags(event *types.Event) (title string, alias string, tags []string) { alias = fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name) title, err := templates.EvalTemplate("title", plugin.MessageTemplate, event) if err != nil { return "", "", []string{} } tags = append(tags, event.Entity.Name, event.Check.Name, event.Entity.Namespace, event.Entity.EntityClass) return trim(title, plugin.MessageLimit), alias, tags } // parseDescription func returns string with 
custom template string to use in description func parseDescription(event *types.Event) (description string) { description, err := templates.EvalTemplate("description", plugin.DescriptionTemplate, event) if err != nil { return "" } // allow newlines to get expanded description = strings.Replace(description, `\n`, "\n", -1) return trim(description, plugin.DescriptionLimit) } // parseDetails func returns a map of string string with check information for the details field func parseDetails(event *types.Event) map[string]string { details := make(map[string]string) details["subscriptions"] = fmt.Sprintf("%v", event.Check.Subscriptions) details["status"] = fmt.Sprintf("%d", event.Check.Status) details["interval"] = fmt.Sprintf("%d", event.Check.Interval) // only if true if plugin.FullDetails { details["output"] = event.Check.Output details["command"] = event.Check.Command details["proxy_entity_name"] = event.Check.ProxyEntityName details["state"] = event.Check.State details["ttl"] = fmt.Sprintf("%d", event.Check.Ttl) details["occurrences"] = fmt.Sprintf("%d", event.Check.Occurrences) details["occurrences_watermark"] = fmt.Sprintf("%d", event.Check.OccurrencesWatermark) details["handlers"] = fmt.Sprintf("%v", event.Check.Handlers) if event.Entity.EntityClass == "agent" { details["arch"] = event.Entity.System.GetArch() details["os"] = event.Entity.System.GetOS() details["platform"] = event.Entity.System.GetPlatform() details["platform_family"] = event.Entity.System.GetPlatformFamily() details["platform_version"] = event.Entity.System.GetPlatformVersion() } } // only if true if plugin.WithAnnotations { if event.Check.Annotations != nil { for key, value := range event.Check.Annotations { if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") { checkKey := fmt.Sprintf("%s_annotation_%s", "check", key) details[checkKey] = value } } } if event.Entity.Annotations != nil { for key, value := range event.Entity.Annotations { if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") { entityKey := fmt.Sprintf("%s_annotation_%s", "entity", key) details[entityKey] = value } } } } // only if true if plugin.WithLabels { if event.Check.Labels != nil { for key, value := range event.Check.Labels { checkKey := fmt.Sprintf("%s_label_%s", "check", key) details[checkKey] = value } } if event.Entity.Labels != nil { for key, value := range event.Entity.Labels { entityKey := fmt.Sprintf("%s_label_%s", "entity", key) details[entityKey] = value } } } if plugin.SensuDashboard != "disabled" { details["sensuDashboard"] = fmt.Sprintf("source: %s/%s/events/%s/%s \n", plugin.SensuDashboard, event.Entity.Namespace, event.Entity.Name, event.Check.Name) } return details } // switchOpsgenieRegion func func switchOpsgenieRegion() client.ApiUrl { var region client.ApiUrl apiRegionLowCase := strings.ToLower(plugin.APIRegion) switch apiRegionLowCase { case "eu": region = client.API_URL_EU case "us": region = client.API_URL default: region = client.API_URL } return region } func executeHandler(event *types.Event) error { alertClient, err := alert.NewClient(&client.Config{ ApiKey: plugin.AuthToken, OpsGenieAPIURL: switchOpsgenieRegion(), }) if err != nil { return fmt.Errorf("failed to create opsgenie client: %s", err) } if event.Check.Status != 0 { return createIncident(alertClient, event) } // check if event has a alert hasAlert, _ := getAlert(alertClient, event) // close incident if status == 0 if hasAlert != notFound && event.Check.Status == 0 { return closeAlert(alertClient, event, hasAlert) } 
return nil } // createIncident func creates an alert in OpsGenie func createIncident(alertClient *alert.Client, event *types.Event) error { var ( note string err error ) if plugin.IncludeEventInNote { note, err = getNote(event) if err != nil { return err } } teams := []alert.Responder{ {Type: alert.EscalationResponder, Name: plugin.Team}, {Type: alert.ScheduleResponder, Name: plugin.Team}, } title, alias, tags := parseEventKeyTags(event) actions := parseActions(event) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() createResult, err := alertClient.Create(ctx, &alert.CreateAlertRequest{ Message: title, Alias: alias, Description: parseDescription(event), Responders: teams, Actions: actions, Tags: tags, Details: parseDetails(event), Entity: event.Entity.Name, Source: source, Priority: eventPriority(), Note: note, }) if err != nil { return fmt.Errorf("failed to create alert: %s", err) } fmt.Println("Create request ID: " + createResult.RequestId) return nil } // getAlert func gets an alert using an alias. func
(alertClient *alert.Client, event *types.Event) (string, error) { _, title, _ := parseEventKeyTags(event) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() fmt.Printf("Checking for alert %s \n", title) getResult, err := alertClient.Get(ctx, &alert.GetAlertRequest{ IdentifierType: alert.ALIAS, IdentifierValue: title, }) if err != nil { return notFound, nil } fmt.Printf("ID: %s, Message: %s, Count: %d \n", getResult.Id, getResult.Message, getResult.Count) return getResult.Id, nil } // closeAlert func closes an alert if status == 0 func closeAlert(alertClient *alert.Client, event *types.Event, alertid string) error { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() closeResult, err := alertClient.Close(ctx, &alert.CloseAlertRequest{ IdentifierType: alert.ALERTID, IdentifierValue: alertid, Source: source, Note: "Closed Automatically", }) if err != nil { fmt.Printf("[ERROR] Not Closed: %s \n", err) return err } fmt.Printf("RequestID %s to Close %s \n", closeResult.RequestId, alertid) return nil } // getNote func creates a note with the whole event in JSON format func getNote(event *types.Event) (string, error) { eventJSON, err := json.Marshal(event) if err != nil { return "", err } return fmt.Sprintf("Event data update:\n\n%s", eventJSON), nil } // trim func returns only the first n bytes of a string func trim(s string, n int) string { if len(s) > n { return s[:n] } return s }
getAlert
identifier_name
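Note: the Go `trim` helper above truncates byte-wise with a plain slice, so a cut can land in the middle of a multi-byte UTF-8 character. A minimal Python sketch of the same truncation rule; the 130/15000 limits mirror the record's messageLimit/descriptionLimit defaults, and the errors="ignore" decode (which drops a partially cut character) is an addition over the Go version, not part of the handler:

MESSAGE_LIMIT = 130        # mirrors the record's messageLimit default
DESCRIPTION_LIMIT = 15000  # mirrors the record's descriptionLimit default

def trim(s: str, n: int) -> str:
    # Keep at most the first n bytes, like Go's s[:n] on a string.
    return s.encode("utf-8")[:n].decode("utf-8", errors="ignore")

print(len(trim("x" * 200, MESSAGE_LIMIT).encode("utf-8")))  # 130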
pages.py
""" Page objects for interacting with the test site. """ from __future__ import absolute_import import os import time from bok_choy.page_object import PageObject from bok_choy.promise import EmptyPromise from bok_choy.javascript import js_defined, requirejs, wait_for_js class SitePage(PageObject): """ Base class for all pages in the test site. """ # Get the server port from the environment # (set by the test runner script) SERVER_PORT = os.environ.get("SERVER_PORT", 8005) def is_browser_on_page(self): title = self.name.lower().replace('_', ' ') return title in self.browser.title.lower() @property def url(self): return "http://localhost:{0}/{1}".format(self.SERVER_PORT, self.name + ".html") @property def output(self): """ Return the contents of the "#output" div on the page. The fixtures are configured to update this div when the user interacts with the page. """ text_list = self.q(css='#output').text if len(text_list) < 1: return None return text_list[0] class ButtonPage(SitePage): """ Page for testing button interactions. """ name = "button" def click_button(self): """ Click the button on the page, which should cause the JavaScript to update the #output div. """ self.q(css='div#fixture input').first.click() class TextFieldPage(SitePage): """ Page for testing text field interactions. """ name = "text_field" def
(self, text): """ Input `text` into the text field on the page. """ self.q(css='#fixture input').fill(text) class SelectPage(SitePage): """ Page for testing select input interactions. """ name = "select" def select_car(self, car_value): """ Select the car with ``car_value`` in the drop-down list. """ self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click() def is_car_selected(self, car): """ Return ``True`` if the given ``car`` is selected, ``False`` otherwise. """ return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected class CheckboxPage(SitePage): """ Page for testing checkbox interactions. """ name = "checkbox" def toggle_pill(self, pill_name): """ Toggle the box for the pill with `pill_name` (red or blue). """ self.q(css=u"#fixture input#{}".format(pill_name)).first.click() class AlertPage(SitePage): """ Page for testing alert handling. """ name = "alert" def confirm(self): """ Click the ``Confirm`` button and confirm the dialog. """ with self.handle_alert(confirm=True): self.q(css='button#confirm').first.click() def cancel(self): """ Click the ``Confirm`` button and cancel the dialog. """ with self.handle_alert(confirm=False): self.q(css='button#confirm').first.click() def dismiss(self): """ Click the ``Alert`` button and confirm the alert. """ with self.handle_alert(): self.q(css='button#alert').first.click() class SelectorPage(SitePage): """ Page for testing retrieval of information by CSS selectors. """ name = "selector" @property def num_divs(self): """ Count the number of div.test elements. """ return len(self.q(css='div.test').results) @property def div_text_list(self): """ Return list of text for each div.test element. """ return self.q(css='div.test').text @property def div_value_list(self): """ Return list of values for each div.test element. """ return self.q(css='div.test').attrs('value') @property def div_html_list(self): """ Return list of html for each div.test element. """ return self.q(css='div.test').html def ids_of_outer_divs_with_inner_text(self, child_text): """ Return a list of the ids of outer divs with the specified text in a child element. """ return self.q(css='div.outer').filter( lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')] ).attrs('id') class DelayPage(SitePage): """ Page for testing elements that appear after a delay. """ name = "delay" def trigger_output(self): """ Wait for click handlers to be installed, then click a button and retrieve the output that appears after a delay. """ EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill() self.q(css='div#fixture button').first.click() EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill() def make_broken_promise(self): """ Make a promise that will not be fulfilled. Should raise a `BrokenPromise` exception. """ return EmptyPromise( self.q(css='div#not_present').is_present, "Invalid div appeared", try_limit=3, try_interval=0.01 ).fulfill() class SlowPage(SitePage): """ Page that loads its elements slowly. """ name = "slow" def is_browser_on_page(self): return self.q(css='div#ready').is_present() class NextPage(SitePage): """ Page that loads another page after a delay. """ name = "next_page" def is_browser_on_page(self): return self.q(css='#next').is_present() def load_next(self, page, delay_sec): """ Load the given `page` after waiting `delay_sec` seconds.
""" time.sleep(delay_sec) page.visit() @js_defined('$') class FocusedPage(SitePage): """ Page that has a link to a focusable element. """ name = "focused" @wait_for_js def focus_on_main_content(self): """ Give focus to the element with the ``main-content`` ID. """ self.browser.execute_script("$('#main-content').focus()") class VisiblePage(SitePage): """ Page that has some elements visible and others invisible. """ name = "visible" def is_visible(self, name): """ Return a boolean indicating whether the given item is visible. """ return self.q(css="div.{}".format(name)).first.visible def is_invisible(self, name): """ Return a boolean indicating whether the given element is present, but not visible. """ return self.q(css="div.{}".format(name)).first.invisible @js_defined('test_var1', 'test_var2') class JavaScriptPage(SitePage): """ Page for testing asynchronous JavaScript. """ name = "javascript" @wait_for_js def trigger_output(self): """ Click a button which will only work once RequireJS finishes loading. """ self.q(css='div#fixture button').first.click() @wait_for_js def reload_and_trigger_output(self): """ Reload the page, wait for JS, then trigger the output. """ self.browser.refresh() self.wait_for_js() # pylint: disable=no-member self.q(css='div#fixture button').first.click() @js_defined('something.SomethingThatDoesntExist') class JavaScriptUndefinedPage(SitePage): """ Page for testing asynchronous JavaScript, where the javascript that we wait for is never defined. """ name = "javascript" @wait_for_js def trigger_output(self): """ Click a button which will only work once RequireJS finishes loading. """ self.q(css='div#fixture button').first.click() @requirejs('main') class RequireJSPage(SitePage): """ Page for testing asynchronous JavaScript loaded with RequireJS. """ name = "requirejs" @property @wait_for_js def output(self): """ Wait for scripts to finish and then return the contents of the ``#output`` div on the page. """ return super(RequireJSPage, self).output class AjaxNoJQueryPage(SitePage): """ Page for testing an ajax call. """ name = "ajax_no_jquery" class AjaxPage(SitePage): """ Page for testing an ajax call. """ name = "ajax" def click_button(self): """ Click the button on the page, which triggers an ajax call that updates the #output div. """ self.q(css='div#fixture button').first.click() class WaitsPage(SitePage): """ Page for testing wait helpers. """ name = "wait" def is_button_output_present(self): """ Click the button and wait until the output id appears in the DOM. """ self.wait_for_element_presence('div#ready', 'Page is Ready') self.q(css='div#fixture button').first.click() self.wait_for_element_presence('div#output', 'Button Output is Available') def is_class_absent(self): """ Click the button and wait until the 'playing' class disappears from the DOM. """ self.q(css='#spinner').first.click() self.wait_for_element_absence('.playing', 'Animation Stopped') def is_button_output_visible(self): """ Click the button and wait until the output is displayed. """ self.wait_for_element_presence('div#ready', 'Page is Ready') self.q(css='div#fixture button').first.click() self.wait_for_element_visibility('div#output', 'Button Output is Visible') def is_spinner_invisible(self): """ Click the button and wait until the spinner disappears. """ self.q(css='#spinner').first.click() self.wait_for_element_invisibility('#anim', 'Spinner is Invisible') class AccessibilityPage(SitePage): """ Page for testing accessibility auditing.
""" name = "accessibility" class ImagePage(SitePage): """ Page for testing image capture and comparison. """ name = "image" class LongPage(SitePage): """ Page that requires scrolling to get to certain elements. """ name = "long_page"
enter_text
identifier_name
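Note: each record in this dump stores a file as a prefix, a suffix, the removed middle, and a fim_type label; for identifier_name records like the two above, the middle is a single removed identifier (getAlert, enter_text). A minimal sketch, with assumed field names, of how such a record reconstructs its source:

def reconstruct(record):
    # The original file is the prefix with the removed middle spliced
    # back in front of the suffix.
    return record["prefix"] + record["middle"] + record["suffix"]

record = {
    "prefix": "def ",
    "middle": "enter_text",  # fim_type == "identifier_name"
    "suffix": "(self, text):\n        self.q(css='#fixture input').fill(text)\n",
}
assert reconstruct(record).startswith("def enter_text(self, text):")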
pages.py
""" Page objects for interacting with the test site. """ from __future__ import absolute_import import os import time from bok_choy.page_object import PageObject from bok_choy.promise import EmptyPromise from bok_choy.javascript import js_defined, requirejs, wait_for_js class SitePage(PageObject): """ Base class for all pages in the test site. """ # Get the server port from the environment # (set by the test runner script) SERVER_PORT = os.environ.get("SERVER_PORT", 8005) def is_browser_on_page(self): title = self.name.lower().replace('_', ' ') return title in self.browser.title.lower() @property def url(self): return "http://localhost:{0}/{1}".format(self.SERVER_PORT, self.name + ".html") @property def output(self): """ Return the contents of the "#output" div on the page. The fixtures are configured to update this div when the user interacts with the page. """ text_list = self.q(css='#output').text if len(text_list) < 1: return None return text_list[0] class ButtonPage(SitePage): """ Page for testing button interactions. """ name = "button" def click_button(self): """ Click the button on the page, which should cause the JavaScript to update the #output div. """ self.q(css='div#fixture input').first.click() class TextFieldPage(SitePage): """ Page for testing text field interactions. """ name = "text_field" def enter_text(self, text): """ Input `text` into the text field on the page. """ self.q(css='#fixture input').fill(text) class SelectPage(SitePage): """ Page for testing select input interactions. """ name = "select" def select_car(self, car_value): """ Select the car with ``car_value`` in the drop-down list. """ self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click() def is_car_selected(self, car): """ Return ``True`` if the given ``car`` is selected, ``False`` otherwise. """ return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected class CheckboxPage(SitePage): """ Page for testing checkbox interactions. """ name = "checkbox" def toggle_pill(self, pill_name): """ Toggle the box for the pill with `pill_name` (red or blue). """ self.q(css=u"#fixture input#{}".format(pill_name)).first.click() class AlertPage(SitePage): """ Page for testing alert handling. """ name = "alert" def confirm(self):
with self.handle_alert(confirm=True): self.q(css='button#confirm').first.click() def cancel(self): """ Click the ``Confirm`` button and cancel the dialog. """ with self.handle_alert(confirm=False): self.q(css='button#confirm').first.click() def dismiss(self): """ Click the ``Alert`` button and confirm the alert. """ with self.handle_alert(): self.q(css='button#alert').first.click() class SelectorPage(SitePage): """ Page for testing retrieval of information by CSS selectors. """ name = "selector" @property def num_divs(self): """ Count the number of div.test elements. """ return len(self.q(css='div.test').results) @property def div_text_list(self): """ Return list of text for each div.test element. """ return self.q(css='div.test').text @property def div_value_list(self): """ Return list of values for each div.test element. """ return self.q(css='div.test').attrs('value') @property def div_html_list(self): """ Return list of html for each div.test element. """ return self.q(css='div.test').html def ids_of_outer_divs_with_inner_text(self, child_text): """ Return a list of the ids of outer divs with the specified text in a child element. """ return self.q(css='div.outer').filter( lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')] ).attrs('id') class DelayPage(SitePage): """ Page for testing elements that appear after a delay. """ name = "delay" def trigger_output(self): """ Wait for click handlers to be installed, then click a button and retrieve the output that appears after a delay. """ EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill() self.q(css='div#fixture button').first.click() EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill() def make_broken_promise(self): """ Make a promise that will not be fulfilled. Should raise a `BrokenPromise` exception. """ return EmptyPromise( self.q(css='div#not_present').is_present, "Invalid div appeared", try_limit=3, try_interval=0.01 ).fulfill() class SlowPage(SitePage): """ Page that loads its elements slowly. """ name = "slow" def is_browser_on_page(self): return self.q(css='div#ready').is_present() class NextPage(SitePage): """ Page that loads another page after a delay. """ name = "next_page" def is_browser_on_page(self): return self.q(css='#next').is_present() def load_next(self, page, delay_sec): """ Load the given `page` after waiting `delay_sec` seconds. """ time.sleep(delay_sec) page.visit() @js_defined('$') class FocusedPage(SitePage): """ Page that has a link to a focusable element. """ name = "focused" @wait_for_js def focus_on_main_content(self): """ Give focus to the element with the ``main-content`` ID. """ self.browser.execute_script("$('#main-content').focus()") class VisiblePage(SitePage): """ Page that has some elements visible and others invisible. """ name = "visible" def is_visible(self, name): """ Return a boolean indicating whether the given item is visible. """ return self.q(css="div.{}".format(name)).first.visible def is_invisible(self, name): """ Return a boolean indicating whether the given element is present, but not visible. """ return self.q(css="div.{}".format(name)).first.invisible @js_defined('test_var1', 'test_var2') class JavaScriptPage(SitePage): """ Page for testing asynchronous JavaScript. """ name = "javascript" @wait_for_js def trigger_output(self): """ Click a button which will only work once RequireJS finishes loading.
""" self.q(css='div#fixture button').first.click() @wait_for_js def reload_and_trigger_output(self): """ Reload the page, wait for JS, then trigger the output. """ self.browser.refresh() self.wait_for_js() # pylint: disable=no-member self.q(css='div#fixture button').first.click() @js_defined('something.SomethingThatDoesntExist') class JavaScriptUndefinedPage(SitePage): """ Page for testing asynchronous JavaScript, where the javascript that we wait for is never defined. """ name = "javascript" @wait_for_js def trigger_output(self): """ Click a button which will only work once RequireJS finishes loading. """ self.q(css='div#fixture button').first.click() @requirejs('main') class RequireJSPage(SitePage): """ Page for testing asynchronous JavaScript loaded with RequireJS. """ name = "requirejs" @property @wait_for_js def output(self): """ Wait for scripts to finish and then return the contents of the ``#output`` div on the page. """ return super(RequireJSPage, self).output class AjaxNoJQueryPage(SitePage): """ Page for testing an ajax call. """ name = "ajax_no_jquery" class AjaxPage(SitePage): """ Page for testing an ajax call. """ name = "ajax" def click_button(self): """ Click the button on the page, which triggers an ajax call that updates the #output div. """ self.q(css='div#fixture button').first.click() class WaitsPage(SitePage): """ Page for testing wait helpers. """ name = "wait" def is_button_output_present(self): """ Click the button and wait until the output id appears in the DOM. """ self.wait_for_element_presence('div#ready', 'Page is Ready') self.q(css='div#fixture button').first.click() self.wait_for_element_presence('div#output', 'Button Output is Available') def is_class_absent(self): """ Click the button and wait until the 'playing' class disappears from the DOM. """ self.q(css='#spinner').first.click() self.wait_for_element_absence('.playing', 'Animation Stopped') def is_button_output_visible(self): """ Click the button and wait until the output is displayed. """ self.wait_for_element_presence('div#ready', 'Page is Ready') self.q(css='div#fixture button').first.click() self.wait_for_element_visibility('div#output', 'Button Output is Visible') def is_spinner_invisible(self): """ Click the button and wait until the spinner disappears. """ self.q(css='#spinner').first.click() self.wait_for_element_invisibility('#anim', 'Spinner is Invisible') class AccessibilityPage(SitePage): """ Page for testing accessibility auditing. """ name = "accessibility" class ImagePage(SitePage): """ Page for testing image capture and comparison. """ name = "image" class LongPage(SitePage): """ Page that requires scrolling to get to certain elements. """ name = "long_page"
""" Click the ``Confirm`` button and confirm the dialog. """
random_line_split
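Note: for random_line_split records the middle appears to be cut at arbitrary line boundaries rather than at a syntactic unit (here it is the confirm docstring). A speculative sketch, under that assumption, of how such a split could be generated:

import random

def random_line_split(source, rng=random):
    # Cut the file at two line boundaries chosen at random; the span
    # between them becomes the middle. Purely positional, with no syntax
    # awareness, which fits middles like a bare docstring.
    lines = source.splitlines(keepends=True)
    i, j = sorted(rng.sample(range(len(lines) + 1), 2))
    return "".join(lines[:i]), "".join(lines[i:j]), "".join(lines[j:])

prefix, middle, suffix = random_line_split("a = 1\nb = 2\nc = 3\n")
assert prefix + middle + suffix == "a = 1\nb = 2\nc = 3\n"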
pages.py
""" Page objects for interacting with the test site. """ from __future__ import absolute_import import os import time from bok_choy.page_object import PageObject from bok_choy.promise import EmptyPromise from bok_choy.javascript import js_defined, requirejs, wait_for_js class SitePage(PageObject): """ Base class for all pages in the test site. """ # Get the server port from the environment # (set by the test runner script) SERVER_PORT = os.environ.get("SERVER_PORT", 8005) def is_browser_on_page(self): title = self.name.lower().replace('_', ' ') return title in self.browser.title.lower() @property def url(self): return "http://localhost:{0}/{1}".format(self.SERVER_PORT, self.name + ".html") @property def output(self): """ Return the contents of the "#output" div on the page. The fixtures are configured to update this div when the user interacts with the page. """ text_list = self.q(css='#output').text if len(text_list) < 1:
return text_list[0] class ButtonPage(SitePage): """ Page for testing button interactions. """ name = "button" def click_button(self): """ Click the button on the page, which should cause the JavaScript to update the #output div. """ self.q(css='div#fixture input').first.click() class TextFieldPage(SitePage): """ Page for testing text field interactions. """ name = "text_field" def enter_text(self, text): """ Input `text` into the text field on the page. """ self.q(css='#fixture input').fill(text) class SelectPage(SitePage): """ Page for testing select input interactions. """ name = "select" def select_car(self, car_value): """ Select the car with ``car_value`` in the drop-down list. """ self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click() def is_car_selected(self, car): """ Return ``True`` if the given ``car`` is selected, ``False`` otherwise. """ return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected class CheckboxPage(SitePage): """ Page for testing checkbox interactions. """ name = "checkbox" def toggle_pill(self, pill_name): """ Toggle the box for the pill with `pill_name` (red or blue). """ self.q(css=u"#fixture input#{}".format(pill_name)).first.click() class AlertPage(SitePage): """ Page for testing alert handling. """ name = "alert" def confirm(self): """ Click the ``Confirm`` button and confirm the dialog. """ with self.handle_alert(confirm=True): self.q(css='button#confirm').first.click() def cancel(self): """ Click the ``Confirm`` button and cancel the dialog. """ with self.handle_alert(confirm=False): self.q(css='button#confirm').first.click() def dismiss(self): """ Click the ``Alert`` button and confirm the alert. """ with self.handle_alert(): self.q(css='button#alert').first.click() class SelectorPage(SitePage): """ Page for testing retrieval of information by CSS selectors. """ name = "selector" @property def num_divs(self): """ Count the number of div.test elements. """ return len(self.q(css='div.test').results) @property def div_text_list(self): """ Return list of text for each div.test element. """ return self.q(css='div.test').text @property def div_value_list(self): """ Return list of values for each div.test element. """ return self.q(css='div.test').attrs('value') @property def div_html_list(self): """ Return list of html for each div.test element. """ return self.q(css='div.test').html def ids_of_outer_divs_with_inner_text(self, child_text): """ Return a list of the ids of outer divs with the specified text in a child element. """ return self.q(css='div.outer').filter( lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')] ).attrs('id') class DelayPage(SitePage): """ Page for testing elements that appear after a delay. """ name = "delay" def trigger_output(self): """ Wait for click handlers to be installed, then click a button and retrieve the output that appears after a delay. """ EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill() self.q(css='div#fixture button').first.click() EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill() def make_broken_promise(self): """ Make a promise that will not be fulfilled. Should raise a `BrokenPromise` exception. """ return EmptyPromise( self.q(css='div#not_present').is_present, "Invalid div appeared", try_limit=3, try_interval=0.01 ).fulfill() class SlowPage(SitePage): """ Page that loads its elements slowly. 
""" name = "slow" def is_browser_on_page(self): return self.q(css='div#ready').is_present() class NextPage(SitePage): """ Page that loads another page after a delay. """ name = "next_page" def is_browser_on_page(self): return self.q(css='#next').is_present() def load_next(self, page, delay_sec): """ Load the page named `page_name` after waiting for `delay_sec`. """ time.sleep(delay_sec) page.visit() @js_defined('$') class FocusedPage(SitePage): """ Page that has a link to a focusable element. """ name = "focused" @wait_for_js def focus_on_main_content(self): """ Give focus to the element with the ``main-content`` ID. """ self.browser.execute_script("$('#main-content').focus()") class VisiblePage(SitePage): """ Page that has some elements visible and others invisible. """ name = "visible" def is_visible(self, name): """ Return a boolean indicating whether the given item is visible. """ return self.q(css="div.{}".format(name)).first.visible def is_invisible(self, name): """ Return a boolean indicating whether the given element is present, but not visible. """ return self.q(css="div.{}".format(name)).first.invisible @js_defined('test_var1', 'test_var2') class JavaScriptPage(SitePage): """ Page for testing asynchronous JavaScript. """ name = "javascript" @wait_for_js def trigger_output(self): """ Click a button which will only work once RequireJS finishes loading. """ self.q(css='div#fixture button').first.click() @wait_for_js def reload_and_trigger_output(self): """ Reload the page, wait for JS, then trigger the output. """ self.browser.refresh() self.wait_for_js() # pylint: disable=no-member self.q(css='div#fixture button').first.click() @js_defined('something.SomethingThatDoesntExist') class JavaScriptUndefinedPage(SitePage): """ Page for testing asynchronous JavaScript, where the javascript that we wait for is never defined. """ name = "javascript" @wait_for_js def trigger_output(self): """ Click a button which will only work once RequireJS finishes loading. """ self.q(css='div#fixture button').first.click() @requirejs('main') class RequireJSPage(SitePage): """ Page for testing asynchronous JavaScript loaded with RequireJS. """ name = "requirejs" @property @wait_for_js def output(self): """ Wait for scripts to finish and then return the contents of the ``#output`` div on the page. """ return super(RequireJSPage, self).output class AjaxNoJQueryPage(SitePage): """ Page for testing an ajax call. """ name = "ajax_no_jquery" class AjaxPage(SitePage): """ Page for testing an ajax call. """ name = "ajax" def click_button(self): """ Click the button on the page, which triggers an ajax call that updates the #output div. """ self.q(css='div#fixture button').first.click() class WaitsPage(SitePage): """ Page for testing wait helpers. """ name = "wait" def is_button_output_present(self): """ Click button and wait until output id appears in DOM. """ self.wait_for_element_presence('div#ready', 'Page is Ready') self.q(css='div#fixture button').first.click() self.wait_for_element_presence('div#output', 'Button Output is Available') def is_class_absent(self): """ Click button and wait until playing class disappeared from DOM """ self.q(css='#spinner').first.click() self.wait_for_element_absence('.playing', 'Animation Stopped') def is_button_output_visible(self): """ Click button and wait until output is displayed. 
""" self.wait_for_element_presence('div#ready', 'Page is Ready') self.q(css='div#fixture button').first.click() self.wait_for_element_visibility('div#output', 'Button Output is Visible') def is_spinner_invisible(self): """ Click button and wait until spinner is disappeared. """ self.q(css='#spinner').first.click() self.wait_for_element_invisibility('#anim', 'Button Output is Visible') class AccessibilityPage(SitePage): """ Page for testing accessibility auditing. """ name = "accessibility" class ImagePage(SitePage): """ Page for testing image capture and comparison. """ name = "image" class LongPage(SitePage): """ Page that requires scrolling to get to certain elements. """ name = "long_page"
return None
conditional_block
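Note: for conditional_block records the middle is the body of a conditional, as with the `return None` guard above. One way such spans can be located in Python source (offsets via the ast module; end_lineno needs Python 3.8+); this is a plausible extraction method, not necessarily the one that produced the dump:

import ast

def conditional_block_spans(source):
    # Yield the source text of each `if` body; masking one of these
    # yields a conditional_block middle like `return None` above.
    lines = source.splitlines(keepends=True)
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.If):
            start = node.body[0].lineno - 1  # lineno is 1-based
            end = node.body[-1].end_lineno
            yield "".join(lines[start:end])

src = "def output(t):\n    if len(t) < 1:\n        return None\n    return t[0]\n"
print(list(conditional_block_spans(src)))  # ['        return None\n']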
pages.py
""" Page objects for interacting with the test site. """ from __future__ import absolute_import import os import time from bok_choy.page_object import PageObject from bok_choy.promise import EmptyPromise from bok_choy.javascript import js_defined, requirejs, wait_for_js class SitePage(PageObject): """ Base class for all pages in the test site. """ # Get the server port from the environment # (set by the test runner script) SERVER_PORT = os.environ.get("SERVER_PORT", 8005) def is_browser_on_page(self): title = self.name.lower().replace('_', ' ') return title in self.browser.title.lower() @property def url(self): return "http://localhost:{0}/{1}".format(self.SERVER_PORT, self.name + ".html") @property def output(self): """ Return the contents of the "#output" div on the page. The fixtures are configured to update this div when the user interacts with the page. """ text_list = self.q(css='#output').text if len(text_list) < 1: return None return text_list[0] class ButtonPage(SitePage): """ Page for testing button interactions. """ name = "button" def click_button(self): """ Click the button on the page, which should cause the JavaScript to update the #output div. """ self.q(css='div#fixture input').first.click() class TextFieldPage(SitePage): """ Page for testing text field interactions. """ name = "text_field" def enter_text(self, text): """ Input `text` into the text field on the page. """ self.q(css='#fixture input').fill(text) class SelectPage(SitePage): """ Page for testing select input interactions. """ name = "select" def select_car(self, car_value): """ Select the car with ``car_value`` in the drop-down list. """ self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click() def is_car_selected(self, car): """ Return ``True`` if the given ``car`` is selected, ``False`` otherwise. """ return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected class CheckboxPage(SitePage): """ Page for testing checkbox interactions. """ name = "checkbox" def toggle_pill(self, pill_name): """ Toggle the box for the pill with `pill_name` (red or blue). """ self.q(css=u"#fixture input#{}".format(pill_name)).first.click() class AlertPage(SitePage): """ Page for testing alert handling. """ name = "alert" def confirm(self): """ Click the ``Confirm`` button and confirm the dialog. """ with self.handle_alert(confirm=True): self.q(css='button#confirm').first.click() def cancel(self): """ Click the ``Confirm`` button and cancel the dialog. """ with self.handle_alert(confirm=False): self.q(css='button#confirm').first.click() def dismiss(self): """ Click the ``Alert`` button and confirm the alert. """ with self.handle_alert(): self.q(css='button#alert').first.click() class SelectorPage(SitePage): """ Page for testing retrieval of information by CSS selectors. """ name = "selector" @property def num_divs(self): """ Count the number of div.test elements. """ return len(self.q(css='div.test').results) @property def div_text_list(self): """ Return list of text for each div.test element. """ return self.q(css='div.test').text @property def div_value_list(self): """ Return list of values for each div.test element. """ return self.q(css='div.test').attrs('value') @property def div_html_list(self): """ Return list of html for each div.test element. """ return self.q(css='div.test').html def ids_of_outer_divs_with_inner_text(self, child_text): """ Return a list of the ids of outer divs with the specified text in a child element. 
""" return self.q(css='div.outer').filter( lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')] ).attrs('id') class DelayPage(SitePage): """ Page for testing elements that appear after a delay. """ name = "delay" def trigger_output(self): """ Wait for click handlers to be installed, then click a button and retrieve the output that appears after a delay. """ EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill() self.q(css='div#fixture button').first.click() EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill() def make_broken_promise(self): """ Make a promise that will not be fulfilled. Should raise a `BrokenPromise` exception. """ return EmptyPromise( self.q(css='div#not_present').is_present, "Invalid div appeared", try_limit=3, try_interval=0.01 ).fulfill() class SlowPage(SitePage): """ Page that loads its elements slowly. """ name = "slow" def is_browser_on_page(self): return self.q(css='div#ready').is_present() class NextPage(SitePage): """ Page that loads another page after a delay. """ name = "next_page" def is_browser_on_page(self): return self.q(css='#next').is_present() def load_next(self, page, delay_sec): """ Load the page named `page_name` after waiting for `delay_sec`. """ time.sleep(delay_sec) page.visit() @js_defined('$') class FocusedPage(SitePage): """ Page that has a link to a focusable element. """ name = "focused" @wait_for_js def focus_on_main_content(self): """ Give focus to the element with the ``main-content`` ID. """ self.browser.execute_script("$('#main-content').focus()") class VisiblePage(SitePage): """ Page that has some elements visible and others invisible. """ name = "visible" def is_visible(self, name): """ Return a boolean indicating whether the given item is visible. """ return self.q(css="div.{}".format(name)).first.visible def is_invisible(self, name): """ Return a boolean indicating whether the given element is present, but not visible. """ return self.q(css="div.{}".format(name)).first.invisible @js_defined('test_var1', 'test_var2') class JavaScriptPage(SitePage): """ Page for testing asynchronous JavaScript. """ name = "javascript" @wait_for_js def trigger_output(self): """ Click a button which will only work once RequireJS finishes loading. """ self.q(css='div#fixture button').first.click() @wait_for_js def reload_and_trigger_output(self):
@js_defined('something.SomethingThatDoesntExist') class JavaScriptUndefinedPage(SitePage): """ Page for testing asynchronous JavaScript, where the javascript that we wait for is never defined. """ name = "javascript" @wait_for_js def trigger_output(self): """ Click a button which will only work once RequireJS finishes loading. """ self.q(css='div#fixture button').first.click() @requirejs('main') class RequireJSPage(SitePage): """ Page for testing asynchronous JavaScript loaded with RequireJS. """ name = "requirejs" @property @wait_for_js def output(self): """ Wait for scripts to finish and then return the contents of the ``#output`` div on the page. """ return super(RequireJSPage, self).output class AjaxNoJQueryPage(SitePage): """ Page for testing an ajax call. """ name = "ajax_no_jquery" class AjaxPage(SitePage): """ Page for testing an ajax call. """ name = "ajax" def click_button(self): """ Click the button on the page, which triggers an ajax call that updates the #output div. """ self.q(css='div#fixture button').first.click() class WaitsPage(SitePage): """ Page for testing wait helpers. """ name = "wait" def is_button_output_present(self): """ Click button and wait until output id appears in DOM. """ self.wait_for_element_presence('div#ready', 'Page is Ready') self.q(css='div#fixture button').first.click() self.wait_for_element_presence('div#output', 'Button Output is Available') def is_class_absent(self): """ Click button and wait until playing class disappeared from DOM """ self.q(css='#spinner').first.click() self.wait_for_element_absence('.playing', 'Animation Stopped') def is_button_output_visible(self): """ Click button and wait until output is displayed. """ self.wait_for_element_presence('div#ready', 'Page is Ready') self.q(css='div#fixture button').first.click() self.wait_for_element_visibility('div#output', 'Button Output is Visible') def is_spinner_invisible(self): """ Click button and wait until spinner is disappeared. """ self.q(css='#spinner').first.click() self.wait_for_element_invisibility('#anim', 'Button Output is Visible') class AccessibilityPage(SitePage): """ Page for testing accessibility auditing. """ name = "accessibility" class ImagePage(SitePage): """ Page for testing image capture and comparison. """ name = "image" class LongPage(SitePage): """ Page that requires scrolling to get to certain elements. """ name = "long_page"
""" Reload the page, wait for JS, then trigger the output. """ self.browser.refresh() self.wait_for_js() # pylint: disable=no-member self.q(css='div#fixture button').first.click()
identifier_body
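Note: for identifier_body records the middle is a whole function body, docstring included, while the decorator and def line stay in the prefix (cf. reload_and_trigger_output above). A sketch of extracting that span with ast, again as a plausible method rather than the dump's actual tooling:

import ast

def function_body(source, name):
    # Return the source lines of `name`'s body, the span an
    # identifier_body record removes. The docstring is part of the body,
    # matching the reload_and_trigger_output middle above.
    lines = source.splitlines(keepends=True)
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.FunctionDef) and node.name == name:
            return "".join(lines[node.body[0].lineno - 1:node.body[-1].end_lineno])
    raise KeyError(name)

src = 'def f():\n    """Doc."""\n    return 1\n'
assert function_body(src, "f") == '    """Doc."""\n    return 1\n'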
Train_Trader.py
import numpy as np import sys sys.path.append("game/") import skimage from skimage import transform, color, exposure import keras from keras.models import Sequential, Model, load_model from keras.layers import Dense, Flatten, Activation, Input from keras.layers.convolutional import Convolution2D from keras.optimizers import RMSprop import keras.backend as K from keras.callbacks import LearningRateScheduler, History import tensorflow as tf import pygame #import wrapped_flappy_bird as game import scipy.misc import scipy.stats as st import threading import time import math import matplotlib.pyplot as plt import pandas as pd import GameEngine as game # TO DO # # Make image range from 0 to 1 instead from 0 to 255 # Visualize weights # try adding more layers # in random actions it should buy less often # Connected graph points for better clarity for cnn # # --------------------------------- LOG RELATED --------------------------------- myCount = 0 playLog = pd.DataFrame(columns=["reward", "profit", "action"]) logCnt = 0 score = 0 actionList = [] # --------------------------------- NEURAL NETWORK SETTINGS --------------------------------- GAMMA = 0.99 # Discount value BETA = 0.01 # Regularisation coefficient IMAGE_ROWS = 84 IMAGE_COLS = 84 IMAGE_CHANNELS = 4 LEARNING_RATE = 7e-4 EPISODE = 0 THREADS = 8 t_max = 5 const = 1e-5 T = 0 episode_r = [] episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS)) episode_output = [] episode_critic = [] ACTIONS = 2 a_t = np.zeros(ACTIONS) lrate = 0 # --------------------------------- LOSS FUNCTION FOR POLICY OUTPUT --------------------------------- def logloss(y_true, y_pred): # Policy loss print("logloss:--------------------------------") return -K.sum( K.log(y_true*y_pred + (1-y_true)*(1-y_pred) + const), axis=-1) # BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const)) # regularisation term # --------------------------------- LOSS FUNCTION FOR CRITIC OUTPUT --------------------------------- def sumofsquares(y_true, y_pred): #critic loss print("sumofsquares:--------------------------------") return K.sum(K.square(y_pred - y_true), axis=-1) # --------------------------------- BUILD MODEL --------------------------------- def buildmodel(): print("Model building begins") model = Sequential() keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None) S = Input(shape = (IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS, ), name = 'Input') h0 = Convolution2D(16, kernel_size = (8,8), strides = (4,4), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(S) h1 = Convolution2D(32, kernel_size = (4,4), strides = (2,2), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(h0) h2 = Flatten()(h1) h3 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h2) h4 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h3) h5 = Dense(512, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h4) P = Dense(1, name = 'o_P', activation = 'sigmoid', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5) V = Dense(1, name = 'o_V', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5) model = Model(inputs = S, outputs = [P,V]) rms = RMSprop(lr = LEARNING_RATE, rho = 0.99, epsilon = 0.1) model.compile(loss = {'o_P': logloss, 'o_V': sumofsquares}, 
loss_weights = {'o_P': 1., 'o_V' : 0.5}, optimizer = rms) return model # --------------------------------- PREPROCESS IMAGE --------------------------------- def preprocess(image): image = image[np.newaxis, :] image = image[:, :, :, np.newaxis] return image # Initialize a new model using buildmodel() or use load_model to resume training an already trained model model = buildmodel() # Model = load_model("saved_models/model_updates3900", custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares}) model._make_predict_function() graph = tf.get_default_graph() intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer('o_P').output) a_t[0] = 1 # Index 0 = no flap, 1 = flap # Output of network represents probability of flap # --------------------------------- INITIALIZE THREADS --------------------------------- game_state = [] for i in range(0,THREADS): #game_state.append(game.GameState(30000)) game_state.append(game.PlayGame()) # --------------------------------- RUN PROCESS --------------------------------- def runprocess(thread_id, s_t): global T global a_t global model global myCount global score global logCnt global playLog global actionList t = 0 t_start = t terminal = False r_t = 0 r_store = [] state_store = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS)) output_store = [] critic_store = [] s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2]) while t-t_start < t_max and terminal == False: t += 1 T += 1 intermediate_output = 0 with graph.as_default(): predictedChoice = model.predict(s_t)[0] intermediate_output = intermediate_layer_model.predict(s_t) randomChoice = np.random.rand() a_t = [0 , 1] if randomChoice < predictedChoice else [1 , 0] # stochastic action # a_t = [0,1] if 0.5 < y[0] else [1,0] # deterministic action # x_t (next frame), r_t (0.1 if alive, +1.5 if it passes the pipe, -1 if it dies) and the input is a_t (action) x_t, r_t, terminal = game_state[thread_id].nextStep(a_t) x_t = preprocess(x_t) # LOG GAME STEP if thread_id == 0: score = score + r_t print("score", score) action = game_state[0].getActionTaken() actionList.append(action) if terminal == True: print("------------------------------------------------------------------------------------") print("ENDSCORE:", score) profit = game_state[0].getProfit() meanAction = np.mean(actionList) playLog.loc[logCnt] = score, profit, meanAction logCnt += 1 playLog.to_csv("playLog.csv", index=True) score = 0 actionList = [] # SPITOUT IMAGE EVERY GAME STEP if 1==0: if thread_id == 0: mat = x_t mat = mat[0, :, :, 0] myCount += 1 # SAVE TO CSV #fileName = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".csv" #np.savetxt(fileName, mat, fmt='%2.0f', delimiter=",") #PLOT plt.imshow(mat, cmap='hot') #plt.show() fileName2 = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".png" plt.savefig(fileName2) if terminal == True: game_state[thread_id].startGame() with graph.as_default(): critic_reward = model.predict(s_t)[1] y = 0 if a_t[0] == 1 else 1 r_store = np.append(r_store, r_t) state_store = np.append(state_store, s_t, axis = 0) output_store = np.append(output_store, y) critic_store = np.append(critic_store, critic_reward) s_t = np.append(x_t, s_t[:, :, :, :3], axis=3) print("Frame = " + str(T) + ", Updates = " + str(EPISODE) + ", Thread = " + str(thread_id) + ", Output = "+ str(intermediate_output)) if terminal == False: r_store[len(r_store)-1] = critic_store[len(r_store)-1] else: r_store[len(r_store)-1] = -1 s_t = 
np.concatenate((x_t, x_t, x_t, x_t), axis=3) for i in range(2,len(r_store)+1): r_store[len(r_store)-i] = r_store[len(r_store)-i] + GAMMA*r_store[len(r_store)-i + 1] return s_t, state_store, output_store, r_store, critic_store # Function to decrease the learning rate after every epoch. The schedule is linear, so the learning rate reaches 0 after roughly 21,875 epochs (7e-4 / 3.2e-8) def step_decay(epoch): global lrate decay = 3.2e-8 lrate = LEARNING_RATE - epoch*decay lrate = max(lrate, 0) return lrate class actorthread(threading.Thread): def __init__(self,thread_id, s_t): threading.Thread.__init__(self) self.thread_id = thread_id self.next_state = s_t def run(self): global episode_output global episode_r global episode_critic global episode_state threadLock.acquire() self.next_state, state_store, output_store, r_store, critic_store = runprocess(self.thread_id, self.next_state) self.next_state = self.next_state.reshape(self.next_state.shape[1], self.next_state.shape[2], self.next_state.shape[3]) episode_r = np.append(episode_r, r_store) episode_output = np.append(episode_output, output_store) episode_state = np.append(episode_state, state_store, axis = 0) episode_critic = np.append(episode_critic, critic_store) threadLock.release() states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4)) # Initializing state of each thread for i in range(0, len(game_state)): image = game_state[i].getChartData() #image = game_state[i].getCurrentFrame() image = preprocess(image) state = np.concatenate((image, image, image, image), axis=3) states = np.append(states, state, axis = 0) cnt = 0 trainingLog = pd.DataFrame(columns=['update', 'reward_mean', 'loss', "lrate"]) while True: threadLock = threading.Lock() threads = [] for i in range(0,THREADS): threads.append(actorthread(i,states[i])) states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4)) for i in range(0,THREADS): threads[i].start() #thread.join() ensures that all threads finish execution before proceeding further for i in range(0,THREADS): threads[i].join() for i in range(0,THREADS): state = threads[i].next_state state = state.reshape(1, state.shape[0], state.shape[1], state.shape[2]) states = np.append(states, state, axis = 0) e_mean = np.mean(episode_r) #advantage calculation for each action taken advantage = episode_r - episode_critic print("backpropagating") lrate = LearningRateScheduler(step_decay) callbacks_list = [lrate] weights = {'o_P':advantage, 'o_V':np.ones(len(advantage))} #backpropagation history = model.fit(episode_state, [episode_output, episode_r], epochs = EPISODE + 1, batch_size = len(episode_output), callbacks = callbacks_list, sample_weight = weights, initial_epoch = EPISODE) episode_r = [] episode_output = [] episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS)) episode_critic = [] # LOG SAVER trainingLog.loc[cnt] = EPISODE, e_mean, history.history['loss'],lrate cnt += 1 if cnt % 1 == 0: trainingLog.to_csv("trainingLog.csv", index=True) if EPISODE % 50 == 0:
EPISODE += 1
model.save("saved_models/model_updates" + str(EPISODE))
conditional_block
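Note: the runprocess loop in this file backs rewards up with r[i] = r[i] + GAMMA*r[i+1], after pinning the last entry to the critic's value estimate when the rollout did not terminate, or to -1 when it did. A standalone sketch of that backup:

GAMMA = 0.99  # same discount as the script

def discounted_returns(rewards):
    # Mirrors the backup in runprocess: walk backwards, each entry
    # accumulating GAMMA times its successor; the caller has already
    # overwritten the final entry with -1 or the critic's estimate.
    r = list(rewards)
    for i in range(len(r) - 2, -1, -1):
        r[i] += GAMMA * r[i + 1]
    return r

print(discounted_returns([0.1, 0.1, -1.0]))  # [-0.7811, -0.89, -1.0]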
Train_Trader.py
import numpy as np import sys sys.path.append("game/") import skimage from skimage import transform, color, exposure import keras from keras.models import Sequential, Model, load_model from keras.layers import Dense, Flatten, Activation, Input from keras.layers.convolutional import Convolution2D from keras.optimizers import RMSprop import keras.backend as K from keras.callbacks import LearningRateScheduler, History import tensorflow as tf import pygame #import wrapped_flappy_bird as game import scipy.misc import scipy.stats as st import threading import time import math import matplotlib.pyplot as plt import pandas as pd import GameEngine as game # TO DO # # Make image range from 0 to 1 instead from 0 to 255 # Visualize weights # try adding more layers # in random actions it should buy less often # Connected graph points for better clarity for cnn # # --------------------------------- LOG RELATED --------------------------------- myCount = 0 playLog = pd.DataFrame(columns=["reward", "profit", "action"]) logCnt = 0 score = 0 actionList = [] # --------------------------------- NEURAL NETWORK SETTINGS --------------------------------- GAMMA = 0.99 # Discount value BETA = 0.01 # Regularisation coefficient IMAGE_ROWS = 84 IMAGE_COLS = 84 IMAGE_CHANNELS = 4 LEARNING_RATE = 7e-4 EPISODE = 0 THREADS = 8 t_max = 5 const = 1e-5 T = 0 episode_r = [] episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS)) episode_output = [] episode_critic = [] ACTIONS = 2 a_t = np.zeros(ACTIONS) lrate = 0 # --------------------------------- LOSS FUNCTION FOR POLICY OUTPUT --------------------------------- def logloss(y_true, y_pred): # Policy loss print("logloss:--------------------------------") return -K.sum( K.log(y_true*y_pred + (1-y_true)*(1-y_pred) + const), axis=-1) # BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const)) # regularisation term # --------------------------------- LOSS FUNCTION FOR CRITIC OUTPUT --------------------------------- def sumofsquares(y_true, y_pred): #critic loss print("sumofsquares:--------------------------------") return K.sum(K.square(y_pred - y_true), axis=-1) # --------------------------------- BUILD MODEL --------------------------------- def buildmodel(): print("Model building begins") model = Sequential() keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None) S = Input(shape = (IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS, ), name = 'Input') h0 = Convolution2D(16, kernel_size = (8,8), strides = (4,4), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(S) h1 = Convolution2D(32, kernel_size = (4,4), strides = (2,2), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(h0) h2 = Flatten()(h1) h3 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h2) h4 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h3) h5 = Dense(512, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h4) P = Dense(1, name = 'o_P', activation = 'sigmoid', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5) V = Dense(1, name = 'o_V', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5) model = Model(inputs = S, outputs = [P,V]) rms = RMSprop(lr = LEARNING_RATE, rho = 0.99, epsilon = 0.1) model.compile(loss = {'o_P': logloss, 'o_V': sumofsquares}, 
loss_weights = {'o_P': 1., 'o_V' : 0.5}, optimizer = rms) return model # --------------------------------- PREPROCESS IMAGE --------------------------------- def preprocess(image): image = image[np.newaxis, :] image = image[:, :, :, np.newaxis] return image # Initialize a new model using buildmodel() or use load_model to resume training an already trained model model = buildmodel() # Model = load_model("saved_models/model_updates3900", custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares}) model._make_predict_function() graph = tf.get_default_graph() intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer('o_P').output) a_t[0] = 1 # Index 0 = no flap, 1 = flap # Output of network represents probability of flap # --------------------------------- INITIALIZE THREADS --------------------------------- game_state = [] for i in range(0,THREADS): #game_state.append(game.GameState(30000)) game_state.append(game.PlayGame()) # --------------------------------- RUN PROCESS --------------------------------- def runprocess(thread_id, s_t): global T global a_t global model global myCount global score global logCnt global playLog global actionList t = 0 t_start = t terminal = False r_t = 0 r_store = [] state_store = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS)) output_store = [] critic_store = [] s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2]) while t-t_start < t_max and terminal == False: t += 1 T += 1 intermediate_output = 0 with graph.as_default(): predictedChoice = model.predict(s_t)[0] intermediate_output = intermediate_layer_model.predict(s_t) randomChoice = np.random.rand() a_t = [0 , 1] if randomChoice < predictedChoice else [1 , 0] # stochastic action # a_t = [0,1] if 0.5 < y[0] else [1,0] # deterministic action # x_t (next frame), r_t (0.1 if alive, +1.5 if it passes the pipe, -1 if it dies) and the input is a_t (action) x_t, r_t, terminal = game_state[thread_id].nextStep(a_t) x_t = preprocess(x_t) # LOG GAME STEP if thread_id == 0: score = score + r_t print("score", score) action = game_state[0].getActionTaken() actionList.append(action) if terminal == True: print("------------------------------------------------------------------------------------") print("ENDSCORE:", score) profit = game_state[0].getProfit() meanAction = np.mean(actionList) playLog.loc[logCnt] = score, profit, meanAction logCnt += 1 playLog.to_csv("playLog.csv", index=True) score = 0 actionList = [] # SPITOUT IMAGE EVERY GAME STEP
mat = mat[0, :, :, 0] myCount += 1 # SAVE TO CSV #fileName = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".csv" #np.savetxt(fileName, mat, fmt='%2.0f', delimiter=",") #PLOT plt.imshow(mat, cmap='hot') #plt.show() fileName2 = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".png" plt.savefig(fileName2) if terminal == True: game_state[thread_id].startGame() with graph.as_default(): critic_reward = model.predict(s_t)[1] y = 0 if a_t[0] == 1 else 1 r_store = np.append(r_store, r_t) state_store = np.append(state_store, s_t, axis = 0) output_store = np.append(output_store, y) critic_store = np.append(critic_store, critic_reward) s_t = np.append(x_t, s_t[:, :, :, :3], axis=3) print("Frame = " + str(T) + ", Updates = " + str(EPISODE) + ", Thread = " + str(thread_id) + ", Output = "+ str(intermediate_output)) if terminal == False: r_store[len(r_store)-1] = critic_store[len(r_store)-1] else: r_store[len(r_store)-1] = -1 s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=3) for i in range(2,len(r_store)+1): r_store[len(r_store)-i] = r_store[len(r_store)-i] + GAMMA*r_store[len(r_store)-i + 1] return s_t, state_store, output_store, r_store, critic_store # Function to decrease the learning rate after every epoch. The schedule is linear, so the learning rate reaches 0 after roughly 21,875 epochs (7e-4 / 3.2e-8) def step_decay(epoch): global lrate decay = 3.2e-8 lrate = LEARNING_RATE - epoch*decay lrate = max(lrate, 0) return lrate class actorthread(threading.Thread): def __init__(self,thread_id, s_t): threading.Thread.__init__(self) self.thread_id = thread_id self.next_state = s_t def run(self): global episode_output global episode_r global episode_critic global episode_state threadLock.acquire() self.next_state, state_store, output_store, r_store, critic_store = runprocess(self.thread_id, self.next_state) self.next_state = self.next_state.reshape(self.next_state.shape[1], self.next_state.shape[2], self.next_state.shape[3]) episode_r = np.append(episode_r, r_store) episode_output = np.append(episode_output, output_store) episode_state = np.append(episode_state, state_store, axis = 0) episode_critic = np.append(episode_critic, critic_store) threadLock.release() states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4)) # Initializing state of each thread for i in range(0, len(game_state)): image = game_state[i].getChartData() #image = game_state[i].getCurrentFrame() image = preprocess(image) state = np.concatenate((image, image, image, image), axis=3) states = np.append(states, state, axis = 0) cnt = 0 trainingLog = pd.DataFrame(columns=['update', 'reward_mean', 'loss', "lrate"]) while True: threadLock = threading.Lock() threads = [] for i in range(0,THREADS): threads.append(actorthread(i,states[i])) states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4)) for i in range(0,THREADS): threads[i].start() #thread.join() ensures that all threads finish execution before proceeding further for i in range(0,THREADS): threads[i].join() for i in range(0,THREADS): state = threads[i].next_state state = state.reshape(1, state.shape[0], state.shape[1], state.shape[2]) states = np.append(states, state, axis = 0) e_mean = np.mean(episode_r) #advantage calculation for each action taken advantage = episode_r - episode_critic print("backpropagating") lrate = LearningRateScheduler(step_decay) callbacks_list = [lrate] weights = {'o_P':advantage, 'o_V':np.ones(len(advantage))} #backpropagation history = model.fit(episode_state, [episode_output, episode_r], epochs = EPISODE + 1, batch_size =
len(episode_output), callbacks = callbacks_list, sample_weight = weights, initial_epoch = EPISODE) episode_r = [] episode_output = [] episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS)) episode_critic = [] # LOG SAVER trainingLog.loc[cnt] = EPISODE, e_mean, history.history['loss'],lrate cnt += 1 if cnt % 1 == 0: trainingLog.to_csv("trainingLog.csv", index=True) if EPISODE % 50 == 0: model.save("saved_models/model_updates" + str(EPISODE)) EPISODE += 1
if 1==0: if thread_id == 0: mat = x_t
random_line_split
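Note: step_decay is a linear schedule. With LEARNING_RATE = 7e-4 and decay = 3.2e-8 per epoch it reaches zero after about 21,875 epochs (7e-4 / 3.2e-8), not the 20,000 the original comment claimed. A quick check:

LEARNING_RATE = 7e-4
DECAY = 3.2e-8

def step_decay(epoch):
    # Same linear schedule as in the script, clamped at zero.
    return max(LEARNING_RATE - epoch * DECAY, 0.0)

print(step_decay(0))       # 0.0007
print(step_decay(10000))   # 0.00038
print(step_decay(21875))   # 0.0 -- the schedule bottoms out here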
Train_Trader.py
import numpy as np
import sys
sys.path.append("game/")
import skimage
from skimage import transform, color, exposure
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Flatten, Activation, Input
from keras.layers.convolutional import Convolution2D
from keras.optimizers import RMSprop
import keras.backend as K
from keras.callbacks import LearningRateScheduler, History
import tensorflow as tf
import pygame
#import wrapped_flappy_bird as game
import scipy.misc
import scipy.stats as st
import threading
import time
import math
import matplotlib.pyplot as plt
import pandas as pd
import GameEngine as game

# TO DO
#
# Make image range from 0 to 1 instead of from 0 to 255
# Visualize weights
# Try adding more layers
# In random actions it should buy less often
# Connect graph points for better clarity for the CNN
#

# --------------------------------- LOG RELATED ---------------------------------
myCount = 0
playLog = pd.DataFrame(columns=["reward", "profit", "action"])
logCnt = 0
score = 0
actionList = []

# --------------------------------- NEURAL NETWORK SETTINGS ---------------------------------
GAMMA = 0.99   # Discount value
BETA = 0.01    # Regularisation coefficient
IMAGE_ROWS = 84
IMAGE_COLS = 84
IMAGE_CHANNELS = 4
LEARNING_RATE = 7e-4
EPISODE = 0
THREADS = 8
t_max = 5
const = 1e-5

T = 0
episode_r = []
episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
episode_output = []
episode_critic = []
ACTIONS = 2
a_t = np.zeros(ACTIONS)
lrate = 0

# --------------------------------- LOSS FUNCTION FOR POLICY OUTPUT ---------------------------------
def logloss(y_true, y_pred):   # policy loss
    return -K.sum(K.log(y_true * y_pred + (1 - y_true) * (1 - y_pred) + const), axis=-1)
    # BETA * K.sum(y_pred * K.log(y_pred + const) + (1 - y_pred) * K.log(1 - y_pred + const))  # regularisation term

# --------------------------------- LOSS FUNCTION FOR CRITIC OUTPUT ---------------------------------
def sumofsquares(y_true, y_pred):   # critic loss
    return K.sum(K.square(y_pred - y_true), axis=-1)

# --------------------------------- BUILD MODEL ---------------------------------
def buildmodel():
    print("Model building begins")
    keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None)
    S = Input(shape=(IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS,), name='Input')
    h0 = Convolution2D(16, kernel_size=(8, 8), strides=(4, 4), activation='relu',
                       kernel_initializer='random_uniform', bias_initializer='random_uniform')(S)
    h1 = Convolution2D(32, kernel_size=(4, 4), strides=(2, 2), activation='relu',
                       kernel_initializer='random_uniform', bias_initializer='random_uniform')(h0)
    h2 = Flatten()(h1)
    h3 = Dense(256, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h2)
    h4 = Dense(256, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h3)
    h5 = Dense(512, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h4)
    P = Dense(1, name='o_P', activation='sigmoid', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h5)
    V = Dense(1, name='o_V', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h5)
    model = Model(inputs=S, outputs=[P, V])
    rms = RMSprop(lr=LEARNING_RATE, rho=0.99, epsilon=0.1)
    model.compile(loss={'o_P': logloss, 'o_V': sumofsquares},
                  loss_weights={'o_P': 1., 'o_V': 0.5}, optimizer=rms)
    return model

# --------------------------------- PREPROCESS IMAGE ---------------------------------
def preprocess(image):
    image = image[np.newaxis, :]
    image = image[:, :, :, np.newaxis]
    return image

# Initialize a new model using buildmodel() or use load_model to resume training an already trained model
model = buildmodel()
# model = load_model("saved_models/model_updates3900", custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares})
model._make_predict_function()
graph = tf.get_default_graph()

intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer('o_P').output)

a_t[0] = 1   # Index 0 = no flap, 1 = flap; the network output represents the probability of flapping

# --------------------------------- INITIALIZE THREADS ---------------------------------
game_state = []
for i in range(0, THREADS):
    #game_state.append(game.GameState(30000))
    game_state.append(game.PlayGame())

# --------------------------------- RUN PROCESS ---------------------------------
def runprocess(thread_id, s_t):
    global T
    global a_t
    global model
    global myCount
    global score
    global logCnt
    global playLog
    global actionList

    t = 0
    t_start = t
    terminal = False
    r_t = 0
    r_store = []
    state_store = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
    output_store = []
    critic_store = []
    s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2])

    while t - t_start < t_max and terminal == False:
        t += 1
        T += 1
        intermediate_output = 0

        with graph.as_default():
            predictedChoice = model.predict(s_t)[0]
            intermediate_output = intermediate_layer_model.predict(s_t)

        randomChoice = np.random.rand()
        a_t = [0, 1] if randomChoice < predictedChoice else [1, 0]   # stochastic action
        # a_t = [0, 1] if 0.5 < y[0] else [1, 0]                     # deterministic action

        # x_t (next frame) and r_t (reward, e.g. 0.1 if alive, +1.5 for passing a pipe, -1 on death); the input is a_t (action)
        x_t, r_t, terminal = game_state[thread_id].nextStep(a_t)
        x_t = preprocess(x_t)

        # LOG GAME STEP
        if thread_id == 0:
            score = score + r_t
            print("score", score)
            action = game_state[0].getActionTaken()
            actionList.append(action)
            if terminal == True:
                print("------------------------------------------------------------------------------------")
                print("ENDSCORE:", score)
                profit = game_state[0].getProfit()
                meanAction = np.mean(actionList)
                playLog.loc[logCnt] = score, profit, meanAction
                logCnt += 1
                playLog.to_csv("playLog.csv", index=True)
                score = 0
                actionList = []

        # SPIT OUT IMAGE EVERY GAME STEP (disabled debug block)
        if False:
            if thread_id == 0:
                mat = x_t[0, :, :, 0]
                myCount += 1
                # SAVE TO CSV
                #fileName = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".csv"
                #np.savetxt(fileName, mat, fmt='%2.0f', delimiter=",")
                # PLOT
                plt.imshow(mat, cmap='hot')
                #plt.show()
                fileName2 = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".png"
                plt.savefig(fileName2)

        if terminal == True:
            game_state[thread_id].startGame()

        with graph.as_default():
            critic_reward = model.predict(s_t)[1]

        y = 0 if a_t[0] == 1 else 1

        r_store = np.append(r_store, r_t)
        state_store = np.append(state_store, s_t, axis=0)
        output_store = np.append(output_store, y)
        critic_store = np.append(critic_store, critic_reward)

        s_t = np.append(x_t, s_t[:, :, :, :3], axis=3)
        print("Frame = " + str(T) + ", Updates = " + str(EPISODE) + ", Thread = " + str(thread_id) + ", Output = " + str(intermediate_output))

    if terminal == False:
        # non-terminal rollout: bootstrap the last reward with the critic's value estimate
        r_store[len(r_store) - 1] = critic_store[len(r_store) - 1]
    else:
        r_store[len(r_store) - 1] = -1
        s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=3)

    # n-step discounted returns, accumulated backwards through the rollout
    for i in range(2, len(r_store) + 1):
        r_store[len(r_store) - i] = r_store[len(r_store) - i] + GAMMA * r_store[len(r_store) - i + 1]

    return s_t, state_store, output_store, r_store, critic_store

# Decrease the learning rate after every epoch; at this rate it reaches 0 by roughly 20,000 epochs
def step_decay(epoch):
    global lrate
    decay = 3.2e-8
    lrate = LEARNING_RATE - epoch * decay
    lrate = max(lrate, 0)
    return lrate

class actorthread(threading.Thread):
    def __init__(self, thread_id, s_t):
    def run(self):
        global episode_output
        global episode_r
        global episode_critic
        global episode_state

        threadLock.acquire()
        self.next_state, state_store, output_store, r_store, critic_store = runprocess(self.thread_id, self.next_state)
        self.next_state = self.next_state.reshape(self.next_state.shape[1], self.next_state.shape[2], self.next_state.shape[3])

        episode_r = np.append(episode_r, r_store)
        episode_output = np.append(episode_output, output_store)
        episode_state = np.append(episode_state, state_store, axis=0)
        episode_critic = np.append(episode_critic, critic_store)
        threadLock.release()

states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4))

# Initializing the state of each thread
for i in range(0, len(game_state)):
    image = game_state[i].getChartData()
    #image = game_state[i].getCurrentFrame()
    image = preprocess(image)
    state = np.concatenate((image, image, image, image), axis=3)
    states = np.append(states, state, axis=0)

cnt = 0
trainingLog = pd.DataFrame(columns=['update', 'reward_mean', 'loss', 'lrate'])

while True:
    threadLock = threading.Lock()
    threads = []
    for i in range(0, THREADS):
        threads.append(actorthread(i, states[i]))

    states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4))

    for i in range(0, THREADS):
        threads[i].start()

    # thread.join() ensures that all threads finish execution before proceeding further
    for i in range(0, THREADS):
        threads[i].join()

    for i in range(0, THREADS):
        state = threads[i].next_state
        state = state.reshape(1, state.shape[0], state.shape[1], state.shape[2])
        states = np.append(states, state, axis=0)

    e_mean = np.mean(episode_r)

    # advantage calculation for each action taken
    advantage = episode_r - episode_critic
    print("backpropagating")

    lr_scheduler = LearningRateScheduler(step_decay)   # named separately so it does not shadow the lrate value logged below
    callbacks_list = [lr_scheduler]
    weights = {'o_P': advantage, 'o_V': np.ones(len(advantage))}

    # backpropagation
    history = model.fit(episode_state, [episode_output, episode_r], epochs=EPISODE + 1,
                        batch_size=len(episode_output), callbacks=callbacks_list,
                        sample_weight=weights, initial_epoch=EPISODE)

    episode_r = []
    episode_output = []
    episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
    episode_critic = []

    # LOG SAVER
    trainingLog.loc[cnt] = EPISODE, e_mean, history.history['loss'], lrate
    cnt += 1
    if cnt % 1 == 0:
        trainingLog.to_csv("trainingLog.csv", index=True)
    if EPISODE % 50 == 0:
        model.save("saved_models/model_updates" + str(EPISODE))
    EPISODE += 1
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.next_state = s_t
identifier_body
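For reference, the return computation in runprocess is a backward pass of G_t = r_t + GAMMA * G_{t+1}, seeded with the critic's value estimate on a non-terminal cut (or -1 on death). A minimal self-contained sketch of that loop; the reward sequence and bootstrap value here are made up for illustration, not taken from the game:

import numpy as np

GAMMA = 0.99

def discounted_returns(rewards, bootstrap):
    # the last entry is replaced by the bootstrap value, then each earlier
    # entry accumulates reward + GAMMA * (return of the following step)
    returns = np.array(rewards, dtype=float)
    returns[-1] = bootstrap
    for i in range(len(returns) - 2, -1, -1):
        returns[i] = returns[i] + GAMMA * returns[i + 1]
    return returns

print(discounted_returns([0.1, 0.1, 0.1, 0.1, 0.1], bootstrap=0.5))
# -> [0.874, 0.782, 0.689, 0.595, 0.5]  (last entry is the critic bootstrap)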
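The training loop implements the actor update by passing the advantage as a per-sample weight on the policy head, so the effective policy loss becomes advantage * logloss for each sample. A toy plain-numpy sketch of that weighting; all actions, probabilities, returns and values below are invented for illustration:

import numpy as np

const = 1e-5

# invented rollout data: taken actions, policy outputs, returns and critic values
actions = np.array([1, 0, 1])          # y in the script: 1 = second action taken
p = np.array([0.7, 0.4, 0.6])          # sigmoid output o_P for each state
returns = np.array([1.2, -0.3, 0.8])   # discounted returns
values = np.array([0.9, 0.1, 1.0])     # critic output o_V

advantage = returns - values           # passed as sample_weight for the policy head

# per-sample negative log likelihood, as in logloss(), then weighted by the advantage
nll = -np.log(actions * p + (1 - actions) * (1 - p) + const)
weighted_policy_loss = advantage * nll
print(weighted_policy_loss.mean())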
code.py
# -*- coding: utf-8 -*-
"""
# Task 1
"""

# Commented out IPython magic to ensure Python compatibility.
# import all libraries we will use for task 1
# %pylab inline --no-import-all
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import accuracy_score

# import the dataset
with np.load('training-dataset.npz') as data:
    img = data['x']
    lbl = data['y']
    del data

print(img.shape)
print(type(img))
print(lbl.shape)

# split the dataset into training, validation and testing sets
X_train, X_val_test, y_train, y_val_test = train_test_split(img, lbl, test_size=0.3, random_state=666)
X_val, X_test, y_val, y_test = train_test_split(X_val_test, y_val_test, test_size=0.5, random_state=666)
print(y_train.shape)
print(y_val.shape)
print(y_test.shape)

# normalization: image pixels lie between 0-255, so divide by 255 and then reshape
X_train = X_train / 255
X_val = X_val / 255
X_test = X_test / 255
x_train = X_train.reshape((-1, 28, 28, 1))
x_val = X_val.reshape((-1, 28, 28, 1))
x_test = X_test.reshape((-1, 28, 28, 1))
print(x_train.shape)

# dummy coding (one-hot) labels
onehot = LabelBinarizer()
d_y_train = onehot.fit_transform(y_train)
d_y_val = onehot.transform(y_val)
d_y_test = onehot.transform(y_test)
print(d_y_train.shape)

"""## Model 1: KNN"""

# tune hyperparameter K using 3-fold cross validation
k_range = range(3, 8)
cv_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k, n_jobs=-1)
    scores = cross_val_score(knn, X_train, d_y_train, cv=3)   # 3 folds
    cv_score = np.mean(scores)
    print('k = {}, accuracy on validation set = {:.3f}'.format(k, cv_score))
    cv_scores.append((k, cv_score))

# select the k that gave the best cross-validation accuracy
best_k = max(cv_scores, key=lambda x: x[-1])
print('best_k: {} with validation accuracy of {}'.format(best_k[0], best_k[1]))

# use the best parameter k to train KNN on the training data
knn_model = KNeighborsClassifier(n_neighbors=best_k[0], n_jobs=-1)
knn_model.fit(X_train, d_y_train)

# evaluate the fitted KNN on test data
knn_y_pred = knn_model.predict(X_test)
test_accuracy = accuracy_score(d_y_test, knn_y_pred)
print(test_accuracy)

"""## Model 2: CNN"""

# tune hyperparameters: here we tune batch size, dropout rate and learning rate
settings = []
for batch in [64, 100, 128]:
    for drop in [0, 0.5, 0.8]:
        for lr in [0.1, 0.01, 0.001]:
            print("batch :", batch)
            print("drop:", drop)
            print('learning rate:', lr)
            model = Sequential()
            # convolution 1
            model.add(Convolution2D(input_shape=(28, 28, 1), filters=32, kernel_size=5, strides=1, activation='relu'))
            # pooling 1
            model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='same'))
            # convolution 2
            model.add(Convolution2D(filters=64, kernel_size=5, strides=1, activation='relu'))
            # pooling 2
            model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='same'))
            # convolution 3
            model.add(Convolution2D(filters=128, kernel_size=5, strides=1, activation='relu'))
            # pooling 3
            model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='same'))
            # convolution 4
            model.add(Convolution2D(filters=256, kernel_size=5, strides=1, activation='relu'))
            # pooling 4
            model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='same'))
            # Flatten: transfer feature maps to vectors
            model.add(Flatten())
            # Dropout
            model.add(Dropout(drop))
            # fully connected layer 1
            model.add(Dense(500, activation='relu'))
            # fully connected layer 2: 26 outputs because there are 26 letters in total
            model.add(Dense(26, activation='softmax'))
            # early stopping to prevent overfitting
            early_stopping = EarlyStopping(monitor='val_loss', patience=3, mode='min')
            # reducing the learning rate on plateau
            reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=1, verbose=1, mode='min',
                                          min_delta=0.0001, cooldown=0, min_lr=0)
            callback_lists = [early_stopping, reduce_lr]
            adam = Adam(lr=lr)
            model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=['accuracy'])
            model.fit(x_train, d_y_train, batch_size=batch, epochs=5, verbose=1,
                      validation_data=(x_val, d_y_val), shuffle=True, callbacks=callback_lists)
            loss, acc = model.evaluate(x_val, d_y_val)
            settings.append((batch, drop, lr, acc))

# print the best setting
best_accuracy = max(settings, key=lambda x: x[-1])
print(best_accuracy)   # lr = 0.001
best_batch, best_drop, best_lr = best_accuracy[:-1]
print(best_batch, best_drop, best_lr)

# use the tuned parameters to train the final model
model = Sequential()
# convolution 1, activation
model.add(Convolution2D(input_shape=(28, 28, 1), filters=32, kernel_size=5, strides=1, padding='same', activation='relu'))
# pooling
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='same'))
# convolution 2, activation
model.add(Convolution2D(filters=64, kernel_size=5, strides=1, padding='same', activation='relu'))
# pooling
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='same'))
# convolution 3, activation
model.add(Convolution2D(filters=128, kernel_size=5, strides=1, padding='same', activation='relu'))
# pooling
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='same'))
# convolution 4, activation
model.add(Convolution2D(filters=256, kernel_size=5, strides=1, padding='same', activation='relu'))
# pooling
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='same'))
# Flatten: transfer feature maps to vectors
model.add(Flatten())
# Dropout
model.add(Dropout(best_drop))
# fully connected layer 1
model.add(Dense(500, activation='relu'))
# fully connected layer 2
model.add(Dense(26, activation='softmax'))
# early stopping, to prevent overfitting
early_stopping = EarlyStopping(monitor='val_loss', patience=3, mode='min')
# reducing the learning rate on plateau
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=1, verbose=1, mode='min',
                              min_delta=0.0001, cooldown=0, min_lr=0)
callback_lists = [early_stopping, reduce_lr]
# optimizer
adam = Adam(lr=best_lr)
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=['accuracy'])
# training
history = model.fit(x_train, d_y_train, batch_size=best_batch, epochs=12,
                    validation_data=(x_val, d_y_val), verbose=1, shuffle=True, callbacks=callback_lists)

# plot loss and accuracy for the training and validation sets
# accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()

# save our model
model.save('my_model1.h5')

# make predictions on the testing set and see how accurate those predictions are
loss, acc = model.evaluate(x_test, d_y_test)
print(loss, acc)

"""We got around 85% accuracy on the testing set with the KNN model and 95% accuracy with the CNN model,
so we decided to use the CNN for task 2.

# Task 2
"""

# Commented out IPython magic to ensure Python compatibility.
# import libraries used for task 2
# %pylab inline --no-import-all
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import cv2
from keras.utils import plot_model
from skimage.util import random_noise
from skimage.filters import threshold_local
from skimage.morphology import remove_small_objects
from skimage.measure import label, regionprops
from skimage.color import label2rgb
from google.colab import drive

# load the testing dataset
test = np.load('test-dataset.npy')
print(test.shape)

# see what the images look like before denoising
plt.imshow(test[-1])
plt.show()

# denoise all images and see what they look like now
from scipy import ndimage

testing_filtered = []
for i in range(len(test)):
    new_image = ndimage.median_filter(test[i], 2)
    testing_filtered.append(ndimage.median_filter(new_image, 3))
plt.imshow(testing_filtered[-1])
plt.show()

# define a function to split the images
def
(data):
    testing_cropped = []
    for i in range(len(data)):
        # threshold each image and find contours
        img = (data[i]).astype('uint8')
        _, threshold = cv2.threshold(img.copy(), 10, 255, 0)
        contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]

        bboxes = []
        # create bounding boxes from contours
        for f in range(len(contours)):
            bboxes.append(cv2.boundingRect(contours[f]))

        split = []
        to_remove = []
        # mark bboxes with small w and h for removal, and split those with w >= 30
        for j in range(len(bboxes)):
            if (bboxes[j][2] < 20) and (bboxes[j][3] < 17):
                to_remove.append(bboxes[j])
            if bboxes[j][2] >= 30:
                split.append(j)

        # shrink each wide bbox to half its width and append a second half starting at x + w/2
        for g in split:
            bboxes[g] = (bboxes[g][0], bboxes[g][1], int(bboxes[g][2] / 2), bboxes[g][3])
            modified_bboxes = (int(bboxes[g][0]) + int(bboxes[g][2]), int(bboxes[g][1]),
                               int(bboxes[g][2]), int(bboxes[g][3]))
            bboxes.append(modified_bboxes)

        # remove bboxes with small w and h
        for b in to_remove:
            bboxes.remove(b)

        # sort bboxes left to right
        bboxes = sorted(np.array(bboxes), key=lambda x: x[0])

        cut = []
        for h in range(len(bboxes)):
            images = img[bboxes[h][1]:bboxes[h][1] + bboxes[h][3], bboxes[h][0]:bboxes[h][0] + bboxes[h][2]]
            if images.shape[0] > 3:   # keep only crops taller than 3 pixels
                cut.append(images)

        cropped = []
        # reshape the cut images so the CNN can consume them
        for image_split in cut:
            crop = image_split.reshape((image_split.shape[0], image_split.shape[1], 1))
            crop = np.array(tf.image.resize_with_crop_or_pad(crop, 28, 28))
            img_cropped = crop.reshape(28, 28)
            cropped.append(img_cropped)
        testing_cropped.append(cropped)
    return np.array(testing_cropped)

testing_cropped = image_crop(testing_filtered)
print(len(testing_cropped))   # 10000 images

# let's look at an example letter from the testing_cropped dataset
plt.imshow(testing_cropped[420][0])
plt.show()

# most images are separated into 4 letters, but many still end up with 3 or 5 letters
l = []
for i in range(len(testing_cropped)):
    l.append(len(testing_cropped[i]))
plt.hist(l)
plt.show()

# make the 5 predictions with the highest probability scores using our CNN model
block_size = 55
predictions = []
top1 = []
top2 = []
top3 = []
top4 = []
top5 = []
final = []
for i in range(len(testing_cropped)):
    crops_number = len(testing_cropped[i])
    for sample in testing_cropped[i]:
        imbw = sample > threshold_local(sample, block_size, method='mean')
        imbw1 = remove_small_objects(imbw, 10, connectivity=1)
        roi = imbw1
        roi = roi.reshape((roi.shape[0], roi.shape[1], 1))
        roi = tf.image.resize_with_crop_or_pad(roi, 28, 28).numpy()
        image = roi.reshape(28, 28)
        pre = model.predict(image.reshape(-1, 28, 28, 1))
        for probs in pre:
            # probs holds the probability of each letter, 26 scores in total;
            # we select the 5 highest, and their indices are the predicted labels
            top5.append(np.argsort(probs)[-5] + 1)
            top4.append(np.argsort(probs)[-4] + 1)
            top3.append(np.argsort(probs)[-3] + 1)
            top2.append(np.argsort(probs)[-2] + 1)
            top1.append(np.argsort(probs)[-1] + 1)

    pred = top5 + top4 + top3 + top2 + top1
    pred1 = []
    y_pred = []
    for p in pred:
        pred1.append(str(p).zfill(2))   # pad single-digit labels to two characters
    for step in range(0, len(pred1), crops_number):
        pred2 = pred1[step:step + crops_number]
        pred3 = ''.join(pred2)
        y_pred.append(pred3)
    final1 = y_pred.copy()
    final.append(final1)
    top1.clear()
    top2.clear()
    top3.clear()
    top4.clear()
    top5.clear()
    y_pred.clear()
#print(final)

# take the last image as an example to see the most probable labels; we got 100% accuracy on this image
print(final[-1][-1])
plt.imshow(testing_filtered[-1])
plt.show()

# take another image that includes 3 letters as an example; at least 2 of the 3 letters are predicted correctly
print(final[18][-1])
plt.imshow(testing_filtered[18])
plt.show()

# save to a csv file
import csv
with open('Predictions.csv', 'w', newline='') as myfile:
    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    wr.writerows(final)
image_crop
identifier_name
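The top-5 decoding in the prediction loop leans on np.argsort: the last k indices of the ascending sort of a probability vector are the k most likely classes. A tiny standalone illustration with an invented 5-class probability vector; label indices are 1-based, as in the script:

import numpy as np

probs = np.array([0.05, 0.40, 0.10, 0.30, 0.15])   # invented class probabilities

order = np.argsort(probs)       # indices from least to most likely
top3 = order[-3:][::-1] + 1     # three most likely labels, 1-based
print(top3)                     # -> [2 4 5]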
code.py
# -*- coding: utf-8 -*- """ # Task 1 """ # Commented out IPython magic to ensure Python compatibility. #import all libraries we will use for task 1 # %pylab inline --no-import-all import numpy as np import matplotlib.pyplot as plt from keras.models import load_model from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import cross_val_score from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Convolution2D, MaxPooling2D from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten from tensorflow.keras.optimizers import Adam from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from sklearn.metrics import accuracy_score #import the dataset with np.load('training-dataset.npz') as data: img = data['x'] lbl = data['y'] del data print(img.shape) print(type(img)) print(lbl.shape) #split datasets to training, validation and testing sets X_train, X_val_test, y_train, y_val_test = train_test_split(img, lbl, test_size = 0.3, random_state = 666) X_val, X_test, y_val, y_test = train_test_split(X_val_test, y_val_test, test_size = 0.5, random_state = 666) print(y_train.shape) print(y_val.shape) print(y_test.shape) #normalization, image pixel is between 0-255, according to the formula, we divide by 255 and then reshape X_train = X_train/255 X_val = X_val/255 X_test = X_test/255 x_train = X_train.reshape((-1, 28, 28, 1)) x_val = X_val.reshape((-1, 28, 28, 1)) x_test = X_test.reshape((-1, 28, 28, 1)) print(x_train.shape) #dummy coding labels onehot = LabelBinarizer() d_y_train = onehot.fit_transform(y_train) d_y_val = onehot.transform(y_val) d_y_test = onehot.transform(y_test) print(d_y_train.shape) """## Model 1: KNN """ #tune hyperparameter K using 3-fold cross validation k_range = range(3, 8) cv_scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors = k, n_jobs = -1) scores = cross_val_score(knn, X_train, d_y_train, cv = 3) #3 folds cv_score = np.mean(scores) print('k = {}, accuracy on validation set = {:.3f}'.format(k, cv_score)) cv_scores.append((k, cv_score)) #selecting k that gave the best accuracy on validation set best_k = max(cv_scores, key = lambda x:x[-1]) print('best_k: {} with validation accuracy of {}'.format(best_k[0], best_k)) #using best parameter k to train KNN on training data knn_model = KNeighborsClassifier(n_neighbors = best_k[0], n_jobs = -1) knn_model.fit(X_train, d_y_train) #evaluate fitted KNN on test data knn_y_pred = knn_model.predict(X_test) test_accuracy = accuracy_score(d_y_test, knn_y_pred) print(test_accuracy) """## Model 2: CNN """ #tune hyperparameters, here we tune batch size, dropout rate and learning rate settings = [] for batch in [64, 100, 128]: for drop in [0, 0.5, 0.8]: for lr in [0.1, 0.01, 0.001]: print("batch :", batch) print("drop:", drop) print('learning rate:', lr) model = Sequential() #convolution 1 model.add(Convolution2D(input_shape=(28,28,1), filters=32, kernel_size=5, strides=1, activation='relu')) #pooling 1 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 2 model.add(Convolution2D(filters=64, kernel_size=5, strides=1, activation='relu' )) #pooling 2 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 3 model.add(Convolution2D(filters=128, kernel_size=5, strides=1, activation='relu' )) #pooling 3 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 4 
model.add(Convolution2D(filters=256, kernel_size=5, strides=1, activation='relu' )) #pooling 4 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #Flatten, transfer to vectors model.add(Flatten()) #Dropout model.add(Dropout(drop)) #fully connected network 1 model.add(Dense(500, activation='relu')) #fully connected network 2, 26 because 26 different letters in total model.add(Dense(26, activation='softmax')) #earlystopping to prevent overfitting early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min') #reducing learning rate reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 1, verbose = 1, mode = 'min', min_delta =0.0001, cooldown=0, min_lr=0) callback_lists = [early_stopping, reduce_lr] adam = Adam(lr = lr) model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy']) model.fit(x_train, d_y_train, batch_size = batch, epochs = 5, verbose = 1, validation_data = (x_val, d_y_val), shuffle = True, callbacks = callback_lists) loss, acc = model.evaluate(x_val, d_y_val) settings.append((batch, drop, lr, acc)) #print best accuracy best_accuracy = max(settings, key = lambda x:x[-1]) print(best_accuracy) #lr = 0.001 best_batch, best_drop, best_lr = best_accuracy[:-1] print(best_batch, best_drop, best_lr) #using tuned parameters to train model model = Sequential() #convolution 1, activation model.add(Convolution2D(input_shape=(28,28,1), filters=32, kernel_size=5, strides=1, padding='same', activation='relu')) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1,padding='same')) #convolution 2, activation model.add(Convolution2D(filters=64, kernel_size=5, strides=1, padding='same', activation='relu' )) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 3, activation model.add(Convolution2D(filters=128, kernel_size=5, strides=1, padding='same', activation='relu' )) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 4, activation model.add(Convolution2D(filters=256, kernel_size=5, strides=1, padding='same', activation='relu' )) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #Flatten, transfer to vectors model.add(Flatten()) #Dropout model.add(Dropout(best_drop)) #fully connected network 1 model.add(Dense(500,activation='relu')) #fully connected network 2 model.add(Dense(26, activation='softmax')) #early stopping, to prevent overfitting early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min') #reducing learning rate reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 1, verbose = 1, mode = 'min', min_delta =0.0001, cooldown=0, min_lr=0) callback_lists = [early_stopping, reduce_lr] #optimizer adam = Adam(lr = best_lr) model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy']) #training history = model.fit(x_train,d_y_train, batch_size = best_batch, epochs = 12, validation_data = (x_val, d_y_val), verbose = 1, shuffle = True, callbacks = callback_lists) #plotting loss and accuracy of training and validation sets #accuracy plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() #loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() #save our model 
model.save('my_model1.h5') #make predictions of testing sets and see how accurate those predictions are loss,acc = model.evaluate(x_test, d_y_test) print(loss,acc) """We have got around 85% accuray on testing set with KNN model and 95% accuracy on testing set with CNN model. So we decided to use CNN for our task 2. # Task 2 """ # Commented out IPython magic to ensure Python compatibility. # import libraries used for task 2 # %pylab inline --no-import-all import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from keras.preprocessing.image import ImageDataGenerator import cv2 from keras.utils import plot_model from skimage.util import random_noise from skimage.filters import threshold_local from skimage.morphology import remove_small_objects from skimage.measure import label, regionprops from skimage.color import label2rgb from google.colab import drive # load testing-dataset test = np.load('test-dataset.npy') print(test.shape) # see what images are like before denoise plt.imshow(test[-1]) plt.show() # denoise all images and see what they are like now from scipy import ndimage import matplotlib.pyplot as plt testing_filtered = [] for i in range(len(test)): new_image = ndimage.median_filter(test[i], 2) testing_filtered.append(ndimage.median_filter(new_image, 3)) plt.imshow(testing_filtered[-1]) plt.show() #define a function to split the images def image_crop(data):
testing_cropped = image_crop(testing_filtered) print(len(testing_cropped)) #10000 images # let's see an example letter from testing_cropped dataset plt.imshow(testing_cropped[420][0]) plt.show() #most of images are separated into 4 letters, but still many are into 3 or 5 letters l=[] for i in range(len(testing_cropped)): l.append(len(testing_cropped[i])) plt.hist(l) plt.show() #make 5 predictions which have highest probability scores by using our CNN model block_size = 55 predictions = [] top1 = [] top2 = [] top3 = [] top4 = [] top5 = [] final = [] for i in range(10000): crops_number = (len(testing_cropped[i])) for sample in testing_cropped[i]: imbw = sample > threshold_local(sample, block_size, method = 'mean') imbw1 = remove_small_objects(imbw, 10, connectivity=1) roi = imbw1 roi = roi.reshape((roi.shape[0],roi.shape[1],1)) roi = tf.image.resize_with_crop_or_pad(roi, 28, 28).numpy() image = roi.reshape(28, 28) pre = model.predict(image.reshape(-1,28,28,1)) for i in pre: '''i is the probability of each letter, in total 26 probability scores, we select the highest 5 and their index is the predicted label''' prob1 = np.argsort(i)[-5] + 1 top5.append(prob1) prob2 = np.argsort(i)[-4] + 1 top4.append(prob2) prob3 = np.argsort(i)[-3] + 1 top3.append(prob3) prob4 = np.argsort(i)[-2] + 1 top2.append(prob4) prob5 = np.argsort(i)[-1] + 1 top1.append(prob5) pred = top5 + top4 + top3 + top2 + top1 pred1 = [] y_pred = [] for i in pred: i = str(i) if len(i) == 1: s = i.zfill(2) pred1.append(s) else: pred1.append(i) for step in range(0, len(pred1), crops_number): pred2 = pred1[step:step + crops_number] pred3 = ''.join(pred2) y_pred.append(pred3) final1 = y_pred.copy() final.append(final1) top1.clear() top2.clear() top3.clear() top4.clear() top5.clear() y_pred.clear() #print(final) #take the last image as an example, to see the most probable labels, we got 100% accuracy for this image print(final[-1][-1]) plt.imshow(testing_filtered[-1]) plt.show() #take another image that includes 3 letters as an example, we got at least 2 of 3 letters correctly predicted print(final[18][-1]) plt.imshow(testing_filtered[18]) plt.show() # save to a csv file import csv with open('Predictions.csv', 'w', newline = '') as myfile: wr = csv.writer(myfile, quoting=csv.QUOTE_ALL) wr.writerows(final)
testing_cropped = [] for i in range(len(data)): #threshold each image and find contours img = (data[i]).astype('uint8') _, threshold = cv2.threshold(img.copy(), 10, 255, 0) contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:] bboxes = [] #creating bounding boxes from contours for f in range(len(contours)): bboxes.append(cv2.boundingRect(contours[f])) split = [] to_remove = [] #threshold to remove small w and h bbox, and split those with w >= 28 for j in range(len(bboxes)): if (bboxes[j][2] < 20) and (bboxes[j][3] < 17): to_remove.append(bboxes[j]) if bboxes[j][2] >= 30: split.append(j) #modifying bboxes to get half w and move x to (x + w/2) for g in split: bboxes[g] = (bboxes[g][0], bboxes[g][1], int(bboxes[g][2]/2), bboxes[g][3]) modified_bboxes = bboxes[g] modified_bboxes = (int(bboxes[g][0]) + int(bboxes[g][2]), int(bboxes[g][1]), int(bboxes[g][2]), int(bboxes[g][3])) bboxes.append(modified_bboxes) #removing bboxes with small w and h for b in to_remove: bboxes.remove(b) #sorting bboxes bboxes = sorted(np.array(bboxes), key = lambda x: x[0]) cut = [] for h in range(len(bboxes)): images = img[bboxes[h][1]:bboxes[h][1]+bboxes[h][3], bboxes[h][0]:bboxes[h][0]+bboxes[h][2]] if images[0].shape > np.max(3): cut.append(images) cropped = [] #reshaping the cut images to be able to use CNN for image_split in cut: crop = image_split.reshape((image_split.shape[0],image_split.shape[1],1)) crop = np.array(tf.image.resize_with_crop_or_pad(crop, 28, 28)) img_cropped = crop.reshape(28,28) cropped.append(img_cropped) testing_cropped.append(cropped) return np.array(testing_cropped)
identifier_body
code.py
# -*- coding: utf-8 -*- """ # Task 1 """ # Commented out IPython magic to ensure Python compatibility. #import all libraries we will use for task 1 # %pylab inline --no-import-all import numpy as np import matplotlib.pyplot as plt from keras.models import load_model from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import cross_val_score from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Convolution2D, MaxPooling2D from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten from tensorflow.keras.optimizers import Adam from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from sklearn.metrics import accuracy_score #import the dataset with np.load('training-dataset.npz') as data: img = data['x'] lbl = data['y'] del data print(img.shape) print(type(img)) print(lbl.shape) #split datasets to training, validation and testing sets X_train, X_val_test, y_train, y_val_test = train_test_split(img, lbl, test_size = 0.3, random_state = 666) X_val, X_test, y_val, y_test = train_test_split(X_val_test, y_val_test, test_size = 0.5, random_state = 666) print(y_train.shape) print(y_val.shape) print(y_test.shape) #normalization, image pixel is between 0-255, according to the formula, we divide by 255 and then reshape X_train = X_train/255 X_val = X_val/255 X_test = X_test/255 x_train = X_train.reshape((-1, 28, 28, 1)) x_val = X_val.reshape((-1, 28, 28, 1)) x_test = X_test.reshape((-1, 28, 28, 1)) print(x_train.shape) #dummy coding labels onehot = LabelBinarizer() d_y_train = onehot.fit_transform(y_train) d_y_val = onehot.transform(y_val) d_y_test = onehot.transform(y_test) print(d_y_train.shape) """## Model 1: KNN """ #tune hyperparameter K using 3-fold cross validation k_range = range(3, 8) cv_scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors = k, n_jobs = -1) scores = cross_val_score(knn, X_train, d_y_train, cv = 3) #3 folds cv_score = np.mean(scores) print('k = {}, accuracy on validation set = {:.3f}'.format(k, cv_score)) cv_scores.append((k, cv_score)) #selecting k that gave the best accuracy on validation set best_k = max(cv_scores, key = lambda x:x[-1]) print('best_k: {} with validation accuracy of {}'.format(best_k[0], best_k)) #using best parameter k to train KNN on training data knn_model = KNeighborsClassifier(n_neighbors = best_k[0], n_jobs = -1) knn_model.fit(X_train, d_y_train) #evaluate fitted KNN on test data knn_y_pred = knn_model.predict(X_test) test_accuracy = accuracy_score(d_y_test, knn_y_pred) print(test_accuracy) """## Model 2: CNN """ #tune hyperparameters, here we tune batch size, dropout rate and learning rate settings = [] for batch in [64, 100, 128]: for drop in [0, 0.5, 0.8]: for lr in [0.1, 0.01, 0.001]: print("batch :", batch) print("drop:", drop) print('learning rate:', lr) model = Sequential() #convolution 1 model.add(Convolution2D(input_shape=(28,28,1), filters=32, kernel_size=5, strides=1, activation='relu')) #pooling 1 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 2 model.add(Convolution2D(filters=64, kernel_size=5, strides=1, activation='relu' )) #pooling 2 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 3 model.add(Convolution2D(filters=128, kernel_size=5, strides=1, activation='relu' )) #pooling 3 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 4 
model.add(Convolution2D(filters=256, kernel_size=5, strides=1, activation='relu' )) #pooling 4 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #Flatten, transfer to vectors model.add(Flatten()) #Dropout model.add(Dropout(drop)) #fully connected network 1 model.add(Dense(500, activation='relu')) #fully connected network 2, 26 because 26 different letters in total model.add(Dense(26, activation='softmax')) #earlystopping to prevent overfitting early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min') #reducing learning rate reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 1, verbose = 1, mode = 'min', min_delta =0.0001, cooldown=0, min_lr=0) callback_lists = [early_stopping, reduce_lr] adam = Adam(lr = lr) model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy']) model.fit(x_train, d_y_train, batch_size = batch, epochs = 5, verbose = 1, validation_data = (x_val, d_y_val), shuffle = True, callbacks = callback_lists) loss, acc = model.evaluate(x_val, d_y_val) settings.append((batch, drop, lr, acc)) #print best accuracy best_accuracy = max(settings, key = lambda x:x[-1]) print(best_accuracy) #lr = 0.001 best_batch, best_drop, best_lr = best_accuracy[:-1] print(best_batch, best_drop, best_lr) #using tuned parameters to train model model = Sequential() #convolution 1, activation model.add(Convolution2D(input_shape=(28,28,1), filters=32, kernel_size=5, strides=1, padding='same', activation='relu')) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1,padding='same')) #convolution 2, activation model.add(Convolution2D(filters=64, kernel_size=5, strides=1, padding='same', activation='relu' )) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 3, activation model.add(Convolution2D(filters=128, kernel_size=5, strides=1, padding='same', activation='relu' )) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 4, activation model.add(Convolution2D(filters=256, kernel_size=5, strides=1, padding='same', activation='relu' )) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #Flatten, transfer to vectors model.add(Flatten()) #Dropout model.add(Dropout(best_drop)) #fully connected network 1 model.add(Dense(500,activation='relu')) #fully connected network 2 model.add(Dense(26, activation='softmax')) #early stopping, to prevent overfitting early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min') #reducing learning rate reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 1, verbose = 1, mode = 'min', min_delta =0.0001, cooldown=0, min_lr=0) callback_lists = [early_stopping, reduce_lr] #optimizer adam = Adam(lr = best_lr) model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy']) #training history = model.fit(x_train,d_y_train, batch_size = best_batch, epochs = 12, validation_data = (x_val, d_y_val), verbose = 1, shuffle = True, callbacks = callback_lists) #plotting loss and accuracy of training and validation sets #accuracy plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() #loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() #save our model 
model.save('my_model1.h5') #make predictions of testing sets and see how accurate those predictions are loss,acc = model.evaluate(x_test, d_y_test) print(loss,acc) """We have got around 85% accuray on testing set with KNN model and 95% accuracy on testing set with CNN model. So we decided to use CNN for our task 2. # Task 2 """ # Commented out IPython magic to ensure Python compatibility. # import libraries used for task 2 # %pylab inline --no-import-all import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from keras.preprocessing.image import ImageDataGenerator import cv2 from keras.utils import plot_model from skimage.util import random_noise from skimage.filters import threshold_local from skimage.morphology import remove_small_objects from skimage.measure import label, regionprops from skimage.color import label2rgb from google.colab import drive # load testing-dataset test = np.load('test-dataset.npy') print(test.shape) # see what images are like before denoise plt.imshow(test[-1]) plt.show() # denoise all images and see what they are like now from scipy import ndimage import matplotlib.pyplot as plt testing_filtered = [] for i in range(len(test)): new_image = ndimage.median_filter(test[i], 2) testing_filtered.append(ndimage.median_filter(new_image, 3)) plt.imshow(testing_filtered[-1]) plt.show() #define a function to split the images def image_crop(data): testing_cropped = [] for i in range(len(data)): #threshold each image and find contours img = (data[i]).astype('uint8') _, threshold = cv2.threshold(img.copy(), 10, 255, 0) contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:] bboxes = [] #creating bounding boxes from contours for f in range(len(contours)): bboxes.append(cv2.boundingRect(contours[f])) split = [] to_remove = [] #threshold to remove small w and h bbox, and split those with w >= 28 for j in range(len(bboxes)): if (bboxes[j][2] < 20) and (bboxes[j][3] < 17):
if bboxes[j][2] >= 30: split.append(j) #modifying bboxes to get half w and move x to (x + w/2) for g in split: bboxes[g] = (bboxes[g][0], bboxes[g][1], int(bboxes[g][2]/2), bboxes[g][3]) modified_bboxes = bboxes[g] modified_bboxes = (int(bboxes[g][0]) + int(bboxes[g][2]), int(bboxes[g][1]), int(bboxes[g][2]), int(bboxes[g][3])) bboxes.append(modified_bboxes) #removing bboxes with small w and h for b in to_remove: bboxes.remove(b) #sorting bboxes bboxes = sorted(np.array(bboxes), key = lambda x: x[0]) cut = [] for h in range(len(bboxes)): images = img[bboxes[h][1]:bboxes[h][1]+bboxes[h][3], bboxes[h][0]:bboxes[h][0]+bboxes[h][2]] if images[0].shape > np.max(3): cut.append(images) cropped = [] #reshaping the cut images to be able to use CNN for image_split in cut: crop = image_split.reshape((image_split.shape[0],image_split.shape[1],1)) crop = np.array(tf.image.resize_with_crop_or_pad(crop, 28, 28)) img_cropped = crop.reshape(28,28) cropped.append(img_cropped) testing_cropped.append(cropped) return np.array(testing_cropped) testing_cropped = image_crop(testing_filtered) print(len(testing_cropped)) #10000 images # let's see an example letter from testing_cropped dataset plt.imshow(testing_cropped[420][0]) plt.show() #most of images are separated into 4 letters, but still many are into 3 or 5 letters l=[] for i in range(len(testing_cropped)): l.append(len(testing_cropped[i])) plt.hist(l) plt.show() #make 5 predictions which have highest probability scores by using our CNN model block_size = 55 predictions = [] top1 = [] top2 = [] top3 = [] top4 = [] top5 = [] final = [] for i in range(10000): crops_number = (len(testing_cropped[i])) for sample in testing_cropped[i]: imbw = sample > threshold_local(sample, block_size, method = 'mean') imbw1 = remove_small_objects(imbw, 10, connectivity=1) roi = imbw1 roi = roi.reshape((roi.shape[0],roi.shape[1],1)) roi = tf.image.resize_with_crop_or_pad(roi, 28, 28).numpy() image = roi.reshape(28, 28) pre = model.predict(image.reshape(-1,28,28,1)) for i in pre: '''i is the probability of each letter, in total 26 probability scores, we select the highest 5 and their index is the predicted label''' prob1 = np.argsort(i)[-5] + 1 top5.append(prob1) prob2 = np.argsort(i)[-4] + 1 top4.append(prob2) prob3 = np.argsort(i)[-3] + 1 top3.append(prob3) prob4 = np.argsort(i)[-2] + 1 top2.append(prob4) prob5 = np.argsort(i)[-1] + 1 top1.append(prob5) pred = top5 + top4 + top3 + top2 + top1 pred1 = [] y_pred = [] for i in pred: i = str(i) if len(i) == 1: s = i.zfill(2) pred1.append(s) else: pred1.append(i) for step in range(0, len(pred1), crops_number): pred2 = pred1[step:step + crops_number] pred3 = ''.join(pred2) y_pred.append(pred3) final1 = y_pred.copy() final.append(final1) top1.clear() top2.clear() top3.clear() top4.clear() top5.clear() y_pred.clear() #print(final) #take the last image as an example, to see the most probable labels, we got 100% accuracy for this image print(final[-1][-1]) plt.imshow(testing_filtered[-1]) plt.show() #take another image that includes 3 letters as an example, we got at least 2 of 3 letters correctly predicted print(final[18][-1]) plt.imshow(testing_filtered[18]) plt.show() # save to a csv file import csv with open('Predictions.csv', 'w', newline = '') as myfile: wr = csv.writer(myfile, quoting=csv.QUOTE_ALL) wr.writerows(final)
to_remove.append(bboxes[j])
conditional_block
code.py
# -*- coding: utf-8 -*- """ # Task 1 """ # Commented out IPython magic to ensure Python compatibility. #import all libraries we will use for task 1 # %pylab inline --no-import-all import numpy as np import matplotlib.pyplot as plt from keras.models import load_model from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import cross_val_score from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Convolution2D, MaxPooling2D from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten from tensorflow.keras.optimizers import Adam from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from sklearn.metrics import accuracy_score #import the dataset with np.load('training-dataset.npz') as data: img = data['x'] lbl = data['y'] del data print(img.shape) print(type(img)) print(lbl.shape) #split datasets to training, validation and testing sets X_train, X_val_test, y_train, y_val_test = train_test_split(img, lbl, test_size = 0.3, random_state = 666) X_val, X_test, y_val, y_test = train_test_split(X_val_test, y_val_test, test_size = 0.5, random_state = 666) print(y_train.shape) print(y_val.shape) print(y_test.shape) #normalization, image pixel is between 0-255, according to the formula, we divide by 255 and then reshape X_train = X_train/255 X_val = X_val/255 X_test = X_test/255 x_train = X_train.reshape((-1, 28, 28, 1)) x_val = X_val.reshape((-1, 28, 28, 1)) x_test = X_test.reshape((-1, 28, 28, 1)) print(x_train.shape) #dummy coding labels onehot = LabelBinarizer() d_y_train = onehot.fit_transform(y_train) d_y_val = onehot.transform(y_val) d_y_test = onehot.transform(y_test) print(d_y_train.shape) """## Model 1: KNN """ #tune hyperparameter K using 3-fold cross validation k_range = range(3, 8) cv_scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors = k, n_jobs = -1) scores = cross_val_score(knn, X_train, d_y_train, cv = 3) #3 folds cv_score = np.mean(scores) print('k = {}, accuracy on validation set = {:.3f}'.format(k, cv_score)) cv_scores.append((k, cv_score)) #selecting k that gave the best accuracy on validation set best_k = max(cv_scores, key = lambda x:x[-1]) print('best_k: {} with validation accuracy of {}'.format(best_k[0], best_k)) #using best parameter k to train KNN on training data knn_model = KNeighborsClassifier(n_neighbors = best_k[0], n_jobs = -1) knn_model.fit(X_train, d_y_train) #evaluate fitted KNN on test data knn_y_pred = knn_model.predict(X_test) test_accuracy = accuracy_score(d_y_test, knn_y_pred) print(test_accuracy) """## Model 2: CNN """ #tune hyperparameters, here we tune batch size, dropout rate and learning rate settings = [] for batch in [64, 100, 128]: for drop in [0, 0.5, 0.8]: for lr in [0.1, 0.01, 0.001]: print("batch :", batch) print("drop:", drop) print('learning rate:', lr) model = Sequential() #convolution 1 model.add(Convolution2D(input_shape=(28,28,1), filters=32, kernel_size=5, strides=1, activation='relu')) #pooling 1 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 2 model.add(Convolution2D(filters=64, kernel_size=5, strides=1, activation='relu' )) #pooling 2 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 3 model.add(Convolution2D(filters=128, kernel_size=5, strides=1, activation='relu' )) #pooling 3 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 4 
model.add(Convolution2D(filters=256, kernel_size=5, strides=1, activation='relu' )) #pooling 4 model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #Flatten, transfer to vectors model.add(Flatten()) #Dropout model.add(Dropout(drop)) #fully connected network 1 model.add(Dense(500, activation='relu')) #fully connected network 2, 26 because 26 different letters in total model.add(Dense(26, activation='softmax')) #earlystopping to prevent overfitting early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min') #reducing learning rate reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 1, verbose = 1, mode = 'min', min_delta =0.0001, cooldown=0, min_lr=0) callback_lists = [early_stopping, reduce_lr] adam = Adam(lr = lr) model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy']) model.fit(x_train, d_y_train, batch_size = batch, epochs = 5, verbose = 1, validation_data = (x_val, d_y_val), shuffle = True, callbacks = callback_lists) loss, acc = model.evaluate(x_val, d_y_val) settings.append((batch, drop, lr, acc)) #print best accuracy best_accuracy = max(settings, key = lambda x:x[-1]) print(best_accuracy) #lr = 0.001 best_batch, best_drop, best_lr = best_accuracy[:-1] print(best_batch, best_drop, best_lr) #using tuned parameters to train model model = Sequential() #convolution 1, activation model.add(Convolution2D(input_shape=(28,28,1), filters=32, kernel_size=5, strides=1, padding='same', activation='relu')) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1,padding='same')) #convolution 2, activation model.add(Convolution2D(filters=64, kernel_size=5, strides=1, padding='same', activation='relu' )) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 3, activation model.add(Convolution2D(filters=128, kernel_size=5, strides=1, padding='same', activation='relu' )) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #convolution 4, activation model.add(Convolution2D(filters=256, kernel_size=5, strides=1, padding='same', activation='relu' )) #pooling model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same')) #Flatten, transfer to vectors model.add(Flatten()) #Dropout model.add(Dropout(best_drop)) #fully connected network 1 model.add(Dense(500,activation='relu')) #fully connected network 2 model.add(Dense(26, activation='softmax')) #early stopping, to prevent overfitting early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min') #reducing learning rate reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 1, verbose = 1, mode = 'min', min_delta =0.0001, cooldown=0, min_lr=0) callback_lists = [early_stopping, reduce_lr] #optimizer adam = Adam(lr = best_lr) model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy']) #training history = model.fit(x_train,d_y_train, batch_size = best_batch, epochs = 12, validation_data = (x_val, d_y_val), verbose = 1, shuffle = True, callbacks = callback_lists) #plotting loss and accuracy of training and validation sets #accuracy plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() #loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() #save our model 
model.save('my_model1.h5') #make predictions on the test set and see how accurate those predictions are loss,acc = model.evaluate(x_test, d_y_test) print(loss,acc) """We got around 85% accuracy on the test set with the KNN model and 95% accuracy on the test set with the CNN model, so we decided to use the CNN for task 2. # Task 2 """ # Commented out IPython magic to ensure Python compatibility. # import libraries used for task 2 # %pylab inline --no-import-all import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from keras.preprocessing.image import ImageDataGenerator import cv2 from keras.utils import plot_model from skimage.util import random_noise from skimage.filters import threshold_local from skimage.morphology import remove_small_objects from skimage.measure import label, regionprops from skimage.color import label2rgb from google.colab import drive # load testing-dataset test = np.load('test-dataset.npy') print(test.shape) # see what the images look like before denoising plt.imshow(test[-1]) plt.show() # denoise all images and see what they look like now from scipy import ndimage import matplotlib.pyplot as plt testing_filtered = [] for i in range(len(test)): new_image = ndimage.median_filter(test[i], 2) testing_filtered.append(ndimage.median_filter(new_image, 3)) plt.imshow(testing_filtered[-1]) plt.show() #define a function to split the images def image_crop(data): testing_cropped = [] for i in range(len(data)): #threshold each image and find contours img = (data[i]).astype('uint8') _, threshold = cv2.threshold(img.copy(), 10, 255, 0) contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:] bboxes = [] #creating bounding boxes from contours for f in range(len(contours)): bboxes.append(cv2.boundingRect(contours[f])) split = [] to_remove = [] #threshold to remove bboxes with a small w and h, and split those with w >= 30 for j in range(len(bboxes)): if (bboxes[j][2] < 20) and (bboxes[j][3] < 17): to_remove.append(bboxes[j]) if bboxes[j][2] >= 30: split.append(j) #modifying bboxes to get half w and move x to (x + w/2) for g in split: bboxes[g] = (bboxes[g][0], bboxes[g][1], int(bboxes[g][2]/2), bboxes[g][3]) modified_bboxes = bboxes[g] modified_bboxes = (int(bboxes[g][0]) + int(bboxes[g][2]), int(bboxes[g][1]), int(bboxes[g][2]), int(bboxes[g][3])) bboxes.append(modified_bboxes) #removing bboxes with small w and h for b in to_remove: bboxes.remove(b) #sorting bboxes bboxes = sorted(np.array(bboxes), key = lambda x: x[0]) cut = [] for h in range(len(bboxes)): images = img[bboxes[h][1]:bboxes[h][1]+bboxes[h][3], bboxes[h][0]:bboxes[h][0]+bboxes[h][2]] if images.shape[1] > 3: #keep only crops wider than 3 pixels cut.append(images) cropped = [] #reshaping the cut images to be able to use the CNN for image_split in cut: crop = image_split.reshape((image_split.shape[0],image_split.shape[1],1)) crop = np.array(tf.image.resize_with_crop_or_pad(crop, 28, 28)) img_cropped = crop.reshape(28,28) cropped.append(img_cropped) testing_cropped.append(cropped) return np.array(testing_cropped) testing_cropped = image_crop(testing_filtered) print(len(testing_cropped)) #10000 images # let's see an example letter from the testing_cropped dataset plt.imshow(testing_cropped[420][0]) plt.show() #most images are separated into 4 letters, but many are split into 3 or 5 letters l=[] for i in range(len(testing_cropped)): l.append(len(testing_cropped[i])) plt.hist(l) plt.show() #make the 5 predictions with the highest probability scores using our CNN model block_size = 55 predictions = [] top1 = [] top2 = [] top3 = 
[] top4 = [] top5 = [] final = [] for i in range(10000): crops_number = (len(testing_cropped[i]))
imbw1 = remove_small_objects(imbw, 10, connectivity=1) roi = imbw1 roi = roi.reshape((roi.shape[0],roi.shape[1],1)) roi = tf.image.resize_with_crop_or_pad(roi, 28, 28).numpy() image = roi.reshape(28, 28) pre = model.predict(image.reshape(-1,28,28,1)) for p in pre: '''p holds the probability of each letter, 26 probability scores in total; we select the highest 5 and their indices give the predicted labels''' prob1 = np.argsort(p)[-5] + 1 top5.append(prob1) prob2 = np.argsort(p)[-4] + 1 top4.append(prob2) prob3 = np.argsort(p)[-3] + 1 top3.append(prob3) prob4 = np.argsort(p)[-2] + 1 top2.append(prob4) prob5 = np.argsort(p)[-1] + 1 top1.append(prob5) pred = top5 + top4 + top3 + top2 + top1 pred1 = [] y_pred = [] for lab in pred: lab = str(lab) if len(lab) == 1: s = lab.zfill(2) pred1.append(s) else: pred1.append(lab) for step in range(0, len(pred1), crops_number): pred2 = pred1[step:step + crops_number] pred3 = ''.join(pred2) y_pred.append(pred3) final1 = y_pred.copy() final.append(final1) top1.clear() top2.clear() top3.clear() top4.clear() top5.clear() y_pred.clear() #print(final) #take the last image as an example; looking at the most probable labels, we got 100% accuracy for this image print(final[-1][-1]) plt.imshow(testing_filtered[-1]) plt.show() #take another image that contains 3 letters as an example; we got at least 2 of the 3 letters correctly predicted print(final[18][-1]) plt.imshow(testing_filtered[18]) plt.show() # save to a csv file import csv with open('Predictions.csv', 'w', newline = '') as myfile: wr = csv.writer(myfile, quoting=csv.QUOTE_ALL) wr.writerows(final)
for sample in testing_cropped[i]: imbw = sample > threshold_local(sample, block_size, method = 'mean')
random_line_split
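A note on the "dummy coding" step in the code.py record above: LabelBinarizer maps integer class labels to one-hot rows, which is what the categorical cross-entropy loss expects. A minimal sketch (the label values here are illustrative, not taken from the dataset):

import numpy as np
from sklearn.preprocessing import LabelBinarizer

onehot = LabelBinarizer()
demo = onehot.fit_transform(np.array([1, 3, 2]))  # three samples over classes {1, 2, 3}
print(demo)                            # [[1 0 0], [0 0 1], [0 1 0]]
print(onehot.inverse_transform(demo))  # recovers [1 3 2]

inverse_transform is also a clean way to turn softmax output back into letter labels, as an alternative to the manual argsort arithmetic used later in the script.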
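The CNN tuning in the same record is a plain grid search: three nested loops over 3 batch sizes x 3 dropout rates x 3 learning rates, 27 trainings in all. The same sweep can be flattened with itertools.product; evaluate() below is a hypothetical stand-in for the build/fit/evaluate body of the loop, not a function from the notebook:

import itertools

def evaluate(batch, drop, lr):
    # placeholder for: build the CNN, fit with EarlyStopping/ReduceLROnPlateau,
    # and return the validation accuracy
    return 0.0

settings = []
for batch, drop, lr in itertools.product([64, 100, 128], [0, 0.5, 0.8], [0.1, 0.01, 0.001]):
    settings.append((batch, drop, lr, evaluate(batch, drop, lr)))
best_batch, best_drop, best_lr, _ = max(settings, key=lambda s: s[-1])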
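The top-5 decoding in the prediction loop works because np.argsort returns class indices in ascending order of probability, so the last five entries are the five most likely letters (plus 1, since the labels are 1-based). A small worked example with made-up scores:

import numpy as np

probs = np.array([0.05, 0.40, 0.10, 0.30, 0.15])  # softmax scores for 5 classes
order = np.argsort(probs)      # [0 2 4 3 1], least to most likely
top3 = order[::-1][:3] + 1     # [2 4 5]: the 3 most likely 1-based labels
print(top3)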
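Task 2's cleanup stage chains a median filter (which removes isolated salt-and-pepper pixels) with a local threshold before cropping. A self-contained sketch of that pattern on a synthetic image, assuming only scipy and skimage; the shapes and noise level are invented:

import numpy as np
from scipy import ndimage
from skimage.filters import threshold_local

rng = np.random.default_rng(0)
img = np.zeros((64, 64))
img[20:40, 20:40] = 200                  # a bright "letter"
img[rng.random(img.shape) < 0.05] = 255  # sprinkle 5% salt noise

smooth = ndimage.median_filter(img, 3)   # isolated bright pixels vanish
mask = smooth > threshold_local(smooth, 55, method='mean')  # local binarization
print(int(mask.sum()))                   # roughly the 400-pixel area of the square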
longestCommonSubsequence.py
#!/usr/bin/env python __author__ = 'greg' from copy import deepcopy import numpy as np import matplotlib.pyplot as plt import datetime import os import itertools #important cases # ('gold but black', 'gold out flack', 'Gold ') # [4, 3, 2, 1] # [8, 3, 2, 1] # [4, 3, 2, 1] # [False, True, False] # [['g', 'old ', 'but black'], ['g', 'old', ' out', ' ', 'flack'], ['G', 'old ']] # for Greg - which computer am I on? if os.path.exists("/home/ggdhines"): base_directory = "/home/ggdhines" code_directory = base_directory + "/github" elif os.path.exists("/Users/greg"): base_directory = "/Users/greg" code_directory = base_directory + "/Code" else: base_directory = "/home/greg" code_directory = base_directory + "/github" #each image that was sent out contained several lines to be transcribed
imageIndex = 1 lineIndex = 4 #which input files do you want to read in - they need to be of the form transcribe$i.txt #right now can handle at most only 3 or so files - NP-hard problem. Future work might be to make this #code more scalable - probably with branch and bound approach inputFiles = [1,2,3,4,5] def load_file(fname): currentImage = 0 currentLine = -1 individual_transcriptions = {} with open(fname,"rb") as f: for l in f.readlines(): if l == "\n": currentImage += 1 currentLine = -1 continue currentLine += 1 individual_transcriptions[(currentImage,currentLine)] = l[:-1] return individual_transcriptions def lcs(lines): #find the length of each string stringLength = [len(l)+1 for l in lines] #record the length of the longest common subsequence - assumes a unique LCS #that is, there might be more than one longest common subsequence. Have encountered this in practice #but the strings are usually similar enough that it doesn't matter which LCS you choose dynamicMatrix = np.zeros(stringLength) #keep track of the time needed to do the calculation - mainly just because it is NP-hard #want to know how big an input you can handle time1 = datetime.datetime.now() #the following is the dynamic programming approach as shown on the Wikipedia page for longest common subsequence traceMatrices = [np.zeros(stringLength) for l in lines] #transcribed = lines[(imageIndex,lineIndex)] #need to wrap so that numpy is happy #index for iterating over all tuples of characters - one from each string currentIndex = [[1,] for w in lines] #dynamic programming approach - basically just filling in a matrix as we go while True: characters = [lines[j][currentIndex[j][0]-1] for j in range(len(lines))] #if we have a match across all strings if min(characters) == max(characters): #diagonal move newIndex = [[i[0]-1,] for i in currentIndex] #the longest common previous subsequence is a diagonal move backwards for j in range(len(newIndex)): traceMatrices[j][currentIndex] = newIndex[j] dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] + 1 else: #either an up or a sideways move #find which is the maximum - assume unique maxLength = -1 axis = None #find out where the previous LCS is - either up or sideways for j in range(len(currentIndex)): #move backwards along this axis newIndex = deepcopy(currentIndex) newIndex[j][0] += -1 if dynamicMatrix[newIndex][0] > maxLength: maxLength = dynamicMatrix[newIndex][0] axis = j newIndex = deepcopy(currentIndex) newIndex[axis][0] += -1 for j in range(len(newIndex)): traceMatrices[j][currentIndex] = newIndex[j] dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] #iterate to the next tuple of characters for j in range(0,len(currentIndex)): currentIndex[j][0] += 1 if currentIndex[j][0] == (len(lines[j])+1): currentIndex[j][0] = 1 else: break if currentIndex == [[1,] for l in lines]: break #check to see if the last tuple of characters is a match lastCharacter = [t[-1] for t in lines] s = [[] for t in lines] lcs_length = 0 if min(lastCharacter) == max(lastCharacter): lcs_length += 1 for i,w in enumerate(lines): s[i].append(len(w)-1) #read out the LCS by travelling backwards (up, left or diagonal) through the matrix endPoint = [[-1,] for j in lines] cell = [[int(traceMatrices[j][endPoint]),] for j in range(len(lines))] while cell != [[0,] for j in range(len(lines))]: newcell = [[int(traceMatrices[j][cell][0]),] for j in range(len(lines))] #if we have a diagonal move - this corresponds to a point in the LCS allChange = not(False in [a!=b for (a,b) in zip(cell,newcell)]) if allChange: lcs_length += 1 for 
j in range(len(lines)): s[j].append(newcell[j][0]) cell = newcell #print out how long this took time2 = datetime.datetime.now() # use the first string to actually create the LCS lcs_string = "" for i,c in enumerate(lines[0]): if i in s[0]: lcs_string += c # print time2-time1 # #print out the LCS in green, all other characters in red results = [[] for l in lines] at_lcs = [None for l in lines] agreement = [] s = [sorted(s_temp) for s_temp in s] LCStuples = [[s[j][i] for j in range(len(lines))] for i in range(len(s[0]))] LCSsequences = [[LCStuples[0]]] for i in range(1,len(s[0])): max_character_jump = max([(s[j][i] - s[j][i-1]) for j in range(len(lines))]) if max_character_jump > 1: LCSsequences.append([]) LCSsequences[-1].append(LCStuples[i]) segments = {} lcs_string = "" for j in range(len(lines)): currentIndex = 0 results = [] for sequenceIndex,nextSequence in enumerate(LCSsequences): firstLCSCharacter = nextSequence[0][j] lastLCSCharacter = nextSequence[-1][j] l = lines[j][currentIndex:firstLCSCharacter] if l != "": if not (2*sequenceIndex) in segments: segments[2*sequenceIndex] = [l] else: segments[2*sequenceIndex].append(l) # now extract the LCS - we only need to do this once, since everyone is in agreement if j == 0: l = lines[0][firstLCSCharacter:lastLCSCharacter+1] segments[2*sequenceIndex+1] = l[:] lcs_string += l currentIndex = lastLCSCharacter + 1 l = lines[j][currentIndex:] if l != "": if not (2*(sequenceIndex+1)) in segments: segments[2*(sequenceIndex+1)] = [l] else: segments[2*(sequenceIndex+1)].append(l) # results.append((l,-sequenceIndex-2)) # segments.add(-sequenceIndex-2) return lcs_string,segments from termcolor import colored for j,tran in enumerate(lines): print(s[j]) for i,c in enumerate(tran): if i in s[j]: now_at_csl = True #print colored(c,'green'), else: now_at_csl = False #print colored(c,'red'), if now_at_csl != at_lcs[j]: results[j].append("") if j == 0: agreement.append(now_at_csl) at_lcs[j] = now_at_csl results[j][-1] += c #print return lcs_string,agreement,results transcriptions = {} #read in the files - right now just set up to work on Greg's computer for i in inputFiles: fname = base_directory+"/Databases/transcribe/transcribe"+str(i)+".txt" individual_transcriptions = load_file(fname) for key,line in individual_transcriptions.items(): if not(key in transcriptions): transcriptions[key] = [line] else: transcriptions[key].append(line) gold_fname = base_directory+"/Dropbox/goldTranscriptions.txt" gold_transcriptions = load_file(gold_fname) X = [] Y = [] Xstd = [] Ystd = [] for imageIndex in range(3): for lineIndex in range(5): a = [] c = [] for l in transcriptions[(imageIndex,lineIndex)]: lcs_string,segments = lcs([gold_transcriptions[(imageIndex,lineIndex)],l]) accuracy = len(lcs_string)/float(len(l)) completeness = len(lcs_string)/float(len(gold_transcriptions[(imageIndex,lineIndex)])) a.append(accuracy) c.append(completeness) X.append(np.mean(a)) Y.append(np.mean(c)) Xstd.append(np.std(a,ddof=1)) Ystd.append(np.std(c,ddof=1)) print(X) print(np.mean(X)) print(np.mean(Y)) plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".") plt.xlabel("Accuracy (w/ standard dev.)") plt.ylabel("Completeness (w/ standard dev.)") plt.xlim((0,1.1)) plt.ylim((0,1.1)) plt.show() assert False def majority_vote(segments,num_users=3): new_lcs = "" for segmentIndex in sorted(list(segments)): # is this segment part of the LCS (if so, don't worry about merging) if segmentIndex%2 == 0: votes = {} for transcription in segments[segmentIndex]: if transcription in votes: votes[transcription] += 1 else: 
votes[transcription] = 1 if max(votes.values()) == 2: most_likely = max(votes.items(),key = lambda x:x[1]) new_lcs += most_likely[0] else: new_lcs += segments[segmentIndex] # otherwise - merge # grab all of those segments with the correct segment index return new_lcs def findsubsets(S,m): return set(itertools.combinations(S, m)) for imageIndex in range(3): for lineIndex in range(5): print (imageIndex,lineIndex) a = [] c = [] #print gold_transcriptions[(imageIndex,lineIndex)] for i,s in enumerate(findsubsets(transcriptions[(imageIndex,lineIndex)],3)): lcs_string,segments = lcs(s) new_lcs_string = majority_vote(segments) lcs_string2,segments = lcs([gold_transcriptions[(imageIndex,lineIndex)],lcs_string]) accuracy = len(lcs_string2)/float(len(lcs_string)) completeness = len(lcs_string2)/float(len(gold_transcriptions[(imageIndex,lineIndex)])) a.append(accuracy) c.append(completeness) #print new_lcs_string #print np.mean(a) X.append(np.mean(a)) Y.append(np.mean(c)) Xstd.append(np.std(a,ddof=1)) Ystd.append(np.std(c,ddof=1)) print(np.mean(X)) print(np.mean(Y)) plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".") plt.xlabel("Accuracy (w/ standard dev.)") plt.ylabel("Completeness (w/ standard dev.)") plt.xlim((0,1.1)) plt.ylim((0,1.1)) plt.show() #random.sample(transcriptions[(imageIndex,lineIndex)],3) #lcs_string = lcs(transcriptions[(imageIndex,lineIndex)]) #lcs_string2 = lcs([gold_transcriptions[(imageIndex,lineIndex)],lcs_string]) #print lcs_string2
#give the index of the image and the line
random_line_split
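For contrast with the N-string generalization implemented in the record above, the textbook two-string LCS fills an (m+1) x (n+1) table and backtracks through it. A minimal sketch (this is the standard algorithm, not the script's own trace-matrix variant):

def lcs2(a, b):
    m, n = len(a), len(b)
    dp = [[0] * (n + 1) for _ in range(m + 1)]  # dp[i][j] = |LCS(a[:i], b[:j])|
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    out, i, j = [], m, n
    while i and j:  # backtrack to recover one LCS
        if a[i - 1] == b[j - 1]:
            out.append(a[i - 1]); i -= 1; j -= 1
        elif dp[i - 1][j] >= dp[i][j - 1]:
            i -= 1
        else:
            j -= 1
    return ''.join(reversed(out))

print(lcs2('gold but black', 'gold out flack'))  # one LCS, e.g. 'gold ut lack'

The script generalizes this same recurrence to N strings at once, which is exactly why its table has one dimension per input string.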
longestCommonSubsequence.py
#!/usr/bin/env python __author__ = 'greg' from copy import deepcopy import numpy as np import matplotlib.pyplot as plt import datetime import os import itertools #important cases # ('gold but black', 'gold out flack', 'Gold ') # [4, 3, 2, 1] # [8, 3, 2, 1] # [4, 3, 2, 1] # [False, True, False] # [['g', 'old ', 'but black'], ['g', 'old', ' out', ' ', 'flack'], ['G', 'old ']] # for Greg - which computer am I on? if os.path.exists("/home/ggdhines"): base_directory = "/home/ggdhines" code_directory = base_directory + "/github" elif os.path.exists("/Users/greg"): base_directory = "/Users/greg" code_directory = base_directory + "/Code" else: base_directory = "/home/greg" code_directory = base_directory + "/github" #each image that was sent out contained several lines to be transcribed #give the index of the image and the line imageIndex = 1 lineIndex = 4 #which input files do you want to read in - they need to be of the form transcribe$i.txt #right now can handle at most only 3 or so files - NP-hard problem. Future work might be to make this #code more scalable - probably with branch and bound approach inputFiles = [1,2,3,4,5] def load_file(fname): currentImage = 0 currentLine = -1 individual_transcriptions = {} with open(fname,"rb") as f: for l in f.readlines(): if l == "\n": currentImage += 1 currentLine = -1 continue currentLine += 1 individual_transcriptions[(currentImage,currentLine)] = l[:-1] return individual_transcriptions def
(lines): #find the length of each string stringLength = [len(l)+1 for l in lines] #record the length of the longest common subsequence - assumes a unique LCS #that is, there might be more than one longest common subsequence. Have encountered this in practice #but the strings are usually similar enough that it doesn't matter which LCS you choose dynamicMatrix = np.zeros(stringLength) #keep track of the time needed to do the calculation - mainly just because it is NP-hard #want to know how big an input you can handle time1 = datetime.datetime.now() #the following is the dynamic programming approach as shown on the Wikipedia page for longest common subsequence traceMatrices = [np.zeros(stringLength) for l in lines] #transcribed = lines[(imageIndex,lineIndex)] #need to wrap so that numpy is happy #index for iterating over all tuples of characters - one from each string currentIndex = [[1,] for w in lines] #dynamic programming approach - basically just filling in a matrix as we go while True: characters = [lines[j][currentIndex[j][0]-1] for j in range(len(lines))] #if we have a match across all strings if min(characters) == max(characters): #diagonal move newIndex = [[i[0]-1,] for i in currentIndex] #the longest common previous subsequence is a diagonal move backwards for j in range(len(newIndex)): traceMatrices[j][currentIndex] = newIndex[j] dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] + 1 else: #either an up or a sideways move #find which is the maximum - assume unique maxLength = -1 axis = None #find out where the previous LCS is - either up or sideways for j in range(len(currentIndex)): #move backwards along this axis newIndex = deepcopy(currentIndex) newIndex[j][0] += -1 if dynamicMatrix[newIndex][0] > maxLength: maxLength = dynamicMatrix[newIndex][0] axis = j newIndex = deepcopy(currentIndex) newIndex[axis][0] += -1 for j in range(len(newIndex)): traceMatrices[j][currentIndex] = newIndex[j] dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] #iterate to the next tuple of characters for j in range(0,len(currentIndex)): currentIndex[j][0] += 1 if currentIndex[j][0] == (len(lines[j])+1): currentIndex[j][0] = 1 else: break if currentIndex == [[1,] for l in lines]: break #check to see if the last tuple of characters is a match lastCharacter = [t[-1] for t in lines] s = [[] for t in lines] lcs_length = 0 if min(lastCharacter) == max(lastCharacter): lcs_length += 1 for i,w in enumerate(lines): s[i].append(len(w)-1) #read out the LCS by travelling backwards (up, left or diagonal) through the matrix endPoint = [[-1,] for j in lines] cell = [[int(traceMatrices[j][endPoint]),] for j in range(len(lines))] while cell != [[0,] for j in range(len(lines))]: newcell = [[int(traceMatrices[j][cell][0]),] for j in range(len(lines))] #if we have a diagonal move - this corresponds to a point in the LCS allChange = not(False in [a!=b for (a,b) in zip(cell,newcell)]) if allChange: lcs_length += 1 for j in range(len(lines)): s[j].append(newcell[j][0]) cell = newcell #print out how long this took time2 = datetime.datetime.now() # use the first string to actually create the LCS lcs_string = "" for i,c in enumerate(lines[0]): if i in s[0]: lcs_string += c # print time2-time1 # #print out the LCS in green, all other characters in red results = [[] for l in lines] at_lcs = [None for l in lines] agreement = [] s = [sorted(s_temp) for s_temp in s] LCStuples = [[s[j][i] for j in range(len(lines))] for i in range(len(s[0]))] LCSsequences = [[LCStuples[0]]] for i in range(1,len(s[0])): max_character_jump = 
max([(s[j][i] - s[j][i-1]) for j in range(len(lines))]) if max_character_jump > 1: LCSsequences.append([]) LCSsequences[-1].append(LCStuples[i]) segments = {} lcs_string = "" for j in range(len(lines)): currentIndex = 0 results = [] for sequenceIndex,nextSequence in enumerate(LCSsequences): firstLCSCharacter = nextSequence[0][j] lastLCSCharacter = nextSequence[-1][j] l = lines[j][currentIndex:firstLCSCharacter] if l != "": if not (2*sequenceIndex) in segments: segments[2*sequenceIndex] = [l] else: segments[2*sequenceIndex].append(l) # now extract the LCS - we only need to do this once, since everyone is in agreement if j == 0: l = lines[0][firstLCSCharacter:lastLCSCharacter+1] segments[2*sequenceIndex+1] = l[:] lcs_string += l currentIndex = lastLCSCharacter + 1 l = lines[j][currentIndex:] if l != "": if not (2*(sequenceIndex+1)) in segments: segments[2*(sequenceIndex+1)] = [l] else: segments[2*(sequenceIndex+1)].append(l) # results.append((l,-sequenceIndex-2)) # segments.add(-sequenceIndex-2) return lcs_string,segments from termcolor import colored for j,tran in enumerate(lines): print(s[j]) for i,c in enumerate(tran): if i in s[j]: now_at_csl = True #print colored(c,'green'), else: now_at_csl = False #print colored(c,'red'), if now_at_csl != at_lcs[j]: results[j].append("") if j == 0: agreement.append(now_at_csl) at_lcs[j] = now_at_csl results[j][-1] += c #print return lcs_string,agreement,results transcriptions = {} #read in the files - right now just set up to work on Greg's computer for i in inputFiles: fname = base_directory+"/Databases/transcribe/transcribe"+str(i)+".txt" individual_transcriptions = load_file(fname) for key,line in individual_transcriptions.items(): if not(key in transcriptions): transcriptions[key] = [line] else: transcriptions[key].append(line) gold_fname = base_directory+"/Dropbox/goldTranscriptions.txt" gold_transcriptions = load_file(gold_fname) X = [] Y = [] Xstd = [] Ystd = [] for imageIndex in range(3): for lineIndex in range(5): a = [] c = [] for l in transcriptions[(imageIndex,lineIndex)]: lcs_string,segments = lcs([gold_transcriptions[(imageIndex,lineIndex)],l]) accuracy = len(lcs_string)/float(len(l)) completeness = len(lcs_string)/float(len(gold_transcriptions[(imageIndex,lineIndex)])) a.append(accuracy) c.append(completeness) X.append(np.mean(a)) Y.append(np.mean(c)) Xstd.append(np.std(a,ddof=1)) Ystd.append(np.std(c,ddof=1)) print(X) print(np.mean(X)) print(np.mean(Y)) plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".") plt.xlabel("Accuracy (w/ standard dev.)") plt.ylabel("Completeness (w/ standard dev.)") plt.xlim((0,1.1)) plt.ylim((0,1.1)) plt.show() assert False def majority_vote(segments,num_users=3): new_lcs = "" for segmentIndex in sorted(list(segments)): # is this segment part of the LCS (if so, don't worry about merging) if segmentIndex%2 == 0: votes = {} for transcription in segments[segmentIndex]: if transcription in votes: votes[transcription] += 1 else: votes[transcription] = 1 if max(votes.values()) == 2: most_likely = max(votes.items(),key = lambda x:x[1]) new_lcs += most_likely[0] else: new_lcs += segments[segmentIndex] # otherwise - merge # grab all of those segments with the correct segment index return new_lcs def findsubsets(S,m): return set(itertools.combinations(S, m)) for imageIndex in range(3): for lineIndex in range(5): print (imageIndex,lineIndex) a = [] c = [] #print gold_transcriptions[(imageIndex,lineIndex)] for i,s in enumerate(findsubsets(transcriptions[(imageIndex,lineIndex)],3)): lcs_string,segments = lcs(s) new_lcs_string = 
majority_vote(segments) lcs_string2,segments = lcs([gold_transcriptions[(imageIndex,lineIndex)],lcs_string]) accuracy = len(lcs_string2)/float(len(lcs_string)) completeness = len(lcs_string2)/float(len(gold_transcriptions[(imageIndex,lineIndex)])) a.append(accuracy) c.append(completeness) #print new_lcs_string #print np.mean(a) X.append(np.mean(a)) Y.append(np.mean(c)) Xstd.append(np.std(a,ddof=1)) Ystd.append(np.std(c,ddof=1)) print(np.mean(X)) print(np.mean(Y)) plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".") plt.xlabel("Accuracy (w/ standard dev.)") plt.ylabel("Completeness (w/ standard dev.)") plt.xlim((0,1.1)) plt.ylim((0,1.1)) plt.show() #random.sample(transcriptions[(imageIndex,lineIndex)],3) #lcs_string = lcs(transcriptions[(imageIndex,lineIndex)]) #lcs_string2 = lcs([gold_transcriptions[(imageIndex,lineIndex)],lcs_string]) #print lcs_string2
lcs
identifier_name
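The two numbers computed against the gold transcriptions are accuracy = |LCS(gold, t)| / |t| (how much of the transcription t is supported by the gold line) and completeness = |LCS(gold, t)| / |gold| (how much of the gold line was recovered). A tiny worked example using the strings from the "important cases" comment:

gold = 'gold but black'   # 14 characters
t = 'gold out flack'      # 14 characters
common = 12               # |LCS(gold, t)|, e.g. 'gold ut lack'
accuracy = common / len(t)         # 12/14 = 0.857...
completeness = common / len(gold)  # 12/14 = 0.857...

The two only differ when the transcription and the gold line have different lengths.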
longestCommonSubsequence.py
#!/usr/bin/env python __author__ = 'greg' from copy import deepcopy import numpy as np import matplotlib.pyplot as plt import datetime import os import itertools #important cases # ('gold but black', 'gold out flack', 'Gold ') # [4, 3, 2, 1] # [8, 3, 2, 1] # [4, 3, 2, 1] # [False, True, False] # [['g', 'old ', 'but black'], ['g', 'old', ' out', ' ', 'flack'], ['G', 'old ']] # for Greg - which computer am I on? if os.path.exists("/home/ggdhines"): base_directory = "/home/ggdhines" code_directory = base_directory + "/github" elif os.path.exists("/Users/greg"): base_directory = "/Users/greg" code_directory = base_directory + "/Code" else: base_directory = "/home/greg" code_directory = base_directory + "/github" #each image that was sent out contained several lines to be transcribed #give the index of the image and the line imageIndex = 1 lineIndex = 4 #which input files do you want to read in - they need to be of the form transcribe$i.txt #right now can handle at most only 3 or so files - NP-hard problem. Future work might be to make this #code more scalable - probably with branch and bound approach inputFiles = [1,2,3,4,5] def load_file(fname):
def lcs(lines): #find the length of each string stringLength = [len(l)+1 for l in lines] #record the length of the longest common subsequence - assumes a unique LCS #that is, there might be more than one longest common subsequence. Have encountered this in practice #but the strings are usually similar enough that it doesn't matter which LCS you choose dynamicMatrix = np.zeros(stringLength) #keep track of the time needed to do the calculation - mainly just because it is NP-hard #want to know how big an input you can handle time1 = datetime.datetime.now() #the following is the dynamic programming approach as shown on the Wikipedia page for longest common subsequence traceMatrices = [np.zeros(stringLength) for l in lines] #transcribed = lines[(imageIndex,lineIndex)] #need to wrap so that numpy is happy #index for iterating over all tuples of characters - one from each string currentIndex = [[1,] for w in lines] #dynamic programming approach - basically just filling in a matrix as we go while True: characters = [lines[j][currentIndex[j][0]-1] for j in range(len(lines))] #if we have a match across all strings if min(characters) == max(characters): #diagonal move newIndex = [[i[0]-1,] for i in currentIndex] #the longest common previous subsequence is a diagonal move backwards for j in range(len(newIndex)): traceMatrices[j][currentIndex] = newIndex[j] dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] + 1 else: #either an up or a sideways move #find which is the maximum - assume unique maxLength = -1 axis = None #find out where the previous LCS is - either up or sideways for j in range(len(currentIndex)): #move backwards along this axis newIndex = deepcopy(currentIndex) newIndex[j][0] += -1 if dynamicMatrix[newIndex][0] > maxLength: maxLength = dynamicMatrix[newIndex][0] axis = j newIndex = deepcopy(currentIndex) newIndex[axis][0] += -1 for j in range(len(newIndex)): traceMatrices[j][currentIndex] = newIndex[j] dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] #iterate to the next tuple of characters for j in range(0,len(currentIndex)): currentIndex[j][0] += 1 if currentIndex[j][0] == (len(lines[j])+1): currentIndex[j][0] = 1 else: break if currentIndex == [[1,] for l in lines]: break #check to see if the last tuple of characters is a match lastCharacter = [t[-1] for t in lines] s = [[] for t in lines] lcs_length = 0 if min(lastCharacter) == max(lastCharacter): lcs_length += 1 for i,w in enumerate(lines): s[i].append(len(w)-1) #read out the LCS by travelling backwards (up, left or diagonal) through the matrix endPoint = [[-1,] for j in lines] cell = [[int(traceMatrices[j][endPoint]),] for j in range(len(lines))] while cell != [[0,] for j in range(len(lines))]: newcell = [[int(traceMatrices[j][cell][0]),] for j in range(len(lines))] #if we have a diagonal move - this corresponds to a point in the LCS allChange = not(False in [a!=b for (a,b) in zip(cell,newcell)]) if allChange: lcs_length += 1 for j in range(len(lines)): s[j].append(newcell[j][0]) cell = newcell #print out how long this took time2 = datetime.datetime.now() # use the first string to actually create the LCS lcs_string = "" for i,c in enumerate(lines[0]): if i in s[0]: lcs_string += c # print time2-time1 # #print out the LCS in green, all other characters in red results = [[] for l in lines] at_lcs = [None for l in lines] agreement = [] s = [sorted(s_temp) for s_temp in s] LCStuples = [[s[j][i] for j in range(len(lines))] for i in range(len(s[0]))] LCSsequences = [[LCStuples[0]]] for i in range(1,len(s[0])): max_character_jump = 
max([(s[j][i] - s[j][i-1]) for j in range(len(lines))]) if max_character_jump > 1: LCSsequences.append([]) LCSsequences[-1].append(LCStuples[i]) segments = {} lcs_string = "" for j in range(len(lines)): currentIndex = 0 results = [] for sequenceIndex,nextSequence in enumerate(LCSsequences): firstLCSCharacter = nextSequence[0][j] lastLCSCharacter = nextSequence[-1][j] l = lines[j][currentIndex:firstLCSCharacter] if l != "": if not (2*sequenceIndex) in segments: segments[2*sequenceIndex] = [l] else: segments[2*sequenceIndex].append(l) # now extract the LCS - we only need to do this once, since everyone is in agreement if j == 0: l = lines[0][firstLCSCharacter:lastLCSCharacter+1] segments[2*sequenceIndex+1] = l[:] lcs_string += l currentIndex = lastLCSCharacter + 1 l = lines[j][currentIndex:] if l != "": if not (2*(sequenceIndex+1)) in segments: segments[2*(sequenceIndex+1)] = [l] else: segments[2*(sequenceIndex+1)].append(l) # results.append((l,-sequenceIndex-2)) # segments.add(-sequenceIndex-2) return lcs_string,segments from termcolor import colored for j,tran in enumerate(lines): print(s[j]) for i,c in enumerate(tran): if i in s[j]: now_at_csl = True #print colored(c,'green'), else: now_at_csl = False #print colored(c,'red'), if now_at_csl != at_lcs[j]: results[j].append("") if j == 0: agreement.append(now_at_csl) at_lcs[j] = now_at_csl results[j][-1] += c #print return lcs_string,agreement,results transcriptions = {} #read in the files - right now just set up to work on Greg's computer for i in inputFiles: fname = base_directory+"/Databases/transcribe/transcribe"+str(i)+".txt" individual_transcriptions = load_file(fname) for key,line in individual_transcriptions.items(): if not(key in transcriptions): transcriptions[key] = [line] else: transcriptions[key].append(line) gold_fname = base_directory+"/Dropbox/goldTranscriptions.txt" gold_transcriptions = load_file(gold_fname) X = [] Y = [] Xstd = [] Ystd = [] for imageIndex in range(3): for lineIndex in range(5): a = [] c = [] for l in transcriptions[(imageIndex,lineIndex)]: lcs_string,segments = lcs([gold_transcriptions[(imageIndex,lineIndex)],l]) accuracy = len(lcs_string)/float(len(l)) completeness = len(lcs_string)/float(len(gold_transcriptions[(imageIndex,lineIndex)])) a.append(accuracy) c.append(completeness) X.append(np.mean(a)) Y.append(np.mean(c)) Xstd.append(np.std(a,ddof=1)) Ystd.append(np.std(c,ddof=1)) print(X) print(np.mean(X)) print(np.mean(Y)) plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".") plt.xlabel("Accuracy (w/ standard dev.)") plt.ylabel("Completeness (w/ standard dev.)") plt.xlim((0,1.1)) plt.ylim((0,1.1)) plt.show() assert False def majority_vote(segments,num_users=3): new_lcs = "" for segmentIndex in sorted(list(segments)): # is this segment part of the LCS (if so, don't worry about merging) if segmentIndex%2 == 0: votes = {} for transcription in segments[segmentIndex]: if transcription in votes: votes[transcription] += 1 else: votes[transcription] = 1 if max(votes.values()) == 2: most_likely = max(votes.items(),key = lambda x:x[1]) new_lcs += most_likely[0] else: new_lcs += segments[segmentIndex] # otherwise - merge # grab all of those segments with the correct segment index return new_lcs def findsubsets(S,m): return set(itertools.combinations(S, m)) for imageIndex in range(3): for lineIndex in range(5): print (imageIndex,lineIndex) a = [] c = [] #print gold_transcriptions[(imageIndex,lineIndex)] for i,s in enumerate(findsubsets(transcriptions[(imageIndex,lineIndex)],3)): lcs_string,segments = lcs(s) new_lcs_string = 
majority_vote(segments) lcs_string2,segments = lcs([gold_transcriptions[(imageIndex,lineIndex)],lcs_string]) accuracy = len(lcs_string2)/float(len(lcs_string)) completeness = len(lcs_string2)/float(len(gold_transcriptions[(imageIndex,lineIndex)])) a.append(accuracy) c.append(completeness) #print new_lcs_string #print np.mean(a) X.append(np.mean(a)) Y.append(np.mean(c)) Xstd.append(np.std(a,ddof=1)) Ystd.append(np.std(c,ddof=1)) print(np.mean(X)) print(np.mean(Y)) plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".") plt.xlabel("Accuracy (w/ standard dev.)") plt.ylabel("Completeness (w/ standard dev.)") plt.xlim((0,1.1)) plt.ylim((0,1.1)) plt.show() #random.sample(transcriptions[(imageIndex,lineIndex)],3) #lcs_string = lcs(transcriptions[(imageIndex,lineIndex)]) #lcs_string2 = lcs([gold_transcriptions[(imageIndex,lineIndex)],lcs_string]) #print lcs_string2
currentImage = 0 currentLine = -1 individual_transcriptions = {} with open(fname,"rb") as f: for l in f.readlines(): if l == "\n": currentImage += 1 currentLine = -1 continue currentLine += 1 individual_transcriptions[(currentImage,currentLine)] = l[:-1] return individual_transcriptions
identifier_body
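The segments dictionary that lcs() returns encodes its result by index parity: odd keys hold an agreed LCS run (stored once as a string), even keys hold a list of the per-transcription fragments that fall between agreement runs. A hand-built illustration for the ('gold but black', 'gold out flack', 'Gold ') case listed at the top of the file; the exact values are indicative, not a captured output:

segments = {
    0: ['g', 'g', 'G'],              # disagreement before the agreed run
    1: 'old ',                       # the agreed run itself, stored once
    2: ['but black', ' out flack'],  # disagreement after it ('Gold ' contributes nothing)
}

majority_vote() is then meant to copy the agreed odd entries through and vote among the strings in each even entry.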
longestCommonSubsequence.py
#!/usr/bin/env python __author__ = 'greg' from copy import deepcopy import numpy as np import matplotlib.pyplot as plt import datetime import os import itertools #important cases # ('gold but black', 'gold out flack', 'Gold ') # [4, 3, 2, 1] # [8, 3, 2, 1] # [4, 3, 2, 1] # [False, True, False] # [['g', 'old ', 'but black'], ['g', 'old', ' out', ' ', 'flack'], ['G', 'old ']] # for Greg - which computer am I on? if os.path.exists("/home/ggdhines"): base_directory = "/home/ggdhines" code_directory = base_directory + "/github" elif os.path.exists("/Users/greg"): base_directory = "/Users/greg" code_directory = base_directory + "/Code" else: base_directory = "/home/greg" code_directory = base_directory + "/github" #each image that was sent out contained several lines to be transcribed #give the index of the image and the line imageIndex = 1 lineIndex = 4 #which input files do you want to read in - they need to be of the form transcribe$i.txt #right now can handle at most only 3 or so files - NP-hard problem. Future work might be to make this #code more scalable - probably with branch and bound approach inputFiles = [1,2,3,4,5] def load_file(fname): currentImage = 0 currentLine = -1 individual_transcriptions = {} with open(fname,"rb") as f: for l in f.readlines(): if l == "\n": currentImage += 1 currentLine = -1 continue currentLine += 1 individual_transcriptions[(currentImage,currentLine)] = l[:-1] return individual_transcriptions def lcs(lines): #find the length of each string stringLength = [len(l)+1 for l in lines] #record the length of the longest common subsequence - assumes a unique LCS #that is, there might be more than one longest common subsequence. Have encountered this in practice #but the strings are usually similar enough that it doesn't matter which LCS you choose dynamicMatrix = np.zeros(stringLength) #keep track of the time needed to do the calculation - mainly just because it is NP-hard #want to know how big an input you can handle time1 = datetime.datetime.now() #the following is the dynamic programming approach as shown on the Wikipedia page for longest common subsequence traceMatrices = [np.zeros(stringLength) for l in lines] #transcribed = lines[(imageIndex,lineIndex)] #need to wrap so that numpy is happy #index for iterating over all tuples of characters - one from each string currentIndex = [[1,] for w in lines] #dynamic programming approach - basically just filling in a matrix as we go while True: characters = [lines[j][currentIndex[j][0]-1] for j in range(len(lines))] #if we have a match across all strings if min(characters) == max(characters): #diagonal move newIndex = [[i[0]-1,] for i in currentIndex] #the longest common previous subsequence is a diagonal move backwards for j in range(len(newIndex)): traceMatrices[j][currentIndex] = newIndex[j] dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] + 1 else: #either an up or a sideways move #find which is the maximum - assume unique maxLength = -1 axis = None #find out where the previous LCS is - either up or sideways for j in range(len(currentIndex)): #move backwards along this axis newIndex = deepcopy(currentIndex) newIndex[j][0] += -1 if dynamicMatrix[newIndex][0] > maxLength: maxLength = dynamicMatrix[newIndex][0] axis = j newIndex = deepcopy(currentIndex) newIndex[axis][0] += -1 for j in range(len(newIndex)): traceMatrices[j][currentIndex] = newIndex[j] dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] #iterate to the next tuple of characters for j in range(0,len(currentIndex)): currentIndex[j][0] += 1 if 
currentIndex[j][0] == (len(lines[j])+1): currentIndex[j][0] = 1 else:
if currentIndex == [[1,] for l in lines]: break #check to see if the last tuple of characters is a match lastCharacter = [t[-1] for t in lines] s = [[] for t in lines] lcs_length = 0 if min(lastCharacter) == max(lastCharacter): lcs_length += 1 for i,w in enumerate(lines): s[i].append(len(w)-1) #read out the LCS by travelling backwards (up, left or diagonal) through the matrix endPoint = [[-1,] for j in lines] cell = [[int(traceMatrices[j][endPoint]),] for j in range(len(lines))] while cell != [[0,] for j in range(len(lines))]: newcell = [[int(traceMatrices[j][cell][0]),] for j in range(len(lines))] #if we have a diagonal move - this corresponds to a point in the LCS allChange = not(False in [a!=b for (a,b) in zip(cell,newcell)]) if allChange: lcs_length += 1 for j in range(len(lines)): s[j].append(newcell[j][0]) cell = newcell #print out how long this took time2 = datetime.datetime.now() # use the first string to actually create the LCS lcs_string = "" for i,c in enumerate(lines[0]): if i in s[0]: lcs_string += c # print time2-time1 # #print out the LCS in green, all other characters in red results = [[] for l in lines] at_lcs = [None for l in lines] agreement = [] s = [sorted(s_temp) for s_temp in s] LCStuples = [[s[j][i] for j in range(len(lines))] for i in range(len(s[0]))] LCSsequences = [[LCStuples[0]]] for i in range(1,len(s[0])): max_character_jump = max([(s[j][i] - s[j][i-1]) for j in range(len(lines))]) if max_character_jump > 1: LCSsequences.append([]) LCSsequences[-1].append(LCStuples[i]) segments = {} lcs_string = "" for j in range(len(lines)): currentIndex = 0 results = [] for sequenceIndex,nextSequence in enumerate(LCSsequences): firstLCSCharacter = nextSequence[0][j] lastLCSCharacter = nextSequence[-1][j] l = lines[j][currentIndex:firstLCSCharacter] if l != "": if not (2*sequenceIndex) in segments: segments[2*sequenceIndex] = [l] else: segments[2*sequenceIndex].append(l) # now extract the LCS - we only need to do this once, since everyone is in agreement if j == 0: l = lines[0][firstLCSCharacter:lastLCSCharacter+1] segments[2*sequenceIndex+1] = l[:] lcs_string += l currentIndex = lastLCSCharacter + 1 l = lines[j][currentIndex:] if l != "": if not (2*(sequenceIndex+1)) in segments: segments[2*(sequenceIndex+1)] = [l] else: segments[2*(sequenceIndex+1)].append(l) # results.append((l,-sequenceIndex-2)) # segments.add(-sequenceIndex-2) return lcs_string,segments from termcolor import colored for j,tran in enumerate(lines): print(s[j]) for i,c in enumerate(tran): if i in s[j]: now_at_csl = True #print colored(c,'green'), else: now_at_csl = False #print colored(c,'red'), if now_at_csl != at_lcs[j]: results[j].append("") if j == 0: agreement.append(now_at_csl) at_lcs[j] = now_at_csl results[j][-1] += c #print return lcs_string,agreement,results transcriptions = {} #read in the files - right now just set up to work on Greg's computer for i in inputFiles: fname = base_directory+"/Databases/transcribe/transcribe"+str(i)+".txt" individual_transcriptions = load_file(fname) for key,line in individual_transcriptions.items(): if not(key in transcriptions): transcriptions[key] = [line] else: transcriptions[key].append(line) gold_fname = base_directory+"/Dropbox/goldTranscriptions.txt" gold_transcriptions = load_file(gold_fname) X = [] Y = [] Xstd = [] Ystd = [] for imageIndex in range(3): for lineIndex in range(5): a = [] c = [] for l in transcriptions[(imageIndex,lineIndex)]: lcs_string,segments = lcs([gold_transcriptions[(imageIndex,lineIndex)],l]) accuracy = len(lcs_string)/float(len(l)) 
completeness = len(lcs_string)/float(len(gold_transcriptions[(imageIndex,lineIndex)])) a.append(accuracy) c.append(completeness) X.append(np.mean(a)) Y.append(np.mean(c)) Xstd.append(np.std(a,ddof=1)) Ystd.append(np.std(c,ddof=1)) print(X) print(np.mean(X)) print(np.mean(Y)) plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".") plt.xlabel("Accuracy (w/ standard dev.)") plt.ylabel("Completeness (w/ standard dev.)") plt.xlim((0,1.1)) plt.ylim((0,1.1)) plt.show() assert False def majority_vote(segments,num_users=3): new_lcs = "" for segmentIndex in sorted(list(segments)): # is this segment part of the LCS (if so, don't worry about merging) if segmentIndex%2 == 0: votes = {} for transcription in segments[segmentIndex]: if transcription in votes: votes[transcription] += 1 else: votes[transcription] = 1 if max(votes.values()) == 2: most_likely = max(votes.items(),key = lambda x:x[1]) new_lcs += most_likely[0] else: new_lcs += segments[segmentIndex] # otherwise - merge # grab all of those segments with the correct segment index return new_lcs def findsubsets(S,m): return set(itertools.combinations(S, m)) for imageIndex in range(3): for lineIndex in range(5): print (imageIndex,lineIndex) a = [] c = [] #print gold_transcriptions[(imageIndex,lineIndex)] for i,s in enumerate(findsubsets(transcriptions[(imageIndex,lineIndex)],3)): lcs_string,segments = lcs(s) new_lcs_string = majority_vote(segments) lcs_string2,segments = lcs([gold_transcriptions[(imageIndex,lineIndex)],lcs_string]) accuracy = len(lcs_string2)/float(len(lcs_string)) completeness = len(lcs_string2)/float(len(gold_transcriptions[(imageIndex,lineIndex)])) a.append(accuracy) c.append(completeness) #print new_lcs_string #print np.mean(a) X.append(np.mean(a)) Y.append(np.mean(c)) Xstd.append(np.std(a,ddof=1)) Ystd.append(np.std(c,ddof=1)) print(np.mean(X)) print(np.mean(Y)) plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".") plt.xlabel("Accuracy (w/ standard dev.)") plt.ylabel("Completeness (w/ standard dev.)") plt.xlim((0,1.1)) plt.ylim((0,1.1)) plt.show() #random.sample(transcriptions[(imageIndex,lineIndex)],3) #lcs_string = lcs(transcriptions[(imageIndex,lineIndex)]) #lcs_string2 = lcs([gold_transcriptions[(imageIndex,lineIndex)],lcs_string]) #print lcs_string2
break
conditional_block
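The "NP-hard" remarks in the comments come down to table size: the dynamic-programming table allocated by lcs() has prod(len(s_i) + 1) cells, one dimension per input string, so it grows exponentially with the number of transcriptions being merged, which is why the script sticks to subsets of about 3. A quick size check:

import numpy as np

lengths = [60, 60, 60]                    # three typical 60-character lines
print(np.prod([l + 1 for l in lengths]))  # 61**3 = 226981 cells
# a fourth 60-character input multiplies this by 61 again (~13.8 million cells),
# and lcs() also allocates one trace matrix of the same size per input string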
messages.go
package telegram import ( "bufio" "bytes" "errors" "fmt" "io" "log" ) // SerializeRequestMessage serializes the request message to w. func SerializeRequestMessage(w io.Writer, rm RequestMessage) (int, error) { msg := string(StartChar) + string(RequestCommandChar) + rm.deviceAddress + string(EndChar) + string(CR) + string(LF) return w.Write([]byte(msg)) } // DataMessage type captures the data message. type DataMessage struct { DataSets *[]DataSet bcc Bcc } func (d *DataMessage) String() string { b := &bytes.Buffer{} fmt.Fprintf(b, "bcc: %d %+v", d.bcc, *d.DataSets) return b.String() } // DataSet type. type DataSet struct { Address string Value string Unit string } // Bcc type captures the checksum. type Bcc byte // Digest processes the next byte for the checksum. func (bcc *Bcc) Digest(b ...byte) { for _, i := range b { *bcc = (*bcc) ^ Bcc(i) } } // ProtocolControlCharacter type. type ProtocolControlCharacter byte const ( // ProtControlNormal value. ProtControlNormal = ProtocolControlCharacter(byte('0')) // ProtControlSecondary value. ProtControlSecondary = ProtocolControlCharacter(byte('1')) ) // AcknowledgeMode type. type AcknowledgeMode byte // BaudrateIdentification type. type BaudrateIdentification byte // Baudrate returns the numeric baudrate from the code in the ID message. func Baudrate(br BaudrateIdentification) int { switch rune(br) { case '0': return 300 case 'A', '1': return 600 case 'B', '2': return 1200 case 'C', '3': return 2400 case 'D', '4': return 4800 case 'E', '5': return 9600 case 'F', '6': return 19200 }
AckModeDataReadOut = AcknowledgeMode(byte('0')) AckModeProgramming = AcknowledgeMode(byte('1')) AckModeBinary = AcknowledgeMode(byte('2')) AckModeReserved = AcknowledgeMode(byte('3')) AckModeManufacture = AcknowledgeMode(byte('6')) AckModeIllegalMode = AcknowledgeMode(byte(' ')) ) const ( CR = byte(0x0D) LF = byte(0x0A) FrontBoundaryChar = byte('(') RearBoundaryChar = byte(')') UnitSeparator = byte('*') StartChar = byte('/') RequestCommandChar = byte('?') EndChar = byte('!') StxChar = byte(0x02) EtxChar = byte(0x03) SeqDelChar = byte('\\') ) // ValidTestDataMessage can be used for testing. const ValidTestDataMessage = string(StxChar) + `1.1.1.1(12*kWh)` + `1.1.1.2(12*kWh)` + "\r\n" + string(EndChar) + string(CR) + string(LF) + string(EtxChar) + string(Bcc(0)) func ValidAddressChar(b byte) bool { switch b { case FrontBoundaryChar, RearBoundaryChar, StartChar, EndChar: return false default: return true } } func ValidValueChar(b byte) bool { switch b { case FrontBoundaryChar, UnitSeparator, RearBoundaryChar, StartChar, EndChar: return false default: return true } } func ValidUnitChar(b byte) bool { return ValidAddressChar(b) } // AcknowledgeModeFromByte returns the acknowledge mode from the given byte value. func AcknowledgeModeFromByte(a byte) AcknowledgeMode { switch a { case 0, 1, 2: return AcknowledgeMode(a) case 3, 4, 5: return AckModeReserved } switch { case 6 <= a && a <= 9: case 'A' <= a && a <= 'Z': return AckModeManufacture } return AckModeIllegalMode } var ( ErrCRFound = errors.New("End CR found") ErrNotImplementedYet = errors.New("not implemented yet") ErrFormatError = errors.New("format error") ErrFormatNoChars = errors.New("no chars found") ErrEmptyDataLine = errors.New("empty data line found") ErrUnexpectedEOF = errors.New("unexpected end of file") ErrNoBlockEndChar = errors.New("no block end char found") ErrNoStartChar = errors.New("no StartChar found") ErrAddressTooLong = errors.New("field too long") ErrValueTooLong = errors.New("field too long") ErrUnitTooLong = errors.New("field too long") ErrIdentificationTooLong = errors.New("identification field too long") ) func ParseDataMessage(r *bufio.Reader) (*DataMessage, error) { var b byte var err error var res *[]DataSet var bcc = Bcc(0) if verbose { log.Println("Starting ParseDataMessage") } // Consume all bytes till a start of message is found. for { b, err = r.ReadByte() if err != nil { return nil, ErrUnexpectedEOF } if b == StxChar { break } } if verbose { log.Println("Found StxChar") } // Get the datasets. res, err = ParseDataBlock(r, &bcc) if err != nil { return nil, err } _, err = ParseDataMessageEnd(r, &bcc) if err != nil { return nil, err } return &DataMessage{ DataSets: res, bcc: bcc, }, nil } // ParseDataMessageEnd parses the end of a datamessage. // ! 
CR LF ETX BCC func ParseDataMessageEnd(r *bufio.Reader, bcc *Bcc) (*DataMessage, error) { var b byte var err error if verbose { log.Println("Starting ParseDataMessageEnd") } b, err = r.ReadByte() if err != nil { return nil, err } if b != EndChar { if verbose { log.Printf("ParseDataMessageEnd, error parsing EndChar, found %d", b) } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != CR { if verbose { log.Println("ParseDataMessageEnd, error parsing CR") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != LF { if verbose { log.Println("ParseDataMessageEnd, error parsing LF") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != EtxChar { if verbose { log.Println("ParseDataMessageEnd, error parsing EtxChar") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { if verbose { log.Println("ParseDataMessageEnd, error parsing Bcc") } return nil, err } return &DataMessage{ bcc: *bcc, }, nil } // ParseDataBlock parses until no valid data lines can be parsed. func ParseDataBlock(r *bufio.Reader, bcc *Bcc) (*[]DataSet, error) { var err error var res []DataSet if verbose { log.Println("Starting ParseDataBlock") } for { var ds []DataSet ds, err = ParseDataLine(r, bcc) if err != nil { if len(res) <= 0 { return nil, ErrEmptyDataLine } return &res, nil } res = append(res, ds...) } } // ParseDataMessage reads bytes from r till a new complete datamessage has been read or an error occurred. // func ParseDataMessage(r io.Reader) (*DataMessage, error) { // return nil, ErrNotImplementedYet // } // ParseDataLine parses DataSets until a CR LF has been detected. // Data lines consist of one or more datasets. func ParseDataLine(r *bufio.Reader, bcc *Bcc) ([]DataSet, error) { var b byte var err error var ds *DataSet var res []DataSet if verbose { log.Println("Starting ParseDataLine") } for { ds, err = ParseDataSet(r, bcc) if err != nil { r.UnreadByte() return nil, ErrFormatError } res = append(res, *ds) // Test if the next two chars are CR LF b, err = r.ReadByte() if err == nil && b == CR { bcc.Digest(b) b, err = r.ReadByte() if err == nil && b == LF { bcc.Digest(b) return res, nil } // Error, CR not followed by LF return nil, ErrFormatError } r.UnreadByte() } } // ParseDataSet reads bytes from r till a new complete dataset has been read or an error occurred. // A data message contains a list of data sets. Each data set consists of 3 fields "address", "value", and "unit". // Each of these fields is optional and may thus be equal to the empty string. // Data set ::= Address '(' Value(optional) ('*' unit)(optional) ')' // Ignores CR and LF and reads up to the first ! func ParseDataSet(r *bufio.Reader, bcc *Bcc) (*DataSet, error) { // read chars until the front boundary. var b byte var err error var va [100]byte var v = va[:0] res := &DataSet{} // Read the address till FrontBoundaryChar == ( if verbose { log.Println("Starting ParseDataSet") } if verbose { log.Println("Scanning for Address") } ScanAddress: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatNoChars } switch b { case CR, LF: r.UnreadByte() return nil, ErrCRFound case FrontBoundaryChar: bcc.Digest(b) break ScanAddress default: bcc.Digest(b) if !ValidAddressChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 16 { return nil, ErrAddressTooLong } } } // Address read. 
res.Address = string(v) v = v[:0] // Scan for value till * or ) if verbose { log.Println("Scanning for Value") } ScanValue: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } bcc.Digest(b) switch b { case RearBoundaryChar, UnitSeparator: break ScanValue default: if !ValidValueChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 32 { return nil, ErrValueTooLong } } } res.Value = string(v) if b == RearBoundaryChar { res.Unit = "" return res, nil } v = v[:0] if verbose { log.Println("Scanning for Unit") } ScanUnit: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } bcc.Digest(b) switch b { case RearBoundaryChar: break ScanUnit default: if !ValidValueChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 16 { return nil, ErrUnitTooLong } } } res.Unit = string(v) return res, nil } func ParseIdentificationMessage(r *bufio.Reader) (*IdentifcationMessage, error) { var b byte var err error var res = &IdentifcationMessage{} // StartChar b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if b != StartChar { return nil, ErrFormatError } // Manufacturer ID var id [3]byte // mID for i := 0; i < len(id); i++ { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } id[i] = b } res.ManID = string(id[:]) // baudrate mode b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if Baudrate(BaudrateIdentification(b)) == 0 { return nil, ErrFormatError } res.BaudID = b var vt [33]byte var v = vt[:0] // \W or not b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if b == SeqDelChar { // Read a W b, err = r.ReadByte() if err != nil || b != byte('W') { return nil, ErrFormatError } } else { v = append(v, b) } ScanIdentification: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } switch { case b == CR: break ScanIdentification default: v = append(v, b) if len(v) > 16 { return nil, ErrIdentificationTooLong } } } res.Identification = string(v) // Test if the last char is LF b, err = r.ReadByte() if err != nil || b != LF { return nil, ErrFormatError } return res, nil }
return 0 } const (
random_line_split
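The Bcc type in the messages.go record above is a running XOR over every digested byte: the block check character transmitted after ETX. The fold is easy to reproduce for offline checks; a minimal Python sketch (Python is used for the examples in this collection), where the payload span (everything after STX, up to and including ETX) mirrors what ParseDataMessage digests:

from functools import reduce

def bcc(data: bytes) -> int:
    # XOR-fold the bytes, starting from 0, like Bcc.Digest
    return reduce(lambda acc, b: acc ^ b, data, 0)

payload = b'1.1.1.1(12*kWh)\r\n!\r\n\x03'  # data line, end char, CR LF, ETX
print(hex(bcc(payload)))                    # the final BCC byte for this telegram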
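ParseDataSet implements the grammar Data set ::= Address '(' Value? ('*' Unit)? ')'. For quick experiments outside the streaming parser, the same shape can be captured with a regular expression; this rough sketch ignores the per-character validity rules and the 16/32-character length limits the Go code enforces:

import re

DATASET = re.compile(r'([^()/!]*)\(([^()*/!]*)(?:\*([^()/!]*))?\)')

for address, value, unit in DATASET.findall('1.1.1.1(12*kWh)1.1.1.2(12*kWh)'):
    print(address, value, unit or '<no unit>')
# 1.1.1.1 12 kWh
# 1.1.1.2 12 kWh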
messages.go
package telegram import ( "bufio" "bytes" "errors" "fmt" "io" "log" ) // SerializeRequestMessage serializes the request message to w. func SerializeRequestMessage(w io.Writer, rm RequestMessage) (int, error) { msg := string(StartChar) + string(RequestCommandChar) + rm.deviceAddress + string(EndChar) + string(CR) + string(LF) return w.Write([]byte(msg)) } // DataMessage type captures the data message. type DataMessage struct { DataSets *[]DataSet bcc Bcc } func (d *DataMessage) String() string { b := &bytes.Buffer{} fmt.Fprintf(b, "bcc: %d %+v", d.bcc, *d.DataSets) return b.String() } // DataSet type. type DataSet struct { Address string Value string Unit string } // Bcc type captures the checksum. type Bcc byte // Digest processes the next byte for the checksum. func (bcc *Bcc) Digest(b ...byte) { for _, i := range b { *bcc = (*bcc) ^ Bcc(i) } } // ProtocolControlCharacter type. type ProtocolControlCharacter byte const ( // ProtControlNormal value. ProtControlNormal = ProtocolControlCharacter(byte('0')) // ProtControlSecondary value. ProtControlSecondary = ProtocolControlCharacter(byte('1')) ) // AcknowledgeMode type. type AcknowledgeMode byte // BaudrateIdentification type. type BaudrateIdentification byte // Baudrate returns the numeric baudrate from the code in the ID message. func Baudrate(br BaudrateIdentification) int { switch rune(br) { case '0': return 300 case 'A', '1': return 600 case 'B', '2': return 1200 case 'C', '3': return 2400 case 'D', '4': return 4800 case 'E', '5': return 9600 case 'F', '6': return 19200 } return 0 } const ( AckModeDataReadOut = AcknowledgeMode(byte('0')) AckModeProgramming = AcknowledgeMode(byte('1')) AckModeBinary = AcknowledgeMode(byte('2')) AckModeReserved = AcknowledgeMode(byte('3')) AckModeManufacture = AcknowledgeMode(byte('6')) AckModeIllegalMode = AcknowledgeMode(byte(' ')) ) const ( CR = byte(0x0D) LF = byte(0x0A) FrontBoundaryChar = byte('(') RearBoundaryChar = byte(')') UnitSeparator = byte('*') StartChar = byte('/') RequestCommandChar = byte('?') EndChar = byte('!') StxChar = byte(0x02) EtxChar = byte(0x03) SeqDelChar = byte('\\') ) // ValidTestDataMessage can be used for testing. const ValidTestDataMessage = string(StxChar) + `1.1.1.1(12*kWh)` + `1.1.1.2(12*kWh)` + "\r\n" + string(EndChar) + string(CR) + string(LF) + string(EtxChar) + string(Bcc(0)) func ValidAddressChar(b byte) bool { switch b { case FrontBoundaryChar, RearBoundaryChar, StartChar, EndChar: return false default: return true } } func ValidValueChar(b byte) bool { switch b { case FrontBoundaryChar, UnitSeparator, RearBoundaryChar, StartChar, EndChar: return false default: return true } } func ValidUnitChar(b byte) bool { return ValidAddressChar(b) } // AcknowledgeModeFromByte returns the acknowledge mode from the given byte value. 
func AcknowledgeModeFromByte(a byte) AcknowledgeMode { switch a { case 0, 1, 2: return AcknowledgeMode(a) case 3, 4, 5: return AckModeReserved } switch { case 6 <= a && a <= 9: case 'A' <= a && a <= 'Z': return AckModeManufacture } return AckModeIllegalMode } var ( ErrCRFound = errors.New("End CR found") ErrNotImplementedYet = errors.New("not implemented yet") ErrFormatError = errors.New("format error") ErrFormatNoChars = errors.New("no chars found") ErrEmptyDataLine = errors.New("empty data line found") ErrUnexpectedEOF = errors.New("unexpected end of file") ErrNoBlockEndChar = errors.New("no block end char found") ErrNoStartChar = errors.New("no StartChar found") ErrAddressTooLong = errors.New("field too long") ErrValueTooLong = errors.New("field too long") ErrUnitTooLong = errors.New("field too long") ErrIdentificationTooLong = errors.New("identification field too long") ) func ParseDataMessage(r *bufio.Reader) (*DataMessage, error) { var b byte var err error var res *[]DataSet var bcc = Bcc(0) if verbose { log.Println("Starting ParseDataMessage") } // Consume all bytes till a start of message is found. for { b, err = r.ReadByte() if err != nil { return nil, ErrUnexpectedEOF } if b == StxChar { break } } if verbose { log.Println("Found StxChar") } // Get the datasets. res, err = ParseDataBlock(r, &bcc) if err != nil { return nil, err } _, err = ParseDataMessageEnd(r, &bcc) if err != nil { return nil, err } return &DataMessage{ DataSets: res, bcc: bcc, }, nil } // ParseDataMessageEnd parses the end of a datamessage. // ! CR LF ETX BCC func ParseDataMessageEnd(r *bufio.Reader, bcc *Bcc) (*DataMessage, error) { var b byte var err error if verbose { log.Println("Starting ParseDataMessageEnd") } b, err = r.ReadByte() if err != nil { return nil, err } if b != EndChar { if verbose { log.Printf("ParseDataMessageEnd, error parsing EndChar, found %d", b) } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != CR { if verbose { log.Println("ParseDataMessageEnd, error parsing CR") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil
if b != LF { if verbose { log.Println("ParseDataMessageEnd, error parsing LF") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != EtxChar { if verbose { log.Println("ParseDataMessageEnd, error parsing EtxChar") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { if verbose { log.Println("ParseDataMessageEnd, error parsing Bcc") } return nil, err } return &DataMessage{ bcc: *bcc, }, nil } // ParseDataBlock parses til no valid data lines can be parsed. func ParseDataBlock(r *bufio.Reader, bcc *Bcc) (*[]DataSet, error) { var err error var res []DataSet if verbose { log.Println("Starting ParseDataBlock") } for { var ds []DataSet ds, err = ParseDataLine(r, bcc) if err != nil { if len(res) <= 0 { return nil, ErrEmptyDataLine } return &res, nil } res = append(res, ds...) } } // ParseDataMessage reads bytes from r till a new complete datamessage has been read or an error occured. // func ParseDataMessage(r io.Reader) (*DataMessage, error) { // return nil, ErrNotImplementedYet // } // ParseDataLine parses a DataSets till a CR LF has been detected. // Data lines consist of one or more datasets. func ParseDataLine(r *bufio.Reader, bcc *Bcc) ([]DataSet, error) { var b byte var err error var ds *DataSet var res []DataSet if verbose { log.Println("Starting ParseDataLine") } for { ds, err = ParseDataSet(r, bcc) if err != nil { r.UnreadByte() return nil, ErrFormatError } res = append(res, *ds) // Test if the next two chars are CR LF b, err = r.ReadByte() if err == nil && b == CR { bcc.Digest(b) b, err = r.ReadByte() if err == nil && b == LF { bcc.Digest(b) return res, nil } // Error, CR not followed by LF return nil, ErrFormatError } r.UnreadByte() } } // ParseDataSet reads bytes from r till a new complete dataset has been read or an error occured. // A data message contains a list of data sets. Each data set consists of 3 fields "address", "value", and "unit". // Each of these fields is optional an may thus be equal to the empty string. // Data set ::= Address '(' Value(optional) ('*' unit)(optional) ')' // Ignores CR and LF and reads up to the first ! func ParseDataSet(r *bufio.Reader, bcc *Bcc) (*DataSet, error) { // read chars til Front boundary. var b byte var err error var va [100]byte var v = va[:0] res := &DataSet{} // Read the address till FrontBoundaryChar == ( if verbose { log.Println("Starting ParseDataSet") } if verbose { log.Println("Scanning for Address") } ScanAddress: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatNoChars } switch b { case CR, LF: r.UnreadByte() return nil, ErrCRFound case FrontBoundaryChar: bcc.Digest(b) break ScanAddress default: bcc.Digest(b) if !ValidAddressChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 16 { return nil, ErrAddressTooLong } } } // Address read. 
res.Address = string(v) v = v[:0] // Scan for value till * or ) if verbose { log.Println("Scanning for Value") } ScanValue: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } bcc.Digest(b) switch b { case RearBoundaryChar, UnitSeparator: break ScanValue default: if !ValidValueChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 32 { return nil, ErrValueTooLong } } } res.Value = string(v) if b == RearBoundaryChar { res.Unit = "" return res, nil } v = v[:0] if verbose { log.Println("Scanning for Unit") } ScanUnit: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } bcc.Digest(b) switch b { case RearBoundaryChar: break ScanUnit default: if !ValidValueChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 16 { return nil, ErrUnitTooLong } } } res.Unit = string(v) return res, nil } func ParseIdentificationMessage(r *bufio.Reader) (*IdentifcationMessage, error) { var b byte var err error var res = &IdentifcationMessage{} // StartChar b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if b != StartChar { return nil, ErrFormatError } // Manufacturer ID var id [3]byte // mID for i := 0; i < len(id); i++ { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } id[i] = b } res.ManID = string(id[:]) // baudrate mode b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if Baudrate(BaudrateIdentification(b)) == 0 { return nil, ErrFormatError } res.BaudID = b var vt [33]byte var v = vt[:0] // \W or not b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if b == SeqDelChar { // Read a W b, err = r.ReadByte() if err != nil || b != byte('W') { return nil, ErrFormatError } } else { v = append(v, b) } ScanIdenfication: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } switch { case b == CR: break ScanIdenfication default: v = append(v, b) if len(v) > 16 { return nil, ErrIdentificationTooLong } } } res.Identification = string(v) // Test if the last char is LF b, err = r.ReadByte() if err != nil || b != LF { return nil, ErrFormatError } return res, nil }
{
		return nil, err
	}
conditional_block
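The conditional_block middle above is one of the several identical error arms in ParseDataMessageEnd, which validates the trailing ! CR LF ETX BCC sequence one byte at a time. The following is a hypothetical helper, not part of messages.go, that condenses that read-compare-digest pattern; Bcc and ErrFormatError are redefined locally so the sketch compiles on its own.

package main

import (
	"bufio"
	"errors"
	"fmt"
	"strings"
)

// Local stand-ins for the Bcc checksum type and the format error
// defined in messages.go.
type Bcc byte

func (bcc *Bcc) Digest(b ...byte) {
	for _, i := range b {
		*bcc = (*bcc) ^ Bcc(i)
	}
}

var ErrFormatError = errors.New("format error")

// expectByte reads one byte, verifies it equals want, and folds it
// into the checksum -- the read-compare-digest pattern that
// ParseDataMessageEnd repeats for '!', CR, LF, and ETX.
func expectByte(r *bufio.Reader, bcc *Bcc, want byte) error {
	b, err := r.ReadByte()
	if err != nil {
		return err
	}
	if b != want {
		return ErrFormatError
	}
	bcc.Digest(b)
	return nil
}

func main() {
	r := bufio.NewReader(strings.NewReader("!\r\n\x03"))
	var bcc Bcc
	for _, want := range []byte{'!', 0x0D, 0x0A, 0x03} {
		if err := expectByte(r, &bcc, want); err != nil {
			fmt.Println("parse error:", err)
			return
		}
	}
	fmt.Printf("message end ok, running BCC: 0x%02X\n", byte(bcc))
}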
messages.go
package telegram import ( "bufio" "bytes" "errors" "fmt" "io" "log" ) // SerializeRequestMessage serializes the request message to w. func SerializeRequestMessage(w io.Writer, rm RequestMessage) (int, error) { msg := string(StartChar) + string(RequestCommandChar) + rm.deviceAddress + string(EndChar) + string(CR) + string(LF) return w.Write([]byte(msg)) } // DataMessage type captures the data message. type DataMessage struct { DataSets *[]DataSet bcc Bcc } func (d *DataMessage) String() string { b := &bytes.Buffer{} fmt.Fprintf(b, "bcc: %d %+v", d.bcc, *d.DataSets) return b.String() } // DataSet type. type DataSet struct { Address string Value string Unit string } // Bcc type captures the checksum. type Bcc byte // Digest processes the next byte for the checksum. func (bcc *Bcc) Digest(b ...byte) { for _, i := range b { *bcc = (*bcc) ^ Bcc(i) } } // ProtocolControlCharacter type. type ProtocolControlCharacter byte const ( // ProtControlNormal value. ProtControlNormal = ProtocolControlCharacter(byte('0')) // ProtControlSecondary value. ProtControlSecondary = ProtocolControlCharacter(byte('1')) ) // AcknowledgeMode type. type AcknowledgeMode byte // BaudrateIdentification type. type BaudrateIdentification byte // Baudrate returns the numeric baudrate from the code in the ID message. func Baudrate(br BaudrateIdentification) int
const ( AckModeDataReadOut = AcknowledgeMode(byte('0')) AckModeProgramming = AcknowledgeMode(byte('1')) AckModeBinary = AcknowledgeMode(byte('2')) AckModeReserved = AcknowledgeMode(byte('3')) AckModeManufacture = AcknowledgeMode(byte('6')) AckModeIllegalMode = AcknowledgeMode(byte(' ')) ) const ( CR = byte(0x0D) LF = byte(0x0A) FrontBoundaryChar = byte('(') RearBoundaryChar = byte(')') UnitSeparator = byte('*') StartChar = byte('/') RequestCommandChar = byte('?') EndChar = byte('!') StxChar = byte(0x02) EtxChar = byte(0x03) SeqDelChar = byte('\\') ) // ValidTestDataMessage can be used for testing. const ValidTestDataMessage = string(StxChar) + `1.1.1.1(12*kWh)` + `1.1.1.2(12*kWh)` + "\r\n" + string(EndChar) + string(CR) + string(LF) + string(EtxChar) + string(Bcc(0)) func ValidAddressChar(b byte) bool { switch b { case FrontBoundaryChar, RearBoundaryChar, StartChar, EndChar: return false default: return true } } func ValidValueChar(b byte) bool { switch b { case FrontBoundaryChar, UnitSeparator, RearBoundaryChar, StartChar, EndChar: return false default: return true } } func ValidUnitChar(b byte) bool { return ValidAddressChar(b) } // AcknowledgeModeFromByte returns the acknowledge mode from the given byte value. func AcknowledgeModeFromByte(a byte) AcknowledgeMode { switch a { case 0, 1, 2: return AcknowledgeMode(a) case 3, 4, 5: return AckModeReserved } switch { case 6 <= a && a <= 9: case 'A' <= a && a <= 'Z': return AckModeManufacture } return AckModeIllegalMode } var ( ErrCRFound = errors.New("End CR found") ErrNotImplementedYet = errors.New("not implemented yet") ErrFormatError = errors.New("format error") ErrFormatNoChars = errors.New("no chars found") ErrEmptyDataLine = errors.New("empty data line found") ErrUnexpectedEOF = errors.New("unexpected end of file") ErrNoBlockEndChar = errors.New("no block end char found") ErrNoStartChar = errors.New("no StartChar found") ErrAddressTooLong = errors.New("field too long") ErrValueTooLong = errors.New("field too long") ErrUnitTooLong = errors.New("field too long") ErrIdentificationTooLong = errors.New("identification field too long") ) func ParseDataMessage(r *bufio.Reader) (*DataMessage, error) { var b byte var err error var res *[]DataSet var bcc = Bcc(0) if verbose { log.Println("Starting ParseDataMessage") } // Consume all bytes till a start of message is found. for { b, err = r.ReadByte() if err != nil { return nil, ErrUnexpectedEOF } if b == StxChar { break } } if verbose { log.Println("Found StxChar") } // Get the datasets. res, err = ParseDataBlock(r, &bcc) if err != nil { return nil, err } _, err = ParseDataMessageEnd(r, &bcc) if err != nil { return nil, err } return &DataMessage{ DataSets: res, bcc: bcc, }, nil } // ParseDataMessageEnd parses the end of a datamessage. // ! 
CR LF ETX BCC func ParseDataMessageEnd(r *bufio.Reader, bcc *Bcc) (*DataMessage, error) { var b byte var err error if verbose { log.Println("Starting ParseDataMessageEnd") } b, err = r.ReadByte() if err != nil { return nil, err } if b != EndChar { if verbose { log.Printf("ParseDataMessageEnd, error parsing EndChar, found %d", b) } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != CR { if verbose { log.Println("ParseDataMessageEnd, error parsing CR") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != LF { if verbose { log.Println("ParseDataMessageEnd, error parsing LF") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != EtxChar { if verbose { log.Println("ParseDataMessageEnd, error parsing EtxChar") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { if verbose { log.Println("ParseDataMessageEnd, error parsing Bcc") } return nil, err } return &DataMessage{ bcc: *bcc, }, nil } // ParseDataBlock parses til no valid data lines can be parsed. func ParseDataBlock(r *bufio.Reader, bcc *Bcc) (*[]DataSet, error) { var err error var res []DataSet if verbose { log.Println("Starting ParseDataBlock") } for { var ds []DataSet ds, err = ParseDataLine(r, bcc) if err != nil { if len(res) <= 0 { return nil, ErrEmptyDataLine } return &res, nil } res = append(res, ds...) } } // ParseDataMessage reads bytes from r till a new complete datamessage has been read or an error occured. // func ParseDataMessage(r io.Reader) (*DataMessage, error) { // return nil, ErrNotImplementedYet // } // ParseDataLine parses a DataSets till a CR LF has been detected. // Data lines consist of one or more datasets. func ParseDataLine(r *bufio.Reader, bcc *Bcc) ([]DataSet, error) { var b byte var err error var ds *DataSet var res []DataSet if verbose { log.Println("Starting ParseDataLine") } for { ds, err = ParseDataSet(r, bcc) if err != nil { r.UnreadByte() return nil, ErrFormatError } res = append(res, *ds) // Test if the next two chars are CR LF b, err = r.ReadByte() if err == nil && b == CR { bcc.Digest(b) b, err = r.ReadByte() if err == nil && b == LF { bcc.Digest(b) return res, nil } // Error, CR not followed by LF return nil, ErrFormatError } r.UnreadByte() } } // ParseDataSet reads bytes from r till a new complete dataset has been read or an error occured. // A data message contains a list of data sets. Each data set consists of 3 fields "address", "value", and "unit". // Each of these fields is optional an may thus be equal to the empty string. // Data set ::= Address '(' Value(optional) ('*' unit)(optional) ')' // Ignores CR and LF and reads up to the first ! func ParseDataSet(r *bufio.Reader, bcc *Bcc) (*DataSet, error) { // read chars til Front boundary. var b byte var err error var va [100]byte var v = va[:0] res := &DataSet{} // Read the address till FrontBoundaryChar == ( if verbose { log.Println("Starting ParseDataSet") } if verbose { log.Println("Scanning for Address") } ScanAddress: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatNoChars } switch b { case CR, LF: r.UnreadByte() return nil, ErrCRFound case FrontBoundaryChar: bcc.Digest(b) break ScanAddress default: bcc.Digest(b) if !ValidAddressChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 16 { return nil, ErrAddressTooLong } } } // Address read. 
res.Address = string(v) v = v[:0] // Scan for value till * or ) if verbose { log.Println("Scanning for Value") } ScanValue: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } bcc.Digest(b) switch b { case RearBoundaryChar, UnitSeparator: break ScanValue default: if !ValidValueChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 32 { return nil, ErrValueTooLong } } } res.Value = string(v) if b == RearBoundaryChar { res.Unit = "" return res, nil } v = v[:0] if verbose { log.Println("Scanning for Unit") } ScanUnit: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } bcc.Digest(b) switch b { case RearBoundaryChar: break ScanUnit default: if !ValidValueChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 16 { return nil, ErrUnitTooLong } } } res.Unit = string(v) return res, nil } func ParseIdentificationMessage(r *bufio.Reader) (*IdentifcationMessage, error) { var b byte var err error var res = &IdentifcationMessage{} // StartChar b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if b != StartChar { return nil, ErrFormatError } // Manufacturer ID var id [3]byte // mID for i := 0; i < len(id); i++ { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } id[i] = b } res.ManID = string(id[:]) // baudrate mode b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if Baudrate(BaudrateIdentification(b)) == 0 { return nil, ErrFormatError } res.BaudID = b var vt [33]byte var v = vt[:0] // \W or not b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if b == SeqDelChar { // Read a W b, err = r.ReadByte() if err != nil || b != byte('W') { return nil, ErrFormatError } } else { v = append(v, b) } ScanIdenfication: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } switch { case b == CR: break ScanIdenfication default: v = append(v, b) if len(v) > 16 { return nil, ErrIdentificationTooLong } } } res.Identification = string(v) // Test if the last char is LF b, err = r.ReadByte() if err != nil || b != LF { return nil, ErrFormatError } return res, nil }
{
	switch rune(br) {
	case '0':
		return 300
	case 'A', '1':
		return 600
	case 'B', '2':
		return 1200
	case 'C', '3':
		return 2400
	case 'D', '4':
		return 4800
	case 'E', '5':
		return 9600
	case 'F', '6':
		return 19200
	}
	return 0
}
identifier_body
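The identifier_body middle above is the body of Baudrate, which maps the baud-rate identification character from the identification message to a line speed, doubling at each step from 300 baud at '0' up to 19200 at '6' (or 'F' in the letter form). A short usage sketch, reusing that body verbatim:

package main

import "fmt"

// BaudrateIdentification mirrors the type in messages.go.
type BaudrateIdentification byte

// Baudrate is copied from messages.go: digits '0'-'6' (or letters
// 'A'-'F') select 300..19200 baud; anything else maps to 0.
func Baudrate(br BaudrateIdentification) int {
	switch rune(br) {
	case '0':
		return 300
	case 'A', '1':
		return 600
	case 'B', '2':
		return 1200
	case 'C', '3':
		return 2400
	case 'D', '4':
		return 4800
	case 'E', '5':
		return 9600
	case 'F', '6':
		return 19200
	}
	return 0 // unknown identification character
}

func main() {
	for _, id := range []byte("0123456X") {
		fmt.Printf("ID %c -> %d baud\n", id, Baudrate(BaudrateIdentification(id)))
	}
}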
messages.go
package telegram import ( "bufio" "bytes" "errors" "fmt" "io" "log" ) // SerializeRequestMessage serializes the request message to w. func SerializeRequestMessage(w io.Writer, rm RequestMessage) (int, error) { msg := string(StartChar) + string(RequestCommandChar) + rm.deviceAddress + string(EndChar) + string(CR) + string(LF) return w.Write([]byte(msg)) } // DataMessage type captures the data message. type DataMessage struct { DataSets *[]DataSet bcc Bcc } func (d *DataMessage) String() string { b := &bytes.Buffer{} fmt.Fprintf(b, "bcc: %d %+v", d.bcc, *d.DataSets) return b.String() } // DataSet type. type DataSet struct { Address string Value string Unit string } // Bcc type captures the checksum. type Bcc byte // Digest processes the next byte for the checksum. func (bcc *Bcc) Digest(b ...byte) { for _, i := range b { *bcc = (*bcc) ^ Bcc(i) } } // ProtocolControlCharacter type. type ProtocolControlCharacter byte const ( // ProtControlNormal value. ProtControlNormal = ProtocolControlCharacter(byte('0')) // ProtControlSecondary value. ProtControlSecondary = ProtocolControlCharacter(byte('1')) ) // AcknowledgeMode type. type AcknowledgeMode byte // BaudrateIdentification type. type BaudrateIdentification byte // Baudrate returns the numeric baudrate from the code in the ID message. func Baudrate(br BaudrateIdentification) int { switch rune(br) { case '0': return 300 case 'A', '1': return 600 case 'B', '2': return 1200 case 'C', '3': return 2400 case 'D', '4': return 4800 case 'E', '5': return 9600 case 'F', '6': return 19200 } return 0 } const ( AckModeDataReadOut = AcknowledgeMode(byte('0')) AckModeProgramming = AcknowledgeMode(byte('1')) AckModeBinary = AcknowledgeMode(byte('2')) AckModeReserved = AcknowledgeMode(byte('3')) AckModeManufacture = AcknowledgeMode(byte('6')) AckModeIllegalMode = AcknowledgeMode(byte(' ')) ) const ( CR = byte(0x0D) LF = byte(0x0A) FrontBoundaryChar = byte('(') RearBoundaryChar = byte(')') UnitSeparator = byte('*') StartChar = byte('/') RequestCommandChar = byte('?') EndChar = byte('!') StxChar = byte(0x02) EtxChar = byte(0x03) SeqDelChar = byte('\\') ) // ValidTestDataMessage can be used for testing. const ValidTestDataMessage = string(StxChar) + `1.1.1.1(12*kWh)` + `1.1.1.2(12*kWh)` + "\r\n" + string(EndChar) + string(CR) + string(LF) + string(EtxChar) + string(Bcc(0)) func ValidAddressChar(b byte) bool { switch b { case FrontBoundaryChar, RearBoundaryChar, StartChar, EndChar: return false default: return true } } func
(b byte) bool { switch b { case FrontBoundaryChar, UnitSeparator, RearBoundaryChar, StartChar, EndChar: return false default: return true } } func ValidUnitChar(b byte) bool { return ValidAddressChar(b) } // AcknowledgeModeFromByte returns the acknowledge mode from the given byte value. func AcknowledgeModeFromByte(a byte) AcknowledgeMode { switch a { case 0, 1, 2: return AcknowledgeMode(a) case 3, 4, 5: return AckModeReserved } switch { case 6 <= a && a <= 9: case 'A' <= a && a <= 'Z': return AckModeManufacture } return AckModeIllegalMode } var ( ErrCRFound = errors.New("End CR found") ErrNotImplementedYet = errors.New("not implemented yet") ErrFormatError = errors.New("format error") ErrFormatNoChars = errors.New("no chars found") ErrEmptyDataLine = errors.New("empty data line found") ErrUnexpectedEOF = errors.New("unexpected end of file") ErrNoBlockEndChar = errors.New("no block end char found") ErrNoStartChar = errors.New("no StartChar found") ErrAddressTooLong = errors.New("field too long") ErrValueTooLong = errors.New("field too long") ErrUnitTooLong = errors.New("field too long") ErrIdentificationTooLong = errors.New("identification field too long") ) func ParseDataMessage(r *bufio.Reader) (*DataMessage, error) { var b byte var err error var res *[]DataSet var bcc = Bcc(0) if verbose { log.Println("Starting ParseDataMessage") } // Consume all bytes till a start of message is found. for { b, err = r.ReadByte() if err != nil { return nil, ErrUnexpectedEOF } if b == StxChar { break } } if verbose { log.Println("Found StxChar") } // Get the datasets. res, err = ParseDataBlock(r, &bcc) if err != nil { return nil, err } _, err = ParseDataMessageEnd(r, &bcc) if err != nil { return nil, err } return &DataMessage{ DataSets: res, bcc: bcc, }, nil } // ParseDataMessageEnd parses the end of a datamessage. // ! CR LF ETX BCC func ParseDataMessageEnd(r *bufio.Reader, bcc *Bcc) (*DataMessage, error) { var b byte var err error if verbose { log.Println("Starting ParseDataMessageEnd") } b, err = r.ReadByte() if err != nil { return nil, err } if b != EndChar { if verbose { log.Printf("ParseDataMessageEnd, error parsing EndChar, found %d", b) } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != CR { if verbose { log.Println("ParseDataMessageEnd, error parsing CR") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != LF { if verbose { log.Println("ParseDataMessageEnd, error parsing LF") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { return nil, err } if b != EtxChar { if verbose { log.Println("ParseDataMessageEnd, error parsing EtxChar") } return nil, ErrFormatError } bcc.Digest(b) b, err = r.ReadByte() if err != nil { if verbose { log.Println("ParseDataMessageEnd, error parsing Bcc") } return nil, err } return &DataMessage{ bcc: *bcc, }, nil } // ParseDataBlock parses til no valid data lines can be parsed. func ParseDataBlock(r *bufio.Reader, bcc *Bcc) (*[]DataSet, error) { var err error var res []DataSet if verbose { log.Println("Starting ParseDataBlock") } for { var ds []DataSet ds, err = ParseDataLine(r, bcc) if err != nil { if len(res) <= 0 { return nil, ErrEmptyDataLine } return &res, nil } res = append(res, ds...) } } // ParseDataMessage reads bytes from r till a new complete datamessage has been read or an error occured. 
// func ParseDataMessage(r io.Reader) (*DataMessage, error) { // return nil, ErrNotImplementedYet // } // ParseDataLine parses a DataSets till a CR LF has been detected. // Data lines consist of one or more datasets. func ParseDataLine(r *bufio.Reader, bcc *Bcc) ([]DataSet, error) { var b byte var err error var ds *DataSet var res []DataSet if verbose { log.Println("Starting ParseDataLine") } for { ds, err = ParseDataSet(r, bcc) if err != nil { r.UnreadByte() return nil, ErrFormatError } res = append(res, *ds) // Test if the next two chars are CR LF b, err = r.ReadByte() if err == nil && b == CR { bcc.Digest(b) b, err = r.ReadByte() if err == nil && b == LF { bcc.Digest(b) return res, nil } // Error, CR not followed by LF return nil, ErrFormatError } r.UnreadByte() } } // ParseDataSet reads bytes from r till a new complete dataset has been read or an error occured. // A data message contains a list of data sets. Each data set consists of 3 fields "address", "value", and "unit". // Each of these fields is optional an may thus be equal to the empty string. // Data set ::= Address '(' Value(optional) ('*' unit)(optional) ')' // Ignores CR and LF and reads up to the first ! func ParseDataSet(r *bufio.Reader, bcc *Bcc) (*DataSet, error) { // read chars til Front boundary. var b byte var err error var va [100]byte var v = va[:0] res := &DataSet{} // Read the address till FrontBoundaryChar == ( if verbose { log.Println("Starting ParseDataSet") } if verbose { log.Println("Scanning for Address") } ScanAddress: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatNoChars } switch b { case CR, LF: r.UnreadByte() return nil, ErrCRFound case FrontBoundaryChar: bcc.Digest(b) break ScanAddress default: bcc.Digest(b) if !ValidAddressChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 16 { return nil, ErrAddressTooLong } } } // Address read. 
res.Address = string(v) v = v[:0] // Scan for value till * or ) if verbose { log.Println("Scanning for Value") } ScanValue: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } bcc.Digest(b) switch b { case RearBoundaryChar, UnitSeparator: break ScanValue default: if !ValidValueChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 32 { return nil, ErrValueTooLong } } } res.Value = string(v) if b == RearBoundaryChar { res.Unit = "" return res, nil } v = v[:0] if verbose { log.Println("Scanning for Unit") } ScanUnit: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } bcc.Digest(b) switch b { case RearBoundaryChar: break ScanUnit default: if !ValidValueChar(b) { return nil, ErrFormatError } v = append(v, b) if len(v) > 16 { return nil, ErrUnitTooLong } } } res.Unit = string(v) return res, nil } func ParseIdentificationMessage(r *bufio.Reader) (*IdentifcationMessage, error) { var b byte var err error var res = &IdentifcationMessage{} // StartChar b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if b != StartChar { return nil, ErrFormatError } // Manufacturer ID var id [3]byte // mID for i := 0; i < len(id); i++ { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } id[i] = b } res.ManID = string(id[:]) // baudrate mode b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if Baudrate(BaudrateIdentification(b)) == 0 { return nil, ErrFormatError } res.BaudID = b var vt [33]byte var v = vt[:0] // \W or not b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } if b == SeqDelChar { // Read a W b, err = r.ReadByte() if err != nil || b != byte('W') { return nil, ErrFormatError } } else { v = append(v, b) } ScanIdenfication: for { b, err = r.ReadByte() if err != nil { return nil, ErrFormatError } switch { case b == CR: break ScanIdenfication default: v = append(v, b) if len(v) > 16 { return nil, ErrIdentificationTooLong } } } res.Identification = string(v) // Test if the last char is LF b, err = r.ReadByte() if err != nil || b != LF { return nil, ErrFormatError } return res, nil }
ValidValueChar
identifier_name
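Here the middle is just the identifier ValidValueChar; its body rejects the framing characters so a value field can never contain a protocol delimiter. A standalone check against the delimiter set defined in messages.go, with the named constants (FrontBoundaryChar, UnitSeparator, RearBoundaryChar, StartChar, EndChar) inlined as literals:

package main

import "fmt"

// ValidValueChar mirrors messages.go: a value byte may not be one of
// the framing characters '(' '*' ')' '/' '!'.
func ValidValueChar(b byte) bool {
	switch b {
	case '(', '*', ')', '/', '!':
		return false
	default:
		return true
	}
}

func main() {
	for _, b := range []byte("12.5(*)") {
		fmt.Printf("%q allowed in a value: %v\n", b, ValidValueChar(b))
	}
}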
media_sessions.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { anyhow::{format_err, Error}, fidl::encoding::Decodable as FidlDecodable, fidl::endpoints::{create_proxy, create_request_stream}, fidl_fuchsia_bluetooth_avrcp::{self as fidl_avrcp}, fidl_fuchsia_media_sessions2::{ DiscoveryMarker, DiscoveryProxy, SessionControlProxy, SessionInfoDelta, SessionsWatcherRequest, SessionsWatcherRequestStream, WatchOptions, }, fuchsia_component::client::connect_to_service, fuchsia_syslog::{fx_log_warn, fx_vlog}, futures::{Future, TryStreamExt}, parking_lot::RwLock, std::collections::HashMap, std::sync::Arc, }; use crate::media::media_state::MediaState; use crate::media::media_types::Notification; use crate::types::{ bounded_queue::BoundedQueue, NotificationData, MAX_NOTIFICATION_EVENT_QUEUE_SIZE, }; #[derive(Debug, Clone)] pub(crate) struct MediaSessions { inner: Arc<RwLock<MediaSessionsInner>>, } impl MediaSessions { pub fn create() -> Self { Self { inner: Arc::new(RwLock::new(MediaSessionsInner::new())) } } // Returns a future that watches MediaPlayer for updates. pub fn watch(&self) -> impl Future<Output = Result<(), anyhow::Error>> { // MediaSession Service Setup // Set up the MediaSession Discovery service. Connect to the session watcher. let discovery = connect_to_service::<DiscoveryMarker>() .expect("Couldn't connect to discovery service."); let (watcher_client, watcher_requests) = create_request_stream().expect("Error creating watcher request stream"); // Only subscribe to updates from players that are active. let watch_active_options = WatchOptions { only_active: Some(true), ..WatchOptions::new_empty() }; discovery .watch_sessions(watch_active_options, watcher_client) .expect("Should watch media sessions"); // End MediaSession Service Setup let inner = self.inner.clone(); Self::watch_media_sessions(discovery, watcher_requests, inner) } pub fn get_active_session(&self) -> Result<MediaState, Error> { let r_inner = self.inner.read().get_active_session(); r_inner.ok_or(format_err!("No active player")) } pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> { self.inner.read().get_supported_notification_events() } pub fn
( &self, event_id: fidl_avrcp::NotificationEvent, current: Notification, pos_change_interval: u32, responder: fidl_avrcp::TargetHandlerWatchNotificationResponder, ) -> Result<(), fidl::Error> { let mut write = self.inner.write(); write.register_notification(event_id, current, pos_change_interval, responder) } async fn watch_media_sessions( discovery: DiscoveryProxy, mut watcher_requests: SessionsWatcherRequestStream, sessions_inner: Arc<RwLock<MediaSessionsInner>>, ) -> Result<(), anyhow::Error> { while let Some(req) = watcher_requests.try_next().await.expect("Failed to serve Watcher service") { match req { SessionsWatcherRequest::SessionUpdated { session_id: id, session_info_delta: delta, responder, } => { responder.send()?; fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta); // Since we are only listening to active sessions, update the currently // active media session id every time a watcher event is triggered. // This means AVRCP commands will be queried/set to the player that has most // recently changed in status. sessions_inner.write().update_active_session_id(Some(id.clone())); // If this is our first time receiving updates from this MediaPlayer, create // a session control proxy and connect to the session. sessions_inner.write().create_or_update_session( discovery.clone(), id.clone(), delta, &create_session_control_proxy, )?; fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner); } SessionsWatcherRequest::SessionRemoved { session_id, responder } => { // A media session with id `session_id` has been removed. responder.send()?; // Clear any outstanding notifications with a player changed response. // Clear the currently active session, if it equals `session_id`. // Clear entry in state map. sessions_inner.write().clear_session(&session_id); fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner); } } } Ok(()) } } #[derive(Debug)] pub(crate) struct MediaSessionsInner { // The currently active MediaSession id. // If present, the `active_session_id` should be present in `map`. active_session_id: Option<u64>, // The map of ids to the respective media session. map: HashMap<u64, MediaState>, // The map of outstanding notifications. notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>, } impl MediaSessionsInner { pub fn new() -> Self { Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() } } pub fn get_active_session(&self) -> Option<MediaState> { self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned()) } /// TODO(41703): Add TRACK_POS_CHANGED when implemented. pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> { vec![ fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged, fidl_avrcp::NotificationEvent::PlaybackStatusChanged, fidl_avrcp::NotificationEvent::TrackChanged, ] } /// Removes the MediaState specified by `id` from the map, should it exist. /// If the session was currently active, clears `self.active_session_id`. /// Returns the removed MediaState. pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> { if Some(id) == self.active_session_id.as_ref() { self.update_active_session_id(None); } self.map.remove(id) } /// Clears all outstanding notifications with an AddressedPlayerChanged error. /// See `crate::types::update_responder` for more details. 
pub fn clear_notification_responders(&mut self) { for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() { if let Err(e) = notif_data.update_responder( &fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID. Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged), ) { fx_log_warn!("There was an error clearing the responder: {:?}", e); } } fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications); } /// Updates the active session with the new session specified by `id`. /// Clear all outstanding notifications, if the active session has changed. /// If the updated active session_id has changed, return old active id. pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> { if self.active_session_id != id { self.clear_notification_responders(); let previous_active_session_id = self.active_session_id.take(); self.active_session_id = id; previous_active_session_id } else { None } } /// If an active session is present, update any outstanding notifications by /// checking if notification values have changed. /// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED. pub fn update_notification_responders(&mut self) { let state = if let Some(state) = self.get_active_session() { state.clone() } else { return; }; self.notifications = self .notifications .drain() .map(|(event_id, queue)| { let curr_value = state.session_info().get_notification_value(&event_id); ( event_id, queue .into_iter() .filter_map(|notif_data| { notif_data .update_responder(&event_id, curr_value.clone()) .unwrap_or(None) }) .collect(), ) }) .collect(); fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications); } /// If the entry, `id` doesn't exist in the map, create a `MediaState` entry /// when the control proxy. /// Update the state with the delta. /// Update any outstanding notification responders with the change in state. pub fn create_or_update_session<F>( &mut self, discovery: DiscoveryProxy, id: u64, delta: SessionInfoDelta, create_fn: F, ) -> Result<(), Error> where F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>, { self.map .entry(id) .or_insert({ let session_proxy = create_fn(discovery, id)?; MediaState::new(session_proxy) }) .update_session_info(delta); self.update_notification_responders(); Ok(()) } /// Given a notification `event_id`: /// 1) insert it into the notifications map. /// 2) If the queue for `event_id` is full, evict the oldest responder and respond /// with the current value. /// 3) Update any outstanding notification responders with any changes in state. pub fn register_notification( &mut self, event_id: fidl_avrcp::NotificationEvent, current: Notification, pos_change_interval: u32, responder: fidl_avrcp::TargetHandlerWatchNotificationResponder, ) -> Result<(), fidl::Error> { // If the `event_id` is not supported, reject the registration. if !self.get_supported_notification_events().contains(&event_id) { return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter)); } let data = NotificationData::new(current, pos_change_interval, responder); let _evicted = self .notifications .entry(event_id) .or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE)) .insert(data); // Notify the evicted responder that the TG has removed it from the active list of responders. // Reply with the current value of the notification. // This will happen automatically, when `_evicted` is dropped. 
// Update outstanding responders with potentially new session data. self.update_notification_responders(); Ok(()) } } /// Creates a session control proxy from the Discovery protocol and connects to /// the session specified by `id`. fn create_session_control_proxy( discovery: DiscoveryProxy, id: u64, ) -> Result<SessionControlProxy, Error> { let (session_proxy, session_request_stream) = create_proxy()?; discovery.connect_to_session(id, session_request_stream)?; Ok(session_proxy) } #[cfg(test)] mod tests { use super::*; use crate::media::media_types::ValidPlayerApplicationSettings; use fidl::encoding::Decodable as FidlDecodable; use fidl::endpoints::create_proxy; use fidl_fuchsia_media::{self as fidl_media_types}; use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker}; use fuchsia_async as fasync; fn create_metadata() -> fidl_media_types::Metadata { let mut metadata = fidl_media_types::Metadata::new_empty(); let mut property1 = fidl_media_types::Property::new_empty(); property1.label = fidl_media_types::METADATA_LABEL_TITLE.to_string(); let sample_title = "This is a sample title".to_string(); property1.value = sample_title.clone(); metadata.properties = vec![property1]; metadata } fn create_player_status() -> fidl_media::PlayerStatus { let mut player_status = fidl_media::PlayerStatus::new_empty(); let mut timeline_fn = fidl_media_types::TimelineFunction::new_empty(); // Playback started at beginning of media. timeline_fn.subject_time = 0; // Monotonic clock time at beginning of media (nanos). timeline_fn.reference_time = 500000000; // Playback rate = 1, normal playback. timeline_fn.subject_delta = 1; timeline_fn.reference_delta = 1; player_status.player_state = Some(fidl_media::PlayerState::Playing); player_status.duration = Some(123456789); player_status.shuffle_on = Some(true); player_status.timeline_function = Some(timeline_fn); player_status } #[test] /// Test that retrieving a notification value correctly gets the current state. /// 1) Query with an unsupported `event_id`. /// 2) Query with a supported Event ID, with default state. /// 3) Query with all supported Event IDs. fn test_get_notification_value() { let exec = fasync::Executor::new_with_fake_time().expect("executor should build"); exec.set_fake_time(fasync::Time::from_nanos(555555555)); let media_sessions = MediaSessionsInner::new(); let (session_proxy, _) = create_proxy::<SessionControlMarker>().expect("Couldn't create fidl proxy."); let mut media_state = MediaState::new(session_proxy); // 1. Unsupported ID. let unsupported_id = fidl_avrcp::NotificationEvent::BattStatusChanged; let res = media_state.session_info().get_notification_value(&unsupported_id); assert!(res.is_err()); // 2. Supported ID, `media_state` contains default values. let res = media_state .session_info() .get_notification_value(&fidl_avrcp::NotificationEvent::PlaybackStatusChanged); assert_eq!(res.expect("Should be ok").status, Some(fidl_avrcp::PlaybackStatus::Stopped)); let res = media_state .session_info() .get_notification_value(&fidl_avrcp::NotificationEvent::TrackChanged); assert_eq!(res.expect("Should be ok").track_id, Some(std::u64::MAX)); // 3. 
exec.set_fake_time(fasync::Time::from_nanos(555555555)); let mut info = fidl_media::SessionInfoDelta::new_empty(); info.metadata = Some(create_metadata()); info.player_status = Some(create_player_status()); media_state.update_session_info(info); let expected_play_status = fidl_avrcp::PlaybackStatus::Playing; let expected_pas = ValidPlayerApplicationSettings::new( None, Some(fidl_avrcp::RepeatStatusMode::Off), Some(fidl_avrcp::ShuffleMode::AllTrackShuffle), None, ); // Supported = PAS, Playback, Track, TrackPos let valid_events = media_sessions.get_supported_notification_events(); let expected_values: Vec<Notification> = vec![ Notification::new(None, None, None, Some(expected_pas), None, None, None), Notification::new(Some(expected_play_status), None, None, None, None, None, None), Notification::new(None, Some(0), None, None, None, None, None), Notification::new(None, None, Some(55), None, None, None, None), ]; for (event_id, expected_v) in valid_events.iter().zip(expected_values.iter()) { assert_eq!( media_state.session_info().get_notification_value(&event_id).expect("Should work"), expected_v.clone() ); } } #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests registering a notification works as expected. /// 1. Normal case, insertion of a supported notification. /// 2. Normal case, insertion of a supported notification, with eviction. /// 3. Normal case, insertion of a supported notification, with change in state, /// so that `update_notification_responders()` correctly updates inserted notif. /// 3. Error case, insertion of an unsupported notification. fn test_register_notification() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests insertion/updating of a new MediaSession into the state map. /// 1. Test branch where MediaSession already exists, so this is just an update. /// 2. Test branch where MediaSession doesn't exist, creates a new session and updates it. fn test_create_or_update_session() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests that updating any outstanding responders behaves as expected. fn test_update_notification_responders() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests updating the active session_id correctly changes the currently /// playing active media session, as well as clears any outstanding notifications /// if a new MediaSession becomes the active session. fn test_update_active_session_id() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests sending PlayerChanged response to all outstanding responders behaves /// as expected, and removes all entries in the Notifications map. fn test_clear_notification_responders() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests removing a session from the map. /// Tests clear_session clears all notifications if the MediaSession is the currently /// active session. fn test_clear_session() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests clearing the active session_id. fn test_clear_active_session_id() {} }
register_notification
identifier_name
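This record illustrates the identifier_name layout: the prefix ends immediately after pub fn, the middle is exactly the method name register_notification, and the suffix resumes at the parameter list. A rough Go sketch of producing such a split, under the assumption, suggested by the dumped records, that the middle is the bare identifier:

package main

import (
	"fmt"
	"strings"
)

// splitAtIdentifier is a hypothetical reconstruction of how an
// identifier_name example could be built: everything before the
// identifier becomes the prefix, the identifier itself the middle,
// and the remainder the suffix.
func splitAtIdentifier(src, ident string) (prefix, middle, suffix string, ok bool) {
	i := strings.Index(src, ident)
	if i < 0 {
		return "", "", "", false
	}
	return src[:i], ident, src[i+len(ident):], true
}

func main() {
	src := "pub fn register_notification(&self) -> Result<(), fidl::Error> {"
	p, m, s, _ := splitAtIdentifier(src, "register_notification")
	fmt.Printf("prefix=%q\nmiddle=%q\nsuffix=%q\n", p, m, s)
}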
media_sessions.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { anyhow::{format_err, Error}, fidl::encoding::Decodable as FidlDecodable, fidl::endpoints::{create_proxy, create_request_stream}, fidl_fuchsia_bluetooth_avrcp::{self as fidl_avrcp}, fidl_fuchsia_media_sessions2::{ DiscoveryMarker, DiscoveryProxy, SessionControlProxy, SessionInfoDelta, SessionsWatcherRequest, SessionsWatcherRequestStream, WatchOptions, }, fuchsia_component::client::connect_to_service, fuchsia_syslog::{fx_log_warn, fx_vlog}, futures::{Future, TryStreamExt}, parking_lot::RwLock, std::collections::HashMap, std::sync::Arc, }; use crate::media::media_state::MediaState; use crate::media::media_types::Notification; use crate::types::{ bounded_queue::BoundedQueue, NotificationData, MAX_NOTIFICATION_EVENT_QUEUE_SIZE, }; #[derive(Debug, Clone)] pub(crate) struct MediaSessions { inner: Arc<RwLock<MediaSessionsInner>>, } impl MediaSessions { pub fn create() -> Self { Self { inner: Arc::new(RwLock::new(MediaSessionsInner::new())) } } // Returns a future that watches MediaPlayer for updates. pub fn watch(&self) -> impl Future<Output = Result<(), anyhow::Error>> { // MediaSession Service Setup // Set up the MediaSession Discovery service. Connect to the session watcher. let discovery = connect_to_service::<DiscoveryMarker>() .expect("Couldn't connect to discovery service."); let (watcher_client, watcher_requests) = create_request_stream().expect("Error creating watcher request stream"); // Only subscribe to updates from players that are active. let watch_active_options = WatchOptions { only_active: Some(true), ..WatchOptions::new_empty() }; discovery .watch_sessions(watch_active_options, watcher_client) .expect("Should watch media sessions"); // End MediaSession Service Setup let inner = self.inner.clone(); Self::watch_media_sessions(discovery, watcher_requests, inner) } pub fn get_active_session(&self) -> Result<MediaState, Error> { let r_inner = self.inner.read().get_active_session(); r_inner.ok_or(format_err!("No active player")) } pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> { self.inner.read().get_supported_notification_events() } pub fn register_notification( &self, event_id: fidl_avrcp::NotificationEvent, current: Notification, pos_change_interval: u32, responder: fidl_avrcp::TargetHandlerWatchNotificationResponder, ) -> Result<(), fidl::Error> { let mut write = self.inner.write(); write.register_notification(event_id, current, pos_change_interval, responder)
} async fn watch_media_sessions( discovery: DiscoveryProxy, mut watcher_requests: SessionsWatcherRequestStream, sessions_inner: Arc<RwLock<MediaSessionsInner>>, ) -> Result<(), anyhow::Error> { while let Some(req) = watcher_requests.try_next().await.expect("Failed to serve Watcher service") { match req { SessionsWatcherRequest::SessionUpdated { session_id: id, session_info_delta: delta, responder, } => { responder.send()?; fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta); // Since we are only listening to active sessions, update the currently // active media session id every time a watcher event is triggered. // This means AVRCP commands will be queried/set to the player that has most // recently changed in status. sessions_inner.write().update_active_session_id(Some(id.clone())); // If this is our first time receiving updates from this MediaPlayer, create // a session control proxy and connect to the session. sessions_inner.write().create_or_update_session( discovery.clone(), id.clone(), delta, &create_session_control_proxy, )?; fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner); } SessionsWatcherRequest::SessionRemoved { session_id, responder } => { // A media session with id `session_id` has been removed. responder.send()?; // Clear any outstanding notifications with a player changed response. // Clear the currently active session, if it equals `session_id`. // Clear entry in state map. sessions_inner.write().clear_session(&session_id); fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner); } } } Ok(()) } } #[derive(Debug)] pub(crate) struct MediaSessionsInner { // The currently active MediaSession id. // If present, the `active_session_id` should be present in `map`. active_session_id: Option<u64>, // The map of ids to the respective media session. map: HashMap<u64, MediaState>, // The map of outstanding notifications. notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>, } impl MediaSessionsInner { pub fn new() -> Self { Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() } } pub fn get_active_session(&self) -> Option<MediaState> { self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned()) } /// TODO(41703): Add TRACK_POS_CHANGED when implemented. pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> { vec![ fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged, fidl_avrcp::NotificationEvent::PlaybackStatusChanged, fidl_avrcp::NotificationEvent::TrackChanged, ] } /// Removes the MediaState specified by `id` from the map, should it exist. /// If the session was currently active, clears `self.active_session_id`. /// Returns the removed MediaState. pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> { if Some(id) == self.active_session_id.as_ref() { self.update_active_session_id(None); } self.map.remove(id) } /// Clears all outstanding notifications with an AddressedPlayerChanged error. /// See `crate::types::update_responder` for more details. pub fn clear_notification_responders(&mut self) { for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() { if let Err(e) = notif_data.update_responder( &fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID. 
Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged), ) { fx_log_warn!("There was an error clearing the responder: {:?}", e); } } fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications); } /// Updates the active session with the new session specified by `id`. /// Clear all outstanding notifications, if the active session has changed. /// If the updated active session_id has changed, return old active id. pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> { if self.active_session_id != id { self.clear_notification_responders(); let previous_active_session_id = self.active_session_id.take(); self.active_session_id = id; previous_active_session_id } else { None } } /// If an active session is present, update any outstanding notifications by /// checking if notification values have changed. /// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED. pub fn update_notification_responders(&mut self) { let state = if let Some(state) = self.get_active_session() { state.clone() } else { return; }; self.notifications = self .notifications .drain() .map(|(event_id, queue)| { let curr_value = state.session_info().get_notification_value(&event_id); ( event_id, queue .into_iter() .filter_map(|notif_data| { notif_data .update_responder(&event_id, curr_value.clone()) .unwrap_or(None) }) .collect(), ) }) .collect(); fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications); } /// If the entry, `id` doesn't exist in the map, create a `MediaState` entry /// when the control proxy. /// Update the state with the delta. /// Update any outstanding notification responders with the change in state. pub fn create_or_update_session<F>( &mut self, discovery: DiscoveryProxy, id: u64, delta: SessionInfoDelta, create_fn: F, ) -> Result<(), Error> where F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>, { self.map .entry(id) .or_insert({ let session_proxy = create_fn(discovery, id)?; MediaState::new(session_proxy) }) .update_session_info(delta); self.update_notification_responders(); Ok(()) } /// Given a notification `event_id`: /// 1) insert it into the notifications map. /// 2) If the queue for `event_id` is full, evict the oldest responder and respond /// with the current value. /// 3) Update any outstanding notification responders with any changes in state. pub fn register_notification( &mut self, event_id: fidl_avrcp::NotificationEvent, current: Notification, pos_change_interval: u32, responder: fidl_avrcp::TargetHandlerWatchNotificationResponder, ) -> Result<(), fidl::Error> { // If the `event_id` is not supported, reject the registration. if !self.get_supported_notification_events().contains(&event_id) { return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter)); } let data = NotificationData::new(current, pos_change_interval, responder); let _evicted = self .notifications .entry(event_id) .or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE)) .insert(data); // Notify the evicted responder that the TG has removed it from the active list of responders. // Reply with the current value of the notification. // This will happen automatically, when `_evicted` is dropped. // Update outstanding responders with potentially new session data. self.update_notification_responders(); Ok(()) } } /// Creates a session control proxy from the Discovery protocol and connects to /// the session specified by `id`. 
fn create_session_control_proxy( discovery: DiscoveryProxy, id: u64, ) -> Result<SessionControlProxy, Error> { let (session_proxy, session_request_stream) = create_proxy()?; discovery.connect_to_session(id, session_request_stream)?; Ok(session_proxy) } #[cfg(test)] mod tests { use super::*; use crate::media::media_types::ValidPlayerApplicationSettings; use fidl::encoding::Decodable as FidlDecodable; use fidl::endpoints::create_proxy; use fidl_fuchsia_media::{self as fidl_media_types}; use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker}; use fuchsia_async as fasync; fn create_metadata() -> fidl_media_types::Metadata { let mut metadata = fidl_media_types::Metadata::new_empty(); let mut property1 = fidl_media_types::Property::new_empty(); property1.label = fidl_media_types::METADATA_LABEL_TITLE.to_string(); let sample_title = "This is a sample title".to_string(); property1.value = sample_title.clone(); metadata.properties = vec![property1]; metadata } fn create_player_status() -> fidl_media::PlayerStatus { let mut player_status = fidl_media::PlayerStatus::new_empty(); let mut timeline_fn = fidl_media_types::TimelineFunction::new_empty(); // Playback started at beginning of media. timeline_fn.subject_time = 0; // Monotonic clock time at beginning of media (nanos). timeline_fn.reference_time = 500000000; // Playback rate = 1, normal playback. timeline_fn.subject_delta = 1; timeline_fn.reference_delta = 1; player_status.player_state = Some(fidl_media::PlayerState::Playing); player_status.duration = Some(123456789); player_status.shuffle_on = Some(true); player_status.timeline_function = Some(timeline_fn); player_status } #[test] /// Test that retrieving a notification value correctly gets the current state. /// 1) Query with an unsupported `event_id`. /// 2) Query with a supported Event ID, with default state. /// 3) Query with all supported Event IDs. fn test_get_notification_value() { let exec = fasync::Executor::new_with_fake_time().expect("executor should build"); exec.set_fake_time(fasync::Time::from_nanos(555555555)); let media_sessions = MediaSessionsInner::new(); let (session_proxy, _) = create_proxy::<SessionControlMarker>().expect("Couldn't create fidl proxy."); let mut media_state = MediaState::new(session_proxy); // 1. Unsupported ID. let unsupported_id = fidl_avrcp::NotificationEvent::BattStatusChanged; let res = media_state.session_info().get_notification_value(&unsupported_id); assert!(res.is_err()); // 2. Supported ID, `media_state` contains default values. let res = media_state .session_info() .get_notification_value(&fidl_avrcp::NotificationEvent::PlaybackStatusChanged); assert_eq!(res.expect("Should be ok").status, Some(fidl_avrcp::PlaybackStatus::Stopped)); let res = media_state .session_info() .get_notification_value(&fidl_avrcp::NotificationEvent::TrackChanged); assert_eq!(res.expect("Should be ok").track_id, Some(std::u64::MAX)); // 3. 
exec.set_fake_time(fasync::Time::from_nanos(555555555)); let mut info = fidl_media::SessionInfoDelta::new_empty(); info.metadata = Some(create_metadata()); info.player_status = Some(create_player_status()); media_state.update_session_info(info); let expected_play_status = fidl_avrcp::PlaybackStatus::Playing; let expected_pas = ValidPlayerApplicationSettings::new( None, Some(fidl_avrcp::RepeatStatusMode::Off), Some(fidl_avrcp::ShuffleMode::AllTrackShuffle), None, ); // Supported = PAS, Playback, Track, TrackPos let valid_events = media_sessions.get_supported_notification_events(); let expected_values: Vec<Notification> = vec![ Notification::new(None, None, None, Some(expected_pas), None, None, None), Notification::new(Some(expected_play_status), None, None, None, None, None, None), Notification::new(None, Some(0), None, None, None, None, None), Notification::new(None, None, Some(55), None, None, None, None), ]; for (event_id, expected_v) in valid_events.iter().zip(expected_values.iter()) { assert_eq!( media_state.session_info().get_notification_value(&event_id).expect("Should work"), expected_v.clone() ); } } #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests registering a notification works as expected. /// 1. Normal case, insertion of a supported notification. /// 2. Normal case, insertion of a supported notification, with eviction. /// 3. Normal case, insertion of a supported notification, with change in state, /// so that `update_notification_responders()` correctly updates inserted notif. /// 3. Error case, insertion of an unsupported notification. fn test_register_notification() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests insertion/updating of a new MediaSession into the state map. /// 1. Test branch where MediaSession already exists, so this is just an update. /// 2. Test branch where MediaSession doesn't exist, creates a new session and updates it. fn test_create_or_update_session() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests that updating any outstanding responders behaves as expected. fn test_update_notification_responders() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests updating the active session_id correctly changes the currently /// playing active media session, as well as clears any outstanding notifications /// if a new MediaSession becomes the active session. fn test_update_active_session_id() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests sending PlayerChanged response to all outstanding responders behaves /// as expected, and removes all entries in the Notifications map. fn test_clear_notification_responders() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests removing a session from the map. /// Tests clear_session clears all notifications if the MediaSession is the currently /// active session. fn test_clear_session() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests clearing the active session_id. fn test_clear_active_session_id() {} }
random_line_split
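The `register_notification` path in this record caps each event's pending responders with a `BoundedQueue` from `crate::types::bounded_queue`: when the queue for an `event_id` is full, the oldest responder is evicted and answered with the current value. The sketch below is a hypothetical stand-in for that queue (the real type's API is not shown in this record), built on `VecDeque`, purely to illustrate insert-with-eviction semantics:

use std::collections::VecDeque;

/// Hypothetical stand-in for the crate's `BoundedQueue`: a FIFO that
/// evicts and returns the oldest element once `max_len` is reached.
struct SimpleBoundedQueue<T> {
    inner: VecDeque<T>,
    max_len: usize,
}

impl<T> SimpleBoundedQueue<T> {
    fn new(max_len: usize) -> Self {
        Self { inner: VecDeque::new(), max_len }
    }

    /// Inserts `item`; if the queue is full, the oldest entry is
    /// popped and handed back to the caller.
    fn insert(&mut self, item: T) -> Option<T> {
        let evicted = if self.inner.len() >= self.max_len {
            self.inner.pop_front()
        } else {
            None
        };
        self.inner.push_back(item);
        evicted
    }
}

fn main() {
    let mut q = SimpleBoundedQueue::new(2);
    assert!(q.insert(1).is_none());
    assert!(q.insert(2).is_none());
    // Queue is full: inserting 3 evicts the oldest entry, 1.
    assert_eq!(q.insert(3), Some(1));
}

In the real code the evicted value is a `NotificationData`, and simply dropping the `_evicted` binding is what answers that responder with the current notification value, as the comment in `register_notification` notes.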
media_sessions.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { anyhow::{format_err, Error}, fidl::encoding::Decodable as FidlDecodable, fidl::endpoints::{create_proxy, create_request_stream}, fidl_fuchsia_bluetooth_avrcp::{self as fidl_avrcp}, fidl_fuchsia_media_sessions2::{ DiscoveryMarker, DiscoveryProxy, SessionControlProxy, SessionInfoDelta, SessionsWatcherRequest, SessionsWatcherRequestStream, WatchOptions, }, fuchsia_component::client::connect_to_service, fuchsia_syslog::{fx_log_warn, fx_vlog}, futures::{Future, TryStreamExt}, parking_lot::RwLock, std::collections::HashMap, std::sync::Arc, }; use crate::media::media_state::MediaState; use crate::media::media_types::Notification; use crate::types::{ bounded_queue::BoundedQueue, NotificationData, MAX_NOTIFICATION_EVENT_QUEUE_SIZE, }; #[derive(Debug, Clone)] pub(crate) struct MediaSessions { inner: Arc<RwLock<MediaSessionsInner>>, } impl MediaSessions { pub fn create() -> Self { Self { inner: Arc::new(RwLock::new(MediaSessionsInner::new())) } } // Returns a future that watches MediaPlayer for updates. pub fn watch(&self) -> impl Future<Output = Result<(), anyhow::Error>> { // MediaSession Service Setup // Set up the MediaSession Discovery service. Connect to the session watcher. let discovery = connect_to_service::<DiscoveryMarker>() .expect("Couldn't connect to discovery service."); let (watcher_client, watcher_requests) = create_request_stream().expect("Error creating watcher request stream"); // Only subscribe to updates from players that are active. let watch_active_options = WatchOptions { only_active: Some(true), ..WatchOptions::new_empty() }; discovery .watch_sessions(watch_active_options, watcher_client) .expect("Should watch media sessions"); // End MediaSession Service Setup let inner = self.inner.clone(); Self::watch_media_sessions(discovery, watcher_requests, inner) } pub fn get_active_session(&self) -> Result<MediaState, Error> { let r_inner = self.inner.read().get_active_session(); r_inner.ok_or(format_err!("No active player")) } pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> { self.inner.read().get_supported_notification_events() } pub fn register_notification( &self, event_id: fidl_avrcp::NotificationEvent, current: Notification, pos_change_interval: u32, responder: fidl_avrcp::TargetHandlerWatchNotificationResponder, ) -> Result<(), fidl::Error> { let mut write = self.inner.write(); write.register_notification(event_id, current, pos_change_interval, responder) } async fn watch_media_sessions( discovery: DiscoveryProxy, mut watcher_requests: SessionsWatcherRequestStream, sessions_inner: Arc<RwLock<MediaSessionsInner>>, ) -> Result<(), anyhow::Error> { while let Some(req) = watcher_requests.try_next().await.expect("Failed to serve Watcher service") { match req { SessionsWatcherRequest::SessionUpdated { session_id: id, session_info_delta: delta, responder, } => { responder.send()?; fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta); // Since we are only listening to active sessions, update the currently // active media session id every time a watcher event is triggered. // This means AVRCP commands will be queried/set to the player that has most // recently changed in status. 
sessions_inner.write().update_active_session_id(Some(id.clone())); // If this is our first time receiving updates from this MediaPlayer, create // a session control proxy and connect to the session. sessions_inner.write().create_or_update_session( discovery.clone(), id.clone(), delta, &create_session_control_proxy, )?; fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner); } SessionsWatcherRequest::SessionRemoved { session_id, responder } => { // A media session with id `session_id` has been removed. responder.send()?; // Clear any outstanding notifications with a player changed response. // Clear the currently active session, if it equals `session_id`. // Clear entry in state map. sessions_inner.write().clear_session(&session_id); fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner); } } } Ok(()) } } #[derive(Debug)] pub(crate) struct MediaSessionsInner { // The currently active MediaSession id. // If present, the `active_session_id` should be present in `map`. active_session_id: Option<u64>, // The map of ids to the respective media session. map: HashMap<u64, MediaState>, // The map of outstanding notifications. notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>, } impl MediaSessionsInner { pub fn new() -> Self { Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() } } pub fn get_active_session(&self) -> Option<MediaState> { self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned()) } /// TODO(41703): Add TRACK_POS_CHANGED when implemented. pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> { vec![ fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged, fidl_avrcp::NotificationEvent::PlaybackStatusChanged, fidl_avrcp::NotificationEvent::TrackChanged, ] } /// Removes the MediaState specified by `id` from the map, should it exist. /// If the session was currently active, clears `self.active_session_id`. /// Returns the removed MediaState. pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> { if Some(id) == self.active_session_id.as_ref() { self.update_active_session_id(None); } self.map.remove(id) } /// Clears all outstanding notifications with an AddressedPlayerChanged error. /// See `crate::types::update_responder` for more details. pub fn clear_notification_responders(&mut self) { for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() { if let Err(e) = notif_data.update_responder( &fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID. Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged), ) { fx_log_warn!("There was an error clearing the responder: {:?}", e); } } fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications); } /// Updates the active session with the new session specified by `id`. /// Clear all outstanding notifications, if the active session has changed. /// If the updated active session_id has changed, return old active id. pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> { if self.active_session_id != id { self.clear_notification_responders(); let previous_active_session_id = self.active_session_id.take(); self.active_session_id = id; previous_active_session_id } else { None } } /// If an active session is present, update any outstanding notifications by /// checking if notification values have changed. 
/// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED. pub fn update_notification_responders(&mut self) { let state = if let Some(state) = self.get_active_session()
else { return; }; self.notifications = self .notifications .drain() .map(|(event_id, queue)| { let curr_value = state.session_info().get_notification_value(&event_id); ( event_id, queue .into_iter() .filter_map(|notif_data| { notif_data .update_responder(&event_id, curr_value.clone()) .unwrap_or(None) }) .collect(), ) }) .collect(); fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications); } /// If the entry, `id` doesn't exist in the map, create a `MediaState` entry /// when the control proxy. /// Update the state with the delta. /// Update any outstanding notification responders with the change in state. pub fn create_or_update_session<F>( &mut self, discovery: DiscoveryProxy, id: u64, delta: SessionInfoDelta, create_fn: F, ) -> Result<(), Error> where F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>, { self.map .entry(id) .or_insert({ let session_proxy = create_fn(discovery, id)?; MediaState::new(session_proxy) }) .update_session_info(delta); self.update_notification_responders(); Ok(()) } /// Given a notification `event_id`: /// 1) insert it into the notifications map. /// 2) If the queue for `event_id` is full, evict the oldest responder and respond /// with the current value. /// 3) Update any outstanding notification responders with any changes in state. pub fn register_notification( &mut self, event_id: fidl_avrcp::NotificationEvent, current: Notification, pos_change_interval: u32, responder: fidl_avrcp::TargetHandlerWatchNotificationResponder, ) -> Result<(), fidl::Error> { // If the `event_id` is not supported, reject the registration. if !self.get_supported_notification_events().contains(&event_id) { return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter)); } let data = NotificationData::new(current, pos_change_interval, responder); let _evicted = self .notifications .entry(event_id) .or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE)) .insert(data); // Notify the evicted responder that the TG has removed it from the active list of responders. // Reply with the current value of the notification. // This will happen automatically, when `_evicted` is dropped. // Update outstanding responders with potentially new session data. self.update_notification_responders(); Ok(()) } } /// Creates a session control proxy from the Discovery protocol and connects to /// the session specified by `id`. 
fn create_session_control_proxy( discovery: DiscoveryProxy, id: u64, ) -> Result<SessionControlProxy, Error> { let (session_proxy, session_request_stream) = create_proxy()?; discovery.connect_to_session(id, session_request_stream)?; Ok(session_proxy) } #[cfg(test)] mod tests { use super::*; use crate::media::media_types::ValidPlayerApplicationSettings; use fidl::encoding::Decodable as FidlDecodable; use fidl::endpoints::create_proxy; use fidl_fuchsia_media::{self as fidl_media_types}; use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker}; use fuchsia_async as fasync; fn create_metadata() -> fidl_media_types::Metadata { let mut metadata = fidl_media_types::Metadata::new_empty(); let mut property1 = fidl_media_types::Property::new_empty(); property1.label = fidl_media_types::METADATA_LABEL_TITLE.to_string(); let sample_title = "This is a sample title".to_string(); property1.value = sample_title.clone(); metadata.properties = vec![property1]; metadata } fn create_player_status() -> fidl_media::PlayerStatus { let mut player_status = fidl_media::PlayerStatus::new_empty(); let mut timeline_fn = fidl_media_types::TimelineFunction::new_empty(); // Playback started at beginning of media. timeline_fn.subject_time = 0; // Monotonic clock time at beginning of media (nanos). timeline_fn.reference_time = 500000000; // Playback rate = 1, normal playback. timeline_fn.subject_delta = 1; timeline_fn.reference_delta = 1; player_status.player_state = Some(fidl_media::PlayerState::Playing); player_status.duration = Some(123456789); player_status.shuffle_on = Some(true); player_status.timeline_function = Some(timeline_fn); player_status } #[test] /// Test that retrieving a notification value correctly gets the current state. /// 1) Query with an unsupported `event_id`. /// 2) Query with a supported Event ID, with default state. /// 3) Query with all supported Event IDs. fn test_get_notification_value() { let exec = fasync::Executor::new_with_fake_time().expect("executor should build"); exec.set_fake_time(fasync::Time::from_nanos(555555555)); let media_sessions = MediaSessionsInner::new(); let (session_proxy, _) = create_proxy::<SessionControlMarker>().expect("Couldn't create fidl proxy."); let mut media_state = MediaState::new(session_proxy); // 1. Unsupported ID. let unsupported_id = fidl_avrcp::NotificationEvent::BattStatusChanged; let res = media_state.session_info().get_notification_value(&unsupported_id); assert!(res.is_err()); // 2. Supported ID, `media_state` contains default values. let res = media_state .session_info() .get_notification_value(&fidl_avrcp::NotificationEvent::PlaybackStatusChanged); assert_eq!(res.expect("Should be ok").status, Some(fidl_avrcp::PlaybackStatus::Stopped)); let res = media_state .session_info() .get_notification_value(&fidl_avrcp::NotificationEvent::TrackChanged); assert_eq!(res.expect("Should be ok").track_id, Some(std::u64::MAX)); // 3. 
exec.set_fake_time(fasync::Time::from_nanos(555555555)); let mut info = fidl_media::SessionInfoDelta::new_empty(); info.metadata = Some(create_metadata()); info.player_status = Some(create_player_status()); media_state.update_session_info(info); let expected_play_status = fidl_avrcp::PlaybackStatus::Playing; let expected_pas = ValidPlayerApplicationSettings::new( None, Some(fidl_avrcp::RepeatStatusMode::Off), Some(fidl_avrcp::ShuffleMode::AllTrackShuffle), None, ); // Supported = PAS, Playback, Track, TrackPos let valid_events = media_sessions.get_supported_notification_events(); let expected_values: Vec<Notification> = vec![ Notification::new(None, None, None, Some(expected_pas), None, None, None), Notification::new(Some(expected_play_status), None, None, None, None, None, None), Notification::new(None, Some(0), None, None, None, None, None), Notification::new(None, None, Some(55), None, None, None, None), ]; for (event_id, expected_v) in valid_events.iter().zip(expected_values.iter()) { assert_eq!( media_state.session_info().get_notification_value(&event_id).expect("Should work"), expected_v.clone() ); } } #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests registering a notification works as expected. /// 1. Normal case, insertion of a supported notification. /// 2. Normal case, insertion of a supported notification, with eviction. /// 3. Normal case, insertion of a supported notification, with change in state, /// so that `update_notification_responders()` correctly updates inserted notif. /// 3. Error case, insertion of an unsupported notification. fn test_register_notification() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests insertion/updating of a new MediaSession into the state map. /// 1. Test branch where MediaSession already exists, so this is just an update. /// 2. Test branch where MediaSession doesn't exist, creates a new session and updates it. fn test_create_or_update_session() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests that updating any outstanding responders behaves as expected. fn test_update_notification_responders() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests updating the active session_id correctly changes the currently /// playing active media session, as well as clears any outstanding notifications /// if a new MediaSession becomes the active session. fn test_update_active_session_id() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests sending PlayerChanged response to all outstanding responders behaves /// as expected, and removes all entries in the Notifications map. fn test_clear_notification_responders() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests removing a session from the map. /// Tests clear_session clears all notifications if the MediaSession is the currently /// active session. fn test_clear_session() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests clearing the active session_id. fn test_clear_active_session_id() {} }
{ state }
conditional_block
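`update_notification_responders` in this record uses a drain-and-rebuild pattern: it moves every `(event_id, queue)` pair out of the notifications map, computes the current value once per event, filters each queue down to the responders that still need to wait, and collects the result back into the map. Below is a minimal stand-alone sketch of the same pattern, with plain integers in place of `NotificationData` and a simple predicate in place of `update_responder`; the real code drains in place because it only holds `&mut self`:

use std::collections::HashMap;

// Drain-and-rebuild: recompute a "current value" and keep only the
// queue entries that still need to wait (here: values >= current).
fn rebuild(map: HashMap<u8, Vec<i32>>, current: i32) -> HashMap<u8, Vec<i32>> {
    map.into_iter()
        .map(|(key, queue)| {
            let kept: Vec<i32> =
                queue.into_iter().filter(|v| *v >= current).collect();
            (key, kept)
        })
        .collect()
}

fn main() {
    let mut map = HashMap::new();
    map.insert(1u8, vec![1, 5, 10]);
    let rebuilt = rebuild(map, 5);
    // Entries below the current value were "responded to" and dropped.
    assert_eq!(rebuilt[&1], vec![5, 10]);
}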
media_sessions.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { anyhow::{format_err, Error}, fidl::encoding::Decodable as FidlDecodable, fidl::endpoints::{create_proxy, create_request_stream}, fidl_fuchsia_bluetooth_avrcp::{self as fidl_avrcp}, fidl_fuchsia_media_sessions2::{ DiscoveryMarker, DiscoveryProxy, SessionControlProxy, SessionInfoDelta, SessionsWatcherRequest, SessionsWatcherRequestStream, WatchOptions, }, fuchsia_component::client::connect_to_service, fuchsia_syslog::{fx_log_warn, fx_vlog}, futures::{Future, TryStreamExt}, parking_lot::RwLock, std::collections::HashMap, std::sync::Arc, }; use crate::media::media_state::MediaState; use crate::media::media_types::Notification; use crate::types::{ bounded_queue::BoundedQueue, NotificationData, MAX_NOTIFICATION_EVENT_QUEUE_SIZE, }; #[derive(Debug, Clone)] pub(crate) struct MediaSessions { inner: Arc<RwLock<MediaSessionsInner>>, } impl MediaSessions { pub fn create() -> Self { Self { inner: Arc::new(RwLock::new(MediaSessionsInner::new())) } } // Returns a future that watches MediaPlayer for updates. pub fn watch(&self) -> impl Future<Output = Result<(), anyhow::Error>> { // MediaSession Service Setup // Set up the MediaSession Discovery service. Connect to the session watcher. let discovery = connect_to_service::<DiscoveryMarker>() .expect("Couldn't connect to discovery service."); let (watcher_client, watcher_requests) = create_request_stream().expect("Error creating watcher request stream"); // Only subscribe to updates from players that are active. let watch_active_options = WatchOptions { only_active: Some(true), ..WatchOptions::new_empty() }; discovery .watch_sessions(watch_active_options, watcher_client) .expect("Should watch media sessions"); // End MediaSession Service Setup let inner = self.inner.clone(); Self::watch_media_sessions(discovery, watcher_requests, inner) } pub fn get_active_session(&self) -> Result<MediaState, Error> { let r_inner = self.inner.read().get_active_session(); r_inner.ok_or(format_err!("No active player")) } pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> { self.inner.read().get_supported_notification_events() } pub fn register_notification( &self, event_id: fidl_avrcp::NotificationEvent, current: Notification, pos_change_interval: u32, responder: fidl_avrcp::TargetHandlerWatchNotificationResponder, ) -> Result<(), fidl::Error> { let mut write = self.inner.write(); write.register_notification(event_id, current, pos_change_interval, responder) } async fn watch_media_sessions( discovery: DiscoveryProxy, mut watcher_requests: SessionsWatcherRequestStream, sessions_inner: Arc<RwLock<MediaSessionsInner>>, ) -> Result<(), anyhow::Error> { while let Some(req) = watcher_requests.try_next().await.expect("Failed to serve Watcher service") { match req { SessionsWatcherRequest::SessionUpdated { session_id: id, session_info_delta: delta, responder, } => { responder.send()?; fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta); // Since we are only listening to active sessions, update the currently // active media session id every time a watcher event is triggered. // This means AVRCP commands will be queried/set to the player that has most // recently changed in status. 
sessions_inner.write().update_active_session_id(Some(id.clone())); // If this is our first time receiving updates from this MediaPlayer, create // a session control proxy and connect to the session. sessions_inner.write().create_or_update_session( discovery.clone(), id.clone(), delta, &create_session_control_proxy, )?; fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner); } SessionsWatcherRequest::SessionRemoved { session_id, responder } => { // A media session with id `session_id` has been removed. responder.send()?; // Clear any outstanding notifications with a player changed response. // Clear the currently active session, if it equals `session_id`. // Clear entry in state map. sessions_inner.write().clear_session(&session_id); fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner); } } } Ok(()) } } #[derive(Debug)] pub(crate) struct MediaSessionsInner { // The currently active MediaSession id. // If present, the `active_session_id` should be present in `map`. active_session_id: Option<u64>, // The map of ids to the respective media session. map: HashMap<u64, MediaState>, // The map of outstanding notifications. notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>, } impl MediaSessionsInner { pub fn new() -> Self { Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() } } pub fn get_active_session(&self) -> Option<MediaState> { self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned()) } /// TODO(41703): Add TRACK_POS_CHANGED when implemented. pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> { vec![ fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged, fidl_avrcp::NotificationEvent::PlaybackStatusChanged, fidl_avrcp::NotificationEvent::TrackChanged, ] } /// Removes the MediaState specified by `id` from the map, should it exist. /// If the session was currently active, clears `self.active_session_id`. /// Returns the removed MediaState. pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> { if Some(id) == self.active_session_id.as_ref() { self.update_active_session_id(None); } self.map.remove(id) } /// Clears all outstanding notifications with an AddressedPlayerChanged error. /// See `crate::types::update_responder` for more details. pub fn clear_notification_responders(&mut self) { for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() { if let Err(e) = notif_data.update_responder( &fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID. Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged), ) { fx_log_warn!("There was an error clearing the responder: {:?}", e); } } fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications); } /// Updates the active session with the new session specified by `id`. /// Clear all outstanding notifications, if the active session has changed. /// If the updated active session_id has changed, return old active id. pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> { if self.active_session_id != id { self.clear_notification_responders(); let previous_active_session_id = self.active_session_id.take(); self.active_session_id = id; previous_active_session_id } else { None } } /// If an active session is present, update any outstanding notifications by /// checking if notification values have changed. 
/// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED. pub fn update_notification_responders(&mut self) { let state = if let Some(state) = self.get_active_session() { state.clone() } else { return; }; self.notifications = self .notifications .drain() .map(|(event_id, queue)| { let curr_value = state.session_info().get_notification_value(&event_id); ( event_id, queue .into_iter() .filter_map(|notif_data| { notif_data .update_responder(&event_id, curr_value.clone()) .unwrap_or(None) }) .collect(), ) }) .collect(); fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications); } /// If the entry, `id` doesn't exist in the map, create a `MediaState` entry /// when the control proxy. /// Update the state with the delta. /// Update any outstanding notification responders with the change in state. pub fn create_or_update_session<F>( &mut self, discovery: DiscoveryProxy, id: u64, delta: SessionInfoDelta, create_fn: F, ) -> Result<(), Error> where F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>, { self.map .entry(id) .or_insert({ let session_proxy = create_fn(discovery, id)?; MediaState::new(session_proxy) }) .update_session_info(delta); self.update_notification_responders(); Ok(()) } /// Given a notification `event_id`: /// 1) insert it into the notifications map. /// 2) If the queue for `event_id` is full, evict the oldest responder and respond /// with the current value. /// 3) Update any outstanding notification responders with any changes in state. pub fn register_notification( &mut self, event_id: fidl_avrcp::NotificationEvent, current: Notification, pos_change_interval: u32, responder: fidl_avrcp::TargetHandlerWatchNotificationResponder, ) -> Result<(), fidl::Error> { // If the `event_id` is not supported, reject the registration. if !self.get_supported_notification_events().contains(&event_id) { return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter)); } let data = NotificationData::new(current, pos_change_interval, responder); let _evicted = self .notifications .entry(event_id) .or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE)) .insert(data); // Notify the evicted responder that the TG has removed it from the active list of responders. // Reply with the current value of the notification. // This will happen automatically, when `_evicted` is dropped. // Update outstanding responders with potentially new session data. self.update_notification_responders(); Ok(()) } } /// Creates a session control proxy from the Discovery protocol and connects to /// the session specified by `id`. 
fn create_session_control_proxy( discovery: DiscoveryProxy, id: u64, ) -> Result<SessionControlProxy, Error> { let (session_proxy, session_request_stream) = create_proxy()?; discovery.connect_to_session(id, session_request_stream)?; Ok(session_proxy) } #[cfg(test)] mod tests { use super::*; use crate::media::media_types::ValidPlayerApplicationSettings; use fidl::encoding::Decodable as FidlDecodable; use fidl::endpoints::create_proxy; use fidl_fuchsia_media::{self as fidl_media_types}; use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker}; use fuchsia_async as fasync; fn create_metadata() -> fidl_media_types::Metadata { let mut metadata = fidl_media_types::Metadata::new_empty(); let mut property1 = fidl_media_types::Property::new_empty(); property1.label = fidl_media_types::METADATA_LABEL_TITLE.to_string(); let sample_title = "This is a sample title".to_string(); property1.value = sample_title.clone(); metadata.properties = vec![property1]; metadata } fn create_player_status() -> fidl_media::PlayerStatus { let mut player_status = fidl_media::PlayerStatus::new_empty(); let mut timeline_fn = fidl_media_types::TimelineFunction::new_empty(); // Playback started at beginning of media. timeline_fn.subject_time = 0; // Monotonic clock time at beginning of media (nanos). timeline_fn.reference_time = 500000000; // Playback rate = 1, normal playback. timeline_fn.subject_delta = 1; timeline_fn.reference_delta = 1; player_status.player_state = Some(fidl_media::PlayerState::Playing); player_status.duration = Some(123456789); player_status.shuffle_on = Some(true); player_status.timeline_function = Some(timeline_fn); player_status } #[test] /// Test that retrieving a notification value correctly gets the current state. /// 1) Query with an unsupported `event_id`. /// 2) Query with a supported Event ID, with default state. /// 3) Query with all supported Event IDs. fn test_get_notification_value() { let exec = fasync::Executor::new_with_fake_time().expect("executor should build"); exec.set_fake_time(fasync::Time::from_nanos(555555555)); let media_sessions = MediaSessionsInner::new(); let (session_proxy, _) = create_proxy::<SessionControlMarker>().expect("Couldn't create fidl proxy."); let mut media_state = MediaState::new(session_proxy); // 1. Unsupported ID. let unsupported_id = fidl_avrcp::NotificationEvent::BattStatusChanged; let res = media_state.session_info().get_notification_value(&unsupported_id); assert!(res.is_err()); // 2. Supported ID, `media_state` contains default values. let res = media_state .session_info() .get_notification_value(&fidl_avrcp::NotificationEvent::PlaybackStatusChanged); assert_eq!(res.expect("Should be ok").status, Some(fidl_avrcp::PlaybackStatus::Stopped)); let res = media_state .session_info() .get_notification_value(&fidl_avrcp::NotificationEvent::TrackChanged); assert_eq!(res.expect("Should be ok").track_id, Some(std::u64::MAX)); // 3. 
exec.set_fake_time(fasync::Time::from_nanos(555555555)); let mut info = fidl_media::SessionInfoDelta::new_empty(); info.metadata = Some(create_metadata()); info.player_status = Some(create_player_status()); media_state.update_session_info(info); let expected_play_status = fidl_avrcp::PlaybackStatus::Playing; let expected_pas = ValidPlayerApplicationSettings::new( None, Some(fidl_avrcp::RepeatStatusMode::Off), Some(fidl_avrcp::ShuffleMode::AllTrackShuffle), None, ); // Supported = PAS, Playback, Track, TrackPos let valid_events = media_sessions.get_supported_notification_events(); let expected_values: Vec<Notification> = vec![ Notification::new(None, None, None, Some(expected_pas), None, None, None), Notification::new(Some(expected_play_status), None, None, None, None, None, None), Notification::new(None, Some(0), None, None, None, None, None), Notification::new(None, None, Some(55), None, None, None, None), ]; for (event_id, expected_v) in valid_events.iter().zip(expected_values.iter()) { assert_eq!( media_state.session_info().get_notification_value(&event_id).expect("Should work"), expected_v.clone() ); } } #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests registering a notification works as expected. /// 1. Normal case, insertion of a supported notification. /// 2. Normal case, insertion of a supported notification, with eviction. /// 3. Normal case, insertion of a supported notification, with change in state, /// so that `update_notification_responders()` correctly updates inserted notif. /// 3. Error case, insertion of an unsupported notification. fn test_register_notification() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests insertion/updating of a new MediaSession into the state map. /// 1. Test branch where MediaSession already exists, so this is just an update. /// 2. Test branch where MediaSession doesn't exist, creates a new session and updates it. fn test_create_or_update_session() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests that updating any outstanding responders behaves as expected. fn test_update_notification_responders() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests updating the active session_id correctly changes the currently /// playing active media session, as well as clears any outstanding notifications /// if a new MediaSession becomes the active session. fn test_update_active_session_id() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests sending PlayerChanged response to all outstanding responders behaves /// as expected, and removes all entries in the Notifications map. fn test_clear_notification_responders() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests removing a session from the map. /// Tests clear_session clears all notifications if the MediaSession is the currently /// active session. fn test_clear_session() {} #[test] // TODO(42623): Implement this test as part of integration test work. /// Tests clearing the active session_id. fn test_clear_active_session_id()
}
{}
identifier_body
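One subtlety in `create_or_update_session` above: `HashMap::entry(id).or_insert(block)` evaluates its argument eagerly, so the proxy-creating block runs even when the session already exists in the map (the lazily evaluated variant is `or_insert_with`, though the `?` inside the block makes a closure awkward here). A small sketch of the eager-versus-lazy distinction using std's `HashMap`, with a call counter to make the difference observable:

use std::cell::Cell;
use std::collections::HashMap;

fn main() {
    let calls = Cell::new(0u32);
    let make = || {
        calls.set(calls.get() + 1);
        String::from("new")
    };

    let mut map: HashMap<u32, String> = HashMap::new();
    map.insert(1, String::from("existing"));

    // `or_insert(expr)` evaluates `expr` even though key 1 is occupied.
    map.entry(1).or_insert(make());
    assert_eq!(calls.get(), 1);

    // `or_insert_with` runs the closure only for a vacant entry.
    map.entry(1).or_insert_with(&make);
    assert_eq!(calls.get(), 1); // not called: key 1 exists

    map.entry(2).or_insert_with(&make);
    assert_eq!(calls.get(), 2); // called once for the vacant key
}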
zhtta.rs
// // zhtta.rs // // Starting code for PA3 // Revised to run on Rust 1.0.0 nightly - built 02-21 // // Note that this code has serious security risks! You should not run it // on any system with access to sensitive files. // // Brandeis University - cs146a Spring 2015 // Dimokritos Stamatakis and Brionne Godby // Version 1.0 // To see debug! outputs set the RUST_LOG environment variable, e.g.: export RUST_LOG="zhtta=debug" #![feature(rustc_private)] #![feature(libc)] #![feature(io)] #![feature(old_io)] #![feature(old_path)] #![feature(os)] #![feature(core)] #![feature(collections)] #![feature(std_misc)] #![allow(non_camel_case_types)] #![allow(unused_must_use)] #![allow(deprecated)] #[macro_use] extern crate log; extern crate libc; use std::io::*; use std::old_io::File; use std::{os, str}; use std::process::{Command, Stdio}; use std::old_path::posix::Path; use std::collections::hash_map::HashMap; use std::borrow::ToOwned; use std::thread::Thread; use std::old_io::fs::PathExtensions; use std::old_io::{Acceptor, Listener}; use std::old_io::BufferedReader; extern crate getopts; use getopts::{optopt, getopts}; use std::sync::RwLock; use std::sync::{Arc, Mutex,Semaphore}; use std::sync::mpsc::{Sender, Receiver}; use std::sync::mpsc::channel; static SERVER_NAME : &'static str = "Zhtta Version 1.0"; static CacheLowerBounder :u64 =1024;//1K static CacheUpperBounder :u64 =104857600; //100M static IP : &'static str = "127.0.0.1"; static PORT : usize = 4414; static WWW_DIR : &'static str = "./www"; static HTTP_OK : &'static str = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"; static HTTP_BAD : &'static str = "HTTP/1.1 404 Not Found\r\n\r\n"; static COUNTER_STYLE : &'static str = "<doctype !html><html><head><title>Hello, Rust!</title> <style>body {background-color: #884414; color: #FFEEAA} h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red } h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green } </style></head> <body>"; //static mut visitor_count : usize = 0; struct HTTP_Request { // Use peer_name as the key to access TcpStream in hashmap. // (Due to a bug in extra::arc in Rust 0.9, it is very inconvenient to use TcpStream without the "Freeze" bound. 
// See issue: https://github.com/mozilla/rust/issues/12139) peer_name: String, path: Path, } struct WebServer { ip: String, port: usize, www_dir_path: Path, request_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>,//it is a hash map,store the http stream for each ip visitor_count : Arc<Mutex<usize>>, thread_sema : Arc<Semaphore>, cache: Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,//cache content, counter for LRU, modified time cache_len: Arc<Mutex<usize>>, notify_rx: Receiver<()>, notify_tx: Sender<()>, } impl WebServer { fn new(ip: String, port: usize, www_dir: String) -> WebServer { let (notify_tx, notify_rx) = channel(); let www_dir_path = Path::new(www_dir); os::change_dir(&www_dir_path); WebServer { ip:ip, port: port, www_dir_path: www_dir_path, request_queue_arc: Arc::new(Mutex::new(Vec::new())), stream_map_arc: Arc::new(Mutex::new(HashMap::new())), visitor_count:Arc::new(Mutex::new(0)), thread_sema: Arc::new(Semaphore::new(5)), cache: Arc::new(RwLock::new(HashMap::new())), cache_len: Arc::new(Mutex::new(0)), notify_rx: notify_rx, notify_tx: notify_tx, } } fn run(&mut self) { self.listen(); self.dequeue_static_file_request(); } fn listen(&mut self) { let addr = String::from_str(format!("{}:{}", self.ip, self.port).as_slice()); let www_dir_path_str = self.www_dir_path.clone(); let request_queue_arc = self.request_queue_arc.clone(); let notify_tx = self.notify_tx.clone(); let stream_map_arc = self.stream_map_arc.clone(); let visitor_count=self.visitor_count.clone(); Thread::spawn(move|| { let listener = std::old_io::TcpListener::bind(addr.as_slice()).unwrap(); let mut acceptor = listener.listen().unwrap(); println!("{} listening on {} (serving from: {}).", SERVER_NAME, addr, www_dir_path_str.as_str().unwrap()); for stream_raw in acceptor.incoming() { //for each stream/connection let (queue_tx, queue_rx) = channel();//build up a channel for sub thread queue_tx.send(request_queue_arc.clone());//send the request queue to queue and receive it inside the son thread let notify_chan = notify_tx.clone();//notify_chan is a global channel for webserver let stream_map_arc = stream_map_arc.clone(); let visitor_count=visitor_count.clone(); println!("outer thread:{}",*visitor_count.lock().unwrap()); // Spawn a task to handle the connection. Thread::spawn(move|| { let mut vc= visitor_count.lock().unwrap(); // Done *vc+=1; println!("inner thread:{}",*vc); let request_queue_arc = queue_rx.recv().unwrap();// let mut stream = match stream_raw { Ok(s) => {s} Err(e) => { panic!("Error getting the listener stream! {}", e) } }; let peer_name = WebServer::get_peer_name(&mut stream); debug!("Got connection from {}", peer_name); let mut buf: [u8;500] = [0;500]; stream.read(&mut buf); let request_str = match str::from_utf8(&buf){ Ok(s) => s, Err(e)=> panic!("Error reading from the listener stream! 
{}", e), }; debug!("Request:\n{}", request_str); //WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan); let req_group: Vec<&str> = request_str.splitn(3, ' ').collect(); if req_group.len() > 2 { let path_str = ".".to_string() + req_group[1]; let mut path_obj = os::getcwd().unwrap(); path_obj.push(path_str.clone()); let ext_str = match path_obj.extension_str() { Some(e) => e, None => "", }; debug!("Requested path: [{}]", path_obj.as_str().expect("error")); debug!("Requested path: [{}]", path_str); if path_str.as_slice().eq("./") { debug!("===== Counter Page request ====="); WebServer::respond_with_counter_page(stream,*vc); debug!("=====Terminated connection from [{}].=====", peer_name); } else if !path_obj.exists() || path_obj.is_dir() { debug!("===== Error page request ====="); WebServer::respond_with_error_page(stream, &path_obj); debug!("=====Terminated connection from [{}].=====", peer_name); } else if ext_str == "shtml" { // Dynamic web pages. debug!("===== Dynamic Page request ====="); WebServer::respond_with_dynamic_page(stream, &path_obj); debug!("=====Terminated connection from [{}].=====", peer_name); } else { debug!("===== Static Page request ====="); if std::fs::metadata(&path_obj).unwrap().len()> CacheLowerBounder{ WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);} else{ debug!("small file, do it without enqueue!"); let mut file_reader = File::open(&path_obj).unwrap(); stream.write(HTTP_OK.as_bytes()); let mut reader = BufferedReader::new(file_reader); for line in reader.lines().filter_map(|result| result.ok()) { let _ = stream.write_all(line.as_bytes()); } } } } }); } }); } fn respond_with_error_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) { let mut stream = stream; let msg: String= format!("Cannot open: {}", path.as_str().expect("invalid path")); stream.write(HTTP_BAD.as_bytes()); stream.write(msg.as_bytes()); } // Done fn respond_with_counter_page(stream: std::old_io::net::tcp::TcpStream,visitor_count:usize)
// TODO: Streaming file. // TODO: Application-layer file caching. fn respond_with_static_file(stream: std::old_io::net::tcp::TcpStream, path: &Path,cache : Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,cache_len :Arc<Mutex<usize>>) { let mut stream = stream; let mut cache_str=String::new(); let mut counter=0; let mut local_cache=cache.clone(); let mut is_modified=false; { let metadata=std::fs::metadata(path).unwrap(); let modify_time=metadata.modified(); let read_hash=local_cache.read().unwrap(); if read_hash.contains_key(path) { let tuple=read_hash.get(path).unwrap(); let time = tuple.2; is_modified= (time !=modify_time); } } if is_modified{ debug!("It is modified, delete from cache!"); let mut write_hash=local_cache.write().unwrap(); write_hash.remove(path); } //let mut local_cache=cache.clone(); { debug!("updating counter..."); let read_hash=local_cache.read().unwrap(); for (key,value) in read_hash.iter(){ let mut counter=value.1.lock().unwrap(); *counter+=1; } if read_hash.contains_key(path){ debug!("Reading cached file:{}",path.display()); let mut pair=read_hash.get(path).unwrap(); { *pair.1.lock().unwrap()=0; } stream.write(HTTP_OK.as_bytes()); let _ = stream.write_all(pair.0.as_bytes()); return; } else{ debug!("reading from disk!"); let mut file_reader = File::open(path).unwrap(); stream.write(HTTP_OK.as_bytes()); /*let mut buf:[u8;1048576]=[0;1048576]; loop{ let size=match file_reader.read(&mut buf){ Err(why) =>0, Ok(size) =>size, }; let str_buf=String::from_utf8_lossy(&buf[0..size]); let _=stream.write(str_buf.as_bytes()); cache_str.push_str(str_buf.as_slice()); debug!("read siez:{}",size); if(size<1048576){ break; } }*/ //better solution let mut reader = BufferedReader::new(file_reader); for line in reader.lines().filter_map(|result| result.ok()) { let _ = stream.write_all(line.as_bytes()); cache_str.push_str(line.as_slice()); } } } let file_size=std::fs::metadata(path).unwrap().len(); if(file_size<CacheLowerBounder){ debug!("file size:{}, don't cache this file(too small)",file_size); return; } else if (file_size>CacheUpperBounder){ debug!("file size:{}, don't cache this file(too large)",file_size); return; } debug!("updating cache...."); { let mut write_hash=local_cache.write().unwrap(); let time=std::fs::metadata(path).unwrap().modified(); write_hash.insert(path.clone(),(cache_str,Mutex::new(0),time)); } *cache_len.lock().unwrap()+=1; { let mut write_hash=local_cache.write().unwrap(); let mut to_be_replaced : Path=Path::new("./"); if *cache_len.lock().unwrap()>5{ let mut max_num=0; //let read_hash=local_cache.write().unwrap(); let mut tmp: &Path=&Path::new("./"); for (key,value) in write_hash.iter(){ let num=*value.1.lock().unwrap(); if num>=max_num{ max_num=num; tmp=key; } } to_be_replaced=tmp.clone(); }else { return; } debug!("least recently used is:{}",to_be_replaced.display()); write_hash.remove(&to_be_replaced); } } // TODO: Server-side gashing. 
fn respond_with_dynamic_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) { //scan the shtml to find the ssl tag, extract the command line redirect the command line to //our file and serve it let mut stream = stream; let mut file =match File::open(path) { Err(why) => panic!("Coundn't open file:{}",why), Ok(file) => file, }; let mut s= String::new(); s=match file.read_to_string(){ Err(why) => panic!("Couldn't read file:{}",why), Ok(content) => content, }; let str_vec: Vec<&str>=s.split_str("<!--#exec cmd=\"").collect(); let cmd_mix:Vec<&str>=str_vec[1].split_str("\" -->").collect(); let cmd=cmd_mix[0].to_string(); let mut args =Vec::new(); args.push("-c"); args.push(&cmd); let mut gash_command= match Command::new("../main").args(&args).stdout(Stdio::capture()).spawn(){ Err(why) => panic!("Couldn't do command {}",why), Ok(cmd) => cmd, }; let mut stdout=gash_command.stdout.unwrap(); let mut output=String::new(); stdout.read_to_string(&mut output); stream.write(HTTP_OK.as_bytes()); stream.write(str_vec[0].as_bytes()); stream.write(output.as_bytes()); stream.write(cmd_mix[1].as_bytes()); //WebServer::respond_with_static_file(stream, path); } fn get_file_size(path: &Path) ->u64 { let metadata=std::fs::metadata(path).unwrap(); return metadata.len() } // TODO: Smarter Scheduling. fn enqueue_static_file_request(stream: std::old_io::net::tcp::TcpStream, path_obj: &Path, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>, req_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, notify_chan: Sender<()>) { // Save stream in hashmap for later response. let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); let (stream_tx, stream_rx) = channel(); stream_tx.send(stream); let stream = match stream_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the stream channel! {}", e), }; let local_stream_map = stream_map_arc.clone(); { // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block let mut local_stream_map = local_stream_map.lock().unwrap(); local_stream_map.insert(peer_name.clone(), stream); } // Enqueue the HTTP request. // TOCHECK: it was ~path_obj.clone(), make sure in which order are ~ and clone() executed let req = HTTP_Request { peer_name: peer_name.clone(), path: path_obj.clone() }; let (req_tx, req_rx) = channel(); req_tx.send(req); debug!("Waiting for queue mutex lock."); let local_req_queue = req_queue_arc.clone(); { // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block let mut local_req_queue = local_req_queue.lock().unwrap(); let req: HTTP_Request = match req_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the request channel! {}", e), }; //REORDER the queue in order of the request size local_req_queue.push(req); local_req_queue.sort_by(|a, b| WebServer::get_file_size(&a.path).cmp(&WebServer::get_file_size(&b.path))); debug!("A new request enqueued, now the length of queue is {}.", local_req_queue.len()); notify_chan.send(()); // Send incoming notification to responder task. } } // TODO: Smarter Scheduling. fn dequeue_static_file_request(&mut self) { let req_queue_get = self.request_queue_arc.clone(); let stream_map_get = self.stream_map_arc.clone(); // Receiver<> cannot be sent to another task. So we have to make this task as the main task that can access self.notify_rx. 
let (request_tx, request_rx) = channel(); loop { self.notify_rx.recv(); // waiting for new request enqueued. This is where the infinity loop locate { // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block let mut req_queue = req_queue_get.lock().unwrap(); if req_queue.len() > 0 { self.thread_sema.acquire(); let req = req_queue.remove(0); debug!("A new request dequeued, now the length of queue is {}.", req_queue.len()); request_tx.send(req); } } let request = match request_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the request channel! {}", e), }; // Get stream from hashmap. let (stream_tx, stream_rx) = channel(); { // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block let mut stream_map = stream_map_get.lock().unwrap(); let stream = stream_map.remove(&request.peer_name).expect("no option tcpstream"); stream_tx.send(stream); } // TODO: Spawning more tasks to respond the dequeued requests concurrently. You may need a semophore to control the concurrency. let stream = match stream_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the stream channel! {}", e), }; let sema=self.thread_sema.clone(); let cache_len=self.cache_len.clone(); let mut cache=self.cache.clone(); Thread::spawn(move||{ debug!("Processing...."); WebServer::respond_with_static_file(stream, &request.path,cache,cache_len); debug!("finishing request for{}",request.path.display()); debug!("=====Terminated connection from [{}].=====", request.peer_name); sema.release(); }); } } fn get_peer_name(stream: &mut std::old_io::net::tcp::TcpStream) -> String{ match stream.peer_name(){ Ok(s) => {format!("{}:{}", s.ip, s.port)} Err(e) => {panic!("Error while getting the stream name! {}", e)} } } } fn get_args() -> (String, usize, String) { fn print_usage(program: &str) { println!("Usage: {} [options]", program); println!("--ip \tIP address, \"{}\" by default.", IP); println!("--port \tport number, \"{}\" by default.", PORT); println!("--www \tworking directory, \"{}\" by default", WWW_DIR); println!("-h --help \tUsage"); } /* Begin processing program arguments and initiate the parameters. */ let args = os::args(); let program = args[0].clone(); let opts = [ getopts::optopt("", "ip", "The IP address to bind to", "IP"), getopts::optopt("", "port", "The Port to bind to", "PORT"), getopts::optopt("", "www", "The www directory", "WWW_DIR"), getopts::optflag("h", "help", "Display help"), ]; let matches = match getopts::getopts(args.tail(), &opts) { Ok(m) => { m } Err(f) => { panic!(f.to_err_msg()) } }; if matches.opt_present("h") || matches.opt_present("help") { print_usage(program.as_slice()); unsafe { libc::exit(1); } } let ip_str = if matches.opt_present("ip") { matches.opt_str("ip").expect("invalid ip address?").to_owned() } else { IP.to_owned() }; let port:usize = if matches.opt_present("port") { let input_port = matches.opt_str("port").expect("Invalid port number?").trim().parse::<usize>().ok(); match input_port { Some(port) => port, None => panic!("Invalid port number?"), } } else { PORT }; let www_dir_str = if matches.opt_present("www") { matches.opt_str("www").expect("invalid www argument?") } else { WWW_DIR.to_owned() }; (ip_str, port, www_dir_str) } fn main() { let (ip_str, port, www_dir_str) = get_args(); let mut zhtta = WebServer::new(ip_str, port, www_dir_str); zhtta.run(); }
{ let mut stream = stream; let response: String = format!("{}{}<h1>Greetings, Krusty!</h1><h2>Visitor count: {}</h2></body></html>\r\n", HTTP_OK, COUNTER_STYLE, visitor_count); debug!("Responding to counter request"); stream.write(response.as_bytes()); }
identifier_body
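The zhtta record above keeps its request queue in shortest-job-first order: `enqueue_static_file_request` sorts pending requests by `get_file_size`, so `dequeue_static_file_request` always pops the smallest file at index 0. A toy sketch of that ordering, where a plain `size` field stands in for the `get_file_size(&req.path)` lookup:

// Shortest-job-first ordering, as in `enqueue_static_file_request`:
// the queue is re-sorted so the smallest file is served next.
#[derive(Debug)]
struct Request {
    peer: &'static str,
    size: u64, // stand-in for WebServer::get_file_size(&req.path)
}

fn main() {
    let mut queue = vec![
        Request { peer: "a", size: 900 },
        Request { peer: "b", size: 10 },
        Request { peer: "c", size: 500 },
    ];
    queue.sort_by(|a, b| a.size.cmp(&b.size));
    // The next dequeued request (index 0) is now the smallest file.
    assert_eq!(queue[0].peer, "b");
}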
zhtta.rs
// // zhtta.rs // // Starting code for PA3 // Revised to run on Rust 1.0.0 nightly - built 02-21 // // Note that this code has serious security risks! You should not run it // on any system with access to sensitive files. // // Brandeis University - cs146a Spring 2015 // Dimokritos Stamatakis and Brionne Godby // Version 1.0 // To see debug! outputs set the RUST_LOG environment variable, e.g.: export RUST_LOG="zhtta=debug" #![feature(rustc_private)] #![feature(libc)] #![feature(io)] #![feature(old_io)] #![feature(old_path)] #![feature(os)] #![feature(core)] #![feature(collections)] #![feature(std_misc)] #![allow(non_camel_case_types)] #![allow(unused_must_use)] #![allow(deprecated)] #[macro_use] extern crate log; extern crate libc; use std::io::*; use std::old_io::File; use std::{os, str}; use std::process::{Command, Stdio}; use std::old_path::posix::Path; use std::collections::hash_map::HashMap; use std::borrow::ToOwned; use std::thread::Thread; use std::old_io::fs::PathExtensions; use std::old_io::{Acceptor, Listener}; use std::old_io::BufferedReader; extern crate getopts; use getopts::{optopt, getopts}; use std::sync::RwLock; use std::sync::{Arc, Mutex,Semaphore}; use std::sync::mpsc::{Sender, Receiver}; use std::sync::mpsc::channel; static SERVER_NAME : &'static str = "Zhtta Version 1.0"; static CacheLowerBounder :u64 =1024;//1K static CacheUpperBounder :u64 =104857600; //100M static IP : &'static str = "127.0.0.1"; static PORT : usize = 4414; static WWW_DIR : &'static str = "./www"; static HTTP_OK : &'static str = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"; static HTTP_BAD : &'static str = "HTTP/1.1 404 Not Found\r\n\r\n"; static COUNTER_STYLE : &'static str = "<doctype !html><html><head><title>Hello, Rust!</title> <style>body {background-color: #884414; color: #FFEEAA} h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red } h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green } </style></head> <body>"; //static mut visitor_count : usize = 0; struct HTTP_Request { // Use peer_name as the key to access TcpStream in hashmap. // (Due to a bug in extra::arc in Rust 0.9, it is very inconvenient to use TcpStream without the "Freeze" bound. 
// See issue: https://github.com/mozilla/rust/issues/12139) peer_name: String, path: Path, } struct WebServer { ip: String, port: usize, www_dir_path: Path, request_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>,//it is a hash map,store the http stream for each ip visitor_count : Arc<Mutex<usize>>, thread_sema : Arc<Semaphore>, cache: Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,//cache content, counter for LRU, modified time cache_len: Arc<Mutex<usize>>, notify_rx: Receiver<()>, notify_tx: Sender<()>, } impl WebServer { fn new(ip: String, port: usize, www_dir: String) -> WebServer { let (notify_tx, notify_rx) = channel(); let www_dir_path = Path::new(www_dir); os::change_dir(&www_dir_path); WebServer { ip:ip, port: port, www_dir_path: www_dir_path, request_queue_arc: Arc::new(Mutex::new(Vec::new())), stream_map_arc: Arc::new(Mutex::new(HashMap::new())), visitor_count:Arc::new(Mutex::new(0)), thread_sema: Arc::new(Semaphore::new(5)), cache: Arc::new(RwLock::new(HashMap::new())), cache_len: Arc::new(Mutex::new(0)), notify_rx: notify_rx, notify_tx: notify_tx, } } fn run(&mut self) { self.listen(); self.dequeue_static_file_request(); } fn listen(&mut self) { let addr = String::from_str(format!("{}:{}", self.ip, self.port).as_slice()); let www_dir_path_str = self.www_dir_path.clone(); let request_queue_arc = self.request_queue_arc.clone(); let notify_tx = self.notify_tx.clone(); let stream_map_arc = self.stream_map_arc.clone(); let visitor_count=self.visitor_count.clone(); Thread::spawn(move|| { let listener = std::old_io::TcpListener::bind(addr.as_slice()).unwrap(); let mut acceptor = listener.listen().unwrap(); println!("{} listening on {} (serving from: {}).", SERVER_NAME, addr, www_dir_path_str.as_str().unwrap()); for stream_raw in acceptor.incoming() { //for each stream/connection let (queue_tx, queue_rx) = channel();//build up a channel for sub thread queue_tx.send(request_queue_arc.clone());//send the request queue to queue and receive it inside the son thread let notify_chan = notify_tx.clone();//notify_chan is a global channel for webserver let stream_map_arc = stream_map_arc.clone(); let visitor_count=visitor_count.clone(); println!("outer thread:{}",*visitor_count.lock().unwrap()); // Spawn a task to handle the connection. Thread::spawn(move|| { let mut vc= visitor_count.lock().unwrap(); // Done *vc+=1; println!("inner thread:{}",*vc); let request_queue_arc = queue_rx.recv().unwrap();// let mut stream = match stream_raw { Ok(s) => {s} Err(e) => { panic!("Error getting the listener stream! {}", e) } }; let peer_name = WebServer::get_peer_name(&mut stream); debug!("Got connection from {}", peer_name); let mut buf: [u8;500] = [0;500]; stream.read(&mut buf); let request_str = match str::from_utf8(&buf){ Ok(s) => s, Err(e)=> panic!("Error reading from the listener stream! 
{}", e), }; debug!("Request:\n{}", request_str); //WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan); let req_group: Vec<&str> = request_str.splitn(3, ' ').collect(); if req_group.len() > 2 { let path_str = ".".to_string() + req_group[1]; let mut path_obj = os::getcwd().unwrap(); path_obj.push(path_str.clone()); let ext_str = match path_obj.extension_str() { Some(e) => e, None => "", }; debug!("Requested path: [{}]", path_obj.as_str().expect("error")); debug!("Requested path: [{}]", path_str); if path_str.as_slice().eq("./") { debug!("===== Counter Page request ====="); WebServer::respond_with_counter_page(stream,*vc); debug!("=====Terminated connection from [{}].=====", peer_name); } else if !path_obj.exists() || path_obj.is_dir() { debug!("===== Error page request ====="); WebServer::respond_with_error_page(stream, &path_obj); debug!("=====Terminated connection from [{}].=====", peer_name); } else if ext_str == "shtml" { // Dynamic web pages. debug!("===== Dynamic Page request ====="); WebServer::respond_with_dynamic_page(stream, &path_obj); debug!("=====Terminated connection from [{}].=====", peer_name); } else { debug!("===== Static Page request ====="); if std::fs::metadata(&path_obj).unwrap().len()> CacheLowerBounder{ WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);} else{ debug!("small file, do it without enqueue!"); let mut file_reader = File::open(&path_obj).unwrap(); stream.write(HTTP_OK.as_bytes()); let mut reader = BufferedReader::new(file_reader); for line in reader.lines().filter_map(|result| result.ok()) { let _ = stream.write_all(line.as_bytes()); }
}); } }); } fn respond_with_error_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) { let mut stream = stream; let msg: String= format!("Cannot open: {}", path.as_str().expect("invalid path")); stream.write(HTTP_BAD.as_bytes()); stream.write(msg.as_bytes()); } // Done fn respond_with_counter_page(stream: std::old_io::net::tcp::TcpStream,visitor_count:usize) { let mut stream = stream; let response: String = format!("{}{}<h1>Greetings, Krusty!</h1><h2>Visitor count: {}</h2></body></html>\r\n", HTTP_OK, COUNTER_STYLE, unsafe { visitor_count } ); debug!("Responding to counter request"); stream.write(response.as_bytes()); } // TODO: Streaming file. // TODO: Application-layer file caching. fn respond_with_static_file(stream: std::old_io::net::tcp::TcpStream, path: &Path,cache : Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,cache_len :Arc<Mutex<usize>>) { let mut stream = stream; let mut cache_str=String::new(); let mut counter=0; let mut local_cache=cache.clone(); let mut is_modified=false; { let metadata=std::fs::metadata(path).unwrap(); let modify_time=metadata.modified(); let read_hash=local_cache.read().unwrap(); if read_hash.contains_key(path) { let tuple=read_hash.get(path).unwrap(); let time = tuple.2; is_modified= (time !=modify_time); } } if is_modified{ debug!("It is modified, delete from cache!"); let mut write_hash=local_cache.write().unwrap(); write_hash.remove(path); } //let mut local_cache=cache.clone(); { debug!("updating counter..."); let read_hash=local_cache.read().unwrap(); for (key,value) in read_hash.iter(){ let mut counter=value.1.lock().unwrap(); *counter+=1; } if read_hash.contains_key(path){ debug!("Reading cached file:{}",path.display()); let mut pair=read_hash.get(path).unwrap(); { *pair.1.lock().unwrap()=0; } stream.write(HTTP_OK.as_bytes()); let _ = stream.write_all(pair.0.as_bytes()); return; } else{ debug!("reading from disk!"); let mut file_reader = File::open(path).unwrap(); stream.write(HTTP_OK.as_bytes()); /*let mut buf:[u8;1048576]=[0;1048576]; loop{ let size=match file_reader.read(&mut buf){ Err(why) =>0, Ok(size) =>size, }; let str_buf=String::from_utf8_lossy(&buf[0..size]); let _=stream.write(str_buf.as_bytes()); cache_str.push_str(str_buf.as_slice()); debug!("read siez:{}",size); if(size<1048576){ break; } }*/ //better solution let mut reader = BufferedReader::new(file_reader); for line in reader.lines().filter_map(|result| result.ok()) { let _ = stream.write_all(line.as_bytes()); cache_str.push_str(line.as_slice()); } } } let file_size=std::fs::metadata(path).unwrap().len(); if(file_size<CacheLowerBounder){ debug!("file size:{}, don't cache this file(too small)",file_size); return; } else if (file_size>CacheUpperBounder){ debug!("file size:{}, don't cache this file(too large)",file_size); return; } debug!("updating cache...."); { let mut write_hash=local_cache.write().unwrap(); let time=std::fs::metadata(path).unwrap().modified(); write_hash.insert(path.clone(),(cache_str,Mutex::new(0),time)); } *cache_len.lock().unwrap()+=1; { let mut write_hash=local_cache.write().unwrap(); let mut to_be_replaced : Path=Path::new("./"); if *cache_len.lock().unwrap()>5{ let mut max_num=0; //let read_hash=local_cache.write().unwrap(); let mut tmp: &Path=&Path::new("./"); for (key,value) in write_hash.iter(){ let num=*value.1.lock().unwrap(); if num>=max_num{ max_num=num; tmp=key; } } to_be_replaced=tmp.clone(); }else { return; } debug!("least recently used is:{}",to_be_replaced.display()); write_hash.remove(&to_be_replaced); } } // TODO: Server-side 
gashing. fn respond_with_dynamic_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) { // Scan the .shtml for the SSI exec tag, extract the embedded command, run it through our gash shell, and splice its output into the page. let mut stream = stream; let mut file =match File::open(path) { Err(why) => panic!("Couldn't open file: {}",why), Ok(file) => file, }; let mut s= String::new(); s=match file.read_to_string(){ Err(why) => panic!("Couldn't read file: {}",why), Ok(content) => content, }; let str_vec: Vec<&str>=s.split_str("<!--#exec cmd=\"").collect(); let cmd_mix:Vec<&str>=str_vec[1].split_str("\" -->").collect(); let cmd=cmd_mix[0].to_string(); let mut args =Vec::new(); args.push("-c"); args.push(&cmd); let mut gash_command= match Command::new("../main").args(&args).stdout(Stdio::capture()).spawn(){ Err(why) => panic!("Couldn't run command: {}",why), Ok(cmd) => cmd, }; let mut stdout=gash_command.stdout.unwrap(); let mut output=String::new(); stdout.read_to_string(&mut output); stream.write(HTTP_OK.as_bytes()); stream.write(str_vec[0].as_bytes()); stream.write(output.as_bytes()); stream.write(cmd_mix[1].as_bytes()); //WebServer::respond_with_static_file(stream, path); } fn get_file_size(path: &Path) ->u64 { let metadata=std::fs::metadata(path).unwrap(); return metadata.len() } // TODO: Smarter Scheduling. fn enqueue_static_file_request(stream: std::old_io::net::tcp::TcpStream, path_obj: &Path, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>, req_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, notify_chan: Sender<()>) { // Save stream in hashmap for later response. let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); let (stream_tx, stream_rx) = channel(); stream_tx.send(stream); let stream = match stream_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the stream channel! {}", e), }; let local_stream_map = stream_map_arc.clone(); { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut local_stream_map = local_stream_map.lock().unwrap(); local_stream_map.insert(peer_name.clone(), stream); } // Enqueue the HTTP request. // TOCHECK: it was ~path_obj.clone(); make sure of the order in which ~ and clone() are applied let req = HTTP_Request { peer_name: peer_name.clone(), path: path_obj.clone() }; let (req_tx, req_rx) = channel(); req_tx.send(req); debug!("Waiting for queue mutex lock."); let local_req_queue = req_queue_arc.clone(); { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut local_req_queue = local_req_queue.lock().unwrap(); let req: HTTP_Request = match req_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the request channel! {}", e), }; // Reorder the queue by requested file size: shortest job first. local_req_queue.push(req); local_req_queue.sort_by(|a, b| WebServer::get_file_size(&a.path).cmp(&WebServer::get_file_size(&b.path))); debug!("A new request enqueued, now the length of queue is {}.", local_req_queue.len()); notify_chan.send(()); // Send incoming notification to responder task. } } // TODO: Smarter Scheduling. fn dequeue_static_file_request(&mut self) { let req_queue_get = self.request_queue_arc.clone(); let stream_map_get = self.stream_map_arc.clone(); // Receiver<> cannot be sent to another task, so this must remain the main task, which can access self.notify_rx.
let (request_tx, request_rx) = channel(); loop { self.notify_rx.recv(); // Wait for a new request to be enqueued; this is where the infinite loop blocks. { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut req_queue = req_queue_get.lock().unwrap(); if req_queue.len() > 0 { self.thread_sema.acquire(); let req = req_queue.remove(0); debug!("A new request dequeued, now the length of queue is {}.", req_queue.len()); request_tx.send(req); } } let request = match request_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the request channel! {}", e), }; // Get stream from hashmap. let (stream_tx, stream_rx) = channel(); { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut stream_map = stream_map_get.lock().unwrap(); let stream = stream_map.remove(&request.peer_name).expect("no TcpStream found for peer"); stream_tx.send(stream); } // TODO: Spawn more tasks to respond to dequeued requests concurrently. You may need a semaphore to control the concurrency. let stream = match stream_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the stream channel! {}", e), }; let sema=self.thread_sema.clone(); let cache_len=self.cache_len.clone(); let mut cache=self.cache.clone(); Thread::spawn(move||{ debug!("Processing...."); WebServer::respond_with_static_file(stream, &request.path,cache,cache_len); debug!("Finishing request for {}", request.path.display()); debug!("=====Terminated connection from [{}].=====", request.peer_name); sema.release(); }); } } fn get_peer_name(stream: &mut std::old_io::net::tcp::TcpStream) -> String{ match stream.peer_name(){ Ok(s) => {format!("{}:{}", s.ip, s.port)} Err(e) => {panic!("Error while getting the stream name! {}", e)} } } } fn get_args() -> (String, usize, String) { fn print_usage(program: &str) { println!("Usage: {} [options]", program); println!("--ip \tIP address, \"{}\" by default.", IP); println!("--port \tport number, \"{}\" by default.", PORT); println!("--www \tworking directory, \"{}\" by default.", WWW_DIR); println!("-h --help \tUsage"); } /* Process program arguments and initialize the parameters. */ let args = os::args(); let program = args[0].clone(); let opts = [ getopts::optopt("", "ip", "The IP address to bind to", "IP"), getopts::optopt("", "port", "The Port to bind to", "PORT"), getopts::optopt("", "www", "The www directory", "WWW_DIR"), getopts::optflag("h", "help", "Display help"), ]; let matches = match getopts::getopts(args.tail(), &opts) { Ok(m) => { m } Err(f) => { panic!(f.to_err_msg()) } }; if matches.opt_present("h") || matches.opt_present("help") { print_usage(program.as_slice()); unsafe { libc::exit(1); } } let ip_str = if matches.opt_present("ip") { matches.opt_str("ip").expect("invalid ip address?").to_owned() } else { IP.to_owned() }; let port:usize = if matches.opt_present("port") { let input_port = matches.opt_str("port").expect("Invalid port number?").trim().parse::<usize>().ok(); match input_port { Some(port) => port, None => panic!("Invalid port number?"), } } else { PORT }; let www_dir_str = if matches.opt_present("www") { matches.opt_str("www").expect("invalid www argument?") } else { WWW_DIR.to_owned() }; (ip_str, port, www_dir_str) } fn main() { let (ip_str, port, www_dir_str) = get_args(); let mut zhtta = WebServer::new(ip_str, port, www_dir_str); zhtta.run(); }
} } }
random_line_split
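The zhtta.rs record above implements an application-layer file cache with counter-based LRU eviction: every lookup ages all entries, a hit resets the entry's counter to zero, the entry with the largest counter is evicted once the cache holds more than five entries, files outside the 1 KB to 100 MB bounds are never cached, and a changed modification time invalidates an entry. The following is a minimal Python sketch of that same policy, not the original implementation; FileCache, CACHE_MIN, CACHE_MAX, and CACHE_SLOTS are hypothetical names.

import os
from threading import Lock

CACHE_MIN, CACHE_MAX, CACHE_SLOTS = 1024, 100 * 2**20, 5  # assumed to mirror the server's bounds

class FileCache:
    # Counter-based LRU: every lookup ages all entries; a hit resets the
    # entry's age to 0; the stalest entry is evicted once the cache is full.
    def __init__(self):
        self.lock = Lock()
        self.entries = {}  # path -> [body, age, mtime]

    def get(self, path):
        with self.lock:
            mtime = os.path.getmtime(path)
            entry = self.entries.get(path)
            if entry and entry[2] != mtime:   # file changed on disk: invalidate
                del self.entries[path]
                entry = None
            for e in self.entries.values():   # age every entry on each access
                e[1] += 1
            if entry:
                entry[1] = 0                  # reset age on a hit
                return entry[0]
            return None

    def put(self, path, body):
        if not (CACHE_MIN <= len(body) <= CACHE_MAX):
            return                            # too small or too large to cache
        with self.lock:
            if len(self.entries) >= CACHE_SLOTS:
                stalest = max(self.entries, key=lambda p: self.entries[p][1])
                del self.entries[stalest]     # evict the least recently used path
            self.entries[path] = [body, 0, os.path.getmtime(path)]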
zhtta.rs
// // zhtta.rs // // Starting code for PA3 // Revised to run on Rust 1.0.0 nightly - built 02-21 // // Note that this code has serious security risks! You should not run it // on any system with access to sensitive files. // // Brandeis University - cs146a Spring 2015 // Dimokritos Stamatakis and Brionne Godby // Version 1.0 // To see debug! outputs set the RUST_LOG environment variable, e.g.: export RUST_LOG="zhtta=debug" #![feature(rustc_private)] #![feature(libc)] #![feature(io)] #![feature(old_io)] #![feature(old_path)] #![feature(os)] #![feature(core)] #![feature(collections)] #![feature(std_misc)] #![allow(non_camel_case_types)] #![allow(unused_must_use)] #![allow(deprecated)] #[macro_use] extern crate log; extern crate libc; use std::io::*; use std::old_io::File; use std::{os, str}; use std::process::{Command, Stdio}; use std::old_path::posix::Path; use std::collections::hash_map::HashMap; use std::borrow::ToOwned; use std::thread::Thread; use std::old_io::fs::PathExtensions; use std::old_io::{Acceptor, Listener}; use std::old_io::BufferedReader; extern crate getopts; use getopts::{optopt, getopts}; use std::sync::RwLock; use std::sync::{Arc, Mutex,Semaphore}; use std::sync::mpsc::{Sender, Receiver}; use std::sync::mpsc::channel; static SERVER_NAME : &'static str = "Zhtta Version 1.0"; static CacheLowerBounder :u64 =1024;//1K static CacheUpperBounder :u64 =104857600; //100M static IP : &'static str = "127.0.0.1"; static PORT : usize = 4414; static WWW_DIR : &'static str = "./www"; static HTTP_OK : &'static str = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"; static HTTP_BAD : &'static str = "HTTP/1.1 404 Not Found\r\n\r\n"; static COUNTER_STYLE : &'static str = "<doctype !html><html><head><title>Hello, Rust!</title> <style>body {background-color: #884414; color: #FFEEAA} h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red } h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green } </style></head> <body>"; //static mut visitor_count : usize = 0; struct HTTP_Request { // Use peer_name as the key to access TcpStream in hashmap. // (Due to a bug in extra::arc in Rust 0.9, it is very inconvenient to use TcpStream without the "Freeze" bound. 
// See issue: https://github.com/mozilla/rust/issues/12139) peer_name: String, path: Path, } struct WebServer { ip: String, port: usize, www_dir_path: Path, request_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>,//it is a hash map,store the http stream for each ip visitor_count : Arc<Mutex<usize>>, thread_sema : Arc<Semaphore>, cache: Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,//cache content, counter for LRU, modified time cache_len: Arc<Mutex<usize>>, notify_rx: Receiver<()>, notify_tx: Sender<()>, } impl WebServer { fn new(ip: String, port: usize, www_dir: String) -> WebServer { let (notify_tx, notify_rx) = channel(); let www_dir_path = Path::new(www_dir); os::change_dir(&www_dir_path); WebServer { ip:ip, port: port, www_dir_path: www_dir_path, request_queue_arc: Arc::new(Mutex::new(Vec::new())), stream_map_arc: Arc::new(Mutex::new(HashMap::new())), visitor_count:Arc::new(Mutex::new(0)), thread_sema: Arc::new(Semaphore::new(5)), cache: Arc::new(RwLock::new(HashMap::new())), cache_len: Arc::new(Mutex::new(0)), notify_rx: notify_rx, notify_tx: notify_tx, } } fn run(&mut self) { self.listen(); self.dequeue_static_file_request(); } fn listen(&mut self) { let addr = String::from_str(format!("{}:{}", self.ip, self.port).as_slice()); let www_dir_path_str = self.www_dir_path.clone(); let request_queue_arc = self.request_queue_arc.clone(); let notify_tx = self.notify_tx.clone(); let stream_map_arc = self.stream_map_arc.clone(); let visitor_count=self.visitor_count.clone(); Thread::spawn(move|| { let listener = std::old_io::TcpListener::bind(addr.as_slice()).unwrap(); let mut acceptor = listener.listen().unwrap(); println!("{} listening on {} (serving from: {}).", SERVER_NAME, addr, www_dir_path_str.as_str().unwrap()); for stream_raw in acceptor.incoming() { //for each stream/connection let (queue_tx, queue_rx) = channel();//build up a channel for sub thread queue_tx.send(request_queue_arc.clone());//send the request queue to queue and receive it inside the son thread let notify_chan = notify_tx.clone();//notify_chan is a global channel for webserver let stream_map_arc = stream_map_arc.clone(); let visitor_count=visitor_count.clone(); println!("outer thread:{}",*visitor_count.lock().unwrap()); // Spawn a task to handle the connection. Thread::spawn(move|| { let mut vc= visitor_count.lock().unwrap(); // Done *vc+=1; println!("inner thread:{}",*vc); let request_queue_arc = queue_rx.recv().unwrap();// let mut stream = match stream_raw { Ok(s) => {s} Err(e) => { panic!("Error getting the listener stream! {}", e) } }; let peer_name = WebServer::get_peer_name(&mut stream); debug!("Got connection from {}", peer_name); let mut buf: [u8;500] = [0;500]; stream.read(&mut buf); let request_str = match str::from_utf8(&buf){ Ok(s) => s, Err(e)=> panic!("Error reading from the listener stream! 
{}", e), }; debug!("Request:\n{}", request_str); //WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan); let req_group: Vec<&str> = request_str.splitn(3, ' ').collect(); if req_group.len() > 2 { let path_str = ".".to_string() + req_group[1]; let mut path_obj = os::getcwd().unwrap(); path_obj.push(path_str.clone()); let ext_str = match path_obj.extension_str() { Some(e) => e, None => "", }; debug!("Requested path: [{}]", path_obj.as_str().expect("error")); debug!("Requested path: [{}]", path_str); if path_str.as_slice().eq("./") { debug!("===== Counter Page request ====="); WebServer::respond_with_counter_page(stream,*vc); debug!("=====Terminated connection from [{}].=====", peer_name); } else if !path_obj.exists() || path_obj.is_dir() { debug!("===== Error page request ====="); WebServer::respond_with_error_page(stream, &path_obj); debug!("=====Terminated connection from [{}].=====", peer_name); } else if ext_str == "shtml" { // Dynamic web pages. debug!("===== Dynamic Page request ====="); WebServer::respond_with_dynamic_page(stream, &path_obj); debug!("=====Terminated connection from [{}].=====", peer_name); } else { debug!("===== Static Page request ====="); if std::fs::metadata(&path_obj).unwrap().len()> CacheLowerBounder{ WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);} else{ debug!("small file, do it without enqueue!"); let mut file_reader = File::open(&path_obj).unwrap(); stream.write(HTTP_OK.as_bytes()); let mut reader = BufferedReader::new(file_reader); for line in reader.lines().filter_map(|result| result.ok()) { let _ = stream.write_all(line.as_bytes()); } } } } }); } }); } fn respond_with_error_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) { let mut stream = stream; let msg: String= format!("Cannot open: {}", path.as_str().expect("invalid path")); stream.write(HTTP_BAD.as_bytes()); stream.write(msg.as_bytes()); } // Done fn respond_with_counter_page(stream: std::old_io::net::tcp::TcpStream,visitor_count:usize) { let mut stream = stream; let response: String = format!("{}{}<h1>Greetings, Krusty!</h1><h2>Visitor count: {}</h2></body></html>\r\n", HTTP_OK, COUNTER_STYLE, unsafe { visitor_count } ); debug!("Responding to counter request"); stream.write(response.as_bytes()); } // TODO: Streaming file. // TODO: Application-layer file caching. 
fn respond_with_static_file(stream: std::old_io::net::tcp::TcpStream, path: &Path,cache : Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,cache_len :Arc<Mutex<usize>>) { let mut stream = stream; let mut cache_str=String::new(); let mut counter=0; let mut local_cache=cache.clone(); let mut is_modified=false; { let metadata=std::fs::metadata(path).unwrap(); let modify_time=metadata.modified(); let read_hash=local_cache.read().unwrap(); if read_hash.contains_key(path) { let tuple=read_hash.get(path).unwrap(); let time = tuple.2; is_modified= (time !=modify_time); } } if is_modified{ debug!("It is modified, delete from cache!"); let mut write_hash=local_cache.write().unwrap(); write_hash.remove(path); } //let mut local_cache=cache.clone(); { debug!("updating counter..."); let read_hash=local_cache.read().unwrap(); for (key,value) in read_hash.iter(){ let mut counter=value.1.lock().unwrap(); *counter+=1; } if read_hash.contains_key(path){ debug!("Reading cached file:{}",path.display()); let mut pair=read_hash.get(path).unwrap(); { *pair.1.lock().unwrap()=0; } stream.write(HTTP_OK.as_bytes()); let _ = stream.write_all(pair.0.as_bytes()); return; } else{ debug!("reading from disk!"); let mut file_reader = File::open(path).unwrap(); stream.write(HTTP_OK.as_bytes()); /*let mut buf:[u8;1048576]=[0;1048576]; loop{ let size=match file_reader.read(&mut buf){ Err(why) =>0, Ok(size) =>size, }; let str_buf=String::from_utf8_lossy(&buf[0..size]); let _=stream.write(str_buf.as_bytes()); cache_str.push_str(str_buf.as_slice()); debug!("read siez:{}",size); if(size<1048576){ break; } }*/ //better solution let mut reader = BufferedReader::new(file_reader); for line in reader.lines().filter_map(|result| result.ok()) { let _ = stream.write_all(line.as_bytes()); cache_str.push_str(line.as_slice()); } } } let file_size=std::fs::metadata(path).unwrap().len(); if(file_size<CacheLowerBounder){ debug!("file size:{}, don't cache this file(too small)",file_size); return; } else if (file_size>CacheUpperBounder){ debug!("file size:{}, don't cache this file(too large)",file_size); return; } debug!("updating cache...."); { let mut write_hash=local_cache.write().unwrap(); let time=std::fs::metadata(path).unwrap().modified(); write_hash.insert(path.clone(),(cache_str,Mutex::new(0),time)); } *cache_len.lock().unwrap()+=1; { let mut write_hash=local_cache.write().unwrap(); let mut to_be_replaced : Path=Path::new("./"); if *cache_len.lock().unwrap()>5{ let mut max_num=0; //let read_hash=local_cache.write().unwrap(); let mut tmp: &Path=&Path::new("./"); for (key,value) in write_hash.iter(){ let num=*value.1.lock().unwrap(); if num>=max_num{ max_num=num; tmp=key; } } to_be_replaced=tmp.clone(); }else { return; } debug!("least recently used is:{}",to_be_replaced.display()); write_hash.remove(&to_be_replaced); } } // TODO: Server-side gashing. 
fn respond_with_dynamic_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) { // Scan the .shtml for the SSI exec tag, extract the embedded command, run it through our gash shell, and splice its output into the page. let mut stream = stream; let mut file =match File::open(path) { Err(why) => panic!("Couldn't open file: {}",why), Ok(file) => file, }; let mut s= String::new(); s=match file.read_to_string(){ Err(why) => panic!("Couldn't read file: {}",why), Ok(content) => content, }; let str_vec: Vec<&str>=s.split_str("<!--#exec cmd=\"").collect(); let cmd_mix:Vec<&str>=str_vec[1].split_str("\" -->").collect(); let cmd=cmd_mix[0].to_string(); let mut args =Vec::new(); args.push("-c"); args.push(&cmd); let mut gash_command= match Command::new("../main").args(&args).stdout(Stdio::capture()).spawn(){ Err(why) => panic!("Couldn't run command: {}",why), Ok(cmd) => cmd, }; let mut stdout=gash_command.stdout.unwrap(); let mut output=String::new(); stdout.read_to_string(&mut output); stream.write(HTTP_OK.as_bytes()); stream.write(str_vec[0].as_bytes()); stream.write(output.as_bytes()); stream.write(cmd_mix[1].as_bytes()); //WebServer::respond_with_static_file(stream, path); } fn get_file_size(path: &Path) ->u64 { let metadata=std::fs::metadata(path).unwrap(); return metadata.len() } // TODO: Smarter Scheduling. fn enqueue_static_file_request(stream: std::old_io::net::tcp::TcpStream, path_obj: &Path, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>, req_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, notify_chan: Sender<()>) { // Save stream in hashmap for later response. let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); let (stream_tx, stream_rx) = channel(); stream_tx.send(stream); let stream = match stream_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the stream channel! {}", e), }; let local_stream_map = stream_map_arc.clone(); { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut local_stream_map = local_stream_map.lock().unwrap(); local_stream_map.insert(peer_name.clone(), stream); } // Enqueue the HTTP request. // TOCHECK: it was ~path_obj.clone(); make sure of the order in which ~ and clone() are applied let req = HTTP_Request { peer_name: peer_name.clone(), path: path_obj.clone() }; let (req_tx, req_rx) = channel(); req_tx.send(req); debug!("Waiting for queue mutex lock."); let local_req_queue = req_queue_arc.clone(); { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut local_req_queue = local_req_queue.lock().unwrap(); let req: HTTP_Request = match req_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the request channel! {}", e), }; // Reorder the queue by requested file size: shortest job first. local_req_queue.push(req); local_req_queue.sort_by(|a, b| WebServer::get_file_size(&a.path).cmp(&WebServer::get_file_size(&b.path))); debug!("A new request enqueued, now the length of queue is {}.", local_req_queue.len()); notify_chan.send(()); // Send incoming notification to responder task. } } // TODO: Smarter Scheduling. fn dequeue_static_file_request(&mut self) { let req_queue_get = self.request_queue_arc.clone(); let stream_map_get = self.stream_map_arc.clone(); // Receiver<> cannot be sent to another task, so this must remain the main task, which can access self.notify_rx.
let (request_tx, request_rx) = channel(); loop { self.notify_rx.recv(); // Wait for a new request to be enqueued; this is where the infinite loop blocks. { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut req_queue = req_queue_get.lock().unwrap(); if req_queue.len() > 0 { self.thread_sema.acquire(); let req = req_queue.remove(0); debug!("A new request dequeued, now the length of queue is {}.", req_queue.len()); request_tx.send(req); } } let request = match request_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the request channel! {}", e), }; // Get stream from hashmap. let (stream_tx, stream_rx) = channel(); { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut stream_map = stream_map_get.lock().unwrap(); let stream = stream_map.remove(&request.peer_name).expect("no TcpStream found for peer"); stream_tx.send(stream); } // TODO: Spawn more tasks to respond to dequeued requests concurrently. You may need a semaphore to control the concurrency. let stream = match stream_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the stream channel! {}", e), }; let sema=self.thread_sema.clone(); let cache_len=self.cache_len.clone(); let mut cache=self.cache.clone(); Thread::spawn(move||{ debug!("Processing...."); WebServer::respond_with_static_file(stream, &request.path,cache,cache_len); debug!("Finishing request for {}", request.path.display()); debug!("=====Terminated connection from [{}].=====", request.peer_name); sema.release(); }); } } fn get_peer_name(stream: &mut std::old_io::net::tcp::TcpStream) -> String{ match stream.peer_name(){ Ok(s) => {format!("{}:{}", s.ip, s.port)} Err(e) => {panic!("Error while getting the stream name! {}", e)} } } } fn get_args() -> (String, usize, String) { fn pr
rogram: &str) { println!("Usage: {} [options]", program); println!("--ip \tIP address, \"{}\" by default.", IP); println!("--port \tport number, \"{}\" by default.", PORT); println!("--www \tworking directory, \"{}\" by default.", WWW_DIR); println!("-h --help \tUsage"); } /* Process program arguments and initialize the parameters. */ let args = os::args(); let program = args[0].clone(); let opts = [ getopts::optopt("", "ip", "The IP address to bind to", "IP"), getopts::optopt("", "port", "The Port to bind to", "PORT"), getopts::optopt("", "www", "The www directory", "WWW_DIR"), getopts::optflag("h", "help", "Display help"), ]; let matches = match getopts::getopts(args.tail(), &opts) { Ok(m) => { m } Err(f) => { panic!(f.to_err_msg()) } }; if matches.opt_present("h") || matches.opt_present("help") { print_usage(program.as_slice()); unsafe { libc::exit(1); } } let ip_str = if matches.opt_present("ip") { matches.opt_str("ip").expect("invalid ip address?").to_owned() } else { IP.to_owned() }; let port:usize = if matches.opt_present("port") { let input_port = matches.opt_str("port").expect("Invalid port number?").trim().parse::<usize>().ok(); match input_port { Some(port) => port, None => panic!("Invalid port number?"), } } else { PORT }; let www_dir_str = if matches.opt_present("www") { matches.opt_str("www").expect("invalid www argument?") } else { WWW_DIR.to_owned() }; (ip_str, port, www_dir_str) } fn main() { let (ip_str, port, www_dir_str) = get_args(); let mut zhtta = WebServer::new(ip_str, port, www_dir_str); zhtta.run(); }
int_usage(p
identifier_name
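The enqueue_static_file_request function in the record above re-sorts the whole queue by file size on every insert, which is a shortest-job-first policy. A binary heap gives the same ordering in O(log n) per operation instead of a full sort per enqueue; this Python sketch is illustrative only, and SJFQueue is a hypothetical name.

import heapq
import itertools
import os

class SJFQueue:
    # Priority queue ordered by on-disk file size: smallest request first.
    def __init__(self):
        self._heap = []
        self._tie = itertools.count()  # tie-breaker so entries never compare by payload

    def push(self, peer_name, path):
        heapq.heappush(self._heap, (os.path.getsize(path), next(self._tie), peer_name, path))

    def pop(self):
        _size, _, peer_name, path = heapq.heappop(self._heap)
        return peer_name, path

Note that pure shortest-job-first can starve large requests under a steady stream of small ones; the sorted-Vec version above has the same property.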
zhtta.rs
// // zhtta.rs // // Starting code for PA3 // Revised to run on Rust 1.0.0 nightly - built 02-21 // // Note that this code has serious security risks! You should not run it // on any system with access to sensitive files. // // Brandeis University - cs146a Spring 2015 // Dimokritos Stamatakis and Brionne Godby // Version 1.0 // To see debug! outputs set the RUST_LOG environment variable, e.g.: export RUST_LOG="zhtta=debug" #![feature(rustc_private)] #![feature(libc)] #![feature(io)] #![feature(old_io)] #![feature(old_path)] #![feature(os)] #![feature(core)] #![feature(collections)] #![feature(std_misc)] #![allow(non_camel_case_types)] #![allow(unused_must_use)] #![allow(deprecated)] #[macro_use] extern crate log; extern crate libc; use std::io::*; use std::old_io::File; use std::{os, str}; use std::process::{Command, Stdio}; use std::old_path::posix::Path; use std::collections::hash_map::HashMap; use std::borrow::ToOwned; use std::thread::Thread; use std::old_io::fs::PathExtensions; use std::old_io::{Acceptor, Listener}; use std::old_io::BufferedReader; extern crate getopts; use getopts::{optopt, getopts}; use std::sync::RwLock; use std::sync::{Arc, Mutex,Semaphore}; use std::sync::mpsc::{Sender, Receiver}; use std::sync::mpsc::channel; static SERVER_NAME : &'static str = "Zhtta Version 1.0"; static CacheLowerBounder :u64 =1024;//1K static CacheUpperBounder :u64 =104857600; //100M static IP : &'static str = "127.0.0.1"; static PORT : usize = 4414; static WWW_DIR : &'static str = "./www"; static HTTP_OK : &'static str = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"; static HTTP_BAD : &'static str = "HTTP/1.1 404 Not Found\r\n\r\n"; static COUNTER_STYLE : &'static str = "<doctype !html><html><head><title>Hello, Rust!</title> <style>body {background-color: #884414; color: #FFEEAA} h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red } h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green } </style></head> <body>"; //static mut visitor_count : usize = 0; struct HTTP_Request { // Use peer_name as the key to access TcpStream in hashmap. // (Due to a bug in extra::arc in Rust 0.9, it is very inconvenient to use TcpStream without the "Freeze" bound. 
// See issue: https://github.com/mozilla/rust/issues/12139) peer_name: String, path: Path, } struct WebServer { ip: String, port: usize, www_dir_path: Path, request_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>,//it is a hash map,store the http stream for each ip visitor_count : Arc<Mutex<usize>>, thread_sema : Arc<Semaphore>, cache: Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,//cache content, counter for LRU, modified time cache_len: Arc<Mutex<usize>>, notify_rx: Receiver<()>, notify_tx: Sender<()>, } impl WebServer { fn new(ip: String, port: usize, www_dir: String) -> WebServer { let (notify_tx, notify_rx) = channel(); let www_dir_path = Path::new(www_dir); os::change_dir(&www_dir_path); WebServer { ip:ip, port: port, www_dir_path: www_dir_path, request_queue_arc: Arc::new(Mutex::new(Vec::new())), stream_map_arc: Arc::new(Mutex::new(HashMap::new())), visitor_count:Arc::new(Mutex::new(0)), thread_sema: Arc::new(Semaphore::new(5)), cache: Arc::new(RwLock::new(HashMap::new())), cache_len: Arc::new(Mutex::new(0)), notify_rx: notify_rx, notify_tx: notify_tx, } } fn run(&mut self) { self.listen(); self.dequeue_static_file_request(); } fn listen(&mut self) { let addr = String::from_str(format!("{}:{}", self.ip, self.port).as_slice()); let www_dir_path_str = self.www_dir_path.clone(); let request_queue_arc = self.request_queue_arc.clone(); let notify_tx = self.notify_tx.clone(); let stream_map_arc = self.stream_map_arc.clone(); let visitor_count=self.visitor_count.clone(); Thread::spawn(move|| { let listener = std::old_io::TcpListener::bind(addr.as_slice()).unwrap(); let mut acceptor = listener.listen().unwrap(); println!("{} listening on {} (serving from: {}).", SERVER_NAME, addr, www_dir_path_str.as_str().unwrap()); for stream_raw in acceptor.incoming() { //for each stream/connection let (queue_tx, queue_rx) = channel();//build up a channel for sub thread queue_tx.send(request_queue_arc.clone());//send the request queue to queue and receive it inside the son thread let notify_chan = notify_tx.clone();//notify_chan is a global channel for webserver let stream_map_arc = stream_map_arc.clone(); let visitor_count=visitor_count.clone(); println!("outer thread:{}",*visitor_count.lock().unwrap()); // Spawn a task to handle the connection. Thread::spawn(move|| { let mut vc= visitor_count.lock().unwrap(); // Done *vc+=1; println!("inner thread:{}",*vc); let request_queue_arc = queue_rx.recv().unwrap();// let mut stream = match stream_raw { Ok(s) => {s} Err(e) => { panic!("Error getting the listener stream! {}", e) } }; let peer_name = WebServer::get_peer_name(&mut stream); debug!("Got connection from {}", peer_name); let mut buf: [u8;500] = [0;500]; stream.read(&mut buf); let request_str = match str::from_utf8(&buf){ Ok(s) => s, Err(e)=> panic!("Error reading from the listener stream! 
{}", e), }; debug!("Request:\n{}", request_str); //WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan); let req_group: Vec<&str> = request_str.splitn(3, ' ').collect(); if req_group.len() > 2 { let path_str = ".".to_string() + req_group[1]; let mut path_obj = os::getcwd().unwrap(); path_obj.push(path_str.clone()); let ext_str = match path_obj.extension_str() { Some(e) => e, None => "", }; debug!("Requested path: [{}]", path_obj.as_str().expect("error")); debug!("Requested path: [{}]", path_str); if path_str.as_slice().eq("./") { debug!("===== Counter Page request ====="); WebServer::respond_with_counter_page(stream,*vc); debug!("=====Terminated connection from [{}].=====", peer_name); } else if !path_obj.exists() || path_obj.is_dir() { debug!("===== Error page request ====="); WebServer::respond_with_error_page(stream, &path_obj); debug!("=====Terminated connection from [{}].=====", peer_name); } else if ext_str == "shtml" { // Dynamic web pages. debug!("===== Dynamic Page request ====="); WebServer::respond_with_dynamic_page(stream, &path_obj); debug!("=====Terminated connection from [{}].=====", peer_name); } else { debug!("===== Static Page request ====="); if std::fs::metadata(&path_obj).unwrap().len()> CacheLowerBounder{ WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);} else{ debug!("small file, do it without enqueue!"); let mut file_reader = File::open(&path_obj).unwrap(); stream.write(HTTP_OK.as_bytes()); let mut reader = BufferedReader::new(file_reader); for line in reader.lines().filter_map(|result| result.ok()) { let _ = stream.write_all(line.as_bytes()); } } } } }); } }); } fn respond_with_error_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) { let mut stream = stream; let msg: String= format!("Cannot open: {}", path.as_str().expect("invalid path")); stream.write(HTTP_BAD.as_bytes()); stream.write(msg.as_bytes()); } // Done fn respond_with_counter_page(stream: std::old_io::net::tcp::TcpStream,visitor_count:usize) { let mut stream = stream; let response: String = format!("{}{}<h1>Greetings, Krusty!</h1><h2>Visitor count: {}</h2></body></html>\r\n", HTTP_OK, COUNTER_STYLE, unsafe { visitor_count } ); debug!("Responding to counter request"); stream.write(response.as_bytes()); } // TODO: Streaming file. // TODO: Application-layer file caching. 
fn respond_with_static_file(stream: std::old_io::net::tcp::TcpStream, path: &Path,cache : Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,cache_len :Arc<Mutex<usize>>) { let mut stream = stream; let mut cache_str=String::new(); let mut counter=0; let mut local_cache=cache.clone(); let mut is_modified=false; { let metadata=std::fs::metadata(path).unwrap(); let modify_time=metadata.modified(); let read_hash=local_cache.read().unwrap(); if read_hash.contains_key(path) { let tuple=read_hash.get(path).unwrap(); let time = tuple.2; is_modified= (time !=modify_time); } } if is_modified{ debug!("It is modified, delete from cache!"); let mut write_hash=local_cache.write().unwrap(); write_hash.remove(path); } //let mut local_cache=cache.clone(); { debug!("updating counter..."); let read_hash=local_cache.read().unwrap(); for (key,value) in read_hash.iter(){ let mut counter=value.1.lock().unwrap(); *counter+=1; } if read_hash.contains_key(path){ debug!("Reading cached file:{}",path.display()); let mut pair=read_hash.get(path).unwrap(); { *pair.1.lock().unwrap()=0; } stream.write(HTTP_OK.as_bytes()); let _ = stream.write_all(pair.0.as_bytes()); return; } else{ debug!("reading from disk!"); let mut file_reader = File::open(path).unwrap(); stream.write(HTTP_OK.as_bytes()); /*let mut buf:[u8;1048576]=[0;1048576]; loop{ let size=match file_reader.read(&mut buf){ Err(why) =>0, Ok(size) =>size, }; let str_buf=String::from_utf8_lossy(&buf[0..size]); let _=stream.write(str_buf.as_bytes()); cache_str.push_str(str_buf.as_slice()); debug!("read siez:{}",size); if(size<1048576){ break; } }*/ //better solution let mut reader = BufferedReader::new(file_reader); for line in reader.lines().filter_map(|result| result.ok()) { let _ = stream.write_all(line.as_bytes()); cache_str.push_str(line.as_slice()); } } } let file_size=std::fs::metadata(path).unwrap().len(); if(file_size<CacheLowerBounder){ debug!("file size:{}, don't cache this file(too small)",file_size); return; } else if (file_size>CacheUpperBounder){ debug!("file size:{}, don't cache this file(too large)",file_size); return; } debug!("updating cache...."); { let mut write_hash=local_cache.write().unwrap(); let time=std::fs::metadata(path).unwrap().modified(); write_hash.insert(path.clone(),(cache_str,Mutex::new(0),time)); } *cache_len.lock().unwrap()+=1; { let mut write_hash=local_cache.write().unwrap(); let mut to_be_replaced : Path=Path::new("./"); if *cache_len.lock().unwrap()>5{ let mut max_num=0; //let read_hash=local_cache.write().unwrap(); let mut tmp: &Path=&Path::new("./"); for (key,value) in write_hash.iter(){ let num=*value.1.lock().unwrap(); if num>=max_num{ max_num=num; tmp=key; } } to_be_replaced=tmp.clone(); }else { return; } debug!("least recently used is:{}",to_be_replaced.display()); write_hash.remove(&to_be_replaced); } } // TODO: Server-side gashing. 
fn respond_with_dynamic_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) { // Scan the .shtml for the SSI exec tag, extract the embedded command, run it through our gash shell, and splice its output into the page. let mut stream = stream; let mut file =match File::open(path) { Err(why) => panic!("Couldn't open file: {}",why), Ok(file) => file, }; let mut s= String::new(); s=match file.read_to_string(){ Err(why) => panic!("Couldn't read file: {}",why), Ok(content) => content, }; let str_vec: Vec<&str>=s.split_str("<!--#exec cmd=\"").collect(); let cmd_mix:Vec<&str>=str_vec[1].split_str("\" -->").collect(); let cmd=cmd_mix[0].to_string(); let mut args =Vec::new(); args.push("-c"); args.push(&cmd); let mut gash_command= match Command::new("../main").args(&args).stdout(Stdio::capture()).spawn(){ Err(why) => panic!("Couldn't run command: {}",why), Ok(cmd) => cmd, }; let mut stdout=gash_command.stdout.unwrap(); let mut output=String::new(); stdout.read_to_string(&mut output); stream.write(HTTP_OK.as_bytes()); stream.write(str_vec[0].as_bytes()); stream.write(output.as_bytes()); stream.write(cmd_mix[1].as_bytes()); //WebServer::respond_with_static_file(stream, path); } fn get_file_size(path: &Path) ->u64 { let metadata=std::fs::metadata(path).unwrap(); return metadata.len() } // TODO: Smarter Scheduling. fn enqueue_static_file_request(stream: std::old_io::net::tcp::TcpStream, path_obj: &Path, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>, req_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, notify_chan: Sender<()>) { // Save stream in hashmap for later response. let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); let (stream_tx, stream_rx) = channel(); stream_tx.send(stream); let stream = match stream_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the stream channel! {}", e), }; let local_stream_map = stream_map_arc.clone(); { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut local_stream_map = local_stream_map.lock().unwrap(); local_stream_map.insert(peer_name.clone(), stream); } // Enqueue the HTTP request. // TOCHECK: it was ~path_obj.clone(); make sure of the order in which ~ and clone() are applied let req = HTTP_Request { peer_name: peer_name.clone(), path: path_obj.clone() }; let (req_tx, req_rx) = channel(); req_tx.send(req); debug!("Waiting for queue mutex lock."); let local_req_queue = req_queue_arc.clone(); { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut local_req_queue = local_req_queue.lock().unwrap(); let req: HTTP_Request = match req_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the request channel! {}", e), }; // Reorder the queue by requested file size: shortest job first. local_req_queue.push(req); local_req_queue.sort_by(|a, b| WebServer::get_file_size(&a.path).cmp(&WebServer::get_file_size(&b.path))); debug!("A new request enqueued, now the length of queue is {}.", local_req_queue.len()); notify_chan.send(()); // Send incoming notification to responder task. } } // TODO: Smarter Scheduling. fn dequeue_static_file_request(&mut self) { let req_queue_get = self.request_queue_arc.clone(); let stream_map_get = self.stream_map_arc.clone(); // Receiver<> cannot be sent to another task, so this must remain the main task, which can access self.notify_rx.
let (request_tx, request_rx) = channel(); loop { self.notify_rx.recv(); // Wait for a new request to be enqueued; this is where the infinite loop blocks. { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut req_queue = req_queue_get.lock().unwrap(); if req_queue.len() > 0 { self.thread_sema.acquire(); let req = req_queue.remove(0); debug!("A new request dequeued, now the length of queue is {}.", req_queue.len()); request_tx.send(req); } } let request = match request_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the request channel! {}", e), }; // Get stream from hashmap. let (stream_tx, stream_rx) = channel(); { // request the lock inside a block with its own scope, so that it is released at the end of that block let mut stream_map = stream_map_get.lock().unwrap(); let stream = stream_map.remove(&request.peer_name).expect("no TcpStream found for peer"); stream_tx.send(stream); } // TODO: Spawn more tasks to respond to dequeued requests concurrently. You may need a semaphore to control the concurrency. let stream = match stream_rx.recv(){ Ok(s) => s, Err(e) => panic!("There was an error while receiving from the stream channel! {}", e), }; let sema=self.thread_sema.clone(); let cache_len=self.cache_len.clone(); let mut cache=self.cache.clone(); Thread::spawn(move||{ debug!("Processing...."); WebServer::respond_with_static_file(stream, &request.path,cache,cache_len); debug!("Finishing request for {}", request.path.display()); debug!("=====Terminated connection from [{}].=====", request.peer_name); sema.release(); }); } } fn get_peer_name(stream: &mut std::old_io::net::tcp::TcpStream) -> String{ match stream.peer_name(){ Ok(s) => {format!("{}:{}", s.ip, s.port)} Err(e) => {panic!("Error while getting the stream name! {}", e)} } } } fn get_args() -> (String, usize, String) { fn print_usage(program: &str) { println!("Usage: {} [options]", program); println!("--ip \tIP address, \"{}\" by default.", IP); println!("--port \tport number, \"{}\" by default.", PORT); println!("--www \tworking directory, \"{}\" by default.", WWW_DIR); println!("-h --help \tUsage"); } /* Process program arguments and initialize the parameters. */ let args = os::args(); let program = args[0].clone(); let opts = [ getopts::optopt("", "ip", "The IP address to bind to", "IP"), getopts::optopt("", "port", "The Port to bind to", "PORT"), getopts::optopt("", "www", "The www directory", "WWW_DIR"), getopts::optflag("h", "help", "Display help"), ]; let matches = match getopts::getopts(args.tail(), &opts) { Ok(m) => { m } Err(f) => { panic!(f.to_err_msg()) } }; if matches.opt_present("h") || matches.opt_present("help") { print_usage(program.as_slice()); unsafe { libc::exit(1); } } let ip_str = if matches.opt_present("ip") {
lse { IP.to_owned() }; let port:usize = if matches.opt_present("port") { let input_port = matches.opt_str("port").expect("Invalid port number?").trim().parse::<usize>().ok(); match input_port { Some(port) => port, None => panic!("Invalid port number?"), } } else { PORT }; let www_dir_str = if matches.opt_present("www") { matches.opt_str("www").expect("invalid www argument?") } else { WWW_DIR.to_owned() }; (ip_str, port, www_dir_str) } fn main() { let (ip_str, port, www_dir_str) = get_args(); let mut zhtta = WebServer::new(ip_str, port, www_dir_str); zhtta.run(); }
matches.opt_str("ip").expect("invalid ip address?").to_owned() } e
conditional_block
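The dequeue_static_file_request loop in the zhtta.rs records above acquires thread_sema before popping a request and releases it inside the spawned responder, capping the server at five concurrent static-file workers. The same pattern in Python, using the standard library's counting semaphore; responder_loop and serve are hypothetical names standing in for the dequeue loop and the respond_with_static_file call.

import queue
import threading

MAX_WORKERS = 5                      # the server allows five concurrent responders
sema = threading.Semaphore(MAX_WORKERS)
requests = queue.Queue()

def responder_loop(serve):
    # Dequeue forever, but never let more than MAX_WORKERS responses run at once.
    while True:
        req = requests.get()         # blocks until a request is enqueued
        sema.acquire()               # blocks while all worker slots are busy
        def worker(r=req):
            try:
                serve(r)             # the respond_with_static_file equivalent
            finally:
                sema.release()       # free the slot even if serving raised
        threading.Thread(target=worker, daemon=True).start()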
176_main_448.py
from model.DFL import DFL_VGG16 from utils.util import * from utils.transform import * from train import * from validate import * from utils.init import * import sys import argparse import os import random import shutil import time import warnings import torch import torch.nn as nn import torch.nn.parallel import numpy as np import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvision.datasets as datasets from utils.MyImageFolderWithPaths import * from drawrect import * parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') parser.add_argument('--dataroot', metavar='DIR', help='path to dataset') parser.add_argument('--result', metavar='DIR', help='path to dataset') parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=100000, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--train_batchsize_per_gpu', default=16, type=int, metavar='N', help='mini-batch size (default: 64)') parser.add_argument('-testbatch', '--test_batch_size', default=64, type=int, metavar='N', help='mini-batch size (default: 32)') parser.add_argument('--init_type', default='xavier', type=str, metavar='INIT', help='init net') parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--momentum', default=0.9, type=float, metavar='momentum', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.000005, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--print-freq', '-p', default=1, type=int, metavar='N', help='print frequency (default: 10)') parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. 
') parser.add_argument('--gpu', default=4, type=int, help='GPU nums to use.') parser.add_argument('--log_train_dir', default='log_train', type=str, help='log for train') parser.add_argument('--log_test_dir', default='log_test', type=str, help='log for test') parser.add_argument('--nclass', default=583, type=int, help='num of classes') parser.add_argument('--eval_epoch', default=2, type=int, help='every eval_epoch we will evaluate') parser.add_argument('--vis_epoch', default=2, type=int, help='every vis_epoch we will evaluate') parser.add_argument('--save_epoch', default=2, type=int, help='every save_epoch we will evaluate') parser.add_argument('--w', default=448, type=int, help='transform, seen as align') parser.add_argument('--h', default=448, type=int, help='transform, seen as align') best_prec1 = 0 def main(): print('DFL-CNN <==> Part1 : prepare for parameters <==> Begin') global args, best_prec1 args = parser.parse_args() print('DFL-CNN <==> Part1 : prepare for parameters <==> Done') print('DFL-CNN <==> Part2 : Load Network <==> Begin') model = DFL_VGG16(k=10, nclass=176) if args.gpu is not None: model = nn.DataParallel(model, device_ids=range(args.gpu)) model = model.cuda() cudnn.benchmark = True if args.init_type is not None: try: init_weights(model, init_type=args.init_type) except: sys.exit('DFL-CNN <==> Part2 : Load Network <==> Init_weights error!') #################### weight = np.array([0.844414894, 2.834821429, 1.725543478, 1.322916667, 1.725543478, 1.700892857, 0.739518634, 0.629960317, 0.773133117, 1.190625, 1.700892857, 3.501838235, 3.052884615, 1.368534483, 1.451981707, 1.831730769, 2.204861111, 1.133928571, 0.592350746, 0.262252203, 1.123231132, 0.308452073, 1.280241935, 1.009004237, 1.725543478, 1.308379121, 0.265172606, 1.777052239, 1.469907407, 1.00052521, 1.803977273, 0.470602767, 0.960181452, 1.67693662, 1.608952703, 0.280807783, 0.9921875, 0.466911765, 1.112733645, 2.903963415, 2.768895349, 0.295440447, 0.265764509, 2.289663462, 2.38125, 1.434487952, 1.984375, 0.580792683, 1.630993151, 1.831730769, 1.860351563, 1.803977273, 2.768895349, 1.951844262, 2.126116071, 1.831730769, 1.920362903, 2.768895349, 1.777052239, 3.217905405, 2.334558824, 2.088815789, 0.519923581, 3.133223684, 1.951844262, 2.289663462, 1.133928571, 1.507120253, 1.984375, 2.334558824, 1.48828125, 1.984375, 1.5875, 1.831730769, 1.092316514, 1.469907407, 1.26662234, 1.5875, 0.661458333, 2.088815789, 1.725543478, 1.725543478, 0.376780063, 1.384447674, 1.630993151, 2.9765625, 0.607461735, 0.888526119, 2.645833333, 2.334558824, 1.777052239, 0.519923581, 0.875459559, 0.268158784, 0.278183411, 1.017628205, 1.009004237, 0.548675115, 0.264583333, 1.860351563, 1.777052239, 0.65418956, 0.712949102, 1.984375, 2.088815789, 0.365222393, 1.803977273, 0.564277251, 1.630993151, 1.889880952, 1.507120253, 1.507120253, 1.803977273, 2.429846939, 3.96875, 1.67693662, 1.630993151, 2.164772727, 2.052801724, 1.167279412, 1.803977273, 0.9525, 1.831730769, 1.700892857, 1.352982955, 1.253289474, 2.164772727, 2.164772727, 1.352982955, 2.645833333, 1.831730769, 1.630993151, 1.5875, 3.217905405, 0.804476351, 2.705965909, 1.725543478, 1.630993151, 2.429846939, 1.777052239, 0.922965116, 2.204861111, 1.123231132, 0.79375, 0.320060484, 0.403601695, 0.543664384, 0.269372172, 2.588315217, 2.289663462, 1.322916667, 1.280241935, 0.259395425, 1.951844262, 0.620117188, 0.252251059, 1.653645833, 0.519923581, 0.616904145, 4.252232143, 2.164772727, 1.920362903, 1.725543478, 2.052801724, 1.700892857, 0.413411458, 1.400735294, 1.035326087, 
1.526442308, 0.881944444, 2.126116071, 2.246462264, 1.984375, 1.984375, 1.889880952, 1.860351563]) weight = torch.Tensor(weight) weight = weight.cuda() #################### criterion = nn.CrossEntropyLoss(weight=weight) optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) # Optionally resume from a checkpoint if args.resume: if os.path.isfile(args.resume): checkpoint = torch.load(args.resume) args.start_epoch = checkpoint['epoch'] best_prec1 = checkpoint['best_prec1'] model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) print('DFL-CNN <==> Part2 : Load Network <==> Continue from {} epoch {}'.format(args.resume, checkpoint['epoch'])) else: print('DFL-CNN <==> Part2 : Load Network <==> Failed') print('DFL-CNN <==> Part2 : Load Network <==> Done') print('DFL-CNN <==> Part3 : Load Dataset <==> Begin') dataroot = os.path.abspath(args.dataroot) traindir = os.path.join(dataroot, 'train') testdir = os.path.join(dataroot, 'test') # ImageFolder to process img transform_train = get_transform_for_train() transform_test = get_transform_for_test() transform_test_simple = get_transform_for_test_simple() train_dataset = ImageFolderWithPaths(traindir, transform=transform_train) test_dataset = ImageFolderWithPaths(testdir, transform=transform_test) test_dataset_simple = ImageFolderWithPaths(testdir, transform=transform_test_simple) # A list for target to classname index2classlist = train_dataset.index2classlist() # data loader train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.gpu * args.train_batchsize_per_gpu, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=1, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) test_loader_simple = torch.utils.data.DataLoader( test_dataset_simple, batch_size=1, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) print('DFL-CNN <==> Part3 : Load Dataset <==> Done') print('DFL-CNN <==> Part4 : Train and Test <==> Begin') for epoch in range(args.start_epoch, args.epochs):
if __name__ == '__main__': main()
adjust_learning_rate(args, optimizer, epoch, gamma=0.1) # train for one epoch train(args, train_loader, model, criterion, optimizer, epoch) # evaluate on validation set if epoch % args.eval_epoch == 0: prec1 = validate_simple(args, test_loader_simple, model, criterion, epoch) # remember best prec@1 and save checkpoint is_best = prec1 > best_prec1 best_prec1 = max(prec1, best_prec1) save_checkpoint({ 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_prec1': best_prec1, 'optimizer': optimizer.state_dict(), 'prec1': prec1, }, is_best) # do a test for visualization if epoch % args.vis_epoch == 0 and epoch != 0: draw_patch(epoch, model, index2classlist, args)
conditional_block
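The training loop in the record above calls save_checkpoint (imported from utils, not shown here) with the running best top-1 precision so the best model survives restarts. A common minimal implementation looks like the sketch below; the filenames are assumptions, and resuming mirrors the script's --resume branch.

import shutil
import torch

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # Persist the latest state; additionally copy it aside when it is the best so far.
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')

# Resuming then restores exactly what was saved, as in the --resume branch:
#   checkpoint = torch.load(args.resume)
#   model.load_state_dict(checkpoint['state_dict'])
#   optimizer.load_state_dict(checkpoint['optimizer'])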
176_main_448.py
from model.DFL import DFL_VGG16 from utils.util import * from utils.transform import * from train import * from validate import * from utils.init import * import sys import argparse import os import random import shutil import time import warnings import torch import torch.nn as nn import torch.nn.parallel import numpy as np import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvision.datasets as datasets from utils.MyImageFolderWithPaths import * from drawrect import * parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') parser.add_argument('--dataroot', metavar='DIR', help='path to dataset') parser.add_argument('--result', metavar='DIR', help='path to dataset') parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=100000, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--train_batchsize_per_gpu', default=16, type=int, metavar='N', help='mini-batch size (default: 64)') parser.add_argument('-testbatch', '--test_batch_size', default=64, type=int, metavar='N', help='mini-batch size (default: 32)') parser.add_argument('--init_type', default='xavier', type=str, metavar='INIT', help='init net') parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--momentum', default=0.9, type=float, metavar='momentum', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.000005, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--print-freq', '-p', default=1, type=int, metavar='N', help='print frequency (default: 10)') parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ') parser.add_argument('--gpu', default=4, type=int, help='GPU nums to use.') parser.add_argument('--log_train_dir', default='log_train', type=str, help='log for train') parser.add_argument('--log_test_dir', default='log_test', type=str, help='log for test') parser.add_argument('--nclass', default=583, type=int, help='num of classes') parser.add_argument('--eval_epoch', default=2, type=int, help='every eval_epoch we will evaluate') parser.add_argument('--vis_epoch', default=2, type=int, help='every vis_epoch we will evaluate') parser.add_argument('--save_epoch', default=2, type=int, help='every save_epoch we will evaluate') parser.add_argument('--w', default=448, type=int, help='transform, seen as align') parser.add_argument('--h', default=448, type=int, help='transform, seen as align') best_prec1 = 0 def
(): print('DFL-CNN <==> Part1 : prepare for parameters <==> Begin') global args, best_prec1 args = parser.parse_args() print('DFL-CNN <==> Part1 : prepare for parameters <==> Done') print('DFL-CNN <==> Part2 : Load Network <==> Begin') model = DFL_VGG16(k=10, nclass=176) if args.gpu is not None: model = nn.DataParallel(model, device_ids=range(args.gpu)) model = model.cuda() cudnn.benchmark = True if args.init_type is not None: try: init_weights(model, init_type=args.init_type) except: sys.exit('DFL-CNN <==> Part2 : Load Network <==> Init_weights error!') #################### weight = np.array([0.844414894, 2.834821429, 1.725543478, 1.322916667, 1.725543478, 1.700892857, 0.739518634, 0.629960317, 0.773133117, 1.190625, 1.700892857, 3.501838235, 3.052884615, 1.368534483, 1.451981707, 1.831730769, 2.204861111, 1.133928571, 0.592350746, 0.262252203, 1.123231132, 0.308452073, 1.280241935, 1.009004237, 1.725543478, 1.308379121, 0.265172606, 1.777052239, 1.469907407, 1.00052521, 1.803977273, 0.470602767, 0.960181452, 1.67693662, 1.608952703, 0.280807783, 0.9921875, 0.466911765, 1.112733645, 2.903963415, 2.768895349, 0.295440447, 0.265764509, 2.289663462, 2.38125, 1.434487952, 1.984375, 0.580792683, 1.630993151, 1.831730769, 1.860351563, 1.803977273, 2.768895349, 1.951844262, 2.126116071, 1.831730769, 1.920362903, 2.768895349, 1.777052239, 3.217905405, 2.334558824, 2.088815789, 0.519923581, 3.133223684, 1.951844262, 2.289663462, 1.133928571, 1.507120253, 1.984375, 2.334558824, 1.48828125, 1.984375, 1.5875, 1.831730769, 1.092316514, 1.469907407, 1.26662234, 1.5875, 0.661458333, 2.088815789, 1.725543478, 1.725543478, 0.376780063, 1.384447674, 1.630993151, 2.9765625, 0.607461735, 0.888526119, 2.645833333, 2.334558824, 1.777052239, 0.519923581, 0.875459559, 0.268158784, 0.278183411, 1.017628205, 1.009004237, 0.548675115, 0.264583333, 1.860351563, 1.777052239, 0.65418956, 0.712949102, 1.984375, 2.088815789, 0.365222393, 1.803977273, 0.564277251, 1.630993151, 1.889880952, 1.507120253, 1.507120253, 1.803977273, 2.429846939, 3.96875, 1.67693662, 1.630993151, 2.164772727, 2.052801724, 1.167279412, 1.803977273, 0.9525, 1.831730769, 1.700892857, 1.352982955, 1.253289474, 2.164772727, 2.164772727, 1.352982955, 2.645833333, 1.831730769, 1.630993151, 1.5875, 3.217905405, 0.804476351, 2.705965909, 1.725543478, 1.630993151, 2.429846939, 1.777052239, 0.922965116, 2.204861111, 1.123231132, 0.79375, 0.320060484, 0.403601695, 0.543664384, 0.269372172, 2.588315217, 2.289663462, 1.322916667, 1.280241935, 0.259395425, 1.951844262, 0.620117188, 0.252251059, 1.653645833, 0.519923581, 0.616904145, 4.252232143, 2.164772727, 1.920362903, 1.725543478, 2.052801724, 1.700892857, 0.413411458, 1.400735294, 1.035326087, 1.526442308, 0.881944444, 2.126116071, 2.246462264, 1.984375, 1.984375, 1.889880952, 1.860351563]) weight = torch.Tensor(weight) weight = weight.cuda() #################### criterion = nn.CrossEntropyLoss(weight=weight) optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) # Optionally resume from a checkpoint if args.resume: if os.path.isfile(args.resume): checkpoint = torch.load(args.resume) args.start_epoch = checkpoint['epoch'] best_prec1 = checkpoint['best_prec1'] model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) print('DFL-CNN <==> Part2 : Load Network <==> Continue from {} epoch {}'.format(args.resume, checkpoint['epoch'])) else: print('DFL-CNN <==> Part2 : Load Network <==> Failed') print('DFL-CNN 
<==> Part2 : Load Network <==> Done') print('DFL-CNN <==> Part3 : Load Dataset <==> Begin') dataroot = os.path.abspath(args.dataroot) traindir = os.path.join(dataroot, 'train') testdir = os.path.join(dataroot, 'test') # ImageFolder to process img transform_train = get_transform_for_train() transform_test = get_transform_for_test() transform_test_simple = get_transform_for_test_simple() train_dataset = ImageFolderWithPaths(traindir, transform=transform_train) test_dataset = ImageFolderWithPaths(testdir, transform=transform_test) test_dataset_simple = ImageFolderWithPaths(testdir, transform=transform_test_simple) # A list for target to classname index2classlist = train_dataset.index2classlist() # data loader train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.gpu * args.train_batchsize_per_gpu, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=1, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) test_loader_simple = torch.utils.data.DataLoader( test_dataset_simple, batch_size=1, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) print('DFL-CNN <==> Part3 : Load Dataset <==> Done') print('DFL-CNN <==> Part4 : Train and Test <==> Begin') for epoch in range(args.start_epoch, args.epochs): adjust_learning_rate(args, optimizer, epoch, gamma=0.1) # train for one epoch train(args, train_loader, model, criterion, optimizer, epoch) # evaluate on validation set if epoch % args.eval_epoch == 0: prec1 = validate_simple(args, test_loader_simple, model, criterion, epoch) # remember best prec@1 and save checkpoint is_best = prec1 > best_prec1 best_prec1 = max(prec1, best_prec1) save_checkpoint({ 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_prec1': best_prec1, 'optimizer': optimizer.state_dict(), 'prec1': prec1, }, is_best) # do a test for visualization if epoch % args.vis_epoch == 0 and epoch != 0: draw_patch(epoch, model, index2classlist, args) if __name__ == '__main__': main()
main
identifier_name
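The main() body above builds an nn.CrossEntropyLoss over a hard-coded vector of 176 per-class weights, but the dump never shows how that vector was produced. The values cluster around 1.0 and grow for rarer classes, which looks like inverse class frequency; below is a minimal sketch of that derivation under that assumption. The access to train_dataset.samples is hypothetical, not taken from the original script.

# Sketch: deriving inverse-frequency class weights (assumption: this is how
# the hard-coded 176-entry vector above was produced; the actual derivation
# is not shown in this dump).
import torch
from collections import Counter

def inverse_frequency_weights(targets, nclass):
    """Weight each class by mean_count / class_count so that rare classes
    contribute more to the loss; a perfectly balanced class gets weight 1.0."""
    counts = Counter(targets)
    mean_count = sum(counts.values()) / nclass
    return torch.tensor([mean_count / counts.get(c, 1) for c in range(nclass)])

# Hypothetical usage with the criterion built in main():
#   targets = [label for _, label in train_dataset.samples]
#   weight = inverse_frequency_weights(targets, nclass=176).cuda()
#   criterion = torch.nn.CrossEntropyLoss(weight=weight)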
176_main_448.py
from model.DFL import DFL_VGG16
from utils.util import *
from utils.transform import *
from train import *
from validate import *
from utils.init import *
import sys
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import numpy as np
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from utils.MyImageFolderWithPaths import *
from drawrect import *

parser = argparse.ArgumentParser(description='PyTorch DFL-CNN Training')
parser.add_argument('--dataroot', metavar='DIR', help='path to dataset')
parser.add_argument('--result', metavar='DIR', help='path to result directory')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', help='number of data loading workers (default: 16)')
parser.add_argument('--epochs', default=100000, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--train_batchsize_per_gpu', default=16, type=int, metavar='N', help='per-GPU mini-batch size (default: 16)')
parser.add_argument('-testbatch', '--test_batch_size', default=64, type=int, metavar='N', help='test mini-batch size (default: 64)')
parser.add_argument('--init_type', default='xavier', type=str, metavar='INIT', help='weight initialization scheme')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight_decay', '--wd', default=0.000005, type=float, metavar='W', help='weight decay (default: 5e-6)')
parser.add_argument('--print-freq', '-p', default=1, type=int, metavar='N', help='print frequency (default: 1)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
parser.add_argument('--seed', default=None, type=int, help='seed for initializing training')
parser.add_argument('--gpu', default=4, type=int, help='number of GPUs to use')
parser.add_argument('--log_train_dir', default='log_train', type=str, help='log dir for train')
parser.add_argument('--log_test_dir', default='log_test', type=str, help='log dir for test')
parser.add_argument('--nclass', default=583, type=int, help='number of classes')
parser.add_argument('--eval_epoch', default=2, type=int, help='evaluate every eval_epoch epochs')
parser.add_argument('--vis_epoch', default=2, type=int, help='visualize every vis_epoch epochs')
parser.add_argument('--save_epoch', default=2, type=int, help='save a checkpoint every save_epoch epochs')
parser.add_argument('--w', default=448, type=int, help='input width after transform (alignment)')
parser.add_argument('--h', default=448, type=int, help='input height after transform (alignment)')

best_prec1 = 0


def main():
if __name__ == '__main__': main()
print('DFL-CNN <==> Part1 : prepare for parameters <==> Begin') global args, best_prec1 args = parser.parse_args() print('DFL-CNN <==> Part1 : prepare for parameters <==> Done') print('DFL-CNN <==> Part2 : Load Network <==> Begin') model = DFL_VGG16(k=10, nclass=176) if args.gpu is not None: model = nn.DataParallel(model, device_ids=range(args.gpu)) model = model.cuda() cudnn.benchmark = True if args.init_type is not None: try: init_weights(model, init_type=args.init_type) except: sys.exit('DFL-CNN <==> Part2 : Load Network <==> Init_weights error!') #################### weight = np.array([0.844414894, 2.834821429, 1.725543478, 1.322916667, 1.725543478, 1.700892857, 0.739518634, 0.629960317, 0.773133117, 1.190625, 1.700892857, 3.501838235, 3.052884615, 1.368534483, 1.451981707, 1.831730769, 2.204861111, 1.133928571, 0.592350746, 0.262252203, 1.123231132, 0.308452073, 1.280241935, 1.009004237, 1.725543478, 1.308379121, 0.265172606, 1.777052239, 1.469907407, 1.00052521, 1.803977273, 0.470602767, 0.960181452, 1.67693662, 1.608952703, 0.280807783, 0.9921875, 0.466911765, 1.112733645, 2.903963415, 2.768895349, 0.295440447, 0.265764509, 2.289663462, 2.38125, 1.434487952, 1.984375, 0.580792683, 1.630993151, 1.831730769, 1.860351563, 1.803977273, 2.768895349, 1.951844262, 2.126116071, 1.831730769, 1.920362903, 2.768895349, 1.777052239, 3.217905405, 2.334558824, 2.088815789, 0.519923581, 3.133223684, 1.951844262, 2.289663462, 1.133928571, 1.507120253, 1.984375, 2.334558824, 1.48828125, 1.984375, 1.5875, 1.831730769, 1.092316514, 1.469907407, 1.26662234, 1.5875, 0.661458333, 2.088815789, 1.725543478, 1.725543478, 0.376780063, 1.384447674, 1.630993151, 2.9765625, 0.607461735, 0.888526119, 2.645833333, 2.334558824, 1.777052239, 0.519923581, 0.875459559, 0.268158784, 0.278183411, 1.017628205, 1.009004237, 0.548675115, 0.264583333, 1.860351563, 1.777052239, 0.65418956, 0.712949102, 1.984375, 2.088815789, 0.365222393, 1.803977273, 0.564277251, 1.630993151, 1.889880952, 1.507120253, 1.507120253, 1.803977273, 2.429846939, 3.96875, 1.67693662, 1.630993151, 2.164772727, 2.052801724, 1.167279412, 1.803977273, 0.9525, 1.831730769, 1.700892857, 1.352982955, 1.253289474, 2.164772727, 2.164772727, 1.352982955, 2.645833333, 1.831730769, 1.630993151, 1.5875, 3.217905405, 0.804476351, 2.705965909, 1.725543478, 1.630993151, 2.429846939, 1.777052239, 0.922965116, 2.204861111, 1.123231132, 0.79375, 0.320060484, 0.403601695, 0.543664384, 0.269372172, 2.588315217, 2.289663462, 1.322916667, 1.280241935, 0.259395425, 1.951844262, 0.620117188, 0.252251059, 1.653645833, 0.519923581, 0.616904145, 4.252232143, 2.164772727, 1.920362903, 1.725543478, 2.052801724, 1.700892857, 0.413411458, 1.400735294, 1.035326087, 1.526442308, 0.881944444, 2.126116071, 2.246462264, 1.984375, 1.984375, 1.889880952, 1.860351563]) weight = torch.Tensor(weight) weight = weight.cuda() #################### criterion = nn.CrossEntropyLoss(weight=weight) optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) # Optionally resume from a checkpoint if args.resume: if os.path.isfile(args.resume): checkpoint = torch.load(args.resume) args.start_epoch = checkpoint['epoch'] best_prec1 = checkpoint['best_prec1'] model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) print('DFL-CNN <==> Part2 : Load Network <==> Continue from {} epoch {}'.format(args.resume, checkpoint['epoch'])) else: print('DFL-CNN <==> Part2 : Load Network <==> Failed') print('DFL-CNN <==> 
Part2 : Load Network <==> Done') print('DFL-CNN <==> Part3 : Load Dataset <==> Begin') dataroot = os.path.abspath(args.dataroot) traindir = os.path.join(dataroot, 'train') testdir = os.path.join(dataroot, 'test') # ImageFolder to process img transform_train = get_transform_for_train() transform_test = get_transform_for_test() transform_test_simple = get_transform_for_test_simple() train_dataset = ImageFolderWithPaths(traindir, transform=transform_train) test_dataset = ImageFolderWithPaths(testdir, transform=transform_test) test_dataset_simple = ImageFolderWithPaths(testdir, transform=transform_test_simple) # A list for target to classname index2classlist = train_dataset.index2classlist() # data loader train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.gpu * args.train_batchsize_per_gpu, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=1, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) test_loader_simple = torch.utils.data.DataLoader( test_dataset_simple, batch_size=1, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) print('DFL-CNN <==> Part3 : Load Dataset <==> Done') print('DFL-CNN <==> Part4 : Train and Test <==> Begin') for epoch in range(args.start_epoch, args.epochs): adjust_learning_rate(args, optimizer, epoch, gamma=0.1) # train for one epoch train(args, train_loader, model, criterion, optimizer, epoch) # evaluate on validation set if epoch % args.eval_epoch == 0: prec1 = validate_simple(args, test_loader_simple, model, criterion, epoch) # remember best prec@1 and save checkpoint is_best = prec1 > best_prec1 best_prec1 = max(prec1, best_prec1) save_checkpoint({ 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_prec1': best_prec1, 'optimizer': optimizer.state_dict(), 'prec1': prec1, }, is_best) # do a test for visualization if epoch % args.vis_epoch == 0 and epoch != 0: draw_patch(epoch, model, index2classlist, args)
identifier_body
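The resume branch in the row above restores 'epoch', 'best_prec1', 'state_dict', and 'optimizer' from a single checkpoint file. save_checkpoint itself is imported from utils.util and not included in this dump, so the sketch below pairs a plausible stand-in with the matching restore logic; the file names are assumptions.

# Sketch: the checkpoint round-trip used by the resume branch above.
# save_checkpoint lives in utils.util (not shown), so this stand-in and the
# file names are assumptions modelled on the keys the script reads back.
import shutil
import torch

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')

def resume_from(path, model, optimizer):
    """Restore model/optimizer state plus bookkeeping fields, matching the
    keys written above ('epoch', 'best_prec1', 'state_dict', 'optimizer')."""
    checkpoint = torch.load(path, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    return checkpoint['epoch'], checkpoint['best_prec1']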
176_main_448.py
from model.DFL import DFL_VGG16 from utils.util import * from utils.transform import * from train import * from validate import * from utils.init import * import sys import argparse import os import random import shutil import time import warnings import torch import torch.nn as nn import torch.nn.parallel import numpy as np import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvision.datasets as datasets from utils.MyImageFolderWithPaths import * from drawrect import * parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') parser.add_argument('--dataroot', metavar='DIR', help='path to dataset') parser.add_argument('--result', metavar='DIR', help='path to dataset') parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=100000, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--train_batchsize_per_gpu', default=16, type=int, metavar='N', help='mini-batch size (default: 64)') parser.add_argument('-testbatch', '--test_batch_size', default=64, type=int, metavar='N', help='mini-batch size (default: 32)') parser.add_argument('--init_type', default='xavier', type=str, metavar='INIT', help='init net') parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--momentum', default=0.9, type=float, metavar='momentum', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.000005, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--print-freq', '-p', default=1, type=int, metavar='N', help='print frequency (default: 10)') parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. 
') parser.add_argument('--gpu', default=4, type=int, help='GPU nums to use.') parser.add_argument('--log_train_dir', default='log_train', type=str, help='log for train') parser.add_argument('--log_test_dir', default='log_test', type=str, help='log for test') parser.add_argument('--nclass', default=583, type=int, help='num of classes') parser.add_argument('--eval_epoch', default=2, type=int, help='every eval_epoch we will evaluate') parser.add_argument('--vis_epoch', default=2, type=int, help='every vis_epoch we will evaluate') parser.add_argument('--save_epoch', default=2, type=int, help='every save_epoch we will evaluate') parser.add_argument('--w', default=448, type=int, help='transform, seen as align') parser.add_argument('--h', default=448, type=int, help='transform, seen as align') best_prec1 = 0 def main(): print('DFL-CNN <==> Part1 : prepare for parameters <==> Begin') global args, best_prec1 args = parser.parse_args() print('DFL-CNN <==> Part1 : prepare for parameters <==> Done') print('DFL-CNN <==> Part2 : Load Network <==> Begin') model = DFL_VGG16(k=10, nclass=176) if args.gpu is not None: model = nn.DataParallel(model, device_ids=range(args.gpu)) model = model.cuda() cudnn.benchmark = True if args.init_type is not None: try: init_weights(model, init_type=args.init_type) except: sys.exit('DFL-CNN <==> Part2 : Load Network <==> Init_weights error!') #################### weight = np.array([0.844414894, 2.834821429, 1.725543478, 1.322916667, 1.725543478, 1.700892857, 0.739518634, 0.629960317, 0.773133117, 1.190625, 1.700892857, 3.501838235, 3.052884615, 1.368534483, 1.451981707, 1.831730769, 2.204861111, 1.133928571, 0.592350746, 0.262252203, 1.123231132, 0.308452073, 1.280241935, 1.009004237, 1.725543478, 1.308379121, 0.265172606, 1.777052239, 1.469907407, 1.00052521, 1.803977273, 0.470602767, 0.960181452, 1.67693662, 1.608952703, 0.280807783, 0.9921875, 0.466911765, 1.112733645, 2.903963415, 2.768895349, 0.295440447, 0.265764509, 2.289663462, 2.38125, 1.434487952, 1.984375, 0.580792683, 1.630993151, 1.831730769, 1.860351563, 1.803977273, 2.768895349, 1.951844262, 2.126116071, 1.831730769, 1.920362903, 2.768895349, 1.777052239, 3.217905405, 2.334558824, 2.088815789, 0.519923581, 3.133223684, 1.951844262, 2.289663462, 1.133928571, 1.507120253, 1.984375, 2.334558824, 1.48828125, 1.984375, 1.5875, 1.831730769, 1.092316514, 1.469907407, 1.26662234, 1.5875, 0.661458333, 2.088815789, 1.725543478, 1.725543478, 0.376780063, 1.384447674, 1.630993151, 2.9765625, 0.607461735, 0.888526119, 2.645833333, 2.334558824, 1.777052239, 0.519923581, 0.875459559, 0.268158784, 0.278183411, 1.017628205, 1.009004237, 0.548675115, 0.264583333, 1.860351563, 1.777052239, 0.65418956, 0.712949102, 1.984375, 2.088815789, 0.365222393, 1.803977273, 0.564277251, 1.630993151, 1.889880952, 1.507120253, 1.507120253, 1.803977273, 2.429846939, 3.96875, 1.67693662, 1.630993151, 2.164772727, 2.052801724, 1.167279412, 1.803977273, 0.9525, 1.831730769, 1.700892857, 1.352982955, 1.253289474, 2.164772727, 2.164772727, 1.352982955, 2.645833333, 1.831730769, 1.630993151, 1.5875, 3.217905405, 0.804476351, 2.705965909, 1.725543478, 1.630993151, 2.429846939, 1.777052239, 0.922965116, 2.204861111, 1.123231132, 0.79375, 0.320060484, 0.403601695, 0.543664384, 0.269372172, 2.588315217, 2.289663462, 1.322916667, 1.280241935, 0.259395425, 1.951844262, 0.620117188, 0.252251059, 1.653645833, 0.519923581, 0.616904145, 4.252232143, 2.164772727, 1.920362903, 1.725543478, 2.052801724, 1.700892857, 0.413411458, 1.400735294, 1.035326087, 
1.526442308, 0.881944444, 2.126116071, 2.246462264, 1.984375, 1.984375, 1.889880952, 1.860351563]) weight = torch.Tensor(weight) weight = weight.cuda() #################### criterion = nn.CrossEntropyLoss(weight=weight) optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) # Optionally resume from a checkpoint if args.resume: if os.path.isfile(args.resume): checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) print('DFL-CNN <==> Part2 : Load Network <==> Continue from {} epoch {}'.format(args.resume, checkpoint['epoch'])) else: print('DFL-CNN <==> Part2 : Load Network <==> Failed') print('DFL-CNN <==> Part2 : Load Network <==> Done') print('DFL-CNN <==> Part3 : Load Dataset <==> Begin') dataroot = os.path.abspath(args.dataroot) traindir = os.path.join(dataroot, 'train') testdir = os.path.join(dataroot, 'test') # ImageFolder to process img transform_train = get_transform_for_train() transform_test = get_transform_for_test() transform_test_simple = get_transform_for_test_simple() train_dataset = ImageFolderWithPaths(traindir, transform=transform_train) test_dataset = ImageFolderWithPaths(testdir, transform=transform_test) test_dataset_simple = ImageFolderWithPaths(testdir, transform=transform_test_simple) # A list for target to classname index2classlist = train_dataset.index2classlist() # data loader train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.gpu * args.train_batchsize_per_gpu, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=1, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) test_loader_simple = torch.utils.data.DataLoader( test_dataset_simple, batch_size=1, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True) print('DFL-CNN <==> Part3 : Load Dataset <==> Done') print('DFL-CNN <==> Part4 : Train and Test <==> Begin') for epoch in range(args.start_epoch, args.epochs): adjust_learning_rate(args, optimizer, epoch, gamma=0.1) # train for one epoch train(args, train_loader, model, criterion, optimizer, epoch) # evaluate on validation set if epoch % args.eval_epoch == 0: prec1 = validate_simple(args, test_loader_simple, model, criterion, epoch) # remember best prec@1 and save checkpoint is_best = prec1 > best_prec1 best_prec1 = max(prec1, best_prec1) save_checkpoint({ 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_prec1': best_prec1, 'optimizer': optimizer.state_dict(), 'prec1': prec1, }, is_best) # do a test for visualization if epoch % args.vis_epoch == 0 and epoch != 0: draw_patch(epoch, model, index2classlist, args) if __name__ == '__main__': main()
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
random_line_split
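The training loop above calls adjust_learning_rate(args, optimizer, epoch, gamma=0.1), but the function is defined in train.py, which this dump omits. A plausible step-decay body is sketched below; only gamma=0.1 is visible at the call site, so the 30-epoch decay interval is an assumption.

# Sketch: a plausible body for adjust_learning_rate (real definition in
# train.py, not shown). The 30-epoch step is an assumption.
def adjust_learning_rate(args, optimizer, epoch, gamma=0.1, step=30):
    """Step decay: scale the base learning rate by gamma every `step` epochs."""
    lr = args.lr * (gamma ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr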
ansi_up.ts
/* ansi_up.js * author : Dru Nelson * license : MIT * http://github.com/drudru/ansi_up */ "use strict"; interface AU_Color { rgb:number[]; class_name:string; } // Represents the output of process_ansi(): a snapshot of the AnsiUp state machine // at a given point in time, which wraps a fragment of text. This wouuld allow deferred // processing of text fragments and colors, if ever needed. interface TextWithAttr { fg:AU_Color; bg:AU_Color; bold:boolean; text:string; } // Represents an object that is responsible for generating output from parsed ANSI color // metadata and text content. interface Formatter { // Invoked for each generated TextWithAttr fragment outputted by process_ansi(). // this function is responsible for generating output for a single TextWithAttr // fragment. The result of transform() will be collected into an array that will // be provided to compose(). transform(fragment:TextWithAttr, instance:AnsiUp):any; // Invoked on the set of outputs from transform; the return value of this function // will be the final output of ANSI processing. compose(segments:any[], instance:AnsiUp):any; } // ES5 template string transformer // NOTE: default is multiline (workaround for now til I can // determine flags inline) function rgx(tmplObj, ...subst) { // Use the 'raw' value so we don't have to double backslash in a template string let regexText:string = tmplObj.raw[0]; // Remove white-space and comments let wsrgx = /^\s+|\s+\n|\s+#[\s\S]+?\n/gm; let txt2 = regexText.replace(wsrgx, ''); return new RegExp(txt2, 'm'); } class AnsiUp { VERSION = "3.0.0"; ansi_colors = [ // Normal colors [ { rgb: [ 0, 0, 0], class_name: "ansi-black" }, { rgb: [187, 0, 0], class_name: "ansi-red" }, { rgb: [ 0, 187, 0], class_name: "ansi-green" }, { rgb: [187, 187, 0], class_name: "ansi-yellow" }, { rgb: [ 0, 0, 187], class_name: "ansi-blue" }, { rgb: [187, 0, 187], class_name: "ansi-magenta" }, { rgb: [ 0, 187, 187], class_name: "ansi-cyan" }, { rgb: [255, 255, 255], class_name: "ansi-white" } ], // Bright colors [ { rgb: [ 85, 85, 85], class_name: "ansi-bright-black" }, { rgb: [255, 85, 85], class_name: "ansi-bright-red" }, { rgb: [ 0, 255, 0], class_name: "ansi-bright-green" }, { rgb: [255, 255, 85], class_name: "ansi-bright-yellow" }, { rgb: [ 85, 85, 255], class_name: "ansi-bright-blue" }, { rgb: [255, 85, 255], class_name: "ansi-bright-magenta" }, { rgb: [ 85, 255, 255], class_name: "ansi-bright-cyan" }, { rgb: [255, 255, 255], class_name: "ansi-bright-white" } ] ]; htmlFormatter:Formatter = { transform(fragment:TextWithAttr, instance:AnsiUp):string { let txt = fragment.text; if (txt.length === 0) return txt; if (instance._escape_for_html) txt = instance.old_escape_for_html(txt); // If colors not set, default style is used if (!fragment.bold && fragment.fg === null && fragment.bg === null) return txt; let styles:string[] = []; let classes:string[] = []; let fg = fragment.fg; let bg = fragment.bg; // Note on bold: https://stackoverflow.com/questions/6737005/what-are-some-advantages-to-using-span-style-font-weightbold-rather-than-b?rq=1 if (fragment.bold) styles.push('font-weight:bold') if (!instance._use_classes) { // USE INLINE STYLES if (fg) styles.push(`color:rgb(${fg.rgb.join(',')})`); if (bg) styles.push(`background-color:rgb(${bg.rgb})`); } else { // USE CLASSES if (fg) { if (fg.class_name !== 'truecolor') { classes.push(`${fg.class_name}-fg`); } else { styles.push(`color:rgb(${fg.rgb.join(',')})`); } } if (bg) { if (bg.class_name !== 'truecolor') { classes.push(`${bg.class_name}-bg`); } else { 
styles.push(`background-color:rgb(${bg.rgb.join(',')})`); } } } let class_string = ''; let style_string = ''; if (classes.length) class_string = ` class="${classes.join(' ')}"`; if (styles.length) style_string = ` style="${styles.join(';')}"`; return `<span${style_string}${class_string}>${txt}</span>`; }, compose(segments:string[], instance:AnsiUp):string { return segments.join(""); } }; textFormatter:Formatter = { transform(fragment:TextWithAttr, instance:AnsiUp):string { return fragment.text; }, compose(segments:string[], instance:AnsiUp):string { return segments.join(""); } }; // 256 Colors Palette // CSS RGB strings - ex. "255, 255, 255" private palette_256:AU_Color[]; private fg:AU_Color; private bg:AU_Color; private bold:boolean; private _use_classes:boolean; private _escape_for_html; private _sgr_regex:RegExp; private _buffer:string; constructor() { this.setup_256_palette(); this._use_classes = false; this._escape_for_html = true; this.bold = false; this.fg = this.bg = null; this._buffer = ''; } set use_classes(arg:boolean) { this._use_classes = arg; } get use_classes():boolean { return this._use_classes; } set escape_for_html(arg:boolean) { this._escape_for_html = arg; } get escape_for_html():boolean { return this._escape_for_html; } private setup_256_palette():void { this.palette_256 = []; // Index 0..15 : Ansi-Colors this.ansi_colors.forEach( palette => { palette.forEach( rec => { this.palette_256.push(rec); }); }); // Index 16..231 : RGB 6x6x6 // https://gist.github.com/jasonm23/2868981#file-xterm-256color-yaml let levels = [0, 95, 135, 175, 215, 255]; for (let r = 0; r < 6; ++r) { for (let g = 0; g < 6; ++g) { for (let b = 0; b < 6; ++b) { let col = {rgb:[levels[r], levels[g], levels[b]], class_name:'truecolor'}; this.palette_256.push(col); } } } // Index 232..255 : Grayscale let grey_level = 8; for (let i = 0; i < 24; ++i, grey_level += 10) { let gry = {rgb:[grey_level, grey_level, grey_level], class_name:'truecolor'}; this.palette_256.push(gry); } } private old_escape_for_html(txt:string):string { return txt.replace(/[&<>]/gm, (str) => { if (str === "&") return "&amp;"; if (str === "<") return "&lt;"; if (str === ">") return "&gt;"; }); } private old_linkify(txt:string):string { return txt.replace(/(https?:\/\/[^\s]+)/gm, (str) => { return `<a href="${str}">${str}</a>`; }); } private detect_incomplete_ansi(txt:string) { // Scan forwards for a potential command character // If one exists, we must assume we are good // [\x40-\x7e]) # the command return !(/.*?[\x40-\x7e]/.test(txt)); } private detect_incomplete_link(txt:string) { // It would be nice if Javascript RegExp supported // a hitEnd() method // Scan backwards for first whitespace var found = false; for (var i = txt.length - 1; i > 0; i--) { if (/\s|\x1B/.test(txt[i])) { found = true; break; } } if (!found) { // Handle one other case // Maybe the whole string is a URL? 
if (/(https?:\/\/[^\s]+)/.test(txt)) return 0; else return -1; } // Test if possible prefix var prefix = txt.substr(i + 1, 4); if (prefix.length === 0) return -1; if ("http".indexOf(prefix) === 0) return (i + 1); } ansi_to(txt:string, formatter:Formatter):any { var pkt = this._buffer + txt; this._buffer = ''; var raw_text_pkts = pkt.split(/\x1B\[/); if (raw_text_pkts.length === 1) raw_text_pkts.push(''); this.handle_incomplete_sequences(raw_text_pkts); let first_chunk = this.with_state(raw_text_pkts.shift()); // the first pkt is not the result of the split let blocks = new Array(raw_text_pkts.length); for (let i = 0, len = raw_text_pkts.length; i < len; ++i) { blocks[i] = (formatter.transform(this.process_ansi(raw_text_pkts[i]), this)); } if (first_chunk.text.length > 0) blocks.unshift(formatter.transform(first_chunk, this)); return formatter.compose(blocks, this); } ansi_to_html(txt:string):string { return this.ansi_to(txt, this.htmlFormatter); } ansi_to_text(txt:string):string { return this.ansi_to(txt, this.textFormatter); } private with_state(text:string):TextWithAttr { return { bold: this.bold, fg: this.fg, bg: this.bg, text: text }; } private handle_incomplete_sequences(chunks:string[]):void { // COMPLEX - BEGIN // Validate the last chunks for: // - incomplete ANSI sequence // - incomplete ESC // If any of these occur, we may have to buffer var last_chunk = chunks[chunks.length - 1]; // - incomplete ANSI sequence if ((last_chunk.length > 0) && this.detect_incomplete_ansi(last_chunk)) { this._buffer = "\x1B[" + last_chunk; chunks.pop(); chunks.push(''); } else { // - incomplete ESC if (last_chunk.slice(-1) === "\x1B") { this._buffer = "\x1B"; console.log("raw", chunks); chunks.pop(); chunks.push(last_chunk.substr(0, last_chunk.length - 1)); console.log(chunks); console.log(last_chunk); } // - Incomplete ESC, only one packet if (chunks.length === 2 && chunks[1] === "" && chunks[0].slice(-1) === "\x1B") { this._buffer = "\x1B"; last_chunk = chunks.shift(); chunks.unshift(last_chunk.substr(0, last_chunk.length - 1)); } } // COMPLEX - END } private process_ansi(block:string):TextWithAttr { // This must only be called with a string that started with a CSI (the string split above) // The CSI must not be in the string. We consider this string to be a 'block'. // It has an ANSI command at the front that affects the text that follows it. // // All ansi codes are typically in the following format. We parse it and focus // specifically on the graphics commands (SGR) // // CONTROL-SEQUENCE-INTRODUCER CSI (ESC, '[') // PRIVATE-MODE-CHAR (!, <, >, ?) // Numeric parameters separated by semicolons ('0' - '9', ';') // Intermediate-modifiers (0x20 - 0x2f) // COMMAND-CHAR (0x40 - 0x7e) // // We use a regex to parse into capture groups the PRIVATE-MODE-CHAR to the COMMAND // and the following text if (!this._sgr_regex) { // This regex is designed to parse an ANSI terminal CSI command. To be more specific, // we follow the XTERM conventions vs. the various other "standards". // http://invisible-island.net/xterm/ctlseqs/ctlseqs.html // this._sgr_regex = rgx` ^ # beginning of line ([!\x3c-\x3f]?) # a private-mode char (!, <, =, >, ?) ([\d;]*) # any digits or semicolons ([\x20-\x2f]? # an intermediate modifier [\x40-\x7e]) # the command ([\s\S]*) # any text following this CSI sequence `; } let matches = block.match(this._sgr_regex); // The regex should have handled all cases! 
if (!matches) { return this.with_state(block); } let orig_txt = matches[4]; if (matches[1] !== '' || matches[3] !== 'm') { return this.with_state(orig_txt); } // Ok - we have a valid "SGR" (Select Graphic Rendition) let sgr_cmds = matches[2].split(';'); // Each of these params affects the SGR state // Why do we shift through the array instead of a forEach?? // ... because some commands consume the params that follow ! while (sgr_cmds.length > 0) { let sgr_cmd_str = sgr_cmds.shift(); let num = parseInt(sgr_cmd_str, 10); if (isNaN(num) || num === 0) { this.fg = this.bg = null; this.bold = false; } else if (num === 1) { this.bold = true; } else if (num === 22) { this.bold = false; } else if (num === 39) { this.fg = null; } else if (num === 49) { this.bg = null; } else if ((num >= 30) && (num < 38)) { this.fg = this.ansi_colors[0][(num - 30)]; } else if ((num >= 40) && (num < 48)) { this.bg = this.ansi_colors[0][(num - 40)]; } else if ((num >= 90) && (num < 98)) { this.fg = this.ansi_colors[1][(num - 90)]; } else if ((num >= 100) && (num < 108)) { this.bg = this.ansi_colors[1][(num - 100)]; } else if (num === 38 || num === 48) { // extended set foreground/background color // validate that param exists if (sgr_cmds.length > 0)
            }
        }

        return this.with_state(orig_txt);
    }
}
{
    // extend color (38=fg, 48=bg)
    let is_foreground = (num === 38);
    let mode_cmd = sgr_cmds.shift();

    // MODE '5' - 256 color palette
    if (mode_cmd === '5' && sgr_cmds.length > 0) {
        let palette_index = parseInt(sgr_cmds.shift(), 10);
        if (palette_index >= 0 && palette_index <= 255) {
            if (is_foreground)
                this.fg = this.palette_256[palette_index];
            else
                this.bg = this.palette_256[palette_index];
        }
    }

    // MODE '2' - True Color
    if (mode_cmd === '2' && sgr_cmds.length > 2) {
        let r = parseInt(sgr_cmds.shift(), 10);
        let g = parseInt(sgr_cmds.shift(), 10);
        let b = parseInt(sgr_cmds.shift(), 10);
        if ((r >= 0 && r <= 255) &&
            (g >= 0 && g <= 255) &&
            (b >= 0 && b <= 255)) {
            let c = { rgb: [r, g, b], class_name: 'truecolor' };
            if (is_foreground)
                this.fg = c;
            else
                this.bg = c;
        }
    }
}
conditional_block
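The conditional_block middle above is the extended-color branch of SGR parsing: after code 38 (foreground) or 48 (background), mode '5' consumes one 256-color palette index and mode '2' consumes an r;g;b triple, with each parameter shifted off the queue as it is read. A compact Python rendering of the same consumption logic (Python is used for all sketches in this dump):

# Sketch (Python) of the extended-color branch above; mirrors the shift()
# calls in the TypeScript row, but simplified (int() will raise on
# non-numeric input where parseInt would return NaN).
def parse_extended_color(params):
    """params: remaining SGR parameter strings after '38' or '48'; consumed
    in place. Returns ('palette', index), ('rgb', (r, g, b)), or None."""
    if not params:
        return None
    mode = params.pop(0)
    if mode == '5' and params:
        index = int(params.pop(0))
        if 0 <= index <= 255:
            return ('palette', index)
    elif mode == '2' and len(params) >= 3:
        r, g, b = (int(params.pop(0)) for _ in range(3))
        if all(0 <= v <= 255 for v in (r, g, b)):
            return ('rgb', (r, g, b))
    return None

# e.g. parse_extended_color(['5', '196'])           -> ('palette', 196)
#      parse_extended_color(['2', '255', '85', '0']) -> ('rgb', (255, 85, 0))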
ansi_up.ts
/* ansi_up.js * author : Dru Nelson * license : MIT * http://github.com/drudru/ansi_up */ "use strict"; interface AU_Color { rgb:number[]; class_name:string; } // Represents the output of process_ansi(): a snapshot of the AnsiUp state machine // at a given point in time, which wraps a fragment of text. This wouuld allow deferred // processing of text fragments and colors, if ever needed. interface TextWithAttr { fg:AU_Color; bg:AU_Color; bold:boolean; text:string; } // Represents an object that is responsible for generating output from parsed ANSI color // metadata and text content. interface Formatter { // Invoked for each generated TextWithAttr fragment outputted by process_ansi(). // this function is responsible for generating output for a single TextWithAttr // fragment. The result of transform() will be collected into an array that will // be provided to compose(). transform(fragment:TextWithAttr, instance:AnsiUp):any; // Invoked on the set of outputs from transform; the return value of this function // will be the final output of ANSI processing. compose(segments:any[], instance:AnsiUp):any; } // ES5 template string transformer // NOTE: default is multiline (workaround for now til I can // determine flags inline) function rgx(tmplObj, ...subst) { // Use the 'raw' value so we don't have to double backslash in a template string let regexText:string = tmplObj.raw[0]; // Remove white-space and comments let wsrgx = /^\s+|\s+\n|\s+#[\s\S]+?\n/gm; let txt2 = regexText.replace(wsrgx, ''); return new RegExp(txt2, 'm'); } class AnsiUp { VERSION = "3.0.0"; ansi_colors = [ // Normal colors [ { rgb: [ 0, 0, 0], class_name: "ansi-black" }, { rgb: [187, 0, 0], class_name: "ansi-red" }, { rgb: [ 0, 187, 0], class_name: "ansi-green" }, { rgb: [187, 187, 0], class_name: "ansi-yellow" }, { rgb: [ 0, 0, 187], class_name: "ansi-blue" }, { rgb: [187, 0, 187], class_name: "ansi-magenta" }, { rgb: [ 0, 187, 187], class_name: "ansi-cyan" }, { rgb: [255, 255, 255], class_name: "ansi-white" } ], // Bright colors [ { rgb: [ 85, 85, 85], class_name: "ansi-bright-black" }, { rgb: [255, 85, 85], class_name: "ansi-bright-red" }, { rgb: [ 0, 255, 0], class_name: "ansi-bright-green" }, { rgb: [255, 255, 85], class_name: "ansi-bright-yellow" }, { rgb: [ 85, 85, 255], class_name: "ansi-bright-blue" }, { rgb: [255, 85, 255], class_name: "ansi-bright-magenta" }, { rgb: [ 85, 255, 255], class_name: "ansi-bright-cyan" }, { rgb: [255, 255, 255], class_name: "ansi-bright-white" } ] ]; htmlFormatter:Formatter = { transform(fragment:TextWithAttr, instance:AnsiUp):string { let txt = fragment.text; if (txt.length === 0) return txt; if (instance._escape_for_html) txt = instance.old_escape_for_html(txt); // If colors not set, default style is used if (!fragment.bold && fragment.fg === null && fragment.bg === null) return txt; let styles:string[] = []; let classes:string[] = []; let fg = fragment.fg; let bg = fragment.bg; // Note on bold: https://stackoverflow.com/questions/6737005/what-are-some-advantages-to-using-span-style-font-weightbold-rather-than-b?rq=1 if (fragment.bold) styles.push('font-weight:bold') if (!instance._use_classes) { // USE INLINE STYLES if (fg) styles.push(`color:rgb(${fg.rgb.join(',')})`); if (bg) styles.push(`background-color:rgb(${bg.rgb})`); } else { // USE CLASSES if (fg) { if (fg.class_name !== 'truecolor') { classes.push(`${fg.class_name}-fg`); } else { styles.push(`color:rgb(${fg.rgb.join(',')})`); } } if (bg) { if (bg.class_name !== 'truecolor') { classes.push(`${bg.class_name}-bg`); } else { 
styles.push(`background-color:rgb(${bg.rgb.join(',')})`); } } } let class_string = ''; let style_string = ''; if (classes.length) class_string = ` class="${classes.join(' ')}"`; if (styles.length) style_string = ` style="${styles.join(';')}"`; return `<span${style_string}${class_string}>${txt}</span>`; }, compose(segments:string[], instance:AnsiUp):string { return segments.join(""); } }; textFormatter:Formatter = { transform(fragment:TextWithAttr, instance:AnsiUp):string { return fragment.text; }, compose(segments:string[], instance:AnsiUp):string { return segments.join(""); } }; // 256 Colors Palette // CSS RGB strings - ex. "255, 255, 255" private palette_256:AU_Color[]; private fg:AU_Color; private bg:AU_Color; private bold:boolean; private _use_classes:boolean; private _escape_for_html; private _sgr_regex:RegExp; private _buffer:string; constructor() { this.setup_256_palette(); this._use_classes = false; this._escape_for_html = true; this.bold = false; this.fg = this.bg = null; this._buffer = ''; } set use_classes(arg:boolean) { this._use_classes = arg; } get use_classes():boolean { return this._use_classes; } set escape_for_html(arg:boolean) { this._escape_for_html = arg; } get escape_for_html():boolean { return this._escape_for_html; } private setup_256_palette():void { this.palette_256 = []; // Index 0..15 : Ansi-Colors this.ansi_colors.forEach( palette => { palette.forEach( rec => { this.palette_256.push(rec); }); }); // Index 16..231 : RGB 6x6x6 // https://gist.github.com/jasonm23/2868981#file-xterm-256color-yaml let levels = [0, 95, 135, 175, 215, 255]; for (let r = 0; r < 6; ++r) { for (let g = 0; g < 6; ++g) { for (let b = 0; b < 6; ++b) { let col = {rgb:[levels[r], levels[g], levels[b]], class_name:'truecolor'}; this.palette_256.push(col); } } } // Index 232..255 : Grayscale let grey_level = 8; for (let i = 0; i < 24; ++i, grey_level += 10) { let gry = {rgb:[grey_level, grey_level, grey_level], class_name:'truecolor'}; this.palette_256.push(gry); } } private old_escape_for_html(txt:string):string { return txt.replace(/[&<>]/gm, (str) => { if (str === "&") return "&amp;"; if (str === "<") return "&lt;"; if (str === ">") return "&gt;"; }); } private old_linkify(txt:string):string { return txt.replace(/(https?:\/\/[^\s]+)/gm, (str) => { return `<a href="${str}">${str}</a>`; }); } private detect_incomplete_ansi(txt:string) { // Scan forwards for a potential command character // If one exists, we must assume we are good // [\x40-\x7e]) # the command return !(/.*?[\x40-\x7e]/.test(txt)); } private detect_incomplete_link(txt:string) { // It would be nice if Javascript RegExp supported // a hitEnd() method // Scan backwards for first whitespace var found = false; for (var i = txt.length - 1; i > 0; i--) { if (/\s|\x1B/.test(txt[i])) { found = true; break; } } if (!found) { // Handle one other case // Maybe the whole string is a URL? 
if (/(https?:\/\/[^\s]+)/.test(txt)) return 0; else return -1; } // Test if possible prefix var prefix = txt.substr(i + 1, 4); if (prefix.length === 0) return -1; if ("http".indexOf(prefix) === 0) return (i + 1); } ansi_to(txt:string, formatter:Formatter):any { var pkt = this._buffer + txt; this._buffer = ''; var raw_text_pkts = pkt.split(/\x1B\[/); if (raw_text_pkts.length === 1) raw_text_pkts.push(''); this.handle_incomplete_sequences(raw_text_pkts); let first_chunk = this.with_state(raw_text_pkts.shift()); // the first pkt is not the result of the split let blocks = new Array(raw_text_pkts.length); for (let i = 0, len = raw_text_pkts.length; i < len; ++i) { blocks[i] = (formatter.transform(this.process_ansi(raw_text_pkts[i]), this)); } if (first_chunk.text.length > 0) blocks.unshift(formatter.transform(first_chunk, this)); return formatter.compose(blocks, this); } ansi_to_html(txt:string):string { return this.ansi_to(txt, this.htmlFormatter); } ansi_to_text(txt:string):string { return this.ansi_to(txt, this.textFormatter); } private with_state(text:string):TextWithAttr { return { bold: this.bold, fg: this.fg, bg: this.bg, text: text }; } private handle_incomplete_sequences(chunks:string[]):void { // COMPLEX - BEGIN // Validate the last chunks for: // - incomplete ANSI sequence // - incomplete ESC // If any of these occur, we may have to buffer var last_chunk = chunks[chunks.length - 1]; // - incomplete ANSI sequence if ((last_chunk.length > 0) && this.detect_incomplete_ansi(last_chunk)) { this._buffer = "\x1B[" + last_chunk; chunks.pop(); chunks.push(''); } else { // - incomplete ESC if (last_chunk.slice(-1) === "\x1B") { this._buffer = "\x1B"; console.log("raw", chunks); chunks.pop(); chunks.push(last_chunk.substr(0, last_chunk.length - 1)); console.log(chunks); console.log(last_chunk); } // - Incomplete ESC, only one packet if (chunks.length === 2 && chunks[1] === "" && chunks[0].slice(-1) === "\x1B") { this._buffer = "\x1B"; last_chunk = chunks.shift(); chunks.unshift(last_chunk.substr(0, last_chunk.length - 1)); } } // COMPLEX - END } private process_ansi(block:string):TextWithAttr { // This must only be called with a string that started with a CSI (the string split above) // The CSI must not be in the string. We consider this string to be a 'block'. // It has an ANSI command at the front that affects the text that follows it. // // All ansi codes are typically in the following format. We parse it and focus // specifically on the graphics commands (SGR) // // CONTROL-SEQUENCE-INTRODUCER CSI (ESC, '[') // PRIVATE-MODE-CHAR (!, <, >, ?) // Numeric parameters separated by semicolons ('0' - '9', ';') // Intermediate-modifiers (0x20 - 0x2f) // COMMAND-CHAR (0x40 - 0x7e) // // We use a regex to parse into capture groups the PRIVATE-MODE-CHAR to the COMMAND // and the following text if (!this._sgr_regex) { // This regex is designed to parse an ANSI terminal CSI command. To be more specific, // we follow the XTERM conventions vs. the various other "standards". // http://invisible-island.net/xterm/ctlseqs/ctlseqs.html // this._sgr_regex = rgx` ^ # beginning of line ([!\x3c-\x3f]?) # a private-mode char (!, <, =, >, ?) ([\d;]*) # any digits or semicolons ([\x20-\x2f]? # an intermediate modifier [\x40-\x7e]) # the command ([\s\S]*) # any text following this CSI sequence `; } let matches = block.match(this._sgr_regex); // The regex should have handled all cases! 
        if (!matches) {
            return this.with_state(block);
        }

        let orig_txt = matches[4];

        if (matches[1] !== '' || matches[3] !== 'm') {
            return this.with_state(orig_txt);
        }
        // Ok - we have a valid "SGR" (Select Graphic Rendition)

        let sgr_cmds = matches[2].split(';');

        // Each of these params affects the SGR state

        // Why do we shift through the array instead of a forEach??
        // ... because some commands consume the params that follow !
        while (sgr_cmds.length > 0) {

            let sgr_cmd_str = sgr_cmds.shift();
            let num = parseInt(sgr_cmd_str, 10);

            if (isNaN(num) || num === 0) {
                this.fg = this.bg = null;
                this.bold = false;
            } else if (num === 1) {
                this.bold = true;
            } else if (num === 22) {
                this.bold = false;
            } else if (num === 39) {
                this.fg = null;
            } else if (num === 49) {
                this.bg = null;
            } else if ((num >= 30) && (num < 38)) {
                this.fg = this.ansi_colors[0][(num - 30)];
            } else if ((num >= 40) && (num < 48)) {
                this.bg = this.ansi_colors[0][(num - 40)];
            } else if ((num >= 90) && (num < 98)) {
                this.fg = this.ansi_colors[1][(num - 90)];
            } else if ((num >= 100) && (num < 108)) {
                this.bg = this.ansi_colors[1][(num - 100)];
            } else if (num === 38 || num === 48) {

                // extended set foreground/background color
                // validate that param exists
                if (sgr_cmds.length > 0) {
                    // extend color (38=fg, 48=bg)
                    let is_foreground = (num === 38);
                    let mode_cmd = sgr_cmds.shift();

                    // MODE '5' - 256 color palette
                    if (mode_cmd === '5' && sgr_cmds.length > 0) {
                        let palette_index = parseInt(sgr_cmds.shift(), 10);
                        if (palette_index >= 0 && palette_index <= 255) {
                            if (is_foreground)
                                this.fg = this.palette_256[palette_index];
                            else
                                this.bg = this.palette_256[palette_index];
                        }
                    }

                    // MODE '2' - True Color
                    if (mode_cmd === '2' && sgr_cmds.length > 2) {
                        let r = parseInt(sgr_cmds.shift(), 10);
                        let g = parseInt(sgr_cmds.shift(), 10);
                        let b = parseInt(sgr_cmds.shift(), 10);
                        if ((r >= 0 && r <= 255) &&
                            (g >= 0 && g <= 255) &&
                            (b >= 0 && b <= 255)) {
                            let c = { rgb: [r, g, b], class_name: 'truecolor' };
                            if (is_foreground)
                                this.fg = c;
                            else
                                this.bg = c;
                        }
                    }
                }
            }
        }

        return this.with_state(orig_txt);
    }
}
random_line_split
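setup_256_palette in the file above fills palette_256 with the standard xterm layout: the 16 named ANSI colors, a 6x6x6 RGB cube at indices 16-231, and 24 grayscale steps from 8 to 238 at indices 232-255. The same layout, sketched in Python:

# Sketch: the xterm 256-color layout that setup_256_palette builds above.
def build_256_palette(ansi_16):
    """ansi_16: sixteen (r, g, b) tuples for the normal + bright colors."""
    palette = list(ansi_16)                                  # indices 0..15
    levels = [0, 95, 135, 175, 215, 255]
    palette += [(r, g, b) for r in levels
                for g in levels
                for b in levels]                             # indices 16..231
    palette += [(v, v, v) for v in range(8, 248, 10)]        # indices 232..255
    assert len(palette) == 256
    return palette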
ansi_up.ts
/* ansi_up.js * author : Dru Nelson * license : MIT * http://github.com/drudru/ansi_up */ "use strict"; interface AU_Color { rgb:number[]; class_name:string; } // Represents the output of process_ansi(): a snapshot of the AnsiUp state machine // at a given point in time, which wraps a fragment of text. This wouuld allow deferred // processing of text fragments and colors, if ever needed. interface TextWithAttr { fg:AU_Color; bg:AU_Color; bold:boolean; text:string; } // Represents an object that is responsible for generating output from parsed ANSI color // metadata and text content. interface Formatter { // Invoked for each generated TextWithAttr fragment outputted by process_ansi(). // this function is responsible for generating output for a single TextWithAttr // fragment. The result of transform() will be collected into an array that will // be provided to compose(). transform(fragment:TextWithAttr, instance:AnsiUp):any; // Invoked on the set of outputs from transform; the return value of this function // will be the final output of ANSI processing. compose(segments:any[], instance:AnsiUp):any; } // ES5 template string transformer // NOTE: default is multiline (workaround for now til I can // determine flags inline) function rgx(tmplObj, ...subst) { // Use the 'raw' value so we don't have to double backslash in a template string let regexText:string = tmplObj.raw[0]; // Remove white-space and comments let wsrgx = /^\s+|\s+\n|\s+#[\s\S]+?\n/gm; let txt2 = regexText.replace(wsrgx, ''); return new RegExp(txt2, 'm'); } class AnsiUp { VERSION = "3.0.0"; ansi_colors = [ // Normal colors [ { rgb: [ 0, 0, 0], class_name: "ansi-black" }, { rgb: [187, 0, 0], class_name: "ansi-red" }, { rgb: [ 0, 187, 0], class_name: "ansi-green" }, { rgb: [187, 187, 0], class_name: "ansi-yellow" }, { rgb: [ 0, 0, 187], class_name: "ansi-blue" }, { rgb: [187, 0, 187], class_name: "ansi-magenta" }, { rgb: [ 0, 187, 187], class_name: "ansi-cyan" }, { rgb: [255, 255, 255], class_name: "ansi-white" } ], // Bright colors [ { rgb: [ 85, 85, 85], class_name: "ansi-bright-black" }, { rgb: [255, 85, 85], class_name: "ansi-bright-red" }, { rgb: [ 0, 255, 0], class_name: "ansi-bright-green" }, { rgb: [255, 255, 85], class_name: "ansi-bright-yellow" }, { rgb: [ 85, 85, 255], class_name: "ansi-bright-blue" }, { rgb: [255, 85, 255], class_name: "ansi-bright-magenta" }, { rgb: [ 85, 255, 255], class_name: "ansi-bright-cyan" }, { rgb: [255, 255, 255], class_name: "ansi-bright-white" } ] ]; htmlFormatter:Formatter = { transform(fragment:TextWithAttr, instance:AnsiUp):string { let txt = fragment.text; if (txt.length === 0) return txt; if (instance._escape_for_html) txt = instance.old_escape_for_html(txt); // If colors not set, default style is used if (!fragment.bold && fragment.fg === null && fragment.bg === null) return txt; let styles:string[] = []; let classes:string[] = []; let fg = fragment.fg; let bg = fragment.bg; // Note on bold: https://stackoverflow.com/questions/6737005/what-are-some-advantages-to-using-span-style-font-weightbold-rather-than-b?rq=1 if (fragment.bold) styles.push('font-weight:bold') if (!instance._use_classes) { // USE INLINE STYLES if (fg) styles.push(`color:rgb(${fg.rgb.join(',')})`); if (bg) styles.push(`background-color:rgb(${bg.rgb})`); } else { // USE CLASSES if (fg) { if (fg.class_name !== 'truecolor') { classes.push(`${fg.class_name}-fg`); } else { styles.push(`color:rgb(${fg.rgb.join(',')})`); } } if (bg) { if (bg.class_name !== 'truecolor') { classes.push(`${bg.class_name}-bg`); } else { 
styles.push(`background-color:rgb(${bg.rgb.join(',')})`); } } } let class_string = ''; let style_string = ''; if (classes.length) class_string = ` class="${classes.join(' ')}"`; if (styles.length) style_string = ` style="${styles.join(';')}"`; return `<span${style_string}${class_string}>${txt}</span>`; }, compose(segments:string[], instance:AnsiUp):string { return segments.join(""); } }; textFormatter:Formatter = { transform(fragment:TextWithAttr, instance:AnsiUp):string { return fragment.text; }, compose(segments:string[], instance:AnsiUp):string { return segments.join(""); } }; // 256 Colors Palette // CSS RGB strings - ex. "255, 255, 255" private palette_256:AU_Color[]; private fg:AU_Color; private bg:AU_Color; private bold:boolean; private _use_classes:boolean; private _escape_for_html; private _sgr_regex:RegExp; private _buffer:string; constructor() { this.setup_256_palette(); this._use_classes = false; this._escape_for_html = true; this.bold = false; this.fg = this.bg = null; this._buffer = ''; } set use_classes(arg:boolean) { this._use_classes = arg; } get use_classes():boolean { return this._use_classes; } set escape_for_html(arg:boolean) { this._escape_for_html = arg; } get escape_for_html():boolean
private setup_256_palette():void { this.palette_256 = []; // Index 0..15 : Ansi-Colors this.ansi_colors.forEach( palette => { palette.forEach( rec => { this.palette_256.push(rec); }); }); // Index 16..231 : RGB 6x6x6 // https://gist.github.com/jasonm23/2868981#file-xterm-256color-yaml let levels = [0, 95, 135, 175, 215, 255]; for (let r = 0; r < 6; ++r) { for (let g = 0; g < 6; ++g) { for (let b = 0; b < 6; ++b) { let col = {rgb:[levels[r], levels[g], levels[b]], class_name:'truecolor'}; this.palette_256.push(col); } } } // Index 232..255 : Grayscale let grey_level = 8; for (let i = 0; i < 24; ++i, grey_level += 10) { let gry = {rgb:[grey_level, grey_level, grey_level], class_name:'truecolor'}; this.palette_256.push(gry); } } private old_escape_for_html(txt:string):string { return txt.replace(/[&<>]/gm, (str) => { if (str === "&") return "&amp;"; if (str === "<") return "&lt;"; if (str === ">") return "&gt;"; }); } private old_linkify(txt:string):string { return txt.replace(/(https?:\/\/[^\s]+)/gm, (str) => { return `<a href="${str}">${str}</a>`; }); } private detect_incomplete_ansi(txt:string) { // Scan forwards for a potential command character // If one exists, we must assume we are good // [\x40-\x7e]) # the command return !(/.*?[\x40-\x7e]/.test(txt)); } private detect_incomplete_link(txt:string) { // It would be nice if Javascript RegExp supported // a hitEnd() method // Scan backwards for first whitespace var found = false; for (var i = txt.length - 1; i > 0; i--) { if (/\s|\x1B/.test(txt[i])) { found = true; break; } } if (!found) { // Handle one other case // Maybe the whole string is a URL? if (/(https?:\/\/[^\s]+)/.test(txt)) return 0; else return -1; } // Test if possible prefix var prefix = txt.substr(i + 1, 4); if (prefix.length === 0) return -1; if ("http".indexOf(prefix) === 0) return (i + 1); } ansi_to(txt:string, formatter:Formatter):any { var pkt = this._buffer + txt; this._buffer = ''; var raw_text_pkts = pkt.split(/\x1B\[/); if (raw_text_pkts.length === 1) raw_text_pkts.push(''); this.handle_incomplete_sequences(raw_text_pkts); let first_chunk = this.with_state(raw_text_pkts.shift()); // the first pkt is not the result of the split let blocks = new Array(raw_text_pkts.length); for (let i = 0, len = raw_text_pkts.length; i < len; ++i) { blocks[i] = (formatter.transform(this.process_ansi(raw_text_pkts[i]), this)); } if (first_chunk.text.length > 0) blocks.unshift(formatter.transform(first_chunk, this)); return formatter.compose(blocks, this); } ansi_to_html(txt:string):string { return this.ansi_to(txt, this.htmlFormatter); } ansi_to_text(txt:string):string { return this.ansi_to(txt, this.textFormatter); } private with_state(text:string):TextWithAttr { return { bold: this.bold, fg: this.fg, bg: this.bg, text: text }; } private handle_incomplete_sequences(chunks:string[]):void { // COMPLEX - BEGIN // Validate the last chunks for: // - incomplete ANSI sequence // - incomplete ESC // If any of these occur, we may have to buffer var last_chunk = chunks[chunks.length - 1]; // - incomplete ANSI sequence if ((last_chunk.length > 0) && this.detect_incomplete_ansi(last_chunk)) { this._buffer = "\x1B[" + last_chunk; chunks.pop(); chunks.push(''); } else { // - incomplete ESC if (last_chunk.slice(-1) === "\x1B") { this._buffer = "\x1B"; console.log("raw", chunks); chunks.pop(); chunks.push(last_chunk.substr(0, last_chunk.length - 1)); console.log(chunks); console.log(last_chunk); } // - Incomplete ESC, only one packet if (chunks.length === 2 && chunks[1] === "" && 
chunks[0].slice(-1) === "\x1B") { this._buffer = "\x1B"; last_chunk = chunks.shift(); chunks.unshift(last_chunk.substr(0, last_chunk.length - 1)); } } // COMPLEX - END } private process_ansi(block:string):TextWithAttr { // This must only be called with a string that started with a CSI (the string split above) // The CSI must not be in the string. We consider this string to be a 'block'. // It has an ANSI command at the front that affects the text that follows it. // // All ansi codes are typically in the following format. We parse it and focus // specifically on the graphics commands (SGR) // // CONTROL-SEQUENCE-INTRODUCER CSI (ESC, '[') // PRIVATE-MODE-CHAR (!, <, >, ?) // Numeric parameters separated by semicolons ('0' - '9', ';') // Intermediate-modifiers (0x20 - 0x2f) // COMMAND-CHAR (0x40 - 0x7e) // // We use a regex to parse into capture groups the PRIVATE-MODE-CHAR to the COMMAND // and the following text if (!this._sgr_regex) { // This regex is designed to parse an ANSI terminal CSI command. To be more specific, // we follow the XTERM conventions vs. the various other "standards". // http://invisible-island.net/xterm/ctlseqs/ctlseqs.html // this._sgr_regex = rgx` ^ # beginning of line ([!\x3c-\x3f]?) # a private-mode char (!, <, =, >, ?) ([\d;]*) # any digits or semicolons ([\x20-\x2f]? # an intermediate modifier [\x40-\x7e]) # the command ([\s\S]*) # any text following this CSI sequence `; } let matches = block.match(this._sgr_regex); // The regex should have handled all cases! if (!matches) { return this.with_state(block); } let orig_txt = matches[4]; if (matches[1] !== '' || matches[3] !== 'm') { return this.with_state(orig_txt); } // Ok - we have a valid "SGR" (Select Graphic Rendition) let sgr_cmds = matches[2].split(';'); // Each of these params affects the SGR state // Why do we shift through the array instead of a forEach?? // ... because some commands consume the params that follow ! 
while (sgr_cmds.length > 0) { let sgr_cmd_str = sgr_cmds.shift(); let num = parseInt(sgr_cmd_str, 10); if (isNaN(num) || num === 0) { this.fg = this.bg = null; this.bold = false; } else if (num === 1) { this.bold = true; } else if (num === 22) { this.bold = false; } else if (num === 39) { this.fg = null; } else if (num === 49) { this.bg = null; } else if ((num >= 30) && (num < 38)) { this.fg = this.ansi_colors[0][(num - 30)]; } else if ((num >= 40) && (num < 48)) { this.bg = this.ansi_colors[0][(num - 40)]; } else if ((num >= 90) && (num < 98)) { this.fg = this.ansi_colors[1][(num - 90)]; } else if ((num >= 100) && (num < 108)) { this.bg = this.ansi_colors[1][(num - 100)]; } else if (num === 38 || num === 48) { // extended set foreground/background color // validate that param exists if (sgr_cmds.length > 0) { // extend color (38=fg, 48=bg) let is_foreground = (num === 38); let mode_cmd = sgr_cmds.shift(); // MODE '5' - 256 color palette if (mode_cmd === '5' && sgr_cmds.length > 0) { let palette_index = parseInt(sgr_cmds.shift(), 10); if (palette_index >= 0 && palette_index <= 255) { if (is_foreground) this.fg = this.palette_256[palette_index]; else this.bg = this.palette_256[palette_index]; } } // MODE '2' - True Color if (mode_cmd === '2' && sgr_cmds.length > 2) { let r = parseInt(sgr_cmds.shift(), 10); let g = parseInt(sgr_cmds.shift(), 10); let b = parseInt(sgr_cmds.shift(), 10); if ((r >= 0 && r <= 255) && (g >= 0 && g <= 255) && (b >= 0 && b <= 255)) { let c = { rgb: [r,g,b], class_name: 'truecolor'}; if (is_foreground) this.fg = c; else this.bg = c; } } } } } return this.with_state(orig_txt); } }
{ return this._escape_for_html; }
identifier_body
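handle_incomplete_sequences is the subtle part of this class: a streamed chunk may end in the middle of an escape sequence, so the unterminated tail is stashed in this._buffer and prepended to the next chunk. Below is a stripped-down Python model of that buffering idea; it is a sketch, not the full AnsiUp state machine, and the tail-matching regex is my own rather than taken from the source.

# Sketch: chunk buffering for incomplete escape sequences, reduced to its
# essence. The real class also special-cases a bare trailing ESC and
# single-packet input.
import re

class ChunkedAnsiStream:
    def __init__(self):
        self._buffer = ''

    def feed(self, chunk):
        """Return the text that is safe to process now; hold back an
        unterminated escape-sequence tail until the next chunk arrives."""
        data = self._buffer + chunk
        self._buffer = ''
        # ESC, optionally followed by CSI parameter/intermediate bytes,
        # sitting at the very end of the input has no final byte yet.
        tail = re.search(r'\x1b(\[[\x30-\x3f]*[\x20-\x2f]*)?$', data)
        if tail:
            self._buffer = data[tail.start():]
            data = data[:tail.start()]
        return data

# e.g. s = ChunkedAnsiStream(); s.feed('a\x1b[3') == 'a'; s.feed('1mb') == '\x1b[31mb'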
ansi_up.ts
/* ansi_up.js * author : Dru Nelson * license : MIT * http://github.com/drudru/ansi_up */ "use strict"; interface AU_Color { rgb:number[]; class_name:string; } // Represents the output of process_ansi(): a snapshot of the AnsiUp state machine // at a given point in time, which wraps a fragment of text. This wouuld allow deferred // processing of text fragments and colors, if ever needed. interface TextWithAttr { fg:AU_Color; bg:AU_Color; bold:boolean; text:string; } // Represents an object that is responsible for generating output from parsed ANSI color // metadata and text content. interface Formatter { // Invoked for each generated TextWithAttr fragment outputted by process_ansi(). // this function is responsible for generating output for a single TextWithAttr // fragment. The result of transform() will be collected into an array that will // be provided to compose(). transform(fragment:TextWithAttr, instance:AnsiUp):any; // Invoked on the set of outputs from transform; the return value of this function // will be the final output of ANSI processing. compose(segments:any[], instance:AnsiUp):any; } // ES5 template string transformer // NOTE: default is multiline (workaround for now til I can // determine flags inline) function rgx(tmplObj, ...subst) { // Use the 'raw' value so we don't have to double backslash in a template string let regexText:string = tmplObj.raw[0]; // Remove white-space and comments let wsrgx = /^\s+|\s+\n|\s+#[\s\S]+?\n/gm; let txt2 = regexText.replace(wsrgx, ''); return new RegExp(txt2, 'm'); } class AnsiUp { VERSION = "3.0.0"; ansi_colors = [ // Normal colors [ { rgb: [ 0, 0, 0], class_name: "ansi-black" }, { rgb: [187, 0, 0], class_name: "ansi-red" }, { rgb: [ 0, 187, 0], class_name: "ansi-green" }, { rgb: [187, 187, 0], class_name: "ansi-yellow" }, { rgb: [ 0, 0, 187], class_name: "ansi-blue" }, { rgb: [187, 0, 187], class_name: "ansi-magenta" }, { rgb: [ 0, 187, 187], class_name: "ansi-cyan" }, { rgb: [255, 255, 255], class_name: "ansi-white" } ], // Bright colors [ { rgb: [ 85, 85, 85], class_name: "ansi-bright-black" }, { rgb: [255, 85, 85], class_name: "ansi-bright-red" }, { rgb: [ 0, 255, 0], class_name: "ansi-bright-green" }, { rgb: [255, 255, 85], class_name: "ansi-bright-yellow" }, { rgb: [ 85, 85, 255], class_name: "ansi-bright-blue" }, { rgb: [255, 85, 255], class_name: "ansi-bright-magenta" }, { rgb: [ 85, 255, 255], class_name: "ansi-bright-cyan" }, { rgb: [255, 255, 255], class_name: "ansi-bright-white" } ] ]; htmlFormatter:Formatter = { transform(fragment:TextWithAttr, instance:AnsiUp):string { let txt = fragment.text; if (txt.length === 0) return txt; if (instance._escape_for_html) txt = instance.old_escape_for_html(txt); // If colors not set, default style is used if (!fragment.bold && fragment.fg === null && fragment.bg === null) return txt; let styles:string[] = []; let classes:string[] = []; let fg = fragment.fg; let bg = fragment.bg; // Note on bold: https://stackoverflow.com/questions/6737005/what-are-some-advantages-to-using-span-style-font-weightbold-rather-than-b?rq=1 if (fragment.bold) styles.push('font-weight:bold') if (!instance._use_classes) { // USE INLINE STYLES if (fg) styles.push(`color:rgb(${fg.rgb.join(',')})`); if (bg) styles.push(`background-color:rgb(${bg.rgb})`); } else { // USE CLASSES if (fg) { if (fg.class_name !== 'truecolor') { classes.push(`${fg.class_name}-fg`); } else { styles.push(`color:rgb(${fg.rgb.join(',')})`); } } if (bg) { if (bg.class_name !== 'truecolor') { classes.push(`${bg.class_name}-bg`); } else { 
styles.push(`background-color:rgb(${bg.rgb.join(',')})`); } } } let class_string = ''; let style_string = ''; if (classes.length) class_string = ` class="${classes.join(' ')}"`; if (styles.length) style_string = ` style="${styles.join(';')}"`; return `<span${style_string}${class_string}>${txt}</span>`; }, compose(segments:string[], instance:AnsiUp):string { return segments.join(""); } }; textFormatter:Formatter = { transform(fragment:TextWithAttr, instance:AnsiUp):string { return fragment.text; }, compose(segments:string[], instance:AnsiUp):string { return segments.join(""); } }; // 256 Colors Palette // CSS RGB strings - ex. "255, 255, 255" private palette_256:AU_Color[]; private fg:AU_Color; private bg:AU_Color; private bold:boolean; private _use_classes:boolean; private _escape_for_html; private _sgr_regex:RegExp; private _buffer:string; constructor() { this.setup_256_palette(); this._use_classes = false; this._escape_for_html = true; this.bold = false; this.fg = this.bg = null; this._buffer = ''; } set use_classes(arg:boolean) { this._use_classes = arg; } get use_classes():boolean { return this._use_classes; } set escape_for_html(arg:boolean) { this._escape_for_html = arg; } get escape_for_html():boolean { return this._escape_for_html; } private
():void { this.palette_256 = []; // Index 0..15 : Ansi-Colors this.ansi_colors.forEach( palette => { palette.forEach( rec => { this.palette_256.push(rec); }); }); // Index 16..231 : RGB 6x6x6 // https://gist.github.com/jasonm23/2868981#file-xterm-256color-yaml let levels = [0, 95, 135, 175, 215, 255]; for (let r = 0; r < 6; ++r) { for (let g = 0; g < 6; ++g) { for (let b = 0; b < 6; ++b) { let col = {rgb:[levels[r], levels[g], levels[b]], class_name:'truecolor'}; this.palette_256.push(col); } } } // Index 232..255 : Grayscale let grey_level = 8; for (let i = 0; i < 24; ++i, grey_level += 10) { let gry = {rgb:[grey_level, grey_level, grey_level], class_name:'truecolor'}; this.palette_256.push(gry); } } private old_escape_for_html(txt:string):string { return txt.replace(/[&<>]/gm, (str) => { if (str === "&") return "&amp;"; if (str === "<") return "&lt;"; if (str === ">") return "&gt;"; }); } private old_linkify(txt:string):string { return txt.replace(/(https?:\/\/[^\s]+)/gm, (str) => { return `<a href="${str}">${str}</a>`; }); } private detect_incomplete_ansi(txt:string) { // Scan forwards for a potential command character // If one exists, we must assume we are good // [\x40-\x7e]) # the command return !(/.*?[\x40-\x7e]/.test(txt)); } private detect_incomplete_link(txt:string) { // It would be nice if Javascript RegExp supported // a hitEnd() method // Scan backwards for first whitespace var found = false; for (var i = txt.length - 1; i > 0; i--) { if (/\s|\x1B/.test(txt[i])) { found = true; break; } } if (!found) { // Handle one other case // Maybe the whole string is a URL? if (/(https?:\/\/[^\s]+)/.test(txt)) return 0; else return -1; } // Test if possible prefix var prefix = txt.substr(i + 1, 4); if (prefix.length === 0) return -1; if ("http".indexOf(prefix) === 0) return (i + 1); } ansi_to(txt:string, formatter:Formatter):any { var pkt = this._buffer + txt; this._buffer = ''; var raw_text_pkts = pkt.split(/\x1B\[/); if (raw_text_pkts.length === 1) raw_text_pkts.push(''); this.handle_incomplete_sequences(raw_text_pkts); let first_chunk = this.with_state(raw_text_pkts.shift()); // the first pkt is not the result of the split let blocks = new Array(raw_text_pkts.length); for (let i = 0, len = raw_text_pkts.length; i < len; ++i) { blocks[i] = (formatter.transform(this.process_ansi(raw_text_pkts[i]), this)); } if (first_chunk.text.length > 0) blocks.unshift(formatter.transform(first_chunk, this)); return formatter.compose(blocks, this); } ansi_to_html(txt:string):string { return this.ansi_to(txt, this.htmlFormatter); } ansi_to_text(txt:string):string { return this.ansi_to(txt, this.textFormatter); } private with_state(text:string):TextWithAttr { return { bold: this.bold, fg: this.fg, bg: this.bg, text: text }; } private handle_incomplete_sequences(chunks:string[]):void { // COMPLEX - BEGIN // Validate the last chunks for: // - incomplete ANSI sequence // - incomplete ESC // If any of these occur, we may have to buffer var last_chunk = chunks[chunks.length - 1]; // - incomplete ANSI sequence if ((last_chunk.length > 0) && this.detect_incomplete_ansi(last_chunk)) { this._buffer = "\x1B[" + last_chunk; chunks.pop(); chunks.push(''); } else { // - incomplete ESC if (last_chunk.slice(-1) === "\x1B") { this._buffer = "\x1B"; console.log("raw", chunks); chunks.pop(); chunks.push(last_chunk.substr(0, last_chunk.length - 1)); console.log(chunks); console.log(last_chunk); } // - Incomplete ESC, only one packet if (chunks.length === 2 && chunks[1] === "" && chunks[0].slice(-1) === "\x1B") { 
this._buffer = "\x1B"; last_chunk = chunks.shift(); chunks.unshift(last_chunk.substr(0, last_chunk.length - 1)); } } // COMPLEX - END } private process_ansi(block:string):TextWithAttr { // This must only be called with a string that started with a CSI (the string split above) // The CSI must not be in the string. We consider this string to be a 'block'. // It has an ANSI command at the front that affects the text that follows it. // // All ansi codes are typically in the following format. We parse it and focus // specifically on the graphics commands (SGR) // // CONTROL-SEQUENCE-INTRODUCER CSI (ESC, '[') // PRIVATE-MODE-CHAR (!, <, >, ?) // Numeric parameters separated by semicolons ('0' - '9', ';') // Intermediate-modifiers (0x20 - 0x2f) // COMMAND-CHAR (0x40 - 0x7e) // // We use a regex to parse into capture groups the PRIVATE-MODE-CHAR to the COMMAND // and the following text if (!this._sgr_regex) { // This regex is designed to parse an ANSI terminal CSI command. To be more specific, // we follow the XTERM conventions vs. the various other "standards". // http://invisible-island.net/xterm/ctlseqs/ctlseqs.html // this._sgr_regex = rgx` ^ # beginning of line ([!\x3c-\x3f]?) # a private-mode char (!, <, =, >, ?) ([\d;]*) # any digits or semicolons ([\x20-\x2f]? # an intermediate modifier [\x40-\x7e]) # the command ([\s\S]*) # any text following this CSI sequence `; } let matches = block.match(this._sgr_regex); // The regex should have handled all cases! if (!matches) { return this.with_state(block); } let orig_txt = matches[4]; if (matches[1] !== '' || matches[3] !== 'm') { return this.with_state(orig_txt); } // Ok - we have a valid "SGR" (Select Graphic Rendition) let sgr_cmds = matches[2].split(';'); // Each of these params affects the SGR state // Why do we shift through the array instead of a forEach?? // ... because some commands consume the params that follow ! while (sgr_cmds.length > 0) { let sgr_cmd_str = sgr_cmds.shift(); let num = parseInt(sgr_cmd_str, 10); if (isNaN(num) || num === 0) { this.fg = this.bg = null; this.bold = false; } else if (num === 1) { this.bold = true; } else if (num === 22) { this.bold = false; } else if (num === 39) { this.fg = null; } else if (num === 49) { this.bg = null; } else if ((num >= 30) && (num < 38)) { this.fg = this.ansi_colors[0][(num - 30)]; } else if ((num >= 40) && (num < 48)) { this.bg = this.ansi_colors[0][(num - 40)]; } else if ((num >= 90) && (num < 98)) { this.fg = this.ansi_colors[1][(num - 90)]; } else if ((num >= 100) && (num < 108)) { this.bg = this.ansi_colors[1][(num - 100)]; } else if (num === 38 || num === 48) { // extended set foreground/background color // validate that param exists if (sgr_cmds.length > 0) { // extend color (38=fg, 48=bg) let is_foreground = (num === 38); let mode_cmd = sgr_cmds.shift(); // MODE '5' - 256 color palette if (mode_cmd === '5' && sgr_cmds.length > 0) { let palette_index = parseInt(sgr_cmds.shift(), 10); if (palette_index >= 0 && palette_index <= 255) { if (is_foreground) this.fg = this.palette_256[palette_index]; else this.bg = this.palette_256[palette_index]; } } // MODE '2' - True Color if (mode_cmd === '2' && sgr_cmds.length > 2) { let r = parseInt(sgr_cmds.shift(), 10); let g = parseInt(sgr_cmds.shift(), 10); let b = parseInt(sgr_cmds.shift(), 10); if ((r >= 0 && r <= 255) && (g >= 0 && g <= 255) && (b >= 0 && b <= 255)) { let c = { rgb: [r,g,b], class_name: 'truecolor'}; if (is_foreground) this.fg = c; else this.bg = c; } } } } } return this.with_state(orig_txt); } }
setup_256_palette
identifier_name
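The `identifier_name` middle above restores `private setup_256_palette():void`, the method that builds the xterm 256-color table: indices 0-15 are the two 8-entry ANSI palettes, 16-231 form a 6x6x6 cube over the levels 0/95/135/175/215/255, and 232-255 are a 24-step grayscale ramp that starts at 8 and climbs by 10. Below is a minimal standalone sketch of the same construction, written in Go for consistency with the rest of this file; the names `rgb` and `build256` are illustrative and not part of ansi_up.

package main

import "fmt"

type rgb struct{ r, g, b int }

// build256 reconstructs the 256-entry table in the same order as
// AnsiUp.setup_256_palette: 16 ANSI slots, a 6x6x6 color cube, then greys.
func build256() []rgb {
	// Index 0..15: the 16 named ANSI colors (values omitted here for brevity).
	p := make([]rgb, 16)
	// Index 16..231: 6x6x6 cube over the xterm level ramp.
	levels := []int{0, 95, 135, 175, 215, 255}
	for _, r := range levels {
		for _, g := range levels {
			for _, b := range levels {
				p = append(p, rgb{r, g, b})
			}
		}
	}
	// Index 232..255: 24 grayscale steps, starting at 8, step 10.
	for i, grey := 0, 8; i < 24; i, grey = i+1, grey+10 {
		p = append(p, rgb{grey, grey, grey})
	}
	return p
}

func main() {
	p := build256()
	// Expect: 256 {0 0 0} {255 255 255} {8 8 8} {238 238 238}
	fmt.Println(len(p), p[16], p[231], p[232], p[255])
}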
docker_client.go
package docker import ( "context" "crypto/tls" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path/filepath" "strconv" "strings" "time" "github.com/containers/image/docker/reference" "github.com/containers/image/pkg/docker/config" "github.com/containers/image/pkg/tlsclientconfig" "github.com/containers/image/types" "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( dockerHostname = "docker.io" dockerV1Hostname = "index.docker.io" dockerRegistry = "registry-1.docker.io" resolvedPingV2URL = "%s://%s/v2/" resolvedPingV1URL = "%s://%s/v1/_ping" tagsPath = "/v2/%s/tags/list" manifestPath = "/v2/%s/manifests/%s" blobsPath = "/v2/%s/blobs/%s" blobUploadPath = "/v2/%s/blobs/uploads/" extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" minimumTokenLifetimeSeconds = 60 extensionSignatureSchemaVersion = 2 // extensionSignature.Version extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type ) var ( // ErrV1NotSupported is returned when we're trying to talk to a // docker V1 registry. ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") // ErrUnauthorizedForCredentials is returned when the status code returned is 401 ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} ) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: // signature represents a Docker image signature. type extensionSignature struct { Version int `json:"schemaVersion"` // Version specifies the schema version Name string `json:"name"` // Name must be in "sha256:<digest>@signatureName" format Type string `json:"type"` // Type is optional, if not set it will be defaulted to "AtomicImageV1" Content []byte `json:"content"` // Content contains the signature } // signatureList represents a list of Docker image signatures. type extensionSignatureList struct { Signatures []extensionSignature `json:"signatures"` } type bearerToken struct { Token string `json:"token"` AccessToken string `json:"access_token"` ExpiresIn int `json:"expires_in"` IssuedAt time.Time `json:"issued_at"` } // dockerClient is configuration for dealing with a single Docker registry. type dockerClient struct { // The following members are set by newDockerClient and do not change afterwards. sys *types.SystemContext registry string username string password string client *http.Client signatureBase signatureStorageBase scope authScope // The following members are detected registry properties: // They are set after a successful detectProperties(), and never change afterwards. scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. challenges []challenge supportsSignatures bool // The following members are private state for setupRequestAuth, both are valid if token != nil. token *bearerToken tokenExpiration time.Time } type authScope struct { remoteName string actions string } // sendAuth determines whether we need authentication for v2 or v1 endpoint. type sendAuth int const ( // v2 endpoint with authentication. v2Auth sendAuth = iota // v1 endpoint with authentication. // TODO: Get v1Auth working // v1Auth // no authentication, works for both v1 and v2.
noAuth ) func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { token := new(bearerToken) if err := json.Unmarshal(blob, &token); err != nil { return nil, err } if token.Token == "" { token.Token = token.AccessToken } if token.ExpiresIn < minimumTokenLifetimeSeconds { token.ExpiresIn = minimumTokenLifetimeSeconds logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) } if token.IssuedAt.IsZero() { token.IssuedAt = time.Now().UTC() } return token, nil } // this is cloned from docker/go-connections because upstream docker has changed // it and make deps here fails otherwise. // We'll drop this once we upgrade to docker 1.13.x deps. func serverDefault() *tls.Config { return &tls.Config{ // Avoid fallback to SSL protocols < TLS1.0 MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, } } // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { if sys != nil && sys.DockerCertPath != "" { return sys.DockerCertPath, nil } if sys != nil && sys.DockerPerHostCertDirPath != "" { return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil } var ( hostCertDir string fullCertDirPath string ) for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths { if sys != nil && sys.RootForImplicitAbsolutePaths != "" { hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath) } else { hostCertDir = systemPerHostCertDirPath } fullCertDirPath = filepath.Join(hostCertDir, hostPort) _, err := os.Stat(fullCertDirPath) if err == nil { break } if os.IsNotExist(err) { continue } if os.IsPermission(err) { logrus.Debugf("error accessing certs directory due to permissions: %v", err) continue } if err != nil { return "", err } } return fullCertDirPath, nil } // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { registry := reference.Domain(ref.ref) username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref)) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } sigBase, err := configuredSignatureStorageBase(sys, ref, write) if err != nil { return nil, err } remoteName := reference.Path(ref.ref) return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName) } // newDockerClientWithDetails returns a new dockerClient instance for the given parameters func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) { hostName := registry if registry == dockerHostname { registry = dockerRegistry } tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = serverDefault() // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, // because docker/docker does not read the certs.d subdirectory at all in that case.
We use the user-visible // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is // undocumented and may change if docker/docker changes. certDir, err := dockerCertDir(sys, hostName) if err != nil { return nil, err } if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil { return nil, err } if sys != nil && sys.DockerInsecureSkipTLSVerify { tr.TLSClientConfig.InsecureSkipVerify = true } return &dockerClient{ sys: sys, registry: registry, username: username, password: password, client: &http.Client{Transport: tr}, signatureBase: sigBase, scope: authScope{ actions: actions, remoteName: remoteName, }, }, nil } // CheckAuth validates the credentials by attempting to log into the registry // returns an error if an error occurred while making the http request or the status code received was 401 func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") if err != nil { return errors.Wrapf(err, "error creating new docker client") } resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth) if err != nil { return err } defer resp.Body.Close() switch resp.StatusCode { case http.StatusOK: return nil case http.StatusUnauthorized: return ErrUnauthorizedForCredentials default: return errors.Errorf("error occurred with status code %d", resp.StatusCode) } } // SearchResult holds the information of each matching image // It matches the output returned by the v1 endpoint type SearchResult struct { Name string `json:"name"` Description string `json:"description"` // StarCount states the number of stars the image has StarCount int `json:"star_count"` IsTrusted bool `json:"is_trusted"` // IsAutomated states whether the image is an automated build IsAutomated bool `json:"is_automated"` // IsOfficial states whether the image is an official build IsOfficial bool `json:"is_official"` } // SearchRegistry queries a registry for images that contain "image" in their name // The limit is the max number of results desired // Note: The limit value doesn't work with all registries // for example registry.access.redhat.com returns all the results without limiting it to the limit value func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) { type V2Results struct { // Repositories holds the results returned by the /v2/_catalog endpoint Repositories []string `json:"repositories"` } type V1Results struct { // Results holds the results returned by the /v1/search endpoint Results []SearchResult `json:"results"` } v2Res := &V2Results{} v1Res := &V1Results{} // Get credentials from authfile for the underlying hostname username, password, err := config.GetAuthentication(sys, registry) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } // The /v2/_catalog endpoint has been disabled for docker.io, therefore the call made to that endpoint will fail // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results if registry == dockerHostname { registry = dockerV1Hostname } client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") if err != nil { return nil, errors.Wrapf(err, "error creating new 
docker client") } // Only try the v1 search endpoint if the search query is not empty. If it is // empty skip to the v2 endpoint. if image != "" { // set up the query values for the v1 endpoint u := url.URL{ Path: "/v1/search", } q := u.Query() q.Set("q", image) q.Set("n", strconv.Itoa(limit)) u.RawQuery = q.Encode() logrus.Debugf("trying to talk to v1 search endpoint\n") resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth) if err != nil { logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Debugf("error getting search results from v1 endpoint %q, status code %d", registry, resp.StatusCode) } else { if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { return nil, err } return v1Res.Results, nil } } } logrus.Debugf("trying to talk to v2 search endpoint\n") resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth) if err != nil { logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Errorf("error getting search results from v2 endpoint %q, status code %d", registry, resp.StatusCode) } else { if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { return nil, err } searchRes := []SearchResult{} for _, repo := range v2Res.Repositories { if strings.Contains(repo, image) { res := SearchResult{ Name: repo, } searchRes = append(searchRes, res) } } return searchRes, nil } } return nil, errors.Wrapf(err, "couldn't search registry %q", registry) } // makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) { if err := c.detectProperties(ctx); err != nil { return nil, err } url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth) } // makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // TODO(runcom): too many arguments here, use a struct func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) { req, err := http.NewRequest(method, url, stream) if err != nil { return nil, err } req = req.WithContext(ctx) if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. 
req.ContentLength = streamLen } req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") for n, h := range headers { for _, hh := range h { req.Header.Add(n, hh) } } if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) } if auth == v2Auth { if err := c.setupRequestAuth(req); err != nil { return nil, err } } logrus.Debugf("%s %s", method, url) res, err := c.client.Do(req) if err != nil { return nil, err } return res, nil } // we're using the challenges from the /v2/ ping response and not the one from the destination // URL in this request because: // // 1) docker does that as well // 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request // // debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up func (c *dockerClient) setupRequestAuth(req *http.Request) error { if len(c.challenges) == 0 { return nil } schemeNames := make([]string, 0, len(c.challenges)) for _, challenge := range c.challenges { schemeNames = append(schemeNames, challenge.Scheme) switch challenge.Scheme { case "basic": req.SetBasicAuth(c.username, c.password) return nil case "bearer": if c.token == nil || time.Now().After(c.tokenExpiration) { realm, ok := challenge.Parameters["realm"] if !ok { return errors.Errorf("missing realm in bearer auth challenge") } service, _ := challenge.Parameters["service"] // Will be "" if not present var scope string if c.scope.remoteName != "" && c.scope.actions != "" { scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) } token, err := c.getBearerToken(req.Context(), realm, service, scope) if err != nil { return err } c.token = token c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token)) return nil default: logrus.Debugf("no handler for %s authentication", challenge.Scheme) } } logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) return nil } func (c *dockerClient) getB
context.Context, realm, service, scope string) (*bearerToken, error) { authReq, err := http.NewRequest("GET", realm, nil) if err != nil { return nil, err } authReq = authReq.WithContext(ctx) getParams := authReq.URL.Query() if c.username != "" { getParams.Add("account", c.username) } if service != "" { getParams.Add("service", service) } if scope != "" { getParams.Add("scope", scope) } authReq.URL.RawQuery = getParams.Encode() if c.username != "" && c.password != "" { authReq.SetBasicAuth(c.username, c.password) } logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) tr := tlsclientconfig.NewTransport() // TODO(runcom): insecure for now to contact the external token service tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} client := &http.Client{Transport: tr} res, err := client.Do(authReq) if err != nil { return nil, err } defer res.Body.Close() switch res.StatusCode { case http.StatusUnauthorized: return nil, ErrUnauthorizedForCredentials case http.StatusOK: break default: return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL) } tokenBlob, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } return newBearerTokenFromJSONBlob(tokenBlob) } // detectProperties detects various properties of the registry. // See the dockerClient documentation for members which are affected by this. func (c *dockerClient) detectProperties(ctx context.Context) error { if c.scheme != "" { return nil } ping := func(scheme string) error { url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return err } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return errors.Errorf("error pinging registry %s, response code %d", c.registry, resp.StatusCode) } c.challenges = parseAuthHeader(resp.Header) c.scheme = scheme c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" return nil } err := ping("https") if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { err = ping("http") } if err != nil { err = errors.Wrap(err, "pinging docker registry returned") if c.sys != nil && c.sys.DockerDisableV1Ping { return err } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return false } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return false } return true } isV1 := pingV1("https") if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { isV1 = pingV1("http") } if isV1 { err = ErrV1NotSupported } } return err } // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != http.StatusOK { return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) } body, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } var parsedBody extensionSignatureList if err := json.Unmarshal(body, &parsedBody); err != nil { return nil, errors.Wrapf(err, "Error decoding signature list") } return &parsedBody, nil }
earerToken(ctx
identifier_name
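The `identifier_name` middle above restores `earerToken(ctx`, completing `func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string)`. Its companion `newBearerTokenFromJSONBlob` normalizes the token endpoint's JSON in two ways: it falls back from `token` to `access_token` when only the latter is present, and it clamps `expires_in` to a 60-second floor so a short-lived token is not refetched on every request. Below is a minimal sketch of that normalization, assuming only the behavior visible in the code above; the `normalize` wrapper and the sample blob are illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type bearerToken struct {
	Token       string    `json:"token"`
	AccessToken string    `json:"access_token"`
	ExpiresIn   int       `json:"expires_in"`
	IssuedAt    time.Time `json:"issued_at"`
}

const minimumTokenLifetimeSeconds = 60

// normalize mirrors what newBearerTokenFromJSONBlob does to a token response.
func normalize(blob []byte) (*bearerToken, error) {
	t := new(bearerToken)
	if err := json.Unmarshal(blob, t); err != nil {
		return nil, err
	}
	if t.Token == "" { // some servers only send "access_token"
		t.Token = t.AccessToken
	}
	if t.ExpiresIn < minimumTokenLifetimeSeconds { // avoid thrashing the token endpoint
		t.ExpiresIn = minimumTokenLifetimeSeconds
	}
	if t.IssuedAt.IsZero() { // "issued_at" is optional in the response
		t.IssuedAt = time.Now().UTC()
	}
	return t, nil
}

func main() {
	t, err := normalize([]byte(`{"access_token":"abc","expires_in":5}`))
	fmt.Println(t.Token, t.ExpiresIn, err) // abc 60 <nil>
}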
docker_client.go
package docker import ( "context" "crypto/tls" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path/filepath" "strconv" "strings" "time" "github.com/containers/image/docker/reference" "github.com/containers/image/pkg/docker/config" "github.com/containers/image/pkg/tlsclientconfig" "github.com/containers/image/types" "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( dockerHostname = "docker.io" dockerV1Hostname = "index.docker.io" dockerRegistry = "registry-1.docker.io" resolvedPingV2URL = "%s://%s/v2/" resolvedPingV1URL = "%s://%s/v1/_ping" tagsPath = "/v2/%s/tags/list" manifestPath = "/v2/%s/manifests/%s" blobsPath = "/v2/%s/blobs/%s" blobUploadPath = "/v2/%s/blobs/uploads/" extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" minimumTokenLifetimeSeconds = 60 extensionSignatureSchemaVersion = 2 // extensionSignature.Version extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type ) var ( // ErrV1NotSupported is returned when we're trying to talk to a // docker V1 registry. ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") // ErrUnauthorizedForCredentials is returned when the status code returned is 401 ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} ) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: // signature represents a Docker image signature. type extensionSignature struct { Version int `json:"schemaVersion"` // Version specifies the schema version Name string `json:"name"` // Name must be in "sha256:<digest>@signatureName" format Type string `json:"type"` // Type is optional, if not set it will be defaulted to "AtomicImageV1" Content []byte `json:"content"` // Content contains the signature } // signatureList represents a list of Docker image signatures. type extensionSignatureList struct { Signatures []extensionSignature `json:"signatures"` } type bearerToken struct { Token string `json:"token"` AccessToken string `json:"access_token"` ExpiresIn int `json:"expires_in"` IssuedAt time.Time `json:"issued_at"` } // dockerClient is configuration for dealing with a single Docker registry. type dockerClient struct { // The following members are set by newDockerClient and do not change afterwards. sys *types.SystemContext registry string username string password string client *http.Client signatureBase signatureStorageBase scope authScope // The following members are detected registry properties: // They are set after a successful detectProperties(), and never change afterwards. scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. challenges []challenge supportsSignatures bool // The following members are private state for setupRequestAuth, both are valid if token != nil. token *bearerToken tokenExpiration time.Time } type authScope struct { remoteName string actions string } // sendAuth determines whether we need authentication for v2 or v1 endpoint. type sendAuth int const ( // v2 endpoint with authentication. v2Auth sendAuth = iota // v1 endpoint with authentication. // TODO: Get v1Auth working // v1Auth // no authentication, works for both v1 and v2.
noAuth ) func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { token := new(bearerToken) if err := json.Unmarshal(blob, &token); err != nil { return nil, err } if token.Token == "" { token.Token = token.AccessToken } if token.ExpiresIn < minimumTokenLifetimeSeconds { token.ExpiresIn = minimumTokenLifetimeSeconds logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) } if token.IssuedAt.IsZero() { token.IssuedAt = time.Now().UTC() } return token, nil } // this is cloned from docker/go-connections because upstream docker has changed // it and make deps here fails otherwise. // We'll drop this once we upgrade to docker 1.13.x deps. func serverDefault() *tls.Config { return &tls.Config{ // Avoid fallback to SSL protocols < TLS1.0 MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, } } // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { if sys != nil && sys.DockerCertPath != "" { return sys.DockerCertPath, nil } if sys != nil && sys.DockerPerHostCertDirPath != "" { return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil } var ( hostCertDir string fullCertDirPath string ) for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths { if sys != nil && sys.RootForImplicitAbsolutePaths != "" { hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath) } else { hostCertDir = systemPerHostCertDirPath } fullCertDirPath = filepath.Join(hostCertDir, hostPort) _, err := os.Stat(fullCertDirPath) if err == nil { break } if os.IsNotExist(err) { continue } if os.IsPermission(err) { logrus.Debugf("error accessing certs directory due to permissions: %v", err) continue } if err != nil { return "", err } } return fullCertDirPath, nil } // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { registry := reference.Domain(ref.ref) username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref)) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } sigBase, err := configuredSignatureStorageBase(sys, ref, write) if err != nil { return nil, err } remoteName := reference.Path(ref.ref) return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName) } // newDockerClientWithDetails returns a new dockerClient instance for the given parameters func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) { hostName := registry if registry == dockerHostname { registry = dockerRegistry } tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = serverDefault() // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, // because docker/docker does not read the certs.d subdirectory at all in that case.
We use the user-visible // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is // undocumented and may change if docker/docker changes. certDir, err := dockerCertDir(sys, hostName) if err != nil { return nil, err } if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil { return nil, err } if sys != nil && sys.DockerInsecureSkipTLSVerify { tr.TLSClientConfig.InsecureSkipVerify = true } return &dockerClient{ sys: sys, registry: registry, username: username, password: password, client: &http.Client{Transport: tr}, signatureBase: sigBase, scope: authScope{ actions: actions, remoteName: remoteName, }, }, nil } // CheckAuth validates the credentials by attempting to log into the registry // returns an error if an error occurred while making the http request or the status code received was 401 func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") if err != nil { return errors.Wrapf(err, "error creating new docker client") } resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth) if err != nil { return err } defer resp.Body.Close() switch resp.StatusCode { case http.StatusOK: return nil case http.StatusUnauthorized: return ErrUnauthorizedForCredentials default: return errors.Errorf("error occurred with status code %d", resp.StatusCode) } } // SearchResult holds the information of each matching image // It matches the output returned by the v1 endpoint type SearchResult struct { Name string `json:"name"` Description string `json:"description"` // StarCount states the number of stars the image has StarCount int `json:"star_count"` IsTrusted bool `json:"is_trusted"` // IsAutomated states whether the image is an automated build IsAutomated bool `json:"is_automated"` // IsOfficial states whether the image is an official build IsOfficial bool `json:"is_official"` } // SearchRegistry queries a registry for images that contain "image" in their name // The limit is the max number of results desired // Note: The limit value doesn't work with all registries // for example registry.access.redhat.com returns all the results without limiting it to the limit value func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) { type V2Results struct { // Repositories holds the results returned by the /v2/_catalog endpoint Repositories []string `json:"repositories"` } type V1Results struct { // Results holds the results returned by the /v1/search endpoint Results []SearchResult `json:"results"` } v2Res := &V2Results{} v1Res := &V1Results{} // Get credentials from authfile for the underlying hostname username, password, err := config.GetAuthentication(sys, registry) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } // The /v2/_catalog endpoint has been disabled for docker.io, therefore the call made to that endpoint will fail // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results if registry == dockerHostname { registry = dockerV1Hostname } client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") if err != nil { return nil, errors.Wrapf(err, "error creating new 
docker client") } // Only try the v1 search endpoint if the search query is not empty. If it is // empty skip to the v2 endpoint. if image != "" { // set up the query values for the v1 endpoint u := url.URL{ Path: "/v1/search", } q := u.Query() q.Set("q", image) q.Set("n", strconv.Itoa(limit)) u.RawQuery = q.Encode() logrus.Debugf("trying to talk to v1 search endpoint\n") resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth) if err != nil { logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Debugf("error getting search results from v1 endpoint %q, status code %d", registry, resp.StatusCode) } else { if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { return nil, err } return v1Res.Results, nil } } } logrus.Debugf("trying to talk to v2 search endpoint\n") resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth) if err != nil { logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Errorf("error getting search results from v2 endpoint %q, status code %d", registry, resp.StatusCode) } else { if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { return nil, err } searchRes := []SearchResult{} for _, repo := range v2Res.Repositories { if strings.Contains(repo, image) { res := SearchResult{ Name: repo, } searchRes = append(searchRes, res) } } return searchRes, nil } } return nil, errors.Wrapf(err, "couldn't search registry %q", registry) } // makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) { if err := c.detectProperties(ctx); err != nil { return nil, err } url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth) } // makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // TODO(runcom): too many arguments here, use a struct func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) { req, err := http.NewRequest(method, url, stream) if err != nil { return nil, err } req = req.WithContext(ctx) if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. 
req.ContentLength = streamLen } req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") for n, h := range headers { for _, hh := range h { req.Header.Add(n, hh) } } if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) } if auth == v2Auth { if err := c.setupRequestAuth(req); err != nil { return nil, err } } logrus.Debugf("%s %s", method, url) res, err := c.client.Do(req) if err != nil { return nil, err } return res, nil } // we're using the challenges from the /v2/ ping response and not the one from the destination // URL in this request because: // // 1) docker does that as well // 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request // // debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up func (c *dockerClient) setupRequestAuth(req *http.Request) error { if len(c.challenges) == 0 { return nil } schemeNames := make([]string, 0, len(c.challenges)) for _, challenge := range c.challenges { schemeNames = append(schemeNames, challenge.Scheme) switch challenge.Scheme { case "basic": req.SetBasicAuth(c.username, c.password) return nil case "bearer": if c.token == nil || time.Now().After(c.tokenExpiration) { realm, ok := challenge.Parameters["realm"] if !ok { return errors.Errorf("missing realm in bearer auth challenge") } service, _ := challenge.Parameters["service"] // Will be "" if not present var scope string if c.scope.remoteName != "" && c.scope.actions != "" { scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) } token, err := c.getBearerToken(req.Context(), realm, service, scope) if err != nil { return err } c.token = token c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token)) return nil default: logrus.Debugf("no handler for %s authentication", challenge.Scheme) } } logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) return nil } func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) { authReq, err := http.NewRequest("GET", realm, nil) if err != nil { return nil, err } authReq = authReq.WithContext(ctx) getParams := authReq.URL.Query() if c.username != "" { getParams.Add("account", c.username) } if service != "" { getParams.Add("service", service) } if scope != "" { getParams.Add("scope", scope) } authReq.URL.RawQuery = getParams.Encode() if c.username != "" && c.password != "" { authReq.SetBasicAuth(c.username, c.password) } logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) tr := tlsclientconfig.NewTransport() // TODO(runcom): insecure for now to contact the external token service tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} client := &http.Client{Transport: tr} res, err := client.Do(authReq) if err != nil { return nil, err } defer res.Body.Close() switch res.StatusCode { case http.StatusUnauthorized: return nil, ErrUnauthorizedForCredentials case http.StatusOK: break default: return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL) } tokenBlob, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } return newBearerTokenFromJSONBlob(tokenBlob) } // detectProperties detects various properties of the registry. // See the dockerClient documentation for members which are affected by this. 
func (c *dockerClient) detectProperties(ctx context.Context) error { if c.scheme != "" { return nil } ping := func(scheme string) error { url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return err } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return errors.Errorf("error pinging registry %s, response code %d", c.registry, resp.StatusCode) } c.challenges = parseAuthHeader(resp.Header) c.scheme = scheme c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" return nil } err := ping("https") if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { err = ping("http") } if err != nil { err = errors.Wrap(err, "pinging docker registry returned") if c.sys != nil && c.sys.DockerDisableV1Ping { return err } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return false } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return true } isV1 := pingV1("https") if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { isV1 = pingV1("http") } if isV1 { err = ErrV1NotSupported } } return err } // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // using the original data structures. func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != http.StatusOK { return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) } body, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } var parsedBody extensionSignatureList if err := json.Unmarshal(body, &parsedBody); err != nil { return nil, errors.Wrapf(err, "Error decoding signature list") } return &parsedBody, nil }
return false }
conditional_block
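The `conditional_block` middle above is the early `return false }` taken when the v1 ping sees a status other than 200 or 401. Around it, `detectProperties` probes the registry over https first and retries over plain http only when `DockerInsecureSkipTLSVerify` is set. Below is a stripped-down sketch of that probing order, assuming a bare `GET /v2/` that answers 200 or 401 identifies a v2 registry; `detectScheme`, `ping`, and their arguments are illustrative stand-ins, not the library API.

package main

import (
	"fmt"
	"net/http"
)

// ping probes the bare /v2/ endpoint over the given scheme.
func ping(scheme, registry string) error {
	resp, err := http.Get(fmt.Sprintf("%s://%s/v2/", scheme, registry))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// A v2 registry answers /v2/ with 200 (open) or 401 (auth required).
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
		return fmt.Errorf("error pinging registry %s, response code %d", registry, resp.StatusCode)
	}
	return nil
}

// detectScheme tries https first, falling back to http only when the
// caller has explicitly opted into insecure connections.
func detectScheme(registry string, insecure bool) (string, error) {
	err := ping("https", registry)
	if err == nil {
		return "https", nil
	}
	if !insecure {
		return "", err
	}
	if err := ping("http", registry); err != nil {
		return "", err
	}
	return "http", nil
}

func main() {
	scheme, err := detectScheme("registry-1.docker.io", false)
	fmt.Println(scheme, err)
}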
docker_client.go
package docker import ( "context" "crypto/tls" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path/filepath" "strconv" "strings" "time" "github.com/containers/image/docker/reference" "github.com/containers/image/pkg/docker/config" "github.com/containers/image/pkg/tlsclientconfig" "github.com/containers/image/types" "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( dockerHostname = "docker.io" dockerV1Hostname = "index.docker.io" dockerRegistry = "registry-1.docker.io" resolvedPingV2URL = "%s://%s/v2/" resolvedPingV1URL = "%s://%s/v1/_ping" tagsPath = "/v2/%s/tags/list" manifestPath = "/v2/%s/manifests/%s" blobsPath = "/v2/%s/blobs/%s" blobUploadPath = "/v2/%s/blobs/uploads/" extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" minimumTokenLifetimeSeconds = 60 extensionSignatureSchemaVersion = 2 // extensionSignature.Version extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type ) var ( // ErrV1NotSupported is returned when we're trying to talk to a // docker V1 registry. ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") // ErrUnauthorizedForCredentials is returned when the status code returned is 401 ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} ) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: // signature represents a Docker image signature. type extensionSignature struct { Version int `json:"schemaVersion"` // Version specifies the schema version Name string `json:"name"` // Name must be in "sha256:<digest>@signatureName" format Type string `json:"type"` // Type is optional, if not set it will be defaulted to "AtomicImageV1" Content []byte `json:"content"` // Content contains the signature } // signatureList represents a list of Docker image signatures. type extensionSignatureList struct { Signatures []extensionSignature `json:"signatures"` } type bearerToken struct { Token string `json:"token"` AccessToken string `json:"access_token"` ExpiresIn int `json:"expires_in"` IssuedAt time.Time `json:"issued_at"` } // dockerClient is configuration for dealing with a single Docker registry. type dockerClient struct { // The following members are set by newDockerClient and do not change afterwards. sys *types.SystemContext registry string username string password string client *http.Client signatureBase signatureStorageBase scope authScope // The following members are detected registry properties: // They are set after a successful detectProperties(), and never change afterwards. scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. challenges []challenge supportsSignatures bool // The following members are private state for setupRequestAuth, both are valid if token != nil. token *bearerToken tokenExpiration time.Time } type authScope struct { remoteName string actions string } // sendAuth determines whether we need authentication for v2 or v1 endpoint. type sendAuth int const ( // v2 endpoint with authentication. v2Auth sendAuth = iota // v1 endpoint with authentication. // TODO: Get v1Auth working // v1Auth // no authentication, works for both v1 and v2.
noAuth ) func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { token := new(bearerToken) if err := json.Unmarshal(blob, &token); err != nil { return nil, err } if token.Token == "" { token.Token = token.AccessToken } if token.ExpiresIn < minimumTokenLifetimeSeconds { token.ExpiresIn = minimumTokenLifetimeSeconds logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) } if token.IssuedAt.IsZero() { token.IssuedAt = time.Now().UTC() } return token, nil } // this is cloned from docker/go-connections because upstream docker has changed // it and make deps here fails otherwise. // We'll drop this once we upgrade to docker 1.13.x deps. func serverDefault() *tls.Config { return &tls.Config{ // Avoid fallback to SSL protocols < TLS1.0 MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, } } // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { if sys != nil && sys.DockerCertPath != "" { return sys.DockerCertPath, nil } if sys != nil && sys.DockerPerHostCertDirPath != "" { return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil } var ( hostCertDir string fullCertDirPath string ) for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths { if sys != nil && sys.RootForImplicitAbsolutePaths != "" { hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath) } else { hostCertDir = systemPerHostCertDirPath } fullCertDirPath = filepath.Join(hostCertDir, hostPort) _, err := os.Stat(fullCertDirPath) if err == nil { break } if os.IsNotExist(err) { continue } if os.IsPermission(err) { logrus.Debugf("error accessing certs directory due to permissions: %v", err) continue } if err != nil { return "", err } } return fullCertDirPath, nil } // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { registry := reference.Domain(ref.ref) username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref)) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } sigBase, err := configuredSignatureStorageBase(sys, ref, write) if err != nil { return nil, err } remoteName := reference.Path(ref.ref) return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName) } // newDockerClientWithDetails returns a new dockerClient instance for the given parameters func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) { hostName := registry if registry == dockerHostname { registry = dockerRegistry } tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = serverDefault() // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, // because docker/docker does not read the certs.d subdirectory at all in that case.
We use the user-visible // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is // undocumented and may change if docker/docker changes. certDir, err := dockerCertDir(sys, hostName) if err != nil { return nil, err } if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil { return nil, err } if sys != nil && sys.DockerInsecureSkipTLSVerify { tr.TLSClientConfig.InsecureSkipVerify = true } return &dockerClient{ sys: sys, registry: registry, username: username, password: password, client: &http.Client{Transport: tr}, signatureBase: sigBase, scope: authScope{ actions: actions, remoteName: remoteName, }, }, nil } // CheckAuth validates the credentials by attempting to log into the registry // returns an error if an error occurred while making the http request or the status code received was 401 func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") if err != nil { return errors.Wrapf(err, "error creating new docker client") } resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth) if err != nil { return err } defer resp.Body.Close() switch resp.StatusCode { case http.StatusOK: return nil case http.StatusUnauthorized: return ErrUnauthorizedForCredentials default: return errors.Errorf("error occurred with status code %d", resp.StatusCode) } } // SearchResult holds the information of each matching image // It matches the output returned by the v1 endpoint type SearchResult struct { Name string `json:"name"` Description string `json:"description"` // StarCount states the number of stars the image has StarCount int `json:"star_count"` IsTrusted bool `json:"is_trusted"` // IsAutomated states whether the image is an automated build
// IsOfficial states whether the image is an official build IsOfficial bool `json:"is_official"` } // SearchRegistry queries a registry for images that contain "image" in their name // The limit is the max number of results desired // Note: The limit value doesn't work with all registries // for example registry.access.redhat.com returns all the results without limiting it to the limit value func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) { type V2Results struct { // Repositories holds the results returned by the /v2/_catalog endpoint Repositories []string `json:"repositories"` } type V1Results struct { // Results holds the results returned by the /v1/search endpoint Results []SearchResult `json:"results"` } v2Res := &V2Results{} v1Res := &V1Results{} // Get credentials from authfile for the underlying hostname username, password, err := config.GetAuthentication(sys, registry) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } // The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results if registry == dockerHostname { registry = dockerV1Hostname } client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") if err != nil { return nil, errors.Wrapf(err, "error creating new docker client") } // Only try the v1 search endpoint if the search query is not empty. If it is // empty skip to the v2 endpoint. if image != "" { // set up the query values for the v1 endpoint u := url.URL{ Path: "/v1/search", } q := u.Query() q.Set("q", image) q.Set("n", strconv.Itoa(limit)) u.RawQuery = q.Encode() logrus.Debugf("trying to talk to v1 search endpoint\n") resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth) if err != nil { logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Debugf("error getting search results from v1 endpoint %q, status code %d", registry, resp.StatusCode) } else { if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { return nil, err } return v1Res.Results, nil } } } logrus.Debugf("trying to talk to v2 search endpoint\n") resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth) if err != nil { logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Errorf("error getting search results from v2 endpoint %q, status code %d", registry, resp.StatusCode) } else { if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { return nil, err } searchRes := []SearchResult{} for _, repo := range v2Res.Repositories { if strings.Contains(repo, image) { res := SearchResult{ Name: repo, } searchRes = append(searchRes, res) } } return searchRes, nil } } return nil, errors.Wrapf(err, "couldn't search registry %q", registry) } // makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. 
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) { if err := c.detectProperties(ctx); err != nil { return nil, err } url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth) } // makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // TODO(runcom): too many arguments here, use a struct func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) { req, err := http.NewRequest(method, url, stream) if err != nil { return nil, err } req = req.WithContext(ctx) if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. req.ContentLength = streamLen } req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") for n, h := range headers { for _, hh := range h { req.Header.Add(n, hh) } } if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) } if auth == v2Auth { if err := c.setupRequestAuth(req); err != nil { return nil, err } } logrus.Debugf("%s %s", method, url) res, err := c.client.Do(req) if err != nil { return nil, err } return res, nil } // we're using the challenges from the /v2/ ping response and not the one from the destination // URL in this request because: // // 1) docker does that as well // 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request // // debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up func (c *dockerClient) setupRequestAuth(req *http.Request) error { if len(c.challenges) == 0 { return nil } schemeNames := make([]string, 0, len(c.challenges)) for _, challenge := range c.challenges { schemeNames = append(schemeNames, challenge.Scheme) switch challenge.Scheme { case "basic": req.SetBasicAuth(c.username, c.password) return nil case "bearer": if c.token == nil || time.Now().After(c.tokenExpiration) { realm, ok := challenge.Parameters["realm"] if !ok { return errors.Errorf("missing realm in bearer auth challenge") } service, _ := challenge.Parameters["service"] // Will be "" if not present var scope string if c.scope.remoteName != "" && c.scope.actions != "" { scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) } token, err := c.getBearerToken(req.Context(), realm, service, scope) if err != nil { return err } c.token = token c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token)) return nil default: logrus.Debugf("no handler for %s authentication", challenge.Scheme) } } logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) return nil } func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) { authReq, err := http.NewRequest("GET", realm, nil) if err != nil { return nil, err } authReq = 
authReq.WithContext(ctx) getParams := authReq.URL.Query() if c.username != "" { getParams.Add("account", c.username) } if service != "" { getParams.Add("service", service) } if scope != "" { getParams.Add("scope", scope) } authReq.URL.RawQuery = getParams.Encode() if c.username != "" && c.password != "" { authReq.SetBasicAuth(c.username, c.password) } logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) tr := tlsclientconfig.NewTransport() // TODO(runcom): insecure for now to contact the external token service tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} client := &http.Client{Transport: tr} res, err := client.Do(authReq) if err != nil { return nil, err } defer res.Body.Close() switch res.StatusCode { case http.StatusUnauthorized: return nil, ErrUnauthorizedForCredentials case http.StatusOK: break default: return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL) } tokenBlob, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } return newBearerTokenFromJSONBlob(tokenBlob) } // detectProperties detects various properties of the registry. // See the dockerClient documentation for members which are affected by this. func (c *dockerClient) detectProperties(ctx context.Context) error { if c.scheme != "" { return nil } ping := func(scheme string) error { url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return err } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return errors.Errorf("error pinging registry %s, response code %d", c.registry, resp.StatusCode) } c.challenges = parseAuthHeader(resp.Header) c.scheme = scheme c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" return nil } err := ping("https") if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { err = ping("http") } if err != nil { err = errors.Wrap(err, "pinging docker registry returned") if c.sys != nil && c.sys.DockerDisableV1Ping { return err } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return false } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return false } return true } isV1 := pingV1("https") if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { isV1 = pingV1("http") } if isV1 { err = ErrV1NotSupported } } return err } // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != http.StatusOK { return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) } body, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } var parsedBody extensionSignatureList if err := json.Unmarshal(body, &parsedBody); err != nil { return nil, errors.Wrapf(err, "Error decoding signature list") } return &parsedBody, nil }
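Editor's aside: detectProperties above probes the /v2/ endpoint over https first and falls back to plain http only when TLS verification has been explicitly disabled; a 200 or a 401 both count as "this is a v2 registry". Below is a minimal, self-contained Go sketch of that probing order. The function and variable names (pingV2, detectScheme, insecureAllowed) are illustrative only and are not part of containers/image.

package main

import (
	"fmt"
	"net/http"
)

// pingV2 reports whether a /v2/ endpoint answered with 200 or 401,
// the two status codes the detection logic accepts as "v2 registry here".
func pingV2(scheme, registry string) error {
	url := fmt.Sprintf("%s://%s/v2/", scheme, registry)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
		return fmt.Errorf("unexpected status %d from %s", resp.StatusCode, url)
	}
	return nil
}

// detectScheme mirrors the https-then-http order: http is only attempted
// when the caller has opted out of TLS verification.
func detectScheme(registry string, insecureAllowed bool) (string, error) {
	err := pingV2("https", registry)
	if err == nil {
		return "https", nil
	}
	if insecureAllowed {
		if err2 := pingV2("http", registry); err2 == nil {
			return "http", nil
		}
	}
	return "", err
}

func main() {
	scheme, err := detectScheme("registry-1.docker.io", false)
	fmt.Println(scheme, err)
}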
IsAutomated bool `json:"is_automated"`
random_line_split
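Editor's aside: the newBearerTokenFromJSONBlob logic in the row above applies three defaulting rules that are easy to miss when reading the flattened source: token servers may return the credential under either "token" or "access_token", very short lifetimes are clamped up to a minimum, and a missing issued_at defaults to the current time. A self-contained sketch of just those rules follows; the type and constant are redeclared here purely for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

const minLifetimeSeconds = 60 // mirrors minimumTokenLifetimeSeconds above

type token struct {
	Token       string    `json:"token"`
	AccessToken string    `json:"access_token"`
	ExpiresIn   int       `json:"expires_in"`
	IssuedAt    time.Time `json:"issued_at"`
}

func parseToken(blob []byte) (*token, error) {
	t := new(token)
	if err := json.Unmarshal(blob, t); err != nil {
		return nil, err
	}
	if t.Token == "" { // some servers only send "access_token"
		t.Token = t.AccessToken
	}
	if t.ExpiresIn < minLifetimeSeconds { // clamp very short lifetimes
		t.ExpiresIn = minLifetimeSeconds
	}
	if t.IssuedAt.IsZero() { // servers may omit issued_at entirely
		t.IssuedAt = time.Now().UTC()
	}
	return t, nil
}

func main() {
	// Only access_token is set, and expires_in is below the minimum.
	t, err := parseToken([]byte(`{"access_token":"abc","expires_in":5}`))
	fmt.Println(t.Token, t.ExpiresIn, err) // abc 60 <nil>
}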
docker_client.go
package docker import ( "context" "crypto/tls" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path/filepath" "strconv" "strings" "time" "github.com/containers/image/docker/reference" "github.com/containers/image/pkg/docker/config" "github.com/containers/image/pkg/tlsclientconfig" "github.com/containers/image/types" "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( dockerHostname = "docker.io" dockerV1Hostname = "index.docker.io" dockerRegistry = "registry-1.docker.io" resolvedPingV2URL = "%s://%s/v2/" resolvedPingV1URL = "%s://%s/v1/_ping" tagsPath = "/v2/%s/tags/list" manifestPath = "/v2/%s/manifests/%s" blobsPath = "/v2/%s/blobs/%s" blobUploadPath = "/v2/%s/blobs/uploads/" extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" minimumTokenLifetimeSeconds = 60 extensionSignatureSchemaVersion = 2 // extensionSignature.Version extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type ) var ( // ErrV1NotSupported is returned when we're trying to talk to a // docker V1 registry. ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") // ErrUnauthorizedForCredentials is returned when the status code returned is 401 ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} ) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: // signature represents a Docker image signature. type extensionSignature struct { Version int `json:"schemaVersion"` // Version specifies the schema version Name string `json:"name"` // Name must be in "sha256:<digest>@signatureName" format Type string `json:"type"` // Type is optional, if not set it will be defaulted to "AtomicImageV1" Content []byte `json:"content"` // Content contains the signature } // signatureList represents a list of Docker image signatures. type extensionSignatureList struct { Signatures []extensionSignature `json:"signatures"` } type bearerToken struct { Token string `json:"token"` AccessToken string `json:"access_token"` ExpiresIn int `json:"expires_in"` IssuedAt time.Time `json:"issued_at"` } // dockerClient is configuration for dealing with a single Docker registry. type dockerClient struct { // The following members are set by newDockerClient and do not change afterwards. sys *types.SystemContext registry string username string password string client *http.Client signatureBase signatureStorageBase scope authScope // The following members are detected registry properties: // They are set after a successful detectProperties(), and never change afterwards. scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. challenges []challenge supportsSignatures bool // The following members are private state for setupRequestAuth, both are valid if token != nil. token *bearerToken tokenExpiration time.Time } type authScope struct { remoteName string actions string } // sendAuth determines whether we need authentication for v2 or v1 endpoint. type sendAuth int const ( // v2 endpoint with authentication. v2Auth sendAuth = iota // v1 endpoint with authentication. // TODO: Get v1Auth working // v1Auth // no authentication, works for both v1 and v2.
noAuth ) func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { token := new(bearerToken) if err := json.Unmarshal(blob, &token); err != nil { return nil, err } if token.Token == "" { token.Token = token.AccessToken } if token.ExpiresIn < minimumTokenLifetimeSeconds { token.ExpiresIn = minimumTokenLifetimeSeconds logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) } if token.IssuedAt.IsZero() { token.IssuedAt = time.Now().UTC() } return token, nil } // this is cloned from docker/go-connections because upstream docker has changed // it and make deps here fails otherwise. // We'll drop this once we upgrade to docker 1.13.x deps. func serverDefault() *tls.Config { return &tls.Config{ // Avoid fallback to SSL protocols < TLS1.0 MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, } } // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { if sys != nil && sys.DockerCertPath != "" { return sys.DockerCertPath, nil } if sys != nil && sys.DockerPerHostCertDirPath != "" { return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil } var ( hostCertDir string fullCertDirPath string ) for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths { if sys != nil && sys.RootForImplicitAbsolutePaths != "" { hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath) } else { hostCertDir = systemPerHostCertDirPath } fullCertDirPath = filepath.Join(hostCertDir, hostPort) _, err := os.Stat(fullCertDirPath) if err == nil { break } if os.IsNotExist(err) { continue } if os.IsPermission(err) { logrus.Debugf("error accessing certs directory due to permissions: %v", err) continue } if err != nil { return "", err } } return fullCertDirPath, nil } // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { registry := reference.Domain(ref.ref) username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref)) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } sigBase, err := configuredSignatureStorageBase(sys, ref, write) if err != nil { return nil, err } remoteName := reference.Path(ref.ref) return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName) } // newDockerClientWithDetails returns a new dockerClient instance for the given parameters func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) { hostName := registry if registry == dockerHostname { registry = dockerRegistry } tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = serverDefault() // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, // because docker/docker does not read the certs.d subdirectory at all in that case.
We use the user-visible // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is // undocumented and may change if docker/docker changes. certDir, err := dockerCertDir(sys, hostName) if err != nil { return nil, err } if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil { return nil, err } if sys != nil && sys.DockerInsecureSkipTLSVerify { tr.TLSClientConfig.InsecureSkipVerify = true } return &dockerClient{ sys: sys, registry: registry, username: username, password: password, client: &http.Client{Transport: tr}, signatureBase: sigBase, scope: authScope{ actions: actions, remoteName: remoteName, }, }, nil } // CheckAuth validates the credentials by attempting to log into the registry // returns an error if an error occurred while making the http request or the status code received was 401 func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") if err != nil { return errors.Wrapf(err, "error creating new docker client") } resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth) if err != nil { return err } defer resp.Body.Close() switch resp.StatusCode { case http.StatusOK: return nil case http.StatusUnauthorized: return ErrUnauthorizedForCredentials default: return errors.Errorf("error occurred with status code %d", resp.StatusCode) } } // SearchResult holds the information of each matching image // It matches the output returned by the v1 endpoint type SearchResult struct { Name string `json:"name"` Description string `json:"description"` // StarCount states the number of stars the image has StarCount int `json:"star_count"` IsTrusted bool `json:"is_trusted"` // IsAutomated states whether the image is an automated build IsAutomated bool `json:"is_automated"` // IsOfficial states whether the image is an official build IsOfficial bool `json:"is_official"` } // SearchRegistry queries a registry for images that contain "image" in their name // The limit is the max number of results desired // Note: The limit value doesn't work with all registries // for example registry.access.redhat.com returns all the results without limiting it to the limit value func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) { type V2Results struct { // Repositories holds the results returned by the /v2/_catalog endpoint Repositories []string `json:"repositories"` } type V1Results struct { // Results holds the results returned by the /v1/search endpoint Results []SearchResult `json:"results"` } v2Res := &V2Results{} v1Res := &V1Results{} // Get credentials from authfile for the underlying hostname username, password, err := config.GetAuthentication(sys, registry) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } // The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results if registry == dockerHostname { registry = dockerV1Hostname } client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") if err != nil { return nil, errors.Wrapf(err, "error creating new 
docker client") } // Only try the v1 search endpoint if the search query is not empty. If it is // empty skip to the v2 endpoint. if image != "" { // set up the query values for the v1 endpoint u := url.URL{ Path: "/v1/search", } q := u.Query() q.Set("q", image) q.Set("n", strconv.Itoa(limit)) u.RawQuery = q.Encode() logrus.Debugf("trying to talk to v1 search endpoint\n") resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth) if err != nil { logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Debugf("error getting search results from v1 endpoint %q, status code %d", registry, resp.StatusCode) } else { if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { return nil, err } return v1Res.Results, nil } } } logrus.Debugf("trying to talk to v2 search endpoint\n") resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth) if err != nil { logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Errorf("error getting search results from v2 endpoint %q, status code %d", registry, resp.StatusCode) } else { if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { return nil, err } searchRes := []SearchResult{} for _, repo := range v2Res.Repositories { if strings.Contains(repo, image) { res := SearchResult{ Name: repo, } searchRes = append(searchRes, res) } } return searchRes, nil } } return nil, errors.Wrapf(err, "couldn't search registry %q", registry) } // makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) { if err := c.detectProperties(ctx); err != nil { return nil, err } url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth) } // makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // TODO(runcom): too many arguments here, use a struct func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) { r
we're using the challenges from the /v2/ ping response and not the one from the destination // URL in this request because: // // 1) docker does that as well // 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request // // debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up func (c *dockerClient) setupRequestAuth(req *http.Request) error { if len(c.challenges) == 0 { return nil } schemeNames := make([]string, 0, len(c.challenges)) for _, challenge := range c.challenges { schemeNames = append(schemeNames, challenge.Scheme) switch challenge.Scheme { case "basic": req.SetBasicAuth(c.username, c.password) return nil case "bearer": if c.token == nil || time.Now().After(c.tokenExpiration) { realm, ok := challenge.Parameters["realm"] if !ok { return errors.Errorf("missing realm in bearer auth challenge") } service, _ := challenge.Parameters["service"] // Will be "" if not present var scope string if c.scope.remoteName != "" && c.scope.actions != "" { scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) } token, err := c.getBearerToken(req.Context(), realm, service, scope) if err != nil { return err } c.token = token c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token)) return nil default: logrus.Debugf("no handler for %s authentication", challenge.Scheme) } } logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) return nil } func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) { authReq, err := http.NewRequest("GET", realm, nil) if err != nil { return nil, err } authReq = authReq.WithContext(ctx) getParams := authReq.URL.Query() if c.username != "" { getParams.Add("account", c.username) } if service != "" { getParams.Add("service", service) } if scope != "" { getParams.Add("scope", scope) } authReq.URL.RawQuery = getParams.Encode() if c.username != "" && c.password != "" { authReq.SetBasicAuth(c.username, c.password) } logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) tr := tlsclientconfig.NewTransport() // TODO(runcom): insecure for now to contact the external token service tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} client := &http.Client{Transport: tr} res, err := client.Do(authReq) if err != nil { return nil, err } defer res.Body.Close() switch res.StatusCode { case http.StatusUnauthorized: return nil, ErrUnauthorizedForCredentials case http.StatusOK: break default: return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL) } tokenBlob, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } return newBearerTokenFromJSONBlob(tokenBlob) } // detectProperties detects various properties of the registry. // See the dockerClient documentation for members which are affected by this. 
func (c *dockerClient) detectProperties(ctx context.Context) error { if c.scheme != "" { return nil } ping := func(scheme string) error { url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return err } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return errors.Errorf("error pinging registry %s, response code %d", c.registry, resp.StatusCode) } c.challenges = parseAuthHeader(resp.Header) c.scheme = scheme c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" return nil } err := ping("https") if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { err = ping("http") } if err != nil { err = errors.Wrap(err, "pinging docker registry returned") if c.sys != nil && c.sys.DockerDisableV1Ping { return err } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return false } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return false } return true } isV1 := pingV1("https") if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { isV1 = pingV1("http") } if isV1 { err = ErrV1NotSupported } } return err } // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // using the original data structures. func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != http.StatusOK { return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) } body, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } var parsedBody extensionSignatureList if err := json.Unmarshal(body, &parsedBody); err != nil { return nil, errors.Wrapf(err, "Error decoding signature list") } return &parsedBody, nil }
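Editor's aside: setupRequestAuth above caches the bearer token and derives tokenExpiration as issued_at plus expires_in seconds; a fresh token is requested only when there is no cached token or the wall clock has passed that point. A tiny standalone sketch of that refresh decision, under the same assumptions (names here are illustrative):

package main

import (
	"fmt"
	"time"
)

// expiresAt mirrors: c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
func expiresAt(issuedAt time.Time, expiresInSeconds int) time.Time {
	return issuedAt.Add(time.Duration(expiresInSeconds) * time.Second)
}

// needsRefresh mirrors the check: c.token == nil || time.Now().After(c.tokenExpiration)
func needsRefresh(haveToken bool, expiration time.Time) bool {
	return !haveToken || time.Now().After(expiration)
}

func main() {
	exp := expiresAt(time.Now().UTC(), 60)
	fmt.Println(needsRefresh(true, exp))  // false: cached token still valid
	fmt.Println(needsRefresh(false, exp)) // true: no token cached yet
}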
eq, err := http.NewRequest(method, url, stream) if err != nil { return nil, err } req = req.WithContext(ctx) if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. req.ContentLength = streamLen } req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") for n, h := range headers { for _, hh := range h { req.Header.Add(n, hh) } } if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) } if auth == v2Auth { if err := c.setupRequestAuth(req); err != nil { return nil, err } } logrus.Debugf("%s %s", method, url) res, err := c.client.Do(req) if err != nil { return nil, err } return res, nil } //
identifier_body
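Editor's aside: getBearerToken in the row above assembles the token-service URL from optional account, service and scope query parameters, where scope follows the repository:<name>:<actions> convention used by Docker registry auth. A standalone sketch of that query construction; the helper name and the sample values below are made up for illustration.

package main

import (
	"fmt"
	"net/url"
)

// tokenURL fills the realm URL's query string the same way getBearerToken
// does: each parameter is added only when it actually has a value.
func tokenURL(realm, account, service, repo, actions string) (string, error) {
	u, err := url.Parse(realm)
	if err != nil {
		return "", err
	}
	q := u.Query()
	if account != "" {
		q.Add("account", account)
	}
	if service != "" {
		q.Add("service", service)
	}
	if repo != "" && actions != "" {
		q.Add("scope", fmt.Sprintf("repository:%s:%s", repo, actions))
	}
	u.RawQuery = q.Encode()
	return u.String(), nil
}

func main() {
	s, _ := tokenURL("https://auth.docker.io/token", "", "registry.docker.io", "library/busybox", "pull")
	fmt.Println(s)
	// https://auth.docker.io/token?scope=repository%3Alibrary%2Fbusybox%3Apull&service=registry.docker.io
}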
customs.js
jQuery(function($) { "use strict"; /** * Bootstrap Tooltip */ $('[data-toggle="tooltip"]').tooltip(); /** * Sticky Header */ $(window).scroll(function(){ if( $(window).scrollTop() > 10 ){ $('.navbar').addClass('navbar-sticky') } else { $('.navbar').removeClass('navbar-sticky') } }) /** * Main Menu Slide Down Effect */ var selected = $('#navbar li'); // Mouse-enter dropdown selected.on("mouseenter", function() { $(this).find('ul').first().stop(true, true).delay(350).slideDown(500, 'easeInOutQuad'); }); // Mouse-leave dropdown selected.on("mouseleave", function() { $(this).find('ul').first().stop(true, true).delay(100).slideUp(150, 'easeInOutQuad'); }); /** * Slicknav - a Mobile Menu */ var $slicknav_label; $('.responsive-menu').slicknav({ duration: 500, easingOpen: 'easeInExpo', easingClose: 'easeOutExpo', closedSymbol: '<i class="fa fa-plus"></i>', openedSymbol: '<i class="fa fa-minus"></i>', prependTo: '#slicknav-mobile', allowParentLinks: true, label:"" }); var $slicknav_label; $('#responsive-menu').slicknav({ duration: 500, easingOpen: 'easeInExpo', easingClose: 'easeOutExpo', closedSymbol: '<i class="fa fa-plus"></i>', openedSymbol: '<i class="fa fa-minus"></i>', prependTo: '#slicknav-mobile', allowParentLinks: true, label:"" }); /** * Smooth scroll to anchor */ $('a.anchor[href*=#]:not([href=#])').on("click",function() { if (location.pathname.replace(/^\//,'') == this.pathname.replace(/^\//,'') && location.hostname == this.hostname) { var target = $(this.hash); target = target.length ? target : $('[name=' + this.hash.slice(1) +']'); if (target.length) { $('html,body').animate({ scrollTop: (target.offset().top - 70) // 70px offset for navbar menu }, 1000); return false; } } }); /** * Another Bootstrap Toggle */ $('.another-toggle').on("click",function() { if( $('h4',this).hasClass('active') ){ $(this).find('.another-toggle-content').show(); } }); $('.another-toggle h4').on("click",function() { if( $(this).hasClass('active') ){ $(this).removeClass('active'); $(this).next('.another-toggle-content').slideUp(); } else { $(this).addClass('active'); $(this).next('.another-toggle-content').slideDown(); } }); /** * Arrow for Menu has sub-menu */ if ($(window).width() > 992) { $(".navbar-arrow ul ul > li").has("ul").children("a").append("<i class='arrow-indicator fa fa-angle-right'></i>"); } /** * Payment Option */ var selected2 = $("div.payment-option-form"); selected2.hide(); $("input[name$='payments']").on("click",function() { var test = $(this).val(); selected2.hide(); $("#" + test).show(); }); /** * Icon Change on Collapse */ $('.collapse.in').prev('.panel-heading').addClass('active'); $('.bootstrap-accordion, .bootstrap-toggle') .on('show.bs.collapse', function(a) { $(a.target).prev('.panel-heading').addClass('active'); }) .on('hide.bs.collapse', function(a) { $(a.target).prev('.panel-heading').removeClass('active'); }); /** * Back To Top */ var selected3 = $("#back-to-top"); $(window).scroll(function(){ if($(window).scrollTop() > 500){ selected3.fadeIn(200); } else{ selected3.fadeOut(200); } }); selected3.on("click",function() { $('html, body').animate({ scrollTop:0 }, '800'); return false; }); /** * Placeholder */ $("input, textarea").placeholder(); /** * Bootstrap rating */ var selected4 = $('.rating-label'); selected4.rating(); selected4.each(function () { $('<span class="label label-default"></span>') .text($(this).val() || ' ') .insertAfter(this);
$(this).next('.label').text($(this).val()); }); /** * Sign-in Modal */ var $formLogin = $('#login-form'); var $formLost = $('#lost-form'); var $formRegister = $('#register-form'); var $divForms = $('#modal-login-form-wrapper'); var $modalAnimateTime = 300; $('#login_register_btn').on("click", function () { modalAnimate($formLogin, $formRegister) }); $('#register_login_btn').on("click", function () { modalAnimate($formRegister, $formLogin); }); $('#login_lost_btn').on("click", function () { modalAnimate($formLogin, $formLost); }); $('#lost_login_btn').on("click", function () { modalAnimate($formLost, $formLogin); }); $('#lost_register_btn').on("click", function () { modalAnimate($formLost, $formRegister); }); function modalAnimate ($oldForm, $newForm) { var $oldH = $oldForm.height(); var $newH = $newForm.height(); $divForms.css("height",$oldH); $oldForm.fadeToggle($modalAnimateTime, function(){ $divForms.animate({height: $newH}, $modalAnimateTime, function(){ $newForm.fadeToggle($modalAnimateTime); }); }); } /** * Read more-less paragraph */ var showTotalChar = 130, showChar = "read more +", hideChar = "read less -"; $('.read-more-less').each(function() { var content = $(this).text(); if (content.length > showTotalChar) { var con = content.substr(0, showTotalChar); var hcon = content.substr(showTotalChar, content.length - showTotalChar); var txt= con + '<span class="dots">...</span><span class="morectnt"><span>' + hcon + '</span>&nbsp;&nbsp;<a href="" class="showmoretxt">' + showChar + '</a></span>'; $(this).html(txt); } }); $(".showmoretxt").on("click",function() { if ($(this).hasClass("sample")) { $(this).removeClass("sample"); $(this).text(showChar); } else { $(this).addClass("sample"); $(this).text(hideChar); } $(this).parent().prev().toggle(); $(this).prev().toggle(); return false; }); // SLICK SLIDER $('.responsive').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 4, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, slidesToScroll: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2, slidesToScroll: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); // SLICK SLIDER STYLE TESTIMONIAL $('.testimonial1').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 2, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 2, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); // SLICK SLIDER STYLE TESTIMONIAL $('.testimonial2').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 1, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2, } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); $('.tour-cats').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 6, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1 } } // You can 
unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); $('.main-banner').slick(); // FADE SLIDER $('.sidefd').slick({ dots: false, infinite: true, speed: 200, slide: true, autoplay :true, cssEase: 'linear' }); $(document).on('click','.itenerary_tab',function(){ var tab_class = $(this).next().find('.tab-gallery').attr('data-tab'); var selector = "." + tab_class; $( selector ).slick({ dots: false, infinite: false, arrows: true, speed: 300, slidesToShow: 5, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, slidesToScroll: 3, infinite: true, } }, { breakpoint: 600, settings: { slidesToShow: 2, slidesToScroll: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); }); $('.fading').slick({ dots: false, infinite: true, speed: 300, fade: true, arrows: true, autoplay: true, cssEase: 'linear' }); $('.video_play_pause').on('click',function(){ var video = $(this).prev(); if( video.get(0).paused ){ video.get(0).play(); $(this).find('i').removeClass('fa-play').addClass('fa-pause'); } else { video.get(0).pause(); $(this).find('i').removeClass('fa-pause').addClass('fa-play'); } }); $('#hero-video').YTPlayer({ fitToBackground: true, videoId: '4NtfDBY_DVQ' }); /*==================================== Clock Countdown ======================================*/ $('#clock-countdown').countdown('2018/9/20 12:00:00').on('update.countdown', function(event) { var $this = $(this).html(event.strftime('' + '<div class="counter-container"><div class="counter-box first"><div class="number">%-D</div><span>Day%!d</span></div>' + '<div class="counter-box"><div class="number">%H</div><span>Hours</span></div>' + '<div class="counter-box"><div class="number">%M</div><span>Minutes</span></div>' + '<div class="counter-box last"><div class="number">%S</div><span>Seconds</span></div></div>' )); }); });
}); selected4.on('change', function () {
random_line_split
customs.js
jQuery(function($) { "use strict"; /** * Bootstrap Tooltip */ $('[data-toggle="tooltip"]').tooltip(); /** * Sticky Header */ $(window).scroll(function(){ if( $(window).scrollTop() > 10 ){ $('.navbar').addClass('navbar-sticky') } else { $('.navbar').removeClass('navbar-sticky') } }) /** * Main Menu Slide Down Effect */ var selected = $('#navbar li'); // Mouse-enter dropdown selected.on("mouseenter", function() { $(this).find('ul').first().stop(true, true).delay(350).slideDown(500, 'easeInOutQuad'); }); // Mouse-leave dropdown selected.on("mouseleave", function() { $(this).find('ul').first().stop(true, true).delay(100).slideUp(150, 'easeInOutQuad'); }); /** * Slicknav - a Mobile Menu */ var $slicknav_label; $('.responsive-menu').slicknav({ duration: 500, easingOpen: 'easeInExpo', easingClose: 'easeOutExpo', closedSymbol: '<i class="fa fa-plus"></i>', openedSymbol: '<i class="fa fa-minus"></i>', prependTo: '#slicknav-mobile', allowParentLinks: true, label:"" }); var $slicknav_label; $('#responsive-menu').slicknav({ duration: 500, easingOpen: 'easeInExpo', easingClose: 'easeOutExpo', closedSymbol: '<i class="fa fa-plus"></i>', openedSymbol: '<i class="fa fa-minus"></i>', prependTo: '#slicknav-mobile', allowParentLinks: true, label:"" }); /** * Smooth scroll to anchor */ $('a.anchor[href*=#]:not([href=#])').on("click",function() { if (location.pathname.replace(/^\//,'') == this.pathname.replace(/^\//,'') && location.hostname == this.hostname) { var target = $(this.hash); target = target.length ? target : $('[name=' + this.hash.slice(1) +']'); if (target.length) { $('html,body').animate({ scrollTop: (target.offset().top - 70) // 70px offset for navbar menu }, 1000); return false; } } }); /** * Another Bootstrap Toggle */ $('.another-toggle').on("click",function() { if( $('h4',this).hasClass('active') ){ $(this).find('.another-toggle-content').show(); } }); $('.another-toggle h4').on("click",function() { if( $(this).hasClass('active') ){ $(this).removeClass('active'); $(this).next('.another-toggle-content').slideUp(); } else { $(this).addClass('active'); $(this).next('.another-toggle-content').slideDown(); } }); /** * Arrow for Menu has sub-menu */ if ($(window).width() > 992) { $(".navbar-arrow ul ul > li").has("ul").children("a").append("<i class='arrow-indicator fa fa-angle-right'></i>"); } /** * Payment Option */ var selected2 = $("div.payment-option-form"); selected2.hide(); $("input[name$='payments']").on("click",function() { var test = $(this).val(); selected2.hide(); $("#" + test).show(); }); /** * Icon Change on Collapse */ $('.collapse.in').prev('.panel-heading').addClass('active'); $('.bootstrap-accordion, .bootstrap-toggle') .on('show.bs.collapse', function(a) { $(a.target).prev('.panel-heading').addClass('active'); }) .on('hide.bs.collapse', function(a) { $(a.target).prev('.panel-heading').removeClass('active'); }); /** * Back To Top */ var selected3 = $("#back-to-top"); $(window).scroll(function(){ if($(window).scrollTop() > 500){ selected3.fadeIn(200); } else{ selected3.fadeOut(200); } }); selected3.on("click",function() { $('html, body').animate({ scrollTop:0 }, '800'); return false; }); /** * Placeholder */ $("input, textarea").placeholder(); /** * Bootstrap rating */ var selected4 = $('.rating-label'); selected4.rating(); selected4.each(function () { $('<span class="label label-default"></span>') .text($(this).val() || ' ') .insertAfter(this); }); selected4.on('change', function () { $(this).next('.label').text($(this).val()); }); /** * Sign-in Modal */ var $formLogin = 
$('#login-form'); var $formLost = $('#lost-form'); var $formRegister = $('#register-form'); var $divForms = $('#modal-login-form-wrapper'); var $modalAnimateTime = 300; $('#login_register_btn').on("click", function () { modalAnimate($formLogin, $formRegister) }); $('#register_login_btn').on("click", function () { modalAnimate($formRegister, $formLogin); }); $('#login_lost_btn').on("click", function () { modalAnimate($formLogin, $formLost); }); $('#lost_login_btn').on("click", function () { modalAnimate($formLost, $formLogin); }); $('#lost_register_btn').on("click", function () { modalAnimate($formLost, $formRegister); }); function modalAnimate ($oldForm, $newForm)
/** * Read more-less paragraph */ var showTotalChar = 130, showChar = "read more +", hideChar = "read less -"; $('.read-more-less').each(function() { var content = $(this).text(); if (content.length > showTotalChar) { var con = content.substr(0, showTotalChar); var hcon = content.substr(showTotalChar, content.length - showTotalChar); var txt= con + '<span class="dots">...</span><span class="morectnt"><span>' + hcon + '</span>&nbsp;&nbsp;<a href="" class="showmoretxt">' + showChar + '</a></span>'; $(this).html(txt); } }); $(".showmoretxt").on("click",function() { if ($(this).hasClass("sample")) { $(this).removeClass("sample"); $(this).text(showChar); } else { $(this).addClass("sample"); $(this).text(hideChar); } $(this).parent().prev().toggle(); $(this).prev().toggle(); return false; }); // SLICK SLIDER $('.responsive').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 4, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, slidesToScroll: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2, slidesToScroll: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); // SLICK SLIDER STYLE TESTIMONIAL $('.testimonial1').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 2, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 2, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); // SLICK SLIDER STYLE TESTIMONIAL $('.testimonial2').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 1, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2, } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); $('.tour-cats').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 6, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); $('.main-banner').slick(); // FADE SLIDER $('.sidefd').slick({ dots: false, infinite: true, speed: 200, slide: true, autoplay :true, cssEase: 'linear' }); $(document).on('click','.itenerary_tab',function(){ var tab_class = $(this).next().find('.tab-gallery').attr('data-tab'); var selector = "." 
+ tab_class; $( selector ).slick({ dots: false, infinite: false, arrows: true, speed: 300, slidesToShow: 5, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, slidesToScroll: 3, infinite: true, } }, { breakpoint: 600, settings: { slidesToShow: 2, slidesToScroll: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); }); $('.fading').slick({ dots: false, infinite: true, speed: 300, fade: true, arrows: true, autoplay: true, cssEase: 'linear' }); $('.video_play_pause').on('click',function(){ var video = $(this).prev(); if( video.get(0).paused ){ video.get(0).play(); $(this).find('i').removeClass('fa-play').addClass('fa-pause'); } else { video.get(0).pause(); $(this).find('i').removeClass('fa-pause').addClass('fa-play'); } }); $('#hero-video').YTPlayer({ fitToBackground: true, videoId: '4NtfDBY_DVQ' }); /*==================================== Clock Countdown ======================================*/ $('#clock-countdown').countdown('2018/9/20 12:00:00').on('update.countdown', function(event) { var $this = $(this).html(event.strftime('' + '<div class="counter-container"><div class="counter-box first"><div class="number">%-D</div><span>Day%!d</span></div>' + '<div class="counter-box"><div class="number">%H</div><span>Hours</span></div>' + '<div class="counter-box"><div class="number">%M</div><span>Minutes</span></div>' + '<div class="counter-box last"><div class="number">%S</div><span>Seconds</span></div></div>' )); }); });
{ var $oldH = $oldForm.height(); var $newH = $newForm.height(); $divForms.css("height",$oldH); $oldForm.fadeToggle($modalAnimateTime, function(){ $divForms.animate({height: $newH}, $modalAnimateTime, function(){ $newForm.fadeToggle($modalAnimateTime); }); }); }
identifier_body
customs.js
jQuery(function($) { "use strict"; /** * Bootstrap Tooltip */ $('[data-toggle="tooltip"]').tooltip(); /** * Sticky Header */ $(window).scroll(function(){ if( $(window).scrollTop() > 10 ){ $('.navbar').addClass('navbar-sticky') } else { $('.navbar').removeClass('navbar-sticky') } }) /** * Main Menu Slide Down Effect */ var selected = $('#navbar li'); // Mouse-enter dropdown selected.on("mouseenter", function() { $(this).find('ul').first().stop(true, true).delay(350).slideDown(500, 'easeInOutQuad'); }); // Mouse-leave dropdown selected.on("mouseleave", function() { $(this).find('ul').first().stop(true, true).delay(100).slideUp(150, 'easeInOutQuad'); }); /** * Slicknav - a Mobile Menu */ var $slicknav_label; $('.responsive-menu').slicknav({ duration: 500, easingOpen: 'easeInExpo', easingClose: 'easeOutExpo', closedSymbol: '<i class="fa fa-plus"></i>', openedSymbol: '<i class="fa fa-minus"></i>', prependTo: '#slicknav-mobile', allowParentLinks: true, label:"" }); var $slicknav_label; $('#responsive-menu').slicknav({ duration: 500, easingOpen: 'easeInExpo', easingClose: 'easeOutExpo', closedSymbol: '<i class="fa fa-plus"></i>', openedSymbol: '<i class="fa fa-minus"></i>', prependTo: '#slicknav-mobile', allowParentLinks: true, label:"" }); /** * Smooth scroll to anchor */ $('a.anchor[href*=#]:not([href=#])').on("click",function() { if (location.pathname.replace(/^\//,'') == this.pathname.replace(/^\//,'') && location.hostname == this.hostname) { var target = $(this.hash); target = target.length ? target : $('[name=' + this.hash.slice(1) +']'); if (target.length) { $('html,body').animate({ scrollTop: (target.offset().top - 70) // 70px offset for navbar menu }, 1000); return false; } } }); /** * Another Bootstrap Toggle */ $('.another-toggle').on("click",function() { if( $('h4',this).hasClass('active') ){ $(this).find('.another-toggle-content').show(); } }); $('.another-toggle h4').on("click",function() { if( $(this).hasClass('active') ){ $(this).removeClass('active'); $(this).next('.another-toggle-content').slideUp(); } else { $(this).addClass('active'); $(this).next('.another-toggle-content').slideDown(); } }); /** * Arrow for Menu has sub-menu */ if ($(window).width() > 992) { $(".navbar-arrow ul ul > li").has("ul").children("a").append("<i class='arrow-indicator fa fa-angle-right'></i>"); } /** * Payment Option */ var selected2 = $("div.payment-option-form"); selected2.hide(); $("input[name$='payments']").on("click",function() { var test = $(this).val(); selected2.hide(); $("#" + test).show(); }); /** * Icon Change on Collapse */ $('.collapse.in').prev('.panel-heading').addClass('active'); $('.bootstrap-accordion, .bootstrap-toggle') .on('show.bs.collapse', function(a) { $(a.target).prev('.panel-heading').addClass('active'); }) .on('hide.bs.collapse', function(a) { $(a.target).prev('.panel-heading').removeClass('active'); }); /** * Back To Top */ var selected3 = $("#back-to-top"); $(window).scroll(function(){ if($(window).scrollTop() > 500){ selected3.fadeIn(200); } else{ selected3.fadeOut(200); } }); selected3.on("click",function() { $('html, body').animate({ scrollTop:0 }, '800'); return false; }); /** * Placeholder */ $("input, textarea").placeholder(); /** * Bootstrap rating */ var selected4 = $('.rating-label'); selected4.rating(); selected4.each(function () { $('<span class="label label-default"></span>') .text($(this).val() || ' ') .insertAfter(this); }); selected4.on('change', function () { $(this).next('.label').text($(this).val()); }); /** * Sign-in Modal */ var $formLogin = 
$('#login-form'); var $formLost = $('#lost-form'); var $formRegister = $('#register-form'); var $divForms = $('#modal-login-form-wrapper'); var $modalAnimateTime = 300; $('#login_register_btn').on("click", function () { modalAnimate($formLogin, $formRegister) }); $('#register_login_btn').on("click", function () { modalAnimate($formRegister, $formLogin); }); $('#login_lost_btn').on("click", function () { modalAnimate($formLogin, $formLost); }); $('#lost_login_btn').on("click", function () { modalAnimate($formLost, $formLogin); }); $('#lost_register_btn').on("click", function () { modalAnimate($formLost, $formRegister); }); function modalAnimate ($oldForm, $newForm) { var $oldH = $oldForm.height(); var $newH = $newForm.height(); $divForms.css("height",$oldH); $oldForm.fadeToggle($modalAnimateTime, function(){ $divForms.animate({height: $newH}, $modalAnimateTime, function(){ $newForm.fadeToggle($modalAnimateTime); }); }); } /** * Read more-less paragraph */ var showTotalChar = 130, showChar = "read more +", hideChar = "read less -"; $('.read-more-less').each(function() { var content = $(this).text(); if (content.length > showTotalChar)
}); $(".showmoretxt").on("click",function() { if ($(this).hasClass("sample")) { $(this).removeClass("sample"); $(this).text(showChar); } else { $(this).addClass("sample"); $(this).text(hideChar); } $(this).parent().prev().toggle(); $(this).prev().toggle(); return false; }); // SLICK SLIDER $('.responsive').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 4, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, slidesToScroll: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2, slidesToScroll: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); // SLICK SLIDER STYLE TESTIMONIAL $('.testimonial1').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 2, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 2, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); // SLICK SLIDER STYLE TESTIMONIAL $('.testimonial2').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 1, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2, } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); $('.tour-cats').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 6, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); $('.main-banner').slick(); // FADE SLIDER $('.sidefd').slick({ dots: false, infinite: true, speed: 200, slide: true, autoplay :true, cssEase: 'linear' }); $(document).on('click','.itenerary_tab',function(){ var tab_class = $(this).next().find('.tab-gallery').attr('data-tab'); var selector = "." 
+ tab_class; $( selector ).slick({ dots: false, infinite: false, arrows: true, speed: 300, slidesToShow: 5, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, slidesToScroll: 3, infinite: true, } }, { breakpoint: 600, settings: { slidesToShow: 2, slidesToScroll: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); }); $('.fading').slick({ dots: false, infinite: true, speed: 300, fade: true, arrows: true, autoplay: true, cssEase: 'linear' }); $('.video_play_pause').on('click',function(){ var video = $(this).prev(); if( video.get(0).paused ){ video.get(0).play(); $(this).find('i').removeClass('fa-play').addClass('fa-pause'); } else { video.get(0).pause(); $(this).find('i').removeClass('fa-pause').addClass('fa-play'); } }); $('#hero-video').YTPlayer({ fitToBackground: true, videoId: '4NtfDBY_DVQ' }); /*==================================== Clock Countdown ======================================*/ $('#clock-countdown').countdown('2018/9/20 12:00:00').on('update.countdown', function(event) { var $this = $(this).html(event.strftime('' + '<div class="counter-container"><div class="counter-box first"><div class="number">%-D</div><span>Day%!d</span></div>' + '<div class="counter-box"><div class="number">%H</div><span>Hours</span></div>' + '<div class="counter-box"><div class="number">%M</div><span>Minutes</span></div>' + '<div class="counter-box last"><div class="number">%S</div><span>Seconds</span></div></div>' )); }); });
{ var con = content.substr(0, showTotalChar); var hcon = content.substr(showTotalChar, content.length - showTotalChar); var txt= con + '<span class="dots">...</span><span class="morectnt"><span>' + hcon + '</span>&nbsp;&nbsp;<a href="" class="showmoretxt">' + showChar + '</a></span>'; $(this).html(txt); }
conditional_block
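This dump appears to be a sequence of fill-in-the-middle (FIM) training records: a file name, the code before the gap, the code after it, the span a model must predict, and a label such as conditional_block or identifier_name describing what was masked. Below is a minimal sketch of how one such record reassembles into the original source; the FIMRecord struct and its Go field names are my assumptions, not a schema taken from this file, and the sample record mirrors the NewConfig split labelled random_line_split later in the dump.

```go
package main

import "fmt"

// FIMRecord is an assumed shape for one record of this dump; the Go
// field names are mine, only the label values (e.g. "random_line_split")
// come from the records themselves.
type FIMRecord struct {
	FileName string
	Prefix   string // code before the masked span
	Suffix   string // code after the masked span
	Middle   string // the span a model must predict
	FIMType  string // e.g. "conditional_block", "identifier_name"
}

// Reassemble stitches the original source back together.
func Reassemble(r FIMRecord) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	// Mirrors the NewConfig record labelled random_line_split below.
	r := FIMRecord{
		FileName: "recombine.go",
		Prefix:   "func NewConfig() *Config {\n\t",
		Middle:   "return NewConfigWithID(operatorType)",
		Suffix:   "\n}",
		FIMType:  "random_line_split",
	}
	fmt.Println(Reassemble(r))
}
```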
customs.js
jQuery(function($) { "use strict"; /** * Bootstrap Tooltip */ $('[data-toggle="tooltip"]').tooltip(); /** * Sticky Header */ $(window).scroll(function(){ if( $(window).scrollTop() > 10 ){ $('.navbar').addClass('navbar-sticky') } else { $('.navbar').removeClass('navbar-sticky') } }) /** * Main Menu Slide Down Effect */ var selected = $('#navbar li'); // Mouse-enter dropdown selected.on("mouseenter", function() { $(this).find('ul').first().stop(true, true).delay(350).slideDown(500, 'easeInOutQuad'); }); // Mouse-leave dropdown selected.on("mouseleave", function() { $(this).find('ul').first().stop(true, true).delay(100).slideUp(150, 'easeInOutQuad'); }); /** * Slicknav - a Mobile Menu */ var $slicknav_label; $('.responsive-menu').slicknav({ duration: 500, easingOpen: 'easeInExpo', easingClose: 'easeOutExpo', closedSymbol: '<i class="fa fa-plus"></i>', openedSymbol: '<i class="fa fa-minus"></i>', prependTo: '#slicknav-mobile', allowParentLinks: true, label:"" }); var $slicknav_label; $('#responsive-menu').slicknav({ duration: 500, easingOpen: 'easeInExpo', easingClose: 'easeOutExpo', closedSymbol: '<i class="fa fa-plus"></i>', openedSymbol: '<i class="fa fa-minus"></i>', prependTo: '#slicknav-mobile', allowParentLinks: true, label:"" }); /** * Smooth scroll to anchor */ $('a.anchor[href*=#]:not([href=#])').on("click",function() { if (location.pathname.replace(/^\//,'') == this.pathname.replace(/^\//,'') && location.hostname == this.hostname) { var target = $(this.hash); target = target.length ? target : $('[name=' + this.hash.slice(1) +']'); if (target.length) { $('html,body').animate({ scrollTop: (target.offset().top - 70) // 70px offset for navbar menu }, 1000); return false; } } }); /** * Another Bootstrap Toggle */ $('.another-toggle').on("click",function() { if( $('h4',this).hasClass('active') ){ $(this).find('.another-toggle-content').show(); } }); $('.another-toggle h4').on("click",function() { if( $(this).hasClass('active') ){ $(this).removeClass('active'); $(this).next('.another-toggle-content').slideUp(); } else { $(this).addClass('active'); $(this).next('.another-toggle-content').slideDown(); } }); /** * Arrow for Menu has sub-menu */ if ($(window).width() > 992) { $(".navbar-arrow ul ul > li").has("ul").children("a").append("<i class='arrow-indicator fa fa-angle-right'></i>"); } /** * Payment Option */ var selected2 = $("div.payment-option-form"); selected2.hide(); $("input[name$='payments']").on("click",function() { var test = $(this).val(); selected2.hide(); $("#" + test).show(); }); /** * Icon Change on Collapse */ $('.collapse.in').prev('.panel-heading').addClass('active'); $('.bootstrap-accordion, .bootstrap-toggle') .on('show.bs.collapse', function(a) { $(a.target).prev('.panel-heading').addClass('active'); }) .on('hide.bs.collapse', function(a) { $(a.target).prev('.panel-heading').removeClass('active'); }); /** * Back To Top */ var selected3 = $("#back-to-top"); $(window).scroll(function(){ if($(window).scrollTop() > 500){ selected3.fadeIn(200); } else{ selected3.fadeOut(200); } }); selected3.on("click",function() { $('html, body').animate({ scrollTop:0 }, '800'); return false; }); /** * Placeholder */ $("input, textarea").placeholder(); /** * Bootstrap rating */ var selected4 = $('.rating-label'); selected4.rating(); selected4.each(function () { $('<span class="label label-default"></span>') .text($(this).val() || ' ') .insertAfter(this); }); selected4.on('change', function () { $(this).next('.label').text($(this).val()); }); /** * Sign-in Modal */ var $formLogin = 
$('#login-form'); var $formLost = $('#lost-form'); var $formRegister = $('#register-form'); var $divForms = $('#modal-login-form-wrapper'); var $modalAnimateTime = 300; $('#login_register_btn').on("click", function () { modalAnimate($formLogin, $formRegister) }); $('#register_login_btn').on("click", function () { modalAnimate($formRegister, $formLogin); }); $('#login_lost_btn').on("click", function () { modalAnimate($formLogin, $formLost); }); $('#lost_login_btn').on("click", function () { modalAnimate($formLost, $formLogin); }); $('#lost_register_btn').on("click", function () { modalAnimate($formLost, $formRegister); }); function
($oldForm, $newForm) { var $oldH = $oldForm.height(); var $newH = $newForm.height(); $divForms.css("height",$oldH); $oldForm.fadeToggle($modalAnimateTime, function(){ $divForms.animate({height: $newH}, $modalAnimateTime, function(){ $newForm.fadeToggle($modalAnimateTime); }); }); } /** * Read more-less paragraph */ var showTotalChar = 130, showChar = "read more +", hideChar = "read less -"; $('.read-more-less').each(function() { var content = $(this).text(); if (content.length > showTotalChar) { var con = content.substr(0, showTotalChar); var hcon = content.substr(showTotalChar, content.length - showTotalChar); var txt= con + '<span class="dots">...</span><span class="morectnt"><span>' + hcon + '</span>&nbsp;&nbsp;<a href="" class="showmoretxt">' + showChar + '</a></span>'; $(this).html(txt); } }); $(".showmoretxt").on("click",function() { if ($(this).hasClass("sample")) { $(this).removeClass("sample"); $(this).text(showChar); } else { $(this).addClass("sample"); $(this).text(hideChar); } $(this).parent().prev().toggle(); $(this).prev().toggle(); return false; }); // SLICK SLIDER $('.responsive').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 4, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, slidesToScroll: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2, slidesToScroll: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); // SLICK SLIDER STYLE TESTIMONIAL $('.testimonial1').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 2, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 2, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); // SLICK SLIDER STYLE TESTIMONIAL $('.testimonial2').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 1, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2, } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); $('.tour-cats').slick({ dots: false, infinite: true, speed: 300, slidesToShow: 6, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, infinite: true, dots: false } }, { breakpoint: 600, settings: { slidesToShow: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); $('.main-banner').slick(); // FADE SLIDER $('.sidefd').slick({ dots: false, infinite: true, speed: 200, slide: true, autoplay :true, cssEase: 'linear' }); $(document).on('click','.itenerary_tab',function(){ var tab_class = $(this).next().find('.tab-gallery').attr('data-tab'); var selector = "." 
+ tab_class; $( selector ).slick({ dots: false, infinite: false, arrows: true, speed: 300, slidesToShow: 5, slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 3, slidesToScroll: 3, infinite: true, } }, { breakpoint: 600, settings: { slidesToShow: 2, slidesToScroll: 2 } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1 } } // You can unslick at a given breakpoint now by adding: // settings: "unslick" // instead of a settings object ] }); }); $('.fading').slick({ dots: false, infinite: true, speed: 300, fade: true, arrows: true, autoplay: true, cssEase: 'linear' }); $('.video_play_pause').on('click',function(){ var video = $(this).prev(); if( video.get(0).paused ){ video.get(0).play(); $(this).find('i').removeClass('fa-play').addClass('fa-pause'); } else { video.get(0).pause(); $(this).find('i').removeClass('fa-pause').addClass('fa-play'); } }); $('#hero-video').YTPlayer({ fitToBackground: true, videoId: '4NtfDBY_DVQ' }); /*==================================== Clock Countdown ======================================*/ $('#clock-countdown').countdown('2018/9/20 12:00:00').on('update.countdown', function(event) { var $this = $(this).html(event.strftime('' + '<div class="counter-container"><div class="counter-box first"><div class="number">%-D</div><span>Day%!d</span></div>' + '<div class="counter-box"><div class="number">%H</div><span>Hours</span></div>' + '<div class="counter-box"><div class="number">%M</div><span>Minutes</span></div>' + '<div class="counter-box last"><div class="number">%S</div><span>Seconds</span></div></div>' )); }); });
modalAnimate
identifier_name
recombine.go
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package recombine // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/transformer/recombine" import ( "bytes" "context" "fmt" "sync" "time" "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" ) const ( operatorType = "recombine" defaultCombineWith = "\n" ) func init() { operator.Register(operatorType, func() operator.Builder { return NewConfig() }) } // NewConfig creates a new recombine config with default values func NewConfig() *Config { return NewConfigWithID(operatorType) } // NewConfigWithID creates a new recombine config with default values func NewConfigWithID(operatorID string) *Config { return &Config{ TransformerConfig: helper.NewTransformerConfig(operatorID, operatorType), MaxBatchSize: 1000, MaxSources: 1000, CombineWith: defaultCombineWith, OverwriteWith: "oldest", ForceFlushTimeout: 5 * time.Second, SourceIdentifier: entry.NewAttributeField("file.path"), } } // Config is the configuration of a recombine operator type Config struct { helper.TransformerConfig `mapstructure:",squash"` IsFirstEntry string `mapstructure:"is_first_entry"` IsLastEntry string `mapstructure:"is_last_entry"` MaxBatchSize int `mapstructure:"max_batch_size"` CombineField entry.Field `mapstructure:"combine_field"` CombineWith string `mapstructure:"combine_with"` SourceIdentifier entry.Field `mapstructure:"source_identifier"` OverwriteWith string `mapstructure:"overwrite_with"` ForceFlushTimeout time.Duration `mapstructure:"force_flush_period"` MaxSources int `mapstructure:"max_sources"` MaxLogSize helper.ByteSize `mapstructure:"max_log_size,omitempty"` } // Build creates a new Transformer from a config func (c *Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { transformer, err := c.TransformerConfig.Build(logger) if err != nil { return nil, fmt.Errorf("failed to build transformer config: %w", err) } if c.IsLastEntry != "" && c.IsFirstEntry != "" { return nil, fmt.Errorf("only one of is_first_entry and is_last_entry can be set") } if c.IsLastEntry == "" && c.IsFirstEntry == "" { return nil, fmt.Errorf("one of is_first_entry and is_last_entry must be set") } var matchesFirst bool var prog *vm.Program if c.IsFirstEntry != "" { matchesFirst = true prog, err = helper.ExprCompileBool(c.IsFirstEntry) if err != nil { return nil, fmt.Errorf("failed to compile is_first_entry: %w", err) } } else { matchesFirst = false prog, err = helper.ExprCompileBool(c.IsLastEntry) if err != nil { return nil, fmt.Errorf("failed to compile is_last_entry: %w", err) } } if c.CombineField.FieldInterface == nil { return nil, fmt.Errorf("missing required argument 'combine_field'") } var overwriteWithOldest bool switch c.OverwriteWith { case "newest": overwriteWithOldest = false case "oldest", "": overwriteWithOldest = true default: return nil, fmt.Errorf("invalid value '%s' for parameter 'overwrite_with'", c.OverwriteWith) } return &Transformer{ TransformerOperator: transformer, matchFirstLine: matchesFirst, prog: prog, maxBatchSize: c.MaxBatchSize, maxSources: c.MaxSources, overwriteWithOldest: overwriteWithOldest, batchMap: make(map[string]*sourceBatch), batchPool: sync.Pool{ New: func() interface{} { return &sourceBatch{ entries: 
[]*entry.Entry{}, recombined: &bytes.Buffer{}, } }, }, combineField: c.CombineField, combineWith: c.CombineWith, forceFlushTimeout: c.ForceFlushTimeout, ticker: time.NewTicker(c.ForceFlushTimeout), chClose: make(chan struct{}), sourceIdentifier: c.SourceIdentifier, maxLogSize: int64(c.MaxLogSize), }, nil } // Transformer is an operator that combines a field from consecutive log entries // into a single type Transformer struct { helper.TransformerOperator matchFirstLine bool prog *vm.Program maxBatchSize int maxSources int overwriteWithOldest bool combineField entry.Field combineWith string ticker *time.Ticker forceFlushTimeout time.Duration chClose chan struct{} sourceIdentifier entry.Field sync.Mutex batchPool sync.Pool batchMap map[string]*sourceBatch maxLogSize int64 } // sourceBatch contains the status info of a batch type sourceBatch struct { entries []*entry.Entry recombined *bytes.Buffer firstEntryObservedTime time.Time } func (r *Transformer) Start(_ operator.Persister) error { go r.flushLoop() return nil } func (r *Transformer)
() { for { select { case <-r.ticker.C: r.Lock() timeNow := time.Now() for source, batch := range r.batchMap { timeSinceFirstEntry := timeNow.Sub(batch.firstEntryObservedTime) if timeSinceFirstEntry < r.forceFlushTimeout { continue } if err := r.flushSource(source, true); err != nil { r.Errorf("there was error flushing combined logs %s", err) } } // check every 1/5 forceFlushTimeout r.ticker.Reset(r.forceFlushTimeout / 5) r.Unlock() case <-r.chClose: r.ticker.Stop() return } } } func (r *Transformer) Stop() error { r.Lock() defer r.Unlock() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r.flushUncombined(ctx) close(r.chClose) return nil } const DefaultSourceIdentifier = "DefaultSourceIdentifier" func (r *Transformer) Process(ctx context.Context, e *entry.Entry) error { // Lock the recombine operator because process can't run concurrently r.Lock() defer r.Unlock() // Get the environment for executing the expression. // In the future, we may want to provide access to the currently // batched entries so users can do comparisons to other entries // rather than just use absolute rules. env := helper.GetExprEnv(e) defer helper.PutExprEnv(env) m, err := expr.Run(r.prog, env) if err != nil { return r.HandleEntryError(ctx, e, err) } // this is guaranteed to be a boolean because of expr.AsBool matches := m.(bool) var s string err = e.Read(r.sourceIdentifier, &s) if err != nil { r.Warn("entry does not contain the source_identifier, so it may be pooled with other sources") s = DefaultSourceIdentifier } if s == "" { s = DefaultSourceIdentifier } switch { // This is the first entry in the next batch case matches && r.matchIndicatesFirst(): // Flush the existing batch err := r.flushSource(s, true) if err != nil { return err } // Add the current log to the new batch r.addToBatch(ctx, e, s) return nil // This is the last entry in a complete batch case matches && r.matchIndicatesLast(): fallthrough // When matching on first entry, never batch partial first. Just emit immediately case !matches && r.matchIndicatesFirst() && r.batchMap[s] == nil: r.addToBatch(ctx, e, s) return r.flushSource(s, true) } // This is neither the first entry of a new log, // nor the last entry of a log, so just add it to the batch r.addToBatch(ctx, e, s) return nil } func (r *Transformer) matchIndicatesFirst() bool { return r.matchFirstLine } func (r *Transformer) matchIndicatesLast() bool { return !r.matchFirstLine } // addToBatch adds the current entry to the current batch of entries that will be combined func (r *Transformer) addToBatch(_ context.Context, e *entry.Entry, source string) { batch, ok := r.batchMap[source] if !ok { batch = r.addNewBatch(source, e) if len(r.batchMap) >= r.maxSources { r.Error("Batched source exceeds max source size. Flushing all batched logs. Consider increasing max_sources parameter") r.flushUncombined(context.Background()) return } } else { // If the length of the batch is 0, this batch was flushed previously due to triggering size limit. 
// In this case, the firstEntryObservedTime should be updated to reset the timeout if len(batch.entries) == 0 { batch.firstEntryObservedTime = e.ObservedTimestamp } batch.entries = append(batch.entries, e) } // Combine the combineField of each entry in the batch, // separated by newlines var s string err := e.Read(r.combineField, &s) if err != nil { r.Errorf("entry does not contain the combine_field") return } if batch.recombined.Len() > 0 { batch.recombined.WriteString(r.combineWith) } batch.recombined.WriteString(s) if (r.maxLogSize > 0 && int64(batch.recombined.Len()) > r.maxLogSize) || len(batch.entries) >= r.maxBatchSize { if err := r.flushSource(source, false); err != nil { r.Errorf("there was error flushing combined logs %s", err) } } } // flushUncombined flushes all the logs in the batch individually to the // next output in the pipeline. This is only used when there is an error // or at shutdown to avoid dropping the logs. func (r *Transformer) flushUncombined(ctx context.Context) { for source := range r.batchMap { for _, entry := range r.batchMap[source].entries { r.Write(ctx, entry) } r.removeBatch(source) } r.ticker.Reset(r.forceFlushTimeout) } // flushSource combines the entries currently in the batch into a single entry, // then forwards them to the next operator in the pipeline func (r *Transformer) flushSource(source string, deleteSource bool) error { batch := r.batchMap[source] // Skip flushing a combined log if the batch is empty if batch == nil { return nil } if len(batch.entries) == 0 { r.removeBatch(source) return nil } // Choose which entry we want to keep the rest of the fields from var base *entry.Entry entries := batch.entries if r.overwriteWithOldest { base = entries[0] } else { base = entries[len(entries)-1] } // Set the recombined field on the entry err := base.Set(r.combineField, batch.recombined.String()) if err != nil { return err } r.Write(context.Background(), base) if deleteSource { r.removeBatch(source) } else { batch.entries = batch.entries[:0] batch.recombined.Reset() } return nil } // addNewBatch creates a new batch for the given source and adds the entry to it. func (r *Transformer) addNewBatch(source string, e *entry.Entry) *sourceBatch { batch := r.batchPool.Get().(*sourceBatch) batch.entries = append(batch.entries[:0], e) batch.recombined.Reset() batch.firstEntryObservedTime = e.ObservedTimestamp r.batchMap[source] = batch return batch } // removeBatch removes the batch for the given source. func (r *Transformer) removeBatch(source string) { batch := r.batchMap[source] delete(r.batchMap, source) r.batchPool.Put(batch) }
flushLoop
identifier_name
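The span filled in by this record is the method name flushLoop, whose body is visible in the surrounding context: it wakes on a ticker, flushes every batch whose first entry is older than the force-flush timeout, and re-arms the ticker at one fifth of that timeout. Here is a standalone sketch of the same pattern with simplified, hypothetical types (staleFlusher is not part of the original package):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// staleFlusher is a hypothetical, trimmed-down analogue of the
// Transformer's flush loop: batches are keyed by source and flushed
// once their first entry is older than the timeout.
type staleFlusher struct {
	sync.Mutex
	timeout time.Duration
	ticker  *time.Ticker
	done    chan struct{}
	batches map[string]time.Time // source -> time of first entry
}

func (f *staleFlusher) loop() {
	for {
		select {
		case <-f.ticker.C:
			f.Lock()
			now := time.Now()
			for source, first := range f.batches {
				if now.Sub(first) < f.timeout {
					continue
				}
				fmt.Println("force-flushing stale source:", source)
				delete(f.batches, source)
			}
			// Re-check at 1/5 of the timeout, as the original loop does.
			f.ticker.Reset(f.timeout / 5)
			f.Unlock()
		case <-f.done:
			f.ticker.Stop()
			return
		}
	}
}

func main() {
	f := &staleFlusher{
		timeout: 100 * time.Millisecond,
		ticker:  time.NewTicker(100 * time.Millisecond),
		done:    make(chan struct{}),
		batches: map[string]time.Time{"file.log": time.Now()},
	}
	go f.loop()
	time.Sleep(300 * time.Millisecond) // long enough for one forced flush
	close(f.done)
}
```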
recombine.go
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package recombine // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/transformer/recombine" import ( "bytes" "context" "fmt" "sync" "time" "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" ) const ( operatorType = "recombine" defaultCombineWith = "\n" ) func init() { operator.Register(operatorType, func() operator.Builder { return NewConfig() }) } // NewConfig creates a new recombine config with default values func NewConfig() *Config {
} // NewConfigWithID creates a new recombine config with default values func NewConfigWithID(operatorID string) *Config { return &Config{ TransformerConfig: helper.NewTransformerConfig(operatorID, operatorType), MaxBatchSize: 1000, MaxSources: 1000, CombineWith: defaultCombineWith, OverwriteWith: "oldest", ForceFlushTimeout: 5 * time.Second, SourceIdentifier: entry.NewAttributeField("file.path"), } } // Config is the configuration of a recombine operator type Config struct { helper.TransformerConfig `mapstructure:",squash"` IsFirstEntry string `mapstructure:"is_first_entry"` IsLastEntry string `mapstructure:"is_last_entry"` MaxBatchSize int `mapstructure:"max_batch_size"` CombineField entry.Field `mapstructure:"combine_field"` CombineWith string `mapstructure:"combine_with"` SourceIdentifier entry.Field `mapstructure:"source_identifier"` OverwriteWith string `mapstructure:"overwrite_with"` ForceFlushTimeout time.Duration `mapstructure:"force_flush_period"` MaxSources int `mapstructure:"max_sources"` MaxLogSize helper.ByteSize `mapstructure:"max_log_size,omitempty"` } // Build creates a new Transformer from a config func (c *Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { transformer, err := c.TransformerConfig.Build(logger) if err != nil { return nil, fmt.Errorf("failed to build transformer config: %w", err) } if c.IsLastEntry != "" && c.IsFirstEntry != "" { return nil, fmt.Errorf("only one of is_first_entry and is_last_entry can be set") } if c.IsLastEntry == "" && c.IsFirstEntry == "" { return nil, fmt.Errorf("one of is_first_entry and is_last_entry must be set") } var matchesFirst bool var prog *vm.Program if c.IsFirstEntry != "" { matchesFirst = true prog, err = helper.ExprCompileBool(c.IsFirstEntry) if err != nil { return nil, fmt.Errorf("failed to compile is_first_entry: %w", err) } } else { matchesFirst = false prog, err = helper.ExprCompileBool(c.IsLastEntry) if err != nil { return nil, fmt.Errorf("failed to compile is_last_entry: %w", err) } } if c.CombineField.FieldInterface == nil { return nil, fmt.Errorf("missing required argument 'combine_field'") } var overwriteWithOldest bool switch c.OverwriteWith { case "newest": overwriteWithOldest = false case "oldest", "": overwriteWithOldest = true default: return nil, fmt.Errorf("invalid value '%s' for parameter 'overwrite_with'", c.OverwriteWith) } return &Transformer{ TransformerOperator: transformer, matchFirstLine: matchesFirst, prog: prog, maxBatchSize: c.MaxBatchSize, maxSources: c.MaxSources, overwriteWithOldest: overwriteWithOldest, batchMap: make(map[string]*sourceBatch), batchPool: sync.Pool{ New: func() interface{} { return &sourceBatch{ entries: []*entry.Entry{}, recombined: &bytes.Buffer{}, } }, }, combineField: c.CombineField, combineWith: c.CombineWith, forceFlushTimeout: c.ForceFlushTimeout, ticker: time.NewTicker(c.ForceFlushTimeout), chClose: make(chan struct{}), sourceIdentifier: c.SourceIdentifier, maxLogSize: int64(c.MaxLogSize), }, nil } // Transformer is an operator that combines a field from consecutive log entries // into a single type Transformer struct { helper.TransformerOperator matchFirstLine bool prog *vm.Program maxBatchSize int maxSources int overwriteWithOldest bool combineField entry.Field combineWith string ticker *time.Ticker forceFlushTimeout time.Duration chClose chan struct{} sourceIdentifier entry.Field sync.Mutex batchPool sync.Pool batchMap map[string]*sourceBatch maxLogSize int64 } // sourceBatch contains the status info of a batch type sourceBatch struct { entries 
[]*entry.Entry recombined *bytes.Buffer firstEntryObservedTime time.Time } func (r *Transformer) Start(_ operator.Persister) error { go r.flushLoop() return nil } func (r *Transformer) flushLoop() { for { select { case <-r.ticker.C: r.Lock() timeNow := time.Now() for source, batch := range r.batchMap { timeSinceFirstEntry := timeNow.Sub(batch.firstEntryObservedTime) if timeSinceFirstEntry < r.forceFlushTimeout { continue } if err := r.flushSource(source, true); err != nil { r.Errorf("there was error flushing combined logs %s", err) } } // check every 1/5 forceFlushTimeout r.ticker.Reset(r.forceFlushTimeout / 5) r.Unlock() case <-r.chClose: r.ticker.Stop() return } } } func (r *Transformer) Stop() error { r.Lock() defer r.Unlock() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r.flushUncombined(ctx) close(r.chClose) return nil } const DefaultSourceIdentifier = "DefaultSourceIdentifier" func (r *Transformer) Process(ctx context.Context, e *entry.Entry) error { // Lock the recombine operator because process can't run concurrently r.Lock() defer r.Unlock() // Get the environment for executing the expression. // In the future, we may want to provide access to the currently // batched entries so users can do comparisons to other entries // rather than just use absolute rules. env := helper.GetExprEnv(e) defer helper.PutExprEnv(env) m, err := expr.Run(r.prog, env) if err != nil { return r.HandleEntryError(ctx, e, err) } // this is guaranteed to be a boolean because of expr.AsBool matches := m.(bool) var s string err = e.Read(r.sourceIdentifier, &s) if err != nil { r.Warn("entry does not contain the source_identifier, so it may be pooled with other sources") s = DefaultSourceIdentifier } if s == "" { s = DefaultSourceIdentifier } switch { // This is the first entry in the next batch case matches && r.matchIndicatesFirst(): // Flush the existing batch err := r.flushSource(s, true) if err != nil { return err } // Add the current log to the new batch r.addToBatch(ctx, e, s) return nil // This is the last entry in a complete batch case matches && r.matchIndicatesLast(): fallthrough // When matching on first entry, never batch partial first. Just emit immediately case !matches && r.matchIndicatesFirst() && r.batchMap[s] == nil: r.addToBatch(ctx, e, s) return r.flushSource(s, true) } // This is neither the first entry of a new log, // nor the last entry of a log, so just add it to the batch r.addToBatch(ctx, e, s) return nil } func (r *Transformer) matchIndicatesFirst() bool { return r.matchFirstLine } func (r *Transformer) matchIndicatesLast() bool { return !r.matchFirstLine } // addToBatch adds the current entry to the current batch of entries that will be combined func (r *Transformer) addToBatch(_ context.Context, e *entry.Entry, source string) { batch, ok := r.batchMap[source] if !ok { batch = r.addNewBatch(source, e) if len(r.batchMap) >= r.maxSources { r.Error("Batched source exceeds max source size. Flushing all batched logs. Consider increasing max_sources parameter") r.flushUncombined(context.Background()) return } } else { // If the length of the batch is 0, this batch was flushed previously due to triggering size limit. 
// In this case, the firstEntryObservedTime should be updated to reset the timeout if len(batch.entries) == 0 { batch.firstEntryObservedTime = e.ObservedTimestamp } batch.entries = append(batch.entries, e) } // Combine the combineField of each entry in the batch, // separated by newlines var s string err := e.Read(r.combineField, &s) if err != nil { r.Errorf("entry does not contain the combine_field") return } if batch.recombined.Len() > 0 { batch.recombined.WriteString(r.combineWith) } batch.recombined.WriteString(s) if (r.maxLogSize > 0 && int64(batch.recombined.Len()) > r.maxLogSize) || len(batch.entries) >= r.maxBatchSize { if err := r.flushSource(source, false); err != nil { r.Errorf("there was error flushing combined logs %s", err) } } } // flushUncombined flushes all the logs in the batch individually to the // next output in the pipeline. This is only used when there is an error // or at shutdown to avoid dropping the logs. func (r *Transformer) flushUncombined(ctx context.Context) { for source := range r.batchMap { for _, entry := range r.batchMap[source].entries { r.Write(ctx, entry) } r.removeBatch(source) } r.ticker.Reset(r.forceFlushTimeout) } // flushSource combines the entries currently in the batch into a single entry, // then forwards them to the next operator in the pipeline func (r *Transformer) flushSource(source string, deleteSource bool) error { batch := r.batchMap[source] // Skip flushing a combined log if the batch is empty if batch == nil { return nil } if len(batch.entries) == 0 { r.removeBatch(source) return nil } // Choose which entry we want to keep the rest of the fields from var base *entry.Entry entries := batch.entries if r.overwriteWithOldest { base = entries[0] } else { base = entries[len(entries)-1] } // Set the recombined field on the entry err := base.Set(r.combineField, batch.recombined.String()) if err != nil { return err } r.Write(context.Background(), base) if deleteSource { r.removeBatch(source) } else { batch.entries = batch.entries[:0] batch.recombined.Reset() } return nil } // addNewBatch creates a new batch for the given source and adds the entry to it. func (r *Transformer) addNewBatch(source string, e *entry.Entry) *sourceBatch { batch := r.batchPool.Get().(*sourceBatch) batch.entries = append(batch.entries[:0], e) batch.recombined.Reset() batch.firstEntryObservedTime = e.ObservedTimestamp r.batchMap[source] = batch return batch } // removeBatch removes the batch for the given source. func (r *Transformer) removeBatch(source string) { batch := r.batchMap[source] delete(r.batchMap, source) r.batchPool.Put(batch) }
return NewConfigWithID(operatorType)
random_line_split
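Here the masked span is the single-line body of NewConfig, which delegates to NewConfigWithID so that both constructors share one set of defaults. A minimal sketch of that delegation pattern follows, with a deliberately trimmed-down config type; the field subset is an assumption mirroring a few of the defaults visible above:

```go
package main

import (
	"fmt"
	"time"
)

// config is a deliberately reduced stand-in for the operator's Config;
// only a few of the defaults from NewConfigWithID are mirrored here.
type config struct {
	OperatorID        string
	MaxBatchSize      int
	MaxSources        int
	CombineWith       string
	ForceFlushTimeout time.Duration
}

// newConfig delegates to newConfigWithID with the canonical operator
// type as the ID, so the defaults live in exactly one place.
func newConfig() *config { return newConfigWithID("recombine") }

func newConfigWithID(id string) *config {
	return &config{
		OperatorID:        id,
		MaxBatchSize:      1000,
		MaxSources:        1000,
		CombineWith:       "\n",
		ForceFlushTimeout: 5 * time.Second,
	}
}

func main() {
	fmt.Printf("%+v\n", *newConfig())
}
```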
recombine.go
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package recombine // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/transformer/recombine" import ( "bytes" "context" "fmt" "sync" "time" "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" ) const ( operatorType = "recombine" defaultCombineWith = "\n" ) func init() { operator.Register(operatorType, func() operator.Builder { return NewConfig() }) } // NewConfig creates a new recombine config with default values func NewConfig() *Config { return NewConfigWithID(operatorType) } // NewConfigWithID creates a new recombine config with default values func NewConfigWithID(operatorID string) *Config { return &Config{ TransformerConfig: helper.NewTransformerConfig(operatorID, operatorType), MaxBatchSize: 1000, MaxSources: 1000, CombineWith: defaultCombineWith, OverwriteWith: "oldest", ForceFlushTimeout: 5 * time.Second, SourceIdentifier: entry.NewAttributeField("file.path"), } } // Config is the configuration of a recombine operator type Config struct { helper.TransformerConfig `mapstructure:",squash"` IsFirstEntry string `mapstructure:"is_first_entry"` IsLastEntry string `mapstructure:"is_last_entry"` MaxBatchSize int `mapstructure:"max_batch_size"` CombineField entry.Field `mapstructure:"combine_field"` CombineWith string `mapstructure:"combine_with"` SourceIdentifier entry.Field `mapstructure:"source_identifier"` OverwriteWith string `mapstructure:"overwrite_with"` ForceFlushTimeout time.Duration `mapstructure:"force_flush_period"` MaxSources int `mapstructure:"max_sources"` MaxLogSize helper.ByteSize `mapstructure:"max_log_size,omitempty"` } // Build creates a new Transformer from a config func (c *Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { transformer, err := c.TransformerConfig.Build(logger) if err != nil { return nil, fmt.Errorf("failed to build transformer config: %w", err) } if c.IsLastEntry != "" && c.IsFirstEntry != "" { return nil, fmt.Errorf("only one of is_first_entry and is_last_entry can be set") } if c.IsLastEntry == "" && c.IsFirstEntry == "" { return nil, fmt.Errorf("one of is_first_entry and is_last_entry must be set") } var matchesFirst bool var prog *vm.Program if c.IsFirstEntry != "" { matchesFirst = true prog, err = helper.ExprCompileBool(c.IsFirstEntry) if err != nil { return nil, fmt.Errorf("failed to compile is_first_entry: %w", err) } } else { matchesFirst = false prog, err = helper.ExprCompileBool(c.IsLastEntry) if err != nil { return nil, fmt.Errorf("failed to compile is_last_entry: %w", err) } } if c.CombineField.FieldInterface == nil { return nil, fmt.Errorf("missing required argument 'combine_field'") } var overwriteWithOldest bool switch c.OverwriteWith { case "newest": overwriteWithOldest = false case "oldest", "": overwriteWithOldest = true default: return nil, fmt.Errorf("invalid value '%s' for parameter 'overwrite_with'", c.OverwriteWith) } return &Transformer{ TransformerOperator: transformer, matchFirstLine: matchesFirst, prog: prog, maxBatchSize: c.MaxBatchSize, maxSources: c.MaxSources, overwriteWithOldest: overwriteWithOldest, batchMap: make(map[string]*sourceBatch), batchPool: sync.Pool{ New: func() interface{} { return &sourceBatch{ entries: 
[]*entry.Entry{}, recombined: &bytes.Buffer{}, } }, }, combineField: c.CombineField, combineWith: c.CombineWith, forceFlushTimeout: c.ForceFlushTimeout, ticker: time.NewTicker(c.ForceFlushTimeout), chClose: make(chan struct{}), sourceIdentifier: c.SourceIdentifier, maxLogSize: int64(c.MaxLogSize), }, nil } // Transformer is an operator that combines a field from consecutive log entries // into a single type Transformer struct { helper.TransformerOperator matchFirstLine bool prog *vm.Program maxBatchSize int maxSources int overwriteWithOldest bool combineField entry.Field combineWith string ticker *time.Ticker forceFlushTimeout time.Duration chClose chan struct{} sourceIdentifier entry.Field sync.Mutex batchPool sync.Pool batchMap map[string]*sourceBatch maxLogSize int64 } // sourceBatch contains the status info of a batch type sourceBatch struct { entries []*entry.Entry recombined *bytes.Buffer firstEntryObservedTime time.Time } func (r *Transformer) Start(_ operator.Persister) error { go r.flushLoop() return nil } func (r *Transformer) flushLoop() { for { select { case <-r.ticker.C: r.Lock() timeNow := time.Now() for source, batch := range r.batchMap { timeSinceFirstEntry := timeNow.Sub(batch.firstEntryObservedTime) if timeSinceFirstEntry < r.forceFlushTimeout { continue } if err := r.flushSource(source, true); err != nil { r.Errorf("there was error flushing combined logs %s", err) } } // check every 1/5 forceFlushTimeout r.ticker.Reset(r.forceFlushTimeout / 5) r.Unlock() case <-r.chClose: r.ticker.Stop() return } } } func (r *Transformer) Stop() error { r.Lock() defer r.Unlock() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r.flushUncombined(ctx) close(r.chClose) return nil } const DefaultSourceIdentifier = "DefaultSourceIdentifier" func (r *Transformer) Process(ctx context.Context, e *entry.Entry) error
func (r *Transformer) matchIndicatesFirst() bool { return r.matchFirstLine } func (r *Transformer) matchIndicatesLast() bool { return !r.matchFirstLine } // addToBatch adds the current entry to the current batch of entries that will be combined func (r *Transformer) addToBatch(_ context.Context, e *entry.Entry, source string) { batch, ok := r.batchMap[source] if !ok { batch = r.addNewBatch(source, e) if len(r.batchMap) >= r.maxSources { r.Error("Batched source exceeds max source size. Flushing all batched logs. Consider increasing max_sources parameter") r.flushUncombined(context.Background()) return } } else { // If the length of the batch is 0, this batch was flushed previously due to triggering size limit. // In this case, the firstEntryObservedTime should be updated to reset the timeout if len(batch.entries) == 0 { batch.firstEntryObservedTime = e.ObservedTimestamp } batch.entries = append(batch.entries, e) } // Combine the combineField of each entry in the batch, // separated by newlines var s string err := e.Read(r.combineField, &s) if err != nil { r.Errorf("entry does not contain the combine_field") return } if batch.recombined.Len() > 0 { batch.recombined.WriteString(r.combineWith) } batch.recombined.WriteString(s) if (r.maxLogSize > 0 && int64(batch.recombined.Len()) > r.maxLogSize) || len(batch.entries) >= r.maxBatchSize { if err := r.flushSource(source, false); err != nil { r.Errorf("there was error flushing combined logs %s", err) } } } // flushUncombined flushes all the logs in the batch individually to the // next output in the pipeline. This is only used when there is an error // or at shutdown to avoid dropping the logs. func (r *Transformer) flushUncombined(ctx context.Context) { for source := range r.batchMap { for _, entry := range r.batchMap[source].entries { r.Write(ctx, entry) } r.removeBatch(source) } r.ticker.Reset(r.forceFlushTimeout) } // flushSource combines the entries currently in the batch into a single entry, // then forwards them to the next operator in the pipeline func (r *Transformer) flushSource(source string, deleteSource bool) error { batch := r.batchMap[source] // Skip flushing a combined log if the batch is empty if batch == nil { return nil } if len(batch.entries) == 0 { r.removeBatch(source) return nil } // Choose which entry we want to keep the rest of the fields from var base *entry.Entry entries := batch.entries if r.overwriteWithOldest { base = entries[0] } else { base = entries[len(entries)-1] } // Set the recombined field on the entry err := base.Set(r.combineField, batch.recombined.String()) if err != nil { return err } r.Write(context.Background(), base) if deleteSource { r.removeBatch(source) } else { batch.entries = batch.entries[:0] batch.recombined.Reset() } return nil } // addNewBatch creates a new batch for the given source and adds the entry to it. func (r *Transformer) addNewBatch(source string, e *entry.Entry) *sourceBatch { batch := r.batchPool.Get().(*sourceBatch) batch.entries = append(batch.entries[:0], e) batch.recombined.Reset() batch.firstEntryObservedTime = e.ObservedTimestamp r.batchMap[source] = batch return batch } // removeBatch removes the batch for the given source. func (r *Transformer) removeBatch(source string) { batch := r.batchMap[source] delete(r.batchMap, source) r.batchPool.Put(batch) }
{ // Lock the recombine operator because process can't run concurrently r.Lock() defer r.Unlock() // Get the environment for executing the expression. // In the future, we may want to provide access to the currently // batched entries so users can do comparisons to other entries // rather than just use absolute rules. env := helper.GetExprEnv(e) defer helper.PutExprEnv(env) m, err := expr.Run(r.prog, env) if err != nil { return r.HandleEntryError(ctx, e, err) } // this is guaranteed to be a boolean because of expr.AsBool matches := m.(bool) var s string err = e.Read(r.sourceIdentifier, &s) if err != nil { r.Warn("entry does not contain the source_identifier, so it may be pooled with other sources") s = DefaultSourceIdentifier } if s == "" { s = DefaultSourceIdentifier } switch { // This is the first entry in the next batch case matches && r.matchIndicatesFirst(): // Flush the existing batch err := r.flushSource(s, true) if err != nil { return err } // Add the current log to the new batch r.addToBatch(ctx, e, s) return nil // This is the last entry in a complete batch case matches && r.matchIndicatesLast(): fallthrough // When matching on first entry, never batch partial first. Just emit immediately case !matches && r.matchIndicatesFirst() && r.batchMap[s] == nil: r.addToBatch(ctx, e, s) return r.flushSource(s, true) } // This is neither the first entry of a new log, // nor the last entry of a log, so just add it to the batch r.addToBatch(ctx, e, s) return nil }
identifier_body
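The masked span in this record is the whole body of Process: evaluate one compiled expression per entry, then branch. A match in first-entry mode flushes the previous batch and starts a new one, a match in last-entry mode flushes immediately, and everything else is appended to the current batch. Below is a self-contained sketch of the first-entry branch with a regexp standing in for the compiled expr program; the recombiner type is hypothetical and handles a single source only:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// recombiner is a hypothetical single-source reduction of the operator:
// firstLine plays the role of a compiled is_first_entry expression.
type recombiner struct {
	firstLine *regexp.Regexp
	batch     []string
}

// flush emits the current batch joined with newlines, like the
// combine_with default, then resets it.
func (r *recombiner) flush() {
	if len(r.batch) == 0 {
		return
	}
	fmt.Printf("emit: %q\n", strings.Join(r.batch, "\n"))
	r.batch = r.batch[:0]
}

// process mirrors the first-entry branch of Process: a match means a new
// log starts, so the previous batch is flushed before appending.
func (r *recombiner) process(line string) {
	if r.firstLine.MatchString(line) {
		r.flush()
	}
	r.batch = append(r.batch, line)
}

func main() {
	// Treat unindented lines as the start of a new multiline log.
	r := &recombiner{firstLine: regexp.MustCompile(`^\S`)}
	for _, line := range []string{"panic: boom", "  at main.go:10", "ok"} {
		r.process(line)
	}
	r.flush()
}
```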
recombine.go
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package recombine // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/transformer/recombine" import ( "bytes" "context" "fmt" "sync" "time" "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" ) const ( operatorType = "recombine" defaultCombineWith = "\n" ) func init() { operator.Register(operatorType, func() operator.Builder { return NewConfig() }) } // NewConfig creates a new recombine config with default values func NewConfig() *Config { return NewConfigWithID(operatorType) } // NewConfigWithID creates a new recombine config with default values func NewConfigWithID(operatorID string) *Config { return &Config{ TransformerConfig: helper.NewTransformerConfig(operatorID, operatorType), MaxBatchSize: 1000, MaxSources: 1000, CombineWith: defaultCombineWith, OverwriteWith: "oldest", ForceFlushTimeout: 5 * time.Second, SourceIdentifier: entry.NewAttributeField("file.path"), } } // Config is the configuration of a recombine operator type Config struct { helper.TransformerConfig `mapstructure:",squash"` IsFirstEntry string `mapstructure:"is_first_entry"` IsLastEntry string `mapstructure:"is_last_entry"` MaxBatchSize int `mapstructure:"max_batch_size"` CombineField entry.Field `mapstructure:"combine_field"` CombineWith string `mapstructure:"combine_with"` SourceIdentifier entry.Field `mapstructure:"source_identifier"` OverwriteWith string `mapstructure:"overwrite_with"` ForceFlushTimeout time.Duration `mapstructure:"force_flush_period"` MaxSources int `mapstructure:"max_sources"` MaxLogSize helper.ByteSize `mapstructure:"max_log_size,omitempty"` } // Build creates a new Transformer from a config func (c *Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { transformer, err := c.TransformerConfig.Build(logger) if err != nil { return nil, fmt.Errorf("failed to build transformer config: %w", err) } if c.IsLastEntry != "" && c.IsFirstEntry != ""
if c.IsLastEntry == "" && c.IsFirstEntry == "" { return nil, fmt.Errorf("one of is_first_entry and is_last_entry must be set") } var matchesFirst bool var prog *vm.Program if c.IsFirstEntry != "" { matchesFirst = true prog, err = helper.ExprCompileBool(c.IsFirstEntry) if err != nil { return nil, fmt.Errorf("failed to compile is_first_entry: %w", err) } } else { matchesFirst = false prog, err = helper.ExprCompileBool(c.IsLastEntry) if err != nil { return nil, fmt.Errorf("failed to compile is_last_entry: %w", err) } } if c.CombineField.FieldInterface == nil { return nil, fmt.Errorf("missing required argument 'combine_field'") } var overwriteWithOldest bool switch c.OverwriteWith { case "newest": overwriteWithOldest = false case "oldest", "": overwriteWithOldest = true default: return nil, fmt.Errorf("invalid value '%s' for parameter 'overwrite_with'", c.OverwriteWith) } return &Transformer{ TransformerOperator: transformer, matchFirstLine: matchesFirst, prog: prog, maxBatchSize: c.MaxBatchSize, maxSources: c.MaxSources, overwriteWithOldest: overwriteWithOldest, batchMap: make(map[string]*sourceBatch), batchPool: sync.Pool{ New: func() interface{} { return &sourceBatch{ entries: []*entry.Entry{}, recombined: &bytes.Buffer{}, } }, }, combineField: c.CombineField, combineWith: c.CombineWith, forceFlushTimeout: c.ForceFlushTimeout, ticker: time.NewTicker(c.ForceFlushTimeout), chClose: make(chan struct{}), sourceIdentifier: c.SourceIdentifier, maxLogSize: int64(c.MaxLogSize), }, nil } // Transformer is an operator that combines a field from consecutive log entries // into a single type Transformer struct { helper.TransformerOperator matchFirstLine bool prog *vm.Program maxBatchSize int maxSources int overwriteWithOldest bool combineField entry.Field combineWith string ticker *time.Ticker forceFlushTimeout time.Duration chClose chan struct{} sourceIdentifier entry.Field sync.Mutex batchPool sync.Pool batchMap map[string]*sourceBatch maxLogSize int64 } // sourceBatch contains the status info of a batch type sourceBatch struct { entries []*entry.Entry recombined *bytes.Buffer firstEntryObservedTime time.Time } func (r *Transformer) Start(_ operator.Persister) error { go r.flushLoop() return nil } func (r *Transformer) flushLoop() { for { select { case <-r.ticker.C: r.Lock() timeNow := time.Now() for source, batch := range r.batchMap { timeSinceFirstEntry := timeNow.Sub(batch.firstEntryObservedTime) if timeSinceFirstEntry < r.forceFlushTimeout { continue } if err := r.flushSource(source, true); err != nil { r.Errorf("there was error flushing combined logs %s", err) } } // check every 1/5 forceFlushTimeout r.ticker.Reset(r.forceFlushTimeout / 5) r.Unlock() case <-r.chClose: r.ticker.Stop() return } } } func (r *Transformer) Stop() error { r.Lock() defer r.Unlock() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r.flushUncombined(ctx) close(r.chClose) return nil } const DefaultSourceIdentifier = "DefaultSourceIdentifier" func (r *Transformer) Process(ctx context.Context, e *entry.Entry) error { // Lock the recombine operator because process can't run concurrently r.Lock() defer r.Unlock() // Get the environment for executing the expression. // In the future, we may want to provide access to the currently // batched entries so users can do comparisons to other entries // rather than just use absolute rules. 
env := helper.GetExprEnv(e) defer helper.PutExprEnv(env) m, err := expr.Run(r.prog, env) if err != nil { return r.HandleEntryError(ctx, e, err) } // this is guaranteed to be a boolean because of expr.AsBool matches := m.(bool) var s string err = e.Read(r.sourceIdentifier, &s) if err != nil { r.Warn("entry does not contain the source_identifier, so it may be pooled with other sources") s = DefaultSourceIdentifier } if s == "" { s = DefaultSourceIdentifier } switch { // This is the first entry in the next batch case matches && r.matchIndicatesFirst(): // Flush the existing batch err := r.flushSource(s, true) if err != nil { return err } // Add the current log to the new batch r.addToBatch(ctx, e, s) return nil // This is the last entry in a complete batch case matches && r.matchIndicatesLast(): fallthrough // When matching on first entry, never batch partial first. Just emit immediately case !matches && r.matchIndicatesFirst() && r.batchMap[s] == nil: r.addToBatch(ctx, e, s) return r.flushSource(s, true) } // This is neither the first entry of a new log, // nor the last entry of a log, so just add it to the batch r.addToBatch(ctx, e, s) return nil } func (r *Transformer) matchIndicatesFirst() bool { return r.matchFirstLine } func (r *Transformer) matchIndicatesLast() bool { return !r.matchFirstLine } // addToBatch adds the current entry to the current batch of entries that will be combined func (r *Transformer) addToBatch(_ context.Context, e *entry.Entry, source string) { batch, ok := r.batchMap[source] if !ok { batch = r.addNewBatch(source, e) if len(r.batchMap) >= r.maxSources { r.Error("Batched source exceeds max source size. Flushing all batched logs. Consider increasing max_sources parameter") r.flushUncombined(context.Background()) return } } else { // If the length of the batch is 0, this batch was flushed previously due to triggering size limit. // In this case, the firstEntryObservedTime should be updated to reset the timeout if len(batch.entries) == 0 { batch.firstEntryObservedTime = e.ObservedTimestamp } batch.entries = append(batch.entries, e) } // Combine the combineField of each entry in the batch, // separated by newlines var s string err := e.Read(r.combineField, &s) if err != nil { r.Errorf("entry does not contain the combine_field") return } if batch.recombined.Len() > 0 { batch.recombined.WriteString(r.combineWith) } batch.recombined.WriteString(s) if (r.maxLogSize > 0 && int64(batch.recombined.Len()) > r.maxLogSize) || len(batch.entries) >= r.maxBatchSize { if err := r.flushSource(source, false); err != nil { r.Errorf("there was error flushing combined logs %s", err) } } } // flushUncombined flushes all the logs in the batch individually to the // next output in the pipeline. This is only used when there is an error // or at shutdown to avoid dropping the logs. 
func (r *Transformer) flushUncombined(ctx context.Context) { for source := range r.batchMap { for _, entry := range r.batchMap[source].entries { r.Write(ctx, entry) } r.removeBatch(source) } r.ticker.Reset(r.forceFlushTimeout) } // flushSource combines the entries currently in the batch into a single entry, // then forwards them to the next operator in the pipeline func (r *Transformer) flushSource(source string, deleteSource bool) error { batch := r.batchMap[source] // Skip flushing a combined log if the batch is empty if batch == nil { return nil } if len(batch.entries) == 0 { r.removeBatch(source) return nil } // Choose which entry we want to keep the rest of the fields from var base *entry.Entry entries := batch.entries if r.overwriteWithOldest { base = entries[0] } else { base = entries[len(entries)-1] } // Set the recombined field on the entry err := base.Set(r.combineField, batch.recombined.String()) if err != nil { return err } r.Write(context.Background(), base) if deleteSource { r.removeBatch(source) } else { batch.entries = batch.entries[:0] batch.recombined.Reset() } return nil } // addNewBatch creates a new batch for the given source and adds the entry to it. func (r *Transformer) addNewBatch(source string, e *entry.Entry) *sourceBatch { batch := r.batchPool.Get().(*sourceBatch) batch.entries = append(batch.entries[:0], e) batch.recombined.Reset() batch.firstEntryObservedTime = e.ObservedTimestamp r.batchMap[source] = batch return batch } // removeBatch removes the batch for the given source. func (r *Transformer) removeBatch(source string) { batch := r.batchMap[source] delete(r.batchMap, source) r.batchPool.Put(batch) }
{ return nil, fmt.Errorf("only one of is_first_entry and is_last_entry can be set") }
conditional_block
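The conditional block recovered here is the guard in Build that rejects configs setting both is_first_entry and is_last_entry; the suffix shows the complementary guard rejecting configs that set neither. A small sketch of that exactly-one-of validation in isolation (validateMatchers is a hypothetical helper, not part of the original package):

```go
package main

import (
	"errors"
	"fmt"
)

// validateMatchers mirrors the two guards in Build: exactly one of the
// matcher expressions must be set.
func validateMatchers(isFirstEntry, isLastEntry string) error {
	if isFirstEntry != "" && isLastEntry != "" {
		return errors.New("only one of is_first_entry and is_last_entry can be set")
	}
	if isFirstEntry == "" && isLastEntry == "" {
		return errors.New("one of is_first_entry and is_last_entry must be set")
	}
	return nil
}

func main() {
	fmt.Println(validateMatchers("body matches 'start'", "")) // <nil>
	fmt.Println(validateMatchers("", ""))                     // error: neither set
}
```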
package.go
package document import ( "bytes" "go/ast" "go/doc" "go/parser" "go/token" "strings" "time" ) // Module go mod info type type Module struct { Path string `json:",omitempty"` // module path Version string `json:",omitempty"` // module version Versions []string `json:",omitempty"` // available module versions Replace *Module `json:",omitempty"` // replaced by this module Time *time.Time `json:",omitempty"` // time version was created Update *Module `json:",omitempty"` // available update (with -u) Main bool `json:",omitempty"` // is this the main module? Indirect bool `json:",omitempty"` // module is only indirectly needed by main module Dir string `json:",omitempty"` // directory holding local copy of files, if any GoMod string `json:",omitempty"` // path to go.mod file describing module, if any GoVersion string `json:",omitempty"` // go version used in module Error *ModuleError `json:",omitempty"` // error loading module } // ModuleError go mod error type type ModuleError struct { Err string // error text } func (m *Module) String() string { s := m.Path if m.Version != "" { s += " " + m.Version if m.Update != nil { s += " [" + m.Update.Version + "]" } } if m.Replace != nil { s += " => " + m.Replace.Path if m.Replace.Version != "" { s += " " + m.Replace.Version if m.Replace.Update != nil { s += " [" + m.Replace.Update.Version + "]" } } } return s } // A PackagePublic describes a single package found in a directory. // go/libexec/src/cmd/go/internal/load/pkg.go type PackagePublic struct { Dir string `json:",omitempty"` // directory containing package sources ImportPath string `json:",omitempty"` // import path of package in dir ImportComment string `json:",omitempty"` // path in import comment on package statement Name string `json:",omitempty"` // package name Doc string `json:",omitempty"` // package document string Target string `json:",omitempty"` // installed target for this package (may be executable) Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared) Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory ForTest string `json:",omitempty"` // package is only for use in named test Export string `json:",omitempty"` // file containing export data (set by go list -export) Module *Module `json:",omitempty"` // info about package's module, if any Match []string `json:",omitempty"` // command-line patterns matching this package Goroot bool `json:",omitempty"` // is this package found in the Go root? Standard bool `json:",omitempty"` // is this package part of the standard Go library? DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed BinaryOnly bool `json:",omitempty"` // package cannot be recompiled Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies? // Stale and StaleReason remain here *only* for the list command. // They are only initialized in preparation for list execution. // The regular build determines staleness on the fly during action execution. Stale bool `json:",omitempty"` // would 'go install' do anything for this package? StaleReason string `json:",omitempty"` // why is Stale true? 
// Dependency information Imports []string `json:",omitempty"` // import paths used by this package ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted) Deps []string `json:",omitempty"` // all (recursively) imported dependencies // Test information // If you add to this list you MUST add to p.AllFiles (below) too. // Otherwise file name security lists will not apply to any new additions. TestGoFiles []string `json:",omitempty"` // _test.go files in package TestImports []string `json:",omitempty"` // imports from TestGoFiles XTestGoFiles []string `json:",omitempty"` // _test.go files outside package XTestImports []string `json:",omitempty"` // imports from XTestGoFiles } // Package type type Package struct { Dir string // !important: directory containing package sources ImportPath string // !important: import path of package in dir ImportComment string // path in import comment on package statement Name string // package name Doc string // package document string Module *Module // info about package's module, if any Stale bool // would 'go install' do anything for this package? StaleReason string // why is Stale true? // declarations Imports []string // import paths used by this package Filenames []string // all files Notes map[string][]*doc.Note // nil if no package Notes, or contains Buts, etc... Consts []*doc.Value Types []*Type Vars []*doc.Value Funcs []*Func // Examples is a sorted list of examples associated with // the package. Examples are extracted from _test.go files provided to NewFromFiles. Examples []*doc.Example // nil if no example code ParentImportPath string // parent package ImportPath Parent *Package `json:"-"` // parent package, important: json must ignore, prevent cycle parsing SubPackages []*Package // subpackages // ------------------------------------------------------------------ Dirname string // directory containing the package Err error // error or nil // package info FSet *token.FileSet // nil if no package document DocPackage *doc.Package // nil if no package document PAst map[string]*ast.File // nil if no AST with package exports IsMain bool // true for package main } // IsEmpty return package is empty func (p *Package) IsEmpty() bool { return p.Err != nil || p.PAst == nil && p.DocPackage == nil && len(p.SubPackages) == 0 } // -------------------------------------------------------------------- // Packages with package array type Packages []*Package // TODO Packages impl sorting func // Analyze the package func (p *Package) Analyze() (err error) { p.FSet = token.NewFileSet() // positions are relative to fset pkgs, err := parser.ParseDir(p.FSet, p.Dir, nil, parser.ParseComments) if err != nil { return } var astPackage *ast.Package for name, apkg := range pkgs { if strings.HasSuffix(name, "_test") { // skip test package continue } astPackage = apkg } d := doc.New(astPackage, p.ImportPath, doc.AllDecls) p.DocPackage = d p.Doc = d.Doc p.Name = d.Name p.ImportPath = d.ImportPath p.Imports = d.Imports p.Filenames = d.Filenames p.Notes = d.Notes p.Consts = d.Consts p.Vars = d.Vars p.Examples = d.Examples // set package types for _, t := range d.Types { p.Types = append(p.Types, NewTypeWithDoc(t)) } // set package funcs for _, fn := range d.Funcs { p.Funcs = append(p.Funcs, NewFuncWithDoc(fn)) } return } // -------------------------------------------------------------------- // TypeFields get type fields func
(t *Type) (fields []*Field) { if t == nil { return } for _, spec := range t.Decl.Specs { typeSpec := spec.(*ast.TypeSpec) // struct type if str, ok := typeSpec.Type.(*ast.StructType); ok { for _, f := range str.Fields.List { fields = append(fields, &Field{ Field: f, Type: t, }) } return } // interface type methods if str, ok := typeSpec.Type.(*ast.InterfaceType); ok { for _, field := range str.Methods.List { if ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil { field.Names = []*ast.Ident{ident} } } for _, f := range str.Methods.List { fields = append(fields, &Field{ Field: f, Type: t, }) } return } } return } // TypeSpec type spec type TypeSpec string const ( // StructType struct type spec StructType TypeSpec = "struct" // InterfaceType interface type spec InterfaceType TypeSpec = "interface" ) // Type type type Type struct { // doc.Type Doc string Name string Decl *ast.GenDecl Documentation Documentation // associated declarations Consts []*doc.Value // sorted list of constants of (mostly) this type Vars []*doc.Value // sorted list of variables of (mostly) this type Funcs []*Func // sorted list of functions returning this type Methods []*Func // sorted list of methods (including embedded ones) of this type // Examples is a sorted list of examples associated with // this type. Examples are extracted from _test.go files // provided to NewFromFiles. Examples []*doc.Example // Fields *ast.FieldList Fields []*Field TypeSpec TypeSpec // type spec } // NewTypeWithDoc return type with doc.Type func NewTypeWithDoc(t *doc.Type) *Type { var _t = &Type{ Doc: t.Doc, Name: t.Name, Decl: t.Decl, Consts: t.Consts, Vars: t.Vars, Examples: t.Examples, } _t.Documentation = NewDocumentation(t.Doc) _t.Fields = TypeFields(_t) for _, spec := range t.Decl.Specs { typeSpec := spec.(*ast.TypeSpec) if _, ok := typeSpec.Type.(*ast.StructType); ok { _t.TypeSpec = StructType } // interface type methods if str, ok := typeSpec.Type.(*ast.InterfaceType); ok { _t.TypeSpec = InterfaceType for _, field := range str.Methods.List { // interface funcs if fn, ok := field.Type.(*ast.FuncType); ok { var f = &Func{ Doc: field.Doc.Text(), Name: field.Names[0].Name, Params: fn.Params, Results: fn.Results, } f.Documentation = NewDocumentation(field.Doc.Text()) _t.Funcs = append(_t.Funcs, f) } } } } for _, fn := range t.Funcs { _t.Funcs = append(_t.Funcs, NewFuncWithDoc(fn)) } for _, fn := range t.Methods { _t.Methods = append(_t.Methods, NewFuncWithDoc(fn)) } return _t } // ------------------------------------------------------------------ // Func type type Func struct { // doc.Func Doc string Name string Decl *ast.FuncDecl // methods // (for functions, these fields have the respective zero value) Recv string // actual receiver "T" or "*T" Orig string // original receiver "T" or "*T" Level int // embedding level; 0 means not embedded Examples []*doc.Example // interface type Field *ast.Field FuncType *ast.FuncType // ast.FuncType fields Params *ast.FieldList // (incoming) parameters; non-nil Results *ast.FieldList // (outgoing) results; or nil Documentation Documentation } // NewFuncWithDoc return func with doc.Func func NewFuncWithDoc(f *doc.Func) *Func { var fn = &Func{ Doc: f.Doc, Name: f.Name, Decl: f.Decl, Recv: f.Recv, Orig: f.Orig, Level: f.Level, Examples: f.Examples, Params: f.Decl.Type.Params, Results: f.Decl.Type.Results, Documentation: NewDocumentation(f.Doc), } return fn } // ------------------------------------------------------------------ // Field type type Field struct { *ast.Field Type *Type } // 
JoinNames return names array func (f Field) JoinNames() (names []string) { if f.Field == nil { return } for _, name := range f.Field.Names { names = append(names, name.Name) } return } // Documentation parse markdown doc func (f *Field) Documentation() string { buf := new(bytes.Buffer) if f.Doc != nil { buf.WriteString(f.Doc.Text()) } if f.Comment != nil { buf.WriteString(f.Comment.Text()) } return MarkdownConvert(buf.String()) }
TypeFields
identifier_name
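The elided middle above is the identifier `TypeFields`; its body (visible in the suffix) iterates a `*ast.GenDecl`'s specs and collects struct fields and interface methods as `*ast.Field` values. Below is a minimal, self-contained sketch of that traversal; `src` and `collectMembers` are illustrative names, not part of the dataset's package:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package demo

type Point struct {
	X, Y int
}

type Stringer interface {
	String() string
}
`

// collectMembers mirrors the shape of the dataset's TypeFields: struct
// fields and interface methods are both represented as *ast.Field.
func collectMembers(decl *ast.GenDecl) []*ast.Field {
	var fields []*ast.Field
	for _, spec := range decl.Specs {
		ts, ok := spec.(*ast.TypeSpec)
		if !ok {
			continue
		}
		switch t := ts.Type.(type) {
		case *ast.StructType:
			fields = append(fields, t.Fields.List...)
		case *ast.InterfaceType:
			fields = append(fields, t.Methods.List...)
		}
	}
	return fields
}

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	for _, d := range f.Decls {
		gd, ok := d.(*ast.GenDecl)
		if !ok || gd.Tok != token.TYPE {
			continue
		}
		for _, field := range collectMembers(gd) {
			for _, name := range field.Names {
				fmt.Println(name.Name) // X, Y, String
			}
		}
	}
}
```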
package.go
package document import ( "bytes" "go/ast" "go/doc" "go/parser" "go/token" "strings" "time" ) // Module go mod info type type Module struct { Path string `json:",omitempty"` // module path Version string `json:",omitempty"` // module version Versions []string `json:",omitempty"` // available module versions Replace *Module `json:",omitempty"` // replaced by this module Time *time.Time `json:",omitempty"` // time version was created Update *Module `json:",omitempty"` // available update (with -u) Main bool `json:",omitempty"` // is this the main module? Indirect bool `json:",omitempty"` // module is only indirectly needed by main module Dir string `json:",omitempty"` // directory holding local copy of files, if any GoMod string `json:",omitempty"` // path to go.mod file describing module, if any GoVersion string `json:",omitempty"` // go version used in module Error *ModuleError `json:",omitempty"` // error loading module } // ModuleError go mod error type type ModuleError struct { Err string // error text } func (m *Module) String() string { s := m.Path if m.Version != "" { s += " " + m.Version if m.Update != nil { s += " [" + m.Update.Version + "]" } } if m.Replace != nil { s += " => " + m.Replace.Path if m.Replace.Version != "" { s += " " + m.Replace.Version if m.Replace.Update != nil { s += " [" + m.Replace.Update.Version + "]" } } } return s } // A PackagePublic describes a single package found in a directory. // go/libexec/src/cmd/go/internal/load/pkg.go type PackagePublic struct { Dir string `json:",omitempty"` // directory containing package sources ImportPath string `json:",omitempty"` // import path of package in dir ImportComment string `json:",omitempty"` // path in import comment on package statement Name string `json:",omitempty"` // package name Doc string `json:",omitempty"` // package document string Target string `json:",omitempty"` // installed target for this package (may be executable) Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared) Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory ForTest string `json:",omitempty"` // package is only for use in named test Export string `json:",omitempty"` // file containing export data (set by go list -export) Module *Module `json:",omitempty"` // info about package's module, if any Match []string `json:",omitempty"` // command-line patterns matching this package Goroot bool `json:",omitempty"` // is this package found in the Go root? Standard bool `json:",omitempty"` // is this package part of the standard Go library? DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed BinaryOnly bool `json:",omitempty"` // package cannot be recompiled Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies? // Stale and StaleReason remain here *only* for the list command. // They are only initialized in preparation for list execution. // The regular build determines staleness on the fly during action execution. Stale bool `json:",omitempty"` // would 'go install' do anything for this package? StaleReason string `json:",omitempty"` // why is Stale true? 
// Dependency information Imports []string `json:",omitempty"` // import paths used by this package ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted) Deps []string `json:",omitempty"` // all (recursively) imported dependencies // Test information // If you add to this list you MUST add to p.AllFiles (below) too. // Otherwise file name security lists will not apply to any new additions. TestGoFiles []string `json:",omitempty"` // _test.go files in package TestImports []string `json:",omitempty"` // imports from TestGoFiles XTestGoFiles []string `json:",omitempty"` // _test.go files outside package XTestImports []string `json:",omitempty"` // imports from XTestGoFiles } // Package type type Package struct { Dir string // !important: directory containing package sources ImportPath string // !important: import path of package in dir ImportComment string // path in import comment on package statement Name string // package name Doc string // package document string Module *Module // info about package's module, if any Stale bool // would 'go install' do anything for this package? StaleReason string // why is Stale true? // declarations Imports []string // import paths used by this package Filenames []string // all files Notes map[string][]*doc.Note // nil if no package Notes, or contains Buts, etc... Consts []*doc.Value Types []*Type Vars []*doc.Value Funcs []*Func // Examples is a sorted list of examples associated with // the package. Examples are extracted from _test.go files provided to NewFromFiles. Examples []*doc.Example // nil if no example code ParentImportPath string // parent package ImportPath Parent *Package `json:"-"` // parent package, important: json must ignore, prevent cycle parsing SubPackages []*Package // subpackages // ------------------------------------------------------------------ Dirname string // directory containing the package Err error // error or nil // package info FSet *token.FileSet // nil if no package document DocPackage *doc.Package // nil if no package document PAst map[string]*ast.File // nil if no AST with package exports IsMain bool // true for package main } // IsEmpty return package is empty func (p *Package) IsEmpty() bool { return p.Err != nil || p.PAst == nil && p.DocPackage == nil && len(p.SubPackages) == 0 } // -------------------------------------------------------------------- // Packages with package array type Packages []*Package // TODO Packages impl sorting func // Analyze the package func (p *Package) Analyze() (err error)
// -------------------------------------------------------------------- // TypeFields get type fields func TypeFields(t *Type) (fields []*Field) { if t == nil { return } for _, spec := range t.Decl.Specs { typeSpec := spec.(*ast.TypeSpec) // struct type if str, ok := typeSpec.Type.(*ast.StructType); ok { for _, f := range str.Fields.List { fields = append(fields, &Field{ Field: f, Type: t, }) } return } // interface type methods if str, ok := typeSpec.Type.(*ast.InterfaceType); ok { for _, field := range str.Methods.List { if ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil { field.Names = []*ast.Ident{ident} } } for _, f := range str.Methods.List { fields = append(fields, &Field{ Field: f, Type: t, }) } return } } return } // TypeSpec type spec type TypeSpec string const ( // StructType struct type spec StructType TypeSpec = "struct" // InterfaceType interface type spec InterfaceType TypeSpec = "interface" ) // Type type type Type struct { // doc.Type Doc string Name string Decl *ast.GenDecl Documentation Documentation // associated declarations Consts []*doc.Value // sorted list of constants of (mostly) this type Vars []*doc.Value // sorted list of variables of (mostly) this type Funcs []*Func // sorted list of functions returning this type Methods []*Func // sorted list of methods (including embedded ones) of this type // Examples is a sorted list of examples associated with // this type. Examples are extracted from _test.go files // provided to NewFromFiles. Examples []*doc.Example // Fields *ast.FieldList Fields []*Field TypeSpec TypeSpec // type spec } // NewTypeWithDoc return type with doc.Type func NewTypeWithDoc(t *doc.Type) *Type { var _t = &Type{ Doc: t.Doc, Name: t.Name, Decl: t.Decl, Consts: t.Consts, Vars: t.Vars, Examples: t.Examples, } _t.Documentation = NewDocumentation(t.Doc) _t.Fields = TypeFields(_t) for _, spec := range t.Decl.Specs { typeSpec := spec.(*ast.TypeSpec) if _, ok := typeSpec.Type.(*ast.StructType); ok { _t.TypeSpec = StructType } // interface type methods if str, ok := typeSpec.Type.(*ast.InterfaceType); ok { _t.TypeSpec = InterfaceType for _, field := range str.Methods.List { // interface funcs if fn, ok := field.Type.(*ast.FuncType); ok { var f = &Func{ Doc: field.Doc.Text(), Name: field.Names[0].Name, Params: fn.Params, Results: fn.Results, } f.Documentation = NewDocumentation(field.Doc.Text()) _t.Funcs = append(_t.Funcs, f) } } } } for _, fn := range t.Funcs { _t.Funcs = append(_t.Funcs, NewFuncWithDoc(fn)) } for _, fn := range t.Methods { _t.Methods = append(_t.Methods, NewFuncWithDoc(fn)) } return _t } // ------------------------------------------------------------------ // Func type type Func struct { // doc.Func Doc string Name string Decl *ast.FuncDecl // methods // (for functions, these fields have the respective zero value) Recv string // actual receiver "T" or "*T" Orig string // original receiver "T" or "*T" Level int // embedding level; 0 means not embedded Examples []*doc.Example // interface type Field *ast.Field FuncType *ast.FuncType // ast.FuncType fields Params *ast.FieldList // (incoming) parameters; non-nil Results *ast.FieldList // (outgoing) results; or nil Documentation Documentation } // NewFuncWithDoc return func with doc.Func func NewFuncWithDoc(f *doc.Func) *Func { var fn = &Func{ Doc: f.Doc, Name: f.Name, Decl: f.Decl, Recv: f.Recv, Orig: f.Orig, Level: f.Level, Examples: f.Examples, Params: f.Decl.Type.Params, Results: f.Decl.Type.Results, Documentation: NewDocumentation(f.Doc), } return fn } // 
------------------------------------------------------------------ // Field type type Field struct { *ast.Field Type *Type } // JoinNames return names array func (f Field) JoinNames() (names []string) { if f.Field == nil { return } for _, name := range f.Field.Names { names = append(names, name.Name) } return } // Documentation parse markdown doc func (f *Field) Documentation() string { buf := new(bytes.Buffer) if f.Doc != nil { buf.WriteString(f.Doc.Text()) } if f.Comment != nil { buf.WriteString(f.Comment.Text()) } return MarkdownConvert(buf.String()) }
{ p.FSet = token.NewFileSet() // positions are relative to fset pkgs, err := parser.ParseDir(p.FSet, p.Dir, nil, parser.ParseComments) if err != nil { return } var astPackage *ast.Package for name, apkg := range pkgs { if strings.HasSuffix(name, "_test") { // skip test package continue } astPackage = apkg } d := doc.New(astPackage, p.ImportPath, doc.AllDecls) p.DocPackage = d p.Doc = d.Doc p.Name = d.Name p.ImportPath = d.ImportPath p.Imports = d.Imports p.Filenames = d.Filenames p.Notes = d.Notes p.Consts = d.Consts p.Vars = d.Vars p.Examples = d.Examples // set package types for _, t := range d.Types { p.Types = append(p.Types, NewTypeWithDoc(t)) } // set package funcs for _, fn := range d.Funcs { p.Funcs = append(p.Funcs, NewFuncWithDoc(fn)) } return }
identifier_body
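This record elides the body of `Analyze`, which is the standard `go/parser` to `go/doc` pipeline: parse the directory, skip the external test package, then build a `*doc.Package`. A compact runnable sketch follows; the directory `"."` and import path `"example.com/demo"` are placeholders, and `doc.New` over `*ast.Package` (what the dataset code uses) is deprecated in newer Go releases in favor of `doc.NewFromFiles`:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/parser"
	"go/token"
	"strings"
)

func main() {
	fset := token.NewFileSet() // positions are relative to fset
	pkgs, err := parser.ParseDir(fset, ".", nil, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	var astPkg *ast.Package
	for name, p := range pkgs {
		if strings.HasSuffix(name, "_test") {
			continue // skip the black-box test package
		}
		astPkg = p
	}
	if astPkg == nil {
		panic("no non-test package found")
	}
	d := doc.New(astPkg, "example.com/demo", doc.AllDecls)
	fmt.Println(d.Name, "types:", len(d.Types), "funcs:", len(d.Funcs))
}
```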
package.go
package document import ( "bytes" "go/ast" "go/doc" "go/parser" "go/token" "strings" "time" ) // Module go mod info type type Module struct { Path string `json:",omitempty"` // module path Version string `json:",omitempty"` // module version Versions []string `json:",omitempty"` // available module versions Replace *Module `json:",omitempty"` // replaced by this module Time *time.Time `json:",omitempty"` // time version was created Update *Module `json:",omitempty"` // available update (with -u) Main bool `json:",omitempty"` // is this the main module? Indirect bool `json:",omitempty"` // module is only indirectly needed by main module Dir string `json:",omitempty"` // directory holding local copy of files, if any GoMod string `json:",omitempty"` // path to go.mod file describing module, if any GoVersion string `json:",omitempty"` // go version used in module Error *ModuleError `json:",omitempty"` // error loading module } // ModuleError go mod error type type ModuleError struct { Err string // error text } func (m *Module) String() string { s := m.Path if m.Version != "" { s += " " + m.Version if m.Update != nil { s += " [" + m.Update.Version + "]" } } if m.Replace != nil { s += " => " + m.Replace.Path if m.Replace.Version != "" { s += " " + m.Replace.Version if m.Replace.Update != nil { s += " [" + m.Replace.Update.Version + "]" } } } return s } // A PackagePublic describes a single package found in a directory. // go/libexec/src/cmd/go/internal/load/pkg.go type PackagePublic struct { Dir string `json:",omitempty"` // directory containing package sources ImportPath string `json:",omitempty"` // import path of package in dir ImportComment string `json:",omitempty"` // path in import comment on package statement Name string `json:",omitempty"` // package name Doc string `json:",omitempty"` // package document string Target string `json:",omitempty"` // installed target for this package (may be executable) Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared) Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory ForTest string `json:",omitempty"` // package is only for use in named test Export string `json:",omitempty"` // file containing export data (set by go list -export) Module *Module `json:",omitempty"` // info about package's module, if any Match []string `json:",omitempty"` // command-line patterns matching this package Goroot bool `json:",omitempty"` // is this package found in the Go root? Standard bool `json:",omitempty"` // is this package part of the standard Go library? DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed BinaryOnly bool `json:",omitempty"` // package cannot be recompiled Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies? // Stale and StaleReason remain here *only* for the list command. // They are only initialized in preparation for list execution. // The regular build determines staleness on the fly during action execution. Stale bool `json:",omitempty"` // would 'go install' do anything for this package? StaleReason string `json:",omitempty"` // why is Stale true? 
// Dependency information Imports []string `json:",omitempty"` // import paths used by this package ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted) Deps []string `json:",omitempty"` // all (recursively) imported dependencies // Test information // If you add to this list you MUST add to p.AllFiles (below) too. // Otherwise file name security lists will not apply to any new additions. TestGoFiles []string `json:",omitempty"` // _test.go files in package TestImports []string `json:",omitempty"` // imports from TestGoFiles XTestGoFiles []string `json:",omitempty"` // _test.go files outside package XTestImports []string `json:",omitempty"` // imports from XTestGoFiles } // Package type type Package struct { Dir string // !important: directory containing package sources ImportPath string // !important: import path of package in dir ImportComment string // path in import comment on package statement Name string // package name Doc string // package document string Module *Module // info about package's module, if any Stale bool // would 'go install' do anything for this package? StaleReason string // why is Stale true? // declarations Imports []string // import paths used by this package Filenames []string // all files Notes map[string][]*doc.Note // nil if no package Notes, or contains Buts, etc... Consts []*doc.Value Types []*Type Vars []*doc.Value Funcs []*Func // Examples is a sorted list of examples associated with // the package. Examples are extracted from _test.go files provided to NewFromFiles. Examples []*doc.Example // nil if no example code ParentImportPath string // parent package ImportPath Parent *Package `json:"-"` // parent package, important: json must ignore, prevent cycle parsing SubPackages []*Package // subpackages // ------------------------------------------------------------------ Dirname string // directory containing the package Err error // error or nil // package info FSet *token.FileSet // nil if no package document DocPackage *doc.Package // nil if no package document PAst map[string]*ast.File // nil if no AST with package exports IsMain bool // true for package main } // IsEmpty return package is empty func (p *Package) IsEmpty() bool { return p.Err != nil || p.PAst == nil && p.DocPackage == nil && len(p.SubPackages) == 0 } // -------------------------------------------------------------------- // Packages with package array type Packages []*Package // TODO Packages impl sorting func // Analyze the package func (p *Package) Analyze() (err error) { p.FSet = token.NewFileSet() // positions are relative to fset pkgs, err := parser.ParseDir(p.FSet, p.Dir, nil, parser.ParseComments) if err != nil { return } var astPackage *ast.Package for name, apkg := range pkgs { if strings.HasSuffix(name, "_test") { // skip test package continue } astPackage = apkg } d := doc.New(astPackage, p.ImportPath, doc.AllDecls) p.DocPackage = d p.Doc = d.Doc p.Name = d.Name p.ImportPath = d.ImportPath p.Imports = d.Imports p.Filenames = d.Filenames p.Notes = d.Notes p.Consts = d.Consts p.Vars = d.Vars p.Examples = d.Examples // set package types for _, t := range d.Types { p.Types = append(p.Types, NewTypeWithDoc(t)) } // set package funcs for _, fn := range d.Funcs { p.Funcs = append(p.Funcs, NewFuncWithDoc(fn)) } return } // -------------------------------------------------------------------- // TypeFields get type fields func TypeFields(t *Type) (fields []*Field) { if t == nil { return } for _, spec := range t.Decl.Specs { 
typeSpec := spec.(*ast.TypeSpec) // struct type if str, ok := typeSpec.Type.(*ast.StructType); ok { for _, f := range str.Fields.List { fields = append(fields, &Field{ Field: f, Type: t, }) } return } // interface type methods if str, ok := typeSpec.Type.(*ast.InterfaceType); ok { for _, field := range str.Methods.List { if ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil { field.Names = []*ast.Ident{ident} } } for _, f := range str.Methods.List { fields = append(fields, &Field{ Field: f, Type: t, }) } return }
// TypeSpec type spec type TypeSpec string const ( // StructType struct type spec StructType TypeSpec = "struct" // InterfaceType interface type spec InterfaceType TypeSpec = "interface" ) // Type type type Type struct { // doc.Type Doc string Name string Decl *ast.GenDecl Documentation Documentation // associated declarations Consts []*doc.Value // sorted list of constants of (mostly) this type Vars []*doc.Value // sorted list of variables of (mostly) this type Funcs []*Func // sorted list of functions returning this type Methods []*Func // sorted list of methods (including embedded ones) of this type // Examples is a sorted list of examples associated with // this type. Examples are extracted from _test.go files // provided to NewFromFiles. Examples []*doc.Example // Fields *ast.FieldList Fields []*Field TypeSpec TypeSpec // type spec } // NewTypeWithDoc return type with doc.Type func NewTypeWithDoc(t *doc.Type) *Type { var _t = &Type{ Doc: t.Doc, Name: t.Name, Decl: t.Decl, Consts: t.Consts, Vars: t.Vars, Examples: t.Examples, } _t.Documentation = NewDocumentation(t.Doc) _t.Fields = TypeFields(_t) for _, spec := range t.Decl.Specs { typeSpec := spec.(*ast.TypeSpec) if _, ok := typeSpec.Type.(*ast.StructType); ok { _t.TypeSpec = StructType } // interface type methods if str, ok := typeSpec.Type.(*ast.InterfaceType); ok { _t.TypeSpec = InterfaceType for _, field := range str.Methods.List { // interface funcs if fn, ok := field.Type.(*ast.FuncType); ok { var f = &Func{ Doc: field.Doc.Text(), Name: field.Names[0].Name, Params: fn.Params, Results: fn.Results, } f.Documentation = NewDocumentation(field.Doc.Text()) _t.Funcs = append(_t.Funcs, f) } } } } for _, fn := range t.Funcs { _t.Funcs = append(_t.Funcs, NewFuncWithDoc(fn)) } for _, fn := range t.Methods { _t.Methods = append(_t.Methods, NewFuncWithDoc(fn)) } return _t } // ------------------------------------------------------------------ // Func type type Func struct { // doc.Func Doc string Name string Decl *ast.FuncDecl // methods // (for functions, these fields have the respective zero value) Recv string // actual receiver "T" or "*T" Orig string // original receiver "T" or "*T" Level int // embedding level; 0 means not embedded Examples []*doc.Example // interface type Field *ast.Field FuncType *ast.FuncType // ast.FuncType fields Params *ast.FieldList // (incoming) parameters; non-nil Results *ast.FieldList // (outgoing) results; or nil Documentation Documentation } // NewFuncWithDoc return func with doc.Func func NewFuncWithDoc(f *doc.Func) *Func { var fn = &Func{ Doc: f.Doc, Name: f.Name, Decl: f.Decl, Recv: f.Recv, Orig: f.Orig, Level: f.Level, Examples: f.Examples, Params: f.Decl.Type.Params, Results: f.Decl.Type.Results, Documentation: NewDocumentation(f.Doc), } return fn } // ------------------------------------------------------------------ // Field type type Field struct { *ast.Field Type *Type } // JoinNames return names array func (f Field) JoinNames() (names []string) { if f.Field == nil { return } for _, name := range f.Field.Names { names = append(names, name.Name) } return } // Documentation parse markdown doc func (f *Field) Documentation() string { buf := new(bytes.Buffer) if f.Doc != nil { buf.WriteString(f.Doc.Text()) } if f.Comment != nil { buf.WriteString(f.Comment.Text()) } return MarkdownConvert(buf.String()) }
} return }
random_line_split
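The `random_line_split` record above cuts `TypeFields` inside its interface branch, where embedded interfaces are normalized by promoting the embedded type's identifier into `field.Names`, so later code can treat every interface member uniformly. A sketch of that normalization; the source snippet is illustrative, and it uses `len(field.Names) == 0` as a simplification of the dataset's `ident.Obj != nil` check:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package demo

type Base interface{ Close() error }

type API interface {
	Base         // embedded: Names is empty, Type is *ast.Ident
	Open() error // method: Names is ["Open"]
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		iface, ok := n.(*ast.InterfaceType)
		if !ok {
			return true
		}
		for _, field := range iface.Methods.List {
			if ident, ok := field.Type.(*ast.Ident); ok && len(field.Names) == 0 {
				field.Names = []*ast.Ident{ident} // promote embedded type name
			}
			fmt.Println(field.Names[0].Name) // Close, Base, Open
		}
		return true
	})
}
```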
package.go
package document import ( "bytes" "go/ast" "go/doc" "go/parser" "go/token" "strings" "time" ) // Module go mod info type type Module struct { Path string `json:",omitempty"` // module path Version string `json:",omitempty"` // module version Versions []string `json:",omitempty"` // available module versions Replace *Module `json:",omitempty"` // replaced by this module Time *time.Time `json:",omitempty"` // time version was created Update *Module `json:",omitempty"` // available update (with -u) Main bool `json:",omitempty"` // is this the main module? Indirect bool `json:",omitempty"` // module is only indirectly needed by main module Dir string `json:",omitempty"` // directory holding local copy of files, if any GoMod string `json:",omitempty"` // path to go.mod file describing module, if any GoVersion string `json:",omitempty"` // go version used in module Error *ModuleError `json:",omitempty"` // error loading module } // ModuleError go mod error type type ModuleError struct { Err string // error text } func (m *Module) String() string { s := m.Path if m.Version != "" { s += " " + m.Version if m.Update != nil { s += " [" + m.Update.Version + "]" } } if m.Replace != nil { s += " => " + m.Replace.Path if m.Replace.Version != "" { s += " " + m.Replace.Version if m.Replace.Update != nil { s += " [" + m.Replace.Update.Version + "]" } } } return s } // A PackagePublic describes a single package found in a directory. // go/libexec/src/cmd/go/internal/load/pkg.go type PackagePublic struct { Dir string `json:",omitempty"` // directory containing package sources ImportPath string `json:",omitempty"` // import path of package in dir ImportComment string `json:",omitempty"` // path in import comment on package statement Name string `json:",omitempty"` // package name Doc string `json:",omitempty"` // package document string Target string `json:",omitempty"` // installed target for this package (may be executable) Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared) Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory ForTest string `json:",omitempty"` // package is only for use in named test Export string `json:",omitempty"` // file containing export data (set by go list -export) Module *Module `json:",omitempty"` // info about package's module, if any Match []string `json:",omitempty"` // command-line patterns matching this package Goroot bool `json:",omitempty"` // is this package found in the Go root? Standard bool `json:",omitempty"` // is this package part of the standard Go library? DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed BinaryOnly bool `json:",omitempty"` // package cannot be recompiled Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies? // Stale and StaleReason remain here *only* for the list command. // They are only initialized in preparation for list execution. // The regular build determines staleness on the fly during action execution. Stale bool `json:",omitempty"` // would 'go install' do anything for this package? StaleReason string `json:",omitempty"` // why is Stale true? 
// Dependency information Imports []string `json:",omitempty"` // import paths used by this package ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted) Deps []string `json:",omitempty"` // all (recursively) imported dependencies // Test information // If you add to this list you MUST add to p.AllFiles (below) too. // Otherwise file name security lists will not apply to any new additions. TestGoFiles []string `json:",omitempty"` // _test.go files in package TestImports []string `json:",omitempty"` // imports from TestGoFiles XTestGoFiles []string `json:",omitempty"` // _test.go files outside package XTestImports []string `json:",omitempty"` // imports from XTestGoFiles } // Package type type Package struct { Dir string // !important: directory containing package sources ImportPath string // !important: import path of package in dir ImportComment string // path in import comment on package statement Name string // package name Doc string // package document string Module *Module // info about package's module, if any Stale bool // would 'go install' do anything for this package? StaleReason string // why is Stale true? // declarations Imports []string // import paths used by this package Filenames []string // all files Notes map[string][]*doc.Note // nil if no package Notes, or contains Buts, etc... Consts []*doc.Value Types []*Type Vars []*doc.Value Funcs []*Func // Examples is a sorted list of examples associated with // the package. Examples are extracted from _test.go files provided to NewFromFiles. Examples []*doc.Example // nil if no example code ParentImportPath string // parent package ImportPath Parent *Package `json:"-"` // parent package, important: json must ignore, prevent cycle parsing SubPackages []*Package // subpackages // ------------------------------------------------------------------ Dirname string // directory containing the package Err error // error or nil // package info FSet *token.FileSet // nil if no package document DocPackage *doc.Package // nil if no package document PAst map[string]*ast.File // nil if no AST with package exports IsMain bool // true for package main } // IsEmpty return package is empty func (p *Package) IsEmpty() bool { return p.Err != nil || p.PAst == nil && p.DocPackage == nil && len(p.SubPackages) == 0 } // -------------------------------------------------------------------- // Packages with package array type Packages []*Package // TODO Packages impl sorting func // Analyze the package func (p *Package) Analyze() (err error) { p.FSet = token.NewFileSet() // positions are relative to fset pkgs, err := parser.ParseDir(p.FSet, p.Dir, nil, parser.ParseComments) if err != nil { return } var astPackage *ast.Package for name, apkg := range pkgs { if strings.HasSuffix(name, "_test")
astPackage = apkg } d := doc.New(astPackage, p.ImportPath, doc.AllDecls) p.DocPackage = d p.Doc = d.Doc p.Name = d.Name p.ImportPath = d.ImportPath p.Imports = d.Imports p.Filenames = d.Filenames p.Notes = d.Notes p.Consts = d.Consts p.Vars = d.Vars p.Examples = d.Examples // set package types for _, t := range d.Types { p.Types = append(p.Types, NewTypeWithDoc(t)) } // set package funcs for _, fn := range d.Funcs { p.Funcs = append(p.Funcs, NewFuncWithDoc(fn)) } return } // -------------------------------------------------------------------- // TypeFields get type fields func TypeFields(t *Type) (fields []*Field) { if t == nil { return } for _, spec := range t.Decl.Specs { typeSpec := spec.(*ast.TypeSpec) // struct type if str, ok := typeSpec.Type.(*ast.StructType); ok { for _, f := range str.Fields.List { fields = append(fields, &Field{ Field: f, Type: t, }) } return } // interface type methods if str, ok := typeSpec.Type.(*ast.InterfaceType); ok { for _, field := range str.Methods.List { if ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil { field.Names = []*ast.Ident{ident} } } for _, f := range str.Methods.List { fields = append(fields, &Field{ Field: f, Type: t, }) } return } } return } // TypeSpec type spec type TypeSpec string const ( // StructType struct type spec StructType TypeSpec = "struct" // InterfaceType interface type spec InterfaceType TypeSpec = "interface" ) // Type type type Type struct { // doc.Type Doc string Name string Decl *ast.GenDecl Documentation Documentation // associated declarations Consts []*doc.Value // sorted list of constants of (mostly) this type Vars []*doc.Value // sorted list of variables of (mostly) this type Funcs []*Func // sorted list of functions returning this type Methods []*Func // sorted list of methods (including embedded ones) of this type // Examples is a sorted list of examples associated with // this type. Examples are extracted from _test.go files // provided to NewFromFiles. 
Examples []*doc.Example // Fields *ast.FieldList Fields []*Field TypeSpec TypeSpec // type spec } // NewTypeWithDoc return type with doc.Type func NewTypeWithDoc(t *doc.Type) *Type { var _t = &Type{ Doc: t.Doc, Name: t.Name, Decl: t.Decl, Consts: t.Consts, Vars: t.Vars, Examples: t.Examples, } _t.Documentation = NewDocumentation(t.Doc) _t.Fields = TypeFields(_t) for _, spec := range t.Decl.Specs { typeSpec := spec.(*ast.TypeSpec) if _, ok := typeSpec.Type.(*ast.StructType); ok { _t.TypeSpec = StructType } // interface type methods if str, ok := typeSpec.Type.(*ast.InterfaceType); ok { _t.TypeSpec = InterfaceType for _, field := range str.Methods.List { // interface funcs if fn, ok := field.Type.(*ast.FuncType); ok { var f = &Func{ Doc: field.Doc.Text(), Name: field.Names[0].Name, Params: fn.Params, Results: fn.Results, } f.Documentation = NewDocumentation(field.Doc.Text()) _t.Funcs = append(_t.Funcs, f) } } } } for _, fn := range t.Funcs { _t.Funcs = append(_t.Funcs, NewFuncWithDoc(fn)) } for _, fn := range t.Methods { _t.Methods = append(_t.Methods, NewFuncWithDoc(fn)) } return _t } // ------------------------------------------------------------------ // Func type type Func struct { // doc.Func Doc string Name string Decl *ast.FuncDecl // methods // (for functions, these fields have the respective zero value) Recv string // actual receiver "T" or "*T" Orig string // original receiver "T" or "*T" Level int // embedding level; 0 means not embedded Examples []*doc.Example // interface type Field *ast.Field FuncType *ast.FuncType // ast.FuncType fields Params *ast.FieldList // (incoming) parameters; non-nil Results *ast.FieldList // (outgoing) results; or nil Documentation Documentation } // NewFuncWithDoc return func with doc.Func func NewFuncWithDoc(f *doc.Func) *Func { var fn = &Func{ Doc: f.Doc, Name: f.Name, Decl: f.Decl, Recv: f.Recv, Orig: f.Orig, Level: f.Level, Examples: f.Examples, Params: f.Decl.Type.Params, Results: f.Decl.Type.Results, Documentation: NewDocumentation(f.Doc), } return fn } // ------------------------------------------------------------------ // Field type type Field struct { *ast.Field Type *Type } // JoinNames return names array func (f Field) JoinNames() (names []string) { if f.Field == nil { return } for _, name := range f.Field.Names { names = append(names, name.Name) } return } // Documentation parse markdown doc func (f *Field) Documentation() string { buf := new(bytes.Buffer) if f.Doc != nil { buf.WriteString(f.Doc.Text()) } if f.Comment != nil { buf.WriteString(f.Comment.Text()) } return MarkdownConvert(buf.String()) }
{ // skip test package continue }
conditional_block
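Elsewhere in this record, `Field.Documentation` concatenates `f.Doc.Text()` (the comment block above a field) with `f.Comment.Text()` (the trailing line comment). A small sketch of that distinction under an assumed source snippet:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package demo

type User struct {
	// ID is the primary key.
	ID int64 // assigned by the database
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		st, ok := n.(*ast.StructType)
		if !ok {
			return true
		}
		for _, field := range st.Fields.List {
			fmt.Printf("doc: %q\n", field.Doc.Text())         // "ID is the primary key.\n"
			fmt.Printf("comment: %q\n", field.Comment.Text()) // "assigned by the database\n"
		}
		return true
	})
}
```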
recreate.go
// +build !cassandra // Copyright (C) 2017 ScyllaDB package gocql import ( "encoding/binary" "encoding/json" "fmt" "io" "sort" "strconv" "strings" "text/template" ) // ToCQL returns a CQL query that ca be used to recreate keyspace with all // user defined types, tables, indexes, functions, aggregates and views associated // with this keyspace. func (km *KeyspaceMetadata) ToCQL() (string, error) { var sb strings.Builder if err := km.keyspaceToCQL(&sb); err != nil { return "", err } sortedTypes := km.typesSortedTopologically() for _, tm := range sortedTypes { if err := km.userTypeToCQL(&sb, tm); err != nil { return "", err } } for _, tm := range km.Tables { if err := km.tableToCQL(&sb, km.Name, tm); err != nil { return "", err } } for _, im := range km.Indexes { if err := km.indexToCQL(&sb, im); err != nil { return "", err } } for _, fm := range km.Functions { if err := km.functionToCQL(&sb, km.Name, fm); err != nil { return "", err } } for _, am := range km.Aggregates { if err := km.aggregateToCQL(&sb, am); err != nil { return "", err } } for _, vm := range km.Views { if err := km.viewToCQL(&sb, vm); err != nil { return "", err } } return sb.String(), nil } func (km *KeyspaceMetadata) typesSortedTopologically() []*TypeMetadata { sortedTypes := make([]*TypeMetadata, 0, len(km.Types)) for _, tm := range km.Types { sortedTypes = append(sortedTypes, tm) } sort.Slice(sortedTypes, func(i, j int) bool { for _, ft := range sortedTypes[j].FieldTypes { if strings.Contains(ft, sortedTypes[i].Name) { return true } } return false }) return sortedTypes } var tableCQLTemplate = template.Must(template.New("table"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "tableColumnToCQL": cqlHelpers.tableColumnToCQL, "tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL, }). Parse(` CREATE TABLE {{ .KeyspaceName }}.{{ .Tm.Name }} ( {{ tableColumnToCQL .Tm }} ) WITH {{ tablePropertiesToCQL .Tm.ClusteringColumns .Tm.Options .Tm.Flags .Tm.Extensions }}; `)) func (km *KeyspaceMetadata) tableToCQL(w io.Writer, kn string, tm *TableMetadata) error { if err := tableCQLTemplate.Execute(w, map[string]interface{}{ "Tm": tm, "KeyspaceName": kn, }); err != nil { return err } return nil } var functionTemplate = template.Must(template.New("functions"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "zip": cqlHelpers.zip, "stripFrozen": cqlHelpers.stripFrozen, }). Parse(` CREATE FUNCTION {{ escape .keyspaceName }}.{{ escape .fm.Name }} ( {{- range $i, $args := zip .fm.ArgumentNames .fm.ArgumentTypes }} {{- if ne $i 0 }}, {{ end }} {{- escape (index $args 0) }} {{ stripFrozen (index $args 1) }} {{- end -}}) {{ if .fm.CalledOnNullInput }}CALLED{{ else }}RETURNS NULL{{ end }} ON NULL INPUT RETURNS {{ .fm.ReturnType }} LANGUAGE {{ .fm.Language }} AS $${{ .fm.Body }}$$; `)) func (km *KeyspaceMetadata) functionToCQL(w io.Writer, keyspaceName string, fm *FunctionMetadata) error { if err := functionTemplate.Execute(w, map[string]interface{}{ "fm": fm, "keyspaceName": keyspaceName, }); err != nil { return err } return nil } var viewTemplate = template.Must(template.New("views"). Funcs(map[string]interface{}{ "zip": cqlHelpers.zip, "partitionKeyString": cqlHelpers.partitionKeyString, "tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL, }). 
Parse(` CREATE MATERIALIZED VIEW {{ .vm.KeyspaceName }}.{{ .vm.ViewName }} AS SELECT {{ if .vm.IncludeAllColumns }}*{{ else }} {{- range $i, $col := .vm.OrderedColumns }} {{- if ne $i 0 }}, {{ end }} {{ $col }} {{- end }} {{- end }} FROM {{ .vm.KeyspaceName }}.{{ .vm.BaseTableName }} WHERE {{ .vm.WhereClause }} PRIMARY KEY ({{ partitionKeyString .vm.PartitionKey .vm.ClusteringColumns }}) WITH {{ tablePropertiesToCQL .vm.ClusteringColumns .vm.Options .flags .vm.Extensions }}; `)) func (km *KeyspaceMetadata) viewToCQL(w io.Writer, vm *ViewMetadata) error { if err := viewTemplate.Execute(w, map[string]interface{}{ "vm": vm, "flags": []string{}, }); err != nil { return err } return nil } var aggregatesTemplate = template.Must(template.New("aggregate"). Funcs(map[string]interface{}{ "stripFrozen": cqlHelpers.stripFrozen, }). Parse(` CREATE AGGREGATE {{ .Keyspace }}.{{ .Name }}( {{- range $arg, $i := .ArgumentTypes }} {{- if ne $i 0 }}, {{ end }} {{ stripFrozen $arg }} {{- end -}}) SFUNC {{ .StateFunc.Name }} STYPE {{ stripFrozen .State }} {{- if ne .FinalFunc.Name "" }} FINALFUNC {{ .FinalFunc.Name }} {{- end -}} {{- if ne .InitCond "" }} INITCOND {{ .InitCond }} {{- end -}} ); `)) func (km *KeyspaceMetadata) aggregateToCQL(w io.Writer, am *AggregateMetadata) error { if err := aggregatesTemplate.Execute(w, am); err != nil { return err } return nil } var typeCQLTemplate = template.Must(template.New("types"). Funcs(map[string]interface{}{ "zip": cqlHelpers.zip, }). Parse(` CREATE TYPE {{ .Keyspace }}.{{ .Name }} ( {{- range $i, $fields := zip .FieldNames .FieldTypes }} {{- if ne $i 0 }},{{ end }} {{ index $fields 0 }} {{ index $fields 1 }} {{- end }} ); `)) func (km *KeyspaceMetadata) userTypeToCQL(w io.Writer, tm *TypeMetadata) error { if err := typeCQLTemplate.Execute(w, tm); err != nil { return err } return nil } func (km *KeyspaceMetadata) indexToCQL(w io.Writer, im *IndexMetadata) error { // Scylla doesn't support any custom indexes if im.Kind == IndexKindCustom { return nil } options := im.Options indexTarget := options["target"] // secondary index si := struct { ClusteringKeys []string `json:"ck"` PartitionKeys []string `json:"pk"` }{} if err := json.Unmarshal([]byte(indexTarget), &si); err == nil { indexTarget = fmt.Sprintf("(%s), %s", strings.Join(si.PartitionKeys, ","), strings.Join(si.ClusteringKeys, ","), ) } _, err := fmt.Fprintf(w, "\nCREATE INDEX %s ON %s.%s (%s);\n", im.Name, im.KeyspaceName, im.TableName, indexTarget, ) if err != nil { return err } return nil } var keyspaceCQLTemplate = template.Must(template.New("keyspace"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "fixStrategy": cqlHelpers.fixStrategy, }). Parse(`CREATE KEYSPACE {{ .Name }} WITH replication = { 'class': {{ escape ( fixStrategy .StrategyClass) }} {{- range $key, $value := .StrategyOptions }}, {{ escape $key }}: {{ escape $value }} {{- end }} }{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }}; `)) func (km *KeyspaceMetadata) keyspaceToCQL(w io.Writer) error { if err := keyspaceCQLTemplate.Execute(w, km); err != nil { return err } return nil } func contains(in []string, v string) bool { for _, e := range in { if e == v { return true } } return false } type toCQLHelpers struct{} var cqlHelpers = toCQLHelpers{} func (h toCQLHelpers) zip(a []string, b []string) [][]string { m := make([][]string, len(a)) for i := range a
return m } func (h toCQLHelpers) escape(e interface{}) string { switch v := e.(type) { case int, float64: return fmt.Sprint(v) case bool: if v { return "true" } return "false" case string: return "'" + strings.ReplaceAll(v, "'", "''") + "'" case []byte: return string(v) } return "" } func (h toCQLHelpers) stripFrozen(v string) string { return strings.TrimSuffix(strings.TrimPrefix(v, "frozen<"), ">") } func (h toCQLHelpers) fixStrategy(v string) string { return strings.TrimPrefix(v, "org.apache.cassandra.locator.") } func (h toCQLHelpers) fixQuote(v string) string { return strings.ReplaceAll(v, `"`, `'`) } func (h toCQLHelpers) tableOptionsToCQL(ops TableMetadataOptions) ([]string, error) { opts := map[string]interface{}{ "bloom_filter_fp_chance": ops.BloomFilterFpChance, "comment": ops.Comment, "crc_check_chance": ops.CrcCheckChance, "dclocal_read_repair_chance": ops.DcLocalReadRepairChance, "default_time_to_live": ops.DefaultTimeToLive, "gc_grace_seconds": ops.GcGraceSeconds, "max_index_interval": ops.MaxIndexInterval, "memtable_flush_period_in_ms": ops.MemtableFlushPeriodInMs, "min_index_interval": ops.MinIndexInterval, "read_repair_chance": ops.ReadRepairChance, "speculative_retry": ops.SpeculativeRetry, } var err error opts["caching"], err = json.Marshal(ops.Caching) if err != nil { return nil, err } opts["compaction"], err = json.Marshal(ops.Compaction) if err != nil { return nil, err } opts["compression"], err = json.Marshal(ops.Compression) if err != nil { return nil, err } cdc, err := json.Marshal(ops.CDC) if err != nil { return nil, err } if string(cdc) != "null" { opts["cdc"] = cdc } if ops.InMemory { opts["in_memory"] = ops.InMemory } out := make([]string, 0, len(opts)) for key, opt := range opts { out = append(out, fmt.Sprintf("%s = %s", key, h.fixQuote(h.escape(opt)))) } sort.Strings(out) return out, nil } func (h toCQLHelpers) tableExtensionsToCQL(extensions map[string]interface{}) ([]string, error) { exts := map[string]interface{}{} if blob, ok := extensions["scylla_encryption_options"]; ok { encOpts := &scyllaEncryptionOptions{} if err := encOpts.UnmarshalBinary(blob.([]byte)); err != nil { return nil, err } var err error exts["scylla_encryption_options"], err = json.Marshal(encOpts) if err != nil { return nil, err } } out := make([]string, 0, len(exts)) for key, ext := range exts { out = append(out, fmt.Sprintf("%s = %s", key, h.fixQuote(h.escape(ext)))) } sort.Strings(out) return out, nil } func (h toCQLHelpers) tablePropertiesToCQL(cks []*ColumnMetadata, opts TableMetadataOptions, flags []string, extensions map[string]interface{}) (string, error) { var sb strings.Builder var properties []string compactStorage := len(flags) > 0 && (contains(flags, TableFlagDense) || contains(flags, TableFlagSuper) || !contains(flags, TableFlagCompound)) if compactStorage { properties = append(properties, "COMPACT STORAGE") } if len(cks) > 0 { var inner []string for _, col := range cks { inner = append(inner, fmt.Sprintf("%s %s", col.Name, col.ClusteringOrder)) } properties = append(properties, fmt.Sprintf("CLUSTERING ORDER BY (%s)", strings.Join(inner, ", "))) } options, err := h.tableOptionsToCQL(opts) if err != nil { return "", err } properties = append(properties, options...) exts, err := h.tableExtensionsToCQL(extensions) if err != nil { return "", err } properties = append(properties, exts...) 
sb.WriteString(strings.Join(properties, "\n AND ")) return sb.String(), nil } func (h toCQLHelpers) tableColumnToCQL(tm *TableMetadata) string { var sb strings.Builder var columns []string for _, cn := range tm.OrderedColumns { cm := tm.Columns[cn] column := fmt.Sprintf("%s %s", cn, cm.Type) if cm.Kind == ColumnStatic { column += " static" } columns = append(columns, column) } if len(tm.PartitionKey) == 1 && len(tm.ClusteringColumns) == 0 && len(columns) > 0 { columns[0] += " PRIMARY KEY" } sb.WriteString(strings.Join(columns, ",\n ")) if len(tm.PartitionKey) > 1 || len(tm.ClusteringColumns) > 0 { sb.WriteString(",\n PRIMARY KEY (") sb.WriteString(h.partitionKeyString(tm.PartitionKey, tm.ClusteringColumns)) sb.WriteRune(')') } return sb.String() } func (h toCQLHelpers) partitionKeyString(pks, cks []*ColumnMetadata) string { var sb strings.Builder if len(pks) > 1 { sb.WriteRune('(') for i, pk := range pks { if i != 0 { sb.WriteString(", ") } sb.WriteString(pk.Name) } sb.WriteRune(')') } else { sb.WriteString(pks[0].Name) } if len(cks) > 0 { sb.WriteString(", ") for i, ck := range cks { if i != 0 { sb.WriteString(", ") } sb.WriteString(ck.Name) } } return sb.String() } type scyllaEncryptionOptions struct { CipherAlgorithm string `json:"cipher_algorithm"` SecretKeyStrength int `json:"secret_key_strength"` KeyProvider string `json:"key_provider"` SecretKeyFile string `json:"secret_key_file"` } // UnmarshalBinary deserializes blob into scyllaEncryptionOptions. // Format: // * 4 bytes - size of KV map // Size times: // * 4 bytes - length of key // * len_of_key bytes - key // * 4 bytes - length of value // * len_of_value bytes - value func (enc *scyllaEncryptionOptions) UnmarshalBinary(data []byte) error { size := binary.LittleEndian.Uint32(data[0:4]) m := make(map[string]string, size) off := uint32(4) for i := uint32(0); i < size; i++ { keyLen := binary.LittleEndian.Uint32(data[off : off+4]) off += 4 key := string(data[off : off+keyLen]) off += keyLen valueLen := binary.LittleEndian.Uint32(data[off : off+4]) off += 4 value := string(data[off : off+valueLen]) off += valueLen m[key] = value } enc.CipherAlgorithm = m["cipher_algorithm"] enc.KeyProvider = m["key_provider"] enc.SecretKeyFile = m["secret_key_file"] if secretKeyStrength, ok := m["secret_key_strength"]; ok { sks, err := strconv.Atoi(secretKeyStrength) if err != nil { return err } enc.SecretKeyStrength = sks } return nil }
{ m[i] = []string{a[i], b[i]} }
conditional_block
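The elided conditional block here is the loop body of the `zip` helper, which pairs two parallel slices so the CQL templates can `range` over the pairs and unpack each half with `index`. A runnable sketch of that pattern, with illustrative names and a reduced template:

```go
package main

import (
	"os"
	"text/template"
)

// zip pairs parallel slices, as in the dataset's cqlHelpers.zip.
func zip(a, b []string) [][]string {
	m := make([][]string, len(a))
	for i := range a {
		m[i] = []string{a[i], b[i]}
	}
	return m
}

var tmpl = template.Must(template.New("type").
	Funcs(template.FuncMap{"zip": zip}).
	Parse(`CREATE TYPE demo (
{{- range $i, $f := zip .Names .Types }}
{{- if ne $i 0 }},{{ end }}
  {{ index $f 0 }} {{ index $f 1 }}
{{- end }}
);
`))

func main() {
	data := map[string][]string{
		"Names": {"id", "name"},
		"Types": {"uuid", "text"},
	}
	// Prints:
	// CREATE TYPE demo (
	//   id uuid,
	//   name text
	// );
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```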
recreate.go
// +build !cassandra // Copyright (C) 2017 ScyllaDB package gocql import ( "encoding/binary" "encoding/json" "fmt" "io" "sort" "strconv" "strings" "text/template" ) // ToCQL returns a CQL query that ca be used to recreate keyspace with all // user defined types, tables, indexes, functions, aggregates and views associated // with this keyspace. func (km *KeyspaceMetadata) ToCQL() (string, error) { var sb strings.Builder if err := km.keyspaceToCQL(&sb); err != nil { return "", err } sortedTypes := km.typesSortedTopologically() for _, tm := range sortedTypes { if err := km.userTypeToCQL(&sb, tm); err != nil { return "", err } } for _, tm := range km.Tables { if err := km.tableToCQL(&sb, km.Name, tm); err != nil { return "", err } } for _, im := range km.Indexes { if err := km.indexToCQL(&sb, im); err != nil { return "", err } } for _, fm := range km.Functions { if err := km.functionToCQL(&sb, km.Name, fm); err != nil { return "", err } } for _, am := range km.Aggregates { if err := km.aggregateToCQL(&sb, am); err != nil { return "", err } } for _, vm := range km.Views { if err := km.viewToCQL(&sb, vm); err != nil { return "", err } } return sb.String(), nil } func (km *KeyspaceMetadata) typesSortedTopologically() []*TypeMetadata { sortedTypes := make([]*TypeMetadata, 0, len(km.Types)) for _, tm := range km.Types { sortedTypes = append(sortedTypes, tm) } sort.Slice(sortedTypes, func(i, j int) bool { for _, ft := range sortedTypes[j].FieldTypes { if strings.Contains(ft, sortedTypes[i].Name) { return true } } return false }) return sortedTypes } var tableCQLTemplate = template.Must(template.New("table"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "tableColumnToCQL": cqlHelpers.tableColumnToCQL, "tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL, }). Parse(` CREATE TABLE {{ .KeyspaceName }}.{{ .Tm.Name }} ( {{ tableColumnToCQL .Tm }} ) WITH {{ tablePropertiesToCQL .Tm.ClusteringColumns .Tm.Options .Tm.Flags .Tm.Extensions }}; `)) func (km *KeyspaceMetadata) tableToCQL(w io.Writer, kn string, tm *TableMetadata) error { if err := tableCQLTemplate.Execute(w, map[string]interface{}{ "Tm": tm, "KeyspaceName": kn, }); err != nil { return err } return nil } var functionTemplate = template.Must(template.New("functions"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "zip": cqlHelpers.zip, "stripFrozen": cqlHelpers.stripFrozen, }). Parse(` CREATE FUNCTION {{ escape .keyspaceName }}.{{ escape .fm.Name }} ( {{- range $i, $args := zip .fm.ArgumentNames .fm.ArgumentTypes }} {{- if ne $i 0 }}, {{ end }} {{- escape (index $args 0) }} {{ stripFrozen (index $args 1) }} {{- end -}}) {{ if .fm.CalledOnNullInput }}CALLED{{ else }}RETURNS NULL{{ end }} ON NULL INPUT RETURNS {{ .fm.ReturnType }} LANGUAGE {{ .fm.Language }} AS $${{ .fm.Body }}$$; `)) func (km *KeyspaceMetadata) functionToCQL(w io.Writer, keyspaceName string, fm *FunctionMetadata) error { if err := functionTemplate.Execute(w, map[string]interface{}{ "fm": fm, "keyspaceName": keyspaceName, }); err != nil { return err } return nil } var viewTemplate = template.Must(template.New("views"). Funcs(map[string]interface{}{ "zip": cqlHelpers.zip, "partitionKeyString": cqlHelpers.partitionKeyString, "tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL, }). 
Parse(` CREATE MATERIALIZED VIEW {{ .vm.KeyspaceName }}.{{ .vm.ViewName }} AS SELECT {{ if .vm.IncludeAllColumns }}*{{ else }} {{- range $i, $col := .vm.OrderedColumns }} {{- if ne $i 0 }}, {{ end }} {{ $col }} {{- end }} {{- end }} FROM {{ .vm.KeyspaceName }}.{{ .vm.BaseTableName }} WHERE {{ .vm.WhereClause }} PRIMARY KEY ({{ partitionKeyString .vm.PartitionKey .vm.ClusteringColumns }}) WITH {{ tablePropertiesToCQL .vm.ClusteringColumns .vm.Options .flags .vm.Extensions }}; `)) func (km *KeyspaceMetadata) viewToCQL(w io.Writer, vm *ViewMetadata) error { if err := viewTemplate.Execute(w, map[string]interface{}{ "vm": vm, "flags": []string{}, }); err != nil { return err } return nil } var aggregatesTemplate = template.Must(template.New("aggregate"). Funcs(map[string]interface{}{ "stripFrozen": cqlHelpers.stripFrozen, }). Parse(` CREATE AGGREGATE {{ .Keyspace }}.{{ .Name }}( {{- range $arg, $i := .ArgumentTypes }} {{- if ne $i 0 }}, {{ end }} {{ stripFrozen $arg }} {{- end -}}) SFUNC {{ .StateFunc.Name }} STYPE {{ stripFrozen .State }} {{- if ne .FinalFunc.Name "" }} FINALFUNC {{ .FinalFunc.Name }} {{- end -}} {{- if ne .InitCond "" }} INITCOND {{ .InitCond }} {{- end -}} ); `)) func (km *KeyspaceMetadata) aggregateToCQL(w io.Writer, am *AggregateMetadata) error { if err := aggregatesTemplate.Execute(w, am); err != nil { return err } return nil } var typeCQLTemplate = template.Must(template.New("types"). Funcs(map[string]interface{}{ "zip": cqlHelpers.zip, }). Parse(` CREATE TYPE {{ .Keyspace }}.{{ .Name }} ( {{- range $i, $fields := zip .FieldNames .FieldTypes }} {{- if ne $i 0 }},{{ end }} {{ index $fields 0 }} {{ index $fields 1 }} {{- end }} ); `)) func (km *KeyspaceMetadata) userTypeToCQL(w io.Writer, tm *TypeMetadata) error { if err := typeCQLTemplate.Execute(w, tm); err != nil { return err } return nil } func (km *KeyspaceMetadata) indexToCQL(w io.Writer, im *IndexMetadata) error { // Scylla doesn't support any custom indexes if im.Kind == IndexKindCustom { return nil } options := im.Options indexTarget := options["target"] // secondary index si := struct { ClusteringKeys []string `json:"ck"` PartitionKeys []string `json:"pk"` }{} if err := json.Unmarshal([]byte(indexTarget), &si); err == nil { indexTarget = fmt.Sprintf("(%s), %s", strings.Join(si.PartitionKeys, ","), strings.Join(si.ClusteringKeys, ","), ) } _, err := fmt.Fprintf(w, "\nCREATE INDEX %s ON %s.%s (%s);\n", im.Name, im.KeyspaceName, im.TableName, indexTarget, ) if err != nil { return err } return nil } var keyspaceCQLTemplate = template.Must(template.New("keyspace"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "fixStrategy": cqlHelpers.fixStrategy, }). 
Parse(`CREATE KEYSPACE {{ .Name }} WITH replication = { 'class': {{ escape ( fixStrategy .StrategyClass) }} {{- range $key, $value := .StrategyOptions }}, {{ escape $key }}: {{ escape $value }} {{- end }} }{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }}; `)) func (km *KeyspaceMetadata) keyspaceToCQL(w io.Writer) error { if err := keyspaceCQLTemplate.Execute(w, km); err != nil { return err } return nil } func contains(in []string, v string) bool { for _, e := range in { if e == v { return true } } return false } type toCQLHelpers struct{} var cqlHelpers = toCQLHelpers{} func (h toCQLHelpers) zip(a []string, b []string) [][]string { m := make([][]string, len(a)) for i := range a { m[i] = []string{a[i], b[i]} } return m } func (h toCQLHelpers) escape(e interface{}) string { switch v := e.(type) { case int, float64: return fmt.Sprint(v) case bool: if v { return "true" } return "false" case string: return "'" + strings.ReplaceAll(v, "'", "''") + "'" case []byte: return string(v) } return "" } func (h toCQLHelpers) stripFrozen(v string) string { return strings.TrimSuffix(strings.TrimPrefix(v, "frozen<"), ">") } func (h toCQLHelpers) fixStrategy(v string) string { return strings.TrimPrefix(v, "org.apache.cassandra.locator.") } func (h toCQLHelpers) fixQuote(v string) string { return strings.ReplaceAll(v, `"`, `'`) } func (h toCQLHelpers) tableOptionsToCQL(ops TableMetadataOptions) ([]string, error)
func (h toCQLHelpers) tableExtensionsToCQL(extensions map[string]interface{}) ([]string, error) { exts := map[string]interface{}{} if blob, ok := extensions["scylla_encryption_options"]; ok { encOpts := &scyllaEncryptionOptions{} if err := encOpts.UnmarshalBinary(blob.([]byte)); err != nil { return nil, err } var err error exts["scylla_encryption_options"], err = json.Marshal(encOpts) if err != nil { return nil, err } } out := make([]string, 0, len(exts)) for key, ext := range exts { out = append(out, fmt.Sprintf("%s = %s", key, h.fixQuote(h.escape(ext)))) } sort.Strings(out) return out, nil } func (h toCQLHelpers) tablePropertiesToCQL(cks []*ColumnMetadata, opts TableMetadataOptions, flags []string, extensions map[string]interface{}) (string, error) { var sb strings.Builder var properties []string compactStorage := len(flags) > 0 && (contains(flags, TableFlagDense) || contains(flags, TableFlagSuper) || !contains(flags, TableFlagCompound)) if compactStorage { properties = append(properties, "COMPACT STORAGE") } if len(cks) > 0 { var inner []string for _, col := range cks { inner = append(inner, fmt.Sprintf("%s %s", col.Name, col.ClusteringOrder)) } properties = append(properties, fmt.Sprintf("CLUSTERING ORDER BY (%s)", strings.Join(inner, ", "))) } options, err := h.tableOptionsToCQL(opts) if err != nil { return "", err } properties = append(properties, options...) exts, err := h.tableExtensionsToCQL(extensions) if err != nil { return "", err } properties = append(properties, exts...) sb.WriteString(strings.Join(properties, "\n AND ")) return sb.String(), nil } func (h toCQLHelpers) tableColumnToCQL(tm *TableMetadata) string { var sb strings.Builder var columns []string for _, cn := range tm.OrderedColumns { cm := tm.Columns[cn] column := fmt.Sprintf("%s %s", cn, cm.Type) if cm.Kind == ColumnStatic { column += " static" } columns = append(columns, column) } if len(tm.PartitionKey) == 1 && len(tm.ClusteringColumns) == 0 && len(columns) > 0 { columns[0] += " PRIMARY KEY" } sb.WriteString(strings.Join(columns, ",\n ")) if len(tm.PartitionKey) > 1 || len(tm.ClusteringColumns) > 0 { sb.WriteString(",\n PRIMARY KEY (") sb.WriteString(h.partitionKeyString(tm.PartitionKey, tm.ClusteringColumns)) sb.WriteRune(')') } return sb.String() } func (h toCQLHelpers) partitionKeyString(pks, cks []*ColumnMetadata) string { var sb strings.Builder if len(pks) > 1 { sb.WriteRune('(') for i, pk := range pks { if i != 0 { sb.WriteString(", ") } sb.WriteString(pk.Name) } sb.WriteRune(')') } else { sb.WriteString(pks[0].Name) } if len(cks) > 0 { sb.WriteString(", ") for i, ck := range cks { if i != 0 { sb.WriteString(", ") } sb.WriteString(ck.Name) } } return sb.String() } type scyllaEncryptionOptions struct { CipherAlgorithm string `json:"cipher_algorithm"` SecretKeyStrength int `json:"secret_key_strength"` KeyProvider string `json:"key_provider"` SecretKeyFile string `json:"secret_key_file"` } // UnmarshalBinary deserializes blob into scyllaEncryptionOptions. 
// Format: // * 4 bytes - size of KV map // Size times: // * 4 bytes - length of key // * len_of_key bytes - key // * 4 bytes - length of value // * len_of_value bytes - value func (enc *scyllaEncryptionOptions) UnmarshalBinary(data []byte) error { size := binary.LittleEndian.Uint32(data[0:4]) m := make(map[string]string, size) off := uint32(4) for i := uint32(0); i < size; i++ { keyLen := binary.LittleEndian.Uint32(data[off : off+4]) off += 4 key := string(data[off : off+keyLen]) off += keyLen valueLen := binary.LittleEndian.Uint32(data[off : off+4]) off += 4 value := string(data[off : off+valueLen]) off += valueLen m[key] = value } enc.CipherAlgorithm = m["cipher_algorithm"] enc.KeyProvider = m["key_provider"] enc.SecretKeyFile = m["secret_key_file"] if secretKeyStrength, ok := m["secret_key_strength"]; ok { sks, err := strconv.Atoi(secretKeyStrength) if err != nil { return err } enc.SecretKeyStrength = sks } return nil }
{ opts := map[string]interface{}{ "bloom_filter_fp_chance": ops.BloomFilterFpChance, "comment": ops.Comment, "crc_check_chance": ops.CrcCheckChance, "dclocal_read_repair_chance": ops.DcLocalReadRepairChance, "default_time_to_live": ops.DefaultTimeToLive, "gc_grace_seconds": ops.GcGraceSeconds, "max_index_interval": ops.MaxIndexInterval, "memtable_flush_period_in_ms": ops.MemtableFlushPeriodInMs, "min_index_interval": ops.MinIndexInterval, "read_repair_chance": ops.ReadRepairChance, "speculative_retry": ops.SpeculativeRetry, } var err error opts["caching"], err = json.Marshal(ops.Caching) if err != nil { return nil, err } opts["compaction"], err = json.Marshal(ops.Compaction) if err != nil { return nil, err } opts["compression"], err = json.Marshal(ops.Compression) if err != nil { return nil, err } cdc, err := json.Marshal(ops.CDC) if err != nil { return nil, err } if string(cdc) != "null" { opts["cdc"] = cdc } if ops.InMemory { opts["in_memory"] = ops.InMemory } out := make([]string, 0, len(opts)) for key, opt := range opts { out = append(out, fmt.Sprintf("%s = %s", key, h.fixQuote(h.escape(opt)))) } sort.Strings(out) return out, nil }
identifier_body
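For context on the row above: `ToCQL` walks the keyspace metadata (types topologically sorted first, then tables, indexes, functions, aggregates, views) and renders each piece through the templates shown. A minimal, hypothetical usage sketch in Go — it assumes the ScyllaDB gocql fork that defines `ToCQL` (typically wired in via a `replace` directive on `github.com/gocql/gocql`); the node address and keyspace name are made up:

package main

import (
	"fmt"
	"log"

	gocql "github.com/gocql/gocql" // assumption: replaced by the ScyllaDB fork that defines ToCQL
)

func main() {
	cluster := gocql.NewCluster("127.0.0.1") // hypothetical node address
	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// KeyspaceMetadata loads the schema from the system tables;
	// ToCQL renders it back into executable CQL statements.
	km, err := session.KeyspaceMetadata("examples") // hypothetical keyspace
	if err != nil {
		log.Fatal(err)
	}
	cql, err := km.ToCQL()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cql)
}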
recreate.go
// +build !cassandra // Copyright (C) 2017 ScyllaDB package gocql import ( "encoding/binary" "encoding/json" "fmt" "io" "sort" "strconv" "strings" "text/template" ) // ToCQL returns a CQL query that can be used to recreate the keyspace with all // user defined types, tables, indexes, functions, aggregates and views associated // with this keyspace. func (km *KeyspaceMetadata) ToCQL() (string, error) { var sb strings.Builder if err := km.keyspaceToCQL(&sb); err != nil { return "", err } sortedTypes := km.typesSortedTopologically() for _, tm := range sortedTypes { if err := km.userTypeToCQL(&sb, tm); err != nil { return "", err } } for _, tm := range km.Tables { if err := km.tableToCQL(&sb, km.Name, tm); err != nil { return "", err } } for _, im := range km.Indexes { if err := km.indexToCQL(&sb, im); err != nil { return "", err } } for _, fm := range km.Functions { if err := km.functionToCQL(&sb, km.Name, fm); err != nil { return "", err } } for _, am := range km.Aggregates { if err := km.aggregateToCQL(&sb, am); err != nil { return "", err } } for _, vm := range km.Views { if err := km.viewToCQL(&sb, vm); err != nil { return "", err } } return sb.String(), nil } func (km *KeyspaceMetadata) typesSortedTopologically() []*TypeMetadata { sortedTypes := make([]*TypeMetadata, 0, len(km.Types)) for _, tm := range km.Types { sortedTypes = append(sortedTypes, tm) } sort.Slice(sortedTypes, func(i, j int) bool { for _, ft := range sortedTypes[j].FieldTypes { if strings.Contains(ft, sortedTypes[i].Name) { return true } } return false }) return sortedTypes } var tableCQLTemplate = template.Must(template.New("table"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "tableColumnToCQL": cqlHelpers.tableColumnToCQL, "tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL, }). Parse(` CREATE TABLE {{ .KeyspaceName }}.{{ .Tm.Name }} ( {{ tableColumnToCQL .Tm }} ) WITH {{ tablePropertiesToCQL .Tm.ClusteringColumns .Tm.Options .Tm.Flags .Tm.Extensions }}; `)) func (km *KeyspaceMetadata) tableToCQL(w io.Writer, kn string, tm *TableMetadata) error { if err := tableCQLTemplate.Execute(w, map[string]interface{}{ "Tm": tm, "KeyspaceName": kn, }); err != nil { return err } return nil } var functionTemplate = template.Must(template.New("functions"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "zip": cqlHelpers.zip, "stripFrozen": cqlHelpers.stripFrozen, }). Parse(` CREATE FUNCTION {{ escape .keyspaceName }}.{{ escape .fm.Name }} ( {{- range $i, $args := zip .fm.ArgumentNames .fm.ArgumentTypes }} {{- if ne $i 0 }}, {{ end }} {{- escape (index $args 0) }} {{ stripFrozen (index $args 1) }} {{- end -}}) {{ if .fm.CalledOnNullInput }}CALLED{{ else }}RETURNS NULL{{ end }} ON NULL INPUT RETURNS {{ .fm.ReturnType }} LANGUAGE {{ .fm.Language }} AS $${{ .fm.Body }}$$; `)) func (km *KeyspaceMetadata) functionToCQL(w io.Writer, keyspaceName string, fm *FunctionMetadata) error { if err := functionTemplate.Execute(w, map[string]interface{}{ "fm": fm,
"keyspaceName": keyspaceName, }); err != nil { return err } return nil } var viewTemplate = template.Must(template.New("views"). Funcs(map[string]interface{}{ "zip": cqlHelpers.zip, "partitionKeyString": cqlHelpers.partitionKeyString, "tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL, }). Parse(` CREATE MATERIALIZED VIEW {{ .vm.KeyspaceName }}.{{ .vm.ViewName }} AS SELECT {{ if .vm.IncludeAllColumns }}*{{ else }} {{- range $i, $col := .vm.OrderedColumns }} {{- if ne $i 0 }}, {{ end }} {{ $col }} {{- end }} {{- end }} FROM {{ .vm.KeyspaceName }}.{{ .vm.BaseTableName }} WHERE {{ .vm.WhereClause }} PRIMARY KEY ({{ partitionKeyString .vm.PartitionKey .vm.ClusteringColumns }}) WITH {{ tablePropertiesToCQL .vm.ClusteringColumns .vm.Options .flags .vm.Extensions }}; `)) func (km *KeyspaceMetadata) viewToCQL(w io.Writer, vm *ViewMetadata) error { if err := viewTemplate.Execute(w, map[string]interface{}{ "vm": vm, "flags": []string{}, }); err != nil { return err } return nil } var aggregatesTemplate = template.Must(template.New("aggregate"). Funcs(map[string]interface{}{ "stripFrozen": cqlHelpers.stripFrozen, }). Parse(` CREATE AGGREGATE {{ .Keyspace }}.{{ .Name }}( {{- range $arg, $i := .ArgumentTypes }} {{- if ne $i 0 }}, {{ end }} {{ stripFrozen $arg }} {{- end -}}) SFUNC {{ .StateFunc.Name }} STYPE {{ stripFrozen .State }} {{- if ne .FinalFunc.Name "" }} FINALFUNC {{ .FinalFunc.Name }} {{- end -}} {{- if ne .InitCond "" }} INITCOND {{ .InitCond }} {{- end -}} ); `)) func (km *KeyspaceMetadata) aggregateToCQL(w io.Writer, am *AggregateMetadata) error { if err := aggregatesTemplate.Execute(w, am); err != nil { return err } return nil } var typeCQLTemplate = template.Must(template.New("types"). Funcs(map[string]interface{}{ "zip": cqlHelpers.zip, }). Parse(` CREATE TYPE {{ .Keyspace }}.{{ .Name }} ( {{- range $i, $fields := zip .FieldNames .FieldTypes }} {{- if ne $i 0 }},{{ end }} {{ index $fields 0 }} {{ index $fields 1 }} {{- end }} ); `)) func (km *KeyspaceMetadata) userTypeToCQL(w io.Writer, tm *TypeMetadata) error { if err := typeCQLTemplate.Execute(w, tm); err != nil { return err } return nil } func (km *KeyspaceMetadata) indexToCQL(w io.Writer, im *IndexMetadata) error { // Scylla doesn't support any custom indexes if im.Kind == IndexKindCustom { return nil } options := im.Options indexTarget := options["target"] // secondary index si := struct { ClusteringKeys []string `json:"ck"` PartitionKeys []string `json:"pk"` }{} if err := json.Unmarshal([]byte(indexTarget), &si); err == nil { indexTarget = fmt.Sprintf("(%s), %s", strings.Join(si.PartitionKeys, ","), strings.Join(si.ClusteringKeys, ","), ) } _, err := fmt.Fprintf(w, "\nCREATE INDEX %s ON %s.%s (%s);\n", im.Name, im.KeyspaceName, im.TableName, indexTarget, ) if err != nil { return err } return nil } var keyspaceCQLTemplate = template.Must(template.New("keyspace"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "fixStrategy": cqlHelpers.fixStrategy, }). 
Parse(`CREATE KEYSPACE {{ .Name }} WITH replication = { 'class': {{ escape ( fixStrategy .StrategyClass) }} {{- range $key, $value := .StrategyOptions }}, {{ escape $key }}: {{ escape $value }} {{- end }} }{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }}; `)) func (km *KeyspaceMetadata) keyspaceToCQL(w io.Writer) error { if err := keyspaceCQLTemplate.Execute(w, km); err != nil { return err } return nil } func contains(in []string, v string) bool { for _, e := range in { if e == v { return true } } return false } type toCQLHelpers struct{} var cqlHelpers = toCQLHelpers{} func (h toCQLHelpers) zip(a []string, b []string) [][]string { m := make([][]string, len(a)) for i := range a { m[i] = []string{a[i], b[i]} } return m } func (h toCQLHelpers) escape(e interface{}) string { switch v := e.(type) { case int, float64: return fmt.Sprint(v) case bool: if v { return "true" } return "false" case string: return "'" + strings.ReplaceAll(v, "'", "''") + "'" case []byte: return string(v) } return "" } func (h toCQLHelpers) stripFrozen(v string) string { return strings.TrimSuffix(strings.TrimPrefix(v, "frozen<"), ">") } func (h toCQLHelpers) fixStrategy(v string) string { return strings.TrimPrefix(v, "org.apache.cassandra.locator.") } func (h toCQLHelpers) fixQuote(v string) string { return strings.ReplaceAll(v, `"`, `'`) } func (h toCQLHelpers) tableOptionsToCQL(ops TableMetadataOptions) ([]string, error) { opts := map[string]interface{}{ "bloom_filter_fp_chance": ops.BloomFilterFpChance, "comment": ops.Comment, "crc_check_chance": ops.CrcCheckChance, "dclocal_read_repair_chance": ops.DcLocalReadRepairChance, "default_time_to_live": ops.DefaultTimeToLive, "gc_grace_seconds": ops.GcGraceSeconds, "max_index_interval": ops.MaxIndexInterval, "memtable_flush_period_in_ms": ops.MemtableFlushPeriodInMs, "min_index_interval": ops.MinIndexInterval, "read_repair_chance": ops.ReadRepairChance, "speculative_retry": ops.SpeculativeRetry, } var err error opts["caching"], err = json.Marshal(ops.Caching) if err != nil { return nil, err } opts["compaction"], err = json.Marshal(ops.Compaction) if err != nil { return nil, err } opts["compression"], err = json.Marshal(ops.Compression) if err != nil { return nil, err } cdc, err := json.Marshal(ops.CDC) if err != nil { return nil, err } if string(cdc) != "null" { opts["cdc"] = cdc } if ops.InMemory { opts["in_memory"] = ops.InMemory } out := make([]string, 0, len(opts)) for key, opt := range opts { out = append(out, fmt.Sprintf("%s = %s", key, h.fixQuote(h.escape(opt)))) } sort.Strings(out) return out, nil } func (h toCQLHelpers) tableExtensionsToCQL(extensions map[string]interface{}) ([]string, error) { exts := map[string]interface{}{} if blob, ok := extensions["scylla_encryption_options"]; ok { encOpts := &scyllaEncryptionOptions{} if err := encOpts.UnmarshalBinary(blob.([]byte)); err != nil { return nil, err } var err error exts["scylla_encryption_options"], err = json.Marshal(encOpts) if err != nil { return nil, err } } out := make([]string, 0, len(exts)) for key, ext := range exts { out = append(out, fmt.Sprintf("%s = %s", key, h.fixQuote(h.escape(ext)))) } sort.Strings(out) return out, nil } func (h toCQLHelpers) tablePropertiesToCQL(cks []*ColumnMetadata, opts TableMetadataOptions, flags []string, extensions map[string]interface{}) (string, error) { var sb strings.Builder var properties []string compactStorage := len(flags) > 0 && (contains(flags, TableFlagDense) || contains(flags, TableFlagSuper) || !contains(flags, TableFlagCompound)) if 
compactStorage { properties = append(properties, "COMPACT STORAGE") } if len(cks) > 0 { var inner []string for _, col := range cks { inner = append(inner, fmt.Sprintf("%s %s", col.Name, col.ClusteringOrder)) } properties = append(properties, fmt.Sprintf("CLUSTERING ORDER BY (%s)", strings.Join(inner, ", "))) } options, err := h.tableOptionsToCQL(opts) if err != nil { return "", err } properties = append(properties, options...) exts, err := h.tableExtensionsToCQL(extensions) if err != nil { return "", err } properties = append(properties, exts...) sb.WriteString(strings.Join(properties, "\n AND ")) return sb.String(), nil } func (h toCQLHelpers) tableColumnToCQL(tm *TableMetadata) string { var sb strings.Builder var columns []string for _, cn := range tm.OrderedColumns { cm := tm.Columns[cn] column := fmt.Sprintf("%s %s", cn, cm.Type) if cm.Kind == ColumnStatic { column += " static" } columns = append(columns, column) } if len(tm.PartitionKey) == 1 && len(tm.ClusteringColumns) == 0 && len(columns) > 0 { columns[0] += " PRIMARY KEY" } sb.WriteString(strings.Join(columns, ",\n ")) if len(tm.PartitionKey) > 1 || len(tm.ClusteringColumns) > 0 { sb.WriteString(",\n PRIMARY KEY (") sb.WriteString(h.partitionKeyString(tm.PartitionKey, tm.ClusteringColumns)) sb.WriteRune(')') } return sb.String() } func (h toCQLHelpers) partitionKeyString(pks, cks []*ColumnMetadata) string { var sb strings.Builder if len(pks) > 1 { sb.WriteRune('(') for i, pk := range pks { if i != 0 { sb.WriteString(", ") } sb.WriteString(pk.Name) } sb.WriteRune(')') } else { sb.WriteString(pks[0].Name) } if len(cks) > 0 { sb.WriteString(", ") for i, ck := range cks { if i != 0 { sb.WriteString(", ") } sb.WriteString(ck.Name) } } return sb.String() } type scyllaEncryptionOptions struct { CipherAlgorithm string `json:"cipher_algorithm"` SecretKeyStrength int `json:"secret_key_strength"` KeyProvider string `json:"key_provider"` SecretKeyFile string `json:"secret_key_file"` } // UnmarshalBinary deserializes blob into scyllaEncryptionOptions. // Format: // * 4 bytes - size of KV map // Size times: // * 4 bytes - length of key // * len_of_key bytes - key // * 4 bytes - length of value // * len_of_value bytes - value func (enc *scyllaEncryptionOptions) UnmarshalBinary(data []byte) error { size := binary.LittleEndian.Uint32(data[0:4]) m := make(map[string]string, size) off := uint32(4) for i := uint32(0); i < size; i++ { keyLen := binary.LittleEndian.Uint32(data[off : off+4]) off += 4 key := string(data[off : off+keyLen]) off += keyLen valueLen := binary.LittleEndian.Uint32(data[off : off+4]) off += 4 value := string(data[off : off+valueLen]) off += valueLen m[key] = value } enc.CipherAlgorithm = m["cipher_algorithm"] enc.KeyProvider = m["key_provider"] enc.SecretKeyFile = m["secret_key_file"] if secretKeyStrength, ok := m["secret_key_strength"]; ok { sks, err := strconv.Atoi(secretKeyStrength) if err != nil { return err } enc.SecretKeyStrength = sks } return nil }
random_line_split
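The byte layout documented above `UnmarshalBinary` (a little-endian uint32 entry count, then a length-prefixed key and a length-prefixed value per entry) can be exercised with a hand-rolled encoder. A test-style sketch under stated assumptions: it lives in package gocql because `scyllaEncryptionOptions` is unexported, `encodeKV` is a helper introduced here for illustration, and the option values are made up:

package gocql

import (
	"encoding/binary"
	"testing"
)

// encodeKV builds a blob in the documented layout: a little-endian uint32
// entry count, then a length-prefixed key and a length-prefixed value per entry.
func encodeKV(pairs [][2]string) []byte {
	out := binary.LittleEndian.AppendUint32(nil, uint32(len(pairs)))
	for _, kv := range pairs {
		out = binary.LittleEndian.AppendUint32(out, uint32(len(kv[0])))
		out = append(out, kv[0]...)
		out = binary.LittleEndian.AppendUint32(out, uint32(len(kv[1])))
		out = append(out, kv[1]...)
	}
	return out
}

func TestScyllaEncryptionOptionsUnmarshalBinary(t *testing.T) {
	blob := encodeKV([][2]string{
		{"cipher_algorithm", "AES/CBC/PKCS5Padding"}, // made-up values
		{"secret_key_strength", "128"},
		{"key_provider", "LocalFileSystemKeyProviderFactory"},
		{"secret_key_file", "/etc/scylla/data_encryption_keys"},
	})

	enc := &scyllaEncryptionOptions{}
	if err := enc.UnmarshalBinary(blob); err != nil {
		t.Fatal(err)
	}
	if enc.CipherAlgorithm != "AES/CBC/PKCS5Padding" || enc.SecretKeyStrength != 128 {
		t.Fatalf("unexpected decode: %+v", enc)
	}
}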
recreate.go
// +build !cassandra // Copyright (C) 2017 ScyllaDB package gocql import ( "encoding/binary" "encoding/json" "fmt" "io" "sort" "strconv" "strings" "text/template" ) // ToCQL returns a CQL query that can be used to recreate the keyspace with all // user defined types, tables, indexes, functions, aggregates and views associated // with this keyspace. func (km *KeyspaceMetadata) ToCQL() (string, error) { var sb strings.Builder if err := km.keyspaceToCQL(&sb); err != nil { return "", err } sortedTypes := km.typesSortedTopologically() for _, tm := range sortedTypes { if err := km.userTypeToCQL(&sb, tm); err != nil { return "", err } } for _, tm := range km.Tables { if err := km.tableToCQL(&sb, km.Name, tm); err != nil { return "", err } } for _, im := range km.Indexes { if err := km.indexToCQL(&sb, im); err != nil { return "", err } } for _, fm := range km.Functions { if err := km.functionToCQL(&sb, km.Name, fm); err != nil { return "", err } } for _, am := range km.Aggregates { if err := km.aggregateToCQL(&sb, am); err != nil { return "", err } } for _, vm := range km.Views { if err := km.viewToCQL(&sb, vm); err != nil { return "", err } } return sb.String(), nil } func (km *KeyspaceMetadata) typesSortedTopologically() []*TypeMetadata { sortedTypes := make([]*TypeMetadata, 0, len(km.Types)) for _, tm := range km.Types { sortedTypes = append(sortedTypes, tm) } sort.Slice(sortedTypes, func(i, j int) bool { for _, ft := range sortedTypes[j].FieldTypes { if strings.Contains(ft, sortedTypes[i].Name) { return true } } return false }) return sortedTypes } var tableCQLTemplate = template.Must(template.New("table"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "tableColumnToCQL": cqlHelpers.tableColumnToCQL, "tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL, }). Parse(` CREATE TABLE {{ .KeyspaceName }}.{{ .Tm.Name }} ( {{ tableColumnToCQL .Tm }} ) WITH {{ tablePropertiesToCQL .Tm.ClusteringColumns .Tm.Options .Tm.Flags .Tm.Extensions }}; `)) func (km *KeyspaceMetadata) tableToCQL(w io.Writer, kn string, tm *TableMetadata) error { if err := tableCQLTemplate.Execute(w, map[string]interface{}{ "Tm": tm, "KeyspaceName": kn, }); err != nil { return err } return nil } var functionTemplate = template.Must(template.New("functions"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "zip": cqlHelpers.zip, "stripFrozen": cqlHelpers.stripFrozen, }). Parse(` CREATE FUNCTION {{ escape .keyspaceName }}.{{ escape .fm.Name }} ( {{- range $i, $args := zip .fm.ArgumentNames .fm.ArgumentTypes }} {{- if ne $i 0 }}, {{ end }} {{- escape (index $args 0) }} {{ stripFrozen (index $args 1) }} {{- end -}}) {{ if .fm.CalledOnNullInput }}CALLED{{ else }}RETURNS NULL{{ end }} ON NULL INPUT RETURNS {{ .fm.ReturnType }} LANGUAGE {{ .fm.Language }} AS $${{ .fm.Body }}$$; `)) func (km *KeyspaceMetadata) functionToCQL(w io.Writer, keyspaceName string, fm *FunctionMetadata) error { if err := functionTemplate.Execute(w, map[string]interface{}{ "fm": fm, "keyspaceName": keyspaceName, }); err != nil { return err } return nil } var viewTemplate = template.Must(template.New("views"). Funcs(map[string]interface{}{ "zip": cqlHelpers.zip, "partitionKeyString": cqlHelpers.partitionKeyString, "tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL, }).
Parse(` CREATE MATERIALIZED VIEW {{ .vm.KeyspaceName }}.{{ .vm.ViewName }} AS SELECT {{ if .vm.IncludeAllColumns }}*{{ else }} {{- range $i, $col := .vm.OrderedColumns }} {{- if ne $i 0 }}, {{ end }} {{ $col }} {{- end }} {{- end }} FROM {{ .vm.KeyspaceName }}.{{ .vm.BaseTableName }} WHERE {{ .vm.WhereClause }} PRIMARY KEY ({{ partitionKeyString .vm.PartitionKey .vm.ClusteringColumns }}) WITH {{ tablePropertiesToCQL .vm.ClusteringColumns .vm.Options .flags .vm.Extensions }}; `)) func (km *KeyspaceMetadata)
(w io.Writer, vm *ViewMetadata) error { if err := viewTemplate.Execute(w, map[string]interface{}{ "vm": vm, "flags": []string{}, }); err != nil { return err } return nil } var aggregatesTemplate = template.Must(template.New("aggregate"). Funcs(map[string]interface{}{ "stripFrozen": cqlHelpers.stripFrozen, }). Parse(` CREATE AGGREGATE {{ .Keyspace }}.{{ .Name }}( {{- range $i, $arg := .ArgumentTypes }} {{- if ne $i 0 }}, {{ end }} {{ stripFrozen $arg }} {{- end -}}) SFUNC {{ .StateFunc.Name }} STYPE {{ stripFrozen .State }} {{- if ne .FinalFunc.Name "" }} FINALFUNC {{ .FinalFunc.Name }} {{- end -}} {{- if ne .InitCond "" }} INITCOND {{ .InitCond }} {{- end -}} ); `)) func (km *KeyspaceMetadata) aggregateToCQL(w io.Writer, am *AggregateMetadata) error { if err := aggregatesTemplate.Execute(w, am); err != nil { return err } return nil } var typeCQLTemplate = template.Must(template.New("types"). Funcs(map[string]interface{}{ "zip": cqlHelpers.zip, }). Parse(` CREATE TYPE {{ .Keyspace }}.{{ .Name }} ( {{- range $i, $fields := zip .FieldNames .FieldTypes }} {{- if ne $i 0 }},{{ end }} {{ index $fields 0 }} {{ index $fields 1 }} {{- end }} ); `)) func (km *KeyspaceMetadata) userTypeToCQL(w io.Writer, tm *TypeMetadata) error { if err := typeCQLTemplate.Execute(w, tm); err != nil { return err } return nil } func (km *KeyspaceMetadata) indexToCQL(w io.Writer, im *IndexMetadata) error { // Scylla doesn't support any custom indexes if im.Kind == IndexKindCustom { return nil } options := im.Options indexTarget := options["target"] // secondary index si := struct { ClusteringKeys []string `json:"ck"` PartitionKeys []string `json:"pk"` }{} if err := json.Unmarshal([]byte(indexTarget), &si); err == nil { indexTarget = fmt.Sprintf("(%s), %s", strings.Join(si.PartitionKeys, ","), strings.Join(si.ClusteringKeys, ","), ) } _, err := fmt.Fprintf(w, "\nCREATE INDEX %s ON %s.%s (%s);\n", im.Name, im.KeyspaceName, im.TableName, indexTarget, ) if err != nil { return err } return nil } var keyspaceCQLTemplate = template.Must(template.New("keyspace"). Funcs(map[string]interface{}{ "escape": cqlHelpers.escape, "fixStrategy": cqlHelpers.fixStrategy, }).
Parse(`CREATE KEYSPACE {{ .Name }} WITH replication = { 'class': {{ escape ( fixStrategy .StrategyClass) }} {{- range $key, $value := .StrategyOptions }}, {{ escape $key }}: {{ escape $value }} {{- end }} }{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }}; `)) func (km *KeyspaceMetadata) keyspaceToCQL(w io.Writer) error { if err := keyspaceCQLTemplate.Execute(w, km); err != nil { return err } return nil } func contains(in []string, v string) bool { for _, e := range in { if e == v { return true } } return false } type toCQLHelpers struct{} var cqlHelpers = toCQLHelpers{} func (h toCQLHelpers) zip(a []string, b []string) [][]string { m := make([][]string, len(a)) for i := range a { m[i] = []string{a[i], b[i]} } return m } func (h toCQLHelpers) escape(e interface{}) string { switch v := e.(type) { case int, float64: return fmt.Sprint(v) case bool: if v { return "true" } return "false" case string: return "'" + strings.ReplaceAll(v, "'", "''") + "'" case []byte: return string(v) } return "" } func (h toCQLHelpers) stripFrozen(v string) string { return strings.TrimSuffix(strings.TrimPrefix(v, "frozen<"), ">") } func (h toCQLHelpers) fixStrategy(v string) string { return strings.TrimPrefix(v, "org.apache.cassandra.locator.") } func (h toCQLHelpers) fixQuote(v string) string { return strings.ReplaceAll(v, `"`, `'`) } func (h toCQLHelpers) tableOptionsToCQL(ops TableMetadataOptions) ([]string, error) { opts := map[string]interface{}{ "bloom_filter_fp_chance": ops.BloomFilterFpChance, "comment": ops.Comment, "crc_check_chance": ops.CrcCheckChance, "dclocal_read_repair_chance": ops.DcLocalReadRepairChance, "default_time_to_live": ops.DefaultTimeToLive, "gc_grace_seconds": ops.GcGraceSeconds, "max_index_interval": ops.MaxIndexInterval, "memtable_flush_period_in_ms": ops.MemtableFlushPeriodInMs, "min_index_interval": ops.MinIndexInterval, "read_repair_chance": ops.ReadRepairChance, "speculative_retry": ops.SpeculativeRetry, } var err error opts["caching"], err = json.Marshal(ops.Caching) if err != nil { return nil, err } opts["compaction"], err = json.Marshal(ops.Compaction) if err != nil { return nil, err } opts["compression"], err = json.Marshal(ops.Compression) if err != nil { return nil, err } cdc, err := json.Marshal(ops.CDC) if err != nil { return nil, err } if string(cdc) != "null" { opts["cdc"] = cdc } if ops.InMemory { opts["in_memory"] = ops.InMemory } out := make([]string, 0, len(opts)) for key, opt := range opts { out = append(out, fmt.Sprintf("%s = %s", key, h.fixQuote(h.escape(opt)))) } sort.Strings(out) return out, nil } func (h toCQLHelpers) tableExtensionsToCQL(extensions map[string]interface{}) ([]string, error) { exts := map[string]interface{}{} if blob, ok := extensions["scylla_encryption_options"]; ok { encOpts := &scyllaEncryptionOptions{} if err := encOpts.UnmarshalBinary(blob.([]byte)); err != nil { return nil, err } var err error exts["scylla_encryption_options"], err = json.Marshal(encOpts) if err != nil { return nil, err } } out := make([]string, 0, len(exts)) for key, ext := range exts { out = append(out, fmt.Sprintf("%s = %s", key, h.fixQuote(h.escape(ext)))) } sort.Strings(out) return out, nil } func (h toCQLHelpers) tablePropertiesToCQL(cks []*ColumnMetadata, opts TableMetadataOptions, flags []string, extensions map[string]interface{}) (string, error) { var sb strings.Builder var properties []string compactStorage := len(flags) > 0 && (contains(flags, TableFlagDense) || contains(flags, TableFlagSuper) || !contains(flags, TableFlagCompound)) if 
compactStorage { properties = append(properties, "COMPACT STORAGE") } if len(cks) > 0 { var inner []string for _, col := range cks { inner = append(inner, fmt.Sprintf("%s %s", col.Name, col.ClusteringOrder)) } properties = append(properties, fmt.Sprintf("CLUSTERING ORDER BY (%s)", strings.Join(inner, ", "))) } options, err := h.tableOptionsToCQL(opts) if err != nil { return "", err } properties = append(properties, options...) exts, err := h.tableExtensionsToCQL(extensions) if err != nil { return "", err } properties = append(properties, exts...) sb.WriteString(strings.Join(properties, "\n AND ")) return sb.String(), nil } func (h toCQLHelpers) tableColumnToCQL(tm *TableMetadata) string { var sb strings.Builder var columns []string for _, cn := range tm.OrderedColumns { cm := tm.Columns[cn] column := fmt.Sprintf("%s %s", cn, cm.Type) if cm.Kind == ColumnStatic { column += " static" } columns = append(columns, column) } if len(tm.PartitionKey) == 1 && len(tm.ClusteringColumns) == 0 && len(columns) > 0 { columns[0] += " PRIMARY KEY" } sb.WriteString(strings.Join(columns, ",\n ")) if len(tm.PartitionKey) > 1 || len(tm.ClusteringColumns) > 0 { sb.WriteString(",\n PRIMARY KEY (") sb.WriteString(h.partitionKeyString(tm.PartitionKey, tm.ClusteringColumns)) sb.WriteRune(')') } return sb.String() } func (h toCQLHelpers) partitionKeyString(pks, cks []*ColumnMetadata) string { var sb strings.Builder if len(pks) > 1 { sb.WriteRune('(') for i, pk := range pks { if i != 0 { sb.WriteString(", ") } sb.WriteString(pk.Name) } sb.WriteRune(')') } else { sb.WriteString(pks[0].Name) } if len(cks) > 0 { sb.WriteString(", ") for i, ck := range cks { if i != 0 { sb.WriteString(", ") } sb.WriteString(ck.Name) } } return sb.String() } type scyllaEncryptionOptions struct { CipherAlgorithm string `json:"cipher_algorithm"` SecretKeyStrength int `json:"secret_key_strength"` KeyProvider string `json:"key_provider"` SecretKeyFile string `json:"secret_key_file"` } // UnmarshalBinary deserializes blob into scyllaEncryptionOptions. // Format: // * 4 bytes - size of KV map // Size times: // * 4 bytes - length of key // * len_of_key bytes - key // * 4 bytes - length of value // * len_of_value bytes - value func (enc *scyllaEncryptionOptions) UnmarshalBinary(data []byte) error { size := binary.LittleEndian.Uint32(data[0:4]) m := make(map[string]string, size) off := uint32(4) for i := uint32(0); i < size; i++ { keyLen := binary.LittleEndian.Uint32(data[off : off+4]) off += 4 key := string(data[off : off+keyLen]) off += keyLen valueLen := binary.LittleEndian.Uint32(data[off : off+4]) off += 4 value := string(data[off : off+valueLen]) off += valueLen m[key] = value } enc.CipherAlgorithm = m["cipher_algorithm"] enc.KeyProvider = m["key_provider"] enc.SecretKeyFile = m["secret_key_file"] if secretKeyStrength, ok := m["secret_key_strength"]; ok { sks, err := strconv.Atoi(secretKeyStrength) if err != nil { return err } enc.SecretKeyStrength = sks } return nil }
viewToCQL
identifier_name
index.ts
import axios, { AxiosRequestConfig, CancelTokenSource, CancelToken, AxiosInstance, AxiosResponse, AxiosError } from 'axios' import isRetryAllowed from 'is-retry-allowed' import extend from 'js-cool/lib/extend' import getRandomStr from 'js-cool/lib/getRandomStr' export const namespace = 'axios-extend' const SAFE_HTTP_METHODS = ['get', 'head', 'options'] const IDEMPOTENT_HTTP_METHODS = SAFE_HTTP_METHODS.concat(['put', 'delete']) export interface AxiosExtendObject { promiseKey: string url: string promise: Promise<any> source: CancelTokenSource } export interface AxiosExtendCurrentStateType { lastRequestTime: number retryCount: number } export interface AxiosExtendRequestOptions extends AxiosRequestConfig { [namespace]?: any unique?: boolean orderly?: boolean requestOptions?: AxiosExtendRequestOptions cancelToken?: CancelToken type?: string error?: string } export interface AxiosExtendConfig { maxConnections?: number unique?: boolean retries?: number orderly?: boolean shouldResetTimeout?: boolean retryCondition?(error: AxiosError): boolean retryDelay?(retryNumber: number, error: any): number setHeaders?(instance: AxiosInstance): void onRequest?(config: AxiosRequestConfig, requestOptions: AxiosExtendRequestOptions): AxiosRequestConfig | Promise<AxiosRequestConfig> onRequestError?(error: any): void onResponse?(res: AxiosResponse<any>, requestOptions: AxiosExtendRequestOptions): AxiosResponse<any> | Promise<AxiosResponse<any>> onResponseError?(error: any): void onError?(error: any): void onCancel?(error: any): void } /** * Get the default delay time in milliseconds * @returns number - delay in milliseconds, always 0 */ function noDelay() { return 0 } /** * Initializes and returns the retry state for the given request/config * @param config - AxiosExtendRequestOptions * @return currentState */ function getCurrentState(config: AxiosExtendRequestOptions): AxiosExtendCurrentStateType { const currentState = config[namespace] || {} currentState.retryCount = currentState.retryCount || 0 config[namespace] = currentState return currentState } /** * Returns the axios-retry options for the current request * @param config - AxiosExtendRequestOptions * @param defaultOptions - AxiosExtendConfig * @return options */ function getRequestOptions(config: AxiosExtendRequestOptions, defaultOptions: AxiosExtendConfig): AxiosExtendConfig { return Object.assign({}, defaultOptions, config[namespace]) } /** * @param axios - any * @param config - any */ function fixConfig(axios: any, config: any): void { if (axios.defaults.agent === config.agent) { delete config.agent } if (axios.defaults.httpAgent === config.httpAgent) { delete config.httpAgent } if (axios.defaults.httpsAgent === config.httpsAgent) { delete config.httpsAgent } } /** * @param error - the error * @return boolean */ export function isNetworkError(error: AxiosError): boolean { return ( !error.response && Boolean(error.code) && // Prevents retrying cancelled requests error.code !== 'ECONNABORTED' && // Prevents retrying timed out requests isRetryAllowed(error) ) // Prevents retrying unsafe errors } /** * @param error - the error * @return boolean */ export function isSafeRequestError(error: any): boolean { // Cannot determine if the request can be retried if (!error.config) return false return isRetryableError(error) && SAFE_HTTP_METHODS.indexOf(error.config.method) !== -1 } /** * @param error - the error * @return boolean */ export function isIdempotentRequestError(error: any): boolean { // Cannot determine if the request can be retried if (!error.config) return false return isRetryableError(error) &&
IDEMPOTENT_HTTP_METHODS.indexOf(error.config.method) !== -1 } /** * @param error - the error * @return boolean */ export function isNetworkOrIdempotentRequestError(error: AxiosError): boolean { return isNetworkError(error) || isIdempotentRequestError(error) } /** * @param retryNumber - default: 0 * @return delay in milliseconds */ export function exponentialDelay(retryNumber = 0) { const delay = Math.pow(2, retryNumber) * 1000 const
tion isRetryableError(error: AxiosError): boolean { return error.code !== 'ECONNABORTED' && (!error.response || (error.response.status >= 500 && error.response.status <= 599)) } /** * axios wrapper * * @return Promise */ class AxiosExtend { waiting: Array<AxiosExtendObject> = [] // request queue maxConnections: number // maximum number of connections, default: 0 = unlimited orderly: boolean // whether responses must return in order, default: true unique: boolean // whether to cancel earlier similar requests, default: false retries: number // number of retries, default: 0 = no retry onCancel // callback invoked when a request is canceled constructor({ maxConnections, orderly, unique, retries, onCancel, ...defaultOptions }: AxiosExtendConfig) { this.maxConnections = maxConnections ?? 0 this.orderly = orderly ?? true this.unique = unique ?? false this.retries = retries ?? 0 this.onCancel = onCancel ?? null // initialization this.init(defaultOptions) } /** * Initialize */ public init(defaultOptions: AxiosExtendConfig): void { const { setHeaders, onRequest, onRequestError, onResponse, onResponseError, onError } = defaultOptions // Set request headers setHeaders && setHeaders(axios) // Add a request interceptor onRequest && axios.interceptors.request.use( config => { const currentState = getCurrentState(config) currentState.lastRequestTime = Date.now() if (currentState.retryCount > 0) return config // retried requests do not need to run onRequest again return onRequest(config, (config as any).requestOptions) }, (err: any) => { onRequestError && onRequestError(err) onError && onError(err) return Promise.reject(err) } ) // Add a response interceptor onResponse && axios.interceptors.response.use( res => { return onResponse(res, (res.config as any).requestOptions) }, (err: any): Promise<any> => { const config: any = err.config // If we have no information to retry the request if (!config) { onResponseError && onResponseError(err) onError && onError(err) return Promise.reject(err) } const { retries = this.retries, retryCondition = isNetworkOrIdempotentRequestError, retryDelay = noDelay, shouldResetTimeout = false } = getRequestOptions(config, defaultOptions) const currentState = getCurrentState(config) const shouldRetry = retryCondition(err) && currentState.retryCount < retries if (shouldRetry) { currentState.retryCount += 1 const delay = retryDelay(currentState.retryCount, err) // Axios fails merging this configuration to the default configuration because it has an issue // with circular structures: https://github.com/mzabriskie/axios/issues/370 fixConfig(axios, config) if (!shouldResetTimeout && config.timeout && currentState.lastRequestTime) { const lastRequestDuration = Date.now() - currentState.lastRequestTime // Minimum 1ms timeout (passing 0 or less to XHR means no timeout) config.timeout = Math.max(config.timeout - lastRequestDuration - delay, 1) } config.transformRequest = [(data: any) => data] return new Promise(resolve => setTimeout(() => resolve(axios(config)), delay)) } onResponseError && onResponseError(err) onError && onError(err) return Promise.reject(err) } ) } /** * Create a request */ public create(options: AxiosExtendRequestOptions): Promise<any> { const { unique = this.unique, orderly = this.orderly, url = '' } = options const promiseKey = getRandomStr(6) + '_' + Date.now() const source: CancelTokenSource = axios.CancelToken.source() options.requestOptions = extend(true, {}, options) as AxiosExtendRequestOptions options.cancelToken = source.token const promise = new Promise(async (resolve, reject) => { // responses must return in order, or requests with the same url must be canceled if (unique || orderly) { let len = this.waiting.length while (len > 0) { len-- if (this.waiting[len].url === url) { if (unique) this.waiting.splice(len, 1)[0].source.cancel('request canceled') else { try { await this.waiting[len].promise // await
this.waiting.splice(len, 1)[0].promise } catch { this.waiting.splice(len, 1) console.info('the task has been dropped') } } } } } // when a maximum connection limit is set and the number of concurrent requests exceeds it, wait for at least one task to finish if (this.maxConnections > 0 && this.waiting.length >= this.maxConnections) { try { await (this.waiting[0] as AxiosExtendObject).promise // await (this.waiting.shift() as AxiosExtendObject).promise } catch { this.waiting.shift() console.info('the task has been dropped') } } // Execute axios(options) .then((res: any) => { // success callback resolve(res) }) .catch((err: any) => { // request canceled if (axios.isCancel(err)) this.onCancel && this.onCancel(err) // failure callback else reject(err) }) .finally(() => { let index = this.waiting.findIndex((el: any) => el.promiseKey === promiseKey) index > -1 && this.waiting.splice(index, 1) }) }) this.waiting.push({ promiseKey, url, promise, source }) return promise } } export default AxiosExtend
randomSum = delay * 0.5 * Math.random() // 0-50% of the delay return delay + randomSum } /** * @param error - the error * @return boolean */ export func
identifier_body
index.ts
import axios, { AxiosRequestConfig, CancelTokenSource, CancelToken, AxiosInstance, AxiosResponse, AxiosError } from 'axios' import isRetryAllowed from 'is-retry-allowed' import extend from 'js-cool/lib/extend' import getRandomStr from 'js-cool/lib/getRandomStr' export const namespace = 'axios-extend' const SAFE_HTTP_METHODS = ['get', 'head', 'options'] const IDEMPOTENT_HTTP_METHODS = SAFE_HTTP_METHODS.concat(['put', 'delete']) export interface AxiosExtendObject { promiseKey: string url: string promise: Promise<any> source: CancelTokenSource } export interface AxiosExtendCurrentStateType { lastRequestTime: number retryCount: number } export interface AxiosExtendRequestOptions extends AxiosRequestConfig { [namespace]?: any unique?: boolean orderly?: boolean requestOptions?: AxiosExtendRequestOptions cancelToken?: CancelToken type?: string error?: string } export interface AxiosExtendConfig { maxConnections?: number unique?: boolean retries?: number orderly?: boolean shouldResetTimeout?: boolean retryCondition?(error: AxiosError): boolean retryDelay?(retryNumber: number, error: any): number setHeaders?(instance: AxiosInstance): void onRequest?(config: AxiosRequestConfig, requestOptions: AxiosExtendRequestOptions): AxiosRequestConfig | Promise<AxiosRequestConfig> onRequestError?(error: any): void onResponse?(res: AxiosResponse<any>, requestOptions: AxiosExtendRequestOptions): AxiosResponse<any> | Promise<AxiosResponse<any>> onResponseError?(error: any): void onError?(error: any): void onCancel?(error: any): void } /** * Get the default delay time in milliseconds * @returns number - delay in milliseconds, always 0 */ function noDelay() { return 0 } /** * Initializes and returns the retry state for the given request/config * @param config - AxiosExtendRequestOptions * @return currentState */ function getCurrentState(config: AxiosExtendRequestOptions): AxiosExtendCurrentStateType { const currentState = config[namespace] || {} currentState.retryCount = currentState.retryCount || 0 config[namespace] = currentState return currentState } /** * Returns the axios-retry options for the current request * @param config - AxiosExtendRequestOptions * @param defaultOptions - AxiosExtendConfig * @return options */ function getRequestOptions(config: AxiosExtendRequestOptions, defaultOptions: AxiosExtendConfig): AxiosExtendConfig { return Object.assign({}, defaultOptions, config[namespace]) } /** * @param axios - any * @param config - any */ function fixConfig(axios: any, config: any): void { if (axios.defaults.agent === config.agent) { delete config.agent } if (axios.defaults.httpAgent === config.httpAgent) { delete config.httpAgent } if (axios.defaults.httpsAgent === config.httpsAgent) { delete config.httpsAgent } } /** * @param error - the error * @return boolean */ export function isNetworkError(error: AxiosError): boolean { return ( !error.response && Boolean(error.code) && // Prevents retrying cancelled requests error.code !== 'ECONNABORTED' && // Prevents retrying timed out requests isRetryAllowed(error) ) // Prevents retrying unsafe errors } /** * @param error - the error * @return boolean */ export function isSafeRequestError(error: any): boolean { // Cannot determine if the request can be retried if (!error.config) return false return isRetryableError(error) && SAFE_HTTP_METHODS.indexOf(error.config.method) !== -1 } /** * @param error - the error * @return boolean */ export function isIdempotentRequestError(error: any): boolean { // Cannot determine if the request can be retried if (!error.config) return false return isRetryableError(error) &&
IDEMPOTENT_HTTP_METHODS.indexOf(error.config.method) !== -1 } /** * @param error - the error * @return boolean */ export function isNetworkOrIdempotentRequestError(error: AxiosError): boolean { return isNetworkError(error) || isIdempotentRequestError(error) } /** * @param retryNumber - default: 0 * @return delay in milliseconds */ export function exponentialDelay(retryNumber = 0) { const delay = Math.pow(2, retryNumber) * 1000 const randomSum = delay * 0.5 * Math.random() // 0-50% of the delay return delay + randomSum } /** * @param error - the error * @return boolean */ export function isRetryableError(error: AxiosError): boolean { return error.code !== 'ECONNABORTED' && (!error.response || (error.response.status >= 500 && error.response.status <= 599)) } /** * axios wrapper * * @return Promise */ class AxiosExtend { waiting: Array<AxiosExtendObject> = [] // request queue maxConnections: number // maximum number of connections, default: 0 = unlimited orderly: boolean // whether responses must return in order, default: true unique: boolean // whether to cancel earlier similar requests, default: false retries: number // number of retries, default: 0 = no retry onCancel // callback invoked when a request is canceled constructor({ maxConnections, orderly, unique, retries, onCancel, ...defaultOptions }: AxiosExtendConfig) { this.maxConnections = maxConnections ?? 0 this.orderly = orderly ?? tr
this.unique = unique ?? false this.retries = retries ?? 0 this.onCancel = onCancel ?? null // initialization this.init(defaultOptions) } /** * Initialize */ public init(defaultOptions: AxiosExtendConfig): void { const { setHeaders, onRequest, onRequestError, onResponse, onResponseError, onError } = defaultOptions // Set request headers setHeaders && setHeaders(axios) // Add a request interceptor onRequest && axios.interceptors.request.use( config => { const currentState = getCurrentState(config) currentState.lastRequestTime = Date.now() if (currentState.retryCount > 0) return config // retried requests do not need to run onRequest again return onRequest(config, (config as any).requestOptions) }, (err: any) => { onRequestError && onRequestError(err) onError && onError(err) return Promise.reject(err) } ) // Add a response interceptor onResponse && axios.interceptors.response.use( res => { return onResponse(res, (res.config as any).requestOptions) }, (err: any): Promise<any> => { const config: any = err.config // If we have no information to retry the request if (!config) { onResponseError && onResponseError(err) onError && onError(err) return Promise.reject(err) } const { retries = this.retries, retryCondition = isNetworkOrIdempotentRequestError, retryDelay = noDelay, shouldResetTimeout = false } = getRequestOptions(config, defaultOptions) const currentState = getCurrentState(config) const shouldRetry = retryCondition(err) && currentState.retryCount < retries if (shouldRetry) { currentState.retryCount += 1 const delay = retryDelay(currentState.retryCount, err) // Axios fails merging this configuration to the default configuration because it has an issue // with circular structures: https://github.com/mzabriskie/axios/issues/370 fixConfig(axios, config) if (!shouldResetTimeout && config.timeout && currentState.lastRequestTime) { const lastRequestDuration = Date.now() - currentState.lastRequestTime // Minimum 1ms timeout (passing 0 or less to XHR means no timeout) config.timeout = Math.max(config.timeout - lastRequestDuration - delay, 1) } config.transformRequest = [(data: any) => data] return new Promise(resolve => setTimeout(() => resolve(axios(config)), delay)) } onResponseError && onResponseError(err) onError && onError(err) return Promise.reject(err) } ) } /** * Create a request */ public create(options: AxiosExtendRequestOptions): Promise<any> { const { unique = this.unique, orderly = this.orderly, url = '' } = options const promiseKey = getRandomStr(6) + '_' + Date.now() const source: CancelTokenSource = axios.CancelToken.source() options.requestOptions = extend(true, {}, options) as AxiosExtendRequestOptions options.cancelToken = source.token const promise = new Promise(async (resolve, reject) => { // responses must return in order, or requests with the same url must be canceled if (unique || orderly) { let len = this.waiting.length while (len > 0) { len-- if (this.waiting[len].url === url) { if (unique) this.waiting.splice(len, 1)[0].source.cancel('request canceled') else { try { await this.waiting[len].promise // await this.waiting.splice(len, 1)[0].promise } catch { this.waiting.splice(len, 1) console.info('the task has been dropped') } } } } } // when a maximum connection limit is set and the number of concurrent requests exceeds it, wait for at least one task to finish if (this.maxConnections > 0 && this.waiting.length >= this.maxConnections) { try { await (this.waiting[0] as AxiosExtendObject).promise // await (this.waiting.shift() as AxiosExtendObject).promise } catch { this.waiting.shift() console.info('the task has been dropped') } } // Execute axios(options) .then((res: any) => { // success callback resolve(res) }) .catch((err: any) => { // request canceled if (axios.isCancel(err)) this.onCancel && this.onCancel(err) // failure callback else reject(err) })
.finally(() => { let index = this.waiting.findIndex((el: any) => el.promiseKey === promiseKey) index > -1 && this.waiting.splice(index, 1) }) }) this.waiting.push({ promiseKey, url, promise, source }) return promise } } export default AxiosExtend
ue
identifier_name
index.ts
import axios, { AxiosRequestConfig, CancelTokenSource, CancelToken, AxiosInstance, AxiosResponse, AxiosError } from 'axios' import isRetryAllowed from 'is-retry-allowed' import extend from 'js-cool/lib/extend' import getRandomStr from 'js-cool/lib/getRandomStr' export const namespace = 'axios-extend' const SAFE_HTTP_METHODS = ['get', 'head', 'options'] const IDEMPOTENT_HTTP_METHODS = SAFE_HTTP_METHODS.concat(['put', 'delete']) export interface AxiosExtendObject { promiseKey: string url: string promise: Promise<any> source: CancelTokenSource } export interface AxiosExtendCurrentStateType { lastRequestTime: number retryCount: number } export interface AxiosExtendRequestOptions extends AxiosRequestConfig { [namespace]?: any unique?: boolean orderly?: boolean requestOptions?: AxiosExtendRequestOptions cancelToken?: CancelToken type?: string error?: string } export interface AxiosExtendConfig { maxConnections?: number unique?: boolean retries?: number orderly?: boolean shouldResetTimeout?: boolean retryCondition?(error: AxiosError): boolean retryDelay?(retryNumber: number, error: any): number setHeaders?(instance: AxiosInstance): void onRequest?(config: AxiosRequestConfig, requestOptions: AxiosExtendRequestOptions): AxiosRequestConfig | Promise<AxiosRequestConfig> onRequestError?(error: any): void onResponse?(res: AxiosResponse<any>, requestOptions: AxiosExtendRequestOptions): AxiosResponse<any> | Promise<AxiosResponse<any>> onResponseError?(error: any): void onError?(error: any): void onCancel?(error: any): void } /** * Get the default delay time in milliseconds * @returns number - delay in milliseconds, always 0 */ function noDelay() { return 0 } /** * Initializes and returns the retry state for the given request/config * @param config - AxiosExtendRequestOptions * @return currentState */ function getCurrentState(config: AxiosExtendRequestOptions): AxiosExtendCurrentStateType { const currentState = config[namespace] || {} currentState.retryCount = currentState.retryCount || 0 config[namespace] = currentState return currentState } /** * Returns the axios-retry options for the current request * @param config - AxiosExtendRequestOptions * @param defaultOptions - AxiosExtendConfig * @return options */ function getRequestOptions(config: AxiosExtendRequestOptions, defaultOptions: AxiosExtendConfig): AxiosExtendConfig { return Object.assign({}, defaultOptions, config[namespace]) } /** * @param axios - any * @param config - any */ function fixConfig(axios: any, config: any): void { if (axios.defaults.agent === config.agent) { delete config.agent } if (axios.defaults.httpAgent === config.httpAgent) { delete config.httpAgent } if (axios.defaults.httpsAgent === config.httpsAgent) { delete config.httpsAgent } } /** * @param error - the error * @return boolean */ export function isNetworkError(error: AxiosError): boolean { return ( !error.response && Boolean(error.code) && // Prevents retrying cancelled requests error.code !== 'ECONNABORTED' && // Prevents retrying timed out requests isRetryAllowed(error) ) // Prevents retrying unsafe errors }
export function isSafeRequestError(error: any): boolean { // Cannot determine if the request can be retried if (!error.config) return false return isRetryableError(error) && SAFE_HTTP_METHODS.indexOf(error.config.method) !== -1 } /** * @param error - the error * @return boolean */ export function isIdempotentRequestError(error: any): boolean { // Cannot determine if the request can be retried if (!error.config) return false return isRetryableError(error) && IDEMPOTENT_HTTP_METHODS.indexOf(error.config.method) !== -1 } /** * @param error - the error * @return boolean */ export function isNetworkOrIdempotentRequestError(error: AxiosError): boolean { return isNetworkError(error) || isIdempotentRequestError(error) } /** * @param retryNumber - default: 0 * @return delay in milliseconds */ export function exponentialDelay(retryNumber = 0) { const delay = Math.pow(2, retryNumber) * 1000 const randomSum = delay * 0.5 * Math.random() // 0-50% of the delay return delay + randomSum } /** * @param error - the error * @return boolean */ export function isRetryableError(error: AxiosError): boolean { return error.code !== 'ECONNABORTED' && (!error.response || (error.response.status >= 500 && error.response.status <= 599)) } /** * axios wrapper * * @return Promise */ class AxiosExtend { waiting: Array<AxiosExtendObject> = [] // request queue maxConnections: number // maximum number of connections, default: 0 = unlimited orderly: boolean // whether responses must return in order, default: true unique: boolean // whether to cancel earlier similar requests, default: false retries: number // number of retries, default: 0 = no retry onCancel // callback invoked when a request is canceled constructor({ maxConnections, orderly, unique, retries, onCancel, ...defaultOptions }: AxiosExtendConfig) { this.maxConnections = maxConnections ?? 0 this.orderly = orderly ?? true this.unique = unique ?? false this.retries = retries ?? 0 this.onCancel = onCancel ?? null // initialization this.init(defaultOptions) } /** * Initialize */ public init(defaultOptions: AxiosExtendConfig): void { const { setHeaders, onRequest, onRequestError, onResponse, onResponseError, onError } = defaultOptions // Set request headers setHeaders && setHeaders(axios) // Add a request interceptor onRequest && axios.interceptors.request.use( config => { const currentState = getCurrentState(config) currentState.lastRequestTime = Date.now() if (currentState.retryCount > 0) return config // retried requests do not need to run onRequest again return onRequest(config, (config as any).requestOptions) }, (err: any) => { onRequestError && onRequestError(err) onError && onError(err) return Promise.reject(err) } ) // Add a response interceptor onResponse && axios.interceptors.response.use( res => { return onResponse(res, (res.config as any).requestOptions) }, (err: any): Promise<any> => { const config: any = err.config // If we have no information to retry the request if (!config) { onResponseError && onResponseError(err) onError && onError(err) return Promise.reject(err) } const { retries = this.retries, retryCondition = isNetworkOrIdempotentRequestError, retryDelay = noDelay, shouldResetTimeout = false } = getRequestOptions(config, defaultOptions) const currentState = getCurrentState(config) const shouldRetry = retryCondition(err) && currentState.retryCount < retries if (shouldRetry) { currentState.retryCount += 1 const delay = retryDelay(currentState.retryCount, err) // Axios fails merging this configuration to the default configuration because it has an issue // with circular structures: https://github.com/mzabriskie/axios/issues/370 fixConfig(axios, config) if (!shouldResetTimeout && config.timeout && currentState.lastRequestTime) { const lastRequestDuration = Date.now() - currentState.lastRequestTime // Minimum 1ms timeout (passing 0 or less to XHR means no
timeout) config.timeout = Math.max(config.timeout - lastRequestDuration - delay, 1) } config.transformRequest = [(data: any) => data] return new Promise(resolve => setTimeout(() => resolve(axios(config)), delay)) } onResponseError && onResponseError(err) onError && onError(err) return Promise.reject(err) } ) } /** * Create a request */ public create(options: AxiosExtendRequestOptions): Promise<any> { const { unique = this.unique, orderly = this.orderly, url = '' } = options const promiseKey = getRandomStr(6) + '_' + Date.now() const source: CancelTokenSource = axios.CancelToken.source() options.requestOptions = extend(true, {}, options) as AxiosExtendRequestOptions options.cancelToken = source.token const promise = new Promise(async (resolve, reject) => { // responses must return in order, or requests with the same url must be canceled if (unique || orderly) { let len = this.waiting.length while (len > 0) { len-- if (this.waiting[len].url === url) { if (unique) this.waiting.splice(len, 1)[0].source.cancel('request canceled') else { try { await this.waiting[len].promise // await this.waiting.splice(len, 1)[0].promise } catch { this.waiting.splice(len, 1) console.info('the task has been dropped') } } } } } // when a maximum connection limit is set and the number of concurrent requests exceeds it, wait for at least one task to finish if (this.maxConnections > 0 && this.waiting.length >= this.maxConnections) { try { await (this.waiting[0] as AxiosExtendObject).promise // await (this.waiting.shift() as AxiosExtendObject).promise } catch { this.waiting.shift() console.info('the task has been dropped') } } // Execute axios(options) .then((res: any) => { // success callback resolve(res) }) .catch((err: any) => { // request canceled if (axios.isCancel(err)) this.onCancel && this.onCancel(err) // failure callback else reject(err) }) .finally(() => { let index = this.waiting.findIndex((el: any) => el.promiseKey === promiseKey) index > -1 && this.waiting.splice(index, 1) }) }) this.waiting.push({ promiseKey, url, promise, source }) return promise } } export default AxiosExtend
/** * @param error - the error * @return boolean */
random_line_split
products.ts
import { ProductItem } from './product-item'; export let products: ProductItem[] = [ { id: 1.1, productId: 'phones', name: 'Apple iPhone 11 64 GB', price: 499.9, description: 'The iPhone 11 is a smartphone designed, developed, and marketed by Apple Inc. It is the 13th generation, lower-priced iPhone, succeeding the iPhone XR. The prominent changes compared with the iPhone XR are the Apple A13 Bionic chip, and an ultra-wide dual-camera system.', imageUrl: 'https://m.media-amazon.com/images/I/71kZfQA-Y7L._AC_UY218_.jpg', rating: 4.8, shareLink: 'https://www.amazon.com/Apple-Carrier-Subscription-Cricket-Wireless/dp/B084GSFQYW/ref=sr_1_1?dchild=1&keywords=Iphone+11&qid=1614520397&sr=8-1', likes: 51 }, { id: 1.2, productId: 'phones', name: 'Apple iPhone 12 Pro 512 GB', price: 1099.9, description: 'Major upgrades over the iPhone 11 Pro and iPhone 11 Pro Max include the addition of 5G support, the LiDAR sensor, ProRAW (DNG) allowing high quality lossless 12bit image capture in the native photos app with the use of the new DNG v1.6 specification, the introduction of the MagSafe wireless charging and accessory system, the Apple A14 Bionic.', imageUrl: 'https://m.media-amazon.com/images/I/71YlH-4MUQL._AC_UY218_.jpg', rating: 4.5, shareLink: 'https://www.amazon.com/Apple-iPhone-Graphite-Carrier-Subscription/dp/B08L5QPVH7/ref=sr_1_2?dchild=1&keywords=Iphone+11&qid=1614520367&sr=8-2', likes: 33 }, { id: 1.3, productId: 'phones', name: 'Samsung Galaxy S10 128GB+8GB RAM SM-G973F', price: 599.9, description: '6.1 inches QHD+ Dynamic AMOLED Infinity-O Display 1440 x 3040 pixels, 19:9 ratio (~550 ppi density) HDR10+ certified - Front/back glass (Gorilla Glass 6), aluminum frame - IP68 dust/water proof (up to 1.5m for 30 mins) > Not advised for beach or pool use - Android 9 (Pie). 128 GB + 8 GB RAM - microSD, up to 512 GB - 64-bit Octa-Core Processor - Ultrasonic Fingerprint sensor - Non-removable Li-Ion 3400 mAh battery.', imageUrl: 'https://m.media-amazon.com/images/I/61YVqHdFRxL._AC_UY218_.jpg', rating: 4.7, shareLink: 'https://www.amazon.com/Samsung-SM-G973F-DS-Smartphone-International/dp/B07NZXXZB2/ref=sr_1_5?dchild=1&keywords=samsung+galaxy+s10&qid=1614668229&sr=8-5', likes: 26 }, { id: 1.4, productId: 'phones', name: 'Xiaomi Redmi Note 9S (128GB, 6GB)', price: 267.0, description: '6.67 inches, DotDisplay, FHD+, 1080 x 2400 pixels, 20:9 ratio, Corning Gorilla Glass 5, 450 nits typ. brightness. 64/128GB or 4/6GB RAM, Qualcomm SM7125 Snapdragon 720G, Octa-core Processor, Android 10, MIUI 11. Rear Camera: 48MP + 8MP + 5MP + 2MP, Front Camera: 16MP, 3.5mm jack, Bluetooth5.0, 5020 mAh battery.', imageUrl: 'https://m.media-amazon.com/images/I/61ySS-qrNYL._AC_UY218_.jpg', rating: 4.5, shareLink: 'https://www.amazon.com/Xiaomi-5020mAh-Unlocked-T-Mobile-International/dp/B085W9B2KH/ref=sr_1_2?dchild=1&keywords=Xiaomi+note+9&qid=1614697488&sr=8-2', likes: 40 }, { id: 1.5, productId: 'phones', name: 'Samsung Galaxy Note 20 Ultra 5G 128GB', price: 1079.9, description: 'Display & Gaming: The best Galaxy mobile gaming and display experience yet; With a breathtaking screen refresh rate, adaptive Dynamic AMOLED 2X display, Galaxy 5G support & advanced processor, you get a smooth gaming experience with virtually no lag. 
HyperFast Processor: Multitask with Samsung’s fastest Note processor yet.', imageUrl: 'https://m.media-amazon.com/images/I/81NKXayE47L._AC_UY218_.jpg', rating: 4.4, shareLink: 'https://www.amazon.com/Samsung-Electronics-Unlocked-Smartphone-Long-Lasting/dp/B08BX7QGKF/ref=sr_1_1?dchild=1&keywords=Samsung+note+20&qid=1614697715&sr=8-1', likes: 63 }, { id: 2.1, productId: 'tvs', name: 'TCL 32S327 32-Inch 1080p Roku Smart LED TV', price: 215.0, description: '1080p Full HD Resolution excellent detail, color, and contrast. Wireless Connection: 802.11 2x2 Dual Band. Smart Functionality offers access to over 5,000 streaming channels featuring more than 500,000 movies and TV episodes via Roku TV.', imageUrl: 'https://m.media-amazon.com/images/I/71Ut6ZxFbpL._AC_UL320_.jpg', rating: 4.5, shareLink: 'https://www.amazon.com/TCL-32S327-32-Inch-1080p-Smart/dp/B07F981R8M/ref=sr_1_1?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-1', likes: 55 }, { id: 2.2, productId: 'tvs', name: 'SAMSUNG 85-inch Class Crystal UHD TU-8000', price: 837.5, description: 'CRYSTAL PROCESSOR 4K: This ultra-fast processor transforms everything you watch into stunning 4K. CRYSTAL DISPLAY: Experience crystal clear colors that are fine-tuned to deliver a naturally crisp and vivid picture.', imageUrl: 'https://m.media-amazon.com/images/I/91FcuuZwcrL._AC_UL320_.jpg', rating: 4.4, shareLink: 'https://www.amazon.com/Samsung-85-inch-Crystal-TU-8000-Built/dp/B084JCFNHF/ref=sr_1_2?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-2', likes: 50 }, { id: 2.3, productId: 'tvs', name: 'TCL 65" 5-Series 4K UHD Dolby Vision HDR QLED', price: 569.9, description: 'Superior 4K Ultra HD: Picture clarity combined with the contrast, color, and detail of Dolby Vision HDR (High Dynamic Range) for the most lifelike picture. QLED: Quantum dot technology delivers better brightness and wider color volume.',
rating: 4.5, shareLink: 'https://www.amazon.com/TCL-Dolby-Vision-QLED-Smart/dp/B08857ZHY3/ref=sr_1_3?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-3', likes: 80 }, { id: 2.4, productId: 'tvs', name: 'SAMSUNG QN32Q50RAFXZA Flat 32" QLED 4K', price: 447.9, description: '4K UHD Processor: a powerful processor optimizes your tv’ s performance with 4K picture quality. 4K UHD: see what you’ve been missing on a crisp, clear picture that’s 4x the resolution of Full HD. Inputs & Outputs: 3 HDMI ports, 1 Ethernet port, 2 USB Ports (v 2.0), 1 Digital Audio Output (Optical), 1 Composite Input (AV).', imageUrl: 'https://m.media-amazon.com/images/I/51NKhnjhpGL._AC_UL320_.jpg', rating: 4.6, shareLink: 'https://www.amazon.com/SAMSUNG-QN32Q50RAFXZA-32Q50-Smart-TV2019/dp/B07W5QYD2K/ref=sr_1_5?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-5', likes: 98 }, { id: 2.5, productId: 'tvs', name: 'SAMSUNG 65-inch Class QLED Q80T Series', price: 749.9, description: 'DIRECT FULL ARRAY 12X (85", 75", 65" & 55"): Controlled backlights offer deeper contrast for richer blacks and brighter whites. QUANTUM HDR 12X (85", 75", 65" & 55"): Fine-tuned shades of cinematic color make details leap off the screen. QUANTUM PROCESSOR 4K: This powerful processor uses deep learning AI to transform everything you watch into stunning 4K.', imageUrl: 'https://m.media-amazon.com/images/I/61DIUfDxBtL._AC_UL320_.jpg', rating: 4.6, shareLink: 'https://www.amazon.com/SAMSUNG-65-inch-Class-QLED-Built/dp/B0845ZSMWS/ref=sr_1_8?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-8', likes: 80 }, { id: 3.1, productId: 'computers', name: 'Kingston 240GB A400 SATA 3 2.5" Internal SSD', price: 34.9, description: 'Fast start up, loading and file transfers. More reliable and durable than a hard drive. Multiple capacities with space for applications or a hard drive replacement.', imageUrl: 'https://m.media-amazon.com/images/I/91RL+MhTWbL._AC_UL320_.jpg', rating: 4.9, shareLink: 'https://www.amazon.com/Kingston-240GB-Solid-SA400S37-240G/dp/B01N5IB20Q/ref=sr_1_11?dchild=1&fst=as%3Aoff&pf_rd_i=16225007011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=74069509-93ef-4a3c-8dca-a9e3fa773a64&pf_rd_r=17Y4X0Q1VVDP4JH1KN1V&pf_rd_s=merchandised-search-4&pf_rd_t=101&qid=1614699308&rnid=16225007011&s=computers-intl-ship&sr=1-11', likes: 93 }, { id: 3.2, productId: 'computers', name: 'AMD Ryzen 5 3600 6-Core, 12-Thread', price: 228.0, description: 'The world\'s most advanced processor in the desktop PC gaming segment. 6 cores and 12 processing threads bundled with the quiet AMD wraith stealth cooler max temps 95°C. 
4,2 GHz max boost unlocked for overclocking 35 MB of game cache DDR4 3200 support.', imageUrl: 'https://m.media-amazon.com/images/I/71WPGXQLcLL._AC_UL320_.jpg', rating: 5.0, shareLink: 'https://www.amazon.com/AMD-Ryzen-3600-12-Thread-Processor/dp/B07STGGQ18/ref=sr_1_2?dchild=1&fst=as%3Aoff&pf_rd_i=16225007011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=74069509-93ef-4a3c-8dca-a9e3fa773a64&pf_rd_r=17Y4X0Q1VVDP4JH1KN1V&pf_rd_s=merchandised-search-4&pf_rd_t=101&qid=1614699696&rnid=16225007011&s=computers-intl-ship&sr=1-2', likes: 95 }, { id: 3.3, productId: 'computers', name: 'Lenovo Yoga 9i 14 2-in-1 14\'', price: 1399.0, description: '11th Generation Intel Core i7-1185G7 Processor (3.00 GHz, up to 4.80 GHz with Turbo Boost, 4 Cores, 8 Threads, 12 MB Cache); 16GB Soldered LPDDR4x-4266 Memory; 512GB SSD M.2 2280 PCIe 3.0x4 NVMe. 14" FHD (1920x1080) IPS 400nits Glossy, Glass, 72% NTSC, Dolby Vision; Integrated Intel Iris Xe Graphics.', imageUrl: 'https://m.media-amazon.com/images/I/41Rjr+67OtL._AC_UL320_.jpg', rating: 4.4, shareLink: 'https://www.amazon.com/Lenovo-Touch-Screen-Platform-i7-1185G7-16GB-Built/dp/B08QH1C5PY/ref=sr_1_11?dchild=1&fst=as%3Aoff&pf_rd_i=16225007011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=74069509-93ef-4a3c-8dca-a9e3fa773a64&pf_rd_r=17Y4X0Q1VVDP4JH1KN1V&pf_rd_s=merchandised-search-4&pf_rd_t=101&qid=1614699855&rnid=16225007011&s=computers-intl-ship&sr=1-11', likes: 66 }, { id: 3.4, productId: 'computers', name: 'Dell Precision 3000 3640 Workstation', price: 1461.9, description: 'Core i7 i7-10700 - 16 GB RAM - 512 GB SSD - Tower - Windows 10 Pro 64-bit NVIDIA Quadro P620 2 GB Graphics - DVD-Writer - Serial ATA Controller - English Keyboard.', imageUrl: 'https://m.media-amazon.com/images/I/81nHkldsNJL._AC_UL320_.jpg', rating: 4.1, shareLink: 'https://www.amazon.com/Dell-Precision-3000-3640-Workstation/dp/B08M96SMB5/ref=sr_1_17?dchild=1&fst=as%3Aoff&pf_rd_i=16225007011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=74069509-93ef-4a3c-8dca-a9e3fa773a64&pf_rd_r=17Y4X0Q1VVDP4JH1KN1V&pf_rd_s=merchandised-search-4&pf_rd_t=101&qid=1614700019&rnid=16225007011&s=computers-intl-ship&sr=1-17', likes: 44 }, { id: 3.5, productId: 'computers', name: 'NVIDIA Jetson Xavier NX Developer Kit', price: 399.0, description: 'The NVIDIA Jetson Xavier NX developer kit includes a power-efficient, compact Jetson Xavier NX module for AI edge devices. 
It benefits from new cloud-native support and accelerates the NVIDIA software stack in as little as 10 W with more than 10X the performance of its widely adopted predecessor, Jetson TX2.', imageUrl: 'https://m.media-amazon.com/images/I/61m739zmagL._AC_UL320_.jpg', rating: 5, shareLink: 'https://www.amazon.com/NVIDIA-Jetson-Xavier-Developer-812674024318/dp/B086874Q5R/ref=sr_1_3?dchild=1&fst=as%3Aoff&pf_rd_i=16225007011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=74069509-93ef-4a3c-8dca-a9e3fa773a64&pf_rd_r=17Y4X0Q1VVDP4JH1KN1V&pf_rd_s=merchandised-search-4&pf_rd_t=101&qid=1614700019&rnid=16225007011&s=computers-intl-ship&sr=1-3', likes: 50 }, { id: 4.1, productId: 'games', name: 'Witcher 3: Wild Hunt', price: 59.9, description: 'Built for endless adventure, the massive open world of The Witcher sets new standards in terms of size, depth and complexity.', imageUrl: 'https://m.media-amazon.com/images/I/91rLnlZhGzL._AC_UL320_.jpg', rating: 5, shareLink: 'https://www.amazon.com/Witcher-3-Wild-Hunt-Nintendo-Switch/dp/B07SH3D6HT/ref=sxin_9?ascsubtag=amzn1.osa.c9e9b8c6-7494-4540-98f5-cbbc58131e89.ATVPDKIKX0DER.en_US&creativeASIN=B07SH3D6HT&cv_ct_cx=video+games&cv_ct_id=amzn1.osa.c9e9b8c6-7494-4540-98f5-cbbc58131e89.ATVPDKIKX0DER.en_US&cv_ct_pg=search&cv_ct_we=asin&cv_ct_wn=osp-single-source-earns-comm&dchild=1&keywords=video+games&linkCode=oas&pd_rd_i=B07SH3D6HT&pd_rd_r=e6e3a63b-1c58-48f5-a8a9-ecfc7140a59b&pd_rd_w=ZnjnX&pd_rd_wg=fhSQL&pf_rd_p=35b32c02-1b41-4e49-9b89-0297af2446e1&pf_rd_r=0BCQA9M6ZMMG5V3349AB&qid=1614700354&sr=1-2-64f3a41a-73ca-403a-923c-8152c45485fe&tag=gadgetreview-tca-20', likes: 93 }, { id: 4.2, productId: 'games', name: 'Mass Effect Legendary Edition', price: 59.9, description: 'One person is all that stands between humanity and the greatest threat it\'s ever faced. Relive the legend of Commander Shepard remastered and optimized for 4K Ultra HD.', imageUrl: 'https://m.media-amazon.com/images/I/71IbXCVb4pL._AC_UY218_.jpg', rating: 5, shareLink: 'https://www.amazon.com/Mass-Effect-Legendary-PlayStation-4/dp/B08MXTDGN6/ref=sr_1_5?dchild=1&keywords=video+games&qid=1614700553&sr=8-5', likes: 70 }, { id: 4.3, productId: 'games', name: 'The Last of Us Remastered', price: 17.9, description: 'Winner of over 200 Game of the Year awards. 
Explore a brutal post-pandemic world, fully realized with the power of PlayStation 4 system.', imageUrl: 'https://m.media-amazon.com/images/I/51fR72yjSFL._AC_UL320_.jpg', rating: 5, shareLink: 'https://www.amazon.com/Last-Us-Remastered-PlayStation-4/dp/B00JK00S0S/ref=sxin_9?ascsubtag=amzn1.osa.c9e9b8c6-7494-4540-98f5-cbbc58131e89.ATVPDKIKX0DER.en_US&creativeASIN=B00JK00S0S&cv_ct_cx=video+games&cv_ct_id=amzn1.osa.c9e9b8c6-7494-4540-98f5-cbbc58131e89.ATVPDKIKX0DER.en_US&cv_ct_pg=search&cv_ct_we=asin&cv_ct_wn=osp-single-source-earns-comm&dchild=1&keywords=video+games&linkCode=oas&pd_rd_i=B00JK00S0S&pd_rd_r=11b7bb4d-6baa-4dd9-8d5a-b34b5a49dac4&pd_rd_w=0fQ5s&pd_rd_wg=FnaBE&pf_rd_p=35b32c02-1b41-4e49-9b89-0297af2446e1&pf_rd_r=BJT1WK7E96M1KS7KNTHT&qid=1614700553&sr=1-3-64f3a41a-73ca-403a-923c-8152c45485fe&tag=gadgetreview-tca-20', likes: 85 }, { id: 4.4, productId: 'games', name: 'Mario & Sonic at the Olympic Games Tokyo', price: 44.9, description: 'Compete in all-new Olympic sports-skateboarding, surfing, Sport climbing, and karate-and a variety of events including archery, gymnastics, judo, boxing, marathon, football, equestrian, track and field, and many more.', imageUrl: 'https://m.media-amazon.com/images/I/814xQ2+2s8L._AC_UL320_.jpg', rating: 4.4, shareLink: 'https://www.amazon.com/Mario-Sonic-Olympic-Games-Tokyo-2020/dp/B07SXNKFGS/ref=sxin_9?ascsubtag=amzn1.osa.c9e9b8c6-7494-4540-98f5-cbbc58131e89.ATVPDKIKX0DER.en_US&creativeASIN=B07SXNKFGS&cv_ct_cx=video+games&cv_ct_id=amzn1.osa.c9e9b8c6-7494-4540-98f5-cbbc58131e89.ATVPDKIKX0DER.en_US&cv_ct_pg=search&cv_ct_we=asin&cv_ct_wn=osp-single-source-earns-comm&dchild=1&keywords=video+games&linkCode=oas&pd_rd_i=B07SXNKFGS&pd_rd_r=11b7bb4d-6baa-4dd9-8d5a-b34b5a49dac4&pd_rd_w=0fQ5s&pd_rd_wg=FnaBE&pf_rd_p=35b32c02-1b41-4e49-9b89-0297af2446e1&pf_rd_r=BJT1WK7E96M1KS7KNTHT&qid=1614700553&sr=1-4-64f3a41a-73ca-403a-923c-8152c45485fe&tag=gadgetreview-tca-20', likes: 33 }, { id: 4.5, productId: 'games', name: 'Persona 5 Strikers', price: 59.9, description: 'A summer vacation with close friends takes a sudden turn as a distorted reality emerges; reveal the truth and redeem the hearts of those imprisoned at the center of the crisis.', imageUrl: 'https://m.media-amazon.com/images/I/81aEa8zePHL._AC_UY218_.jpg', rating: 4.5, shareLink: 'https://www.amazon.com/Persona-5-Strikers-PlayStation-4/dp/B08Q6YYLN5/ref=sr_1_1_sspa?dchild=1&keywords=video+games&qid=1614700553&sr=8-1-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUEzQjFGWTBIV1pLNUdYJmVuY3J5cHRlZElkPUEwMzEwOTU3M0dZUEtLNTgwRTlRNSZlbmNyeXB0ZWRBZElkPUEwODUxMDU5M0xRS0xIQjdNQVFDVSZ3aWRnZXROYW1lPXNwX2F0ZiZhY3Rpb249Y2xpY2tSZWRpcmVjdCZkb05vdExvZ0NsaWNrPXRydWU=', likes: 68 } ];
udacity_project_script.py
import pandas as pd
import time
import calendar


def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all_months" to apply no month filter
        (str) day - name of the day of week to filter by, or "all_days" to apply no day filter
    """
    print("Hello there! Let's explore some data!")

    legit_cities = ['chicago', 'new york city', 'washington']  # cities I'm going to accept
    city = input("\nWhich city do you want to analyze? Chicago, New York City or Washington?\n").lower()
    while city not in legit_cities:
        print("There is no such city in the database!")
        city = input("\nWhich city do you want to analyze?\n").lower()
    city = city.replace(" ", "_")

    possible_answers = ['month', 'day', 'both', 'none']  # answers I'm going to accept - 4 possibilities
    answer = input("\nFilter by 'month','day' or 'both'? If you don't want to filter type 'none'\n").lower()
    while answer not in possible_answers:
        print("That is not a valid option! Try again.")
        answer = input("\nFilter by 'month','day' or 'both'? If you don't want to filter type 'none'\n").lower()

    legit_months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun']
    legit_days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    month, day = 'all_months', 'all_days'

    if answer == 'both':
        month = input("\nWhich month do you want to analyze? Jan, Feb, ..., Jun\n").capitalize()
        while month not in legit_months:
            print('There is no such month! Try again.')
            month = input("\nWhich month do you want to analyze?\n").capitalize()
        day = input("\nChoose a day of interest - Mon, Tue, ...\n").capitalize()
        while day not in legit_days:
            print("There is no such day! Try again.")
            day = input("\nWhich day do you want to analyze? Mon, Tue, Wed...\n").capitalize()
    elif answer == 'month':
        month = input("\nWhich month do you want to analyze? Jan, Feb, ..., Jun\n").capitalize()
        while month not in legit_months:
            print('There is no such month! Try again.')
            month = input("\nWhich month do you want to analyze?\n").capitalize()
    elif answer == 'day':
        day = input("\nChoose a day of interest - Mon, Tue, Wed...\n").capitalize()
        while day not in legit_days:
            print("There is no such day! Try again.")
            day = input("\nWhich day do you want to analyze?\n").capitalize()

    print('-'*40)
    return city, month, day

##############################


def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all_months" to apply no month filter
        (str) day - name of the day of week to filter by, or "all_days" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    data = pd.read_csv("{}.csv".format(city))
    data.drop(data.columns[0], axis=1, inplace=True)  # drop the unnamed index column
    data['Start Time'] = pd.to_datetime(data['Start Time'], format='%Y-%m-%d %H:%M:%S')
    data['End Time'] = pd.to_datetime(data['End Time'], format='%Y-%m-%d %H:%M:%S')
    data['weekday'] = data['Start Time'].dt.dayofweek  # 0 - Monday
    data['month'] = data['Start Time'].dt.month  # 1 - January
    data['hour'] = data['Start Time'].dt.hour

    day_dict = {"Mon": 0, "Tue": 1, "Wed": 2, "Thu": 3, "Fri": 4, "Sat": 5, "Sun": 6}
    month_dict = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6}

    if month == 'all_months' and day != 'all_days':  # filter just by day
        day = day_dict.get(day)
        df = data[data['weekday'] == day]
    elif day == 'all_days' and month != 'all_months':  # filter just by month
        month = month_dict.get(month)
        df = data[data['month'] == month]
    elif day == 'all_days' and month == 'all_months':  # no filters
        df = data
    else:  # filter both by day and month
        day = day_dict.get(day)
        month = month_dict.get(month)
        df = data[(data['weekday'] == day) & (data['month'] == month)]
    return df

###########################


def time_stats(df):
    """Displays statistics on the most frequent times of travel."""
    print("="*40)
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()

    most_freq_hour = str(df.groupby(['hour'])['Start Time'].count().idxmax())
    high_hour_qty = str(df.groupby(['hour'])['Start Time'].count().max())

    if len(df['weekday'].unique()) != 1 and len(df['month'].unique()) != 1:
        # no filters - show month, day and hour
        most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
        high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
        print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
        print("Bikes were rented then about {} times".format(high_month_qty))
        print()
        most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
        high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
        print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
        print("Bikes were rented then about {} times".format(high_day_qty))
        print()
        print("Hottest hour was {} o'clock".format(most_freq_hour))
        print("Bikes were rented then about {} times".format(high_hour_qty))
    elif len(df['month'].unique()) == 1 and len(df['weekday'].unique()) != 1:
        # filtered just by month - show day and hour
        most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
        high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
        print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
        print("Bikes were rented then about {} times".format(high_day_qty))
        print()
        print("Hottest hour was {} o'clock".format(most_freq_hour))
        print("Bikes were rented then about {} times".format(high_hour_qty))
    elif len(df['month'].unique()) != 1 and len(df['weekday'].unique()) == 1:
        # filtered only by day - show month and hour
        most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
        high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
        print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
        print("Bikes were rented then about {} times".format(high_month_qty))
        print()
        print("Hottest hour was {} o'clock".format(most_freq_hour))
        print("Bikes were rented then about {} times".format(high_hour_qty))
    else:
        # filtered by both day and month - show just the hour
        print("Hottest hour was {} o'clock".format(most_freq_hour))
        print("Bikes were rented then about {} times".format(high_hour_qty))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)


def station_stats(df):
    """Displays statistics on the most popular stations and trip."""
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()

    # display most commonly used start station
    start_station_high_freq = df.groupby(['Start Station'])['Start Time'].count().idxmax()
    start_station_high_qty = df.groupby(['Start Station'])['Start Time'].count().max()
    print("The hottest start station was {}".format(start_station_high_freq))
    print("Bikes were rented there around {} times".format(start_station_high_qty))
    print()

    # display most commonly used end station
    end_station_high_freq = df.groupby(['End Station'])['Start Time'].count().idxmax()
    end_station_high_qty = df.groupby(['End Station'])['Start Time'].count().max()
    print("The hottest end station was {}".format(end_station_high_freq))
    print("Bikes were rented there around {} times".format(end_station_high_qty))
    print()

    # display most frequent combination of start station and end station
    df_grouped = (df.groupby(['Start Station', 'End Station'])
                    .size()
                    .reset_index()
                    .rename(columns={0: 'count'})
                    .sort_values(by="count", ascending=False))
    print("Most frequent station combination was:\n{} and {}".format(
        str(df_grouped.iloc[0, 0]), str(df_grouped.iloc[0, 1])))
    print("This route was completed {} times".format(int(df_grouped.iloc[0, 2])))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)


def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration."""
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()

    df['time'] = df['End Time'] - df['Start Time']
    # display total and mean travel time
    print("Total travel time in that period: {}".format(df['time'].sum()))
    print("Average time of journey: {}".format(df['time'].mean()))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)


def user_stats(df):
    """Displays statistics on bikeshare users."""
    print('\nCalculating User Stats...\n')
    start_time = time.time()

    # display counts of user types
    user_types = df.groupby(['User Type'])['Start Time'].count()
    print(user_types.to_string())
    print()

    try:
        # display counts of gender
        gender = df.groupby(['Gender'])['Start Time'].count()
        print(gender.to_string())
        print()
        # display earliest, most recent, and most common year of birth
        earliest = int(df['Birth Year'].min())
        # indexing the mode with 0 is important - without it the program
        # crashes, e.g. for the June/Monday filter in New York
        common = int(df['Birth Year'].mode()[0])
        recent = int(df['Birth Year'].max())
        print("The oldest person that rented a bicycle in that time was born in: {}".format(earliest))
        print("The most common birth year: {}".format(common))
        print("The youngest person: {}".format(recent))
    except KeyError:
        print("="*40)
        print("For Washington there is no data on Gender and Birth Year")
        print("="*40)

    print("\nThis took %s seconds." % (time.time() - start_time))
    print("="*40)


def show_entries(df):
    # Is there a better way to do this? Starting at i = -1 feels clumsy,
    # but with i = 0 the loop starts showing entries from the 5th row up.
    i = -1
    while True:
        i += 1
        curious = input("Do you want to see five entries of raw data? Type 'yes' or 'no' \n")
        if curious.lower() != 'yes':
            break
        else:
            print(str(df.iloc[0 + 5*i:5 + 5*i, :8]
                      .to_json(orient='records', date_format='iso'))
                  .replace('},{', "\n\n").replace(",", "\n").replace("[{", "")
                  .replace("}]", "").replace('"', '').replace(":", ": "), "\n")
            # Not very Pythonic, but it works; the ISO date format is not
            # ideal, but no better solution was found.


def show_entries_washington(df):  # no info on gender and age for Washington
    i = -1
    while True:
        i += 1
        curious = input("Do you want to see five entries of raw data? Type 'yes' or 'no' \n")
        if curious.lower() != 'yes':
            break
        else:
            print(str(df.iloc[0 + 5*i:5 + 5*i, :6]
                      .to_json(orient='records', date_format='iso'))
                  .replace('},{', "\n\n").replace(",", "\n").replace("[{", "")
                  .replace("}]", "").replace('"', '').replace(":", ": "), "\n")
            # The program could crash if 5*i exceeds the number of rows;
            # handling IndexError would prevent that, but it seems unnecessary here.


def main():
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)

        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)
        if city == 'washington':
            show_entries_washington(df)
        else:
            show_entries(df)

        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower() != 'yes':
            break


if __name__ == "__main__":
    main()
deps.rs
use std::{
    collections::{HashMap, HashSet},
    path::PathBuf,
};

use crate::{
    error::{Error, Result, UserError},
    target::Target,
    util::ResultIterator,
};
use daggy::{petgraph::visit::IntoNeighborsDirected, NodeIndex as Nx};

type DependencyDag = daggy::Dag<Node, ()>;
type Identifier = PathBuf;
type FileState = (); // TODO

#[derive(Debug)]
enum Node {
    Target(Target),
    NoRule(Identifier),
}

// TODO think of a better name
pub struct DependencyGraph {
    id_to_ix_map: HashMap<Identifier, Nx>,
    graph: DependencyDag,
}

impl DependencyGraph {
    pub fn construct(targets: Vec<Target>) -> Result<Self> {
        let mut graph = DependencyDag::new();
        let mut id_to_ix_map = HashMap::new();

        // add target nodes
        targets
            .iter()
            .cloned()
            .map(|target| {
                util::add_target_node(&mut graph, &mut id_to_ix_map, target)
            })
            .collect::<Result<_>>()?;

        // add leaf dependency nodes - leaf nodes representing actual files
        targets
            .into_iter()
            .map(|target| target.deps)
            .flatten()
            .for_each(|dep_id| {
                util::add_leaf_node(&mut graph, &mut id_to_ix_map, dep_id);
            });

        // add edges
        graph
            .graph()
            .node_indices()
            .map(|target_ix| {
                util::add_edges_to_deps(&mut graph, &id_to_ix_map, target_ix)
            })
            .collect::<Result<_>>()?;

        Ok(Self {
            graph,
            id_to_ix_map,
        })
    }

    pub fn get_target_sequence(
        &self,
        target_id: Identifier,
    ) -> Result<Vec<Target>> {
        let graph = &self.graph;
        let target_ix = *self
            .id_to_ix_map
            .get(&target_id)
            .ok_or_else(|| UserError::NoSuchTarget(target_id))?;
        let depth_map = util::generate_depth_map(graph, target_ix);
        let obsolete_leaf_nodes = util::find_obsolete_leaf_nodes(graph.graph())?;
        let obsolete_targets =
            util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
        util::get_target_sequence(graph.graph(), &depth_map, &obsolete_targets)
    }
}

mod util {
    use std::collections::{HashMap, VecDeque};

    use super::*;
    use daggy::petgraph;
    use petgraph::prelude::{Direction, Graph};

    pub(super) fn get_target_sequence(
        graph: &Graph<Node, ()>,
        depth_map: &HashMap<Nx, usize>,
        obsolete_targets: &HashSet<Nx>,
    ) -> Result<Vec<Target>> {
        // filter out targets which are not in the
        // dependency graph of the chosen target
        // and sort the remaining targets by depth in **decreasing** order
        let mut obsolete_targets = obsolete_targets
            .iter()
            .filter(|ix| depth_map.contains_key(ix))
            .copied()
            .collect::<Vec<_>>();
        obsolete_targets.sort_by_key(|ix| depth_map[ix]);
        obsolete_targets.reverse();

        obsolete_targets
            .into_iter()
            .map(|target_ix| match &graph[target_ix] {
                Node::Target(target) => Ok(target.clone()),
                Node::NoRule(_) => Err(Error::internal(line!(), file!())),
            })
            .collect::<Result<Vec<_>>>()
    }

    /// This function finds all nodes that have no dependencies -
    /// both actual files (`NoRule` variant) and targets
    /// (a `Target` variant with no dependencies is assumed to depend
    /// on other factors - time, environment variables,
    /// current directory etc.).
    pub(super) fn find_obsolete_leaf_nodes(
        graph: &Graph<Node, ()>,
    ) -> Result<HashSet<Nx>> {
        graph
            .externals(Direction::Outgoing) // get nodes with no outgoing edges
            .filter_map(|node_ix| match &graph[node_ix] {
                // TODO filter `requires_rebuilding`
                Node::Target(_target) => Some(Ok(node_ix)),
                Node::NoRule(identifier) => {
                    // TODO clean up this part
                    let previous_file_state = ();
                    let result = has_file_been_modified(
                        &identifier,
                        previous_file_state,
                    );
                    match result {
                        Ok(has_been_modified) => {
                            if has_been_modified {
                                Some(Ok(node_ix))
                            } else {
                                None
                            }
                        }
                        Err(err) => Some(Err(err)),
                    }
                }
            })
            .collect::<Result<HashSet<_>>>()
    }

    pub(super) fn find_obsolete_targets(
        graph: &Graph<Node, ()>,
        obsolete_leaf_nodes: &HashSet<Nx>,
    ) -> HashSet<Nx> {
        // reverse short-circuiting BFS:
        // skip the dependants of the targets
        // that have already been marked as obsolete
        let mut queue = VecDeque::<Nx>::new();
        let mut obsolete_ixs = HashSet::<Nx>::new();

        for leaf_ix in obsolete_leaf_nodes {
            // no need to clear the queue since it gets drained
            // in the while loop each time
            match &graph[*leaf_ix] {
                Node::Target(_) => queue.push_back(*leaf_ix),
                Node::NoRule(_) => {
                    let direct_dependants =
                        graph.neighbors_directed(*leaf_ix, Direction::Incoming);
                    queue.extend(direct_dependants);
                }
            }

            while let Some(target_ix) = queue.pop_front() {
                let has_just_been_found = obsolete_ixs.insert(target_ix);
                if has_just_been_found {
                    let dependants = graph
                        .neighbors_directed(target_ix, Direction::Incoming);
                    queue.extend(dependants);
                }
            }
        }
        obsolete_ixs
    }

    pub(super) fn add_leaf_node(
        graph: &mut DependencyDag,
        id_to_ix_map: &mut HashMap<Identifier, Nx>,
        dependency_identifier: Identifier,
    ) {
        id_to_ix_map
            .entry(dependency_identifier.clone())
            .or_insert_with(|| {
                // `.add_node()` returns the node's index
                graph.add_node(Node::NoRule(dependency_identifier))
            });
    }

    pub(super) fn add_target_node(
        graph: &mut DependencyDag,
        id_to_ix_map: &mut HashMap<Identifier, Nx>,
        target: Target,
    ) -> Result<()> {
        let identifier = target.identifier.clone();
        let node_ix = graph.add_node(Node::Target(target));
        let slot = id_to_ix_map.insert(identifier, node_ix);
        match slot {
            Some(_colliding_target_ix) => Err(UserError::DuplicateTarget.into()),
            None => Ok(()),
        }
    }

    pub(super) fn add_edges_to_deps(
        graph: &mut DependencyDag,
        id_to_ix_map: &HashMap<Identifier, Nx>,
        target_ix: Nx,
    ) -> Result<()> {
        let deps = match &graph[target_ix] {
            Node::Target(target) => target.deps.clone(),
            Node::NoRule(_) => return Ok(()), // no deps
        };
        deps.iter()
            .map(|dep_id| {
                id_to_ix_map
                    .get(dep_id)
                    .ok_or_else(|| Error::internal(line!(), file!()))
            })
            .map_item(|dep_ix| {
                graph
                    .add_edge(target_ix, *dep_ix, ())
                    .map(|_| ())
                    .map_err(|_| UserError::DependencyCycle.into())
            })
            .map(|result| result.flatten())
            .collect::<Result<_>>()
    }

    pub(super) fn has_file_been_modified(
        _identifier: &Identifier,
        _previous_state: FileState,
    ) -> Result<bool> {
        Ok(true) // TODO for now it just rebuilds everything
    }

    pub(super) fn generate_depth_map<N, E>(
        graph: &daggy::Dag<N, E>,
        target_id: Nx,
    ) -> HashMap<Nx, usize> {
        let mut depth_map: HashMap<Nx, usize> = HashMap::new();
        let mut current_depth = 0;
        let mut queue: VecDeque<Vec<_>> = VecDeque::new();
        queue.push_front(vec![target_id]);

        while let Some(level) = queue.pop_front() {
            if level.is_empty() {
                break;
            }
            let mut level_queue = vec![];
            for current_node in level {
                // update current node's depth, keeping the maximum seen
                let _ = depth_map
                    .entry(current_node)
                    .and_modify(|depth| *depth = (*depth).max(current_depth))
                    .or_insert(current_depth);
                let children =
                    graph.neighbors_directed(current_node, Direction::Outgoing);
                level_queue.extend(children);
            }
            queue.push_back(level_queue);
            current_depth += 1;
        }
        depth_map
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use daggy::petgraph::graph::node_index as n;
    use pretty_assertions::assert_eq;

    #[test]
    fn test_get_task_sequence() {
        // helper functions
        let task = |cmd: &str| Task {
            command: cmd.into(),
            working_dir: None,
        };
        let target = |id: &str, deps: &[&str]| Target {
            identifier: id.into(),
            deps: deps.iter().map(|d| d.into()).collect(),
            tasks: vec![task(id)],
            working_dir: None,
        };
        let ix = |id: &str, map: &HashMap<_, _>| {
            let p: &std::path::Path = id.as_ref();
            map[p]
        };

        // the dependency graph:
        //
        // a1    a2'
        //  |   /  \
        // b1  b2  b3
        //      |   |
        //     l1* l2
        //
        // a2 is the target (')
        // l1 is marked as obsolete (*)
        // b2's and a2's tasks must be executed (in that order)

        // targets and their dependencies
        #[rustfmt::skip]
        let targets = vec![
            target("a1", &["b1"]),
            target("a2", &["b2", "b3"]),
            target("b2", &["l1"]),
            target("b3", &["l2"]),
        ];

        let DependencyGraph {
            graph,
            id_to_ix_map: map,
        } = DependencyGraph::construct(targets).unwrap();

        // depth map
        #[rustfmt::skip]
        let depth_map = vec![
            (ix("a2", &map), 0),
            (ix("b2", &map), 1),
            (ix("b3", &map), 1),
            (ix("l1", &map), 2),
            (ix("l2", &map), 2),
        ].into_iter().collect();

        // nodes that have been marked as obsolete
        // (in real code it is automated)
        let obsolete_leaf_nodes = vec![ix("l1", &map)].into_iter().collect();

        // get the sequence of tasks that must be executed
        // in a specific order
        let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes); let target_sequence = util::get_target_sequence( graph.graph(), &depth_map, &obsolete_targets, ) .unwrap(); let target_sequence = target_sequence .into_iter() .map(|target| target.identifier) .collect::<Vec<_>>(); let expected_target_sequence: Vec<PathBuf> = vec!["b2".into(), "a2".into()]; assert_eq!(target_sequence, expected_target_sequence); } #[test] fn test_find_obsolete_targets() { // helper functions let target = |id: &str, deps: &[&str]| Target { identifier: id.into(), deps: deps.iter().map(|d| d.into()).collect(), tasks: vec![], working_dir: None, }; let ixs = |ids: &[&str], map: &HashMap<_, Nx>| { ids.iter() .map(|id| map[&Into::<PathBuf>::into(id)]) .collect() }; // the dependency graph: // // a1 a2 // / \ \ // / \ \ // b1 b2 b3 // / / // / / // l1* l2* // // l1 and l2 are marked as obsolete // the function should find b2, a1, b3 & a2 // but not b1 #[rustfmt::skip] let targets = vec![ target("a1", &["b1", "b2"]), target("a2", &["b3"]), target("b2", &["l1"]), target("b3", &["l2"]), ]; let DependencyGraph { graph, id_to_ix_map: map, } = DependencyGraph::construct(targets).unwrap(); let obsolete_leaf_nodes = ixs(&["l1", "l2"], &map); let found_targets = util::find_obsolete_targets(&graph.graph(), &obsolete_leaf_nodes); let expected_targets = ixs(&["a1", "a2", "b2", "b3"], &map); assert_eq!(found_targets, expected_targets); } #[test] fn test_generate_depth_map() { // depth is the length of the longest path from // the target node to the dependency #[rustfmt::skip] let graph: daggy::Dag<(), ()> = daggy::Dag::from_edges(&[ (0, 3), (0, 4), (1, 3), (1, 4), (1, 6), (2, 3), (2, 4), (3, 5), (3, 6), (3, 7), (4, 5), (4, 6), (4, 7), (7, 8), (8, 9), ]).unwrap(); let target = n(1); // target let depth_map = util::generate_depth_map(&graph, target); assert!(depth_map.get(&n(0)).is_none()); assert!(depth_map.get(&n(2)).is_none()); assert_eq!(depth_map[&n(1)], 0); assert_eq!(depth_map[&n(3)], 1); assert_eq!(depth_map[&n(4)], 1); assert_eq!(depth_map[&n(5)], 2); assert_eq!(depth_map[&n(6)], 2); assert_eq!(depth_map[&n(7)], 2); assert_eq!(depth_map[&n(8)], 3); assert_eq!(depth_map[&n(9)], 4); } }
add_leaf_node
identifier_name
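The record above masks the identifier `add_leaf_node`, whose job is idempotent insertion: a dependency may be named by many targets, but it must map to exactly one graph node. A hedged Go sketch of the same entry-or-insert pattern that the Rust code expresses with `entry().or_insert_with()`; the graph and map types here are illustrative.

// Idempotent leaf insertion: allocate a node index only the first time
// an identifier is seen, otherwise reuse the existing one.
package main

import "fmt"

type graph struct {
	nodes []string // payloads; position in the slice is the node index
}

func (g *graph) addNode(id string) int {
	g.nodes = append(g.nodes, id)
	return len(g.nodes) - 1
}

func addLeafNode(g *graph, idToIx map[string]int, id string) int {
	if ix, ok := idToIx[id]; ok {
		return ix // already present: no duplicate node
	}
	ix := g.addNode(id)
	idToIx[id] = ix
	return ix
}

func main() {
	g := &graph{}
	m := map[string]int{}
	fmt.Println(addLeafNode(g, m, "lib.rs")) // 0
	fmt.Println(addLeafNode(g, m, "lib.rs")) // still 0
}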
deps.rs
use std::{
    collections::{HashMap, HashSet},
    path::PathBuf,
};

use crate::{
    error::{Error, Result, UserError},
    target::Target,
    util::ResultIterator,
};

use daggy::{petgraph::visit::IntoNeighborsDirected, NodeIndex as Nx};

type DependencyDag = daggy::Dag<Node, ()>;
type Identifier = PathBuf;
type FileState = (); // TODO

#[derive(Debug)]
enum Node {
    Target(Target),
    NoRule(Identifier),
}

// TODO think of a better name
pub struct DependencyGraph {
    id_to_ix_map: HashMap<Identifier, Nx>,
    graph: DependencyDag,
}

impl DependencyGraph {
    pub fn construct(targets: Vec<Target>) -> Result<Self> {
        let mut graph = DependencyDag::new();
        let mut id_to_ix_map = HashMap::new();

        // add target nodes
        targets
            .iter()
            .cloned()
            .map(|target| {
                util::add_target_node(&mut graph, &mut id_to_ix_map, target)
            })
            .collect::<Result<_>>()?;

        // add leaf dependency nodes - leaf nodes representing actual files
        targets
            .into_iter()
            .flat_map(|target| target.deps)
            .for_each(|dep_id| {
                util::add_leaf_node(&mut graph, &mut id_to_ix_map, dep_id);
            });

        // add edges
        graph
            .graph()
            .node_indices()
            .map(|target_ix| {
                util::add_edges_to_deps(&mut graph, &id_to_ix_map, target_ix)
            })
            .collect::<Result<_>>()?;

        Ok(Self {
            graph,
            id_to_ix_map,
        })
    }

    pub fn get_target_sequence(
        &self,
        target_id: Identifier,
    ) -> Result<Vec<Target>> {
        let graph = &self.graph;
        let target_ix = *self
            .id_to_ix_map
            .get(&target_id)
            .ok_or_else(|| UserError::NoSuchTarget(target_id))?;
        let depth_map = util::generate_depth_map(graph, target_ix);
        let obsolete_leaf_nodes = util::find_obsolete_leaf_nodes(graph.graph())?;
        let obsolete_targets =
            util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
        util::get_target_sequence(graph.graph(), &depth_map, &obsolete_targets)
    }
}

mod util {
    use std::collections::{HashMap, VecDeque};

    use super::*;
    use daggy::petgraph;
    use petgraph::prelude::{Direction, Graph};

    pub(super) fn get_target_sequence(
        graph: &Graph<Node, ()>,
        depth_map: &HashMap<Nx, usize>,
        obsolete_targets: &HashSet<Nx>,
    ) -> Result<Vec<Target>> {
        // filter out targets which are not in the
        // dependency graph of the chosen target,
        // then sort the remaining targets by depth in **decreasing** order
        let mut obsolete_targets = obsolete_targets
            .iter()
            .filter(|ix| depth_map.contains_key(ix))
            .copied()
            .collect::<Vec<_>>();
        obsolete_targets.sort_by_key(|ix| depth_map[ix]);
        obsolete_targets.reverse();

        obsolete_targets
            .into_iter()
            .map(|target_ix| match &graph[target_ix] {
                Node::Target(target) => Ok(target.clone()),
                Node::NoRule(_) => Err(Error::internal(line!(), file!())),
            })
            .collect::<Result<Vec<_>>>()
    }

    /// This function finds all nodes that have no dependencies -
    /// both actual files (`NoRule` variant) and targets
    /// (a `Target` variant with no dependencies is assumed to depend
    /// on other factors - time, environment variables,
    /// current directory etc.).
pub(super) fn find_obsolete_leaf_nodes( graph: &Graph<Node, ()>, ) -> Result<HashSet<Nx>> { graph .externals(Direction::Outgoing) // get nodes with no outgoing edges .filter_map(|node_ix| match &graph[node_ix] { // TODO filter `requires_rebuilding` Node::Target(_target) => Some(Ok(node_ix)), Node::NoRule(identifier) => { // TODO clean up this part let previous_file_state = (); let result = has_file_been_modified( &identifier, previous_file_state, ); match result { Ok(has_been_modified) => if has_been_modified { Some(Ok(node_ix)) } else { None }, Err(err) => Some(Err(err)), } } }) .collect::<Result<HashSet<_>>>() } pub(super) fn find_obsolete_targets( graph: &Graph<Node, ()>, obsolete_leaf_nodes: &HashSet<Nx>, ) -> HashSet<Nx> { // reverse short circuiting bfs: // skip the dependants of the targets // that have already been marked as obsolete let mut queue = VecDeque::<Nx>::new(); let mut obsolete_ixs = HashSet::<Nx>::new(); for leaf_ix in obsolete_leaf_nodes { // no need to clear the queue since it gets drained // in the while loop each time match &graph[*leaf_ix] { Node::Target(_) => queue.push_back(*leaf_ix), Node::NoRule(_) => { let direct_dependants = graph.neighbors_directed(*leaf_ix, Direction::Incoming); queue.extend(direct_dependants); } } while let Some(target_ix) = queue.pop_front() { let has_just_been_found = obsolete_ixs.insert(target_ix); if has_just_been_found { let dependants = graph .neighbors_directed(target_ix, Direction::Incoming); queue.extend(dependants); } } } obsolete_ixs } pub(super) fn add_leaf_node( graph: &mut DependencyDag, id_to_ix_map: &mut HashMap<Identifier, Nx>, dependency_identifier: Identifier, ) { id_to_ix_map .entry(dependency_identifier.clone()) .or_insert_with(|| { // `.add_node()` returns node's index graph.add_node(Node::NoRule(dependency_identifier)) }); } pub(super) fn add_target_node( graph: &mut DependencyDag, id_to_ix_map: &mut HashMap<Identifier, Nx>, target: Target, ) -> Result<()> { let identifier = target.identifier.clone(); let node_ix = graph.add_node(Node::Target(target)); let slot = id_to_ix_map.insert(identifier, node_ix); match slot { Some(_colliding_target_ix) => Err(UserError::DuplicateTarget.into()), None => Ok(()), } } pub(super) fn add_edges_to_deps( graph: &mut DependencyDag, id_to_ix_map: &HashMap<Identifier, Nx>, target_ix: Nx, ) -> Result<()> { let deps = match &graph[target_ix] { Node::Target(target) => target.deps.clone(), Node::NoRule(_) => return Ok(()), // no deps }; deps.iter() .map(|dep_id| { id_to_ix_map .get(dep_id) .ok_or_else(|| Error::internal(line!(), file!())) }) .map_item(|dep_ix| { graph .add_edge(target_ix, *dep_ix, ()) .map(|_| ()) .map_err(|_| UserError::DependencyCycle.into()) }) .map(|result| result.flatten()) .collect::<Result<_>>() } pub(super) fn has_file_been_modified( _identifier: &Identifier, _previous_state: FileState, ) -> Result<bool> { Ok(true) // TODO for now it just rebuilds everything } pub(super) fn generate_depth_map<N, E>( graph: &daggy::Dag<N, E>, target_id: Nx, ) -> HashMap<Nx, usize> { let mut depth_map: HashMap<Nx, usize> = HashMap::new(); let mut current_depth = 0; let mut queue: VecDeque<Vec<_>> = VecDeque::new(); queue.push_front(vec![target_id]); while let Some(level) = queue.pop_front() { if level.is_empty() { break; } let mut level_queue = vec![]; for current_node in level { // update current node's depth let _ = depth_map .entry(current_node) .and_modify(|depth| *depth = (*depth).max(current_depth)) .or_insert(current_depth); let children = graph.neighbors_directed(current_node, 
Direction::Outgoing); level_queue.extend(children); } queue.push_back(level_queue); current_depth += 1; } depth_map } } #[cfg(test)] mod test { use super::*; use daggy::petgraph::graph::node_index as n; use pretty_assertions::assert_eq; #[test] fn test_get_task_sequence() { // helper functions let task = |cmd: &str| Task { command: cmd.into(), working_dir: None, }; let target = |id: &str, deps: &[&str]| Target { identifier: id.into(), deps: deps.iter().map(|d| d.into()).collect(), tasks: vec![task(id)], working_dir: None, }; let ix = |id: &str, map: &HashMap<_, _>| { let p: &std::path::Path = id.as_ref(); map[p] }; // the dependency graph: // // a1 a2' // / / \ // / / \ // b1 b2 b3 // / / // / / // l1* l2 // // a2 is the target (') // l1 is marked as obsolete (*) // b2's and a2's tasks must be executed (in that order) // targets and their dependencies #[rustfmt::skip] let targets = vec![ target("a1", &["b1"]), target("a2", &["b2", "b3"]), target("b2", &["l1"]), target("b3", &["l2"]), ]; let DependencyGraph { graph, id_to_ix_map: map, } = DependencyGraph::construct(targets).unwrap(); // depth map #[rustfmt::skip] let depth_map = vec![ (ix("a2", &map), 0), (ix("b2", &map), 1), (ix("b3", &map), 1), (ix("l1", &map), 2), (ix("l2", &map), 2), ].into_iter().collect(); // nodes that have been marked as obsolete // (in real code it is automated) let obsolete_leaf_nodes = vec![ix("l1", &map)].into_iter().collect(); // get the sequence of tasks that must be executed // in specific order let obsolete_targets = util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes); let target_sequence = util::get_target_sequence( graph.graph(), &depth_map, &obsolete_targets, ) .unwrap(); let target_sequence = target_sequence .into_iter()
.map(|target| target.identifier) .collect::<Vec<_>>(); let expected_target_sequence: Vec<PathBuf> = vec!["b2".into(), "a2".into()]; assert_eq!(target_sequence, expected_target_sequence); } #[test] fn test_find_obsolete_targets() { // helper functions let target = |id: &str, deps: &[&str]| Target { identifier: id.into(), deps: deps.iter().map(|d| d.into()).collect(), tasks: vec![], working_dir: None, }; let ixs = |ids: &[&str], map: &HashMap<_, Nx>| { ids.iter() .map(|id| map[&Into::<PathBuf>::into(id)]) .collect() }; // the dependency graph: // // a1 a2 // / \ \ // / \ \ // b1 b2 b3 // / / // / / // l1* l2* // // l1 and l2 are marked as obsolete // the function should find b2, a1, b3 & a2 // but not b1 #[rustfmt::skip] let targets = vec![ target("a1", &["b1", "b2"]), target("a2", &["b3"]), target("b2", &["l1"]), target("b3", &["l2"]), ]; let DependencyGraph { graph, id_to_ix_map: map, } = DependencyGraph::construct(targets).unwrap(); let obsolete_leaf_nodes = ixs(&["l1", "l2"], &map); let found_targets = util::find_obsolete_targets(&graph.graph(), &obsolete_leaf_nodes); let expected_targets = ixs(&["a1", "a2", "b2", "b3"], &map); assert_eq!(found_targets, expected_targets); } #[test] fn test_generate_depth_map() { // depth is the length of the longest path from // the target node to the dependency #[rustfmt::skip] let graph: daggy::Dag<(), ()> = daggy::Dag::from_edges(&[ (0, 3), (0, 4), (1, 3), (1, 4), (1, 6), (2, 3), (2, 4), (3, 5), (3, 6), (3, 7), (4, 5), (4, 6), (4, 7), (7, 8), (8, 9), ]).unwrap(); let target = n(1); // target let depth_map = util::generate_depth_map(&graph, target); assert!(depth_map.get(&n(0)).is_none()); assert!(depth_map.get(&n(2)).is_none()); assert_eq!(depth_map[&n(1)], 0); assert_eq!(depth_map[&n(3)], 1); assert_eq!(depth_map[&n(4)], 1); assert_eq!(depth_map[&n(5)], 2); assert_eq!(depth_map[&n(6)], 2); assert_eq!(depth_map[&n(7)], 2); assert_eq!(depth_map[&n(8)], 3); assert_eq!(depth_map[&n(9)], 4); } }
random_line_split
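`generate_depth_map` above walks the graph level by level from the requested target and, when a node is reachable along several paths, keeps the largest depth seen, so the recorded depth is the length of the longest path from the target (exactly what `test_generate_depth_map` asserts). A small Go sketch of the same idea over a plain adjacency map; like the Rust original, it re-expands a node's children on every visit, which is fine for a DAG.

// Longest-path depth map via level-order traversal with max-depth updates.
package main

import "fmt"

func depthMap(children map[int][]int, target int) map[int]int {
	depths := map[int]int{}
	level := []int{target}
	for depth := 0; len(level) > 0; depth++ {
		var next []int
		for _, node := range level {
			if d, seen := depths[node]; !seen || depth > d {
				depths[node] = depth // keep the maximum depth seen so far
			}
			next = append(next, children[node]...)
		}
		level = next
	}
	return depths
}

func main() {
	// 1 -> 3 -> 7 -> 8 -> 9, matching part of the test graph above
	g := map[int][]int{1: {3}, 3: {7}, 7: {8}, 8: {9}}
	fmt.Println(depthMap(g, 1)) // map[1:0 3:1 7:2 8:3 9:4]
}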
deps.rs
use std::{
    collections::{HashMap, HashSet},
    path::PathBuf,
};

use crate::{
    error::{Error, Result, UserError},
    target::Target,
    util::ResultIterator,
};

use daggy::{petgraph::visit::IntoNeighborsDirected, NodeIndex as Nx};

type DependencyDag = daggy::Dag<Node, ()>;
type Identifier = PathBuf;
type FileState = (); // TODO

#[derive(Debug)]
enum Node {
    Target(Target),
    NoRule(Identifier),
}

// TODO think of a better name
pub struct DependencyGraph {
    id_to_ix_map: HashMap<Identifier, Nx>,
    graph: DependencyDag,
}

impl DependencyGraph {
    pub fn construct(targets: Vec<Target>) -> Result<Self> {
        let mut graph = DependencyDag::new();
        let mut id_to_ix_map = HashMap::new();

        // add target nodes
        targets
            .iter()
            .cloned()
            .map(|target| {
                util::add_target_node(&mut graph, &mut id_to_ix_map, target)
            })
            .collect::<Result<_>>()?;

        // add leaf dependency nodes - leaf nodes representing actual files
        targets
            .into_iter()
            .flat_map(|target| target.deps)
            .for_each(|dep_id| {
                util::add_leaf_node(&mut graph, &mut id_to_ix_map, dep_id);
            });

        // add edges
        graph
            .graph()
            .node_indices()
            .map(|target_ix| {
                util::add_edges_to_deps(&mut graph, &id_to_ix_map, target_ix)
            })
            .collect::<Result<_>>()?;

        Ok(Self {
            graph,
            id_to_ix_map,
        })
    }

    pub fn get_target_sequence(
        &self,
        target_id: Identifier,
    ) -> Result<Vec<Target>> {
        let graph = &self.graph;
        let target_ix = *self
            .id_to_ix_map
            .get(&target_id)
            .ok_or_else(|| UserError::NoSuchTarget(target_id))?;
        let depth_map = util::generate_depth_map(graph, target_ix);
        let obsolete_leaf_nodes = util::find_obsolete_leaf_nodes(graph.graph())?;
        let obsolete_targets =
            util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
        util::get_target_sequence(graph.graph(), &depth_map, &obsolete_targets)
    }
}

mod util {
    use std::collections::{HashMap, VecDeque};

    use super::*;
    use daggy::petgraph;
    use petgraph::prelude::{Direction, Graph};

    pub(super) fn get_target_sequence(
        graph: &Graph<Node, ()>,
        depth_map: &HashMap<Nx, usize>,
        obsolete_targets: &HashSet<Nx>,
    ) -> Result<Vec<Target>> {
        // filter out targets which are not in the
        // dependency graph of the chosen target,
        // then sort the remaining targets by depth in **decreasing** order
        let mut obsolete_targets = obsolete_targets
            .iter()
            .filter(|ix| depth_map.contains_key(ix))
            .copied()
            .collect::<Vec<_>>();
        obsolete_targets.sort_by_key(|ix| depth_map[ix]);
        obsolete_targets.reverse();

        obsolete_targets
            .into_iter()
            .map(|target_ix| match &graph[target_ix] {
                Node::Target(target) => Ok(target.clone()),
                Node::NoRule(_) => Err(Error::internal(line!(), file!())),
            })
            .collect::<Result<Vec<_>>>()
    }

    /// This function finds all nodes that have no dependencies -
    /// both actual files (`NoRule` variant) and targets
    /// (a `Target` variant with no dependencies is assumed to depend
    /// on other factors - time, environment variables,
    /// current directory etc.).
pub(super) fn find_obsolete_leaf_nodes( graph: &Graph<Node, ()>, ) -> Result<HashSet<Nx>> { graph .externals(Direction::Outgoing) // get nodes with no outgoing edges .filter_map(|node_ix| match &graph[node_ix] { // TODO filter `requires_rebuilding` Node::Target(_target) => Some(Ok(node_ix)), Node::NoRule(identifier) => { // TODO clean up this part let previous_file_state = (); let result = has_file_been_modified( &identifier, previous_file_state, ); match result { Ok(has_been_modified) => if has_been_modified { Some(Ok(node_ix)) } else { None }, Err(err) => Some(Err(err)), } } }) .collect::<Result<HashSet<_>>>() } pub(super) fn find_obsolete_targets( graph: &Graph<Node, ()>, obsolete_leaf_nodes: &HashSet<Nx>, ) -> HashSet<Nx>
pub(super) fn add_leaf_node( graph: &mut DependencyDag, id_to_ix_map: &mut HashMap<Identifier, Nx>, dependency_identifier: Identifier, ) { id_to_ix_map .entry(dependency_identifier.clone()) .or_insert_with(|| { // `.add_node()` returns node's index graph.add_node(Node::NoRule(dependency_identifier)) }); } pub(super) fn add_target_node( graph: &mut DependencyDag, id_to_ix_map: &mut HashMap<Identifier, Nx>, target: Target, ) -> Result<()> { let identifier = target.identifier.clone(); let node_ix = graph.add_node(Node::Target(target)); let slot = id_to_ix_map.insert(identifier, node_ix); match slot { Some(_colliding_target_ix) => Err(UserError::DuplicateTarget.into()), None => Ok(()), } } pub(super) fn add_edges_to_deps( graph: &mut DependencyDag, id_to_ix_map: &HashMap<Identifier, Nx>, target_ix: Nx, ) -> Result<()> { let deps = match &graph[target_ix] { Node::Target(target) => target.deps.clone(), Node::NoRule(_) => return Ok(()), // no deps }; deps.iter() .map(|dep_id| { id_to_ix_map .get(dep_id) .ok_or_else(|| Error::internal(line!(), file!())) }) .map_item(|dep_ix| { graph .add_edge(target_ix, *dep_ix, ()) .map(|_| ()) .map_err(|_| UserError::DependencyCycle.into()) }) .map(|result| result.flatten()) .collect::<Result<_>>() } pub(super) fn has_file_been_modified( _identifier: &Identifier, _previous_state: FileState, ) -> Result<bool> { Ok(true) // TODO for now it just rebuilds everything } pub(super) fn generate_depth_map<N, E>( graph: &daggy::Dag<N, E>, target_id: Nx, ) -> HashMap<Nx, usize> { let mut depth_map: HashMap<Nx, usize> = HashMap::new(); let mut current_depth = 0; let mut queue: VecDeque<Vec<_>> = VecDeque::new(); queue.push_front(vec![target_id]); while let Some(level) = queue.pop_front() { if level.is_empty() { break; } let mut level_queue = vec![]; for current_node in level { // update current node's depth let _ = depth_map .entry(current_node) .and_modify(|depth| *depth = (*depth).max(current_depth)) .or_insert(current_depth); let children = graph.neighbors_directed(current_node, Direction::Outgoing); level_queue.extend(children); } queue.push_back(level_queue); current_depth += 1; } depth_map } } #[cfg(test)] mod test { use super::*; use daggy::petgraph::graph::node_index as n; use pretty_assertions::assert_eq; #[test] fn test_get_task_sequence() { // helper functions let task = |cmd: &str| Task { command: cmd.into(), working_dir: None, }; let target = |id: &str, deps: &[&str]| Target { identifier: id.into(), deps: deps.iter().map(|d| d.into()).collect(), tasks: vec![task(id)], working_dir: None, }; let ix = |id: &str, map: &HashMap<_, _>| { let p: &std::path::Path = id.as_ref(); map[p] }; // the dependency graph: // // a1 a2' // / / \ // / / \ // b1 b2 b3 // / / // / / // l1* l2 // // a2 is the target (') // l1 is marked as obsolete (*) // b2's and a2's tasks must be executed (in that order) // targets and their dependencies #[rustfmt::skip] let targets = vec![ target("a1", &["b1"]), target("a2", &["b2", "b3"]), target("b2", &["l1"]), target("b3", &["l2"]), ]; let DependencyGraph { graph, id_to_ix_map: map, } = DependencyGraph::construct(targets).unwrap(); // depth map #[rustfmt::skip] let depth_map = vec![ (ix("a2", &map), 0), (ix("b2", &map), 1), (ix("b3", &map), 1), (ix("l1", &map), 2), (ix("l2", &map), 2), ].into_iter().collect(); // nodes that have been marked as obsolete // (in real code it is automated) let obsolete_leaf_nodes = vec![ix("l1", &map)].into_iter().collect(); // get the sequence of tasks that must be executed // in specific order let obsolete_targets 
= util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes); let target_sequence = util::get_target_sequence( graph.graph(), &depth_map, &obsolete_targets, ) .unwrap(); let target_sequence = target_sequence .into_iter() .map(|target| target.identifier) .collect::<Vec<_>>(); let expected_target_sequence: Vec<PathBuf> = vec!["b2".into(), "a2".into()]; assert_eq!(target_sequence, expected_target_sequence); } #[test] fn test_find_obsolete_targets() { // helper functions let target = |id: &str, deps: &[&str]| Target { identifier: id.into(), deps: deps.iter().map(|d| d.into()).collect(), tasks: vec![], working_dir: None, }; let ixs = |ids: &[&str], map: &HashMap<_, Nx>| { ids.iter() .map(|id| map[&Into::<PathBuf>::into(id)]) .collect() }; // the dependency graph: // // a1 a2 // / \ \ // / \ \ // b1 b2 b3 // / / // / / // l1* l2* // // l1 and l2 are marked as obsolete // the function should find b2, a1, b3 & a2 // but not b1 #[rustfmt::skip] let targets = vec![ target("a1", &["b1", "b2"]), target("a2", &["b3"]), target("b2", &["l1"]), target("b3", &["l2"]), ]; let DependencyGraph { graph, id_to_ix_map: map, } = DependencyGraph::construct(targets).unwrap(); let obsolete_leaf_nodes = ixs(&["l1", "l2"], &map); let found_targets = util::find_obsolete_targets(&graph.graph(), &obsolete_leaf_nodes); let expected_targets = ixs(&["a1", "a2", "b2", "b3"], &map); assert_eq!(found_targets, expected_targets); } #[test] fn test_generate_depth_map() { // depth is the length of the longest path from // the target node to the dependency #[rustfmt::skip] let graph: daggy::Dag<(), ()> = daggy::Dag::from_edges(&[ (0, 3), (0, 4), (1, 3), (1, 4), (1, 6), (2, 3), (2, 4), (3, 5), (3, 6), (3, 7), (4, 5), (4, 6), (4, 7), (7, 8), (8, 9), ]).unwrap(); let target = n(1); // target let depth_map = util::generate_depth_map(&graph, target); assert!(depth_map.get(&n(0)).is_none()); assert!(depth_map.get(&n(2)).is_none()); assert_eq!(depth_map[&n(1)], 0); assert_eq!(depth_map[&n(3)], 1); assert_eq!(depth_map[&n(4)], 1); assert_eq!(depth_map[&n(5)], 2); assert_eq!(depth_map[&n(6)], 2); assert_eq!(depth_map[&n(7)], 2); assert_eq!(depth_map[&n(8)], 3); assert_eq!(depth_map[&n(9)], 4); } }
{
    // reverse, short-circuiting BFS:
    // skip the dependants of the targets
    // that have already been marked as obsolete
    let mut queue = VecDeque::<Nx>::new();
    let mut obsolete_ixs = HashSet::<Nx>::new();

    for leaf_ix in obsolete_leaf_nodes {
        // no need to clear the queue since it gets drained
        // in the while loop each time
        match &graph[*leaf_ix] {
            Node::Target(_) => queue.push_back(*leaf_ix),
            Node::NoRule(_) => {
                let direct_dependants =
                    graph.neighbors_directed(*leaf_ix, Direction::Incoming);
                queue.extend(direct_dependants);
            }
        }

        while let Some(target_ix) = queue.pop_front() {
            let has_just_been_found = obsolete_ixs.insert(target_ix);
            if has_just_been_found {
                let dependants = graph
                    .neighbors_directed(target_ix, Direction::Incoming);
                queue.extend(dependants);
            }
        }
    }
    obsolete_ixs
}
identifier_body
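The masked body above is a reverse, short-circuiting BFS: starting from the obsolete leaves it follows incoming edges, and any node already marked obsolete is not expanded again, so each target is enqueued a bounded number of times. A hedged Go sketch with string node ids standing in for graph indices and a reverse-adjacency map standing in for incoming edges; it covers only the file-leaf case.

// Mark every transitive dependant of the obsolete leaves, skipping
// nodes that are already marked.
package main

import "fmt"

func obsoleteTargets(dependants map[string][]string, leaves []string) map[string]bool {
	obsolete := map[string]bool{}
	queue := []string{}
	for _, leaf := range leaves {
		queue = append(queue, dependants[leaf]...) // direct dependants of each leaf
	}
	for len(queue) > 0 {
		node := queue[0]
		queue = queue[1:]
		if obsolete[node] {
			continue // short circuit: already marked, skip its dependants
		}
		obsolete[node] = true
		queue = append(queue, dependants[node]...)
	}
	return obsolete
}

func main() {
	// reverse edges of the test graph: l1 <- b2 <- a1, l2 <- b3 <- a2
	rev := map[string][]string{"l1": {"b2"}, "l2": {"b3"}, "b2": {"a1"}, "b3": {"a2"}}
	fmt.Println(obsoleteTargets(rev, []string{"l1", "l2"})) // map[a1:true a2:true b2:true b3:true]
}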
list.go
package deb import ( "fmt" "sort" "strings" "github.com/aptly-dev/aptly/aptly" "github.com/aptly-dev/aptly/utils" ) // Dependency options const ( // DepFollowSource pulls source packages when required DepFollowSource = 1 << iota // DepFollowSuggests pulls from suggests DepFollowSuggests // DepFollowRecommends pulls from recommends DepFollowRecommends // DepFollowAllVariants follows all variants if depends on "a | b" DepFollowAllVariants // DepFollowBuild pulls build dependencies DepFollowBuild // DepVerboseResolve emits additional logs while dependencies are being resolved DepVerboseResolve ) // PackageList is list of unique (by key) packages // // It could be seen as repo snapshot, repo contents, result of filtering, // merge, etc. // // If indexed, PackageList starts supporting searching type PackageList struct { // Straight list of packages as map packages map[string]*Package // Indexed list of packages, sorted by name internally packagesIndex []*Package // Map of packages for each virtual package (provides) providesIndex map[string][]*Package // Package key generation function keyFunc func(p *Package) string // Allow duplicates? duplicatesAllowed bool // Has index been prepared? indexed bool } // PackageConflictError means that package can't be added to the list due to error type PackageConflictError struct { error } // Verify interface var ( _ sort.Interface = &PackageList{} _ PackageCatalog = &PackageList{} ) func packageShortKey(p *Package) string { return string(p.ShortKey("")) } func packageFullKey(p *Package) string { return string(p.Key("")) } // NewPackageList creates empty package list without duplicate package func NewPackageList() *PackageList { return NewPackageListWithDuplicates(false, 1000) } // NewPackageListWithDuplicates creates empty package list which might allow or block duplicate packages func NewPackageListWithDuplicates(duplicates bool, capacity int) *PackageList { if capacity == 0 { capacity = 1000 } result := &PackageList{ packages: make(map[string]*Package, capacity), duplicatesAllowed: duplicates, keyFunc: packageShortKey, } if duplicates { result.keyFunc = packageFullKey } return result } // NewPackageListFromRefList loads packages list from PackageRefList func NewPackageListFromRefList(reflist *PackageRefList, collection *PackageCollection, progress aptly.Progress) (*PackageList, error) { // empty reflist if reflist == nil { return NewPackageList(), nil } result := NewPackageListWithDuplicates(false, reflist.Len()) if progress != nil { progress.InitBar(int64(reflist.Len()), false, aptly.BarGeneralBuildPackageList) } err := reflist.ForEach(func(key []byte) error { p, err2 := collection.ByKey(key)
if err2 != nil { return fmt.Errorf("unable to load package with key %s: %s", key, err2) } if progress != nil { progress.AddBar(1) } return result.Add(p) }) if progress != nil { progress.ShutdownBar() } if err != nil { return nil, err } return result, nil } // Has checks whether package is already in the list func (l *PackageList) Has(p *Package) bool { key := l.keyFunc(p) _, ok := l.packages[key] return ok } // Add appends package to package list, additionally checking for uniqueness func (l *PackageList) Add(p *Package) error { key := l.keyFunc(p) existing, ok := l.packages[key] if ok { if !existing.Equals(p) { return &PackageConflictError{fmt.Errorf("conflict in package %s", p)} } return nil } l.packages[key] = p if l.indexed { for _, provides := range p.Provides { l.providesIndex[provides] = append(l.providesIndex[provides], p) } i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.lessPackages(p, l.packagesIndex[j]) }) // insert p into l.packagesIndex in position i l.packagesIndex = append(l.packagesIndex, nil) copy(l.packagesIndex[i+1:], l.packagesIndex[i:]) l.packagesIndex[i] = p } return nil } // ForEach calls handler for each package in list func (l *PackageList) ForEach(handler func(*Package) error) error { var err error for _, p := range l.packages { err = handler(p) if err != nil { return err } } return err } // ForEachIndexed calls handler for each package in list in indexed order func (l *PackageList) ForEachIndexed(handler func(*Package) error) error { if !l.indexed { panic("list not indexed, can't iterate") } var err error for _, p := range l.packagesIndex { err = handler(p) if err != nil { return err } } return err } // Len returns number of packages in the list func (l *PackageList) Len() int { return len(l.packages) } // Append adds content from one package list to another func (l *PackageList) Append(pl *PackageList) error { if l.indexed { panic("Append not supported when indexed") } for k, p := range pl.packages { existing, ok := l.packages[k] if ok { if !existing.Equals(p) { return fmt.Errorf("conflict in package %s", p) } } else { l.packages[k] = p } } return nil } // Remove removes package from the list, and updates index when required func (l *PackageList) Remove(p *Package) { delete(l.packages, l.keyFunc(p)) if l.indexed { for _, provides := range p.Provides { for i, pkg := range l.providesIndex[provides] { if pkg.Equals(p) { // remove l.ProvidesIndex[provides][i] w/o preserving order l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][i], l.providesIndex[provides] = nil, l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][:len(l.providesIndex[provides])-1] break } } } i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= p.Name }) for i < len(l.packagesIndex) && l.packagesIndex[i].Name == p.Name { if l.packagesIndex[i].Equals(p) { // remove l.packagesIndex[i] preserving order copy(l.packagesIndex[i:], l.packagesIndex[i+1:]) l.packagesIndex[len(l.packagesIndex)-1] = nil l.packagesIndex = l.packagesIndex[:len(l.packagesIndex)-1] break } i++ } } } // Architectures returns list of architectures present in packages and flag if source packages are present. 
// // If includeSource is true, meta-architecture "source" would be present in the list func (l *PackageList) Architectures(includeSource bool) (result []string) { result = make([]string, 0, 10) for _, pkg := range l.packages { if pkg.Architecture != ArchitectureAll && (pkg.Architecture != ArchitectureSource || includeSource) && !utils.StrSliceHasItem(result, pkg.Architecture) { result = append(result, pkg.Architecture) } } return } // Strings builds list of strings with package keys func (l *PackageList) Strings() []string { result := make([]string, l.Len()) i := 0 for _, p := range l.packages { result[i] = string(p.Key("")) i++ } return result } // FullNames builds a list of package {name}_{version}_{arch} func (l *PackageList) FullNames() []string { result := make([]string, l.Len()) i := 0 for _, p := range l.packages { result[i] = p.GetFullName() i++ } return result } // depSliceDeduplicate removes dups in slice of Dependencies func depSliceDeduplicate(s []Dependency) []Dependency { l := len(s) if l < 2 { return s } if l == 2 { if s[0] == s[1] { return s[0:1] } return s } found := make(map[string]bool, l) j := 0 for i, x := range s { h := x.Hash() if !found[h] { found[h] = true s[j] = s[i] j++ } } return s[:j] } // VerifyDependencies looks for missing dependencies in package list. // // Analysis would be performed for each architecture, in specified sources func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) { l.PrepareIndex() missing := make([]Dependency, 0, 128) if progress != nil { progress.InitBar(int64(l.Len())*int64(len(architectures)), false, aptly.BarGeneralVerifyDependencies) } for _, arch := range architectures { cache := make(map[string]bool, 2048) for _, p := range l.packagesIndex { if progress != nil { progress.AddBar(1) } if !p.MatchesArchitecture(arch) { continue } for _, dep := range p.GetDependencies(options) { variants, err := ParseDependencyVariants(dep) if err != nil { return nil, fmt.Errorf("unable to process package %s: %s", p, err) } variants = depSliceDeduplicate(variants) variantsMissing := make([]Dependency, 0, len(variants)) for _, dep := range variants { if dep.Architecture == "" { dep.Architecture = arch } hash := dep.Hash() satisfied, ok := cache[hash] if !ok { satisfied = sources.Search(dep, false) != nil cache[hash] = satisfied } if !satisfied && !ok { variantsMissing = append(variantsMissing, dep) } if satisfied && options&DepFollowAllVariants == 0 { variantsMissing = nil break } } missing = append(missing, variantsMissing...) 
			}
		}
	}

	if progress != nil {
		progress.ShutdownBar()
	}

	if options&DepVerboseResolve == DepVerboseResolve && progress != nil {
		missingStr := make([]string, len(missing))
		for i := range missing {
			missingStr[i] = missing[i].String()
		}

		progress.ColoredPrintf("@{y}Missing dependencies:@| %s", strings.Join(missingStr, ", "))
	}

	return missing, nil
}

// Swap swaps two packages in index
func (l *PackageList) Swap(i, j int) {
	l.packagesIndex[i], l.packagesIndex[j] = l.packagesIndex[j], l.packagesIndex[i]
}

func (l *PackageList) lessPackages(iPkg, jPkg *Package) bool {
	if iPkg.Name == jPkg.Name {
		cmp := CompareVersions(iPkg.Version, jPkg.Version)
		if cmp == 0 {
			return iPkg.Architecture < jPkg.Architecture
		}
		return cmp == 1
	}

	return iPkg.Name < jPkg.Name
}

// Less compares two packages by name (lexicographical) and version (latest to oldest)
func (l *PackageList) Less(i, j int) bool {
	return l.lessPackages(l.packagesIndex[i], l.packagesIndex[j])
}

// PrepareIndex prepares list for indexing
func (l *PackageList) PrepareIndex() {
	if l.indexed {
		return
	}

	l.packagesIndex = make([]*Package, l.Len())
	l.providesIndex = make(map[string][]*Package, 128)

	i := 0
	for _, p := range l.packages {
		l.packagesIndex[i] = p
		i++

		for _, provides := range p.Provides {
			l.providesIndex[provides] = append(l.providesIndex[provides], p)
		}
	}

	sort.Sort(l)

	l.indexed = true
}

// Scan searches package index using full scan
func (l *PackageList) Scan(q PackageQuery) (result *PackageList) {
	result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0)
	for _, pkg := range l.packages {
		if q.Matches(pkg) {
			result.Add(pkg)
		}
	}

	return
}

// SearchSupported returns true for PackageList
func (l *PackageList) SearchSupported() bool {
	return true
}

// SearchByKey looks up package by exact key reference
func (l *PackageList) SearchByKey(arch, name, version string) (result *PackageList) {
	result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0)

	pkg := l.packages["P"+arch+" "+name+" "+version]
	if pkg != nil {
		result.Add(pkg)
	}

	return
}

// Search searches package index for specified package(s) using optimized queries
func (l *PackageList) Search(dep Dependency, allMatches bool) (searchResults []*Package) {
	if !l.indexed {
		panic("list not indexed, can't search")
	}

	i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= dep.Pkg })

	for i < len(l.packagesIndex) && l.packagesIndex[i].Name == dep.Pkg {
		p := l.packagesIndex[i]
		if p.MatchesDependency(dep) {
			searchResults = append(searchResults, p)

			if !allMatches {
				break
			}
		}

		i++
	}

	if dep.Relation == VersionDontCare {
		for _, p := range l.providesIndex[dep.Pkg] {
			if dep.Architecture == "" || p.MatchesArchitecture(dep.Architecture) {
				searchResults = append(searchResults, p)

				if !allMatches {
					break
				}
			}
		}
	}

	return
}

// Filter filters package index by specified queries (ORed together), possibly pulling dependencies
func (l *PackageList) Filter(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string) (*PackageList, error) {
	return l.FilterWithProgress(queries, withDependencies, source, dependencyOptions, architecturesList, nil)
}

// FilterWithProgress filters package index by specified queries (ORed together), possibly pulling dependencies and displays progress
func (l *PackageList) FilterWithProgress(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string, progress aptly.Progress) (*PackageList, error) {
	if !l.indexed {
		panic("list not indexed, can't filter")
} result := NewPackageList() for _, query := range queries { result.Append(query.Query(l)) } if withDependencies { added := result.Len() result.PrepareIndex() dependencySource := NewPackageList() if source != nil { dependencySource.Append(source) } dependencySource.Append(result) dependencySource.PrepareIndex() // while some new dependencies were discovered for added > 0 { added = 0 // find missing dependencies missing, err := result.VerifyDependencies(dependencyOptions, architecturesList, dependencySource, progress) if err != nil { return nil, err } // try to satisfy dependencies for _, dep := range missing { if dependencyOptions&DepFollowAllVariants == 0 { // dependency might have already been satisfied // with packages already been added // // when follow-all-variants is enabled, we need to try to expand anyway, // as even if dependency is satisfied now, there might be other ways to satisfy dependency if result.Search(dep, false) != nil { if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{y}Already satisfied dependency@|: %s with %s", &dep, result.Search(dep, true)) } continue } } searchResults := l.Search(dep, true) if len(searchResults) > 0 { for _, p := range searchResults { if result.Has(p) { continue } if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{g}Injecting package@|: %s", p) } result.Add(p) dependencySource.Add(p) added++ if dependencyOptions&DepFollowAllVariants == 0 { break } } } else { if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{r}Unsatisfied dependency@|: %s", dep.String()) } } } } } return result, nil }
random_line_split
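When the list is indexed, `Add` above keeps `packagesIndex` sorted incrementally instead of re-sorting the whole slice: `sort.Search` finds the insertion point, the slice is grown by one, and `copy` shifts the tail right. Here is that pattern in isolation, with plain strings instead of packages.

// Sorted insert: binary-search the position, grow, shift, place.
package main

import (
	"fmt"
	"sort"
)

func insertSorted(index []string, name string) []string {
	i := sort.Search(len(index), func(j int) bool { return index[j] >= name })
	index = append(index, "")    // grow by one
	copy(index[i+1:], index[i:]) // shift the tail right
	index[i] = name              // place the new element
	return index
}

func main() {
	idx := []string{"aptly", "bash", "curl"}
	fmt.Println(insertSorted(idx, "binutils")) // [aptly bash binutils curl]
}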
list.go
package deb import ( "fmt" "sort" "strings" "github.com/aptly-dev/aptly/aptly" "github.com/aptly-dev/aptly/utils" ) // Dependency options const ( // DepFollowSource pulls source packages when required DepFollowSource = 1 << iota // DepFollowSuggests pulls from suggests DepFollowSuggests // DepFollowRecommends pulls from recommends DepFollowRecommends // DepFollowAllVariants follows all variants if depends on "a | b" DepFollowAllVariants // DepFollowBuild pulls build dependencies DepFollowBuild // DepVerboseResolve emits additional logs while dependencies are being resolved DepVerboseResolve ) // PackageList is list of unique (by key) packages // // It could be seen as repo snapshot, repo contents, result of filtering, // merge, etc. // // If indexed, PackageList starts supporting searching type PackageList struct { // Straight list of packages as map packages map[string]*Package // Indexed list of packages, sorted by name internally packagesIndex []*Package // Map of packages for each virtual package (provides) providesIndex map[string][]*Package // Package key generation function keyFunc func(p *Package) string // Allow duplicates? duplicatesAllowed bool // Has index been prepared? indexed bool } // PackageConflictError means that package can't be added to the list due to error type PackageConflictError struct { error } // Verify interface var ( _ sort.Interface = &PackageList{} _ PackageCatalog = &PackageList{} ) func packageShortKey(p *Package) string { return string(p.ShortKey("")) } func packageFullKey(p *Package) string { return string(p.Key("")) } // NewPackageList creates empty package list without duplicate package func NewPackageList() *PackageList { return NewPackageListWithDuplicates(false, 1000) } // NewPackageListWithDuplicates creates empty package list which might allow or block duplicate packages func NewPackageListWithDuplicates(duplicates bool, capacity int) *PackageList { if capacity == 0 { capacity = 1000 } result := &PackageList{ packages: make(map[string]*Package, capacity), duplicatesAllowed: duplicates, keyFunc: packageShortKey, } if duplicates { result.keyFunc = packageFullKey } return result } // NewPackageListFromRefList loads packages list from PackageRefList func NewPackageListFromRefList(reflist *PackageRefList, collection *PackageCollection, progress aptly.Progress) (*PackageList, error) { // empty reflist if reflist == nil { return NewPackageList(), nil } result := NewPackageListWithDuplicates(false, reflist.Len()) if progress != nil { progress.InitBar(int64(reflist.Len()), false, aptly.BarGeneralBuildPackageList) } err := reflist.ForEach(func(key []byte) error { p, err2 := collection.ByKey(key) if err2 != nil { return fmt.Errorf("unable to load package with key %s: %s", key, err2) } if progress != nil { progress.AddBar(1) } return result.Add(p) }) if progress != nil { progress.ShutdownBar() } if err != nil { return nil, err } return result, nil } // Has checks whether package is already in the list func (l *PackageList) Has(p *Package) bool { key := l.keyFunc(p) _, ok := l.packages[key] return ok } // Add appends package to package list, additionally checking for uniqueness func (l *PackageList) Add(p *Package) error
// ForEach calls handler for each package in list func (l *PackageList) ForEach(handler func(*Package) error) error { var err error for _, p := range l.packages { err = handler(p) if err != nil { return err } } return err } // ForEachIndexed calls handler for each package in list in indexed order func (l *PackageList) ForEachIndexed(handler func(*Package) error) error { if !l.indexed { panic("list not indexed, can't iterate") } var err error for _, p := range l.packagesIndex { err = handler(p) if err != nil { return err } } return err } // Len returns number of packages in the list func (l *PackageList) Len() int { return len(l.packages) } // Append adds content from one package list to another func (l *PackageList) Append(pl *PackageList) error { if l.indexed { panic("Append not supported when indexed") } for k, p := range pl.packages { existing, ok := l.packages[k] if ok { if !existing.Equals(p) { return fmt.Errorf("conflict in package %s", p) } } else { l.packages[k] = p } } return nil } // Remove removes package from the list, and updates index when required func (l *PackageList) Remove(p *Package) { delete(l.packages, l.keyFunc(p)) if l.indexed { for _, provides := range p.Provides { for i, pkg := range l.providesIndex[provides] { if pkg.Equals(p) { // remove l.ProvidesIndex[provides][i] w/o preserving order l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][i], l.providesIndex[provides] = nil, l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][:len(l.providesIndex[provides])-1] break } } } i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= p.Name }) for i < len(l.packagesIndex) && l.packagesIndex[i].Name == p.Name { if l.packagesIndex[i].Equals(p) { // remove l.packagesIndex[i] preserving order copy(l.packagesIndex[i:], l.packagesIndex[i+1:]) l.packagesIndex[len(l.packagesIndex)-1] = nil l.packagesIndex = l.packagesIndex[:len(l.packagesIndex)-1] break } i++ } } } // Architectures returns list of architectures present in packages and flag if source packages are present. // // If includeSource is true, meta-architecture "source" would be present in the list func (l *PackageList) Architectures(includeSource bool) (result []string) { result = make([]string, 0, 10) for _, pkg := range l.packages { if pkg.Architecture != ArchitectureAll && (pkg.Architecture != ArchitectureSource || includeSource) && !utils.StrSliceHasItem(result, pkg.Architecture) { result = append(result, pkg.Architecture) } } return } // Strings builds list of strings with package keys func (l *PackageList) Strings() []string { result := make([]string, l.Len()) i := 0 for _, p := range l.packages { result[i] = string(p.Key("")) i++ } return result } // FullNames builds a list of package {name}_{version}_{arch} func (l *PackageList) FullNames() []string { result := make([]string, l.Len()) i := 0 for _, p := range l.packages { result[i] = p.GetFullName() i++ } return result } // depSliceDeduplicate removes dups in slice of Dependencies func depSliceDeduplicate(s []Dependency) []Dependency { l := len(s) if l < 2 { return s } if l == 2 { if s[0] == s[1] { return s[0:1] } return s } found := make(map[string]bool, l) j := 0 for i, x := range s { h := x.Hash() if !found[h] { found[h] = true s[j] = s[i] j++ } } return s[:j] } // VerifyDependencies looks for missing dependencies in package list. 
//
// Analysis would be performed for each architecture, in specified sources
func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) {
	l.PrepareIndex()
	missing := make([]Dependency, 0, 128)

	if progress != nil {
		progress.InitBar(int64(l.Len())*int64(len(architectures)), false, aptly.BarGeneralVerifyDependencies)
	}

	for _, arch := range architectures {
		cache := make(map[string]bool, 2048)

		for _, p := range l.packagesIndex {
			if progress != nil {
				progress.AddBar(1)
			}

			if !p.MatchesArchitecture(arch) {
				continue
			}

			for _, dep := range p.GetDependencies(options) {
				variants, err := ParseDependencyVariants(dep)
				if err != nil {
					return nil, fmt.Errorf("unable to process package %s: %s", p, err)
				}

				variants = depSliceDeduplicate(variants)

				variantsMissing := make([]Dependency, 0, len(variants))

				for _, dep := range variants {
					if dep.Architecture == "" {
						dep.Architecture = arch
					}

					hash := dep.Hash()
					satisfied, ok := cache[hash]
					if !ok {
						satisfied = sources.Search(dep, false) != nil
						cache[hash] = satisfied
					}

					if !satisfied && !ok {
						variantsMissing = append(variantsMissing, dep)
					}

					if satisfied && options&DepFollowAllVariants == 0 {
						variantsMissing = nil
						break
					}
				}

				missing = append(missing, variantsMissing...)
			}
		}
	}

	if progress != nil {
		progress.ShutdownBar()
	}

	if options&DepVerboseResolve == DepVerboseResolve && progress != nil {
		missingStr := make([]string, len(missing))
		for i := range missing {
			missingStr[i] = missing[i].String()
		}

		progress.ColoredPrintf("@{y}Missing dependencies:@| %s", strings.Join(missingStr, ", "))
	}

	return missing, nil
}

// Swap swaps two packages in index
func (l *PackageList) Swap(i, j int) {
	l.packagesIndex[i], l.packagesIndex[j] = l.packagesIndex[j], l.packagesIndex[i]
}

func (l *PackageList) lessPackages(iPkg, jPkg *Package) bool {
	if iPkg.Name == jPkg.Name {
		cmp := CompareVersions(iPkg.Version, jPkg.Version)
		if cmp == 0 {
			return iPkg.Architecture < jPkg.Architecture
		}
		return cmp == 1
	}

	return iPkg.Name < jPkg.Name
}

// Less compares two packages by name (lexicographical) and version (latest to oldest)
func (l *PackageList) Less(i, j int) bool {
	return l.lessPackages(l.packagesIndex[i], l.packagesIndex[j])
}

// PrepareIndex prepares list for indexing
func (l *PackageList) PrepareIndex() {
	if l.indexed {
		return
	}

	l.packagesIndex = make([]*Package, l.Len())
	l.providesIndex = make(map[string][]*Package, 128)

	i := 0
	for _, p := range l.packages {
		l.packagesIndex[i] = p
		i++

		for _, provides := range p.Provides {
			l.providesIndex[provides] = append(l.providesIndex[provides], p)
		}
	}

	sort.Sort(l)

	l.indexed = true
}

// Scan searches package index using full scan
func (l *PackageList) Scan(q PackageQuery) (result *PackageList) {
	result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0)
	for _, pkg := range l.packages {
		if q.Matches(pkg) {
			result.Add(pkg)
		}
	}

	return
}

// SearchSupported returns true for PackageList
func (l *PackageList) SearchSupported() bool {
	return true
}

// SearchByKey looks up package by exact key reference
func (l *PackageList) SearchByKey(arch, name, version string) (result *PackageList) {
	result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0)

	pkg := l.packages["P"+arch+" "+name+" "+version]
	if pkg != nil {
		result.Add(pkg)
	}

	return
}

// Search searches package index for specified package(s) using optimized queries
func (l *PackageList) Search(dep Dependency, allMatches bool) (searchResults []*Package) {
	if !l.indexed {
		panic("list not indexed, can't search")
	}

	i :=
sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= dep.Pkg }) for i < len(l.packagesIndex) && l.packagesIndex[i].Name == dep.Pkg { p := l.packagesIndex[i] if p.MatchesDependency(dep) { searchResults = append(searchResults, p) if !allMatches { break } } i++ } if dep.Relation == VersionDontCare { for _, p := range l.providesIndex[dep.Pkg] { if dep.Architecture == "" || p.MatchesArchitecture(dep.Architecture) { searchResults = append(searchResults, p) if !allMatches { break } } } } return } // Filter filters package index by specified queries (ORed together), possibly pulling dependencies func (l *PackageList) Filter(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string) (*PackageList, error) { return l.FilterWithProgress(queries, withDependencies, source, dependencyOptions, architecturesList, nil) } // FilterWithProgress filters package index by specified queries (ORed together), possibly pulling dependencies and displays progress func (l *PackageList) FilterWithProgress(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string, progress aptly.Progress) (*PackageList, error) { if !l.indexed { panic("list not indexed, can't filter") } result := NewPackageList() for _, query := range queries { result.Append(query.Query(l)) } if withDependencies { added := result.Len() result.PrepareIndex() dependencySource := NewPackageList() if source != nil { dependencySource.Append(source) } dependencySource.Append(result) dependencySource.PrepareIndex() // while some new dependencies were discovered for added > 0 { added = 0 // find missing dependencies missing, err := result.VerifyDependencies(dependencyOptions, architecturesList, dependencySource, progress) if err != nil { return nil, err } // try to satisfy dependencies for _, dep := range missing { if dependencyOptions&DepFollowAllVariants == 0 { // dependency might have already been satisfied // with packages already been added // // when follow-all-variants is enabled, we need to try to expand anyway, // as even if dependency is satisfied now, there might be other ways to satisfy dependency if result.Search(dep, false) != nil { if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{y}Already satisfied dependency@|: %s with %s", &dep, result.Search(dep, true)) } continue } } searchResults := l.Search(dep, true) if len(searchResults) > 0 { for _, p := range searchResults { if result.Has(p) { continue } if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{g}Injecting package@|: %s", p) } result.Add(p) dependencySource.Add(p) added++ if dependencyOptions&DepFollowAllVariants == 0 { break } } } else { if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{r}Unsatisfied dependency@|: %s", dep.String()) } } } } } return result, nil }
{ key := l.keyFunc(p) existing, ok := l.packages[key] if ok { if !existing.Equals(p) { return &PackageConflictError{fmt.Errorf("conflict in package %s", p)} } return nil } l.packages[key] = p if l.indexed { for _, provides := range p.Provides { l.providesIndex[provides] = append(l.providesIndex[provides], p) } i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.lessPackages(p, l.packagesIndex[j]) }) // insert p into l.packagesIndex in position i l.packagesIndex = append(l.packagesIndex, nil) copy(l.packagesIndex[i+1:], l.packagesIndex[i:]) l.packagesIndex[i] = p } return nil }
identifier_body
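`depSliceDeduplicate` above keeps the first occurrence of each dependency by hashing into a seen-set and compacting the slice in place, preserving order. A self-contained sketch of that pattern, with strings standing in for `Dependency` values and their `Hash()`:

// In-place, order-preserving deduplication with a seen-set.
package main

import "fmt"

func dedup(s []string) []string {
	if len(s) < 2 {
		return s
	}
	seen := make(map[string]bool, len(s))
	j := 0
	for _, x := range s {
		if !seen[x] {
			seen[x] = true
			s[j] = x // compact: keep the first occurrence
			j++
		}
	}
	return s[:j]
}

func main() {
	fmt.Println(dedup([]string{"libc6", "zlib1g", "libc6", "dpkg"})) // [libc6 zlib1g dpkg]
}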
list.go
package deb import ( "fmt" "sort" "strings" "github.com/aptly-dev/aptly/aptly" "github.com/aptly-dev/aptly/utils" ) // Dependency options const ( // DepFollowSource pulls source packages when required DepFollowSource = 1 << iota // DepFollowSuggests pulls from suggests DepFollowSuggests // DepFollowRecommends pulls from recommends DepFollowRecommends // DepFollowAllVariants follows all variants if depends on "a | b" DepFollowAllVariants // DepFollowBuild pulls build dependencies DepFollowBuild // DepVerboseResolve emits additional logs while dependencies are being resolved DepVerboseResolve ) // PackageList is list of unique (by key) packages // // It could be seen as repo snapshot, repo contents, result of filtering, // merge, etc. // // If indexed, PackageList starts supporting searching type PackageList struct { // Straight list of packages as map packages map[string]*Package // Indexed list of packages, sorted by name internally packagesIndex []*Package // Map of packages for each virtual package (provides) providesIndex map[string][]*Package // Package key generation function keyFunc func(p *Package) string // Allow duplicates? duplicatesAllowed bool // Has index been prepared? indexed bool } // PackageConflictError means that package can't be added to the list due to error type PackageConflictError struct { error } // Verify interface var ( _ sort.Interface = &PackageList{} _ PackageCatalog = &PackageList{} ) func packageShortKey(p *Package) string { return string(p.ShortKey("")) } func packageFullKey(p *Package) string { return string(p.Key("")) } // NewPackageList creates empty package list without duplicate package func NewPackageList() *PackageList { return NewPackageListWithDuplicates(false, 1000) } // NewPackageListWithDuplicates creates empty package list which might allow or block duplicate packages func NewPackageListWithDuplicates(duplicates bool, capacity int) *PackageList { if capacity == 0 { capacity = 1000 } result := &PackageList{ packages: make(map[string]*Package, capacity), duplicatesAllowed: duplicates, keyFunc: packageShortKey, } if duplicates { result.keyFunc = packageFullKey } return result } // NewPackageListFromRefList loads packages list from PackageRefList func NewPackageListFromRefList(reflist *PackageRefList, collection *PackageCollection, progress aptly.Progress) (*PackageList, error) { // empty reflist if reflist == nil { return NewPackageList(), nil } result := NewPackageListWithDuplicates(false, reflist.Len()) if progress != nil { progress.InitBar(int64(reflist.Len()), false, aptly.BarGeneralBuildPackageList) } err := reflist.ForEach(func(key []byte) error { p, err2 := collection.ByKey(key) if err2 != nil { return fmt.Errorf("unable to load package with key %s: %s", key, err2) } if progress != nil { progress.AddBar(1) } return result.Add(p) }) if progress != nil { progress.ShutdownBar() } if err != nil { return nil, err } return result, nil } // Has checks whether package is already in the list func (l *PackageList) Has(p *Package) bool { key := l.keyFunc(p) _, ok := l.packages[key] return ok } // Add appends package to package list, additionally checking for uniqueness func (l *PackageList) Add(p *Package) error { key := l.keyFunc(p) existing, ok := l.packages[key] if ok { if !existing.Equals(p) { return &PackageConflictError{fmt.Errorf("conflict in package %s", p)} } return nil } l.packages[key] = p if l.indexed { for _, provides := range p.Provides { l.providesIndex[provides] = append(l.providesIndex[provides], p) } i := 
sort.Search(len(l.packagesIndex), func(j int) bool { return l.lessPackages(p, l.packagesIndex[j]) }) // insert p into l.packagesIndex in position i l.packagesIndex = append(l.packagesIndex, nil) copy(l.packagesIndex[i+1:], l.packagesIndex[i:]) l.packagesIndex[i] = p } return nil } // ForEach calls handler for each package in list func (l *PackageList) ForEach(handler func(*Package) error) error { var err error for _, p := range l.packages { err = handler(p) if err != nil { return err } } return err } // ForEachIndexed calls handler for each package in list in indexed order func (l *PackageList) ForEachIndexed(handler func(*Package) error) error { if !l.indexed { panic("list not indexed, can't iterate") } var err error for _, p := range l.packagesIndex { err = handler(p) if err != nil { return err } } return err } // Len returns number of packages in the list func (l *PackageList) Len() int { return len(l.packages) } // Append adds content from one package list to another func (l *PackageList) Append(pl *PackageList) error { if l.indexed { panic("Append not supported when indexed") } for k, p := range pl.packages { existing, ok := l.packages[k] if ok { if !existing.Equals(p) { return fmt.Errorf("conflict in package %s", p) } } else { l.packages[k] = p } } return nil } // Remove removes package from the list, and updates index when required func (l *PackageList) Remove(p *Package) { delete(l.packages, l.keyFunc(p)) if l.indexed { for _, provides := range p.Provides { for i, pkg := range l.providesIndex[provides] { if pkg.Equals(p) { // remove l.ProvidesIndex[provides][i] w/o preserving order l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][i], l.providesIndex[provides] = nil, l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][:len(l.providesIndex[provides])-1] break } } } i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= p.Name }) for i < len(l.packagesIndex) && l.packagesIndex[i].Name == p.Name { if l.packagesIndex[i].Equals(p) { // remove l.packagesIndex[i] preserving order copy(l.packagesIndex[i:], l.packagesIndex[i+1:]) l.packagesIndex[len(l.packagesIndex)-1] = nil l.packagesIndex = l.packagesIndex[:len(l.packagesIndex)-1] break } i++ } } } // Architectures returns list of architectures present in packages and flag if source packages are present. 
// // If includeSource is true, meta-architecture "source" would be present in the list func (l *PackageList) Architectures(includeSource bool) (result []string) { result = make([]string, 0, 10) for _, pkg := range l.packages { if pkg.Architecture != ArchitectureAll && (pkg.Architecture != ArchitectureSource || includeSource) && !utils.StrSliceHasItem(result, pkg.Architecture) { result = append(result, pkg.Architecture) } } return } // Strings builds list of strings with package keys func (l *PackageList) Strings() []string { result := make([]string, l.Len()) i := 0 for _, p := range l.packages { result[i] = string(p.Key("")) i++ } return result } // FullNames builds a list of package {name}_{version}_{arch} func (l *PackageList) FullNames() []string { result := make([]string, l.Len()) i := 0 for _, p := range l.packages { result[i] = p.GetFullName() i++ } return result } // depSliceDeduplicate removes dups in slice of Dependencies func depSliceDeduplicate(s []Dependency) []Dependency { l := len(s) if l < 2 { return s } if l == 2 { if s[0] == s[1] { return s[0:1] } return s } found := make(map[string]bool, l) j := 0 for i, x := range s { h := x.Hash() if !found[h] { found[h] = true s[j] = s[i] j++ } } return s[:j] } // VerifyDependencies looks for missing dependencies in package list. // // Analysis would be performed for each architecture, in specified sources func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) { l.PrepareIndex() missing := make([]Dependency, 0, 128) if progress != nil { progress.InitBar(int64(l.Len())*int64(len(architectures)), false, aptly.BarGeneralVerifyDependencies) } for _, arch := range architectures { cache := make(map[string]bool, 2048) for _, p := range l.packagesIndex { if progress != nil { progress.AddBar(1) } if !p.MatchesArchitecture(arch) { continue } for _, dep := range p.GetDependencies(options) { variants, err := ParseDependencyVariants(dep) if err != nil { return nil, fmt.Errorf("unable to process package %s: %s", p, err) } variants = depSliceDeduplicate(variants) variantsMissing := make([]Dependency, 0, len(variants)) for _, dep := range variants { if dep.Architecture == "" { dep.Architecture = arch } hash := dep.Hash() satisfied, ok := cache[hash] if !ok { satisfied = sources.Search(dep, false) != nil cache[hash] = satisfied } if !satisfied && !ok { variantsMissing = append(variantsMissing, dep) } if satisfied && options&DepFollowAllVariants == 0 { variantsMissing = nil break } } missing = append(missing, variantsMissing...) 
} } } if progress != nil { progress.ShutdownBar() } if options&DepVerboseResolve == DepVerboseResolve && progress != nil { missingStr := make([]string, len(missing)) for i := range missing { missingStr[i] = missing[i].String() } progress.ColoredPrintf("@{y}Missing dependencies:@| %s", strings.Join(missingStr, ", ")) } return missing, nil } // Swap swaps two packages in index func (l *PackageList) Swap(i, j int) { l.packagesIndex[i], l.packagesIndex[j] = l.packagesIndex[j], l.packagesIndex[i] } func (l *PackageList) lessPackages(iPkg, jPkg *Package) bool { if iPkg.Name == jPkg.Name { cmp := CompareVersions(iPkg.Version, jPkg.Version) if cmp == 0 { return iPkg.Architecture < jPkg.Architecture } return cmp == 1 } return iPkg.Name < jPkg.Name } // Less compares two packages by name (lexicographical) and version (latest to oldest) func (l *PackageList) Less(i, j int) bool { return l.lessPackages(l.packagesIndex[i], l.packagesIndex[j]) } // PrepareIndex prepares list for indexing func (l *PackageList) PrepareIndex() { if l.indexed { return } l.packagesIndex = make([]*Package, l.Len()) l.providesIndex = make(map[string][]*Package, 128) i := 0 for _, p := range l.packages { l.packagesIndex[i] = p i++ for _, provides := range p.Provides { l.providesIndex[provides] = append(l.providesIndex[provides], p) } } sort.Sort(l) l.indexed = true } // Scan searches package index using full scan func (l *PackageList) Scan(q PackageQuery) (result *PackageList) { result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0) for _, pkg := range l.packages { if q.Matches(pkg) { result.Add(pkg) } } return } // SearchSupported returns true for PackageList func (l *PackageList)
() bool { return true } // SearchByKey looks up package by exact key reference func (l *PackageList) SearchByKey(arch, name, version string) (result *PackageList) { result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0) pkg := l.packages["P"+arch+" "+name+" "+version] if pkg != nil { result.Add(pkg) } return } // Search searches package index for specified package(s) using optimized queries func (l *PackageList) Search(dep Dependency, allMatches bool) (searchResults []*Package) { if !l.indexed { panic("list not indexed, can't search") } i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= dep.Pkg }) for i < len(l.packagesIndex) && l.packagesIndex[i].Name == dep.Pkg { p := l.packagesIndex[i] if p.MatchesDependency(dep) { searchResults = append(searchResults, p) if !allMatches { break } } i++ } if dep.Relation == VersionDontCare { for _, p := range l.providesIndex[dep.Pkg] { if dep.Architecture == "" || p.MatchesArchitecture(dep.Architecture) { searchResults = append(searchResults, p) if !allMatches { break } } } } return } // Filter filters package index by specified queries (ORed together), possibly pulling dependencies func (l *PackageList) Filter(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string) (*PackageList, error) { return l.FilterWithProgress(queries, withDependencies, source, dependencyOptions, architecturesList, nil) } // FilterWithProgress filters package index by specified queries (ORed together), possibly pulling dependencies and displays progress func (l *PackageList) FilterWithProgress(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string, progress aptly.Progress) (*PackageList, error) { if !l.indexed { panic("list not indexed, can't filter") } result := NewPackageList() for _, query := range queries { result.Append(query.Query(l)) } if withDependencies { added := result.Len() result.PrepareIndex() dependencySource := NewPackageList() if source != nil { dependencySource.Append(source) } dependencySource.Append(result) dependencySource.PrepareIndex() // while some new dependencies were discovered for added > 0 { added = 0 // find missing dependencies missing, err := result.VerifyDependencies(dependencyOptions, architecturesList, dependencySource, progress) if err != nil { return nil, err } // try to satisfy dependencies for _, dep := range missing { if dependencyOptions&DepFollowAllVariants == 0 { // dependency might have already been satisfied // with packages already been added // // when follow-all-variants is enabled, we need to try to expand anyway, // as even if dependency is satisfied now, there might be other ways to satisfy dependency if result.Search(dep, false) != nil { if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{y}Already satisfied dependency@|: %s with %s", &dep, result.Search(dep, true)) } continue } } searchResults := l.Search(dep, true) if len(searchResults) > 0 { for _, p := range searchResults { if result.Has(p) { continue } if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{g}Injecting package@|: %s", p) } result.Add(p) dependencySource.Add(p) added++ if dependencyOptions&DepFollowAllVariants == 0 { break } } } else { if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{r}Unsatisfied dependency@|: %s", 
dep.String()) } } } } } return result, nil }
SearchSupported
identifier_name
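Search above relies on the index being sorted by name: sort.Search jumps to the first candidate, then a linear scan collects entries while the key still matches. A small standalone sketch of that lookup pattern (findAll and its inputs are illustrative, not aptly's API):

package main

import (
	"fmt"
	"sort"
)

// findAll returns every element equal to want in a sorted slice:
// binary search to the first match, then scan forward.
func findAll(sorted []string, want string) []string {
	i := sort.Search(len(sorted), func(j int) bool { return sorted[j] >= want })
	var out []string
	for i < len(sorted) && sorted[i] == want {
		out = append(out, sorted[i])
		i++
	}
	return out
}

func main() {
	names := []string{"bash", "nginx", "nginx", "zsh"} // must be pre-sorted
	fmt.Println(findAll(names, "nginx"))               // [nginx nginx]
}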
list.go
package deb import ( "fmt" "sort" "strings" "github.com/aptly-dev/aptly/aptly" "github.com/aptly-dev/aptly/utils" ) // Dependency options const ( // DepFollowSource pulls source packages when required DepFollowSource = 1 << iota // DepFollowSuggests pulls from suggests DepFollowSuggests // DepFollowRecommends pulls from recommends DepFollowRecommends // DepFollowAllVariants follows all variants if depends on "a | b" DepFollowAllVariants // DepFollowBuild pulls build dependencies DepFollowBuild // DepVerboseResolve emits additional logs while dependencies are being resolved DepVerboseResolve ) // PackageList is list of unique (by key) packages // // It could be seen as repo snapshot, repo contents, result of filtering, // merge, etc. // // If indexed, PackageList starts supporting searching type PackageList struct { // Straight list of packages as map packages map[string]*Package // Indexed list of packages, sorted by name internally packagesIndex []*Package // Map of packages for each virtual package (provides) providesIndex map[string][]*Package // Package key generation function keyFunc func(p *Package) string // Allow duplicates? duplicatesAllowed bool // Has index been prepared? indexed bool } // PackageConflictError means that package can't be added to the list due to error type PackageConflictError struct { error } // Verify interface var ( _ sort.Interface = &PackageList{} _ PackageCatalog = &PackageList{} ) func packageShortKey(p *Package) string { return string(p.ShortKey("")) } func packageFullKey(p *Package) string { return string(p.Key("")) } // NewPackageList creates empty package list without duplicate package func NewPackageList() *PackageList { return NewPackageListWithDuplicates(false, 1000) } // NewPackageListWithDuplicates creates empty package list which might allow or block duplicate packages func NewPackageListWithDuplicates(duplicates bool, capacity int) *PackageList { if capacity == 0 { capacity = 1000 } result := &PackageList{ packages: make(map[string]*Package, capacity), duplicatesAllowed: duplicates, keyFunc: packageShortKey, } if duplicates { result.keyFunc = packageFullKey } return result } // NewPackageListFromRefList loads packages list from PackageRefList func NewPackageListFromRefList(reflist *PackageRefList, collection *PackageCollection, progress aptly.Progress) (*PackageList, error) { // empty reflist if reflist == nil { return NewPackageList(), nil } result := NewPackageListWithDuplicates(false, reflist.Len()) if progress != nil { progress.InitBar(int64(reflist.Len()), false, aptly.BarGeneralBuildPackageList) } err := reflist.ForEach(func(key []byte) error { p, err2 := collection.ByKey(key) if err2 != nil
if progress != nil { progress.AddBar(1) } return result.Add(p) }) if progress != nil { progress.ShutdownBar() } if err != nil { return nil, err } return result, nil } // Has checks whether package is already in the list func (l *PackageList) Has(p *Package) bool { key := l.keyFunc(p) _, ok := l.packages[key] return ok } // Add appends package to package list, additionally checking for uniqueness func (l *PackageList) Add(p *Package) error { key := l.keyFunc(p) existing, ok := l.packages[key] if ok { if !existing.Equals(p) { return &PackageConflictError{fmt.Errorf("conflict in package %s", p)} } return nil } l.packages[key] = p if l.indexed { for _, provides := range p.Provides { l.providesIndex[provides] = append(l.providesIndex[provides], p) } i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.lessPackages(p, l.packagesIndex[j]) }) // insert p into l.packagesIndex in position i l.packagesIndex = append(l.packagesIndex, nil) copy(l.packagesIndex[i+1:], l.packagesIndex[i:]) l.packagesIndex[i] = p } return nil } // ForEach calls handler for each package in list func (l *PackageList) ForEach(handler func(*Package) error) error { var err error for _, p := range l.packages { err = handler(p) if err != nil { return err } } return err } // ForEachIndexed calls handler for each package in list in indexed order func (l *PackageList) ForEachIndexed(handler func(*Package) error) error { if !l.indexed { panic("list not indexed, can't iterate") } var err error for _, p := range l.packagesIndex { err = handler(p) if err != nil { return err } } return err } // Len returns number of packages in the list func (l *PackageList) Len() int { return len(l.packages) } // Append adds content from one package list to another func (l *PackageList) Append(pl *PackageList) error { if l.indexed { panic("Append not supported when indexed") } for k, p := range pl.packages { existing, ok := l.packages[k] if ok { if !existing.Equals(p) { return fmt.Errorf("conflict in package %s", p) } } else { l.packages[k] = p } } return nil } // Remove removes package from the list, and updates index when required func (l *PackageList) Remove(p *Package) { delete(l.packages, l.keyFunc(p)) if l.indexed { for _, provides := range p.Provides { for i, pkg := range l.providesIndex[provides] { if pkg.Equals(p) { // remove l.ProvidesIndex[provides][i] w/o preserving order l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][i], l.providesIndex[provides] = nil, l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][:len(l.providesIndex[provides])-1] break } } } i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= p.Name }) for i < len(l.packagesIndex) && l.packagesIndex[i].Name == p.Name { if l.packagesIndex[i].Equals(p) { // remove l.packagesIndex[i] preserving order copy(l.packagesIndex[i:], l.packagesIndex[i+1:]) l.packagesIndex[len(l.packagesIndex)-1] = nil l.packagesIndex = l.packagesIndex[:len(l.packagesIndex)-1] break } i++ } } } // Architectures returns list of architectures present in packages and flag if source packages are present. 
// // If includeSource is true, meta-architecture "source" would be present in the list func (l *PackageList) Architectures(includeSource bool) (result []string) { result = make([]string, 0, 10) for _, pkg := range l.packages { if pkg.Architecture != ArchitectureAll && (pkg.Architecture != ArchitectureSource || includeSource) && !utils.StrSliceHasItem(result, pkg.Architecture) { result = append(result, pkg.Architecture) } } return } // Strings builds list of strings with package keys func (l *PackageList) Strings() []string { result := make([]string, l.Len()) i := 0 for _, p := range l.packages { result[i] = string(p.Key("")) i++ } return result } // FullNames builds a list of package {name}_{version}_{arch} func (l *PackageList) FullNames() []string { result := make([]string, l.Len()) i := 0 for _, p := range l.packages { result[i] = p.GetFullName() i++ } return result } // depSliceDeduplicate removes dups in slice of Dependencies func depSliceDeduplicate(s []Dependency) []Dependency { l := len(s) if l < 2 { return s } if l == 2 { if s[0] == s[1] { return s[0:1] } return s } found := make(map[string]bool, l) j := 0 for i, x := range s { h := x.Hash() if !found[h] { found[h] = true s[j] = s[i] j++ } } return s[:j] } // VerifyDependencies looks for missing dependencies in package list. // // Analysis would be performed for each architecture, in specified sources func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) { l.PrepareIndex() missing := make([]Dependency, 0, 128) if progress != nil { progress.InitBar(int64(l.Len())*int64(len(architectures)), false, aptly.BarGeneralVerifyDependencies) } for _, arch := range architectures { cache := make(map[string]bool, 2048) for _, p := range l.packagesIndex { if progress != nil { progress.AddBar(1) } if !p.MatchesArchitecture(arch) { continue } for _, dep := range p.GetDependencies(options) { variants, err := ParseDependencyVariants(dep) if err != nil { return nil, fmt.Errorf("unable to process package %s: %s", p, err) } variants = depSliceDeduplicate(variants) variantsMissing := make([]Dependency, 0, len(variants)) for _, dep := range variants { if dep.Architecture == "" { dep.Architecture = arch } hash := dep.Hash() satisfied, ok := cache[hash] if !ok { satisfied = sources.Search(dep, false) != nil cache[hash] = satisfied } if !satisfied && !ok { variantsMissing = append(variantsMissing, dep) } if satisfied && options&DepFollowAllVariants == 0 { variantsMissing = nil break } } missing = append(missing, variantsMissing...) 
} } } if progress != nil { progress.ShutdownBar() } if options&DepVerboseResolve == DepVerboseResolve && progress != nil { missingStr := make([]string, len(missing)) for i := range missing { missingStr[i] = missing[i].String() } progress.ColoredPrintf("@{y}Missing dependencies:@| %s", strings.Join(missingStr, ", ")) } return missing, nil } // Swap swaps two packages in index func (l *PackageList) Swap(i, j int) { l.packagesIndex[i], l.packagesIndex[j] = l.packagesIndex[j], l.packagesIndex[i] } func (l *PackageList) lessPackages(iPkg, jPkg *Package) bool { if iPkg.Name == jPkg.Name { cmp := CompareVersions(iPkg.Version, jPkg.Version) if cmp == 0 { return iPkg.Architecture < jPkg.Architecture } return cmp == 1 } return iPkg.Name < jPkg.Name } // Less compares two packages by name (lexicographical) and version (latest to oldest) func (l *PackageList) Less(i, j int) bool { return l.lessPackages(l.packagesIndex[i], l.packagesIndex[j]) } // PrepareIndex prepares list for indexing func (l *PackageList) PrepareIndex() { if l.indexed { return } l.packagesIndex = make([]*Package, l.Len()) l.providesIndex = make(map[string][]*Package, 128) i := 0 for _, p := range l.packages { l.packagesIndex[i] = p i++ for _, provides := range p.Provides { l.providesIndex[provides] = append(l.providesIndex[provides], p) } } sort.Sort(l) l.indexed = true } // Scan searches package index using full scan func (l *PackageList) Scan(q PackageQuery) (result *PackageList) { result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0) for _, pkg := range l.packages { if q.Matches(pkg) { result.Add(pkg) } } return } // SearchSupported returns true for PackageList func (l *PackageList) SearchSupported() bool { return true } // SearchByKey looks up package by exact key reference func (l *PackageList) SearchByKey(arch, name, version string) (result *PackageList) { result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0) pkg := l.packages["P"+arch+" "+name+" "+version] if pkg != nil { result.Add(pkg) } return } // Search searches package index for specified package(s) using optimized queries func (l *PackageList) Search(dep Dependency, allMatches bool) (searchResults []*Package) { if !l.indexed { panic("list not indexed, can't search") } i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= dep.Pkg }) for i < len(l.packagesIndex) && l.packagesIndex[i].Name == dep.Pkg { p := l.packagesIndex[i] if p.MatchesDependency(dep) { searchResults = append(searchResults, p) if !allMatches { break } } i++ } if dep.Relation == VersionDontCare { for _, p := range l.providesIndex[dep.Pkg] { if dep.Architecture == "" || p.MatchesArchitecture(dep.Architecture) { searchResults = append(searchResults, p) if !allMatches { break } } } } return } // Filter filters package index by specified queries (ORed together), possibly pulling dependencies func (l *PackageList) Filter(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string) (*PackageList, error) { return l.FilterWithProgress(queries, withDependencies, source, dependencyOptions, architecturesList, nil) } // FilterWithProgress filters package index by specified queries (ORed together), possibly pulling dependencies and displays progress func (l *PackageList) FilterWithProgress(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string, progress aptly.Progress) (*PackageList, error) { if !l.indexed { panic("list not indexed, can't filter")
} result := NewPackageList() for _, query := range queries { result.Append(query.Query(l)) } if withDependencies { added := result.Len() result.PrepareIndex() dependencySource := NewPackageList() if source != nil { dependencySource.Append(source) } dependencySource.Append(result) dependencySource.PrepareIndex() // while some new dependencies were discovered for added > 0 { added = 0 // find missing dependencies missing, err := result.VerifyDependencies(dependencyOptions, architecturesList, dependencySource, progress) if err != nil { return nil, err } // try to satisfy dependencies for _, dep := range missing { if dependencyOptions&DepFollowAllVariants == 0 { // dependency might have already been satisfied // with packages already been added // // when follow-all-variants is enabled, we need to try to expand anyway, // as even if dependency is satisfied now, there might be other ways to satisfy dependency if result.Search(dep, false) != nil { if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{y}Already satisfied dependency@|: %s with %s", &dep, result.Search(dep, true)) } continue } } searchResults := l.Search(dep, true) if len(searchResults) > 0 { for _, p := range searchResults { if result.Has(p) { continue } if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{g}Injecting package@|: %s", p) } result.Add(p) dependencySource.Add(p) added++ if dependencyOptions&DepFollowAllVariants == 0 { break } } } else { if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil { progress.ColoredPrintf("@{r}Unsatisfied dependency@|: %s", dep.String()) } } } } } return result, nil }
{ return fmt.Errorf("unable to load package with key %s: %s", key, err2) }
conditional_block
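The conditional block above is the error path of a ForEach callback: wrap the low-level failure with the offending key and return it so iteration aborts. Sketched in isolation (loadByKey is a hypothetical stand-in for collection.ByKey):

package main

import (
	"errors"
	"fmt"
)

// loadByKey is a hypothetical lookup that always fails, used only to
// show the wrap-and-abort error style of NewPackageListFromRefList.
func loadByKey(key []byte) (string, error) {
	return "", errors.New("missing")
}

func main() {
	key := []byte("Pamd64 nginx 1.0")
	if _, err2 := loadByKey(key); err2 != nil {
		err := fmt.Errorf("unable to load package with key %s: %s", key, err2)
		fmt.Println(err) // unable to load package with key Pamd64 nginx 1.0: missing
	}
}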
elasticsearch.py
import logging import math from decimal import Decimal from typing import Any from typing import Dict from typing import Optional from typing import Sequence from typing import Tuple from service_capacity_modeling.interface import AccessConsistency from service_capacity_modeling.interface import AccessPattern from service_capacity_modeling.interface import CapacityDesires from service_capacity_modeling.interface import CapacityPlan from service_capacity_modeling.interface import CapacityRequirement from service_capacity_modeling.interface import certain_float from service_capacity_modeling.interface import certain_int from service_capacity_modeling.interface import Clusters from service_capacity_modeling.interface import DataShape from service_capacity_modeling.interface import Drive from service_capacity_modeling.interface import FixedInterval from service_capacity_modeling.interface import Instance from service_capacity_modeling.interface import Interval from service_capacity_modeling.interface import QueryPattern from service_capacity_modeling.interface import RegionContext from service_capacity_modeling.interface import Requirements from service_capacity_modeling.models import CapacityModel from service_capacity_modeling.models.common import compute_stateful_zone from service_capacity_modeling.models.common import simple_network_mbps from service_capacity_modeling.models.common import sqrt_staffed_cores from service_capacity_modeling.models.common import working_set_from_drive_and_slo from service_capacity_modeling.stats import dist_for_interval logger = logging.getLogger(__name__) def _target_rf(desires: CapacityDesires, user_copies: Optional[int]) -> int: if user_copies is not None: assert user_copies > 1 return user_copies # Due to the relaxed durability and consistency requirements we can # run with RF=2 if desires.data_shape.durability_slo_order.mid < 1000: return 2 return 3 # Looks like Elasticsearch (Lucene) uses a tiered merge strategy of 10 # segments of 512 megs per # https://lucene.apache.org/core/8_1_0/core/org/apache/lucene/index/TieredMergePolicy.html#setSegmentsPerTier(double) # (FIXME) Verify what elastic merge actually does def _es_io_per_read(node_size_gib, segment_size_mb=512): size_mib = node_size_gib * 1024 segments = max(1, size_mib // segment_size_mb) # 10 segments per tier, plus 1 for L0 (avg) levels = 1 + int(math.ceil(math.log(segments, 10))) return levels def _estimate_elasticsearch_requirement( instance: Instance, desires: CapacityDesires, working_set: float, reads_per_second: float, max_rps_to_disk: int, zones_per_region: int = 3, copies_per_region: int = 3, ) -> CapacityRequirement: """Estimate the capacity required for one zone given a regional desire The input desires should be the **regional** desire, and this function will return the zonal capacity requirement """ # Keep half of the cores free for background work (merging mostly) needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5) # Keep half of the bandwidth available for backup needed_network_mbps = simple_network_mbps(desires) * 2 needed_disk = math.ceil( (1.0 / desires.data_shape.estimated_compression_ratio.mid) * desires.data_shape.estimated_state_size_gib.mid * copies_per_region, ) # Rough estimate of how many instances we would need just for the CPU # Note that this is a lower bound, we might end up with more. needed_cores = math.ceil( max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz)) )
# Generally speaking we want fewer than some number of reads per second # hitting disk per instance. If we don't have many reads we don't need to # hold much data in memory. instance_rps = max(1, reads_per_second // rough_count) disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count)) rps_working_set = min(1.0, disk_rps / max_rps_to_disk) # If disk RPS will be smaller than our target because there are no # reads, we don't need to hold as much data in memory needed_memory = min(working_set, rps_working_set) * needed_disk # Now convert to per zone needed_cores = needed_cores // zones_per_region needed_disk = needed_disk // zones_per_region needed_memory = int(needed_memory // zones_per_region) logger.debug( "Need (cpu, mem, disk, working) = (%s, %s, %s, %f)", needed_cores, needed_memory, needed_disk, working_set, ) return CapacityRequirement( requirement_type="elasticsearch-data-zonal", core_reference_ghz=desires.core_reference_ghz, cpu_cores=certain_int(needed_cores), mem_gib=certain_float(needed_memory), disk_gib=certain_float(needed_disk), network_mbps=certain_float(needed_network_mbps), context={ "working_set": min(working_set, rps_working_set), "rps_working_set": rps_working_set, "disk_slo_working_set": working_set, "replication_factor": copies_per_region, "compression_ratio": round( 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2 ), "read_per_second": reads_per_second, }, ) def _upsert_params(cluster, params): if cluster.cluster_params: cluster.cluster_params.update(params) else: cluster.cluster_params = params # pylint: disable=too-many-locals def _estimate_elasticsearch_cluster_zonal( instance: Instance, drive: Drive, desires: CapacityDesires, zones_per_region: int = 3, copies_per_region: int = 3, max_local_disk_gib: int = 4096, max_regional_size: int = 240, max_rps_to_disk: int = 500, ) -> Optional[CapacityPlan]: # Netflix Elasticsearch doesn't like to deploy on really small instances if instance.cpu < 2 or instance.ram_gib < 14: return None # (FIXME): Need elasticsearch input # Right now Elasticsearch doesn't deploy to cloud drives, just adding this # here and leaving the capability to handle cloud drives for the future if instance.drive is None: return None rps = desires.query_pattern.estimated_read_per_second.mid // zones_per_region # Based on the disk latency and the read latency SLOs we adjust our # working set to keep more or less data in RAM. Faster drives need # less fronting RAM. ws_drive = instance.drive or drive working_set = working_set_from_drive_and_slo( drive_read_latency_dist=dist_for_interval(ws_drive.read_io_latency_ms), read_slo_latency_dist=dist_for_interval( desires.query_pattern.read_latency_slo_ms ), estimated_working_set=desires.data_shape.estimated_working_set_percent, # Elasticsearch has looser latency SLOs, target the 90th percentile of disk # latency to keep in RAM. 
target_percentile=0.90, ).mid requirement = _estimate_elasticsearch_requirement( instance=instance, desires=desires, working_set=working_set, reads_per_second=rps, zones_per_region=zones_per_region, copies_per_region=copies_per_region, max_rps_to_disk=max_rps_to_disk, ) base_mem = ( desires.data_shape.reserved_instance_app_mem_gib + desires.data_shape.reserved_instance_system_mem_gib ) cluster = compute_stateful_zone( instance=instance, drive=drive, needed_cores=int(requirement.cpu_cores.mid), needed_disk_gib=int(requirement.disk_gib.mid), needed_memory_gib=int(requirement.mem_gib.mid), needed_network_mbps=requirement.network_mbps.mid, # Assume that by provisioning enough memory we'll get # a 90% hit rate, but take into account the reads per read # from the per node dataset using leveled compaction # FIXME: I feel like this can be improved required_disk_ios=lambda x: _es_io_per_read(x) * math.ceil(0.1 * rps), # Elasticsearch requires ephemeral disks to be % full because tiered # merging can make progress as long as there is some headroom required_disk_space=lambda x: x * 1.4, max_local_disk_gib=max_local_disk_gib, # elasticsearch clusters can autobalance via shard placement cluster_size=lambda x: x, min_count=1, # Sidecars/System takes away memory from elasticsearch # Elasticsearch uses half of available system max of 32 for compressed # oops reserve_memory=lambda x: base_mem + max(32, x / 2), core_reference_ghz=requirement.core_reference_ghz, ) # Communicate to the actual provision that if we want reduced RF params = {"elasticsearch.copies": copies_per_region} _upsert_params(cluster, params) # elasticsearch clusters generally should try to stay under some total number # of nodes. Orgs do this for all kinds of reasons such as # * Security group limits. Since you must have < 500 rules if you're # ingressing public ips) # * Maintenance. If your restart script does one node at a time you want # smaller clusters so your restarts don't take months. # * NxN network issues. Sometimes smaller clusters of bigger nodes # are better for network propagation if cluster.count > (max_regional_size // zones_per_region): return None ec2_cost = zones_per_region * cluster.annual_cost cluster.cluster_type = "elasticsearch-data" clusters = Clusters( total_annual_cost=round(Decimal(ec2_cost), 2), zonal=[cluster] * zones_per_region, regional=list(), ) return CapacityPlan( requirements=Requirements(zonal=[requirement] * zones_per_region), candidate_clusters=clusters, ) class NflxElasticsearchCapacityModel(CapacityModel): @staticmethod def capacity_plan( instance: Instance, drive: Drive, context: RegionContext, desires: CapacityDesires, extra_model_arguments: Dict[str, Any], ) -> Optional[CapacityPlan]: # (FIXME): Need elasticsearch input # TODO: Use durability requirements to compute RF. 
copies_per_region: int = _target_rf( desires, extra_model_arguments.get("copies_per_region", None) ) max_regional_size: int = extra_model_arguments.get("max_regional_size", 240) max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 1000) # Very large nodes are hard to recover max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5000) return _estimate_elasticsearch_cluster_zonal( instance=instance, drive=drive, desires=desires, zones_per_region=context.zones_in_region, copies_per_region=copies_per_region, max_regional_size=max_regional_size, max_local_disk_gib=max_local_disk_gib, max_rps_to_disk=max_rps_to_disk, ) @staticmethod def description(): return "Netflix Streaming Elasticsearch Model" @staticmethod def extra_model_arguments() -> Sequence[Tuple[str, str, str]]: return ( ( "copies_per_region", "int = 3", "How many copies of the data will exist e.g. RF=3. If unsupplied" " this will be deduced from durability and consistency desires", ), ( "max_regional_size", # Twice the size of our largest cluster "int = 240", "What is the maximum size of a cluster in this region", ), ( "max_local_disk_gib", # Nodes larger than 4 TiB are painful to recover "int = 4096", "The maximum amount of data we store per machine", ), ( "max_rps_to_disk", "int = 1000", "How many disk IOs should be allowed to hit disk per instance", ), ) @staticmethod def default_desires(user_desires, extra_model_arguments: Dict[str, Any]): acceptable_consistency = set( ( AccessConsistency.best_effort, AccessConsistency.eventual, AccessConsistency.never, None, ) ) for key, value in user_desires.query_pattern.access_consistency: if value.target_consistency not in acceptable_consistency: raise ValueError( f"Elasticsearch can only provide {acceptable_consistency} access." f"User asked for {key}={value}" ) # Lower RF = less write compute rf = _target_rf( user_desires, extra_model_arguments.get("copies_per_region", None) ) if rf < 3: rf_write_latency = Interval(low=0.2, mid=0.6, high=2, confidence=0.98) else: rf_write_latency = Interval(low=0.4, mid=1, high=2, confidence=0.98) if user_desires.query_pattern.access_pattern == AccessPattern.latency: return CapacityDesires( query_pattern=QueryPattern( access_pattern=AccessPattern.latency, estimated_mean_read_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.98 ), estimated_mean_write_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.98 ), # Elasticsearch reads and writes can take CPU time as # large cardinality search and such can be hard to predict. 
estimated_mean_read_latency_ms=Interval( low=1, mid=2, high=100, confidence=0.98 ), # Writes depend heavily on rf and consistency estimated_mean_write_latency_ms=rf_write_latency, # Assume point queries "Single digit millisecond SLO" read_latency_slo_ms=FixedInterval( low=1, mid=10, high=100, confidence=0.98, ), write_latency_slo_ms=FixedInterval( low=1, mid=10, high=100, confidence=0.98, ), ), # Most latency sensitive elasticsearch clusters are in the # < 100GiB range data_shape=DataShape( estimated_state_size_gib=Interval( low=10, mid=100, high=1000, confidence=0.98 ), # Netflix Elasticsearch compresses with Deflate (gzip) # by default estimated_compression_ratio=Interval( minimum_value=1.4, maximum_value=8, low=2, mid=3, high=4, confidence=0.98, ), # Elasticsearch has a 1 GiB sidecar reserved_instance_app_mem_gib=1, ), ) else: return CapacityDesires( # (FIXME): Need to pair with ES folks on the exact values query_pattern=QueryPattern( access_pattern=AccessPattern.throughput, # Bulk writes and reads are larger in general estimated_mean_read_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.95 ), estimated_mean_write_size_bytes=Interval( low=4096, mid=16384, high=1048576, confidence=0.95 ), # (FIXME): Need es input # Elasticsearch analytics reads probably take extra time # as they are scrolling or doing complex aggregations estimated_mean_read_latency_ms=Interval( low=1, mid=20, high=100, confidence=0.98 ), # Throughput writes typically involve large bulks # which can be expensive, but it's rare for it to have a # huge tail estimated_mean_write_latency_ms=Interval( low=1, mid=10, high=100, confidence=0.98 ), # Assume scan queries "Tens of millisecond SLO" read_latency_slo_ms=FixedInterval( minimum_value=1, maximum_value=100, low=10, mid=20, high=100, confidence=0.98, ), write_latency_slo_ms=FixedInterval( minimum_value=1, maximum_value=100, low=10, mid=20, high=100, confidence=0.98, ), ), # Most throughput elasticsearch clusters are in the # < 1TiB range data_shape=DataShape( estimated_state_size_gib=Interval( low=100, mid=1000, high=10000, confidence=0.98 ), # Netflix Elasticsearch compresses with Deflate (gzip) # by default estimated_compression_ratio=Interval( minimum_value=1.4, maximum_value=8, low=2, mid=3, high=4, confidence=0.98, ), # Elasticsearch has a 1 GiB sidecar reserved_instance_app_mem_gib=1, ), ) nflx_elasticsearch_capacity_model = NflxElasticsearchCapacityModel()
rough_count = math.ceil(needed_cores / instance.cpu)
random_line_split
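The middle line above feeds rough_count into the disk-IO estimate, whose core is _es_io_per_read. A worked example of that tiered-merge math (the 2048 GiB node size is an assumed input, not from the model):

import math

def es_io_per_read(node_size_gib, segment_size_mb=512):
    # Same tiered-merge estimate as _es_io_per_read in the model above.
    size_mib = node_size_gib * 1024
    segments = max(1, size_mib // segment_size_mb)
    return 1 + int(math.ceil(math.log(segments, 10)))

# 2048 GiB -> 2,097,152 MiB -> 4096 segments of 512 MiB,
# so 1 + ceil(log10(4096)) = 1 + 4 = 5 disk IOs per uncached read.
print(es_io_per_read(2048))  # 5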
elasticsearch.py
import logging import math from decimal import Decimal from typing import Any from typing import Dict from typing import Optional from typing import Sequence from typing import Tuple from service_capacity_modeling.interface import AccessConsistency from service_capacity_modeling.interface import AccessPattern from service_capacity_modeling.interface import CapacityDesires from service_capacity_modeling.interface import CapacityPlan from service_capacity_modeling.interface import CapacityRequirement from service_capacity_modeling.interface import certain_float from service_capacity_modeling.interface import certain_int from service_capacity_modeling.interface import Clusters from service_capacity_modeling.interface import DataShape from service_capacity_modeling.interface import Drive from service_capacity_modeling.interface import FixedInterval from service_capacity_modeling.interface import Instance from service_capacity_modeling.interface import Interval from service_capacity_modeling.interface import QueryPattern from service_capacity_modeling.interface import RegionContext from service_capacity_modeling.interface import Requirements from service_capacity_modeling.models import CapacityModel from service_capacity_modeling.models.common import compute_stateful_zone from service_capacity_modeling.models.common import simple_network_mbps from service_capacity_modeling.models.common import sqrt_staffed_cores from service_capacity_modeling.models.common import working_set_from_drive_and_slo from service_capacity_modeling.stats import dist_for_interval logger = logging.getLogger(__name__) def _target_rf(desires: CapacityDesires, user_copies: Optional[int]) -> int: if user_copies is not None: assert user_copies > 1 return user_copies # Due to the relaxed durability and consistency requirements we can # run with RF=2 if desires.data_shape.durability_slo_order.mid < 1000: return 2 return 3 # Looks like Elasticsearch (Lucene) uses a tiered merge strategy of 10 # segments of 512 megs per # https://lucene.apache.org/core/8_1_0/core/org/apache/lucene/index/TieredMergePolicy.html#setSegmentsPerTier(double) # (FIXME) Verify what elastic merge actually does def _es_io_per_read(node_size_gib, segment_size_mb=512): size_mib = node_size_gib * 1024 segments = max(1, size_mib // segment_size_mb) # 10 segments per tier, plus 1 for L0 (avg) levels = 1 + int(math.ceil(math.log(segments, 10))) return levels def _estimate_elasticsearch_requirement( instance: Instance, desires: CapacityDesires, working_set: float, reads_per_second: float, max_rps_to_disk: int, zones_per_region: int = 3, copies_per_region: int = 3, ) -> CapacityRequirement: """Estimate the capacity required for one zone given a regional desire The input desires should be the **regional** desire, and this function will return the zonal capacity requirement """ # Keep half of the cores free for background work (merging mostly) needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5) # Keep half of the bandwidth available for backup needed_network_mbps = simple_network_mbps(desires) * 2 needed_disk = math.ceil( (1.0 / desires.data_shape.estimated_compression_ratio.mid) * desires.data_shape.estimated_state_size_gib.mid * copies_per_region, ) # Rough estimate of how many instances we would need just for the CPU # Note that this is a lower bound, we might end up with more.
needed_cores = math.ceil( max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz)) ) rough_count = math.ceil(needed_cores / instance.cpu) # Generally speaking we want fewer than some number of reads per second # hitting disk per instance. If we don't have many reads we don't need to # hold much data in memory. instance_rps = max(1, reads_per_second // rough_count) disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count)) rps_working_set = min(1.0, disk_rps / max_rps_to_disk) # If disk RPS will be smaller than our target because there are no # reads, we don't need to hold as much data in memory needed_memory = min(working_set, rps_working_set) * needed_disk # Now convert to per zone needed_cores = needed_cores // zones_per_region needed_disk = needed_disk // zones_per_region needed_memory = int(needed_memory // zones_per_region) logger.debug( "Need (cpu, mem, disk, working) = (%s, %s, %s, %f)", needed_cores, needed_memory, needed_disk, working_set, ) return CapacityRequirement( requirement_type="elasticsearch-data-zonal", core_reference_ghz=desires.core_reference_ghz, cpu_cores=certain_int(needed_cores), mem_gib=certain_float(needed_memory), disk_gib=certain_float(needed_disk), network_mbps=certain_float(needed_network_mbps), context={ "working_set": min(working_set, rps_working_set), "rps_working_set": rps_working_set, "disk_slo_working_set": working_set, "replication_factor": copies_per_region, "compression_ratio": round( 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2 ), "read_per_second": reads_per_second, }, ) def _upsert_params(cluster, params): if cluster.cluster_params: cluster.cluster_params.update(params) else: cluster.cluster_params = params # pylint: disable=too-many-locals def _estimate_elasticsearch_cluster_zonal( instance: Instance, drive: Drive, desires: CapacityDesires, zones_per_region: int = 3, copies_per_region: int = 3, max_local_disk_gib: int = 4096, max_regional_size: int = 240, max_rps_to_disk: int = 500, ) -> Optional[CapacityPlan]: # Netflix Elasticsearch doesn't like to deploy on really small instances
class NflxElasticsearchCapacityModel(CapacityModel): @staticmethod def capacity_plan( instance: Instance, drive: Drive, context: RegionContext, desires: CapacityDesires, extra_model_arguments: Dict[str, Any], ) -> Optional[CapacityPlan]: # (FIXME): Need elasticsearch input # TODO: Use durability requirements to compute RF. copies_per_region: int = _target_rf( desires, extra_model_arguments.get("copies_per_region", None) ) max_regional_size: int = extra_model_arguments.get("max_regional_size", 240) max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 1000) # Very large nodes are hard to recover max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5000) return _estimate_elasticsearch_cluster_zonal( instance=instance, drive=drive, desires=desires, zones_per_region=context.zones_in_region, copies_per_region=copies_per_region, max_regional_size=max_regional_size, max_local_disk_gib=max_local_disk_gib, max_rps_to_disk=max_rps_to_disk, ) @staticmethod def description(): return "Netflix Streaming Elasticsearch Model" @staticmethod def extra_model_arguments() -> Sequence[Tuple[str, str, str]]: return ( ( "copies_per_region", "int = 3", "How many copies of the data will exist e.g. RF=3. If unsupplied" " this will be deduced from durability and consistency desires", ), ( "max_regional_size", # Twice the size of our largest cluster "int = 240", "What is the maximum size of a cluster in this region", ), ( "max_local_disk_gib", # Nodes larger than 4 TiB are painful to recover "int = 4096", "The maximum amount of data we store per machine", ), ( "max_rps_to_disk", "int = 1000", "How many disk IOs should be allowed to hit disk per instance", ), ) @staticmethod def default_desires(user_desires, extra_model_arguments: Dict[str, Any]): acceptable_consistency = set( ( AccessConsistency.best_effort, AccessConsistency.eventual, AccessConsistency.never, None, ) ) for key, value in user_desires.query_pattern.access_consistency: if value.target_consistency not in acceptable_consistency: raise ValueError( f"Elasticsearch can only provide {acceptable_consistency} access." f"User asked for {key}={value}" ) # Lower RF = less write compute rf = _target_rf( user_desires, extra_model_arguments.get("copies_per_region", None) ) if rf < 3: rf_write_latency = Interval(low=0.2, mid=0.6, high=2, confidence=0.98) else: rf_write_latency = Interval(low=0.4, mid=1, high=2, confidence=0.98) if user_desires.query_pattern.access_pattern == AccessPattern.latency: return CapacityDesires( query_pattern=QueryPattern( access_pattern=AccessPattern.latency, estimated_mean_read_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.98 ), estimated_mean_write_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.98 ), # Elasticsearch reads and writes can take CPU time as # large cardinality search and such can be hard to predict. 
estimated_mean_read_latency_ms=Interval( low=1, mid=2, high=100, confidence=0.98 ), # Writes depend heavily on rf and consistency estimated_mean_write_latency_ms=rf_write_latency, # Assume point queries "Single digit millisecond SLO" read_latency_slo_ms=FixedInterval( low=1, mid=10, high=100, confidence=0.98, ), write_latency_slo_ms=FixedInterval( low=1, mid=10, high=100, confidence=0.98, ), ), # Most latency sensitive elasticsearch clusters are in the # < 100GiB range data_shape=DataShape( estimated_state_size_gib=Interval( low=10, mid=100, high=1000, confidence=0.98 ), # Netflix Elasticsearch compresses with Deflate (gzip) # by default estimated_compression_ratio=Interval( minimum_value=1.4, maximum_value=8, low=2, mid=3, high=4, confidence=0.98, ), # Elasticsearch has a 1 GiB sidecar reserved_instance_app_mem_gib=1, ), ) else: return CapacityDesires( # (FIXME): Need to pair with ES folks on the exact values query_pattern=QueryPattern( access_pattern=AccessPattern.throughput, # Bulk writes and reads are larger in general estimated_mean_read_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.95 ), estimated_mean_write_size_bytes=Interval( low=4096, mid=16384, high=1048576, confidence=0.95 ), # (FIXME): Need es input # Elasticsearch analytics reads probably take extra time # as they are scrolling or doing complex aggregations estimated_mean_read_latency_ms=Interval( low=1, mid=20, high=100, confidence=0.98 ), # Throughput writes typically involve large bulks # which can be expensive, but it's rare for it to have a # huge tail estimated_mean_write_latency_ms=Interval( low=1, mid=10, high=100, confidence=0.98 ), # Assume scan queries "Tens of millisecond SLO" read_latency_slo_ms=FixedInterval( minimum_value=1, maximum_value=100, low=10, mid=20, high=100, confidence=0.98, ), write_latency_slo_ms=FixedInterval( minimum_value=1, maximum_value=100, low=10, mid=20, high=100, confidence=0.98, ), ), # Most throughput elasticsearch clusters are in the # < 1TiB range data_shape=DataShape( estimated_state_size_gib=Interval( low=100, mid=1000, high=10000, confidence=0.98 ), # Netflix Elasticsearch compresses with Deflate (gzip) # by default estimated_compression_ratio=Interval( minimum_value=1.4, maximum_value=8, low=2, mid=3, high=4, confidence=0.98, ), # Elasticsearch has a 1 GiB sidecar reserved_instance_app_mem_gib=1, ), ) nflx_elasticsearch_capacity_model = NflxElasticsearchCapacityModel()
if instance.cpu < 2 or instance.ram_gib < 14: return None # (FIXME): Need elasticsearch input # Right now Elasticsearch doesn't deploy to cloud drives, just adding this # here and leaving the capability to handle cloud drives for the future if instance.drive is None: return None rps = desires.query_pattern.estimated_read_per_second.mid // zones_per_region # Based on the disk latency and the read latency SLOs we adjust our # working set to keep more or less data in RAM. Faster drives need # less fronting RAM. ws_drive = instance.drive or drive working_set = working_set_from_drive_and_slo( drive_read_latency_dist=dist_for_interval(ws_drive.read_io_latency_ms), read_slo_latency_dist=dist_for_interval( desires.query_pattern.read_latency_slo_ms ), estimated_working_set=desires.data_shape.estimated_working_set_percent, # Elasticsearch has looser latency SLOs, target the 90th percentile of disk # latency to keep in RAM. target_percentile=0.90, ).mid requirement = _estimate_elasticsearch_requirement( instance=instance, desires=desires, working_set=working_set, reads_per_second=rps, zones_per_region=zones_per_region, copies_per_region=copies_per_region, max_rps_to_disk=max_rps_to_disk, ) base_mem = ( desires.data_shape.reserved_instance_app_mem_gib + desires.data_shape.reserved_instance_system_mem_gib ) cluster = compute_stateful_zone( instance=instance, drive=drive, needed_cores=int(requirement.cpu_cores.mid), needed_disk_gib=int(requirement.disk_gib.mid), needed_memory_gib=int(requirement.mem_gib.mid), needed_network_mbps=requirement.network_mbps.mid, # Assume that by provisioning enough memory we'll get # a 90% hit rate, but take into account the reads per read # from the per node dataset using leveled compaction # FIXME: I feel like this can be improved required_disk_ios=lambda x: _es_io_per_read(x) * math.ceil(0.1 * rps), # Elasticsearch requires ephemeral disks to be % full because tiered # merging can make progress as long as there is some headroom required_disk_space=lambda x: x * 1.4, max_local_disk_gib=max_local_disk_gib, # elasticsearch clusters can autobalance via shard placement cluster_size=lambda x: x, min_count=1, # Sidecars/System takes away memory from elasticsearch # Elasticsearch uses half of available system max of 32 for compressed # oops reserve_memory=lambda x: base_mem + max(32, x / 2), core_reference_ghz=requirement.core_reference_ghz, ) # Communicate to the actual provision that if we want reduced RF params = {"elasticsearch.copies": copies_per_region} _upsert_params(cluster, params) # elasticsearch clusters generally should try to stay under some total number # of nodes. Orgs do this for all kinds of reasons such as # * Security group limits. Since you must have < 500 rules if you're # ingressing public ips) # * Maintenance. If your restart script does one node at a time you want # smaller clusters so your restarts don't take months. # * NxN network issues. Sometimes smaller clusters of bigger nodes # are better for network propagation if cluster.count > (max_regional_size // zones_per_region): return None ec2_cost = zones_per_region * cluster.annual_cost cluster.cluster_type = "elasticsearch-data" clusters = Clusters( total_annual_cost=round(Decimal(ec2_cost), 2), zonal=[cluster] * zones_per_region, regional=list(), ) return CapacityPlan( requirements=Requirements(zonal=[requirement] * zones_per_region), candidate_clusters=clusters, )
identifier_body
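Two guardrails in the zonal body above are easy to make concrete: required_disk_space=lambda x: x * 1.4 keeps merge headroom (a node is "full" at roughly 1/1.4, about 71% of raw disk), and the regional size cap is divided across zones. A quick check with assumed numbers, not values from the model's tests:

max_regional_size, zones_per_region = 240, 3
print(max_regional_size // zones_per_region)  # 80 -> per-zone node cap

needed_gib = 1000                  # data a node must hold (assumed)
print(needed_gib * 1.4)            # 1400.0 raw GiB provisioned
print(round(1 / 1.4, 2))           # 0.71 -> disks stay under ~71% full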
elasticsearch.py
import logging import math from decimal import Decimal from typing import Any from typing import Dict from typing import Optional from typing import Sequence from typing import Tuple from service_capacity_modeling.interface import AccessConsistency from service_capacity_modeling.interface import AccessPattern from service_capacity_modeling.interface import CapacityDesires from service_capacity_modeling.interface import CapacityPlan from service_capacity_modeling.interface import CapacityRequirement from service_capacity_modeling.interface import certain_float from service_capacity_modeling.interface import certain_int from service_capacity_modeling.interface import Clusters from service_capacity_modeling.interface import DataShape from service_capacity_modeling.interface import Drive from service_capacity_modeling.interface import FixedInterval from service_capacity_modeling.interface import Instance from service_capacity_modeling.interface import Interval from service_capacity_modeling.interface import QueryPattern from service_capacity_modeling.interface import RegionContext from service_capacity_modeling.interface import Requirements from service_capacity_modeling.models import CapacityModel from service_capacity_modeling.models.common import compute_stateful_zone from service_capacity_modeling.models.common import simple_network_mbps from service_capacity_modeling.models.common import sqrt_staffed_cores from service_capacity_modeling.models.common import working_set_from_drive_and_slo from service_capacity_modeling.stats import dist_for_interval logger = logging.getLogger(__name__) def _target_rf(desires: CapacityDesires, user_copies: Optional[int]) -> int: if user_copies is not None: assert user_copies > 1 return user_copies # Due to the relaxed durability and consistency requirements we can # run with RF=2 if desires.data_shape.durability_slo_order.mid < 1000: return 2 return 3 # Looks like Elasticsearch (Lucene) uses a tiered merge strategy of 10 # segments of 512 megs per # https://lucene.apache.org/core/8_1_0/core/org/apache/lucene/index/TieredMergePolicy.html#setSegmentsPerTier(double) # (FIXME) Verify what elastic merge actually does def _es_io_per_read(node_size_gib, segment_size_mb=512): size_mib = node_size_gib * 1024 segments = max(1, size_mib // segment_size_mb) # 10 segments per tier, plus 1 for L0 (avg) levels = 1 + int(math.ceil(math.log(segments, 10))) return levels def _estimate_elasticsearch_requirement( instance: Instance, desires: CapacityDesires, working_set: float, reads_per_second: float, max_rps_to_disk: int, zones_per_region: int = 3, copies_per_region: int = 3, ) -> CapacityRequirement: """Estimate the capacity required for one zone given a regional desire The input desires should be the **regional** desire, and this function will return the zonal capacity requirement """ # Keep half of the cores free for background work (merging mostly) needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5) # Keep half of the bandwidth available for backup needed_network_mbps = simple_network_mbps(desires) * 2 needed_disk = math.ceil( (1.0 / desires.data_shape.estimated_compression_ratio.mid) * desires.data_shape.estimated_state_size_gib.mid * copies_per_region, ) # Rough estimate of how many instances we would need just for the CPU # Note that this is a lower bound, we might end up with more.
needed_cores = math.ceil( max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz)) ) rough_count = math.ceil(needed_cores / instance.cpu) # Generally speaking we want fewer than some number of reads per second # hitting disk per instance. If we don't have many reads we don't need to # hold much data in memory. instance_rps = max(1, reads_per_second // rough_count) disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count)) rps_working_set = min(1.0, disk_rps / max_rps_to_disk) # If disk RPS will be smaller than our target because there are no # reads, we don't need to hold as much data in memory needed_memory = min(working_set, rps_working_set) * needed_disk # Now convert to per zone needed_cores = needed_cores // zones_per_region needed_disk = needed_disk // zones_per_region needed_memory = int(needed_memory // zones_per_region) logger.debug( "Need (cpu, mem, disk, working) = (%s, %s, %s, %f)", needed_cores, needed_memory, needed_disk, working_set, ) return CapacityRequirement( requirement_type="elasticsearch-data-zonal", core_reference_ghz=desires.core_reference_ghz, cpu_cores=certain_int(needed_cores), mem_gib=certain_float(needed_memory), disk_gib=certain_float(needed_disk), network_mbps=certain_float(needed_network_mbps), context={ "working_set": min(working_set, rps_working_set), "rps_working_set": rps_working_set, "disk_slo_working_set": working_set, "replication_factor": copies_per_region, "compression_ratio": round( 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2 ), "read_per_second": reads_per_second, }, ) def _upsert_params(cluster, params): if cluster.cluster_params:
else: cluster.cluster_params = params # pylint: disable=too-many-locals def _estimate_elasticsearch_cluster_zonal( instance: Instance, drive: Drive, desires: CapacityDesires, zones_per_region: int = 3, copies_per_region: int = 3, max_local_disk_gib: int = 4096, max_regional_size: int = 240, max_rps_to_disk: int = 500, ) -> Optional[CapacityPlan]: # Netflix Elasticsearch doesn't like to deploy on really small instances if instance.cpu < 2 or instance.ram_gib < 14: return None # (FIXME): Need elasticsearch input # Right now Elasticsearch doesn't deploy to cloud drives, just adding this # here and leaving the capability to handle cloud drives for the future if instance.drive is None: return None rps = desires.query_pattern.estimated_read_per_second.mid // zones_per_region # Based on the disk latency and the read latency SLOs we adjust our # working set to keep more or less data in RAM. Faster drives need # less fronting RAM. ws_drive = instance.drive or drive working_set = working_set_from_drive_and_slo( drive_read_latency_dist=dist_for_interval(ws_drive.read_io_latency_ms), read_slo_latency_dist=dist_for_interval( desires.query_pattern.read_latency_slo_ms ), estimated_working_set=desires.data_shape.estimated_working_set_percent, # Elasticsearch has looser latency SLOs, target the 90th percentile of disk # latency to keep in RAM. target_percentile=0.90, ).mid requirement = _estimate_elasticsearch_requirement( instance=instance, desires=desires, working_set=working_set, reads_per_second=rps, zones_per_region=zones_per_region, copies_per_region=copies_per_region, max_rps_to_disk=max_rps_to_disk, ) base_mem = ( desires.data_shape.reserved_instance_app_mem_gib + desires.data_shape.reserved_instance_system_mem_gib ) cluster = compute_stateful_zone( instance=instance, drive=drive, needed_cores=int(requirement.cpu_cores.mid), needed_disk_gib=int(requirement.disk_gib.mid), needed_memory_gib=int(requirement.mem_gib.mid), needed_network_mbps=requirement.network_mbps.mid, # Assume that by provisioning enough memory we'll get # a 90% hit rate, but take into account the reads per read # from the per node dataset using tiered merging # FIXME: I feel like this can be improved required_disk_ios=lambda x: _es_io_per_read(x) * math.ceil(0.1 * rps), # Elasticsearch requires ephemeral disks to stay well under 100% full # (hence the 1.4x factor) because tiered merging can make progress as # long as there is some headroom required_disk_space=lambda x: x * 1.4, max_local_disk_gib=max_local_disk_gib, # elasticsearch clusters can autobalance via shard placement cluster_size=lambda x: x, min_count=1, # Sidecars/System take away memory from elasticsearch # Elasticsearch uses half of available system memory, up to a max of # 32 GiB, for its heap (compressed oops) reserve_memory=lambda x: base_mem + min(32, x / 2), core_reference_ghz=requirement.core_reference_ghz, ) # Communicate to the actual provisioner if we want reduced RF params = {"elasticsearch.copies": copies_per_region} _upsert_params(cluster, params) # elasticsearch clusters generally should try to stay under some total number # of nodes. Orgs do this for all kinds of reasons such as # * Security group limits (you must have < 500 rules if you're # ingressing public ips) # * Maintenance. If your restart script does one node at a time you want # smaller clusters so your restarts don't take months. # * NxN network issues.
Sometimes smaller clusters of bigger nodes # are better for network propagation if cluster.count > (max_regional_size // zones_per_region): return None ec2_cost = zones_per_region * cluster.annual_cost cluster.cluster_type = "elasticsearch-data" clusters = Clusters( total_annual_cost=round(Decimal(ec2_cost), 2), zonal=[cluster] * zones_per_region, regional=list(), ) return CapacityPlan( requirements=Requirements(zonal=[requirement] * zones_per_region), candidate_clusters=clusters, ) class NflxElasticsearchCapacityModel(CapacityModel): @staticmethod def capacity_plan( instance: Instance, drive: Drive, context: RegionContext, desires: CapacityDesires, extra_model_arguments: Dict[str, Any], ) -> Optional[CapacityPlan]: # (FIXME): Need elasticsearch input # TODO: Use durability requirements to compute RF. copies_per_region: int = _target_rf( desires, extra_model_arguments.get("copies_per_region", None) ) max_regional_size: int = extra_model_arguments.get("max_regional_size", 240) max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 1000) # Very large nodes are hard to recover max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5000) return _estimate_elasticsearch_cluster_zonal( instance=instance, drive=drive, desires=desires, zones_per_region=context.zones_in_region, copies_per_region=copies_per_region, max_regional_size=max_regional_size, max_local_disk_gib=max_local_disk_gib, max_rps_to_disk=max_rps_to_disk, ) @staticmethod def description(): return "Netflix Streaming Elasticsearch Model" @staticmethod def extra_model_arguments() -> Sequence[Tuple[str, str, str]]: return ( ( "copies_per_region", "int = 3", "How many copies of the data will exist e.g. RF=3. If unsupplied" " this will be deduced from durability and consistency desires", ), ( "max_regional_size", # Twice the size of our largest cluster "int = 240", "What is the maximum size of a cluster in this region", ), ( "max_local_disk_gib", # Nodes larger than 4 TiB are painful to recover "int = 4096", "The maximum amount of data we store per machine", ), ( "max_rps_to_disk", "int = 1000", "How many disk IOs should be allowed to hit disk per instance", ), ) @staticmethod def default_desires(user_desires, extra_model_arguments: Dict[str, Any]): acceptable_consistency = set( ( AccessConsistency.best_effort, AccessConsistency.eventual, AccessConsistency.never, None, ) ) for key, value in user_desires.query_pattern.access_consistency: if value.target_consistency not in acceptable_consistency: raise ValueError( f"Elasticsearch can only provide {acceptable_consistency} access. " f"User asked for {key}={value}" ) # Lower RF = less write compute rf = _target_rf( user_desires, extra_model_arguments.get("copies_per_region", None) ) if rf < 3: rf_write_latency = Interval(low=0.2, mid=0.6, high=2, confidence=0.98) else: rf_write_latency = Interval(low=0.4, mid=1, high=2, confidence=0.98) if user_desires.query_pattern.access_pattern == AccessPattern.latency: return CapacityDesires( query_pattern=QueryPattern( access_pattern=AccessPattern.latency, estimated_mean_read_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.98 ), estimated_mean_write_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.98 ), # Elasticsearch reads and writes can take CPU time as # large cardinality search and such can be hard to predict.
estimated_mean_read_latency_ms=Interval( low=1, mid=2, high=100, confidence=0.98 ), # Writes depend heavily on rf and consistency estimated_mean_write_latency_ms=rf_write_latency, # Assume point queries "Single digit millisecond SLO" read_latency_slo_ms=FixedInterval( low=1, mid=10, high=100, confidence=0.98, ), write_latency_slo_ms=FixedInterval( low=1, mid=10, high=100, confidence=0.98, ), ), # Most latency sensitive elasticsearch clusters are in the # < 100GiB range data_shape=DataShape( estimated_state_size_gib=Interval( low=10, mid=100, high=1000, confidence=0.98 ), # Netflix Elasticsearch compresses with Deflate (gzip) # by default estimated_compression_ratio=Interval( minimum_value=1.4, maximum_value=8, low=2, mid=3, high=4, confidence=0.98, ), # Elasticsearch has a 1 GiB sidecar reserved_instance_app_mem_gib=1, ), ) else: return CapacityDesires( # (FIXME): Need to pair with ES folks on the exact values query_pattern=QueryPattern( access_pattern=AccessPattern.throughput, # Bulk writes and reads are larger in general estimated_mean_read_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.95 ), estimated_mean_write_size_bytes=Interval( low=4096, mid=16384, high=1048576, confidence=0.95 ), # (FIXME): Need es input # Elasticsearch analytics reads probably take extra time # as they are scrolling or doing complex aggregations estimated_mean_read_latency_ms=Interval( low=1, mid=20, high=100, confidence=0.98 ), # Throughput writes typically involve large bulks # which can be expensive, but it's rare for them to have a # huge tail estimated_mean_write_latency_ms=Interval( low=1, mid=10, high=100, confidence=0.98 ), # Assume scan queries "Tens of millisecond SLO" read_latency_slo_ms=FixedInterval( minimum_value=1, maximum_value=100, low=10, mid=20, high=100, confidence=0.98, ), write_latency_slo_ms=FixedInterval( minimum_value=1, maximum_value=100, low=10, mid=20, high=100, confidence=0.98, ), ), # Most throughput elasticsearch clusters are in the # < 1TiB range data_shape=DataShape( estimated_state_size_gib=Interval( low=100, mid=1000, high=10000, confidence=0.98 ), # Netflix Elasticsearch compresses with Deflate (gzip) # by default estimated_compression_ratio=Interval( minimum_value=1.4, maximum_value=8, low=2, mid=3, high=4, confidence=0.98, ), # Elasticsearch has a 1 GiB sidecar reserved_instance_app_mem_gib=1, ), ) nflx_elasticsearch_capacity_model = NflxElasticsearchCapacityModel()
cluster.cluster_params.update(params)
conditional_block
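For readers skimming this dump: each row stores a file's prefix, its suffix, and the held-out middle, with fim_type naming what kind of span was held out (here the conditional_block body of _upsert_params). Below is a minimal, self-contained Python sketch of how such a row recombines into contiguous source; the dict keys simply mirror the column names above and are an assumption, not a real loader API.

# Hypothetical row layout (assumed field names, no real loader API):
row = {
    "file_name": "elasticsearch.py",
    "prefix": "def _upsert_params(cluster, params):\n    if cluster.cluster_params:\n",
    "middle": "        cluster.cluster_params.update(params)\n",
    "suffix": "    else:\n        cluster.cluster_params = params\n",
    "fim_type": "conditional_block",
}

def reconstruct(row: dict) -> str:
    # The original file text is always prefix + middle + suffix, in that order.
    return row["prefix"] + row["middle"] + row["suffix"]

print(reconstruct(row))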
elasticsearch.py
import logging import math from decimal import Decimal from typing import Any from typing import Dict from typing import Optional from typing import Sequence from typing import Tuple from service_capacity_modeling.interface import AccessConsistency from service_capacity_modeling.interface import AccessPattern from service_capacity_modeling.interface import CapacityDesires from service_capacity_modeling.interface import CapacityPlan from service_capacity_modeling.interface import CapacityRequirement from service_capacity_modeling.interface import certain_float from service_capacity_modeling.interface import certain_int from service_capacity_modeling.interface import Clusters from service_capacity_modeling.interface import DataShape from service_capacity_modeling.interface import Drive from service_capacity_modeling.interface import FixedInterval from service_capacity_modeling.interface import Instance from service_capacity_modeling.interface import Interval from service_capacity_modeling.interface import QueryPattern from service_capacity_modeling.interface import RegionContext from service_capacity_modeling.interface import Requirements from service_capacity_modeling.models import CapacityModel from service_capacity_modeling.models.common import compute_stateful_zone from service_capacity_modeling.models.common import simple_network_mbps from service_capacity_modeling.models.common import sqrt_staffed_cores from service_capacity_modeling.models.common import working_set_from_drive_and_slo from service_capacity_modeling.stats import dist_for_interval logger = logging.getLogger(__name__) def _target_rf(desires: CapacityDesires, user_copies: Optional[int]) -> int: if user_copies is not None: assert user_copies > 1 return user_copies # Due to the relaxed durability and consistency requirements we can # run with RF=2 if desires.data_shape.durability_slo_order.mid < 1000: return 2 return 3 # Looks like Elasticsearch (Lucene) uses a tiered merge strategy of 10 # segments of 512 megs per # https://lucene.apache.org/core/8_1_0/core/org/apache/lucene/index/TieredMergePolicy.html#setSegmentsPerTier(double) # (FIXME) Verify what elastic merge actually does def _es_io_per_read(node_size_gib, segment_size_mb=512): size_mib = node_size_gib * 1024 segments = max(1, size_mib // segment_size_mb) # 10 segments per tier, plus 1 for L0 (avg) levels = 1 + int(math.ceil(math.log(segments, 10))) return levels def _estimate_elasticsearch_requirement( instance: Instance, desires: CapacityDesires, working_set: float, reads_per_second: float, max_rps_to_disk: int, zones_per_region: int = 3, copies_per_region: int = 3, ) -> CapacityRequirement: """Estimate the capacity required for one zone given a regional desire The input desires should be the **regional** desire, and this function will return the zonal capacity requirement """ # Provision 1.5x the staffed cores to leave headroom for background work # (merging mostly) needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5) # Keep half of the bandwidth available for backup needed_network_mbps = simple_network_mbps(desires) * 2 needed_disk = math.ceil( (1.0 / desires.data_shape.estimated_compression_ratio.mid) * desires.data_shape.estimated_state_size_gib.mid * copies_per_region, ) # Rough estimate of how many instances we would need just for the CPU # Note that this is a lower bound, we might end up with more.
needed_cores = math.ceil( max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz)) ) rough_count = math.ceil(needed_cores / instance.cpu) # Generally speaking we want fewer than some number of reads per second # hitting disk per instance. If we don't have many reads we don't need to # hold much data in memory. instance_rps = max(1, reads_per_second // rough_count) disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count)) rps_working_set = min(1.0, disk_rps / max_rps_to_disk) # If disk RPS will be smaller than our target because there are no # reads, we don't need to hold as much data in memory needed_memory = min(working_set, rps_working_set) * needed_disk # Now convert to per zone needed_cores = needed_cores // zones_per_region needed_disk = needed_disk // zones_per_region needed_memory = int(needed_memory // zones_per_region) logger.debug( "Need (cpu, mem, disk, working) = (%s, %s, %s, %f)", needed_cores, needed_memory, needed_disk, working_set, ) return CapacityRequirement( requirement_type="elasticsearch-data-zonal", core_reference_ghz=desires.core_reference_ghz, cpu_cores=certain_int(needed_cores), mem_gib=certain_float(needed_memory), disk_gib=certain_float(needed_disk), network_mbps=certain_float(needed_network_mbps), context={ "working_set": min(working_set, rps_working_set), "rps_working_set": rps_working_set, "disk_slo_working_set": working_set, "replication_factor": copies_per_region, "compression_ratio": round( 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2 ), "read_per_second": reads_per_second, }, ) def _upsert_params(cluster, params): if cluster.cluster_params: cluster.cluster_params.update(params) else: cluster.cluster_params = params # pylint: disable=too-many-locals def _estimate_elasticsearch_cluster_zonal( instance: Instance, drive: Drive, desires: CapacityDesires, zones_per_region: int = 3, copies_per_region: int = 3, max_local_disk_gib: int = 4096, max_regional_size: int = 240, max_rps_to_disk: int = 500, ) -> Optional[CapacityPlan]: # Netflix Elasticsearch doesn't like to deploy on really small instances if instance.cpu < 2 or instance.ram_gib < 14: return None # (FIXME): Need elasticsearch input # Right now Elasticsearch doesn't deploy to cloud drives, just adding this # here and leaving the capability to handle cloud drives for the future if instance.drive is None: return None rps = desires.query_pattern.estimated_read_per_second.mid // zones_per_region # Based on the disk latency and the read latency SLOs we adjust our # working set to keep more or less data in RAM. Faster drives need # less fronting RAM. ws_drive = instance.drive or drive working_set = working_set_from_drive_and_slo( drive_read_latency_dist=dist_for_interval(ws_drive.read_io_latency_ms), read_slo_latency_dist=dist_for_interval( desires.query_pattern.read_latency_slo_ms ), estimated_working_set=desires.data_shape.estimated_working_set_percent, # Elasticsearch has looser latency SLOs, target the 90th percentile of disk # latency to keep in RAM. 
target_percentile=0.90, ).mid requirement = _estimate_elasticsearch_requirement( instance=instance, desires=desires, working_set=working_set, reads_per_second=rps, zones_per_region=zones_per_region, copies_per_region=copies_per_region, max_rps_to_disk=max_rps_to_disk, ) base_mem = ( desires.data_shape.reserved_instance_app_mem_gib + desires.data_shape.reserved_instance_system_mem_gib ) cluster = compute_stateful_zone( instance=instance, drive=drive, needed_cores=int(requirement.cpu_cores.mid), needed_disk_gib=int(requirement.disk_gib.mid), needed_memory_gib=int(requirement.mem_gib.mid), needed_network_mbps=requirement.network_mbps.mid, # Assume that by provisioning enough memory we'll get # a 90% hit rate, but take into account the reads per read # from the per node dataset using tiered merging # FIXME: I feel like this can be improved required_disk_ios=lambda x: _es_io_per_read(x) * math.ceil(0.1 * rps), # Elasticsearch requires ephemeral disks to stay well under 100% full # (hence the 1.4x factor) because tiered merging can make progress as # long as there is some headroom required_disk_space=lambda x: x * 1.4, max_local_disk_gib=max_local_disk_gib, # elasticsearch clusters can autobalance via shard placement cluster_size=lambda x: x, min_count=1, # Sidecars/System take away memory from elasticsearch # Elasticsearch uses half of available system memory, up to a max of # 32 GiB, for its heap (compressed oops) reserve_memory=lambda x: base_mem + min(32, x / 2), core_reference_ghz=requirement.core_reference_ghz, ) # Communicate to the actual provisioner if we want reduced RF params = {"elasticsearch.copies": copies_per_region} _upsert_params(cluster, params) # elasticsearch clusters generally should try to stay under some total number # of nodes. Orgs do this for all kinds of reasons such as # * Security group limits (you must have < 500 rules if you're # ingressing public ips) # * Maintenance. If your restart script does one node at a time you want # smaller clusters so your restarts don't take months. # * NxN network issues. Sometimes smaller clusters of bigger nodes # are better for network propagation if cluster.count > (max_regional_size // zones_per_region): return None ec2_cost = zones_per_region * cluster.annual_cost cluster.cluster_type = "elasticsearch-data" clusters = Clusters( total_annual_cost=round(Decimal(ec2_cost), 2), zonal=[cluster] * zones_per_region, regional=list(), ) return CapacityPlan( requirements=Requirements(zonal=[requirement] * zones_per_region), candidate_clusters=clusters, ) class
(CapacityModel): @staticmethod def capacity_plan( instance: Instance, drive: Drive, context: RegionContext, desires: CapacityDesires, extra_model_arguments: Dict[str, Any], ) -> Optional[CapacityPlan]: # (FIXME): Need elasticsearch input # TODO: Use durability requirements to compute RF. copies_per_region: int = _target_rf( desires, extra_model_arguments.get("copies_per_region", None) ) max_regional_size: int = extra_model_arguments.get("max_regional_size", 240) max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 1000) # Very large nodes are hard to recover max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5000) return _estimate_elasticsearch_cluster_zonal( instance=instance, drive=drive, desires=desires, zones_per_region=context.zones_in_region, copies_per_region=copies_per_region, max_regional_size=max_regional_size, max_local_disk_gib=max_local_disk_gib, max_rps_to_disk=max_rps_to_disk, ) @staticmethod def description(): return "Netflix Streaming Elasticsearch Model" @staticmethod def extra_model_arguments() -> Sequence[Tuple[str, str, str]]: return ( ( "copies_per_region", "int = 3", "How many copies of the data will exist e.g. RF=3. If unsupplied" " this will be deduced from durability and consistency desires", ), ( "max_regional_size", # Twice the size of our largest cluster "int = 240", "What is the maximum size of a cluster in this region", ), ( "max_local_disk_gib", # Nodes larger than 4 TiB are painful to recover "int = 4096", "The maximum amount of data we store per machine", ), ( "max_rps_to_disk", "int = 1000", "How many disk IOs should be allowed to hit disk per instance", ), ) @staticmethod def default_desires(user_desires, extra_model_arguments: Dict[str, Any]): acceptable_consistency = set( ( AccessConsistency.best_effort, AccessConsistency.eventual, AccessConsistency.never, None, ) ) for key, value in user_desires.query_pattern.access_consistency: if value.target_consistency not in acceptable_consistency: raise ValueError( f"Elasticsearch can only provide {acceptable_consistency} access. " f"User asked for {key}={value}" ) # Lower RF = less write compute rf = _target_rf( user_desires, extra_model_arguments.get("copies_per_region", None) ) if rf < 3: rf_write_latency = Interval(low=0.2, mid=0.6, high=2, confidence=0.98) else: rf_write_latency = Interval(low=0.4, mid=1, high=2, confidence=0.98) if user_desires.query_pattern.access_pattern == AccessPattern.latency: return CapacityDesires( query_pattern=QueryPattern( access_pattern=AccessPattern.latency, estimated_mean_read_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.98 ), estimated_mean_write_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.98 ), # Elasticsearch reads and writes can take CPU time as # large cardinality search and such can be hard to predict.
estimated_mean_read_latency_ms=Interval( low=1, mid=2, high=100, confidence=0.98 ), # Writes depend heavily on rf and consistency estimated_mean_write_latency_ms=rf_write_latency, # Assume point queries "Single digit millisecond SLO" read_latency_slo_ms=FixedInterval( low=1, mid=10, high=100, confidence=0.98, ), write_latency_slo_ms=FixedInterval( low=1, mid=10, high=100, confidence=0.98, ), ), # Most latency sensitive elasticsearch clusters are in the # < 100GiB range data_shape=DataShape( estimated_state_size_gib=Interval( low=10, mid=100, high=1000, confidence=0.98 ), # Netflix Elasticsearch compresses with Deflate (gzip) # by default estimated_compression_ratio=Interval( minimum_value=1.4, maximum_value=8, low=2, mid=3, high=4, confidence=0.98, ), # Elasticsearch has a 1 GiB sidecar reserved_instance_app_mem_gib=1, ), ) else: return CapacityDesires( # (FIXME): Need to pair with ES folks on the exact values query_pattern=QueryPattern( access_pattern=AccessPattern.throughput, # Bulk writes and reads are larger in general estimated_mean_read_size_bytes=Interval( low=128, mid=4096, high=131072, confidence=0.95 ), estimated_mean_write_size_bytes=Interval( low=4096, mid=16384, high=1048576, confidence=0.95 ), # (FIXME): Need es input # Elasticsearch analytics reads probably take extra time # as they are scrolling or doing complex aggregations estimated_mean_read_latency_ms=Interval( low=1, mid=20, high=100, confidence=0.98 ), # Throughput writes typically involve large bulks # which can be expensive, but it's rare for them to have a # huge tail estimated_mean_write_latency_ms=Interval( low=1, mid=10, high=100, confidence=0.98 ), # Assume scan queries "Tens of millisecond SLO" read_latency_slo_ms=FixedInterval( minimum_value=1, maximum_value=100, low=10, mid=20, high=100, confidence=0.98, ), write_latency_slo_ms=FixedInterval( minimum_value=1, maximum_value=100, low=10, mid=20, high=100, confidence=0.98, ), ), # Most throughput elasticsearch clusters are in the # < 1TiB range data_shape=DataShape( estimated_state_size_gib=Interval( low=100, mid=1000, high=10000, confidence=0.98 ), # Netflix Elasticsearch compresses with Deflate (gzip) # by default estimated_compression_ratio=Interval( minimum_value=1.4, maximum_value=8, low=2, mid=3, high=4, confidence=0.98, ), # Elasticsearch has a 1 GiB sidecar reserved_instance_app_mem_gib=1, ), ) nflx_elasticsearch_capacity_model = NflxElasticsearchCapacityModel()
NflxElasticsearchCapacityModel
identifier_name
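The _es_io_per_read helper in the elasticsearch.py rows above estimates disk IOs per read from the tiered-merge level count. A standalone sketch with illustrative node sizes (the numbers are examples, not benchmarks):

import math

def es_io_per_read(node_size_gib, segment_size_mb=512):
    # Same math as _es_io_per_read above: one disk IO per merge level,
    # with 10 segments per tier plus 1 for L0.
    size_mib = node_size_gib * 1024
    segments = max(1, size_mib // segment_size_mb)
    return 1 + int(math.ceil(math.log(segments, 10)))

# Illustrative node sizes only (GiB): levels grow logarithmically, so a
# 10 GiB node costs ~3 IOs per read while a 2 TiB node costs ~5.
for gib in (1, 10, 100, 2048):
    print(gib, es_io_per_read(gib))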
script.js
function tree() { var data, root, treemap, svg, i = 0, duration = 650, // margin = {top: 20, right: 10, bottom: 20, left: 50}, margin = {top: 0, right: 0, bottom: 80, left: 50}, width = 960 - 4 - margin.left - margin.right, // fitting in block frame height = 800 - 4 - margin.top - margin.bottom, // fitting in block frame width_multiplier = 180, height_extra_space = 0; // update; function chart(selection) { selection.each(function() { height = height - margin.top - margin.bottom; width = width - margin.left - margin.right; // append the svg object to the selection svg = selection.append('svg') .attr('width', width + margin.left + margin.right) .attr('height', height + margin.top + margin.bottom) .append('g') .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); // declares a tree layout and assigns the size of the tree treemap = d3.tree().size([height, width]); // assign parent, children, height, depth root = d3.hierarchy(data, function(d) { return d.children }); root.x0 = (height / 2); // start vertically centered root.y0 = 0; // start at the left edge // collapse after the second level root.children.forEach(collapse); update(root); function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } }); } chart.width = function(value) { if (!arguments.length) return width; width = value; return chart; }; chart.height = function(value) { if (!arguments.length) return height; height = value; return chart; }; chart.margin = function(value) { if (!arguments.length) return margin; margin = value; return chart; }; chart.data = function(value) { if (!arguments.length) return data; data = value; if (typeof updateData === 'function') updateData(); return chart; }; chart.expandTree = function(value) { root.children.forEach(expand); update(root); function expand(d) { if (d._children) { d.children = d._children; d.children.forEach(expand); d._children = null; } } }; // collapse the node and all its children chart.collapse = function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } chart.collapseTree = function(value) { root.children.forEach(collapse); update(root); function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } }; function update(source) { // assigns the x and y position for the nodes var treeData = treemap(root); // compute the new tree layout var nodes = treeData.descendants(), links = treeData.descendants().slice(1); // normalise for fixed depth nodes.forEach(function(d) { // d.x = d.depth * 180; d.y = d.depth * width_multiplier; // d.x = d.depth * 180; }); // ****************** Nodes section *************************** // update the nodes ... var nodeArray = svg.selectAll('g.node') .data(nodes, function(d) { return d.id || (d.id = ++i); }); // console.log(nodeArray); // Enter any new nodes at the parent's previous position. var nodeEnter = nodeArray.enter().append('g') .attr('class', 'node') .attr('transform', function(d) { return 'translate(' + (source.y0 + margin.top) + ',' + (source.x0 + margin.left) + ')'; // return 'translate(' + (source.y0) + ',' + (source.x0) + ')'; }) .on('click', click); // Add circle for the nodes, which is filled lightsteelblue for nodes that have hidden children (_children). nodeEnter.append('circle') .attr('class', 'node') .attr('r', 1e-6) .style('fill', function(d) { return d._children ?
'lightsteelblue' : '#fff'; }); // Append the node label (data.name), either to the left or right of the node circle, depending on whether the node has children. nodeEnter.append("text") .attr("x", function(d) { return d.children || d._children ? -15 : 15; }) .attr("dy", ".35em") .attr("style", "node") .attr("text-anchor", function(d) { return d.children || d._children ? "end" : "start"; }) .text(function(d) { // console.log(d); // return (d.children || d._children) ? d.data.name.capitalize() : d.data.name;}) return d.data.name;}); // .style("fill-opacity", 1e-6); // Add the number of children inside the node circle, whether they are unfolded or not. nodeEnter.append('text') .attr('x', 0) .attr('y', 3) .attr("text-anchor", "middle") .attr('cursor', 'pointer') .style('font-size', '10px') .text(function(d) { if (d.children) return d.children.length; else if (d._children) return d._children.length; }); // UPDATE var nodeUpdate = nodeEnter.merge(nodeArray); // Transition the resulting array to the proper position for the node. nodeUpdate.transition().duration(duration) .attr('transform', function(d) { return 'translate(' + (d.y + margin.top) + ',' + (d.x + margin.left) + ')'; }); // Update the node attributes and style, coloring search results red. nodeUpdate.select('circle.node') .attr('r', 9) .style("fill", function(d) { if(d.data.class === "found") { return "#ff4136"; //red
return "lightsteelblue"; } }) .attr('cursor', 'pointer') .style("stroke", function(d) { if (d.data.class === "found") { return "#ff4136"; //red } ; }) // Remove any exiting nodes var nodeExit = nodeArray.exit() .transition().duration(duration) .attr('transform', function(d) { return 'translate(' + (source.y + margin.top) + ',' + (source.x + margin.left) + ')'; }) .remove(); // on exit reduce the node circles size to 0 nodeExit.select('circle') .attr('r', 1e-6); // on exit reduce the opacity of text labels nodeExit.select('text') .style('fill-opacity', 1e-6); // adding zoom and panning d3.select("svg").call(d3.zoom().on("zoom", function() { svg.attr("transform", d3.event.transform) })); // trying to invert the direction of the zoom wheel // .on("wheel", function(d){ // var direction = d3.event.wheelDelta < 0 ? 'down' : 'up'; // zoom(direction === 'up' ? d : d.parent); // }); // ****************** links section *************************** // update the links var link = svg.selectAll('path.link').data(links, function(d) { return d.id }); // enter any new links at the parent's previous position var linkEnter = link.enter().insert('path', 'g') .attr('class', 'link') .attr('d', function(d) { var o = {x: source.x0, y: source.y0}; return diagonal(o, o); }); // UPDATE var linkUpdate = linkEnter.merge(link); // transition back to the parent element position linkUpdate.transition().duration(duration) .attr('d', function(d) { return diagonal(d, d.parent); }) .style("stroke",function(d) { if(d.data.class==="found") { return "#ff4136"; } }); // remove any exiting links var linkExit = link.exit() .transition().duration(duration) .attr('d', function(d) { var o = {x: source.x, y: source.y}; return diagonal(o, o); }) .remove(); // store the old positions for transition nodes.forEach(function(d) { d.x0 = d.x; d.y0 = d.y; }); // creates a curved (diagonal) path from parent to the child nodes function diagonal(s, d) { var path = 'M ' + (s.y + margin.top) + ' ' + (s.x + margin.left) + 'C ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (s.x + margin.left) + ', ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (d.x + margin.left) + ', ' + (d.y + margin.top) + ' ' + (d.x + margin.left); return path; } // toggle children on click function click(d) { toggleChildren(d); printNodeInfo(d); } function toggleChildren(d) { if (d.children) { d._children = d.children; d.children = null; } else { d.children = d._children; d._children = null; } update(d); } } chart.updateWidth = function(value) { width_multiplier = value; update(data); } chart.updateHeight = function(value) { height_extra_space = value; update(data); } String.prototype.capitalize = function() { return this.charAt(0).toUpperCase() + this.slice(1).toLowerCase(); }; function zoom() { var scale = d3.event.scale, translation = d3.event.translate, tbound = -h * scale, bbound = h * scale, lbound = (-w + m[1]) * scale, rbound = (w - m[3]) * scale; // limit translation to thresholds translation = [ Math.max(Math.min(translation[0], rbound), lbound), Math.max(Math.min(translation[1], bbound), tbound) ]; d3.select(".drawarea") .attr("transform", "translate(" + translation + ")" + " scale(" + scale + ")"); } chart.openPaths = function(paths) { for(var i=0; i<paths.length; i++) { if(paths[i].id !== "1") //i.e. 
not root { paths[i].class = 'found'; console.log("right after setting class to 'found' "); if(paths[i]._children) { //if children are hidden: open them, otherwise: don't do anything paths[i].children = paths[i]._children; paths[i]._children = null; } else if(paths[i].children) { console.log("There are children here, tralalalala"); } else { console.log("For some reason, I don't discover hidden children"); } update(paths[i]); } } } chart.getCurrentRoot = function() { return root; } chart.centerNode = function(d) { // if (active.node() === this) return reset(); // active.classed("active", false); // active = d3.select(this).classed("active", true); // var bounds = path.bounds(d), // dx = bounds[1][0] - bounds[0][0], // dy = bounds[1][1] - bounds[0][1], // x = (bounds[0][0] + bounds[1][0]) / 2, // y = (bounds[0][1] + bounds[1][1]) / 2, // scale = Math.max(1, Math.min(8, 0.9 / Math.max(dx / width, dy / height))), // translate = [width / 2 - scale * x, height / 2 - scale * y]; svg.transition().duration(duration) // .call(zoom.translate(translate).scale(scale).event); // not in d3 v4 .call(zoom.transform, d3.zoomIdentity.translate(translate[0],translate[1]).scale(scale) ); // updated for d3 v4 } // chart.centerNode = function() // { // scale = zoomListener.scale(); // x = -source.y0; // y = -source.x0; // x = x * scale + viewerWidth / 2; // y = y * scale + viewerHeight / 2; // d3.select('g').transition() // .duration(duration) // .attr("transform", "translate(" + x + "," + y + ")scale(" + scale + ")"); // zoomListener.scale(scale); // zoomListener.translate([x, y]); // } return chart; } function printNodeInfo(d) { var location = document.getElementById("infoField"); location.innerHTML = "&ensp;name: " + d.data.name+"<br/>" + "&ensp;value: " + d.data.value; } function loadOtherTree(value) { // remove svg d3.select("svg").remove(); // build new tree object myTree = tree().height(height).width(width); // load new data d3.json(value, function(error, data) { if (error) throw error; root = data; console.log(root); root.x0 = height / 2; root.y0 = 0; myTree.data(root); // put new chart in place d3.select('#chart').call(myTree); }); } function reloadInitialTree() { loadOtherTree("flare.json"); // $("#search").object.text = ""; } function centerTree() { alert("To be implemented."); } // from: https://css-tricks.com/snippets/javascript/get-url-variables/ function getQueryVariable(variable) { var query = window.location.search.substring(1); var vars = query.split("&"); for (var i=0;i<vars.length;i++) { var pair = vars[i].split("="); if(pair[0] == variable){return pair[1];} } return(false); } function appendAxes() { var axis_data_x = [-100, 800], axis_data_y = [-100, 600]; console.log(d3.select("#chart")); var svg = d3.select("#chart") .append("svg") .attr("width", width) .attr("height", height); var xscale = d3.scaleLinear() .domain([0, d3.max(axis_data_x)]) .range([0, width - 100]); var yscale = d3.scaleLinear() .domain([0, d3.max(axis_data_y)]) .range([height/2, 0]); var x_axis = d3.axisBottom() .scale(xscale); var y_axis = d3.axisLeft() .scale(yscale); svg.append("g") .attr("transform", "translate(50, 10)") .call(y_axis); var xAxisTranslate = height/2 + 10; svg.append("g") .attr("transform", "translate(50, " + xAxisTranslate +")") .call(x_axis) } // var fisheye = d3.fisheye.circular // .radius(200) // .distortion(2); // // svg.on("mousemove", function() // { // fisheye.focus(d3.mouse(this)); // // node.each(function(d) { d.fisheye = fisheye(d); }) // .attr("cx", function(d) { return d.fisheye.x; }) // 
.attr("cy", function(d) { return d.fisheye.y; }) // .attr("r", function(d) { return d.fisheye.z * 4.5; }); // // link.attr("x1", function(d) { return d.source.fisheye.x; }) // .attr("y1", function(d) { return d.source.fisheye.y; }) // .attr("x2", function(d) { return d.target.fisheye.x; }) // .attr("y2", function(d) { return d.target.fisheye.y; }); // });
} else if(d._children) {
random_line_split
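All of the script.js rows revolve around D3's collapse idiom: visible children live on children, collapsed ones are parked on _children, and a click swaps them. A small Python sketch of the same toggle, just to isolate the pattern from the D3 rendering; the Node class here is hypothetical, not part of script.js.

class Node:
    # Hypothetical stand-in for a d3.hierarchy node, mirroring the
    # toggleChildren/collapse functions in script.js above.
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []
        self._children = None

    def toggle(self):
        if self.children:
            self.children, self._children = [], self.children
        else:
            self.children, self._children = self._children or [], None

root = Node("root", [Node("a"), Node("b")])
root.toggle()  # collapse: children are parked on _children
assert root.children == [] and len(root._children) == 2
root.toggle()  # expand: _children move back to children
assert len(root.children) == 2 and root._children is None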
script.js
function tree()
function printNodeInfo(d) { var location = document.getElementById("infoField"); location.innerHTML = "&ensp;name: " + d.data.name+"<br/>" + "&ensp;value: " + d.data.value; } function loadOtherTree(value) { // remove svg d3.select("svg").remove(); // build new tree object myTree = tree().height(height).width(width); // load new data d3.json(value, function(error, data) { if (error) throw error; root = data; console.log(root); root.x0 = height / 2; root.y0 = 0; myTree.data(root); // put new chart in place d3.select('#chart').call(myTree); }); } function reloadInitialTree() { loadOtherTree("flare.json"); // $("#search").object.text = ""; } function centerTree() { alert("To be implemented."); } // from: https://css-tricks.com/snippets/javascript/get-url-variables/ function getQueryVariable(variable) { var query = window.location.search.substring(1); var vars = query.split("&"); for (var i=0;i<vars.length;i++) { var pair = vars[i].split("="); if(pair[0] == variable){return pair[1];} } return(false); } function appendAxes() { var axis_data_x = [-100, 800], axis_data_y = [-100, 600]; console.log(d3.select("#chart")); var svg = d3.select("#chart") .append("svg") .attr("width", width) .attr("height", height); var xscale = d3.scaleLinear() .domain([0, d3.max(axis_data_x)]) .range([0, width - 100]); var yscale = d3.scaleLinear() .domain([0, d3.max(axis_data_y)]) .range([height/2, 0]); var x_axis = d3.axisBottom() .scale(xscale); var y_axis = d3.axisLeft() .scale(yscale); svg.append("g") .attr("transform", "translate(50, 10)") .call(y_axis); var xAxisTranslate = height/2 + 10; svg.append("g") .attr("transform", "translate(50, " + xAxisTranslate +")") .call(x_axis) } // var fisheye = d3.fisheye.circular // .radius(200) // .distortion(2); // // svg.on("mousemove", function() // { // fisheye.focus(d3.mouse(this)); // // node.each(function(d) { d.fisheye = fisheye(d); }) // .attr("cx", function(d) { return d.fisheye.x; }) // .attr("cy", function(d) { return d.fisheye.y; }) // .attr("r", function(d) { return d.fisheye.z * 4.5; }); // // link.attr("x1", function(d) { return d.source.fisheye.x; }) // .attr("y1", function(d) { return d.source.fisheye.y; }) // .attr("x2", function(d) { return d.target.fisheye.x; }) // .attr("y2", function(d) { return d.target.fisheye.y; }); // });
{ var data, root, treemap, svg, i = 0, duration = 650, // margin = {top: 20, right: 10, bottom: 20, left: 50}, margin = {top: 0, right: 0, bottom: 80, left: 50}, width = 960 - 4 - margin.left - margin.right, // fitting in block frame height = 800 - 4 - margin.top - margin.bottom, // fitting in block frame width_multiplier = 180, height_extra_space = 0; // update; function chart(selection) { selection.each(function() { height = height - margin.top - margin.bottom; width = width - margin.left - margin.right; // append the svg object to the selection svg = selection.append('svg') .attr('width', width + margin.left + margin.right) .attr('height', height + margin.top + margin.bottom) .append('g') .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); // declares a tree layout and assigns the size of the tree treemap = d3.tree().size([height, width]); // assign parent, children, height, depth root = d3.hierarchy(data, function(d) { return d.children }); root.x0 = (height / 2); // start vertically centered root.y0 = 0; // start at the left edge // collapse after the second level root.children.forEach(collapse); update(root); function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } }); } chart.width = function(value) { if (!arguments.length) return width; width = value; return chart; }; chart.height = function(value) { if (!arguments.length) return height; height = value; return chart; }; chart.margin = function(value) { if (!arguments.length) return margin; margin = value; return chart; }; chart.data = function(value) { if (!arguments.length) return data; data = value; if (typeof updateData === 'function') updateData(); return chart; }; chart.expandTree = function(value) { root.children.forEach(expand); update(root); function expand(d) { if (d._children) { d.children = d._children; d.children.forEach(expand); d._children = null; } } }; // collapse the node and all its children chart.collapse = function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } chart.collapseTree = function(value) { root.children.forEach(collapse); update(root); function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } }; function update(source) { // assigns the x and y position for the nodes var treeData = treemap(root); // compute the new tree layout var nodes = treeData.descendants(), links = treeData.descendants().slice(1); // normalise for fixed depth nodes.forEach(function(d) { // d.x = d.depth * 180; d.y = d.depth * width_multiplier; // d.x = d.depth * 180; }); // ****************** Nodes section *************************** // update the nodes ... var nodeArray = svg.selectAll('g.node') .data(nodes, function(d) { return d.id || (d.id = ++i); }); // console.log(nodeArray); // Enter any new nodes at the parent's previous position. var nodeEnter = nodeArray.enter().append('g') .attr('class', 'node') .attr('transform', function(d) { return 'translate(' + (source.y0 + margin.top) + ',' + (source.x0 + margin.left) + ')'; // return 'translate(' + (source.y0) + ',' + (source.x0) + ')'; }) .on('click', click); // Add circle for the nodes, which is filled lightsteelblue for nodes that have hidden children (_children). nodeEnter.append('circle') .attr('class', 'node') .attr('r', 1e-6) .style('fill', function(d) { return d._children ?
'lightsteelblue' : '#fff'; }); // Append the node label (data.name), either to the left or right of the node circle, depending on whether the node has children. nodeEnter.append("text") .attr("x", function(d) { return d.children || d._children ? -15 : 15; }) .attr("dy", ".35em") .attr("style", "node") .attr("text-anchor", function(d) { return d.children || d._children ? "end" : "start"; }) .text(function(d) { // console.log(d); // return (d.children || d._children) ? d.data.name.capitalize() : d.data.name;}) return d.data.name;}); // .style("fill-opacity", 1e-6); // Add the number of children inside the node circle, whether they are unfolded or not. nodeEnter.append('text') .attr('x', 0) .attr('y', 3) .attr("text-anchor", "middle") .attr('cursor', 'pointer') .style('font-size', '10px') .text(function(d) { if (d.children) return d.children.length; else if (d._children) return d._children.length; }); // UPDATE var nodeUpdate = nodeEnter.merge(nodeArray); // Transition the resulting array to the proper position for the node. nodeUpdate.transition().duration(duration) .attr('transform', function(d) { return 'translate(' + (d.y + margin.top) + ',' + (d.x + margin.left) + ')'; }); // Update the node attributes and style, coloring search results red. nodeUpdate.select('circle.node') .attr('r', 9) .style("fill", function(d) { if(d.data.class === "found") { return "#ff4136"; //red } else if(d._children) { return "lightsteelblue"; } }) .attr('cursor', 'pointer') .style("stroke", function(d) { if (d.data.class === "found") { return "#ff4136"; //red } ; }) // Remove any exiting nodes var nodeExit = nodeArray.exit() .transition().duration(duration) .attr('transform', function(d) { return 'translate(' + (source.y + margin.top) + ',' + (source.x + margin.left) + ')'; }) .remove(); // on exit reduce the node circles size to 0 nodeExit.select('circle') .attr('r', 1e-6); // on exit reduce the opacity of text labels nodeExit.select('text') .style('fill-opacity', 1e-6); // adding zoom and panning d3.select("svg").call(d3.zoom().on("zoom", function() { svg.attr("transform", d3.event.transform) })); // trying to invert the direction of the zoom wheel // .on("wheel", function(d){ // var direction = d3.event.wheelDelta < 0 ? 'down' : 'up'; // zoom(direction === 'up' ? 
d : d.parent); // }); // ****************** links section *************************** // update the links var link = svg.selectAll('path.link').data(links, function(d) { return d.id }); // enter any new links at the parent's previous position var linkEnter = link.enter().insert('path', 'g') .attr('class', 'link') .attr('d', function(d) { var o = {x: source.x0, y: source.y0}; return diagonal(o, o); }); // UPDATE var linkUpdate = linkEnter.merge(link); // transition back to the parent element position linkUpdate.transition().duration(duration) .attr('d', function(d) { return diagonal(d, d.parent); }) .style("stroke",function(d) { if(d.data.class==="found") { return "#ff4136"; } }); // remove any exiting links var linkExit = link.exit() .transition().duration(duration) .attr('d', function(d) { var o = {x: source.x, y: source.y}; return diagonal(o, o); }) .remove(); // store the old positions for transition nodes.forEach(function(d) { d.x0 = d.x; d.y0 = d.y; }); // creates a curved (diagonal) path from parent to the child nodes function diagonal(s, d) { var path = 'M ' + (s.y + margin.top) + ' ' + (s.x + margin.left) + 'C ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (s.x + margin.left) + ', ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (d.x + margin.left) + ', ' + (d.y + margin.top) + ' ' + (d.x + margin.left); return path; } // toggle children on click function click(d) { toggleChildren(d); printNodeInfo(d); } function toggleChildren(d) { if (d.children) { d._children = d.children; d.children = null; } else { d.children = d._children; d._children = null; } update(d); } } chart.updateWidth = function(value) { width_multiplier = value; update(data); } chart.updateHeight = function(value) { height_extra_space = value; update(data); } String.prototype.capitalize = function() { return this.charAt(0).toUpperCase() + this.slice(1).toLowerCase(); }; function zoom() { var scale = d3.event.scale, translation = d3.event.translate, tbound = -h * scale, bbound = h * scale, lbound = (-w + m[1]) * scale, rbound = (w - m[3]) * scale; // limit translation to thresholds translation = [ Math.max(Math.min(translation[0], rbound), lbound), Math.max(Math.min(translation[1], bbound), tbound) ]; d3.select(".drawarea") .attr("transform", "translate(" + translation + ")" + " scale(" + scale + ")"); } chart.openPaths = function(paths) { for(var i=0; i<paths.length; i++) { if(paths[i].id !== "1") //i.e. 
not root { paths[i].class = 'found'; console.log("right after setting class to 'found' "); if(paths[i]._children) { //if children are hidden: open them, otherwise: don't do anything paths[i].children = paths[i]._children; paths[i]._children = null; } else if(paths[i].children) { console.log("There are children here, tralalalala"); } else { console.log("For some reason, I don't discover hidden children"); } update(paths[i]); } } } chart.getCurrentRoot = function() { return root; } chart.centerNode = function(d) { // if (active.node() === this) return reset(); // active.classed("active", false); // active = d3.select(this).classed("active", true); // var bounds = path.bounds(d), // dx = bounds[1][0] - bounds[0][0], // dy = bounds[1][1] - bounds[0][1], // x = (bounds[0][0] + bounds[1][0]) / 2, // y = (bounds[0][1] + bounds[1][1]) / 2, // scale = Math.max(1, Math.min(8, 0.9 / Math.max(dx / width, dy / height))), // translate = [width / 2 - scale * x, height / 2 - scale * y]; svg.transition().duration(duration) // .call(zoom.translate(translate).scale(scale).event); // not in d3 v4 .call(zoom.transform, d3.zoomIdentity.translate(translate[0],translate[1]).scale(scale) ); // updated for d3 v4 } // chart.centerNode = function() // { // scale = zoomListener.scale(); // x = -source.y0; // y = -source.x0; // x = x * scale + viewerWidth / 2; // y = y * scale + viewerHeight / 2; // d3.select('g').transition() // .duration(duration) // .attr("transform", "translate(" + x + "," + y + ")scale(" + scale + ")"); // zoomListener.scale(scale); // zoomListener.translate([x, y]); // } return chart; }
identifier_body
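The diagonal(s, d) helper inside the tree() body above emits an SVG cubic Bezier path between a node and its parent. A direct Python port, handy for checking the string it produces; the sample coordinates and margins below are the sketch's own values, not data from script.js.

def diagonal(s, d, margin_top=0, margin_left=50):
    # Port of the JS diagonal() above: d3 trees swap axes, so `y` is the
    # horizontal coordinate and `x` the vertical one.
    mid_y = (s["y"] + d["y"] + margin_top * 2) / 2
    return (
        f"M {s['y'] + margin_top} {s['x'] + margin_left}"
        f"C {mid_y} {s['x'] + margin_left}, "
        f"{mid_y} {d['x'] + margin_left}, "
        f"{d['y'] + margin_top} {d['x'] + margin_left}"
    )

child = {"x": 120, "y": 180}
parent = {"x": 300, "y": 0}
print(diagonal(child, parent))
# -> M 180 170C 90.0 170, 90.0 350, 0 350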
script.js
function tree() { var data, root, treemap, svg, i = 0, duration = 650, // margin = {top: 20, right: 10, bottom: 20, left: 50}, margin = {top: 0, right: 0, bottom: 80, left: 50}, width = 960 - 4 - margin.left - margin.right, // fitting in block frame height = 800 - 4 - margin.top - margin.bottom, // fitting in block frame width_multiplier = 180, height_extra_space = 0; // update; function chart(selection) { selection.each(function() { height = height - margin.top - margin.bottom; width = width - margin.left - margin.right; // append the svg object to the selection svg = selection.append('svg') .attr('width', width + margin.left + margin.right) .attr('height', height + margin.top + margin.bottom) .append('g') .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); // declares a tree layout and assigns the size of the tree treemap = d3.tree().size([height, width]); // assign parent, children, height, depth root = d3.hierarchy(data, function(d) { return d.children }); root.x0 = (height / 2); // start vertically centered root.y0 = 0; // start at the left edge // collapse after the second level root.children.forEach(collapse); update(root); function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } }); } chart.width = function(value) { if (!arguments.length) return width; width = value; return chart; }; chart.height = function(value) { if (!arguments.length) return height; height = value; return chart; }; chart.margin = function(value) { if (!arguments.length) return margin; margin = value; return chart; }; chart.data = function(value) { if (!arguments.length) return data; data = value; if (typeof updateData === 'function') updateData(); return chart; }; chart.expandTree = function(value) { root.children.forEach(expand); update(root); function expand(d) { if (d._children) { d.children = d._children; d.children.forEach(expand); d._children = null; } } }; // collapse the node and all its children chart.collapse = function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } chart.collapseTree = function(value) { root.children.forEach(collapse); update(root); function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } }; function update(source) { // assigns the x and y position for the nodes var treeData = treemap(root); // compute the new tree layout var nodes = treeData.descendants(), links = treeData.descendants().slice(1); // normalise for fixed depth nodes.forEach(function(d) { // d.x = d.depth * 180; d.y = d.depth * width_multiplier; // d.x = d.depth * 180; }); // ****************** Nodes section *************************** // update the nodes ... var nodeArray = svg.selectAll('g.node') .data(nodes, function(d) { return d.id || (d.id = ++i); }); // console.log(nodeArray); // Enter any new nodes at the parent's previous position. var nodeEnter = nodeArray.enter().append('g') .attr('class', 'node') .attr('transform', function(d) { return 'translate(' + (source.y0 + margin.top) + ',' + (source.x0 + margin.left) + ')'; // return 'translate(' + (source.y0) + ',' + (source.x0) + ')'; }) .on('click', click); // Add circle for the nodes, which is filled lightsteelblue for nodes that have hidden children (_children). nodeEnter.append('circle') .attr('class', 'node') .attr('r', 1e-6) .style('fill', function(d) { return d._children ?
'lightsteelblue' : '#fff'; }); // Append the node label (data.name), either to the left or right of the node circle, depending on whether the node has children. nodeEnter.append("text") .attr("x", function(d) { return d.children || d._children ? -15 : 15; }) .attr("dy", ".35em") .attr("style", "node") .attr("text-anchor", function(d) { return d.children || d._children ? "end" : "start"; }) .text(function(d) { // console.log(d); // return (d.children || d._children) ? d.data.name.capitalize() : d.data.name;}) return d.data.name;}); // .style("fill-opacity", 1e-6); // Add the number of children inside the node circle, whether they are unfolded or not. nodeEnter.append('text') .attr('x', 0) .attr('y', 3) .attr("text-anchor", "middle") .attr('cursor', 'pointer') .style('font-size', '10px') .text(function(d) { if (d.children) return d.children.length; else if (d._children) return d._children.length; }); // UPDATE var nodeUpdate = nodeEnter.merge(nodeArray); // Transition the resulting array to the proper position for the node. nodeUpdate.transition().duration(duration) .attr('transform', function(d) { return 'translate(' + (d.y + margin.top) + ',' + (d.x + margin.left) + ')'; }); // Update the node attributes and style, coloring search results red. nodeUpdate.select('circle.node') .attr('r', 9) .style("fill", function(d) { if(d.data.class === "found") { return "#ff4136"; //red } else if(d._children) { return "lightsteelblue"; } }) .attr('cursor', 'pointer') .style("stroke", function(d) { if (d.data.class === "found") { return "#ff4136"; //red } ; }) // Remove any exiting nodes var nodeExit = nodeArray.exit() .transition().duration(duration) .attr('transform', function(d) { return 'translate(' + (source.y + margin.top) + ',' + (source.x + margin.left) + ')'; }) .remove(); // on exit reduce the node circles size to 0 nodeExit.select('circle') .attr('r', 1e-6); // on exit reduce the opacity of text labels nodeExit.select('text') .style('fill-opacity', 1e-6); // adding zoom and panning d3.select("svg").call(d3.zoom().on("zoom", function() { svg.attr("transform", d3.event.transform) })); // trying to invert the direction of the zoom wheel // .on("wheel", function(d){ // var direction = d3.event.wheelDelta < 0 ? 'down' : 'up'; // zoom(direction === 'up' ? 
d : d.parent); // }); // ****************** links section *************************** // update the links var link = svg.selectAll('path.link').data(links, function(d) { return d.id }); // enter any new links at the parent's previous position var linkEnter = link.enter().insert('path', 'g') .attr('class', 'link') .attr('d', function(d) { var o = {x: source.x0, y: source.y0}; return diagonal(o, o); }); // UPDATE var linkUpdate = linkEnter.merge(link); // transition back to the parent element position linkUpdate.transition().duration(duration) .attr('d', function(d) { return diagonal(d, d.parent); }) .style("stroke",function(d) { if(d.data.class==="found") { return "#ff4136"; } }); // remove any exiting links var linkExit = link.exit() .transition().duration(duration) .attr('d', function(d) { var o = {x: source.x, y: source.y}; return diagonal(o, o); }) .remove(); // store the old positions for transition nodes.forEach(function(d) { d.x0 = d.x; d.y0 = d.y; }); // creates a curved (diagonal) path from parent to the child nodes function diagonal(s, d) { var path = 'M ' + (s.y + margin.top) + ' ' + (s.x + margin.left) + 'C ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (s.x + margin.left) + ', ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (d.x + margin.left) + ', ' + (d.y + margin.top) + ' ' + (d.x + margin.left); return path; } // toggle children on click function click(d) { toggleChildren(d); printNodeInfo(d); } function toggleChildren(d) { if (d.children) { d._children = d.children; d.children = null; } else { d.children = d._children; d._children = null; } update(d); } } chart.updateWidth = function(value) { width_multiplier = value; update(data); } chart.updateHeight = function(value) { height_extra_space = value; update(data); } String.prototype.capitalize = function() { return this.charAt(0).toUpperCase() + this.slice(1).toLowerCase(); }; function zoom() { var scale = d3.event.scale, translation = d3.event.translate, tbound = -h * scale, bbound = h * scale, lbound = (-w + m[1]) * scale, rbound = (w - m[3]) * scale; // limit translation to thresholds translation = [ Math.max(Math.min(translation[0], rbound), lbound), Math.max(Math.min(translation[1], bbound), tbound) ]; d3.select(".drawarea") .attr("transform", "translate(" + translation + ")" + " scale(" + scale + ")"); } chart.openPaths = function(paths) { for(var i=0; i<paths.length; i++) { if(paths[i].id !== "1") //i.e. not root { paths[i].class = 'found'; console.log("right after setting class to 'found' "); if(paths[i]._children) { //if children are hidden: open them, otherwise: don't do anything paths[i].children = paths[i]._children; paths[i]._children = null; } else if(paths[i].children)
else { console.log("For some reason, I don't discover hidden children"); } update(paths[i]); } } } chart.getCurrentRoot = function() { return root; } chart.centerNode = function(d) { // if (active.node() === this) return reset(); // active.classed("active", false); // active = d3.select(this).classed("active", true); // var bounds = path.bounds(d), // dx = bounds[1][0] - bounds[0][0], // dy = bounds[1][1] - bounds[0][1], // x = (bounds[0][0] + bounds[1][0]) / 2, // y = (bounds[0][1] + bounds[1][1]) / 2, // scale = Math.max(1, Math.min(8, 0.9 / Math.max(dx / width, dy / height))), // translate = [width / 2 - scale * x, height / 2 - scale * y]; svg.transition().duration(duration) // .call(zoom.translate(translate).scale(scale).event); // not in d3 v4 .call(zoom.transform, d3.zoomIdentity.translate(translate[0],translate[1]).scale(scale) ); // updated for d3 v4 } // chart.centerNode = function() // { // scale = zoomListener.scale(); // x = -source.y0; // y = -source.x0; // x = x * scale + viewerWidth / 2; // y = y * scale + viewerHeight / 2; // d3.select('g').transition() // .duration(duration) // .attr("transform", "translate(" + x + "," + y + ")scale(" + scale + ")"); // zoomListener.scale(scale); // zoomListener.translate([x, y]); // } return chart; } function printNodeInfo(d) { var location = document.getElementById("infoField"); location.innerHTML = "&ensp;name: " + d.data.name+"<br/>" + "&ensp;value: " + d.data.value; } function loadOtherTree(value) { // remove svg d3.select("svg").remove(); // build new tree object myTree = tree().height(height).width(width); // load new data d3.json(value, function(error, data) { if (error) throw error; root = data; console.log(root); root.x0 = height / 2; root.y0 = 0; myTree.data(root); // put new chart in place d3.select('#chart').call(myTree); }); } function reloadInitialTree() { loadOtherTree("flare.json"); // $("#search").object.text = ""; } function centerTree() { alert("To be implemented."); } // from: https://css-tricks.com/snippets/javascript/get-url-variables/ function getQueryVariable(variable) { var query = window.location.search.substring(1); var vars = query.split("&"); for (var i=0;i<vars.length;i++) { var pair = vars[i].split("="); if(pair[0] == variable){return pair[1];} } return(false); } function appendAxes() { var axis_data_x = [-100, 800], axis_data_y = [-100, 600]; console.log(d3.select("#chart")); var svg = d3.select("#chart") .append("svg") .attr("width", width) .attr("height", height); var xscale = d3.scaleLinear() .domain([0, d3.max(axis_data_x)]) .range([0, width - 100]); var yscale = d3.scaleLinear() .domain([0, d3.max(axis_data_y)]) .range([height/2, 0]); var x_axis = d3.axisBottom() .scale(xscale); var y_axis = d3.axisLeft() .scale(yscale); svg.append("g") .attr("transform", "translate(50, 10)") .call(y_axis); var xAxisTranslate = height/2 + 10; svg.append("g") .attr("transform", "translate(50, " + xAxisTranslate +")") .call(x_axis) } // var fisheye = d3.fisheye.circular // .radius(200) // .distortion(2); // // svg.on("mousemove", function() // { // fisheye.focus(d3.mouse(this)); // // node.each(function(d) { d.fisheye = fisheye(d); }) // .attr("cx", function(d) { return d.fisheye.x; }) // .attr("cy", function(d) { return d.fisheye.y; }) // .attr("r", function(d) { return d.fisheye.z * 4.5; }); // // link.attr("x1", function(d) { return d.source.fisheye.x; }) // .attr("y1", function(d) { return d.source.fisheye.y; }) // .attr("x2", function(d) { return d.target.fisheye.x; }) // .attr("y2", function(d) { return 
d.target.fisheye.y; }); // });
{ console.log("There are children here, tralalalala"); }
conditional_block
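The `conditional_block` middle above, together with the surrounding script.js prefix and suffix, revolves around one pattern: a collapsed d3 node stashes its children on `_children` so the tree layout only traverses `children`, and `toggleChildren`/`openPaths` swap the two lists back and forth. A minimal sketch of that stash-and-restore pattern, written in Python purely for illustration (the `Node` class and names here are hypothetical, not from script.js):

```python
class Node:
    """Hypothetical stand-in for a d3.hierarchy node (not from script.js)."""
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []  # visible children (seen by the layout)
        self._children = []             # stashed children of a collapsed node

def toggle(node):
    """Swap visible and hidden children, mirroring toggleChildren(d)."""
    if node.children:
        node._children, node.children = node.children, []
    else:
        node.children, node._children = node._children, []

root = Node("root", [Node("a"), Node("b")])
toggle(root)   # collapse: both children move to _children
assert root.children == [] and len(root._children) == 2
toggle(root)   # expand: they move back
assert len(root.children) == 2 and root._children == []
```

The point of the swap is that hiding a subtree is O(1) and fully reversible: nothing is deleted, so re-expanding restores the exact structure.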
script.js
function tree() { var data, root, treemap, svg, i = 0, duration = 650, // margin = {top: 20, right: 10, bottom: 20, left: 50}, margin = {top: 0, right: 0, bottom: 80, left: 50}, width = 960 - 4 - margin.left - margin.right, // fitting in block frame height = 800 - 4 - margin.top - margin.bottom, // fitting in block frame width_multiplier = 180, height_extra_space = 0; // update; function chart(selection) { selection.each(function() { height = height - margin.top - margin.bottom; width = width - margin.left - margin.right; // append the svg object to the selection svg = selection.append('svg') .attr('width', width + margin.left + margin.right) .attr('height', height + margin.top + margin.bottom) .append('g') .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); // declares a tree layout and assigns the size of the tree treemap = d3.tree().size([height, width]); // assign parent, children, height, depth root = d3.hierarchy(data, function(d) { return d.children }); root.x0 = (height / 2); // left edge of the rectangle root.y0 = 0; // top edge of the triangle // collapse after the second level root.children.forEach(collapse); update(root); function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } }); } chart.width = function(value) { if (!arguments.length) return width; width = value; return chart; }; chart.height = function(value) { if (!arguments.length) return height; height = value; return chart; }; chart.margin = function(value) { if (!arguments.length) return margin; margin = value; return chart; }; chart.data = function(value) { if (!arguments.length) return data; data = value; if (typeof updateData === 'function') updateData(); return chart; }; chart.expandTree = function(value) { root.children.forEach(expand); update(root); function expand(d) { if (d._children) { d.children = d._children; d.children.forEach(expand); d._children = null; } } }; // collapse the node and all it's children chart.collapse = function(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } chart.collapseTree = function(value) { root.children.forEach(collapse); update(root); function collapse(d) { if (d.children) { d._children = d.children; d._children.forEach(collapse); d.children = null; } } }; function update(source) { // assigns the x and y position for the nodes var treeData = treemap(root); // compute the new tree layout var nodes = treeData.descendants(), links = treeData.descendants().slice(1); // normalise for fixed depth nodes.forEach(function(d) { // d.x = d.depth * 180; d.y = d.depth * width_multiplier; // d.x = d.depth * 180; }); // ****************** Nodes section *************************** // update the nodes ... var nodeArray = svg.selectAll('g.node') .data(nodes, function(d) { return d.id || (d.id = ++i); }); // console.log(nodeArray); // Enter any new modes at the parent's previous position. var nodeEnter = nodeArray.enter().append('g') .attr('class', 'node') .attr('transform', function(d) { return 'translate(' + (source.y0 + margin.top) + ',' + (source.x0 + margin.left) + ')'; // return 'translate(' + (source.y0) + ',' + (source.x0) + ')'; }) .on('click', click); // Add circle for the nodes, which is filled lightsteelblue for nodes that have hidden children (_children). nodeEnter.append('circle') .attr('class', 'node') .attr('r', 1e-6) .style('fill', function(d) { return d._children ? 
'lightsteelblue' : '#fff'; }); // Append the node label (data.name), either to the left or right of the node circle, depending on whether the node has children. nodeEnter.append("text") .attr("x", function(d) { return d.children || d._children ? -15 : 15; }) .attr("dy", ".35em") .attr("style", "node") .attr("text-anchor", function(d) { return d.children || d._children ? "end" : "start"; }) .text(function(d) { // console.log(d); // return (d.children || d._children) ? d.data.name.capitalize() : d.data.name;}) return d.data.name;}); // .style("fill-opacity", 1e-6); // Add the number of children inside the node circle, whether they are unfolded or not. nodeEnter.append('text') .attr('x', 0) .attr('y', 3) .attr("text-anchor", "middle") .attr('cursor', 'pointer') .style('font-size', '10px') .text(function(d) { if (d.children) return d.children.length; else if (d._children) return d._children.length; }); // UPDATE var nodeUpdate = nodeEnter.merge(nodeArray); // Transition the resulting array to the proper position for the node. nodeUpdate.transition().duration(duration) .attr('transform', function(d) { return 'translate(' + (d.y + margin.top) + ',' + (d.x + margin.left) + ')'; }); // Update the node attributes and style, coloring search results red. nodeUpdate.select('circle.node') .attr('r', 9) .style("fill", function(d) { if(d.data.class === "found") { return "#ff4136"; //red } else if(d._children) { return "lightsteelblue"; } }) .attr('cursor', 'pointer') .style("stroke", function(d) { if (d.data.class === "found") { return "#ff4136"; //red } ; }) // Remove any exiting nodes var nodeExit = nodeArray.exit() .transition().duration(duration) .attr('transform', function(d) { return 'translate(' + (source.y + margin.top) + ',' + (source.x + margin.left) + ')'; }) .remove(); // on exit reduce the node circles size to 0 nodeExit.select('circle') .attr('r', 1e-6); // on exit reduce the opacity of text labels nodeExit.select('text') .style('fill-opacity', 1e-6); // adding zoom and panning d3.select("svg").call(d3.zoom().on("zoom", function() { svg.attr("transform", d3.event.transform) })); // trying to invert the direction of the zoom wheel // .on("wheel", function(d){ // var direction = d3.event.wheelDelta < 0 ? 'down' : 'up'; // zoom(direction === 'up' ? 
d : d.parent); // }); // ****************** links section *************************** // update the links var link = svg.selectAll('path.link').data(links, function(d) { return d.id }); // enter any new links at the parent's previous position var linkEnter = link.enter().insert('path', 'g') .attr('class', 'link') .attr('d', function(d) { var o = {x: source.x0, y: source.y0}; return diagonal(o, o); }); // UPDATE var linkUpdate = linkEnter.merge(link); // transition back to the parent element position linkUpdate.transition().duration(duration) .attr('d', function(d) { return diagonal(d, d.parent); }) .style("stroke",function(d) { if(d.data.class==="found") { return "#ff4136"; } }); // remove any exiting links var linkExit = link.exit() .transition().duration(duration) .attr('d', function(d) { var o = {x: source.x, y: source.y}; return diagonal(o, o); }) .remove(); // store the old positions for transition nodes.forEach(function(d) { d.x0 = d.x; d.y0 = d.y; }); // creates a curved (diagonal) path from parent to the child nodes function diagonal(s, d) { var path = 'M ' + (s.y + margin.top) + ' ' + (s.x + margin.left) + 'C ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (s.x + margin.left) + ', ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (d.x + margin.left) + ', ' + (d.y + margin.top) + ' ' + (d.x + margin.left); return path; } // toggle children on click function
(d) { toggleChildren(d); printNodeInfo(d); } function toggleChildren(d) { if (d.children) { d._children = d.children; d.children = null; } else { d.children = d._children; d._children = null; } update(d); } } chart.updateWidth = function(value) { width_multiplier = value; update(data); } chart.updateHeight = function(value) { height_extra_space = value; update(data); } String.prototype.capitalize = function() { return this.charAt(0).toUpperCase() + this.slice(1).toLowerCase(); }; function zoom() { var scale = d3.event.scale, translation = d3.event.translate, tbound = -h * scale, bbound = h * scale, lbound = (-w + m[1]) * scale, rbound = (w - m[3]) * scale; // limit translation to thresholds translation = [ Math.max(Math.min(translation[0], rbound), lbound), Math.max(Math.min(translation[1], bbound), tbound) ]; d3.select(".drawarea") .attr("transform", "translate(" + translation + ")" + " scale(" + scale + ")"); } chart.openPaths = function(paths) { for(var i=0; i<paths.length; i++) { if(paths[i].id !== "1") //i.e. not root { paths[i].class = 'found'; console.log("right after setting class to 'found' "); if(paths[i]._children) { //if children are hidden: open them, otherwise: don't do anything paths[i].children = paths[i]._children; paths[i]._children = null; } else if(paths[i].children) { console.log("There are children here, tralalalala"); } else { console.log("For some reason, I don't discover hidden children"); } update(paths[i]); } } } chart.getCurrentRoot = function() { return root; } chart.centerNode = function(d) { // if (active.node() === this) return reset(); // active.classed("active", false); // active = d3.select(this).classed("active", true); // var bounds = path.bounds(d), // dx = bounds[1][0] - bounds[0][0], // dy = bounds[1][1] - bounds[0][1], // x = (bounds[0][0] + bounds[1][0]) / 2, // y = (bounds[0][1] + bounds[1][1]) / 2, // scale = Math.max(1, Math.min(8, 0.9 / Math.max(dx / width, dy / height))), // translate = [width / 2 - scale * x, height / 2 - scale * y]; svg.transition().duration(duration) // .call(zoom.translate(translate).scale(scale).event); // not in d3 v4 .call(zoom.transform, d3.zoomIdentity.translate(translate[0],translate[1]).scale(scale) ); // updated for d3 v4 } // chart.centerNode = function() // { // scale = zoomListener.scale(); // x = -source.y0; // y = -source.x0; // x = x * scale + viewerWidth / 2; // y = y * scale + viewerHeight / 2; // d3.select('g').transition() // .duration(duration) // .attr("transform", "translate(" + x + "," + y + ")scale(" + scale + ")"); // zoomListener.scale(scale); // zoomListener.translate([x, y]); // } return chart; } function printNodeInfo(d) { var location = document.getElementById("infoField"); location.innerHTML = "&ensp;name: " + d.data.name+"<br/>" + "&ensp;value: " + d.data.value; } function loadOtherTree(value) { // remove svg d3.select("svg").remove(); // build new tree object myTree = tree().height(height).width(width); // load new data d3.json(value, function(error, data) { if (error) throw error; root = data; console.log(root); root.x0 = height / 2; root.y0 = 0; myTree.data(root); // put new chart in place d3.select('#chart').call(myTree); }); } function reloadInitialTree() { loadOtherTree("flare.json"); // $("#search").object.text = ""; } function centerTree() { alert("To be implemented."); } // from: https://css-tricks.com/snippets/javascript/get-url-variables/ function getQueryVariable(variable) { var query = window.location.search.substring(1); var vars = query.split("&"); for (var 
i=0;i<vars.length;i++) { var pair = vars[i].split("="); if(pair[0] == variable){return pair[1];} } return(false); } function appendAxes() { var axis_data_x = [-100, 800], axis_data_y = [-100, 600]; console.log(d3.select("#chart")); var svg = d3.select("#chart") .append("svg") .attr("width", width) .attr("height", height); var xscale = d3.scaleLinear() .domain([0, d3.max(axis_data_x)]) .range([0, width - 100]); var yscale = d3.scaleLinear() .domain([0, d3.max(axis_data_y)]) .range([height/2, 0]); var x_axis = d3.axisBottom() .scale(xscale); var y_axis = d3.axisLeft() .scale(yscale); svg.append("g") .attr("transform", "translate(50, 10)") .call(y_axis); var xAxisTranslate = height/2 + 10; svg.append("g") .attr("transform", "translate(50, " + xAxisTranslate +")") .call(x_axis) } // var fisheye = d3.fisheye.circular // .radius(200) // .distortion(2); // // svg.on("mousemove", function() // { // fisheye.focus(d3.mouse(this)); // // node.each(function(d) { d.fisheye = fisheye(d); }) // .attr("cx", function(d) { return d.fisheye.x; }) // .attr("cy", function(d) { return d.fisheye.y; }) // .attr("r", function(d) { return d.fisheye.z * 4.5; }); // // link.attr("x1", function(d) { return d.source.fisheye.x; }) // .attr("y1", function(d) { return d.source.fisheye.y; }) // .attr("x2", function(d) { return d.target.fisheye.x; }) // .attr("y2", function(d) { return d.target.fisheye.y; }); // });
click
identifier_name
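The `identifier_name` example above masks only the handler name `click`, which simply delegates to `toggleChildren` and `printNodeInfo`. The more formula-like piece both script.js records share is the `diagonal(s, d)` helper: it emits an SVG cubic Bezier path whose two control points sit at the horizontal midpoint between parent and child, which is what gives the links their S-curve. A hedged re-derivation in Python (coordinate values are illustrative; like the JS, it works in d3's swapped space where `y` is horizontal depth and `x` is vertical position):

```python
def diagonal(s, d, top=0, left=50):
    """Build the 'M ... C ...' SVG path string the JS helper produces."""
    midy = (s["y"] + d["y"] + top * 2) / 2  # shared x of both control points
    return (f"M {s['y'] + top} {s['x'] + left} "
            f"C {midy} {s['x'] + left}, "
            f"{midy} {d['x'] + left}, "
            f"{d['y'] + top} {d['x'] + left}")

print(diagonal({"x": 10, "y": 0}, {"x": 40, "y": 180}))
# M 0 60 C 90.0 60, 90.0 90, 180 90
```

Because both control points share the midpoint depth, the curve leaves the parent horizontally and arrives at the child horizontally, regardless of their vertical offset.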
data.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # Modifications Copyright 2017 Abigail See # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This file contains code to read the train/eval/test data from file and process it, and read the vocab data from file and process it""" import glob import random import struct import csv from tensorflow.core.example import example_pb2 from collections import defaultdict as ddict import pickle import scipy.sparse as sp import tensorflow as tf import numpy as np import horovod.tensorflow as hvd import collections import six import unicodedata # <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids. SENTENCE_START = '<s>' SENTENCE_END = '</s>' PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences # Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file. def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") class Vocab(object): """Vocabulary class for mapping between words and ids (integers)""" def __init__(self, vocab_file, max_size): """Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file. Args: vocab_file: path to the vocab file, which is assumed to contain "<word> <frequency>" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though. max_size: integer. The maximum size of the resulting Vocabulary.""" self._word_to_id = {} self._id_to_word = {} self._count = 0 # keeps track of total number of words in the Vocab # [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3. 
for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]: self._word_to_id[w] = self._count self._id_to_word[self._count] = w self._count += 1 # Read the vocab file and add words up to max_size with open(vocab_file, 'r') as vocab_f: for line in vocab_f: pieces = line.split() if len(pieces) != 2: print ('Warning: incorrectly formatted line in vocabulary file: %s\n' % line) continue w = pieces[0] if w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]: raise Exception( '<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\'t be in the vocab file, but %s is' % w) if w in self._word_to_id: raise Exception('Duplicated word in vocabulary file: %s' % w) self._word_to_id[w] = self._count self._id_to_word[self._count] = w self._count += 1 if max_size != 0 and self._count >= max_size: print ("max_size of vocab was specified as %i; we now have %i words. Stopping reading." % ( max_size, self._count)) break print ("Finished constructing vocabulary of %i total words. Last word added: %s" % ( self._count, self._id_to_word[self._count - 1])) def word2id(self, word): """Returns the id (integer) of a word (string). Returns [UNK] id if word is OOV.""" if word not in self._word_to_id:
return self._word_to_id[UNKNOWN_TOKEN] return self._word_to_id[word] def id2word(self, word_id): """Returns the word (string) corresponding to an id (integer).""" if word_id not in self._id_to_word: raise ValueError('Id not found in vocab: %d' % word_id) return self._id_to_word[word_id] def size(self): """Returns the total size of the vocabulary""" return self._count def write_metadata(self, fpath): """Writes metadata file for Tensorboard word embedding visualizer as described here: https://www.tensorflow.org/get_started/embedding_viz Args: fpath: place to write the metadata file """ print("Writing word embedding metadata file to %s..." % (fpath)) with open(fpath, "w") as f: fieldnames = ['word'] writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames) for i in range(self.size()): writer.writerow({"word": self._id_to_word[i]}) def set_glove_embedding(self, fpath, embedding_dim): """Creates glove embedding_matrix from file path""" emb = np.random.randn(self._count,embedding_dim) # tf.logging.info(emb[0]) with open(fpath) as f: #python 3.x support for k,line in enumerate(f): fields = line.split() if len(fields) - 1 != embedding_dim: # Sometimes there are funny unicode parsing problems that lead to different # field lengths (e.g., a word with a unicode space character that splits # into more than one column). Such lines are treated as malformed, so we raise. # Note that if you have some kind of long header, this could trip on the very # first line; it's hard to check for that here, so look at the file and the # model summary to make sure things are as they are supposed to be. raise Exception("Found line with wrong number of dimensions (expected %d, was %d): %s" % (embedding_dim, len(fields) - 1, line)) word = fields[0] if word in self._word_to_id: vector = np.asarray(fields[1:], dtype='float32') emb[self._word_to_id[word]] = vector # if k%1000 == 0: # tf.logging.info('glove : %d',k) self.glove_emb = emb class BertVocab(object): """While glove_vocab has been used as the default, the term glove is a misnomer.
Glove_vocab represents normal vocab in this file This function converts individual tokens to their respective word piece tokens """ def __init__(self, glove_vocab, bert_vocab_file_path): self.bert_vocab = collections.OrderedDict() self.glove_vocab = glove_vocab index = 0 with tf.gfile.GFile(bert_vocab_file_path, "r") as reader: #obtain bert vocab while True: token = convert_to_unicode(reader.readline()) if not token: break token = token.strip() self.bert_vocab[token] = index index += 1 not_found = 0 self.index_map_glove_to_bert = {} for i in range(glove_vocab._count): if glove_vocab._id_to_word[i] in self.bert_vocab: self.index_map_glove_to_bert[i] = [self.bert_vocab[glove_vocab._id_to_word[i]]] else: #Word Piece Tokenizer not_found = not_found + 1 new_tokens = [] token = glove_vocab._id_to_word[i] chars = list(token) is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.bert_vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: new_tokens.append(self.bert_vocab['[UNK]']) else: sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens] new_tokens = new_tokens + sub_tokens_bert self.index_map_glove_to_bert[i] = new_tokens tf.logging.info(not_found) def convert_glove_to_bert_indices(self, token_ids): """ Converts words to their respective word-piece tokenized indices token_ids : ids from the word """ new_tokens = [self.bert_vocab['[CLS]']] #As pert the bert repo instructions offset = 1 pos_offset = [] for token_id in token_ids: pos_offset.append(offset) #wordpiece tokenizer can return more than one index hence we maintain an offset array. This is useful for the BERT + GCN experiments. if token_id in self.index_map_glove_to_bert: bert_tokens = self.index_map_glove_to_bert[token_id] offset = offset + len(bert_tokens) - 1 #new_tokens.append(self.index_map_glove_to_bert[token_id]) new_tokens = new_tokens + bert_tokens else: #wordpiece might be redundant for training data. Keep for unseen instances token = glove_vocab._id_to_word[token_id] chars = list(token) is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: #new_tokens.append(self.index_map_glove_to_bert['[UNK]']) new_token = new_token + self.index_map_glove_to_bert['[UNK]'] else: sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens] new_tokens = new_tokens + sub_tokens_bert offset = offset + len(sub_tokens_bert) - 1 new_tokens.append(self.bert_vocab['[SEP]']) return new_tokens, pos_offset def convert_by_vocab(vocab, items): """Converts a sequence of [tokens|ids] using the vocab.""" output = [] for item in items: if item in vocab: output.append(vocab[item]) else: output.append(vocab['[UNK]']) return output def convert_tokens_to_ids(vocab, tokens): return convert_by_vocab(vocab, tokens) def convert_ids_to_tokens(inv_vocab, ids): return convert_by_vocab(inv_vocab, ids) def example_generator(data_path, single_pass, device_rank,data_as_tf_example=True): """Generates tf.Examples from data files. Binary data format: <length><blob>. <length> represents the byte size of <blob>. <blob> is serialized tf.Example proto. 
The tf.Example contains the tokenized article text and summary. Args: data_path: Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all. single_pass: Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely. Yields: Deserialized tf.Example. """ random.seed(device_rank+1) if data_as_tf_example: epoch = 0 while True: filelist = glob.glob(data_path) # get the list of datafiles assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty if single_pass: filelist = sorted(filelist) else: random.shuffle(filelist) #tf.logging.info(filelist) for file_no, f in enumerate(filelist): reader = open(f, 'rb') all_examples = [] while True: len_bytes = reader.read(8) if not len_bytes: if not single_pass: random.shuffle(all_examples) for k in all_examples: yield example_pb2.Example.FromString(k), epoch break # finished reading this file str_len = struct.unpack('q', len_bytes)[0] example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0] all_examples.append(example_str) if single_pass: print "example_generator completed reading all datafiles. No more data." break else: #pickle format while True: if single_pass: for data_ in data_path: for i in data_: yield i else: random.shuffle(data_path) for data_ in data_path: new_data = data_ x = np.arange(len(new_data)) np.random.shuffle(x) # random.shuffle(new_data) for i in x: yield new_data[i] if single_pass: break def article2ids(article_words, vocab): """Map the article words to their ids. Also return a list of OOVs in the article. Args: article_words: list of words (strings) vocab: Vocabulary object Returns: ids: A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002. oovs: A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers.""" ids = [] oovs = [] unk_id = vocab.word2id(UNKNOWN_TOKEN) for w in article_words: i = vocab.word2id(w) if i == unk_id: # If w is OOV if w not in oovs: # Add to list of OOVs oovs.append(w) oov_num = oovs.index(w) # This is 0 for the first article OOV, 1 for the second article OOV... ids.append(vocab.size() + oov_num) # This is e.g. 50000 for the first article OOV, 50001 for the second... else: ids.append(i) return ids, oovs def abstract2ids(abstract_words, vocab, article_oovs): """Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers. Args: abstract_words: list of words (strings) vocab: Vocabulary object article_oovs: list of in-article OOV words (strings), in the order corresponding to their temporary article OOV numbers Returns: ids: List of ids (integers). In-article OOV words are mapped to their temporary OOV numbers. 
Out-of-article OOV words are mapped to the UNK token id.""" ids = [] unk_id = vocab.word2id(UNKNOWN_TOKEN) for w in abstract_words: i = vocab.word2id(w) if i == unk_id: # If w is an OOV word if w in article_oovs: # If w is an in-article OOV vocab_idx = vocab.size() + article_oovs.index(w) # Map to its temporary article OOV number ids.append(vocab_idx) else: # If w is an out-of-article OOV ids.append(unk_id) # Map to the UNK token id else: ids.append(i) return ids def outputids2words(id_list, vocab, article_oovs): """Maps output ids to words, including mapping in-article OOVs from their temporary ids to the original OOV string (applicable in pointer-generator mode). Args: id_list: list of ids (integers) vocab: Vocabulary object article_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids (that have been assigned in pointer-generator mode), or None (in baseline mode) Returns: words: list of words (strings) """ words = [] for i in id_list: try: w = vocab.id2word(i) # might be [UNK] except ValueError as e: # w is OOV assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode" article_oov_idx = i - vocab.size() try: w = article_oovs[article_oov_idx] except ValueError as e: # i doesn't correspond to an article oov raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs))) words.append(w) return words def abstract2sents(abstract): """Splits abstract text from datafile into list of sentences. Args: abstract: string containing <s> and </s> tags for starts and ends of sentences Returns: sents: List of sentence strings (no tags)""" cur = 0 sents = [] while True: try: start_p = abstract.index(SENTENCE_START, cur) end_p = abstract.index(SENTENCE_END, start_p + 1) cur = end_p + len(SENTENCE_END) sents.append(abstract[start_p + len(SENTENCE_START):end_p].strip()) except ValueError as e: # no more sentences return sents def show_art_oovs(article, vocab): """Returns the article string, highlighting the OOVs by placing __underscores__ around them""" unk_token = vocab.word2id(UNKNOWN_TOKEN) words = article.split(' ') words = [("__%s__" % w) if vocab.word2id(w) == unk_token else w for w in words] out_str = ' '.join(words) return out_str def show_abs_oovs(abstract, vocab, article_oovs): """Returns the abstract string, highlighting the article OOVs with __underscores__. If a list of article_oovs is provided, non-article OOVs are differentiated like !!__this__!!. Args: abstract: string vocab: Vocabulary object article_oovs: list of words (strings), or None (in baseline mode) """ unk_token = vocab.word2id(UNKNOWN_TOKEN) words = abstract.split(' ') new_words = [] for w in words: if vocab.word2id(w) == unk_token: # w is oov if article_oovs is None: # baseline mode new_words.append("__%s__" % w) else: # pointer-generator mode if w in article_oovs: new_words.append("__%s__" % w) else: new_words.append("!!__%s__!!" 
% w) else: # w is in-vocab word new_words.append(w) out_str = ' '.join(new_words) return out_str dep_list = ['cc', 'agent', 'ccomp', 'prt', 'meta', 'nsubjpass', 'csubj', 'conj', 'amod', 'poss', 'neg', 'csubjpass', 'mark', 'auxpass', 'advcl', 'aux', 'ROOT', 'prep', 'parataxis', 'xcomp', 'nsubj', 'nummod', 'advmod', 'punct', 'quantmod', 'acomp', 'compound', 'pcomp', 'intj', 'relcl', 'npadvmod', 'case', 'attr', 'dep', 'appos', 'det', 'nmod', 'dobj', 'dative', 'pobj', 'expl', 'predet', 'preconj', 'oprd', 'acl', 'flow'] dep_dict = {label: i for i, label in enumerate(dep_list)} def get_specific_adj(batch_list, batch_size, max_nodes, label, encoder_lengths, use_both=True, keep_prob=1.0, use_bert=False, bert_mapping=None, max_length=300): adj_main_in = [] adj_main_out = [] if bert_mapping is None: bert_mapping = [[] for i in range(len(batch_list))] #empty array for allowing in the next loop for edge_list, enc_length, offset_list in zip(batch_list, encoder_lengths, bert_mapping): #print(edge_list) curr_adj_in = [] curr_adj_out = [] curr_data_in = [] curr_data_out = [] seen_nodes = [] for s, d, lbl in edge_list: if s >=max_nodes or d >=max_nodes or s>=max_length or d>=max_length: continue if use_bert: src = s + offset_list[s] dest = d + offset_list[d] else: src = s dest = d seen_nodes.append(src) seen_nodes.append(dest) if lbl!=label: continue #if src >= max_nodes or dest >= max_nodes: # continue x = np.random.uniform() if x<=keep_prob: curr_adj_out.append((src, dest)) curr_data_out.append(1.0) if use_both: curr_adj_in.append((dest, src)) curr_data_in.append(1.0) else: curr_adj_out.append((dest, src)) curr_data_out.append(1.0) ''' Use this snippet when you need to use the A + I condition (refer README) seen_nodes = list(set(seen_nodes)) for src in range(enc_length): #A + I for entity and coref curr_adj_out.append((src, src)) curr_data_out.append(1.0) if use_both: curr_adj_in.append((src, src)) curr_data_in.append(1.0) ''' if len(curr_adj_in) == 0: adj_in = sp.coo_matrix((max_nodes, max_nodes)) else: adj_in = sp.coo_matrix((curr_data_in, zip(*curr_adj_in)), shape=(max_nodes, max_nodes)) if len(curr_adj_out) == 0: adj_out = sp.coo_matrix((max_nodes, max_nodes)) else: adj_out = sp.coo_matrix((curr_data_out, zip(*curr_adj_out)), shape=(max_nodes, max_nodes)) adj_main_in.append(adj_in) adj_main_out.append(adj_out) return adj_main_in, adj_main_out def get_adj(batch_list, batch_size, max_nodes, use_label_information=True, label_dict=dep_dict,flow_alone=False, flow_combined=False, keep_prob=1.0, use_bert=False, bert_mapping=None, max_length=300): adj_main_in, adj_main_out = [], [] max_labels = 45 if bert_mapping is None: bert_mapping = [[] for i in range(len(batch_list))] #empty array for allowing in the next loop for edge_list, offset_list in zip(batch_list, bert_mapping): adj_in, adj_out = {}, {} l_e = len(edge_list) in_ind, in_data = ddict(list), ddict(list) out_ind, out_data = ddict(list), ddict(list) count = 0 for s, d, lbl_ in edge_list: if s>=max_nodes or d >= max_nodes or s>=max_length or d>=max_length: continue if use_bert: try: src = s + offset_list[s] except: tf.logging.info(s) tf.logging.info(len(offset_list)) dest = d + offset_list[d] else: src = s dest = d #if src >= max_nodes or dest >= max_nodes: # continue if flow_alone: lbl = 0 if src+1 < max_nodes: x = np.random.uniform() if x<= keep_prob: out_ind[lbl].append((src, src+1)) out_data[lbl].append(1.0) x = np.random.uniform() if x<=keep_prob: in_ind[lbl].append((src+1, src)) in_data[lbl].append(1.0) else: if lbl_ not in label_dict: continue 
lbl = label_dict[lbl_] if not use_label_information: #all assigned the same label information lbl = 0 x = np.random.uniform() if x<=keep_prob: out_ind[lbl].append((src, dest)) out_data[lbl].append(1.0) x = np.random.uniform() if x<=keep_prob: in_ind[lbl].append((dest, src)) in_data[lbl].append(1.0) if flow_combined and dest!=src+1: if not use_label_information: #all assigned the same label information lbl = 0 else: lbl = label_dict['flow'] out_ind[lbl].append((src, src+1)) out_data[lbl].append(1.0) in_ind[lbl].append((src+1, src)) in_data[lbl].append(1.0) count = count + 1 if flow_combined: max_labels = max_labels + 1 if not use_label_information: max_labels = 1 for lbl in range(max_labels): if lbl not in out_ind: adj_out[lbl] = sp.coo_matrix((max_nodes, max_nodes)) else: adj_out[lbl] = sp.coo_matrix((out_data[lbl], zip(*out_ind[lbl])), shape=(max_nodes, max_nodes)) if lbl not in in_ind: adj_in[lbl] = sp.coo_matrix((max_nodes, max_nodes)) else: adj_in[lbl] = sp.coo_matrix((in_data[lbl], zip(*in_ind[lbl])), shape=(max_nodes, max_nodes)) adj_main_in.append(adj_in) adj_main_out.append(adj_out) # print(adj_main_in) return adj_main_in, adj_main_out def create_glove_embedding_matrix(vocab, vocab_size, emb_dim, glove_path): """Builds a randomly initialised (vocab_size, emb_dim) matrix and overwrites the rows of words found in the glove file.""" emb = np.random.rand(vocab_size, emb_dim) with open(glove_path, encoding='utf-8') as f: #python 3.x support for line in f: fields = line.split() if len(fields) - 1 != emb_dim: # Sometimes there are funny unicode parsing problems that lead to different # field lengths (e.g., a word with a unicode space character that splits # into more than one column); such lines are treated as malformed. raise Exception("Found line with wrong number of dimensions (expected %d, was %d): %s" % (emb_dim, len(fields) - 1, line)) word = fields[0] if word in vocab._word_to_id: vector = np.asarray(fields[1:], dtype='float32') emb[vocab._word_to_id[word]] = vector return emb
random_line_split
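The `random_line_split` record above cuts data.py inside `word2id`, but the most protocol-like piece of the file is `example_generator`'s binary format: each record is an 8-byte length (struct format 'q') followed by a serialized tf.Example blob of exactly that many bytes. Below is a self-contained sketch of a writer and reader for that `<length><blob>` framing; the helper names and the `demo.bin` path are invented for the example, while the unpack calls mirror the read loop in the source.

```python
import struct

def write_records(path, blobs):
    """Write each blob as: 8-byte native length ('q'), then the raw bytes."""
    with open(path, "wb") as w:
        for blob in blobs:
            w.write(struct.pack("q", len(blob)))
            w.write(struct.pack("%ds" % len(blob), blob))

def read_records(path):
    """Yield blobs back, mirroring the read loop in example_generator."""
    with open(path, "rb") as r:
        while True:
            len_bytes = r.read(8)
            if not len_bytes:
                return  # end of file
            str_len = struct.unpack("q", len_bytes)[0]
            yield struct.unpack("%ds" % str_len, r.read(str_len))[0]

write_records("demo.bin", [b"example-1", b"example-2"])
assert list(read_records("demo.bin")) == [b"example-1", b"example-2"]
```

In the real pipeline each blob is `example_pb2.Example` serialized to bytes, and the generator shuffles whole files (and, in non-single-pass mode, the examples within a file) rather than yielding in order.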
data.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # Modifications Copyright 2017 Abigail See # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This file contains code to read the train/eval/test data from file and process it, and read the vocab data from file and process it""" import glob import random import struct import csv from tensorflow.core.example import example_pb2 from collections import defaultdict as ddict import pickle import scipy.sparse as sp import tensorflow as tf import numpy as np import horovod.tensorflow as hvd import collections import six import unicodedata # <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids. SENTENCE_START = '<s>' SENTENCE_END = '</s>' PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences # Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file. def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") class Vocab(object): """Vocabulary class for mapping between words and ids (integers)""" def __init__(self, vocab_file, max_size): """Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file. Args: vocab_file: path to the vocab file, which is assumed to contain "<word> <frequency>" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though. max_size: integer. The maximum size of the resulting Vocabulary.""" self._word_to_id = {} self._id_to_word = {} self._count = 0 # keeps track of total number of words in the Vocab # [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3. 
for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]: self._word_to_id[w] = self._count self._id_to_word[self._count] = w self._count += 1 # Read the vocab file and add words up to max_size with open(vocab_file, 'r') as vocab_f: for line in vocab_f: pieces = line.split() if len(pieces) != 2: print ('Warning: incorrectly formatted line in vocabulary file: %s\n' % line) continue w = pieces[0] if w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]: raise Exception( '<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\'t be in the vocab file, but %s is' % w) if w in self._word_to_id: raise Exception('Duplicated word in vocabulary file: %s' % w) self._word_to_id[w] = self._count self._id_to_word[self._count] = w self._count += 1 if max_size != 0 and self._count >= max_size: print ("max_size of vocab was specified as %i; we now have %i words. Stopping reading." % ( max_size, self._count)) break print ("Finished constructing vocabulary of %i total words. Last word added: %s" % ( self._count, self._id_to_word[self._count - 1])) def word2id(self, word): """Returns the id (integer) of a word (string). Returns [UNK] id if word is OOV.""" if word not in self._word_to_id: return self._word_to_id[UNKNOWN_TOKEN] return self._word_to_id[word] def id2word(self, word_id): """Returns the word (string) corresponding to an id (integer).""" if word_id not in self._id_to_word: raise ValueError('Id not found in vocab: %d' % word_id) return self._id_to_word[word_id] def size(self): """Returns the total size of the vocabulary""" return self._count def write_metadata(self, fpath): """Writes metadata file for Tensorboard word embedding visualizer as described here: https://www.tensorflow.org/get_started/embedding_viz Args: fpath: place to write the metadata file """ print "Writing word embedding metadata file to %s..." % (fpath) with open(fpath, "w") as f: fieldnames = ['word'] writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames) for i in xrange(self.size()): writer.writerow({"word": self._id_to_word[i]}) def set_glove_embedding(self,fpath,embedding_dim): """ Creates glove embedding_matrix from file path""" emb = np.random.randn(self._count,embedding_dim) # tf.logging.info(emb[0]) with open(fpath) as f: #python 3.x support for k,line in enumerate(f): fields = line.split() if len(fields) - 1 != embedding_dim: # Sometimes there are funny unicode parsing problems that lead to different # fields lengths (e.g., a word with a unicode space character that splits # into more than one colum n). We skip those lines. Note that if you have # some kind of long header, this could result in all of your lines getting # skipped. It's hard to check for that here; you just have to look in the # embedding_misses_file and at the model summary to make sure things look # like they are supposed to. #logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s", # embedding_dim, len(fields) - 1, line) raise Exception("Found line with wrong number of dimensions (expected %d, was %d): %s", embedding_dim, len(fields) - 1, line) continue word = fields[0] if word in self._word_to_id: vector = np.asarray(fields[1:], dtype='float32') emb[self._word_to_id[word]] = vector # if k%1000 == 0: # tf.logging.info('glove : %d',k) self.glove_emb = emb class BertVocab(object): """ While glove_vocab has been used as default. The term glove is misnomer. 
Glove_vocab represents normal vocab in this file This function converts individual tokens to their respective word piece tokens """ def __init__(self, glove_vocab, bert_vocab_file_path): self.bert_vocab = collections.OrderedDict() self.glove_vocab = glove_vocab index = 0 with tf.gfile.GFile(bert_vocab_file_path, "r") as reader: #obtain bert vocab while True: token = convert_to_unicode(reader.readline()) if not token: break token = token.strip() self.bert_vocab[token] = index index += 1 not_found = 0 self.index_map_glove_to_bert = {} for i in range(glove_vocab._count): if glove_vocab._id_to_word[i] in self.bert_vocab: self.index_map_glove_to_bert[i] = [self.bert_vocab[glove_vocab._id_to_word[i]]] else: #Word Piece Tokenizer not_found = not_found + 1 new_tokens = [] token = glove_vocab._id_to_word[i] chars = list(token) is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.bert_vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: new_tokens.append(self.bert_vocab['[UNK]']) else: sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens] new_tokens = new_tokens + sub_tokens_bert self.index_map_glove_to_bert[i] = new_tokens tf.logging.info(not_found) def convert_glove_to_bert_indices(self, token_ids): """ Converts words to their respective word-piece tokenized indices token_ids : ids from the word """ new_tokens = [self.bert_vocab['[CLS]']] #As pert the bert repo instructions offset = 1 pos_offset = [] for token_id in token_ids: pos_offset.append(offset) #wordpiece tokenizer can return more than one index hence we maintain an offset array. This is useful for the BERT + GCN experiments. if token_id in self.index_map_glove_to_bert: bert_tokens = self.index_map_glove_to_bert[token_id] offset = offset + len(bert_tokens) - 1 #new_tokens.append(self.index_map_glove_to_bert[token_id]) new_tokens = new_tokens + bert_tokens else: #wordpiece might be redundant for training data. Keep for unseen instances token = glove_vocab._id_to_word[token_id] chars = list(token) is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end:
if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: #new_tokens.append(self.index_map_glove_to_bert['[UNK]']) new_token = new_token + self.index_map_glove_to_bert['[UNK]'] else: sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens] new_tokens = new_tokens + sub_tokens_bert offset = offset + len(sub_tokens_bert) - 1 new_tokens.append(self.bert_vocab['[SEP]']) return new_tokens, pos_offset def convert_by_vocab(vocab, items): """Converts a sequence of [tokens|ids] using the vocab.""" output = [] for item in items: if item in vocab: output.append(vocab[item]) else: output.append(vocab['[UNK]']) return output def convert_tokens_to_ids(vocab, tokens): return convert_by_vocab(vocab, tokens) def convert_ids_to_tokens(inv_vocab, ids): return convert_by_vocab(inv_vocab, ids) def example_generator(data_path, single_pass, device_rank,data_as_tf_example=True): """Generates tf.Examples from data files. Binary data format: <length><blob>. <length> represents the byte size of <blob>. <blob> is serialized tf.Example proto. The tf.Example contains the tokenized article text and summary. Args: data_path: Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all. single_pass: Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely. Yields: Deserialized tf.Example. """ random.seed(device_rank+1) if data_as_tf_example: epoch = 0 while True: filelist = glob.glob(data_path) # get the list of datafiles assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty if single_pass: filelist = sorted(filelist) else: random.shuffle(filelist) #tf.logging.info(filelist) for file_no, f in enumerate(filelist): reader = open(f, 'rb') all_examples = [] while True: len_bytes = reader.read(8) if not len_bytes: if not single_pass: random.shuffle(all_examples) for k in all_examples: yield example_pb2.Example.FromString(k), epoch break # finished reading this file str_len = struct.unpack('q', len_bytes)[0] example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0] all_examples.append(example_str) if single_pass: print "example_generator completed reading all datafiles. No more data." break else: #pickle format while True: if single_pass: for data_ in data_path: for i in data_: yield i else: random.shuffle(data_path) for data_ in data_path: new_data = data_ x = np.arange(len(new_data)) np.random.shuffle(x) # random.shuffle(new_data) for i in x: yield new_data[i] if single_pass: break def article2ids(article_words, vocab): """Map the article words to their ids. Also return a list of OOVs in the article. Args: article_words: list of words (strings) vocab: Vocabulary object Returns: ids: A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002. oovs: A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers.""" ids = [] oovs = [] unk_id = vocab.word2id(UNKNOWN_TOKEN) for w in article_words: i = vocab.word2id(w) if i == unk_id: # If w is OOV if w not in oovs: # Add to list of OOVs oovs.append(w) oov_num = oovs.index(w) # This is 0 for the first article OOV, 1 for the second article OOV... 
ids.append(vocab.size() + oov_num) # This is e.g. 50000 for the first article OOV, 50001 for the second... else: ids.append(i) return ids, oovs def abstract2ids(abstract_words, vocab, article_oovs): """Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers. Args: abstract_words: list of words (strings) vocab: Vocabulary object article_oovs: list of in-article OOV words (strings), in the order corresponding to their temporary article OOV numbers Returns: ids: List of ids (integers). In-article OOV words are mapped to their temporary OOV numbers. Out-of-article OOV words are mapped to the UNK token id.""" ids = [] unk_id = vocab.word2id(UNKNOWN_TOKEN) for w in abstract_words: i = vocab.word2id(w) if i == unk_id: # If w is an OOV word if w in article_oovs: # If w is an in-article OOV vocab_idx = vocab.size() + article_oovs.index(w) # Map to its temporary article OOV number ids.append(vocab_idx) else: # If w is an out-of-article OOV ids.append(unk_id) # Map to the UNK token id else: ids.append(i) return ids def outputids2words(id_list, vocab, article_oovs): """Maps output ids to words, including mapping in-article OOVs from their temporary ids to the original OOV string (applicable in pointer-generator mode). Args: id_list: list of ids (integers) vocab: Vocabulary object article_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids (that have been assigned in pointer-generator mode), or None (in baseline mode) Returns: words: list of words (strings) """ words = [] for i in id_list: try: w = vocab.id2word(i) # might be [UNK] except ValueError as e: # w is OOV assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode" article_oov_idx = i - vocab.size() try: w = article_oovs[article_oov_idx] except ValueError as e: # i doesn't correspond to an article oov raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs))) words.append(w) return words def abstract2sents(abstract): """Splits abstract text from datafile into list of sentences. Args: abstract: string containing <s> and </s> tags for starts and ends of sentences Returns: sents: List of sentence strings (no tags)""" cur = 0 sents = [] while True: try: start_p = abstract.index(SENTENCE_START, cur) end_p = abstract.index(SENTENCE_END, start_p + 1) cur = end_p + len(SENTENCE_END) sents.append(abstract[start_p + len(SENTENCE_START):end_p].strip()) except ValueError as e: # no more sentences return sents def show_art_oovs(article, vocab): """Returns the article string, highlighting the OOVs by placing __underscores__ around them""" unk_token = vocab.word2id(UNKNOWN_TOKEN) words = article.split(' ') words = [("__%s__" % w) if vocab.word2id(w) == unk_token else w for w in words] out_str = ' '.join(words) return out_str def show_abs_oovs(abstract, vocab, article_oovs): """Returns the abstract string, highlighting the article OOVs with __underscores__. If a list of article_oovs is provided, non-article OOVs are differentiated like !!__this__!!. 
Args: abstract: string vocab: Vocabulary object article_oovs: list of words (strings), or None (in baseline mode) """ unk_token = vocab.word2id(UNKNOWN_TOKEN) words = abstract.split(' ') new_words = [] for w in words: if vocab.word2id(w) == unk_token: # w is oov if article_oovs is None: # baseline mode new_words.append("__%s__" % w) else: # pointer-generator mode if w in article_oovs: new_words.append("__%s__" % w) else: new_words.append("!!__%s__!!" % w) else: # w is in-vocab word new_words.append(w) out_str = ' '.join(new_words) return out_str dep_list = ['cc', 'agent', 'ccomp', 'prt', 'meta', 'nsubjpass', 'csubj', 'conj', 'amod', 'poss', 'neg', 'csubjpass', 'mark', 'auxpass', 'advcl', 'aux', 'ROOT', 'prep', 'parataxis', 'xcomp', 'nsubj', 'nummod', 'advmod', 'punct', 'quantmod', 'acomp', 'compound', 'pcomp', 'intj', 'relcl', 'npadvmod', 'case', 'attr', 'dep', 'appos', 'det', 'nmod', 'dobj', 'dative', 'pobj', 'expl', 'predet', 'preconj', 'oprd', 'acl', 'flow'] dep_dict = {label: i for i, label in enumerate(dep_list)} def get_specific_adj(batch_list, batch_size, max_nodes, label, encoder_lengths, use_both=True, keep_prob=1.0, use_bert=False, bert_mapping=None, max_length=300): adj_main_in = [] adj_main_out = [] if bert_mapping is None: bert_mapping = [[] for i in range(len(batch_list))] #empty array for allowing in the next loop for edge_list, enc_length, offset_list in zip(batch_list, encoder_lengths, bert_mapping): #print(edge_list) curr_adj_in = [] curr_adj_out = [] curr_data_in = [] curr_data_out = [] seen_nodes = [] for s, d, lbl in edge_list: if s >=max_nodes or d >=max_nodes or s>=max_length or d>=max_length: continue if use_bert: src = s + offset_list[s] dest = d + offset_list[d] else: src = s dest = d seen_nodes.append(src) seen_nodes.append(dest) if lbl!=label: continue #if src >= max_nodes or dest >= max_nodes: # continue x = np.random.uniform() if x<=keep_prob: curr_adj_out.append((src, dest)) curr_data_out.append(1.0) if use_both: curr_adj_in.append((dest, src)) curr_data_in.append(1.0) else: curr_adj_out.append((dest, src)) curr_data_out.append(1.0) ''' Use this snippet when you need to use the A + I condition (refer README) seen_nodes = list(set(seen_nodes)) for src in range(enc_length): #A + I for entity and coref curr_adj_out.append((src, src)) curr_data_out.append(1.0) if use_both: curr_adj_in.append((src, src)) curr_data_in.append(1.0) ''' if len(curr_adj_in) == 0: adj_in = sp.coo_matrix((max_nodes, max_nodes)) else: adj_in = sp.coo_matrix((curr_data_in, zip(*curr_adj_in)), shape=(max_nodes, max_nodes)) if len(curr_adj_out) == 0: adj_out = sp.coo_matrix((max_nodes, max_nodes)) else: adj_out = sp.coo_matrix((curr_data_out, zip(*curr_adj_out)), shape=(max_nodes, max_nodes)) adj_main_in.append(adj_in) adj_main_out.append(adj_out) return adj_main_in, adj_main_out def get_adj(batch_list, batch_size, max_nodes, use_label_information=True, label_dict=dep_dict,flow_alone=False, flow_combined=False, keep_prob=1.0, use_bert=False, bert_mapping=None, max_length=300): adj_main_in, adj_main_out = [], [] max_labels = 45 if bert_mapping is None: bert_mapping = [[] for i in range(len(batch_list))] #empty array for allowing in the next loop for edge_list, offset_list in zip(batch_list, bert_mapping): adj_in, adj_out = {}, {} l_e = len(edge_list) in_ind, in_data = ddict(list), ddict(list) out_ind, out_data = ddict(list), ddict(list) count = 0 for s, d, lbl_ in edge_list: if s>=max_nodes or d >= max_nodes or s>=max_length or d>=max_length: continue if use_bert: try: src = s + 
offset_list[s] except: tf.logging.info(s) tf.logging.info(len(offset_list)) dest = d + offset_list[d] else: src = s dest = d #if src >= max_nodes or dest >= max_nodes: # continue if flow_alone: lbl = 0 if src+1 < max_nodes: x = np.random.uniform() if x<= keep_prob: out_ind[lbl].append((src, src+1)) out_data[lbl].append(1.0) x = np.random.uniform() if x<=keep_prob: in_ind[lbl].append((src+1, src)) in_data[lbl].append(1.0) else: if lbl_ not in label_dict: continue lbl = label_dict[lbl_] if not use_label_information: #all assigned the same label information lbl = 0 x = np.random.uniform() if x<=keep_prob: out_ind[lbl].append((src, dest)) out_data[lbl].append(1.0) x = np.random.uniform() if x<=keep_prob: in_ind[lbl].append((dest, src)) in_data[lbl].append(1.0) if flow_combined and dest!=src+1: if not use_label_information: #all assigned the same label information lbl = 0 else: lbl = label_dict['flow'] out_ind[lbl].append((src, src+1)) out_data[lbl].append(1.0) in_ind[lbl].append((src+1, src)) in_data[lbl].append(1.0) count = count + 1 if flow_combined: max_labels = max_labels + 1 if not use_label_information: max_labels = 1 for lbl in range(max_labels): if lbl not in out_ind: adj_out[lbl] = sp.coo_matrix((max_nodes, max_nodes)) else: adj_out[lbl] = sp.coo_matrix((out_data[lbl], zip(*out_ind[lbl])), shape=(max_nodes, max_nodes)) if lbl not in in_ind: adj_in[lbl] = sp.coo_matrix((max_nodes, max_nodes)) else: adj_in[lbl] = sp.coo_matrix((in_data[lbl], zip(*in_ind[lbl])), shape=(max_nodes, max_nodes)) adj_main_in.append(adj_in) adj_main_out.append(adj_out) # print(adj_main_in) return adj_main_in, adj_main_out def create_glove_embedding_matrix (vocab,vocab_size,emb_dim,glove_path): emb = np.random.rand(vocab_size,emb_dim) count = 0 with open(args['glove_path'],encoding='utf-8') as f: #python 3.x support #all_lines = [] #with codecs.open(args['glove_path'],'r',encoding='utf-8') as f: #python 2.x support for line in f: fields = line.split() if len(fields) - 1 != embedding_dim: # Sometimes there are funny unicode parsing problems that lead to different # fields lengths (e.g., a word with a unicode space character that splits # into more than one colum n). We skip those lines. Note that if you have # some kind of long header, this could result in all of your lines getting # skipped. It's hard to check for that here; you just have to look in the # embedding_misses_file and at the model summary to make sure things look # like they are supposed to. #logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s", # embedding_dim, len(fields) - 1, line) raise Exception("Found line with wrong number of dimensions (expected %d, was %d): %s", embedding_dim, len(fields) - 1, line) continue word = fields[0] if word in w2id: vector = np.asarray(fields[1:], dtype='float32') return emb
substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1
conditional_block
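The `conditional_block` middle for this record is the inner loop of BertVocab's WordPiece fallback: greedy longest-match-first. Starting at `start`, it tries the longest remaining substring and shrinks `end` until the vocab contains it (continuation pieces get a "##" prefix); if nothing matches, the whole token is unsplittable and maps to [UNK]. A standalone sketch of that algorithm follows; the tiny vocabulary is invented for the demo.

```python
def wordpiece(token, vocab):
    """Greedy longest-match-first subword split, as in BertVocab above."""
    chars, start, sub_tokens = list(token), 0, []
    while start < len(chars):
        end, cur_substr = len(chars), None
        while start < end:
            substr = "".join(chars[start:end])
            if start > 0:
                substr = "##" + substr          # continuation-piece marker
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            return ["[UNK]"]                    # no prefix matched: unsplittable
        sub_tokens.append(cur_substr)
        start = end
    return sub_tokens

print(wordpiece("unaffable", {"un", "##aff", "##able"}))  # ['un', '##aff', '##able']
```

Greedy longest-match keeps the split deterministic and cheap (no search over all segmentations), at the cost of occasionally producing an all-or-nothing [UNK] when a middle piece is missing from the vocabulary.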
data.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # Modifications Copyright 2017 Abigail See # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This file contains code to read the train/eval/test data from file and process it, and read the vocab data from file and process it""" import glob import random import struct import csv from tensorflow.core.example import example_pb2 from collections import defaultdict as ddict import pickle import scipy.sparse as sp import tensorflow as tf import numpy as np import horovod.tensorflow as hvd import collections import six import unicodedata # <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids. SENTENCE_START = '<s>' SENTENCE_END = '</s>' PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences # Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file. def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") class Vocab(object): """Vocabulary class for mapping between words and ids (integers)""" def __init__(self, vocab_file, max_size): """Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file. Args: vocab_file: path to the vocab file, which is assumed to contain "<word> <frequency>" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though. max_size: integer. The maximum size of the resulting Vocabulary.""" self._word_to_id = {} self._id_to_word = {} self._count = 0 # keeps track of total number of words in the Vocab # [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3. 
        for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
            self._word_to_id[w] = self._count
            self._id_to_word[self._count] = w
            self._count += 1

        # Read the vocab file and add words up to max_size
        with open(vocab_file, 'r') as vocab_f:
            for line in vocab_f:
                pieces = line.split()
                if len(pieces) != 2:
                    print('Warning: incorrectly formatted line in vocabulary file: %s\n' % line)
                    continue
                w = pieces[0]
                if w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
                    raise Exception(
                        '<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\'t be in the vocab file, but %s is' % w)
                if w in self._word_to_id:
                    raise Exception('Duplicated word in vocabulary file: %s' % w)
                self._word_to_id[w] = self._count
                self._id_to_word[self._count] = w
                self._count += 1
                if max_size != 0 and self._count >= max_size:
                    print("max_size of vocab was specified as %i; we now have %i words. Stopping reading." % (
                        max_size, self._count))
                    break

        print("Finished constructing vocabulary of %i total words. Last word added: %s" % (
            self._count, self._id_to_word[self._count - 1]))

    def word2id(self, word):
        """Returns the id (integer) of a word (string). Returns [UNK] id if word is OOV."""
        if word not in self._word_to_id:
            return self._word_to_id[UNKNOWN_TOKEN]
        return self._word_to_id[word]

    def id2word(self, word_id):
        """Returns the word (string) corresponding to an id (integer)."""
        if word_id not in self._id_to_word:
            raise ValueError('Id not found in vocab: %d' % word_id)
        return self._id_to_word[word_id]

    def size(self):
        """Returns the total size of the vocabulary"""
        return self._count

    def write_metadata(self, fpath):
        """Writes metadata file for Tensorboard word embedding visualizer as described here:
        https://www.tensorflow.org/get_started/embedding_viz

        Args:
          fpath: place to write the metadata file
        """
        print("Writing word embedding metadata file to %s..." % (fpath))
        with open(fpath, "w") as f:
            fieldnames = ['word']
            writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
            for i in range(self.size()):
                writer.writerow({"word": self._id_to_word[i]})

    def set_glove_embedding(self, fpath, embedding_dim):
        """Creates glove embedding_matrix from file path"""
        emb = np.random.randn(self._count, embedding_dim)
        # tf.logging.info(emb[0])
        with open(fpath) as f:  # python 3.x support
            for k, line in enumerate(f):
                fields = line.split()
                if len(fields) - 1 != embedding_dim:
                    # Sometimes there are funny unicode parsing problems that lead to different
                    # field lengths (e.g., a word with a unicode space character that splits
                    # into more than one column). Note that if you have some kind of long
                    # header, this raise will trigger on the very first line.
                    # logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s",
                    #                embedding_dim, len(fields) - 1, line)
                    raise Exception("Found line with wrong number of dimensions (expected %d, was %d): %s"
                                    % (embedding_dim, len(fields) - 1, line))
                word = fields[0]
                if word in self._word_to_id:
                    vector = np.asarray(fields[1:], dtype='float32')
                    emb[self._word_to_id[word]] = vector
                    # if k % 1000 == 0:
                    #     tf.logging.info('glove : %d', k)
        self.glove_emb = emb


class BertVocab(object):
    """Maps entries of an existing word-level vocabulary to BERT word-piece ids.

    glove_vocab has been used as the default; the term "glove" is a misnomer --
    it represents the normal word-level vocab in this file. This class converts
    individual tokens to their respective word piece tokens."""

    def __init__(self, glove_vocab, bert_vocab_file_path):
        self.bert_vocab = collections.OrderedDict()
        self.glove_vocab = glove_vocab
        index = 0
        with tf.gfile.GFile(bert_vocab_file_path, "r") as reader:  # obtain bert vocab
            while True:
                token = convert_to_unicode(reader.readline())
                if not token:
                    break
                token = token.strip()
                self.bert_vocab[token] = index
                index += 1

        not_found = 0
        self.index_map_glove_to_bert = {}
        for i in range(glove_vocab._count):
            if glove_vocab._id_to_word[i] in self.bert_vocab:
                self.index_map_glove_to_bert[i] = [self.bert_vocab[glove_vocab._id_to_word[i]]]
            else:
                # Word Piece Tokenizer
                not_found = not_found + 1
                new_tokens = []
                token = glove_vocab._id_to_word[i]
                chars = list(token)
                is_bad = False
                start = 0
                sub_tokens = []
                while start < len(chars):
                    end = len(chars)
                    cur_substr = None
                    while start < end:
                        substr = "".join(chars[start:end])
                        if start > 0:
                            substr = "##" + substr
                        if substr in self.bert_vocab:
                            cur_substr = substr
                            break
                        end -= 1
                    if cur_substr is None:
                        is_bad = True
                        break
                    sub_tokens.append(cur_substr)
                    start = end
                if is_bad:
                    new_tokens.append(self.bert_vocab['[UNK]'])
                else:
                    sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens]
                    new_tokens = new_tokens + sub_tokens_bert
                self.index_map_glove_to_bert[i] = new_tokens
        tf.logging.info(not_found)

    def convert_glove_to_bert_indices(self, token_ids):
        """Converts words to their respective word-piece tokenized indices

        token_ids: ids from the word"""
        new_tokens = [self.bert_vocab['[CLS]']]  # As per the bert repo instructions
        offset = 1
        pos_offset = []
        for token_id in token_ids:
            pos_offset.append(offset)
            # wordpiece tokenizer can return more than one index, hence we maintain an
            # offset array. This is useful for the BERT + GCN experiments.
            if token_id in self.index_map_glove_to_bert:
                bert_tokens = self.index_map_glove_to_bert[token_id]
                offset = offset + len(bert_tokens) - 1
                new_tokens = new_tokens + bert_tokens
            else:
                # wordpiece might be redundant for training data. Keep for unseen instances
                token = self.glove_vocab._id_to_word[token_id]
                chars = list(token)
                is_bad = False
                start = 0
                sub_tokens = []
                while start < len(chars):
                    end = len(chars)
                    cur_substr = None
                    while start < end:
                        substr = "".join(chars[start:end])
                        if start > 0:
                            substr = "##" + substr
                        if substr in self.bert_vocab:
                            cur_substr = substr
                            break
                        end -= 1
                    if cur_substr is None:
                        is_bad = True
                        break
                    sub_tokens.append(cur_substr)
                    start = end
                if is_bad:
                    new_tokens.append(self.bert_vocab['[UNK]'])
                else:
                    sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens]
                    new_tokens = new_tokens + sub_tokens_bert
                    offset = offset + len(sub_tokens_bert) - 1
        new_tokens.append(self.bert_vocab['[SEP]'])
        return new_tokens, pos_offset


def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab."""
    output = []
    for item in items:
        if item in vocab:
            output.append(vocab[item])
        else:
            output.append(vocab['[UNK]'])
    return output


def convert_tokens_to_ids(vocab, tokens):
    return convert_by_vocab(vocab, tokens)


def convert_ids_to_tokens(inv_vocab, ids):
    return convert_by_vocab(inv_vocab, ids)


def example_generator(data_path, single_pass, device_rank, data_as_tf_example=True):
    """Generates tf.Examples from data files.

    Binary data format: <length><blob>. <length> represents the byte size
    of <blob>. <blob> is serialized tf.Example proto.
    The tf.Example contains the tokenized article text and summary.

    Args:
      data_path: Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
      single_pass: Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.

    Yields:
      Deserialized tf.Example.
    """
    random.seed(device_rank + 1)
    if data_as_tf_example:
        epoch = 0
        while True:
            filelist = glob.glob(data_path)  # get the list of datafiles
            assert filelist, ('Error: Empty filelist at %s' % data_path)  # check filelist isn't empty
            if single_pass:
                filelist = sorted(filelist)
            else:
                random.shuffle(filelist)
            # tf.logging.info(filelist)
            for file_no, f in enumerate(filelist):
                reader = open(f, 'rb')
                all_examples = []
                while True:
                    len_bytes = reader.read(8)
                    if not len_bytes:
                        if not single_pass:
                            random.shuffle(all_examples)
                        for k in all_examples:
                            yield example_pb2.Example.FromString(k), epoch
                        break  # finished reading this file
                    str_len = struct.unpack('q', len_bytes)[0]
                    example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
                    all_examples.append(example_str)
            if single_pass:
                print("example_generator completed reading all datafiles. No more data.")
                break
    else:  # pickle format
        while True:
            if single_pass:
                for data_ in data_path:
                    for i in data_:
                        yield i
            else:
                random.shuffle(data_path)
                for data_ in data_path:
                    new_data = data_
                    x = np.arange(len(new_data))
                    np.random.shuffle(x)
                    # random.shuffle(new_data)
                    for i in x:
                        yield new_data[i]
            if single_pass:
                break


def article2ids(article_words, vocab):
    """Map the article words to their ids. Also return a list of OOVs in the article.

    Args:
      article_words: list of words (strings)
      vocab: Vocabulary object

    Returns:
      ids: A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
      oovs: A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers."""
    ids = []
    oovs = []
    unk_id = vocab.word2id(UNKNOWN_TOKEN)
    for w in article_words:
        i = vocab.word2id(w)
        if i == unk_id:  # If w is OOV
            if w not in oovs:  # Add to list of OOVs
                oovs.append(w)
            oov_num = oovs.index(w)  # This is 0 for the first article OOV, 1 for the second article OOV...
            ids.append(vocab.size() + oov_num)  # This is e.g. 50000 for the first article OOV, 50001 for the second...
        else:
            ids.append(i)
    return ids, oovs


def abstract2ids(abstract_words, vocab, article_oovs):
    """Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers.

    Args:
      abstract_words: list of words (strings)
      vocab: Vocabulary object
      article_oovs: list of in-article OOV words (strings), in the order corresponding to their temporary article OOV numbers

    Returns:
      ids: List of ids (integers). In-article OOV words are mapped to their temporary OOV numbers.
      Out-of-article OOV words are mapped to the UNK token id."""
    ids = []
    unk_id = vocab.word2id(UNKNOWN_TOKEN)
    for w in abstract_words:
        i = vocab.word2id(w)
        if i == unk_id:  # If w is an OOV word
            if w in article_oovs:  # If w is an in-article OOV
                vocab_idx = vocab.size() + article_oovs.index(w)  # Map to its temporary article OOV number
                ids.append(vocab_idx)
            else:  # If w is an out-of-article OOV
                ids.append(unk_id)  # Map to the UNK token id
        else:
            ids.append(i)
    return ids


def outputids2words(id_list, vocab, article_oovs):
    """Maps output ids to words, including mapping in-article OOVs from their temporary ids to the original OOV string (applicable in pointer-generator mode).

    Args:
      id_list: list of ids (integers)
      vocab: Vocabulary object
      article_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids (that have been assigned in pointer-generator mode), or None (in baseline mode)

    Returns:
      words: list of words (strings)
    """
    words = []
    for i in id_list:
        try:
            w = vocab.id2word(i)  # might be [UNK]
        except ValueError:  # w is OOV
            assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
            article_oov_idx = i - vocab.size()
            try:
                w = article_oovs[article_oov_idx]
            except IndexError:  # i doesn't correspond to an article oov (list indexing raises IndexError, not ValueError)
                raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs)))
        words.append(w)
    return words


def
(abstract):
    """Splits abstract text from datafile into list of sentences.

    Args:
      abstract: string containing <s> and </s> tags for starts and ends of sentences

    Returns:
      sents: List of sentence strings (no tags)"""
    cur = 0
    sents = []
    while True:
        try:
            start_p = abstract.index(SENTENCE_START, cur)
            end_p = abstract.index(SENTENCE_END, start_p + 1)
            cur = end_p + len(SENTENCE_END)
            sents.append(abstract[start_p + len(SENTENCE_START):end_p].strip())
        except ValueError:  # no more sentences
            return sents


def show_art_oovs(article, vocab):
    """Returns the article string, highlighting the OOVs by placing __underscores__ around them"""
    unk_token = vocab.word2id(UNKNOWN_TOKEN)
    words = article.split(' ')
    words = [("__%s__" % w) if vocab.word2id(w) == unk_token else w for w in words]
    out_str = ' '.join(words)
    return out_str


def show_abs_oovs(abstract, vocab, article_oovs):
    """Returns the abstract string, highlighting the article OOVs with __underscores__.

    If a list of article_oovs is provided, non-article OOVs are differentiated like !!__this__!!.

    Args:
      abstract: string
      vocab: Vocabulary object
      article_oovs: list of words (strings), or None (in baseline mode)
    """
    unk_token = vocab.word2id(UNKNOWN_TOKEN)
    words = abstract.split(' ')
    new_words = []
    for w in words:
        if vocab.word2id(w) == unk_token:  # w is oov
            if article_oovs is None:  # baseline mode
                new_words.append("__%s__" % w)
            else:  # pointer-generator mode
                if w in article_oovs:
                    new_words.append("__%s__" % w)
                else:
                    new_words.append("!!__%s__!!" % w)
        else:  # w is in-vocab word
            new_words.append(w)
    out_str = ' '.join(new_words)
    return out_str


dep_list = ['cc', 'agent', 'ccomp', 'prt', 'meta', 'nsubjpass', 'csubj', 'conj', 'amod', 'poss', 'neg',
            'csubjpass', 'mark', 'auxpass', 'advcl', 'aux', 'ROOT', 'prep', 'parataxis', 'xcomp', 'nsubj',
            'nummod', 'advmod', 'punct', 'quantmod', 'acomp', 'compound', 'pcomp', 'intj', 'relcl',
            'npadvmod', 'case', 'attr', 'dep', 'appos', 'det', 'nmod', 'dobj', 'dative', 'pobj', 'expl',
            'predet', 'preconj', 'oprd', 'acl', 'flow']
dep_dict = {label: i for i, label in enumerate(dep_list)}


def get_specific_adj(batch_list, batch_size, max_nodes, label, encoder_lengths, use_both=True, keep_prob=1.0,
                     use_bert=False, bert_mapping=None, max_length=300):
    adj_main_in = []
    adj_main_out = []
    if bert_mapping is None:
        bert_mapping = [[] for i in range(len(batch_list))]  # empty lists so the zip below still iterates
    for edge_list, enc_length, offset_list in zip(batch_list, encoder_lengths, bert_mapping):
        # print(edge_list)
        curr_adj_in = []
        curr_adj_out = []
        curr_data_in = []
        curr_data_out = []
        seen_nodes = []
        for s, d, lbl in edge_list:
            if s >= max_nodes or d >= max_nodes or s >= max_length or d >= max_length:
                continue
            if use_bert:
                src = s + offset_list[s]
                dest = d + offset_list[d]
            else:
                src = s
                dest = d
            seen_nodes.append(src)
            seen_nodes.append(dest)
            if lbl != label:
                continue
            # if src >= max_nodes or dest >= max_nodes:
            #     continue
            x = np.random.uniform()
            if x <= keep_prob:
                curr_adj_out.append((src, dest))
                curr_data_out.append(1.0)
                if use_both:
                    curr_adj_in.append((dest, src))
                    curr_data_in.append(1.0)
                else:
                    curr_adj_out.append((dest, src))
                    curr_data_out.append(1.0)
        '''
        Use this snippet when you need to use the A + I condition (refer README)
        seen_nodes = list(set(seen_nodes))
        for src in range(enc_length):  # A + I for entity and coref
            curr_adj_out.append((src, src))
            curr_data_out.append(1.0)
            if use_both:
                curr_adj_in.append((src, src))
                curr_data_in.append(1.0)
        '''
        if len(curr_adj_in) == 0:
            adj_in = sp.coo_matrix((max_nodes, max_nodes))
        else:
            adj_in = sp.coo_matrix((curr_data_in, zip(*curr_adj_in)), shape=(max_nodes, max_nodes))
        if len(curr_adj_out) == 0:
            adj_out = sp.coo_matrix((max_nodes, max_nodes))
        else:
            adj_out = sp.coo_matrix((curr_data_out, zip(*curr_adj_out)), shape=(max_nodes, max_nodes))
        adj_main_in.append(adj_in)
        adj_main_out.append(adj_out)
    return adj_main_in, adj_main_out


def get_adj(batch_list, batch_size, max_nodes, use_label_information=True, label_dict=dep_dict, flow_alone=False,
            flow_combined=False, keep_prob=1.0, use_bert=False, bert_mapping=None, max_length=300):
    adj_main_in, adj_main_out = [], []
    max_labels = 45
    if bert_mapping is None:
        bert_mapping = [[] for i in range(len(batch_list))]  # empty lists so the zip below still iterates
    for edge_list, offset_list in zip(batch_list, bert_mapping):
        adj_in, adj_out = {}, {}
        l_e = len(edge_list)
        in_ind, in_data = ddict(list), ddict(list)
        out_ind, out_data = ddict(list), ddict(list)
        count = 0
        for s, d, lbl_ in edge_list:
            if s >= max_nodes or d >= max_nodes or s >= max_length or d >= max_length:
                continue
            if use_bert:
                try:
                    src = s + offset_list[s]
                except IndexError:
                    tf.logging.info(s)
                    tf.logging.info(len(offset_list))
                    continue  # skip edges whose source token has no BERT offset
                dest = d + offset_list[d]
            else:
                src = s
                dest = d
            # if src >= max_nodes or dest >= max_nodes:
            #     continue
            if flow_alone:
                lbl = 0
                if src + 1 < max_nodes:
                    x = np.random.uniform()
                    if x <= keep_prob:
                        out_ind[lbl].append((src, src + 1))
                        out_data[lbl].append(1.0)
                    x = np.random.uniform()
                    if x <= keep_prob:
                        in_ind[lbl].append((src + 1, src))
                        in_data[lbl].append(1.0)
            else:
                if lbl_ not in label_dict:
                    continue
                lbl = label_dict[lbl_]
                if not use_label_information:  # all edges are assigned the same label
                    lbl = 0
                x = np.random.uniform()
                if x <= keep_prob:
                    out_ind[lbl].append((src, dest))
                    out_data[lbl].append(1.0)
                x = np.random.uniform()
                if x <= keep_prob:
                    in_ind[lbl].append((dest, src))
                    in_data[lbl].append(1.0)
                if flow_combined and dest != src + 1:
                    if not use_label_information:  # all edges are assigned the same label
                        lbl = 0
                    else:
                        lbl = label_dict['flow']
                    out_ind[lbl].append((src, src + 1))
                    out_data[lbl].append(1.0)
                    in_ind[lbl].append((src + 1, src))
                    in_data[lbl].append(1.0)
            count = count + 1
        if flow_combined:
            max_labels = 46  # 45 dependency labels plus the extra 'flow' label; assign rather than increment so this stays idempotent across batch examples
        if not use_label_information:
            max_labels = 1
        for lbl in range(max_labels):
            if lbl not in out_ind:
                adj_out[lbl] = sp.coo_matrix((max_nodes, max_nodes))
            else:
                adj_out[lbl] = sp.coo_matrix((out_data[lbl], zip(*out_ind[lbl])), shape=(max_nodes, max_nodes))
            if lbl not in in_ind:
                adj_in[lbl] = sp.coo_matrix((max_nodes, max_nodes))
            else:
                adj_in[lbl] = sp.coo_matrix((in_data[lbl], zip(*in_ind[lbl])), shape=(max_nodes, max_nodes))
        adj_main_in.append(adj_in)
        adj_main_out.append(adj_out)
    # print(adj_main_in)
    return adj_main_in, adj_main_out


def create_glove_embedding_matrix(vocab, vocab_size, emb_dim, glove_path):
    emb = np.random.rand(vocab_size, emb_dim)
    with open(glove_path, encoding='utf-8') as f:  # python 3.x support
        # with codecs.open(glove_path, 'r', encoding='utf-8') as f:  # python 2.x support
        for line in f:
            fields = line.split()
            if len(fields) - 1 != emb_dim:
                # Sometimes there are funny unicode parsing problems that lead to different
                # field lengths (e.g., a word with a unicode space character that splits
                # into more than one column). Note that if you have some kind of long
                # header, this raise will trigger on the very first line.
                # logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s",
                #                emb_dim, len(fields) - 1, line)
                raise Exception("Found line with wrong number of dimensions (expected %d, was %d): %s"
                                % (emb_dim, len(fields) - 1, line))
            word = fields[0]
            if word in vocab:
                # assumes `vocab` maps word -> row index; the original referenced an undefined `w2id`
                emb[vocab[word]] = np.asarray(fields[1:], dtype='float32')
    return emb
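As an aside on the suffix above: get_adj and get_specific_adj accumulate (row, col) pairs edge by edge and then transpose them with zip(*...) into the (data, (rows, cols)) triple form that scipy's COO constructor accepts. A minimal sketch of that pattern with made-up edges (the four-node graph below is illustrative only):

import scipy.sparse as sp

# Illustrative only: two directed edges 0->1 and 2->0 in a 4-node graph.
ind = [(0, 1), (2, 0)]    # (row, col) pairs collected edge by edge
data = [1.0, 1.0]         # one weight per pair
rows, cols = zip(*ind)    # transpose pairs -> rows=(0, 2), cols=(1, 0)
adj = sp.coo_matrix((data, (rows, cols)), shape=(4, 4))
print(adj.toarray())      # dense view, just for inspection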
abstract2sents
identifier_name
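For readers reproducing the <length><blob> framing that example_generator parses: below is a minimal writer/reader sketch using the same struct conventions (an 8-byte 'q' length prefix followed by the serialized proto bytes). The names write_record and read_records are my own, not part of data.py:

import struct

# Minimal sketch of the <length><blob> framing read by example_generator.
def write_record(f, example_str):
    """Append one serialized tf.Example (bytes) to an open binary file."""
    f.write(struct.pack('q', len(example_str)))
    f.write(struct.pack('%ds' % len(example_str), example_str))

def read_records(path):
    """Yield the raw serialized tf.Example blobs from one .bin file."""
    with open(path, 'rb') as reader:
        while True:
            len_bytes = reader.read(8)
            if not len_bytes:
                break  # end of file
            str_len = struct.unpack('q', len_bytes)[0]
            yield struct.unpack('%ds' % str_len, reader.read(str_len))[0]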